| column | dtype | values |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 1 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3 to 239 |
| max_stars_repo_name | string | lengths 5 to 130 |
| max_stars_repo_head_hexsha | string | lengths 40 to 78 |
| max_stars_repo_licenses | sequence | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_issues_repo_path | string | lengths 3 to 239 |
| max_issues_repo_name | string | lengths 5 to 130 |
| max_issues_repo_head_hexsha | string | lengths 40 to 78 |
| max_issues_repo_licenses | sequence | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_forks_repo_path | string | lengths 3 to 239 |
| max_forks_repo_name | string | lengths 5 to 130 |
| max_forks_repo_head_hexsha | string | lengths 40 to 78 |
| max_forks_repo_licenses | sequence | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24, nullable (⌀) |
| content | string | lengths 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
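Each row of the dump below is one source file described by these columns. A minimal sketch of how such a dump could be inspected, assuming the rows have been exported as JSON Lines keyed by the column names above (the file name is hypothetical):

import json

# Hypothetical export: one JSON object per line, keyed by the columns listed above.
with open("code_dump.jsonl") as fh:
    rows = [json.loads(line) for line in fh]

# All rows share lang == "Python" (the lang column has a single class).
largest = max(rows, key=lambda row: row["size"])
print(largest["max_stars_repo_name"], largest["max_stars_repo_path"], largest["size"])
print(sum(row["alphanum_fraction"] for row in rows) / len(rows))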
4a21a1b2dcffe0921b70e2fae855c0ed4ad70746 | 1,273 | py | Python | aws_xray_sdk/core/patcher.py | lukaasp/libs | 2865fcfa6a13bae5ce16d2df4a119d96e7b4d514 | ["Unlicense"] | null | null | null | aws_xray_sdk/core/patcher.py | lukaasp/libs | 2865fcfa6a13bae5ce16d2df4a119d96e7b4d514 | ["Unlicense"] | null | null | null | aws_xray_sdk/core/patcher.py | lukaasp/libs | 2865fcfa6a13bae5ce16d2df4a119d96e7b4d514 | ["Unlicense"] | null | null | null |
import logging
import importlib
log = logging.getLogger(__name__)
SUPPORTED_MODULES = (
'botocore',
'requests',
'sqlite3',
'mysql',
)
_PATCHED_MODULES = set()
def patch_all():
patch(SUPPORTED_MODULES, raise_errors=False)
def patch(modules_to_patch, raise_errors=True):
for m in modules_to_patch:
_patch_module(m, raise_errors)
def _patch_module(module_to_patch, raise_errors=True):
    # boto3 depends on botocore, so patching botocore is sufficient
if module_to_patch == 'boto3':
module_to_patch = 'botocore'
if module_to_patch not in SUPPORTED_MODULES:
raise Exception('module %s is currently not supported for patching'
% module_to_patch)
try:
_patch(module_to_patch)
except Exception:
if raise_errors:
raise
log.debug('failed to patch module %s', module_to_patch)
def _patch(module_to_patch):
path = 'aws_xray_sdk.ext.%s' % module_to_patch
if module_to_patch in _PATCHED_MODULES:
log.debug('%s already patched', module_to_patch)
imported_module = importlib.import_module(path)
imported_module.patch()
_PATCHED_MODULES.add(module_to_patch)
log.info('successfully patched module %s', module_to_patch)
| 23.574074 | 75 | 0.695208 |
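The patcher module above exposes patch_all() and patch(); a short usage sketch (module names come from SUPPORTED_MODULES, the calling application code is hypothetical):

from aws_xray_sdk.core import patcher

# Patch every supported library, logging rather than raising on failures.
patcher.patch_all()

# Or patch a chosen subset; unsupported names raise, and 'boto3' is aliased to 'botocore'.
patcher.patch(['requests', 'sqlite3'])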
4a21a1e24c87f131d7bb29493c5a32a430704c1d | 625 | py | Python | client/migrations/0001_initial.py | akshay98322/MinionLabs- | 613b31877e9bde498aa76680936c193256c14956 | ["MIT"] | null | null | null | client/migrations/0001_initial.py | akshay98322/MinionLabs- | 613b31877e9bde498aa76680936c193256c14956 | ["MIT"] | null | null | null | client/migrations/0001_initial.py | akshay98322/MinionLabs- | 613b31877e9bde498aa76680936c193256c14956 | ["MIT"] | null | null | null |
# Generated by Django 3.1.7 on 2021-03-28 05:02
from django.db import migrations, models
import phone_field.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Client',
fields=[
('name', models.CharField(max_length=30)),
('email', models.EmailField(max_length=254)),
('phone', phone_field.models.PhoneField(blank=True, max_length=31)),
('address', models.TextField(primary_key=True, serialize=False)),
],
),
]
| 25 | 84 | 0.5808 |
4a21a2c1c58b9022e74a606291655d8c5a3840ea | 882 | py | Python | app/models/__init__.py | sarahmk125/flask-model | 4347b2d7fd065c10c150acc7376f21d2cbce6dbc | ["Apache-2.0"] | null | null | null | app/models/__init__.py | sarahmk125/flask-model | 4347b2d7fd065c10c150acc7376f21d2cbce6dbc | ["Apache-2.0"] | null | null | null | app/models/__init__.py | sarahmk125/flask-model | 4347b2d7fd065c10c150acc7376f21d2cbce6dbc | ["Apache-2.0"] | null | null | null |
import os
import app.utils.constants as constants
import app.utils.secrets as secrets
from app.models.models import FinancialModel, ModelParameter
from app.models.users import User
def init_app(app, db, environment):
basedir = os.path.abspath(os.path.dirname(__file__))
# Init based on environment. Prod: use non-local DB
if environment == constants.DEV_ENVIRONMENT_NAME:
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'data.sqlite')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
else:
app.config['SQLALCHEMY_DATABASE_URI'] = f'postgresql://{secrets.PROD_POSTGRES_USER}:{secrets.PROD_POSTGRES_PASS}@{secrets.PROD_POSTGRES_HOST}:{secrets.PROD_POSTGRES_PORT}/postgres'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db.init_app(app)
with app.app_context():
db.create_all()
| 38.347826 | 188 | 0.739229 |
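A small sketch of how init_app() above could be wired into an application factory; the Flask app and SQLAlchemy instance shown here are hypothetical callers, not part of the repository:

from flask import Flask
from flask_sqlalchemy import SQLAlchemy

import app.models as models
import app.utils.constants as constants

flask_app = Flask(__name__)
db = SQLAlchemy()  # assumed to be the same instance the model classes are bound to

# The dev environment selects the local SQLite file; anything else targets Postgres.
models.init_app(flask_app, db, constants.DEV_ENVIRONMENT_NAME)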
4a21a631380b21da57e6f6f331e95caac105f2b2 | 369 | py | Python | pyproc/views/base.py | cmin764/pyproc | be69b5a35fbe3818accea472735effec0825f17c | ["MIT"] | null | null | null | pyproc/views/base.py | cmin764/pyproc | be69b5a35fbe3818accea472735effec0825f17c | ["MIT"] | null | null | null | pyproc/views/base.py | cmin764/pyproc | be69b5a35fbe3818accea472735effec0825f17c | ["MIT"] | null | null | null |
"""Base views, routes and utilities exposed by the pyproc web app."""
import functools
from flask import (
jsonify
)
def responsify(func):
"""Decorator used to automatically serialize dict like responses."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
resp = func(*args, **kwargs)
return jsonify(resp)
return wrapper
| 18.45 | 72 | 0.663957 |
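The responsify decorator above lets a view return a plain dict; a minimal hypothetical route using it:

from flask import Flask
from pyproc.views.base import responsify

app = Flask(__name__)

@app.route("/health")
@responsify
def health():
    # The returned dict is passed through jsonify() by the decorator.
    return {"status": "ok", "version": 1}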
4a21a63e05609dacdeb5d1203ec2a7f17f6ea927 | 1,547 | py | Python | 03_BinarySearch/matrix_median.py | Sheetal0601/InterviewBit | 72ba1507278dafac6e5fb81da20d372e3d141348 | ["MIT"] | 61 | 2018-02-18T08:16:31.000Z | 2022-02-17T17:18:57.000Z | 03_BinarySearch/matrix_median.py | Sheetal0601/InterviewBit | 72ba1507278dafac6e5fb81da20d372e3d141348 | ["MIT"] | 1 | 2018-02-23T20:06:18.000Z | 2019-12-29T18:52:20.000Z | 03_BinarySearch/matrix_median.py | Sheetal0601/InterviewBit | 72ba1507278dafac6e5fb81da20d372e3d141348 | ["MIT"] | 30 | 2018-03-28T19:02:23.000Z | 2021-07-06T20:00:14.000Z |
# Matrix Median
# https://www.interviewbit.com/problems/matrix-median/
#
# Given a N cross M matrix in which each row is sorted, find the overall median of the matrix. Assume N*M is odd.
#
# For example,
#
# Matrix=
# [1, 3, 5]
# [2, 6, 9]
# [3, 6, 9]
#
# A = [1, 2, 3, 3, 5, 6, 6, 9, 9]
#
# Median is 5. So, we return 5.
#
# Note: No extra memory is allowed.
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
class Solution:
def calc_se(self, A, x):
from bisect import bisect_left, bisect_right
smaller = equal = 0
for row in A:
l, r = bisect_left(row, x), bisect_right(row, x)
smaller += l
equal += r - l
return smaller, equal
# @param A : list of list of integers
# @return an integer
def findMedian(self, A):
n, m = len(A), len(A[0])
k = (n * m + 1) // 2
l, r = min([row[0] for row in A]), max([row[-1] for row in A])
while l <= r:
mid = (l + r) >> 1
smaller, equal = self.calc_se(A, mid)
if smaller < k and smaller + equal >= k:
return mid
elif smaller >= k:
r = mid - 1
else:
l = mid + 1
return -1
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
if __name__ == "__main__":
A = [
[1, 3, 5],
[2, 6, 9],
[3, 6, 9],
]
s = Solution()
    print(s.findMedian(A))
| 23.089552 | 113 | 0.418875 |
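A quick check of the counting step behind the binary search above: for the sample matrix and candidate value 5, calc_se should report four smaller entries and one equal entry, which brackets the k-th (5th) position. This sketch assumes the Solution class defined above is importable or already in scope:

s = Solution()
smaller, equal = s.calc_se([[1, 3, 5], [2, 6, 9], [3, 6, 9]], 5)
assert (smaller, equal) == (4, 1)
# k = (3 * 3 + 1) // 2 = 5; smaller < k and smaller + equal >= k, so 5 is the median.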
4a21a6d2b45faa642cfd82e5c028125a2cd3bf68 | 2,661 | py | Python | openmc/capi/nuclide.py | hturner08/openmc | 5e36cb2f5daf7ab9162734e927dd652c1118a5bd | ["MIT"] | 1 | 2019-04-10T12:41:16.000Z | 2019-04-10T12:41:16.000Z | openmc/capi/nuclide.py | hturner08/openmc | 5e36cb2f5daf7ab9162734e927dd652c1118a5bd | ["MIT"] | 5 | 2015-03-11T02:28:25.000Z | 2018-11-07T14:10:28.000Z | openmc/capi/nuclide.py | dryuri92/openmc | e28e42e8c250cd1ad586d1d9fd1d20847ad92edd | ["MIT"] | null | null | null |
from collections.abc import Mapping
from ctypes import c_int, c_char_p, POINTER, c_size_t
from weakref import WeakValueDictionary
import numpy as np
from numpy.ctypeslib import as_array
from openmc.exceptions import DataError, AllocationError
from . import _dll
from .core import _FortranObject
from .error import _error_handler
__all__ = ['Nuclide', 'nuclides', 'load_nuclide']
# Nuclide functions
_dll.openmc_get_nuclide_index.argtypes = [c_char_p, POINTER(c_int)]
_dll.openmc_get_nuclide_index.restype = c_int
_dll.openmc_get_nuclide_index.errcheck = _error_handler
_dll.openmc_load_nuclide.argtypes = [c_char_p]
_dll.openmc_load_nuclide.restype = c_int
_dll.openmc_load_nuclide.errcheck = _error_handler
_dll.openmc_nuclide_name.argtypes = [c_int, POINTER(c_char_p)]
_dll.openmc_nuclide_name.restype = c_int
_dll.openmc_nuclide_name.errcheck = _error_handler
_dll.nuclides_size.restype = c_size_t
def load_nuclide(name):
"""Load cross section data for a nuclide.
Parameters
----------
name : str
Name of the nuclide, e.g. 'U235'
"""
_dll.openmc_load_nuclide(name.encode())
class Nuclide(_FortranObject):
"""Nuclide stored internally.
This class exposes a nuclide that is stored internally in the OpenMC
solver. To obtain a view of a nuclide with a given name, use the
:data:`openmc.capi.nuclides` mapping.
Parameters
----------
index : int
Index in the `nuclides` array.
Attributes
----------
name : str
Name of the nuclide, e.g. 'U235'
"""
__instances = WeakValueDictionary()
def __new__(cls, *args):
if args not in cls.__instances:
instance = super().__new__(cls)
cls.__instances[args] = instance
return cls.__instances[args]
def __init__(self, index):
self._index = index
@property
def name(self):
name = c_char_p()
_dll.openmc_nuclide_name(self._index, name)
return name.value.decode()
class _NuclideMapping(Mapping):
"""Provide mapping from nuclide name to index in nuclides array."""
def __getitem__(self, key):
index = c_int()
try:
_dll.openmc_get_nuclide_index(key.encode(), index)
except (DataError, AllocationError) as e:
# __contains__ expects a KeyError to work correctly
raise KeyError(str(e))
return Nuclide(index.value)
def __iter__(self):
for i in range(len(self)):
yield Nuclide(i).name
def __len__(self):
return _dll.nuclides_size()
def __repr__(self):
return repr(dict(self))
nuclides = _NuclideMapping()
| 26.878788 | 72 | 0.685832 |
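The Nuclide docstring above points callers at the nuclides mapping; a hypothetical interactive session, assuming OpenMC's shared library has been initialized and cross section data for U235 is available on disk:

from openmc.capi.nuclide import load_nuclide, nuclides

load_nuclide('U235')        # wraps openmc_load_nuclide in the C API
u235 = nuclides['U235']     # index lookup via openmc_get_nuclide_index
print(u235.name)            # 'U235'
print(len(nuclides), list(nuclides)[:5])   # iteration yields nuclide names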
4a21a8538cff03184356d8421a3f0c8d31ab8021 | 368 | py | Python | tests/test_dataset.py | iris-hep/func_adl_uproot | ac39e6dfa516559c2578040bb856fd9dbe647bdc | ["MIT"] | null | null | null | tests/test_dataset.py | iris-hep/func_adl_uproot | ac39e6dfa516559c2578040bb856fd9dbe647bdc | ["MIT"] | 36 | 2020-09-03T16:43:16.000Z | 2022-03-16T15:15:39.000Z | tests/test_dataset.py | iris-hep/func_adl_uproot | ac39e6dfa516559c2578040bb856fd9dbe647bdc | ["MIT"] | null | null | null |
from func_adl_uproot import UprootDataset
def test_uproot_dataset():
ds = UprootDataset('tests/scalars_tree_file.root')
assert ds.value().fields == ['int_branch',
'long_branch',
'float_branch',
'double_branch',
'bool_branch']
| 33.454545 | 54 | 0.480978 |
4a21a9085304a1bb43d5d5fe51cb8e64179bacab | 400 | py | Python | system/link.py | thinkstack-co/ConnectPyse | ded8b426250aee352598f33ad08b7bcc3c6a3017 | ["MIT"] | 23 | 2017-01-24T05:44:05.000Z | 2021-11-26T17:08:01.000Z | system/link.py | thinkstack-co/ConnectPyse | ded8b426250aee352598f33ad08b7bcc3c6a3017 | ["MIT"] | 10 | 2017-01-14T21:11:10.000Z | 2019-06-16T21:10:29.000Z | system/link.py | thinkstack-co/ConnectPyse | ded8b426250aee352598f33ad08b7bcc3c6a3017 | ["MIT"] | 16 | 2017-01-24T02:28:19.000Z | 2021-07-13T17:23:22.000Z |
from ..cw_model import CWModel
class Link(CWModel):
def __init__(self, json_dict=None):
self.id = None # (Integer)
self.name = None # *(String(50))
self.tableReferenceId = None # *(Integer)
self.url = None # (String(1000))
self._info = None # (Metadata)
# initialize object with json dict
super().__init__(json_dict)
| 26.666667 | 51 | 0.5725 |
4a21a9a1778c3ef7283c44c8196271a7edbf6c4b | 1,197 | py | Python | exercises/exercise_11_3_16.py | JSBCCA/pythoncode | b7f2af8b0efc2d01d3e4568265eb3a5038a8679f | ["MIT"] | null | null | null | exercises/exercise_11_3_16.py | JSBCCA/pythoncode | b7f2af8b0efc2d01d3e4568265eb3a5038a8679f | ["MIT"] | null | null | null | exercises/exercise_11_3_16.py | JSBCCA/pythoncode | b7f2af8b0efc2d01d3e4568265eb3a5038a8679f | ["MIT"] | null | null | null |
def secondhighest_thirdlowest(txt_file):
# opens file and saves as a set
with open(txt_file, 'r') as file:
data_set = set(file.read().splitlines())
# removes first highest and first/second lowest
data_set.remove(max(data_set))
data_set.remove(min(data_set))
data_set.remove(min(data_set))
# returns second highest and third lowest
return str(txt_file) + " third lowest: " + str(min(data_set)) + '\n' + str(
txt_file) + " second highest: " + str(max(data_set))
# py.test exercise_11_3_16.py --cov=exercise_11_3_16.py --cov-report=html
def test_secondhighest_thirdlowest():
assert secondhighest_thirdlowest(
"data_1.txt") == """data_1.txt third lowest: 13.2
data_1.txt second highest: 84.9"""
assert secondhighest_thirdlowest(
"data_2.txt") == """data_2.txt third lowest: 12.5
data_2.txt second highest: 88.9"""
assert secondhighest_thirdlowest(
"data_3.txt") == """data_3.txt third lowest: 10.9
data_3.txt second highest: 89.6"""
if __name__ == "__main__":
print(secondhighest_thirdlowest('data_1.txt'))
print(secondhighest_thirdlowest('data_2.txt'))
print(secondhighest_thirdlowest('data_3.txt'))
| 38.612903 | 79 | 0.6934 |
4a21a9ed9cbaf7c486a09937525096ea46392f92 | 893 | py | Python | fatherClass.py | smithgoo/python3Learn | d0c066c10887db3942ca285b86ce464463998aad | ["MIT"] | 1 | 2019-05-30T08:08:34.000Z | 2019-05-30T08:08:34.000Z | fatherClass.py | smithgoo/python3Learn | d0c066c10887db3942ca285b86ce464463998aad | ["MIT"] | null | null | null | fatherClass.py | smithgoo/python3Learn | d0c066c10887db3942ca285b86ce464463998aad | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class Animal(object):
def run(self):
print 'Animal is running....'
pass
class Dog (Animal):
def run(self):
print 'Dog is Running ....'
pass
pass
class cat (Animal):
pass
dog = Dog();
dog.run()
flg = isinstance(dog,Dog)
print '%s',flg
def run_twice(animal):
animal.run()
animal.run()
pass
run_twice(Animal())
print type(123)
print type('str')
print type(None)
let = type(123) ==type('str')
print let
print dir('ABC')
print len('ABC')
print 'ABC'.lower()
class MyObject(object):
"""docstring for MyObject."""
def __init__(self):
self.x = 9
def power(self):
return self.x * self.x
pass
obj = MyObject()
print obj
print hasattr(obj,'x')
print obj.x
print setattr(obj,'y',9)
print hasattr(obj,'y')
# print getattr(obj,'z');
| 11.597403 | 37 | 0.582307 |
4a21a9f432e3e1bd83b55f60e9eb8f12cb8bf556 | 5,620 | py | Python | arxiv_html/settings.py | arXiv/arxiv-readability | 20dac4540aaf689b2ab8fdababf51e89e645f077 | ["Apache-2.0", "MIT"] | 19 | 2019-01-02T16:39:10.000Z | 2022-02-11T12:50:27.000Z | arxiv_html/settings.py | cul-it/arxiv-readability | 20dac4540aaf689b2ab8fdababf51e89e645f077 | ["Apache-2.0", "MIT"] | 2 | 2018-11-12T17:09:14.000Z | 2018-11-12T17:10:07.000Z | arxiv_html/settings.py | cul-it/arxiv-readability | 20dac4540aaf689b2ab8fdababf51e89e645f077 | ["Apache-2.0", "MIT"] | 7 | 2019-01-10T22:02:01.000Z | 2020-12-06T16:28:22.000Z |
"""
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import environ
env = environ.Env()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool('DEBUG', default=False)
ALLOWED_HOSTS = env.list('ALLOWED_HOSTS', default=['localhost', 'web'])
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'raven.contrib.django.raven_compat',
'rest_framework',
'arxiv_html.renders',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'arxiv_html.urls'
APPEND_SLASH = True
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'arxiv_html/templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'arxiv_html.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': env.db('DATABASE_URL', default='psql://postgres@db:5432/postgres'),
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
)
LOGIN_REDIRECT_URL = '/'
# LOGIN_URL = '/login/'
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
# Added this to make redirects and static files work in the case that this is
# not deployed at /, but the server is rewriting to /. I really wanted to not
# do this, and everything else seems fine if I don't, but here it is. -E
FORCE_SCRIPT_NAME = env('FORCE_SCRIPT_NAME', default=None)
STATIC_URL = '/static/'
if not DEBUG:
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "arxiv_html/static"),
]
STATIC_ROOT = os.path.join(BASE_DIR, "arxiv_html/static_root")
# Uploaded files, including paper source and rendered articles
MEDIA_USE_S3 = env.bool('MEDIA_USE_S3', default=False)
if MEDIA_USE_S3:
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
AWS_ACCESS_KEY_ID = env('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('AWS_STORAGE_BUCKET_NAME')
AWS_S3_REGION_NAME = env('AWS_S3_REGION_NAME')
MEDIA_URL = env('MEDIA_URL', default=f'https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/')
MEDIA_ROOT = None
else:
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = '/media/'
# Tests
TEST_RUNNER = 'arxiv_html.test_runner.LocalStorageDiscoverRunner'
# Log everything to the console, including tracebacks
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'root': {
'handlers': ['console', 'sentry'],
},
'handlers': {
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
'tags': {'custom-tag': 'x'},
},
'console': {
'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
'class': 'logging.StreamHandler',
},
},
}
# SSL
ENABLE_SSL = env.bool('ENABLE_SSL', default=False)
SESSION_COOKIE_SECURE = ENABLE_SSL
CSRF_COOKIE_SECURE = ENABLE_SSL
# SECURE_SSL_REDIRECT = ENABLE_SSL
if ENABLE_SSL:
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Celery
CELERY_BROKER_URL = env('CELERY_BROKER_URL', default='')
CELERY_RESULT_BACKEND = env('CELERY_RESULT_BACKEND', default='')
# Engrafo
ENGRAFO_IMAGE = env('ENGRAFO_IMAGE', default='arxivvanity/engrafo')
ARXIV_SOURCE_URL_FORMAT = "https://arxiv.org/src/{source_id}"
| 28.969072 | 96 | 0.704448 |
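The settings module above pulls its configuration from the environment via django-environ; a hypothetical minimal local setup covering the variables it reads (all values here are placeholders, the names come from the env(...) lookups above):

import os

os.environ.setdefault("SECRET_KEY", "dev-only-not-secret")
os.environ.setdefault("DEBUG", "True")
os.environ.setdefault("ALLOWED_HOSTS", "localhost,web")
os.environ.setdefault("DATABASE_URL", "psql://postgres@db:5432/postgres")
os.environ.setdefault("MEDIA_USE_S3", "False")
os.environ.setdefault("ENABLE_SSL", "False")
os.environ.setdefault("CELERY_BROKER_URL", "")
os.environ.setdefault("ENGRAFO_IMAGE", "arxivvanity/engrafo")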
4a21aaa9bcf7f814d085fc8bb7e0a9514be1fc75 | 1,658 | py | Python | tables.py | philkjacobs/superlatives | dbf0da8f9491c27694873ab7119d5cf782b64eb1 | ["MIT"] | null | null | null | tables.py | philkjacobs/superlatives | dbf0da8f9491c27694873ab7119d5cf782b64eb1 | ["MIT"] | 18 | 2017-09-07T03:11:46.000Z | 2018-02-17T18:50:34.000Z | tables.py | philkjacobs/superlatives | dbf0da8f9491c27694873ab7119d5cf782b64eb1 | ["MIT"] | 1 | 2017-10-15T10:34:32.000Z | 2017-10-15T10:34:32.000Z |
import os
from asyncio import ensure_future
from aiopg.sa import create_engine
from sqlalchemy import (
Column,
Integer,
MetaData,
String,
Table,
)
from urllib import parse
# postgres is not a standard urllib.parse URL
parse.uses_netloc.append("postgres")
metadata = MetaData()
player_stats = Table(
'player_stats',
metadata,
Column('id', Integer, primary_key=True),
Column('open_ts', Integer),
Column('close_ts', Integer),
Column('state', String),
Column('game_id', String),
)
async def create_player_stats_table(conn):
return await conn.execute('''CREATE TABLE IF NOT EXISTS player_stats (
id serial PRIMARY KEY,
open_ts bigint DEFAULT NULL,
close_ts bigint DEFAULT NULL,
state varchar(255) DEFAULT NULL
)''')
async def add_game_id_player_stats(conn):
return await conn.execute('''ALTER TABLE player_stats ADD COLUMN IF NOT EXISTS game_id varchar(255) DEFAULT NULL;
''')
async def async_db_call(fn):
url = parse.urlparse(os.environ.get("DATABASE_URL", "postgres://localhost:5432/supers"))
engine_attrs = {
'database': url.path[1:],
'user': url.username,
'password': url.password,
'host': url.hostname,
'port': url.port,
}
async with create_engine(**engine_attrs) as engine:
async with engine.acquire() as conn:
return await fn(conn)
def setup_and_migrate_db(ioloop):
return all([
ioloop.run_until_complete(ensure_future(async_db_call(create_player_stats_table))),
ioloop.run_until_complete(ensure_future(async_db_call(add_game_id_player_stats))),
])
| 27.180328 | 117 | 0.683353 |
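A sketch of how the helpers above could be driven; the query coroutine is hypothetical, while async_db_call and setup_and_migrate_db come from the module:

import asyncio
from tables import async_db_call, setup_and_migrate_db

async def count_open_games(conn):
    # conn is the aiopg.sa connection that async_db_call acquires
    result = await conn.execute("SELECT count(*) FROM player_stats WHERE close_ts IS NULL")
    row = await result.fetchone()
    return row[0]

loop = asyncio.get_event_loop()
setup_and_migrate_db(loop)  # creates/alters player_stats if needed
print(loop.run_until_complete(async_db_call(count_open_games)))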
4a21ad754bfcacfcac9876c1293c359869191fb9 | 38,085 | py | Python | pybind/slxos/v16r_1_00b/qos/map_/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | ["Apache-2.0"] | null | null | null | pybind/slxos/v16r_1_00b/qos/map_/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | ["Apache-2.0"] | null | null | null | pybind/slxos/v16r_1_00b/qos/map_/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | ["Apache-2.0"] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import cos_mutation
import cos_traffic_class
import traffic_class_cos
import dscp_mutation
import dscp_traffic_class
import dscp_cos
class map_(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-qos-mls - based on the path /qos/map. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__cos_mutation','__cos_traffic_class','__traffic_class_cos','__dscp_mutation','__dscp_traffic_class','__dscp_cos',)
_yang_name = 'map'
_rest_name = 'map'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__cos_mutation = YANGDynClass(base=YANGListType("name",cos_mutation.cos_mutation, yang_name="cos-mutation", rest_name="cos-mutation", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Configure CoS-Mutation map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'cos_mutation', u'cli-mode-name': u'cos-mutation-$(name)'}}), is_container='list', yang_name="cos-mutation", rest_name="cos-mutation", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure CoS-Mutation map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'cos_mutation', u'cli-mode-name': u'cos-mutation-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)
self.__traffic_class_cos = YANGDynClass(base=YANGListType("traffic_class_cos_map_name",traffic_class_cos.traffic_class_cos, yang_name="traffic-class-cos", rest_name="traffic-class-cos", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='traffic-class-cos-map-name', extensions={u'tailf-common': {u'info': u'Configure Traffic-Class-to-CoS map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'traffic_class_cos', u'cli-mode-name': u'traffic-class-cos-$(traffic-class-cos-map-name)'}}), is_container='list', yang_name="traffic-class-cos", rest_name="traffic-class-cos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Traffic-Class-to-CoS map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'traffic_class_cos', u'cli-mode-name': u'traffic-class-cos-$(traffic-class-cos-map-name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)
self.__dscp_mutation = YANGDynClass(base=YANGListType("dscp_mutation_map_name",dscp_mutation.dscp_mutation, yang_name="dscp-mutation", rest_name="dscp-mutation", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dscp-mutation-map-name', extensions={u'tailf-common': {u'info': u'Configure DSCP-Mutation map', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'dscp_mutation', u'cli-mode-name': u'dscp-mutation-$(dscp-mutation-map-name)'}}), is_container='list', yang_name="dscp-mutation", rest_name="dscp-mutation", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure DSCP-Mutation map', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'dscp_mutation', u'cli-mode-name': u'dscp-mutation-$(dscp-mutation-map-name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)
self.__dscp_traffic_class = YANGDynClass(base=YANGListType("dscp_traffic_class_map_name",dscp_traffic_class.dscp_traffic_class, yang_name="dscp-traffic-class", rest_name="dscp-traffic-class", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dscp-traffic-class-map-name', extensions={u'tailf-common': {u'info': u'Configure DSCP-to-Traffic-Class map', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'dscp_traffic_class', u'cli-mode-name': u'dscp-traffic-class-$(dscp-traffic-class-map-name)'}}), is_container='list', yang_name="dscp-traffic-class", rest_name="dscp-traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure DSCP-to-Traffic-Class map', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'dscp_traffic_class', u'cli-mode-name': u'dscp-traffic-class-$(dscp-traffic-class-map-name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)
self.__cos_traffic_class = YANGDynClass(base=YANGListType("name",cos_traffic_class.cos_traffic_class, yang_name="cos-traffic-class", rest_name="cos-traffic-class", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Configure CoS-to-Traffic-Class map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'cos_traffic_class', u'cli-mode-name': u'cos-traffic-class-$(name)'}}), is_container='list', yang_name="cos-traffic-class", rest_name="cos-traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure CoS-to-Traffic-Class map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'cos_traffic_class', u'cli-mode-name': u'cos-traffic-class-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)
self.__dscp_cos = YANGDynClass(base=YANGListType("dscp_cos_map_name",dscp_cos.dscp_cos, yang_name="dscp-cos", rest_name="dscp-cos", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dscp-cos-map-name', extensions={u'tailf-common': {u'info': u'Configure DSCP-to-CoS map', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'dscp_cos', u'cli-mode-name': u'dscp-cos-$(dscp-cos-map-name)'}}), is_container='list', yang_name="dscp-cos", rest_name="dscp-cos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure DSCP-to-CoS map', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'dscp_cos', u'cli-mode-name': u'dscp-cos-$(dscp-cos-map-name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'qos', u'map']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'qos', u'map']
def _get_cos_mutation(self):
"""
Getter method for cos_mutation, mapped from YANG variable /qos/map/cos_mutation (list)
"""
return self.__cos_mutation
def _set_cos_mutation(self, v, load=False):
"""
Setter method for cos_mutation, mapped from YANG variable /qos/map/cos_mutation (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_cos_mutation is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cos_mutation() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("name",cos_mutation.cos_mutation, yang_name="cos-mutation", rest_name="cos-mutation", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Configure CoS-Mutation map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'cos_mutation', u'cli-mode-name': u'cos-mutation-$(name)'}}), is_container='list', yang_name="cos-mutation", rest_name="cos-mutation", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure CoS-Mutation map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'cos_mutation', u'cli-mode-name': u'cos-mutation-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """cos_mutation must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("name",cos_mutation.cos_mutation, yang_name="cos-mutation", rest_name="cos-mutation", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Configure CoS-Mutation map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'cos_mutation', u'cli-mode-name': u'cos-mutation-$(name)'}}), is_container='list', yang_name="cos-mutation", rest_name="cos-mutation", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure CoS-Mutation map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'cos_mutation', u'cli-mode-name': u'cos-mutation-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)""",
})
self.__cos_mutation = t
if hasattr(self, '_set'):
self._set()
def _unset_cos_mutation(self):
self.__cos_mutation = YANGDynClass(base=YANGListType("name",cos_mutation.cos_mutation, yang_name="cos-mutation", rest_name="cos-mutation", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Configure CoS-Mutation map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'cos_mutation', u'cli-mode-name': u'cos-mutation-$(name)'}}), is_container='list', yang_name="cos-mutation", rest_name="cos-mutation", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure CoS-Mutation map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'cos_mutation', u'cli-mode-name': u'cos-mutation-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)
def _get_cos_traffic_class(self):
"""
Getter method for cos_traffic_class, mapped from YANG variable /qos/map/cos_traffic_class (list)
"""
return self.__cos_traffic_class
def _set_cos_traffic_class(self, v, load=False):
"""
Setter method for cos_traffic_class, mapped from YANG variable /qos/map/cos_traffic_class (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_cos_traffic_class is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cos_traffic_class() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("name",cos_traffic_class.cos_traffic_class, yang_name="cos-traffic-class", rest_name="cos-traffic-class", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Configure CoS-to-Traffic-Class map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'cos_traffic_class', u'cli-mode-name': u'cos-traffic-class-$(name)'}}), is_container='list', yang_name="cos-traffic-class", rest_name="cos-traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure CoS-to-Traffic-Class map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'cos_traffic_class', u'cli-mode-name': u'cos-traffic-class-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """cos_traffic_class must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("name",cos_traffic_class.cos_traffic_class, yang_name="cos-traffic-class", rest_name="cos-traffic-class", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Configure CoS-to-Traffic-Class map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'cos_traffic_class', u'cli-mode-name': u'cos-traffic-class-$(name)'}}), is_container='list', yang_name="cos-traffic-class", rest_name="cos-traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure CoS-to-Traffic-Class map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'cos_traffic_class', u'cli-mode-name': u'cos-traffic-class-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)""",
})
self.__cos_traffic_class = t
if hasattr(self, '_set'):
self._set()
def _unset_cos_traffic_class(self):
self.__cos_traffic_class = YANGDynClass(base=YANGListType("name",cos_traffic_class.cos_traffic_class, yang_name="cos-traffic-class", rest_name="cos-traffic-class", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Configure CoS-to-Traffic-Class map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'cos_traffic_class', u'cli-mode-name': u'cos-traffic-class-$(name)'}}), is_container='list', yang_name="cos-traffic-class", rest_name="cos-traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure CoS-to-Traffic-Class map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'cos_traffic_class', u'cli-mode-name': u'cos-traffic-class-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)
def _get_traffic_class_cos(self):
"""
Getter method for traffic_class_cos, mapped from YANG variable /qos/map/traffic_class_cos (list)
"""
return self.__traffic_class_cos
def _set_traffic_class_cos(self, v, load=False):
"""
Setter method for traffic_class_cos, mapped from YANG variable /qos/map/traffic_class_cos (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_traffic_class_cos is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_traffic_class_cos() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("traffic_class_cos_map_name",traffic_class_cos.traffic_class_cos, yang_name="traffic-class-cos", rest_name="traffic-class-cos", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='traffic-class-cos-map-name', extensions={u'tailf-common': {u'info': u'Configure Traffic-Class-to-CoS map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'traffic_class_cos', u'cli-mode-name': u'traffic-class-cos-$(traffic-class-cos-map-name)'}}), is_container='list', yang_name="traffic-class-cos", rest_name="traffic-class-cos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Traffic-Class-to-CoS map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'traffic_class_cos', u'cli-mode-name': u'traffic-class-cos-$(traffic-class-cos-map-name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """traffic_class_cos must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("traffic_class_cos_map_name",traffic_class_cos.traffic_class_cos, yang_name="traffic-class-cos", rest_name="traffic-class-cos", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='traffic-class-cos-map-name', extensions={u'tailf-common': {u'info': u'Configure Traffic-Class-to-CoS map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'traffic_class_cos', u'cli-mode-name': u'traffic-class-cos-$(traffic-class-cos-map-name)'}}), is_container='list', yang_name="traffic-class-cos", rest_name="traffic-class-cos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Traffic-Class-to-CoS map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'traffic_class_cos', u'cli-mode-name': u'traffic-class-cos-$(traffic-class-cos-map-name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)""",
})
self.__traffic_class_cos = t
if hasattr(self, '_set'):
self._set()
def _unset_traffic_class_cos(self):
self.__traffic_class_cos = YANGDynClass(base=YANGListType("traffic_class_cos_map_name",traffic_class_cos.traffic_class_cos, yang_name="traffic-class-cos", rest_name="traffic-class-cos", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='traffic-class-cos-map-name', extensions={u'tailf-common': {u'info': u'Configure Traffic-Class-to-CoS map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'traffic_class_cos', u'cli-mode-name': u'traffic-class-cos-$(traffic-class-cos-map-name)'}}), is_container='list', yang_name="traffic-class-cos", rest_name="traffic-class-cos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Traffic-Class-to-CoS map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'traffic_class_cos', u'cli-mode-name': u'traffic-class-cos-$(traffic-class-cos-map-name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)
def _get_dscp_mutation(self):
"""
Getter method for dscp_mutation, mapped from YANG variable /qos/map/dscp_mutation (list)
"""
return self.__dscp_mutation
def _set_dscp_mutation(self, v, load=False):
"""
Setter method for dscp_mutation, mapped from YANG variable /qos/map/dscp_mutation (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_dscp_mutation is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dscp_mutation() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("dscp_mutation_map_name",dscp_mutation.dscp_mutation, yang_name="dscp-mutation", rest_name="dscp-mutation", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dscp-mutation-map-name', extensions={u'tailf-common': {u'info': u'Configure DSCP-Mutation map', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'dscp_mutation', u'cli-mode-name': u'dscp-mutation-$(dscp-mutation-map-name)'}}), is_container='list', yang_name="dscp-mutation", rest_name="dscp-mutation", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure DSCP-Mutation map', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'dscp_mutation', u'cli-mode-name': u'dscp-mutation-$(dscp-mutation-map-name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """dscp_mutation must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("dscp_mutation_map_name",dscp_mutation.dscp_mutation, yang_name="dscp-mutation", rest_name="dscp-mutation", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dscp-mutation-map-name', extensions={u'tailf-common': {u'info': u'Configure DSCP-Mutation map', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'dscp_mutation', u'cli-mode-name': u'dscp-mutation-$(dscp-mutation-map-name)'}}), is_container='list', yang_name="dscp-mutation", rest_name="dscp-mutation", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure DSCP-Mutation map', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'dscp_mutation', u'cli-mode-name': u'dscp-mutation-$(dscp-mutation-map-name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)""",
})
self.__dscp_mutation = t
if hasattr(self, '_set'):
self._set()
def _unset_dscp_mutation(self):
self.__dscp_mutation = YANGDynClass(base=YANGListType("dscp_mutation_map_name",dscp_mutation.dscp_mutation, yang_name="dscp-mutation", rest_name="dscp-mutation", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dscp-mutation-map-name', extensions={u'tailf-common': {u'info': u'Configure DSCP-Mutation map', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'dscp_mutation', u'cli-mode-name': u'dscp-mutation-$(dscp-mutation-map-name)'}}), is_container='list', yang_name="dscp-mutation", rest_name="dscp-mutation", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure DSCP-Mutation map', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'dscp_mutation', u'cli-mode-name': u'dscp-mutation-$(dscp-mutation-map-name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)
def _get_dscp_traffic_class(self):
"""
Getter method for dscp_traffic_class, mapped from YANG variable /qos/map/dscp_traffic_class (list)
"""
return self.__dscp_traffic_class
def _set_dscp_traffic_class(self, v, load=False):
"""
Setter method for dscp_traffic_class, mapped from YANG variable /qos/map/dscp_traffic_class (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_dscp_traffic_class is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dscp_traffic_class() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("dscp_traffic_class_map_name",dscp_traffic_class.dscp_traffic_class, yang_name="dscp-traffic-class", rest_name="dscp-traffic-class", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dscp-traffic-class-map-name', extensions={u'tailf-common': {u'info': u'Configure DSCP-to-Traffic-Class map', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'dscp_traffic_class', u'cli-mode-name': u'dscp-traffic-class-$(dscp-traffic-class-map-name)'}}), is_container='list', yang_name="dscp-traffic-class", rest_name="dscp-traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure DSCP-to-Traffic-Class map', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'dscp_traffic_class', u'cli-mode-name': u'dscp-traffic-class-$(dscp-traffic-class-map-name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """dscp_traffic_class must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("dscp_traffic_class_map_name",dscp_traffic_class.dscp_traffic_class, yang_name="dscp-traffic-class", rest_name="dscp-traffic-class", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dscp-traffic-class-map-name', extensions={u'tailf-common': {u'info': u'Configure DSCP-to-Traffic-Class map', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'dscp_traffic_class', u'cli-mode-name': u'dscp-traffic-class-$(dscp-traffic-class-map-name)'}}), is_container='list', yang_name="dscp-traffic-class", rest_name="dscp-traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure DSCP-to-Traffic-Class map', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'dscp_traffic_class', u'cli-mode-name': u'dscp-traffic-class-$(dscp-traffic-class-map-name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)""",
})
self.__dscp_traffic_class = t
if hasattr(self, '_set'):
self._set()
def _unset_dscp_traffic_class(self):
self.__dscp_traffic_class = YANGDynClass(base=YANGListType("dscp_traffic_class_map_name",dscp_traffic_class.dscp_traffic_class, yang_name="dscp-traffic-class", rest_name="dscp-traffic-class", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dscp-traffic-class-map-name', extensions={u'tailf-common': {u'info': u'Configure DSCP-to-Traffic-Class map', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'dscp_traffic_class', u'cli-mode-name': u'dscp-traffic-class-$(dscp-traffic-class-map-name)'}}), is_container='list', yang_name="dscp-traffic-class", rest_name="dscp-traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure DSCP-to-Traffic-Class map', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'dscp_traffic_class', u'cli-mode-name': u'dscp-traffic-class-$(dscp-traffic-class-map-name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)
def _get_dscp_cos(self):
"""
Getter method for dscp_cos, mapped from YANG variable /qos/map/dscp_cos (list)
"""
return self.__dscp_cos
def _set_dscp_cos(self, v, load=False):
"""
Setter method for dscp_cos, mapped from YANG variable /qos/map/dscp_cos (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_dscp_cos is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dscp_cos() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("dscp_cos_map_name",dscp_cos.dscp_cos, yang_name="dscp-cos", rest_name="dscp-cos", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dscp-cos-map-name', extensions={u'tailf-common': {u'info': u'Configure DSCP-to-CoS map', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'dscp_cos', u'cli-mode-name': u'dscp-cos-$(dscp-cos-map-name)'}}), is_container='list', yang_name="dscp-cos", rest_name="dscp-cos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure DSCP-to-CoS map', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'dscp_cos', u'cli-mode-name': u'dscp-cos-$(dscp-cos-map-name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """dscp_cos must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("dscp_cos_map_name",dscp_cos.dscp_cos, yang_name="dscp-cos", rest_name="dscp-cos", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dscp-cos-map-name', extensions={u'tailf-common': {u'info': u'Configure DSCP-to-CoS map', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'dscp_cos', u'cli-mode-name': u'dscp-cos-$(dscp-cos-map-name)'}}), is_container='list', yang_name="dscp-cos", rest_name="dscp-cos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure DSCP-to-CoS map', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'dscp_cos', u'cli-mode-name': u'dscp-cos-$(dscp-cos-map-name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)""",
})
self.__dscp_cos = t
if hasattr(self, '_set'):
self._set()
def _unset_dscp_cos(self):
self.__dscp_cos = YANGDynClass(base=YANGListType("dscp_cos_map_name",dscp_cos.dscp_cos, yang_name="dscp-cos", rest_name="dscp-cos", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dscp-cos-map-name', extensions={u'tailf-common': {u'info': u'Configure DSCP-to-CoS map', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'dscp_cos', u'cli-mode-name': u'dscp-cos-$(dscp-cos-map-name)'}}), is_container='list', yang_name="dscp-cos", rest_name="dscp-cos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure DSCP-to-CoS map', u'cli-no-key-completion': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'dscp_cos', u'cli-mode-name': u'dscp-cos-$(dscp-cos-map-name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)
cos_mutation = __builtin__.property(_get_cos_mutation, _set_cos_mutation)
cos_traffic_class = __builtin__.property(_get_cos_traffic_class, _set_cos_traffic_class)
traffic_class_cos = __builtin__.property(_get_traffic_class_cos, _set_traffic_class_cos)
dscp_mutation = __builtin__.property(_get_dscp_mutation, _set_dscp_mutation)
dscp_traffic_class = __builtin__.property(_get_dscp_traffic_class, _set_dscp_traffic_class)
dscp_cos = __builtin__.property(_get_dscp_cos, _set_dscp_cos)
_pyangbind_elements = {'cos_mutation': cos_mutation, 'cos_traffic_class': cos_traffic_class, 'traffic_class_cos': traffic_class_cos, 'dscp_mutation': dscp_mutation, 'dscp_traffic_class': dscp_traffic_class, 'dscp_cos': dscp_cos, }
| 125.279605 | 1,291 | 0.740029 |
4a21ae49f139eb307c2d6c91811d2a4990f89546 | 55,946 | py | Python | idaes/power_generation/unit_models/boiler_heat_exchanger.py | carldlaird/idaes-pse | cc7a32ca9fa788f483fa8ef85f3d1186ef4a596f | ["RSA-MD"] | 112 | 2019-02-11T23:16:36.000Z | 2022-03-23T20:59:57.000Z | idaes/power_generation/unit_models/boiler_heat_exchanger.py | carldlaird/idaes-pse | cc7a32ca9fa788f483fa8ef85f3d1186ef4a596f | ["RSA-MD"] | 621 | 2019-03-01T14:44:12.000Z | 2022-03-31T19:49:25.000Z | idaes/power_generation/unit_models/boiler_heat_exchanger.py | carldlaird/idaes-pse | cc7a32ca9fa788f483fa8ef85f3d1186ef4a596f | ["RSA-MD"] | 154 | 2019-02-01T23:46:33.000Z | 2022-03-23T15:07:10.000Z |
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Power Plant IDAES heat exchanger model.
The boiler heat exchanger model consist of a cross flow shell and tube hx
that can be used for any of the boiler components, such as economizer,
reheater or superheaters (primary, secondary, etc).
The model includes shell and tube rigorous heat transfer calculations and
pressure drop calculations for shell side. Note that this model assumes no
phase transitions (if user requires phase transitions, they need a general
model)
The main config arguments:
- delta T method: counter-current or co-current
- tube_arrangement: in-line or staggered
- has radiation: True if model is used as a reheater or superheater unit
Gas emissivity calculated (Gas temperature above 700 K)
General assumptions:
- SI units (consistent with prop pack)
- heat transfer calc U = f(Nu, Re, Pr)
- Pressure drop tube and shell side (friction factor calc.)
"""
# Import Python libraries
import logging
from enum import Enum
# Import Pyomo libraries
from pyomo.common.config import ConfigBlock, ConfigValue, In, Bool
# Additional import for the unit operation
from pyomo.environ import value, Var, Param, exp, sqrt,\
log, PositiveReals, NonNegativeReals, units as pyunits
# Import IDAES cores
from idaes.core import (ControlVolume0DBlock,
declare_process_block_class,
MaterialBalanceType,
EnergyBalanceType,
MomentumBalanceType,
UnitModelBlockData,
useDefault)
from idaes.core.util.config import is_physical_parameter_block, DefaultBool
from idaes.core.util.misc import add_object_reference
from idaes.core.util.constants import Constants as c
from idaes.core.util import get_solver
import idaes.logger as idaeslog
__author__ = "Boiler subsystem team (J Ma, M Zamarripa)"
__version__ = "1.0.0"
# Set up logger
_log = logging.getLogger(__name__)
class TubeArrangement(Enum):
inLine = 0
staggered = 1
class DeltaTMethod(Enum):
counterCurrent = 0
coCurrent = 1
@declare_process_block_class("BoilerHeatExchanger")
class BoilerHeatExchangerData(UnitModelBlockData):
"""
Standard Heat Exchanger Unit Model Class
"""
CONFIG = ConfigBlock()
CONFIG.declare("dynamic", ConfigValue(
domain=DefaultBool,
default=useDefault,
description="Dynamic model flag",
doc="""Indicates whether this model will be dynamic or not,
**default** = useDefault.
**Valid values:** {
**useDefault** - get flag from parent (default = False),
**True** - set as a dynamic model,
**False** - set as a steady-state model.}"""))
CONFIG.declare("has_holdup", ConfigValue(
default=useDefault,
domain=DefaultBool,
description="Holdup construction flag",
doc="""Indicates whether holdup terms should be constructed or not.
Must be True if dynamic = True,
**default** - False.
**Valid values:** {
**True** - construct holdup terms,
**False** - do not construct holdup terms}"""))
CONFIG.declare("side_1_property_package", ConfigValue(
default=useDefault,
domain=is_physical_parameter_block,
description="Property package to use for control volume",
doc="""Property parameter object used to define property calculations,
**default** - useDefault.
**Valid values:** {
**useDefault** - use default package from parent model or flowsheet,
**PhysicalParameterObject** - a PhysicalParameterBlock object.}"""))
CONFIG.declare("side_1_property_package_args", ConfigBlock(
implicit=True,
description="Arguments to use for constructing property packages",
doc="""A ConfigBlock with arguments to be passed to a property block(s)
and used when constructing these,
**default** - None.
**Valid values:** {
see property package for documentation.}"""))
CONFIG.declare("side_2_property_package", ConfigValue(
default=useDefault,
domain=is_physical_parameter_block,
description="Property package to use for control volume",
doc="""Property parameter object used to define property calculations,
**default** - useDefault.
**Valid values:** {
**useDefault** - use default package from parent model or flowsheet,
**PhysicalParameterObject** - a PhysicalParameterBlock object.}"""))
CONFIG.declare("side_2_property_package_args", ConfigBlock(
implicit=True,
description="Arguments to use for constructing property packages",
doc="""A ConfigBlock with arguments to be passed to a property block(s)
and used when constructing these,
**default** - None.
**Valid values:** {
see property package for documentation.}"""))
CONFIG.declare("material_balance_type", ConfigValue(
default=MaterialBalanceType.useDefault,
domain=In(MaterialBalanceType),
description="Material balance construction flag",
doc="""Indicates what type of material balance should be constructed,
**default** - MaterialBalanceType.componentPhase.
**Valid values:** {
**MaterialBalanceType.none** - exclude material balances,
**MaterialBalanceType.componentPhase** - use phase component balances,
**MaterialBalanceType.componentTotal** - use total component balances,
**MaterialBalanceType.elementTotal** - use total element balances,
**MaterialBalanceType.total** - use total material balance.}"""))
CONFIG.declare("energy_balance_type", ConfigValue(
default=EnergyBalanceType.useDefault,
domain=In(EnergyBalanceType),
description="Energy balance construction flag",
doc="""Indicates what type of energy balance should be constructed,
**default** - EnergyBalanceType.enthalpyTotal.
**Valid values:** {
**EnergyBalanceType.none** - exclude energy balances,
**EnergyBalanceType.enthalpyTotal** - single enthalpy balance for material,
**EnergyBalanceType.enthalpyPhase** - enthalpy balances for each phase,
**EnergyBalanceType.energyTotal** - single energy balance for material,
**EnergyBalanceType.energyPhase** - energy balances for each phase.}"""))
CONFIG.declare("momentum_balance_type", ConfigValue(
default=MomentumBalanceType.pressureTotal,
domain=In(MomentumBalanceType),
description="Momentum balance construction flag",
doc="""Indicates what type of momentum balance should be constructed,
**default** - MomentumBalanceType.pressureTotal.
**Valid values:** {
**MomentumBalanceType.none** - exclude momentum balances,
**MomentumBalanceType.pressureTotal** - single pressure balance for material,
**MomentumBalanceType.pressurePhase** - pressure balances for each phase,
**MomentumBalanceType.momentumTotal** - single momentum balance for material,
**MomentumBalanceType.momentumPhase** - momentum balances for each phase.}"""))
CONFIG.declare("has_pressure_change", ConfigValue(
default=False,
domain=Bool,
description="Pressure change term construction flag",
doc="""Indicates whether terms for pressure change should be
constructed,
**default** - False.
**Valid values:** {
**True** - include pressure change terms,
**False** - exclude pressure change terms.}"""))
CONFIG.declare("delta_T_method", ConfigValue(
default=DeltaTMethod.counterCurrent,
domain=In(DeltaTMethod),
description="Flow configuration in unit to compute delta T",
doc="""Flag indicating type of flow arrangement to use for delta
**default** - DeltaTMethod.counterCurrent
**Valid values:** {
**DeltaTMethod.counterCurrent**}"""))
CONFIG.declare("tube_arrangement", ConfigValue(
default=TubeArrangement.inLine,
domain=In(TubeArrangement),
description='tube configuration',
doc='Tube arrangement could be in-line and staggered'))
CONFIG.declare("side_1_water_phase", ConfigValue(
default='Liq',
domain=In(['Liq', 'Vap']),
description='side 1 water phase',
doc='Define water phase for property calls'))
CONFIG.declare("has_radiation", ConfigValue(
default=False,
domain=In([False, True]),
description='Has side 2 gas radiation',
doc='Define if side 2 gas radiation is to be considered'))
def build(self):
"""
Build method for Boiler heat exchanger model
Args:
None
Returns:
None
"""
# Call UnitModel.build to setup dynamics
super(BoilerHeatExchangerData, self).build()
# Build ControlVolume Block
self.side_1 = ControlVolume0DBlock(default={
"dynamic": self.config.dynamic,
"has_holdup": self.config.has_holdup,
"property_package": self.config.side_1_property_package,
"property_package_args": self.config.side_1_property_package_args})
self.side_2 = ControlVolume0DBlock(default={
"dynamic": self.config.dynamic,
"has_holdup": self.config.has_holdup,
"property_package": self.config.side_2_property_package,
"property_package_args": self.config.side_2_property_package_args})
# Add Geometry
self.side_1.add_geometry()
self.side_2.add_geometry()
# Add state block
self.side_1.add_state_blocks(has_phase_equilibrium=False)
# Add material balance
self.side_1.add_material_balances(
balance_type=self.config.material_balance_type)
# add energy balance
self.side_1.add_energy_balances(
balance_type=self.config.energy_balance_type,
has_heat_transfer=True)
# add momentum balance
self.side_1.add_momentum_balances(
balance_type=self.config.momentum_balance_type,
has_pressure_change=self.config.has_pressure_change)
# Add state block
self.side_2.add_state_blocks(has_phase_equilibrium=False)
# Add material balance
self.side_2.add_material_balances(
balance_type=self.config.material_balance_type)
# add energy balance
self.side_2.add_energy_balances(
balance_type=self.config.energy_balance_type,
has_heat_transfer=True)
# add momentum balance
self.side_2.add_momentum_balances(
balance_type=self.config.momentum_balance_type,
has_pressure_change=self.config.has_pressure_change)
# Set Unit Geometry and control volume
self._set_geometry()
self.side_1_fluid_phase = self.config.side_1_water_phase
# Construct performance equations
self._make_performance()
# Construct performance equations
if self.config.delta_T_method == DeltaTMethod.counterCurrent:
self._make_counter_current()
else:
self._make_co_current()
self.add_inlet_port(name="side_1_inlet", block=self.side_1)
self.add_inlet_port(name="side_2_inlet", block=self.side_2)
self.add_outlet_port(name="side_1_outlet", block=self.side_1)
self.add_outlet_port(name="side_2_outlet", block=self.side_2)
def _set_geometry(self):
"""
Define the geometry of the unit as necessary, and link to holdup volume
Args:
None
Returns:
None
"""
# Elevation difference (outlet - inlet) for static pressure calculation
self.delta_elevation = Var(
initialize=0,
within=NonNegativeReals,
doc='Elevation increase used for static pressure calculation - m',
units=pyunits.m)
# Number of tube columns in the cross section plane
# perpendicular to shell side fluid flow (y direction)
self.tube_ncol = Var(initialize=10.0,
within=PositiveReals,
doc='Number of tube columns')
# Number of tube rows in the direction of shell side
# fluid flow (x direction)
self.tube_nrow = Var(initialize=10.0,
within=PositiveReals,
doc='Number of tube rows')
# Number of inlet tube rows
self.nrow_inlet = Var(initialize=1,
within=PositiveReals,
doc='Number of inlet tube rows')
# Length of a tube in z direction for each path
self.tube_length = Var(initialize=5.0,
within=PositiveReals,
doc='Tube length - m',
units=pyunits.m)
# Inner diameter of tubes
self.tube_di = Var(initialize=0.05,
within=PositiveReals,
doc='Inner diameter of tube - m',
units=pyunits.m)
# Thickness of tube
self.tube_thickness = Var(initialize=0.005,
within=PositiveReals,
doc='Tube thickness - m',
units=pyunits.m)
# Pitch of tubes between two neighboring columns (in y direction).
# Always greater than tube outside diameter
self.pitch_y = Var(initialize=0.1,
within=PositiveReals,
doc='Pitch between two neighboring columns - m',
units=pyunits.m)
# Pitch of tubes between two neighboring rows (in x direction).
# Always greater than tube outside diameter
self.pitch_x = Var(initialize=0.1,
within=PositiveReals,
doc='Pitch between two neighboring rows - m',
units=pyunits.m)
# Tube outside diameter
@self.Expression(doc="Outside diameter of tube - m")
def do_tube(b):
return b.tube_di + b.tube_thickness * 2.0
if self.config.has_radiation is True:
# Mean beam length for radiation
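            # (standard 3.6 * V / A estimate: free gas volume per unit tube
            # length divided by tube outside surface area per unit length)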
@self.Expression(doc="Mean beam length - m")
def mbl(b):
return 3.6*(b.pitch_x*b.pitch_y/c.pi/b.do_tube - b.do_tube/4.0)
# Mean beam length for radiation divided by sqrt(2)
@self.Expression(doc="Mean beam length - m")
def mbl_div2(b):
return b.mbl/sqrt(2.0)
# Mean beam length for radiation multiplied by sqrt(2)
@self.Expression(doc="Mean beam length - m")
def mbl_mul2(b):
return b.mbl*sqrt(2.0)
# Number of 180 degree bends for the tube
@self.Expression(doc="Nbend_tube")
def nbend_tube(b):
return b.tube_nrow / b.nrow_inlet
# Total flow area on tube side
@self.Expression(doc="Total flow area on tube side - m2")
def area_flow_tube(b):
return 0.25 * c.pi * b.tube_di**2.0 * b.tube_ncol * b.nrow_inlet
# Total flow area on shell side
@self.Expression(doc="Total flow area on shell side - m2")
def area_flow_shell(b):
return b.tube_length * (b.pitch_y - b.do_tube) * b.tube_ncol
# Total heat transfer area based on outside diameter
@self.Expression(doc="Total heat transfer "
"area based on tube outside diamer - m2")
def area_heat_transfer(b):
return c.pi * b.do_tube * b.tube_length * b.tube_ncol * b.tube_nrow
# Ratio of pitch_x/do_tube
@self.Expression(doc="Ratio of pitch in x "
"direction to tube outside diamer")
def pitch_x_to_do(b):
return b.pitch_x / b.do_tube
# Ratio of pitch_y/do_tube
@self.Expression(doc="Ratio of pitch in y "
"direction to tube outside diamer")
def pitch_y_to_do(b):
return b.pitch_y / b.do_tube
if self.config.has_holdup is True:
add_object_reference(self, "volume_side_1", self.side_1.volume)
add_object_reference(self, "volume_side_2", self.side_2.volume)
            # Total tube side volume (holdup volume is indexed by time)
            @self.Constraint(self.flowsheet().time,
                             doc="Total tube side volume")
            def volume_side_1_eqn(b, t):
                return b.volume_side_1[t] == (
                    0.25 * c.pi * b.tube_di**2.0 * b.tube_length
                    * b.tube_ncol * b.tube_nrow)
            # Total shell side volume (holdup volume is indexed by time)
            @self.Constraint(self.flowsheet().time,
                             doc="Total shell side volume")
            def volume_side_2_eqn(b, t):
                return b.volume_side_2[t] == \
                    b.tube_ncol * b.pitch_y * b.tube_length \
                    * b.tube_nrow * b.pitch_x - 0.25 * c.pi * b.do_tube**2.0 \
                    * b.tube_length * b.tube_ncol * b.tube_nrow
def _make_performance(self):
"""
Define constraints which describe the behaviour of the unit model.
Args:
None
Returns:
None
"""
# Set references to balance terms at unit level
add_object_reference(self, "heat_duty", self.side_1.heat)
if self.config.has_pressure_change is True:
add_object_reference(self, "deltaP_tube", self.side_1.deltaP)
add_object_reference(self, "deltaP_shell", self.side_2.deltaP)
# Performance parameters and variables
# Wall thermal conductivity
self.therm_cond_wall = Param(
initialize=43.0,
within=PositiveReals,
doc="Thermal conductivity of the wall - W/(m K)",
units=pyunits.W/pyunits.m/pyunits.K)
# Loss coefficient for a 180 degree bend (u-turn),
# usually related to radius to inside diameter ratio
self.k_loss_uturn = Param(initialize=0.5,
within=PositiveReals,
mutable=True,
doc='Loss coefficient of a tube u-turn')
# Heat transfer resistance due to the fouling on tube side
# (typical boiler hx)
self.tube_r_fouling = Param(
initialize=0.00017,
within=NonNegativeReals,
mutable=True,
doc="Fouling resistance on tube side - K m2 / W",
units=pyunits.K*pyunits.m**2*pyunits.W**-1)
# Heat transfer resistance due to the fouling on shell side
self.shell_r_fouling = Param(
initialize=0.0008,
within=NonNegativeReals,
mutable=True,
doc="Fouling resistance on tube side - K m2 / W",
units=pyunits.K*pyunits.m**2*pyunits.W**-1)
# Correction factor for overall heat transfer coefficient
self.fcorrection_htc = Var(initialize=1.0,
within=NonNegativeReals,
doc="Correction factor for HTC")
# Correction factor for tube side pressure drop due to friction
self.fcorrection_dp_tube = Var(
initialize=1.0,
doc="Correction factor for tube side pressure drop")
# Correction factor for shell side pressure drop due to friction
self.fcorrection_dp_shell = Var(
initialize=1.0,
doc="Correction factor for shell side pressure drop")
# Temperature driving force
self.temperature_driving_force = Var(
self.flowsheet().time,
initialize=1.0,
doc="Mean driving force for heat exchange - K",
units=pyunits.K)
if self.config.has_radiation is True:
# Shell side wall emissivity, converted from parameter to variable
self.emissivity_wall = Var(initialize=0.7,
doc='Shell side wall emissivity')
# Gas emissivity at mbl
self.gas_emissivity = Var(
self.flowsheet().time,
initialize=0.5,
doc="Emissivity at given mean beam length")
# Gas emissivity at mbl/sqrt(2)
self.gas_emissivity_div2 = Var(
self.flowsheet().time,
initialize=0.4,
doc="Emissivity at mean beam length divided by sqrt of 2")
# Gas emissivity at mbl*sqrt(2)
self.gas_emissivity_mul2 = Var(
self.flowsheet().time,
initialize=0.6,
doc="Emissivity at mean beam length multiplied by sqrt of 2")
# Gray fraction of gas in entire spectrum
self.gas_gray_fraction = Var(
self.flowsheet().time,
initialize=0.5,
doc="Gray fraction of gas in entire spectrum")
# Gas-surface radiation exchange factor for shell side wall
self.frad_gas_shell = Var(self.flowsheet().time,
initialize=0.5,
doc="Gas-surface radiation exchange "
"factor for shell side wall")
# Shell side equivalent convective heat transfer coefficient
# due to radiation
self.hconv_shell_rad = Var(
self.flowsheet().time,
initialize=100.0,
doc="Shell convective heat transfer coefficient due to radiation",
units=pyunits.W/pyunits.m**2/pyunits.K)
# Temperature difference at side 1 inlet
self.deltaT_1 = Var(self.flowsheet().time,
initialize=1.0,
doc="Temperature difference at side 1 inlet - K",
units=pyunits.K)
# Temperature difference at side 1 outlet
self.deltaT_2 = Var(self.flowsheet().time,
initialize=1.0,
doc="Temperature difference at side 1 outlet - K",
units=pyunits.K)
# Overall heat transfer coefficient
self.overall_heat_transfer_coefficient = Var(
self.flowsheet().time,
initialize=1.0,
units=pyunits.W/pyunits.m**2/pyunits.K)
# Tube side convective heat transfer coefficient
self.hconv_tube = Var(
self.flowsheet().time,
initialize=100.0,
doc="Tube side convective heat transfer coefficient - W / (m2 K)",
units=pyunits.W/pyunits.m**2/pyunits.K)
# Shell side convective heat transfer coefficient due to convection
self.hconv_shell_conv = Var(
self.flowsheet().time,
initialize=100.0,
doc="Shell side convective heat transfer coefficient due to convection",
units=pyunits.W/pyunits.m**2/pyunits.K)
# Total shell side convective heat transfer coefficient
# including convection and radiation
self.hconv_shell_total = Var(
self.flowsheet().time,
initialize=150.0,
doc="Total shell side convective heat transfer coefficient",
units=pyunits.W/pyunits.m**2/pyunits.K)
# Heat conduction resistance of tube wall
self.rcond_wall = Var(
initialize=1.0,
doc="Heat conduction resistance of wall - K m2 / W",
units=pyunits.m**2*pyunits.K/pyunits.W)
if self.config.has_radiation is True:
# Constraints for gas emissivity
@self.Constraint(self.flowsheet().time, doc="Gas emissivity")
def gas_emissivity_eqn(b, t):
# This is a surrogate model, so need to do units manually
X1 = (b.side_2.properties_in[t].temperature
+ b.side_2.properties_out[t].temperature)/2/pyunits.K
X2 = b.mbl/pyunits.m
X3 = b.side_2.properties_in[t].pressure/pyunits.Pa
X4 = b.side_2.properties_in[t].mole_frac_comp['CO2']
X5 = b.side_2.properties_in[t].mole_frac_comp['H2O']
X6 = b.side_2.properties_in[t].mole_frac_comp['O2']
# Surrogate model fitted using rigorous calc. - 500 samples
# Wide operating range:
# X1: 700 – 1500 (Gas Temperature)
# X2: 0.2 – 1 (Mean beam length)
# X3: 79000-102000 (pressure in Pa)
# X4: 0.12-0.16 (mol frac CO2)
# X5: 0.075-0.15 (mol frac H2O)
# X6: 0.01-0.07 (mol frac O2)
return b.gas_emissivity[t] == \
(- 0.116916606892E-003 * X1
- 0.29111124038936179309056E-001 * X2
+ 0.50509651230704191577346E-006 * X3
+ 1.1844222822155641150488 * X4
- 0.64720757767102773949652E-001 * X5
- 0.35853593221454795048064E-001 * X6
+ 0.12227919099126832724878 * log(X1)
+ 0.45102118316418124410738E-001 * log(X2)
+ 0.33111863480179408447679E-001 * log(X3)
+ 0.17674928397780117345084E-001 * log(X5)
- 0.12541139396423576016226E-001 * exp(X2)
- 0.90251708836308952577099 * exp(X4)
+ 0.32447078857791738538963E-002 * X2**2
- 0.31332075610864829615706E-004 * X1*X2
- 0.54639645449809960433102E-009 * X1*X3
- 0.19721467902854980460033E-003 * X1*X5
+ 0.45275517692290622763507E-004 * X1*X6
+ 0.75458754990630776904396E-006 * X2*X3
+ 0.39691751689931338564765E-001 * X2*X4
+ 0.73169514231974708273754 * X2*X5
- 0.35852614507684822664491E-001 * X2*X6
+ 0.39743672195685803976177E-005 * X3*X5
+ 0.58802879141883679897383E-008 * (X1*X2)**2
- 1.2994610452829884472692 * (X2*X5)**2)
# Constraints for gas emissivity at mbl/sqrt(2)
@self.Constraint(self.flowsheet().time,
doc="Gas emissivity at a lower mean beam length")
def gas_emissivity_div2_eqn(b, t):
# This is a surrogate model, so need to do units manually
X1 = (b.side_2.properties_in[t].temperature
+ b.side_2.properties_out[t].temperature)/2/pyunits.K
X2 = b.mbl_div2/pyunits.m
X3 = b.side_2.properties_in[t].pressure/pyunits.Pa
X4 = b.side_2.properties_in[t].mole_frac_comp['CO2']
X5 = b.side_2.properties_in[t].mole_frac_comp['H2O']
X6 = b.side_2.properties_in[t].mole_frac_comp['O2']
# Surrogate model fitted using rigorous calc. - 500 samples
# Wide operating range:
# X1: 700 – 1500 (Gas Temperature)
# X2: 0.2 – 1 (Mean beam length)
# X3: 79000-102000 (pressure in Pa)
# X4: 0.12-0.16 (mol frac CO2)
# X5: 0.075-0.15 (mol frac H2O)
# X6: 0.01-0.07 (mol frac O2)
return b.gas_emissivity_div2[t] == \
(- 0.116916606892E-003 * X1
- 0.29111124038936179309056E-001 * X2
+ 0.50509651230704191577346E-006 * X3
+ 1.1844222822155641150488 * X4
- 0.64720757767102773949652E-001 * X5
- 0.35853593221454795048064E-001 * X6
+ 0.12227919099126832724878 * log(X1)
+ 0.45102118316418124410738E-001 * log(X2)
+ 0.33111863480179408447679E-001 * log(X3)
+ 0.17674928397780117345084E-001 * log(X5)
- 0.12541139396423576016226E-001 * exp(X2)
- 0.90251708836308952577099 * exp(X4)
+ 0.32447078857791738538963E-002 * X2**2
- 0.31332075610864829615706E-004 * X1*X2
- 0.54639645449809960433102E-009 * X1*X3
- 0.19721467902854980460033E-003 * X1*X5
+ 0.45275517692290622763507E-004 * X1*X6
+ 0.75458754990630776904396E-006 * X2*X3
+ 0.39691751689931338564765E-001 * X2*X4
+ 0.73169514231974708273754 * X2*X5
- 0.35852614507684822664491E-001 * X2*X6
+ 0.39743672195685803976177E-005 * X3*X5
+ 0.58802879141883679897383E-008 * (X1*X2)**2
- 1.2994610452829884472692 * (X2*X5)**2)
# Constraints for gas emissivity at mbl*sqrt(2)
@self.Constraint(self.flowsheet().time,
doc="Gas emissivity at a higher mean beam length")
def gas_emissivity_mul2_eqn(b, t):
# This is a surrogate model, so need to do units manually
X1 = (b.side_2.properties_in[t].temperature
+ b.side_2.properties_out[t].temperature)/2/pyunits.K
X2 = b.mbl_mul2/pyunits.m
X3 = b.side_2.properties_in[t].pressure/pyunits.Pa
X4 = b.side_2.properties_in[t].mole_frac_comp['CO2']
X5 = b.side_2.properties_in[t].mole_frac_comp['H2O']
X6 = b.side_2.properties_in[t].mole_frac_comp['O2']
# Surrogate model fitted using rigorous calc. 500 samples
# Wide operating range:
# X1: 700 – 1500 (Gas Temperature)
# X2: 0.2 – 1 (Mean beam length)
# X3: 79000-102000 (pressure in Pa)
# X4: 0.12-0.16 (mol frac CO2)
# X5: 0.075-0.15 (mol frac H2O)
# X6: 0.01-0.07 (mol frac O2)
return b.gas_emissivity_mul2[t] == \
(- 0.116916606892E-003 * X1
- 0.29111124038936179309056E-001 * X2
+ 0.50509651230704191577346E-006 * X3
+ 1.1844222822155641150488 * X4
- 0.64720757767102773949652E-001 * X5
- 0.35853593221454795048064E-001 * X6
+ 0.12227919099126832724878 * log(X1)
+ 0.45102118316418124410738E-001 * log(X2)
+ 0.33111863480179408447679E-001 * log(X3)
+ 0.17674928397780117345084E-001 * log(X5)
- 0.12541139396423576016226E-001 * exp(X2)
- 0.90251708836308952577099 * exp(X4)
+ 0.32447078857791738538963E-002 * X2**2
- 0.31332075610864829615706E-004 * X1*X2
- 0.54639645449809960433102E-009 * X1*X3
- 0.19721467902854980460033E-003 * X1*X5
+ 0.45275517692290622763507E-004 * X1*X6
+ 0.75458754990630776904396E-006 * X2*X3
+ 0.39691751689931338564765E-001 * X2*X4
+ 0.73169514231974708273754 * X2*X5
- 0.35852614507684822664491E-001 * X2*X6
+ 0.39743672195685803976177E-005 * X3*X5
+ 0.58802879141883679897383E-008 * (X1*X2)**2
- 1.2994610452829884472692 * (X2*X5)**2)
# fraction of gray gas spectrum
@self.Constraint(self.flowsheet().time,
doc="Fraction of gray gas spectrum")
def gas_gray_fraction_eqn(b, t):
return (b.gas_gray_fraction[t]*(2*b.gas_emissivity_div2[t] -
b.gas_emissivity_mul2[t]) ==
b.gas_emissivity_div2[t]**2)
# gas-surface radiation exchange factor
# between gas and shell side wall
@self.Constraint(self.flowsheet().time,
doc="Gas-surface radiation exchange "
"factor between gas and shell side wall")
def frad_gas_shell_eqn(b, t):
return (b.frad_gas_shell[t] *
((1/b.emissivity_wall-1)*b.gas_emissivity[t] +
b.gas_gray_fraction[t]) ==
b.gas_gray_fraction[t]*b.gas_emissivity[t])
            # equivalent convective heat transfer coefficient due to radiation
            @self.Constraint(self.flowsheet().time,
                             doc="Equivalent convective heat transfer "
                                 "coefficient due to radiation")
def hconv_shell_rad_eqn(b, t):
return b.hconv_shell_rad[t] == \
c.stefan_constant * b.frad_gas_shell[t] * \
((b.side_2.properties_in[t].temperature +
b.side_2.properties_out[t].temperature)/2
+ b.side_1.properties_in[t].temperature) * \
(((b.side_2.properties_in[t].temperature
+ b.side_2.properties_out[t].temperature)/2)**2 +
b.side_1.properties_in[t].temperature**2)
# Energy balance equation
@self.Constraint(self.flowsheet().time,
doc="Energy balance between two sides")
def energy_balance(b, t):
return b.side_1.heat[t] / 1e6 == -b.side_2.heat[t] / 1e6
# Heat transfer correlation
@self.Constraint(self.flowsheet().time,
doc="Heat transfer correlation")
def heat_transfer_correlation(b, t):
return b.heat_duty[t] / 1e6 == \
(b.overall_heat_transfer_coefficient[t] *
b.area_heat_transfer *
b.temperature_driving_force[t]) / 1e6
# Driving force
@self.Constraint(self.flowsheet().time,
doc="Simplified Log mean temperature "
"difference calculation")
def LMTD(b, t):
return b.temperature_driving_force[t] == \
((b.deltaT_1[t]**0.3241 +
b.deltaT_2[t]**0.3241)/1.99996)**(1/0.3241)
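        # The fractional power-mean form above is a smooth approximation of
        # the exact log-mean temperature difference; unlike the exact LMTD it
        # remains well-defined as deltaT_1 approaches deltaT_2.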
# Tube side heat transfer coefficient and pressure drop
# -----------------------------------------------------
# Velocity on tube side
self.v_tube = Var(self.flowsheet().time,
initialize=1.0,
doc="Velocity on tube side - m/s",
units=pyunits.m/pyunits.s)
        # Reynolds number on tube side
self.N_Re_tube = Var(self.flowsheet().time,
initialize=10000.0,
doc="Reynolds number on tube side")
if self.config.has_pressure_change is True:
# Friction factor on tube side
self.friction_factor_tube = Var(self.flowsheet().time,
initialize=1.0,
doc='Friction factor on tube side')
# Pressure drop due to friction on tube side
self.deltaP_tube_friction = Var(
self.flowsheet().time,
initialize=-10.0,
doc="Pressure drop due to friction on tube side - Pa",
units=pyunits.Pa)
# Pressure drop due to 180 degree turn on tube side
self.deltaP_tube_uturn = Var(
self.flowsheet().time,
initialize=-10.0,
doc="Pressure drop due to u-turn on tube side - Pa",
units=pyunits.Pa)
# Prandtl number on tube side
self.N_Pr_tube = Var(self.flowsheet().time, initialize=1,
doc="Prandtl number on tube side")
# Nusselt number on tube side
self.N_Nu_tube = Var(self.flowsheet().time, initialize=1,
doc="Nusselts number on tube side")
# Velocity equation
@self.Constraint(self.flowsheet().time,
doc="Tube side velocity equation - m/s")
def v_tube_eqn(b, t):
return (b.v_tube[t] * b.area_flow_tube *
b.side_1.properties_in[t].dens_mol_phase[
self.side_1_fluid_phase] ==
b.side_1.properties_in[t].flow_mol)
# Reynolds number
@self.Constraint(self.flowsheet().time,
doc="Reynolds number equation on tube side")
def N_Re_tube_eqn(b, t):
return (b.N_Re_tube[t] *
b.side_1.properties_in[t].visc_d_phase[
self.side_1_fluid_phase] ==
b.tube_di * b.v_tube[t] *
b.side_1.properties_in[t].dens_mass_phase[
self.side_1_fluid_phase])
if self.config.has_pressure_change is True:
# Friction factor
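            # (Blasius-type correlation, f * Re^0.25 = 0.3164, for turbulent
            # flow in smooth tubes, scaled by the user correction factor)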
@self.Constraint(self.flowsheet().time,
doc="Darcy friction factor on tube side")
def friction_factor_tube_eqn(b, t):
return b.friction_factor_tube[t]*b.N_Re_tube[t]**0.25 == \
0.3164*b.fcorrection_dp_tube
# Pressure drop due to friction
@self.Constraint(self.flowsheet().time,
doc="Pressure drop due to friction on tube side")
def deltaP_tube_friction_eqn(b, t):
return (b.deltaP_tube_friction[t]*b.tube_di*b.nrow_inlet ==
-0.5 * b.side_1.properties_in[t].dens_mass_phase[
self.side_1_fluid_phase] *
b.v_tube[t]**2 * b.friction_factor_tube[t] *
b.tube_length * b.tube_nrow)
# Pressure drop due to u-turn
@self.Constraint(self.flowsheet().time,
doc="Pressure drop due to u-turn on tube side")
def deltaP_tube_uturn_eqn(b, t):
return (b.deltaP_tube_uturn[t] ==
-0.5 * b.side_1.properties_in[t].dens_mass_phase[
self.side_1_fluid_phase] *
b.v_tube[t]**2 * b.k_loss_uturn)
# Total pressure drop on tube side
@self.Constraint(self.flowsheet().time,
doc="Total pressure drop on tube side")
def deltaP_tube_eqn(b, t):
return (b.deltaP_tube[t] ==
b.deltaP_tube_friction[t] + b.deltaP_tube_uturn[t] -
b.delta_elevation * c.acceleration_gravity *
(b.side_1.properties_in[t].dens_mass_phase[
self.side_1_fluid_phase] +
b.side_1.properties_out[t].dens_mass_phase[
self.side_1_fluid_phase]) / 2.0)
# Prandtl number
@self.Constraint(self.flowsheet().time,
doc="Prandtl number equation on tube side")
def N_Pr_tube_eqn(b, t):
return (b.N_Pr_tube[t] *
b.side_1.properties_in[t].therm_cond_phase[
self.side_1_fluid_phase] *
b.side_1.properties_in[t].mw ==
b.side_1.properties_in[t].cp_mol_phase[
self.side_1_fluid_phase] *
b.side_1.properties_in[t].visc_d_phase[
self.side_1_fluid_phase])
        # Nusselt number
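        # (Dittus-Boelter correlation, Nu = 0.023 Re^0.8 Pr^0.4, for fully
        # developed turbulent flow inside tubes)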
@self.Constraint(self.flowsheet().time,
doc="Nusselts number equation on tube side")
def N_Nu_tube_eqn(b, t):
return b.N_Nu_tube[t] == \
0.023 * b.N_Re_tube[t]**0.8 * b.N_Pr_tube[t]**0.4
# Heat transfer coefficient
@self.Constraint(self.flowsheet().time,
doc="Convective heat transfer "
"coefficient equation on tube side")
def hconv_tube_eqn(b, t):
return (b.hconv_tube[t]*self.tube_di/1000 ==
b.N_Nu_tube[t] *
b.side_1.properties_in[t].therm_cond_phase[
self.side_1_fluid_phase]/1000)
# Pressure drop and heat transfer coefficient on shell side
# ----------------------------------------------------------
# Tube arrangement factor
if self.config.tube_arrangement == TubeArrangement.inLine:
self.f_arrangement = Param(initialize=0.788,
doc="In-line tube arrangement factor")
elif self.config.tube_arrangement == TubeArrangement.staggered:
self.f_arrangement = Param(initialize=1.0,
doc="Staggered tube arrangement factor")
else:
raise Exception('tube arrangement type not supported')
# Velocity on shell side
self.v_shell = Var(self.flowsheet().time,
initialize=1.0,
doc="Velocity on shell side - m/s",
units=pyunits.m/pyunits.s)
        # Reynolds number on shell side
self.N_Re_shell = Var(self.flowsheet().time,
initialize=10000.0,
doc="Reynolds number on shell side")
# Friction factor on shell side
self.friction_factor_shell = Var(self.flowsheet().time,
initialize=1.0,
doc='Friction factor on shell side')
# Prandtl number on shell side
self.N_Pr_shell = Var(self.flowsheet().time,
initialize=1,
doc="Prandtl number on shell side")
# Nusselt number on shell side
self.N_Nu_shell = Var(self.flowsheet().time,
initialize=1,
doc="Nusselts number on shell side")
# Velocity equation on shell side
@self.Constraint(self.flowsheet().time, doc="Velocity on shell side")
def v_shell_eqn(b, t):
return b.v_shell[t] * \
b.side_2.properties_in[t].dens_mol_phase["Vap"] * \
b.area_flow_shell == \
sum(b.side_2.properties_in[t].flow_mol_comp[j]
for j in b.side_2.properties_in[t].params.component_list)
# Reynolds number
@self.Constraint(self.flowsheet().time,
doc="Reynolds number equation on shell side")
def N_Re_shell_eqn(b, t):
return b.N_Re_shell[t] * b.side_2.properties_in[t].visc_d == \
b.do_tube * b.v_shell[t] \
* b.side_2.properties_in[t].dens_mol_phase["Vap"] *\
sum(b.side_2.properties_in[t].mw_comp[c]
* b.side_2.properties_in[t].mole_frac_comp[c]
for c in b.side_2.properties_in[t].
params.component_list)
if self.config.has_pressure_change is True:
# Friction factor on shell side
if self.config.tube_arrangement == TubeArrangement.inLine:
@self.Constraint(self.flowsheet().time,
doc="In-line friction factor on shell side")
def friction_factor_shell_eqn(b, t):
return b.friction_factor_shell[t] \
* b.N_Re_shell[t]**0.15 == \
(0.044 + 0.08 * b.pitch_x_to_do
/ (b.pitch_y_to_do - 1.0)**(0.43 + 1.13
/ b.pitch_x_to_do)
) * b.fcorrection_dp_shell
elif self.config.tube_arrangement == TubeArrangement.staggered:
@self.Constraint(self.flowsheet().time,
doc="Staggered friction factor on shell side")
def friction_factor_shell_eqn(b, t):
return b.friction_factor_shell[t] \
* b.N_Re_shell[t]**0.16 == \
(0.25 + 0.118 / (b.pitch_y_to_do - 1.0)**1.08) \
* b.fcorrection_dp_shell
else:
raise Exception('tube arrangement type not supported')
# Pressure drop on shell side
@self.Constraint(self.flowsheet().time,
doc="Pressure change on shell side")
def deltaP_shell_eqn(b, t):
return (
b.deltaP_shell[t] ==
-1.4 * b.friction_factor_shell[t] * b.tube_nrow *
b.side_2.properties_in[t].dens_mol_phase["Vap"] *
sum(b.side_2.properties_in[t].mw_comp[c] *
b.side_2.properties_in[t].mole_frac_comp[c] for c
in b.side_2.properties_in[t].params.component_list) *
b.v_shell[t]**2)
# Prandtl number
@self.Constraint(self.flowsheet().time,
doc="Prandtl number equation on shell side")
def N_Pr_shell_eqn(b, t):
return b.N_Pr_shell[t] * b.side_2.properties_in[t].therm_cond \
* sum(b.side_2.properties_in[t].mw_comp[c]
* b.side_2.properties_in[t].mole_frac_comp[c]
for c in b.side_2.properties_in[t].
params.component_list) == \
b.side_2.properties_in[t].cp_mol * \
b.side_2.properties_in[t].visc_d
# Nusselt number, currently assume Re>300
@self.Constraint(self.flowsheet().time,
doc="Nusselts number equation on shell side")
def N_Nu_shell_eqn(b, t):
return b.N_Nu_shell[t] == b.f_arrangement * 0.33 \
* b.N_Re_shell[t]**0.6 * b.N_Pr_shell[t]**0.333333
# Convective heat transfer coefficient on shell side due to convection
@self.Constraint(self.flowsheet().time,
doc="Convective heat transfer coefficient equation"
"on shell side due to convection")
def hconv_shell_conv_eqn(b, t):
return b.hconv_shell_conv[t] * b.do_tube / 1000 == \
b.N_Nu_shell[t] * b.side_2.properties_in[t].therm_cond\
/ 1000
# Total convective heat transfer coefficient on shell side
@self.Constraint(self.flowsheet().time,
doc="Total convective heat transfer "
"coefficient equation on shell side")
def hconv_shell_total_eqn(b, t):
if self.config.has_radiation is True:
return b.hconv_shell_total[t] == \
b.hconv_shell_conv[t] + b.hconv_shell_rad[t]
else:
return b.hconv_shell_total[t] == b.hconv_shell_conv[t]
# Wall conduction heat transfer resistance
# based on outside surface area
@self.Constraint(doc="Wall conduction heat transfer resistance")
def rcond_wall_eqn(b):
return b.rcond_wall * b.therm_cond_wall == \
0.5 * b.do_tube * log(b.do_tube / b.tube_di)
# Overall heat transfer coefficient
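        # 1/U is the series sum of wall conduction, tube- and shell-side
        # fouling, the shell-side film resistance, and the tube-side film
        # resistance scaled by do/di; fcorrection_htc multiplies the
        # resulting overall coefficient.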
@self.Constraint(self.flowsheet().time,
doc="Wall conduction heat transfer resistance")
def overall_heat_transfer_coefficient_eqn(b, t):
return b.overall_heat_transfer_coefficient[t] \
* (b.rcond_wall + b.tube_r_fouling + b.shell_r_fouling +
1.0 / b.hconv_shell_total[t]
+ b.do_tube / b.hconv_tube[t] / b.tube_di) == \
b.fcorrection_htc
def _make_co_current(self):
"""
Add temperature driving force Constraints for co-current flow.
Args:
None
Returns:
None
"""
# Temperature Differences
@self.Constraint(self.flowsheet().time,
doc="Side 1 inlet temperature difference")
def temperature_difference_1(b, t):
return b.deltaT_1[t] == (
b.side_2.properties_in[t].temperature -
b.side_1.properties_in[t].temperature)
@self.Constraint(self.flowsheet().time,
doc="Side 1 outlet temperature difference")
def temperature_difference_2(b, t):
return b.deltaT_2[t] == (
b.side_2.properties_out[t].temperature -
b.side_1.properties_out[t].temperature)
def _make_counter_current(self):
"""
Add temperature driving force Constraints for counter-current flow.
Args:
None
Returns:
None
"""
# Temperature Differences
@self.Constraint(self.flowsheet().time,
doc="Side 1 inlet temperature difference")
def temperature_difference_1(b, t):
return b.deltaT_1[t] == (
b.side_2.properties_out[t].temperature -
b.side_1.properties_in[t].temperature)
@self.Constraint(self.flowsheet().time,
doc="Side 1 outlet temperature difference")
def temperature_difference_2(b, t):
return b.deltaT_2[t] == (
b.side_2.properties_in[t].temperature -
b.side_1.properties_out[t].temperature)
def model_check(blk):
"""
Model checks for unit - calls model checks for both control volume
Blocks.
Args:
None
Returns:
None
"""
# Run control volume block model checks
blk.side_1.model_check()
blk.side_2.model_check()
def initialize(blk, state_args_1=None, state_args_2=None,
outlvl=idaeslog.NOTSET, solver=None, optarg=None):
'''
General Heat Exchanger initialisation routine.
Keyword Arguments:
state_args_1 : a dict of arguments to be passed to the property
package(s) for side 1 of the heat exchanger to
provide an initial state for initialization
(see documentation of the specific property package)
(default = None).
state_args_2 : a dict of arguments to be passed to the property
package(s) for side 2 of the heat exchanger to
provide an initial state for initialization
(see documentation of the specific property package)
(default = None).
outlvl : sets output level of initialisation routine
optarg : solver options dictionary object (default=None, use
default solver options)
solver : str indicating which solver to use during
initialization (default = None, use default solver)
Returns:
None
'''
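        # Typical call from a flowsheet script (hedged sketch; the state_args
        # dictionaries are placeholders for the actual inlet conditions):
        #   m.fs.unit.initialize(
        #       state_args_1={...},   # tube side (water/steam) inlet state
        #       state_args_2={...},   # shell side (flue gas) inlet state
        #       outlvl=idaeslog.INFO)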
# Set solver options
init_log = idaeslog.getInitLogger(blk.name, outlvl, tag="unit")
solve_log = idaeslog.getSolveLogger(blk.name, outlvl, tag="unit")
# Create solver
opt = get_solver(solver, optarg)
# ---------------------------------------------------------------------
# Initialize inlet property blocks
flags1 = blk.side_1.initialize(outlvl=outlvl,
optarg=optarg,
solver=solver,
state_args=state_args_1)
flags2 = blk.side_2.initialize(outlvl=outlvl,
optarg=optarg,
solver=solver,
state_args=state_args_2)
init_log.info('{} Initialisation Step 1 Complete.'.format(blk.name))
# ---------------------------------------------------------------------
# Initialize temperature differentials
p1_flags = {}
p2_flags = {}
h1_flags = {}
t2_flags = {}
for t in blk.flowsheet().time:
p1_flags[t] = blk.side_1.properties_out[t].pressure.fixed
if not blk.side_1.properties_out[t].pressure.fixed \
and blk.config.has_pressure_change:
blk.side_1.properties_out[t].pressure.fix(
value(blk.side_1.properties_in[t].pressure))
p2_flags[t] = blk.side_2.properties_out[t].pressure.fixed
if not blk.side_2.properties_out[t].pressure.fixed \
and blk.config.has_pressure_change:
blk.side_2.properties_out[t].pressure.fix(
value(blk.side_2.properties_in[t].pressure))
h1_flags[t] = blk.side_1.properties_out[t].enth_mol.fixed
if not blk.side_1.properties_out[t].enth_mol.fixed:
blk.side_1.properties_out[t].enth_mol.fix(
value(blk.side_1.properties_in[t].enth_mol)+100.0)
t2_flags[t] = blk.side_2.properties_out[t].temperature.fixed
if not blk.side_2.properties_out[t].temperature.fixed:
blk.side_2.properties_out[t].temperature.fix(
value(blk.side_2.properties_in[t].temperature)-5.0)
# assuming Delta T min approach
# Deactivate Constraints
blk.heat_transfer_correlation.deactivate()
blk.LMTD.deactivate()
blk.energy_balance.deactivate()
if blk.config.has_pressure_change:
blk.deltaP_tube_eqn.deactivate()
blk.deltaP_shell_eqn.deactivate()
with idaeslog.solver_log(solve_log, idaeslog.DEBUG) as slc:
res = opt.solve(blk, tee=slc.tee)
init_log.info_high("Initialization Step 2 {}.".format(idaeslog.condition(res)))
# Activate energy balance and driving force
for t in blk.flowsheet().time:
if not p1_flags[t]:
blk.side_1.properties_out[t].pressure.unfix()
if not p2_flags[t]:
blk.side_2.properties_out[t].pressure.unfix()
if not h1_flags[t]:
blk.side_1.properties_out[t].enth_mol.unfix()
if not t2_flags[t]:
blk.side_2.properties_out[t].temperature.unfix()
blk.heat_transfer_correlation.activate()
blk.LMTD.activate()
blk.energy_balance.activate()
if blk.config.has_pressure_change:
blk.deltaP_tube_eqn.activate()
blk.deltaP_shell_eqn.activate()
with idaeslog.solver_log(solve_log, idaeslog.DEBUG) as slc:
res = opt.solve(blk, tee=slc.tee)
init_log.info_high("Initialization Step 3 {}.".format(idaeslog.condition(res)))
# ---------------------------------------------------------------------
# Release Inlet state
blk.side_1.release_state(flags1, outlvl)
blk.side_2.release_state(flags2, outlvl)
init_log.info('{} Initialisation Complete.'.format(blk.name))
| 44.972669 | 87 | 0.563615 |
4a21aeab199536dfc7861a7a9169544117ed5c17 | 1,383 | py | Python | great_expectations/datasource/data_connector/sorter/numeric_sorter.py | vanderGoes/great_expectations | 9790cd992a8a4de672c640e89ddd7278a0ca0889 | [
"Apache-2.0"
] | 2 | 2020-01-28T13:51:53.000Z | 2020-01-28T23:13:06.000Z | great_expectations/datasource/data_connector/sorter/numeric_sorter.py | vanderGoes/great_expectations | 9790cd992a8a4de672c640e89ddd7278a0ca0889 | [
"Apache-2.0"
] | null | null | null | great_expectations/datasource/data_connector/sorter/numeric_sorter.py | vanderGoes/great_expectations | 9790cd992a8a4de672c640e89ddd7278a0ca0889 | [
"Apache-2.0"
] | 1 | 2022-01-26T03:25:34.000Z | 2022-01-26T03:25:34.000Z | import logging
from typing import Any
import great_expectations.exceptions as ge_exceptions
from great_expectations.core.batch import BatchDefinition
from great_expectations.datasource.data_connector.sorter import Sorter
from great_expectations.util import is_int, is_numeric
logger = logging.getLogger(__name__)
class NumericSorter(Sorter):
def get_partition_key(self, batch_definition: BatchDefinition) -> Any:
partition_definition: dict = batch_definition.partition_definition
partition_value: Any = partition_definition[self.name]
if not is_numeric(value=partition_value):
raise ge_exceptions.SorterError(
# what is the identifying characteristic of batch_definition?
f"""BatchDefinition with PartitionDefinition "{self.name}" with value "{partition_value}" has value
"{partition_value}" which cannot be part of numeric sort.
"""
)
if is_int(value=partition_value):
return int(partition_value)
# The case of strings having floating point number format used as references to partitions should be rare.
return round(float(partition_value))
def __repr__(self) -> str:
doc_fields_dict: dict = {
"name": self.name,
"reverse": self.reverse,
"type": "NumericSorter",
}
return str(doc_fields_dict)
| 39.514286 | 115 | 0.709328 |
4a21aed46c4e0f9b6ae1291a09340821cbef57e4 | 2,981 | py | Python | migrations/versions/a59cf08dedf3_.py | jparker/therminator_server | 578d205d539edda0416a0636b57f327e1be97572 | [
"MIT"
] | null | null | null | migrations/versions/a59cf08dedf3_.py | jparker/therminator_server | 578d205d539edda0416a0636b57f327e1be97572 | [
"MIT"
] | null | null | null | migrations/versions/a59cf08dedf3_.py | jparker/therminator_server | 578d205d539edda0416a0636b57f327e1be97572 | [
"MIT"
] | null | null | null | """empty message
Revision ID: a59cf08dedf3
Revises:
Create Date: 2017-06-01 16:33:25.483741
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'a59cf08dedf3'
down_revision = None
branch_labels = None
depends_on = None
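# Schema sketch implied by the operations below: users 1-N homes (via
# homes.user_id), homes 1-N sensors (via sensors.home_id), and sensors 1-N
# readings (via readings.sensor_id); readings are unique per sensor and
# timestamp.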
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('email', sa.String(length=255), nullable=False),
sa.Column('password', sa.String(length=255), nullable=False),
sa.Column('api_key', sa.String(length=255), server_default=sa.text("encode(gen_random_bytes(32), 'hex')"), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('api_key'),
sa.UniqueConstraint('email')
)
op.create_table('homes',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('timezone', sa.String(length=255), server_default='UTC', nullable=False),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('user_id', 'name', name='user_id_name_unq')
)
op.create_table('sensors',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('home_id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('uuid', postgresql.UUID(), server_default=sa.text('gen_random_uuid()'), nullable=False),
sa.ForeignKeyConstraint(['home_id'], ['homes.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('home_id', 'name', name='home_id_name_unq'),
sa.UniqueConstraint('uuid')
)
op.create_table('readings',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('sensor_id', sa.Integer(), nullable=False),
sa.Column('timestamp', sa.DateTime(), nullable=False),
sa.Column('int_temp', sa.Float(), server_default='0.0', nullable=False),
sa.Column('ext_temp', sa.Float(), nullable=False),
sa.Column('humidity', sa.Float(), server_default='0.0', nullable=False),
sa.Column('resistance', sa.Float(), server_default='0.0', nullable=False),
sa.CheckConstraint('humidity >= 0 AND humidity <= 100', name='humidity_between_0_and_100'),
sa.CheckConstraint('resistance >= 0', name='resistance_must_be_positive'),
sa.ForeignKeyConstraint(['sensor_id'], ['sensors.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('sensor_id', 'timestamp', name='sensor_id_timestamp_unq')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('readings')
op.drop_table('sensors')
op.drop_table('homes')
op.drop_table('users')
# ### end Alembic commands ###
| 40.283784 | 127 | 0.681315 |
4a21af1fb6a112b3ef677af1594b707004223479 | 17,934 | py | Python | pkg/workloads/cortex/lib/client/python.py | lapaniku/cortex | 746be852caeff2ad80fcf45dcbaaf1899163ad2e | [
"Apache-2.0"
] | null | null | null | pkg/workloads/cortex/lib/client/python.py | lapaniku/cortex | 746be852caeff2ad80fcf45dcbaaf1899163ad2e | [
"Apache-2.0"
] | null | null | null | pkg/workloads/cortex/lib/client/python.py | lapaniku/cortex | 746be852caeff2ad80fcf45dcbaaf1899163ad2e | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Cortex Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import datetime
import threading as td
import multiprocessing as mp
from typing import Any, Optional, Callable
from cortex.lib.log import cx_logger as logger
from cortex.lib.exceptions import UserRuntimeException, CortexException, UserException, WithBreak
from cortex.lib.model import (
ModelsHolder,
LockedModel,
ModelsTree,
LockedModelsTree,
CuratedModelResources,
find_ondisk_model_info,
find_ondisk_models_with_lock,
)
from cortex.lib.concurrency import LockedFile
from cortex import consts
class PythonClient:
def __init__(
self,
api_spec: dict,
models: ModelsHolder,
model_dir: str,
models_tree: Optional[ModelsTree],
lock_dir: Optional[str] = "/run/cron",
load_model_fn: Optional[Callable[[str], Any]] = None,
):
"""
Setup Python model client.
Args:
api_spec: API configuration.
models: Holding all models into memory.
model_dir: Where the models are saved on disk.
models_tree: A tree of the available models from upstream.
lock_dir: Where the resource locks are found. Only when processes_per_replica > 0 and caching disabled.
load_model_fn: Function to load model into memory.
"""
self._api_spec = api_spec
self._models = models
self._models_tree = models_tree
self._model_dir = model_dir
self._lock_dir = lock_dir
self._spec_models = CuratedModelResources(api_spec["curated_model_resources"])
if (
self._api_spec["predictor"]["models"]
and self._api_spec["predictor"]["models"]["dir"] is not None
):
self._models_dir = True
else:
self._models_dir = False
self._spec_model_names = self._spec_models.get_field("name")
# for when local models are used
self._spec_local_model_names = self._spec_models.get_local_model_names()
self._local_model_ts = int(datetime.datetime.now(datetime.timezone.utc).timestamp())
self._multiple_processes = self._api_spec["predictor"]["processes_per_replica"] > 1
self._caching_enabled = self._is_model_caching_enabled()
if callable(load_model_fn):
self._models.set_callback("load", load_model_fn)
def set_load_method(self, load_model_fn: Callable[[str], Any]) -> None:
self._models.set_callback("load", load_model_fn)
def get_model(self, model_name: Optional[str] = None, model_version: str = "latest") -> Any:
"""
Retrieve a model for inference.
Args:
model_name (optional): Name of the model to retrieve (when multiple models are deployed in an API).
When predictor.models.paths is specified, model_name should be the name of one of the models listed in the API config.
When predictor.models.dir is specified, model_name should be the name of a top-level directory in the models dir.
model_version (string, optional): Version of the model to retrieve. Can be omitted or set to "latest" to select the highest version.
Returns:
The value that's returned by your predictor's load_model() method.
"""
if model_version != "latest" and not model_version.isnumeric():
raise UserRuntimeException(
"model_version must be either a parse-able numeric value or 'latest'"
)
# when predictor:model_path or predictor:models:paths is specified
if not self._models_dir:
# when predictor:model_path is provided
if consts.SINGLE_MODEL_NAME in self._spec_model_names:
model_name = consts.SINGLE_MODEL_NAME
model = self._get_model(model_name, model_version)
if model is None:
raise UserRuntimeException(
f"model {model_name} of version {model_version} wasn't found"
)
return model
# when predictor:models:paths is specified
if model_name is None:
raise UserRuntimeException(
f"model_name was not specified, choose one of the following: {self._spec_model_names}"
)
if model_name not in self._spec_model_names:
raise UserRuntimeException(
f"'{model_name}' model wasn't found in the list of available models"
)
# when predictor:models:dir is specified
if self._models_dir:
if model_name is None:
raise UserRuntimeException("model_name was not specified")
if not self._caching_enabled:
available_models = find_ondisk_models_with_lock(self._lock_dir)
if model_name not in available_models:
raise UserRuntimeException(
f"'{model_name}' model wasn't found in the list of available models"
)
model = self._get_model(model_name, model_version)
if model is None:
raise UserRuntimeException(
f"model {model_name} of version {model_version} wasn't found"
)
return model
def _get_model(self, model_name: str, model_version: str) -> Any:
"""
Checks if versioned model is on disk, then checks if model is in memory,
and if not, it loads it into memory, and returns the model.
Args:
model_name: Name of the model, as it's specified in predictor:models:paths or in the other case as they are named on disk.
model_version: Version of the model, as it's found on disk. Can also infer the version number from the "latest" tag.
Exceptions:
RuntimeError: if another thread tried to load the model at the very same time.
Returns:
The model as returned by self._load_model method.
None if the model wasn't found or if it didn't pass the validation.
"""
model = None
tag = ""
if model_version == "latest":
tag = model_version
if not self._caching_enabled:
# determine model version
if tag == "latest":
model_version = self._get_latest_model_version_from_disk(model_name)
model_id = model_name + "-" + model_version
# grab shared access to versioned model
resource = os.path.join(self._lock_dir, model_id + ".txt")
with LockedFile(resource, "r", reader_lock=True) as f:
# check model status
file_status = f.read()
if file_status == "" or file_status == "not-available":
raise WithBreak
current_upstream_ts = int(file_status.split(" ")[1])
update_model = False
# grab shared access to models holder and retrieve model
with LockedModel(self._models, "r", model_name, model_version):
status, local_ts = self._models.has_model(model_name, model_version)
if status == "not-available" or (
status == "in-memory" and local_ts != current_upstream_ts
):
update_model = True
raise WithBreak
model, _ = self._models.get_model(model_name, model_version, tag)
# load model into memory and retrieve it
if update_model:
with LockedModel(self._models, "w", model_name, model_version):
status, _ = self._models.has_model(model_name, model_version)
if status == "not-available" or (
status == "in-memory" and local_ts != current_upstream_ts
):
if status == "not-available":
logger().info(
f"loading model {model_name} of version {model_version} (thread {td.get_ident()})"
)
else:
logger().info(
f"reloading model {model_name} of version {model_version} (thread {td.get_ident()})"
)
try:
self._models.load_model(
model_name,
model_version,
current_upstream_ts,
[tag],
)
except Exception as e:
raise UserRuntimeException(
f"failed (re-)loading model {model_name} of version {model_version} (thread {td.get_ident()})",
str(e),
)
model, _ = self._models.get_model(model_name, model_version, tag)
if not self._multiple_processes and self._caching_enabled:
# determine model version
try:
if tag == "latest":
model_version = self._get_latest_model_version_from_tree(
model_name, self._models_tree.model_info(model_name)
)
except ValueError:
# if model_name hasn't been found
raise UserRuntimeException(
f"'{model_name}' model of tag latest wasn't found in the list of available models"
)
# grab shared access to model tree
available_model = True
with LockedModelsTree(self._models_tree, "r", model_name, model_version):
# check if the versioned model exists
model_id = model_name + "-" + model_version
if model_id not in self._models_tree:
available_model = False
raise WithBreak
# retrieve model tree's metadata
upstream_model = self._models_tree[model_id]
current_upstream_ts = int(upstream_model["timestamp"].timestamp())
if not available_model:
return None
# grab shared access to models holder and retrieve model
update_model = False
with LockedModel(self._models, "r", model_name, model_version):
status, local_ts = self._models.has_model(model_name, model_version)
if status in ["not-available", "on-disk"] or (
status != "not-available"
and local_ts != current_upstream_ts
and not (status == "in-memory" and model_name in self._spec_local_model_names)
):
update_model = True
raise WithBreak
model, _ = self._models.get_model(model_name, model_version, tag)
# download, load into memory the model and retrieve it
if update_model:
# grab exclusive access to models holder
with LockedModel(self._models, "w", model_name, model_version):
# check model status
status, local_ts = self._models.has_model(model_name, model_version)
# refresh disk model
if model_name not in self._spec_local_model_names and (
status == "not-available"
or (status in ["on-disk", "in-memory"] and local_ts != current_upstream_ts)
):
if status == "not-available":
logger().info(
f"model {model_name} of version {model_version} not found locally; continuing with the download..."
)
elif status == "on-disk":
logger().info(
f"found newer model {model_name} of version {model_version} on the S3 upstream than the one on the disk"
)
else:
logger().info(
f"found newer model {model_name} of version {model_version} on the S3 upstream than the one loaded into memory"
)
# remove model from disk and memory
if status == "on-disk":
logger().info(
f"removing model from disk for model {model_name} of version {model_version}"
)
self._models.remove_model(model_name, model_version)
if status == "in-memory":
logger().info(
f"removing model from disk and memory for model {model_name} of version {model_version}"
)
self._models.remove_model(model_name, model_version)
# download model
logger().info(
f"downloading model {model_name} of version {model_version} from the S3 upstream"
)
date = self._models.download_model(
upstream_model["bucket"],
model_name,
model_version,
upstream_model["path"],
)
if not date:
raise WithBreak
current_upstream_ts = date.timestamp()
# give the local model a timestamp initialized at start time
if model_name in self._spec_local_model_names:
current_upstream_ts = self._local_model_ts
# load model
try:
logger().info(
f"loading model {model_name} of version {model_version} into memory"
)
self._models.load_model(
model_name,
model_version,
current_upstream_ts,
[tag],
)
except Exception as e:
raise UserRuntimeException(
f"failed (re-)loading model {model_name} of version {model_version} (thread {td.get_ident()})",
str(e),
)
# retrieve model
model, _ = self._models.get_model(model_name, model_version, tag)
return model
def _get_latest_model_version_from_disk(self, model_name: str) -> str:
"""
Get the highest version for a specific model name.
Must only be used when processes_per_replica > 0 and caching disabled.
"""
versions, timestamps = find_ondisk_model_info(self._lock_dir, model_name)
if len(versions) == 0:
raise UserRuntimeException(
"'{}' model's versions have been removed; add at least a version to the model to resume operations".format(
model_name
)
)
return str(max(map(lambda x: int(x), versions)))
def _get_latest_model_version_from_tree(self, model_name: str, model_info: dict) -> str:
"""
Get the highest version for a specific model name.
Must only be used when processes_per_replica = 1 and caching is enabled.
"""
versions, timestamps = model_info["versions"], model_info["timestamps"]
return str(max(map(lambda x: int(x), versions)))
def _is_model_caching_enabled(self) -> bool:
"""
Checks if model caching is enabled (models:cache_size and models:disk_cache_size).
"""
return (
self._api_spec["predictor"]["models"]
and self._api_spec["predictor"]["models"]["cache_size"] is not None
and self._api_spec["predictor"]["models"]["disk_cache_size"] is not None
)
@property
def metadata(self) -> dict:
"""
The returned dictionary will be like in the following example:
{
...
"yolov3": {
"versions": [
"2",
"1"
],
"timestamps": [
1601668127,
1601668127
]
}
...
}
"""
if not self._caching_enabled:
return find_ondisk_models_with_lock(self._lock_dir, include_timestamps=True)
else:
models_info = self._models_tree.get_all_models_info()
for model_name in models_info.keys():
del models_info[model_name]["bucket"]
del models_info[model_name]["model_paths"]
return models_info
@property
def caching(self) -> bool:
return self._caching_enabled
| 43.318841 | 144 | 0.54472 |
4a21afabcc1de7d8e7be62d3467ee30bd85699fb | 16,175 | py | Python | open_spiel/python/algorithms/alpha_zero/alpha_zero.py | wyz2368/open_spiel_egta | 6bcb3d4d863e7d89283029dd860412c3ef1731dd | ["Apache-2.0"] | null | null | null | open_spiel/python/algorithms/alpha_zero/alpha_zero.py | wyz2368/open_spiel_egta | 6bcb3d4d863e7d89283029dd860412c3ef1731dd | ["Apache-2.0"] | null | null | null | open_spiel/python/algorithms/alpha_zero/alpha_zero.py | wyz2368/open_spiel_egta | 6bcb3d4d863e7d89283029dd860412c3ef1731dd | ["Apache-2.0"] | 1 | 2020-12-25T03:02:31.000Z | 2020-12-25T03:02:31.000Z |
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AlphaZero Bot implemented in TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import math
from typing import Sequence
import numpy as np
import tensorflow.compat.v1 as tf
from open_spiel.python.algorithms import dqn
from open_spiel.python.algorithms import masked_softmax
from open_spiel.python.algorithms import mcts
import pyspiel
class TrainInput(collections.namedtuple(
"TrainInput", "observation legals_mask policy value")):
"""Inputs for training the Model."""
@staticmethod
def stack(train_inputs):
observation, legals_mask, policy, value = zip(*train_inputs)
return TrainInput(
np.array(observation),
np.array(legals_mask),
np.array(policy),
np.expand_dims(np.array(value), 1))
class Losses(collections.namedtuple("Losses", "policy value l2")):
"""Losses from a training step."""
@property
def total(self):
return self.policy + self.value + self.l2
def __str__(self):
return ("Losses(total: {:.3f}, policy: {:.3f}, value: {:.3f}, "
"l2: {:.3f})").format(self.total, self.policy, self.value, self.l2)
def __add__(self, other):
return Losses(self.policy + other.policy,
self.value + other.value,
self.l2 + other.l2)
def __truediv__(self, n):
return Losses(self.policy / n, self.value / n, self.l2 / n)
class AlphaZero(object):
"""AlphaZero implementation.
Follows the pseudocode AlphaZero implementation given in the paper
DOI:10.1126/science.aar6404.
"""
def __init__(self,
game,
bot,
model,
replay_buffer_capacity=int(1e6),
action_selection_transition=30):
"""AlphaZero constructor.
Args:
game: a pyspiel.Game object
bot: an MCTSBot object.
model: A Model.
replay_buffer_capacity: the size of the replay buffer in which the results
of self-play games are stored.
action_selection_transition: an integer representing the move number in a
game of self-play when greedy action selection is used. Before this,
actions are sampled from the MCTS policy.
Raises:
ValueError: if incorrect inputs are supplied.
"""
game_info = game.get_type()
if game.num_players() != 2:
raise ValueError("Game must be a 2-player game")
if game_info.chance_mode != pyspiel.GameType.ChanceMode.DETERMINISTIC:
raise ValueError("The game must be a Deterministic one, not {}".format(
game.chance_mode))
if (game_info.information !=
pyspiel.GameType.Information.PERFECT_INFORMATION):
raise ValueError(
"The game must be a perfect information one, not {}".format(
game.information))
if game_info.dynamics != pyspiel.GameType.Dynamics.SEQUENTIAL:
raise ValueError("The game must be turn-based, not {}".format(
game.dynamics))
if game_info.utility != pyspiel.GameType.Utility.ZERO_SUM:
raise ValueError("The game must be 0-sum, not {}".format(game.utility))
if game.num_players() != 2:
raise ValueError("Game must have exactly 2 players.")
self.game = game
self.bot = bot
self.model = model
self.replay_buffer = dqn.ReplayBuffer(replay_buffer_capacity)
self.action_selection_transition = action_selection_transition
def update(self, num_training_epochs=10, batch_size=128, verbose=False):
"""Trains the neural net.
Randomly samples data from the replay buffer. An update resets the optimizer
state.
Args:
num_training_epochs: An epoch represents one pass over the training data.
The total number of training iterations this corresponds to is
num_training_epochs * len(replay_buffer)/batch_size.
batch_size: the number of examples sampled from the replay buffer and
used for each net training iteration.
verbose: whether to print training metrics during training.
Returns:
A list of length num_training_epochs. Each element of this list is
a Losses tuple, averaged per epoch.
"""
num_epoch_iters = math.ceil(len(self.replay_buffer) / float(batch_size))
losses = []
for epoch in range(num_training_epochs):
epoch_losses = []
for _ in range(num_epoch_iters):
train_data = self.replay_buffer.sample(batch_size)
epoch_losses.append(self.model.update(train_data))
epoch_losses = sum(epoch_losses, Losses(0, 0, 0)) / len(epoch_losses)
losses.append(epoch_losses)
if verbose:
print("Epoch {}: {}".format(epoch, epoch_losses))
return losses
def self_play(self, num_self_play_games=5000):
"""Uses the current state of the net with MCTS to play full games against itself.
Args:
num_self_play_games: the number of self-play games to play using the
current net and MCTS.
"""
for _ in range(num_self_play_games):
self._self_play_single()
def _self_play_single(self):
"""Play a single game and add it to the replay buffer."""
state = self.game.new_initial_state()
trajectory = []
while not state.is_terminal():
root = self.bot.mcts_search(state)
target_policy = np.zeros(self.game.num_distinct_actions(),
dtype=np.float32)
for child in root.children:
target_policy[child.action] = child.explore_count
target_policy /= sum(target_policy)
trajectory.append(TrainInput(
state.observation_tensor(), state.legal_actions_mask(),
target_policy, root.total_reward / root.explore_count))
action = self._select_action(root.children, len(trajectory))
state.apply_action(action)
terminal_rewards = state.rewards()
for state in trajectory:
self.replay_buffer.add(
TrainInput(state.observation, state.legals_mask, state.policy,
terminal_rewards[0]))
def _select_action(self, children, game_history_len):
explore_counts = [(child.explore_count, child.action) for child in children]
if game_history_len < self.action_selection_transition:
probs = np_softmax(np.array([i[0] for i in explore_counts]))
action_index = np.random.choice(range(len(probs)), p=probs)
action = explore_counts[action_index][1]
else:
_, action = max(explore_counts)
return action
def np_softmax(logits):
max_logit = np.amax(logits, axis=-1, keepdims=True)
exp_logit = np.exp(logits - max_logit)
return exp_logit / np.sum(exp_logit, axis=-1, keepdims=True)
class AlphaZeroKerasEvaluator(mcts.Evaluator):
"""An AlphaZero MCTS Evaluator."""
def __init__(self, game, model):
"""An AlphaZero MCTS Evaluator."""
self.model = model
self._input_shape = game.observation_tensor_shape()
self._num_actions = game.num_distinct_actions()
@functools.lru_cache(maxsize=2**12)
def value_and_prior(self, state):
# Make a singleton batch
obs = np.expand_dims(state.observation_tensor(), 0)
mask = np.expand_dims(state.legal_actions_mask(), 0)
value, policy = self.model.inference(obs, mask)
return value[0, 0], policy[0] # Unpack batch
def evaluate(self, state):
value, _ = self.value_and_prior(state)
return np.array([value, -value])
def prior(self, state):
_, policy = self.value_and_prior(state)
return [(action, policy[action]) for action in state.legal_actions()]
class Model(object):
"""A wrapper around a keras model, and optimizer."""
def __init__(self, keras_model, l2_regularization, learning_rate, device):
"""A wrapper around a keras model, and optimizer.
Args:
keras_model: a Keras Model object.
l2_regularization: the amount of l2 regularization to use during training.
learning_rate: a learning rate for the adam optimizer.
device: The device used to run the keras_model during evaluation and
training. Possible values are 'cpu', 'gpu', or a tf.device(...) object.
"""
if device == "gpu":
if not tf.test.is_gpu_available():
raise ValueError("GPU support is unavailable.")
self._device = tf.device("gpu:0")
elif device == "cpu":
self._device = tf.device("cpu:0")
else:
self._device = device
self._keras_model = keras_model
self._optimizer = tf.train.AdamOptimizer(learning_rate)
self._l2_regularization = l2_regularization
def inference(self, obs, mask):
with self._device:
value, policy = self._keras_model(obs)
policy = masked_softmax.np_masked_softmax(np.array(policy), np.array(mask))
return value, policy
def update(self, train_inputs: Sequence[TrainInput]):
"""Run an update step."""
batch = TrainInput.stack(train_inputs)
with self._device:
with tf.GradientTape() as tape:
values, policy_logits = self._keras_model(
batch.observation, training=True)
loss_value = tf.losses.mean_squared_error(
values, tf.stop_gradient(batch.value))
loss_policy = tf.nn.softmax_cross_entropy_with_logits_v2(
logits=policy_logits, labels=tf.stop_gradient(batch.policy))
loss_policy = tf.reduce_mean(loss_policy)
loss_l2 = 0
for weights in self._keras_model.trainable_variables:
loss_l2 += self._l2_regularization * tf.nn.l2_loss(weights)
loss = loss_policy + loss_value + loss_l2
grads = tape.gradient(loss, self._keras_model.trainable_variables)
self._optimizer.apply_gradients(
zip(grads, self._keras_model.trainable_variables),
global_step=tf.train.get_or_create_global_step())
return Losses(policy=float(loss_policy), value=float(loss_value),
l2=float(loss_l2))
def cascade(x, fns):
for fn in fns:
x = fn(x)
return x
def keras_resnet(input_shape,
num_actions,
num_residual_blocks=19,
num_filters=256,
value_head_hidden_size=256,
activation="relu",
data_format="channels_last"):
"""A ResNet implementation following AlphaGo Zero.
This ResNet implementation copies as closely as possible the
description found in the Methods section of the AlphaGo Zero Nature paper.
It is mentioned in the AlphaZero Science paper supplementary material that
"AlphaZero uses the same network architecture as AlphaGo Zero". Note that
this implementation only supports flat policy distributions.
Arguments:
input_shape: A tuple of 3 integers specifying the non-batch dimensions of
input tensor shape.
num_actions: Determines the output size of the policy head.
num_residual_blocks: The number of residual blocks. Can be 0.
num_filters: the number of convolution filters to use in the residual blocks
value_head_hidden_size: number of hidden units in the value head dense layer
activation: the activation function to use in the net. Does not affect the
final tanh activation in the value head.
data_format: Can take values 'channels_first' or 'channels_last' (default).
Which input dimension to interpret as the channel dimension. The input
is (1, channel, width, height) with (1, width, height, channel)
Returns:
A keras Model with a single input and two outputs (value head, policy head).
The policy is a flat distribution over actions.
"""
def residual_layer(inputs, num_filters, kernel_size):
return cascade(inputs, [
tf.keras.layers.Conv2D(num_filters, kernel_size, padding="same"),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Activation(activation),
tf.keras.layers.Conv2D(num_filters, kernel_size, padding="same"),
tf.keras.layers.BatchNormalization(axis=-1),
lambda x: tf.keras.layers.add([x, inputs]),
tf.keras.layers.Activation(activation),
])
def resnet_body(inputs, num_filters, kernel_size):
x = cascade(inputs, [
tf.keras.layers.Conv2D(num_filters, kernel_size, padding="same"),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Activation(activation),
])
for _ in range(num_residual_blocks):
x = residual_layer(x, num_filters, kernel_size)
return x
def resnet_value_head(inputs, hidden_size):
return cascade(inputs, [
tf.keras.layers.Conv2D(filters=1, kernel_size=1),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Activation(activation),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(hidden_size, activation),
tf.keras.layers.Dense(1, activation="tanh", name="value"),
])
def resnet_policy_head(inputs, num_classes):
return cascade(inputs, [
tf.keras.layers.Conv2D(filters=2, kernel_size=1),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Activation(activation),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(num_classes, name="policy"),
])
input_size = int(np.prod(input_shape))
inputs = tf.keras.Input(shape=input_size, name="input")
torso = tf.keras.layers.Reshape(input_shape)(inputs)
# Note: Keras with TensorFlow 1.15 does not support the data_format arg on CPU
# for convolutions. Hence why this transpose is needed.
if data_format == "channels_first":
torso = tf.keras.backend.permute_dimensions(torso, (0, 2, 3, 1))
torso = resnet_body(torso, num_filters, 3)
value_head = resnet_value_head(torso, value_head_hidden_size)
policy_head = resnet_policy_head(torso, num_actions)
return tf.keras.Model(inputs=inputs, outputs=[value_head, policy_head])
def keras_mlp(input_shape,
num_actions,
num_layers=2,
num_hidden=128,
activation="relu"):
"""A simple MLP implementation with both a value and policy head.
Arguments:
input_shape: A tuple of 3 integers specifying the non-batch dimensions of
input tensor shape.
num_actions: Determines the output size of the policy head.
num_layers: The number of dense layers before the policy and value heads.
num_hidden: the number of hidden units in the dense layers.
activation: the activation function to use in the net. Does not affect the
final tanh activation in the value head.
Returns:
A keras Model with a single input and two outputs (value head, policy head).
The policy is a flat distribution over actions.
"""
input_size = int(np.prod(input_shape))
inputs = tf.keras.Input(shape=input_size, name="input")
torso = inputs
for _ in range(num_layers):
torso = tf.keras.layers.Dense(num_hidden, activation=activation)(torso)
policy = tf.keras.layers.Dense(num_actions, name="policy")(torso)
value = tf.keras.layers.Dense(1, activation="tanh", name="value")(torso)
return tf.keras.Model(inputs=inputs, outputs=[value, policy])
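# Illustrative wiring of the classes above (not part of the original module; the MCTSBot
# constructor arguments are assumptions about the open_spiel API and may need adjusting):
#   game = pyspiel.load_game("tic_tac_toe")
#   keras_net = keras_mlp(game.observation_tensor_shape(), game.num_distinct_actions())
#   model = Model(keras_net, l2_regularization=1e-4, learning_rate=1e-3, device="cpu")
#   evaluator = AlphaZeroKerasEvaluator(game, model)
#   bot = mcts.MCTSBot(game, uct_c=2.0, max_simulations=100, evaluator=evaluator)
#   alpha_zero = AlphaZero(game, bot, model)
#   alpha_zero.self_play(num_self_play_games=10)
#   losses = alpha_zero.update(num_training_epochs=1, verbose=True)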
| 37.880562 | 80 | 0.697805 |
4a21afd95eb1f8c4c7aa2f62a7b89b80a78d8c23 | 838 | py | Python | config.py | EricConnect/site-hr-server-python | fdbd44f43d5020d67614f3687dbfdc3f1d165d7d | ["Apache-2.0"] | null | null | null | config.py | EricConnect/site-hr-server-python | fdbd44f43d5020d67614f3687dbfdc3f1d165d7d | ["Apache-2.0"] | null | null | null | config.py | EricConnect/site-hr-server-python | fdbd44f43d5020d67614f3687dbfdc3f1d165d7d | ["Apache-2.0"] | null | null | null |
# Define the application directory
import os
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
# Statement for enabling the development environment
DEBUG = True
# Define the database - we are working with
# SQLite for this example
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(BASE_DIR, 'app.db')
DATABASE_CONNECT_OPTIONS = {}
SQLALCHEMY_TRACK_MODIFICATIONS = False
# Application threads. A common general assumption is
# using 2 per available processor cores - to handle
# incoming requests using one and performing background
# operations using the other.
THREADS_PER_PAGE = 2
# Enable protection against *Cross-site Request Forgery (CSRF)*
CSRF_ENABLED = True
# Use a secure, unique and absolutely secret key for
# signing the data.
CSRF_SESSION_KEY = "secret"
# Secret key for signing cookies
SECRET_KEY = "secret"
| 27.032258 | 73 | 0.778043 |
4a21b0c7227a71cc5dae21a67816762a4b82ae49 | 4,841 | py | Python | custom_components/tahoma/climate_devices/atlantic_electrical_towel_dryer.py | rguillard77/ha-tahoma | 88bff1c15ec2c48160c0e1da85c4c6155b8a5c26 | ["MIT"] | null | null | null | custom_components/tahoma/climate_devices/atlantic_electrical_towel_dryer.py | rguillard77/ha-tahoma | 88bff1c15ec2c48160c0e1da85c4c6155b8a5c26 | ["MIT"] | null | null | null | custom_components/tahoma/climate_devices/atlantic_electrical_towel_dryer.py | rguillard77/ha-tahoma | 88bff1c15ec2c48160c0e1da85c4c6155b8a5c26 | ["MIT"] | null | null | null |
"""Support for Atlantic Electrical Towel Dryer."""
from typing import Optional
from pyoverkiz.enums import OverkizState
from homeassistant.components.climate import (
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
ClimateEntity,
)
from homeassistant.components.climate.const import (
HVAC_MODE_AUTO,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_NONE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from ..coordinator import OverkizDataUpdateCoordinator
from ..entity import OverkizEntity
COMMAND_SET_TARGET_TEMPERATURE = "setTargetTemperature"
COMMAND_SET_DEROGATED_TARGET_TEMPERATURE = "setDerogatedTargetTemperature"
COMMAND_SET_TOWEL_DRYER_OPERATING_MODE = "setTowelDryerOperatingMode"
COMMAND_SET_TOWEL_DRYER_TEMPORARY_STATE = "setTowelDryerTemporaryState"
CORE_COMFORT_ROOM_TEMPERATURE_STATE = "core:ComfortRoomTemperatureState"
CORE_OPERATING_MODE_STATE = "core:OperatingModeState"
CORE_TARGET_TEMPERATURE_STATE = "core:TargetTemperatureState"
IO_TARGET_HEATING_LEVEL_STATE = "io:TargetHeatingLevelState"
IO_TOWEL_DRYER_TEMPORARY_STATE_STATE = "io:TowelDryerTemporaryStateState"
IO_EFFECTIVE_TEMPERATURE_SETPOINT_STATE = "io:EffectiveTemperatureSetpointState"
PRESET_BOOST = "boost"
PRESET_DRYING = "drying"
PRESET_FROST_PROTECTION = "frost_protection"
PRESET_STATE_FROST_PROTECTION = "frostprotection"
PRESET_STATE_OFF = "off"
PRESET_STATE_ECO = "eco"
PRESET_STATE_BOOST = "boost"
PRESET_STATE_COMFORT = "comfort"
PRESET_STATE_COMFORT1 = "comfort-1"
PRESET_STATE_COMFORT2 = "comfort-2"
# Map Home Assistant presets to TaHoma presets
PRESET_MODE_TO_TAHOMA = {
PRESET_BOOST: "boost",
PRESET_DRYING: "drying",
PRESET_NONE: "permanentHeating",
}
TAHOMA_TO_PRESET_MODE = {v: k for k, v in PRESET_MODE_TO_TAHOMA.items()}
# Map TaHoma HVAC modes to Home Assistant HVAC modes
TAHOMA_TO_HVAC_MODE = {
"external": HVAC_MODE_HEAT, # manu
"standby": HVAC_MODE_OFF,
"internal": HVAC_MODE_AUTO, # prog
}
HVAC_MODE_TO_TAHOMA = {v: k for k, v in TAHOMA_TO_HVAC_MODE.items()}
class AtlanticElectricalTowelDryer(OverkizEntity, ClimateEntity):
"""Representation of Atlantic Electrical Towel Dryer."""
_attr_hvac_modes = [*HVAC_MODE_TO_TAHOMA]
_attr_preset_modes = [*PRESET_MODE_TO_TAHOMA]
_attr_supported_features = SUPPORT_PRESET_MODE | SUPPORT_TARGET_TEMPERATURE
_attr_temperature_unit = TEMP_CELSIUS
def __init__(self, device_url: str, coordinator: OverkizDataUpdateCoordinator):
"""Init method."""
super().__init__(device_url, coordinator)
self.temperature_device = self.executor.linked_device(7)
@property
def hvac_mode(self) -> str:
"""Return hvac operation ie. heat, cool mode."""
if CORE_OPERATING_MODE_STATE in self.device.states:
return TAHOMA_TO_HVAC_MODE[
self.executor.select_state(CORE_OPERATING_MODE_STATE)
]
if OverkizState.CORE_ON_OFF in self.device.states:
return TAHOMA_TO_HVAC_MODE[
self.executor.select_state(OverkizState.CORE_ON_OFF)
]
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
await self.executor.async_execute_command(
COMMAND_SET_TOWEL_DRYER_OPERATING_MODE, HVAC_MODE_TO_TAHOMA[hvac_mode]
)
@property
def preset_mode(self) -> Optional[str]:
"""Return the current preset mode, e.g., home, away, temp."""
return TAHOMA_TO_PRESET_MODE[
self.executor.select_state(IO_TOWEL_DRYER_TEMPORARY_STATE_STATE)
]
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
await self.executor.async_execute_command(
COMMAND_SET_TOWEL_DRYER_TEMPORARY_STATE, PRESET_MODE_TO_TAHOMA[preset_mode]
)
@property
def target_temperature(self) -> None:
"""Return the temperature."""
if self.hvac_mode == HVAC_MODE_AUTO:
return self.executor.select_state(IO_EFFECTIVE_TEMPERATURE_SETPOINT_STATE)
return self.executor.select_state(CORE_TARGET_TEMPERATURE_STATE)
@property
def current_temperature(self) -> float:
"""Return current temperature."""
return float(
self.temperature_device.states.get(OverkizState.CORE_TEMPERATURE).value
)
async def async_set_temperature(self, **kwargs) -> None:
"""Set new temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if self.hvac_mode == HVAC_MODE_AUTO:
await self.executor.async_execute_command(
COMMAND_SET_DEROGATED_TARGET_TEMPERATURE, temperature
)
else:
await self.executor.async_execute_command(
COMMAND_SET_TARGET_TEMPERATURE, temperature
)
| 35.595588 | 87 | 0.735385 |
4a21b3089993d32b8bb99eb8bb1ba7a5148202a2 | 7,029 | py | Python | development/generator/scraper.py | Jecosine/banAna | 5d59573f54a4e24e91276427843d0fc5dff0d540 | ["Apache-2.0"] | 2 | 2020-07-24T16:40:27.000Z | 2020-08-05T16:18:37.000Z | development/generator/scraper.py | Jecosine/banAna | 5d59573f54a4e24e91276427843d0fc5dff0d540 | ["Apache-2.0"] | 2 | 2020-07-25T06:50:39.000Z | 2022-02-09T22:28:06.000Z | development/generator/scraper.py | Jecosine/banAna | 5d59573f54a4e24e91276427843d0fc5dff0d540 | ["Apache-2.0"] | null | null | null |
'''
Date: 2020-09-01 08:25:56
LastEditors: Jecosine
LastEditTime: 2020-09-02 02:39:43
'''
import requests, json
from bs4 import BeautifulSoup as bs
from dbconnect import *
from entities import *
import uuid
from entities import *
import time, random
import os
header = {
"authority": "s.taobao.com",
"Connection": "close",
"method": "GET",
"path": "/search?q=%E7%BD%90%E5%A4%B4",
"scheme": "https",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"accept-encoding": "gzip, deflate, br",
"accept-language": "en-US,en;q=0.9",
"cache-control": "max-age=0",
"cookie": "miid=64701631356235077; tg=0; thw=cn; tracknick=ssterbn; enc=%2B2fo3lfZQYGYkeDbygR78aKw5g0ffv7Vj5Hj%2FE4PHNE0sAJ16KRIcwf9noQ%2BO8kx8GxjctBsMA16E%2BH9FSzP3w%3D%3D; hng=CN%7Czh-CN%7CCNY%7C156; cna=ZrK6F9DgnGsCAXjvrMyL6Zcc; t=6118bfc0cc83c3b506b31f8fec8e0e05; cookie2=1a607747accbf7c1e54499131cbe25f4; v=0; alitrackid=www.taobao.com; lastalitrackid=www.taobao.com; _samesite_flag_=true; sgcookie=EI%2B%2B7eEY0Rj7lbxreUjzg; uc3=vt3=F8dCufXBxARzp6EQjzs%3D&lg2=U%2BGCWk%2F75gdr5Q%3D%3D&id2=UNk%2FSAmLHA659Q%3D%3D&nk2=EE2hco6oSg%3D%3D; csg=fc1f9fd9; lgc=ssterbn; dnk=ssterbn; skt=2320177accecab6e; existShop=MTU5ODU3MjkxOQ%3D%3D; uc4=id4=0%40Ug41ScrCICCOKFQS03t%2Bo7PH%2FX0d&nk4=0%40EpRXjaKQBrXF4ZocVtJveCgF; _cc_=U%2BGCWk%2F7og%3D%3D; mt=ci=64_1; _utk=VocP@qJyn^AtWdm; _tb_token_=f84a38d4757fd; _m_h5_tk=f3624e43e2f63c802bd48e38a2f253ec_1598929987849; _m_h5_tk_enc=16651b58c6e730aff32c38ae01a09779; uc1=cookie16=WqG3DMC9UpAPBHGz5QBErFxlCA%3D%3D&pas=0&existShop=false&cookie21=URm48syIYRhCU6d3XQ%3D%3D&cookie14=UoTV5OMU5mZD1w%3D%3D; JSESSIONID=001D0BFC8F6C7E82E921EE19BE8904B4; tfstk=cfhdBeDI3hxH014tuvpgPGPeMc8cak-LXwZlwEF3ydsPi6fRNsx-ijdb0pa5v5LO.; l=eBL-q3mqv8ao72myBO5ahurza77OfIdb41PzaNbMiInca6ZdtKKgZNQ4Opu6Sdtj_tCKoetyVFjMrdLHR3AmiNAJz3h2q_rtexvO.; isg=BLW1YiBfBjkwIma0oS0pbUxpxDFvMmlEq_jFfDfbnyyyDtUA_4G1FiqMXNo4SYH8",
"dnt": "1",
"sec-fetch-dest": "document",
"sec-fetch-mode": "navigate",
"sec-fetch-site": "same-origin",
"sec-fetch-user": "?1",
"upgrade-insecure-requests": "1",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36"
}
def get_url(item_name, page):
url = "https://s.taobao.com/search?q={}&s={}"
return url.format(item_name, page * 44 + (0 if page == 0 else 1))
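# Depth-first traversal of the category tree: collect [id, title] for every leaf node into l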
def dfs(a, l):
if not a.get("children"):
l.append([a["id"], a["title"]])
return
for i in a["children"]:
dfs(i, l)
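# Load cateData.json and flatten the category tree into a list of [id, title] leaf entries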
def get_cate():
with open("cateData.json", 'rb') as f:
a = f.read().decode('utf-8')
a = json.loads(a)
# a = a["data"]
l = []
for i in a:
dfs(i, l)
return l
def get_content(curpath, url, s):
# s = requests.Session()
res = s.get(url, headers=header, timeout=(3.05, 24))
html = res.content
with open(curpath, 'wb') as f:
r = f.write(html)
print(str(r) + " written --- ", end="")
if r == 0:
print(" -- ERROR: Please Check file !! --")
def to_number(s):
if not s:
return 0
s = s[:-3]
if s[-1] == '+':
s = s[:-1]
if s[-1] == '万':
s = s[:-1]
s = int(float(s) * 10000 + random.randint(10, 500))
else:
s = int(s)
return s
db = DBConnection()
cates = []
def get_json(path, name, cid):
global db, cates
with open(path + name + ".html", 'rb') as f:
content = f.read().decode("utf-8")
bsobj = bs(content, 'html.parser')
try:
sc = bsobj.find("head")
sc = sc.findAll("script")[-1]
sc = sc.get_text()
except Exception as e:
print("--ERROR: "+ str(e) + "--")
return
flag = -1
flag1 = 0
start = 0
end = 0
l = len(sc)
# print("string length: " + l, end="")
for i in range(l):
if sc[i] == '"':
flag1 += 1
continue
if flag1 & 1:
continue
else:
if sc[i] == '[':
if flag == -1:
flag = 1
start = i
else:
flag += 1
elif sc[i] == ']':
flag -= 1
if flag == 0:
end = i + 1
break
sc = sc[start:end]
if len(sc) <= 1000:
with open("err.txt", 'ab') as f:
f.write("{} {} - page {} empty\n".format(cid, cates[cid], name).encode("utf-8"))
return ()
# return sc
print("string length: " + str(len(sc)), end="")
sc = json.loads(sc)
if len(sc) < 40:
with open("err.txt", 'ab') as f:
f.write("{} {}\n".format(cid, cates[cid]).encode("utf-8"))
return ()
print(", count: " + str(len(sc)) + "...", end="")
items = []
for i in sc:
db.cursor.execute("select businessId from business where tsid=%s", (i["user_id"], ))
bid = db.cursor.fetchall()
if(bid == []):
bid = uuid.uuid4().hex[:10]
db.cursor.execute("insert into business values(%s,%s,%s,%s)", (bid, i["user_id"], i["nick"], '{}-{}-{} 00:49:06'.format(random.randint(2010, 2020), random.randint(1, 12), random.randint(1, 28))))
db.save_database()
else:
bid = bid[0][0]
items.append((uuid.uuid4().hex[:10], i["raw_title"], '["%s"]'%i["pic_url"], bid, 1, i["user_id"], i["nid"], i["item_loc"], float(i["view_price"]), to_number(i.get("view_sales")), cid))
return tuple(items)
# with open(path + name + '.json', 'wb') as f:
# f.write(sc.encode("utf-8"))
def mainprocess():
global db,cates
cnt = 0
db = DBConnection()
l = get_cate()
cates = {i[0]:i[1] for i in l}
sql = "insert into item values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
# requests.adapters.DEFAULT_RETRIES = 15
print("starting ...")
length = len(l)
for i in range(576, length):
id, name = l[i]
cnt += 1
# if cnt > 200:
# break
print("processing: %s - %s..." % (str(i), name))
# s = requests.Session()
# s.keep_alive = False
for p in range(4):
c = 0
print(" page %s..." % str(p), end="")
# if not os.path.exists("data/%s" % id):
# os.mkdir("data/%s" % id)
# get_content("data/%s/%s.html"%(id, str(p)), get_url(name, p), s)
items = get_json("data/%s/" % id, str(p), id)
# print(items)
# db.cursor.executemany(sql, items)
for x in range(len(items)):
try:
db.cursor.execute(sql, items[x])
except Exception as e:
pass
c += 1
db.save_database()
print("{} written, done".format(c))
# time.sleep(random.random() * 2)
# s.close()
if __name__ == "__main__":
mainprocess()
| 37.790323 | 1,334 | 0.563523 |
4a21b3478fa70d8aba045ece5940285a4825cbd0 | 937 | py | Python | src/ping.py | Brunopaes/friday | b805e44313ec3b454ff8da7b07caaa30f01c26af | ["RSA-MD"] | 5 | 2020-12-14T01:31:02.000Z | 2021-02-19T23:41:15.000Z | src/ping.py | Brunopaes/friday | b805e44313ec3b454ff8da7b07caaa30f01c26af | ["RSA-MD"] | 16 | 2020-09-16T16:12:51.000Z | 2022-02-22T02:20:02.000Z | src/ping.py | Brunopaes/friday | b805e44313ec3b454ff8da7b07caaa30f01c26af | ["RSA-MD"] | 1 | 2020-12-10T19:40:30.000Z | 2020-12-10T19:40:30.000Z |
# -*- coding: utf-8 -*-
from speedtest import Speedtest
class InternetSpeedRate:
def __init__(self):
self.speedtest = Speedtest()
def get_internet_info(self):
""" Internet rate information.
Returns
-------
dict
Speedtest results (download, upload, ping and server metadata), or None on error.
"""
try:
self.speedtest.download()
self.speedtest.upload()
return self.speedtest.results.dict()
except Exception:
# Speedtest/network errors are swallowed; the caller receives None
return None
def get_download_rate(self):
""" Internet download information.
Returns
-------
float
Measured download rate, in bits per second.
"""
return self.speedtest.download()
def get_upload_rate(self):
""" Internet upload information.
Returns
-------
float
Measured upload rate, in bits per second.
"""
return self.speedtest.upload()
print(InternetSpeedRate().get_internet_info())
| 18.019231 | 48 | 0.512273 |
4a21b37f75faf2dff302e33698fec949b6737a1b | 23,109 | py | Python | util/afutil.py | henrypinkard/DeepAutofocus | 09bb02aa082238991aa187ffaf0104c93ebc386c | ["BSD-3-Clause"] | 15 | 2020-05-15T06:15:58.000Z | 2021-06-20T09:08:04.000Z | util/afutil.py | henrypinkard/DeepAutofocus | 09bb02aa082238991aa187ffaf0104c93ebc386c | ["BSD-3-Clause"] | 1 | 2019-07-04T09:37:34.000Z | 2019-07-12T05:27:06.000Z | util/afutil.py | henrypinkard/DeepAF | 09bb02aa082238991aa187ffaf0104c93ebc386c | ["BSD-3-Clause"] | 2 | 2020-02-12T19:47:20.000Z | 2020-03-06T06:34:18.000Z |
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
from scipy import interpolate
from joblib import Parallel, delayed
import dask.array as da
from util.defocusnetwork import DefocusNetwork
from util.imageprocessing import radialaverage
from util.pygellan import MagellanDataset
import h5py
import os
from util.magellanhdf import MagellanHDFContainer
import json
def get_patch_metadata(image_size, split_k):
"""
Split up raw image from sensor into sub patches for training network.
:param image_size: tuple with (image width, image height)
:param split_k: number of sub images to split into along each dimension (i.e. split_k=2 gives 4 sub images)
:return: pixel dimension of patches (they are square and a power of two), number of patches from each raw image
"""
shape = min(image_size)
patch_size = 2**int(np.log2(shape/split_k))
# patch_size = int(shape / split_k)
patches_per_image = (shape // patch_size) **2
return patch_size, patches_per_image
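# Example: get_patch_metadata((2048, 2048), split_k=2) returns (1024, 4) --
# 1024-pixel square patches (the largest power of two <= 2048 / 2) and 4 patches per image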
def calc_focal_plane(data, position_index, split_k, parallel=None, show_output=False):
"""
Calculate radially averaged power spectrum of images at different focal positions, and take the mean of high
frequencies to measure focus quality. Then use these measurements to compute the optimal focal plane
:param data: implementation of DataWrapper class
:param position_index:
:param split_k:
:param parallel: if supplied use multiple threads to speed up power spectrum computations
:param show_output if supplied, create a plot showing the calculation of the ground truth focal plane
:return:
"""
# print("\rCalculating focal plane, position {} of {} ".format(position_index, data.get_num_xy_positions()),end='')
def crop(image, index, split_k):
"""
Crop raw image to appropriate patch size
:return: One sub crop
"""
y_tile_index = index // split_k
x_tile_index = index % split_k
return image[y_tile_index * patch_size:(y_tile_index + 1) * patch_size, x_tile_index * patch_size:(x_tile_index + 1) * patch_size]
def calc_power_spectrum(image):
"""
:return: Radially averaged log power spectrum
"""
pixelsft = np.fft.fftshift(np.fft.fft2(image))
powerspectrum = pixelsft * pixelsft.conj()
logpowerspectrummag = np.log(np.abs(powerspectrum))
return radialaverage(logpowerspectrummag)
def compute_focal_plane(powerspectralist):
"""
Compute focal plane from a list of radially averaged power spectra, interpolating to get sub-z spacing precision
:param powerspectralist: list of radially averaged power spectra
:return:
"""
powerspectra_arr = np.array(powerspectralist)
# take sum of log power spectra (lower half
pssum = np.sum(powerspectra_arr[:, powerspectra_arr.shape[1] // 4:], axis=1)
# interpolate to find non integer best focal plane
interpolant = interpolate.interp1d(np.arange(pssum.shape[0]), pssum, kind='cubic')
xx = np.linspace(0, pssum.shape[0] - 1, 10000)
yy = interpolant(xx)
if show_output:
plt.figure(1)
plt.plot(xx * data.get_pixel_size_z_um(), yy)
plt.plot(np.arange(pssum.shape[0]) * data.get_pixel_size_z_um(), pssum, 'o')
plt.xlabel('Focal position (um)')
plt.ylabel('High frequency content')
return xx[np.argmax(yy)] * data.get_pixel_size_z_um()
patch_size, patches_per_image = get_patch_metadata((data.get_image_width(), data.get_image_height()), split_k)
num_crops = split_k**2
radial_avg_power_spectrum = lambda image: calc_power_spectrum(crop(image, 0, 1))
num_slices = data.get_num_z_slices_at(position_index)
#load images
images = [data.read_ground_truth_image(z_index=slice, position_index=position_index)
for slice in range(num_slices)]
if parallel is None:
powerspectra = [radial_avg_power_spectrum(image) for image in images]
else:
powerspectra = parallel(delayed(radial_avg_power_spectrum)(image) for image in images)
#Use same focal plane for all crops
focal_plane = compute_focal_plane(powerspectra)
best_focal_planes = {crop_index: focal_plane for crop_index in range(num_crops)}
print("\rCalculated focal plane, position {} of {}: {:.3f}".format(position_index,
data.get_num_xy_positions(),focal_plane),end='')
return best_focal_planes
def generator_fn(data_wrapper_list, focal_planes, tile_split_k, position_indices_list, ignore_first_slice=False):
"""
Function that generates pairs of training images and defocus distances used for training defocus prediction network
:param data_wrapper_list list of DataWrappers
:param focal_planes nested dict with DataWrapper, position index, and crop index as keys
:param tile_split_k number of crops to divide each image into for training
:param position_indices_list list same length as data_wrapper_list that has list of position indices to use for each
dataset
:param ignore_first_slice discard the top z slice (which was often not in the focal position it was supposed to
be on the system we used for testing)
and true focal plane position as values
:yield: dictionary with LED name key and image value for a random slice/position among
valid slices and in the set of positions we specified
"""
for data_wrapper, position_indices in zip(data_wrapper_list, position_indices_list):
dataset_slice_pos_tuples = []
#get all slice index position index combinations
for pos_index in position_indices:
slice_indices = np.arange(data_wrapper.get_num_z_slices_at(position_index=pos_index))
for z_index in slice_indices:
if z_index == 0 and ignore_first_slice:
continue
dataset_slice_pos_tuples.append((data_wrapper, z_index, pos_index))
print('{} slice/position tuples'.format(len(dataset_slice_pos_tuples)), end='')
indices = np.arange(len(dataset_slice_pos_tuples))
def inner_generator(indices, focal_planes):
patch_size, patches_per_image = get_patch_metadata((dataset_slice_pos_tuples[0][0].get_image_width(),
dataset_slice_pos_tuples[0][0].get_image_height()), tile_split_k)
for index in indices:
data_wrapper, z_index, pos_index = dataset_slice_pos_tuples[index]
for patch_index in range(patches_per_image):
single_led_images = read_patch(data_wrapper, pos_index=pos_index, z_index=z_index,
split_k=tile_split_k, patch_index=patch_index)
defocus_dist = focal_planes[data_wrapper][pos_index][patch_index] - \
data_wrapper.get_pixel_size_z_um()*z_index
yield single_led_images, defocus_dist
return lambda: inner_generator(indices, focal_planes)
def feature_vector_generator_fn(feature_vectors, defocus_dists, mode, split_k, training_fraction=0.8):
"""
Generator function for feature vectors (i.e. the part of the Fourier transform that feeds into trainable layers of network)
:param feature_vectors: 2d numpy array (n x feature vector length)
:param defocus_dists: numpy array of defocus distances
:param mode: 'training', 'validation', or 'all'
:param split_k: number of crops to split data into
:param training_fraction: fraction of data to use in training set
:return: generator function that gives one feature vector-defocus distance pair at a time
"""
n = feature_vectors.shape[0]
#Split every XY position crop completely into training or validation so they represent different image content
n_full = n / (split_k**2)
full_indices = np.arange(n_full)
np.random.shuffle(full_indices)
num_train = int(len(full_indices) * training_fraction)
if mode == 'training':
full_indices = full_indices[:num_train]
elif (mode == 'validation'):
full_indices = full_indices[num_train:]
elif (mode == 'all'):
pass
#get actual data indices
splits_per_tile = split_k**2
data_indices = np.concatenate([np.arange(splits_per_tile*index, splits_per_tile*(index+1)) for index in full_indices]).astype(np.int32)
if mode == 'training':
np.random.seed(123)
np.random.shuffle(data_indices)
#not sure if this is absolutely needed but just in case...
# feature_vectors = np.copy(feature_vectors)
# defocus_dists = np.copy(defocus_dists)
def inner_generator(linescans, defocus_dists, indices):
#yield data in a shuffled order
for index in indices:
yield linescans[index, :], defocus_dists[index]
return lambda: inner_generator(feature_vectors, defocus_dists, data_indices)
def read_patch(data_wrapper, pos_index, z_index, split_k, patch_index):
"""
Crop a square region out of a larger image for network training
:param data_wrapper:
:param pos_index: index of XY position
:param z_index: z slice index
:param split_k: number of crops along each dimension
:param patch_index: index of the crop
:return: 2d numpy array of floats corresponding to image patch
"""
return data_wrapper.read_prediction_image(position_index=pos_index, z_index=z_index,
patch_index=patch_index, split_k=split_k)
def read_or_calc_focal_planes(data_wrapper, split_k, n_cores=1, show_output=False):
"""
Compute or load pre computed focal planes for each XY position
:param data_wrapper:
:param split_k: splits per image
:param n_cores: number of threads to use for parallelization using joblib. If set to 1
parallelization not used
:return:
"""
def get_name(pos_index):
return 'pos{}_focal_plane'.format(pos_index)
def read_or_compute(pos_index, parallel=None):
if data_wrapper.read_focal_plane(get_name(pos_index)) is None:
#calculate and save it
focal_plane = calc_focal_plane(data_wrapper, pos_index, split_k=split_k, parallel=parallel, show_output=show_output)
for crop_index in focal_plane.keys():
data_wrapper.store_focal_plane(get_name(pos_index), focal_plane[crop_index])
else:
print('Reading precomputed focal plane pos index {} of {}\r'.format(pos_index + 1,
data_wrapper.get_num_xy_positions()), end='')
#read saved value from previous computation
focal_plane = {}
for crop_index in range(split_k**2):
focal_plane[crop_index] = data_wrapper.read_focal_plane(get_name(pos_index))
return focal_plane
if n_cores == 1:
#single threaded execution
focal_planes = {pos_index: read_or_compute(pos_index=pos_index) for pos_index in range(data_wrapper.get_num_xy_positions())}
else:
#parallelized
with Parallel(n_jobs=n_cores) as parallel:
focal_planes = {pos_index: read_or_compute(pos_index=pos_index, parallel=parallel) for pos_index
in range(data_wrapper.get_num_xy_positions())}
return focal_planes
def read_or_calc_design_mat(data_wrapper, position_indices, focal_planes, deterministic_params):
"""
Load a precomputed design matrix, or use the DefocusNetwork class to compute it and then store for future use. The
design matrix corresponds to the 'deterministic' beginning part of the graph (i.e. the Fourier transform)
:param data_wrapper:
:param position_indices
:param focal_planes:
:param deterministic_params: dictionary of parameters describing the structure of the network
:return:
"""
param_id_string = 'new' + str(deterministic_params) + 'p' +'_'.join(map(str,position_indices))
# compute or read from storage deterministic outputs
feature_name = 'features_' + param_id_string
defocus_name = 'defocus_dists_' + param_id_string
features = data_wrapper.read_array(feature_name)
defocus_dists = data_wrapper.read_array(defocus_name)
if features is None:
patch_size, patches_per_image = get_patch_metadata((data_wrapper.get_image_width(),
data_wrapper.get_image_height()), deterministic_params['tile_split_k'])
generator = generator_fn([data_wrapper], focal_planes, tile_split_k=deterministic_params['tile_split_k'],
position_indices_list=[position_indices], ignore_first_slice=True)
#Use the deterministic part of the network only to compute design matrix
with DefocusNetwork(input_shape=(patch_size, patch_size), train_generator=generator,
deterministic_params=deterministic_params) as network:
features, defocus_dists = network.evaluate_deterministic_graph()
data_wrapper.store_array(feature_name, features)
data_wrapper.store_array(defocus_name, defocus_dists)
return features, defocus_dists
def compile_deterministic_data(data_wrapper_list, postion_indices_list, focal_planes, deterministic_params, virtual=False):
"""
For all hdf wrappers in data, load design matrix and targets and concatenate them
Puts the data that has already been fourier transformed and flattened into design matrix
Computes this using a deterministic neural network if needed, otherwise loads it from the file
to save time
:param data_wrapper_list list of DataWrapper objects to compute on
:param postion_indices_list corresponding list of position indices to use from each one
"""
deterministic_train_data = [read_or_calc_design_mat(dataset, position_indices, focal_planes,
deterministic_params) for dataset, position_indices in zip(data_wrapper_list, postion_indices_list)]
# collect training data from all experiments
features = []
targets = []
for f, t in deterministic_train_data:
if np.any(np.isnan(f)):
raise Exception('NAN detected in deterministic data')
features.append(f)
targets.append(t)
#pool all data together
targets = np.concatenate(targets)
#store in dask arrays to keep them on disk
if virtual:
da_features = [da.from_array(feature_vec, chunks=(1024, -1)) for feature_vec in features]
features = da.concatenate(da_features, axis=0)
else:
features = np.concatenate(features)
return features, targets
def plot_results(pred, target, color, draw_rect=False, range=None):
#don't plot too many points
indices = np.arange(pred.shape[0])
np.random.shuffle(indices)
plt.scatter(target[indices[:500]], pred[indices[:500]], marker='o', c=color, linewidths=0, edgecolors=None)
plt.xlabel('True defocus (µm)')
plt.ylabel('Predicted defocus (µm)')
if draw_rect:
min_target = np.min(target)
max_target = np.max(target)
height = (max_target - min_target)*np.sqrt(2)
width = 5
plt.gca().add_patch(mpatches.Rectangle([min_target, min_target+width/np.sqrt(2)], width, height,
angle=-45, color=[0, 1, 0, 0.2]))
# plt.plot([min_target, max_target], [min_target, max_target], 'g-')
if range is not None:
plt.ylim([-range[0], range[1]])
plt.xlim([-range[0], range[1]])
def cartToNa(point_list_cart, z_offset=8):
"""Functions for calculating the NA of an LED on the quasi-dome illuminator based on its index;
converts a list of cartesian points to numerical aperture (NA)
Args:
point_list_cart: List of (x,y,z) positions relative to the sample (origin)
z_offset : Optional, offset of LED array in z, mm
Returns:
A 2D numpy array where the first dimension is the number of LEDs loaded and the second is (Na_x, NA_y)
"""
yz = np.sqrt(point_list_cart[:, 1] ** 2 + (point_list_cart[:, 2] + z_offset) ** 2)
xz = np.sqrt(point_list_cart[:, 0] ** 2 + (point_list_cart[:, 2] + z_offset) ** 2)
result = np.zeros((np.size(point_list_cart, 0), 2))
result[:, 0] = np.sin(np.arctan(point_list_cart[:, 0] / yz))
result[:, 1] = np.sin(np.arctan(point_list_cart[:, 1] / xz))
return(result)
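# Example: an LED at (0, 0, z) directly on the optical axis maps to NA (0, 0) for any z_offset,
# since arctan(0 / yz) == 0 along both axes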
def loadLedPositonsFromJson(file_name, z_offset=8):
"""Function which loads LED positions from a json file
Args:
file_name: Location of file to load
z_offset : Optional, offset of LED array in z, mm
Returns:
A 2D numpy array where the first dimension is the number of LEDs loaded and the second is (x, y, z) in mm
"""
json_data = open(file_name).read()
data = json.loads(json_data)
source_list_cart = np.zeros((len(data['led_list']), 3))
x = [d['x'] for d in data['led_list']]
y = [d['y'] for d in data['led_list']]
z = [d['z'] for d in data['led_list']]
source_list_cart[:, 0] = x
source_list_cart[:, 1] = y
source_list_cart[:, 2] = z
source_list_na = cartToNa(source_list_cart, z_offset=z_offset)
return source_list_na, source_list_cart
def get_led_na(led_index):
source_list_na, source_list_cart = loadLedPositonsFromJson('quasi_dome_design.json')
angles_xy = np.arcsin(np.abs(source_list_na))
angle = np.arctan(np.sqrt(np.tan(angles_xy[:, 0])**2 + np.tan(angles_xy[:, 1])**2 ))
return np.sin(angle[led_index - 1])
def get_led_nas(led_index):
source_list_na, source_list_cart = loadLedPositonsFromJson('quasi_dome_design.json')
return source_list_na[led_index - 1]
def get_led_angle(led_index):
source_list_na, source_list_cart = loadLedPositonsFromJson('quasi_dome_design.json')
angles_xy = np.arcsin(np.abs(source_list_na))
angle = np.arctan(np.sqrt(np.tan(angles_xy[:, 0])**2 + np.tan(angles_xy[:, 1])**2 ))
return angle[led_index - 1] / (2*3.14) *360
class MagellanWithAnnotation(MagellanDataset):
"""
This class takes the python wrapper for a Micro-Magellan dataset, and adds in the ability to store annotations in an
hdf5 file
"""
def __init__(self, dataset_path):
super().__init__(dataset_path=dataset_path)
self.file = h5py.File(os.path.join(dataset_path, 'annotations'))
def write_annotation(self, name, value):
"""
store string:scalar annotation in top level
"""
self.file.attrs[name] = value
self.file.flush()
def read_annotation(self, name):
"""
read a scalar annotation from top level
:return:
"""
if name not in self.file.attrs:
return None
return self.file.attrs[name]
def store_array(self, name, array):
"""
Store a numpy array. If an array of the same name already exists, overwrite it
:param name:
:param array:
:return:
"""
if name in self.file:
# delete and remake
del (self.file[name])
self.file.create_dataset(name, data=array)
self.file.flush()
def read_array(self, name):
"""
Return previously stored numpy array
"""
if name in self.file:
return self.file[name]
return None
class HDFDataWrapper:
"""
Version that reads the deprecated magellan hdf files
"""
def __init__(self, path):
self.hdf = MagellanHDFContainer(path)
def read_ground_truth_image(self, position_index, z_index):
"""
Read an image from which ground truth focus quality can be measured
:param pos_index: index of xy position
:param z_index: index of z slice (starting at 0)
:param xy_slice: (cropped region of image)
:return:
"""
return self.hdf.read_image(channel_name='DPC_Bottom', position_index=position_index,
relative_z_index=z_index)
def read_prediction_image(self, position_index, z_index, patch_index, split_k):
"""
Read image used for single shot prediction (i.e. single LED image)
:param pos_index: index of xy position
:param z_index: index of z slice (starting at 0)
:param split_k: number of crops along each dimension
:param patch_index: index of the crop
:return:
"""
patch_size, patches_per_image = get_patch_metadata((self.get_image_width(),
self.get_image_height()), split_k)
y_tile_index = patch_index // split_k
x_tile_index = patch_index % split_k
xy_slice = [[y_tile_index * patch_size, (y_tile_index + 1) * patch_size],
[x_tile_index * patch_size, (x_tile_index + 1) * patch_size]]
return self.hdf.read_image(channel_name='autofocus', position_index=position_index,
relative_z_index=z_index, xy_slice=xy_slice)
def get_image_width(self):
"""
:return: image width in pixels
"""
return self.hdf.tilewidth
def get_image_height(self):
"""
:return: image height in pixels
"""
return self.hdf.tileheight
def get_num_z_slices_at(self, position_index):
"""
return number of z slices (i.e. focal planes) at the given XY position
:param position_index:
:return:
"""
return self.hdf.get_num_slices_at(position_index)
def get_pixel_size_z_um(self):
"""
:return: distance in um between consecutive z slices
"""
return self.hdf.pixelsizeZ_um
def get_num_xy_positions(self):
"""
:return: total number of xy positions in data set
"""
return self.hdf.num_positions
def store_focal_plane(self, name, focal_position):
"""
Store the computed focal plane as a string, float pair
"""
self.hdf.write_annotation(name, focal_position)
def read_focal_plane(self, name):
"""
read a previously computed focal plane
:param name: key corresponding to an xy position for which focal plane has already been computed
:return:
"""
return self.hdf.read_annotation(name)
def store_array(self, name, array):
"""
Store a numpy array containing the design matrix for training the non-deterministic part of the network (i.e.
after the Fourier transform) so that it can be retrained quickly without having to recompute
:param name:
:param array: (n examples) x (d feature length) numpy array
"""
self.hdf.store_array(name, array)
def read_array(self, name):
"""
Read and return a previously computed array
:param name:
:return:
"""
return self.hdf.read_array(name)
| 44.185468 | 139 | 0.674715 |
4a21b41126efccf8555c701432b94f9a829a4a91 | 2,733 | py | Python | examples/Graph_Adversarial_Learning/Untargeted/Poisoning/TensorFlow/Metattack.py | TobiasSchmidtDE/GraphGallery | e627e4f454e0ce3813171305a524f5190a6e6f45 | ["MIT"] | null | null | null | examples/Graph_Adversarial_Learning/Untargeted/Poisoning/TensorFlow/Metattack.py | TobiasSchmidtDE/GraphGallery | e627e4f454e0ce3813171305a524f5190a6e6f45 | ["MIT"] | null | null | null | examples/Graph_Adversarial_Learning/Untargeted/Poisoning/TensorFlow/Metattack.py | TobiasSchmidtDE/GraphGallery | e627e4f454e0ce3813171305a524f5190a6e6f45 | ["MIT"] | null | null | null |
import numpy as np
import graphgallery as gg
from graphgallery import functional as gf
from graphgallery.datasets import NPZDataset
data = NPZDataset('cora',
root="~/GraphData/datasets/",
verbose=False,
transform="standardize")
graph = data.graph
splits = data.split_nodes(random_state=15)
# GPU is recommended
device = "gpu"
################### Surrogate model ############################
trainer = gg.gallery.nodeclas.GCN(device=device, seed=None).setup_graph(graph).build()
his = trainer.fit(splits.train_nodes,
splits.val_nodes,
verbose=1,
epochs=200)
################### Attacker model ############################
unlabeled_nodes = np.hstack([splits.val_nodes, splits.test_nodes])
self_training_labels = trainer.predict(unlabeled_nodes).argmax(1)
attacker = gg.attack.untargeted.Metattack(graph, device=device, seed=123).process(splits.train_nodes,
unlabeled_nodes,
self_training_labels,
lr=0.1, # cora lr=0.1, citeseer lr=0.01 reaches the best performance
lambda_=.5,
use_relu=False)
attacker.attack(0.05)
################### Victim model ############################
# Before attack
trainer = gg.gallery.nodeclas.GCN(device=device, seed=123).setup_graph(graph).build()
his = trainer.fit(splits.train_nodes,
splits.val_nodes,
verbose=1,
epochs=100)
original_result = trainer.evaluate(splits.test_nodes)
# After attack
# If a validation set is used, the attacker will be less effective, but we don't know why
trainer = gg.gallery.nodeclas.GCN(attacker.g, device=device, seed=123).process().build()
his = trainer.fit(splits.train_nodes,
# splits.val_nodes,
verbose=1,
epochs=100)
perturbed_result = trainer.evaluate(splits.test_nodes)
################### Results ############################
print(f"original prediction {original_result.accuracy:.2%}")
print(f"perturbed prediction {perturbed_result.accuracy:.2%}")
print(
f"The accuracy has gone down {original_result.accuracy-perturbed_result.accuracy:.2%}"
)
"""original prediction 83.50%
perturbed prediction 76.91%
The accuracy has gone down 6.59%"""
| 43.380952 | 152 | 0.525064 |
4a21b5c313592e5df82bbc546cb6a85e73add8a2 | 1,487 | py | Python | mobo/algorithms.py | yunshengtian/DGEMO | 6e656cf1a5912638369b09698a3d9cadc2055874 | [
"MIT"
] | 41 | 2020-10-21T01:17:45.000Z | 2022-02-07T01:42:44.000Z | mobo/algorithms.py | yunshengtian/DGEMO | 6e656cf1a5912638369b09698a3d9cadc2055874 | [
"MIT"
] | 2 | 2020-11-06T19:28:22.000Z | 2021-03-11T15:19:45.000Z | mobo/algorithms.py | yunshengtian/DGEMO | 6e656cf1a5912638369b09698a3d9cadc2055874 | [
"MIT"
] | 9 | 2020-11-16T05:24:49.000Z | 2022-01-21T08:19:17.000Z | from .mobo import MOBO
'''
High-level algorithm specifications by providing config
'''
class DGEMO(MOBO):
'''
DGEMO
'''
config = {
'surrogate': 'gp',
'acquisition': 'identity',
'solver': 'discovery',
'selection': 'dgemo',
}
class TSEMO(MOBO):
'''
TSEMO
'''
config = {
'surrogate': 'ts',
'acquisition': 'identity',
'solver': 'nsga2',
'selection': 'hvi',
}
class USEMO_EI(MOBO):
'''
USeMO, using EI as acquisition
'''
config = {
'surrogate': 'gp',
'acquisition': 'ei',
'solver': 'nsga2',
'selection': 'uncertainty',
}
class MOEAD_EGO(MOBO):
'''
MOEA/D-EGO
'''
config = {
'surrogate': 'gp',
'acquisition': 'ei',
'solver': 'moead',
'selection': 'moead',
}
class ParEGO(MOBO):
'''
ParEGO
'''
config = {
'surrogate': 'gp',
'acquisition': 'ei',
'solver': 'parego',
'selection': 'random',
}
'''
Define new algorithms here
'''
class Custom(MOBO):
'''
Totally rely on user arguments to specify each component
'''
config = None
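# Illustrative sketch (not from the original repository): a new algorithm is a
# MOBO subclass carrying a config dict; the component names below reuse values
# that already appear in the classes above. To be reachable by name it would
# also need an entry in the dict inside get_algorithm() below.
class ExampleNewAlgorithm(MOBO):
    '''
    Hypothetical example: GP surrogate + EI acquisition + NSGA-II solver
    '''
    config = {
        'surrogate': 'gp',
        'acquisition': 'ei',
        'solver': 'nsga2',
        'selection': 'hvi',
    }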
def get_algorithm(name):
'''
Get class of algorithm by name
'''
algo = {
'dgemo': DGEMO,
'tsemo': TSEMO,
'usemo-ei': USEMO_EI,
'moead-ego': MOEAD_EGO,
'parego': ParEGO,
'custom': Custom,
}
    return algo[name]
 | 16.340659 | 60 | 0.488231 |
4a21b700e7fdfebc20aa12e07ad32f65708cac97 | 6,936 | py | Python | speedrunpy/game.py | null2264/speedrun.py | 3396a0b38e757348bb42f484ce3929b791db1a9e | [
"MIT"
] | 1 | 2021-04-05T11:03:43.000Z | 2021-04-05T11:03:43.000Z | speedrunpy/game.py | null2264/speedrun.py | 3396a0b38e757348bb42f484ce3929b791db1a9e | [
"MIT"
] | null | null | null | speedrunpy/game.py | null2264/speedrun.py | 3396a0b38e757348bb42f484ce3929b791db1a9e | [
"MIT"
] | null | null | null | """
MIT License
Copyright (c) 2021-Present null2264
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import annotations
import datetime
from typing import Any, Dict, List, Optional, Union
from .asset import Asset
from .category import Category
from .errors import NoDataFound
from .http import HTTPClient
from .level import Level
from .mixin import SRCObjectMixin
from .name import Name
from .page import Page
from .user import User
from .utils import zulu_to_utc
from .variable import Variable
class Game(SRCObjectMixin):
__slots__ = (
"_http",
"id",
"name",
"abbreviation",
"weblink",
"released",
"_release_date",
"ruleset",
"romhack",
"gametypes",
"platforms",
"regions",
"genres",
"engines",
"developers",
"publishers",
"moderators",
"_created",
"assets",
"levels",
"categories",
"variables",
)
def __init__(self, payload: Dict[str, Any], http: HTTPClient) -> None:
super().__init__(payload)
self._http = http
# Dataset given in _bulk mode
self.id: str = payload["id"]
self.name: Name = Name(payload["names"])
self.abbreviation: str = payload["abbreviation"]
self.weblink: str = payload["weblink"]
        # Optionals (will always return None when _bulk mode is active)
self.released: Optional[int] = payload.get("released")
self._release_date: Optional[str] = payload.get("release-date")
self.ruleset: Optional[Dict[str, Union[bool, Any]]] = payload.get("ruleset")
self.romhack: Optional[bool] = payload.get("romhack")
self.gametypes: Optional[Dict[str, Any]] = payload.get("gametypes")
self.platforms: Optional[Dict[str, Any]] = payload.get("platforms")
self.regions: Optional[Dict[str, Any]] = payload.get("regions")
self.genres: Optional[Dict[str, Any]] = payload.get("genres")
self.engines: Optional[Dict[str, Any]] = payload.get("engines")
self.developers: Optional[Dict[str, Any]] = payload.get("developers")
self.publishers: Optional[Dict[str, Any]] = payload.get("publishers")
moderators: Optional[List[Any]] = payload.get("moderators", dict()).get("data")
self.moderators: Optional[List[User]] = None
if moderators:
            # NOTE: This will NOT include the moderator's role,
            # because the mod role is broken (verifier is referred to as super-mod in the API)
self.moderators = [User(i, http=self._http) for i in moderators]
self._created: Optional[str] = payload.get("created")
assets: Optional[Dict[str, Any]] = payload.get("assets")
self.assets: Optional[Dict[str, Asset]] = None
if assets:
self.assets = {
k: Asset(v, http=self._http) for k, v in assets.items() if v["uri"]
}
levels: Optional[Dict[str, Any]] = payload.get("levels")
self.levels: Optional[List[Level]] = None
if levels:
self.levels = [Level(i) for i in levels["data"]]
categories: Optional[Dict[str, Any]] = payload.get("categories")
self.categories: Optional[List[Category]] = None
if categories:
self.categories = [Category(i) for i in categories["data"]]
variables: Optional[Dict[str, Any]] = payload.get("variables")
self.variables: Optional[List[Variable]] = None
if variables:
self.variables = [Variable(i) for i in variables["data"]]
def __str__(self) -> Optional[str]:
return self.name.international
def __repr__(self) -> str:
return f"<{self.__class__.__name__} id={self.id} name={self.name}>"
def __eq__(self, other: Any) -> bool:
return isinstance(other, Game) and self.id == other.id
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
@property
def release_date(self) -> Optional[datetime.datetime]:
if self._release_date:
return datetime.datetime.fromisoformat(self._release_date).replace(
tzinfo=datetime.timezone.utc
)
@property
def created(self) -> Optional[datetime.datetime]:
if self._created:
created = zulu_to_utc(self._created)
return datetime.datetime.fromisoformat(created)
async def get_derived_games(
self,
*,
name: Optional[str] = None,
abbreviation: Optional[str] = None,
released: Optional[int] = None,
gametype: Optional[str] = None,
platform: Optional[str] = None,
region: Optional[str] = None,
genre: Optional[str] = None,
engine: Optional[str] = None,
developer: Optional[str] = None,
publisher: Optional[str] = None,
moderator: Optional[str] = None,
_bulk: bool = False,
offset: Optional[int] = None,
max: Optional[int] = None,
error_on_empty: bool = False,
) -> Page[Game]:
"""|coro|
Get derived games
"""
data = await self._http._derived_games(
self.id,
name=name,
abbreviation=abbreviation,
released=released,
gametype=gametype,
platform=platform,
region=region,
genre=genre,
engine=engine,
developer=developer,
publisher=publisher,
moderator=moderator,
_bulk=_bulk,
offset=offset,
max=max,
)
games: List[Game] = [Game(i, http=self._http) for i in data["data"]]
if error_on_empty and not games:
raise NoDataFound
return Page(
page_info=data["pagination"],
data=games,
)
get_romhacks = get_derived_games
async def get_records(self):
pass
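# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original file): Game objects
# are normally created by the library from speedrun.com API payloads. The keys
# below are the ones __init__ requires; everything else is optional.
#
#   payload = {
#       "id": "abc123",
#       "names": {"international": "Example Game"},
#       "abbreviation": "exg",
#       "weblink": "https://www.speedrun.com/exg",
#   }
#   game = Game(payload, http=HTTPClient())  # HTTPClient from .http
#   str(game)   # -> "Example Game"
#   repr(game)  # -> "<Game id=abc123 name=Example Game>"
# ---------------------------------------------------------------------------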
| 34.167488 | 87 | 0.62327 |
4a21b7475afe46c7897bf7497912bc89b15d395d | 844 | py | Python | doc/tutorials/shader_toy/shadertoy_demo_3.py | janscas/arcade | d83dda946563429c8ee7d1a036bc0407758c638f | [
"MIT"
] | null | null | null | doc/tutorials/shader_toy/shadertoy_demo_3.py | janscas/arcade | d83dda946563429c8ee7d1a036bc0407758c638f | [
"MIT"
] | null | null | null | doc/tutorials/shader_toy/shadertoy_demo_3.py | janscas/arcade | d83dda946563429c8ee7d1a036bc0407758c638f | [
"MIT"
] | null | null | null | import arcade
from arcade.experimental import Shadertoy
# Derive an application window from Arcade's parent Window class
class MyGame(arcade.Window):
def __init__(self):
# Call the parent constructor
super().__init__(width=1920, height=1080)
# Load a file and create a shader from it
file_name = "circle_6.glsl"
self.shadertoy = Shadertoy(size=self.get_size(),
main_source=open(file_name).read())
def on_draw(self):
# Set uniform data to send to the GLSL shader
self.shadertoy.program['pos'] = self.mouse["x"], self.mouse["y"]
self.shadertoy.program['color'] = arcade.get_three_float_color(arcade.color.LIGHT_BLUE)
# Run the GLSL code
self.shadertoy.render()
if __name__ == "__main__":
MyGame()
arcade.run()
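# Note (assumption, not shown in this file): the circle_6.glsl shader loaded in
# __init__ is expected to declare uniforms matching the names assigned through
# self.shadertoy.program[...] in on_draw(), e.g.
#
#   uniform vec2 pos;
#   uniform vec3 color;
#
# If the GLSL side does not declare them, setting those uniforms would fail.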
| 32.461538 | 95 | 0.645735 |
4a21b7647217b31a210a39d652652b65529e9a0d | 436 | py | Python | openbadge-server/openbadge/migrations/0002_datafile_project.py | daniellandau/openbadge-server | af2d1f900efa9099ca72ddba300be1535c782f29 | [
"MIT"
] | 4 | 2018-11-24T05:09:04.000Z | 2020-12-09T18:41:14.000Z | openbadge-server/openbadge/migrations/0002_datafile_project.py | daniellandau/openbadge-server | af2d1f900efa9099ca72ddba300be1535c782f29 | [
"MIT"
] | 19 | 2016-10-13T22:01:21.000Z | 2019-05-13T22:14:45.000Z | openbadge-server/openbadge/migrations/0002_datafile_project.py | daniellandau/openbadge-server | af2d1f900efa9099ca72ddba300be1535c782f29 | [
"MIT"
] | 9 | 2017-08-11T04:10:56.000Z | 2021-03-08T17:29:23.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('openbadge', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='datafile',
name='project',
field=models.ForeignKey(related_name='data', to='openbadge.Project', null=True),
),
]
| 21.8 | 92 | 0.610092 |
4a21b786539254357cc6c9e9cd454ab58a8d53e4 | 8,729 | py | Python | dragonchain/transaction_processor/level_3_actions.py | cheeseandcereal/dragonchain | 34d34e344b887c2a0eeb591ede2015cc2506a323 | [
"Apache-2.0"
] | null | null | null | dragonchain/transaction_processor/level_3_actions.py | cheeseandcereal/dragonchain | 34d34e344b887c2a0eeb591ede2015cc2506a323 | [
"Apache-2.0"
] | null | null | null | dragonchain/transaction_processor/level_3_actions.py | cheeseandcereal/dragonchain | 34d34e344b887c2a0eeb591ede2015cc2506a323 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Dragonchain, Inc.
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
# You may obtain a copy of the Apache License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
import os
import time
import math
from typing import Set, Union, Tuple, List, Iterable, TYPE_CHECKING
from dragonchain.lib.dao import block_dao
from dragonchain.lib.dto import l3_block_model
from dragonchain.lib import keys
from dragonchain.lib import broadcast
from dragonchain.lib import matchmaking
from dragonchain.lib import party
from dragonchain.lib import queue
from dragonchain import logger
from dragonchain.transaction_processor import shared_functions
if TYPE_CHECKING:
from dragonchain.lib.dto import l2_block_model
from dragonchain.lib.types import L1Headers
PROOF_SCHEME = os.environ["PROOF_SCHEME"].lower()
ADDRESS = os.environ["INTERNAL_ID"]
_log = logger.get_logger()
def execute() -> None:
"""Gets the next L2 block arrays from the queue and processes it"""
matchmaking.renew_registration_if_necessary()
t0 = time.time()
l1_headers, l2_blocks = get_new_blocks()
if l1_headers and l2_blocks:
t1 = time.time()
_log.info(f"[L3] Got next L2 block array from dcid: {l1_headers['dc_id']} blockid: {l1_headers['block_id']}")
ddss, valid_block_count, regions, clouds = verify_blocks(l2_blocks, l1_headers)
if not valid_block_count:
_log.info("[L3] None of the L2 blocks sent up were valid. Not creating any block/verifications")
clear_processing_blocks()
recurse_if_necessary()
return
t2 = time.time()
l3_block = create_block(l1_headers, ddss, valid_block_count, regions, clouds, l2_blocks)
t3 = time.time()
send_data(l3_block)
t4 = time.time()
# Clear our processing queue (finished successfully)
clear_processing_blocks()
total = t4 - t0
_log.info(
f"[L3] Processed {len(l2_blocks)} l2 blocks for l1 block id {l1_headers['dc_id']} with dcid {l1_headers['block_id']} in {total:.4f} seconds"
)
_log.info(f"[L3] Retrieving L2 block list from queue: {t1 - t0:.4f} sec ({((t1 - t0) / total) * 100:.1f}% of processing)")
_log.info(f"[L3] Verified all L2 blocks in list: {t2 - t1:.4f} sec ({((t2 - t1) / total) * 100:.1f}% of processing)")
_log.info(f"[L3] Creating block with proof: {t3 - t2:.4f} sec ({((t3 - t2) / total) * 100:.1f}% of processing)")
_log.info(f"[L3] Uploading block and broadcasting down: {t4 - t3:.4f} sec ({((t4 - t3) / total) * 100:.1f}% of processing)")
recurse_if_necessary()
def clear_processing_blocks() -> None:
queue.clear_processing_queue()
def send_data(block: l3_block_model.L3BlockModel) -> None:
_log.info("[L3] Uploading block")
block_dao.insert_block(block)
_log.info("[L3] Inserting complete. Broadcasting block")
broadcast.dispatch(block)
def recurse_if_necessary() -> None:
if queue.is_not_empty():
_log.info("[L3] Another block is queue, immediately starting processing")
execute()
else:
_log.info("[L3] Block processing complete and no new block to process. Waiting")
def get_new_blocks() -> Union[Tuple[None, None], Tuple["L1Headers", List["l2_block_model.L2BlockModel"]]]:
# Safety check to recover after unexpected crash while creating last block if necessary
queue.check_and_recover_processing_if_necessary()
return queue.get_next_l2_blocks()
def get_verifying_keys(chain_id: str) -> keys.DCKeys:
return keys.DCKeys(chain_id)
def verify_blocks(l2_blocks: Iterable["l2_block_model.L2BlockModel"], l1_headers: "L1Headers") -> Tuple[int, int, List[str], List[str]]:
ddss = 0
l2_count = 0
regions: Set[str] = set()
clouds: Set[str] = set()
checked: Set[str] = set()
for block in l2_blocks:
        # We use a checked set of proofs (which are unique) to make sure we don't process
        # a block twice, and to ensure the block we're looking at is actually relevant
check = (
block.proof not in checked
and block.l1_dc_id == l1_headers["dc_id"]
and block.l1_block_id == l1_headers["block_id"]
and block.l1_proof == l1_headers["proof"]
)
if check:
clouds, regions, ddss, l2_count = verify_block(block, clouds, regions, ddss, l2_count)
else:
_log.info(f"[L3] L2 block was duplicated or not relevant to this verification.\n{block.__dict__}")
# Finally, add this block into our checked blocks list
checked.add(block.proof)
return ddss, l2_count, list(regions), list(clouds)
def verify_block(
block: "l2_block_model.L2BlockModel", clouds: Set[str], regions: Set[str], ddss: int, l2_count: int
) -> Tuple[Set[str], Set[str], int, int]:
try:
l2_verify_keys = get_verifying_keys(block.dc_id)
_log.info(f"[L3] Verifying proof for L2 block id {block.block_id} from {block.dc_id}")
if l2_verify_keys.verify_block(block):
l2_count += 1
l2_ddss = block.current_ddss or "0"
matchmaking_config = matchmaking.get_registration(block.dc_id)
clouds.add(matchmaking_config["cloud"])
regions.add(matchmaking_config["region"])
ddss += int(float(l2_ddss))
_log.info(f"[L3] Finished processing valid L2 block {block.block_id}")
else:
_log.info(f"[L3] Proof for L2 block id {block.block_id} from {block.dc_id} was invalid. Not including block in stats.")
except Exception:
_log.exception("[L3] Could not get L2's verifying keys. Not incrementing stats for this block.")
return clouds, regions, ddss, l2_count
def get_next_block_info() -> Tuple[int, str]:
previous = block_dao.get_last_block_proof()
_log.info(f"[L3] Got previous block information: {previous}")
if not previous:
# Throws an exception if sanity check fails
shared_functions.sanity_check_empty_chain()
block_id = 1
prev_proof = ""
else:
block_id = int(previous["block_id"]) + 1
prev_proof = previous["proof"]
_log.info(f"[L3] Block ID: {block_id}")
return block_id, prev_proof
def create_block(
l1_headers: "L1Headers",
ddss: Union[str, float, int],
valid_block_count: int,
regions: List[str],
clouds: List[str],
l2_blocks: Iterable["l2_block_model.L2BlockModel"],
) -> l3_block_model.L3BlockModel:
block_id, prev_proof = get_next_block_info()
# Pull configuration from matchmaking directly to get DDSS (not stored locally)
l2_proofs = []
for block in l2_blocks:
l2_proofs.append({"dc_id": block.dc_id, "block_id": block.block_id, "proof": block.proof})
l3_block = l3_block_model.L3BlockModel(
dc_id=keys.get_public_id(),
current_ddss=party.get_address_ddss(ADDRESS), # Get DDSS from party, cached hourly
block_id=str(block_id),
timestamp=str(math.floor(time.time())),
prev_proof=prev_proof,
scheme=PROOF_SCHEME,
l1_dc_id=l1_headers["dc_id"],
l1_block_id=l1_headers["block_id"],
l1_proof=l1_headers["proof"],
l2_proofs=l2_proofs,
ddss=str(ddss),
l2_count=str(valid_block_count),
regions=regions,
clouds=clouds,
)
sign_block(l3_block)
return l3_block
def sign_block(l3_block: l3_block_model.L3BlockModel) -> None:
if PROOF_SCHEME == "work":
_log.info("[L3] Performing PoW on block")
l3_block.proof, l3_block.nonce = keys.get_my_keys().pow_block(l3_block)
else:
_log.info("[L3] Signing block")
l3_block.proof = keys.get_my_keys().sign_block(l3_block)
_log.info(f"[L3] Finished Block:\n{l3_block.export_as_at_rest()}")
| 38.96875 | 152 | 0.680032 |
4a21b7e4eab913e8f57cb579261415eb822a5338 | 13,000 | py | Python | nsls2ptycho/core/widgets/mplcanvastool.py | dmgav/ptycho_gui | a0c60146d81b99425f6ed39c6c722874ffff63bf | [
"MIT"
] | 2 | 2019-08-05T20:12:45.000Z | 2021-06-12T13:10:03.000Z | nsls2ptycho/core/widgets/mplcanvastool.py | Yongme/ptycho_gui | 4474008f85b0aad4519fb2236be8b81c8c6e818f | [
"MIT"
] | 47 | 2019-01-15T21:08:55.000Z | 2019-08-05T18:41:57.000Z | nsls2ptycho/core/widgets/mplcanvastool.py | Yongme/ptycho_gui | 4474008f85b0aad4519fb2236be8b81c8c6e818f | [
"MIT"
] | 3 | 2019-08-05T19:04:00.000Z | 2021-08-04T13:45:05.000Z | import os
from PyQt5 import QtWidgets
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.pyplot import Axes
import numpy as np
from nsls2ptycho.core.widgets.imgTools import estimate_roi
from nsls2ptycho.core.widgets.eventhandler import EventHandler
class MplCanvasTool(QtWidgets.QWidget):
def __init__(self, parent=None, width=5, height=4, dpi=100):
super().__init__(parent)
fig = Figure(figsize=(width, height), dpi=dpi)
ax = Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
self.ax = ax
self.fig = fig
self.canvas = FigureCanvas(fig)
# initialized by _get_roi_bar()
self.sp_x0 = None
self.sp_y0 = None
self.sp_w = None
self.sp_h = None
self._roi_all = None
self.ref_roi_side = [64, 96, 128, 160, 192, 224, 256] # x 32, square
self._actions = {}
self._active = None
self._eventHandler = EventHandler()
self.roi_changed = self._eventHandler.roi_changed
self._ids = []
layout = QtWidgets.QVBoxLayout()
layout.addWidget(self.canvas)
layout.addLayout(self._get_toolbar())
layout.addLayout(self._get_roi_bar())
self.setLayout(layout)
self._eventHandler.brush_changed.connect(self.update_overlay)
self.image = None
self.image_data = None
self.image_handler = None
self.overlay = None
self.overlay_handler = None
self.reset()
def _get_toolbar(self):
self.btn_home = QtWidgets.QPushButton('RESET')
#self.btn_pan_zoom = QtWidgets.QPushButton('PAN/ZOOM')
self.btn_roi = QtWidgets.QPushButton('ROI')
self.btn_brush = QtWidgets.QPushButton('BRUSH')
#self.btn_roi_adjust = QtWidgets.QPushButton('ADJUST')
#self.btn_pan_zoom.setCheckable(True)
self.btn_roi.setCheckable(True)
self.btn_brush.setCheckable(True)
self.btn_home.clicked.connect(self._on_reset)
#self.btn_pan_zoom.clicked.connect(lambda: self._update_buttons('pan/zoom'))
self.btn_roi.clicked.connect(lambda: self._update_buttons('roi'))
self.btn_brush.clicked.connect(lambda: self._update_buttons('brush'))
#self.btn_roi_adjust.clicked.connect(self._on_adjust_roi)
#self._actions['pan/zoom'] = self.btn_pan_zoom
self._actions['roi'] = self.btn_roi
self._actions['brush'] = self.btn_brush
layout = QtWidgets.QHBoxLayout()
layout.addWidget(self.btn_home)
#layout.addWidget(self.btn_pan_zoom)
layout.addWidget(self.btn_roi)
#layout.addWidget(self.btn_roi_adjust)
layout.addWidget(self.btn_brush)
return layout
def _get_roi_bar(self):
self.sp_x0 = QtWidgets.QSpinBox(self)
self.sp_y0 = QtWidgets.QSpinBox(self)
self.sp_w = QtWidgets.QSpinBox(self)
self.sp_h = QtWidgets.QSpinBox(self)
self._roi_all = [self.sp_x0, self.sp_y0, self.sp_w, self.sp_h]
for sp in self._roi_all:
sp.setMaximum(9999)
sp.setMinimum(0)
sp.setValue(0)
sp.valueChanged.connect(self._update_roi_canvas)
self.coord_label = QtWidgets.QLabel('(x, y), value')
self._eventHandler.roi_changed.connect(self._update_roi)
self._eventHandler.coord_changed.connect(self._update_coord)
layout = QtWidgets.QHBoxLayout()
layout.addWidget(QtWidgets.QLabel('x0'))
layout.addWidget(self.sp_x0)
layout.addWidget(QtWidgets.QLabel('y0'))
layout.addWidget(self.sp_y0)
layout.addWidget(QtWidgets.QLabel('w'))
layout.addWidget(self.sp_w)
layout.addWidget(QtWidgets.QLabel('h'))
layout.addWidget(self.sp_h)
layout.addWidget(self.coord_label)
spacerItem = QtWidgets.QSpacerItem(0,0,QtWidgets.QSizePolicy.Expanding,QtWidgets.QSizePolicy.Preferred)
layout.addItem(spacerItem)
return layout
def _update_coord(self, ix, iy):
if self.image is None or ix < 0 or ix >= self.image.shape[1] or iy < 0 or iy >= self.image.shape[0]:
value = 'None'
else:
value = '{:.2e}'.format(self.image[iy, ix])
self.coord_label.setText('({:d}, {:d}), {:s}'.format(ix, iy, value))
def _update_roi(self, x0, y0, w, h):
for sp in self._roi_all: sp.valueChanged.disconnect(self._update_roi_canvas)
self.sp_x0.setValue(x0)
self.sp_y0.setValue(y0)
self.sp_w.setValue(w)
self.sp_h.setValue(h)
for sp in self._roi_all: sp.valueChanged.connect(self._update_roi_canvas)
def _update_roi_canvas(self):
x0 = self.sp_x0.value()
y0 = self.sp_y0.value()
w = self.sp_w.value()
h = self.sp_h.value()
if w <= 0 or h <= 0: return
self._eventHandler.set_curr_roi(self.ax, (x0, y0), w, h)
def _update_buttons(self, op_name):
if self._active == op_name:
self._active = None
else:
self._active = op_name
#self._actions['pan/zoom'].setChecked(self._active == 'pan/zoom')
self._actions['roi'].setChecked(self._active == 'roi')
self._actions['brush'].setChecked(self._active == 'brush')
for id in self._ids:
self.canvas.mpl_disconnect(id)
self._ids = []
#if self._active == 'pan/zoom':
# self._ids = self._eventHandler.zoom_pan_factory(self.ax)
#el
if self._active == 'roi':
self._ids = self._eventHandler.roi_factory(self.ax)
elif self._active == 'brush':
self._ids = self._eventHandler.brush_factory(self.ax)
else:
self._ids = self._eventHandler.zoom_pan_factory(self.ax)
def _on_reset(self):
# clear bad pixels to restore a clean state
# TODO: investigate why self.clear_overlay() is not working
self.overlay = None
self.set_overlay([], [])
# clear all ROIs (red and blue)
for sp in self._roi_all:
sp.valueChanged.disconnect(self._update_roi_canvas)
self._eventHandler.roi_changed.disconnect(self._update_roi)
for sp in self._roi_all:
sp.setValue(0.)
self._eventHandler.ref_rect = None
self._eventHandler.ref_idx = -1
for rect in self._eventHandler.all_rect:
rect.remove()
self._eventHandler.all_rect = []
#self.ax.figure.canvas.draw()
#self.canvas.draw()
for sp in self._roi_all:
sp.valueChanged.connect(self._update_roi_canvas)
self._eventHandler.roi_changed.connect(self._update_roi)
if self.image_handler:
width = self.image.shape[1]
height = self.image.shape[0]
self.ax.set_xlim(0, width)
self.ax.set_ylim(height, 0)
self.canvas.draw()
def _on_adjust_roi(self):
'''
Always use original image data (i.e. not log scaled one) for roi prediction.
Also, currently, it ignores user selected (red-colored) roi
todo: adjust based on user selected roi
'''
if self.image is None:
return
_x0, _y0, _w, _h = estimate_roi(self.image, threshold=1.0)
cx = np.int(np.round(_x0 + _w//2))
cy = np.int(np.round(_y0 + _h//2))
side = 32 * (np.maximum(_w, _h) // 32)
x0 = np.int(np.maximum(cx - side//2, 0))
y0 = np.int(np.maximum(cy - side//2, 0))
x1 = x0 + side
y1 = y0 + side
offset_x = np.maximum(x1 - self.image.shape[1] + 1, 0)
x1 = x1 - offset_x
offset_y = np.maximum(y1 - self.image.shape[0] + 1, 0)
y1 = y1 - offset_y
h = y1 - y0
w = x1 - x0
self._eventHandler.set_curr_roi(self.ax, (x0, y0), w, h)
self._update_roi(x0, y0, w, h)
def reset(self):
for sp in self._roi_all:
sp.setValue(0.)
self.image = None
self.image_data = None
self.image_handler = None
self.overlay = None
self.overlay_handler = None
self.ax.clear()
self.ax.set_axis_off()
self.canvas.draw()
def draw_image(self, image, cmap='gray', init_roi=False, use_log=False):
# TODO: merge this function and use_logscale()
#print(cmap, init_roi, use_log)
if use_log:
print('log scale')
image_data = np.nan_to_num(np.log(image + 1.))
else:
image_data = image
if self.image_handler is None:
self.image_handler = self.ax.imshow(image_data, cmap=cmap)
else:
self.image_handler.set_data(image_data)
# todo: update data min, max (maybe not needed)
self.image_handler.set_clim(vmin=np.min(self.image_data), vmax=np.max(self.image_data))
self.image = image
self.image_data = image_data
if init_roi:
self._on_adjust_roi()
if len(self._ids) == 0:
self._ids = self._eventHandler.zoom_pan_factory(self.ax)
self.canvas.draw()
def update_overlay(self, pixel):
'''
Update overlay image from brushed pixels
'''
if self.image is None: return
if self.overlay is None or self.overlay.shape[:2] != self.image.shape:
self.overlay = np.zeros(self.image.shape + (4,), dtype=np.float32)
highlight = (1., 0., 0., .5)
x, y = pixel
if self.overlay[y, x, 0] == 1.:
self.overlay[y, x] = (0., 0., 0., 0.)
else:
self.overlay[y, x] = highlight
if self.overlay_handler is None:
self.overlay_handler = self.ax.imshow(self.overlay)
else:
self.overlay_handler.set_data(self.overlay)
self.overlay_handler.set_visible(True)
# todo: set show badpixel flag
self.canvas.draw()
def set_overlay(self, rows, cols):
if self.image is None: return
if len(rows) != len(cols): return
highlight = (1, 0, 0, .5)
if self.overlay is None:
self.overlay = np.zeros(self.image.shape + (4,), dtype=np.float32)
self.overlay[rows, cols] = highlight
if self.overlay_handler is None:
self.overlay_handler = self.ax.imshow(self.overlay)
else:
self.overlay_handler.set_data(self.overlay)
self.overlay_handler.set_visible(True)
self.canvas.draw()
def clear_overlay(self):
if self.overlay is None: return
self.overlay[:,:,0] = 0
self.canvas.draw()
def show_overlay(self, state):
if self.overlay_handler is None: return
self.overlay_handler.set_visible(state)
self.canvas.draw()
def use_logscale(self, state):
# TODO: merge this function and draw_image()
if self.image is None: return
if state:
self.image_data = np.log(np.clip(self.image, 1., None))
else:
self.image_data = self.image
self.image_handler.set_data(self.image_data)
self.image_handler.set_clim(vmin=np.min(self.image_data), vmax=np.max(self.image_data))
self.canvas.draw()
def get_red_roi(self):
'''
Return red colored ROI.
If there are multiple, return the largest area one
'''
all_roi = self._eventHandler.get_red_roi()
largest_roi = None
largest_area = 0.
for roi in all_roi:
xy, width, height = roi
# canonicalize the ROI
x0, y0 = xy
if width < 0:
x0 += width
width = -width
if height < 0:
y0 += height
height = -height
area = width * height
if area > largest_area:
largest_area = area
largest_roi = (
np.int(np.floor(x0 + 0.5)),
np.int(np.floor(y0 + 0.5)),
np.int(np.round(width)),
np.int(np.round(height))
)
return largest_roi
def get_blue_roi(self):
'''
Return blue colored ROI
'''
all_roi = []
for roi in self._eventHandler.get_blue_roi():
xy, width, height = roi
# canonicalize the ROI
x0, y0 = xy
if width < 0:
x0 += width
width = -width
if height < 0:
y0 += height
height = -height
all_roi.append((
np.int(np.floor(x0 + 0.5)),
np.int(np.floor(y0 + 0.5)),
np.int(np.round(width)),
np.int(np.round(height))
))
return all_roi
def get_badpixels(self):
if self.overlay is None: return None
return np.where(self.overlay[:,:,0])
| 33.505155 | 111 | 0.586923 |
4a21b7f1c321169c08536f883d32964e577e1614 | 1,956 | py | Python | metallicity.py | kadglass/SHELS_metallicity | a58f1f561ecd292b3f3281121f57e4564b7461b6 | [
"BSD-3-Clause"
] | 1 | 2020-01-27T18:50:08.000Z | 2020-01-27T18:50:08.000Z | metallicity.py | kadglass/SHELS_metallicity | a58f1f561ecd292b3f3281121f57e4564b7461b6 | [
"BSD-3-Clause"
] | null | null | null | metallicity.py | kadglass/SHELS_metallicity | a58f1f561ecd292b3f3281121f57e4564b7461b6 | [
"BSD-3-Clause"
] | null | null | null |
from astropy.table import Table
from numpy import log10
bin_size = [0.1, 0.2, 0.3, 0.4, 0.5]
environment = ["void", "wall"]
for i in bin_size:
for j in environment:
file = "/Users/leilani/Desktop/SHELS/LGamboa/{0}Bin_{1}.txt".format(i, j, "a+")
data =Table.read(file, format = "ascii.commented_header")
N2_list = []
O3N2_list = []
N2O2_list = []
for h in range( len(data) ):
### DEFINITIONS ###
            # OII/OIII/NII are observed emission-line fluxes
HaF = data["HaF"][h]
HbF = data["HbF"][h]
OII = data["OII"][h] #3727
NII = data["NII"][h] #6583
OIII = data["OIII"][h] #5007
SSFR = data["SSFR"][h]
mass = data["mass"][h]
### EQUATIONS ###
            # N2/O3N2/N2O2 are line ratios
N2 = NII / HaF
O3N2 = OIII / HbF / N2 #yes, this is the correct equation (see Brown et al)
N2O2 = NII / OII
# Average SSFR at Mstar
mass = log10(mass)
aveSSFR = 283.728 - ( 116.265 * mass) + ( 17.4403 * (mass ** 2) ) - ( 1.17146 * (mass ** 3) ) + ( 0.0296526 * (mass ** 4) )
# Delta log(SSFR)
d_logSSFR = log10(SSFR) - aveSSFR
# N2 Method
N2_metallicity = 9.12 + ( 0.58 * log10(N2) ) - ( 0.19 * d_logSSFR )
N2_list.append(N2_metallicity)
# O3N2 Method
O3N2_metallicity = 8.98 - ( 0.32 * log10(O3N2) ) - ( 0.18 * d_logSSFR )
O3N2_list.append(O3N2_metallicity)
# N2O2 Method
N2O2_metallicity = 9.20 + ( 0.54 * log10(N2O2) ) - ( 0.36 * d_logSSFR )
N2O2_list.append(N2O2_metallicity)
data["N2"] = N2_list
data["O3N2"] = O3N2_list
data["N2O2"] = N2O2_list
data.write(file, format = "ascii.commented_header", overwrite = True)
| 27.166667 | 135 | 0.482618 |
4a21b8135bf64f5f8e4cf350d28a3b62761024fc | 972 | py | Python | sympy/polys/polyerrors.py | matthew-brett/sympy | 7b87b62144c28f2e734e9106897c72806b99d181 | [
"BSD-3-Clause"
] | null | null | null | sympy/polys/polyerrors.py | matthew-brett/sympy | 7b87b62144c28f2e734e9106897c72806b99d181 | [
"BSD-3-Clause"
] | null | null | null | sympy/polys/polyerrors.py | matthew-brett/sympy | 7b87b62144c28f2e734e9106897c72806b99d181 | [
"BSD-3-Clause"
] | null | null | null | """Definitions of common exceptions for `polys` module. """
class OperationNotSupported(Exception):
def __init__(self, poly, func):
self.poly = poly
self.func = func
def __str__(self): # pragma: no cover
return "`%s` operation not supported by %s representation" % (self.func, self.poly.rep.__class__.__name__)
class ExactQuotientFailed(Exception):
pass
class HeuristicGCDFailed(Exception):
pass
class HomomorphismFailed(Exception):
pass
class IsomorphismFailed(Exception):
pass
class ExtraneousFactors(Exception):
pass
class UnificationFailed(Exception):
pass
class GeneratorsNeeded(Exception):
pass
class EvaluationFailed(Exception):
pass
class RefinementFailed(Exception):
pass
class PolynomialError(Exception):
pass
class CoercionFailed(Exception):
pass
class NotInvertible(Exception):
pass
class NotAlgebraic(Exception):
pass
class DomainError(Exception):
pass
| 18 | 114 | 0.72428 |
4a21b996e5977e2965e5b7f91a838adc6023d86c | 2,044 | py | Python | rl/util.py | sheecegardezi/keras-rl | 4b673771bff47cc84d5e4a4088c6575ecce963af | [
"MIT"
] | null | null | null | rl/util.py | sheecegardezi/keras-rl | 4b673771bff47cc84d5e4a4088c6575ecce963af | [
"MIT"
] | null | null | null | rl/util.py | sheecegardezi/keras-rl | 4b673771bff47cc84d5e4a4088c6575ecce963af | [
"MIT"
] | null | null | null | from keras.models import model_from_config, Sequential, Model, model_from_config
import keras.optimizers as optimizers
from keras.optimizers import optimizer_from_config
def clone_model(model, custom_objects={}):
# Requires Keras 1.0.7 since get_config has breaking changes.
config = {
'class_name': model.__class__.__name__,
'config': model.get_config(),
}
clone = model_from_config(config, custom_objects=custom_objects)
clone.set_weights(model.get_weights())
return clone
def clone_optimizer(optimizer):
# Requires Keras 1.0.7 since get_config has breaking changes.
params = dict([(k, v) for k, v in optimizer.get_config().items()])
config = {
'class_name': optimizer.__class__.__name__,
'config': params,
}
clone = optimizer_from_config(config)
return clone
def get_soft_target_model_updates(target, source, tau):
target_weights = target.trainable_weights + sum([l.non_trainable_weights for l in target.layers], [])
source_weights = source.trainable_weights + sum([l.non_trainable_weights for l in source.layers], [])
assert len(target_weights) == len(source_weights)
# Create updates.
updates = []
for tw, sw in zip(target_weights, source_weights):
updates.append((tw, tau * sw + (1. - tau) * tw))
return updates
def get_object_config(o):
config = {
'class_name': o.__class__.__name__,
'config': o.get_config()
}
return config
class AdditionalUpdatesOptimizer(optimizers.Optimizer):
def __init__(self, optimizer, additional_updates):
super(AdditionalUpdatesOptimizer, self).__init__()
self.optimizer = optimizer
self.additional_updates = additional_updates
def get_updates(self, params, constraints, loss):
updates = self.optimizer.get_updates(params, constraints, loss)
updates += self.additional_updates
self.updates = updates
return self.updates
def get_config(self):
return self.optimizer.get_config()
| 32.967742 | 105 | 0.701076 |
4a21ba40a43a7a209f76e35ac34383bf6c7708bc | 43,597 | gyp | Python | ash/ash.gyp | SlimKatLegacy/android_external_chromium_org | ee480ef5039d7c561fc66ccf52169ead186f1bea | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2015-03-04T02:36:53.000Z | 2016-06-25T11:22:17.000Z | ash/ash.gyp | j4ckfrost/android_external_chromium_org | a1a3dad8b08d1fcf6b6b36c267158ed63217c780 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | ash/ash.gyp | j4ckfrost/android_external_chromium_org | a1a3dad8b08d1fcf6b6b36c267158ed63217c780 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 4 | 2015-02-09T08:49:30.000Z | 2017-08-26T02:03:34.000Z | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
'grit_out_dir': '<(SHARED_INTERMEDIATE_DIR)/chrome',
},
'includes': [
'ash_resources.gypi',
],
'targets': [
{
'target_name': 'ash',
'type': '<(component)',
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:base_i18n',
'../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../cc/cc.gyp:cc',
'../content/content.gyp:content',
'../content/content.gyp:content_browser',
'../ipc/ipc.gyp:ipc',
'../media/media.gyp:media',
'../net/net.gyp:net',
'../skia/skia.gyp:skia',
'../third_party/icu/icu.gyp:icui18n',
'../third_party/icu/icu.gyp:icuuc',
'../ui/app_list/app_list.gyp:app_list',
'../ui/aura/aura.gyp:aura',
'../ui/base/strings/ui_strings.gyp:ui_strings',
'../ui/compositor/compositor.gyp:compositor',
'../ui/events/events.gyp:events',
'../ui/gfx/gfx.gyp:gfx',
'../ui/keyboard/keyboard.gyp:keyboard',
'../ui/message_center/message_center.gyp:message_center',
'../ui/oak/oak.gyp:oak',
'../ui/resources/ui_resources.gyp:ui_resources',
'../ui/ui.gyp:ui',
'../ui/views/controls/webview/webview.gyp:webview',
'../ui/views/views.gyp:views',
'../ui/web_dialogs/web_dialogs.gyp:web_dialogs',
'../url/url.gyp:url_lib',
'ash_strings.gyp:ash_strings',
'ash_resources',
],
'defines': [
'ASH_IMPLEMENTATION',
],
'sources': [
# All .cc, .h under ash, except unittests
'accelerators/accelerator_commands.cc',
'accelerators/accelerator_commands.h',
'accelerators/accelerator_controller.cc',
'accelerators/accelerator_controller.h',
'accelerators/accelerator_dispatcher.cc',
'accelerators/accelerator_dispatcher.h',
'accelerators/accelerator_filter.cc',
'accelerators/accelerator_filter.h',
'accelerators/accelerator_table.cc',
'accelerators/accelerator_table.h',
'accelerators/debug_commands.cc',
'accelerators/debug_commands.h',
'accelerators/exit_warning_handler.cc',
'accelerators/exit_warning_handler.h',
'accelerators/focus_manager_factory.cc',
'accelerators/focus_manager_factory.h',
'accelerators/nested_dispatcher_controller.cc',
'accelerators/nested_dispatcher_controller.h',
'accessibility_delegate.h',
'autoclick/autoclick_controller.cc',
'autoclick/autoclick_controller.h',
'ash_constants.cc',
'ash_constants.h',
'ash_switches.cc',
'ash_switches.h',
'cancel_mode.cc',
'cancel_mode.h',
'caps_lock_delegate.h',
'caps_lock_delegate_stub.cc',
'caps_lock_delegate_stub.h',
'debug.cc',
'debug.h',
'default_accessibility_delegate.cc',
'default_accessibility_delegate.h',
'default_user_wallpaper_delegate.cc',
'default_user_wallpaper_delegate.h',
'desktop_background/desktop_background_controller.cc',
'desktop_background/desktop_background_controller.h',
'desktop_background/desktop_background_controller_observer.h',
'desktop_background/desktop_background_view.cc',
'desktop_background/desktop_background_view.h',
'desktop_background/desktop_background_widget_controller.cc',
'desktop_background/desktop_background_widget_controller.h',
'desktop_background/user_wallpaper_delegate.h',
'desktop_background/wallpaper_resizer.cc',
'desktop_background/wallpaper_resizer.h',
'desktop_background/wallpaper_resizer_observer.h',
'display/display_change_observer_chromeos.cc',
'display/display_change_observer_chromeos.h',
'display/display_controller.cc',
'display/display_controller.h',
'display/display_error_observer_chromeos.cc',
'display/display_error_observer_chromeos.h',
'display/display_info.h',
'display/display_info.cc',
'display/display_layout.h',
'display/display_layout.cc',
'display/display_layout_store.h',
'display/display_layout_store.cc',
'display/display_manager.cc',
'display/display_manager.h',
'display/display_pref_util.h',
'display/event_transformation_handler.cc',
'display/event_transformation_handler.h',
'display/mirror_window_controller.cc',
'display/mirror_window_controller.h',
'display/mouse_cursor_event_filter.cc',
'display/mouse_cursor_event_filter.h',
'display/output_configurator_animation.cc',
'display/output_configurator_animation.h',
'display/resolution_notification_controller.cc',
'display/resolution_notification_controller.h',
'display/root_window_transformers.cc',
'display/root_window_transformers.h',
'display/screen_position_controller.cc',
'display/screen_position_controller.h',
'display/shared_display_edge_indicator.cc',
'display/shared_display_edge_indicator.h',
'display/virtual_keyboard_window_controller.cc',
'display/virtual_keyboard_window_controller.h',
'drag_drop/drag_drop_controller.cc',
'drag_drop/drag_drop_controller.h',
'drag_drop/drag_drop_tracker.cc',
'drag_drop/drag_drop_tracker.h',
'drag_drop/drag_image_view.cc',
'drag_drop/drag_image_view.h',
'event_rewriter_delegate.h',
'first_run/desktop_cleaner.cc',
'first_run/desktop_cleaner.h',
'first_run/first_run_helper.cc',
'first_run/first_run_helper.h',
'first_run/first_run_helper_impl.cc',
'first_run/first_run_helper_impl.h',
'focus_cycler.cc',
'focus_cycler.h',
'high_contrast/high_contrast_controller.cc',
'high_contrast/high_contrast_controller.h',
'host/root_window_host_factory.cc',
'host/root_window_host_factory.h',
'host/root_window_host_factory_win.cc',
'keyboard_overlay/keyboard_overlay_delegate.cc',
'keyboard_overlay/keyboard_overlay_delegate.h',
'keyboard_overlay/keyboard_overlay_view.cc',
'keyboard_overlay/keyboard_overlay_view.h',
'keyboard_uma_event_filter.cc',
'keyboard_uma_event_filter.h',
'launcher/launcher.cc',
'launcher/launcher.h',
'launcher/launcher_types.cc',
'launcher/launcher_types.h',
'magnifier/magnification_controller.cc',
'magnifier/magnification_controller.h',
'magnifier/magnifier_constants.h',
'magnifier/partial_magnification_controller.cc',
'magnifier/partial_magnification_controller.h',
'metrics/user_metrics_recorder.cc',
'metrics/user_metrics_recorder.h',
'multi_profile_uma.cc',
'multi_profile_uma.h',
'popup_message.cc',
'popup_message.h',
'root_window_controller.cc',
'root_window_controller.h',
'root_window_settings.cc',
'root_window_settings.h',
'rotator/screen_rotation.cc',
'rotator/screen_rotation.h',
'scoped_target_root_window.cc',
'scoped_target_root_window.h',
'screen_ash.cc',
'screen_ash.h',
'screensaver/screensaver_view.cc',
'screensaver/screensaver_view.h',
'screenshot_delegate.h',
'session_state_delegate.h',
'session_state_observer.cc',
'session_state_observer.h',
'shelf/alternate_app_list_button.cc',
'shelf/alternate_app_list_button.h',
'shelf/app_list_button.cc',
'shelf/app_list_button.h',
'shelf/app_list_shelf_item_delegate.cc',
'shelf/app_list_shelf_item_delegate.h',
'shelf/background_animator.cc',
'shelf/background_animator.h',
'shelf/overflow_bubble.cc',
'shelf/overflow_bubble.h',
'shelf/overflow_bubble_view.cc',
'shelf/overflow_bubble_view.h',
'shelf/overflow_button.cc',
'shelf/overflow_button.h',
'shelf/scoped_observer_with_duplicated_sources.h',
'shelf/shelf_alignment_menu.cc',
'shelf/shelf_alignment_menu.h',
'shelf/shelf_bezel_event_filter.cc',
'shelf/shelf_bezel_event_filter.h',
'shelf/shelf_button.cc',
'shelf/shelf_button.h',
'shelf/shelf_button_host.h',
'shelf/shelf_delegate.h',
'shelf/shelf_icon_observer.h',
'shelf/shelf_item_delegate.h',
'shelf/shelf_item_delegate_manager.cc',
'shelf/shelf_item_delegate_manager.h',
'shelf/shelf_layout_manager.cc',
'shelf/shelf_layout_manager.h',
'shelf/shelf_layout_manager_observer.h',
'shelf/shelf_menu_model.h',
'shelf/shelf_model.cc',
'shelf/shelf_model.h',
'shelf/shelf_model_observer.h',
'shelf/shelf_navigator.cc',
'shelf/shelf_navigator.h',
'shelf/shelf_tooltip_manager.cc',
'shelf/shelf_tooltip_manager.h',
'shelf/shelf_types.h',
'shelf/shelf_util.cc',
'shelf/shelf_util.h',
'shelf/shelf_view.cc',
'shelf/shelf_view.h',
'shelf/shelf_widget.cc',
'shelf/shelf_widget.h',
'shelf/shelf_window_watcher.cc',
'shelf/shelf_window_watcher.h',
'shelf/shelf_window_watcher_item_delegate.cc',
'shelf/shelf_window_watcher_item_delegate.h',
'shell.cc',
'shell.h',
'shell_delegate.h',
'shell_factory.h',
'shell_window_ids.h',
'system/bluetooth/bluetooth_observer.h',
'system/bluetooth/tray_bluetooth.cc',
'system/bluetooth/tray_bluetooth.h',
'system/brightness_control_delegate.h',
'system/chromeos/audio/tray_audio.cc',
'system/chromeos/audio/tray_audio.h',
'system/chromeos/enterprise/enterprise_domain_observer.h',
'system/chromeos/brightness/brightness_controller_chromeos.cc',
'system/chromeos/brightness/brightness_controller_chromeos.h',
'system/chromeos/brightness/tray_brightness.cc',
'system/chromeos/brightness/tray_brightness.h',
'system/chromeos/enterprise/tray_enterprise.h',
'system/chromeos/enterprise/tray_enterprise.cc',
'system/chromeos/keyboard_brightness_controller.cc',
'system/chromeos/keyboard_brightness_controller.h',
'system/chromeos/label_tray_view.h',
'system/chromeos/label_tray_view.cc',
'system/chromeos/managed/tray_locally_managed_user.h',
'system/chromeos/managed/tray_locally_managed_user.cc',
'system/chromeos/network/network_connect.cc',
'system/chromeos/network/network_connect.h',
'system/chromeos/network/network_detailed_view.h',
'system/chromeos/network/network_icon.cc',
'system/chromeos/network/network_icon.h',
'system/chromeos/network/network_icon_animation.cc',
'system/chromeos/network/network_icon_animation.h',
'system/chromeos/network/network_icon_animation_observer.h',
'system/chromeos/network/network_observer.h',
'system/chromeos/network/network_state_list_detailed_view.cc',
'system/chromeos/network/network_state_list_detailed_view.h',
'system/chromeos/network/network_state_notifier.cc',
'system/chromeos/network/network_state_notifier.h',
'system/chromeos/network/tray_network.cc',
'system/chromeos/network/tray_network.h',
'system/chromeos/network/tray_network_state_observer.cc',
'system/chromeos/network/tray_network_state_observer.h',
'system/chromeos/network/tray_sms.cc',
'system/chromeos/network/tray_sms.h',
'system/chromeos/network/tray_vpn.cc',
'system/chromeos/network/tray_vpn.h',
'system/chromeos/power/power_event_observer.cc',
'system/chromeos/power/power_event_observer.h',
'system/chromeos/power/power_status.cc',
'system/chromeos/power/power_status.h',
'system/chromeos/power/power_status_view.cc',
'system/chromeos/power/power_status_view.h',
'system/chromeos/power/tray_power.cc',
'system/chromeos/power/tray_power.h',
'system/chromeos/power/user_activity_notifier.cc',
'system/chromeos/power/user_activity_notifier.h',
'system/chromeos/power/video_activity_notifier.cc',
'system/chromeos/power/video_activity_notifier.h',
'system/chromeos/screen_security/screen_capture_observer.h',
'system/chromeos/screen_security/screen_capture_tray_item.cc',
'system/chromeos/screen_security/screen_capture_tray_item.h',
'system/chromeos/screen_security/screen_share_observer.h',
'system/chromeos/screen_security/screen_share_tray_item.cc',
'system/chromeos/screen_security/screen_share_tray_item.h',
'system/chromeos/screen_security/screen_tray_item.cc',
'system/chromeos/screen_security/screen_tray_item.h',
'system/chromeos/settings/tray_settings.cc',
'system/chromeos/settings/tray_settings.h',
'system/chromeos/system_clock_observer.cc',
'system/chromeos/system_clock_observer.h',
'system/chromeos/tray_display.cc',
'system/chromeos/tray_display.h',
'system/chromeos/tray_tracing.cc',
'system/chromeos/tray_tracing.h',
'system/date/clock_observer.h',
'system/date/date_view.cc',
'system/date/date_view.h',
'system/date/tray_date.cc',
'system/date/tray_date.h',
'system/drive/drive_observer.h',
'system/drive/tray_drive.cc',
'system/drive/tray_drive.h',
'system/ime/ime_observer.h',
'system/ime/tray_ime.cc',
'system/ime/tray_ime.h',
'system/keyboard_brightness/keyboard_brightness_control_delegate.h',
'system/locale/locale_notification_controller.cc',
'system/locale/locale_notification_controller.h',
'system/logout_button/logout_button_observer.h',
'system/logout_button/logout_button_tray.cc',
'system/logout_button/logout_button_tray.h',
'system/monitor/tray_monitor.cc',
'system/monitor/tray_monitor.h',
'system/session_length_limit/session_length_limit_observer.h',
'system/session_length_limit/tray_session_length_limit.cc',
'system/session_length_limit/tray_session_length_limit.h',
'system/status_area_widget.cc',
'system/status_area_widget.h',
'system/status_area_widget_delegate.cc',
'system/status_area_widget_delegate.h',
'system/system_notifier.cc',
'system/system_notifier.h',
'system/tray/actionable_view.cc',
'system/tray/actionable_view.h',
'system/tray/default_system_tray_delegate.cc',
'system/tray/default_system_tray_delegate.h',
'system/tray/fixed_sized_image_view.cc',
'system/tray/fixed_sized_image_view.h',
'system/tray/fixed_sized_scroll_view.cc',
'system/tray/fixed_sized_scroll_view.h',
'system/tray/hover_highlight_view.cc',
'system/tray/hover_highlight_view.h',
'system/tray/special_popup_row.cc',
'system/tray/special_popup_row.h',
'system/tray/system_tray.cc',
'system/tray/system_tray.h',
'system/tray/system_tray_bubble.cc',
'system/tray/system_tray_bubble.h',
'system/tray/system_tray_delegate.cc',
'system/tray/system_tray_delegate.h',
'system/tray/system_tray_item.cc',
'system/tray/system_tray_item.h',
'system/tray/system_tray_notifier.cc',
'system/tray/system_tray_notifier.h',
'system/tray/throbber_view.cc',
'system/tray/throbber_view.h',
'system/tray/tray_background_view.cc',
'system/tray/tray_background_view.h',
'system/tray/tray_bar_button_with_title.cc',
'system/tray/tray_bar_button_with_title.h',
'system/tray/tray_bubble_wrapper.cc',
'system/tray/tray_bubble_wrapper.h',
'system/tray/tray_constants.cc',
'system/tray/tray_constants.h',
'system/tray/tray_details_view.cc',
'system/tray/tray_details_view.h',
'system/tray/tray_empty.cc',
'system/tray/tray_empty.h',
'system/tray/tray_event_filter.cc',
'system/tray/tray_event_filter.h',
'system/tray/tray_image_item.cc',
'system/tray/tray_image_item.h',
'system/tray/tray_item_more.cc',
'system/tray/tray_item_more.h',
'system/tray/tray_item_view.cc',
'system/tray/tray_item_view.h',
'system/tray/tray_notification_view.cc',
'system/tray/tray_notification_view.h',
'system/tray/tray_popup_header_button.cc',
'system/tray/tray_popup_header_button.h',
'system/tray/tray_popup_label_button.cc',
'system/tray/tray_popup_label_button.cc',
'system/tray/tray_popup_label_button.h',
'system/tray/tray_popup_label_button_border.cc',
'system/tray/tray_popup_label_button_border.h',
'system/tray/tray_utils.cc',
'system/tray/tray_utils.h',
'system/tray/view_click_listener.h',
'system/tray_accessibility.cc',
'system/tray_accessibility.h',
'system/tray_caps_lock.cc',
'system/tray_caps_lock.h',
'system/tray_update.cc',
'system/tray_update.h',
'system/user/login_status.cc',
'system/user/login_status.h',
'system/user/tray_user.cc',
'system/user/tray_user.h',
'system/user/tray_user_separator.cc',
'system/user/tray_user_separator.h',
'system/user/update_observer.h',
'system/user/user_observer.h',
'system/web_notification/web_notification_tray.cc',
'system/web_notification/web_notification_tray.h',
'touch/touch_hud_debug.cc',
'touch/touch_hud_debug.h',
'touch/touch_hud_projection.cc',
'touch/touch_hud_projection.h',
'touch/touch_observer_hud.cc',
'touch/touch_observer_hud.h',
'touch/touch_uma.cc',
'touch/touch_uma.h',
'volume_control_delegate.h',
'wm/app_list_controller.cc',
'wm/app_list_controller.h',
'wm/always_on_top_controller.cc',
'wm/always_on_top_controller.h',
'wm/ash_native_cursor_manager.cc',
'wm/ash_native_cursor_manager.h',
'wm/ash_focus_rules.cc',
'wm/ash_focus_rules.h',
'wm/base_layout_manager.cc',
'wm/base_layout_manager.h',
'wm/boot_splash_screen_chromeos.cc',
'wm/boot_splash_screen_chromeos.h',
'wm/caption_buttons/alternate_frame_size_button.cc',
'wm/caption_buttons/alternate_frame_size_button.h',
'wm/caption_buttons/alternate_frame_size_button_delegate.h',
'wm/caption_buttons/bubble_contents_button_row.cc',
'wm/caption_buttons/bubble_contents_button_row.h',
'wm/caption_buttons/caption_button_types.h',
'wm/caption_buttons/frame_caption_button.cc',
'wm/caption_buttons/frame_caption_button.h',
'wm/caption_buttons/frame_caption_button_container_view.cc',
'wm/caption_buttons/frame_caption_button_container_view.h',
'wm/caption_buttons/frame_maximize_button.cc',
'wm/caption_buttons/frame_maximize_button.h',
'wm/caption_buttons/frame_maximize_button_observer.h',
'wm/caption_buttons/maximize_bubble_controller.cc',
'wm/caption_buttons/maximize_bubble_controller.h',
'wm/caption_buttons/maximize_bubble_controller_bubble.cc',
'wm/caption_buttons/maximize_bubble_controller_bubble.h',
'wm/coordinate_conversion.cc',
'wm/coordinate_conversion.h',
'wm/custom_frame_view_ash.cc',
'wm/custom_frame_view_ash.h',
'wm/default_window_resizer.cc',
'wm/default_window_resizer.h',
'wm/dock/docked_window_layout_manager.cc',
'wm/dock/docked_window_layout_manager.h',
'wm/dock/docked_window_layout_manager_observer.h',
'wm/dock/docked_window_resizer.cc',
'wm/dock/docked_window_resizer.h',
'wm/drag_window_controller.cc',
'wm/drag_window_controller.h',
'wm/drag_window_resizer.cc',
'wm/drag_window_resizer.h',
'wm/event_client_impl.cc',
'wm/event_client_impl.h',
'wm/event_rewriter_event_filter.cc',
'wm/event_rewriter_event_filter.h',
'wm/frame_border_hit_test_controller.cc',
'wm/frame_border_hit_test_controller.h',
'wm/header_painter.cc',
'wm/header_painter.h',
'wm/gestures/long_press_affordance_handler.cc',
'wm/gestures/long_press_affordance_handler.h',
'wm/gestures/overview_gesture_handler.cc',
'wm/gestures/overview_gesture_handler.h',
'wm/gestures/shelf_gesture_handler.cc',
'wm/gestures/shelf_gesture_handler.h',
'wm/gestures/system_pinch_handler.cc',
'wm/gestures/system_pinch_handler.h',
'wm/gestures/tray_gesture_handler.cc',
'wm/gestures/tray_gesture_handler.h',
'wm/gestures/two_finger_drag_handler.cc',
'wm/gestures/two_finger_drag_handler.h',
'wm/image_cursors.cc',
'wm/image_cursors.h',
'wm/immersive_fullscreen_controller.cc',
'wm/immersive_fullscreen_controller.h',
'wm/immersive_revealed_lock.cc',
'wm/immersive_revealed_lock.h',
'wm/lock_state_controller.cc',
'wm/lock_state_controller.h',
'wm/lock_state_observer.h',
'wm/mru_window_tracker.cc',
'wm/mru_window_tracker.h',
'wm/overlay_event_filter.cc',
'wm/overlay_event_filter.h',
'wm/overview/scoped_transform_overview_window.cc',
'wm/overview/scoped_transform_overview_window.h',
'wm/overview/scoped_window_copy.cc',
'wm/overview/scoped_window_copy.h',
'wm/overview/window_overview.cc',
'wm/overview/window_overview.h',
'wm/overview/window_selector.cc',
'wm/overview/window_selector.h',
'wm/overview/window_selector_controller.cc',
'wm/overview/window_selector_controller.h',
'wm/overview/window_selector_delegate.h',
'wm/overview/window_selector_item.cc',
'wm/overview/window_selector_item.h',
'wm/overview/window_selector_panels.cc',
'wm/overview/window_selector_panels.h',
'wm/overview/window_selector_window.cc',
'wm/overview/window_selector_window.h',
'wm/panels/panel_frame_view.cc',
'wm/panels/panel_frame_view.h',
'wm/panels/panel_layout_manager.cc',
'wm/panels/panel_layout_manager.h',
'wm/panels/panel_window_event_handler.cc',
'wm/panels/panel_window_event_handler.h',
'wm/panels/panel_window_resizer.cc',
'wm/panels/panel_window_resizer.h',
'wm/partial_screenshot_view.cc',
'wm/partial_screenshot_view.h',
'wm/power_button_controller.cc',
'wm/power_button_controller.h',
'wm/resize_shadow.cc',
'wm/resize_shadow.h',
'wm/resize_shadow_controller.cc',
'wm/resize_shadow_controller.h',
'wm/root_window_layout_manager.cc',
'wm/root_window_layout_manager.h',
'wm/screen_dimmer.cc',
'wm/screen_dimmer.h',
'wm/session_state_animator.cc',
'wm/session_state_animator.h',
'wm/solo_window_tracker.cc',
'wm/solo_window_tracker.h',
'wm/stacking_controller.cc',
'wm/stacking_controller.h',
'wm/status_area_layout_manager.cc',
'wm/status_area_layout_manager.h',
'wm/sticky_keys.cc',
'wm/sticky_keys.h',
'wm/system_background_controller.cc',
'wm/system_background_controller.h',
'wm/system_gesture_event_filter.cc',
'wm/system_gesture_event_filter.h',
'wm/system_modal_container_event_filter.cc',
'wm/system_modal_container_event_filter.h',
'wm/system_modal_container_event_filter_delegate.h',
'wm/system_modal_container_layout_manager.cc',
'wm/system_modal_container_layout_manager.h',
'wm/toplevel_window_event_handler.cc',
'wm/toplevel_window_event_handler.h',
'wm/user_activity_detector.cc',
'wm/user_activity_detector.h',
'wm/user_activity_observer.h',
'wm/video_detector.cc',
'wm/video_detector.h',
'wm/window_animations.cc',
'wm/window_animations.h',
'wm/window_cycle_controller.cc',
'wm/window_cycle_controller.h',
'wm/window_cycle_list.cc',
'wm/window_cycle_list.h',
'wm/window_positioner.cc',
'wm/window_positioner.h',
'wm/window_state.cc',
'wm/window_state.h',
'wm/window_state_delegate.cc',
'wm/window_state_delegate.h',
'wm/window_state_observer.h',
'wm/window_properties.cc',
'wm/window_properties.h',
'wm/window_resizer.cc',
'wm/window_resizer.h',
'wm/window_util.cc',
'wm/window_util.h',
'wm/wm_types.cc',
'wm/wm_types.h',
'wm/workspace_controller.cc',
'wm/workspace_controller.h',
'wm/workspace/magnetism_matcher.cc',
'wm/workspace/magnetism_matcher.h',
'wm/workspace/multi_window_resize_controller.cc',
'wm/workspace/multi_window_resize_controller.h',
'wm/workspace/phantom_window_controller.cc',
'wm/workspace/phantom_window_controller.h',
'wm/workspace/snap_sizer.cc',
'wm/workspace/snap_sizer.h',
'wm/workspace/snap_types.h',
'wm/workspace/workspace_event_handler.cc',
'wm/workspace/workspace_event_handler.h',
'wm/workspace/workspace_layout_manager.cc',
'wm/workspace/workspace_layout_manager.h',
'wm/workspace/workspace_types.h',
'wm/workspace/workspace_window_resizer.cc',
'wm/workspace/workspace_window_resizer.h',
],
'conditions': [
['OS=="win"', {
'sources/': [
['exclude', 'host/root_window_host_factory.cc'],
['exclude', 'wm/sticky_keys.cc'],
['exclude', 'wm/sticky_keys.h'],
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [ 4267, ],
}],
['OS!="linux"', {
'sources/': [
['exclude', 'system/monitor/tray_monitor.cc'],
['exclude', 'system/monitor/tray_monitor.h'],
],
}],
['use_x11!=1', {
'sources/': [
['exclude', 'display/display_change_observer_chromeos.cc'],
['exclude', 'display/display_change_observer_chromeos.h'],
['exclude', 'display/display_error_observer_chromeos.cc'],
['exclude', 'display/display_error_observer_chromeos.h'],
],
}],
['chromeos==1', {
'dependencies': [
'../chromeos/chromeos.gyp:chromeos',
# Ash #includes power_supply_properties.pb.h directly.
'../chromeos/chromeos.gyp:power_manager_proto',
],
}, { # else: chromeos!=1
'sources/': [
['exclude', '/chromeos/'],
['exclude', 'display/output_configurator_animation.cc'],
['exclude', 'display/output_configurator_animation.h'],
],
}],
],
},
{
'target_name': 'ash_test_support',
'type': 'static_library',
'dependencies': [
'../skia/skia.gyp:skia',
'../testing/gtest.gyp:gtest',
'../ui/app_list/app_list.gyp:app_list_test_support',
'ash',
'ash_resources',
],
'sources': [
'shell/toplevel_window.cc',
'shell/toplevel_window.h',
'shell/keyboard_controller_proxy_stub.cc',
'shell/keyboard_controller_proxy_stub.h',
'test/app_list_controller_test_api.cc',
'test/app_list_controller_test_api.h',
'test/ash_test_base.cc',
'test/ash_test_base.h',
'test/ash_test_helper.cc',
'test/ash_test_helper.h',
'test/cursor_manager_test_api.cc',
'test/cursor_manager_test_api.h',
'test/display_manager_test_api.cc',
'test/display_manager_test_api.h',
'test/launcher_test_api.cc',
'test/launcher_test_api.h',
'test/mirror_window_test_api.cc',
'test/mirror_window_test_api.h',
'test/overflow_bubble_view_test_api.cc',
'test/overflow_bubble_view_test_api.h',
'test/shelf_item_delegate_manager_test_api.cc',
'test/shelf_item_delegate_manager_test_api.h',
'test/shelf_view_test_api.cc',
'test/shelf_view_test_api.h',
'test/shell_test_api.cc',
'test/shell_test_api.h',
'test/test_activation_delegate.cc',
'test/test_activation_delegate.h',
'test/test_screenshot_delegate.cc',
      'test/test_screenshot_delegate.h',
'test/test_session_state_delegate.cc',
'test/test_session_state_delegate.h',
'test/test_shelf_delegate.cc',
'test/test_shelf_delegate.h',
'test/test_shelf_item_delegate.cc',
'test/test_shelf_item_delegate.h',
'test/test_shell_delegate.cc',
'test/test_shell_delegate.h',
'test/test_suite.cc',
'test/test_suite.h',
'test/test_suite_init.h',
'test/test_suite_init.mm',
'test/test_system_tray_delegate.cc',
'test/test_system_tray_delegate.h',
'test/test_user_wallpaper_delegate.cc',
'test/test_user_wallpaper_delegate.h',
'test/ui_controls_factory_ash.cc',
'test/ui_controls_factory_ash.h',
],
'conditions': [
['OS=="win"', {
'dependencies': [
'../ipc/ipc.gyp:ipc',
'../ui/metro_viewer/metro_viewer.gyp:metro_viewer_messages',
'../win8/win8.gyp:metro_viewer',
'../win8/win8.gyp:test_support_win8',
'../win8/win8_tests.gyp:test_registrar',
],
'sources': [
'test/test_metro_viewer_process_host.cc',
'test/test_metro_viewer_process_host.h',
],
}],
],
},
{
'target_name': 'ash_unittests',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:test_support_base',
'../chrome/chrome_resources.gyp:packed_resources',
'../content/content.gyp:content_browser',
'../content/content_shell_and_tests.gyp:test_support_content',
'../skia/skia.gyp:skia',
'../testing/gtest.gyp:gtest',
'../third_party/icu/icu.gyp:icui18n',
'../third_party/icu/icu.gyp:icuuc',
'../ui/app_list/app_list.gyp:app_list',
'../ui/aura/aura.gyp:aura',
'../ui/aura/aura.gyp:aura_test_support',
'../ui/compositor/compositor.gyp:compositor',
'../ui/events/events.gyp:events',
'../ui/events/events.gyp:events_test_support',
'../ui/gfx/gfx.gyp:gfx',
'../ui/keyboard/keyboard.gyp:keyboard',
'../ui/message_center/message_center.gyp:message_center',
'../ui/message_center/message_center.gyp:message_center_test_support',
'../ui/resources/ui_resources.gyp:ui_resources',
'../ui/ui.gyp:ui',
'../ui/ui_unittests.gyp:ui_test_support',
'../ui/views/views.gyp:views',
'../ui/views/views.gyp:views_examples_with_content_lib',
'../ui/views/views.gyp:views_test_support',
'../ui/views/views.gyp:views_with_content_test_support',
'../ui/web_dialogs/web_dialogs.gyp:web_dialogs_test_support',
'../url/url.gyp:url_lib',
'ash_strings.gyp:ash_strings',
'ash',
'ash_resources',
'ash_test_support',
],
'sources': [
'../ui/compositor/test/layer_animator_test_controller.cc',
'../ui/compositor/test/layer_animator_test_controller.h',
'../ui/views/test/test_views_delegate.cc',
'../ui/views/test/test_views_delegate.h',
'accelerators/accelerator_commands_unittest.cc',
'accelerators/accelerator_controller_unittest.cc',
'accelerators/accelerator_filter_unittest.cc',
'accelerators/accelerator_table_unittest.cc',
'accelerators/nested_dispatcher_controller_unittest.cc',
'autoclick/autoclick_unittest.cc',
'desktop_background/desktop_background_controller_unittest.cc',
'desktop_background/wallpaper_resizer_unittest.cc',
'dip_unittest.cc',
'display/display_change_observer_chromeos_unittest.cc',
'display/display_controller_unittest.cc',
'display/display_error_observer_chromeos_unittest.cc',
'display/display_info_unittest.cc',
'display/display_manager_unittest.cc',
'display/mirror_window_controller_unittest.cc',
'display/virtual_keyboard_window_controller_unittest.cc',
'display/mouse_cursor_event_filter_unittest.cc',
'display/resolution_notification_controller_unittest.cc',
'display/root_window_transformers_unittest.cc',
'display/screen_position_controller_unittest.cc',
'drag_drop/drag_drop_controller_unittest.cc',
'drag_drop/drag_drop_tracker_unittest.cc',
'extended_desktop_unittest.cc',
'focus_cycler_unittest.cc',
'keyboard_overlay/keyboard_overlay_delegate_unittest.cc',
'keyboard_overlay/keyboard_overlay_view_unittest.cc',
'launcher/launcher_unittest.cc',
'magnifier/magnification_controller_unittest.cc',
'root_window_controller_unittest.cc',
'screen_ash_unittest.cc',
'screensaver/screensaver_view_unittest.cc',
'session_state_delegate_stub.cc',
'session_state_delegate_stub.h',
'shelf/scoped_observer_with_duplicated_sources_unittest.cc',
'shelf/shelf_layout_manager_unittest.cc',
'shelf/shelf_model_unittest.cc',
'shelf/shelf_navigator_unittest.cc',
'shelf/shelf_tooltip_manager_unittest.cc',
'shelf/shelf_view_unittest.cc',
'shelf/shelf_widget_unittest.cc',
'shelf/shelf_window_watcher_unittest.cc',
'shell/app_list.cc',
'shell/bubble.cc',
'shell/context_menu.cc',
'shell/context_menu.h',
'shell/lock_view.cc',
'shell/panel_window.cc',
'shell/panel_window.h',
'shell/shelf_delegate_impl.cc',
'shell/shelf_delegate_impl.h',
'shell/shell_delegate_impl.cc',
'shell/shell_delegate_impl.h',
'shell/widgets.cc',
'shell/window_type_launcher.cc',
'shell/window_type_launcher.h',
'shell/window_watcher.cc',
'shell/window_watcher.h',
'shell/window_watcher_shelf_item_delegate.cc',
'shell/window_watcher_shelf_item_delegate.h',
'shell/window_watcher_unittest.cc',
'shell_unittest.cc',
'system/chromeos/managed/tray_locally_managed_user_unittest.cc',
'system/chromeos/network/network_state_notifier_unittest.cc',
'system/chromeos/power/power_event_observer_unittest.cc',
'system/chromeos/power/power_status_unittest.cc',
'system/chromeos/power/tray_power_unittest.cc',
'system/chromeos/screen_security/screen_tray_item_unittest.cc',
'system/chromeos/tray_display_unittest.cc',
'system/date/date_view_unittest.cc',
'system/session_length_limit/tray_session_length_limit_unittest.cc',
'system/tray/system_tray_unittest.cc',
'system/tray/tray_details_view_unittest.cc',
'system/user/tray_user_unittest.cc',
'system/web_notification/web_notification_tray_unittest.cc',
'test/ash_test_helper_unittest.cc',
'test/ash_unittests.cc',
'tooltips/tooltip_controller_unittest.cc',
'touch/touch_observer_hud_unittest.cc',
'wm/app_list_controller_unittest.cc',
'wm/ash_native_cursor_manager_unittest.cc',
'wm/base_layout_manager_unittest.cc',
'wm/caption_buttons/alternate_frame_size_button_unittest.cc',
'wm/caption_buttons/frame_caption_button_container_view_unittest.cc',
'wm/caption_buttons/frame_maximize_button_unittest.cc',
'wm/dock/docked_window_layout_manager_unittest.cc',
'wm/dock/docked_window_resizer_unittest.cc',
'wm/drag_window_resizer_unittest.cc',
'wm/gestures/overview_gesture_handler_unittest.cc',
'wm/header_painter_unittest.cc',
'wm/immersive_fullscreen_controller_unittest.cc',
'wm/lock_state_controller_unittest.cc',
'wm/mru_window_tracker_unittest.cc',
'wm/overview/window_selector_unittest.cc',
'wm/panels/panel_layout_manager_unittest.cc',
'wm/panels/panel_window_resizer_unittest.cc',
'wm/partial_screenshot_view_unittest.cc',
'wm/resize_shadow_and_cursor_unittest.cc',
'wm/screen_dimmer_unittest.cc',
'wm/solo_window_tracker_unittest.cc',
'wm/stacking_controller_unittest.cc',
'wm/sticky_keys_unittest.cc',
'wm/system_gesture_event_filter_unittest.cc',
'wm/system_modal_container_layout_manager_unittest.cc',
'wm/toplevel_window_event_handler_unittest.cc',
'wm/user_activity_detector_unittest.cc',
'wm/video_detector_unittest.cc',
'wm/window_animations_unittest.cc',
'wm/window_cycle_controller_unittest.cc',
'wm/window_manager_unittest.cc',
'wm/window_modality_controller_unittest.cc',
'wm/window_positioner_unittest.cc',
'wm/window_util_unittest.cc',
'wm/workspace/magnetism_matcher_unittest.cc',
'wm/workspace/multi_window_resize_controller_unittest.cc',
'wm/workspace/snap_sizer_unittest.cc',
'wm/workspace/workspace_event_handler_test_helper.cc',
'wm/workspace/workspace_event_handler_test_helper.h',
'wm/workspace/workspace_event_handler_unittest.cc',
'wm/workspace/workspace_layout_manager_unittest.cc',
'wm/workspace/workspace_window_resizer_unittest.cc',
'wm/workspace_controller_test_helper.cc',
'wm/workspace_controller_test_helper.h',
'wm/workspace_controller_unittest.cc',
],
'conditions': [
['OS=="win"', {
'sources/': [
# TODO(zork): fix this test to build on Windows. See: crosbug.com/26906
['exclude', 'focus_cycler_unittest.cc'],
# All tests for multiple displays: not supported on Windows Ash.
['exclude', 'accelerators/nested_dispatcher_controller_unittest.cc'],
['exclude', 'wm/drag_window_resizer_unittest.cc'],
# Can't resize on Windows Ash. http://crbug.com/165962
['exclude', 'ash_root_window_transformer_unittest.cc'],
['exclude', 'magnifier/magnification_controller_unittest.cc'],
['exclude', 'wm/workspace/workspace_window_resizer_unittest.cc'],
['exclude', 'wm/sticky_keys_unittest.cc'],
['exclude', 'autoclick/autoclick_unittest.cc'],
],
'sources': [
'<(SHARED_INTERMEDIATE_DIR)/ui/ui_resources/ui_unscaled_resources.rc',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [ 4267, ],
}],
['OS=="win" and win_use_allocator_shim==1', {
'dependencies': [
'../base/allocator/allocator.gyp:allocator',
],
}],
['use_x11!=1', {
'sources/': [
['exclude', 'display/display_change_observer_chromeos_unittest.cc'],
['exclude', 'display/display_error_observer_chromeos_unittest.cc'],
],
}],
['chromeos==1', {
'dependencies': [
'../chromeos/chromeos.gyp:power_manager_proto',
],
'sources': [
'first_run/first_run_helper_unittest.cc',
],
}],
['OS=="linux" and component=="shared_library" and linux_use_tcmalloc==1', {
'dependencies': [
'<(DEPTH)/base/allocator/allocator.gyp:allocator',
],
'link_settings': {
'ldflags': ['-rdynamic'],
},
}],
],
},
{
'target_name': 'ash_shell',
'type': 'executable',
'dependencies': [
'ash_strings.gyp:ash_strings',
'../base/base.gyp:base',
'../base/base.gyp:base_i18n',
'../chrome/chrome_resources.gyp:packed_resources',
'../content/content_shell_and_tests.gyp:content_shell_lib',
'../content/content.gyp:content',
'../skia/skia.gyp:skia',
'../third_party/icu/icu.gyp:icui18n',
'../third_party/icu/icu.gyp:icuuc',
'../ui/app_list/app_list.gyp:app_list',
'../ui/aura/aura.gyp:aura',
'../ui/compositor/compositor.gyp:compositor',
'../ui/events/events.gyp:events',
'../ui/gfx/gfx.gyp:gfx',
'../ui/keyboard/keyboard.gyp:keyboard',
'../ui/message_center/message_center.gyp:message_center',
'../ui/resources/ui_resources.gyp:ui_resources',
'../ui/ui.gyp:ui',
'../ui/views/views.gyp:views',
'../ui/views/views.gyp:views_examples_lib',
'../ui/views/views.gyp:views_examples_with_content_lib',
'../ui/views/views.gyp:views_test_support',
'ash',
'ash_resources',
],
'sources': [
'session_state_delegate_stub.cc',
'session_state_delegate_stub.h',
'shell/app_list.cc',
'shell/bubble.cc',
'shell/content_client/shell_browser_main_parts.cc',
'shell/content_client/shell_browser_main_parts.h',
'shell/content_client/shell_content_browser_client.cc',
'shell/content_client/shell_content_browser_client.h',
'shell/content_client/shell_main_delegate.cc',
'shell/content_client/shell_main_delegate.h',
'shell/context_menu.cc',
'shell/context_menu.h',
'shell/example_factory.h',
'shell/keyboard_controller_proxy_stub.cc',
'shell/keyboard_controller_proxy_stub.h',
'shell/lock_view.cc',
'shell/panel_window.cc',
'shell/panel_window.h',
'shell/shelf_delegate_impl.cc',
'shell/shelf_delegate_impl.h',
'shell/shell_delegate_impl.cc',
'shell/shell_delegate_impl.h',
'shell/shell_main.cc',
'shell/shell_main_parts.cc',
'shell/shell_main_parts.h',
'shell/toplevel_window.cc',
'shell/toplevel_window.h',
'shell/widgets.cc',
'shell/window_type_launcher.cc',
'shell/window_type_launcher.h',
'shell/window_watcher.cc',
'shell/window_watcher.h',
'shell/window_watcher_shelf_item_delegate.cc',
'shell/window_watcher_shelf_item_delegate.h',
'../content/app/startup_helper_win.cc',
'../ui/views/test/test_views_delegate.cc',
],
'conditions': [
['OS=="win"', {
'msvs_settings': {
'VCLinkerTool': {
'SubSystem': '2', # Set /SUBSYSTEM:WINDOWS
},
},
'dependencies': [
'../sandbox/sandbox.gyp:sandbox',
],
}],
],
},
],
}
| 42.995069 | 94 | 0.653187 |
4a21bb1cab0cb48a487d56870f636ff0a505978c | 4,952 | py | Python | models/unet/run_unet.py | divelab/mri | e181b446acfc6f9ac3f42657f710dd583e77d1aa | [
"MIT"
] | 1 | 2019-04-01T05:16:37.000Z | 2019-04-01T05:16:37.000Z | models/unet/run_unet.py | jtamir/fastMRI | e9b97be6949ec656d01d5d89f0ceea1d25ac4ba8 | [
"MIT"
] | null | null | null | models/unet/run_unet.py | jtamir/fastMRI | e9b97be6949ec656d01d5d89f0ceea1d25ac4ba8 | [
"MIT"
] | 1 | 2018-12-13T17:17:23.000Z | 2018-12-13T17:17:23.000Z | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import pathlib
import sys
from collections import defaultdict
import numpy as np
import torch
from torch.utils.data import DataLoader
from common.args import Args
from common.utils import save_reconstructions
from data import transforms
from data.mri_data import SliceData
from models.unet.unet_model import UnetModel
class DataTransform:
"""
Data Transformer for running U-Net models on a test dataset.
"""
def __init__(self, resolution, which_challenge):
"""
Args:
resolution (int): Resolution of the image.
which_challenge (str): Either "singlecoil" or "multicoil" denoting the dataset.
"""
if which_challenge not in ('singlecoil', 'multicoil'):
raise ValueError(f'Challenge should either be "singlecoil" or "multicoil"')
self.resolution = resolution
self.which_challenge = which_challenge
def __call__(self, kspace, target, attrs, fname, slice):
"""
Args:
kspace (numpy.Array): k-space measurements
target (numpy.Array): Target image
attrs (dict): Acquisition related information stored in the HDF5 object
fname (pathlib.Path): Path to the input file
slice (int): Serial number of the slice
Returns:
(tuple): tuple containing:
image (torch.Tensor): Normalized zero-filled input image
mean (float): Mean of the zero-filled image
std (float): Standard deviation of the zero-filled image
fname (pathlib.Path): Path to the input file
slice (int): Serial number of the slice
"""
masked_kspace = transforms.to_tensor(kspace)
# Inverse Fourier Transform to get zero filled solution
image = transforms.ifft2(masked_kspace)
# Crop input image
image = transforms.complex_center_crop(image, (self.resolution, self.resolution))
# Absolute value
image = transforms.complex_abs(image)
# Apply Root-Sum-of-Squares if multicoil data
if self.which_challenge == 'multicoil':
image = transforms.root_sum_of_squares(image)
# Normalize input
image, mean, std = transforms.normalize_instance(image)
image = image.clamp(-6, 6)
return image, mean, std, fname, slice
def create_data_loaders(args):
data = SliceData(
root=args.data_path / f'{args.challenge}_{args.data_split}',
transform=DataTransform(args.resolution, args.challenge),
sample_rate=1.,
challenge=args.challenge
)
data_loader = DataLoader(
dataset=data,
batch_size=args.batch_size,
num_workers=4,
pin_memory=True,
)
return data_loader
def load_model(checkpoint_file):
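    # The checkpoint stores the original training args; reuse them so the U-Net is
    # rebuilt with matching hyper-parameters before loading the saved weights.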
checkpoint = torch.load(checkpoint_file)
args = checkpoint['args']
model = UnetModel(1, 1, args.num_chans, args.num_pools, args.drop_prob).to(args.device)
if args.data_parallel:
model = torch.nn.DataParallel(model)
model.load_state_dict(checkpoint['model'])
return model
def run_unet(args, model, data_loader):
model.eval()
reconstructions = defaultdict(list)
with torch.no_grad():
for (input, mean, std, fnames, slices) in data_loader:
input = input.unsqueeze(1).to(args.device)
recons = model(input).to('cpu').squeeze(1)
for i in range(recons.shape[0]):
recons[i] = recons[i] * std[i] + mean[i]
reconstructions[fnames[i]].append((slices[i].numpy(), recons[i].numpy()))
reconstructions = {
fname: np.stack([pred for _, pred in sorted(slice_preds)])
for fname, slice_preds in reconstructions.items()
}
return reconstructions
def main(args):
data_loader = create_data_loaders(args)
model = load_model(args.checkpoint)
reconstructions = run_unet(args, model, data_loader)
save_reconstructions(reconstructions, args.out_dir)
def create_arg_parser():
parser = Args()
parser.add_argument('--data-split', choices=['val', 'test'], required=True,
help='Which data partition to run on: "val" or "test"')
parser.add_argument('--checkpoint', type=pathlib.Path, required=True,
help='Path to the U-Net model')
parser.add_argument('--out-dir', type=pathlib.Path, required=True,
help='Path to save the reconstructions to')
parser.add_argument('--batch-size', default=16, type=int, help='Mini-batch size')
parser.add_argument('--device', type=str, default='cuda', help='Which device to run on')
return parser
if __name__ == '__main__':
args = create_arg_parser().parse_args(sys.argv[1:])
main(args)
| 35.884058 | 92 | 0.65206 |
4a21bd33954b88f5b64d265b7886632a8194efb2 | 1,706 | py | Python | openfmbsim/devices/conducting_equipment.py | garretfick/openfmb-device-simulator | d9065387d037723c1054d0fb3e12698f0435bb63 | [
"Apache-2.0"
] | 2 | 2019-09-24T20:21:19.000Z | 2021-04-17T09:17:13.000Z | openfmbsim/devices/conducting_equipment.py | garretfick/openfmb-device-simulator | d9065387d037723c1054d0fb3e12698f0435bb63 | [
"Apache-2.0"
] | 3 | 2019-08-12T15:57:15.000Z | 2021-05-28T03:10:58.000Z | openfmbsim/devices/conducting_equipment.py | garretfick/openfmb-device-simulator | d9065387d037723c1054d0fb3e12698f0435bb63 | [
"Apache-2.0"
] | 6 | 2019-07-29T13:39:16.000Z | 2021-05-02T00:56:26.000Z | # Copyright 2019 Smarter Grid Solutions
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for all devices that provide readings."""
from datetime import datetime
import threading
import uuid
from ..name_generator import make_random_name
class ConductingEquipment(object):
"""Defines a simple conducting equipment."""
def __init__(self, cond_equipment_mrid: uuid.UUID = None,
cond_equipment_name: str = None):
"""Construct a new instance of this conducting equipment.
:param cond_equipment_mrid: The MRID of the conducting unit.
:param cond_equipment_name: The name of the conducting unit.
"""
self.mrid = (cond_equipment_mrid
if cond_equipment_mrid is not None
else uuid.uuid4())
self.name = (cond_equipment_name
if cond_equipment_name is not None
else make_random_name())
self.last_update = datetime.utcnow()
self.lock = threading.Lock()
@property
def device_mrid(self) -> uuid.UUID:
"""Get the ID of the underlying device."""
return self.mrid
| 36.297872 | 75 | 0.665885 |
4a21bde0e00d58bbda13c730e4191f2fa5fc06f9 | 15,992 | py | Python | flexget/plugins/input/plex.py | blastcodem/Flexget | bcd2552e9c77187e2fd82a42e79a30ff05c065ec | [
"MIT"
] | null | null | null | flexget/plugins/input/plex.py | blastcodem/Flexget | bcd2552e9c77187e2fd82a42e79a30ff05c065ec | [
"MIT"
] | null | null | null | flexget/plugins/input/plex.py | blastcodem/Flexget | bcd2552e9c77187e2fd82a42e79a30ff05c065ec | [
"MIT"
] | null | null | null | """Plugin for plex media server (www.plexapp.com)."""
from xml.dom.minidom import parseString
import re
import logging
import os
from os.path import basename
from socket import gethostbyname
from string import find
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils import requests
log = logging.getLogger('plex')
class InputPlex(object):
"""
Uses a plex media server (www.plexapp.com) tv section as an input.
'section' Required parameter, numerical (/library/sections/<num>) or section name.
'selection' Can be set to different keys:
- all : Default
- unwatched :
- recentlyAdded :
- recentlyViewed :
- recentlyViewedShows : Series only.
'all' and 'recentlyViewedShows' will only produce a list of show names while the other three will produce
filename and download url.
'username' Myplex (http://my.plexapp.com) username, used to connect to shared PMS'.
'password' Myplex (http://my.plexapp.com) password, used to connect to shared PMS'.
'server' Host/IP of PMS to connect to.
'lowercase_title' Convert filename (title) to lower case.
'strip_non_alpha' Sanitize filename (title), stripping all non-alphanumeric letters.
                        Better to turn off in case of non-English titles.
'strip_year' Remove year from title, ex: Show Name (2012) 01x01 => Show Name 01x01.
Movies will have year added to their filename unless this is set.
'strip_parens' Remove information in parens from title, ex: Show Name (UK)(2012) 01x01 => Show Name 01x01.
'original_filename' Use filename stored in PMS instead of transformed name. lowercase_title and strip_year
will be ignored.
'unwatched_only' Request only unwatched media from PMS.
'fetch' What to download, can be set to the following values:
- file The file itself, default.
- art Series or movie art as configured in PMS
- cover Series cover for series, movie cover for movies.
- thumb Episode thumbnail, series only.
- season_cover Season cover, series only. If used in movies, movie cover will be set.
    Default parameters:
server : localhost
port : 32400
selection : all
lowercase_title : no
strip_non_alpha : yes
strip_year : yes
strip_parens : no
original_filename: no
unwatched_only : no
fetch : file
Example:
plex:
server: 192.168.1.23
section: 3
selection: recentlyAdded
fetch: series_art
"""
def validator(self):
from flexget import validator
config = validator.factory('dict')
config.accept('text', key='server')
config.accept('text', key='selection')
config.accept('integer', key='port')
config.accept('text', key='section', required=True)
config.accept('integer', key='section', required=True)
config.accept('text', key='username')
config.accept('text', key='password')
config.accept('boolean', key='lowercase_title')
config.accept('boolean', key='strip_non_alpha')
config.accept('boolean', key='strip_year')
config.accept('boolean', key='strip_parens')
config.accept('boolean', key='original_filename')
config.accept('boolean', key='unwatched_only')
config.accept('text', key='fetch')
return config
def prepare_config(self, config):
config.setdefault('server', '127.0.0.1')
config.setdefault('port', 32400)
config.setdefault('selection', 'all')
config.setdefault('username', '')
config.setdefault('password', '')
config.setdefault('lowercase_title', False)
config.setdefault('strip_non_alpha', True)
config.setdefault('strip_year', True)
config.setdefault('strip_parens', False)
config.setdefault('original_filename', False)
config.setdefault('unwatched_only', False)
config.setdefault('fetch', 'file')
config['plexserver'] = config['server']
config = self.plex_format_server(config)
return config
def plex_get_globalaccesstoken(self, config):
header = {'X-Plex-Client-Identifier': 'flexget'}
try:
r = requests.post('https://my.plexapp.com/users/sign_in.xml',
auth=(config['username'], config['password']), headers=header)
except requests.RequestException as error:
raise plugin.PluginError('Could not log in to myplex! Error: %s' % error)
        if 'Invalid email' in r.text:
raise plugin.PluginError('Myplex: invalid username and/or password!')
dom = parseString(r.text)
globalaccesstoken = dom.getElementsByTagName('authentication-token')[0].firstChild.nodeValue
if not globalaccesstoken:
raise plugin.PluginError('Myplex: could not find a server!')
else:
log.debug('Myplex: Got global accesstoken: %s' % globalaccesstoken)
return globalaccesstoken
    def plex_get_accesstoken(self, config, globalaccesstoken=""):
accesstoken = ""
if not globalaccesstoken:
globalaccesstoken = self.plex_get_globalaccesstoken(config)
try:
r = requests.get("https://my.plexapp.com/pms/servers?X-Plex-Token=%s" % globalaccesstoken)
except requests.RequestException as e:
raise plugin.PluginError("Could not get servers from my.plexapp.com using "
"authentication-token: %s. (%s)" % (globalaccesstoken, e))
dom = parseString(r.text)
for node in dom.getElementsByTagName('Server'):
if node.getAttribute('address') == config['server']:
accesstoken = node.getAttribute('accessToken')
log.debug("Got plextoken: %s" % accesstoken)
if not accesstoken:
raise plugin.PluginError('Could not retrieve accesstoken for %s.' % config['server'])
else:
return accesstoken
def plex_format_server(self, config):
if gethostbyname(config['server']) != config['server']:
config['server'] = gethostbyname(config['server'])
return config
def plex_section_is_int(self, section):
return isinstance(section, int)
def on_task_input(self, task, config):
config = self.prepare_config(config)
accesstoken = ""
urlconfig = {}
urlappend = "?"
entries = []
data = {}
if config['unwatched_only'] and config['section'] != 'recentlyViewedShows' and config['section'] != 'all':
urlconfig['unwatched'] = '1'
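        # Credentials are only needed for remote/shared servers; fetch a myplex access
        # token and append it to every PMS request.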
if config['username'] and config['password'] and config['server'] != '127.0.0.1':
accesstoken = self.plex_get_accesstoken(config)
log.debug("Got accesstoken: %s" % accesstoken)
urlconfig['X-Plex-Token'] = accesstoken
for key in urlconfig:
urlappend += '%s=%s&' % (key, urlconfig[key])
if not self.plex_section_is_int(config['section']):
try:
path = "/library/sections/"
r = requests.get("http://%s:%d%s%s" %(config['plexserver'], config['port'], path, urlappend))
except requests.RequestException as e:
raise plugin.PluginError('Error retrieving source: %s' % e)
dom = parseString(r.text.encode("utf-8"))
for node in dom.getElementsByTagName('Directory'):
if node.getAttribute('title') == config['section']:
config['section'] = int(node.getAttribute('key'))
if not self.plex_section_is_int(config['section']):
raise plugin.PluginError('Could not find section \'%s\'' % config['section'])
log.debug("Fetching http://%s:%d/library/sections/%s/%s%s" %
(config['server'], config['port'], config['section'], config['selection'], urlappend))
try:
path = "/library/sections/%s/%s" % (config['section'], config['selection'])
r = requests.get("http://%s:%d%s%s" %(config['plexserver'], config['port'], path, urlappend))
except requests.RequestException as e:
raise plugin.PluginError('There is no section with number %d. (%s)' % (config['section'], e) )
dom = parseString(r.text.encode("utf-8"))
plexsectionname = dom.getElementsByTagName('MediaContainer')[0].getAttribute('title1')
viewgroup = dom.getElementsByTagName('MediaContainer')[0].getAttribute('viewGroup')
log.debug("Plex section \"%s\" is a \"%s\" section" % (plexsectionname, viewgroup))
if (viewgroup != "movie" and viewgroup != "show" and viewgroup != "episode"):
raise plugin.PluginError("Section is neither a movie nor tv show section!")
domroot = "Directory"
titletag = "title"
if viewgroup == "episode":
domroot = "Video"
titletag = "grandparentTitle"
thumbtag = "thumb"
arttag = "art"
seasoncovertag = "parentThumb"
covertag = "grandparentThumb"
elif viewgroup == "movie":
domroot = "Video"
titletag = "title"
arttag = "art"
seasoncovertag = "thumb"
covertag = "thumb"
if config['fetch'] == "thumb":
raise plugin.PluginError("Movie sections does not have any thumbnails to download!")
for node in dom.getElementsByTagName(domroot):
e = Entry()
e['plex_server'] = config['plexserver']
e['plex_port'] = config['port']
e['plex_section'] = config['section']
e['plex_section_name'] = plexsectionname
e['plex_episode_thumb'] = ''
title = node.getAttribute(titletag)
if config['strip_year']:
title = re.sub(r'^(.*)\(\d{4}\)(.*)', r'\1\2', title)
if config['strip_parens']:
title = re.sub(r'\(.*?\)', r'', title)
title = title.strip()
if config['strip_non_alpha']:
title = re.sub(r'[\(\)]', r'', title)
title = re.sub(r'&', r'And', title)
title = re.sub(r'[^A-Za-z0-9- \']', r'', title)
if config['lowercase_title']:
title = title.lower()
if viewgroup == "show":
e['title'] = title
e['url'] = 'NULL'
entries.append(e)
# show ends here.
continue
e['plex_art'] = "http://%s:%d%s%s" % (config['server'], config['port'],
node.getAttribute(arttag), urlappend)
e['plex_cover'] = "http://%s:%d%s%s" % (config['server'], config['port'],
node.getAttribute(covertag), urlappend)
e['plex_season_cover'] = "http://%s:%d%s%s" % (config['server'], config['port'],
node.getAttribute(seasoncovertag), urlappend)
if viewgroup == "episode":
e['plex_thumb'] = "http://%s:%d%s%s" % (config['server'], config['port'], node.getAttribute('thumb'), urlappend)
season = int(node.getAttribute('parentIndex'))
if node.getAttribute('parentIndex') == node.getAttribute('year'):
season = node.getAttribute('originallyAvailableAt')
filenamemap = "%s_%s%s_%s_%s_%s.%s"
episode = ""
elif node.getAttribute('index'):
episode = int(node.getAttribute('index'))
filenamemap = "%s_%02dx%02d_%s_%s_%s.%s"
else:
log.debug("Could not get episode number for '%s' (Hint, ratingKey: %s)"
% (title, node.getAttribute('ratingKey')))
break
elif viewgroup == "movie":
filenamemap = "%s_%s_%s_%s.%s"
e['plex_duration'] = node.getAttribute('duration')
year = node.getAttribute('year')
e['plex_summary'] = node.getAttribute('summary')
count = node.getAttribute('viewCount')
offset = node.getAttribute('viewOffset')
if count:
e['plex_status'] = "seen"
elif offset:
e['plex_status'] = "inprogress"
else:
e['plex_status'] = "unwatched"
for media in node.getElementsByTagName('Media'):
vcodec = media.getAttribute('videoCodec')
acodec = media.getAttribute('audioCodec')
if config['fetch'] == "file" or not config['fetch']:
container = media.getAttribute('container')
else:
container = "jpg"
resolution = media.getAttribute('videoResolution') + "p"
for part in media.getElementsByTagName('Part'):
if config['fetch'] == "file" or not config['fetch']:
key = part.getAttribute('key')
elif config['fetch'] == "art":
key = node.getAttribute(arttag)
elif config['fetch'] == "cover":
key = node.getAttribute(arttag)
elif config['fetch'] == "season_cover":
key = node.getAttribute(seasoncovertag)
elif config['fetch'] == "thumb":
key = node.getAttribute(thumbtag)
# key = part.getAttribute('key')
duration = part.getAttribute('duration')
if viewgroup == "show":
e['plex_title'] = episodetitle
elif viewgroup == "movie":
e['plex_title'] = title
if config['original_filename']:
filename, fileext = os.path.splitext(basename(part.getAttribute('file')))
if config['fetch'] != 'file':
filename += ".jpg"
else:
filename = "%s.%s" % (filename, fileext)
else:
if viewgroup == "episode":
filename = filenamemap % (title.replace(" ", "."), season, episode, resolution, vcodec,
acodec, container)
title = filename
elif viewgroup == "movie":
filename = filenamemap % (title.replace(" ", "."), resolution, vcodec,
acodec, container)
e['plex_url'] = "http://%s:%d%s%s" % (config['server'], config['port'], key, urlappend)
e['plex_path'] = key
e['url'] = "http://%s:%d%s%s" % (config['server'], config['port'], key, urlappend)
e['plex_duration'] = duration
e['filename'] = filename
e['title'] = title
if key == "":
log.debug("Could not find anything in PMS to download. Next!")
else:
entries.append(e)
return entries
@event('plugin.register')
def register_plugin():
plugin.register(InputPlex, 'plex', api_ver=2)
| 49.206154 | 183 | 0.53977 |
4a21bdeaced06c899cb3fc12343cbdf823a3ff9d | 318 | py | Python | Dataset/Leetcode/train/1/57.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/train/1/57.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/train/1/57.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | class Solution(object):
def XXX(self, nums, target):
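        # Brute-force two-sum: check every pair (i, j) and collect the indices whose
        # values add up to target.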
index_List = []
for i in range(len(nums)):
for j in range(i+1, len(nums)):
if nums[i] + nums[j] == target:
index_List.append(i)
index_List.append(j)
return index_List
| 28.909091 | 47 | 0.487421 |
4a21bedd0eb364d7963a82a8cd00df31d26e87d4 | 4,312 | py | Python | tests/test_linops/test_kronecker.py | feimeng93/probnum | 4e46273c0157d26b9be2a7a415ccf69a3691ec22 | [
"MIT"
] | 1 | 2021-04-14T14:17:12.000Z | 2021-04-14T14:17:12.000Z | tests/test_linops/test_kronecker.py | jzenn/probnum | cb9e5ec07384913049a312ac62cfec88970f1c8d | [
"MIT"
] | 16 | 2021-03-08T07:25:31.000Z | 2022-03-28T21:05:53.000Z | tests/test_linops/test_kronecker.py | jzenn/probnum | cb9e5ec07384913049a312ac62cfec88970f1c8d | [
"MIT"
] | 2 | 2022-01-23T14:24:08.000Z | 2022-01-29T01:26:47.000Z | """Tests for Kronecker-type linear operators."""
import unittest
import numpy as np
from probnum import linops
from tests.testing import NumpyAssertions
class LinearOperatorKroneckerTestCase(unittest.TestCase, NumpyAssertions):
"""Test Kronecker-type operators."""
def setUp(self):
self.kronecker_matrices = [
(np.array([[4, 1, 4], [2, 3, 2]]), np.array([[-1, 4], [2, 1]])),
(np.array([[0.4, 2, 0.8], [-0.4, 0, -0.9]]), np.array([[1, 4]])),
]
self.symmkronecker_matrices = [
(np.array([[4, 1], [2, 3]]), np.array([[-1, 4], [2, 1]])),
(
np.array([[0.4, 2, 0.8], [-0.4, 0, -0.9], [1, 0, 2]]),
np.array([[1, 4, 0], [-3, -0.4, -100], [0.18, -2, 10]]),
),
]
def test_vec2svec_dimension(self):
"""Check faulty dimension for Q."""
for n in [-1, 0, 1.1, np.inf, np.nan]:
with self.subTest():
with self.assertRaises(
ValueError,
msg="Invalid input dimension n should raise a ValueError.",
):
linops.Svec(dim=n)
def test_symmetrize(self):
"""The Symmetrize operators should symmetrize vectors and columns of
matrices."""
for n in [1, 2, 3, 5, 12]:
with self.subTest():
x = np.random.uniform(size=n * n)
X = np.reshape(x, (n, n))
y = linops.Symmetrize(dim=n) @ x
self.assertArrayEqual(
y.reshape(n, n), 0.5 * (X + X.T), msg="Matrix not symmetric."
)
Z = np.random.uniform(size=(9, 5))
W = linops.Symmetrize(dim=3) @ Z
self.assertArrayEqual(
W,
np.vstack([linops.Symmetrize(dim=3) @ col for col in Z.T]).T,
msg="Matrix columns were not symmetrized.",
)
self.assertArrayEqual(
np.shape(W),
np.shape(Z),
msg="Symmetrized matrix columns do not have the right shape.",
)
def test_kronecker_transpose(self):
"""Kronecker product transpose property: (A (x) B)^T = A^T (x) B^T."""
for A, B in self.kronecker_matrices:
with self.subTest():
W = linops.Kronecker(A=A, B=B)
V = linops.Kronecker(A=A.T, B=B.T)
self.assertAllClose(W.T.todense(), V.todense())
def test_kronecker_explicit(self):
"""Test the Kronecker operator against explicit matrix representations."""
for A, B in self.kronecker_matrices:
with self.subTest():
W = linops.Kronecker(A=A, B=B)
AkronB = np.kron(A, B)
self.assertAllClose(W.todense(), AkronB)
def test_symmkronecker_todense_symmetric(self):
"""Dense matrix from symmetric Kronecker product of two symmetric matrices must
be symmetric."""
C = np.array([[5, 1], [1, 10]])
D = np.array([[-2, 0.1], [0.1, 8]])
Ws = linops.SymmetricKronecker(A=C, B=C)
Ws_dense = Ws.todense()
self.assertArrayEqual(
Ws_dense,
Ws_dense.T,
msg="Symmetric Kronecker product of symmetric matrices is not symmetric.",
)
def test_symmkronecker_explicit(self):
"""Test the symmetric Kronecker operator against explicit matrix
representations."""
pass
def test_symmkronecker_transpose(self):
"""Kronecker product transpose property: (A (x) B)^T = A^T (x) B^T."""
for A, B in self.symmkronecker_matrices:
with self.subTest():
W = linops.SymmetricKronecker(A=A, B=B)
V = linops.SymmetricKronecker(A=A.T, B=B.T)
self.assertAllClose(W.T.todense(), V.todense())
def test_symmkronecker_commutation(self):
"""Symmetric Kronecker products fulfill A (x)_s B = B (x)_s A"""
for A, B in self.symmkronecker_matrices:
with self.subTest():
W = linops.SymmetricKronecker(A=A, B=B)
V = linops.SymmetricKronecker(A=B, B=A)
self.assertAllClose(W.todense(), V.todense())
| 36.542373 | 87 | 0.523655 |
4a21bf664982fcd9fa213cb00de0e03c1f6d7682 | 13,183 | py | Python | zs/tests/test_reader.py | njsmith/zs | 42ca7679c2b986243abcff048bc42d49c204048c | [
"BSD-2-Clause"
] | 35 | 2015-06-19T02:36:14.000Z | 2021-09-22T21:19:59.000Z | zs/tests/test_reader.py | njsmith/zs | 42ca7679c2b986243abcff048bc42d49c204048c | [
"BSD-2-Clause"
] | 2 | 2016-03-07T00:52:41.000Z | 2021-11-14T16:48:33.000Z | zs/tests/test_reader.py | njsmith/zs | 42ca7679c2b986243abcff048bc42d49c204048c | [
"BSD-2-Clause"
] | 4 | 2015-01-30T09:23:18.000Z | 2019-02-05T18:03:08.000Z | # This file is part of ZS
# Copyright (C) 2013-2014 Nathaniel Smith <[email protected]>
# See file LICENSE.txt for license information.
import os
import os.path
import sys
import hashlib
from six import int2byte, byte2int, BytesIO, integer_types
from nose.tools import assert_raises
from .util import test_data_path
from .http_harness import web_server
from zs import ZS, ZSError, ZSCorrupt
from zs._zs import pack_data_records
from zs.common import read_length_prefixed, codec_shorthands
# letters.zs contains records:
# [b, bb, d, dd, f, ff, ..., z, zz]
letters_records = []
for i in range(1, 26, 2):
letter = int2byte(byte2int(b"a") + i)
letters_records += [letter, 2 * letter]
letters_sha256 = hashlib.sha256(pack_data_records(letters_records)).digest()
def identity(x):
return x
def _check_map_helper(records, arg1, arg2):
assert arg1 == 1
assert arg2 == 2
return records
def _check_raise_helper(records, exc):
raise exc
def check_letters_zs(z, codec_shorthand):
assert isinstance(z.root_index_offset, integer_types)
assert isinstance(z.root_index_length, integer_types)
assert isinstance(z.total_file_length, integer_types)
assert z.codec == codec_shorthands[codec_shorthand]
assert z.data_sha256 == letters_sha256
assert z.metadata == {
u"test-data": u"letters",
u"build-info": {
u"user": u"test-user",
u"host": u"test-host",
u"time": u"2000-01-01T00:00:00.000000Z",
u"version": u"zs test",
},
}
assert isinstance(z.root_index_level, integer_types)
assert list(z) == letters_records
assert list(z.search()) == letters_records
if "ZS_QUICK_TEST" in os.environ:
chars = "m"
else:
chars = "abcdefghijklmnopqrstuvwxyz"
for char in chars:
byte = char.encode("ascii")
for (start, stop, prefix) in [
(None, None, None),
(byte, None, None),
(None, byte, None),
(None, None, byte),
(byte, byte, None),
(byte, int2byte(byte2int(byte) + 1), None),
(byte, int2byte(byte2int(byte) + 2), None),
(byte, int2byte(byte2int(byte) + 3), None),
(byte, b"q", None),
(None, 2 * byte, byte),
(b"m", b"s", byte),
]:
print("start=%r, stop=%r, prefix=%r" % (start, stop, prefix))
expected = letters_records
if start is not None:
expected = [r for r in expected if r >= start]
if stop is not None:
expected = [r for r in expected if not r >= stop]
if prefix is not None:
expected = [r for r in expected if r.startswith(prefix)]
assert list(z.search(start=start, stop=stop, prefix=prefix)
) == expected
map_blocks = list(z.block_map(
_check_map_helper,
# test args and kwargs argument passing
args=(1,), kwargs={"arg2": 2},
start=start, stop=stop, prefix=prefix))
assert sum(map_blocks, []) == expected
for term in [b"\n", b"\x00"]:
expected_dump = term.join(expected + [b""])
out = BytesIO()
z.dump(out, start=start, stop=stop, prefix=prefix,
terminator=term)
assert out.getvalue() == expected_dump
out = BytesIO()
z.dump(out, start=start, stop=stop, prefix=prefix,
length_prefixed="uleb128")
assert (list(read_length_prefixed(BytesIO(out.getvalue()), "uleb128"))
== expected)
out = BytesIO()
z.dump(out, start=start, stop=stop, prefix=prefix,
length_prefixed="u64le")
assert (list(read_length_prefixed(BytesIO(out.getvalue()), "u64le"))
== expected)
assert list(z.search(stop=b"bb", prefix=b"b")) == [b"b"]
assert_raises(ValueError, list,
z.block_map(_check_raise_helper, args=(ValueError,)))
assert_raises(ValueError, z.block_exec,
_check_raise_helper, args=(ValueError,))
z.validate()
def test_zs():
for codec in codec_shorthands:
p = test_data_path("letters-%s.zs" % (codec,))
for parallelism in [0, 2, "guess"]:
with ZS(path=p, parallelism=parallelism) as z:
check_letters_zs(z, codec)
# This is much slower, and the above test will have already exercised most of
# the tricky code, so we make this test less exhaustive.
def test_http_zs():
with web_server(test_data_path()) as root_url:
codec = "deflate"
url = "%s/letters-%s.zs" % (root_url, codec)
for parallelism in [0, 2]:
with ZS(url=url, parallelism=parallelism) as z:
check_letters_zs(z, codec)
def test_http_notices_lack_of_range_support():
with web_server(test_data_path(), range_support=False) as root_url:
codec = "deflate"
url = "%s/letters-%s.zs" % (root_url, codec)
assert_raises(ZSError, lambda: list(ZS(url=url)))
def test_zs_args():
p = test_data_path("letters-none.zs")
# can't pass both path and url
assert_raises(ValueError, ZS, path=p, url="x")
# parallelism must be >= 0
assert_raises(ValueError, ZS, path=p, parallelism=-1)
def test_zs_close():
z = ZS(test_data_path("letters-none.zs"))
z.close()
for call in [[list, z.search()],
[list,
z.block_map(_check_raise_helper, AssertionError)],
[list, z],
[z.dump, BytesIO()],
[z.validate],
]:
print(repr(call))
assert_raises(ZSError, *call)
# But calling .close() twice is fine.
z.close()
# smoke test for __del__ method
ZS(test_data_path("letters-none.zs"))
def test_context_manager_closes():
with ZS(test_data_path("letters-none.zs")) as z:
assert list(z.search()) == letters_records
assert_raises(ZSError, list, z.search())
def test_block_exec():
# This function tricky to test in a multiprocessing world, because we need
# some way to communicate back from the subprocesses that the execution
# actually happened... instead we just test it in serial
# mode. (Fortunately it is a super-trivial function.)
z = ZS(test_data_path("letters-none.zs"), parallelism=0)
# b/c we're in serial mode, the fn doesn't need to be pickleable
class CountBlocks(object):
def __init__(self):
self.count = 0
def __call__(self, records):
self.count += 1
count_blocks = CountBlocks()
z.block_exec(count_blocks)
assert count_blocks.count > 1
assert count_blocks.count == len(list(z.block_map(identity)))
def test_big_headers():
from zs.reader import _lower_header_size_guess
with _lower_header_size_guess():
z = ZS(test_data_path("letters-none.zs"))
assert z.codec == "none"
assert z.data_sha256 == letters_sha256
assert z.metadata == {
u"test-data": u"letters",
u"build-info": {
u"user": u"test-user",
u"host": u"test-host",
u"time": u"2000-01-01T00:00:00.000000Z",
u"version": u"zs test",
},
}
assert list(z) == letters_records
def test_broken_files():
import glob
unchecked_paths = set(glob.glob(test_data_path("broken-files/*.zs")))
# Files that should fail even on casual use (no validate)
for basename, msg_fragment in [
("short-root", ["partial read", "root index length"]),
("truncated-root", "unexpected EOF"),
("bad-magic", "bad magic"),
("incomplete-magic", "partially written"),
("header-checksum", "header checksum"),
("root-checksum", "checksum mismatch"),
("bad-codec", "unrecognized compression"),
("non-dict-metadata", "bad metadata"),
("truncated-data-1", "unexpectedly ran out of data"),
("truncated-data-2", "unexpected EOF"),
("truncated-data-3", "unexpected EOF"),
("wrong-root-offset", ["checksum mismatch", "root block missing"]),
("root-is-data", ["expecting index block", "bad level"]),
("wrong-root-level-1", ["expecting index block", "bad index ref"]),
("partial-data-1", "past end of block"),
("partial-data-2", "end of buffer"),
("empty-data", "empty block"),
("partial-index-1", "end of buffer"),
("partial-index-2", "end of buffer"),
("partial-index-3", "past end of block"),
("partial-index-4", "past end of block"),
("empty-index", "empty block"),
("bad-total-length", "header says it should"),
("bad-level-root", ["extension block", "root block missing"]),
("bad-level-index-2", ["extension block", "dangling or multiple refs"]),
("post-header-junk", "checksum mismatch"),
]:
print(basename)
def any_match(mfs, haystack):
if isinstance(mfs, str):
mfs = [mfs]
for mf in mfs:
if mf in haystack:
return True
return False
# to prevent accidental false success:
assert not any_match(msg_fragment, basename)
p = test_data_path("broken-files/%s.zs" % (basename,))
with assert_raises(ZSCorrupt) as cm:
with ZS(p) as z:
list(z)
# use start= to ensure that we do an index traversal
list(z.search(start=b"\x00"))
assert any_match(msg_fragment, str(cm.exception))
with assert_raises(ZSCorrupt) as cm:
with ZS(p) as z:
z.validate()
assert any_match(msg_fragment, str(cm.exception))
unchecked_paths.discard(p)
# Files that might look okay locally, but validate should detect problems
for basename, msg_fragment in [
("unref-data", "unreferenced"),
("unref-index", "unreferenced"),
("wrong-root-length", "root index length"),
("wrong-root-level-2", "level 3 to level 1"),
("repeated-index", "multiple ref"),
("bad-ref-length", "!= actual length"),
("bad-index-order", "unsorted offsets"),
("bad-index-order", "unsorted records"),
("bad-data-order", "unsorted records"),
("bad-index-key-1", "too large for block"),
("bad-index-key-2", "too small for block"),
("bad-index-key-3", "too small for block"),
("bad-sha256", "data hash mismatch"),
# not really an accurate message -- this file has a level 1 index
# pointing to an extension block. the reader doesn't blow up at
# this because it knows that below a level 1 index is data and
# switches to streaming read, and then streaming read ignores
# extension blocks, so only fsck() will catch it. And fsck() uses
# a streaming read so extension blocks are invisible to it, and
# all it sees is that there's this reference pointing into an
# invisible hole in space, which looks like a dangling reference.
("bad-level-index-1", "dangling"),
]:
print(basename)
# to prevent accidental false success:
assert msg_fragment not in basename
p = test_data_path("broken-files/%s.zs" % (basename,))
with ZS(p) as z:
with assert_raises(ZSCorrupt) as cm:
z.validate()
assert msg_fragment in str(cm.exception)
unchecked_paths.discard(p)
# Files that are a bit tricky, but should in fact be okay
for basename in [
"good-index-key-1",
"good-index-key-2",
"good-index-key-3",
"good-extension-blocks",
"good-extension-header-fields",
]:
print(basename)
p = test_data_path("broken-files/%s.zs" % (basename,))
with ZS(p) as z:
list(z)
z.validate()
unchecked_paths.discard(p)
assert not unchecked_paths
def test_extension_blocks():
# Check that the reader happily skips over the extension blocks in the
# middle of the file.
with ZS(test_data_path("broken-files/good-extension-blocks.zs")) as z:
assert list(z) == [b"a", b"b", b"c", b"d"]
def test_ref_loops():
# Had a bunch of trouble eliminating reference loops in the ZS object.
# Don't use 'with' statement here b/c that keeps another ref which just
# confuses things.
z = ZS(test_data_path("letters-none.zs"))
try:
# 1 for 'z', one for the temporary passed to sys.getrefcount
print(sys.getrefcount(z))
assert sys.getrefcount(z) == 2
list(z)
assert sys.getrefcount(z) == 2
finally:
z.close()
| 39.352239 | 84 | 0.579079 |
4a21bf77b28e779fb9c125e86a7508e8ee8de0c5 | 1,562 | py | Python | var/spack/repos/builtin/packages/piranha/package.py | MiddelkoopT/spack | 4d94c4c4600f42a7a3bb3d06ec879140bc259304 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/piranha/package.py | MiddelkoopT/spack | 4d94c4c4600f42a7a3bb3d06ec879140bc259304 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/piranha/package.py | MiddelkoopT/spack | 4d94c4c4600f42a7a3bb3d06ec879140bc259304 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1 | 2022-01-18T23:39:24.000Z | 2022-01-18T23:39:24.000Z | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Piranha(CMakePackage):
"""Piranha is a computer-algebra library for the symbolic manipulation of
sparse multivariate polynomials and other closely-related symbolic objects
(such as Poisson series)."""
homepage = "https://bluescarni.github.io/piranha/sphinx/"
url = "https://github.com/bluescarni/piranha/archive/v0.5.tar.gz"
git = "https://github.com/bluescarni/piranha.git"
version('develop', branch='master')
version('0.5', sha256='34a89bda8208ff48cfb116efa7d53c09e8a9b3838af4bb96ba2e19e4930b3a58')
variant('python', default=True,
description='Build the Python bindings')
# Build dependencies
depends_on('[email protected]:', type='build')
extends('python', when='+python')
depends_on('[email protected]:', type='build', when='+python')
# Other dependencies
depends_on('boost+iostreams+regex+serialization',
when='~python')
depends_on('boost+iostreams+regex+serialization+python',
when='+python')
depends_on('bzip2')
depends_on('gmp') # mpir is a drop-in replacement for this
depends_on('mpfr') # Could also be built against mpir
def cmake_args(self):
return [
'-DBUILD_PYRANHA=%s' % ('ON' if '+python' in self.spec else 'OFF'),
'-DBUILD_TESTS:BOOL=ON',
]
| 36.325581 | 93 | 0.668374 |
4a21c05dc648cedbf0b4c2947f2de897b1c6cba9 | 3,502 | py | Python | src/qa-lstm.py | Asteur/qa | cc9ec2af44d3e261cc865988d9828de165ec47e4 | [
"Apache-2.0"
] | 261 | 2016-10-08T09:53:30.000Z | 2021-03-29T09:10:05.000Z | src/qa-lstm.py | Asteur/qa | cc9ec2af44d3e261cc865988d9828de165ec47e4 | [
"Apache-2.0"
] | 5 | 2017-04-06T13:15:53.000Z | 2018-06-12T11:58:49.000Z | src/qa-lstm.py | Asteur/qa | cc9ec2af44d3e261cc865988d9828de165ec47e4 | [
"Apache-2.0"
] | 75 | 2016-10-10T08:13:36.000Z | 2019-11-08T02:24:03.000Z | # -*- coding: utf-8 -*-
from __future__ import division, print_function
from gensim.models import Word2Vec
from keras.callbacks import ModelCheckpoint
from keras.layers import Dense, Merge, Dropout
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM
from keras.models import Sequential
from sklearn.cross_validation import train_test_split
import numpy as np
import os
import kaggle
DATA_DIR = "../data/comp_data"
MODEL_DIR = "../data/models"
WORD2VEC_BIN = "GoogleNews-vectors-negative300.bin.gz"
WORD2VEC_EMBED_SIZE = 300
QA_TRAIN_FILE = "8thGr-NDMC-Train.csv"
QA_EMBED_SIZE = 64
BATCH_SIZE = 32
NBR_EPOCHS = 20
## extract data
print("Loading and formatting data...")
qapairs = kaggle.get_question_answer_pairs(
os.path.join(DATA_DIR, QA_TRAIN_FILE))
question_maxlen = max([len(qapair[0]) for qapair in qapairs])
answer_maxlen = max([len(qapair[1]) for qapair in qapairs])
seq_maxlen = max([question_maxlen, answer_maxlen])
word2idx = kaggle.build_vocab([], qapairs, [])
vocab_size = len(word2idx) + 1 # include mask character 0
Xq, Xa, Y = kaggle.vectorize_qapairs(qapairs, word2idx, seq_maxlen)
Xqtrain, Xqtest, Xatrain, Xatest, Ytrain, Ytest = \
train_test_split(Xq, Xa, Y, test_size=0.3, random_state=42)
print(Xqtrain.shape, Xqtest.shape, Xatrain.shape, Xatest.shape,
Ytrain.shape, Ytest.shape)
# get embeddings from word2vec
# see https://github.com/fchollet/keras/issues/853
print("Loading Word2Vec model and generating embedding matrix...")
word2vec = Word2Vec.load_word2vec_format(
os.path.join(DATA_DIR, WORD2VEC_BIN), binary=True)
embedding_weights = np.zeros((vocab_size, WORD2VEC_EMBED_SIZE))
for word, index in word2idx.items():
try:
embedding_weights[index, :] = word2vec[word.lower()]
except KeyError:
pass # keep as zero (not ideal, but what else can we do?)
del word2vec
del word2idx
print("Building model...")
qenc = Sequential()
qenc.add(Embedding(output_dim=WORD2VEC_EMBED_SIZE, input_dim=vocab_size,
weights=[embedding_weights], mask_zero=True))
qenc.add(LSTM(QA_EMBED_SIZE, input_length=seq_maxlen, return_sequences=False))
qenc.add(Dropout(0.3))
aenc = Sequential()
aenc.add(Embedding(output_dim=WORD2VEC_EMBED_SIZE, input_dim=vocab_size,
weights=[embedding_weights], mask_zero=True))
aenc.add(LSTM(QA_EMBED_SIZE, input_length=seq_maxlen, return_sequences=False))
aenc.add(Dropout(0.3))
model = Sequential()
model.add(Merge([qenc, aenc], mode="sum"))
model.add(Dense(2, activation="softmax"))
model.compile(optimizer="adam", loss="categorical_crossentropy",
metrics=["accuracy"])
print("Training...")
checkpoint = ModelCheckpoint(
filepath=os.path.join(MODEL_DIR, "qa-lstm-best.hdf5"),
verbose=1, save_best_only=True)
model.fit([Xqtrain, Xatrain], Ytrain, batch_size=BATCH_SIZE,
nb_epoch=NBR_EPOCHS, validation_split=0.1,
callbacks=[checkpoint])
print("Evaluation...")
loss, acc = model.evaluate([Xqtest, Xatest], Ytest, batch_size=BATCH_SIZE)
print("Test loss/accuracy final model = %.4f, %.4f" % (loss, acc))
model.save_weights(os.path.join(MODEL_DIR, "qa-lstm-final.hdf5"))
with open(os.path.join(MODEL_DIR, "qa-lstm.json"), "wb") as fjson:
fjson.write(model.to_json())
model.load_weights(filepath=os.path.join(MODEL_DIR, "qa-lstm-best.hdf5"))
loss, acc = model.evaluate([Xqtest, Xatest], Ytest, batch_size=BATCH_SIZE)
print("Test loss/accuracy best model = %.4f, %.4f" % (loss, acc))
| 35.734694 | 78 | 0.739863 |
4a21c1496e1714e98680704d42db8ed15a975153 | 147 | py | Python | simeng/util/statistics.py | wstlabs/similarity-engine | fde4dd31b0f1738573513159f950823cb2d4a7ce | [
"Apache-2.0"
] | null | null | null | simeng/util/statistics.py | wstlabs/similarity-engine | fde4dd31b0f1738573513159f950823cb2d4a7ce | [
"Apache-2.0"
] | null | null | null | simeng/util/statistics.py | wstlabs/similarity-engine | fde4dd31b0f1738573513159f950823cb2d4a7ce | [
"Apache-2.0"
] | null | null | null | from collections import defaultdict
def valhist(pairs):
h = defaultdict(int)
for item,value in pairs:
h[value] += 1
return h
| 16.333333 | 35 | 0.646259 |
4a21c1c45070dd9d5012e87b835e5d373e4f99bf | 4,148 | py | Python | fdk_client/platform/OAuthClient.py | kavish-d/fdk-client-python | a1023eb530473322cb52e095fc4ceb226c1e6037 | [
"MIT"
] | null | null | null | fdk_client/platform/OAuthClient.py | kavish-d/fdk-client-python | a1023eb530473322cb52e095fc4ceb226c1e6037 | [
"MIT"
] | null | null | null | fdk_client/platform/OAuthClient.py | kavish-d/fdk-client-python | a1023eb530473322cb52e095fc4ceb226c1e6037 | [
"MIT"
] | null | null | null | """OAuth Client."""
from threading import Timer
from typing import Dict
from urllib import parse
import base64
import asyncio
from ..common.exceptions import FDKOAuthCodeError
from ..common.aiohttp_helper import AiohttpHelper
from ..common.utils import get_headers_with_signature
class OAuthClient:
def __init__(self, config):
self._conf = config
self.token = None
self.refreshToken = None
self.retryOAuthTokenTimer = None
self.raw_token = None
self.token_expires_in = None
async def getAccessToken(self):
return self.token
async def setToken(self, token):
self.raw_token = token
self.token_expires_in = token.get("expires_in")
self.token = token.get("access_token")
self.refreshToken = token.get("refresh_token") if token.get("refresh_token") else None
if self.refreshToken:
await self.retryOAuthToken(token.get("expires_in"))
async def retryOAuthToken(self, expires_in):
if self.retryOAuthTokenTimer:
self.retryOAuthTokenTimer.cancel()
if expires_in > 60:
self.retryOAuthTokenTimer = Timer(float(expires_in - 60), lambda: asyncio.run(self.renewAccessToken()))
self.retryOAuthTokenTimer.start()
async def startAuthorization(self, options: Dict):
query = {
"access_mode": options.get("access_mode", ""),
"client_id": self._conf.apiKey,
"redirect_uri": options.get("redirectUri", ""),
"response_type": "code",
"scope": ",".join(options.get("scope", [])),
"state": options.get("state", "")
}
queryString = parse.urlencode(query)
reqPath = f"/service/panel/authentication/v1.0/company/{self._conf.companyId}/oauth/authorize"
signingOptions = {
"method": "GET",
"host": self._conf.domain,
"path": reqPath,
"body": None,
"headers": {},
"signQuery": True
}
queryString = await get_headers_with_signature(self._conf.domain, "get",
f"/service/panel/authentication/v1.0/company/"
f"{self._conf.companyId}/oauth/authorize",
queryString, {}, sign_query=True)
return f"{self._conf.domain}{signingOptions['path']}?{queryString}"
async def verifyCallback(self, query):
if query.get("error"):
raise FDKOAuthCodeError(query["error_description"])
# try:
res = await self.getAccesstokenObj(grant_type="authorization_code", code=query.get("code", ""))
await self.setToken(res)
# except Exception as e:
# if error.isAxiosError:
# throw new FDKTokenIssueError(error.message)
async def renewAccessToken(self):
res = await self.getAccesstokenObj(grant_type="refresh_token", refresh_token=self.refreshToken)
await self.setToken(res)
return res
async def getAccesstokenObj(self, grant_type="", refresh_token="", code=""):
reqData = {
"grant_type": grant_type,
}
if grant_type == "refresh_token":
reqData = {**reqData, "refresh_token": refresh_token}
elif grant_type == "authorization_code":
reqData = {**reqData, "code": code}
token = base64.b64encode(f"{self._conf.apiKey}:{self._conf.apiSecret}".encode()).decode()
url = f"{self._conf.domain}/service/panel/authentication/v1.0/company/{self._conf.companyId}/oauth/token"
headers = {
"Authorization": f"Basic {token}"
}
headers = await get_headers_with_signature(self._conf.domain, "post",
f"/service/panel/authentication/v1.0/company/{self._conf.companyId}/oauth/token",
"", headers, reqData, ["Authorization"])
response = await AiohttpHelper().aiohttp_request("POST", url, reqData, headers)
return response["json"]
| 41.069307 | 126 | 0.598843 |
4a21c1df470fda18bd0a93cd3a7f2717cc7b2fec | 2,817 | py | Python | src/utils/utils.py | Armagaan/cf-gnnexplainer | 22b415e114c52d8d60ca45a40c3cb33c1947400c | [
"MIT"
] | 15 | 2021-06-23T12:59:29.000Z | 2022-03-22T21:01:49.000Z | src/utils/utils.py | Armagaan/cf-gnnexplainer | 22b415e114c52d8d60ca45a40c3cb33c1947400c | [
"MIT"
] | 3 | 2021-07-12T06:31:56.000Z | 2021-09-08T09:21:12.000Z | src/utils/utils.py | Armagaan/cf-gnnexplainer | 22b415e114c52d8d60ca45a40c3cb33c1947400c | [
"MIT"
] | 6 | 2021-09-23T17:47:31.000Z | 2022-03-21T11:09:32.000Z | import os
import errno
import torch
import numpy as np
import pandas as pd
from torch_geometric.utils import k_hop_subgraph, dense_to_sparse, to_dense_adj, subgraph
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def safe_open(path, w):
''' Open "path" for writing, creating any parent directories as needed.'''
mkdir_p(os.path.dirname(path))
return open(path, w)
def accuracy(output, labels):
preds = output.max(1)[1].type_as(labels)
correct = preds.eq(labels).double()
correct = correct.sum()
return correct / len(labels)
def get_degree_matrix(adj):
return torch.diag(sum(adj))
def normalize_adj(adj):
# Normalize adjacancy matrix according to reparam trick in GCN paper
A_tilde = adj + torch.eye(adj.shape[0])
D_tilde = get_degree_matrix(A_tilde)
# Raise to power -1/2, set all infs to 0s
D_tilde_exp = D_tilde ** (-1 / 2)
D_tilde_exp[torch.isinf(D_tilde_exp)] = 0
# Create norm_adj = (D + I)^(-1/2) * (A + I) * (D + I) ^(-1/2)
norm_adj = torch.mm(torch.mm(D_tilde_exp, A_tilde), D_tilde_exp)
return norm_adj
def get_neighbourhood(node_idx, edge_index, n_hops, features, labels):
edge_subset = k_hop_subgraph(node_idx, n_hops, edge_index[0]) # Get all nodes involved
edge_subset_relabel = subgraph(edge_subset[0], edge_index[0], relabel_nodes=True) # Get relabelled subset of edges
sub_adj = to_dense_adj(edge_subset_relabel[0]).squeeze()
sub_feat = features[edge_subset[0], :]
sub_labels = labels[edge_subset[0]]
new_index = np.array([i for i in range(len(edge_subset[0]))])
node_dict = dict(zip(edge_subset[0].numpy(), new_index)) # Maps orig labels to new
# print("Num nodes in subgraph: {}".format(len(edge_subset[0])))
return sub_adj, sub_feat, sub_labels, node_dict
def create_symm_matrix_from_vec(vector, n_rows):
matrix = torch.zeros(n_rows, n_rows)
idx = torch.tril_indices(n_rows, n_rows)
matrix[idx[0], idx[1]] = vector
symm_matrix = torch.tril(matrix) + torch.tril(matrix, -1).t()
return symm_matrix
def create_vec_from_symm_matrix(matrix, P_vec_size):
idx = torch.tril_indices(matrix.shape[0], matrix.shape[0])
vector = matrix[idx[0], idx[1]]
return vector
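# Illustrative example: a length-3 vector fills the lower triangle (diagonal
# included) of a 2x2 matrix, which is then mirrored above the diagonal:
# create_symm_matrix_from_vec(torch.tensor([1., 2., 3.]), n_rows=2)
# -> tensor([[1., 2.],
#            [2., 3.]])
# create_vec_from_symm_matrix inverts this, returning tensor([1., 2., 3.]).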
def index_to_mask(index, size):
mask = torch.zeros(size, dtype=torch.bool, device=index.device)
mask[index] = 1
return mask
def get_S_values(pickled_results, header):
df_prep = []
for example in pickled_results:
if example != []:
df_prep.append(example[0])
return pd.DataFrame(df_prep, columns=header)
def redo_dataset_pgexplainer_format(dataset, train_idx, test_idx):
dataset.data.train_mask = index_to_mask(train_idx, size=dataset.data.num_nodes)
dataset.data.test_mask = index_to_mask(test_idx[len(test_idx)], size=dataset.data.num_nodes) | 31.3 | 121 | 0.734824 |
4a21c3078e4d89765288ad05c11efa3641519e47 | 4,085 | py | Python | official/vision/beta/modeling/decoders/aspp.py | faizoctar/models | 126ce652d0efdc3fa82d46d7f1fbd508262a56f8 | [
"Apache-2.0"
] | 1 | 2019-10-05T17:06:09.000Z | 2019-10-05T17:06:09.000Z | official/vision/beta/modeling/decoders/aspp.py | faizoctar/models | 126ce652d0efdc3fa82d46d7f1fbd508262a56f8 | [
"Apache-2.0"
] | null | null | null | official/vision/beta/modeling/decoders/aspp.py | faizoctar/models | 126ce652d0efdc3fa82d46d7f1fbd508262a56f8 | [
"Apache-2.0"
] | 1 | 2020-10-19T05:01:53.000Z | 2020-10-19T05:01:53.000Z | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ASPP decoder."""
# Import libraries
import tensorflow as tf
from official.vision import keras_cv
@tf.keras.utils.register_keras_serializable(package='Vision')
class ASPP(tf.keras.layers.Layer):
"""ASPP."""
def __init__(self,
level,
dilation_rates,
num_filters=256,
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
dropout_rate=0.0,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
interpolation='bilinear',
**kwargs):
"""ASPP initialization function.
Args:
level: `int` level to apply ASPP.
dilation_rates: `list` of dilation rates.
num_filters: `int` number of output filters in ASPP.
use_sync_bn: if True, use synchronized batch normalization.
      norm_momentum: `float` normalization momentum for the moving average.
norm_epsilon: `float` small float added to variance to avoid dividing by
zero.
dropout_rate: `float` rate for dropout regularization.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
interpolation: interpolation method, one of bilinear, nearest, bicubic,
area, lanczos3, lanczos5, gaussian, or mitchellcubic.
**kwargs: keyword arguments to be passed.
"""
super(ASPP, self).__init__(**kwargs)
self._config_dict = {
'level': level,
'dilation_rates': dilation_rates,
'num_filters': num_filters,
'use_sync_bn': use_sync_bn,
'norm_momentum': norm_momentum,
'norm_epsilon': norm_epsilon,
'dropout_rate': dropout_rate,
'kernel_initializer': kernel_initializer,
'kernel_regularizer': kernel_regularizer,
'interpolation': interpolation,
}
def build(self, input_shape):
self.aspp = keras_cv.layers.SpatialPyramidPooling(
output_channels=self._config_dict['num_filters'],
dilation_rates=self._config_dict['dilation_rates'],
use_sync_bn=self._config_dict['use_sync_bn'],
batchnorm_momentum=self._config_dict['norm_momentum'],
batchnorm_epsilon=self._config_dict['norm_epsilon'],
dropout=self._config_dict['dropout_rate'],
kernel_initializer=self._config_dict['kernel_initializer'],
kernel_regularizer=self._config_dict['kernel_regularizer'],
interpolation=self._config_dict['interpolation'])
def call(self, inputs):
"""ASPP call method.
    The output of ASPP will be a dict mapping level to Tensor, even if only one
    level is present. Hence, this will be compatible with the rest of the
    segmentation model interfaces.
Args:
inputs: A dict of tensors
- key: `str`, the level of the multilevel feature maps.
- values: `Tensor`, [batch, height_l, width_l, filter_size].
Returns:
A dict of tensors
- key: `str`, the level of the multilevel feature maps.
- values: `Tensor`, output of ASPP module.
"""
outputs = {}
level = str(self._config_dict['level'])
outputs[level] = self.aspp(inputs[level])
return outputs
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
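# Illustrative usage sketch (not part of the original file); shapes assume the
# default num_filters=256:
#   aspp = ASPP(level=4, dilation_rates=[6, 12, 18])
#   feats = {'4': tf.ones([1, 32, 32, 64])}
#   outputs = aspp(feats)  # {'4': Tensor of shape [1, 32, 32, 256]}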
| 37.477064 | 80 | 0.667319 |
4a21c322da0a997e5387c492effd9bd85b23a8a4 | 1,061 | py | Python | arrow/commands/annotations/add_transcript.py | GMOD/python-apollo3 | c1c47e985d95c8995374f6daa5c2e52b6d94ee0d | [
"MIT"
] | 5 | 2017-06-27T19:41:57.000Z | 2021-06-05T13:36:11.000Z | arrow/commands/annotations/add_transcript.py | galaxy-genome-annotation/python-apollo | 1257e050ee3fc0a7f7ab8a8c780aefee5c8143f8 | [
"MIT"
] | 28 | 2017-07-24T15:10:37.000Z | 2021-09-03T11:56:35.000Z | arrow/commands/annotations/add_transcript.py | MoffMade/python-apollo | 3cc61458cf5c20bd44fde656b8364417b915cfb8 | [
"MIT"
] | 10 | 2017-05-10T19:13:44.000Z | 2021-08-09T04:52:33.000Z | import click
from arrow.cli import pass_context, json_loads
from arrow.decorators import custom_exception, dict_output
@click.command('add_transcript')
@click.option(
"--transcript",
help="Transcript data",
type=str
)
@click.option(
"--suppress_history",
help="Suppress the history of this operation",
is_flag=True
)
@click.option(
"--suppress_events",
help="Suppress instant update of the user interface",
is_flag=True
)
@click.option(
"--organism",
help="Organism Common Name",
type=str
)
@click.option(
"--sequence",
help="Sequence Name",
type=str
)
@pass_context
@custom_exception
@dict_output
def cli(ctx, transcript={}, suppress_history=False, suppress_events=False, organism="", sequence=""):
"""Add a single transcript annotation
Output:
A standard apollo feature dictionary ({"features": [{...}]})
"""
return ctx.gi.annotations.add_transcript(transcript=transcript, suppress_history=suppress_history, suppress_events=suppress_events, organism=organism, sequence=sequence)
| 24.674419 | 173 | 0.717248 |
4a21c45d97af86d7881fb4dc197458aba50bd0cf | 2,145 | py | Python | test/D/HSTeoh/Common/libCompileOptions.py | moroten/scons | 20927b42ed4f0cb87f51287fa3b4b6cf915afcf8 | [
"MIT"
] | 1 | 2017-01-28T15:39:07.000Z | 2017-01-28T15:39:07.000Z | test/D/HSTeoh/Common/libCompileOptions.py | moroten/scons | 20927b42ed4f0cb87f51287fa3b4b6cf915afcf8 | [
"MIT"
] | 4 | 2019-04-11T16:27:45.000Z | 2019-04-11T23:56:30.000Z | test/D/HSTeoh/Common/libCompileOptions.py | moroten/scons | 20927b42ed4f0cb87f51287fa3b4b6cf915afcf8 | [
"MIT"
] | 2 | 2018-01-16T11:29:16.000Z | 2020-05-13T16:48:26.000Z | """
These tests check a problem with the lib/ar setting.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import TestSCons
from SCons.Environment import Base
from os.path import abspath, dirname
import sys
sys.path.insert(1, abspath(dirname(__file__) + '/../../Support'))
from executablesSearch import isExecutableOfToolAvailable
def testForTool(tool):
test = TestSCons.TestSCons()
if not isExecutableOfToolAvailable(test, tool) :
test.skip_test("Required executable for tool '{0}' not found, skipping test.\n".format(tool))
test.dir_fixture('LibCompileOptions')
test.write('SConstruct', open('SConstruct_template', 'r').read().format('tools=["{0}", "link", "ar"]'.format(tool)))
test.run()
test.must_exist(test.workpath('mylib.o'))
test.must_exist(test.workpath('mylib.a' if Base()['PLATFORM'] == 'win32' else 'libmylib.a'))
test.must_exist(test.workpath('prog'))
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 33.515625 | 120 | 0.74359 |
4a21c47f2a596ef58a03ae6c7e439ed0be0d973b | 403 | py | Python | leetcode/0141.环形链表/0141-环形链表.py | ruisunyc/- | ef2fd0d58aa683311896bb9442510fedcd013313 | [
"Apache-2.0"
] | 2 | 2021-01-08T01:16:32.000Z | 2021-01-08T09:36:32.000Z | leetcode/0141.环形链表/0141-环形链表.py | ruisunyc/- | ef2fd0d58aa683311896bb9442510fedcd013313 | [
"Apache-2.0"
] | null | null | null | leetcode/0141.环形链表/0141-环形链表.py | ruisunyc/- | ef2fd0d58aa683311896bb9442510fedcd013313 | [
"Apache-2.0"
] | null | null | null | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def hasCycle(self, head: ListNode) -> bool:
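        # Floyd's two-pointer approach: "fast" advances two nodes per step while
        # "low" advances one, so they can only meet if the list contains a cycle.
        # Runs in O(n) time with O(1) extra space.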
low = fast = head
while fast and fast.next:
fast = fast.next.next
low = low.next
            if low == fast:
                return True
return False | 28.785714 | 47 | 0.51861 |
4a21c4fe70c5a65dbdf57cd40e952c732f73c026 | 3,281 | py | Python | nevergrad/common/test_tools.py | xavierzw/nevergrad | 97fd5ce56e6c86692e206073516cbd41dd0ce629 | [
"MIT"
] | null | null | null | nevergrad/common/test_tools.py | xavierzw/nevergrad | 97fd5ce56e6c86692e206073516cbd41dd0ce629 | [
"MIT"
] | null | null | null | nevergrad/common/test_tools.py | xavierzw/nevergrad | 97fd5ce56e6c86692e206073516cbd41dd0ce629 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
from typing import Iterable, List, Any, Tuple
import numpy as np
from . import tools
from . import testing
@testing.parametrized(
void=([], []),
one=(["a"], []),
two=([1, 2], [(1, 2)]),
three=([1, 2, 3], [(1, 2), (2, 3)]),
)
def test_pairwise(iterator: Iterable[Any], expected: List[Tuple[Any, ...]]) -> None:
output = list(tools.pairwise(iterator))
testing.printed_assert_equal(output, expected)
@testing.parametrized(
void=({}, ["i1", "i2", "i3"]),
value=({"c1": "i2-c1"}, ["i2"]),
function=({"c1": lambda x: x == "i2-c1"}, ["i2"]),
values=({"c1": ["i3-c1", "i2-c1"]}, ["i2", "i3"]),
conditions=({"c1": ["i3-c1", "i2-c1"], "c2": "i3-c2"}, ["i3"]),
)
def test_selector(criteria: Any, expected: List[str]) -> None:
df = tools.Selector(index=["i1", "i2", "i3"], columns=["c1", "c2"])
for i, c in itertools.product(df.index, df.columns):
df.loc[i, c] = f"{i}-{c}"
df_select = df.select(**criteria)
df_drop = df.select_and_drop(**criteria)
# indices
testing.assert_set_equal(df_select.index, expected)
testing.assert_set_equal(df_drop.index, expected)
# columns
testing.assert_set_equal(df_select.columns, df)
testing.assert_set_equal(df_drop.columns, set(df_select.columns) - set(criteria))
# values
for i, c in itertools.product(df_select.index, df_select.columns):
assert df.loc[i, c] == f"{i}-{c}", "Erroneous values"
# instance
assert isinstance(df_select, tools.Selector)
assert isinstance(df_drop, tools.Selector)
def test_roundrobin() -> None:
output = list(tools.roundrobin([1, 2, 3], (x for x in [4, 5, 6, 7]), (8,)))
np.testing.assert_array_equal(output, [1, 4, 8, 2, 5, 3, 6, 7])
def test_selector_unique_single() -> None:
df = tools.Selector(index=["i1", "i2", "i3"], columns=["c1"], data=[1, 2, 2])
testing.assert_set_equal(df.unique("c1"), [1, 2])
def test_selector_unique_multiple() -> None:
df = tools.Selector(index=["i1", "i2", "i3"], columns=["c1", "c2"], data=[[2, 1], [2, 2], [2, 1]])
testing.printed_assert_equal(df.unique(["c1", "c2"]), {(2, 1), (2, 2)})
def test_grouper() -> None:
output = list(tools.grouper('ABCDEFG', 3, 'x'))
testing.printed_assert_equal(output, [list(x) for x in ["ABC", "DEF", "Gxx"]])
def test_selector_assert_equivalent() -> None:
select1 = tools.Selector(columns=["a", "b"], data=[[0, 1], [2, 3]])
select2 = tools.Selector(columns=["b", "a"], data=[[3, 2], [1, 0]])
select3 = tools.Selector(columns=["a", "b"], data=[[0, 5], [2, 3]])
select1.assert_equivalent(select2)
np.testing.assert_raises(AssertionError, select1.assert_equivalent, select3)
def test_sleeper() -> None:
min_sleep = 1e-5
sleeper = tools.Sleeper(min_sleep=min_sleep)
np.testing.assert_equal(sleeper._get_advised_sleep_duration(), min_sleep)
sleeper.start_timer()
np.testing.assert_equal(sleeper._get_advised_sleep_duration(), min_sleep)
sleeper.stop_timer()
np.testing.assert_equal(sleeper._get_advised_sleep_duration(), min_sleep)
| 37.712644 | 102 | 0.63883 |
4a21c6019679632d8223825bf0d1ee1d4afcf1eb | 9 | py | Python | dephell/repositories/_git/__init__.py | OliverHofkens/dephell | 6303f416018910668f1635b70cd828a2fd2b2d9e | [
"MIT"
] | 1,880 | 2019-03-21T10:08:25.000Z | 2022-03-31T12:41:55.000Z | dephell/repositories/_git/__init__.py | rachmadaniHaryono/dephell | 0ef500c8f2d5f05244bac191b1b1383f68464cd2 | [
"MIT"
] | 356 | 2019-03-21T19:08:56.000Z | 2021-01-08T17:45:43.000Z | dephell/repositories/_git/__init__.py | rachmadaniHaryono/dephell | 0ef500c8f2d5f05244bac191b1b1383f68464cd2 | [
"MIT"
] | 157 | 2019-04-23T01:13:37.000Z | 2022-03-24T22:41:18.000Z | # WIP!!!
| 4.5 | 8 | 0.333333 |
4a21c63b65e4397cb38e995fa01b69400852109f | 2,234 | py | Python | tests/validation/test_executable_definitions.py | KingDarBoja/graphql-core | 22970e94f1016e813848fc0ab5d1e7ab9ad612e4 | [
"MIT"
] | 590 | 2015-10-06T18:22:49.000Z | 2022-03-22T16:32:17.000Z | tests/validation/test_executable_definitions.py | KingDarBoja/graphql-core | 22970e94f1016e813848fc0ab5d1e7ab9ad612e4 | [
"MIT"
] | 300 | 2015-10-06T18:58:11.000Z | 2022-03-22T14:01:44.000Z | tests/validation/test_executable_definitions.py | KingDarBoja/graphql-core | 22970e94f1016e813848fc0ab5d1e7ab9ad612e4 | [
"MIT"
] | 270 | 2015-10-08T19:47:38.000Z | 2022-03-10T04:17:51.000Z | from functools import partial
from graphql.validation import ExecutableDefinitionsRule
from .harness import assert_validation_errors
assert_errors = partial(assert_validation_errors, ExecutableDefinitionsRule)
assert_valid = partial(assert_errors, errors=[])
def describe_validate_executable_definitions():
def with_only_operation():
assert_valid(
"""
query Foo {
dog {
name
}
}
"""
)
def with_operation_and_fragment():
assert_valid(
"""
query Foo {
dog {
name
...Frag
}
}
fragment Frag on Dog {
name
}
"""
)
def with_type_definition():
assert_errors(
"""
query Foo {
dog {
name
}
}
type Cow {
name: String
}
extend type Dog {
color: String
}
""",
[
{
"message": "The 'Cow' definition is not executable.",
"locations": [(8, 13)],
},
{
"message": "The 'Dog' definition is not executable.",
"locations": [(12, 13)],
},
],
)
def with_schema_definition():
assert_errors(
"""
schema {
query: Query
}
type Query {
test: String
}
extend schema @directive
""",
[
{
"message": "The schema definition is not executable.",
"locations": [(2, 13)],
},
{
"message": "The 'Query' definition is not executable.",
"locations": [(6, 13)],
},
{
"message": "The schema definition is not executable.",
"locations": [(10, 13)],
},
],
)
| 23.030928 | 76 | 0.378693 |
4a21c666abea5688cae282487c437a2d7e6a2ec9 | 6,353 | py | Python | heat/engine/resources/openstack/scaling_policy.py | redhat-openstack/heat | 6b9be0a868b857e942c1cc90594d0f3a0d0725d0 | [
"Apache-2.0"
] | null | null | null | heat/engine/resources/openstack/scaling_policy.py | redhat-openstack/heat | 6b9be0a868b857e942c1cc90594d0f3a0d0725d0 | [
"Apache-2.0"
] | null | null | null | heat/engine/resources/openstack/scaling_policy.py | redhat-openstack/heat | 6b9be0a868b857e942c1cc90594d0f3a0d0725d0 | [
"Apache-2.0"
] | null | null | null | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common import exception
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import signal_responder
from heat.scaling import cooldown
from heat.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class AutoScalingPolicy(signal_responder.SignalResponder,
cooldown.CooldownMixin):
"""A resource to manage scaling of `OS::Heat::AutoScalingGroup`.
**Note** while it may incidentally support
`AWS::AutoScaling::AutoScalingGroup` for now, please don't use it for that
purpose and use `AWS::AutoScaling::ScalingPolicy` instead.
"""
PROPERTIES = (
AUTO_SCALING_GROUP_NAME, SCALING_ADJUSTMENT, ADJUSTMENT_TYPE,
COOLDOWN,
) = (
'auto_scaling_group_id', 'scaling_adjustment', 'adjustment_type',
'cooldown',
)
EXACT_CAPACITY, CHANGE_IN_CAPACITY, PERCENT_CHANGE_IN_CAPACITY = (
'exact_capacity', 'change_in_capacity', 'percent_change_in_capacity')
ATTRIBUTES = (
ALARM_URL,
) = (
'alarm_url',
)
properties_schema = {
# TODO(Qiming): property name should be AUTO_SCALING_GROUP_ID
AUTO_SCALING_GROUP_NAME: properties.Schema(
properties.Schema.STRING,
_('AutoScaling group ID to apply policy to.'),
required=True
),
SCALING_ADJUSTMENT: properties.Schema(
properties.Schema.NUMBER,
_('Size of adjustment.'),
required=True,
update_allowed=True
),
ADJUSTMENT_TYPE: properties.Schema(
properties.Schema.STRING,
_('Type of adjustment (absolute or percentage).'),
required=True,
constraints=[
constraints.AllowedValues([CHANGE_IN_CAPACITY,
EXACT_CAPACITY,
PERCENT_CHANGE_IN_CAPACITY]),
],
update_allowed=True
),
COOLDOWN: properties.Schema(
properties.Schema.NUMBER,
_('Cooldown period, in seconds.'),
update_allowed=True
),
}
attributes_schema = {
ALARM_URL: attributes.Schema(
_("A signed url to handle the alarm.")
),
}
def handle_create(self):
super(AutoScalingPolicy, self).handle_create()
self.resource_id_set(self._get_user_id())
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
"""
If Properties has changed, update self.properties, so we get the new
values during any subsequent adjustment.
"""
if prop_diff:
self.properties = json_snippet.properties(self.properties_schema,
self.context)
def _get_adjustement_type(self):
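        # Converts the snake_case property value to CamelCase, e.g.
        # 'percent_change_in_capacity' -> 'PercentChangeInCapacity'.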
adjustment_type = self.properties[self.ADJUSTMENT_TYPE]
return ''.join([t.capitalize() for t in adjustment_type.split('_')])
def handle_signal(self, details=None):
if self.action in (self.SUSPEND, self.DELETE):
msg = _('Cannot signal resource during %s') % self.action
raise Exception(msg)
# ceilometer sends details like this:
# {u'alarm_id': ID, u'previous': u'ok', u'current': u'alarm',
# u'reason': u'...'})
# in this policy we currently assume that this gets called
# only when there is an alarm. But the template writer can
# put the policy in all the alarm notifiers (nodata, and ok).
#
# our watchrule has upper case states so lower() them all.
if details is None:
alarm_state = 'alarm'
else:
alarm_state = details.get('current',
details.get('state', 'alarm')).lower()
LOG.info(_('Alarm %(name)s, new state %(state)s')
% {'name': self.name, 'state': alarm_state})
if alarm_state != 'alarm':
return
if self._cooldown_inprogress():
LOG.info(_("%(name)s NOT performing scaling action, "
"cooldown %(cooldown)s")
% {'name': self.name,
'cooldown': self.properties[self.COOLDOWN]})
return
asgn_id = self.properties[self.AUTO_SCALING_GROUP_NAME]
group = self.stack.resource_by_refid(asgn_id)
try:
if group is None:
raise exception.NotFound(
_('Alarm %(alarm)s could not find '
'scaling group named "%(group)s"') % {
'alarm': self.name, 'group': asgn_id})
LOG.info(_('%(name)s Alarm, adjusting Group %(group)s with id '
'%(asgn_id)s by %(filter)s')
% {'name': self.name, 'group': group.name,
'asgn_id': asgn_id,
'filter': self.properties[self.SCALING_ADJUSTMENT]})
adjustment_type = self._get_adjustement_type()
group.adjust(self.properties[self.SCALING_ADJUSTMENT],
adjustment_type)
finally:
self._cooldown_timestamp(
"%s : %s" %
(self.properties[self.ADJUSTMENT_TYPE],
self.properties[self.SCALING_ADJUSTMENT]))
def _resolve_attribute(self, name):
if name == self.ALARM_URL and self.resource_id is not None:
return unicode(self._get_signed_url())
def FnGetRefId(self):
return resource.Resource.FnGetRefId(self)
def resource_mapping():
return {
'OS::Heat::ScalingPolicy': AutoScalingPolicy,
}
| 36.936047 | 78 | 0.593578 |
4a21c86f99560254758e13220d15cb43352d413e | 8,522 | py | Python | networkapi/plugins/BGP/NXAPI/Generic.py | vinicius-marinho/GloboNetworkAPI | 94651d3b4dd180769bc40ec966814f3427ccfb5b | [
"Apache-2.0"
] | 73 | 2015-04-13T17:56:11.000Z | 2022-03-24T06:13:07.000Z | networkapi/plugins/BGP/NXAPI/Generic.py | leopoldomauricio/GloboNetworkAPI | 3b5b2e336d9eb53b2c113977bfe466b23a50aa29 | [
"Apache-2.0"
] | 99 | 2015-04-03T01:04:46.000Z | 2021-10-03T23:24:48.000Z | networkapi/plugins/BGP/NXAPI/Generic.py | shildenbrand/GloboNetworkAPI | 515d5e961456cee657c08c275faa1b69b7452719 | [
"Apache-2.0"
] | 64 | 2015-08-05T21:26:29.000Z | 2022-03-22T01:06:28.000Z | # -*- coding: utf-8 -*-
import json
import logging
import requests
from requests.auth import HTTPBasicAuth
from requests.exceptions import HTTPError
from networkapi.equipamento.models import EquipamentoAcesso
from networkapi.plugins import exceptions
from networkapi.plugins.BGP.base import BaseBgpPlugin
log = logging.getLogger(__name__)
class NxApiPlugin(BaseBgpPlugin):
"""Plugin base para interação com NX API."""
protocol = 'http'
def __init__(self, **kwargs):
super(NxApiPlugin, self).__init__(**kwargs)
self.equipment_access = self._get_equipment_access()
def create_neighbor(self, kwargs):
commands = []
if kwargs['local_as']:
            cmd = 'router bgp {local_as}'.format(local_as=kwargs['local_as'])
commands.append(cmd)
else:
raise Exception('Local AS is needed.')
if kwargs['vrf']:
cmd = 'vrf {vrf}'.format(vrf=kwargs['vrf'])
commands.append(cmd)
else:
raise Exception('VRF is needed.')
if kwargs['remote_ip']:
if kwargs['remote_as']:
cmd = 'neighbor {remote_ip} remote-as {remote_as}'.format(
remote_ip=kwargs['remote_ip'],
remote_as=kwargs['remote_as'])
commands.append(cmd)
else:
raise Exception('Remote AS is needed.')
else:
raise Exception('Remote Ip is needed.')
if kwargs['description']:
cmd = 'description {description}'.format(
description=kwargs['description'])
commands.append(cmd)
else:
raise Exception('Description is needed.')
cmd = 'dynamic-capability'
if kwargs['virtual_interface']:
cmd = 'update-source {virtual_interface}'.format(
virtual_interface=kwargs['virtual_interface'])
commands.append(cmd)
else:
raise Exception('Interface is needed.')
if kwargs['timers']:
cmd = 'timers {timer_keepalive}'.format(
timer_keepalive=kwargs['timer_keepalive'])
if kwargs['timers']:
cmd += ' {timer_timeout}'.format(
timer_timeout=kwargs['timer_timeout'])
commands.append(cmd)
else:
raise Exception('Timer timeout is needed.')
else:
raise Exception('Keep alive is needed.')
if kwargs['password']:
cmd = 'password {password}'.format(password=kwargs['password'])
commands.append(cmd)
if kwargs['maximum_hops']:
cmd = 'maximum-hops {maximum_hops}'.format(
maximum_hops=kwargs['maximum_hops'])
commands.append(cmd)
        cmd = 'address-family {address_family} unicast'.format(
            address_family=kwargs['address_family'])
        commands.append(cmd)
        if kwargs['route_map_in']:
            cmd = 'route-map {route_map_in} in'.format(
                route_map_in=kwargs['route_map_in'])
            commands.append(cmd)
        if kwargs['route_map_out']:
            cmd = 'route-map {route_map_out} out'.format(
                route_map_out=kwargs['route_map_out'])
            commands.append(cmd)
if kwargs['community']:
cmd = 'send-community both'
commands.append(cmd)
if kwargs['remove_private_as']:
cmd = 'remove-private-as'
commands.append(cmd)
if kwargs['next_hop_self']:
cmd = 'next-hop-self'
commands.append(cmd)
cmd = 'next-hop-third-party'
if kwargs['soft_reconfiguration']:
cmd = 'soft-reconfiguration inbound'
commands.append(cmd)
payload = json.dumps(self._contruct(commands))
self._request(data=payload,
contentType='json-rpc', path='ins')
def delete_neighbor(self, kwargs):
commands = []
if kwargs['local_as']:
            cmd = 'router bgp {local_as}'.format(local_as=kwargs['local_as'])
commands.append(cmd)
else:
raise Exception('Local AS is needed.')
if kwargs['vrf']:
cmd = 'vrf {vrf}'.format(vrf=kwargs['vrf'])
commands.append(cmd)
else:
raise Exception('VRF is needed.')
if kwargs['remote_ip']:
if kwargs['remote_as']:
cmd = 'no neighbor {remote_ip} remote-as {remote_as}'.format(
remote_ip=kwargs['remote_ip'],
remote_as=kwargs['remote_as'])
commands.append(cmd)
else:
raise Exception('Remote AS is needed.')
else:
raise Exception('Remote Ip is needed.')
payload = json.dumps(self._contruct(commands))
self._request(data=payload,
contentType='json-rpc', path='ins')
def _contruct(self, commands):
payload = list()
for index, command in enumerate(commands):
payload.append({
'jsonrpc': '2.0',
'method': 'cli_ascii',
'params': {
'cmd': command,
'version': 1.2
},
'id': index
})
return payload
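    # Illustrative example: _contruct(['router bgp 65000', 'vrf Vrf-Test'])
    # (command strings are placeholders) produces a JSON-RPC batch such as:
    # [{'jsonrpc': '2.0', 'method': 'cli_ascii',
    #   'params': {'cmd': 'router bgp 65000', 'version': 1.2}, 'id': 0},
    #  {'jsonrpc': '2.0', 'method': 'cli_ascii',
    #   'params': {'cmd': 'vrf Vrf-Test', 'version': 1.2}, 'id': 1}]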
def _request(self, **kwargs):
# Params and default values
        params = {
            'method': 'POST',
            'path': '',
            'data': None,
            'contentType': 'json-rpc',
            'verify': False
        }
# Setting params via kwargs or use the defaults
for param in params:
if param in kwargs:
params[param] = kwargs.get(param)
headers = self._get_headers(content_type=params['contentType'])
uri = self._get_uri(path=params['path'])
log.info(
'Starting {method} request to NX-API {equipment} at {uri}. \
Data to be sent: {data}'.format(
method=params['method'], equipment=self.equipment.nome,
uri=uri, data=params['data']))
try:
# Raises AttributeError if method is not valid
request = requests.post(
uri,
auth=self._get_auth(),
headers=headers,
verify=params['verify'],
data=params['data']
)
request.raise_for_status()
try:
return json.loads(request.text)
except:
return
except HTTPError:
try:
response = json.loads(request.text)
for error in response['errors']['error']:
log.error(error['error-message'])
except:
log.error('Unknown error during request to NX-API')
raise HTTPError(request.status_code)
def _get_auth(self):
return self._basic_auth()
def _basic_auth(self):
return HTTPBasicAuth(
self.equipment_access.user,
self.equipment_access.password
)
def _get_host(self):
if not hasattr(self, 'host') or self.host is None:
if not isinstance(self.equipment_access, EquipamentoAcesso):
log.error(
'No fqdn could be found for equipment {equipment}.'.format(
equipment=self.equipment.nome))
raise exceptions.InvalidEquipmentAccessException()
self.host = self.equipment_access.fqdn.strip()
if self.host.find('://') < 0:
self.host = self.protocol + '://' + self.host
return self.host
def _get_uri(self, host=None, path='ins'):
if host is None:
host = self._get_host()
host = host.strip()
path = path.strip()
if host[len(host) - 1] == '/':
host = host[0:len(host) - 1]
if path[0] == '/':
path = path[1:len(path)]
self.uri = host + '/' + path
return self.uri
def _get_headers(self, content_type):
types = {
'json-rpc': 'application/json-rpc'
}
return {'content-type': types[content_type]}
def _get_equipment_access(self):
try:
return EquipamentoAcesso.search(
None, self.equipment, 'http').uniqueResult()
except Exception:
log.error('Access type %s not found for equipment %s.' %
('http', self.equipment.nome))
raise exceptions.InvalidEquipmentAccessException()
| 30.219858 | 79 | 0.538019 |
4a21c96c261b0a488f73bc5deaf1d6a95c0f2d1b | 1,490 | py | Python | PLADD/Markov.py | tgieseking/Power-Grid-PLADD-Model | a4262bc2bef45362b31f976cdfb72b54ba05764d | [
"MIT"
] | null | null | null | PLADD/Markov.py | tgieseking/Power-Grid-PLADD-Model | a4262bc2bef45362b31f976cdfb72b54ba05764d | [
"MIT"
] | null | null | null | PLADD/Markov.py | tgieseking/Power-Grid-PLADD-Model | a4262bc2bef45362b31f976cdfb72b54ba05764d | [
"MIT"
] | null | null | null | import random
import numpy as np
class MarkovComponent:
# a Markov Chain.
def __init__(self, transient_matrix, absorbing_matrix, outputs, start_index = 0):
self.transient_matrix = transient_matrix
# The transition matrix between transient nodes
self.absorbing_matrix = absorbing_matrix
# The transition matrix from transient to absorbing nodes
self.outputs = outputs
# What the component should output when it ends on each absorbing node
self.start_index = start_index
self.final_state_probs = self.calculate_final_state_probs()
# Precompute the probabilities of reaching each final state
def run_simulation(self):
current_node = self.start_node
while(not current_node.absorbing):
current_node = current_node.next_node()
return current_node.output
def advance_timestep(self):
num_absorbing = self.final_state_probs.size
final_state_index = np.random.choice(num_absorbing, p=self.final_state_probs)
self.current_output = self.outputs[final_state_index]
def calculate_final_state_probs(self):
num_transient, num_absorbing = self.absorbing_matrix.shape
start_probabilies = np.zeros(num_transient)
start_probabilies[self.start_index] = 1.0
final_state_probs = self.absorbing_matrix.T @ np.linalg.solve(np.eye(num_transient) - self.transient_matrix.T, start_probabilies)
return final_state_probs
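# Illustrative usage sketch (not part of the original module); the matrices are
# made-up numbers for a chain with two transient and two absorbing states, where
# each transient row plus its corresponding absorbing row sums to 1:
#   transient = np.array([[0.0, 0.5], [0.0, 0.0]])
#   absorbing = np.array([[0.5, 0.0], [0.3, 0.7]])
#   chain = MarkovComponent(transient, absorbing, outputs=["fail", "success"])
#   chain.advance_timestep()  # sets chain.current_output to "fail" or "success"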
| 40.27027 | 137 | 0.724832 |
4a21cabc764334b166ca3eeecea8cd433ada54ff | 999 | py | Python | tests/torchutils/logger/test_logger.py | lzcn/torchutils | 8dc78ddcde72f27758e9774f3d1f5f6172e1a5e9 | [
"MIT"
] | 2 | 2021-01-15T03:13:46.000Z | 2021-04-20T16:20:52.000Z | tests/torchutils/logger/test_logger.py | lzcn/torchutils | 8dc78ddcde72f27758e9774f3d1f5f6172e1a5e9 | [
"MIT"
] | null | null | null | tests/torchutils/logger/test_logger.py | lzcn/torchutils | 8dc78ddcde72f27758e9774f3d1f5f6172e1a5e9 | [
"MIT"
] | null | null | null | import logging
import pytest
import torchutils
LEVELS = ["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"]
LOGGER = logging.getLogger("main")
def test_stream_logger(capsys):
torchutils.logger.config()
LOGGER.critical("")
captured = capsys.readouterr()
assert "main" in captured.err
def test_file_logger(tmp_path):
d = tmp_path / "log"
d.mkdir()
p = d / "file.log"
torchutils.logger.config(log_file=p)
LOGGER.critical("")
captured = p.read_text()
assert "main" in captured
@pytest.mark.parametrize("level", LEVELS)
def test_logger_level(capsys, level):
torchutils.logger.config(level=level)
LOGGER.critical("")
LOGGER.error("")
LOGGER.warning("")
LOGGER.info("")
LOGGER.debug("")
captured = capsys.readouterr()
value = logging.getLevelName(level)
for name in LEVELS:
if logging.getLevelName(name) < value:
assert name not in captured.err
else:
assert name in captured.err
| 23.232558 | 58 | 0.654655 |
4a21cae50d38dbe0f047d97e69ba8c562017e720 | 59,861 | py | Python | psutil/tests/test_process.py | leokhoa/psutil | 978296429c3eac20f25e6dff7c2e2ab59327221e | [
"BSD-3-Clause"
] | 1 | 2020-07-27T09:45:55.000Z | 2020-07-27T09:45:55.000Z | psutil/tests/test_process.py | leokhoa/psutil | 978296429c3eac20f25e6dff7c2e2ab59327221e | [
"BSD-3-Clause"
] | null | null | null | psutil/tests/test_process.py | leokhoa/psutil | 978296429c3eac20f25e6dff7c2e2ab59327221e | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for psutil.Process class."""
import collections
import errno
import getpass
import itertools
import os
import signal
import socket
import subprocess
import sys
import textwrap
import time
import types
import psutil
from psutil import AIX
from psutil import BSD
from psutil import LINUX
from psutil import MACOS
from psutil import NETBSD
from psutil import OPENBSD
from psutil import OSX
from psutil import POSIX
from psutil import SUNOS
from psutil import WINDOWS
from psutil._common import open_text
from psutil._compat import long
from psutil._compat import PY3
from psutil._compat import super
from psutil.tests import APPVEYOR
from psutil.tests import call_until
from psutil.tests import CI_TESTING
from psutil.tests import CIRRUS
from psutil.tests import copyload_shared_lib
from psutil.tests import create_exe
from psutil.tests import GITHUB_WHEELS
from psutil.tests import GLOBAL_TIMEOUT
from psutil.tests import HAS_CPU_AFFINITY
from psutil.tests import HAS_ENVIRON
from psutil.tests import HAS_IONICE
from psutil.tests import HAS_MEMORY_MAPS
from psutil.tests import HAS_PROC_CPU_NUM
from psutil.tests import HAS_PROC_IO_COUNTERS
from psutil.tests import HAS_RLIMIT
from psutil.tests import HAS_THREADS
from psutil.tests import mock
from psutil.tests import process_namespace
from psutil.tests import PsutilTestCase
from psutil.tests import PYPY
from psutil.tests import PYTHON_EXE
from psutil.tests import reap_children
from psutil.tests import retry_on_failure
from psutil.tests import sh
from psutil.tests import skip_on_access_denied
from psutil.tests import skip_on_not_implemented
from psutil.tests import ThreadTask
from psutil.tests import TRAVIS
from psutil.tests import unittest
from psutil.tests import wait_for_pid
# ===================================================================
# --- psutil.Process class tests
# ===================================================================
class TestProcess(PsutilTestCase):
"""Tests for psutil.Process class."""
def spawn_psproc(self, *args, **kwargs):
sproc = self.spawn_testproc(*args, **kwargs)
return psutil.Process(sproc.pid)
# ---
def test_pid(self):
p = psutil.Process()
self.assertEqual(p.pid, os.getpid())
with self.assertRaises(AttributeError):
p.pid = 33
def test_kill(self):
p = self.spawn_psproc()
p.kill()
code = p.wait()
if WINDOWS:
self.assertEqual(code, signal.SIGTERM)
else:
self.assertEqual(code, -signal.SIGKILL)
self.assertProcessGone(p)
def test_terminate(self):
p = self.spawn_psproc()
p.terminate()
code = p.wait()
if WINDOWS:
self.assertEqual(code, signal.SIGTERM)
else:
self.assertEqual(code, -signal.SIGTERM)
self.assertProcessGone(p)
def test_send_signal(self):
sig = signal.SIGKILL if POSIX else signal.SIGTERM
p = self.spawn_psproc()
p.send_signal(sig)
code = p.wait()
if WINDOWS:
self.assertEqual(code, sig)
else:
self.assertEqual(code, -sig)
self.assertProcessGone(p)
@unittest.skipIf(not POSIX, "not POSIX")
def test_send_signal_mocked(self):
sig = signal.SIGTERM
p = self.spawn_psproc()
with mock.patch('psutil.os.kill',
side_effect=OSError(errno.ESRCH, "")):
self.assertRaises(psutil.NoSuchProcess, p.send_signal, sig)
p = self.spawn_psproc()
with mock.patch('psutil.os.kill',
side_effect=OSError(errno.EPERM, "")):
self.assertRaises(psutil.AccessDenied, p.send_signal, sig)
def test_wait_exited(self):
# Test waitpid() + WIFEXITED -> WEXITSTATUS.
# normal return, same as exit(0)
cmd = [PYTHON_EXE, "-c", "pass"]
p = self.spawn_psproc(cmd)
code = p.wait()
self.assertEqual(code, 0)
self.assertProcessGone(p)
# exit(1), implicit in case of error
cmd = [PYTHON_EXE, "-c", "1 / 0"]
p = self.spawn_psproc(cmd, stderr=subprocess.PIPE)
code = p.wait()
self.assertEqual(code, 1)
self.assertProcessGone(p)
# via sys.exit()
cmd = [PYTHON_EXE, "-c", "import sys; sys.exit(5);"]
p = self.spawn_psproc(cmd)
code = p.wait()
self.assertEqual(code, 5)
self.assertProcessGone(p)
# via os._exit()
cmd = [PYTHON_EXE, "-c", "import os; os._exit(5);"]
p = self.spawn_psproc(cmd)
code = p.wait()
self.assertEqual(code, 5)
self.assertProcessGone(p)
def test_wait_stopped(self):
p = self.spawn_psproc()
if POSIX:
# Test waitpid() + WIFSTOPPED and WIFCONTINUED.
# Note: if a process is stopped it ignores SIGTERM.
p.send_signal(signal.SIGSTOP)
self.assertRaises(psutil.TimeoutExpired, p.wait, timeout=0.001)
p.send_signal(signal.SIGCONT)
self.assertRaises(psutil.TimeoutExpired, p.wait, timeout=0.001)
p.send_signal(signal.SIGTERM)
self.assertEqual(p.wait(), -signal.SIGTERM)
self.assertEqual(p.wait(), -signal.SIGTERM)
else:
p.suspend()
self.assertRaises(psutil.TimeoutExpired, p.wait, timeout=0.001)
p.resume()
self.assertRaises(psutil.TimeoutExpired, p.wait, timeout=0.001)
p.terminate()
self.assertEqual(p.wait(), signal.SIGTERM)
self.assertEqual(p.wait(), signal.SIGTERM)
def test_wait_non_children(self):
# Test wait() against a process which is not our direct
# child.
child, grandchild = self.spawn_children_pair()
self.assertRaises(psutil.TimeoutExpired, child.wait, 0.01)
self.assertRaises(psutil.TimeoutExpired, grandchild.wait, 0.01)
# We also terminate the direct child otherwise the
# grandchild will hang until the parent is gone.
child.terminate()
grandchild.terminate()
child_ret = child.wait()
grandchild_ret = grandchild.wait()
if POSIX:
self.assertEqual(child_ret, -signal.SIGTERM)
# For processes which are not our children we're supposed
# to get None.
self.assertEqual(grandchild_ret, None)
else:
self.assertEqual(child_ret, signal.SIGTERM)
self.assertEqual(child_ret, signal.SIGTERM)
def test_wait_timeout(self):
p = self.spawn_psproc()
p.name()
self.assertRaises(psutil.TimeoutExpired, p.wait, 0.01)
self.assertRaises(psutil.TimeoutExpired, p.wait, 0)
self.assertRaises(ValueError, p.wait, -1)
def test_wait_timeout_nonblocking(self):
p = self.spawn_psproc()
self.assertRaises(psutil.TimeoutExpired, p.wait, 0)
p.kill()
stop_at = time.time() + GLOBAL_TIMEOUT
while time.time() < stop_at:
try:
code = p.wait(0)
break
except psutil.TimeoutExpired:
pass
else:
raise self.fail('timeout')
if POSIX:
self.assertEqual(code, -signal.SIGKILL)
else:
self.assertEqual(code, signal.SIGTERM)
self.assertProcessGone(p)
def test_cpu_percent(self):
p = psutil.Process()
p.cpu_percent(interval=0.001)
p.cpu_percent(interval=0.001)
for x in range(100):
percent = p.cpu_percent(interval=None)
self.assertIsInstance(percent, float)
self.assertGreaterEqual(percent, 0.0)
with self.assertRaises(ValueError):
p.cpu_percent(interval=-1)
def test_cpu_percent_numcpus_none(self):
# See: https://github.com/giampaolo/psutil/issues/1087
with mock.patch('psutil.cpu_count', return_value=None) as m:
psutil.Process().cpu_percent()
assert m.called
def test_cpu_times(self):
times = psutil.Process().cpu_times()
assert (times.user > 0.0) or (times.system > 0.0), times
assert (times.children_user >= 0.0), times
assert (times.children_system >= 0.0), times
if LINUX:
assert times.iowait >= 0.0, times
# make sure returned values can be pretty printed with strftime
for name in times._fields:
time.strftime("%H:%M:%S", time.localtime(getattr(times, name)))
def test_cpu_times_2(self):
user_time, kernel_time = psutil.Process().cpu_times()[:2]
utime, ktime = os.times()[:2]
# Use os.times()[:2] as base values to compare our results
# using a tolerance of +/- 0.1 seconds.
# It will fail if the difference between the values is > 0.1s.
if (max([user_time, utime]) - min([user_time, utime])) > 0.1:
self.fail("expected: %s, found: %s" % (utime, user_time))
if (max([kernel_time, ktime]) - min([kernel_time, ktime])) > 0.1:
self.fail("expected: %s, found: %s" % (ktime, kernel_time))
@unittest.skipIf(not HAS_PROC_CPU_NUM, "not supported")
def test_cpu_num(self):
p = psutil.Process()
num = p.cpu_num()
self.assertGreaterEqual(num, 0)
if psutil.cpu_count() == 1:
self.assertEqual(num, 0)
self.assertIn(p.cpu_num(), range(psutil.cpu_count()))
def test_create_time(self):
p = self.spawn_psproc()
now = time.time()
create_time = p.create_time()
# Use time.time() as base value to compare our result using a
# tolerance of +/- 1 second.
# It will fail if the difference between the values is > 2s.
difference = abs(create_time - now)
if difference > 2:
self.fail("expected: %s, found: %s, difference: %s"
% (now, create_time, difference))
# make sure returned value can be pretty printed with strftime
time.strftime("%Y %m %d %H:%M:%S", time.localtime(p.create_time()))
@unittest.skipIf(not POSIX, 'POSIX only')
@unittest.skipIf(TRAVIS or CIRRUS, 'not reliable on TRAVIS/CIRRUS')
def test_terminal(self):
terminal = psutil.Process().terminal()
if sys.stdout.isatty():
tty = os.path.realpath(sh('tty'))
self.assertEqual(terminal, tty)
else:
self.assertIsNone(terminal)
@unittest.skipIf(not HAS_PROC_IO_COUNTERS, 'not supported')
@skip_on_not_implemented(only_if=LINUX)
def test_io_counters(self):
p = psutil.Process()
# test reads
io1 = p.io_counters()
with open(PYTHON_EXE, 'rb') as f:
f.read()
io2 = p.io_counters()
if not BSD and not AIX:
self.assertGreater(io2.read_count, io1.read_count)
self.assertEqual(io2.write_count, io1.write_count)
if LINUX:
self.assertGreater(io2.read_chars, io1.read_chars)
self.assertEqual(io2.write_chars, io1.write_chars)
else:
self.assertGreaterEqual(io2.read_bytes, io1.read_bytes)
self.assertGreaterEqual(io2.write_bytes, io1.write_bytes)
# test writes
io1 = p.io_counters()
with open(self.get_testfn(), 'wb') as f:
if PY3:
f.write(bytes("x" * 1000000, 'ascii'))
else:
f.write("x" * 1000000)
io2 = p.io_counters()
self.assertGreaterEqual(io2.write_count, io1.write_count)
self.assertGreaterEqual(io2.write_bytes, io1.write_bytes)
self.assertGreaterEqual(io2.read_count, io1.read_count)
self.assertGreaterEqual(io2.read_bytes, io1.read_bytes)
if LINUX:
self.assertGreater(io2.write_chars, io1.write_chars)
self.assertGreaterEqual(io2.read_chars, io1.read_chars)
# sanity check
for i in range(len(io2)):
if BSD and i >= 2:
# On BSD read_bytes and write_bytes are always set to -1.
continue
self.assertGreaterEqual(io2[i], 0)
self.assertGreaterEqual(io2[i], 0)
@unittest.skipIf(not HAS_IONICE, "not supported")
@unittest.skipIf(not LINUX, "linux only")
def test_ionice_linux(self):
p = psutil.Process()
if not CI_TESTING:
self.assertEqual(p.ionice()[0], psutil.IOPRIO_CLASS_NONE)
self.assertEqual(psutil.IOPRIO_CLASS_NONE, 0)
self.assertEqual(psutil.IOPRIO_CLASS_RT, 1) # high
self.assertEqual(psutil.IOPRIO_CLASS_BE, 2) # normal
self.assertEqual(psutil.IOPRIO_CLASS_IDLE, 3) # low
init = p.ionice()
try:
# low
p.ionice(psutil.IOPRIO_CLASS_IDLE)
self.assertEqual(tuple(p.ionice()), (psutil.IOPRIO_CLASS_IDLE, 0))
with self.assertRaises(ValueError): # accepts no value
p.ionice(psutil.IOPRIO_CLASS_IDLE, value=7)
# normal
p.ionice(psutil.IOPRIO_CLASS_BE)
self.assertEqual(tuple(p.ionice()), (psutil.IOPRIO_CLASS_BE, 0))
p.ionice(psutil.IOPRIO_CLASS_BE, value=7)
self.assertEqual(tuple(p.ionice()), (psutil.IOPRIO_CLASS_BE, 7))
with self.assertRaises(ValueError):
p.ionice(psutil.IOPRIO_CLASS_BE, value=8)
try:
p.ionice(psutil.IOPRIO_CLASS_RT, value=7)
except psutil.AccessDenied:
pass
# errs
self.assertRaisesRegex(
ValueError, "ioclass accepts no value",
p.ionice, psutil.IOPRIO_CLASS_NONE, 1)
self.assertRaisesRegex(
ValueError, "ioclass accepts no value",
p.ionice, psutil.IOPRIO_CLASS_IDLE, 1)
self.assertRaisesRegex(
ValueError, "'ioclass' argument must be specified",
p.ionice, value=1)
finally:
ioclass, value = init
if ioclass == psutil.IOPRIO_CLASS_NONE:
value = 0
p.ionice(ioclass, value)
@unittest.skipIf(not HAS_IONICE, "not supported")
@unittest.skipIf(not WINDOWS, 'not supported on this win version')
def test_ionice_win(self):
p = psutil.Process()
if not CI_TESTING:
self.assertEqual(p.ionice(), psutil.IOPRIO_NORMAL)
init = p.ionice()
try:
# base
p.ionice(psutil.IOPRIO_VERYLOW)
self.assertEqual(p.ionice(), psutil.IOPRIO_VERYLOW)
p.ionice(psutil.IOPRIO_LOW)
self.assertEqual(p.ionice(), psutil.IOPRIO_LOW)
try:
p.ionice(psutil.IOPRIO_HIGH)
except psutil.AccessDenied:
pass
else:
self.assertEqual(p.ionice(), psutil.IOPRIO_HIGH)
# errs
self.assertRaisesRegex(
TypeError, "value argument not accepted on Windows",
p.ionice, psutil.IOPRIO_NORMAL, value=1)
self.assertRaisesRegex(
ValueError, "is not a valid priority",
p.ionice, psutil.IOPRIO_HIGH + 1)
finally:
p.ionice(init)
@unittest.skipIf(not HAS_RLIMIT, "not supported")
def test_rlimit_get(self):
import resource
p = psutil.Process(os.getpid())
names = [x for x in dir(psutil) if x.startswith('RLIMIT')]
assert names, names
for name in names:
value = getattr(psutil, name)
self.assertGreaterEqual(value, 0)
if name in dir(resource):
self.assertEqual(value, getattr(resource, name))
# XXX - On PyPy RLIMIT_INFINITY returned by
# resource.getrlimit() is reported as a very big long
# number instead of -1. It looks like a bug with PyPy.
if PYPY:
continue
self.assertEqual(p.rlimit(value), resource.getrlimit(value))
else:
ret = p.rlimit(value)
self.assertEqual(len(ret), 2)
self.assertGreaterEqual(ret[0], -1)
self.assertGreaterEqual(ret[1], -1)
@unittest.skipIf(not HAS_RLIMIT, "not supported")
def test_rlimit_set(self):
p = self.spawn_psproc()
p.rlimit(psutil.RLIMIT_NOFILE, (5, 5))
self.assertEqual(p.rlimit(psutil.RLIMIT_NOFILE), (5, 5))
# If pid is 0 prlimit() applies to the calling process and
# we don't want that.
with self.assertRaises(ValueError):
psutil._psplatform.Process(0).rlimit(0)
with self.assertRaises(ValueError):
p.rlimit(psutil.RLIMIT_NOFILE, (5, 5, 5))
@unittest.skipIf(not HAS_RLIMIT, "not supported")
def test_rlimit(self):
p = psutil.Process()
testfn = self.get_testfn()
soft, hard = p.rlimit(psutil.RLIMIT_FSIZE)
try:
p.rlimit(psutil.RLIMIT_FSIZE, (1024, hard))
with open(testfn, "wb") as f:
f.write(b"X" * 1024)
# write() or flush() doesn't always cause the exception
# but close() will.
with self.assertRaises(IOError) as exc:
with open(testfn, "wb") as f:
f.write(b"X" * 1025)
self.assertEqual(exc.exception.errno if PY3 else exc.exception[0],
errno.EFBIG)
finally:
p.rlimit(psutil.RLIMIT_FSIZE, (soft, hard))
self.assertEqual(p.rlimit(psutil.RLIMIT_FSIZE), (soft, hard))
@unittest.skipIf(not HAS_RLIMIT, "not supported")
def test_rlimit_infinity(self):
# First set a limit, then re-set it by specifying INFINITY
# and assume we overridden the previous limit.
p = psutil.Process()
soft, hard = p.rlimit(psutil.RLIMIT_FSIZE)
try:
p.rlimit(psutil.RLIMIT_FSIZE, (1024, hard))
p.rlimit(psutil.RLIMIT_FSIZE, (psutil.RLIM_INFINITY, hard))
with open(self.get_testfn(), "wb") as f:
f.write(b"X" * 2048)
finally:
p.rlimit(psutil.RLIMIT_FSIZE, (soft, hard))
self.assertEqual(p.rlimit(psutil.RLIMIT_FSIZE), (soft, hard))
@unittest.skipIf(not HAS_RLIMIT, "not supported")
def test_rlimit_infinity_value(self):
# RLIMIT_FSIZE should be RLIM_INFINITY, which will be a really
# big number on a platform with large file support. On these
# platforms we need to test that the get/setrlimit functions
# properly convert the number to a C long long and that the
# conversion doesn't raise an error.
p = psutil.Process()
soft, hard = p.rlimit(psutil.RLIMIT_FSIZE)
self.assertEqual(psutil.RLIM_INFINITY, hard)
p.rlimit(psutil.RLIMIT_FSIZE, (soft, hard))
def test_num_threads(self):
# on certain platforms such as Linux we might test for exact
# thread number, since we always have with 1 thread per process,
# but this does not apply across all platforms (MACOS, Windows)
p = psutil.Process()
if OPENBSD:
try:
step1 = p.num_threads()
except psutil.AccessDenied:
raise unittest.SkipTest("on OpenBSD this requires root access")
else:
step1 = p.num_threads()
with ThreadTask():
step2 = p.num_threads()
self.assertEqual(step2, step1 + 1)
@unittest.skipIf(not WINDOWS, 'WINDOWS only')
def test_num_handles(self):
# a better test is done later into test/_windows.py
p = psutil.Process()
self.assertGreater(p.num_handles(), 0)
@unittest.skipIf(not HAS_THREADS, 'not supported')
def test_threads(self):
p = psutil.Process()
if OPENBSD:
try:
step1 = p.threads()
except psutil.AccessDenied:
raise unittest.SkipTest("on OpenBSD this requires root access")
else:
step1 = p.threads()
with ThreadTask():
step2 = p.threads()
self.assertEqual(len(step2), len(step1) + 1)
athread = step2[0]
# test named tuple
self.assertEqual(athread.id, athread[0])
self.assertEqual(athread.user_time, athread[1])
self.assertEqual(athread.system_time, athread[2])
@retry_on_failure()
@skip_on_access_denied(only_if=MACOS)
@unittest.skipIf(not HAS_THREADS, 'not supported')
def test_threads_2(self):
p = self.spawn_psproc()
if OPENBSD:
try:
p.threads()
except psutil.AccessDenied:
raise unittest.SkipTest(
"on OpenBSD this requires root access")
self.assertAlmostEqual(
p.cpu_times().user,
sum([x.user_time for x in p.threads()]), delta=0.1)
self.assertAlmostEqual(
p.cpu_times().system,
sum([x.system_time for x in p.threads()]), delta=0.1)
@retry_on_failure()
def test_memory_info(self):
p = psutil.Process()
# step 1 - get a base value to compare our results
rss1, vms1 = p.memory_info()[:2]
percent1 = p.memory_percent()
self.assertGreater(rss1, 0)
self.assertGreater(vms1, 0)
# step 2 - allocate some memory
memarr = [None] * 1500000
rss2, vms2 = p.memory_info()[:2]
percent2 = p.memory_percent()
# step 3 - make sure that the memory usage bumped up
self.assertGreater(rss2, rss1)
self.assertGreaterEqual(vms2, vms1) # vms might be equal
self.assertGreater(percent2, percent1)
del memarr
if WINDOWS:
mem = p.memory_info()
self.assertEqual(mem.rss, mem.wset)
self.assertEqual(mem.vms, mem.pagefile)
mem = p.memory_info()
for name in mem._fields:
self.assertGreaterEqual(getattr(mem, name), 0)
def test_memory_full_info(self):
p = psutil.Process()
total = psutil.virtual_memory().total
mem = p.memory_full_info()
for name in mem._fields:
value = getattr(mem, name)
self.assertGreaterEqual(value, 0, msg=(name, value))
if name == 'vms' and OSX or LINUX:
continue
self.assertLessEqual(value, total, msg=(name, value, total))
if LINUX or WINDOWS or MACOS:
self.assertGreaterEqual(mem.uss, 0)
if LINUX:
self.assertGreaterEqual(mem.pss, 0)
self.assertGreaterEqual(mem.swap, 0)
@unittest.skipIf(not HAS_MEMORY_MAPS, "not supported")
def test_memory_maps(self):
p = psutil.Process()
maps = p.memory_maps()
paths = [x for x in maps]
self.assertEqual(len(paths), len(set(paths)))
ext_maps = p.memory_maps(grouped=False)
for nt in maps:
if not nt.path.startswith('['):
assert os.path.isabs(nt.path), nt.path
if POSIX:
try:
assert os.path.exists(nt.path) or \
os.path.islink(nt.path), nt.path
except AssertionError:
if not LINUX:
raise
else:
# https://github.com/giampaolo/psutil/issues/759
with open_text('/proc/self/smaps') as f:
data = f.read()
if "%s (deleted)" % nt.path not in data:
raise
else:
# XXX - On Windows we have this strange behavior with
# 64 bit dlls: they are visible via explorer but cannot
# be accessed via os.stat() (wtf?).
if '64' not in os.path.basename(nt.path):
assert os.path.exists(nt.path), nt.path
for nt in ext_maps:
for fname in nt._fields:
value = getattr(nt, fname)
if fname == 'path':
continue
elif fname in ('addr', 'perms'):
assert value, value
else:
self.assertIsInstance(value, (int, long))
assert value >= 0, value
@unittest.skipIf(not HAS_MEMORY_MAPS, "not supported")
def test_memory_maps_lists_lib(self):
# Make sure a newly loaded shared lib is listed.
p = psutil.Process()
with copyload_shared_lib() as path:
def normpath(p):
return os.path.realpath(os.path.normcase(p))
libpaths = [normpath(x.path)
for x in p.memory_maps()]
self.assertIn(normpath(path), libpaths)
def test_memory_percent(self):
p = psutil.Process()
p.memory_percent()
self.assertRaises(ValueError, p.memory_percent, memtype="?!?")
if LINUX or MACOS or WINDOWS:
p.memory_percent(memtype='uss')
def test_is_running(self):
p = self.spawn_psproc()
assert p.is_running()
assert p.is_running()
p.kill()
p.wait()
assert not p.is_running()
assert not p.is_running()
def test_exe(self):
p = self.spawn_psproc()
exe = p.exe()
try:
self.assertEqual(exe, PYTHON_EXE)
except AssertionError:
if WINDOWS and len(exe) == len(PYTHON_EXE):
# on Windows we don't care about case sensitivity
normcase = os.path.normcase
self.assertEqual(normcase(exe), normcase(PYTHON_EXE))
else:
# certain platforms such as BSD are more accurate returning:
# "/usr/local/bin/python2.7"
# ...instead of:
# "/usr/local/bin/python"
# We do not want to consider this difference in accuracy
# an error.
ver = "%s.%s" % (sys.version_info[0], sys.version_info[1])
try:
self.assertEqual(exe.replace(ver, ''),
PYTHON_EXE.replace(ver, ''))
except AssertionError:
                    # Typically MACOS. Really not sure what to do here.
pass
out = sh([exe, "-c", "import os; print('hey')"])
self.assertEqual(out, 'hey')
def test_cmdline(self):
cmdline = [PYTHON_EXE, "-c", "import time; time.sleep(60)"]
p = self.spawn_psproc(cmdline)
try:
self.assertEqual(' '.join(p.cmdline()), ' '.join(cmdline))
except AssertionError:
# XXX - most of the times the underlying sysctl() call on Net
# and Open BSD returns a truncated string.
# Also /proc/pid/cmdline behaves the same so it looks
# like this is a kernel bug.
# XXX - AIX truncates long arguments in /proc/pid/cmdline
if NETBSD or OPENBSD or AIX:
self.assertEqual(p.cmdline()[0], PYTHON_EXE)
else:
raise
@unittest.skipIf(PYPY, "broken on PYPY")
def test_long_cmdline(self):
testfn = self.get_testfn()
create_exe(testfn)
cmdline = [testfn] + (["0123456789"] * 20)
p = self.spawn_psproc(cmdline)
self.assertEqual(p.cmdline(), cmdline)
def test_name(self):
p = self.spawn_psproc(PYTHON_EXE)
name = p.name().lower()
pyexe = os.path.basename(os.path.realpath(sys.executable)).lower()
assert pyexe.startswith(name), (pyexe, name)
@unittest.skipIf(PYPY, "unreliable on PYPY")
def test_long_name(self):
testfn = self.get_testfn(suffix="0123456789" * 2)
create_exe(testfn)
p = self.spawn_psproc(testfn)
self.assertEqual(p.name(), os.path.basename(testfn))
# XXX
@unittest.skipIf(SUNOS, "broken on SUNOS")
@unittest.skipIf(AIX, "broken on AIX")
@unittest.skipIf(PYPY, "broken on PYPY")
def test_prog_w_funky_name(self):
# Test that name(), exe() and cmdline() correctly handle programs
# with funky chars such as spaces and ")", see:
# https://github.com/giampaolo/psutil/issues/628
funky_path = self.get_testfn(suffix='foo bar )')
create_exe(funky_path)
cmdline = [funky_path, "-c",
"import time; [time.sleep(0.01) for x in range(3000)];"
"arg1", "arg2", "", "arg3", ""]
p = self.spawn_psproc(cmdline)
# ...in order to try to prevent occasional failures on travis
if TRAVIS:
wait_for_pid(p.pid)
self.assertEqual(p.cmdline(), cmdline)
self.assertEqual(p.name(), os.path.basename(funky_path))
self.assertEqual(os.path.normcase(p.exe()),
os.path.normcase(funky_path))
@unittest.skipIf(not POSIX, 'POSIX only')
def test_uids(self):
p = psutil.Process()
real, effective, saved = p.uids()
# os.getuid() refers to "real" uid
self.assertEqual(real, os.getuid())
# os.geteuid() refers to "effective" uid
self.assertEqual(effective, os.geteuid())
# No such thing as os.getsuid() ("saved" uid), but starting
# from python 2.7 we have os.getresuid() which returns all
# of them.
if hasattr(os, "getresuid"):
self.assertEqual(os.getresuid(), p.uids())
@unittest.skipIf(not POSIX, 'POSIX only')
def test_gids(self):
p = psutil.Process()
real, effective, saved = p.gids()
        # os.getgid() refers to "real" gid
self.assertEqual(real, os.getgid())
        # os.getegid() refers to "effective" gid
self.assertEqual(effective, os.getegid())
# No such thing as os.getsgid() ("saved" gid), but starting
# from python 2.7 we have os.getresgid() which returns all
# of them.
if hasattr(os, "getresuid"):
self.assertEqual(os.getresgid(), p.gids())
def test_nice(self):
p = psutil.Process()
self.assertRaises(TypeError, p.nice, "str")
init = p.nice()
try:
if WINDOWS:
for prio in [psutil.NORMAL_PRIORITY_CLASS,
psutil.IDLE_PRIORITY_CLASS,
psutil.BELOW_NORMAL_PRIORITY_CLASS,
psutil.REALTIME_PRIORITY_CLASS,
psutil.HIGH_PRIORITY_CLASS,
psutil.ABOVE_NORMAL_PRIORITY_CLASS]:
with self.subTest(prio=prio):
try:
p.nice(prio)
except psutil.AccessDenied:
pass
else:
self.assertEqual(p.nice(), prio)
else:
try:
if hasattr(os, "getpriority"):
self.assertEqual(
os.getpriority(os.PRIO_PROCESS, os.getpid()),
p.nice())
p.nice(1)
self.assertEqual(p.nice(), 1)
if hasattr(os, "getpriority"):
self.assertEqual(
os.getpriority(os.PRIO_PROCESS, os.getpid()),
p.nice())
# XXX - going back to previous nice value raises
# AccessDenied on MACOS
if not MACOS:
p.nice(0)
self.assertEqual(p.nice(), 0)
except psutil.AccessDenied:
pass
finally:
try:
p.nice(init)
except psutil.AccessDenied:
pass
def test_status(self):
p = psutil.Process()
self.assertEqual(p.status(), psutil.STATUS_RUNNING)
def test_username(self):
p = self.spawn_psproc()
username = p.username()
if WINDOWS:
domain, username = username.split('\\')
self.assertEqual(username, getpass.getuser())
if 'USERDOMAIN' in os.environ:
self.assertEqual(domain, os.environ['USERDOMAIN'])
else:
self.assertEqual(username, getpass.getuser())
def test_cwd(self):
p = self.spawn_psproc()
self.assertEqual(p.cwd(), os.getcwd())
def test_cwd_2(self):
cmd = [PYTHON_EXE, "-c",
"import os, time; os.chdir('..'); time.sleep(60)"]
p = self.spawn_psproc(cmd)
call_until(p.cwd, "ret == os.path.dirname(os.getcwd())")
@unittest.skipIf(not HAS_CPU_AFFINITY, 'not supported')
def test_cpu_affinity(self):
p = psutil.Process()
initial = p.cpu_affinity()
assert initial, initial
self.addCleanup(p.cpu_affinity, initial)
if hasattr(os, "sched_getaffinity"):
self.assertEqual(initial, list(os.sched_getaffinity(p.pid)))
self.assertEqual(len(initial), len(set(initial)))
all_cpus = list(range(len(psutil.cpu_percent(percpu=True))))
# Work around travis failure:
# https://travis-ci.org/giampaolo/psutil/builds/284173194
for n in all_cpus if not TRAVIS else initial:
p.cpu_affinity([n])
self.assertEqual(p.cpu_affinity(), [n])
if hasattr(os, "sched_getaffinity"):
self.assertEqual(p.cpu_affinity(),
list(os.sched_getaffinity(p.pid)))
# also test num_cpu()
if hasattr(p, "num_cpu"):
self.assertEqual(p.cpu_affinity()[0], p.num_cpu())
# [] is an alias for "all eligible CPUs"; on Linux this may
# not be equal to all available CPUs, see:
# https://github.com/giampaolo/psutil/issues/956
p.cpu_affinity([])
if LINUX:
self.assertEqual(p.cpu_affinity(), p._proc._get_eligible_cpus())
else:
self.assertEqual(p.cpu_affinity(), all_cpus)
if hasattr(os, "sched_getaffinity"):
self.assertEqual(p.cpu_affinity(),
list(os.sched_getaffinity(p.pid)))
#
self.assertRaises(TypeError, p.cpu_affinity, 1)
p.cpu_affinity(initial)
# it should work with all iterables, not only lists
if not TRAVIS:
p.cpu_affinity(set(all_cpus))
p.cpu_affinity(tuple(all_cpus))
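    # Illustrative sketch (not part of the test suite): the "[] means all
    # eligible CPUs" behavior exercised above looks roughly like this in user
    # code; the CPU numbers are hypothetical.
    #
    #     >>> import psutil
    #     >>> p = psutil.Process()
    #     >>> p.cpu_affinity()            # e.g. [0, 1, 2, 3]
    #     >>> p.cpu_affinity([0])         # pin the process to CPU #0
    #     >>> p.cpu_affinity([])          # reset to all eligible CPUs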
@unittest.skipIf(not HAS_CPU_AFFINITY, 'not supported')
def test_cpu_affinity_errs(self):
p = self.spawn_psproc()
invalid_cpu = [len(psutil.cpu_times(percpu=True)) + 10]
self.assertRaises(ValueError, p.cpu_affinity, invalid_cpu)
self.assertRaises(ValueError, p.cpu_affinity, range(10000, 11000))
self.assertRaises(TypeError, p.cpu_affinity, [0, "1"])
self.assertRaises(ValueError, p.cpu_affinity, [0, -1])
@unittest.skipIf(not HAS_CPU_AFFINITY, 'not supported')
def test_cpu_affinity_all_combinations(self):
p = psutil.Process()
initial = p.cpu_affinity()
assert initial, initial
self.addCleanup(p.cpu_affinity, initial)
# All possible CPU set combinations.
if len(initial) > 12:
initial = initial[:12] # ...otherwise it will take forever
combos = []
for l in range(0, len(initial) + 1):
for subset in itertools.combinations(initial, l):
if subset:
combos.append(list(subset))
for combo in combos:
p.cpu_affinity(combo)
self.assertEqual(p.cpu_affinity(), combo)
# TODO: #595
@unittest.skipIf(BSD, "broken on BSD")
# can't find any process file on Appveyor
@unittest.skipIf(APPVEYOR, "unreliable on APPVEYOR")
def test_open_files(self):
p = psutil.Process()
testfn = self.get_testfn()
files = p.open_files()
self.assertNotIn(testfn, files)
with open(testfn, 'wb') as f:
f.write(b'x' * 1024)
f.flush()
# give the kernel some time to see the new file
files = call_until(p.open_files, "len(ret) != %i" % len(files))
filenames = [os.path.normcase(x.path) for x in files]
self.assertIn(os.path.normcase(testfn), filenames)
if LINUX:
for file in files:
if file.path == testfn:
self.assertEqual(file.position, 1024)
for file in files:
assert os.path.isfile(file.path), file
# another process
cmdline = "import time; f = open(r'%s', 'r'); time.sleep(60);" % testfn
p = self.spawn_psproc([PYTHON_EXE, "-c", cmdline])
for x in range(100):
filenames = [os.path.normcase(x.path) for x in p.open_files()]
if testfn in filenames:
break
time.sleep(.01)
else:
self.assertIn(os.path.normcase(testfn), filenames)
for file in filenames:
assert os.path.isfile(file), file
# TODO: #595
@unittest.skipIf(BSD, "broken on BSD")
# can't find any process file on Appveyor
@unittest.skipIf(APPVEYOR, "unreliable on APPVEYOR")
def test_open_files_2(self):
# test fd and path fields
p = psutil.Process()
normcase = os.path.normcase
testfn = self.get_testfn()
with open(testfn, 'w') as fileobj:
for file in p.open_files():
if normcase(file.path) == normcase(fileobj.name) or \
file.fd == fileobj.fileno():
break
else:
self.fail("no file found; files=%s" % repr(p.open_files()))
self.assertEqual(normcase(file.path), normcase(fileobj.name))
if WINDOWS:
self.assertEqual(file.fd, -1)
else:
self.assertEqual(file.fd, fileobj.fileno())
# test positions
ntuple = p.open_files()[0]
self.assertEqual(ntuple[0], ntuple.path)
self.assertEqual(ntuple[1], ntuple.fd)
# test file is gone
self.assertNotIn(fileobj.name, p.open_files())
@unittest.skipIf(not POSIX, 'POSIX only')
def test_num_fds(self):
p = psutil.Process()
testfn = self.get_testfn()
start = p.num_fds()
file = open(testfn, 'w')
self.addCleanup(file.close)
self.assertEqual(p.num_fds(), start + 1)
sock = socket.socket()
self.addCleanup(sock.close)
self.assertEqual(p.num_fds(), start + 2)
file.close()
sock.close()
self.assertEqual(p.num_fds(), start)
@skip_on_not_implemented(only_if=LINUX)
@unittest.skipIf(OPENBSD or NETBSD, "not reliable on OPENBSD & NETBSD")
def test_num_ctx_switches(self):
p = psutil.Process()
before = sum(p.num_ctx_switches())
for x in range(500000):
after = sum(p.num_ctx_switches())
if after > before:
return
self.fail("num ctx switches still the same after 50.000 iterations")
def test_ppid(self):
p = psutil.Process()
if hasattr(os, 'getppid'):
self.assertEqual(p.ppid(), os.getppid())
p = self.spawn_psproc()
self.assertEqual(p.ppid(), os.getpid())
if APPVEYOR:
# Occasional failures, see:
# https://ci.appveyor.com/project/giampaolo/psutil/build/
# job/0hs623nenj7w4m33
return
def test_parent(self):
p = self.spawn_psproc()
self.assertEqual(p.parent().pid, os.getpid())
lowest_pid = psutil.pids()[0]
self.assertIsNone(psutil.Process(lowest_pid).parent())
def test_parent_multi(self):
parent = psutil.Process()
child, grandchild = self.spawn_children_pair()
self.assertEqual(grandchild.parent(), child)
self.assertEqual(child.parent(), parent)
def test_parent_disappeared(self):
# Emulate a case where the parent process disappeared.
p = self.spawn_psproc()
with mock.patch("psutil.Process",
side_effect=psutil.NoSuchProcess(0, 'foo')):
self.assertIsNone(p.parent())
@retry_on_failure()
def test_parents(self):
parent = psutil.Process()
assert parent.parents()
child, grandchild = self.spawn_children_pair()
self.assertEqual(child.parents()[0], parent)
self.assertEqual(grandchild.parents()[0], child)
self.assertEqual(grandchild.parents()[1], parent)
def test_children(self):
parent = psutil.Process()
self.assertEqual(parent.children(), [])
self.assertEqual(parent.children(recursive=True), [])
# On Windows we set the flag to 0 in order to cancel out the
# CREATE_NO_WINDOW flag (enabled by default) which creates
# an extra "conhost.exe" child.
child = self.spawn_psproc(creationflags=0)
children1 = parent.children()
children2 = parent.children(recursive=True)
for children in (children1, children2):
self.assertEqual(len(children), 1)
self.assertEqual(children[0].pid, child.pid)
self.assertEqual(children[0].ppid(), parent.pid)
def test_children_recursive(self):
# Test children() against two sub processes, p1 and p2, where
# p1 (our child) spawned p2 (our grandchild).
parent = psutil.Process()
child, grandchild = self.spawn_children_pair()
self.assertEqual(parent.children(), [child])
self.assertEqual(parent.children(recursive=True), [child, grandchild])
# If the intermediate process is gone there's no way for
# children() to recursively find it.
child.terminate()
child.wait()
self.assertEqual(parent.children(recursive=True), [])
def test_children_duplicates(self):
# find the process which has the highest number of children
table = collections.defaultdict(int)
for p in psutil.process_iter():
try:
table[p.ppid()] += 1
except psutil.Error:
pass
# this is the one, now let's make sure there are no duplicates
pid = sorted(table.items(), key=lambda x: x[1])[-1][0]
if LINUX and pid == 0:
raise self.skipTest("PID 0")
p = psutil.Process(pid)
try:
c = p.children(recursive=True)
except psutil.AccessDenied: # windows
pass
else:
self.assertEqual(len(c), len(set(c)))
def test_parents_and_children(self):
parent = psutil.Process()
child, grandchild = self.spawn_children_pair()
# forward
children = parent.children(recursive=True)
self.assertEqual(len(children), 2)
self.assertEqual(children[0], child)
self.assertEqual(children[1], grandchild)
# backward
parents = grandchild.parents()
self.assertEqual(parents[0], child)
self.assertEqual(parents[1], parent)
def test_suspend_resume(self):
p = self.spawn_psproc()
p.suspend()
for x in range(100):
if p.status() == psutil.STATUS_STOPPED:
break
time.sleep(0.01)
p.resume()
self.assertNotEqual(p.status(), psutil.STATUS_STOPPED)
def test_invalid_pid(self):
self.assertRaises(TypeError, psutil.Process, "1")
self.assertRaises(ValueError, psutil.Process, -1)
def test_as_dict(self):
p = psutil.Process()
d = p.as_dict(attrs=['exe', 'name'])
self.assertEqual(sorted(d.keys()), ['exe', 'name'])
p = psutil.Process(min(psutil.pids()))
d = p.as_dict(attrs=['connections'], ad_value='foo')
if not isinstance(d['connections'], list):
self.assertEqual(d['connections'], 'foo')
# Test ad_value is set on AccessDenied.
with mock.patch('psutil.Process.nice', create=True,
side_effect=psutil.AccessDenied):
self.assertEqual(
p.as_dict(attrs=["nice"], ad_value=1), {"nice": 1})
# Test that NoSuchProcess bubbles up.
with mock.patch('psutil.Process.nice', create=True,
side_effect=psutil.NoSuchProcess(p.pid, "name")):
self.assertRaises(
psutil.NoSuchProcess, p.as_dict, attrs=["nice"])
# Test that ZombieProcess is swallowed.
with mock.patch('psutil.Process.nice', create=True,
side_effect=psutil.ZombieProcess(p.pid, "name")):
self.assertEqual(
p.as_dict(attrs=["nice"], ad_value="foo"), {"nice": "foo"})
# By default APIs raising NotImplementedError are
# supposed to be skipped.
with mock.patch('psutil.Process.nice', create=True,
side_effect=NotImplementedError):
d = p.as_dict()
self.assertNotIn('nice', list(d.keys()))
# ...unless the user explicitly asked for some attr.
with self.assertRaises(NotImplementedError):
p.as_dict(attrs=["nice"])
# errors
with self.assertRaises(TypeError):
p.as_dict('name')
with self.assertRaises(ValueError):
p.as_dict(['foo'])
with self.assertRaises(ValueError):
p.as_dict(['foo', 'bar'])
def test_oneshot(self):
p = psutil.Process()
with mock.patch("psutil._psplatform.Process.cpu_times") as m:
with p.oneshot():
p.cpu_times()
p.cpu_times()
self.assertEqual(m.call_count, 1)
with mock.patch("psutil._psplatform.Process.cpu_times") as m:
p.cpu_times()
p.cpu_times()
self.assertEqual(m.call_count, 2)
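    # Illustrative sketch (not part of the test suite): the caching verified
    # above is what makes grouping several info calls under oneshot() cheaper
    # in user code.
    #
    #     >>> import psutil
    #     >>> p = psutil.Process()
    #     >>> with p.oneshot():
    #     ...     p.name()
    #     ...     p.cpu_times()
    #     ...     p.memory_info()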
def test_oneshot_twice(self):
# Test the case where the ctx manager is __enter__ed twice.
        # The second __enter__ is supposed to result in a NOOP.
p = psutil.Process()
with mock.patch("psutil._psplatform.Process.cpu_times") as m1:
with mock.patch("psutil._psplatform.Process.oneshot_enter") as m2:
with p.oneshot():
p.cpu_times()
p.cpu_times()
with p.oneshot():
p.cpu_times()
p.cpu_times()
self.assertEqual(m1.call_count, 1)
self.assertEqual(m2.call_count, 1)
with mock.patch("psutil._psplatform.Process.cpu_times") as m:
p.cpu_times()
p.cpu_times()
self.assertEqual(m.call_count, 2)
def test_oneshot_cache(self):
# Make sure oneshot() cache is nonglobal. Instead it's
# supposed to be bound to the Process instance, see:
# https://github.com/giampaolo/psutil/issues/1373
p1, p2 = self.spawn_children_pair()
p1_ppid = p1.ppid()
p2_ppid = p2.ppid()
self.assertNotEqual(p1_ppid, p2_ppid)
with p1.oneshot():
self.assertEqual(p1.ppid(), p1_ppid)
self.assertEqual(p2.ppid(), p2_ppid)
with p2.oneshot():
self.assertEqual(p1.ppid(), p1_ppid)
self.assertEqual(p2.ppid(), p2_ppid)
def test_halfway_terminated_process(self):
# Test that NoSuchProcess exception gets raised in case the
# process dies after we create the Process object.
# Example:
# >>> proc = Process(1234)
# >>> time.sleep(2) # time-consuming task, process dies in meantime
# >>> proc.name()
# Refers to Issue #15
def assert_raises_nsp(fun, fun_name):
try:
ret = fun()
except psutil.ZombieProcess: # differentiate from NSP
raise
except psutil.NoSuchProcess:
pass
except psutil.AccessDenied:
if OPENBSD and fun_name in ('threads', 'num_threads'):
return
raise
else:
# NtQuerySystemInformation succeeds even if process is gone.
if WINDOWS and fun_name in ('exe', 'name'):
return
raise self.fail("%r didn't raise NSP and returned %r "
"instead" % (fun, ret))
p = self.spawn_psproc()
p.terminate()
p.wait()
if WINDOWS: # XXX
call_until(psutil.pids, "%s not in ret" % p.pid)
self.assertProcessGone(p)
ns = process_namespace(p)
for fun, name in ns.iter(ns.all):
assert_raises_nsp(fun, name)
# NtQuerySystemInformation succeeds even if process is gone.
if WINDOWS:
normcase = os.path.normcase
self.assertEqual(normcase(p.exe()), normcase(PYTHON_EXE))
@unittest.skipIf(not POSIX, 'POSIX only')
def test_zombie_process(self):
def succeed_or_zombie_p_exc(fun):
try:
return fun()
except (psutil.ZombieProcess, psutil.AccessDenied):
pass
parent, zombie = self.spawn_zombie()
# A zombie process should always be instantiable
zproc = psutil.Process(zombie.pid)
        # ...and at least its status should always be queryable
self.assertEqual(zproc.status(), psutil.STATUS_ZOMBIE)
# ...and it should be considered 'running'
assert zproc.is_running()
# ...and as_dict() shouldn't crash
zproc.as_dict()
        # ...its parent should 'see' it (edit: not true on BSD and MACOS)
# descendants = [x.pid for x in psutil.Process().children(
# recursive=True)]
# self.assertIn(zpid, descendants)
# XXX should we also assume ppid be usable? Note: this
# would be an important use case as the only way to get
# rid of a zombie is to kill its parent.
# self.assertEqual(zpid.ppid(), os.getpid())
# ...and all other APIs should be able to deal with it
ns = process_namespace(zproc)
for fun, name in ns.iter(ns.all):
succeed_or_zombie_p_exc(fun)
assert psutil.pid_exists(zproc.pid)
if not TRAVIS and MACOS:
            # For some reason this started failing all of a sudden.
# Maybe they upgraded MACOS version?
# https://travis-ci.org/giampaolo/psutil/jobs/310896404
self.assertIn(zproc.pid, psutil.pids())
self.assertIn(zproc.pid, [x.pid for x in psutil.process_iter()])
psutil._pmap = {}
self.assertIn(zproc.pid, [x.pid for x in psutil.process_iter()])
@unittest.skipIf(not POSIX, 'POSIX only')
def test_zombie_process_is_running_w_exc(self):
# Emulate a case where internally is_running() raises
# ZombieProcess.
p = psutil.Process()
with mock.patch("psutil.Process",
side_effect=psutil.ZombieProcess(0)) as m:
assert p.is_running()
assert m.called
@unittest.skipIf(not POSIX, 'POSIX only')
def test_zombie_process_status_w_exc(self):
# Emulate a case where internally status() raises
# ZombieProcess.
p = psutil.Process()
with mock.patch("psutil._psplatform.Process.status",
side_effect=psutil.ZombieProcess(0)) as m:
self.assertEqual(p.status(), psutil.STATUS_ZOMBIE)
assert m.called
def test_pid_0(self):
# Process(0) is supposed to work on all platforms except Linux
if 0 not in psutil.pids():
self.assertRaises(psutil.NoSuchProcess, psutil.Process, 0)
# These 2 are a contradiction, but "ps" says PID 1's parent
# is PID 0.
assert not psutil.pid_exists(0)
self.assertEqual(psutil.Process(1).ppid(), 0)
return
p = psutil.Process(0)
exc = psutil.AccessDenied if WINDOWS else ValueError
self.assertRaises(exc, p.wait)
self.assertRaises(exc, p.terminate)
self.assertRaises(exc, p.suspend)
self.assertRaises(exc, p.resume)
self.assertRaises(exc, p.kill)
self.assertRaises(exc, p.send_signal, signal.SIGTERM)
# test all methods
ns = process_namespace(p)
for fun, name in ns.iter(ns.getters + ns.setters):
try:
ret = fun()
except psutil.AccessDenied:
pass
else:
if name in ("uids", "gids"):
self.assertEqual(ret.real, 0)
elif name == "username":
user = 'NT AUTHORITY\\SYSTEM' if WINDOWS else 'root'
self.assertEqual(p.username(), user)
elif name == "name":
assert name, name
if not OPENBSD:
self.assertIn(0, psutil.pids())
assert psutil.pid_exists(0)
@unittest.skipIf(not HAS_ENVIRON, "not supported")
def test_environ(self):
def clean_dict(d):
# Most of these are problematic on Travis.
d.pop("PSUTIL_TESTING", None)
d.pop("PLAT", None)
d.pop("HOME", None)
if MACOS:
d.pop("__CF_USER_TEXT_ENCODING", None)
d.pop("VERSIONER_PYTHON_PREFER_32_BIT", None)
d.pop("VERSIONER_PYTHON_VERSION", None)
return dict(
[(k.replace("\r", "").replace("\n", ""),
v.replace("\r", "").replace("\n", ""))
for k, v in d.items()])
self.maxDiff = None
p = psutil.Process()
d1 = clean_dict(p.environ())
d2 = clean_dict(os.environ.copy())
if not OSX and GITHUB_WHEELS:
self.assertEqual(d1, d2)
@unittest.skipIf(not HAS_ENVIRON, "not supported")
@unittest.skipIf(not POSIX, "POSIX only")
def test_weird_environ(self):
# environment variables can contain values without an equals sign
code = textwrap.dedent("""
#include <unistd.h>
#include <fcntl.h>
char * const argv[] = {"cat", 0};
char * const envp[] = {"A=1", "X", "C=3", 0};
int main(void) {
/* Close stderr on exec so parent can wait for the execve to
* finish. */
if (fcntl(2, F_SETFD, FD_CLOEXEC) != 0)
return 0;
return execve("/bin/cat", argv, envp);
}
""")
path = self.get_testfn()
create_exe(path, c_code=code)
sproc = self.spawn_testproc(
[path], stdin=subprocess.PIPE, stderr=subprocess.PIPE)
p = psutil.Process(sproc.pid)
wait_for_pid(p.pid)
assert p.is_running()
# Wait for process to exec or exit.
self.assertEqual(sproc.stderr.read(), b"")
self.assertEqual(p.environ(), {"A": "1", "C": "3"})
sproc.communicate()
self.assertEqual(sproc.returncode, 0)
# ===================================================================
# --- Limited user tests
# ===================================================================
if POSIX and os.getuid() == 0:
class LimitedUserTestCase(TestProcess):
"""Repeat the previous tests by using a limited user.
        Executed only on UNIX and only if the user who runs the test script
is root.
"""
# the uid/gid the test suite runs under
if hasattr(os, 'getuid'):
PROCESS_UID = os.getuid()
PROCESS_GID = os.getgid()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# re-define all existent test methods in order to
# ignore AccessDenied exceptions
for attr in [x for x in dir(self) if x.startswith('test')]:
meth = getattr(self, attr)
def test_(self):
try:
meth()
except psutil.AccessDenied:
pass
setattr(self, attr, types.MethodType(test_, self))
def setUp(self):
super().setUp()
os.setegid(1000)
os.seteuid(1000)
def tearDown(self):
            os.setegid(self.PROCESS_GID)
            os.seteuid(self.PROCESS_UID)
super().tearDown()
def test_nice(self):
try:
psutil.Process().nice(-1)
except psutil.AccessDenied:
pass
else:
self.fail("exception not raised")
@unittest.skipIf(1, "causes problem as root")
def test_zombie_process(self):
pass
# ===================================================================
# --- psutil.Popen tests
# ===================================================================
class TestPopen(PsutilTestCase):
"""Tests for psutil.Popen class."""
@classmethod
def tearDownClass(cls):
reap_children()
def test_misc(self):
# XXX this test causes a ResourceWarning on Python 3 because
        # psutil.__subproc instance doesn't get properly freed.
# Not sure what to do though.
cmd = [PYTHON_EXE, "-c", "import time; time.sleep(60);"]
with psutil.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
proc.name()
proc.cpu_times()
proc.stdin
self.assertTrue(dir(proc))
self.assertRaises(AttributeError, getattr, proc, 'foo')
proc.terminate()
if POSIX:
self.assertEqual(proc.wait(), -signal.SIGTERM)
else:
self.assertEqual(proc.wait(), signal.SIGTERM)
def test_ctx_manager(self):
with psutil.Popen([PYTHON_EXE, "-V"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE) as proc:
proc.communicate()
assert proc.stdout.closed
assert proc.stderr.closed
assert proc.stdin.closed
self.assertEqual(proc.returncode, 0)
def test_kill_terminate(self):
# subprocess.Popen()'s terminate(), kill() and send_signal() do
# not raise exception after the process is gone. psutil.Popen
# diverges from that.
cmd = [PYTHON_EXE, "-c", "import time; time.sleep(60);"]
with psutil.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
proc.terminate()
proc.wait()
self.assertRaises(psutil.NoSuchProcess, proc.terminate)
self.assertRaises(psutil.NoSuchProcess, proc.kill)
self.assertRaises(psutil.NoSuchProcess, proc.send_signal,
signal.SIGTERM)
if WINDOWS and sys.version_info >= (2, 7):
self.assertRaises(psutil.NoSuchProcess, proc.send_signal,
signal.CTRL_C_EVENT)
self.assertRaises(psutil.NoSuchProcess, proc.send_signal,
signal.CTRL_BREAK_EVENT)
if __name__ == '__main__':
from psutil.tests.runner import run_from_name
run_from_name(__file__)
| 38.62 | 79 | 0.571741 |
4a21cbd881bc43b8d026847144377769705edb80 | 105,894 | py | Python | astropy/table/table.py | rirze/astropy | 29d5afb71b66eba45f147666443a847246176944 | [
"BSD-3-Clause"
] | null | null | null | astropy/table/table.py | rirze/astropy | 29d5afb71b66eba45f147666443a847246176944 | [
"BSD-3-Clause"
] | 1 | 2020-01-06T19:22:26.000Z | 2020-01-06T19:22:26.000Z | astropy/table/table.py | rirze/astropy | 29d5afb71b66eba45f147666443a847246176944 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from .index import TableIndices, TableLoc, TableILoc, TableLocIndices
import re
import sys
from collections import OrderedDict, Mapping
import warnings
from copy import deepcopy
import numpy as np
from numpy import ma
from .. import log
from ..io import registry as io_registry
from ..units import Quantity, QuantityInfo
from ..utils import isiterable, ShapedLikeNDArray
from ..utils.console import color_print
from ..utils.metadata import MetaData
from ..utils.data_info import BaseColumnInfo, MixinInfo, ParentDtypeInfo, DataInfo
from ..utils.exceptions import AstropyDeprecationWarning, NoValue
from . import groups
from .pprint import TableFormatter
from .column import (BaseColumn, Column, MaskedColumn, _auto_names, FalseArray,
col_copy)
from .row import Row
from .np_utils import fix_column_name, recarray_fromrecords
from .info import TableInfo, serialize_method_as
from .index import Index, _IndexModeContext, get_index
from . import conf
__doctest_skip__ = ['Table.read', 'Table.write',
'Table.convert_bytestring_to_unicode',
'Table.convert_unicode_to_bytestring',
]
class TableReplaceWarning(UserWarning):
"""
Warning class for cases when a table column is replaced via the
Table.__setitem__ syntax e.g. t['a'] = val.
This does not inherit from AstropyWarning because we want to use
stacklevel=3 to show the user where the issue occurred in their code.
"""
pass
def descr(col):
"""Array-interface compliant full description of a column.
This returns a 3-tuple (name, type, shape) that can always be
used in a structured array dtype definition.
"""
col_dtype = 'O' if (col.info.dtype is None) else col.info.dtype
col_shape = col.shape[1:] if hasattr(col, 'shape') else ()
return (col.info.name, col_dtype, col_shape)
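# Illustrative sketch (not part of the original module): the 3-tuple returned
# by descr() is directly usable as one entry of a structured dtype. The column
# name 'a' below is hypothetical and the output is approximate.
#
#     >>> from astropy.table import Column
#     >>> import numpy as np
#     >>> descr(Column(name='a', data=[1, 2, 3]))    # ('a', dtype('int64'), ())
#     >>> np.dtype([descr(Column(name='a', data=[1, 2, 3]))])
#     dtype([('a', '<i8')])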
def has_info_class(obj, cls):
return hasattr(obj, 'info') and isinstance(obj.info, cls)
class TableColumns(OrderedDict):
"""OrderedDict subclass for a set of columns.
This class enhances item access to provide convenient access to columns
by name or index, including slice access. It also handles renaming
of columns.
The initialization argument ``cols`` can be a list of ``Column`` objects
or any structure that is valid for initializing a Python dict. This
includes a dict, list of (key, val) tuples or [key, val] lists, etc.
Parameters
----------
cols : dict, list, tuple; optional
Column objects as data structure that can init dict (see above)
"""
def __init__(self, cols={}):
if isinstance(cols, (list, tuple)):
# `cols` should be a list of two-tuples, but it is allowed to have
# columns (BaseColumn or mixins) in the list.
newcols = []
for col in cols:
if has_info_class(col, BaseColumnInfo):
newcols.append((col.info.name, col))
else:
newcols.append(col)
cols = newcols
super().__init__(cols)
def __getitem__(self, item):
"""Get items from a TableColumns object.
::
tc = TableColumns(cols=[Column(name='a'), Column(name='b'), Column(name='c')])
tc['a'] # Column('a')
tc[1] # Column('b')
tc['a', 'b'] # <TableColumns names=('a', 'b')>
tc[1:3] # <TableColumns names=('b', 'c')>
"""
if isinstance(item, str):
return OrderedDict.__getitem__(self, item)
elif isinstance(item, (int, np.integer)):
return self.values()[item]
elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'):
return self.values()[item.item()]
elif isinstance(item, tuple):
return self.__class__([self[x] for x in item])
elif isinstance(item, slice):
return self.__class__([self[x] for x in list(self)[item]])
else:
raise IndexError('Illegal key or index value for {} object'
.format(self.__class__.__name__))
def __setitem__(self, item, value):
if item in self:
raise ValueError("Cannot replace column '{0}'. Use Table.replace_column() instead."
.format(item))
super().__setitem__(item, value)
def __repr__(self):
names = ("'{0}'".format(x) for x in self.keys())
return "<{1} names=({0})>".format(",".join(names), self.__class__.__name__)
def _rename_column(self, name, new_name):
if name == new_name:
return
if new_name in self:
raise KeyError("Column {0} already exists".format(new_name))
mapper = {name: new_name}
new_names = [mapper.get(name, name) for name in self]
cols = list(self.values())
self.clear()
self.update(list(zip(new_names, cols)))
# Define keys and values for Python 2 and 3 source compatibility
def keys(self):
return list(OrderedDict.keys(self))
def values(self):
return list(OrderedDict.values(self))
def isinstance(self, cls):
"""
Return a list of columns which are instances of the specified classes.
Parameters
----------
cls : class or tuple of classes
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of Columns
List of Column objects which are instances of given classes.
"""
cols = [col for col in self.values() if isinstance(col, cls)]
return cols
def not_isinstance(self, cls):
"""
Return a list of columns which are not instances of the specified classes.
Parameters
----------
cls : class or tuple of classes
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of Columns
List of Column objects which are not instances of given classes.
"""
cols = [col for col in self.values() if not isinstance(col, cls)]
return cols
class Table:
"""A class to represent tables of heterogeneous data.
`Table` provides a class for heterogeneous tabular data, making use of a
`numpy` structured array internally to store the data values. A key
enhancement provided by the `Table` class is the ability to easily modify
the structure of the table by adding or removing columns, or adding new
rows of data. In addition table and column metadata are fully supported.
`Table` differs from `~astropy.nddata.NDData` by the assumption that the
input data consists of columns of homogeneous data, where each column
has a unique identifier and may contain additional metadata such as the
data unit, format, and description.
Parameters
----------
data : numpy ndarray, dict, list, Table, or table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. If the input is a Table the ``meta`` is always
copied regardless of the ``copy`` parameter.
Default is True.
rows : numpy ndarray, list of lists, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
"""
meta = MetaData()
# Define class attributes for core container objects to allow for subclass
# customization.
Row = Row
Column = Column
MaskedColumn = MaskedColumn
TableColumns = TableColumns
TableFormatter = TableFormatter
def as_array(self, keep_byteorder=False):
"""
Return a new copy of the table in the form of a structured np.ndarray or
np.ma.MaskedArray object (as appropriate).
Parameters
----------
keep_byteorder : bool, optional
By default the returned array has all columns in native byte
order. However, if this option is `True` this preserves the
byte order of all columns (if any are non-native).
Returns
-------
table_array : np.ndarray (unmasked) or np.ma.MaskedArray (masked)
Copy of table as a numpy structured array
"""
if len(self.columns) == 0:
return None
sys_byteorder = ('>', '<')[sys.byteorder == 'little']
native_order = ('=', sys_byteorder)
dtype = []
cols = self.columns.values()
for col in cols:
col_descr = descr(col)
byteorder = col.info.dtype.byteorder
if not keep_byteorder and byteorder not in native_order:
new_dt = np.dtype(col_descr[1]).newbyteorder('=')
col_descr = (col_descr[0], new_dt, col_descr[2])
dtype.append(col_descr)
empty_init = ma.empty if self.masked else np.empty
data = empty_init(len(self), dtype=dtype)
for col in cols:
# When assigning from one array into a field of a structured array,
# Numpy will automatically swap those columns to their destination
# byte order where applicable
data[col.info.name] = col
return data
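    # Illustrative sketch (not part of the original class): as_array() returns
    # a structured-array *copy* of the table. The column names below are
    # hypothetical and the output is approximate.
    #
    #     >>> t = Table({'a': [1, 2], 'b': [3.0, 4.0]})
    #     >>> arr = t.as_array()
    #     >>> arr.dtype.names             # ('a', 'b')
    #     >>> arr['a'][0] = 99            # modifies the copy, not the table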
def __init__(self, data=None, masked=None, names=None, dtype=None,
meta=None, copy=True, rows=None, copy_indices=True,
**kwargs):
# Set up a placeholder empty table
self._set_masked(masked)
self.columns = self.TableColumns()
self.meta = meta
self.formatter = self.TableFormatter()
self._copy_indices = True # copy indices from this Table by default
self._init_indices = copy_indices # whether to copy indices in init
self.primary_key = None
# Must copy if dtype are changing
if not copy and dtype is not None:
raise ValueError('Cannot specify dtype when copy=False')
# Row-oriented input, e.g. list of lists or list of tuples, list of
# dict, Row instance. Set data to something that the subsequent code
# will parse correctly.
is_list_of_dict = False
if rows is not None:
if data is not None:
raise ValueError('Cannot supply both `data` and `rows` values')
if all(isinstance(row, dict) for row in rows):
is_list_of_dict = True # Avoid doing the all(...) test twice.
data = rows
elif isinstance(rows, self.Row):
data = rows
else:
rec_data = recarray_fromrecords(rows)
data = [rec_data[name] for name in rec_data.dtype.names]
# Infer the type of the input data and set up the initialization
# function, number of columns, and potentially the default col names
default_names = None
if hasattr(data, '__astropy_table__'):
# Data object implements the __astropy_table__ interface method.
# Calling that method returns an appropriate instance of
# self.__class__ and respects the `copy` arg. The returned
# Table object should NOT then be copied (though the meta
# will be deep-copied anyway).
data = data.__astropy_table__(self.__class__, copy, **kwargs)
copy = False
elif kwargs:
raise TypeError('__init__() got unexpected keyword argument {!r}'
.format(list(kwargs.keys())[0]))
if (isinstance(data, np.ndarray) and
data.shape == (0,) and
not data.dtype.names):
data = None
if isinstance(data, self.Row):
data = data._table[data._index:data._index + 1]
if isinstance(data, (list, tuple)):
init_func = self._init_from_list
if data and (is_list_of_dict or all(isinstance(row, dict) for row in data)):
n_cols = len(data[0])
else:
n_cols = len(data)
elif isinstance(data, np.ndarray):
if data.dtype.names:
init_func = self._init_from_ndarray # _struct
n_cols = len(data.dtype.names)
default_names = data.dtype.names
else:
init_func = self._init_from_ndarray # _homog
if data.shape == ():
raise ValueError('Can not initialize a Table with a scalar')
elif len(data.shape) == 1:
data = data[np.newaxis, :]
n_cols = data.shape[1]
elif isinstance(data, Mapping):
init_func = self._init_from_dict
default_names = list(data)
n_cols = len(default_names)
elif isinstance(data, Table):
init_func = self._init_from_table
n_cols = len(data.colnames)
default_names = data.colnames
# don't copy indices if the input Table is in non-copy mode
self._init_indices = self._init_indices and data._copy_indices
elif data is None:
if names is None:
if dtype is None:
return # Empty table
try:
# No data nor names but dtype is available. This must be
# valid to initialize a structured array.
dtype = np.dtype(dtype)
names = dtype.names
dtype = [dtype[name] for name in names]
except Exception:
raise ValueError('dtype was specified but could not be '
'parsed for column names')
# names is guaranteed to be set at this point
init_func = self._init_from_list
n_cols = len(names)
data = [[]] * n_cols
else:
raise ValueError('Data type {0} not allowed to init Table'
.format(type(data)))
# Set up defaults if names and/or dtype are not specified.
# A value of None means the actual value will be inferred
# within the appropriate initialization routine, either from
# existing specification or auto-generated.
if names is None:
names = default_names or [None] * n_cols
if dtype is None:
dtype = [None] * n_cols
# Numpy does not support bytes column names on Python 3, so fix them
# up now.
names = [fix_column_name(name) for name in names]
self._check_names_dtype(names, dtype, n_cols)
# Finally do the real initialization
init_func(data, names, dtype, n_cols, copy)
# Whatever happens above, the masked property should be set to a boolean
if type(self.masked) is not bool:
raise TypeError("masked property has not been set to True or False")
def __getstate__(self):
columns = OrderedDict((key, col if isinstance(col, BaseColumn) else col_copy(col))
for key, col in self.columns.items())
return (columns, self.meta)
def __setstate__(self, state):
columns, meta = state
self.__init__(columns, meta=meta)
@property
def mask(self):
# Dynamic view of available masks
if self.masked:
mask_table = Table([col.mask for col in self.columns.values()],
names=self.colnames, copy=False)
# Set hidden attribute to force inplace setitem so that code like
# t.mask['a'] = [1, 0, 1] will correctly set the underlying mask.
# See #5556 for discussion.
mask_table._setitem_inplace = True
else:
mask_table = None
return mask_table
@mask.setter
def mask(self, val):
self.mask[:] = val
@property
def _mask(self):
"""This is needed so that comparison of a masked Table and a
MaskedArray works. The requirement comes from numpy.ma.core
so don't remove this property."""
return self.as_array().mask
def filled(self, fill_value=None):
"""Return a copy of self, with masked values filled.
If input ``fill_value`` supplied then that value is used for all
masked entries in the table. Otherwise the individual
``fill_value`` defined for each table column is used.
Parameters
----------
fill_value : str
If supplied, this ``fill_value`` is used for all masked entries
in the entire table.
Returns
-------
filled_table : Table
New table with masked values filled
"""
if self.masked:
data = [col.filled(fill_value) for col in self.columns.values()]
else:
data = self
return self.__class__(data, meta=deepcopy(self.meta))
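    # Illustrative sketch (not part of the original class): filled() only has
    # an effect on masked tables. The column name and fill value below are
    # hypothetical.
    #
    #     >>> t = Table({'a': [1, 2, 3]}, masked=True)
    #     >>> t['a'].mask = [False, True, False]
    #     >>> t.filled(-999)['a']         # values become [1, -999, 3]
    #     >>> t.filled()['a']             # uses each column's own fill_value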
@property
def indices(self):
'''
Return the indices associated with columns of the table
as a TableIndices object.
'''
lst = []
for column in self.columns.values():
for index in column.info.indices:
if sum([index is x for x in lst]) == 0: # ensure uniqueness
lst.append(index)
return TableIndices(lst)
@property
def loc(self):
'''
Return a TableLoc object that can be used for retrieving
rows by index in a given data range. Note that both loc
and iloc work only with single-column indices.
'''
return TableLoc(self)
@property
def loc_indices(self):
"""
Return a TableLocIndices object that can be used for retrieving
the row indices corresponding to given table index key value or values.
"""
return TableLocIndices(self)
@property
def iloc(self):
'''
Return a TableILoc object that can be used for retrieving
indexed rows in the order they appear in the index.
'''
return TableILoc(self)
def add_index(self, colnames, engine=None, unique=False):
'''
Insert a new index among one or more columns.
If there are no indices, make this index the
primary table index.
Parameters
----------
colnames : str or list
List of column names (or a single column name) to index
engine : type or None
Indexing engine class to use, from among SortedArray, BST,
FastBST, FastRBT, and SCEngine. If the supplied argument is None
(by default), use SortedArray.
unique : bool
Whether the values of the index must be unique. Default is False.
'''
if isinstance(colnames, str):
colnames = (colnames,)
columns = self.columns[tuple(colnames)].values()
# make sure all columns support indexing
for col in columns:
if not getattr(col.info, '_supports_indexing', False):
raise ValueError('Cannot create an index on column "{0}", of '
'type "{1}"'.format(col.info.name, type(col)))
index = Index(columns, engine=engine, unique=unique)
if not self.indices:
self.primary_key = colnames
for col in columns:
col.info.indices.append(index)
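    # Illustrative sketch (not part of the original class): once a column is
    # indexed, .loc can retrieve rows by key value. The column names below are
    # hypothetical.
    #
    #     >>> t = Table({'id': [10, 20, 30], 'x': [1.0, 2.0, 3.0]})
    #     >>> t.add_index('id')
    #     >>> t.loc[20]['x']              # row whose 'id' equals 20 -> 2.0
    #     >>> t.loc[10:20]                # range-based selection on the index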
def remove_indices(self, colname):
'''
Remove all indices involving the given column.
If the primary index is removed, the new primary
index will be the most recently added remaining
index.
Parameters
----------
colname : str
Name of column
'''
col = self.columns[colname]
for index in self.indices:
try:
index.col_position(col.info.name)
except ValueError:
pass
else:
for c in index.columns:
c.info.indices.remove(index)
def index_mode(self, mode):
'''
Return a context manager for an indexing mode.
Parameters
----------
mode : str
Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'.
In 'discard_on_copy' mode,
indices are not copied whenever columns or tables are copied.
In 'freeze' mode, indices are not modified whenever columns are
modified; at the exit of the context, indices refresh themselves
based on column values. This mode is intended for scenarios in
which one intends to make many additions or modifications in an
indexed column.
In 'copy_on_getitem' mode, indices are copied when taking column
slices as well as table slices, so col[i0:i1] will preserve
indices.
'''
return _IndexModeContext(self, mode)
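    # Illustrative sketch (not part of the original class): 'freeze' mode
    # defers index maintenance while a column is modified in bulk; indices
    # refresh when the context exits. The column name is hypothetical.
    #
    #     >>> t = Table({'a': list(range(1000))})
    #     >>> t.add_index('a')
    #     >>> with t.index_mode('freeze'):
    #     ...     t['a'][:] = t['a'] + 1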
def __array__(self, dtype=None):
"""Support converting Table to np.array via np.array(table).
Coercion to a different dtype via np.array(table, dtype) is not
supported and will raise a ValueError.
"""
if dtype is not None:
raise ValueError('Datatype coercion is not allowed')
# This limitation is because of the following unexpected result that
# should have made a table copy while changing the column names.
#
# >>> d = astropy.table.Table([[1,2],[3,4]])
# >>> np.array(d, dtype=[('a', 'i8'), ('b', 'i8')])
# array([(0, 0), (0, 0)],
# dtype=[('a', '<i8'), ('b', '<i8')])
return self.as_array().data if self.masked else self.as_array()
def _check_names_dtype(self, names, dtype, n_cols):
"""Make sure that names and dtype are both iterable and have
the same length as data.
"""
for inp_list, inp_str in ((dtype, 'dtype'), (names, 'names')):
if not isiterable(inp_list):
raise ValueError('{0} must be a list or None'.format(inp_str))
if len(names) != n_cols or len(dtype) != n_cols:
raise ValueError(
'Arguments "names" and "dtype" must match number of columns'
.format(inp_str))
def _set_masked_from_cols(self, cols):
if self.masked is None:
if any(isinstance(col, (MaskedColumn, ma.MaskedArray)) for col in cols):
self._set_masked(True)
else:
self._set_masked(False)
elif not self.masked:
if any(np.any(col.mask) for col in cols if isinstance(col, (MaskedColumn, ma.MaskedArray))):
self._set_masked(True)
def _init_from_list_of_dicts(self, data, names, dtype, n_cols, copy):
names_from_data = set()
for row in data:
names_from_data.update(row)
cols = {}
for name in names_from_data:
cols[name] = []
for i, row in enumerate(data):
try:
cols[name].append(row[name])
except KeyError:
raise ValueError('Row {0} has no value for column {1}'.format(i, name))
if all(name is None for name in names):
names = sorted(names_from_data)
self._init_from_dict(cols, names, dtype, n_cols, copy)
return
def _init_from_list(self, data, names, dtype, n_cols, copy):
"""Initialize table from a list of columns. A column can be a
Column object, np.ndarray, mixin, or any other iterable object.
"""
if data and all(isinstance(row, dict) for row in data):
self._init_from_list_of_dicts(data, names, dtype, n_cols, copy)
return
# Set self.masked appropriately, then get class to create column instances.
self._set_masked_from_cols(data)
cols = []
def_names = _auto_names(n_cols)
for col, name, def_name, dtype in zip(data, names, def_names, dtype):
# Structured ndarray gets viewed as a mixin unless already a valid
# mixin class
if (isinstance(col, np.ndarray) and len(col.dtype) > 1 and
not self._add_as_mixin_column(col)):
col = col.view(NdarrayMixin)
if isinstance(col, (Column, MaskedColumn)):
col = self.ColumnClass(name=(name or col.info.name or def_name),
data=col, dtype=dtype,
copy=copy, copy_indices=self._init_indices)
elif self._add_as_mixin_column(col):
# Copy the mixin column attributes if they exist since the copy below
# may not get this attribute.
if copy:
col = col_copy(col, copy_indices=self._init_indices)
col.info.name = name or col.info.name or def_name
elif isinstance(col, np.ndarray) or isiterable(col):
col = self.ColumnClass(name=(name or def_name), data=col, dtype=dtype,
copy=copy, copy_indices=self._init_indices)
else:
raise ValueError('Elements in list initialization must be '
'either Column or list-like')
cols.append(col)
self._init_from_cols(cols)
def _init_from_ndarray(self, data, names, dtype, n_cols, copy):
"""Initialize table from an ndarray structured array"""
data_names = data.dtype.names or _auto_names(n_cols)
struct = data.dtype.names is not None
names = [name or data_names[i] for i, name in enumerate(names)]
cols = ([data[name] for name in data_names] if struct else
[data[:, i] for i in range(n_cols)])
# Set self.masked appropriately, then get class to create column instances.
self._set_masked_from_cols(cols)
if copy:
self._init_from_list(cols, names, dtype, n_cols, copy)
else:
dtype = [(name, col.dtype, col.shape[1:]) for name, col in zip(names, cols)]
newdata = data.view(dtype).ravel()
columns = self.TableColumns()
for name in names:
columns[name] = self.ColumnClass(name=name, data=newdata[name])
columns[name].info.parent_table = self
self.columns = columns
def _init_from_dict(self, data, names, dtype, n_cols, copy):
"""Initialize table from a dictionary of columns"""
# TODO: is this restriction still needed with no ndarray?
if not copy:
raise ValueError('Cannot use copy=False with a dict data input')
data_list = [data[name] for name in names]
self._init_from_list(data_list, names, dtype, n_cols, copy)
def _init_from_table(self, data, names, dtype, n_cols, copy):
"""Initialize table from an existing Table object """
table = data # data is really a Table, rename for clarity
self.meta.clear()
self.meta.update(deepcopy(table.meta))
self.primary_key = table.primary_key
cols = list(table.columns.values())
self._init_from_list(cols, names, dtype, n_cols, copy)
def _convert_col_for_table(self, col):
"""
Make sure that all Column objects have correct class for this type of
Table. For a base Table this most commonly means setting to
MaskedColumn if the table is masked. Table subclasses like QTable
override this method.
"""
if col.__class__ is not self.ColumnClass and isinstance(col, Column):
col = self.ColumnClass(col) # copy attributes and reference data
return col
def _init_from_cols(self, cols):
"""Initialize table from a list of Column or mixin objects"""
lengths = set(len(col) for col in cols)
if len(lengths) != 1:
raise ValueError('Inconsistent data column lengths: {0}'
.format(lengths))
# Set the table masking
self._set_masked_from_cols(cols)
# Make sure that all Column-based objects have correct class. For
# plain Table this is self.ColumnClass, but for instance QTable will
# convert columns with units to a Quantity mixin.
newcols = [self._convert_col_for_table(col) for col in cols]
self._make_table_from_cols(self, newcols)
# Deduplicate indices. It may happen that after pickling or when
# initing from an existing table that column indices which had been
# references to a single index object got *copied* into an independent
# object. This results in duplicates which will cause downstream problems.
index_dict = {}
for col in self.itercols():
for i, index in enumerate(col.info.indices or []):
names = tuple(ind_col.info.name for ind_col in index.columns)
if names in index_dict:
col.info.indices[i] = index_dict[names]
else:
index_dict[names] = index
def _new_from_slice(self, slice_):
"""Create a new table as a referenced slice from self."""
table = self.__class__(masked=self.masked)
table.meta.clear()
table.meta.update(deepcopy(self.meta))
table.primary_key = self.primary_key
cols = self.columns.values()
newcols = []
for col in cols:
col.info._copy_indices = self._copy_indices
newcol = col[slice_]
if col.info.indices:
newcol = col.info.slice_indices(newcol, slice_, len(col))
newcols.append(newcol)
col.info._copy_indices = True
self._make_table_from_cols(table, newcols)
return table
@staticmethod
def _make_table_from_cols(table, cols):
"""
Make ``table`` in-place so that it represents the given list of ``cols``.
"""
colnames = set(col.info.name for col in cols)
if None in colnames:
raise TypeError('Cannot have None for column name')
if len(colnames) != len(cols):
raise ValueError('Duplicate column names')
columns = table.TableColumns((col.info.name, col) for col in cols)
for col in cols:
col.info.parent_table = table
if table.masked and not hasattr(col, 'mask'):
col.mask = FalseArray(col.shape)
table.columns = columns
def itercols(self):
"""
Iterate over the columns of this table.
Examples
--------
To iterate over the columns of a table::
>>> t = Table([[1], [2]])
>>> for col in t.itercols():
... print(col)
col0
----
1
col1
----
2
Using ``itercols()`` is similar to ``for col in t.columns.values()``
but is syntactically preferred.
"""
for colname in self.columns:
yield self[colname]
def _base_repr_(self, html=False, descr_vals=None, max_width=None,
tableid=None, show_dtype=True, max_lines=None,
tableclass=None):
if descr_vals is None:
descr_vals = [self.__class__.__name__]
if self.masked:
descr_vals.append('masked=True')
descr_vals.append('length={0}'.format(len(self)))
descr = ' '.join(descr_vals)
if html:
from ..utils.xml.writer import xml_escape
descr = '<i>{0}</i>\n'.format(xml_escape(descr))
else:
descr = '<{0}>\n'.format(descr)
if tableid is None:
tableid = 'table{id}'.format(id=id(self))
data_lines, outs = self.formatter._pformat_table(
self, tableid=tableid, html=html, max_width=max_width,
show_name=True, show_unit=None, show_dtype=show_dtype,
max_lines=max_lines, tableclass=tableclass)
out = descr + '\n'.join(data_lines)
return out
def _repr_html_(self):
return self._base_repr_(html=True, max_width=-1,
tableclass=conf.default_notebook_table_class)
def __repr__(self):
return self._base_repr_(html=False, max_width=None)
def __str__(self):
return '\n'.join(self.pformat())
def __bytes__(self):
return str(self).encode('utf-8')
@property
def has_mixin_columns(self):
"""
True if table has any mixin columns (defined as columns that are not Column
subclasses).
"""
return any(has_info_class(col, MixinInfo) for col in self.columns.values())
def _add_as_mixin_column(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
if isinstance(col, BaseColumn):
return False
        # Is it a mixin but not Quantity (which gets converted to Column with
# unit set).
return has_info_class(col, MixinInfo) and not has_info_class(col, QuantityInfo)
def pprint(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False, align=None):
"""Print a formatted string representation of the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for max_width except the configuration item is
``astropy.conf.max_width``.
Parameters
----------
max_lines : int
Maximum number of lines in table output.
max_width : int or `None`
Maximum character width of output.
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
            Include a header row for column dtypes. Default is False.
align : str or list or tuple or `None`
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
"""
lines, outs = self.formatter._pformat_table(self, max_lines, max_width,
show_name=show_name, show_unit=show_unit,
show_dtype=show_dtype, align=align)
if outs['show_length']:
lines.append('Length = {0} rows'.format(len(self)))
n_header = outs['n_header']
for i, line in enumerate(lines):
if i < n_header:
color_print(line, 'red')
else:
print(line)
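    # Illustrative sketch (not part of the original class): align takes one
    # value per column when a list is given. The column names below are
    # hypothetical.
    #
    #     >>> t = Table({'name': ['a', 'bb'], 'val': [1, 22]})
    #     >>> t.pprint(align=['<', '>'])  # left-align 'name', right-align 'val'
    #     >>> t.pprint(max_lines=-1)      # no row limit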
def _make_index_row_display_table(self, index_row_name):
if index_row_name not in self.columns:
idx_col = self.ColumnClass(name=index_row_name, data=np.arange(len(self)))
return self.__class__([idx_col] + self.columns.values(),
copy=False)
else:
return self
def show_in_notebook(self, tableid=None, css=None, display_length=50,
table_class='astropy-default', show_row_index='idx'):
"""Render the table in HTML and show it in the IPython notebook.
Parameters
----------
tableid : str or `None`
An html ID tag for the table. Default is ``table{id}-XXX``, where
id is the unique integer id of the table object, id(self), and XXX
is a random number to avoid conflicts when printing the same table
multiple times.
table_class : str or `None`
A string with a list of HTML classes used to style the table.
The special default string ('astropy-default') means that the string
will be retrieved from the configuration item
``astropy.table.default_notebook_table_class``. Note that these
table classes may make use of bootstrap, as this is loaded with the
notebook. See `this page <http://getbootstrap.com/css/#tables>`_
for the list of classes.
css : string
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS_NB``.
display_length : int, optional
            Number of rows to show. Defaults to 50.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
Notes
-----
Currently, unlike `show_in_browser` (with ``jsviewer=True``), this
method needs to access online javascript code repositories. This is due
to modern browsers' limitations on accessing local files. Hence, if you
call this method while offline (and don't have a cached version of
jquery and jquery.dataTables), you will not get the jsviewer features.
"""
from .jsviewer import JSViewer
from IPython.display import HTML
if tableid is None:
tableid = 'table{0}-{1}'.format(id(self),
np.random.randint(1, 1e6))
jsv = JSViewer(display_length=display_length)
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
if table_class == 'astropy-default':
table_class = conf.default_notebook_table_class
html = display_table._base_repr_(html=True, max_width=-1, tableid=tableid,
max_lines=-1, show_dtype=False,
tableclass=table_class)
columns = display_table.columns.values()
sortable_columns = [i for i, col in enumerate(columns)
if col.dtype.kind in 'iufc']
html += jsv.ipynb(tableid, css=css, sort_columns=sortable_columns)
return HTML(html)
def show_in_browser(self, max_lines=5000, jsviewer=False,
browser='default', jskwargs={'use_local_files': True},
tableid=None, table_class="display compact",
css=None, show_row_index='idx'):
"""Render the table in HTML and show it in a web browser.
Parameters
----------
max_lines : int
Maximum number of rows to export to the table (set low by default
to avoid memory issues, since the browser view requires duplicating
the table in memory). A negative value of ``max_lines`` indicates
no row limit.
jsviewer : bool
If `True`, prepends some javascript headers so that the table is
rendered as a `DataTables <https://datatables.net>`_ data table.
This allows in-browser searching & sorting.
browser : str
Any legal browser name, e.g. ``'firefox'``, ``'chrome'``,
``'safari'`` (for mac, you may need to use ``'open -a
"/Applications/Google Chrome.app" {}'`` for Chrome). If
``'default'``, will use the system default browser.
jskwargs : dict
Passed to the `astropy.table.JSViewer` init. Defaults to
``{'use_local_files': True}`` which means that the JavaScript
libraries will be served from local copies.
tableid : str or `None`
An html ID tag for the table. Default is ``table{id}``, where id
is the unique integer id of the table object, id(self).
table_class : str or `None`
A string with a list of HTML classes used to style the table.
Default is "display compact", and other possible values can be
found in https://www.datatables.net/manual/styling/classes
css : string
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS``.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
"""
import os
import webbrowser
import tempfile
from .jsviewer import DEFAULT_CSS
from urllib.parse import urljoin
from urllib.request import pathname2url
if css is None:
css = DEFAULT_CSS
# We can't use NamedTemporaryFile here because it gets deleted as
# soon as it gets garbage collected.
tmpdir = tempfile.mkdtemp()
path = os.path.join(tmpdir, 'table.html')
with open(path, 'w') as tmp:
if jsviewer:
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
display_table.write(tmp, format='jsviewer', css=css,
max_lines=max_lines, jskwargs=jskwargs,
table_id=tableid, table_class=table_class)
else:
self.write(tmp, format='html')
try:
br = webbrowser.get(None if browser == 'default' else browser)
except webbrowser.Error:
log.error("Browser '{}' not found.".format(browser))
else:
br.open(urljoin('file:', pathname2url(path)))
def pformat(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False, html=False, tableid=None,
align=None, tableclass=None):
"""Return a list of lines for the formatted string representation of
the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for ``max_width`` except the configuration item is
``astropy.conf.max_width``.
Parameters
----------
max_lines : int or `None`
Maximum number of rows to output
max_width : int or `None`
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
html : bool
Format the output as an HTML table. Default is False.
tableid : str or `None`
An ID tag for the table; only used if html is set. Default is
"table{id}", where id is the unique integer id of the table object,
id(self)
align : str or list or tuple or `None`
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
tableclass : str or list of str or `None`
CSS classes for the table; only used if html is set. Default is
None.
Returns
-------
lines : list
Formatted table as a list of strings.
"""
lines, outs = self.formatter._pformat_table(
self, max_lines, max_width, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype, html=html,
tableid=tableid, tableclass=tableclass, align=align)
if outs['show_length']:
lines.append('Length = {0} rows'.format(len(self)))
return lines
def more(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False):
"""Interactively browse table with a paging interface.
Supported keys::
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help
Parameters
----------
max_lines : int
Maximum number of lines in table output
max_width : int or `None`
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
"""
self.formatter._more_tabcol(self, max_lines, max_width, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype)
def __getitem__(self, item):
if isinstance(item, str):
return self.columns[item]
elif isinstance(item, (int, np.integer)):
return self.Row(self, item)
elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'):
return self.Row(self, item.item())
elif self._is_list_or_tuple_of_str(item):
out = self.__class__([self[x] for x in item],
meta=deepcopy(self.meta),
copy_indices=self._copy_indices)
out._groups = groups.TableGroups(out, indices=self.groups._indices,
keys=self.groups._keys)
return out
elif ((isinstance(item, np.ndarray) and item.size == 0) or
(isinstance(item, (tuple, list)) and not item)):
# If item is an empty array/list/tuple then return the table with no rows
return self._new_from_slice([])
elif (isinstance(item, slice) or
isinstance(item, np.ndarray) or
isinstance(item, list) or
isinstance(item, tuple) and all(isinstance(x, np.ndarray)
for x in item)):
# here for the many ways to give a slice; a tuple of ndarray
# is produced by np.where, as in t[np.where(t['a'] > 2)]
# For all, a new table is constructed with slice of all columns
return self._new_from_slice(item)
else:
raise ValueError('Illegal type {0} for table item access'
.format(type(item)))
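# The branches above support several access patterns; a short illustrative
# sketch (added for clarity, column names and values are hypothetical):
#
#     t['a']                    # single column
#     t[1]                      # single row as a Row object
#     t[['a', 'c']]             # new table with a subset of columns
#     t[1:3]                    # new table with a slice of rows
#     t[np.where(t['a'] > 2)]   # new table with rows selected by np.where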
def __setitem__(self, item, value):
# If the item is a string then it must be the name of a column.
# If that column doesn't already exist then create it now.
if isinstance(item, str) and item not in self.colnames:
NewColumn = self.MaskedColumn if self.masked else self.Column
# If value doesn't have a dtype and won't be added as a mixin then
# convert to a numpy array.
if not hasattr(value, 'dtype') and not self._add_as_mixin_column(value):
value = np.asarray(value)
# Structured ndarray gets viewed as a mixin (unless already a valid
# mixin class).
if (isinstance(value, np.ndarray) and len(value.dtype) > 1 and
not self._add_as_mixin_column(value)):
value = value.view(NdarrayMixin)
# Make new column and assign the value. If the table currently
# has no rows (len=0) or the value is already a Column then
# define new column directly from value. In the latter case
# this allows for propagation of Column metadata. Otherwise
# define a new column with the right length and shape and then
# set it from value. This allows for broadcasting, e.g. t['a']
# = 1.
name = item
# If this is a column-like object that could be added directly to table
if isinstance(value, BaseColumn) or self._add_as_mixin_column(value):
# If we're setting a new column to a scalar, broadcast it.
# (things will fail in _init_from_cols if this doesn't work)
if (len(self) > 0 and (getattr(value, 'isscalar', False) or
getattr(value, 'shape', None) == () or
len(value) == 1)):
new_shape = (len(self),) + getattr(value, 'shape', ())[1:]
if isinstance(value, np.ndarray):
value = np.broadcast_to(value, shape=new_shape,
subok=True)
elif isinstance(value, ShapedLikeNDArray):
value = value._apply(np.broadcast_to, shape=new_shape,
subok=True)
new_column = col_copy(value)
new_column.info.name = name
elif len(self) == 0:
new_column = NewColumn(value, name=name)
else:
new_column = NewColumn(name=name, length=len(self), dtype=value.dtype,
shape=value.shape[1:],
unit=getattr(value, 'unit', None))
new_column[:] = value
# Now add new column to the table
self.add_columns([new_column], copy=False)
else:
n_cols = len(self.columns)
if isinstance(item, str):
# Set an existing column by first trying to replace, and if
# this fails do an in-place update. See definition of mask
# property for discussion of the _setitem_inplace attribute.
if (not getattr(self, '_setitem_inplace', False)
and not conf.replace_inplace):
try:
self._replace_column_warnings(item, value)
return
except Exception:
pass
self.columns[item][:] = value
elif isinstance(item, (int, np.integer)):
self._set_row(idx=item, colnames=self.colnames, vals=value)
elif (isinstance(item, slice) or
isinstance(item, np.ndarray) or
isinstance(item, list) or
(isinstance(item, tuple) and # output from np.where
all(isinstance(x, np.ndarray) for x in item))):
if isinstance(value, Table):
vals = (col for col in value.columns.values())
elif isinstance(value, np.ndarray) and value.dtype.names:
vals = (value[name] for name in value.dtype.names)
elif np.isscalar(value):
import itertools
vals = itertools.repeat(value, n_cols)
else: # Assume this is an iterable that will work
if len(value) != n_cols:
raise ValueError('Right side value needs {0} elements (one for each column)'
.format(n_cols))
vals = value
for col, val in zip(self.columns.values(), vals):
col[item] = val
else:
raise ValueError('Illegal type {0} for table item access'
.format(type(item)))
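# Illustrative assignment patterns handled by __setitem__ above (a sketch
# added for clarity; the values are made up):
#
#     t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
#     t['c'] = 1                # new column, scalar broadcast to len(t)
#     t['a'] = [10, 20, 30]     # update an existing column
#     t[1] = (5, 0.5, 9)        # set a whole row (one value per column)
#     t[t['a'] > 15] = 0        # set the matching rows in every column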
def __delitem__(self, item):
if isinstance(item, str):
self.remove_column(item)
elif isinstance(item, (int, np.integer)):
self.remove_row(item)
elif (isinstance(item, (list, tuple, np.ndarray)) and
all(isinstance(x, str) for x in item)):
self.remove_columns(item)
elif (isinstance(item, (list, np.ndarray)) and
np.asarray(item).dtype.kind == 'i'):
self.remove_rows(item)
elif isinstance(item, slice):
self.remove_rows(item)
else:
raise IndexError('illegal key or index value')
def _ipython_key_completions_(self):
return self.colnames
def field(self, item):
"""Return column[item] for recarray compatibility."""
return self.columns[item]
@property
def masked(self):
return self._masked
@masked.setter
def masked(self, masked):
raise Exception('Masked attribute is read-only (use t = Table(t, masked=True)'
' to convert to a masked table)')
def _set_masked(self, masked):
"""
Set the table masked property.
Parameters
----------
masked : bool
State of table masking (`True` or `False`)
"""
if hasattr(self, '_masked'):
# The only allowed change is from None to False or True, or False to True
if self._masked is None and masked in [False, True]:
self._masked = masked
elif self._masked is False and masked is True:
log.info("Upgrading Table to masked Table. Use Table.filled() to convert to unmasked table.")
self._masked = masked
elif self._masked is masked:
raise Exception("Masked attribute is already set to {0}".format(masked))
else:
raise Exception("Cannot change masked attribute to {0} once it is set to {1}"
.format(masked, self._masked))
else:
if masked in [True, False, None]:
self._masked = masked
else:
raise ValueError("masked should be one of True, False, None")
if self._masked:
self._column_class = self.MaskedColumn
else:
self._column_class = self.Column
@property
def ColumnClass(self):
if self._column_class is None:
return self.Column
else:
return self._column_class
@property
def dtype(self):
return np.dtype([descr(col) for col in self.columns.values()])
@property
def colnames(self):
return list(self.columns.keys())
@staticmethod
def _is_list_or_tuple_of_str(names):
"""Check that ``names`` is a tuple or list of strings"""
return (isinstance(names, (tuple, list)) and names and
all(isinstance(x, str) for x in names))
def keys(self):
return list(self.columns.keys())
def __len__(self):
if len(self.columns) == 0:
return 0
lengths = set(len(col) for col in self.columns.values())
if len(lengths) != 1:
len_strs = [' {0} : {1}'.format(name, len(col)) for name, col in self.columns.items()]
raise ValueError('Column length mismatch:\n{0}'.format('\n'.join(len_strs)))
return lengths.pop()
def index_column(self, name):
"""
Return the positional index of column ``name``.
Parameters
----------
name : str
column name
Returns
-------
index : int
Positional index of column ``name``.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Get index of column 'b' of the table::
>>> t.index_column('b')
1
"""
try:
return self.colnames.index(name)
except ValueError:
raise ValueError("Column {0} does not exist".format(name))
def add_column(self, col, index=None, name=None, rename_duplicate=False, copy=True):
"""
Add a new Column object ``col`` to the table. If ``index``
is supplied then insert column before ``index`` position
in the list of columns, otherwise append column to the end
of the list.
Parameters
----------
col : Column
Column object to add.
index : int or `None`
Insert column before this position or at end (default).
name : str
Column name
rename_duplicate : bool
Uniquify column name if it already exists. Default is False.
copy : bool
Make a copy of the new column. Default is True.
Examples
--------
Create a table with two columns 'a' and 'b'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> print(t)
a b
--- ---
1 0.1
2 0.2
3 0.3
Create a third column 'c' and append it to the end of the table::
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> t.add_column(col_c)
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Add column 'd' at position 1. Note that the column is inserted
before the given index::
>>> col_d = Column(name='d', data=['a', 'b', 'c'])
>>> t.add_column(col_d, 1)
>>> print(t)
a d b c
--- --- --- ---
1 a 0.1 x
2 b 0.2 y
3 c 0.3 z
Add second column named 'b' with rename_duplicate::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> col_b = Column(name='b', data=[1.1, 1.2, 1.3])
>>> t.add_column(col_b, rename_duplicate=True)
>>> print(t)
a b b_1
--- --- ---
1 0.1 1.1
2 0.2 1.2
3 0.3 1.3
Add an unnamed column or mixin object in the table using a default name
or by specifying an explicit name with ``name``. Name can also be overridden::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> col_c = Column(data=['x', 'y'])
>>> t.add_column(col_c)
>>> t.add_column(col_c, name='c')
>>> col_b = Column(name='b', data=[1.1, 1.2])
>>> t.add_column(col_b, name='d')
>>> print(t)
a b col2 c d
--- --- ---- --- ---
1 0.1 x x 1.1
2 0.2 y y 1.2
To add several columns use add_columns.
"""
if index is None:
index = len(self.columns)
if name is not None:
name = (name,)
self.add_columns([col], [index], name, copy=copy, rename_duplicate=rename_duplicate)
def add_columns(self, cols, indexes=None, names=None, copy=True, rename_duplicate=False):
"""
Add a list of new Column objects ``cols`` to the table. If a
corresponding list of ``indexes`` is supplied then insert column
before each ``index`` position in the *original* list of columns,
otherwise append columns to the end of the list.
Parameters
----------
cols : list of Columns
Column objects to add.
indexes : list of ints or `None`
Insert column before this position or at end (default).
names : list of str
Column names
copy : bool
Make a copy of the new columns. Default is True.
rename_duplicate : bool
Uniquify new column names if they duplicate the existing ones.
Default is False.
Examples
--------
Create a table with two columns 'a' and 'b'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> print(t)
a b
--- ---
1 0.1
2 0.2
3 0.3
Create column 'c' and 'd' and append them to the end of the table::
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> col_d = Column(name='d', data=['u', 'v', 'w'])
>>> t.add_columns([col_c, col_d])
>>> print(t)
a b c d
--- --- --- ---
1 0.1 x u
2 0.2 y v
3 0.3 z w
Add column 'c' at position 0 and column 'd' at position 1. Note that
the columns are inserted before the given position::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> col_d = Column(name='d', data=['u', 'v', 'w'])
>>> t.add_columns([col_c, col_d], [0, 1])
>>> print(t)
c a d b
--- --- --- ---
x 1 u 0.1
y 2 v 0.2
z 3 w 0.3
Add second column 'b' and column 'c' with ``rename_duplicate``::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> col_b = Column(name='b', data=[1.1, 1.2, 1.3])
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> t.add_columns([col_b, col_c], rename_duplicate=True)
>>> print(t)
a b b_1 c
--- --- --- ---
1 0.1 1.1 x
2 0.2 1.2 y
3 0.3 1.3 z
Add unnamed columns or mixin objects in the table using default names
or by specifying explicit names with ``names``. Names can also be overridden::
>>> t = Table()
>>> col_a = Column(data=['x', 'y'])
>>> col_b = Column(name='b', data=['u', 'v'])
>>> t.add_columns([col_a, col_b])
>>> t.add_columns([col_a, col_b], names=['c', 'd'])
>>> print(t)
col0 b c d
---- --- --- ---
x u x u
y v y v
"""
if indexes is None:
indexes = [len(self.columns)] * len(cols)
elif len(indexes) != len(cols):
raise ValueError('Number of indexes must match number of cols')
if copy:
cols = [col_copy(col) for col in cols]
if len(self.columns) == 0:
# No existing table data, init from cols
newcols = cols
else:
newcols = list(self.columns.values())
new_indexes = list(range(len(newcols) + 1))
for col, index in zip(cols, indexes):
i = new_indexes.index(index)
new_indexes.insert(i, None)
newcols.insert(i, col)
if names is None:
names = (None,) * len(cols)
elif len(names) != len(cols):
raise ValueError('Number of names must match number of cols')
for i, (col, name) in enumerate(zip(cols, names)):
if name is None:
if col.info.name is not None:
continue
name = 'col{}'.format(i + len(self.columns))
if col.info.parent_table is not None:
col = col_copy(col)
col.info.name = name
if rename_duplicate:
existing_names = set(self.colnames)
for col in cols:
i = 1
orig_name = col.info.name
if col.info.name in existing_names:
# If the column belongs to another table then copy it
# before renaming
while col.info.name in existing_names:
# Iterate until a unique name is found
if col.info.parent_table is not None:
col = col_copy(col)
new_name = '{0}_{1}'.format(orig_name, i)
col.info.name = new_name
i += 1
existing_names.add(new_name)
self._init_from_cols(newcols)
def _replace_column_warnings(self, name, col):
"""
Same as replace_column but issues warnings under various circumstances.
"""
warns = conf.replace_warnings
if 'refcount' in warns and name in self.colnames:
refcount = sys.getrefcount(self[name])
if name in self.colnames:
old_col = self[name]
# This may raise an exception (e.g. t['a'] = 1) in which case none of
# the downstream code runs.
self.replace_column(name, col)
if 'always' in warns:
warnings.warn("replaced column '{}'".format(name),
TableReplaceWarning, stacklevel=3)
if 'slice' in warns:
try:
# Check for ndarray-subclass slice. An unsliced instance
# has an ndarray for the base while sliced has the same class
# as parent.
if isinstance(old_col.base, old_col.__class__):
msg = ("replaced column '{}' which looks like an array slice. "
"The new column no longer shares memory with the "
"original array.".format(name))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
except AttributeError:
pass
if 'refcount' in warns:
# Did reference count change?
new_refcount = sys.getrefcount(self[name])
if refcount != new_refcount:
msg = ("replaced column '{}' and the number of references "
"to the column changed.".format(name))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
if 'attributes' in warns:
# Any of the standard column attributes changed?
changed_attrs = []
new_col = self[name]
# Check base DataInfo attributes that any column will have
for attr in DataInfo.attr_names:
if getattr(old_col.info, attr) != getattr(new_col.info, attr):
changed_attrs.append(attr)
if changed_attrs:
msg = ("replaced column '{}' and column attributes {} changed."
.format(name, changed_attrs))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
def replace_column(self, name, col):
"""
Replace column ``name`` with the new ``col`` object.
Parameters
----------
name : str
Name of column to replace
col : column object (list, ndarray, Column, etc)
New column object to replace the existing column
Examples
--------
Replace column 'a' with a float version of itself::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> float_a = t['a'].astype(float)
>>> t.replace_column('a', float_a)
"""
if name not in self.colnames:
raise ValueError('column name {0} is not in the table'.format(name))
if self[name].info.indices:
raise ValueError('cannot replace a table index column')
t = self.__class__([col], names=[name])
cols = OrderedDict(self.columns)
cols[name] = t[name]
self._init_from_cols(cols.values())
def remove_row(self, index):
"""
Remove a row from the table.
Parameters
----------
index : int
Index of row to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove row 1 from the table::
>>> t.remove_row(1)
>>> print(t)
a b c
--- --- ---
1 0.1 x
3 0.3 z
To remove several rows at the same time use remove_rows.
"""
# check the index against the types that work with np.delete
if not isinstance(index, (int, np.integer)):
raise TypeError("Row index must be an integer")
self.remove_rows(index)
def remove_rows(self, row_specifier):
"""
Remove rows from the table.
Parameters
----------
row_specifier : slice, int, or array of ints
Specification for rows to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove rows 0 and 2 from the table::
>>> t.remove_rows([0, 2])
>>> print(t)
a b c
--- --- ---
2 0.2 y
Note that there are no warnings if the slice operator extends
outside the data::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_rows(slice(10, 20, 1))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
"""
# Update indices
for index in self.indices:
index.remove_rows(row_specifier)
keep_mask = np.ones(len(self), dtype=bool)
keep_mask[row_specifier] = False
columns = self.TableColumns()
for name, col in self.columns.items():
newcol = col[keep_mask]
newcol.info.parent_table = self
columns[name] = newcol
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, '_groups'):
del self._groups
def remove_column(self, name):
"""
Remove a column from the table.
This can also be done with::
del table[name]
Parameters
----------
name : str
Name of column to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove column 'b' from the table::
>>> t.remove_column('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
To remove several columns at the same time use remove_columns.
"""
self.remove_columns([name])
def remove_columns(self, names):
'''
Remove several columns from the table.
Parameters
----------
names : list
A list containing the names of the columns to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove columns 'b' and 'c' from the table::
>>> t.remove_columns(['b', 'c'])
>>> print(t)
a
---
1
2
3
Specifying only a single column also works. Remove column 'b' from the table::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_columns('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
This gives the same as using remove_column.
'''
if isinstance(names, str):
names = [names]
for name in names:
if name not in self.columns:
raise KeyError("Column {0} does not exist".format(name))
for name in names:
self.columns.pop(name)
def _convert_string_dtype(self, in_kind, out_kind):
"""
Convert string-like columns to/from bytestring and unicode (internal only).
Parameters
----------
in_kind : str
Input dtype.kind
out_kind : str
Output dtype.kind
"""
# If there are no `in_kind` columns then do nothing
cols = self.columns.values()
if not any(col.dtype.kind == in_kind for col in cols):
return
newcols = []
for col in cols:
if col.dtype.kind == in_kind:
newdtype = re.sub(in_kind, out_kind, col.dtype.str)
newcol = col.__class__(col, dtype=newdtype)
else:
newcol = col
newcols.append(newcol)
self._init_from_cols(newcols)
def convert_bytestring_to_unicode(self, python3_only=NoValue):
"""
Convert bytestring columns (dtype.kind='S') to unicode (dtype.kind='U') assuming
ASCII encoding.
Internally this changes string columns to represent each character
in the string with a 4-byte UCS-4 equivalent, so it is inefficient
for memory but allows scripts to manipulate string arrays with
natural syntax.
"""
if python3_only is not NoValue:
warnings.warn('The "python3_only" keyword is now deprecated.',
AstropyDeprecationWarning)
self._convert_string_dtype('S', 'U')
def convert_unicode_to_bytestring(self, python3_only=NoValue):
"""
Convert ASCII-only unicode columns (dtype.kind='U') to bytestring (dtype.kind='S').
When exporting a unicode string array to a file, it may be desirable
to encode unicode columns as bytestrings. This routine takes
advantage of numpy automated conversion which works for strings that
are pure ASCII.
"""
if python3_only is not NoValue:
warnings.warn('The "python3_only" keyword is now deprecated.',
AstropyDeprecationWarning)
self._convert_string_dtype('U', 'S')
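# A minimal sketch of the two string conversions above (illustrative only):
#
#     t = Table([[b'x', b'y']], names=('s',))
#     t.convert_bytestring_to_unicode()   # dtype kind 'S' -> 'U'
#     t.convert_unicode_to_bytestring()   # dtype kind 'U' -> 'S'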
def keep_columns(self, names):
'''
Keep only the columns specified (remove the others).
Parameters
----------
names : list
A list containing the names of the columns to keep. All other
columns will be removed.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Specifying only a single column name keeps only this column.
Keep only column 'a' of the table::
>>> t.keep_columns('a')
>>> print(t)
a
---
1
2
3
Specifying a list of column names is also possible.
Keep columns 'a' and 'c' of the table::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.keep_columns(['a', 'c'])
>>> print(t)
a c
--- ---
1 x
2 y
3 z
'''
if isinstance(names, str):
names = [names]
for name in names:
if name not in self.columns:
raise KeyError("Column {0} does not exist".format(name))
remove = list(set(self.keys()) - set(names))
self.remove_columns(remove)
def rename_column(self, name, new_name):
'''
Rename a column.
This can also be done directly by setting the ``name`` attribute
for a column::
table[name].name = new_name
TODO: this won't work for mixins
Parameters
----------
name : str
The current name of the column.
new_name : str
The new name for the column
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 3 5
2 4 6
Renaming column 'a' to 'aa'::
>>> t.rename_column('a' , 'aa')
>>> print(t)
aa b c
--- --- ---
1 3 5
2 4 6
'''
if name not in self.keys():
raise KeyError("Column {0} does not exist".format(name))
self.columns[name].info.name = new_name
def _set_row(self, idx, colnames, vals):
try:
assert len(vals) == len(colnames)
except Exception:
raise ValueError('right hand side must be a sequence of values with '
'the same length as the number of selected columns')
# Keep track of original values before setting each column so that
# setting row can be transactional.
orig_vals = []
cols = self.columns
try:
for name, val in zip(colnames, vals):
orig_vals.append(cols[name][idx])
cols[name][idx] = val
except Exception:
# If anything went wrong first revert the row update then raise
for name, val in zip(colnames, orig_vals[:-1]):
cols[name][idx] = val
raise
def add_row(self, vals=None, mask=None):
"""Add a new row to the end of the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
This method requires that the Table object "owns" the underlying array
data. In particular one cannot add a row to a Table that was
initialized with copy=False from an existing array.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or `None`
Use the specified values in the new row
mask : tuple, list, dict or `None`
Use the specified mask values in the new row
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
Adding a new row with entries '3' in 'a', '6' in 'b' and '9' in 'c'::
>>> t.add_row([3,6,9])
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
3 6 9
"""
self.insert_row(len(self), vals, mask)
def insert_row(self, index, vals=None, mask=None):
"""Add a new row before the given ``index`` position in the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or `None`
Use the specified values in the new row
mask : tuple, list, dict or `None`
Use the specified mask values in the new row
"""
colnames = self.colnames
N = len(self)
if index < -N or index > N:
raise IndexError("Index {0} is out of bounds for table with length {1}"
.format(index, N))
if index < 0:
index += N
def _is_mapping(obj):
"""Minimal checker for mapping (dict-like) interface for obj"""
attrs = ('__getitem__', '__len__', '__iter__', 'keys', 'values', 'items')
return all(hasattr(obj, attr) for attr in attrs)
if mask is not None and not self.masked:
# Possibly issue upgrade warning and update self.ColumnClass. This
# does not change the existing columns.
self._set_masked(True)
if _is_mapping(vals) or vals is None:
# From the vals and/or mask mappings create the corresponding lists
# that have entries for each table column.
if mask is not None and not _is_mapping(mask):
raise TypeError("Mismatch between type of vals and mask")
# Now check that the mask is specified for the same keys as the
# values, otherwise things get really confusing.
if mask is not None and set(vals.keys()) != set(mask.keys()):
raise ValueError('keys in mask should match keys in vals')
if vals and any(name not in colnames for name in vals):
raise ValueError('Keys in vals must all be valid column names')
vals_list = []
mask_list = []
for name in colnames:
if vals and name in vals:
vals_list.append(vals[name])
mask_list.append(False if mask is None else mask[name])
else:
col = self[name]
if hasattr(col, 'dtype'):
# Make a placeholder zero element of the right type which is masked.
# This assumes the appropriate insert() method will broadcast a
# numpy scalar to the right shape.
vals_list.append(np.zeros(shape=(), dtype=col.dtype))
# For masked table any unsupplied values are masked by default.
mask_list.append(self.masked and vals is not None)
else:
raise ValueError("Value must be supplied for column '{0}'".format(name))
vals = vals_list
mask = mask_list
if isiterable(vals):
if mask is not None and (not isiterable(mask) or _is_mapping(mask)):
raise TypeError("Mismatch between type of vals and mask")
if len(self.columns) != len(vals):
raise ValueError('Mismatch between number of vals and columns')
if mask is not None:
if len(self.columns) != len(mask):
raise ValueError('Mismatch between number of masks and columns')
else:
mask = [False] * len(self.columns)
else:
raise TypeError('Vals must be an iterable or mapping or None')
columns = self.TableColumns()
try:
# Insert val at index for each column
for name, col, val, mask_ in zip(colnames, self.columns.values(), vals, mask):
# If the new row caused a change in self.ColumnClass then
# Column-based classes need to be converted first. This is
# typical for adding a row with mask values to an unmasked table.
if isinstance(col, Column) and not isinstance(col, self.ColumnClass):
col = self.ColumnClass(col, copy=False)
newcol = col.insert(index, val, axis=0)
if not isinstance(newcol, BaseColumn):
newcol.info.name = name
if self.masked:
newcol.mask = FalseArray(newcol.shape)
if len(newcol) != N + 1:
raise ValueError('Incorrect length for column {0} after inserting {1}'
' (expected {2}, got {3})'
.format(name, val, N + 1, len(newcol)))
newcol.info.parent_table = self
# Set mask if needed
if self.masked:
newcol.mask[index] = mask_
columns[name] = newcol
# insert row in indices
for table_index in self.indices:
table_index.insert_row(index, vals, self.columns.values())
except Exception as err:
raise ValueError("Unable to insert row because of exception in column '{0}':\n{1}"
.format(name, err))
else:
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, '_groups'):
del self._groups
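# Hedged examples of the two row-adding methods above (values are made up):
#
#     t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
#     t.add_row([3, 0.3])                  # append a row from a sequence
#     t.insert_row(1, {'a': 9, 'b': 0.9})  # insert before index 1 from a mapping
#     t.add_row()                          # row of zeros for each column dtype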
def _replace_cols(self, columns):
for col, new_col in zip(self.columns.values(), columns.values()):
new_col.info.indices = []
for index in col.info.indices:
index.columns[index.col_position(col.info.name)] = new_col
new_col.info.indices.append(index)
self.columns = columns
def argsort(self, keys=None, kind=None):
"""
Return the indices which would sort the table according to one or
more key columns. This simply calls the `numpy.argsort` function on
the table with the ``order`` parameter set to ``keys``.
Parameters
----------
keys : str or list of str
The column name(s) to order the table by
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
Returns
-------
index_array : ndarray, int
Array of indices that sorts the table by the specified key
column(s).
"""
if isinstance(keys, str):
keys = [keys]
# use index sorted order if possible
if keys is not None:
index = get_index(self, self[keys])
if index is not None:
return index.sorted_data()
kwargs = {}
if keys:
kwargs['order'] = keys
if kind:
kwargs['kind'] = kind
if keys:
data = self[keys].as_array()
else:
data = self.as_array()
return data.argsort(**kwargs)
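# Note (added for clarity): argsort returns indices rather than reordering the
# table, so a sorted copy can be made without modifying the original, e.g. for
# a table ``t`` with columns 'a' and 'b':
#
#     order = t.argsort(['b', 'a'])
#     t_sorted = t[order]    # new table in sorted order; t itself is unchanged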
def sort(self, keys=None):
'''
Sort the table according to one or more keys. This operates
on the existing table and does not return a new table.
Parameters
----------
keys : str or list of str
The key(s) to order the table by. If None, use the
primary index of the Table.
Examples
--------
Create a table with 3 columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'],
... [12,15,18]], names=('firstname','name','tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Sorting according to standard sorting rules, first 'name' then 'firstname'::
>>> t.sort(['name','firstname'])
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
'''
if keys is None:
if not self.indices:
raise ValueError("Table sort requires input keys or a table index")
keys = [x.info.name for x in self.indices[0].columns]
if isinstance(keys, str):
keys = [keys]
indexes = self.argsort(keys)
sort_index = get_index(self, self[keys])
if sort_index is not None:
# avoid inefficient relabelling of sorted index
prev_frozen = sort_index._frozen
sort_index._frozen = True
for col in self.columns.values():
col[:] = col.take(indexes, axis=0)
if sort_index is not None:
# undo index freeze
sort_index._frozen = prev_frozen
# now relabel the sort index appropriately
sort_index.sort()
def reverse(self):
'''
Reverse the row order of table rows. The table is reversed
in place and there are no function arguments.
Examples
--------
Create a table with three columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'],
... [12,15,18]], names=('firstname','name','tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Reversing order::
>>> t.reverse()
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
'''
for col in self.columns.values():
col[:] = col[::-1]
for index in self.indices:
index.reverse()
@classmethod
def read(cls, *args, **kwargs):
"""
Read and parse a data table and return as a Table.
This function provides the Table interface to the astropy unified I/O
layer. This allows easily reading a file in many supported data formats
using syntax such as::
>>> from astropy.table import Table
>>> dat = Table.read('table.dat', format='ascii')
>>> events = Table.read('events.fits', format='fits')
See http://docs.astropy.org/en/stable/io/unified.html for details.
Parameters
----------
format : str
File format specifier.
*args : tuple, optional
Positional arguments passed through to data reader. If supplied the
first argument is the input filename.
**kwargs : dict, optional
Keyword arguments passed through to data reader.
Returns
-------
out : `Table`
Table corresponding to file contents
Notes
-----
"""
# The hanging Notes section just above is a section placeholder for
# import-time processing that collects available formats into an
# RST table and inserts at the end of the docstring. DO NOT REMOVE.
out = io_registry.read(cls, *args, **kwargs)
# For some readers (e.g., ascii.ecsv), the returned `out` class is not
# guaranteed to be the same as the desired output `cls`. If so,
# try coercing to desired class without copying (io.registry.read
# would normally do a copy). The normal case here is swapping
# Table <=> QTable.
if cls is not out.__class__:
try:
out = cls(out, copy=False)
except Exception:
raise TypeError('could not convert reader output to {0} '
'class.'.format(cls.__name__))
return out
def write(self, *args, **kwargs):
"""Write this Table object out in the specified format.
This function provides the Table interface to the astropy unified I/O
layer. This allows easily writing a file in many supported data formats
using syntax such as::
>>> from astropy.table import Table
>>> dat = Table([[1, 2], [3, 4]], names=('a', 'b'))
>>> dat.write('table.dat', format='ascii')
See http://docs.astropy.org/en/stable/io/unified.html for details.
Parameters
----------
format : str
File format specifier.
serialize_method : str, dict, optional
Serialization method specifier for columns.
*args : tuple, optional
Positional arguments passed through to data writer. If supplied the
first argument is the output filename.
**kwargs : dict, optional
Keyword arguments passed through to data writer.
Notes
-----
"""
serialize_method = kwargs.pop('serialize_method', None)
with serialize_method_as(self, serialize_method):
io_registry.write(self, *args, **kwargs)
def copy(self, copy_data=True):
'''
Return a copy of the table.
Parameters
----------
copy_data : bool
If `True` (the default), copy the underlying data array.
Otherwise, use the same data array. The ``meta`` is always
deepcopied regardless of the value for ``copy_data``.
'''
out = self.__class__(self, copy=copy_data)
# If the current table is grouped then do the same in the copy
if hasattr(self, '_groups'):
out._groups = groups.TableGroups(out, indices=self._groups._indices,
keys=self._groups._keys)
return out
def __deepcopy__(self, memo=None):
return self.copy(True)
def __copy__(self):
return self.copy(False)
def __lt__(self, other):
return super().__lt__(other)
def __gt__(self, other):
return super().__gt__(other)
def __le__(self, other):
return super().__le__(other)
def __ge__(self, other):
return super().__ge__(other)
def __eq__(self, other):
if isinstance(other, Table):
other = other.as_array()
if self.masked:
if isinstance(other, np.ma.MaskedArray):
result = self.as_array() == other
else:
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in self.dtype.names])
result = (self.as_array().data == other) & (self.mask == false_mask)
else:
if isinstance(other, np.ma.MaskedArray):
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in other.dtype.names])
result = (self.as_array() == other.data) & (other.mask == false_mask)
else:
result = self.as_array() == other
return result
def __ne__(self, other):
return ~self.__eq__(other)
@property
def groups(self):
if not hasattr(self, '_groups'):
self._groups = groups.TableGroups(self)
return self._groups
def group_by(self, keys):
"""
Group this table by the specified ``keys``
This effectively splits the table into groups which correspond to
unique values of the ``keys`` grouping object. The output is a new
`TableGroups` which contains a copy of this table but sorted by row
according to ``keys``.
The ``keys`` input to `group_by` can be specified in different ways:
- String or list of strings corresponding to table column name(s)
- Numpy array (homogeneous or structured) with same length as this table
- `Table` with same length as this table
Parameters
----------
keys : str, list of str, numpy array, or `Table`
Key grouping object
Returns
-------
out : `Table`
New table with groups set
"""
return groups.table_group_by(self, keys)
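# A brief grouping sketch (illustrative values, added for clarity):
#
#     t = Table([['a', 'b', 'a'], [1, 2, 3]], names=('key', 'val'))
#     g = t.group_by('key')    # copy of t sorted by 'key', with groups set
#     g.groups.keys            # table of the unique key values
#     for group in g.groups:   # iterate over the per-key sub-tables
#         print(len(group))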
def to_pandas(self):
"""
Return a :class:`pandas.DataFrame` instance
Returns
-------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
Raises
------
ImportError
If pandas is not installed
ValueError
If the Table contains mixin or multi-dimensional columns
"""
from pandas import DataFrame
if self.has_mixin_columns:
raise ValueError("Cannot convert a table with mixin columns to a pandas DataFrame")
if any(getattr(col, 'ndim', 1) > 1 for col in self.columns.values()):
raise ValueError("Cannot convert a table with multi-dimensional columns to a pandas DataFrame")
out = OrderedDict()
for name, column in self.columns.items():
if isinstance(column, MaskedColumn) and np.any(column.mask):
if column.dtype.kind in ['i', 'u']:
out[name] = column.astype(float).filled(np.nan)
warnings.warn(
"converted column '{}' from integer to float".format(
name), TableReplaceWarning, stacklevel=3)
elif column.dtype.kind in ['f', 'c']:
out[name] = column.filled(np.nan)
else:
out[name] = column.astype(object).filled(np.nan)
else:
out[name] = column
if out[name].dtype.byteorder not in ('=', '|'):
out[name] = out[name].byteswap().newbyteorder()
return DataFrame(out)
@classmethod
def from_pandas(cls, dataframe):
"""
Create a `Table` from a :class:`pandas.DataFrame` instance
Parameters
----------
dataframe : :class:`pandas.DataFrame`
The pandas :class:`pandas.DataFrame` instance
Returns
-------
table : `Table`
A `Table` (or subclass) instance
"""
out = OrderedDict()
for name in dataframe.columns:
column = dataframe[name]
mask = np.array(column.isnull())
data = np.array(column)
if data.dtype.kind == 'O':
# If all elements of an object array are string-like or np.nan
# then coerce back to a native numpy str/unicode array.
string_types = (str, bytes)
nan = np.nan
if all(isinstance(x, string_types) or x is nan for x in data):
# Force any missing (null) values to b''. Numpy will
# upcast to str/unicode as needed.
data[mask] = b''
# When the numpy object array is represented as a list then
# numpy initializes to the correct string or unicode type.
data = np.array([x for x in data])
if np.any(mask):
out[name] = MaskedColumn(data=data, name=name, mask=mask)
else:
out[name] = Column(data=data, name=name)
return cls(out)
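# A minimal round trip through pandas using the two methods above
# (illustrative sketch; assumes pandas is installed):
#
#     t = Table([[1, 2, 3], [2.5, 3.5, 4.5]], names=('a', 'b'))
#     df = t.to_pandas()           # pandas.DataFrame with columns 'a' and 'b'
#     t2 = Table.from_pandas(df)   # back to a Table (masked where df had nulls)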
info = TableInfo()
class QTable(Table):
"""A class to represent tables of heterogeneous data.
`QTable` provides a class for heterogeneous tabular data which can be
easily modified, for instance adding columns or new rows.
The `QTable` class is identical to `Table` except that columns with an
associated ``unit`` attribute are converted to `~astropy.units.Quantity`
objects.
Parameters
----------
data : numpy ndarray, dict, list, Table, or table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. Default is True.
rows : numpy ndarray, list of lists, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
"""
def _add_as_mixin_column(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
return has_info_class(col, MixinInfo)
def _convert_col_for_table(self, col):
if (isinstance(col, Column) and getattr(col, 'unit', None) is not None):
# We need to turn the column into a quantity, or a subclass
# identified in the unit (such as u.mag()).
q_cls = getattr(col.unit, '_quantity_class', Quantity)
qcol = q_cls(col.data, col.unit, copy=False)
qcol.info = col.info
col = qcol
else:
col = super()._convert_col_for_table(col)
return col
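# A short illustration of the Table/QTable difference described in the QTable
# docstring above (hedged sketch, not part of the original source):
#
#     from astropy import units as u
#     col = Column([1.0, 2.0], name='v', unit=u.m / u.s)
#     Table([col])['v']     # -> Column; the unit is only an attribute
#     QTable([col])['v']    # -> Quantity; arithmetic propagates the unit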
class NdarrayMixin(np.ndarray):
"""
Mixin column class to allow storage of arbitrary numpy
ndarrays within a Table. This is a subclass of numpy.ndarray
and has the same initialization options as ndarray().
"""
info = ParentDtypeInfo()
def __new__(cls, obj, *args, **kwargs):
self = np.array(obj, *args, **kwargs).view(cls)
if 'info' in getattr(obj, '__dict__', ()):
self.info = obj.info
return self
def __array_finalize__(self, obj):
if obj is None:
return
if callable(super().__array_finalize__):
super().__array_finalize__(obj)
# Self was created from template (e.g. obj[slice] or (obj * 2))
# or viewcast e.g. obj.view(Column). In either case we want to
# init Column attributes for self from obj if possible.
if 'info' in getattr(obj, '__dict__', ()):
self.info = obj.info
def __reduce__(self):
# patch to pickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
object_state = list(super().__reduce__())
object_state[2] = (object_state[2], self.__dict__)
return tuple(object_state)
def __setstate__(self, state):
# patch to unpickle NdarrayMixin objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
nd_state, own_state = state
super().__setstate__(nd_state)
self.__dict__.update(own_state)
| 36.705026 | 109 | 0.547821 |
4a21cce6a8775ff208f2fbc94ff5c4e4fc25720e | 781 | py | Python | 2) Detecting Edges and Applying Image Filters/#1 Blurring/other_blurs.py | RezaFirouzii/python-opencv-review | 454a2be7fa36516a2b1fbd4e6162068bba25c989 | [
"MIT"
] | null | null | null | 2) Detecting Edges and Applying Image Filters/#1 Blurring/other_blurs.py | RezaFirouzii/python-opencv-review | 454a2be7fa36516a2b1fbd4e6162068bba25c989 | [
"MIT"
] | null | null | null | 2) Detecting Edges and Applying Image Filters/#1 Blurring/other_blurs.py | RezaFirouzii/python-opencv-review | 454a2be7fa36516a2b1fbd4e6162068bba25c989 | [
"MIT"
] | null | null | null | import cv2 as cv
if __name__ == "__main__":
img = cv.imread('../../assets/test1.jpg')
cv.imshow('Original Image', img)
size = 9 # all kernels will be 9x9
# median blur filter
blur_img = cv.medianBlur(img, size)
cv.imshow("Median Blur Output", blur_img)
# gaussian blur filter
blur_img = cv.GaussianBlur(img, (size, size), 0)
cv.imshow("Gaussian Blur Output", blur_img)
cv.waitKey(0)
cv.destroyAllWindows()
# hint:
# "median blur"
# median blur is used to remove salt-and-pepper noise.
# it replaces each pixel with the median of its neighborhood.
# "gaussian blur"
# gaussian blur smooths everything out equally.
# it does not preserve edges and blurs every pixel. | 28.925926 | 82 | 0.650448 |
4a21cd50f04c8284783e55666d4ed43eb5b82e52 | 7,045 | py | Python | 3ddfa/inference.py | bruinxiong/Rotate-and-Render | 135d2b7b02ca4b3bdf7961b260466ff8b64bdb59 | [
"CC-BY-4.0"
] | 397 | 2020-03-18T06:45:04.000Z | 2022-03-28T12:43:25.000Z | 3ddfa/inference.py | bruinxiong/Rotate-and-Render | 135d2b7b02ca4b3bdf7961b260466ff8b64bdb59 | [
"CC-BY-4.0"
] | 39 | 2020-03-18T17:11:45.000Z | 2022-03-29T08:55:55.000Z | 3ddfa/inference.py | bruinxiong/Rotate-and-Render | 135d2b7b02ca4b3bdf7961b260466ff8b64bdb59 | [
"CC-BY-4.0"
] | 104 | 2020-03-18T11:54:26.000Z | 2022-03-18T10:22:54.000Z | #!/usr/bin/env python3
# coding: utf-8
__author__ = 'cleardusk'
"""
The pipeline of 3DDFA prediction: given one image, predict the 3d face vertices, 68 landmarks and visualization.
[todo]
1. CPU optimization: https://pmchojnacki.wordpress.com/2018/10/07/slow-pytorch-cpu-performance
"""
import torch
import torchvision.transforms as transforms
import mobilenet_v1
import numpy as np
import cv2
import os
import math
from tqdm import tqdm
import time
import face_alignment
from utils.ddfa import ToTensorGjz, NormalizeGjz, str2bool
import scipy.io as sio
from utils.inference import get_suffix, parse_roi_box_from_landmark, crop_img, predict_68pts, dump_to_ply, dump_vertex, \
draw_landmarks, predict_dense, parse_roi_box_from_bbox, get_colors, write_obj_with_colors, get_aligned_param, get_5lmk_from_68lmk
from utils.cv_plot import plot_pose_box
from utils.estimate_pose import parse_pose
from utils.params import param_mean, param_std
from utils.render import get_depths_image, cget_depths_image, cpncc, crender_colors
from utils.paf import gen_img_paf
import argparse
import torch.backends.cudnn as cudnn
STD_SIZE = 120
def main(args):
# 1. load pre-trained model
checkpoint_fp = 'models/phase1_wpdc_vdc.pth.tar'
arch = 'mobilenet_1'
checkpoint = torch.load(checkpoint_fp, map_location=lambda storage, loc: storage)['state_dict']
model = getattr(mobilenet_v1, arch)(num_classes=62) # 62 = 12(pose) + 40(shape) +10(expression)
model_dict = model.state_dict()
# because the model was trained with multiple GPUs, the 'module.' prefix needs to be removed
for k in checkpoint.keys():
model_dict[k.replace('module.', '')] = checkpoint[k]
model.load_state_dict(model_dict)
if args.mode == 'gpu':
cudnn.benchmark = True
model = model.cuda()
model.eval()
tri = sio.loadmat('visualize/tri.mat')['tri']
transform = transforms.Compose([ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)])
# 2. parse images list
with open(args.img_list) as f:
img_list = [x.strip() for x in f.readlines()]
landmark_list = []
alignment_model = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=False)
if not os.path.exists(args.save_dir):
os.mkdir(args.save_dir)
if not os.path.exists(args.save_lmk_dir):
os.mkdir(args.save_lmk_dir)
for img_idx, img_fp in enumerate(tqdm(img_list)):
img_ori = cv2.imread(os.path.join(args.img_prefix, img_fp))
pts_res = []
Ps = [] # Camera matrix collection
poses = [] # pose collection, [todo: validate it]
vertices_lst = [] # store multiple face vertices
ind = 0
suffix = get_suffix(img_fp)
# the face alignment model takes RGB input; the result is a tuple with landmarks and boxes
preds = alignment_model.get_landmarks(img_ori[:, :, ::-1])
pts_2d_68 = preds[0][0]
pts_2d_5 = get_5lmk_from_68lmk(pts_2d_68)
landmark_list.append(pts_2d_5)
roi_box = parse_roi_box_from_landmark(pts_2d_68.T)
img = crop_img(img_ori, roi_box)
# import pdb; pdb.set_trace()
# forward: one step
img = cv2.resize(img, dsize=(STD_SIZE, STD_SIZE), interpolation=cv2.INTER_LINEAR)
input = transform(img).unsqueeze(0)
with torch.no_grad():
if args.mode == 'gpu':
input = input.cuda()
param = model(input)
param = param.squeeze().cpu().numpy().flatten().astype(np.float32)
# 68 pts
pts68 = predict_68pts(param, roi_box)
# two-step for more accurate bbox to crop face
if args.bbox_init == 'two':
roi_box = parse_roi_box_from_landmark(pts68)
img_step2 = crop_img(img_ori, roi_box)
img_step2 = cv2.resize(img_step2, dsize=(STD_SIZE, STD_SIZE), interpolation=cv2.INTER_LINEAR)
input = transform(img_step2).unsqueeze(0)
with torch.no_grad():
if args.mode == 'gpu':
input = input.cuda()
param = model(input)
param = param.squeeze().cpu().numpy().flatten().astype(np.float32)
pts68 = predict_68pts(param, roi_box)
pts_res.append(pts68)
P, pose = parse_pose(param)
Ps.append(P)
poses.append(pose)
# dense face 3d vertices
vertices = predict_dense(param, roi_box)
if args.dump_2d_img:
wfp_2d_img = os.path.join(args.save_dir, os.path.basename(img_fp))
colors = get_colors(img_ori, vertices)
# aligned_param = get_aligned_param(param)
# vertices_aligned = predict_dense(aligned_param, roi_box)
# h, w, c = 120, 120, 3
h, w, c = img_ori.shape
img_2d = crender_colors(vertices.T, (tri - 1).T, colors[:, ::-1], h, w)
cv2.imwrite(wfp_2d_img, img_2d[:, :, ::-1])
if args.dump_param:
split = img_fp.split('/')
save_name = os.path.join(args.save_dir, '{}.txt'.format(os.path.splitext(split[-1])[0]))
this_param = param * param_std + param_mean
this_param = np.concatenate((this_param, roi_box))
this_param.tofile(save_name, sep=' ')
if args.dump_lmk:
save_path = os.path.join(args.save_lmk_dir, 'realign_lmk')
with open(save_path, 'w') as f:
for idx, (fname, land) in enumerate(zip(img_list, landmark_list)):
# f.write('{} {} {} {}')
land = land.astype(np.int)
land_str = ' '.join([str(x) for x in land])
msg = f'{fname} {idx} {land_str}\n'
f.write(msg)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='3DDFA inference pipeline')
parser.add_argument('-m', '--mode', default='gpu', type=str, help='gpu or cpu mode')
parser.add_argument('--bbox_init', default='two', type=str,
help='one|two: one-step bbox initialization or two-step')
parser.add_argument('--dump_2d_img', default='true', type=str2bool, help='whether to save 3d rendered image')
parser.add_argument('--dump_param', default='true', type=str2bool, help='whether to save param')
parser.add_argument('--dump_lmk', default='true', type=str2bool, help='whether to save landmarks')
parser.add_argument('--save_dir', default='results', type=str, help='dir to save result')
parser.add_argument('--save_lmk_dir', default='example', type=str, help='dir to save landmark result')
parser.add_argument('--img_list', default='example/file_list.txt', type=str, help='test image list file')
parser.add_argument('--img_prefix', default='example/Images', type=str, help='test image prefix')
parser.add_argument('--rank', default=0, type=int, help='used when parallel run')
parser.add_argument('--world_size', default=1, type=int, help='used when parallel run')
parser.add_argument('--resume_idx', default=0, type=int)
args = parser.parse_args()
main(args)
| 41.441176 | 133 | 0.654791 |
4a21cd963dd1b26557ec46b2c45ceb9605938c86 | 274 | py | Python | web/helpers.py | gak/remrun | 6bfdd5b2935ae6f6ccfe5baea07b357b2bcf2589 | [
"Apache-2.0"
] | null | null | null | web/helpers.py | gak/remrun | 6bfdd5b2935ae6f6ccfe5baea07b357b2bcf2589 | [
"Apache-2.0"
] | 1 | 2015-11-21T02:25:37.000Z | 2015-11-21T02:25:37.000Z | web/helpers.py | gak/remrun | 6bfdd5b2935ae6f6ccfe5baea07b357b2bcf2589 | [
"Apache-2.0"
] | null | null | null | # noinspection PyUnresolvedReferences
from web.version import version
# figlet aafont
BANNER = '''\033[1;37m
,_ _ ,_ _ _ _
| (_) | | | (/_ | | | \033[0;34mv\033[1;34m{version}
\033[0m
'''.format(**locals()).strip()
def banner():
return BANNER
| 17.125 | 60 | 0.580292 |
4a21ce2496318515427a4a7015130fcee028954e | 3,692 | py | Python | vendor/google/cloud/Dataproc/synth.py | codewithkyle/dnd-website | 9d30d58732b2e56d0bbd777d701759ff12dba265 | [
"MIT"
] | null | null | null | vendor/google/cloud/Dataproc/synth.py | codewithkyle/dnd-website | 9d30d58732b2e56d0bbd777d701759ff12dba265 | [
"MIT"
] | null | null | null | vendor/google/cloud/Dataproc/synth.py | codewithkyle/dnd-website | 9d30d58732b2e56d0bbd777d701759ff12dba265 | [
"MIT"
] | null | null | null | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import os
import synthtool as s
import synthtool.gcp as gcp
import logging
logging.basicConfig(level=logging.DEBUG)
gapic = gcp.GAPICGenerator()
common = gcp.CommonTemplates()
for version in ['V1', 'V1beta2']:
lower_version = version.lower()
library = gapic.php_library(
service='dataproc',
version=lower_version,
artman_output_name=f'google-cloud-dataproc-{lower_version}')
# copy all src including partial veneer classes
s.move(library / 'src')
# copy proto files to src also
s.move(library / 'proto/src/Google/Cloud/Dataproc', 'src/')
s.move(library / 'tests/')
# copy GPBMetadata file to metadata
s.move(library / 'proto/src/GPBMetadata/Google/Cloud/Dataproc', 'metadata/')
# document and utilize apiEndpoint instead of serviceAddress
s.replace(
"**/Gapic/*GapicClient.php",
r"'serviceAddress' =>",
r"'apiEndpoint' =>")
s.replace(
"**/Gapic/*GapicClient.php",
r"@type string \$serviceAddress\n\s+\*\s+The address",
r"""@type string $serviceAddress
* **Deprecated**. This option will be removed in a future major release. Please
* utilize the `$apiEndpoint` option instead.
* @type string $apiEndpoint
* The address""")
s.replace(
"**/Gapic/*GapicClient.php",
r"\$transportConfig, and any \$serviceAddress",
r"$transportConfig, and any `$apiEndpoint`")
# fix year
for client in ['ClusterController', 'JobController']:
s.replace(
f'**/V1/Gapic/{client}GapicClient.php',
r'Copyright \d{4}',
'Copyright 2017')
s.replace(
f'**/V1/{client}Client.php',
r'Copyright \d{4}',
'Copyright 2017')
s.replace(
'**/V1beta2/Gapic/*GapicClient.php',
r'Copyright \d{4}',
r'Copyright 2019')
s.replace(
'**/V1beta2/*Client.php',
r'Copyright \d{4}',
r'Copyright 2019')
s.replace(
'**/V1/Gapic/WorkflowTemplateServiceGapicClient.php',
r'Copyright \d{4}',
'Copyright 2018')
s.replace(
'**/V1/WorkflowTemplateServiceClient.php',
r'Copyright \d{4}',
'Copyright 2018')
s.replace(
'tests/**/V1/*Test.php',
r'Copyright \d{4}',
'Copyright 2018')
s.replace(
'tests/**/V1beta2/*Test.php',
r'Copyright \d{4}',
'Copyright 2019')
### [START] protoc backwards compatibility fixes
# roll back to private properties.
s.replace(
"src/**/V*/**/*.php",
r"Generated from protobuf field ([^\n]{0,})\n\s{5}\*/\n\s{4}protected \$",
r"""Generated from protobuf field \1
*/
private $""")
# prevent proto messages from being marked final
s.replace(
"src/**/V*/**/*.php",
r"final class",
r"class")
# Replace "Unwrapped" with "Value" for method names.
s.replace(
"src/**/V*/**/*.php",
r"public function ([s|g]\w{3,})Unwrapped",
r"public function \1Value"
)
### [END] protoc backwards compatibility fixes
# fix relative cloud.google.com links
s.replace(
"src/**/V*/**/*.php",
r"(.{0,})\]\((/.{0,})\)",
r"\1](https://cloud.google.com\2)"
)
| 28.4 | 94 | 0.64572 |
4a21cf2b28c1f0c1bd68cf9b43d90e9226d987dd | 198 | py | Python | fortunate/fortunate/urls.py | kryptn/Fortunate | a308a2c181d66aeeb9a4f7769eac8bf8e41fe3b4 | [
"MIT"
] | null | null | null | fortunate/fortunate/urls.py | kryptn/Fortunate | a308a2c181d66aeeb9a4f7769eac8bf8e41fe3b4 | [
"MIT"
] | null | null | null | fortunate/fortunate/urls.py | kryptn/Fortunate | a308a2c181d66aeeb9a4f7769eac8bf8e41fe3b4 | [
"MIT"
] | null | null | null | from fortunate.views import TokenAPI, FortuneAPI
routes = [{'rule': '/token/', 'view_func': TokenAPI.as_view('token')},
{'rule': '/fortune/', 'view_func': FortuneAPI.as_view('fortune')}]
| 39.6 | 76 | 0.656566 |
4a21cf9e9fcefbb4f7dc1c5956ef3736e384f438 | 1,839 | py | Python | facebook_business/adobjects/reachfrequencyadformat.py | MyrikLD/facebook-python-business-sdk | a53c8ba0e8f7d0b41b385c60089f6ba00fa5c814 | [
"CNRI-Python"
] | 576 | 2018-05-01T19:09:32.000Z | 2022-03-31T11:45:11.000Z | facebook_business/adobjects/reachfrequencyadformat.py | MyrikLD/facebook-python-business-sdk | a53c8ba0e8f7d0b41b385c60089f6ba00fa5c814 | [
"CNRI-Python"
] | 217 | 2018-05-03T07:31:59.000Z | 2022-03-29T14:19:52.000Z | facebook_business/adobjects/reachfrequencyadformat.py | MyrikLD/facebook-python-business-sdk | a53c8ba0e8f7d0b41b385c60089f6ba00fa5c814 | [
"CNRI-Python"
] | 323 | 2018-05-01T20:32:26.000Z | 2022-03-29T07:05:12.000Z | # Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix in our codegen framework. We'll not be able to accept
pull request for this class.
"""
class ReachFrequencyAdFormat(
AbstractObject,
):
def __init__(self, api=None):
super(ReachFrequencyAdFormat, self).__init__()
self._isReachFrequencyAdFormat = True
self._api = api
class Field(AbstractObject.Field):
details = 'details'
type = 'type'
_field_types = {
'details': 'Object',
'type': 'string',
}
@classmethod
def _get_field_enum_info(cls):
field_enum_info = {}
return field_enum_info
| 34.055556 | 79 | 0.736813 |
4a21d08f9dd83f5a1859de09d2ec5648d315fd65 | 5,485 | py | Python | verify.py | michael-yxchen/sgx-evoting | 55f9958e02c1397641a579dad22483b1bcd18a37 | [
"BSD-3-Clause"
] | 1 | 2021-06-17T14:11:34.000Z | 2021-06-17T14:11:34.000Z | verify.py | michael-yxchen/sgx-evoting | 55f9958e02c1397641a579dad22483b1bcd18a37 | [
"BSD-3-Clause"
] | 11 | 2021-05-10T06:17:17.000Z | 2021-09-29T23:35:31.000Z | verify.py | michael-yxchen/sgx-evoting | 55f9958e02c1397641a579dad22483b1bcd18a37 | [
"BSD-3-Clause"
] | 3 | 2021-07-30T12:59:53.000Z | 2021-07-31T23:05:16.000Z | import base64
import json
import os
import pathlib
import sys
import time
import auditee
import requests
from blessings import Terminal
from colorama import init as init_colorama # , Fore, Back, Style
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives import hashes, serialization
init_colorama()
term = Terminal()
SOURCE_CODE = pathlib.Path("/home/photon/sgxiot")
SIGNED_ENCLAVE = SOURCE_CODE.joinpath("enclave", "enclave.signed.so")
DEMO_DIR = SOURCE_CODE.joinpath("demo_sgx")
IAS_REPORT = SOURCE_CODE.joinpath("demo_sgx/ias_report.json")
def little2big_endian(b):
return swap_endians(b)
def swap_endians(b, *, length=32, from_byteorder="little", to_byteorder="big"):
return int.from_bytes(b, from_byteorder).to_bytes(length, "big")
##############################################################################
# #
# Verify quote with IAS #
# #
##############################################################################
print(f"{term.bold}Reading quote from file ...{term.normal}")
time.sleep(4)
with open(DEMO_DIR.joinpath("quote.bin"), "rb") as f:
quote_bytes = f.read()
quote_b64 = base64.b64encode(quote_bytes)
quote_dict = {"isvEnclaveQuote": quote_b64.decode()}
print(f"{term.blue}{quote_b64.decode()}{term.normal}\n")
# send the quote for verification
# To send the quote over to Intel, you need your API primary subscription key,
# which you should have set as an environment variable before starting the
# container. (See the prerequisite section if needed.)
url = "https://api.trustedservices.intel.com/sgx/dev/attestation/v4/report"
headers = {
"Content-Type": "application/json",
"Ocp-Apim-Subscription-Key": os.environ["IAS_PRIMARY_KEY"],
}
print(
f"{term.bold}Sending quote to Intel's Attestation Service for verification ...{term.normal}"
)
time.sleep(4)
res = requests.post(url, json=quote_dict, headers=headers)
if res.ok:
print(f"{term.green}Attestation report verification succeeded!\n{term.normal}")
else:
sys.exit(
f"{term.red}Attestatin verification failed, with status: "
f"{res.status_code} and reason: {res.reason}\n"
f"Did you set SGX_SPID and IAS_PRIMARY_KEY?\n"
"See https://github.com/sbellem/sgx-iot#set-environment-variables{term.normal}"
)
print(f"{term.bold}IAS response is: {term.normal}")
print(f"{term.blue}{json.dumps(res.json(), indent=4)}")
time.sleep(5)
ias_report = {"body": res.json(), "headers": dict(res.headers)}
with open(DEMO_DIR.joinpath("ias_report.json"), "w") as f:
json.dump(ias_report, f)
##############################################################################
# #
# Verify reported MRENCLAVE #
# #
##############################################################################
print(
f"{term.bold}Verify reported MRENCLAVE against trusted source code ...{term.normal}"
)
time.sleep(4)
match = auditee.verify_mrenclave(SOURCE_CODE, SIGNED_ENCLAVE, ias_report=IAS_REPORT,)
if not match:
sys.exit(
f"{term.red}MRENCLAVE of remote attestation report does not match trusted source code.{term.normal}"
)
time.sleep(5)
##############################################################################
# #
# Extract Pulic Key from attestation report #
# #
##############################################################################
print(f"{term.bold}\nExtracting public key from IAS report ...{term.normal}")
quote_body = res.json()["isvEnclaveQuoteBody"]
report_data = base64.b64decode(quote_body)[368:432]
x_little = report_data[:32]
y_little = report_data[32:]
x = little2big_endian(x_little)
y = little2big_endian(y_little)
point = b"\x04" + x + y
pubkey = ec.EllipticCurvePublicKey.from_encoded_point(curve=ec.SECP256R1(), data=point)
pubkey_pem = pubkey.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo,
)
print(f"{term.blue}{pubkey_pem.decode()}{term.normal}")
time.sleep(4)
##############################################################################
# #
# Verify Signature #
# #
##############################################################################
with open(DEMO_DIR.joinpath("Sensor_Data.signature"), "rb") as f:
signature = f.read()
with open(SOURCE_CODE.joinpath("Sensor_Data")) as f:
sensor_data = f.read()
print(
f"{term.bold}\nVerifying signature:{term.normal}\n"
f"{term.blue}{signature.hex()}{term.normal}\n"
f"{term.bold}for sensor data:{term.normal}\n"
f"{sensor_data}\n"
)
pubkey.verify(
signature, sensor_data.encode(), signature_algorithm=ec.ECDSA(hashes.SHA256()),
)
print(f"{term.green}Signature verification successful!{term.normal}")
| 36.812081 | 108 | 0.540018 |
4a21d0c1c3b9b78dc28ccc59d8254ad7358252a8 | 2,839 | py | Python | 06-Curriculum-Resources/utils/jupyter_linters/lintnb/lintnb.py | anirudhmungre/sneaky-lessons | 8e48015c50865059db96f8cd369bcc15365d66c7 | [
"ADSL"
] | null | null | null | 06-Curriculum-Resources/utils/jupyter_linters/lintnb/lintnb.py | anirudhmungre/sneaky-lessons | 8e48015c50865059db96f8cd369bcc15365d66c7 | [
"ADSL"
] | null | null | null | 06-Curriculum-Resources/utils/jupyter_linters/lintnb/lintnb.py | anirudhmungre/sneaky-lessons | 8e48015c50865059db96f8cd369bcc15365d66c7 | [
"ADSL"
] | null | null | null | # -*- coding: utf-8 -*-
"""Jupyter Notebook Linter.
This module parses all notebooks and runs linters on the code and
markdown cells. A temporary file called `deleteme` is generated from
the notebook's code and markdown cells. This needs to be added to the
gitignore as a backup, but the file should be removed at the end.
Example:
$ python lintnb path/to/notebooks
Todo:
* Update README
* Add to setup.py
"""
import re
import click
import nbformat
import subprocess
from pathlib import Path
from colorama import init, Fore, Style
# Initialize Colorama (Windows Only)
init()
# Regex to highlight spelling issues
cspell_regex = re.compile(r"(Unknown word: )(.*?)\n", re.S)
def check_code(linter_commands):
"""Lint Notebook Code Cells."""
# Execute the linter
try:
completed = subprocess.run(
linter_commands, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
except subprocess.CalledProcessError as err:
print("Error: ", err)
else:
print(
cspell_regex.sub(
f"\g<1>{Fore.RED}\g<2>\n{Style.RESET_ALL}",
completed.stdout.decode("utf-8"),
)
)
@click.command()
@click.argument("notebook_directory", default=".")
def cli(notebook_directory):
# Create Paths
notebook_path = Path(notebook_directory)
# Find all notebooks
# Exclude notebooks ending in `-checkpoints`
notebooks = notebook_path.glob("**/*[!-checkpoints].ipynb")
for notebook_path in notebooks:
# Open each notebook and parse the code cells
with open(notebook_path, "r") as notebook:
nb = nbformat.read(notebook, as_version=4)
code_cells = [i.source for i in nb.cells if i.cell_type == "code"]
code_cells_str = "\n".join(code_cells).strip()
md_cells = [i.source for i in nb.cells if i.cell_type == "markdown"]
md_cells_str = "\n".join(md_cells).strip()
# Output the code cells to a temp file for linting
tmp_path = Path(f"deleteme")
tmp_path.write_text(code_cells_str)
print(f"Linting file: {notebook_path.resolve()}")
# Run pycodeStyle for code cells
linter_commands = ["pycodestyle", "--ignore=E302,W292", tmp_path]
check_code(linter_commands)
# Run cspell for code cells
linter_commands = ["cspell", "-u", tmp_path]
check_code(linter_commands)
# Output the markdown cells to a temp file for linting
tmp_path.write_text(md_cells_str)
# Run cspell for markdown cells
linter_commands = ["cspell", "-u", tmp_path]
check_code(linter_commands)
# Clean up temp file
tmp_path.unlink()
if __name__ == "__main__":
cli()
| 29.268041 | 80 | 0.630856 |
4a21d1acc63dd92a6160ad557ab1de932b2889c6 | 70,596 | py | Python | tensorflow/python/kernel_tests/cwise_ops_test.py | 285219011/hello-world | dfb71ea206eb9f61e5d97c9727caa1a6449e39cb | [
"Apache-2.0"
] | 6 | 2017-04-25T01:30:41.000Z | 2019-12-11T15:08:46.000Z | tensorflow/python/kernel_tests/cwise_ops_test.py | PaulTR/tensorflow | 84bcff1e814ee5697b5980535583737f8e81d82f | [
"Apache-2.0"
] | null | null | null | tensorflow/python/kernel_tests/cwise_ops_test.py | PaulTR/tensorflow | 84bcff1e814ee5697b5980535583737f8e81d82f | [
"Apache-2.0"
] | 4 | 2017-04-14T07:31:18.000Z | 2021-08-30T11:06:24.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for coefficient-wise operations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import tensorflow as tf
_ADD = lambda x, y: x + y
_SUB = lambda x, y: x - y
_MUL = lambda x, y: x * y
_POW = lambda x, y: x ** y
_TRUEDIV = lambda x, y: x / y
_FLOORDIV = lambda x, y: x // y
_MOD = lambda x, y: x % y
_NEG = lambda x: -x
_ABS = abs
_LT = lambda x, y: x < y
_LE = lambda x, y: x <= y
_GT = lambda x, y: x > y
_GE = lambda x, y: x >= y
_AND = lambda x, y: x & y
_OR = lambda x, y: x | y
_XOR = lambda x, y: x ^ y
_INV = lambda x: ~x
# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
x[x < thresh] = 0
non_zero = np.where(x)
x_indices = np.vstack(non_zero).astype(index_dtype).T
x_values = x[non_zero]
x_shape = x.shape
return tf.SparseTensor(
indices=x_indices, values=x_values, shape=x_shape), x_values
class UnaryOpTest(tf.test.TestCase):
def _compareCpu(self, x, np_func, tf_func):
np_ans = np_func(x)
with self.test_session(use_gpu=False):
inx = tf.convert_to_tensor(x)
if x.dtype in (np.float32, np.float64):
y = 1.1 * tf_func(inx)
np_ans *= 1.1
else:
y = tf_func(inx)
tf_cpu = y.eval()
self.assertShapeEqual(np_ans, y)
if x.dtype == np.float16:
self.assertAllClose(np_ans, tf_cpu, rtol=1e-3, atol=1e-3)
else:
self.assertAllClose(np_ans, tf_cpu)
if (x.dtype in (np.complex64, np.complex128) and
tf_func in (tf.sign, tf.sqrt, tf.rsqrt, tf.log)):
return # Return early
if x.dtype == np.float16:
s = list(np.shape(x))
jacob_t, _ = tf.test.compute_gradient(inx,
s,
y,
s,
x_init_value=x)
xf = x.astype(np.float)
inxf = tf.convert_to_tensor(xf)
yf = tf_func(inxf)
_, jacob_n = tf.test.compute_gradient(inxf,
s,
yf,
s,
x_init_value=xf)
jacob_n = jacob_n.astype(np.float16)
self.assertAllClose(jacob_t, jacob_n, rtol=5e-3, atol=5e-3)
elif x.dtype in (np.float32, np.complex64):
s = list(np.shape(x))
jacob_t, jacob_n = tf.test.compute_gradient(inx,
s,
y,
s,
x_init_value=x)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype in (np.float64, np.complex128):
s = list(np.shape(x))
jacob_t, jacob_n = tf.test.compute_gradient(inx,
s,
y,
s,
x_init_value=x)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _check(self, result_tensor, result_np, input_sp_t, tol):
self.assertTrue(isinstance(result_tensor, tf.SparseTensor))
self.assertTrue(isinstance(input_sp_t, tf.SparseTensor))
self.assertAllEqual(input_sp_t.indices.eval(), result_tensor.indices.eval())
self.assertAllEqual(input_sp_t.shape.eval(), result_tensor.shape.eval())
if tol is None:
self.assertAllClose(result_np, result_tensor.values.eval())
else:
self.assertAllClose(result_np, result_tensor.values.eval(), rtol=tol,
atol=tol)
def _compareSparseCpu(self, x, np_func, tf_func, tol):
x_sp, x_sp_vals = _sparsify(x)
res_np = np_func(x_sp_vals)
with self.test_session(use_gpu=False):
self._check(tf_func(x_sp), res_np, x_sp, tol)
def _compareGpu(self, x, np_func, tf_func):
np_ans = np_func(x)
with self.test_session(use_gpu=True):
result = tf_func(tf.convert_to_tensor(x))
tf_gpu = result.eval()
if x.dtype == np.float16:
self.assertAllClose(np_ans, tf_gpu, rtol=1e-3, atol=1e-3)
else:
self.assertAllClose(np_ans, tf_gpu)
# TODO(zhifengc/ke): make gradient checker work on GPU.
def _compareSparseGpu(self, x, np_func, tf_func, tol):
x_sp, x_sp_vals = _sparsify(x)
res_np = np_func(x_sp_vals)
with self.test_session(use_gpu=True):
self._check(tf_func(x_sp), res_np, x_sp, tol)
def _compareBoth(self, x, np_func, tf_func):
self._compareCpu(x, np_func, tf_func)
self._compareGpu(x, np_func, tf_func)
def _compareBothSparse(self, x, np_func, tf_func, tol=None):
self._compareSparseCpu(x, np_func, tf_func, tol)
self._compareSparseGpu(x, np_func, tf_func, tol)
def _inv(self, x):
return 1.0 / x
def _rsqrt(self, x):
return self._inv(np.sqrt(x))
def _sigmoid(self, x):
return 1.0 / (1.0 + np.exp(-x))
def _replace_domain_error_with_inf(self, fn):
def func(x):
try:
return fn(x)
except ValueError as e:
if "domain error" in str(e):
return np.inf * np.ones_like(x)
else:
raise e
return func
def testFloatBasic(self):
x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float32)
y = (x + .5).astype(np.float32) # no zero
z = (x + 15.5).astype(np.float32) # all positive
k = np.arange(-0.90, 0.90, 0.25).astype(np.float32) # between -1 and 1
self._compareBoth(x, np.abs, tf.abs)
self._compareBoth(x, np.abs, _ABS)
self._compareBoth(x, np.negative, tf.neg)
self._compareBoth(x, np.negative, _NEG)
self._compareBoth(y, self._inv, tf.inv)
self._compareBoth(x, np.square, tf.square)
self._compareBoth(z, np.sqrt, tf.sqrt)
self._compareBoth(z, self._rsqrt, tf.rsqrt)
self._compareBoth(x, np.exp, tf.exp)
self._compareBoth(z, np.log, tf.log)
self._compareBoth(x, np.tanh, tf.tanh)
self._compareBoth(x, self._sigmoid, tf.sigmoid)
self._compareBoth(y, np.sign, tf.sign)
self._compareBoth(x, np.sin, tf.sin)
self._compareBoth(x, np.cos, tf.cos)
self._compareBoth(k, np.arcsin, tf.asin)
self._compareBoth(k, np.arccos, tf.acos)
self._compareBoth(x, np.arctan, tf.atan)
self._compareBoth(x, np.tan, tf.tan)
self._compareBoth(
y,
np.vectorize(self._replace_domain_error_with_inf(math.lgamma)),
tf.lgamma)
self._compareBoth(x, np.vectorize(math.erf), tf.erf)
self._compareBoth(x, np.vectorize(math.erfc), tf.erfc)
self._compareBothSparse(x, np.abs, tf.abs)
self._compareBothSparse(x, np.negative, tf.neg)
self._compareBothSparse(x, np.square, tf.square)
self._compareBothSparse(z, np.sqrt, tf.sqrt, tol=1e-3)
self._compareBothSparse(y, np.sign, tf.sign)
def testFloatTanhEdge(self):
x = np.arange(40, 40 + 6).reshape(6).astype(np.float32)
self._compareBoth(x, np.tanh, tf.tanh)
x = np.arange(-40, -40 + 6).reshape(6).astype(np.float32)
self._compareBoth(x, np.tanh, tf.tanh)
def testFloatEmpty(self):
x = np.empty((2, 0, 5), dtype=np.float32)
self._compareBoth(x, np.abs, tf.abs)
self._compareBoth(x, np.abs, _ABS)
self._compareBoth(x, np.negative, tf.neg)
self._compareBoth(x, np.negative, _NEG)
self._compareBoth(x, self._inv, tf.inv)
self._compareBoth(x, np.square, tf.square)
self._compareBoth(x, np.sqrt, tf.sqrt)
self._compareBoth(x, self._rsqrt, tf.rsqrt)
self._compareBoth(x, np.exp, tf.exp)
self._compareBoth(x, np.log, tf.log)
self._compareBoth(x, np.tanh, tf.tanh)
self._compareBoth(x, self._sigmoid, tf.sigmoid)
self._compareBoth(x, np.sign, tf.sign)
self._compareBoth(x, np.sin, tf.sin)
self._compareBoth(x, np.cos, tf.cos)
# Can't use vectorize below, so just use some arbitrary function
self._compareBoth(x, np.sign, tf.lgamma)
self._compareBoth(x, np.sign, tf.erf)
self._compareBoth(x, np.sign, tf.erfc)
self._compareBoth(x, np.tan, tf.tan)
self._compareBoth(x, np.arcsin, tf.asin)
self._compareBoth(x, np.arccos, tf.acos)
self._compareBoth(x, np.arctan, tf.atan)
self._compareBothSparse(x, np.abs, tf.abs)
self._compareBothSparse(x, np.negative, tf.neg)
self._compareBothSparse(x, np.square, tf.square)
self._compareBothSparse(x, np.sqrt, tf.sqrt, tol=1e-3)
self._compareBothSparse(x, np.sign, tf.sign)
def testDoubleBasic(self):
x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float64)
y = (x + .5).astype(np.float64) # no zero
z = (x + 15.5).astype(np.float64) # all positive
k = np.arange(-0.90, 0.90, 0.35).reshape(1, 3, 2).astype(np.float64) # between -1 and 1
self._compareBoth(x, np.abs, tf.abs)
self._compareBoth(x, np.abs, _ABS)
self._compareBoth(x, np.negative, tf.neg)
self._compareBoth(x, np.negative, _NEG)
self._compareBoth(y, self._inv, tf.inv)
self._compareBoth(x, np.square, tf.square)
self._compareBoth(z, np.sqrt, tf.sqrt)
self._compareBoth(z, self._rsqrt, tf.rsqrt)
self._compareBoth(x, np.exp, tf.exp)
self._compareBoth(z, np.log, tf.log)
self._compareBoth(x, np.tanh, tf.tanh)
self._compareBoth(x, self._sigmoid, tf.sigmoid)
self._compareBoth(y, np.sign, tf.sign)
self._compareBoth(x, np.sin, tf.sin)
self._compareBoth(x, np.cos, tf.cos)
self._compareBoth(
y,
np.vectorize(self._replace_domain_error_with_inf(math.lgamma)),
tf.lgamma)
self._compareBoth(x, np.vectorize(math.erf), tf.erf)
self._compareBoth(x, np.vectorize(math.erfc), tf.erfc)
self._compareBoth(x, np.arctan, tf.atan)
self._compareBoth(k, np.arcsin, tf.asin)
self._compareBoth(k, np.arccos, tf.acos)
self._compareBoth(k, np.tan, tf.tan)
self._compareBothSparse(x, np.abs, tf.abs)
self._compareBothSparse(x, np.negative, tf.neg)
self._compareBothSparse(x, np.square, tf.square)
self._compareBothSparse(z, np.sqrt, tf.sqrt, tol=1e-3)
self._compareBothSparse(y, np.sign, tf.sign)
def testHalfBasic(self):
x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float16)
y = (x + .5).astype(np.float16) # no zero
z = (x + 15.5).astype(np.float16) # all positive
self._compareBoth(x, np.abs, tf.abs)
self._compareBoth(x, np.abs, _ABS)
self._compareBoth(x, np.negative, tf.neg)
self._compareBoth(x, np.negative, _NEG)
self._compareBoth(y, self._inv, tf.inv)
self._compareBoth(x, np.square, tf.square)
self._compareBoth(z, np.sqrt, tf.sqrt)
self._compareBoth(z, self._rsqrt, tf.rsqrt)
self._compareBoth(x, np.exp, tf.exp)
self._compareBoth(z, np.log, tf.log)
self._compareBoth(x, np.tanh, tf.tanh)
self._compareBoth(x, self._sigmoid, tf.sigmoid)
self._compareBoth(y, np.sign, tf.sign)
self._compareBoth(x, np.sin, tf.sin)
self._compareBoth(x, np.cos, tf.cos)
self._compareBoth(
y,
np.vectorize(self._replace_domain_error_with_inf(math.lgamma)),
tf.lgamma)
self._compareBoth(x, np.vectorize(math.erf), tf.erf)
self._compareBoth(x, np.vectorize(math.erfc), tf.erfc)
self._compareBothSparse(x, np.abs, tf.abs)
self._compareBothSparse(x, np.negative, tf.neg)
self._compareBothSparse(x, np.square, tf.square)
self._compareBothSparse(z, np.sqrt, tf.sqrt, tol=1e-3)
self._compareBothSparse(y, np.sign, tf.sign)
def testInt32Basic(self):
x = np.arange(-6, 6, 2).reshape(1, 3, 2).astype(np.int32)
self._compareCpu(x, np.abs, tf.abs)
self._compareCpu(x, np.abs, _ABS)
self._compareBoth(x, np.negative, tf.neg)
self._compareBoth(x, np.negative, _NEG)
self._compareBoth(x, np.square, tf.square)
self._compareCpu(x, np.sign, tf.sign)
self._compareBothSparse(x, np.abs, tf.abs)
self._compareBothSparse(x, np.negative, tf.neg)
self._compareBothSparse(x, np.square, tf.square)
self._compareBothSparse(x, np.sign, tf.sign)
def testInt64Basic(self):
x = np.arange(
-6 << 40, 6 << 40, 2 << 40).reshape(1, 3, 2).astype(np.int64)
self._compareCpu(x, np.abs, tf.abs)
self._compareCpu(x, np.abs, _ABS)
self._compareCpu(x, np.negative, tf.neg)
self._compareCpu(x, np.negative, _NEG)
self._compareCpu(x, np.square, tf.square)
self._compareCpu(x, np.sign, tf.sign)
self._compareBothSparse(x, np.abs, tf.abs)
self._compareBothSparse(x, np.negative, tf.neg)
self._compareBothSparse(x, np.square, tf.square)
self._compareBothSparse(x, np.sign, tf.sign)
def testComplex64Basic(self):
x = np.complex(1, 1) * np.arange(-3, 3).reshape(1, 3, 2).astype(
np.complex64)
y = x + 0.5 # no zeros
self._compareCpu(x, np.abs, tf.complex_abs)
self._compareCpu(x, np.abs, _ABS)
self._compareCpu(x, np.negative, tf.neg)
self._compareCpu(x, np.negative, _NEG)
self._compareCpu(y, self._inv, tf.inv)
self._compareCpu(x, np.square, tf.square)
self._compareCpu(x, np.sqrt, tf.sqrt)
self._compareCpu(y, self._rsqrt, tf.rsqrt)
self._compareCpu(x, np.exp, tf.exp)
self._compareCpu(y, np.log, tf.log)
self._compareCpu(x, np.tanh, tf.tanh)
self._compareCpu(x, self._sigmoid, tf.sigmoid)
self._compareCpu(x, np.sin, tf.sin)
self._compareCpu(x, np.cos, tf.cos)
self._compareBothSparse(x, np.abs, tf.abs)
self._compareBothSparse(x, np.negative, tf.neg)
self._compareBothSparse(x, np.square, tf.square)
self._compareBothSparse(x, np.sqrt, tf.sqrt, 1e-3)
# Numpy uses an incorrect definition of sign; use the right one instead.
def complex_sign(x):
return x / np.abs(x)
self._compareCpu(y, complex_sign, tf.sign)
self._compareBothSparse(y, complex_sign, tf.sign)
def testComplex128Basic(self):
x = np.complex(1, 1) * np.arange(-3, 3).reshape(1, 3, 2).astype(
np.complex128)
y = x + 0.5 # no zeros
self._compareCpu(x, np.abs, tf.abs)
self._compareCpu(x, np.abs, _ABS)
self._compareCpu(x, np.negative, tf.neg)
self._compareCpu(x, np.negative, _NEG)
self._compareCpu(y, self._inv, tf.inv)
self._compareCpu(x, np.square, tf.square)
self._compareCpu(x, np.sqrt, tf.sqrt)
self._compareCpu(y, self._rsqrt, tf.rsqrt)
self._compareCpu(x, np.exp, tf.exp)
self._compareCpu(y, np.log, tf.log)
self._compareCpu(x, np.tanh, tf.tanh)
self._compareCpu(x, self._sigmoid, tf.sigmoid)
self._compareCpu(x, np.sin, tf.sin)
self._compareCpu(x, np.cos, tf.cos)
self._compareBothSparse(x, np.abs, tf.abs)
self._compareBothSparse(x, np.negative, tf.neg)
self._compareBothSparse(x, np.square, tf.square)
self._compareBothSparse(x, np.sqrt, tf.sqrt, 1e-3)
# Numpy uses an incorrect definition of sign; use the right one instead.
def complex_sign(x):
return x / np.abs(x)
self._compareCpu(y, complex_sign, tf.sign)
self._compareBothSparse(y, complex_sign, tf.sign)
class BinaryOpTest(tf.test.TestCase):
def _compareCpu(self, x, y, np_func, tf_func, also_compare_variables=False):
np_ans = np_func(x, y)
with self.test_session(use_gpu=False):
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_cpu = out.eval()
# Test that the op takes precedence over numpy operators.
np_left = tf_func(x, iny).eval()
np_right = tf_func(inx, y).eval()
if also_compare_variables:
var_x = tf.Variable(x)
var_y = tf.Variable(y)
tf.initialize_all_variables().run()
print(type(x), type(y), type(var_x), type(var_y))
print(type(tf_func(x, var_y)), type(tf_func(var_x, y)))
np_var_left = tf_func(x, var_y).eval()
np_var_right = tf_func(var_x, y).eval()
if np_ans.dtype != np.object:
self.assertAllClose(np_ans, tf_cpu)
self.assertAllClose(np_ans, np_left)
self.assertAllClose(np_ans, np_right)
if also_compare_variables:
self.assertAllClose(np_ans, np_var_left)
self.assertAllClose(np_ans, np_var_right)
self.assertShapeEqual(np_ans, out)
def _compareGradientX(self, x, y, np_func, tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.test_session():
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
xs = list(x.shape)
jacob_t, jacob_n = tf.test.compute_gradient(inx,
xs,
out,
zs,
x_init_value=x)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = tf.convert_to_tensor(xf)
inyf = tf.convert_to_tensor(yf)
outf = tf_func(inxf, inyf)
_, jacob_n = tf.test.compute_gradient(inxf,
xs,
outf,
zs,
x_init_value=xf,
delta=1e-3)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _compareGradientY(self, x, y, np_func, tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.test_session():
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
ys = list(np.shape(y))
jacob_t, jacob_n = tf.test.compute_gradient(iny,
ys,
out,
zs,
x_init_value=y)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = tf.convert_to_tensor(xf)
inyf = tf.convert_to_tensor(yf)
outf = tf_func(inxf, inyf)
_, jacob_n = tf.test.compute_gradient(inyf,
ys,
outf,
zs,
x_init_value=yf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _compareGpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.test_session(use_gpu=True):
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = out.eval()
self.assertAllClose(np_ans, tf_gpu)
self.assertShapeEqual(np_ans, out)
# TODO(zhifengc/ke): make gradient checker work on GPU.
def _compareBoth(self, x, y, np_func, tf_func, also_compare_variables=False):
self._compareCpu(x, y, np_func, tf_func, also_compare_variables)
if x.dtype in (np.float16, np.float32, np.float64):
if tf_func not in (_FLOORDIV, tf.floordiv, tf.igamma, tf.igammac, tf.zeta, tf.polygamma):
self._compareGradientX(x, y, np_func, tf_func)
self._compareGradientY(x, y, np_func, tf_func)
if tf_func in (tf.igamma, tf.igammac, tf.zeta, tf.polygamma):
# These methods only support gradients in the second parameter
self._compareGradientY(x, y, np_func, tf_func)
self._compareGpu(x, y, np_func, tf_func)
def testFloatBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32)
self._compareBoth(x, y, np.add, tf.add, also_compare_variables=True)
self._compareBoth(x, y, np.subtract, tf.sub)
self._compareBoth(x, y, np.multiply, tf.mul)
self._compareBoth(x, y + 0.1, np.true_divide, tf.truediv)
self._compareBoth(x, y + 0.1, np.floor_divide, tf.floordiv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
self._compareBoth(x, y + 0.1, np.floor_divide, _FLOORDIV)
try:
from scipy import special # pylint: disable=g-import-not-at-top
a_pos_small = np.linspace(0.1, 2, 15).reshape(1, 3, 5).astype(np.float32)
x_pos_small = np.linspace(0.1, 10, 15).reshape(1, 3, 5).astype(np.float32)
self._compareBoth(a_pos_small, x_pos_small, special.gammainc, tf.igamma)
self._compareBoth(a_pos_small, x_pos_small, special.gammaincc, tf.igammac)
# Need x > 1
self._compareBoth(x_pos_small + 1, a_pos_small, special.zeta, tf.zeta)
n_small = np.arange(0, 15).reshape(1, 3, 5).astype(np.float32)
self._compareBoth(n_small, x_pos_small, special.polygamma, tf.polygamma)
except ImportError as e:
tf.logging.warn("Cannot test special functions: %s" % str(e))
def testFloatDifferentShapes(self):
x = np.array([1, 2, 3, 4]).reshape(2, 2).astype(np.float32)
y = np.array([1, 2]).reshape(2, 1).astype(np.float32)
with self.test_session() as sess:
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
s = tf.reduce_sum(inx * iny)
gx, gy = sess.run(tf.gradients(s, [inx, iny]))
# gx is simply the broadcasted y
self.assertAllEqual(gx, np.array([1, 1, 2, 2])
.reshape(2, 2).astype(np.float32))
# gy is x's column summed up
self.assertAllEqual(gy, np.array([3, 7]).
reshape(2, 1).astype(np.float32))
def testFloatVariableOverload(self):
x = np.array([1, 2, 3, 4]).reshape(2, 2).astype(np.int32)
y = np.array([1, 2]).reshape(2, 1).astype(np.int32)
var_x = tf.Variable(x)
var_y = tf.Variable(y)
with self.test_session() as sess:
sess.run([var_x.initializer, var_y.initializer])
left_result = (var_x * y).eval()
right_result = (x * var_y).eval()
np_result = x * y
self.assertAllEqual(np_result, left_result)
self.assertAllEqual(np_result, right_result)
def testDoubleBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64)
self._compareBoth(x, y, np.add, tf.add)
self._compareBoth(x, y, np.subtract, tf.sub)
self._compareBoth(x, y, np.multiply, tf.mul)
self._compareBoth(x, y + 0.1, np.true_divide, tf.truediv)
self._compareBoth(x, y + 0.1, np.floor_divide, tf.floordiv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
self._compareBoth(x, y + 0.1, np.floor_divide, _FLOORDIV)
try:
from scipy import special # pylint: disable=g-import-not-at-top
a_pos_small = np.linspace(0.1, 2, 15).reshape(1, 3, 5).astype(np.float32)
x_pos_small = np.linspace(0.1, 10, 15).reshape(1, 3, 5).astype(np.float32)
self._compareBoth(a_pos_small, x_pos_small, special.gammainc, tf.igamma)
self._compareBoth(a_pos_small, x_pos_small, special.gammaincc, tf.igammac)
except ImportError as e:
tf.logging.warn("Cannot test special functions: %s" % str(e))
def testInt8Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int8)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int8)
self._compareBoth(x, y, np.multiply, tf.mul)
self._compareBoth(x, y, np.multiply, _MUL)
def testInt16Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int16)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int16)
self._compareBoth(x, y, np.multiply, tf.mul)
self._compareBoth(x, y, np.multiply, _MUL)
def testInt32Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int32)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int32)
self._compareBoth(x, y, np.add, tf.add)
self._compareBoth(x, y, np.subtract, tf.sub)
self._compareBoth(x, y, np.multiply, tf.mul)
self._compareBoth(x, y, np.true_divide, tf.truediv)
self._compareBoth(x, y, np.floor_divide, tf.floordiv)
self._compareBoth(x, y, np.mod, tf.mod)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y, np.true_divide, _TRUEDIV)
self._compareBoth(x, y, np.floor_divide, _FLOORDIV)
self._compareBoth(x, y, np.mod, _MOD)
# _compareBoth tests on GPU only for floating point types, so test
# _MOD for int32 on GPU by calling _compareGpu
self._compareGpu(x, y, np.mod, _MOD)
def testInt64Basic(self):
x = np.arange(1 << 40, 13 << 40, 2 << 40).reshape(1, 3, 2).astype(np.int64)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int64)
self._compareBoth(x, y, np.subtract, tf.sub)
self._compareBoth(x, y, np.multiply, tf.mul)
self._compareBoth(x, y, np.true_divide, tf.truediv)
self._compareBoth(x, y, np.floor_divide, tf.floordiv)
self._compareBoth(x, y, np.mod, tf.mod)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y, np.true_divide, _TRUEDIV)
self._compareBoth(x, y, np.floor_divide, _FLOORDIV)
self._compareBoth(x, y, np.mod, _MOD)
def testComplex64Basic(self):
x = np.complex(1, 1) * np.linspace(-10, 10, 6).reshape(1, 3, 2).astype(
np.complex64)
y = np.complex(1, 1) * np.linspace(20, -20, 6).reshape(1, 3, 2).astype(
np.complex64)
self._compareCpu(x, y, np.add, tf.add)
self._compareCpu(x, y, np.subtract, tf.sub)
self._compareCpu(x, y, np.multiply, tf.mul)
self._compareCpu(x, y + 0.1, np.true_divide, tf.truediv)
self._compareCpu(x, y, np.add, _ADD)
self._compareCpu(x, y, np.subtract, _SUB)
self._compareCpu(x, y, np.multiply, _MUL)
self._compareCpu(x, y + 0.1, np.true_divide, _TRUEDIV)
def testComplex128Basic(self):
x = np.complex(1, 1) * np.linspace(-10, 10, 6).reshape(1, 3, 2).astype(
np.complex128)
y = np.complex(1, 1) * np.linspace(20, -20, 6).reshape(1, 3, 2).astype(
np.complex128)
self._compareCpu(x, y, np.add, tf.add)
self._compareCpu(x, y, np.subtract, tf.sub)
self._compareCpu(x, y, np.multiply, tf.mul)
self._compareCpu(x, y + 0.1, np.true_divide, tf.truediv)
self._compareCpu(x, y, np.add, _ADD)
self._compareCpu(x, y, np.subtract, _SUB)
self._compareCpu(x, y, np.multiply, _MUL)
self._compareCpu(x, y + 0.1, np.true_divide, _TRUEDIV)
def testStringComparison(self):
x = np.array([["abc", "bh"], ["c", ""]])
y = np.array([["abc", "bh"], ["def", "hi"]])
with self.test_session(use_gpu=False) as sess:
cmp_eq = tf.equal(x, y)
cmp_not_eq = tf.not_equal(x, y)
values = sess.run([cmp_eq, cmp_not_eq])
self.assertAllEqual([[True, True], [False, False]], values[0])
self.assertAllEqual([[False, False], [True, True]], values[1])
def testString(self):
x = np.array([["x_0_0", "x_0_1", "x_0_2"],
["x_1_0", "x_1_1", "x_1_2"],
["x_2_0", "x_2_1", "x_2_2"]], dtype=np.object)
y = np.array([["y_0_0", "y_0_1", "y_0_2"],
["y_1_0", "y_1_1", "y_1_2"],
["y_2_0", "y_2_1", "y_2_2"]], dtype=np.object)
z = np.array([["z_0", "z_1", "z_2"]], dtype=np.object)
w = np.array("w", dtype=np.object)
self._compareCpu(x, y, _ADD, _ADD)
self._compareCpu(x, z, _ADD, _ADD)
self._compareCpu(x, w, _ADD, _ADD)
self._compareCpu(z, w, _ADD, _ADD)
def _compareBCast(self, xs, ys, dtype, np_func, tf_func):
x = (1 + np.linspace(0, 5, np.prod(xs))).astype(dtype).reshape(xs)
y = (1 + np.linspace(0, 5, np.prod(ys))).astype(dtype).reshape(ys)
self._compareCpu(x, y, np_func, tf_func)
if x.dtype in (np.float16, np.float32, np.float64):
if tf_func not in (_FLOORDIV, tf.floordiv):
if x.dtype == np.float16:
# Compare fp16 theoretical gradients to fp32 numerical gradients,
# since fp16 numerical gradients are too imprecise unless great
# care is taken with choosing the inputs and the delta. This is
# a weaker check (in particular, it does not test the op itself,
# only its gradient), but it's much better than nothing.
self._compareGradientX(x, y, np_func, tf_func, np.float)
self._compareGradientY(x, y, np_func, tf_func, np.float)
else:
self._compareGradientX(x, y, np_func, tf_func)
self._compareGradientY(x, y, np_func, tf_func)
self._compareGpu(x, y, np_func, tf_func)
# TODO(josh11b,vrv): Refactor this to use parameterized tests.
def _testBCastByFunc(self, funcs, xs, ys):
dtypes = [
np.float16,
np.float32,
np.float64,
np.int32,
np.int64,
np.complex64,
np.complex128,
]
for dtype in dtypes:
for (np_func, tf_func) in funcs:
if (dtype in (np.complex64, np.complex128) and
tf_func in (_FLOORDIV, tf.floordiv)):
continue # floordiv makes no sense for complex numbers
self._compareBCast(xs, ys, dtype, np_func, tf_func)
self._compareBCast(ys, xs, dtype, np_func, tf_func)
def _testBCastA(self, xs, ys):
funcs = [
(np.add, tf.add),
(np.add, _ADD),
]
self._testBCastByFunc(funcs, xs, ys)
def _testBCastB(self, xs, ys):
funcs = [
(np.subtract, tf.sub),
(np.subtract, _SUB),
(np.power, tf.pow),
]
self._testBCastByFunc(funcs, xs, ys)
def _testBCastC(self, xs, ys):
funcs = [
(np.multiply, tf.mul),
(np.multiply, _MUL),
]
self._testBCastByFunc(funcs, xs, ys)
def _testBCastD(self, xs, ys):
funcs = [
(np.true_divide, tf.truediv),
(np.floor_divide, tf.floordiv),
(np.true_divide, _TRUEDIV),
(np.floor_divide, _FLOORDIV),
]
self._testBCastByFunc(funcs, xs, ys)
def testBCast_0A(self):
self._testBCastA([1, 3, 2], [1])
def testBCast_0B(self):
self._testBCastB([1, 3, 2], [1])
def testBCast_0C(self):
self._testBCastC([1, 3, 2], [1])
def testBCast_0D(self):
self._testBCastD([1, 3, 2], [1])
def testBCast_1A(self):
self._testBCastA([1, 3, 2], [2])
def testBCast_1B(self):
self._testBCastB([1, 3, 2], [2])
def testBCast_1C(self):
self._testBCastC([1, 3, 2], [2])
def testBCast_1D(self):
self._testBCastD([1, 3, 2], [2])
def testBCast_2A(self):
self._testBCastA([1, 3, 2], [3, 2])
def testBCast_2B(self):
self._testBCastB([1, 3, 2], [3, 2])
def testBCast_2C(self):
self._testBCastC([1, 3, 2], [3, 2])
def testBCast_2D(self):
self._testBCastD([1, 3, 2], [3, 2])
def testBCast_3A(self):
self._testBCastA([1, 3, 2], [3, 1])
def testBCast_3B(self):
self._testBCastB([1, 3, 2], [3, 1])
def testBCast_3C(self):
self._testBCastC([1, 3, 2], [3, 1])
def testBCast_3D(self):
self._testBCastD([1, 3, 2], [3, 1])
def testBCast_4A(self):
self._testBCastA([1, 3, 2], [1, 3, 2])
def testBCast_4B(self):
self._testBCastB([1, 3, 2], [1, 3, 2])
def testBCast_4C(self):
self._testBCastC([1, 3, 2], [1, 3, 2])
def testBCast_4D(self):
self._testBCastD([1, 3, 2], [1, 3, 2])
def testBCast_5A(self):
self._testBCastA([1, 3, 2], [2, 3, 1])
def testBCast_5B(self):
self._testBCastB([1, 3, 2], [2, 3, 1])
def testBCast_5C(self):
self._testBCastC([1, 3, 2], [2, 3, 1])
def testBCast_5D(self):
self._testBCastD([1, 3, 2], [2, 3, 1])
def testBCast_6A(self):
self._testBCastA([1, 3, 2], [2, 1, 1])
def testBCast_6B(self):
self._testBCastB([1, 3, 2], [2, 1, 1])
def testBCast_6C(self):
self._testBCastC([1, 3, 2], [2, 1, 1])
def testBCast_6D(self):
self._testBCastD([1, 3, 2], [2, 1, 1])
def testBCast_7A(self):
self._testBCastA([1, 3, 2], [1, 3, 1])
def testBCast_7B(self):
self._testBCastB([1, 3, 2], [1, 3, 1])
def testBCast_7C(self):
self._testBCastC([1, 3, 2], [1, 3, 1])
def testBCast_7D(self):
self._testBCastD([1, 3, 2], [1, 3, 1])
def testBCast_8A(self):
self._testBCastA([2, 1, 5], [2, 3, 1])
def testBCast_8B(self):
self._testBCastB([2, 1, 5], [2, 3, 1])
def testBCast_8C(self):
self._testBCastC([2, 1, 5], [2, 3, 1])
def testBCast_8D(self):
self._testBCastD([2, 1, 5], [2, 3, 1])
def testBCast_9A(self):
self._testBCastA([2, 0, 5], [2, 0, 1])
def testBCast_9B(self):
self._testBCastB([2, 0, 5], [2, 0, 1])
def testBCast_9C(self):
self._testBCastC([2, 0, 5], [2, 0, 1])
def testBCast_9D(self):
self._testBCastD([2, 0, 5], [2, 0, 1])
def testBCast_10A(self):
self._testBCastA([2, 3, 0], [2, 3, 1])
def testBCast_10B(self):
self._testBCastB([2, 3, 0], [2, 3, 1])
def testBCast_10C(self):
self._testBCastC([2, 3, 0], [2, 3, 1])
def testBCast_10D(self):
self._testBCastD([2, 3, 0], [2, 3, 1])
def testBCast_11A(self):
self._testBCastA([1, 3, 2], [1, 3, 2])
def testBCast_11B(self):
self._testBCastB([1, 3, 2], [1, 3, 2])
def testBCast_11C(self):
self._testBCastC([1, 3, 2], [1, 3, 2])
def testBCast_11D(self):
self._testBCastD([1, 3, 2], [1, 3, 2])
def testBCast_12A(self):
self._testBCastA([1, 1, 1, 1, 3, 2], [1, 3, 2])
def testBCast_12B(self):
self._testBCastB([1, 1, 1, 1, 3, 2], [1, 3, 2])
def testBCast_12C(self):
self._testBCastC([1, 1, 1, 1, 3, 2], [1, 3, 2])
def testBCast_12D(self):
self._testBCastD([1, 1, 1, 1, 3, 2], [1, 3, 2])
def testBCast_13A(self):
self._testBCastA([1, 3, 2, 1, 1], [1])
def testBCast_13B(self):
self._testBCastB([1, 3, 2, 1, 1], [1])
def testBCast_13C(self):
self._testBCastC([1, 3, 2, 1, 1], [1])
def testBCast_13D(self):
self._testBCastD([1, 3, 2, 1, 1], [1])
def testBCast_14A(self):
self._testBCastA([2, 3, 1, 1, 5], [1])
def testBCast_14B(self):
self._testBCastB([2, 3, 1, 1, 5], [1])
def testBCast_14C(self):
self._testBCastC([2, 3, 1, 1, 5], [1])
def testBCast_14D(self):
self._testBCastD([2, 3, 1, 1, 5], [1])
def testBCast_15A(self):
self._testBCastA([10, 3, 1, 2], [3, 1, 2])
def testBCast_15B(self):
self._testBCastB([10, 3, 1, 2], [3, 1, 2])
def testBCast_15C(self):
self._testBCastC([10, 3, 1, 2], [3, 1, 2])
def testBCast_15D(self):
self._testBCastD([10, 3, 1, 2], [3, 1, 2])
def testMismatchedDimensions(self):
for func in [tf.add, tf.sub, tf.mul, tf.div, _ADD, _SUB, _MUL, _TRUEDIV,
_FLOORDIV]:
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Incompatible shapes" in str(e)):
func(tf.convert_to_tensor([10.0, 20.0, 30.0]),
tf.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))
class ComparisonOpTest(tf.test.TestCase):
def _compare(self, func, x, y, dtype):
with self.test_session(use_gpu=False):
out = func(tf.convert_to_tensor(np.array([x]).astype(dtype)),
tf.convert_to_tensor(np.array([y]).astype(dtype)))
ret = out.eval()
return ret[0]
def testScalarCompareScalar(self):
dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
data = [-1, 0, 1]
for t in dtypes:
for x in data:
for y in data:
self.assertEqual(self._compare(tf.less, x, y, t), x < y)
self.assertEqual(self._compare(tf.less_equal, x, y, t), x <= y)
self.assertEqual(self._compare(tf.greater, x, y, t), x > y)
self.assertEqual(self._compare(tf.greater_equal, x, y, t), x >= y)
self.assertEqual(self._compare(tf.equal, x, y, t), x == y)
self.assertEqual(self._compare(tf.not_equal, x, y, t), x != y)
data = [-1, 0, 1, -1j, 1j, 1 + 1j, 1 - 1j]
for t in [np.complex64, np.complex128]:
for x in data:
for y in data:
self.assertEqual(self._compare(tf.equal, x, y, t), x == y)
self.assertEqual(self._compare(tf.not_equal, x, y, t), x != y)
def _compareCpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.test_session(use_gpu=False):
out = tf_func(tf.convert_to_tensor(x), tf.convert_to_tensor(y))
tf_cpu = out.eval()
self.assertAllEqual(np_ans, tf_cpu)
def _compareGpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.test_session(use_gpu=True):
out = tf_func(tf.convert_to_tensor(x), tf.convert_to_tensor(y))
tf_gpu = out.eval()
self.assertAllEqual(np_ans, tf_gpu)
def _compareBoth(self, x, y, np_func, tf_func):
self._compareCpu(x, y, np_func, tf_func)
if x.dtype == np.float16 or x.dtype == np.float32 or x.dtype == np.float64:
self._compareGpu(x, y, np_func, tf_func)
def testTensorCompareTensor(self):
x = np.linspace(-15, 15, 6).reshape(1, 3, 2)
y = np.linspace(20, -10, 6).reshape(1, 3, 2)
for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
xt = x.astype(t)
yt = y.astype(t)
self._compareBoth(xt, yt, np.less, tf.less)
self._compareBoth(xt, yt, np.less_equal, tf.less_equal)
self._compareBoth(xt, yt, np.greater, tf.greater)
self._compareBoth(xt, yt, np.greater_equal, tf.greater_equal)
self._compareBoth(xt, yt, np.equal, tf.equal)
self._compareBoth(xt, yt, np.not_equal, tf.not_equal)
# TODO(zhifengc): complex64 doesn't work on GPU yet.
for t in [np.complex64, np.complex128]:
self._compareCpu(x.astype(t), y.astype(t), np.equal, tf.equal)
self._compareCpu(x.astype(t), y.astype(t), np.not_equal, tf.not_equal)
def _compareBCast(self, xs, ys, dtype, np_func, tf_func):
x = np.linspace(-15, 15, np.prod(xs)).astype(dtype).reshape(xs)
y = np.linspace(20, -10, np.prod(ys)).astype(dtype).reshape(ys)
self._compareCpu(x, y, np_func, tf_func)
self._compareCpu(y, x, np_func, tf_func)
if x.dtype == np.float16 or x.dtype == np.float32 or x.dtype == np.float64:
self._compareGpu(x, y, np_func, tf_func)
self._compareGpu(y, x, np_func, tf_func)
def _testBCastByFunc(self, np_func, tf_func):
shapes = [
([1, 3, 2], [1]),
([1, 3, 2], [2]),
([1, 3, 2], [3, 2]),
([1, 3, 2], [3, 1]),
([1, 3, 2], [1, 3, 2]),
([1, 3, 2], [2, 3, 1]),
([1, 3, 2], [2, 1, 1]),
([1, 3, 2], [1, 3, 1]),
([2, 1, 5], [2, 3, 1]),
([2, 0, 5], [2, 0, 1]),
([2, 3, 0], [2, 3, 1]),
]
dtypes = [
np.float16,
np.float32,
np.float64,
np.int32,
np.int64,
]
for (xs, ys) in shapes:
for dtype in dtypes:
self._compareBCast(xs, ys, dtype, np_func, tf_func)
def testBCastLess(self):
self._testBCastByFunc(np.less, tf.less)
def testBCastLessEqual(self):
self._testBCastByFunc(np.less_equal, tf.less_equal)
def testBCastGreater(self):
self._testBCastByFunc(np.greater, tf.greater)
def testBCastGreaterEqual(self):
self._testBCastByFunc(np.greater_equal, tf.greater_equal)
def testBCastEqual(self):
self._testBCastByFunc(np.equal, tf.equal)
def testBCastNotEqual(self):
self._testBCastByFunc(np.not_equal, tf.not_equal)
def testShapeMismatch(self):
dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
funcs = [tf.less, tf.less_equal, tf.greater,
tf.greater_equal, tf.equal, tf.not_equal]
x = np.arange(0, 10).reshape([2, 5])
y = np.arange(0, 10).reshape([5, 2])
for t in dtypes:
for f in funcs:
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Incompatible shapes" in str(e)):
f(x.astype(t), y.astype(t))
class LogicalOpTest(tf.test.TestCase):
def _compareBinary(self, x, y, np_func, tf_func, use_gpu=False):
np_ans = np_func(x, y)
with self.test_session(use_gpu=use_gpu):
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_val = out.eval()
self.assertEqual(out.dtype, tf.bool)
self.assertAllEqual(np_ans, tf_val)
self.assertShapeEqual(np_ans, out)
def _not(self, x, use_gpu=False):
np_ans = np.logical_not(x)
with self.test_session(use_gpu=use_gpu):
out = tf.logical_not(tf.convert_to_tensor(x))
tf_val = out.eval()
self.assertEqual(out.dtype, tf.bool)
self.assertAllEqual(np_ans, tf_val)
self.assertShapeEqual(np_ans, out)
def testScalar(self):
data = [np.array([True]), np.array([False])]
for use_gpu in [True, False]:
for x in data:
self._not(x, use_gpu)
for x in data:
for y in data:
self._compareBinary(
x, y, np.logical_and, tf.logical_and, use_gpu)
self._compareBinary(
x, y, np.logical_or, tf.logical_or, use_gpu)
self._compareBinary(
x, y, np.logical_xor, tf.logical_xor, use_gpu)
def testTensor(self):
x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
y = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
for use_gpu in [True, False]:
self._not(x, use_gpu)
self._compareBinary(x, y, np.logical_and, tf.logical_and, use_gpu)
self._compareBinary(x, y, np.logical_or, tf.logical_or, use_gpu)
self._compareBinary(x, y, np.logical_xor, tf.logical_xor, use_gpu)
def testBCast(self):
shapes = [
([1, 3, 2], [1]),
([1, 3, 2], [2]),
([1, 3, 2], [3, 2]),
([1, 3, 2], [3, 1]),
([1, 3, 2], [1, 3, 2]),
([1, 3, 2], [2, 3, 1]),
([1, 3, 2], [2, 1, 1]),
([1, 3, 2], [1, 3, 1]),
([2, 1, 5], [2, 3, 1]),
([2, 0, 5], [2, 0, 1]),
([2, 3, 0], [2, 3, 1]),
]
for (xs, ys) in shapes:
x = np.random.randint(0, 2, np.prod(xs)).astype(np.bool).reshape(xs)
y = np.random.randint(0, 2, np.prod(ys)).astype(np.bool).reshape(ys)
for use_gpu in [True, False]:
self._compareBinary(x, y, np.logical_and, tf.logical_and, use_gpu)
self._compareBinary(x, y, np.logical_or, tf.logical_or, use_gpu)
self._compareBinary(x, y, np.logical_xor, tf.logical_xor, use_gpu)
def testShapeMismatch(self):
x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
y = np.random.randint(0, 2, 6).astype(np.bool).reshape(3, 2, 1)
for f in [tf.logical_and, tf.logical_or, tf.logical_xor]:
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Incompatible shapes" in str(e)):
f(x, y)
def testUsingAsPythonValueFails(self):
# Ensure that we raise an error when the user attempts to treat a
# `Tensor` as a Python `bool`.
b = tf.constant(False)
with self.assertRaises(TypeError):
if b:
pass
x = tf.constant(3)
y = tf.constant(4)
with self.assertRaises(TypeError):
if x > y:
pass
z = tf.constant(7)
# The chained comparison should fail because Python computes `x <
# y` and short-circuits the comparison with `z` if it is `False`.
with self.assertRaises(TypeError):
_ = x < y < z
class SelectOpTest(tf.test.TestCase):
def _compare(self, c, x, y, use_gpu):
np_ans = np.where(c, x, y)
with self.test_session(use_gpu=use_gpu):
out = tf.select(c, x, y)
tf_ans = out.eval()
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, out)
def _compareGradientX(self, c, x, y, numeric_gradient_type=None):
with self.test_session():
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = tf.select(c, inx, iny)
s = list(np.shape(c))
jacob_t, jacob_n = tf.test.compute_gradient(inx,
s,
out,
s,
x_init_value=x)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = tf.convert_to_tensor(xf)
inyf = tf.convert_to_tensor(yf)
outf = tf.select(c, inxf, inyf)
_, jacob_n = tf.test.compute_gradient(inxf,
s,
outf,
s,
x_init_value=xf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _compareGradientY(self, c, x, y, numeric_gradient_type=None):
with self.test_session():
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = tf.select(c, inx, iny)
s = list(np.shape(c))
jacob_t, jacob_n = tf.test.compute_gradient(iny,
s,
out,
s,
x_init_value=y,
delta=1.0)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = tf.convert_to_tensor(xf)
inyf = tf.convert_to_tensor(yf)
outf = tf.select(c, inxf, inyf)
_, jacob_n = tf.test.compute_gradient(inyf,
s,
outf,
s,
x_init_value=yf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def testBasic(self):
c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 2) * 100
for t in [np.float16, np.float32, np.float64, np.int32, np.int64,
np.complex64, np.complex128]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(c, xt, yt, use_gpu=False)
if t in [np.float16, np.float32, np.float64]:
self._compare(c, xt, yt, use_gpu=True)
def testGradients(self):
c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 2) * 100
for t in [np.float16, np.float32, np.float64]:
xt = x.astype(t)
yt = y.astype(t)
if t == np.float16:
# Compare fp16 theoretical gradients to fp32 numerical gradients,
# since fp16 numerical gradients are too imprecise unless great
# care is taken with choosing the inputs and the delta. This is
# a weaker check (in particular, it does not test the op itself,
# only its gradient), but it's much better than nothing.
self._compareGradientX(c, xt, yt, np.float)
self._compareGradientY(c, xt, yt, np.float)
else:
self._compareGradientX(c, xt, yt)
self._compareGradientY(c, xt, yt)
def testShapeMismatch(self):
c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(2, 5, 3) * 100
for t in [np.float16, np.float32, np.float64, np.int32, np.int64,
np.complex64, np.complex128]:
xt = x.astype(t)
yt = y.astype(t)
with self.assertRaises(ValueError):
tf.select(c, xt, yt)
def testEmptyTensor(self):
c = np.random.randint(0, 3, 0).astype(np.bool).reshape(1, 3, 0)
x = np.random.rand(1, 3, 0) * 100
y = np.random.rand(1, 3, 0) * 100
z_expected = np.zeros((1, 3, 0), dtype=np.float32)
with self.test_session():
xt = x.astype(np.float32)
yt = y.astype(np.float32)
z = tf.select(c, xt, yt).eval()
self.assertAllEqual(z_expected, z)
def testNan(self):
"""Verify that nans don't propagate where they shouldn't."""
with self.test_session():
for c in False, True:
for a in 7.0, np.nan:
for b in 5.0, np.nan:
x = tf.select(c, a, b).eval()
y = a if c else b
self.assertEqual(np.isnan(x), np.isnan(y))
class BatchSelectOpTest(tf.test.TestCase):
"""Test broadcasting of Select when 'c' is a vec and 't' &'e' are rank2+."""
def _compare(self, c, x, y, use_gpu):
np_ans = np.dstack(
[x_i if c_i else y_i for c_i, x_i, y_i in zip(c, x, y)]).transpose(
[2, 0, 1])
with self.test_session(use_gpu=use_gpu):
out = tf.select(c, x, y)
tf_ans = out.eval()
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, out)
def _compareGradientX(self, c, x, y, numeric_gradient_type=None):
with self.test_session():
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = tf.select(c, inx, iny)
s = list(np.shape(x))
jacob_t, jacob_n = tf.test.compute_gradient(inx,
s,
out,
s,
x_init_value=x)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = tf.convert_to_tensor(xf)
inyf = tf.convert_to_tensor(yf)
outf = tf.select(c, inxf, inyf)
_, jacob_n = tf.test.compute_gradient(inxf,
s,
outf,
s,
x_init_value=xf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _compareGradientY(self, c, x, y, numeric_gradient_type=None):
with self.test_session():
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = tf.select(c, inx, iny)
s = list(np.shape(x))
jacob_t, jacob_n = tf.test.compute_gradient(iny,
s,
out,
s,
x_init_value=y)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = tf.convert_to_tensor(xf)
inyf = tf.convert_to_tensor(yf)
outf = tf.select(c, inxf, inyf)
_, jacob_n = tf.test.compute_gradient(inyf,
s,
outf,
s,
x_init_value=yf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def testBasic(self):
c = np.random.randint(0, 2, 16).astype(np.bool)
x = np.random.rand(16, 2, 8) * 100
y = np.random.rand(16, 2, 8) * 100
for t in [np.float16, np.float32, np.float64, np.int32, np.int64,
np.complex64, np.complex128]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(c, xt, yt, use_gpu=False)
if t in [np.float16, np.float32, np.float64]:
self._compare(c, xt, yt, use_gpu=True)
def testGradients(self):
c = np.random.randint(0, 2, 16).astype(np.bool)
x = np.random.rand(16, 2, 8) * 100
y = np.random.rand(16, 2, 8) * 100
for t in [np.float16, np.float32, np.float64]:
xt = x.astype(t)
yt = y.astype(t)
if t == np.float16:
# Compare fp16 theoretical gradients to fp32 numerical gradients,
# since fp16 numerical gradients are too imprecise unless great
# care is taken with choosing the inputs and the delta. This is
# a weaker check (in particular, it does not test the op itself,
# only its gradient), but it's much better than nothing.
self._compareGradientX(c, xt, yt, np.float)
self._compareGradientY(c, xt, yt, np.float)
else:
self._compareGradientX(c, xt, yt)
self._compareGradientY(c, xt, yt)
def testShapeMismatch(self):
c = np.random.randint(0, 2, 8).astype(np.bool)
x = np.random.rand(16, 3, 2) * 100
y = np.random.rand(16, 3, 2) * 100
for t in [np.float16, np.float32, np.float64, np.int32, np.int64,
np.complex64, np.complex128]:
xt = x.astype(t)
yt = y.astype(t)
with self.assertRaises(ValueError):
tf.select(c, xt, yt)
class MinMaxOpTest(tf.test.TestCase):
def _compare(self, x, y, use_gpu):
np_min, np_max = np.minimum(x, y), np.maximum(x, y)
with self.test_session(use_gpu=use_gpu) as sess:
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
omin, omax = tf.minimum(inx, iny), tf.maximum(inx, iny)
tf_min, tf_max = sess.run([omin, omax])
self.assertAllEqual(np_min, tf_min)
self.assertAllEqual(np_max, tf_max)
def testBasic(self):
x = np.random.rand(1, 3, 2) * 100.
y = np.random.rand(1, 3, 2) * 100.
for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
self._compare(x.astype(t), y.astype(t), use_gpu=False)
self._compare(x.astype(t), y.astype(t), use_gpu=True)
def testDifferentShapes(self):
x = np.random.rand(1, 3, 2) * 100.
y = np.random.rand(2) * 100. # should broadcast
for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
self._compare(x.astype(t), y.astype(t), use_gpu=False)
self._compare(x.astype(t), y.astype(t), use_gpu=True)
def testScalar(self):
x = np.random.rand(1, 3, 2) * 100.
y = np.asscalar(np.random.rand(1) * 100.) # should broadcast
# dropped np.float64, int64 because TF automatically converts to 32 bit
for t in [np.float32, np.int32]:
self._compare(x.astype(t), t(y), use_gpu=False)
self._compare(x.astype(t), t(y), use_gpu=True)
def _compareGradientX(self, func, x, y):
with self.test_session():
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = func(inx, iny)
s = list(np.shape(x))
jacob_t, jacob_n = tf.test.compute_gradient(inx,
s,
out,
s,
x_init_value=x)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _compareGradientY(self, func, x, y):
with self.test_session():
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
out = func(inx, iny)
s = list(np.shape(x))
jacob_t, jacob_n = tf.test.compute_gradient(iny,
s,
out,
s,
x_init_value=y)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def testGradients(self):
x = np.random.rand(1, 3, 2) * 100.
# ensure x != y
y = x + (np.random.randint(2, size=x.shape) - .5) * 2 # -1 or +1
self._compareGradientX(tf.maximum, x, y)
self._compareGradientY(tf.maximum, x, y)
self._compareGradientX(tf.minimum, x, y)
self._compareGradientY(tf.minimum, x, y)
class MathOpsOverloadTest(tf.test.TestCase):
def _computeTensorAndLiteral(self, x, y, dtype, func):
with self.test_session(use_gpu=False):
inx = tf.convert_to_tensor(x, dtype=dtype)
z = func(inx, y) # Should use __add__, __sub__, etc.
return z.eval()
def _computeLiteralAndTensor(self, x, y, dtype, func):
with self.test_session(use_gpu=False):
iny = tf.convert_to_tensor(y, dtype=dtype)
z = func(x, iny) # Should use __radd__, __rsub__, etc.
return z.eval()
def _compareBinary(self, x, y, dtype, np_func, tf_func):
np_ans = np_func(x, y).astype(dtype.as_numpy_dtype)
self.assertAllClose(np_ans, self._computeTensorAndLiteral(
x, y, dtype, tf_func))
self.assertAllClose(np_ans, self._computeLiteralAndTensor(
x, y, dtype, tf_func))
def _compareUnary(self, x, dtype, np_func, tf_func):
np_ans = np_func(x).astype(dtype.as_numpy_dtype)
with self.test_session(use_gpu=False):
self.assertAllClose(np_ans, tf_func(tf.convert_to_tensor(x, dtype=dtype)).eval())
def testOverload(self):
dtypes = [
tf.float16,
tf.float32,
tf.float64,
tf.int32,
tf.int64,
tf.complex64,
tf.complex128,
]
funcs = [
(np.add, _ADD),
(np.subtract, _SUB),
(np.multiply, _MUL),
(np.power, _POW),
(np.true_divide, _TRUEDIV),
(np.floor_divide, _FLOORDIV),
]
for dtype in dtypes:
for np_func, tf_func in funcs:
if dtype in (tf.complex64, tf.complex128) and tf_func == _FLOORDIV:
continue # floordiv makes no sense for complex
self._compareBinary(10, 5, dtype, np_func, tf_func)
# Mod only works for int32 and int64.
for dtype in [tf.int32, tf.int64]:
self._compareBinary(10, 3, dtype, np.mod, _MOD)
def testOverloadComparisons(self):
dtypes = [
tf.float16,
tf.float32,
tf.float64,
tf.int32,
tf.int64,
]
funcs = [
(np.less, _LT),
(np.less_equal, _LE),
(np.greater, _GT),
(np.greater_equal, _GE),
]
for dtype in dtypes:
for np_func, tf_func in funcs:
self._compareBinary(10, 5, dtype, np_func, tf_func)
logical_funcs = [
(np.logical_and, _AND),
(np.logical_or, _OR),
(np.logical_xor, _XOR),
(np.equal, tf.equal),
(np.not_equal, tf.not_equal)
]
for np_func, tf_func in logical_funcs:
self._compareBinary(True, False, tf.bool, np_func, tf_func)
self._compareBinary(True, True, tf.bool, np_func, tf_func)
self._compareBinary(False, False, tf.bool, np_func, tf_func)
self._compareBinary(False, True, tf.bool, np_func, tf_func)
self._compareBinary([True, True, False, False],
[True, False, True, False],
tf.bool, np_func, tf_func)
self._compareUnary(True, tf.bool, np.logical_not, _INV)
self._compareUnary(False, tf.bool, np.logical_not, _INV)
self._compareUnary([True, False], tf.bool, np.logical_not, _INV)
class IsFiniteInfNanTest(tf.test.TestCase):
def _compare(self, x, use_gpu):
np_finite, np_inf, np_nan = np.isfinite(x), np.isinf(x), np.isnan(x)
with self.test_session(use_gpu=use_gpu) as sess:
inx = tf.convert_to_tensor(x)
ofinite, oinf, onan = tf.is_finite(inx), tf.is_inf(
inx), tf.is_nan(inx)
tf_finite, tf_inf, tf_nan = sess.run([ofinite, oinf, onan])
self.assertAllEqual(np_inf, tf_inf)
self.assertAllEqual(np_nan, tf_nan)
self.assertAllEqual(np_finite, tf_finite)
self.assertShapeEqual(np_inf, oinf)
self.assertShapeEqual(np_nan, onan)
self.assertShapeEqual(np_finite, ofinite)
def _testDtype(self, dtype):
fi = np.finfo(dtype)
data = np.array([0, -1, 1, fi.resolution, -fi.resolution, fi.min, fi.max,
-np.inf, np.inf, np.nan]).astype(dtype)
self._compare(data, use_gpu=False)
self._compare(data, use_gpu=True)
def testHalf(self):
self._testDtype(np.float16)
def testFloat(self):
self._testDtype(np.float32)
def testDouble(self):
self._testDtype(np.float64)
class RoundingTest(tf.test.TestCase):
def _compare(self, x, use_gpu):
np_floor, np_ceil = np.floor(x), np.ceil(x)
with self.test_session(use_gpu=use_gpu) as sess:
inx = tf.convert_to_tensor(x)
ofloor, oceil = tf.floor(inx), tf.ceil(inx)
tf_floor, tf_ceil = sess.run([ofloor, oceil])
self.assertAllEqual(np_floor, tf_floor)
self.assertAllEqual(np_ceil, tf_ceil)
self.assertShapeEqual(np_floor, ofloor)
self.assertShapeEqual(np_ceil, oceil)
def _testDtype(self, dtype):
data = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(dtype)
    self._compare(data, use_gpu=False)
self._compare(data, use_gpu=True)
def testTypes(self):
for dtype in [np.float16, np.float32, np.float64]:
self._testDtype(dtype)
class ComplexMakeRealImagTest(tf.test.TestCase):
def _compareMake(self, real, imag, use_gpu):
np_ans = real + (1j) * imag
with self.test_session(use_gpu=use_gpu):
real = tf.convert_to_tensor(real)
imag = tf.convert_to_tensor(imag)
tf_ans = tf.complex(real, imag)
out = tf_ans.eval()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def testMake(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
for use_gpu in [False, True]:
self._compareMake(real, imag, use_gpu)
self._compareMake(real, 12.0, use_gpu)
self._compareMake(23.0, imag, use_gpu)
def _compareRealImag(self, cplx, use_gpu):
np_real, np_imag = np.real(cplx), np.imag(cplx)
with self.test_session(use_gpu=use_gpu) as sess:
inx = tf.convert_to_tensor(cplx)
tf_real = tf.real(inx)
tf_imag = tf.imag(inx)
tf_real_val, tf_imag_val = sess.run([tf_real, tf_imag])
self.assertAllEqual(np_real, tf_real_val)
self.assertAllEqual(np_imag, tf_imag_val)
self.assertShapeEqual(np_real, tf_real)
self.assertShapeEqual(np_imag, tf_imag)
def testRealImag64(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
cplx = real + 1j * imag
self._compareRealImag(cplx, use_gpu=False)
self._compareRealImag(cplx, use_gpu=True)
def testRealImag128(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float64)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float64)
cplx = real + 1j * imag
self._compareRealImag(cplx, use_gpu=False)
self._compareRealImag(cplx, use_gpu=True)
def _compareConj(self, cplx, use_gpu):
np_ans = np.conj(cplx)
with self.test_session(use_gpu=use_gpu):
inx = tf.convert_to_tensor(cplx)
tf_conj = tf.conj(inx)
tf_ans = tf_conj.eval()
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, tf_conj)
def testConj64(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
cplx = real + 1j * imag
self._compareConj(cplx, use_gpu=False)
self._compareConj(cplx, use_gpu=True)
def testConj128(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float64)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float64)
cplx = real + 1j * imag
self._compareConj(cplx, use_gpu=False)
self._compareConj(cplx, use_gpu=True)
def _compareGradient(self, x):
# x[:, 0] is real, x[:, 1] is imag. We combine real and imag into
# complex numbers. Then, we extract real and imag parts and
    # compute the squared sum. This is obviously the same as sum(real
# * real) + sum(imag * imag). We just want to make sure the
# gradient function is checked.
with self.test_session():
inx = tf.convert_to_tensor(x)
real, imag = tf.split(1, 2, inx)
real, imag = tf.reshape(real, [-1]), tf.reshape(imag, [-1])
cplx = tf.complex(real, imag)
cplx = tf.conj(cplx)
loss = tf.reduce_sum(
tf.square(tf.real(cplx))) + tf.reduce_sum(
tf.square(tf.imag(cplx)))
epsilon = 1e-3
jacob_t, jacob_n = tf.test.compute_gradient(inx,
list(x.shape),
loss,
[1],
x_init_value=x,
delta=epsilon)
self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
def _compareBroadcastGradient(self, x):
x_ = tf.convert_to_tensor(x)
epsilon = 1e-3
with self.test_session():
for args in [(x_, 0.), (0., x_)]:
z = tf.reduce_sum(tf.complex_abs(tf.complex(*args)))
jacob_t, jacob_n = tf.test.compute_gradient(x_,
list(x.shape),
z,
[1],
x_init_value=x,
delta=epsilon)
self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
def testGradient(self):
# complex64
data = np.arange(1, 2, 0.10).reshape([5, 2]).astype(np.float32)
self._compareGradient(data)
self._compareBroadcastGradient(data)
# complex128
data = np.arange(1, 2, 0.10).reshape([5, 2]).astype(np.float64)
self._compareGradient(data)
def _compareMulGradient(self, data):
# data is a float matrix of shape [n, 4]. data[:, 0], data[:, 1],
# data[:, 2], data[:, 3] are real parts of x, imaginary parts of
# x, real parts of y and imaginary parts of y.
with self.test_session():
inp = tf.convert_to_tensor(data)
xr, xi, yr, yi = tf.split(1, 4, inp)
def vec(x): # Reshape to a vector
return tf.reshape(x, [-1])
xr, xi, yr, yi = vec(xr), vec(xi), vec(yr), vec(yi)
def cplx(r, i): # Combine to a complex vector
return tf.complex(r, i)
x, y = cplx(xr, xi), cplx(yr, yi)
# z is x times y in complex plane.
z = x * y
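      # Illustrative expansion (not in the original): with x = xr + i*xi and
      # y = yr + i*yi, z = (xr*yr - xi*yi) + i*(xr*yi + xi*yr); the loss below
      # sums both coefficients over all elements of z.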
# Defines the loss function as the sum of all coefficients of z.
loss = tf.reduce_sum(tf.real(z) + tf.imag(z))
epsilon = 0.005
jacob_t, jacob_n = tf.test.compute_gradient(inp,
list(data.shape),
loss,
[1],
x_init_value=data,
delta=epsilon)
self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
def testMulGradient(self):
data = np.arange(1, 2, 0.125).reshape([2, 4]).astype(np.float32)
self._compareMulGradient(data)
class AccumulateTest(tf.test.TestCase):
def testSimple(self):
with self.test_session():
random_arrays = [np.random.rand(16, 16, 16, 16).astype(np.float32)
for _ in range(20)]
random_tensors = [tf.convert_to_tensor(x, dtype=tf.float32)
for x in random_arrays]
tf_val = tf.accumulate_n(random_tensors)
np_val = random_arrays[0]
for random_array in random_arrays[1:]:
np_val += random_array
self.assertAllClose(np_val, tf_val.eval())
def testZeroArgs(self):
with self.test_session():
with self.assertRaises(ValueError):
tf_val = tf.accumulate_n([])
tf_val.eval()
if __name__ == "__main__":
tf.test.main()
| 37.995694 | 95 | 0.594963 |
4a21d30893f50c629bfe1126f39a262fa063511c | 13,985 | py | Python | thumbor/transformer.py | enterstudio/thumbor | 2f1529604a0f5b2d6d87132b5616841842313215 | ["MIT"] | null | null | null | thumbor/transformer.py | enterstudio/thumbor | 2f1529604a0f5b2d6d87132b5616841842313215 | ["MIT"] | null | null | null | thumbor/transformer.py | enterstudio/thumbor | 2f1529604a0f5b2d6d87132b5616841842313215 | ["MIT"] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
import math
import sys
from thumbor.point import FocalPoint
from thumbor.utils import logger
import tornado.gen as gen
trim_enabled = True
try:
from thumbor.ext.filters import _bounding_box
except ImportError:
logger.warn("Error importing bounding_box filter, trimming won't work")
trim_enabled = False
class Transformer(object):
def __init__(self, context):
self.context = context
self.engine = self.context.request.engine
self.target_height = None
self.target_width = None
def _calculate_target_dimensions(self):
source_width, source_height = self.engine.size
source_width = float(source_width)
source_height = float(source_height)
if not self.context.request.width and not self.context.request.height:
self.target_width = source_width
self.target_height = source_height
else:
if self.context.request.width:
if self.context.request.width == "orig":
self.target_width = source_width
else:
self.target_width = float(self.context.request.width)
else:
self.target_width = self.engine.get_proportional_width(self.context.request.height)
if self.context.request.height:
if self.context.request.height == "orig":
self.target_height = source_height
else:
self.target_height = float(self.context.request.height)
else:
self.target_height = self.engine.get_proportional_height(self.context.request.width)
def get_target_dimensions(self):
"""
Returns the target dimensions and calculates them if necessary.
The target dimensions are display independent.
:return: Target dimensions as a tuple (width, height)
:rtype: (int, int)
"""
if self.target_height is None:
self._calculate_target_dimensions()
return int(self.target_width), int(self.target_height)
def adjust_focal_points(self):
source_width, source_height = self.engine.size
self.focal_points = None
if self.context.request.focal_points:
if self.context.request.should_crop:
self.focal_points = []
crop = self.context.request.crop
for point in self.context.request.focal_points:
if point.x < crop['left'] or point.x > crop['right'] or point.y < crop['top'] or point.y > crop['bottom']:
continue
point.x -= crop['left'] or 0
point.y -= crop['top'] or 0
self.focal_points.append(point)
else:
self.focal_points = self.context.request.focal_points
if not self.focal_points:
self.focal_points = [
FocalPoint.from_alignment(self.context.request.halign,
self.context.request.valign,
source_width,
source_height)
]
self.engine.focus(self.focal_points)
def transform(self, callback):
self.done_callback = callback
if self.context.config.RESPECT_ORIENTATION:
self.engine.reorientate()
self.trim()
self.smart_detect()
def trim(self):
is_gifsicle = (self.context.request.engine.extension == '.gif' and self.context.config.USE_GIFSICLE_ENGINE)
if self.context.request.trim is None or not trim_enabled or is_gifsicle:
return
mode, data = self.engine.image_data_as_rgb()
box = _bounding_box.apply(
mode,
self.engine.size[0],
self.engine.size[1],
self.context.request.trim_pos,
self.context.request.trim_tolerance,
data
)
if box[2] < box[0] or box[3] < box[1]:
logger.warn("Ignoring trim, there wouldn't be any image left, check the tolerance.")
return
self.engine.crop(box[0], box[1], box[2] + 1, box[3] + 1)
if self.context.request.should_crop:
self.context.request.crop['left'] -= box[0]
self.context.request.crop['top'] -= box[1]
self.context.request.crop['right'] -= box[0]
self.context.request.crop['bottom'] -= box[1]
@property
def smart_storage_key(self):
return self.context.request.image_url
@gen.coroutine
def smart_detect(self):
is_gifsicle = (self.context.request.engine.extension == '.gif' and self.context.config.USE_GIFSICLE_ENGINE)
if (not (self.context.modules.detectors and self.context.request.smart)) or is_gifsicle:
self.do_image_operations()
return
try:
# Beware! Boolean hell ahead.
#
# The `running_smart_detection` flag is needed so we can know
# whether `after_smart_detect()` is running synchronously or not.
#
# If we're running it in a sync fashion it will set
# `should_run_image_operations` to True so we can avoid running
# image operation inside the try block.
self.should_run_image_operations = False
self.running_smart_detection = True
yield self.do_smart_detection()
self.running_smart_detection = False
except Exception:
if not self.context.config.IGNORE_SMART_ERRORS:
raise
logger.exception("Ignored error during smart detection")
if self.context.config.USE_CUSTOM_ERROR_HANDLING:
self.context.modules.importer.error_handler.handle_error(
context=self.context,
handler=self.context.request_handler,
exception=sys.exc_info()
)
self.context.request.prevent_result_storage = True
self.context.request.detection_error = True
self.do_image_operations()
if self.should_run_image_operations:
self.do_image_operations()
@gen.coroutine
def do_smart_detection(self):
focal_points = yield gen.maybe_future(self.context.modules.storage.get_detector_data(self.smart_storage_key))
if focal_points is not None:
self.after_smart_detect(focal_points, points_from_storage=True)
else:
detectors = self.context.modules.detectors
detectors[0](self.context, index=0, detectors=detectors).detect(self.after_smart_detect)
def after_smart_detect(self, focal_points=[], points_from_storage=False):
for point in focal_points:
self.context.request.focal_points.append(FocalPoint.from_dict(point))
if self.context.request.focal_points and self.context.modules.storage and not points_from_storage:
storage = self.context.modules.storage
points = []
for point in self.context.request.focal_points:
points.append(point.to_dict())
storage.put_detector_data(self.smart_storage_key, points)
if self.running_smart_detection:
self.should_run_image_operations = True
return
self.do_image_operations()
def img_operation_worker(self):
if '.gif' == self.context.request.engine.extension and 'cover()' in self.context.request.filters:
self.extract_cover()
self.manual_crop()
self._calculate_target_dimensions()
self.adjust_focal_points()
if self.context.request.debug:
self.debug()
else:
if self.context.request.fit_in:
self.fit_in_resize()
else:
self.auto_crop()
self.resize()
self.flip()
def do_image_operations(self):
"""
If ENGINE_THREADPOOL_SIZE > 0, this will schedule the image operations
into a threadpool. If not, it just executes them synchronously, and
calls self.done_callback when it's finished.
The actual work happens in self.img_operation_worker
"""
def inner(future):
self.done_callback()
self.context.thread_pool.queue(
operation=self.img_operation_worker,
callback=inner
)
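    # Illustrative usage (sketch, not part of the original API surface): a
    # request handler typically drives this pipeline with
    #   Transformer(context).transform(on_finished)
    # where `on_finished` runs once the threadpool (or the synchronous
    # fallback) has completed the image operations.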
def extract_cover(self):
self.engine.extract_cover()
def manual_crop(self):
if self.context.request.should_crop:
def limit(dimension, maximum):
return min(max(dimension, 0), maximum)
source_width, source_height = self.engine.size
crop = self.context.request.crop
crop['left'] = limit(crop['left'], source_width)
crop['top'] = limit(crop['top'], source_height)
crop['right'] = limit(crop['right'], source_width)
crop['bottom'] = limit(crop['bottom'], source_height)
if crop['left'] >= crop['right'] or crop['top'] >= crop['bottom']:
self.context.request.should_crop = False
crop['left'] = crop['right'] = crop['top'] = crop['bottom'] = 0
return
self.engine.crop(crop['left'], crop['top'], crop['right'], crop['bottom'])
def auto_crop(self):
source_width, source_height = self.engine.size
target_height = self.target_height or 1
target_width = self.target_width or 1
source_ratio = round(float(source_width) / source_height, 2)
target_ratio = round(float(target_width) / target_height, 2)
if source_ratio == target_ratio:
return
focal_x, focal_y = self.get_center_of_mass()
if self.target_width / source_width > self.target_height / source_height:
crop_width = source_width
crop_height = int(round(source_width * self.target_height / target_width, 0))
else:
crop_width = int(round(math.ceil(self.target_width * source_height / target_height), 0))
crop_height = source_height
crop_left = int(round(min(max(focal_x - (crop_width / 2), 0.0), source_width - crop_width)))
crop_right = min(crop_left + crop_width, source_width)
crop_top = int(round(min(max(focal_y - (crop_height / 2), 0.0), source_height - crop_height)))
crop_bottom = min(crop_top + crop_height, source_height)
self.engine.crop(crop_left, crop_top, crop_right, crop_bottom)
def flip(self):
if self.context.request.horizontal_flip:
self.engine.flip_horizontally()
if self.context.request.vertical_flip:
self.engine.flip_vertically()
def get_center_of_mass(self):
total_weight = 0.0
total_x = 0.0
total_y = 0.0
for focal_point in self.focal_points:
total_weight += focal_point.weight
total_x += focal_point.x * focal_point.weight
total_y += focal_point.y * focal_point.weight
x = total_x / total_weight
y = total_y / total_weight
return x, y
def resize(self):
source_width, source_height = self.engine.size
if self.target_width == source_width and self.target_height == source_height:
return
self.engine.resize(self.target_width or 1, self.target_height or 1) # avoiding 0px images
def fit_in_resize(self):
source_width, source_height = self.engine.size
        # invert width and height if the image orientation differs from the request orientation and adaptive fit-in is requested
if self.context.request.adaptive and (
(source_width - source_height < 0 and self.target_width - self.target_height > 0) or
(source_width - source_height > 0 and self.target_width - self.target_height < 0)
):
tmp = self.context.request.width
self.context.request.width = self.context.request.height
self.context.request.height = tmp
tmp = self.target_width
self.target_width = self.target_height
self.target_height = tmp
sign = 1
if self.context.request.full:
sign = -1
if sign == 1 and self.target_width >= source_width and self.target_height >= source_height:
return
if source_width / self.target_width * sign >= source_height / self.target_height * sign:
resize_height = round(source_height * self.target_width / source_width)
resize_width = self.target_width
else:
resize_height = self.target_height
resize_width = round(source_width * self.target_height / source_height)
        # ensure that filters work on the real image size and not on the request
        # size, which might be smaller than the resized image when `full-fit-in`
        # is being used
self.context.request.width = int(max(self.context.request.width, resize_width))
self.context.request.height = int(max(self.context.request.height, resize_height))
self.engine.resize(resize_width, resize_height)
def debug(self):
if not self.context.request.focal_points:
return
for point in self.context.request.focal_points:
if point.width <= 1:
point.width = 10
if point.height <= 1:
point.height = 10
self.engine.draw_rectangle(int(point.x - (point.width / 2)),
int(point.y - (point.height / 2)),
point.width,
point.height)
| 38.42033 | 126 | 0.613085 |
4a21d39b734c4938934a4544d5bad6aefd3165ff | 9,506 | py | Python | datalabs/operations/aggregate/text_matching.py | ExpressAI/DataLab | c3eddd4068f131d031c2486c60b650092bb0ae84 | ["Apache-2.0"] | 54 | 2022-01-26T06:58:58.000Z | 2022-03-31T05:11:35.000Z | datalabs/operations/aggregate/text_matching.py | ExpressAI/DataLab | c3eddd4068f131d031c2486c60b650092bb0ae84 | ["Apache-2.0"] | 81 | 2022-01-26T06:46:41.000Z | 2022-03-24T05:05:31.000Z | datalabs/operations/aggregate/text_matching.py | ExpressAI/DataLab | c3eddd4068f131d031c2486c60b650092bb0ae84 | ["Apache-2.0"] | 7 | 2022-02-06T09:28:31.000Z | 2022-03-16T01:06:37.000Z |
from typing import Any, Callable, Iterator, List, Mapping, Optional
import numpy as np
import sacrebleu
from tqdm import tqdm
from datalabs.operations.aggregate.aggregating import Aggregating, aggregating
from datalabs.operations.featurize import get_gender_bias
from datalabs.operations.operation import dataset_operation, DatasetOperation
class TextMatchingAggregating(Aggregating, DatasetOperation):
def __init__(
self,
name: str = None,
func: Callable[..., Any] = None,
resources: Optional[Mapping[str, Any]] = None,
contributor: str = None,
processed_fields: List = ["text1", "text2"],
generated_field: str = None,
task="text-matching",
description=None,
):
super().__init__(
name=name,
func=func,
resources=resources,
contributor=contributor,
task=task,
description=description,
)
self._type = "TextMatchingAggregating"
self.processed_fields = ["text1", "text2"]
if isinstance(processed_fields, str):
self.processed_fields[0] = processed_fields
else:
self.processed_fields = processed_fields
self.generated_field = generated_field
self._data_type = "Dataset"
class text_matching_aggregating(aggregating, dataset_operation):
def __init__(
self,
name: Optional[str] = None,
resources: Optional[Mapping[str, Any]] = None,
contributor: str = None,
processed_fields: List = ["text1", "text2"],
generated_field: str = None,
task="text-matching",
description=None,
):
super().__init__(
name=name,
resources=resources,
contributor=contributor,
description=description,
)
self.processed_fields = processed_fields
self.generated_field = generated_field
self.task = task
def __call__(self, *param_arg):
if callable(self.name):
tf_class = TextMatchingAggregating(name=self.name.__name__, func=self.name)
return tf_class(*param_arg)
else:
f = param_arg[0]
name = self.name or f.__name__
tf_cls = TextMatchingAggregating(
name=name,
func=f,
resources=self.resources,
contributor=self.contributor,
processed_fields=self.processed_fields,
generated_field=self.generated_field,
task=self.task,
description=self.description,
)
return tf_cls
def get_similarity_by_sacrebleu(text1, text2):
# pip install sacrebleu
references = [text1]
hypothesis = text2
score = sacrebleu.sentence_bleu(hypothesis, references).score
return score
@text_matching_aggregating(
name="get_statistics",
contributor="datalab",
task="text-matching, natural-language-inference",
description="Calculate the overall statistics (e.g., average length) of a given "
"text pair classification datasets. e,g. natural language inference",
)
def get_statistics(samples: Iterator):
"""
Input:
samples: [{
"text1":
"text2":
}]
Output:
dict:
usage:
    you can test it with the following code:
from datalabs import load_dataset
from aggregate.text_matching import *
dataset = load_dataset('sick')
res = dataset['test'].apply(get_statistics)
print(next(res))
"""
# for hate speech
# from hatesonar import Sonar
# sonar = Sonar()
sample_infos = []
text1_lengths = []
text2_lengths = []
labels_to_number = {}
vocab = {}
number_of_tokens = 0
gender_results = []
# hatespeech = {
# "hate_speech":{"ratio":0,"texts":[]},
# "offensive_language":{"ratio":0,"texts":[]},
# "neither":{"ratio":0,"texts":[]}}
text1_divided_text2 = []
similarities = []
for sample in tqdm(samples):
text1, text2, label = sample["text1"], sample["text2"], sample["label"]
similarity_of_text_pair = get_similarity_by_sacrebleu(text1, text2)
similarities.append(similarity_of_text_pair)
# average length of text1
text1_length = len(text1.split(" "))
text1_lengths.append(text1_length)
# average length of text2
text2_length = len(text2.split(" "))
text2_lengths.append(text2_length)
# text1/text2
text1_divided_text2.append(len(text1.split(" ")) / len(text2.split(" ")))
# label info
if label in labels_to_number.keys():
labels_to_number[label] += 1
else:
labels_to_number[label] = 1
# update the number of tokens
number_of_tokens += len(text1.split())
number_of_tokens += len(text2.split())
# Vocabulary info
for w in (text1 + text2).split(" "):
if w in vocab.keys():
vocab[w] += 1
else:
vocab[w] = 1
# Gender info
gender_result1 = get_gender_bias.func(text1)
gender_result2 = get_gender_bias.func(text2)
gender_results.append(gender_result1["gender_bias_info"])
gender_results.append(gender_result2["gender_bias_info"])
        # hatespeech
# results = sonar.ping(text=text1)
# class_1 = results['top_class']
# confidence = 0
# for value in results['classes']:
# if value['class_name'] == class_1:
# confidence = value['confidence']
# break
#
# hatespeech[class_1]["ratio"] += 1
# if class_1 != "neither":
# hatespeech[class_1]["texts"].append(text1)
# results = sonar.ping(text=text2)
# class_2 = results['top_class']
# confidence = 0
# for value in results['classes']:
# if value['class_name'] == class_2:
# confidence = value['confidence']
# break
#
# hatespeech[class_2]["ratio"] += 1
# if class_2 != "neither":
# hatespeech[class_2]["texts"].append(text2)
sample_info = {
"text1": text1,
"text2": text2,
"label": label,
"text1_length": text1_length,
"text2_length": text2_length,
"text1_gender": gender_result1,
"text2_gender": gender_result2,
# "text1_hate_speech_class":class_1,
# "text2_hate_speech_class":class_2,
"text1_divided_text2": len(text1.split(" ")) / len(text2.split(" ")),
"similarity_of_text_pair": similarity_of_text_pair,
}
if len(sample_infos) < 10000:
sample_infos.append(sample_info)
# ------------------ Dataset-level ----------------
# get vocabulary
vocab_sorted = dict(sorted(vocab.items(), key=lambda item: item[1], reverse=True))
# compute dataset-level gender_ratio
gender_ratio = {
"word": {"male": 0, "female": 0},
"single_name": {"male": 0, "female": 0},
}
for result in gender_results:
res_word = result["word"] # noqa
gender_ratio["word"]["male"] += result["word"]["male"]
gender_ratio["word"]["female"] += result["word"]["female"]
gender_ratio["single_name"]["male"] += result["single_name"]["male"]
gender_ratio["single_name"]["female"] += result["single_name"]["female"]
n_gender = gender_ratio["word"]["male"] + gender_ratio["word"]["female"]
if n_gender != 0:
gender_ratio["word"]["male"] /= n_gender
gender_ratio["word"]["female"] /= n_gender
else:
gender_ratio["word"]["male"] = 0
gender_ratio["word"]["female"] = 0
n_gender = (
gender_ratio["single_name"]["male"] + gender_ratio["single_name"]["female"]
)
if n_gender != 0:
gender_ratio["single_name"]["male"] /= n_gender
gender_ratio["single_name"]["female"] /= n_gender
else:
gender_ratio["single_name"]["male"] = 0
gender_ratio["single_name"]["female"] = 0
# get ratio of hate_speech:offensive_language:neither
# for k,v in hatespeech.items():
# hatespeech[k]["ratio"] /= 2* len(samples)
res = {
"dataset-level": {
"length_info": {
"max_text1_length": np.max(text1_lengths),
"min_text1_length": np.min(text1_lengths),
"average_text1_length": np.average(text1_lengths),
"max_text2_length": np.max(text2_lengths),
"min_text2_length": np.min(text2_lengths),
"average_text2_length": np.average(text2_lengths),
"text1_divided_text2": np.average(text1_divided_text2),
},
"label_info": {
"ratio": min(labels_to_number.values())
* 1.0
/ max(labels_to_number.values()),
"distribution": labels_to_number,
},
"vocabulary_info": vocab_sorted,
"number_of_samples": len(samples),
"number_of_tokens": number_of_tokens,
"gender_info": gender_ratio,
"average_similarity": np.average(similarities),
# "hatespeech_info": hatespeech,
},
"sample-level": sample_infos,
}
return res
| 33.237762 | 87 | 0.577951 |
4a21d3ddfe098e06439edc0703696a88eebaf349 | 399 | py | Python | BookMeeting/BookMeeting/wsgi.py | yutanguyen25/BookMeeting | e4c3115e09b4bbbe6ec7d739a7c3febf37b8a63d | ["MIT"] | null | null | null | BookMeeting/BookMeeting/wsgi.py | yutanguyen25/BookMeeting | e4c3115e09b4bbbe6ec7d739a7c3febf37b8a63d | ["MIT"] | null | null | null | BookMeeting/BookMeeting/wsgi.py | yutanguyen25/BookMeeting | e4c3115e09b4bbbe6ec7d739a7c3febf37b8a63d | ["MIT"] | null | null | null |
"""
WSGI config for BookMeeting project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'BookMeeting.settings')
application = get_wsgi_application()
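# Illustrative deployment sketch (not part of the original file): a WSGI server
# imports this module and calls ``application``, e.g.
#   gunicorn BookMeeting.wsgi:application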
| 23.470588 | 78 | 0.789474 |
4a21d40cb6e103c77caf2cd2ba4decbacbaa01d9 | 1,156 | py | Python | setup.py | popeliao/alpaca-trade-api-python | ebc913af7c67f6dc90c3b6eecb7ff35740cadba0 | ["Apache-2.0"] | null | null | null | setup.py | popeliao/alpaca-trade-api-python | ebc913af7c67f6dc90c3b6eecb7ff35740cadba0 | ["Apache-2.0"] | null | null | null | setup.py | popeliao/alpaca-trade-api-python | ebc913af7c67f6dc90c3b6eecb7ff35740cadba0 | ["Apache-2.0"] | 1 | 2019-07-27T03:04:17.000Z | 2019-07-27T03:04:17.000Z |
#!/usr/bin/env python
import ast
import re
from setuptools import setup
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('alpaca_trade_api/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
with open('README.md') as readme_file:
README = readme_file.read()
setup(
name='alpaca-trade-api',
version=version,
description='Alpaca API python client',
long_description=README,
long_description_content_type='text/markdown',
author='Alpaca',
author_email='[email protected]',
url='https://github.com/alpacahq/alpaca-trade-api-python',
keywords='financial,timeseries,api,trade',
packages=['alpaca_trade_api', 'alpaca_trade_api.polygon'],
install_requires=[
'asyncio-nats-client',
'pandas',
'requests',
'urllib3<1.25',
'websocket-client',
'websockets>=8.0',
],
tests_require=[
'pytest',
'pytest-cov',
'requests-mock',
'coverage>=4.4.1',
'mock>=1.0.1',
'flake8',
],
setup_requires=['pytest-runner', 'flake8'],
)
| 25.688889 | 62 | 0.624567 |
4a21d40d13a2e3a03b8076fdc21c9fe4265d0cfb | 5,241 | py | Python | src/clients/python/simple_client.py | wilwang-nv/tensorrt-inference-server | a99ab7b1320f06a2ebce6088f2ecc31faf10e13e | ["BSD-3-Clause"] | null | null | null | src/clients/python/simple_client.py | wilwang-nv/tensorrt-inference-server | a99ab7b1320f06a2ebce6088f2ecc31faf10e13e | ["BSD-3-Clause"] | null | null | null | src/clients/python/simple_client.py | wilwang-nv/tensorrt-inference-server | a99ab7b1320f06a2ebce6088f2ecc31faf10e13e | ["BSD-3-Clause"] | 1 | 2020-08-15T09:56:00.000Z | 2020-08-15T09:56:00.000Z |
#!/usr/bin/python
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import numpy as np
import os
import sys
from builtins import range
from tensorrtserver.api import *
FLAGS = None
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action="store_true", required=False, default=False,
help='Enable verbose output')
parser.add_argument('-u', '--url', type=str, required=False, default='localhost:8000',
help='Inference server URL. Default is localhost:8000.')
parser.add_argument('-i', '--protocol', type=str, required=False, default='http',
help='Protocol ("http"/"grpc") used to ' +
'communicate with inference service. Default is "http".')
parser.add_argument('-H', dest='http_headers', metavar="HTTP_HEADER",
required=False, action='append',
help='HTTP headers to add to inference server requests. ' +
'Format is -H"Header:Value".')
FLAGS = parser.parse_args()
protocol = ProtocolType.from_str(FLAGS.protocol)
# We use a simple model that takes 2 input tensors of 16 integers
# each and returns 2 output tensors of 16 integers each. One
# output tensor is the element-wise sum of the inputs and one
# output is the element-wise difference.
model_name = "simple"
model_version = -1
batch_size = 1
# Create a health context, get the ready and live state of server.
health_ctx = ServerHealthContext(FLAGS.url, protocol,
http_headers=FLAGS.http_headers, verbose=FLAGS.verbose)
print("Health for model {}".format(model_name))
print("Live: {}".format(health_ctx.is_live()))
print("Ready: {}".format(health_ctx.is_ready()))
# Create a status context and get server status
status_ctx = ServerStatusContext(FLAGS.url, protocol, model_name,
http_headers=FLAGS.http_headers, verbose=FLAGS.verbose)
print("Status for model {}".format(model_name))
print(status_ctx.get_server_status())
# Create the inference context for the model.
infer_ctx = InferContext(FLAGS.url, protocol, model_name, model_version,
http_headers=FLAGS.http_headers, verbose=FLAGS.verbose)
# Create the data for the two input tensors. Initialize the first
# to unique integers and the second to all ones.
input0_data = np.arange(start=0, stop=16, dtype=np.int32)
input1_data = np.ones(shape=16, dtype=np.int32)
# Send inference request to the inference server. Get results for
# both output tensors.
result = infer_ctx.run({ 'INPUT0' : (input0_data,),
'INPUT1' : (input1_data,) },
{ 'OUTPUT0' : InferContext.ResultFormat.RAW,
'OUTPUT1' : InferContext.ResultFormat.RAW },
batch_size)
# We expect there to be 2 results (each with batch-size 1). Walk
# over all 16 result elements and print the sum and difference
# calculated by the model.
output0_data = result['OUTPUT0'][0]
output1_data = result['OUTPUT1'][0]
for i in range(16):
print(str(input0_data[i]) + " + " + str(input1_data[i]) + " = " + str(output0_data[i]))
print(str(input0_data[i]) + " - " + str(input1_data[i]) + " = " + str(output1_data[i]))
if (input0_data[i] + input1_data[i]) != output0_data[i]:
print("error: incorrect sum");
sys.exit(1);
if (input0_data[i] - input1_data[i]) != output1_data[i]:
print("error: incorrect difference");
sys.exit(1);
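    # Typical invocation (illustrative, assumes a running server with the
    # "simple" model loaded):
    #   python simple_client.py -u localhost:8000 -i http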
| 48.981308 | 95 | 0.666667 |
4a21d4117e03ba5e579655ccb4a6a0eee892fa9e | 661 | py | Python | odps/df/backends/sqlalchemy/tests/__init__.py | wjsi/aliyun-odps-python-sdk | 8b064340e4376def201b8d8fdc0c2fa021aae9be | ["Apache-2.0"] | 412 | 2015-11-01T09:27:52.000Z | 2022-03-26T05:04:03.000Z | odps/df/backends/sqlalchemy/tests/__init__.py | wjsi/aliyun-odps-python-sdk | 8b064340e4376def201b8d8fdc0c2fa021aae9be | ["Apache-2.0"] | 168 | 2015-11-16T09:46:39.000Z | 2022-03-17T06:35:26.000Z | odps/df/backends/sqlalchemy/tests/__init__.py | wjsi/aliyun-odps-python-sdk | 8b064340e4376def201b8d8fdc0c2fa021aae9be | ["Apache-2.0"] | 103 | 2015-12-01T08:10:09.000Z | 2022-02-21T12:46:35.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
SKIP_IN_CI = True
| 36.722222 | 74 | 0.747352 |
4a21d587a16f5b0b7d797a75ae3d2b23ac80be1c | 1,114 | py | Python | tests/test_orth.py | Novermars/chaospy | 800f26203c21ee69c61bdba6a53f6cbede653167 | ["MIT"] | 1 | 2020-04-29T20:53:25.000Z | 2020-04-29T20:53:25.000Z | tests/test_orth.py | TribleCircle/chaospy | f22aa31e2a338a32a6d09b810c5b629c10a87236 | ["BSD-3-Clause"] | null | null | null | tests/test_orth.py | TribleCircle/chaospy | f22aa31e2a338a32a6d09b810c5b629c10a87236 | ["BSD-3-Clause"] | 1 | 2019-11-24T17:16:30.000Z | 2019-11-24T17:16:30.000Z |
"""Testing polynomial related to distributions
"""
import chaospy as cp
import numpy as np
def test_basic_mom():
dist = cp.Normal(0, 1)
res = np.array([1, 0, 1, 0, 3])
assert np.allclose(dist.mom(np.arange(5)), res)
def test_operator_E():
dist = cp.Normal(0, 1)
res = np.array([1, 0, 1, 0, 3])
x = cp.variable()
poly = x**np.arange(5)
assert np.allclose(cp.E(poly, dist), res)
def test_orth_ttr():
dist = cp.Normal(0, 1)
orth = cp.orth_ttr(5, dist)
outer = cp.outer(orth, orth)
Cov1 = cp.E(outer, dist)
Diatoric = Cov1 - np.diag(np.diag(Cov1))
assert np.allclose(Diatoric, 0)
Cov2 = cp.Cov(orth[1:], dist)
assert np.allclose(Cov1[1:,1:], Cov2)
def test_orth_chol():
dist = cp.Normal(0, 1)
orth1 = cp.orth_ttr(5, dist, normed=True)
orth2 = cp.orth_chol(5, dist, normed=True)
eps = cp.sum((orth1-orth2)**2)
assert np.allclose(eps(np.linspace(-100, 100, 5)), 0)
def test_orth_norms():
dist = cp.Normal(0, 1)
orth = cp.orth_ttr(5, dist, normed=True)
norms = cp.E(orth**2, dist)
assert np.allclose(norms, 1)
| 24.217391 | 57 | 0.614004 |
4a21d6a57744865eec9c0102d6a11b59d3a1c6bd | 2,219 | py | Python | sbol3/custom.py | brsynth/pySBOL3 | 5ae15b4f171991b3cd216b7548ffde7902f41c12 | ["MIT"] | 14 | 2020-09-14T20:28:08.000Z | 2022-01-23T13:04:31.000Z | sbol3/custom.py | brsynth/pySBOL3 | 5ae15b4f171991b3cd216b7548ffde7902f41c12 | ["MIT"] | 203 | 2020-05-13T16:15:21.000Z | 2022-03-24T17:40:09.000Z | sbol3/custom.py | brsynth/pySBOL3 | 5ae15b4f171991b3cd216b7548ffde7902f41c12 | ["MIT"] | 8 | 2020-07-29T16:37:19.000Z | 2022-03-23T12:22:55.000Z |
from typing import List
import rdflib
from . import *
class CustomIdentified(Identified):
def __init__(self, type_uri: str = None,
*, name: str = None, description: str = None,
derived_from: List[str] = None,
generated_by: List[str] = None,
measures: List[SBOLObject] = None,
identity: str = None,
sbol_type_uri: str = SBOL_IDENTIFIED) -> None:
super().__init__(identity=identity, type_uri=type_uri,
name=name, description=description,
derived_from=derived_from, generated_by=generated_by,
measures=measures)
self._rdf_types.append(sbol_type_uri)
def validate(self, report: ValidationReport = None) -> ValidationReport:
report = super().validate(report)
if len(self._rdf_types) < 2:
message = 'Extension classes must have at least 2 rdf:type properties'
report.addError(self.identity, None, message)
return report
class CustomTopLevel(TopLevel):
def __init__(self, identity: str = None, type_uri: str = None,
*, namespace: str = None,
attachments: List[str] = None,
name: str = None, description: str = None,
derived_from: List[str] = None,
generated_by: List[str] = None,
measures: List[SBOLObject] = None,
sbol_type_uri: str = SBOL_TOP_LEVEL) -> None:
super().__init__(identity=identity, type_uri=type_uri,
namespace=namespace,
attachments=attachments, name=name,
description=description, derived_from=derived_from,
generated_by=generated_by, measures=measures)
self._rdf_types.append(sbol_type_uri)
def validate(self, report: ValidationReport = None) -> ValidationReport:
report = super().validate(report)
if len(self._rdf_types) < 2:
message = 'Extension classes must have at least 2 rdf:type properties'
report.addError(self.identity, None, message)
return report
| 41.092593 | 82 | 0.584498 |
4a21d79581cf13a7dd729c96eadd13629644c0ed | 15,134 | py | Python | sgnlp/models/lsr/modeling.py | benedictleedm/sgnlp | 03f0fda8c517d9ca4baf737ce4c46b2495bbd3ba | ["MIT"] | null | null | null | sgnlp/models/lsr/modeling.py | benedictleedm/sgnlp | 03f0fda8c517d9ca4baf737ce4c46b2495bbd3ba | ["MIT"] | null | null | null | sgnlp/models/lsr/modeling.py | benedictleedm/sgnlp | 03f0fda8c517d9ca4baf737ce4c46b2495bbd3ba | ["MIT"] | null | null | null |
from typing import Optional
import torch
import torch.nn as nn
import numpy as np
from dataclasses import dataclass
from torch.nn.utils.rnn import pad_sequence
from transformers import PreTrainedModel, BertModel
from .config import LsrConfig
from .modules.encoder import Encoder
from .modules.attention import SelfAttention
from .modules.reasoner import DynamicReasoner
from .modules.reasoner import StructInduction
@dataclass
class LsrModelOutput:
"""
Output type of :class:`~sgnlp.models.lsr.modeling.LsrModel`
Args:
prediction (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, max_h_t_count, num_relations)`):
Prediction scores for all head to tail entity combinations from the final layer.
Note that the sigmoid function has not been applied at this point.
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when `labels` is provided ):
Loss on relation prediction task.
"""
prediction: torch.FloatTensor
loss: Optional[torch.FloatTensor] = None
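    # Illustrative post-processing (sketch, not part of the original class):
    # since `prediction` holds raw scores, per-relation probabilities are
    # obtained with
    #   probs = torch.sigmoid(output.prediction)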
class LsrPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = LsrConfig
base_model_prefix = "lsr"
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
class LsrModel(LsrPreTrainedModel):
"""The Latent Structure Refinement Model performs relation classification on all pairs of entity clusters.
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Args:
config (:class:`~sgnlp.models.lsr.config.LsrConfig`):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration.
Use the :obj:`.from_pretrained` method to load the model weights.
Example::
from sgnlp.models.lsr import LsrModel, LsrConfig
# Method 1: Loading a default model
config = LsrConfig()
model = LsrModel(config)
# Method 2: Loading from pretrained
config = LsrConfig.from_pretrained('https://sgnlp.blob.core.windows.net/models/lsr/config.json')
model = LsrModel.from_pretrained('https://sgnlp.blob.core.windows.net/models/lsr/pytorch_model.bin',
config=config)
"""
def __init__(self, config: LsrConfig):
super().__init__(config)
self.config = config
# Common
self.dropout = nn.Dropout(config.dropout_rate)
self.relu = nn.ReLU()
# Document encoder layers
if config.use_bert:
self.bert = BertModel.from_pretrained("bert-base-uncased")
bert_hidden_size = 768
self.linear_re = nn.Linear(bert_hidden_size, config.hidden_dim)
else:
self.word_emb = nn.Embedding(config.word_embedding_shape[0], config.word_embedding_shape[1])
if not config.finetune_emb:
self.word_emb.weight.requires_grad = False
self.ner_emb = nn.Embedding(13, config.ner_dim, padding_idx=0)
self.coref_embed = nn.Embedding(config.max_length, config.coref_dim, padding_idx=0)
self.linear_re = nn.Linear(config.hidden_dim * 2, config.hidden_dim)
input_size = config.word_embedding_shape[1] + config.coref_dim + config.ner_dim
self.rnn_sent = Encoder(input_size, config.hidden_dim, config.dropout_emb, config.dropout_rate)
# Induce latent structure layers
self.use_struct_att = config.use_struct_att
if self.use_struct_att:
self.struct_induction = StructInduction(config.hidden_dim // 2, config.hidden_dim, True)
self.dropout_gcn = nn.Dropout(config.dropout_gcn)
self.use_reasoning_block = config.use_reasoning_block
if self.use_reasoning_block:
self.reasoner = nn.ModuleList()
self.reasoner.append(DynamicReasoner(config.hidden_dim, config.reasoner_layer_sizes[0], self.dropout_gcn))
self.reasoner.append(DynamicReasoner(config.hidden_dim, config.reasoner_layer_sizes[1], self.dropout_gcn))
# Output layers
self.dis_embed = nn.Embedding(20, config.distance_size, padding_idx=10)
self.self_att = SelfAttention(config.hidden_dim)
self.bili = torch.nn.Bilinear(config.hidden_dim + config.distance_size,
config.hidden_dim + config.distance_size, config.hidden_dim)
self.linear_output = nn.Linear(2 * config.hidden_dim, config.num_relations)
self.init_weights()
def load_pretrained_word_embedding(self, pretrained_word_embedding):
self.word_emb.weight.data.copy_(torch.from_numpy(pretrained_word_embedding))
def doc_encoder(self, input_sent, context_seg):
batch_size = context_seg.shape[0]
docs_emb = [] # sentence embedding
docs_len = []
sents_emb = []
for batch_no in range(batch_size):
sent_list = []
sent_lens = []
sent_index = ((context_seg[batch_no] == 1).nonzero()).squeeze(
-1).tolist() # array of start point for sentences in a document
pre_index = 0
for i, index in enumerate(sent_index):
if i != 0:
if i == 1:
sent_list.append(input_sent[batch_no][pre_index:index + 1])
sent_lens.append(index - pre_index + 1)
else:
sent_list.append(input_sent[batch_no][pre_index + 1:index + 1])
sent_lens.append(index - pre_index)
pre_index = index
sents = pad_sequence(sent_list).permute(1, 0, 2)
sent_lens_t = torch.LongTensor(sent_lens).to(device=self.device)
docs_len.append(sent_lens)
sents_output, sent_emb = self.rnn_sent(sents, sent_lens_t) # sentence embeddings for a document.
doc_emb = None
for i, (sen_len, emb) in enumerate(zip(sent_lens, sents_output)):
if i == 0:
doc_emb = emb[:sen_len]
else:
doc_emb = torch.cat([doc_emb, emb[:sen_len]], dim=0)
docs_emb.append(doc_emb)
sents_emb.append(sent_emb.squeeze(1))
docs_emb = pad_sequence(docs_emb).permute(1, 0, 2) # B * # sentence * Dimension
sents_emb = pad_sequence(sents_emb).permute(1, 0, 2)
return docs_emb, sents_emb
def forward(self, context_idxs, context_pos, context_ner, h_mapping, t_mapping,
relation_mask, dis_h_2_t, dis_t_2_h, context_seg, node_position, entity_position,
node_sent_num, all_node_num, entity_num_list, sdp_position, sdp_num_list, context_masks=None,
context_starts=None, relation_multi_label=None, **kwargs):
# TODO: current kwargs are ignored, to allow preprocessing to pass in unnecessary arguments
# TODO: Fix upstream preprocessing such that it is filtered out before passing in.
"""
Args:
context_idxs (:obj:`torch.LongTensor` of shape :obj:`(batch_size, max_tokens_length)`):
Token IDs.
context_pos (:obj:`torch.LongTensor` of shape :obj:`(batch_size, max_tokens_length)`):
Coref position IDs.
context_ner (:obj:`torch.LongTensor` of shape :obj:`(batch_size, max_tokens_length)`):
NER tag IDs.
h_mapping (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, h_t_limit, max_tokens_length)`):
Head entity position mapping.
t_mapping (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, h_t_limit, max_tokens_length)`):
Tail entity position mapping.
relation_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, h_t_limit)`):
Relation mask. 1 if relation exists in position else 0.
dis_h_2_t (:obj:`torch.LongTensor` of shape :obj:`(batch_size, h_t_limit)`):
Distance encoding from head to tail.
dis_t_2_h (:obj:`torch.LongTensor` of shape :obj:`(batch_size, h_t_limit)`):
Distance encoding from tail to head.
context_seg (:obj:`torch.LongTensor` of shape :obj:`(batch_size, max_tokens_length)`):
Start position of sentences in document. 1 to mark position is start of sentence else 0.
node_position (:obj:`torch.LongTensor` of shape :obj:`(batch_size, max_node_number, max_tokens_length)`):
Mention node position.
entity_position (:obj:`torch.LongTensor` of shape :obj:`(batch_size, max_entity_number, max_tokens_length)`):
Entity node position. An entity refers to all mentions referring to the same entity.
node_sent_num (:obj:`torch.LongTensor` of shape :obj:`(batch_size, max_sent_num)`):
Number of mention nodes in each sentence of a document.
all_node_num (:obj:`torch.LongTensor` of shape :obj:`(1)`):
Total number of nodes (mention + MDP) in a document.
entity_num_list (:obj:`List[int]` of shape :obj:`(batch_size)`):
Number of entity nodes in each document.
sdp_position (:obj:`torch.LongTensor` of shape :obj:`(batch_size, max_entity_number, max_tokens_length)`):
Meta dependency paths (MDP) node position.
sdp_num_list (:obj:`List[int]` of shape :obj:`(batch_size)`):
Number of MDP nodes in each document.
context_masks (:obj:`torch.LongTensor` of shape :obj:`(batch_size, max_length)`, `optional`):
Mask for padding tokens. Used by bert model only.
context_starts (:obj:`torch.LongTensor` of shape :obj:`(batch_size, max_length)`, `optional`):
Tensor indicating start of words. Used by bert model only.
relation_multi_label (:obj:`torch.LongTensor` of shape :obj:`(batch_size, h_t_limit, num_relations)`):
Label for all possible head to tail entity relations.
Returns:
output (:class:`~sgnlp.models.lsr.modeling.LsrModelOutput`)
"""
# Step 1: Encode the document
if self.config.use_bert:
context_output = self.bert(context_idxs, attention_mask=context_masks)[0]
context_output = [layer[starts.nonzero().squeeze(1)]
for layer, starts in zip(context_output, context_starts)]
context_output = pad_sequence(context_output, batch_first=True, padding_value=-1)
context_output = torch.nn.functional.pad(context_output,
(0, 0, 0, context_idxs.size(-1) - context_output.size(-2)))
context_output = self.dropout(torch.relu(self.linear_re(context_output)))
max_doc_len = 512
else:
sent_emb = torch.cat(
[self.word_emb(context_idxs), self.coref_embed(context_pos), self.ner_emb(context_ner)],
dim=-1)
docs_rep, sents_rep = self.doc_encoder(sent_emb, context_seg)
max_doc_len = docs_rep.shape[1]
context_output = self.dropout(torch.relu(self.linear_re(docs_rep)))
# Step 2: Extract all node reps of a document graph
# extract mention node representations
mention_num_list = torch.sum(node_sent_num, dim=1).tolist()
max_mention_num = max(mention_num_list)
mentions_rep = torch.bmm(node_position[:, :max_mention_num, :max_doc_len],
context_output) # mentions rep
# extract meta dependency paths (MDP) node representations
max_sdp_num = max(sdp_num_list)
sdp_rep = torch.bmm(sdp_position[:, :max_sdp_num, :max_doc_len], context_output)
# extract entity node representations
entity_rep = torch.bmm(entity_position[:, :, :max_doc_len], context_output)
# concatenate all nodes of an instance
gcn_inputs = []
all_node_num_batch = []
for batch_no, (m_n, e_n, s_n) in enumerate(zip(mention_num_list, entity_num_list, sdp_num_list)):
m_rep = mentions_rep[batch_no][:m_n]
e_rep = entity_rep[batch_no][:e_n]
s_rep = sdp_rep[batch_no][:s_n]
gcn_inputs.append(torch.cat((m_rep, e_rep, s_rep), dim=0))
node_num = m_n + e_n + s_n
all_node_num_batch.append(node_num)
gcn_inputs = pad_sequence(gcn_inputs).permute(1, 0, 2)
output = gcn_inputs
# Step 3: Induce the Latent Structure
if self.use_reasoning_block:
for i in range(len(self.reasoner)):
output = self.reasoner[i](output)
elif self.use_struct_att:
gcn_inputs, _ = self.struct_induction(gcn_inputs)
max_all_node_num = torch.max(all_node_num).item()
assert (gcn_inputs.shape[1] == max_all_node_num)
node_position = node_position.permute(0, 2, 1)
output = torch.bmm(node_position[:, :max_doc_len, :max_mention_num], output[:, :max_mention_num])
context_output = torch.add(context_output, output)
start_re_output = torch.matmul(h_mapping[:, :, :max_doc_len], context_output) # aggregation
end_re_output = torch.matmul(t_mapping[:, :, :max_doc_len], context_output) # aggregation
s_rep = torch.cat([start_re_output, self.dis_embed(dis_h_2_t)], dim=-1)
t_rep = torch.cat([end_re_output, self.dis_embed(dis_t_2_h)], dim=-1)
re_rep = self.dropout(self.relu(self.bili(s_rep, t_rep)))
re_rep = self.self_att(re_rep, re_rep, relation_mask)
prediction = self.linear_output(re_rep)
loss = None
if relation_multi_label is not None:
loss_fn = nn.BCEWithLogitsLoss(reduction='none')
loss = torch.sum(loss_fn(prediction, relation_multi_label) * relation_mask.unsqueeze(2)) \
/ torch.sum(relation_mask)
return LsrModelOutput(prediction=prediction, loss=loss)
| 50.112583 | 121 | 0.644773 |
4a21d8300a7e1e10567ca5fed9ba82401411bf14 | 2,207 | py | Python | huaweicloud-sdk-meeting/huaweicloudsdkmeeting/v1/model/add_publication_response.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | 1 | 2021-11-03T07:54:50.000Z | 2021-11-03T07:54:50.000Z | huaweicloud-sdk-meeting/huaweicloudsdkmeeting/v1/model/add_publication_response.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-meeting/huaweicloudsdkmeeting/v1/model/add_publication_response.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class AddPublicationResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
}
attribute_map = {
}
def __init__(self):
"""AddPublicationResponse - a model defined in huaweicloud sdk"""
super(AddPublicationResponse, self).__init__()
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AddPublicationResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 26.914634 | 74 | 0.537834 |
4a21d84a1ac131c23a773790f253b9dd8d58a5a7 | 18,726 | py | Python | tensorflow_federated/python/learning/framework/evaluation_test.py | truthiswill/federated | d25eeac036dfc2a485120a195fd904223cfc823a | [
"Apache-2.0"
] | null | null | null | tensorflow_federated/python/learning/framework/evaluation_test.py | truthiswill/federated | d25eeac036dfc2a485120a195fd904223cfc823a | [
"Apache-2.0"
] | null | null | null | tensorflow_federated/python/learning/framework/evaluation_test.py | truthiswill/federated | d25eeac036dfc2a485120a195fd904223cfc823a | [
"Apache-2.0"
] | null | null | null | # Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from absl.testing import parameterized
import tensorflow as tf
from tensorflow_federated.python.core.api import computation_base
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.api import test_case
from tensorflow_federated.python.core.backends.native import execution_contexts
from tensorflow_federated.python.core.impl.federated_context import intrinsics
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import placements
from tensorflow_federated.python.learning import keras_utils
from tensorflow_federated.python.learning import model_utils
from tensorflow_federated.python.learning.framework import evaluation
# Convenience aliases.
StructType = computation_types.StructType
TensorType = computation_types.TensorType
def keras_model_builder():
# Create a simple linear regression model, single output.
# We initialize all weights to one.
return tf.keras.Sequential([
tf.keras.layers.Dense(
1,
kernel_initializer='ones',
bias_initializer='ones',
input_shape=(1,))
])
def create_dataset():
# Create data satisfying y = 2*x + 1
x = [[1.0], [2.0], [3.0]]
y = [[3.0], [5.0], [7.0]]
return tf.data.Dataset.from_tensor_slices((x, y)).batch(1)
def get_input_spec():
return create_dataset().element_spec
def tff_model_builder():
return keras_utils.from_keras_model(
keras_model=keras_model_builder(),
input_spec=get_input_spec(),
loss=tf.keras.losses.MeanSquaredError(),
metrics=[tf.keras.metrics.MeanSquaredError()])
class BuildEvalWorkTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('default_simulation_loop', False),
('experimental_simulation_loop', True),
)
def test_evaluation_types(self, use_experimental_simulation_loop):
model = tff_model_builder()
model_weights_type = model_utils.weights_type_from_model(model)
client_eval_work = evaluation.build_eval_work(
tff_model_builder, model_weights_type, get_input_spec(),
use_experimental_simulation_loop)
self.assertIsInstance(client_eval_work, computation_base.Computation)
type_signature = client_eval_work.type_signature
self.assertLen(type_signature.parameter, 2)
type_signature.parameter[0].check_assignable_from(model_weights_type)
type_signature.parameter[1].check_assignable_from(
computation_types.SequenceType(get_input_spec()))
@parameterized.named_parameters(
('default_simulation_loop', False),
('experimental_simulation_loop', True),
)
def test_evaluation_on_default_weights(self,
use_experimental_simulation_loop):
model = tff_model_builder()
model_weights_type = model_utils.weights_type_from_model(model)
model_weights = model_utils.ModelWeights.from_model(model)
client_eval_work = evaluation.build_eval_work(
tff_model_builder, model_weights_type, get_input_spec(),
use_experimental_simulation_loop)
# All weights are set to 1, so the model outputs f(x) = x + 1.
eval_metrics = client_eval_work(model_weights, create_dataset())
self.assertCountEqual(eval_metrics.keys(),
['local_outputs', 'num_examples'])
self.assertEqual(eval_metrics['num_examples'], 3)
local_outputs = eval_metrics['local_outputs']
self.assertCountEqual(local_outputs.keys(), ['loss', 'mean_squared_error'])
self.assertEqual(local_outputs['loss'], local_outputs['mean_squared_error'])
expected_loss_sum = (3.0 - 2.0)**2 + (5.0 - 3.0)**2 + (7.0 - 4.0)**2
self.assertAllClose(
local_outputs['loss'], [expected_loss_sum, 3.0], atol=1e-6)
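    # Note: each entry in local_outputs is reported as a (sum, count) pair, which is why the
    # expected value above is [expected_loss_sum, 3.0] rather than the averaged loss.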
def test_evaluation_on_input_weights(self):
model = tff_model_builder()
model_weights_type = model_utils.weights_type_from_model(model)
model_weights = model_utils.ModelWeights.from_model(model)
zero_weights = tf.nest.map_structure(tf.zeros_like, model_weights)
tf.nest.map_structure(lambda v, t: v.assign(t), model_weights, zero_weights)
client_eval_work = evaluation.build_eval_work(tff_model_builder,
model_weights_type,
get_input_spec())
# We compute metrics where all weights are set to 0, so the model should
# output f(x) = 0.
eval_metrics = client_eval_work(model_weights, create_dataset())
self.assertCountEqual(eval_metrics.keys(),
['local_outputs', 'num_examples'])
self.assertEqual(eval_metrics['num_examples'], 3)
local_outputs = eval_metrics['local_outputs']
self.assertCountEqual(local_outputs.keys(), ['loss', 'mean_squared_error'])
self.assertEqual(local_outputs['loss'], local_outputs['mean_squared_error'])
expected_loss_sum = 9.0 + 25.0 + 49.0
self.assertAllClose(
local_outputs['loss'], [expected_loss_sum, 3.0], atol=1e-6)
class BuildModelMetricsAggregatorTest(tf.test.TestCase):
def _get_metrics_type(self):
return StructType([
('local_outputs',
StructType([
('mean_squared_error', (TensorType(tf.float32),
TensorType(tf.float32))),
('loss', (TensorType(tf.float32), TensorType(tf.float32))),
])),
('num_examples', TensorType(tf.float32)),
])
def _get_aggregated_metrics_type(self):
return StructType([
('eval',
StructType([
('mean_squared_error', TensorType(tf.float32)),
('loss', TensorType(tf.float32)),
])),
('stat', StructType([
('num_examples', TensorType(tf.float32)),
])),
])
def test_metrics_aggregator_types(self):
model = tff_model_builder()
metrics_type = self._get_metrics_type()
model_metrics_aggregator = evaluation.build_model_metrics_aggregator(
model, metrics_type)
self.assertIsInstance(model_metrics_aggregator,
computation_base.Computation)
aggregator_parameter = model_metrics_aggregator.type_signature.parameter
aggregator_parameter.check_assignable_from(
computation_types.at_clients(metrics_type))
aggregator_result = model_metrics_aggregator.type_signature.result
aggregator_result.check_assignable_from(
computation_types.at_server(self._get_aggregated_metrics_type()))
def test_metrics_aggregator_correctness_with_one_client(self):
client_metrics = collections.OrderedDict(
local_outputs=collections.OrderedDict(
mean_squared_error=(4.0, 2.0), loss=(5.0, 1.0)),
num_examples=10.0)
model = tff_model_builder()
metrics_type = self._get_metrics_type()
model_metrics_aggregator = evaluation.build_model_metrics_aggregator(
model, metrics_type)
aggregate_metrics = model_metrics_aggregator([client_metrics])
expected_metrics = collections.OrderedDict(
eval=collections.OrderedDict(mean_squared_error=2.0, loss=5.0),
stat=collections.OrderedDict(num_examples=10.0))
self.assertAllClose(aggregate_metrics, expected_metrics, atol=1e-6)
def test_metrics_aggregator_correctness_with_three_client(self):
client_metrics1 = collections.OrderedDict(
local_outputs=collections.OrderedDict(
mean_squared_error=(4.0, 2.0), loss=(5.0, 1.0)),
num_examples=10.0)
client_metrics2 = collections.OrderedDict(
local_outputs=collections.OrderedDict(
mean_squared_error=(4.0, 4.0), loss=(1.0, 5.0)),
num_examples=7.0)
client_metrics3 = collections.OrderedDict(
local_outputs=collections.OrderedDict(
mean_squared_error=(6.0, 2.0), loss=(5.0, 5.0)),
num_examples=3.0)
model = tff_model_builder()
metrics_type = self._get_metrics_type()
model_metrics_aggregator = evaluation.build_model_metrics_aggregator(
model, metrics_type)
federated_metrics = [client_metrics1, client_metrics2, client_metrics3]
aggregate_metrics = model_metrics_aggregator(federated_metrics)
expected_metrics = collections.OrderedDict(
eval=collections.OrderedDict(mean_squared_error=1.75, loss=1.0),
stat=collections.OrderedDict(num_examples=20.0))
self.assertAllClose(aggregate_metrics, expected_metrics, atol=1e-6)
class EvalComposerTest(tf.test.TestCase):
def create_test_distributor(self):
@computations.federated_computation(computation_types.at_server(tf.float32))
def basic_distribute(x):
return intrinsics.federated_broadcast(x)
return basic_distribute
def create_test_client_work(self):
@tf.function
def multiply_and_add(x, dataset):
total_sum = 0.0
for a in dataset:
total_sum = total_sum + x * a
return total_sum
@computations.tf_computation(tf.float32,
computation_types.SequenceType(tf.float32))
def basic_client_work(x, dataset):
return multiply_and_add(x, dataset)
return basic_client_work
def create_test_aggregator(self):
@computations.federated_computation(
computation_types.at_clients(tf.float32))
def basic_aggregate(x):
return intrinsics.federated_sum(x)
return basic_aggregate
def test_basic_composition_has_expected_types(self):
eval_computation = evaluation.compose_eval_computation(
self.create_test_distributor(), self.create_test_client_work(),
self.create_test_aggregator())
expected_parameter = computation_types.StructType([
computation_types.at_server(tf.float32),
computation_types.at_clients(
computation_types.SequenceType(tf.float32))
])
eval_computation.type_signature.parameter.check_assignable_from(
expected_parameter)
expected_result = computation_types.at_server(tf.float32)
eval_computation.type_signature.result.check_assignable_from(
expected_result)
def test_basic_composition_computes_expected_value(self):
eval_computation = evaluation.compose_eval_computation(
self.create_test_distributor(), self.create_test_client_work(),
self.create_test_aggregator())
client_data = [[1.0, 2.0, 3.0], [-1.0, -2.0, -5.0]]
actual_result = eval_computation(1.0, client_data)
self.assertEqual(actual_result, -2.0)
def test_basic_composition_with_struct_type(self):
distributor_struct = computation_types.at_server(StructType([tf.float32]))
@computations.federated_computation(distributor_struct)
def distributor_with_struct_parameter(x):
return intrinsics.federated_broadcast(x[0])
eval_computation = evaluation.compose_eval_computation(
distributor_with_struct_parameter, self.create_test_client_work(),
self.create_test_aggregator())
expected_parameter = computation_types.StructType([
distributor_struct,
computation_types.at_clients(
computation_types.SequenceType(tf.float32))
])
eval_computation.type_signature.parameter.check_assignable_from(
expected_parameter)
expected_result = computation_types.at_server(tf.float32)
eval_computation.type_signature.result.check_assignable_from(
expected_result)
def test_raises_on_python_callable_distributor(self):
def python_distributor(x):
return x
with self.assertRaises(TypeError):
evaluation.compose_eval_computation(python_distributor,
self.create_test_client_work(),
self.create_test_aggregator())
def test_raises_on_python_callable_client_work(self):
def python_client_work(x, y):
del y
return x
with self.assertRaises(TypeError):
evaluation.compose_eval_computation(self.create_test_distributor(),
python_client_work,
self.create_test_aggregator())
def test_raises_on_python_callable_aggregator(self):
def python_aggregator(x):
return x
with self.assertRaises(TypeError):
evaluation.compose_eval_computation(self.create_test_distributor(),
self.create_test_client_work(),
python_aggregator)
def test_no_arg_distributor_raises(self):
@computations.federated_computation
def no_arg_distribute():
return intrinsics.federated_value(1.0, placements.CLIENTS)
with self.assertRaises(evaluation.FederatedEvalTypeError):
evaluation.compose_eval_computation(no_arg_distribute,
self.create_test_client_work(),
self.create_test_aggregator())
def test_two_arg_distributor_raises(self):
@computations.federated_computation(
computation_types.at_server(tf.float32),
computation_types.at_server(tf.float32))
def two_arg_distribute(x, y):
del y
return intrinsics.federated_broadcast(x)
with self.assertRaises(evaluation.FederatedEvalTypeError):
evaluation.compose_eval_computation(two_arg_distribute,
self.create_test_client_work(),
self.create_test_aggregator())
def test_distributor_with_client_parameter_raises(self):
@computations.federated_computation(
computation_types.at_clients(tf.float32))
def distributor_with_client_parameter(x):
return x
with self.assertRaises(evaluation.FederatedEvalTypeError):
evaluation.compose_eval_computation(distributor_with_client_parameter,
self.create_test_client_work(),
self.create_test_aggregator())
def test_distributor_with_server_result_raises(self):
@computations.federated_computation(computation_types.at_server(tf.float32))
def distributor_with_server_result(x):
return x
with self.assertRaises(evaluation.FederatedEvalTypeError):
evaluation.compose_eval_computation(distributor_with_server_result,
self.create_test_client_work(),
self.create_test_aggregator())
def test_federated_client_work_raises(self):
@computations.federated_computation(
computation_types.at_clients(tf.float32),
computation_types.at_clients(
computation_types.SequenceType(tf.float32)))
def federated_client_work(model, dataset):
return intrinsics.federated_map(self.create_test_client_work(),
(model, dataset))
with self.assertRaises(evaluation.FederatedEvalTypeError):
evaluation.compose_eval_computation(self.create_test_distributor(),
federated_client_work,
self.create_test_aggregator())
def test_no_arg_aggregator_raises(self):
@computations.federated_computation
def no_arg_aggregate():
return intrinsics.federated_value(1.0, placements.SERVER)
with self.assertRaises(evaluation.FederatedEvalTypeError):
evaluation.compose_eval_computation(self.create_test_distributor(),
self.create_test_client_work(),
no_arg_aggregate)
def test_two_arg_aggregator_raises(self):
@computations.federated_computation(
computation_types.at_clients(tf.float32),
computation_types.at_clients(tf.float32))
def two_arg_aggregate(x, y):
del y
return intrinsics.federated_sum(x)
with self.assertRaises(evaluation.FederatedEvalTypeError):
evaluation.compose_eval_computation(self.create_test_distributor(),
self.create_test_client_work(),
two_arg_aggregate)
def test_aggregator_with_server_parameter_raises(self):
@computations.federated_computation(computation_types.at_server(tf.float32))
def aggregator_with_server_parameter(x):
return x
with self.assertRaises(evaluation.FederatedEvalTypeError):
evaluation.compose_eval_computation(self.create_test_distributor(),
self.create_test_client_work(),
aggregator_with_server_parameter)
def test_aggregator_with_client_result_raises(self):
@computations.federated_computation(
computation_types.at_clients(tf.float32))
def aggregator_with_client_result(x):
return x
with self.assertRaises(evaluation.FederatedEvalTypeError):
evaluation.compose_eval_computation(self.create_test_distributor(),
self.create_test_client_work(),
aggregator_with_client_result)
def test_distributor_client_work_type_mismatch_raises(self):
@computations.tf_computation(tf.int32, tf.float32)
def client_work_with_int_parameter(x, y):
del x
return y
with self.assertRaises(evaluation.FederatedEvalInputOutputError):
evaluation.compose_eval_computation(self.create_test_distributor(),
client_work_with_int_parameter,
self.create_test_aggregator())
def test_client_work_aggregator_type_mismatch_raises(self):
@computations.tf_computation(tf.float32, tf.int32)
def client_work_with_int_result(x, y):
del x
return y
with self.assertRaises(evaluation.FederatedEvalInputOutputError):
evaluation.compose_eval_computation(self.create_test_distributor(),
client_work_with_int_result,
self.create_test_aggregator())
if __name__ == '__main__':
execution_contexts.set_local_python_execution_context()
test_case.main()
| 39.423158 | 80 | 0.699562 |
4a21d86055108782edb7aedb43e417bd4374e05c | 7,091 | py | Python | pyspeckit/spectrum/models/n2dp.py | mwcraig/pyspeckit | 6d6c09aac29549a8c094d97fb385c9283422bb82 | [
"MIT"
] | null | null | null | pyspeckit/spectrum/models/n2dp.py | mwcraig/pyspeckit | 6d6c09aac29549a8c094d97fb385c9283422bb82 | [
"MIT"
] | null | null | null | pyspeckit/spectrum/models/n2dp.py | mwcraig/pyspeckit | 6d6c09aac29549a8c094d97fb385c9283422bb82 | [
"MIT"
] | 1 | 2018-10-02T15:11:17.000Z | 2018-10-02T15:11:17.000Z | """
===========
N2D+ fitter
===========
Reference for line params:
Dore (priv. comm.) line frequencies in CDMS,
line strength can also be obtained from Splatalogue
L. Dore, P. Caselli, S. Beninati, T. Bourke, P. C. Myers and G. Cazzoli A&A 413, 1177-1181 (2004)
http://adsabs.harvard.edu/abs/2004A%26A...413.1177D
L. Pagani, F. Daniel, and M. L. Dubernet A\%A 494, 719-727 (2009)
DOI: 10.1051/0004-6361:200810570
"""
from . import hyperfine
import astropy.units as u
# line_names = ['J1-0', 'J2-1', 'J3-2',]
# line_names = ['J2-1', 'J3-2',]
freq_dict_cen ={
# 'J1-0': 77109.2697e6,
'J2-1': 154217.1805e6,
'J3-2': 231321.9119e6,
}
voff_lines_dict={
####### J 2-1
'J2-1_01': -5.6031,
'J2-1_02': -5.5332,
'J2-1_03': -5.3617,
'J2-1_04': -5.0993,
'J2-1_05': -4.9677,
'J2-1_06': -4.7052,
'J2-1_07': -3.8195,
'J2-1_08': -3.5571,
'J2-1_09': -2.8342,
'J2-1_10': -2.3388,
'J2-1_11': -1.9449,
'J2-1_12': -1.9002,
'J2-1_13': -1.7733,
'J2-1_14': -1.3965,
'J2-1_15': -1.0025,
'J2-1_16': -0.7968,
'J2-1_17': -0.5740,
'J2-1_18': -0.2311,
'J2-1_19': -0.0085,
'J2-1_20': 0.0000,
'J2-1_21': 0.1351,
'J2-1_22': 0.1457,
'J2-1_23': 0.1886,
'J2-1_24': 0.2538,
'J2-1_25': 0.6165,
'J2-1_26': 0.7541,
'J2-1_27': 0.8789,
'J2-1_28': 2.5594,
'J2-1_29': 3.0143,
'J2-1_30': 3.0632,
'J2-1_31': 3.1579,
'J2-1_32': 3.4572,
'J2-1_33': 3.6394,
'J2-1_34': 3.7234,
'J2-1_35': 3.9567,
'J2-1_36': 4.2049,
'J2-1_37': 4.5817,
'J2-1_38': 4.6054,
'J2-1_39': 8.4164,
'J2-1_40': 9.0414,
####### J 3-2
'J3-2_01': -3.7164,
'J3-2_02': -3.5339,
'J3-2_03': -3.2997,
'J3-2_04': -3.2130,
'J3-2_05': -3.0633,
'J3-2_06': -2.8958,
'J3-2_07': -2.7424,
'J3-2_08': -2.6466,
'J3-2_09': -2.5748,
'J3-2_10': -1.9177,
'J3-2_11': -1.2333,
    'J3-2_12': -0.7628,
'J3-2_13': -0.7590,
'J3-2_14': -0.7306,
'J3-2_15': -0.5953,
'J3-2_16': -0.5765,
'J3-2_17': -0.3419,
'J3-2_18': -0.0925,
'J3-2_19': -0.0210,
'J3-2_20': 0.0000,
'J3-2_21': 0.0065,
'J3-2_22': 0.0616,
'J3-2_23': 0.0618,
'J3-2_24': 0.0675,
'J3-2_25': 0.0748,
'J3-2_26': 0.2212,
'J3-2_27': 0.2691,
'J3-2_28': 0.4515,
'J3-2_29': 0.5422,
'J3-2_30': 0.5647,
'J3-2_31': 0.6050,
'J3-2_32': 0.6596,
'J3-2_33': 0.9222,
'J3-2_34': 1.0897,
'J3-2_35': 1.9586,
'J3-2_36': 2.0471,
'J3-2_37': 2.5218,
'J3-2_38': 2.5500,
'J3-2_39': 2.6156,
'J3-2_40': 3.0245,
'J3-2_41': 3.1786,
'J3-2_42': 3.3810,
'J3-2_43': 3.6436,
'J3-2_44': 4.2066,
}
line_strength_dict = {
####### J 2-1
'J2-1_01': 0.008262,
'J2-1_02': 0.005907,
'J2-1_03': 0.031334,
'J2-1_04': 0.013833,
'J2-1_05': 0.013341,
'J2-1_06': 0.010384,
'J2-1_07': 0.000213,
'J2-1_08': 0.000675,
'J2-1_09': 0.000150,
'J2-1_10': 0.001202,
'J2-1_11': 0.000963,
'J2-1_12': 0.000878,
'J2-1_13': 0.002533,
'J2-1_14': 0.000362,
'J2-1_15': 0.000162,
'J2-1_16': 0.021268,
'J2-1_17': 0.031130,
'J2-1_18': 0.000578,
'J2-1_19': 0.001008,
'J2-1_20': 0.200000,
'J2-1_21': 0.111666,
'J2-1_22': 0.088138,
'J2-1_23': 0.142511,
'J2-1_24': 0.011550,
'J2-1_25': 0.027472,
'J2-1_26': 0.012894,
'J2-1_27': 0.066406,
'J2-1_28': 0.013082,
'J2-1_29': 0.003207,
'J2-1_30': 0.061847,
'J2-1_31': 0.004932,
'J2-1_32': 0.035910,
'J2-1_33': 0.011102,
'J2-1_34': 0.038958,
'J2-1_35': 0.019743,
'J2-1_36': 0.004297,
'J2-1_37': 0.001830,
'J2-1_38': 0.000240,
'J2-1_39': 0.000029,
'J2-1_40': 0.000004,
####### J 3-2
'J3-2_01': 0.001842,
'J3-2_02': 0.001819,
'J3-2_03': 0.003544,
'J3-2_04': 0.014100,
'J3-2_05': 0.011404,
'J3-2_06': 0.000088,
'J3-2_07': 0.002201,
'J3-2_08': 0.002153,
'J3-2_09': 0.000059,
'J3-2_10': 0.000058,
'J3-2_11': 0.000203,
'J3-2_12': 0.000259,
'J3-2_13': 0.000248,
'J3-2_14': 0.000437,
'J3-2_15': 0.010215,
'J3-2_16': 0.000073,
'J3-2_17': 0.007445,
'J3-2_18': 0.000155,
'J3-2_19': 0.000272,
'J3-2_20': 0.174603,
'J3-2_21': 0.018678,
'J3-2_22': 0.100524,
'J3-2_23': 0.135563,
'J3-2_24': 0.124910,
'J3-2_25': 0.060970,
'J3-2_26': 0.088513,
'J3-2_27': 0.001085,
'J3-2_28': 0.094480,
'J3-2_29': 0.013955,
'J3-2_30': 0.007236,
'J3-2_31': 0.022222,
'J3-2_32': 0.047921,
'J3-2_33': 0.015427,
'J3-2_34': 0.000070,
'J3-2_35': 0.000796,
'J3-2_36': 0.001373,
'J3-2_37': 0.007147,
'J3-2_38': 0.016574,
'J3-2_39': 0.009776,
'J3-2_40': 0.000995,
'J3-2_41': 0.000491,
'J3-2_42': 0.000067,
'J3-2_43': 0.000039,
'J3-2_44': 0.000010,
}
# freq_dict = {
# 'J2-1': (voff_lines_dict['J2-1']*u.km/u.s).to(u.GHz, equivalencies=u.doppler_radio(freq_dict_cen['J2-1']*u.Hz)).value,
# 'J3-2': (voff_lines_dict['J3-2']*u.km/u.s).to(u.GHz, equivalencies=u.doppler_radio(freq_dict_cen['J3-2']*u.Hz)).value,
# }
# Get frequency dictionary in Hz based on the offset velocity and rest frequency
conv_J21=u.doppler_radio(freq_dict_cen['J2-1']*u.Hz)
conv_J32=u.doppler_radio(freq_dict_cen['J3-2']*u.Hz)
freq_dict = {
name: ((voff_lines_dict[name]*u.km/u.s).to(u.Hz, equivalencies=conv_J21).value) for name in voff_lines_dict.keys() if "J2-1" in name
}
freq_dict.update({
name: ((voff_lines_dict[name]*u.km/u.s).to(u.Hz, equivalencies=conv_J32).value) for name in voff_lines_dict.keys() if "J3-2" in name
})
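# Illustrative check (approximate numbers only): in the radio convention f = f0 * (1 - v/c),
# the J2-1 component at voff = -5.6031 km/s lands roughly 2.88 MHz above the rest frequency,
# i.e. about 154220.06 MHz.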
# I don't know yet how to use this parameter... in CLASS it does not exist
# Note to Jaime: this is the sum of the degeneracy values for all hyperfines
# for a given line; it gives the relative weights between the J=2-1 and J=3-2
# lines, for example (the hyperfine weights are treated as normalized within
# one rotational transition)
w21 = sum(val for name,val in line_strength_dict.items() if 'J2-1' in name)
w32 = sum(val for name,val in line_strength_dict.items() if 'J3-2' in name)
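# Sanity-check sketch (assumes the tabulated strengths are complete and normalized per
# rotational line, as noted above): both sums should be close to unity, e.g.
#   assert abs(w21 - 1.0) < 0.01 and abs(w32 - 1.0) < 0.01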
relative_strength_total_degeneracy = {
name : w21 for name in line_strength_dict.keys() if "J2-1" in name
}
relative_strength_total_degeneracy.update({
name : w32 for name in line_strength_dict.keys() if "J3-2" in name
})
# Get the list of line names from the previous lists
line_names = [name for name in voff_lines_dict.keys()]
# 'J2-1': np.array([1]*len(voff_lines_dict['J2-1'])),
# 'J3-2': np.array([1]*len(voff_lines_dict['J3-2'])),
# }
# aval_dict = {
# # 'J1-0': 10**(-4.90770),
# 'J2-1': 10**(-3.92220),
# 'J3-2': 10**(-3.35866),
# }
n2dp_vtau = hyperfine.hyperfinemodel(line_names, voff_lines_dict, freq_dict,
line_strength_dict,
relative_strength_total_degeneracy)
n2dp_vtau_fitter = n2dp_vtau.fitter
n2dp_vtau_vheight_fitter = n2dp_vtau.vheight_fitter
n2dp_vtau_tbg_fitter = n2dp_vtau.background_fitter
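# Example usage (illustrative only; the registration call and guess order may differ
# between pyspeckit versions, and 'n2dp.fits' is a placeholder file name):
#   import pyspeckit
#   sp = pyspeckit.Spectrum('n2dp.fits')
#   sp.Registry.add_fitter('n2dp_vtau', n2dp_vtau_fitter, 4)
#   sp.specfit(fittype='n2dp_vtau', guesses=[Tex, tau, v_center, width])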
| 27.807843 | 136 | 0.561839 |
4a21d8e2044bff2d72bd5141d2705e2a36a8b3dc | 252 | py | Python | archive/management/commands/testis.py | pastpages/savemy.news | 39ff49ffd2f63308a847243dccc95b82b69cb06c | [
"MIT"
] | 19 | 2017-11-06T17:06:44.000Z | 2020-10-15T16:59:12.000Z | archive/management/commands/testis.py | pastpages/savemy.news | 39ff49ffd2f63308a847243dccc95b82b69cb06c | [
"MIT"
] | 25 | 2017-11-06T17:45:02.000Z | 2021-09-22T17:54:35.000Z | archive/management/commands/testis.py | palewire/savemy.news | 39ff49ffd2f63308a847243dccc95b82b69cb06c | [
"MIT"
] | 1 | 2019-03-16T17:43:59.000Z | 2019-03-16T17:43:59.000Z | from django.core.management.base import BaseCommand
from archive import tasks
from archive.models import Clip
class Command(BaseCommand):
def handle(self, *args, **options):
clip = Clip.objects.all()[0]
tasks.is_memento(clip.id)
| 22.909091 | 51 | 0.718254 |
4a21dc265b277815b5e181a73df37dc687255a85 | 1,149 | py | Python | src/RIOT/tests/pkg_utensor/generate_digit.py | ARte-team/ARte | 19f17f57522e1b18ba390718fc94be246451837b | [
"MIT"
] | 2 | 2020-04-30T08:17:45.000Z | 2020-05-23T08:46:54.000Z | src/RIOT/tests/pkg_utensor/generate_digit.py | ARte-team/ARte | 19f17f57522e1b18ba390718fc94be246451837b | [
"MIT"
] | null | null | null | src/RIOT/tests/pkg_utensor/generate_digit.py | ARte-team/ARte | 19f17f57522e1b18ba390718fc94be246451837b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Generate a binary file from a sample image of the MNIST dataset.
Pixels of the sample are stored as float32; images have size 28x28.
"""
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
def main(args):
_, (mnist_test, _) = tf.keras.datasets.mnist.load_data()
data = mnist_test[args.index]
output_path = os.path.join(SCRIPT_DIR, args.output)
np.ndarray.tofile(data.astype('float32'), output_path)
if args.no_plot is False:
plt.gray()
plt.imshow(data.reshape(28, 28))
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--index", type=int, default=0,
help="Image index in MNIST test dataset")
parser.add_argument("-o", "--output", type=str, default='digit',
help="Output filename")
parser.add_argument("--no-plot", default=False, action='store_true',
help="Disable image display in matplotlib")
main(parser.parse_args())
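# Example invocation (output name is illustrative):
#   python generate_digit.py --index 7 --output digit --no-plot
# This writes a raw 28x28 float32 array (3136 bytes) next to this script.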
| 28.02439 | 72 | 0.655352 |
4a21dc925b9129d7417e4390116145c14f911549 | 2,086 | py | Python | scripts/support__ignore_these_files/multi_output.py | adelyame/TradingNeuralNetwork | 1b62b47bd1e82a94c58d6cdec6f6d1a5421f2a6a | [
"BSD-3-Clause"
] | 7 | 2021-02-09T20:05:52.000Z | 2022-01-06T04:07:16.000Z | scripts/support__ignore_these_files/multi_output.py | adelyame/TradingNeuralNetwork | 1b62b47bd1e82a94c58d6cdec6f6d1a5421f2a6a | [
"BSD-3-Clause"
] | 1 | 2021-02-09T17:00:16.000Z | 2021-02-09T17:00:16.000Z | scripts/support__ignore_these_files/multi_output.py | adelyame/TradingNeuralNetwork | 1b62b47bd1e82a94c58d6cdec6f6d1a5421f2a6a | [
"BSD-3-Clause"
] | 5 | 2021-02-17T19:26:05.000Z | 2022-02-13T01:19:02.000Z | import tensorflow as tf
from tensorflow.keras.layers import Dense
from tensorflow.keras import Model
from sklearn.datasets import load_iris
from tensorflow.keras.utils import to_categorical
import tensorflow.keras.backend as K
tf.keras.backend.set_floatx('float64')
import numpy as np
iris, target = load_iris(return_X_y=True)
K.clear_session()
X = iris[:, :3]
y = iris[:, 3]
z = target
ds = tf.data.Dataset.from_tensor_slices((X, y, z)).shuffle(buffer_size=150).batch(32)
class MyModel(Model):
def __init__(self):
super(MyModel, self).__init__()
self.d0 = Dense(16, activation='relu')
self.d1 = Dense(32, activation='relu')
self.d2_1 = Dense(1)
self.d2_2 = Dense(4, activation='softmax')
def call(self, x):
x = self.d0(x)
x = self.d1(x)
y_1 = self.d2_1(x)
y_2 = self.d2_2(x)
return y_1, y_2
model = MyModel()
loss_objects = [tf.keras.losses.MeanAbsoluteError(), tf.keras.losses.SparseCategoricalCrossentropy()]
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
acc = tf.keras.metrics.Accuracy(name='categorical loss')
loss = tf.keras.metrics.MeanAbsoluteError()
#error = tf.keras.metrics.MeanAbsoluteError()
@tf.function
def train_step(inputs, targets):
with tf.GradientTape() as tape:
outputs = model(inputs)
losses = [l(t, o) for l,o,t in zip(loss_objects, outputs, targets)]
gradients = tape.gradient(losses, model.trainable_variables)
#print(gradients)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
#optimizer.apply_gradients(zip(gradients[1], model.trainable_variables))
return outputs
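# Note: passing the list of per-head losses to tape.gradient() sums their gradients,
# so the single apply_gradients call above updates the shared layers and both output heads.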
for epoch in range(50):
for xx, yy, zz in ds: # what to do with zz, the categorical target?
outs = train_step(xx, [yy,zz])
res1 = acc.update_state(zz, np.argmax(outs[1], axis=1))
res2 = loss.update_state(yy, outs[0])
template = 'Epoch {:>2}, Accuracy: {:>5.2f}, MAE: {:>5.2f}'
print(template.format(epoch+1, acc.result(), loss.result()))
acc.reset_states()
loss.reset_states()
| 30.676471 | 101 | 0.683605 |
4a21dceabf164071fd372cf3f759e41065c8684a | 925 | bzl | Python | third_party/llvm/workspace.bzl | functionxu123/tensorflow | 9ddf6a26ba7b97ba33fbfb3b1e44f04f1498fac7 | [
"Apache-2.0"
] | 1 | 2022-02-15T11:09:26.000Z | 2022-02-15T11:09:26.000Z | third_party/llvm/workspace.bzl | functionxu123/tensorflow | 9ddf6a26ba7b97ba33fbfb3b1e44f04f1498fac7 | [
"Apache-2.0"
] | null | null | null | third_party/llvm/workspace.bzl | functionxu123/tensorflow | 9ddf6a26ba7b97ba33fbfb3b1e44f04f1498fac7 | [
"Apache-2.0"
] | null | null | null | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "18bf42c0a68828b5e7247bcee87ec56f3e6f234b"
LLVM_SHA256 = "d8c13b005ddd8d25db6c20caf01584a28b04f768b11c66fc6b8a711f9dcf2416"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:llvm.BUILD",
patch_file = ["//third_party/llvm:macos_build_fix.patch"],
link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
)
| 42.045455 | 149 | 0.674595 |
4a21dd6e430840c9a91af033cae31a905fb716b0 | 24,073 | py | Python | tvcharts/src/plugin.py | wedebe/enigma2-plugins | 58e1897866ad65294283970e96e5f2841c3cb6e2 | [
"OLDAP-2.3"
] | null | null | null | tvcharts/src/plugin.py | wedebe/enigma2-plugins | 58e1897866ad65294283970e96e5f2841c3cb6e2 | [
"OLDAP-2.3"
] | null | null | null | tvcharts/src/plugin.py | wedebe/enigma2-plugins | 58e1897866ad65294283970e96e5f2841c3cb6e2 | [
"OLDAP-2.3"
] | null | null | null | #####################################################
# TVCharts Plugin for Enigma2 Dreamboxes
# Coded by Homey (c) 2011
#
# Version: 1.5
# Support: www.i-have-a-dreambox.com
#####################################################
from Components.About import about
from Components.ActionMap import ActionMap
from Components.Button import Button
from Components.config import config, configfile, getConfigListEntry, ConfigSubsection, ConfigYesNo, ConfigInteger, ConfigSelection
from Components.ConfigList import ConfigList, ConfigListScreen
from Components.Label import Label
from Components.MenuList import MenuList
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaTest
from Components.Network import iNetwork
from Components.ServiceEventTracker import ServiceEventTracker
from Components.Sources.StaticText import StaticText
from Components.UsageConfig import preferredTimerPath
from Components.Pixmap import Pixmap
from RecordTimer import RecordTimer, RecordTimerEntry, parseEvent
from ServiceReference import ServiceReference
from Screens.EventView import EventViewSimple
from Screens.MessageBox import MessageBox
from Screens.Screen import Screen
from Screens.Setup import SetupSummary
from Screens.TimerEntry import TimerEntry
from Screens.TimerEdit import TimerSanityConflict
from Tools.Directories import fileExists, pathExists, SCOPE_SKIN_IMAGE, SCOPE_ACTIVE_SKIN, resolveFilename
from Tools.HardwareInfo import HardwareInfo
from Plugins.Plugin import PluginDescriptor
from enigma import eTimer, eEPGCache, loadPNG, eListboxPythonMultiContent, gFont, eServiceReference, eServiceCenter, iPlayableService, BT_SCALE
from random import randint
from os import system as os_system
from time import time, gmtime, strftime
from twisted.web.client import getPage
from xml.dom.minidom import parse, parseString
from urllib import urlencode
import timer
import xml.etree.cElementTree
import Screens.Standby
##############################
##### CONFIG SETTINGS #####
##############################
config.plugins.tvcharts = ConfigSubsection()
config.plugins.tvcharts.enabled = ConfigYesNo(default=True)
config.plugins.tvcharts.maxentries = ConfigInteger(default=10, limits=(5, 100))
config.plugins.tvcharts.maxtimerentries = ConfigInteger(default=10, limits=(5, 100))
config.plugins.tvcharts.submittimers = ConfigYesNo(default=True)
config.plugins.tvcharts.submitplugins = ConfigYesNo(default=True)
config.plugins.tvcharts.bouquetfilter = ConfigYesNo(default=True)
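# Reading these settings elsewhere in the plugin (sketch):
#   if config.plugins.tvcharts.enabled.value:              # bool
#       limit = config.plugins.tvcharts.maxentries.value   # int, limited to 5..100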
##########################################################
session = []
#Channellist Menu Entry
class ChannelListMenu(MenuList):
def __init__(self, list, enableWrapAround=False):
MenuList.__init__(self, list, enableWrapAround, eListboxPythonMultiContent)
self.l.setFont(0, gFont("Regular", 24))
self.l.setFont(1, gFont("Regular", 20))
self.l.setFont(2, gFont("Regular", 16))
self.l.setItemHeight(76)
def ChannelListEntryComponent(type, channelname, serviceref, eventid, eventname, starttime, endtime, usercount, percent):
res = [(serviceref, eventid)]
# PIXMAP / PICON
pixmap = resolveFilename(SCOPE_ACTIVE_SKIN, "picon_default.png")
searchPaths = ('/usr/share/enigma2/picon/', '/media/cf/picon/', '/media/usb/picon/')
srefstring = serviceref
pos = srefstring.rfind(':')
if pos != -1:
srefstring = srefstring[:pos].rstrip(':').replace(':', '_')
for path in searchPaths:
pngname = path + srefstring + ".png"
if fileExists(pngname):
pixmap = pngname
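	# Illustrative mapping (example serviceref only): '1:0:1:445D:453:1:C00000:0:0:0:'
	# becomes '1_0_1_445D_453_1_C00000_0_0_0.png' in one of the picon search paths.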
# Build Menu
if type == "tvcharts":
res.append(MultiContentEntryPixmapAlphaTest(pos=(8, 8), size=(100, 60), png=loadPNG(pixmap), flags=BT_SCALE))
res.append(MultiContentEntryText(pos=(130, 5), size=(480, 30), font=0, text="%s (Viewer: %s)" % (channelname, usercount)))
res.append(MultiContentEntryText(pos=(130, 35), size=(480, 25), font=1, text=eventname))
elif type == "timercharts":
res.append(MultiContentEntryPixmapAlphaTest(pos=(10, 10), size=(100, 60), png=loadPNG(pixmap), flags=BT_SCALE))
res.append(MultiContentEntryText(pos=(130, 5), size=(480, 28), font=0, text="%s (User: %s)" % (channelname, usercount)))
res.append(MultiContentEntryText(pos=(130, 33), size=(480, 25), font=1, text=eventname))
res.append(MultiContentEntryText(pos=(130, 57), size=(480, 20), font=2, text="%s Uhr - %s Uhr (%smin)" % (strftime("%d.%m.%Y %H:%M", gmtime(starttime)), strftime("%H:%M", gmtime(endtime)), int((endtime - starttime) / 60))))
elif type == "moviecharts":
res.append(MultiContentEntryPixmapAlphaTest(pos=(8, 8), size=(100, 60), png=loadPNG(pixmap), flags=BT_SCALE))
res.append(MultiContentEntryText(pos=(130, 5), size=(480, 30), font=0, text=eventname))
res.append(MultiContentEntryText(pos=(130, 33), size=(480, 25), font=1, text="Viewer: %s" % (usercount)))
res.append(MultiContentEntryText(pos=(130, 57), size=(480, 20), font=2, text="%s Uhr - %s" % (strftime("%d.%m.%Y %H:%M", gmtime(starttime)), channelname)))
return res
##############################
##### TV Charts MAIN #####
##############################
class TVChartsMain(Screen):
skin = """
<screen position="center,center" size="620,510" title="TV Charts">
<widget name="channellist" position="10,10" zPosition="1" size="600,458" scrollbarMode="showOnDemand" />
<widget name="info" position="0,447" zPosition="2" size="620,20" font="Regular;18" noWrap="1" foregroundColor="#ffffff" transparent="1" halign="center" valign="center" />
<ePixmap name="red" position="22,470" zPosition="3" size="140,40" pixmap="/usr/share/enigma2/skin_default/buttons/red.png" transparent="1" alphatest="on" />
<ePixmap name="green" position="167,470" zPosition="3" size="140,40" pixmap="/usr/share/enigma2/skin_default/buttons/green.png" transparent="1" alphatest="on" />
<ePixmap name="yellow" position="312,470" zPosition="3" size="140,40" pixmap="/usr/share/enigma2/skin_default/buttons/yellow.png" transparent="1" alphatest="on" />
<ePixmap name="blue" position="457,470" zPosition="3" size="140,40" pixmap="/usr/share/enigma2/skin_default/buttons/blue.png" transparent="1" alphatest="on" />
<widget name="key_red" position="22,470" zPosition="4" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" foregroundColor="white" shadowColor="black" shadowOffset="-1,-1" />
<widget name="key_green" position="167,470" zPosition="4" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" foregroundColor="white" shadowColor="black" shadowOffset="-1,-1" />
<widget name="key_yellow" position="312,470" zPosition="4" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" foregroundColor="white" shadowColor="black" shadowOffset="-1,-1" />
<widget name="key_blue" position="457,470" zPosition="4" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" foregroundColor="white" shadowColor="black" shadowOffset="-1,-1" />
</screen>"""
def __init__(self, session):
Screen.__init__(self, session)
self.session = session
self["channellist"] = ChannelListMenu([])
self["info"] = Label()
self["key_red"] = Button("TV Charts")
self["key_green"] = Button("Timer Charts")
self["key_yellow"] = Button("Movie Charts")
self["key_blue"] = Button("Settings")
self["actions"] = ActionMap(["OkCancelActions", "ColorActions", "EPGSelectActions"],
{
"ok": self.okClicked,
"red": self.switchToTVCharts,
"green": self.switchToTimerCharts,
"yellow": self.switchToMovieCharts,
"blue": self.SettingsMenu,
"info": self.ShowEventInfo,
"cancel": self.close
}, -1)
self.epgcache = eEPGCache.getInstance()
self.eventcache = []
self.RefreshTimer = eTimer()
self.RefreshTimer.callback.append(self.downloadList)
self.onLayoutFinish.append(self.firstPluginExec)
def firstPluginExec(self):
self.updateEventCache()
self.switchToTVCharts()
def okClicked(self):
current = self["channellist"].getCurrent()
if current is None:
return
if self.mode == "tvcharts":
service = eServiceReference(str(current[0][0]))
self.session.nav.playService(service)
elif self.mode == "timercharts":
serviceref = ServiceReference(current[0][0])
eventid = int(current[0][1])
event = self.getEventFromId(serviceref, eventid)
if event is not None:
newEntry = RecordTimerEntry(serviceref, *parseEvent(event), checkOldTimers=True, dirname=preferredTimerPath())
self.session.openWithCallback(self.addTimerCallback, TimerEntry, newEntry)
else:
self.session.open(MessageBox, "Sorry, no EPG Info available for this event", type=MessageBox.TYPE_ERROR, timeout=10)
elif self.mode == "moviecharts":
print "[TVCharts] ToDo: Show Movie Info here ..."
return
def addTimerCallback(self, answer):
if answer[0]:
entry = answer[1]
simulTimerList = self.session.nav.RecordTimer.record(entry)
if simulTimerList is not None:
for x in simulTimerList:
if x.setAutoincreaseEnd(entry):
self.session.nav.RecordTimer.timeChanged(x)
simulTimerList = self.session.nav.RecordTimer.record(entry)
if simulTimerList is not None:
self.session.openWithCallback(self.finishSanityCorrection, TimerSanityConflict, simulTimerList)
else:
print "Timeredit aborted"
def finishSanityCorrection(self, answer):
self.addTimerCallback(answer)
def SettingsMenu(self):
self.session.open(TVChartsSetup)
def ShowEventInfo(self):
current = self["channellist"].getCurrent()
if current is None:
return
serviceref = current[0][0]
eventid = current[0][1]
service = ServiceReference(serviceref)
event = self.getEventFromId(service, eventid)
if event is not None:
self.session.open(EventViewSimple, event, service)
def getEventFromId(self, service, eventid):
event = None
if self.epgcache is not None and eventid is not None:
event = self.epgcache.lookupEventId(service.ref, eventid)
return event
def updateEventCache(self):
try:
from Screens.ChannelSelection import service_types_tv
from Components.Sources.ServiceList import ServiceList
bouquetlist = ServiceList(eServiceReference(service_types_tv + ' FROM BOUQUET "bouquets.tv" ORDER BY bouquet'), validate_commands=False).getServicesAsList()
for bouquetitem in bouquetlist:
serviceHandler = eServiceCenter.getInstance()
list = serviceHandler.list(eServiceReference(str(bouquetitem[0])))
services = list and list.getContent('S')
search = ['IBDCTSERNX']
if services: # It's a Bouquet
search.extend([(service, 0, -1) for service in services])
events = self.epgcache.lookupEvent(search)
for eventinfo in events:
#0 eventID | 4 eventname | 5 short descr | 6 long descr | 7 serviceref | 8 channelname
self.eventcache.append((eventinfo[0], eventinfo[7], eventinfo[8], eventinfo[4]))
except Exception:
print "[TVCharts Plugin] Error creating eventcache!"
def switchToTVCharts(self):
self.mode = "tvcharts"
self.setTitle("TV Charts")
self["channellist"].setList([])
self.feedurl = "http://www.dreambox-plugins.de/feeds/topchannels.php"
self.downloadList()
def switchToTimerCharts(self):
self.mode = "timercharts"
self.setTitle("Timer Charts")
self["channellist"].setList([])
self.feedurl = "http://www.dreambox-plugins.de/feeds/toptimers.php?limit=%s" % config.plugins.tvcharts.maxtimerentries.value
self.downloadList()
def switchToMovieCharts(self):
self.mode = "moviecharts"
self.setTitle("Movie Charts")
self["channellist"].setList([])
self.feedurl = "http://www.dreambox-plugins.de/feeds/topmovies.php"
self.downloadList()
def downloadList(self):
if config.plugins.tvcharts.enabled.value:
self["info"].setText("Downloading feeds from server ...")
getPage(self.feedurl).addCallback(self.downloadListCallback).addErrback(self.downloadListError)
else:
self["info"].setText("Error: Plugin disabled in Settings ...")
def downloadListError(self, error=""):
print str(error)
self.session.open(MessageBox, "Error downloading Feed:\n%s" % str(error), type=MessageBox.TYPE_ERROR)
self["info"].setText("Error downloading Feed!")
def downloadListCallback(self, page=""):
self["info"].setText("Parsing Feeds ...")
channellist = []
channelcount = 0
useronline = 0
totalusers = 0
totaltimer = 0
totalmovies = 0
xml = parseString(page)
if self.mode == "tvcharts":
for node in xml.getElementsByTagName("DATA"):
useronline = int(node.getElementsByTagName("USERCOUNT")[0].childNodes[0].data)
totalusers = int(node.getElementsByTagName("TOTALUSERS")[0].childNodes[0].data)
for node in xml.getElementsByTagName("CHANNEL"):
event_id = None
inBouquet = False
channelname = str(node.getElementsByTagName("NAME")[0].childNodes[0].data)
serviceref = str(node.getElementsByTagName("SERVICEREF")[0].childNodes[0].data)
eventname = str(node.getElementsByTagName("EVENTNAME")[0].childNodes[0].data)
usercount = int(node.getElementsByTagName("USERCOUNT")[0].childNodes[0].data)
percent = int(node.getElementsByTagName("PERCENT")[0].childNodes[0].data)
# Look for favourite channel for this event in my bouqets
for sepginfo in self.eventcache:
if sepginfo[2] == channelname:
inBouquet = True
if sepginfo[3] == eventname:
event_id = sepginfo[0]
if sepginfo[3] == eventname and sepginfo[1] != serviceref:
if channelname[0:3].lower() == sepginfo[2][0:3].lower():
serviceref = sepginfo[1]
channelname = sepginfo[2]
inBouquet = True
break
elif sepginfo[3] == eventname and sepginfo[1] == serviceref:
break
# Skip Channels that are not in my bouquets
if config.plugins.tvcharts.bouquetfilter.value and not inBouquet:
continue
			# Limit the list to the configured maximum number of entries
channelcount += 1
if channelcount > config.plugins.tvcharts.maxentries.value:
break
# Add to List
channellist.append(ChannelListEntryComponent(self.mode, channelname, serviceref, event_id, eventname, 0, 0, usercount, percent))
if totalusers > 0:
self.setTitle("TV Charts (User online: %s of %s)" % (useronline, totalusers))
elif self.mode == "timercharts":
for node in xml.getElementsByTagName("DATA"):
totaltimer = int(node.getElementsByTagName("TIMERCOUNT")[0].childNodes[0].data)
for node in xml.getElementsByTagName("TIMER"):
eitID = int(node.getElementsByTagName("ID")[0].childNodes[0].data)
channelname = str(node.getElementsByTagName("CHANNELNAME")[0].childNodes[0].data)
serviceref = str(node.getElementsByTagName("SERVICEREF")[0].childNodes[0].data)
eventname = str(node.getElementsByTagName("EVENTNAME")[0].childNodes[0].data)
starttime = int(node.getElementsByTagName("STARTTIME")[0].childNodes[0].data)
endtime = int(node.getElementsByTagName("ENDTIME")[0].childNodes[0].data)
usercount = int(node.getElementsByTagName("USERCOUNT")[0].childNodes[0].data)
percent = int(node.getElementsByTagName("PERCENT")[0].childNodes[0].data)
# Look for favourite channel for this event in my bouqets
for sepginfo in self.eventcache:
if sepginfo[2] == channelname:
serviceref = sepginfo[1]
channelname = sepginfo[2]
inBouquet = True
break
# Add to List
channellist.append(ChannelListEntryComponent(self.mode, channelname, serviceref, eitID, eventname, starttime, endtime, usercount, percent))
if totaltimer > 0:
self.setTitle("Timer Charts (Total Timer: %s)" % (totaltimer))
elif self.mode == "moviecharts":
for node in xml.getElementsByTagName("DATA"):
totalmovies = int(node.getElementsByTagName("MOVIECOUNT")[0].childNodes[0].data)
for node in xml.getElementsByTagName("MOVIE"):
eventid = int(node.getElementsByTagName("EVENTID")[0].childNodes[0].data)
eventname = str(node.getElementsByTagName("EVENTNAME")[0].childNodes[0].data)
channelname = str(node.getElementsByTagName("CHANNELNAME")[0].childNodes[0].data)
serviceref = str(node.getElementsByTagName("SERVICEREF")[0].childNodes[0].data)
starttime = int(node.getElementsByTagName("STARTTIME")[0].childNodes[0].data)
usercount = int(node.getElementsByTagName("USERCOUNT")[0].childNodes[0].data)
# Add to List
channellist.append(ChannelListEntryComponent(self.mode, channelname, serviceref, eventid, eventname, starttime, 0, usercount, 0))
#if totalmovies > 0:
# self.setTitle("Movie Charts (Total Movies: %s)" % (totalmovies))
self["info"].setText("")
self["channellist"].setList(channellist)
self.RefreshTimer.start(60000, True)
############################
##### SETTINGS SCREEN #####
############################
class TVChartsSetup(Screen, ConfigListScreen):
def __init__(self, session):
Screen.__init__(self, session)
self.skinName = ["TVChartsSetup", "Setup"]
self.setup_title = _("TV Charts Settings")
self.onChangedEntry = []
self.list = []
ConfigListScreen.__init__(self, self.list, session=session, on_change=self.changedEntry)
self["actions"] = ActionMap(["SetupActions", "ColorActions"],
{
"ok": self.SaveSettings,
"green": self.SaveSettings,
"red": self.Exit,
"cancel": self.Exit
}, -2)
self["key_green"] = StaticText(_("OK"))
self["key_red"] = StaticText(_("Cancel"))
self.createSetup()
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(self.setup_title)
def createSetup(self):
self.list = [getConfigListEntry(_("TV Charts Plugin Enable"), config.plugins.tvcharts.enabled)]
if config.plugins.tvcharts.enabled.value:
self.list.extend((
getConfigListEntry(_("Max Toplist Entries"), config.plugins.tvcharts.maxentries),
getConfigListEntry(_("Max Timerlist Entries"), config.plugins.tvcharts.maxtimerentries),
getConfigListEntry(_("Enable Bouquet-Filter?"), config.plugins.tvcharts.bouquetfilter),
getConfigListEntry(_("Submit Timerlist?"), config.plugins.tvcharts.submittimers),
getConfigListEntry(_("Submit Pluginlist?"), config.plugins.tvcharts.submitplugins)
))
self["config"].list = self.list
self["config"].setList(self.list)
def keyLeft(self):
ConfigListScreen.keyLeft(self)
if self["config"].getCurrent()[1] == config.plugins.tvcharts.enabled:
self.createSetup()
def keyRight(self):
ConfigListScreen.keyRight(self)
if self["config"].getCurrent()[1] == config.plugins.tvcharts.enabled:
self.createSetup()
def changedEntry(self):
for x in self.onChangedEntry:
x()
def getCurrentEntry(self):
return self["config"].getCurrent()[0]
def getCurrentValue(self):
return str(self["config"].getCurrent()[1].getText())
def createSummary(self):
return SetupSummary
def SaveSettings(self):
config.plugins.tvcharts.save()
configfile.save()
self.close()
def Exit(self):
self.close()
##############################
##### UPDATE STATUS #####
##############################
class DBUpdateStatus(Screen):
def __init__(self, session):
Screen.__init__(self, session)
self.DBStatusTimer = eTimer()
self.DBStatusTimer.callback.append(self.updateStatus)
self.__event_tracker = ServiceEventTracker(screen=self, eventmap={
iPlayableService.evUpdatedInfo: self.restartTimer,
iPlayableService.evUpdatedEventInfo: self.restartTimer
})
self.recordtimer = session.nav.RecordTimer
self.NetworkConnectionAvailable = False
self.LastTimerlistUpdate = 0
self.timerlist = ""
self.pluginlist = ""
self.onShow.append(self.restartTimer)
def restartTimer(self):
if self.NetworkConnectionAvailable:
self.DBStatusTimer.stop()
self.DBStatusTimer.start((randint(15, 60)) * 1000, True)
else:
iNetwork.checkNetworkState(self.checkNetworkCB)
def checkNetworkCB(self, data):
if data is not None:
if data <= 2:
self.NetworkConnectionAvailable = True
self.restartTimer()
else:
self.NetworkConnectionAvailable = False
self.DBStatusTimer.stop()
def updateStatus(self):
print "[TVCharts] Status Update ..."
self.DBStatusTimer.stop()
if not config.plugins.tvcharts.enabled.value or Screens.Standby.inStandby:
return
# Get Channelname
sref = self.session.nav.getCurrentlyPlayingServiceReference()
if sref is not None:
ref = eServiceReference(sref.toString())
ref.setName("")
serviceHandler = eServiceCenter.getInstance()
info = serviceHandler.info(ref)
channel_name = info and info.getName(ref).replace('\xc2\x86', '').replace('\xc2\x87', '').decode("utf-8", "ignore").encode("utf-8") or ""
self.serviceref = ref.toString()
else:
channel_name = ""
self.serviceref = ""
# Get Event Info
service = self.session.nav.getCurrentService()
info = service and service.info()
event = info and info.getEvent(0)
event_name = event and event.getEventName() or ""
event_description = ""
event_begin = 0
if event is not None:
curEvent = parseEvent(event)
event_begin = int(curEvent[0]) + (config.recording.margin_before.getValue() * 60)
event_description = event.getExtendedDescription()
# Get Box Info
self.BoxID = iNetwork.getAdapterAttribute("eth0", "mac")
self.DeviceName = HardwareInfo().get_device_name()
try:
from enigma import getEnigmaVersionString
from boxbranding import getImageVersion, getImageBuild
self.EnigmaVersion = getEnigmaVersionString()
self.ImageVersion = getImageVersion() + '.' + getImageBuild()
except:
self.EnigmaVersion = about.getEnigmaVersionString()
self.ImageVersion = about.getVersionString()
# Get TimerList
self.timerlist = ""
if config.plugins.tvcharts.submittimers.value and self.LastTimerlistUpdate <= (time() - 1800):
self.LastTimerlistUpdate = time()
try:
for timer in self.recordtimer.timer_list:
if timer.disabled == 0 and timer.justplay == 0:
self.timerlist += "%s|%s|%s|%s|%s|%s|%s\n" % (timer.eit, str(int(timer.begin) + (config.recording.margin_before.getValue() * 60)), str(int(timer.end) - (config.recording.margin_after.getValue() * 60)), str(timer.service_ref), timer.name, timer.service_ref.getServiceName().replace('\xc2\x86', '').replace('\xc2\x87', '').decode("utf-8", "ignore").encode("utf-8"), timer.repeated)
except Exception:
print "[TVCharts] Error loading timers!"
# Get Pluginlist
if config.plugins.tvcharts.submitplugins.value and self.pluginlist == "":
try:
os_system("opkg list_installed | grep enigma2-plugin- > /tmp/plugins.txt")
for plugin in open('/tmp/plugins.txt', 'r'):
self.pluginlist += plugin[0:plugin.find(' - ')] + "\n"
os_system("rm -f /tmp/plugins.txt")
except Exception:
print "[TVCharts] Error loading plugins!"
# Status Update
getPage(url='http://www.dreambox-plugins.de/feeds/TVCharts/status.php', agent="Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)", timeout=60, method='POST', headers={'Content-Type': 'application/x-www-form-urlencoded'}, postdata=urlencode({'boxid': self.BoxID, 'devicename': self.DeviceName, 'imageversion': self.ImageVersion, 'enigmaversion': self.EnigmaVersion, 'lastchannel': channel_name, 'lastevent': event_name, 'eventdescr': event_description, 'lastbegin': event_begin, 'lastserviceref': self.serviceref, 'timerlist': self.timerlist, 'pluginlist': self.pluginlist})).addErrback(self.updateError)
# Restart Timer
self.DBStatusTimer.start(900000, True)
def updateError(self, error=""):
self.NetworkConnectionAvailable = False
self.DBStatusTimer.stop()
#############################
##### INIT PLUGIN #####
#############################
def main(session, **kwargs):
session.open(TVChartsMain)
def autostart(reason, **kwargs):
global session
if "session" in kwargs:
session = kwargs["session"]
DBUpdateStatus(session)
def Plugins(path, **kwargs):
return [
PluginDescriptor(where=[PluginDescriptor.WHERE_SESSIONSTART], fnc=autostart),
PluginDescriptor(name="TV Charts", description="TV Charts Plugin", icon="plugin.png", where=PluginDescriptor.WHERE_EXTENSIONSMENU, fnc=main),
PluginDescriptor(name="TV Charts", description="TV Charts Plugin", icon="plugin.png", where=PluginDescriptor.WHERE_PLUGINMENU, fnc=main)]
| 40.121667 | 613 | 0.714992 |
4a21ddc71170b072e0e97b22dbd0638ec787a5d1 | 22,257 | py | Python | TopicModel/embedded_topic_model/models/etm.py | xding2/Pipline-for-TVNews | a8473cbc65f11c8e964d44132aa0e586d05669f4 | [
"MIT"
] | 16 | 2021-04-07T09:21:32.000Z | 2022-03-21T17:05:29.000Z | TopicModel/embedded_topic_model/models/etm.py | xding2/Pipline-for-TVNews | a8473cbc65f11c8e964d44132aa0e586d05669f4 | [
"MIT"
] | 4 | 2021-02-01T04:33:04.000Z | 2021-08-08T16:50:52.000Z | TopicModel/embedded_topic_model/models/etm.py | xding2/Pipline-for-TVNews | a8473cbc65f11c8e964d44132aa0e586d05669f4 | [
"MIT"
] | 3 | 2021-09-29T09:18:46.000Z | 2022-03-06T00:30:55.000Z | from __future__ import print_function
import torch
import numpy as np
import os
import math
from typing import List
from torch import optim
from gensim.models import KeyedVectors
from embedded_topic_model.models.model import Model
from embedded_topic_model.utils import data
from embedded_topic_model.utils import embedding
from embedded_topic_model.utils import metrics
class ETM(object):
"""
Creates an embedded topic model instance. The model hyperparameters are:
vocabulary (list of str): training dataset vocabulary
embeddings (str or KeyedVectors): KeyedVectors instance containing word-vector mapping for embeddings, or its path
    use_c_format_w2vec (bool): whether input embeddings use word2vec C format. Both BIN and TXT formats are supported
model_path (str): path to save trained model. If None, the model won't be automatically saved
batch_size (int): input batch size for training
num_topics (int): number of topics
rho_size (int): dimension of rho
emb_size (int): dimension of embeddings
t_hidden_size (int): dimension of hidden space of q(theta)
    theta_act (str): theta activation function (tanh, softplus, relu, rrelu, leakyrelu, elu, selu, glu)
    train_embeddings (bool): whether to fix rho or train it
lr (float): learning rate
lr_factor (float): divide learning rate by this...
    epochs (int): number of epochs to train (e.g. 150 for 20 Newsgroups, 100 for other corpora)
optimizer_type (str): choice of optimizer
    seed (int): random seed (default: 2019)
enc_drop (float): dropout rate on encoder
clip (float): gradient clipping
nonmono (int): number of bad hits allowed
wdecay (float): some l2 regularization
anneal_lr (bool): whether to anneal the learning rate or not
bow_norm (bool): normalize the bows or not
num_words (int): number of words for topic viz
log_interval (int): when to log training
visualize_every (int): when to visualize results
eval_batch_size (int): input batch size for evaluation
eval_perplexity (bool): whether to compute perplexity on document completion task
    debug_mode (bool): whether or not to log model operations
"""
def __init__(
self,
vocabulary,
embeddings=None,
use_c_format_w2vec=False,
model_path=None,
batch_size=1000,
num_topics=50,
rho_size=300,
emb_size=300,
t_hidden_size=800,
theta_act='relu',
train_embeddings=False,
lr=0.005,
lr_factor=4.0,
epochs=20,
optimizer_type='adam',
seed=2019,
enc_drop=0.0,
clip=0.0,
nonmono=10,
wdecay=1.2e-6,
anneal_lr=False,
bow_norm=True,
num_words=10,
log_interval=2,
visualize_every=10,
eval_batch_size=1000,
eval_perplexity=False,
debug_mode=False,
):
self.vocabulary = vocabulary
self.vocabulary_size = len(self.vocabulary)
self.model_path = model_path
self.batch_size = batch_size
self.num_topics = num_topics
self.rho_size = rho_size
self.emb_size = emb_size
self.t_hidden_size = t_hidden_size
self.theta_act = theta_act
self.lr_factor = lr_factor
self.epochs = epochs
self.seed = seed
self.enc_drop = enc_drop
self.clip = clip
self.nonmono = nonmono
self.anneal_lr = anneal_lr
self.bow_norm = bow_norm
self.num_words = num_words
self.log_interval = log_interval
self.visualize_every = visualize_every
self.eval_batch_size = eval_batch_size
self.eval_perplexity = eval_perplexity
self.debug_mode = debug_mode
self.device = torch.device(
'cuda' if torch.cuda.is_available() else 'cpu')
np.random.seed(self.seed)
torch.manual_seed(self.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(self.seed)
self.embeddings = None if train_embeddings else self._initialize_embeddings(
embeddings, use_c_format_w2vec=use_c_format_w2vec)
self.model = Model(
self.device,
self.num_topics,
self.vocabulary_size,
self.t_hidden_size,
self.rho_size,
self.emb_size,
self.theta_act,
self.embeddings,
train_embeddings,
self.enc_drop,
self.debug_mode).to(
self.device)
self.optimizer = self._get_optimizer(optimizer_type, lr, wdecay)
def __str__(self):
return f'{self.model}'
def _get_extension(self, path):
assert isinstance(path, str), 'path extension is not str'
filename = path.split(os.path.sep)[-1]
return filename.split('.')[-1]
def _get_embeddings_from_original_word2vec(self, embeddings_file):
if self._get_extension(embeddings_file) == 'txt':
if self.debug_mode:
print('Reading embeddings from original word2vec TXT file...')
vectors = {}
iterator = embedding.MemoryFriendlyFileIterator(embeddings_file)
for line in iterator:
word = line[0]
if word in self.vocabulary:
vect = np.array(line[1:]).astype(np.float)
vectors[word] = vect
return vectors
elif self._get_extension(embeddings_file) == 'bin':
if self.debug_mode:
print('Reading embeddings from original word2vec BIN file...')
return KeyedVectors.load_word2vec_format(
embeddings_file,
binary=True
)
else:
raise Exception('Original Word2Vec file without BIN/TXT extension')
def _initialize_embeddings(
self,
embeddings,
use_c_format_w2vec=False
):
vectors = embeddings if isinstance(embeddings, KeyedVectors) else {}
if use_c_format_w2vec:
vectors = self._get_embeddings_from_original_word2vec(embeddings)
elif isinstance(embeddings, str):
if self.debug_mode:
print('Reading embeddings from word2vec file...')
vectors = KeyedVectors.load(embeddings, mmap='r')
model_embeddings = np.zeros((self.vocabulary_size, self.emb_size))
for i, word in enumerate(self.vocabulary):
try:
model_embeddings[i] = vectors[word]
except KeyError:
model_embeddings[i] = np.random.normal(
scale=0.6, size=(self.emb_size, ))
return torch.from_numpy(model_embeddings).to(self.device)
def _get_optimizer(self, optimizer_type, learning_rate, wdecay):
if optimizer_type == 'adam':
return optim.Adam(
self.model.parameters(),
lr=learning_rate,
weight_decay=wdecay)
elif optimizer_type == 'adagrad':
return optim.Adagrad(
self.model.parameters(),
lr=learning_rate,
weight_decay=wdecay)
elif optimizer_type == 'adadelta':
return optim.Adadelta(
self.model.parameters(),
lr=learning_rate,
weight_decay=wdecay)
elif optimizer_type == 'rmsprop':
return optim.RMSprop(
self.model.parameters(),
lr=learning_rate,
weight_decay=wdecay)
elif optimizer_type == 'asgd':
return optim.ASGD(
self.model.parameters(),
lr=learning_rate,
t0=0,
lambd=0.,
weight_decay=wdecay)
else:
if self.debug_mode:
print('Defaulting to vanilla SGD')
return optim.SGD(self.model.parameters(), lr=learning_rate)
def _set_training_data(self, train_data):
self.train_tokens = train_data['tokens']
self.train_counts = train_data['counts']
self.num_docs_train = len(self.train_tokens)
def _set_test_data(self, test_data):
self.test_tokens = test_data['test']['tokens']
self.test_counts = test_data['test']['counts']
self.num_docs_test = len(self.test_tokens)
self.test_1_tokens = test_data['test1']['tokens']
self.test_1_counts = test_data['test1']['counts']
self.num_docs_test_1 = len(self.test_1_tokens)
self.test_2_tokens = test_data['test2']['tokens']
self.test_2_counts = test_data['test2']['counts']
self.num_docs_test_2 = len(self.test_2_tokens)
def _train(self, epoch):
self.model.train()
acc_loss = 0
acc_kl_theta_loss = 0
cnt = 0
indices = torch.randperm(self.num_docs_train)
indices = torch.split(indices, self.batch_size)
for idx, ind in enumerate(indices):
self.optimizer.zero_grad()
self.model.zero_grad()
data_batch = data.get_batch(
self.train_tokens,
self.train_counts,
ind,
self.vocabulary_size,
self.device)
sums = data_batch.sum(1).unsqueeze(1)
if self.bow_norm:
normalized_data_batch = data_batch / sums
else:
normalized_data_batch = data_batch
recon_loss, kld_theta = self.model(
data_batch, normalized_data_batch)
total_loss = recon_loss + kld_theta
total_loss.backward()
if self.clip > 0:
torch.nn.utils.clip_grad_norm_(
self.model.parameters(), self.clip)
self.optimizer.step()
acc_loss += torch.sum(recon_loss).item()
acc_kl_theta_loss += torch.sum(kld_theta).item()
cnt += 1
if idx % self.log_interval == 0 and idx > 0:
cur_loss = round(acc_loss / cnt, 2)
cur_kl_theta = round(acc_kl_theta_loss / cnt, 2)
cur_real_loss = round(cur_loss + cur_kl_theta, 2)
cur_loss = round(acc_loss / cnt, 2)
cur_kl_theta = round(acc_kl_theta_loss / cnt, 2)
cur_real_loss = round(cur_loss + cur_kl_theta, 2)
if self.debug_mode:
print('Epoch {} - Learning Rate: {} - KL theta: {} - Rec loss: {} - NELBO: {}'.format(
epoch, self.optimizer.param_groups[0]['lr'], cur_kl_theta, cur_loss, cur_real_loss))
def _perplexity(self, test_data) -> float:
"""Computes perplexity on document completion for a given testing data.
The document completion task is described on the original ETM's article: https://arxiv.org/pdf/1907.04907.pdf
Parameters:
===
test_data (dict): BOW testing dataset, split in tokens and counts and used for perplexity
Returns:
===
float: perplexity score on document completion task
"""
self._set_test_data(test_data)
self.model.eval()
with torch.no_grad():
# get \beta here
beta = self.model.get_beta()
# do dc here
acc_loss = 0
cnt = 0
indices_1 = torch.split(
torch.tensor(
range(
self.num_docs_test_1)),
self.eval_batch_size)
for idx, ind in enumerate(indices_1):
# get theta from first half of docs
data_batch_1 = data.get_batch(
self.test_1_tokens,
self.test_1_counts,
ind,
self.vocabulary_size,
self.device)
sums_1 = data_batch_1.sum(1).unsqueeze(1)
if self.bow_norm:
normalized_data_batch_1 = data_batch_1 / sums_1
else:
normalized_data_batch_1 = data_batch_1
theta, _ = self.model.get_theta(normalized_data_batch_1)
# get prediction loss using second half
data_batch_2 = data.get_batch(
self.test_2_tokens,
self.test_2_counts,
ind,
self.vocabulary_size,
self.device)
sums_2 = data_batch_2.sum(1).unsqueeze(1)
res = torch.mm(theta, beta)
preds = torch.log(res)
recon_loss = -(preds * data_batch_2).sum(1)
loss = recon_loss / sums_2.squeeze()
loss = loss.mean().item()
acc_loss += loss
cnt += 1
cur_loss = acc_loss / cnt
ppl_dc = round(math.exp(cur_loss), 1)
if self.debug_mode:
print(f'Document Completion Task Perplexity: {ppl_dc}')
return ppl_dc
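    # The value returned above is the standard document-completion perplexity:
    #   ppl_dc = exp( mean over batches of mean_d [ -(1/N_d) * sum_w x_dw * log(theta_d . beta)_w ] )
    # where theta_d is inferred from the first half of document d, x_dw are the word
    # counts of its second half and N_d is the length of that half.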
def get_topics(self, top_n_words=10) -> List[str]:
"""
Gets topics. By default, returns the 10 most relevant terms for each topic.
Parameters:
===
top_n_words (int): number of top words per topic to return
Returns:
===
list of str: topic list
"""
with torch.no_grad():
topics = []
gammas = self.model.get_beta()
for k in range(self.num_topics):
gamma = gammas[k]
top_words = list(gamma.cpu().numpy().argsort()
[-top_n_words:][::-1])
topic_words = [self.vocabulary[a] for a in top_words]
topics.append(topic_words)
return topics
def get_most_similar_words(self, queries, n_most_similar=20) -> dict:
"""
        Gets the nearest neighboring words for a list of tokens. By default, returns the 20 most similar words for each token in 'queries' array.
Parameters:
===
queries (list of str): words to find similar ones
n_most_similar (int): number of most similar words to get for each word given in the input. By default is 20
Returns:
===
dict of (str, list of str): dictionary containing the mapping between query words given and their respective similar words
"""
self.model.eval()
# visualize word embeddings by using V to get nearest neighbors
with torch.no_grad():
try:
self.embeddings = self.model.rho.weight # Vocab_size x E
except BaseException:
self.embeddings = self.model.rho # Vocab_size x E
neighbors = {}
for word in queries:
neighbors[word] = metrics.nearest_neighbors(
word, self.embeddings, self.vocabulary, n_most_similar)
return neighbors
def fit(self, train_data, test_data=None):
"""
Trains the model with the given training data.
Optionally receives testing data for perplexity calculation. The testing data is
only used if the 'eval_perplexity' model parameter is True.
Parameters:
===
train_data (dict): BOW training dataset, split in tokens and counts
test_data (dict): optional. BOW testing dataset, split in tokens and counts. Used for perplexity calculation, if activated
Returns:
===
self (ETM): the instance itself
"""
self._set_training_data(train_data)
best_val_ppl = 1e9
all_val_ppls = []
if self.debug_mode:
print(f'Topics before training: {self.get_topics()}')
for epoch in range(1, self.epochs):
self._train(epoch)
if self.eval_perplexity:
val_ppl = self._perplexity(
test_data)
if val_ppl < best_val_ppl:
if self.model_path is not None:
self._save_model(self.model_path)
best_val_ppl = val_ppl
else:
# check whether to anneal lr
lr = self.optimizer.param_groups[0]['lr']
if self.anneal_lr and (len(all_val_ppls) > self.nonmono and val_ppl > min(
all_val_ppls[:-self.nonmono]) and lr > 1e-5):
self.optimizer.param_groups[0]['lr'] /= self.lr_factor
all_val_ppls.append(val_ppl)
if self.debug_mode and (epoch % self.visualize_every == 0):
print(f'Topics: {self.get_topics()}')
if self.model_path is not None:
self._save_model(self.model_path)
if self.eval_perplexity and self.model_path is not None:
self._load_model(self.model_path)
val_ppl = self._perplexity(train_data)
return self
def get_topic_word_matrix(self) -> List[List[str]]:
"""
Obtains the topic-word matrix learned for the model.
The topic-word matrix lists all words for each discovered topic.
As such, this method will return a matrix representing the words.
Returns:
===
list of list of str: topic-word matrix.
Example:
[['world', 'planet', 'stars', 'moon', 'astrophysics'], ...]
"""
self.model = self.model.to(self.device)
self.model.eval()
with torch.no_grad():
beta = self.model.get_beta()
topics = []
for i in range(self.num_topics):
words = list(beta[i].cpu().numpy())
topic_words = [self.vocabulary[a] for a, _ in enumerate(words)]
topics.append(topic_words)
return topics
def get_topic_word_dist(self) -> torch.Tensor:
"""
Obtains the topic-word distribution matrix.
The topic-word distribution matrix lists the probabilities for each word on each topic.
This is a normalized distribution matrix, and as such, each row sums to one.
Returns:
===
torch.Tensor: topic-word distribution matrix, with KxV dimension, where
K is the number of topics and V is the vocabulary size
Example:
tensor([[3.2238e-04, 3.7851e-03, 3.2811e-04, ..., 8.4206e-05, 7.9504e-05,
4.0738e-04],
[3.6089e-05, 3.0677e-03, 1.3650e-04, ..., 4.5665e-05, 1.3241e-04,
5.8661e-05]])
"""
self.model = self.model.to(self.device)
self.model.eval()
with torch.no_grad():
return self.model.get_beta()
def get_document_topic_dist(self) -> torch.Tensor:
"""
Obtains the document-topic distribution matrix.
The document-topic distribution matrix lists the probabilities for each topic on each document.
This is a normalized distribution matrix, and as such, each row sums to one.
Returns:
===
        torch.Tensor: document-topic distribution matrix, with DxK dimension, where
D is the number of documents in the corpus and K is the number of topics
Example:
tensor([[0.1840, 0.0489, 0.1020, 0.0726, 0.1952, 0.1042, 0.1275, 0.1657],
[0.1417, 0.0918, 0.2263, 0.0840, 0.0900, 0.1635, 0.1209, 0.0817]])
"""
self.model = self.model.to(self.device)
self.model.eval()
with torch.no_grad():
indices = torch.tensor(range(self.num_docs_train))
indices = torch.split(indices, self.batch_size)
thetas = []
for idx, ind in enumerate(indices):
data_batch = data.get_batch(
self.train_tokens,
self.train_counts,
ind,
self.vocabulary_size,
self.device)
sums = data_batch.sum(1).unsqueeze(1)
normalized_data_batch = data_batch / sums if self.bow_norm else data_batch
theta, _ = self.model.get_theta(normalized_data_batch)
thetas.append(theta)
return torch.cat(tuple(thetas), 0)
def get_topic_coherence(self, top_n=10) -> float:
"""
Calculates NPMI topic coherence for the model.
By default, considers the 10 most relevant terms for each topic in coherence computation.
Parameters:
===
top_n (int): number of words per topic to consider in coherence computation
Returns:
===
float: the model's topic coherence
"""
self.model = self.model.to(self.device)
self.model.eval()
with torch.no_grad():
beta = self.model.get_beta().data.cpu().numpy()
return metrics.get_topic_coherence(
beta, self.train_tokens, self.vocabulary, top_n)
def get_topic_diversity(self, top_n=25) -> float:
"""
Calculates topic diversity for the model.
By default, considers the 25 most relevant terms for each topic in diversity computation.
Parameters:
===
top_n (int): number of words per topic to consider in diversity computation
Returns:
===
float: the model's topic diversity
"""
self.model = self.model.to(self.device)
self.model.eval()
with torch.no_grad():
beta = self.model.get_beta().data.cpu().numpy()
return metrics.get_topic_diversity(beta, top_n)
def _save_model(self, model_path):
assert self.model is not None, \
'no model to save'
if not os.path.exists(model_path):
os.makedirs(os.path.dirname(model_path), exist_ok=True)
with open(model_path, 'wb') as file:
torch.save(self.model, file)
def _load_model(self, model_path):
assert os.path.exists(model_path), \
"model path doesn't exists"
with open(model_path, 'rb') as file:
self.model = torch.load(file)
self.model = self.model.to(self.device)
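# A minimal usage sketch (not part of the original module): `vocabulary` is a list of
# str and `train_bow` a dict with 'tokens' and 'counts' keys, as consumed by fit()
# above; the embeddings path is a hypothetical gensim KeyedVectors file.
def _example_usage(vocabulary, train_bow):
    etm = ETM(
        vocabulary,
        embeddings="word2vec.kv",  # hypothetical path
        num_topics=50,
        epochs=100,
        debug_mode=True,
    )
    etm.fit(train_bow)
    topics = etm.get_topics(top_n_words=10)
    coherence = etm.get_topic_coherence()
    return topics, coherence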
| 36.249186 | 148 | 0.57856 |
4a21de942f81d6e5e434fc68dd817e7d47749ae0 | 500 | py | Python | env/lib/python3.8/site-packages/plotly/validators/layout/xaxis/tickfont/_size.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | env/lib/python3.8/site-packages/plotly/validators/layout/xaxis/tickfont/_size.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-08-09T02:30:14.000Z | 2022-03-12T00:50:14.000Z | env/lib/python3.8/site-packages/plotly/validators/layout/xaxis/tickfont/_size.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-07-12T16:18:07.000Z | 2022-02-05T16:48:35.000Z | import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="size", parent_name="layout.xaxis.tickfont", **kwargs
):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "ticks"),
min=kwargs.pop("min", 1),
role=kwargs.pop("role", "style"),
**kwargs
)
| 31.25 | 79 | 0.618 |
4a21deddfeed45b923b6877ae4e1faf3ec0969ff | 1,478 | py | Python | Code Bundle/Chapter06/test_simple2.py | ghanigreen/pytest_code | dbdcc322b3469c62ad328043060518edf2b2d83f | [
"MIT"
] | 46 | 2018-06-28T04:40:08.000Z | 2022-02-14T05:36:48.000Z | Code Bundle/Chapter06/test_simple2.py | ghanigreen/pytest_code | dbdcc322b3469c62ad328043060518edf2b2d83f | [
"MIT"
] | null | null | null | Code Bundle/Chapter06/test_simple2.py | ghanigreen/pytest_code | dbdcc322b3469c62ad328043060518edf2b2d83f | [
"MIT"
] | 22 | 2018-06-10T23:20:29.000Z | 2022-02-24T06:47:18.000Z |
import csv
import shutil
import tempfile
import unittest
from collections import namedtuple
from pathlib import Path
import pytest
DATA = """
Main Grid,48,44
2nd Grid,24,21
3rd Grid,24,48
"""
GridData = namedtuple("GridData", "name total_cells active_cells")
def convert_size(s):
return int(s)
def parse_grid_data(fields):
return GridData(
name=str(fields[0]),
total_cells=convert_size(fields[1]),
active_cells=convert_size(fields[2]),
)
def iter_grids_from_csv(path):
with path.open() as f:
for fields in csv.reader(f.readlines()):
yield parse_grid_data(fields)
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.temp_dir = Path(tempfile.mkdtemp())
cls.filepath = cls.temp_dir / "data.csv"
cls.filepath.write_text(DATA.strip())
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.temp_dir)
def setUp(self):
self.grids = list(iter_grids_from_csv(self.filepath))
def test_read_properties(self):
assert self.grids[0] == GridData("Main Grid", 48, 44)
assert self.grids[1] == GridData("2nd Grid", 24, 21)
assert self.grids[2] == GridData("3rd Grid", 24, 48)
def test_invalid_path(self):
with pytest.raises(IOError):
list(iter_grids_from_csv(Path("invalid file")))
@unittest.expectedFailure
def test_write_properties(self):
self.fail("not implemented yet")
| 23.09375 | 66 | 0.665765 |
4a21df1ab3dd04f64521fd0c90c008745340f7ea | 614 | py | Python | verification/domain/models/comment.py | DhivakharVenkatachalam/snet-marketplace-service | 6aee606bc9b00d418caeae26c64deae03792e0ce | [
"MIT"
] | 14 | 2019-02-12T09:14:52.000Z | 2021-03-11T18:42:22.000Z | verification/domain/models/comment.py | prashantramangupta/snet-marketplace-service | 7c293054e4b0207deefecc46defd743c064472a4 | [
"MIT"
] | 1,079 | 2019-01-10T04:31:24.000Z | 2022-03-29T06:16:42.000Z | verification/domain/models/comment.py | prashantramangupta/snet-marketplace-service | 7c293054e4b0207deefecc46defd743c064472a4 | [
"MIT"
] | 20 | 2018-12-18T13:06:41.000Z | 2021-09-17T11:13:01.000Z | class Comment:
def __init__(self, comment, created_by, created_at):
self.__comment = comment
self.__created_by = created_by
self.__created_at = created_at
def to_dict(self):
comment_dict = {
"comment": self.__comment,
"created_by": self.__created_by,
"created_at": self.__created_at
}
return comment_dict
@property
def comment(self):
return self.__comment
@property
def created_by(self):
return self.__created_by
@property
def created_at(self):
return self.__created_at
| 23.615385 | 56 | 0.614007 |
4a21e02fc2e287db1b05c4ac5b7be847ad0743f2 | 8,115 | py | Python | kdeepmodel/train_npy_model.py | ktdiedrich/kdeepmodel | c59e3ff0a6d4a30b19213bfcb587d2013a6b7549 | [
"Apache-2.0"
] | null | null | null | kdeepmodel/train_npy_model.py | ktdiedrich/kdeepmodel | c59e3ff0a6d4a30b19213bfcb587d2013a6b7549 | [
"Apache-2.0"
] | null | null | null | kdeepmodel/train_npy_model.py | ktdiedrich/kdeepmodel | c59e3ff0a6d4a30b19213bfcb587d2013a6b7549 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
"""Train model to classify images
* Default hyper-parameters set in parameter dictionary
* Override default hyper-parameters with command line or web page arguments
see: Python flask https://palletsprojects.com/p/flask/
see: Javascript React https://reactjs.org/
* Dictionary of current training hyper-parameters saved to JSON in output directory with model
* Training output and or saves intermediate images and graphs for debugging and optimization,
see: Tensorboard https://www.tensorflow.org/guide
see: https://seaborn.pydata.org/
* Optimize hyper-parameters with genetic algorithms
see: https://github.com/handcraftsman/GeneticAlgorithmsWithPython/
* Inference with another script with command line or web-page arguments
* Sample data https://www.kaggle.com/simjeg/lymphoma-subtype-classification-fl-vs-cll/
Karl Diedrich, PhD <[email protected]>
"""
import os
import numpy as np
import matplotlib
matplotlib.use('Agg') # write plots to PNG files
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from keras import layers
from keras import models, optimizers
import json
from keras.applications import ResNet50V2
param = dict()
param['test_size'] = 0.2
param['show'] = False
param['print'] = False
param['epochs'] = 40
param['batch_size'] = 32
param['output_dir'] = '.'
param['model_output_name'] = "trained_model.h5"
param['figure_name'] = 'training_history.png'
param['validation_split'] = 0.2
param['figure_size'] = (9, 9)
param['learning_rate'] = 2e-5
param['dropout'] = 0.5
def normalize(data):
return data/data.max()
def prepare_data(x_input, y_ground, test_size, shuffle=True, prep_x_func=None):
"""Load NPY format training and ground truth
:return: (X_train, X_test, Y_train, Y_test)
"""
X = np.load(x_input).astype(np.float)
Y = np.load(y_ground).astype(np.float)
print("X: {} {}".format(X.shape, X.dtype))
print("Y: {} {}".format(Y.shape, Y.dtype))
if prep_x_func is not None:
X = prep_x_func(X)
Y_labels = to_categorical(Y)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y_labels, shuffle=shuffle, test_size=test_size)
return (X_train, X_test, Y_train, Y_test)
def create_model(input_shape, output_shape, dropout=0.5):
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=input_shape))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dropout(rate=dropout))
model.add(layers.Dense(output_shape, activation='softmax'))
return model
def feature_prediction_model(input_shape, output_shape, dropout=0.5):
model = models.Sequential()
model.add(layers.Dense(256, activation="relu", input_dim=input_shape[0]))
model.add(layers.Dropout(dropout))
model.add(layers.Dense(output_shape, activation="softmax"))
return model
def extract_features(data):
conv_base = ResNet50V2(include_top=False, weights="imagenet", input_shape=data[0].shape)
features = conv_base.predict(data)
features = np.reshape(features, (len(features), np.prod(features[0].shape)))
return features
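# Each image is thus reduced to a flat ResNet50V2 feature vector of length
# prod(conv_output_shape); these vectors are what feature_prediction_model()
# consumes (its first Dense layer uses input_dim=input_shape[0]).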
def plot_history(history, ax, title, label):
epochs = range(0, len(history))
plot_ax = sns.scatterplot(x=epochs, y=history, ax=ax)
plot_ax.set_title("{}".format(title))
plot_ax.set_xlabel("epochs")
plot_ax.set_ylabel(label)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Load NPY format image and ground truth data for model training.')
parser.add_argument('x_input', type=str, help='X input data')
parser.add_argument("y_ground", type=str, help='Y target ground truth')
parser.add_argument("--output_dir", "-o", type=str, required=False, default=param['output_dir'],
help="output directory, default {}".format(param['output_dir']))
parser.add_argument("--test_size", "-t", type=float, action="store", default=param['test_size'], required=False,
help="test proportion size, default {}".format(param['test_size']))
parser.add_argument("--epochs", "-e", type=int, action="store", help="epochs, default {}".format(param['epochs']),
default=param['epochs'], required=False)
parser.add_argument("--batch_size", "-b", type=int, action="store", default=param['batch_size'], required=False,
help="batch size, default {}".format(param['batch_size']))
parser.add_argument("--show", "-s", action="store_true", default=param['show'], required=False,
help="show example images, default {}".format(param['show']))
parser.add_argument("--print", "-p", action="store_true", default=param['print'], required=False,
help="print statements for development and debugging, default {}".format(param['print']))
args = parser.parse_args()
param['x_input'] = args.x_input
param['y_ground'] = args.y_ground
param['test_size'] = args.test_size
param['epochs'] = args.epochs
param['batch_size'] = args.batch_size
param['show'] = args.show
param['print'] = args.print
param['output_dir'] = args.output_dir
#X_train, X_test, Y_train, Y_test = prepare_data(param['x_input'], param['y_ground'], test_size=param['test_size'],
# prep_x_func=normalize)
X_train, X_test, Y_train, Y_test = prepare_data(param['x_input'], param['y_ground'], test_size=param['test_size'],
prep_x_func=extract_features)
param['input_shape'] = X_train[0].shape
param['output_shape'] = Y_train.shape[1]
# model = create_model(input_shape=param['input_shape'], output_shape=param['output_shape'], dropout=param['dropout'])
model = feature_prediction_model(input_shape=param['input_shape'], output_shape=param['output_shape'], dropout=param['dropout'])
if args.show:
plt.imshow(X_train[0])
plt.show()
if args.print:
print("X train: {}, X test: {}, Y train: {}, Y test: {}".format(X_train.shape, X_test.shape,
Y_train.shape, Y_test.shape))
print("Y: {}".format(Y_train[0:10]))
model.summary()
model.compile(optimizer=optimizers.RMSprop(learning_rate=param['learning_rate']),
loss='categorical_crossentropy',
metrics=['accuracy'])
if not os.path.exists(param['output_dir']):
os.makedirs(param['output_dir'])
with open(os.path.join(param['output_dir'], 'param.json'), 'w') as fp:
json.dump(param, fp)
callbacks = model.fit(X_train, Y_train, epochs=param['epochs'], batch_size=param['batch_size'],
validation_split=param['validation_split'])
test_loss, test_acc = model.evaluate(X_test, Y_test)
print("test loss {}, accuracy {}".format(test_loss, test_acc))
model.save(os.path.join(param['output_dir'], param['model_output_name']))
fig, axes = plt.subplots(2, 2, sharex=True, sharey=False, figsize=param['figure_size'])
fig.suptitle('History: test: loss {:.2}, accuracy {:.2}'.format(test_loss, test_acc))
plot_history(callbacks.history['loss'], axes[0, 0], 'Training', 'loss')
plot_history(callbacks.history['accuracy'], axes[0, 1], 'Training', 'accuracy')
plot_history(callbacks.history['val_loss'], axes[1, 0], 'Validation', 'loss')
plot_history(callbacks.history['val_accuracy'], axes[1, 1], 'Validation', 'accuracy')
plt.savefig(os.path.join(param['output_dir'], param['figure_name']))
print("fin")
| 45.847458 | 132 | 0.674677 |
4a21e0b39143c49dddb5bc9b2f0b828192db28ac | 5,107 | py | Python | neuralmonkey/runners/beamsearch_runner.py | rahulrawat11/neuralmonkey | e2924dceb54a46500326f61c71bf2b312825c838 | [
"BSD-3-Clause"
] | 15 | 2018-04-11T09:18:09.000Z | 2021-03-12T03:04:20.000Z | neuralmonkey/runners/beamsearch_runner.py | bastings/neuralmonkey | 8d194701448a7d318396ecf6a82eb2dc6dec9dec | [
"BSD-3-Clause"
] | null | null | null | neuralmonkey/runners/beamsearch_runner.py | bastings/neuralmonkey | 8d194701448a7d318396ecf6a82eb2dc6dec9dec | [
"BSD-3-Clause"
] | 6 | 2017-07-25T15:30:28.000Z | 2019-10-31T16:14:48.000Z | from typing import Callable, List, Dict, Optional
import numpy as np
from typeguard import check_argument_types
from neuralmonkey.model.model_part import ModelPart
from neuralmonkey.decoders.beam_search_decoder import (BeamSearchDecoder,
SearchStepOutput)
from neuralmonkey.runners.base_runner import (BaseRunner, Executable,
ExecutionResult, NextExecute)
from neuralmonkey.vocabulary import Vocabulary, END_TOKEN
class BeamSearchExecutable(Executable):
def __init__(self,
rank: int,
all_encoders: List[ModelPart],
bs_outputs: SearchStepOutput,
vocabulary: Vocabulary,
postprocess: Optional[Callable]) -> None:
self._rank = rank
self._all_encoders = all_encoders
self._bs_outputs = bs_outputs
self._vocabulary = vocabulary
self._postprocess = postprocess
self.result = None # type: Optional[ExecutionResult]
def next_to_execute(self) -> NextExecute:
return self._all_encoders, {'bs_outputs': self._bs_outputs}, {}
def collect_results(self, results: List[Dict]) -> None:
if len(results) > 1:
raise ValueError("Beam search runner does not support ensembling.")
evaluated_bs = results[0]['bs_outputs']
max_time = evaluated_bs.scores.shape[0]
# pick the end of the hypothesis based on its rank
hyp_index = np.argpartition(
-evaluated_bs.scores[-1], self._rank - 1)[self._rank - 1]
bs_score = evaluated_bs.scores[-1][hyp_index]
# now backtrack
output_tokens = [] # type: List[str]
for time in reversed(range(max_time)):
token_id = evaluated_bs.token_ids[time][hyp_index]
token = self._vocabulary.index_to_word[token_id]
output_tokens.append(token)
hyp_index = evaluated_bs.parent_ids[time][hyp_index]
output_tokens.reverse()
before_eos_tokens = [] # type: List[str]
for tok in output_tokens:
if tok == END_TOKEN:
break
before_eos_tokens.append(tok)
if self._postprocess is not None:
decoded_tokens = self._postprocess([before_eos_tokens])
else:
decoded_tokens = [before_eos_tokens]
self.result = ExecutionResult(
outputs=decoded_tokens,
losses=[bs_score],
scalar_summaries=None,
histogram_summaries=None,
image_summaries=None)
class BeamSearchRunner(BaseRunner):
def __init__(self,
output_series: str,
decoder: BeamSearchDecoder,
rank: int = 1,
postprocess: Callable[[List[str]], List[str]] = None) -> None:
super(BeamSearchRunner, self).__init__(output_series, decoder)
check_argument_types()
if rank < 1 or rank > decoder.beam_size:
raise ValueError(
("Rank of output hypothesis must be between 1 and the beam "
"size ({}), was {}.").format(decoder.beam_size, rank))
self._rank = rank
self._postprocess = postprocess
def get_executable(self,
compute_losses: bool = False,
summaries: bool = True) -> BeamSearchExecutable:
return BeamSearchExecutable(
self._rank, self.all_coders, self._decoder.outputs,
self._decoder.vocabulary, self._postprocess)
@property
def loss_names(self) -> List[str]:
return ["beam_search_score"]
@property
def decoder_data_id(self) -> Optional[str]:
return None
def beam_search_runner_range(output_series: str,
decoder: BeamSearchDecoder,
max_rank: int = None,
postprocess: Callable[
[List[str]], List[str]]=None
) -> List[BeamSearchRunner]:
"""A list of beam search runners for a range of ranks from 1 to max_rank.
    This means there are max_rank output series, where the n-th series contains
the n-th best hypothesis from the beam search.
Args:
output_series: Prefix of output series.
decoder: Beam search decoder shared by all runners.
max_rank: Maximum rank of the hypotheses.
postprocess: Series-level postprocess applied on output.
Returns:
List of beam search runners getting hypotheses with rank from 1 to
max_rank.
"""
check_argument_types()
if max_rank is None:
max_rank = decoder.beam_size
if max_rank > decoder.beam_size:
raise ValueError(
("The maximum rank ({}) cannot be "
"bigger than beam size {}.").format(
max_rank, decoder.beam_size))
return [BeamSearchRunner("{}.rank{:03d}".format(output_series, r),
decoder, r, postprocess)
for r in range(1, max_rank + 1)]
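# Usage sketch (hypothetical objects; in practice these come from the experiment
# configuration): given a BeamSearchDecoder instance `bs_decoder` with beam_size >= 5,
#
#   runners = beam_search_runner_range("target_beam", bs_decoder, max_rank=5)
#
# yields five runners writing the series "target_beam.rank001" ... "target_beam.rank005",
# each extracting the hypothesis of the corresponding rank.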
| 35.964789 | 79 | 0.602311 |
4a21e1348e9ddf56d06d2f0eda047085cf111c3f | 8,990 | py | Python | nova/network/security_group/security_group_base.py | iawells/gluon-variant-nova | 8b1bc6a042b30710b8828e011f79206acc3cca46 | [
"Apache-2.0"
] | null | null | null | nova/network/security_group/security_group_base.py | iawells/gluon-variant-nova | 8b1bc6a042b30710b8828e011f79206acc3cca46 | [
"Apache-2.0"
] | null | null | null | nova/network/security_group/security_group_base.py | iawells/gluon-variant-nova | 8b1bc6a042b30710b8828e011f79206acc3cca46 | [
"Apache-2.0"
] | null | null | null | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012 Red Hat, Inc.
# Copyright 2013 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
from oslo_config import cfg
from nova import exception
from nova.i18n import _
from nova import utils
CONF = cfg.CONF
class SecurityGroupBase(object):
def __init__(self, skip_policy_check=False):
self.skip_policy_check = skip_policy_check
def parse_cidr(self, cidr):
if cidr:
try:
cidr = urllib.unquote(cidr).decode()
except Exception as e:
self.raise_invalid_cidr(cidr, e)
if not utils.is_valid_cidr(cidr):
self.raise_invalid_cidr(cidr)
return cidr
else:
return '0.0.0.0/0'
@staticmethod
def new_group_ingress_rule(grantee_group_id, protocol, from_port,
to_port):
return SecurityGroupBase._new_ingress_rule(
protocol, from_port, to_port, group_id=grantee_group_id)
@staticmethod
def new_cidr_ingress_rule(grantee_cidr, protocol, from_port, to_port):
return SecurityGroupBase._new_ingress_rule(
protocol, from_port, to_port, cidr=grantee_cidr)
@staticmethod
def _new_ingress_rule(ip_protocol, from_port, to_port,
group_id=None, cidr=None):
values = {}
if group_id:
values['group_id'] = group_id
# Open everything if an explicit port range or type/code are not
# specified, but only if a source group was specified.
ip_proto_upper = ip_protocol.upper() if ip_protocol else ''
if (ip_proto_upper == 'ICMP' and
from_port is None and to_port is None):
from_port = -1
to_port = -1
elif (ip_proto_upper in ['TCP', 'UDP'] and from_port is None
and to_port is None):
from_port = 1
to_port = 65535
elif cidr:
values['cidr'] = cidr
if ip_protocol and from_port is not None and to_port is not None:
ip_protocol = str(ip_protocol)
try:
# Verify integer conversions
from_port = int(from_port)
to_port = int(to_port)
except ValueError:
if ip_protocol.upper() == 'ICMP':
raise exception.InvalidInput(reason=_("Type and"
" Code must be integers for ICMP protocol type"))
else:
raise exception.InvalidInput(reason=_("To and From ports "
"must be integers"))
if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']:
raise exception.InvalidIpProtocol(protocol=ip_protocol)
# Verify that from_port must always be less than
# or equal to to_port
if (ip_protocol.upper() in ['TCP', 'UDP'] and
(from_port > to_port)):
raise exception.InvalidPortRange(from_port=from_port,
to_port=to_port, msg="Former value cannot"
" be greater than the later")
# Verify valid TCP, UDP port ranges
if (ip_protocol.upper() in ['TCP', 'UDP'] and
(from_port < 1 or to_port > 65535)):
raise exception.InvalidPortRange(from_port=from_port,
to_port=to_port, msg="Valid TCP ports should"
" be between 1-65535")
# Verify ICMP type and code
if (ip_protocol.upper() == "ICMP" and
(from_port < -1 or from_port > 255 or
to_port < -1 or to_port > 255)):
raise exception.InvalidPortRange(from_port=from_port,
to_port=to_port, msg="For ICMP, the"
" type:code must be valid")
values['protocol'] = ip_protocol
values['from_port'] = from_port
values['to_port'] = to_port
else:
# If cidr based filtering, protocol and ports are mandatory
if cidr:
return None
return values
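    # Illustrative example (not from the original source): a CIDR-based TCP rule such as
    # new_cidr_ingress_rule('10.0.0.0/24', 'tcp', 80, 80) flows through _new_ingress_rule
    # and yields {'cidr': '10.0.0.0/24', 'protocol': 'tcp', 'from_port': 80, 'to_port': 80},
    # while a CIDR rule without protocol/ports returns None (protocol and ports are
    # mandatory for CIDR-based filtering).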
def create_security_group_rule(self, context, security_group, new_rule):
if self.rule_exists(security_group, new_rule):
msg = (_('This rule already exists in group %s') %
new_rule['parent_group_id'])
self.raise_group_already_exists(msg)
return self.add_rules(context, new_rule['parent_group_id'],
security_group['name'],
[new_rule])[0]
def rule_exists(self, security_group, new_rule):
"""Indicates whether the specified rule is already
defined in the given security group.
"""
for rule in security_group['rules']:
keys = ('group_id', 'cidr', 'from_port', 'to_port', 'protocol')
for key in keys:
if rule.get(key) != new_rule.get(key):
break
else:
return rule.get('id') or True
return False
def validate_property(self, value, property, allowed):
pass
def ensure_default(self, context):
pass
def trigger_rules_refresh(self, context, id):
"""Called when a rule is added to or removed from a security_group."""
pass
def trigger_members_refresh(self, context, group_ids):
"""Called when a security group gains a new or loses a member.
Sends an update request to each compute node for each instance for
which this is relevant.
"""
pass
def populate_security_groups(self, instance, security_groups):
"""Called when populating the database for an instances
security groups.
"""
raise NotImplementedError()
def create_security_group(self, context, name, description):
raise NotImplementedError()
def update_security_group(self, context, security_group,
name, description):
raise NotImplementedError()
def get(self, context, name=None, id=None, map_exception=False):
raise NotImplementedError()
def list(self, context, names=None, ids=None, project=None,
search_opts=None):
raise NotImplementedError()
def destroy(self, context, security_group):
raise NotImplementedError()
def add_rules(self, context, id, name, vals):
raise NotImplementedError()
def remove_rules(self, context, security_group, rule_ids):
raise NotImplementedError()
def get_rule(self, context, id):
raise NotImplementedError()
def get_instance_security_groups(self, context, instance_uuid,
detailed=False):
raise NotImplementedError()
def add_to_instance(self, context, instance, security_group_name):
"""Add security group to the instance.
:param context: The request context.
:param instance: nova.objects.instance.Instance object.
:param security_group_name: security group name to add
"""
raise NotImplementedError()
def remove_from_instance(self, context, instance, security_group_name):
"""Remove the security group associated with the instance.
:param context: The request context.
:param instance: nova.objects.instance.Instance object.
:param security_group_name: security group name to remove
"""
raise NotImplementedError()
@staticmethod
def raise_invalid_property(msg):
raise exception.Invalid(msg)
@staticmethod
def raise_group_already_exists(msg):
raise exception.Invalid(msg)
@staticmethod
def raise_invalid_group(msg):
raise exception.Invalid(msg)
@staticmethod
def raise_invalid_cidr(cidr, decoding_exception=None):
raise exception.InvalidCidr(cidr=cidr)
@staticmethod
def raise_over_quota(msg):
raise exception.SecurityGroupLimitExceeded(msg)
@staticmethod
def raise_not_found(msg):
raise exception.SecurityGroupNotFound(msg)
| 35.674603 | 78 | 0.607341 |
4a21e199b71efeda2220c9c6f1c04aa8765d46d5 | 10,458 | py | Python | zerver/tests/webhooks/test_github_webhook.py | erinis-eligro/Zulip-outcast | 51153a6ce219370aee79bfe462f6e4fb956993d9 | [
"Apache-2.0"
] | null | null | null | zerver/tests/webhooks/test_github_webhook.py | erinis-eligro/Zulip-outcast | 51153a6ce219370aee79bfe462f6e4fb956993d9 | [
"Apache-2.0"
] | 1 | 2019-11-02T09:06:05.000Z | 2019-11-02T09:06:05.000Z | zerver/tests/webhooks/test_github_webhook.py | erinis-eligro/zulip-outcasts | 51153a6ce219370aee79bfe462f6e4fb956993d9 | [
"Apache-2.0"
] | null | null | null | import ujson
from typing import Dict, Optional, Text
from zerver.models import Message
from zerver.lib.webhooks.git import COMMITS_LIMIT
from zerver.lib.test_classes import WebhookTestCase
class GithubWebhookTest(WebhookTestCase):
STREAM_NAME = 'github'
URL_TEMPLATE = "/api/v1/external/github?stream={stream}&api_key={api_key}"
FIXTURE_DIR_NAME = 'github_webhook'
EXPECTED_SUBJECT_REPO_EVENTS = u"public-repo"
EXPECTED_SUBJECT_ISSUE_EVENTS = u"public-repo / Issue #2 Spelling error in the README file"
EXPECTED_SUBJECT_PR_EVENTS = u"public-repo / PR #1 Update the README with new information"
EXPECTED_SUBJECT_DEPLOYMENT_EVENTS = u"public-repo / Deployment on production"
EXPECTED_SUBJECT_ORGANIZATION_EVENTS = u"baxterandthehackers organization"
EXPECTED_SUBJECT_BRANCH_EVENTS = u"public-repo / changes"
EXPECTED_SUBJECT_WIKI_EVENTS = u"public-repo / Wiki Pages"
def test_push_1_commit(self):
# type: () -> None
expected_message = u"baxterthehacker [pushed](https://github.com/baxterthehacker/public-repo/compare/9049f1265b7d...0d1a26e67d8f) to branch changes\n\n* [0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c): Update README.md"
self.send_and_test_stream_message('push_1_commit', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='push')
def test_push_50_commits(self):
# type: () -> None
commit_info = "* [0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c): Update README.md\n"
expected_message = u"baxterthehacker [pushed](https://github.com/baxterthehacker/public-repo/compare/9049f1265b7d...0d1a26e67d8f) to branch changes\n\n{}[and 40 more commit(s)]".format(
commit_info * COMMITS_LIMIT
)
self.send_and_test_stream_message('push_50_commits', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='push')
def test_commit_comment_msg(self):
# type: () -> None
expected_message = u"baxterthehacker [commented](https://github.com/baxterthehacker/public-repo/commit/9049f1265b7d61be4a8904a9a27120d2064dab3b#commitcomment-11056394) on [9049f12](https://github.com/baxterthehacker/public-repo/commit/9049f1265b7d61be4a8904a9a27120d2064dab3b)\n~~~ quote\nThis is a really good change! :+1:\n~~~"
self.send_and_test_stream_message('commit_comment', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='commit_comment')
def test_create_msg(self):
# type: () -> None
expected_message = u"baxterthehacker created tag 0.0.1"
self.send_and_test_stream_message('create', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='create')
def test_delete_msg(self):
# type: () -> None
expected_message = u"baxterthehacker deleted tag simple-tag"
self.send_and_test_stream_message('delete', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='delete')
def test_deployment_msg(self):
# type: () -> None
expected_message = u"baxterthehacker created new deployment"
self.send_and_test_stream_message('deployment', self.EXPECTED_SUBJECT_DEPLOYMENT_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='deployment')
def test_deployment_status_msg(self):
# type: () -> None
expected_message = u"Deployment changed status to success"
self.send_and_test_stream_message('deployment_status', self.EXPECTED_SUBJECT_DEPLOYMENT_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='deployment_status')
def test_fork_msg(self):
# type: () -> None
expected_message = u"baxterandthehackers forked [public-repo](https://github.com/baxterandthehackers/public-repo)"
self.send_and_test_stream_message('fork', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='fork')
def test_issue_comment_msg(self):
# type: () -> None
expected_message = u"baxterthehacker [commented](https://github.com/baxterthehacker/public-repo/issues/2#issuecomment-99262140) on [Issue #2](https://github.com/baxterthehacker/public-repo/issues/2)\n\n~~~ quote\nYou are totally right! I'll get this fixed right away.\n~~~"
self.send_and_test_stream_message('issue_comment', self.EXPECTED_SUBJECT_ISSUE_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='issue_comment')
def test_issue_msg(self):
# type: () -> None
expected_message = u"baxterthehacker opened [Issue #2](https://github.com/baxterthehacker/public-repo/issues/2)\n\n~~~ quote\nIt looks like you accidently spelled 'commit' with two 't's.\n~~~"
self.send_and_test_stream_message('issue', self.EXPECTED_SUBJECT_ISSUE_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='issue')
def test_membership_msg(self):
# type: () -> None
expected_message = u"baxterthehacker added [kdaigle](https://github.com/kdaigle) to Contractors team"
self.send_and_test_stream_message('membership', self.EXPECTED_SUBJECT_ORGANIZATION_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='membership')
def test_member_msg(self):
# type: () -> None
expected_message = u"baxterthehacker added [octocat](https://github.com/octocat) to [public-repo](https://github.com/baxterthehacker/public-repo)"
self.send_and_test_stream_message('member', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='member')
def test_pull_request_opened_msg(self):
# type: () -> None
expected_message = u"baxterthehacker opened [PR](https://github.com/baxterthehacker/public-repo/pull/1)\nfrom `changes` to `master`\n\n~~~ quote\nThis is a pretty simple change that we need to pull into master.\n~~~"
self.send_and_test_stream_message('opened_pull_request', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='pull_request')
def test_pull_request_closed_msg(self):
# type: () -> None
expected_message = u"baxterthehacker closed without merge [PR](https://github.com/baxterthehacker/public-repo/pull/1)"
self.send_and_test_stream_message('closed_pull_request', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='pull_request')
def test_pull_request_merged_msg(self):
# type: () -> None
expected_message = u"baxterthehacker merged [PR](https://github.com/baxterthehacker/public-repo/pull/1)"
self.send_and_test_stream_message('merged_pull_request', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='pull_request')
def test_public_msg(self):
# type: () -> None
expected_message = u"baxterthehacker made [the repository](https://github.com/baxterthehacker/public-repo) public"
self.send_and_test_stream_message('public', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='public')
def test_wiki_pages_msg(self):
# type: () -> None
expected_message = u"jasonrudolph:\n* created [Home](https://github.com/baxterthehacker/public-repo/wiki/Home)\n* created [Home](https://github.com/baxterthehacker/public-repo/wiki/Home)"
self.send_and_test_stream_message('wiki_pages', self.EXPECTED_SUBJECT_WIKI_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='gollum')
def test_watch_msg(self):
# type: () -> None
expected_message = u"baxterthehacker starred [the repository](https://github.com/baxterthehacker/public-repo)"
self.send_and_test_stream_message('watch_repository', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='watch')
def test_repository_msg(self):
# type: () -> None
expected_message = u"baxterthehacker created [the repository](https://github.com/baxterandthehackers/public-repo)"
self.send_and_test_stream_message('repository', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='repository')
def test_team_add_msg(self):
# type: () -> None
expected_message = u"[The repository](https://github.com/baxterandthehackers/public-repo) was added to team github"
self.send_and_test_stream_message('team_add', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='team_add')
def test_release_msg(self):
# type: () -> None
expected_message = u"baxterthehacker published [the release](https://github.com/baxterthehacker/public-repo/releases/tag/0.0.1)"
self.send_and_test_stream_message('release', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='release')
def test_page_build_msg(self):
# type: () -> None
expected_message = u"Github Pages build, trigerred by baxterthehacker, is built"
self.send_and_test_stream_message('page_build', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='page_build')
def test_status_msg(self):
# type: () -> None
expected_message = u"[9049f12](https://github.com/baxterthehacker/public-repo/commit/9049f1265b7d61be4a8904a9a27120d2064dab3b) changed it's status to success"
self.send_and_test_stream_message('status', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='status')
def test_pull_request_review_msg(self):
# type: () -> None
expected_message = u"baxterthehacker submitted [PR Review](https://github.com/baxterthehacker/public-repo/pull/1#pullrequestreview-2626884)"
self.send_and_test_stream_message('pull_request_review', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='pull_request_review')
def test_pull_request_review_comment_msg(self):
# type: () -> None
expected_message = u"baxterthehacker created [PR Review Comment](https://github.com/baxterthehacker/public-repo/pull/1#discussion_r29724692)\n\n~~~ quote\nMaybe you should use more emojji on this line.\n~~~"
self.send_and_test_stream_message('pull_request_review_comment', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='pull_request_review_comment')
def test_push_tag_msg(self):
# type: () -> None
expected_message = u"baxterthehacker pushed tag abc"
self.send_and_test_stream_message('push_tag', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='push')
| 68.802632 | 337 | 0.75502 |
4a21e1ed89ee633a9f6a4c01fd654a424e04c704 | 640 | py | Python | var/spack/repos/builtin/packages/py-wget/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 11 | 2015-10-04T02:17:46.000Z | 2018-02-07T18:23:00.000Z | var/spack/repos/builtin/packages/py-wget/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 22 | 2017-08-01T22:45:10.000Z | 2022-03-10T07:46:31.000Z | var/spack/repos/builtin/packages/py-wget/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 4 | 2016-06-10T17:57:39.000Z | 2018-09-11T04:59:38.000Z |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyWget(PythonPackage):
"""pure python download utility
Download the file for your platform. If you're not sure which to choose,
learn more about installing packages."""
pypi = "wget/wget-3.2.zip"
version('3.2', sha256='35e630eca2aa50ce998b9b1a127bb26b30dfee573702782aa982f875e3f16061')
# pip silently replaces distutils with setuptools
depends_on('py-setuptools', type='build')
| 30.47619 | 93 | 0.74375 |
4a21e3f40c287cb834cabaf5ef63f80cf762968f | 12752 | py | Python | fs/tests/test_remote.py | jwilk-forks/pyfilesystem | 44573f70e72b2cf378ee20d1c8bc2084ba975e16 | ["BSD-3-Clause"] | 314 | 2015-04-11T10:52:26.000Z | 2022-01-26T07:00:30.000Z | fs/tests/test_remote.py | jwilk-forks/pyfilesystem | 44573f70e72b2cf378ee20d1c8bc2084ba975e16 | ["BSD-3-Clause"] | 94 | 2015-04-11T10:43:16.000Z | 2021-10-06T11:21:26.000Z | fs/tests/test_remote.py | jwilk-forks/pyfilesystem | 44573f70e72b2cf378ee20d1c8bc2084ba975e16 | ["BSD-3-Clause"] | 95 | 2015-04-21T02:13:20.000Z | 2021-11-26T05:07:59.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
fs.tests.test_remote: testcases for FS remote support utilities
"""
from fs.tests import FSTestCases, ThreadingTestCases
import unittest
import threading
import random
import time
import sys
from fs.remote import *
from fs import SEEK_END
from fs.wrapfs import WrapFS, wrap_fs_methods
from fs.tempfs import TempFS
from fs.path import *
from fs.local_functools import wraps
from six import PY3, b
class RemoteTempFS(TempFS):
"""
Simple filesystem implementing setfilecontents
for RemoteFileBuffer tests
"""
def __repr__(self):
return '<RemoteTempFS: %s>' % self._temp_dir
def open(self, path, mode='rb', write_on_flush=True, **kwargs):
if 'a' in mode or 'r' in mode or '+' in mode:
f = super(RemoteTempFS, self).open(path, mode='rb', **kwargs)
f = TellAfterCloseFile(f)
else:
f = None
return RemoteFileBuffer(self,
path,
mode,
f,
write_on_flush=write_on_flush)
def setcontents(self, path, data, encoding=None, errors=None, chunk_size=64*1024):
f = super(RemoteTempFS, self).open(path, 'wb', encoding=encoding, errors=errors, chunk_size=chunk_size)
if getattr(data, 'read', False):
f.write(data.read())
else:
f.write(data)
f.close()
class TellAfterCloseFile(object):
"""File-like object that allows calling tell() after it's been closed."""
def __init__(self, file):
self._finalpos = None
self.file = file
def close(self):
if self._finalpos is None:
self._finalpos = self.file.tell()
self.file.close()
def tell(self):
if self._finalpos is not None:
return self._finalpos
return self.file.tell()
def __getattr__(self, attr):
return getattr(self.file, attr)
class TestRemoteFileBuffer(unittest.TestCase, FSTestCases, ThreadingTestCases):
class FakeException(Exception): pass
def setUp(self):
self.fs = RemoteTempFS()
self.original_setcontents = self.fs.setcontents
def tearDown(self):
self.fs.close()
self.fakeOff()
def fake_setcontents(self, path, content=b(''), chunk_size=16*1024):
''' Fake replacement for RemoteTempFS setcontents() '''
raise self.FakeException("setcontents should not be called here!")
def fakeOn(self):
'''
Turn on fake_setcontents(). When setcontents on RemoteTempFS
is called, FakeException is raised and nothing is stored.
'''
self.fs.setcontents = self.fake_setcontents
def fakeOff(self):
''' Switch off fake_setcontents(). '''
self.fs.setcontents = self.original_setcontents
def test_ondemand(self):
'''
Tests on-demand loading of remote content in RemoteFileBuffer
'''
contents = b("Tristatricettri stribrnych strikacek strikalo") + \
b("pres tristatricettri stribrnych strech.")
f = self.fs.open('test.txt', 'wb')
f.write(contents)
f.close()
# During the following tests, no setcontents() should be called.
self.fakeOn()
f = self.fs.open('test.txt', 'rb')
self.assertEquals(f.read(10), contents[:10])
f.wrapped_file.seek(0, SEEK_END)
self.assertEquals(f._rfile.tell(), 10)
f.seek(20)
self.assertEquals(f.tell(), 20)
self.assertEquals(f._rfile.tell(), 20)
f.seek(0, SEEK_END)
self.assertEquals(f._rfile.tell(), len(contents))
f.close()
f = self.fs.open('test.txt', 'ab')
self.assertEquals(f.tell(), len(contents))
f.close()
self.fakeOff()
# Writing over the rfile edge
f = self.fs.open('test.txt', 'wb+')
self.assertEquals(f.tell(), 0)
f.seek(len(contents) - 5)
# Last 5 characters not loaded from remote file
self.assertEquals(f._rfile.tell(), len(contents) - 5)
# Confirm that last 5 characters are still in rfile buffer
self.assertEquals(f._rfile.read(), contents[-5:])
# Rollback position 5 characters before eof
f._rfile.seek(len(contents[:-5]))
# Write 10 new characters (this makes the contents 5 chars longer)
f.write(b('1234567890'))
f.flush()
# We are at the end of the file (and the buffer no longer serves anything)
self.assertEquals(f.read(), b(''))
f.close()
self.fakeOn()
# Check that everything from the previous write over the remote
# buffer edge was stored correctly
f = self.fs.open('test.txt', 'rb')
self.assertEquals(f.read(), contents[:-5] + b('1234567890'))
f.close()
self.fakeOff()
def test_writeonflush(self):
'''
Test 'write_on_flush' switch of RemoteFileBuffer.
When True, flush() should call setcontents and store
to remote destination.
When False, setcontents should be called only on close().
'''
self.fakeOn()
f = self.fs.open('test.txt', 'wb', write_on_flush=True)
f.write(b('Sample text'))
self.assertRaises(self.FakeException, f.flush)
f.write(b('Second sample text'))
self.assertRaises(self.FakeException, f.close)
self.fakeOff()
f.close()
self.fakeOn()
f = self.fs.open('test.txt', 'wb', write_on_flush=False)
f.write(b('Sample text'))
# FakeException is not raised, because setcontents is not called
f.flush()
f.write(b('Second sample text'))
self.assertRaises(self.FakeException, f.close)
self.fakeOff()
def test_flush_and_continue(self):
'''
This tests whether a partially loaded remote buffer can be flushed
back to the remote destination while the open file remains
in good condition.
'''
contents = b("Zlutoucky kun upel dabelske ody.")
contents2 = b('Ententyky dva spaliky cert vyletel z elektriky')
f = self.fs.open('test.txt', 'wb')
f.write(contents)
f.close()
f = self.fs.open('test.txt', 'rb+')
# Check if we read just 10 characters
self.assertEquals(f.read(10), contents[:10])
self.assertEquals(f._rfile.tell(), 10)
# Write garbage to file to mark it as _changed
f.write(b('x'))
# This should read the rest of the file and store the file back again.
f.flush()
f.seek(0)
# Check that the local file is uncorrupted...
self.assertEquals(f.read(), contents[:10] + b('x') + contents[11:])
f.close()
# And if we have uncorrupted file also on storage
f = self.fs.open('test.txt', 'rb')
self.assertEquals(f.read(), contents[:10] + b('x') + contents[11:])
f.close()
# Now try it again, but write garbage beyond the edge of the remote file
f = self.fs.open('test.txt', 'rb+')
self.assertEquals(f.read(10), contents[:10])
# Write garbage to file to mark it as _changed
f.write(contents2)
f.flush()
f.seek(0)
# Check that the local file is uncorrupted...
self.assertEquals(f.read(), contents[:10] + contents2)
f.close()
# And if we have uncorrupted file also on storage
f = self.fs.open('test.txt', 'rb')
self.assertEquals(f.read(), contents[:10] + contents2)
f.close()
class TestCacheFS(unittest.TestCase,FSTestCases,ThreadingTestCases):
"""Test simple operation of CacheFS"""
def setUp(self):
self._check_interval = sys.getcheckinterval()
sys.setcheckinterval(10)
self.wrapped_fs = TempFS()
self.fs = CacheFS(self.wrapped_fs,cache_timeout=0.01)
def tearDown(self):
self.fs.close()
sys.setcheckinterval(self._check_interval)
def test_values_are_used_from_cache(self):
old_timeout = self.fs.cache_timeout
self.fs.cache_timeout = None
try:
self.assertFalse(self.fs.isfile("hello"))
self.wrapped_fs.setcontents("hello",b("world"))
self.assertTrue(self.fs.isfile("hello"))
self.wrapped_fs.remove("hello")
self.assertTrue(self.fs.isfile("hello"))
self.fs.clear_cache()
self.assertFalse(self.fs.isfile("hello"))
finally:
self.fs.cache_timeout = old_timeout
def test_values_are_updated_in_cache(self):
old_timeout = self.fs.cache_timeout
self.fs.cache_timeout = None
try:
self.assertFalse(self.fs.isfile("hello"))
self.wrapped_fs.setcontents("hello",b("world"))
self.assertTrue(self.fs.isfile("hello"))
self.wrapped_fs.remove("hello")
self.assertTrue(self.fs.isfile("hello"))
self.wrapped_fs.setcontents("hello",b("world"))
self.assertTrue(self.fs.isfile("hello"))
self.fs.remove("hello")
self.assertFalse(self.fs.isfile("hello"))
finally:
self.fs.cache_timeout = old_timeout
class TestConnectionManagerFS(unittest.TestCase,FSTestCases):#,ThreadingTestCases):
"""Test simple operation of ConnectionManagerFS"""
def setUp(self):
self._check_interval = sys.getcheckinterval()
sys.setcheckinterval(10)
self.fs = ConnectionManagerFS(TempFS())
def tearDown(self):
self.fs.close()
sys.setcheckinterval(self._check_interval)
class DisconnectingFS(WrapFS):
"""FS subclass that raises lots of RemoteConnectionErrors."""
def __init__(self,fs=None):
if fs is None:
fs = TempFS()
self._connected = True
self._continue = True
self._bounce_thread = None
super(DisconnectingFS,self).__init__(fs)
if random.choice([True,False]):
raise RemoteConnectionError("")
self._bounce_thread = threading.Thread(target=self._bounce)
self._bounce_thread.daemon = True
self._bounce_thread.start()
def __getstate__(self):
state = super(DisconnectingFS,self).__getstate__()
del state["_bounce_thread"]
return state
def __setstate__(self,state):
super(DisconnectingFS,self).__setstate__(state)
self._bounce_thread = threading.Thread(target=self._bounce)
self._bounce_thread.daemon = True
self._bounce_thread.start()
def _bounce(self):
while self._continue:
time.sleep(random.random()*0.1)
self._connected = not self._connected
def setcontents(self, path, data=b(''), encoding=None, errors=None, chunk_size=64*1024):
return self.wrapped_fs.setcontents(path, data, encoding=encoding, errors=errors, chunk_size=chunk_size)
def close(self):
if not self.closed:
self._continue = False
if self._bounce_thread is not None:
self._bounce_thread.join()
self._connected = True
super(DisconnectingFS,self).close()
def disconnecting_wrapper(func):
"""Method wrapper to raise RemoteConnectionError if not connected."""
@wraps(func)
def wrapper(self,*args,**kwds):
if not self._connected:
raise RemoteConnectionError("")
return func(self,*args,**kwds)
return wrapper
DisconnectingFS = wrap_fs_methods(disconnecting_wrapper,DisconnectingFS,exclude=["close"])
class DisconnectRecoveryFS(WrapFS):
"""FS subclass that recovers from RemoteConnectionErrors by waiting."""
pass
def recovery_wrapper(func):
"""Method wrapper to recover from RemoteConnectionErrors by waiting."""
@wraps(func)
def wrapper(self,*args,**kwds):
while True:
try:
return func(self,*args,**kwds)
except RemoteConnectionError:
self.wrapped_fs.wait_for_connection()
return wrapper
# this also checks that wrap_fs_methods works as a class decorator
DisconnectRecoveryFS = wrap_fs_methods(recovery_wrapper)(DisconnectRecoveryFS)
class TestConnectionManagerFS_disconnect(TestConnectionManagerFS):
"""Test ConnectionManagerFS's ability to wait for reconnection."""
def setUp(self):
self._check_interval = sys.getcheckinterval()
sys.setcheckinterval(10)
c_fs = ConnectionManagerFS(DisconnectingFS,poll_interval=0.1)
self.fs = DisconnectRecoveryFS(c_fs)
def tearDown(self):
self.fs.close()
sys.setcheckinterval(self._check_interval)
if __name__ == '__main__':
unittest.main()
| 33.557895 | 111 | 0.619432 |
4a21e52f360b63232739789a9c4ed0bf10f75e8a | 1981 | py | Python | runner.py | eranns/simple_learn | 763855954613db2ca17dd918f1767449cbd808a1 | ["MIT"] | null | null | null | runner.py | eranns/simple_learn | 763855954613db2ca17dd918f1767449cbd808a1 | ["MIT"] | null | null | null | runner.py | eranns/simple_learn | 763855954613db2ca17dd918f1767449cbd808a1 | ["MIT"] | null | null | null |
# Copyright (c) 2020 Sharvil Kekre skekre98
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ML package
from sklearn.datasets import load_iris
# pip package
from simple_learn.classifiers import SimpleClassifier, SimpleClassifierList
class SimpleRunner:
def __init__(self):
iris = load_iris()
self.x = iris.data
self.y = iris.target
def run(self):
# print SimpleClassifier created by iris dataset
print("\nCreating classification model...")
self.simple_classifier()
print("\nCreating classification rankings...")
self.simple_classifier_list()
def simple_classifier(self):
clf = SimpleClassifier()
clf.fit(self.x, self.y)
print(clf)
def simple_classifier_list(self):
clf_list = SimpleClassifierList()
clf_list.fit(self.x, self.y)
print(clf_list)
def main():
SR = SimpleRunner()
SR.run()
if __name__ == "__main__":
main()
| 33.016667 | 80 | 0.720848 |
4a21e54964c01a34fbcd902b01b7568c83f49800 | 1030 | py | Python | agogosml/agogosml/reader/input_reader_factory.py | cicorias/agogosml | 60e0b52c2fc721bdd965aadaf8c1afd1ddb9b7d1 | ["MIT"] | 13 | 2018-12-07T21:02:20.000Z | 2019-02-22T14:36:31.000Z | agogosml/agogosml/reader/input_reader_factory.py | cicorias/agogosml | 60e0b52c2fc721bdd965aadaf8c1afd1ddb9b7d1 | ["MIT"] | 43 | 2018-11-30T11:31:43.000Z | 2019-04-03T16:09:06.000Z | agogosml/agogosml/reader/input_reader_factory.py | cicorias/agogosml | 60e0b52c2fc721bdd965aadaf8c1afd1ddb9b7d1 | ["MIT"] | 13 | 2018-11-29T00:31:29.000Z | 2019-02-22T18:50:28.000Z |
"""Factory for InputReader."""
from typing import Optional
from agogosml.common.abstract_streaming_client import AbstractStreamingClient
from agogosml.common.abstract_streaming_client import create_streaming_client_from_config
from agogosml.common.http_message_sender import HttpMessageSender
from agogosml.reader.input_reader import InputReader
class InputReaderFactory:
"""Factory for InputReader."""
@staticmethod
def create(config: dict, streaming_client: Optional[AbstractStreamingClient] = None):
"""Resolve an input reader given the configuration."""
if not config:
raise Exception('No config were set for the InputReader manager')
client = streaming_client or create_streaming_client_from_config(config.get('client'))
# host and port from the client
app_host = config.get('APP_HOST')
app_port = config.get('APP_PORT')
msg_sender = HttpMessageSender({'HOST': app_host, 'PORT': app_port})
return InputReader(client, msg_sender)
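
# Illustrative usage sketch (not part of the original module). The config keys
# below are the ones read by InputReaderFactory.create() above; the concrete
# values are placeholders.
#
#   config = {'client': {...}, 'APP_HOST': 'localhost', 'APP_PORT': 8080}
#   reader = InputReaderFactory.create(config)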
| 36.785714 | 94 | 0.748544 |
4a21e63bbe5abdfee319502dabb79a930d4ae1fe | 691 | py | Python | Python/Algorithms/21.py | DimitrisJim/leetcode_solutions | 765ea578748f8c9b21243dec9dc8a16163e85c0c | ["Unlicense"] | 2 | 2021-01-15T17:22:54.000Z | 2021-05-16T19:58:02.000Z | Python/Algorithms/21.py | DimitrisJim/leetcode_solutions | 765ea578748f8c9b21243dec9dc8a16163e85c0c | ["Unlicense"] | null | null | null | Python/Algorithms/21.py | DimitrisJim/leetcode_solutions | 765ea578748f8c9b21243dec9dc8a16163e85c0c | ["Unlicense"] | null | null | null |
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def mergeTwoLists(self, l1, l2):
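# Approach (comment added for clarity): merge by copying values into freshly
# allocated nodes rather than splicing the input lists; always take the
# smaller head value first so the result stays sorted.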
if not l1 and not l2:
return None
new = ListNode()
ref = new
while l1 and l2:
if l1.val <= l2.val:
ref.val, l1 = l1.val, l1.next
else:
ref.val, l2 = l2.val, l2.next
ref.next = ListNode()
ref = ref.next
rem = l1 if l1 else l2
while rem:
ref.val, rem = rem.val, rem.next
if rem:
ref.next = ListNode()
ref = ref.next
return new
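
# Minimal usage sketch (not part of the original solution file); the list
# values are arbitrary examples.
#
#   a = ListNode(1, ListNode(3))
#   b = ListNode(2, ListNode(4))
#   merged = Solution().mergeTwoLists(a, b)   # 1 -> 2 -> 3 -> 4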
| 24.678571 | 45 | 0.457308 |
4a21e736d2a10580c5952bea195f724d2338ae23 | 5175 | py | Python | api/assessment/migrations/0001_initial.py | cad106uk/market-access-api | a357c33bbec93408b193e598a5628634126e9e99 | ["MIT"] | null | null | null | api/assessment/migrations/0001_initial.py | cad106uk/market-access-api | a357c33bbec93408b193e598a5628634126e9e99 | ["MIT"] | null | null | null | api/assessment/migrations/0001_initial.py | cad106uk/market-access-api | a357c33bbec93408b193e598a5628634126e9e99 | ["MIT"] | null | null | null |
# Generated by Django 2.2.3 on 2019-08-07 10:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import simple_history.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('barriers', '0032_auto_20190722_0905'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('interactions', '0003_auto_20190322_1221'),
]
operations = [
migrations.CreateModel(
name='HistoricalAssessment',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_on', models.DateTimeField(blank=True, db_index=True, editable=False, null=True)),
('modified_on', models.DateTimeField(blank=True, editable=False, null=True)),
('archived', models.BooleanField(default=False)),
('archived_on', models.DateTimeField(blank=True, null=True)),
('archived_reason', models.TextField(blank=True, null=True)),
('impact', models.CharField(choices=[('HIGH', 'High'), ('MEDIUMHIGH', 'Medium High'), ('MEDIUMLOW', 'Medium Low'), ('LOW', 'Low')], max_length=25)),
('explanation', models.TextField()),
('value_to_economy', models.BigIntegerField(null=True)),
('import_market_size', models.BigIntegerField(null=True)),
('commercial_value', models.BigIntegerField(null=True)),
('is_active', models.BooleanField(default=True)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_date', models.DateTimeField()),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('archived_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('barrier', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='barriers.BarrierInstance')),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical assessment',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='Assessment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_on', models.DateTimeField(auto_now_add=True, db_index=True, null=True)),
('modified_on', models.DateTimeField(auto_now=True, null=True)),
('archived', models.BooleanField(default=False)),
('archived_on', models.DateTimeField(blank=True, null=True)),
('archived_reason', models.TextField(blank=True, null=True)),
('impact', models.CharField(choices=[('HIGH', 'High'), ('MEDIUMHIGH', 'Medium High'), ('MEDIUMLOW', 'Medium Low'), ('LOW', 'Low')], max_length=25)),
('explanation', models.TextField()),
('value_to_economy', models.BigIntegerField(null=True)),
('import_market_size', models.BigIntegerField(null=True)),
('commercial_value', models.BigIntegerField(null=True)),
('is_active', models.BooleanField(default=True)),
('archived_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('barrier', models.OneToOneField(on_delete=django.db.models.deletion.PROTECT, to='barriers.BarrierInstance')),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('documents', models.ManyToManyField(help_text='assessment documents', related_name='assessment_documents', to='interactions.Document')),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
]
| 66.346154 | 190 | 0.632271 |
4a21e73eb34167b5389dcef6f1c27e0f0c519e50 | 12,448 | py | Python | fastai/train.py | JiahuaWU/fastai | 13a2df812d875abf0558004283392ab40d9bdea1 | [
"Apache-2.0"
] | 59 | 2020-08-18T03:41:35.000Z | 2022-03-23T03:51:55.000Z | fastai/train.py | JiahuaWU/fastai | 13a2df812d875abf0558004283392ab40d9bdea1 | [
"Apache-2.0"
] | 17 | 2020-08-25T14:15:32.000Z | 2022-03-27T02:12:19.000Z | fastai/train.py | JiahuaWU/fastai | 13a2df812d875abf0558004283392ab40d9bdea1 | [
"Apache-2.0"
] | 89 | 2020-08-17T23:45:42.000Z | 2022-03-27T20:53:43.000Z | "Provides advanced training extensions to `fastai.basic_train`. Includes half-precision, learning rate finder, mixup, and one-cycle"
from .torch_core import *
from .callback import *
from .callbacks import *
from .basic_data import *
from .basic_train import *
__all__ = ['BnFreeze', 'GradientClipping', 'ShowGraph', 'Interpretation', 'ClassificationInterpretation', 'MultiLabelClassificationInterpretation',
'fit_one_cycle', 'lr_find', 'one_cycle_scheduler', 'to_fp16', 'to_fp32', 'mixup', 'AccumulateScheduler', 'fit_fc']
def one_cycle_scheduler(lr_max:float, **kwargs:Any)->OneCycleScheduler:
"Instantiate a `OneCycleScheduler` with `lr_max`."
return partial(OneCycleScheduler, lr_max=lr_max, **kwargs)
def fit_one_cycle(learn:Learner, cyc_len:int, max_lr:Union[Floats,slice]=defaults.lr,
moms:Tuple[float,float]=(0.95,0.85), div_factor:float=25., pct_start:float=0.3, final_div:float=None,
wd:float=None, callbacks:Optional[CallbackList]=None, tot_epochs:int=None, start_epoch:int=None)->None:
"Fit a model following the 1cycle policy."
max_lr = learn.lr_range(max_lr)
callbacks = listify(callbacks)
callbacks.append(OneCycleScheduler(learn, max_lr, moms=moms, div_factor=div_factor, pct_start=pct_start,
final_div=final_div, tot_epochs=tot_epochs, start_epoch=start_epoch))
learn.fit(cyc_len, max_lr, wd=wd, callbacks=callbacks)
def fit_fc(learn:Learner, tot_epochs:int=1, lr:float=defaults.lr, moms:Tuple[float,float]=(0.95,0.85), start_pct:float=0.72,
wd:float=None, callbacks:Optional[CallbackList]=None)->None:
"Fit a model with Flat Cosine Annealing"
max_lr = learn.lr_range(lr)
callbacks = listify(callbacks)
callbacks.append(FlatCosAnnealScheduler(learn, lr, moms=moms, start_pct=start_pct, tot_epochs=tot_epochs))
learn.fit(tot_epochs, max_lr, wd=wd, callbacks=callbacks)
def lr_find(learn:Learner, start_lr:Floats=1e-7, end_lr:Floats=10, num_it:int=100, stop_div:bool=True, wd:float=None):
"Explore lr from `start_lr` to `end_lr` over `num_it` iterations in `learn`. If `stop_div`, stops when loss diverges."
start_lr = learn.lr_range(start_lr)
start_lr = np.array(start_lr) if is_listy(start_lr) else start_lr
end_lr = learn.lr_range(end_lr)
end_lr = np.array(end_lr) if is_listy(end_lr) else end_lr
cb = LRFinder(learn, start_lr, end_lr, num_it, stop_div)
epochs = int(np.ceil(num_it/len(learn.data.train_dl))) * (num_distrib() or 1)
learn.fit(epochs, start_lr, callbacks=[cb], wd=wd)
def to_fp16(learn:Learner, loss_scale:float=None, max_noskip:int=1000, dynamic:bool=True, clip:float=None,
flat_master:bool=False, max_scale:float=2**24, loss_fp32:bool=True)->Learner:
"Put `learn` in FP16 precision mode."
learn.to_fp32()
learn.model = model2half(learn.model)
learn.data.add_tfm(batch_to_half)
learn.mp_cb = MixedPrecision(learn, loss_scale=loss_scale, max_noskip=max_noskip, dynamic=dynamic, clip=clip,
flat_master=flat_master, max_scale=max_scale, loss_fp32=loss_fp32)
learn.callbacks.append(learn.mp_cb)
return learn
def to_fp32(learn:Learner):
"Put `learn` back to FP32 precision mode."
learn.data.remove_tfm(batch_to_half)
for cb in learn.callbacks:
if isinstance(cb, MixedPrecision): learn.callbacks.remove(cb)
learn.model = learn.model.float()
return learn
def mixup(learn:Learner, alpha:float=0.4, stack_x:bool=False, stack_y:bool=True) -> Learner:
"Add mixup https://arxiv.org/abs/1710.09412 to `learn`."
learn.callback_fns.append(partial(MixUpCallback, alpha=alpha, stack_x=stack_x, stack_y=stack_y))
return learn
Learner.fit_one_cycle = fit_one_cycle
Learner.lr_find = lr_find
Learner.to_fp16 = to_fp16
Learner.to_fp32 = to_fp32
Learner.mixup = mixup
Learner.fit_fc = fit_fc
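
# Illustrative calls for the methods patched onto Learner above (assumes a
# `learn` object built elsewhere; hyperparameter values are placeholders):
#
#   learn.lr_find()                      # sweep learning rates before training
#   learn.fit_one_cycle(5, max_lr=1e-3)  # train with the 1cycle policy
#   learn = learn.to_fp16()              # switch the model to mixed precision
#   learn = learn.mixup(alpha=0.4)       # enable mixup data augmentation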
class ShowGraph(LearnerCallback):
"Update a graph of learner stats and metrics after each epoch."
def on_epoch_end(self, n_epochs:int, last_metrics:MetricsList, **kwargs)->bool:
"If we have `last_metrics` plot them in our pbar graph"
if last_metrics is not None and last_metrics[0] is not None:
rec = self.learn.recorder
iters = range_of(rec.losses)
val_iter = np.array(rec.nb_batches).cumsum()
x_bounds = (0, (n_epochs - len(rec.nb_batches)) * rec.nb_batches[-1] + len(rec.losses))
y_bounds = (0, max((max(Tensor(rec.losses)), max(Tensor(rec.val_losses)))))
rec.pbar.update_graph([(iters, rec.losses), (val_iter, rec.val_losses)], x_bounds, y_bounds)
return {}
class BnFreeze(LearnerCallback):
"Freeze moving average statistics in all non-trainable batchnorm layers."
def on_epoch_begin(self, **kwargs:Any)->None:
"Put bn layers in eval mode just after `model.train()`."
set_bn_eval(self.learn.model)
class GradientClipping(LearnerCallback):
"Gradient clipping during training."
def __init__(self, learn:Learner, clip:float = 0.):
super().__init__(learn)
self.clip = clip
def on_backward_end(self, **kwargs):
"Clip the gradient before the optimizer step."
if self.clip: nn.utils.clip_grad_norm_(self.learn.model.parameters(), self.clip)
def clip_grad(learn:Learner, clip:float=0.1)->Learner:
"Add gradient clipping of `clip` during training."
learn.callback_fns.append(partial(GradientClipping, clip=clip))
return learn
Learner.clip_grad = clip_grad
class AccumulateScheduler(LearnerCallback):
"Does accumlated step every nth step by accumulating gradients"
def __init__(self, learn:Learner, n_step:int = 1, drop_last:bool = False):
super().__init__(learn)
self.n_step,self.drop_last = n_step,drop_last
def on_train_begin(self, **kwargs):
"check if loss is reduction"
if hasattr(self.loss_func, "reduction") and (self.loss_func.reduction != "sum"):
warn("For better gradients consider 'reduction=sum'")
def on_epoch_begin(self, **kwargs):
"init samples and batches, change optimizer"
self.acc_samples, self.acc_batches = 0., 0.
def on_batch_begin(self, last_input, last_target, **kwargs):
"accumulate samples and batches"
self.acc_samples += last_input.shape[0]
self.acc_batches += 1
def on_backward_end(self, **kwargs):
"accumulated step and reset samples, True will result in no stepping"
if (self.acc_batches % self.n_step) == 0:
for p in (self.learn.model.parameters()):
if p.requires_grad: p.grad.div_(self.acc_samples)
self.acc_samples = 0
else: return {'skip_step':True, 'skip_zero':True}
def on_epoch_end(self, **kwargs):
"step the rest of the accumulated grads if not perfectly divisible"
for p in (self.learn.model.parameters()):
if p.requires_grad: p.grad.div_(self.acc_samples)
if not self.drop_last: self.learn.opt.step()
self.learn.opt.zero_grad()
class Interpretation():
"Interpretation base class, can be inherited for task specific Interpretation classes"
def __init__(self, learn:Learner, preds:Tensor, y_true:Tensor, losses:Tensor, ds_type:DatasetType=DatasetType.Valid):
self.data,self.preds,self.y_true,self.losses,self.ds_type, self.learn = \
learn.data,preds,y_true,losses,ds_type,learn
self.ds = (self.data.train_ds if ds_type == DatasetType.Train else
self.data.test_ds if ds_type == DatasetType.Test else
self.data.valid_ds if ds_type == DatasetType.Valid else
self.data.single_ds if ds_type == DatasetType.Single else
self.data.fix_ds)
@classmethod
def from_learner(cls, learn: Learner, ds_type:DatasetType=DatasetType.Valid, activ:nn.Module=None):
"Gets preds, y_true, losses to construct base class from a learner"
preds_res = learn.get_preds(ds_type=ds_type, activ=activ, with_loss=True)
return cls(learn, *preds_res)
def top_losses(self, k:int=None, largest=True):
"`k` largest(/smallest) losses and indexes, defaulting to all losses (sorted by `largest`)."
return self.losses.topk(ifnone(k, len(self.losses)), largest=largest)
# def top_scores(self, metric:Callable=None, k:int=None, largest=True):
# "`k` largest(/smallest) metric scores and indexes, defaulting to all scores (sorted by `largest`)."
# self.scores = metric(self.preds, self.y_true)
# return self.scores.topk(ifnone(k, len(self.scores)), largest=largest)
class ClassificationInterpretation(Interpretation):
"Interpretation methods for classification models."
def __init__(self, learn:Learner, preds:Tensor, y_true:Tensor, losses:Tensor, ds_type:DatasetType=DatasetType.Valid):
super().__init__(learn,preds,y_true,losses,ds_type)
self.pred_class = self.preds.argmax(dim=1)
def confusion_matrix(self, slice_size:int=1):
"Confusion matrix as an `np.ndarray`."
x=torch.arange(0,self.data.c)
if slice_size is None: cm = ((self.pred_class==x[:,None]) & (self.y_true==x[:,None,None])).sum(2)
else:
cm = torch.zeros(self.data.c, self.data.c, dtype=x.dtype)
for i in range(0, self.y_true.shape[0], slice_size):
cm_slice = ((self.pred_class[i:i+slice_size]==x[:,None])
& (self.y_true[i:i+slice_size]==x[:,None,None])).sum(2)
torch.add(cm, cm_slice, out=cm)
return to_np(cm)
def plot_confusion_matrix(self, normalize:bool=False, title:str='Confusion matrix', cmap:Any="Blues", slice_size:int=1,
norm_dec:int=2, plot_txt:bool=True, return_fig:bool=None, **kwargs)->Optional[plt.Figure]:
"Plot the confusion matrix, with `title` and using `cmap`."
# This function is mainly copied from the sklearn docs
cm = self.confusion_matrix(slice_size=slice_size)
if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
fig = plt.figure(**kwargs)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
tick_marks = np.arange(self.data.c)
plt.xticks(tick_marks, self.data.y.classes, rotation=90)
plt.yticks(tick_marks, self.data.y.classes, rotation=0)
if plot_txt:
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
coeff = f'{cm[i, j]:.{norm_dec}f}' if normalize else f'{cm[i, j]}'
plt.text(j, i, coeff, horizontalalignment="center", verticalalignment="center", color="white" if cm[i, j] > thresh else "black")
ax = fig.gca()
ax.set_ylim(len(self.data.y.classes)-.5,-.5)
plt.tight_layout()
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.grid(False)
if ifnone(return_fig, defaults.return_fig): return fig
def most_confused(self, min_val:int=1, slice_size:int=1)->Collection[Tuple[str,str,int]]:
"Sorted descending list of largest non-diagonal entries of confusion matrix, presented as actual, predicted, number of occurrences."
cm = self.confusion_matrix(slice_size=slice_size)
np.fill_diagonal(cm, 0)
res = [(self.data.classes[i],self.data.classes[j],cm[i,j])
for i,j in zip(*np.where(cm>=min_val))]
return sorted(res, key=itemgetter(2), reverse=True)
def _learner_interpret(learn:Learner, ds_type:DatasetType=DatasetType.Valid):
"Create a `ClassificationInterpretation` object from `learner` on `ds_type` with `tta`."
return ClassificationInterpretation.from_learner(learn, ds_type=ds_type)
Learner.interpret = _learner_interpret
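
# Illustrative follow-up (assumes the `learn` object above; not part of the
# original file):
#
#   interp = learn.interpret()
#   interp.plot_confusion_matrix()
#   interp.most_confused(min_val=2)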
class MultiLabelClassificationInterpretation(Interpretation):
"Interpretation methods for classification models."
def __init__(self, learn:Learner, preds:Tensor, y_true:Tensor, losses:Tensor, ds_type:DatasetType=DatasetType.Valid,
sigmoid:bool=True, thresh:float=0.3):
raise NotImplementedError
super(MultiLabelClassificationInterpretation, self).__init__(learn,preds,y_true,losses,ds_type)
self.pred_class = self.preds.sigmoid(dim=1)>thresh if sigmoid else self.preds>thresh
| 51.651452 | 147 | 0.683323 |
4a21e90c9a1dbeba7d3614c270fc96365dbf0870 | 1,643 | py | Python | tests/chainer_tests/functions_tests/test_relu.py | umitanuki/chainer | 225c56b233e684ff4855451d2af4c2fb66915f21 | [
"MIT"
] | null | null | null | tests/chainer_tests/functions_tests/test_relu.py | umitanuki/chainer | 225c56b233e684ff4855451d2af4c2fb66915f21 | [
"MIT"
] | null | null | null | tests/chainer_tests/functions_tests/test_relu.py | umitanuki/chainer | 225c56b233e684ff4855451d2af4c2fb66915f21 | [
"MIT"
] | 1 | 2018-11-18T00:36:51.000Z | 2018-11-18T00:36:51.000Z | import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
class TestReLU(unittest.TestCase):
def setUp(self):
# Avoid unstability of numerical grad
self.x = numpy.random.uniform(.5, 1, (3, 2)).astype(numpy.float32)
self.x *= numpy.random.randint(2, size=(3, 2)) * 2 - 1
self.gy = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
def check_backward(self, x_data, y_grad, use_cudnn=True):
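# Compare the analytic gradient from backward() against a numerical estimate
# computed with gradient_check.numerical_grad (comment added for clarity).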
x = chainer.Variable(x_data)
y = functions.relu(x, use_cudnn=use_cudnn)
self.assertEqual(y.data.dtype, numpy.float32)
y.grad = y_grad
y.backward()
func = y.creator
f = lambda: func.forward((x.data,))
gx, = gradient_check.numerical_grad(f, (x.data,), (y.grad,))
gradient_check.assert_allclose(gx, x.grad)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.cudnn
@condition.retry(3)
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.gpu
@condition.retry(3)
def test_backward_cpu_no_cudnn(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy), False)
class TestReLUZeroDim(TestReLU):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, ()).astype(numpy.float32)
self.gy = numpy.random.uniform(-1, 1, ()).astype(numpy.float32)
testing.run_module(__name__, __file__)
| 28.327586 | 77 | 0.669507 |