text | meta
---|---|
from setuptools import setup
setup(
name='multitables',
version='2.0.1',
url='https://github.com/ghcollin/multitables',
description='High performance parallel reading of HDF5 files using PyTables, multiprocessing, and shared memory.',
long_description=open("README.rst").read(),
long_description_content_type="text/x-rst",
keywords='tables hdf5 parallel concurrent',
license='MIT',
author='ghcollin',
author_email='',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Topic :: Database',
'Topic :: Software Development :: Libraries :: Python Modules'
],
packages=['multitables'],
install_requires=['numpy', 'tables', 'msgpack >= 0.6.0', 'wrapt']
)
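# Installation sketch (standard setuptools/pip workflow; the commands below are illustrative
# and not part of the original file):
#   pip install .            # install from a source checkout
#   python setup.py sdist    # or build a source distribution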
| {
"content_hash": "e82322bf5114ef443c7b7637e67b4a93",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 118,
"avg_line_length": 37.888888888888886,
"alnum_prop": 0.6363636363636364,
"repo_name": "ghcollin/multitables",
"id": "260aaebe440176ac10eaa43ae74575aa34086802",
"size": "1023",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44491"
}
],
"symlink_target": ""
} |
import time
from bs4 import BeautifulSoup
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
log = CPLog(__name__)
class Base(TorrentProvider):
urls = {
'test': 'http://scenefz.net',
'login': 'http://scenefz.net/takelogin.php',
'login_check': 'http://scenefz.net/my.php',
'search': 'http://scenefz.net/browse.php?%s',
'baseurl': 'http://scenefz.net/%s',
}
http_time_between_calls = 1 # Seconds
def _searchOnTitle(self, title, movie, quality, results):
url = self.urls['search'] % self.buildUrl(title, movie, quality)
data = self.getHTMLData(url)
if data:
html = BeautifulSoup(data)
try:
result_table = html.find('table', attrs={'id': 'torrenttable'})
if not result_table:
return False
entries = result_table.find_all('tr')
for result in entries[0:]:
try:
if result['id'] == 'tdivider_title':
continue
except KeyError: pass  # row has no 'id' attribute
if result.find_all(id='tdivider_title'):
continue
all_cells = result.find_all('td')
torrent = all_cells[1].find('a')
download = all_cells[5].find_all('a')[2]
torrent_id = torrent['href']
torrent_id = torrent_id.replace('details.php?id=', '')
torrent_name = torrent.getText()
torrent_size = self.parseSize(str(all_cells[2].getText()).replace(',', '.'))
seed_leech = all_cells[4].find_all('a')
torrent_seeders = tryInt(seed_leech[0].getText())
torrent_leechers = tryInt(seed_leech[1].getText())
torrent_url = self.urls['baseurl'] % download['href']
torrent_detail_url = self.urls['baseurl'] % torrent['href']
results.append({
'id': torrent_id,
'name': torrent_name,
'size': torrent_size,
'seeders': torrent_seeders,
'leechers': torrent_leechers,
'url': torrent_url,
'detail_url': torrent_detail_url,
})
except Exception as e:
log.error('Failed getting results from {}: {}'.format(self.getName(), e))
def getLoginParams(self):
log.info('Logging in to scenefz.net with user [{}]'.format(self.conf('username')))
return {
'username': self.conf('username'),
'password': self.conf('password'),
}
@staticmethod
def loginSuccess(output):
return 'loading...' in output.lower()
loginCheckSuccess = loginSuccess
| {
"content_hash": "e49debe58f891df8b2cae39051a02d35",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 96,
"avg_line_length": 36.05952380952381,
"alnum_prop": 0.5057774843182569,
"repo_name": "valentinolaru/couchpotato-searchers-plugins",
"id": "a121045575dc253e31bf439185601979981ebcd5",
"size": "3029",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scenefz/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23397"
}
],
"symlink_target": ""
} |
import random
import string
import zmq
import numpy as np
import weakref
from .ringbuffer import RingBuffer
from .streamhelpers import all_transfermodes
from ..rpc import ObjectProxy
from .arraytools import fix_struct_dtype, make_dtype
default_stream = dict(
protocol='tcp',
interface='127.0.0.1',
port='*',
transfermode='plaindata',
streamtype='analogsignal',
dtype='float32',
shape=(-1, 1),
axisorder=None,
buffer_size=0,
compression='',
scale=None,
offset=None,
units='',
sample_rate=1.,
double=False,  # makes sense only for transfermode='sharedmem'
fill=None,
)
class OutputStream(object):
"""Class for streaming data to an InputStream.
Streams allow data to be sent between objects that may exist on different
threads, processes, or machines. They offer a variety of transfer methods
including TCP for remote connections and IPC for local connections.
Parameters
----------
spec : dict
Required parameters for this stream. These may not be overridden when
calling :func:`configure` later on.
node : Node or None
name : str or None
"""
def __init__(self, spec=None, node=None, name=None):
spec = {} if spec is None else spec
self.last_index = 0
self.configured = False
self.spec = spec  # a priori stream params; these may not be overridden when configure() is called
if node is not None:
self.node = weakref.ref(node)
else:
self.node = None
self.name = name
def configure(self, **kargs):
"""
Configure the output stream.
Parameters
----------
protocol : 'tcp', 'udp', 'inproc' or 'ipc' (Linux only)
The type of protocol used for the zmq.PUB socket
interface : str
The bind address for the zmq.PUB socket
port : str
The port for the zmq.PUB socket
transfermode: str
The method used for data transfer:
* 'plaindata': data are sent over a plain socket in two parts: (frame index, data).
* 'sharedmem': data are stored in shared memory in a ring buffer and the current frame index is sent over the socket.
* 'shared_cuda_buffer': (planned) data are stored in shared Cuda buffer and the current frame index is sent over the socket.
* 'share_opencl_buffer': (planned) data are stored in shared OpenCL buffer and the current frame index is sent over the socket.
All registered transfer modes can be found in `pyacq.core.stream.all_transfermodes`.
streamtype: 'analogsignal', 'digitalsignal', 'event' or 'image/video'
The nature of data to be transferred.
dtype: str ('float32', 'float64', [('r', 'uint16'), ('g', 'uint16'), ('b', 'uint16')], ...)
The numpy.dtype of the data buffer. It can be a composed dtype for event or images.
shape: list
The shape of each data frame. If the stream will send chunks of variable length,
then use -1 for the first (time) dimension.
* For ``streamtype=image``, the shape should be ``(-1, H, W)`` or ``(n_frames, H, W)``.
* For ``streamtype=analogsignal`` the shape should be ``(n_samples, n_channels)`` or ``(-1, n_channels)``.
compression: '', 'blosclz', 'blosc-lz4'
The compression for the data stream. The default uses no compression.
scale: float
An optional scale factor + offset to apply to the data before it is sent over the stream.
``output = offset + scale * input``
offset:
See *scale*.
units: str
Units of the stream data. Mainly used for 'analogsignal'.
sample_rate: float or None
Sample rate of the stream in Hz.
kwargs :
All extra keyword arguments are passed to the DataSender constructor
for the chosen transfermode (for example, see
:class:`SharedMemSender <stream.sharedmemstream.SharedMemSender>`).
"""
self.params = dict(default_stream)
self.params.update(self.spec)
for k in kargs:
if k in self.spec:
assert kargs[k]==self.spec[k], \
'Cannot configure {}={}; already fixed in self.spec as {}={}'.format(k, kargs[k], k, self.spec[k])
self.params.update(kargs)
if 'dtype' in self.params:
# fix errors in structured dtypes caused by bad serialization
self.params['dtype'] = fix_struct_dtype(self.params['dtype'])
shape = self.params['shape']
assert shape[0] == -1 or shape[0] > 0, "First element in shape must be -1 or > 0."
for i in range(1, len(shape)):
assert shape[i] > 0, "Shape index %d must be > 0." % i
if self.params['protocol'] in ('inproc', 'ipc'):
pipename = u'pyacq_pipe_'+''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(24))
self.params['interface'] = pipename
self.url = '{protocol}://{interface}'.format(**self.params)
else:
self.url = '{protocol}://{interface}:{port}'.format(**self.params)
context = zmq.Context.instance()
self.socket = context.socket(zmq.PUB)
self.socket.linger = 1000 # don't let socket deadlock when exiting
self.socket.bind(self.url)
self.addr = self.socket.getsockopt(zmq.LAST_ENDPOINT).decode()
self.port = self.addr.rpartition(':')[2]
self.params['port'] = self.port
transfermode = self.params['transfermode']
if transfermode not in all_transfermodes:
raise ValueError("Unsupported transfer mode '%s'" % transfermode)
sender_class = all_transfermodes[transfermode][0]
self.sender = sender_class(self.socket, self.params)
self.configured = True
if self.node and self.node():
self.node().after_output_configure(self.name)
def send(self, data, index=None, **kargs):
"""Send a data chunk and its frame index.
Parameters
----------
index: int
The absolute sample index. This is the index of the last sample + 1.
data: np.ndarray or bytes
The chunk of data to send.
"""
if index is None:
index = self.last_index + data.shape[0]
self.last_index = index
self.sender.send(index, data, **kargs)
def close(self):
"""Close the output.
This closes the socket and releases shared memory, if necessary.
"""
self.sender.close()
self.socket.close()
del self.socket
del self.sender
def reset_buffer_index(self):
"""
Reset the buffer index.
Useful when a Node is started and stopped multiple times and the index must be reset.
"""
self.last_index = 0
self.sender.reset_index()
def _shape_equal(shape1, shape2):
"""
Check whether two stream shapes are compatible.
Roughly shape1 == shape2, but also handles:
* shapes given as either list or tuple
* shapes with one dimension equal to -1
"""
shape1 = list(shape1)
shape2 = list(shape2)
if len(shape1) != len(shape2):
return False
for i in range(len(shape1)):
if shape1[i]==-1 or shape2[i]==-1:
continue
if shape1[i]!=shape2[i]:
return False
return True
class InputStream(object):
"""Class for streaming data from an OutputStream.
Streams allow data to be sent between objects that may exist on different
threads, processes, or machines. They offer a variety of transfer methods
including TCP for remote connections and IPC for local connections.
Typical InputStream usage:
1. Use :func:`InputStream.connect()` to connect to an :class:`OutputStream`
defined elsewhere. Usually, the argument will actually be a proxy to a
remote :class:`OutputStream`.
2. Poll for incoming data packets with :func:`InputStream.poll()`.
3. Receive the next packet with :func:`InputStream.recv()`.
Optionally, use :func:`InputStream.set_buffer()` to attach a
:class:`RingBuffer` for easier data handling.
"""
def __init__(self, spec=None, node=None, name=None):
self.spec = {} if spec is None else spec
self.configured = False
if node is not None:
self.node = weakref.ref(node)
else:
self.node = None
self.name = name
self.buffer = None
self._own_buffer = False # whether InputStream should populate buffer
def connect(self, output):
"""Connect an output to this input.
Any data sent over the stream using :func:`output.send() <OutputStream.send>`
can be retrieved using :func:`input.recv() <InputStream.recv>`.
Parameters
----------
output : OutputStream (or proxy to a remote OutputStream)
The OutputStream to connect.
"""
if isinstance(output, dict):
self.params = output
elif isinstance(output, OutputStream):
self.params = output.params
elif isinstance(output, ObjectProxy):
self.params = output.params._get_value()
else:
raise TypeError("Invalid type for stream: %s" % type(output))
if self.params['protocol'] in ('inproc', 'ipc'):
self.url = '{protocol}://{interface}'.format(**self.params)
else:
self.url = '{protocol}://{interface}:{port}'.format(**self.params)
# allow some keys in self.spec to override self.params
readonly_params = ['protocol', 'transfermode', 'shape', 'dtype']
for k,v in self.spec.items():
if k in readonly_params:
if k=='shape':
valid = _shape_equal(v, self.params[k])
elif k=='dtype':
#~ valid = v == self.params[k]
valid = make_dtype(v) == make_dtype(self.params[k])
else:
valid = (v == self.params[k])
if not valid:
raise ValueError("InputStream parameter %s=%s does not match connected output %s=%s." %
(k, v, k, self.params[k]))
else:
self.params[k] = v
context = zmq.Context.instance()
self.socket = context.socket(zmq.SUB)
self.socket.linger = 1000 # don't let socket deadlock when exiting
self.socket.setsockopt(zmq.SUBSCRIBE, b'')
#~ self.socket.setsockopt(zmq.DELAY_ATTACH_ON_CONNECT,1)
self.socket.connect(self.url)
transfermode = self.params['transfermode']
if transfermode not in all_transfermodes:
raise ValueError("Unsupported transfer mode '%s'" % transfermode)
receiver_class = all_transfermodes[transfermode][1]
self.receiver = receiver_class(self.socket, self.params)
self.connected = True
if self.node and self.node():
self.node().after_input_connect(self.name)
def poll(self, timeout=None):
"""Poll the socket of input stream.
Return True if a new packet is available.
"""
return self.socket.poll(timeout=timeout)
def recv(self, **kargs):
"""
Receive a chunk of data.
Returns
-------
index: int
The absolute sample index. This is the index of the last sample + 1.
data: np.ndarray or bytes
The received chunk of data.
If the stream uses ``transfermode='sharedarray'``, then the data is
returned as None and you must use ``input_stream[start:stop]``
to read from the shared array or ``input_stream.recv(with_data=True)``
to return the received data chunk.
"""
index, data = self.receiver.recv(**kargs)
if self._own_buffer and data is not None and self.buffer is not None:
self.buffer.new_chunk(data, index=index)
return index, data
def empty_queue(self):
"""
Receive and discard all pending messages in the zmq queue.
This is useful when a Node does not start at the same time as the other nodes
but was already connected. In that case the zmq high-water-mark mechanism queues
messages, and when you start consuming you would first get old messages, which
can be annoying.
This method calls recv with timeout=0 until the queue is empty.
"""
while self.socket.poll(timeout=0)>0:
self.socket.recv_multipart()
def close(self):
"""Close the stream.
This closes the socket. No data can be received after this point.
"""
self.receiver.close()
self.socket.close()
del self.socket
def __getitem__(self, *args):
"""Return a data slice from the RingBuffer attached to this InputStream.
If no RingBuffer is attached, raise an exception. See ``set_buffer()``.
"""
if self.buffer is None:
raise TypeError("No ring buffer configured for this InputStream.")
return self.buffer.__getitem__(*args)
def get_data(self, *args, **kargs):
"""
Return a segment of the RingBuffer attached to this InputStream.
If no RingBuffer is attached, raise an exception.
For parameters, see :func:`RingBuffer.get_data()`.
See also: :func:`InputStream.set_buffer()`.
"""
if self.buffer is None:
raise TypeError("No ring buffer configured for this InputStream.")
return self.buffer.get_data(*args, **kargs)
def set_buffer(self, size=None, double=True, axisorder=None, shmem=None, fill=None):
"""Ensure that this InputStream has a RingBuffer at least as large as
*size* and with the specified double-mode and axis order.
If necessary, this will attach a new RingBuffer to the stream and remove
any existing buffer.
"""
# first see if we already have a buffer that meets requirements
bufs = []
if self.buffer is not None:
bufs.append((self.buffer, self._own_buffer))
if self.receiver.buffer is not None:
bufs.append((self.receiver.buffer, False))
for buf, own in bufs:
if buf.shape[0] >= size and buf.double == double and (axisorder is None or all(buf.axisorder == axisorder)):
self.buffer = buf
self._own_buffer = own
return
# attach a new buffer
shape = (size,) + tuple(self.params['shape'][1:])
dtype = make_dtype(self.params['dtype'])
self.buffer = RingBuffer(shape=shape, dtype=dtype, double=double, axisorder=axisorder, shmem=shmem, fill=fill)
self._own_buffer = True
def reset_buffer_index(self):
"""
Reset the buffer index.
Useful when a Node is started and stopped multiple times and the index must be reset.
"""
if self.buffer is not None and self._own_buffer:
self.buffer.reset_index()
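# A minimal end-to-end sketch (illustrative only, not part of the original module; in real use
# streams are created and configured by their Nodes). It wires an OutputStream to an
# InputStream over TCP using the default 'plaindata' transfer mode.
if __name__ == '__main__':
    out = OutputStream()
    out.configure(protocol='tcp', interface='127.0.0.1', transfermode='plaindata',
                  dtype='float32', shape=(-1, 4), sample_rate=1000.)
    inp = InputStream()
    inp.connect(out)
    out.send(np.zeros((10, 4), dtype='float32'))
    # PUB/SUB sockets can drop messages sent before the subscription is fully established,
    # so poll with a timeout instead of assuming the chunk has arrived.
    if inp.poll(timeout=1000):
        index, data = inp.recv()
    inp.close()
    out.close()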
| {
"content_hash": "088edba5b8484ac836f53b1f8f03ff84",
"timestamp": "",
"source": "github",
"line_count": 394,
"max_line_length": 139,
"avg_line_length": 39.33756345177665,
"alnum_prop": 0.5875217755984257,
"repo_name": "pyacq/pyacq",
"id": "c3354c62169834a024a2c7ee8cc02cd44e36a1f9",
"size": "15670",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyacq/core/stream/stream.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "PowerShell",
"bytes": "4137"
},
{
"name": "Python",
"bytes": "566825"
},
{
"name": "Shell",
"bytes": "489"
}
],
"symlink_target": ""
} |
import pytest
from src.str_permutations import helper_perm, str_permutation
def test_helper_perm1():
all_perm = set()
assert helper_perm('abc', 2, all_perm) == set(['abc'])
def test_helper_perm2():
all_perm = set()
assert helper_perm('abc', 1, all_perm) == set(['abc', 'acb'])
def test_helper_perm3():
all_perm = set()
result = set(['abc', 'acb', 'bac', 'bca', 'cab', 'cba'])
assert helper_perm('abc', 0, all_perm) == result
def test_helper_perm4():
all_perm = set()
assert helper_perm('', 1, all_perm) == set([])
def test_helper_perm5():
all_perm = set()
assert helper_perm(None, 1, all_perm) == set([])
def test_str_permutation1():
result = ['abc', 'acb', 'bac', 'bca', 'cab', 'cba']
assert str_permutation('abc') == result
def test_str_permutation2():
assert str_permutation('') == []
def test_str_permutation3():
assert str_permutation(None) == []
| {
"content_hash": "4f3a60f5b049fdc1ffdcde817b42f368",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 65,
"avg_line_length": 22.634146341463413,
"alnum_prop": 0.5969827586206896,
"repo_name": "tanyaweaver/code-katas",
"id": "7b6238194ebc51bb6ee63900f0a3def51244a76b",
"size": "928",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_permutations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42495"
}
],
"symlink_target": ""
} |
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
DEBUG = False
TEMPLATE_DEBUG = DEBUG
FEATURES = {
'ENABLE_DJANGO_ADMIN_SITE': True,
'ADMIN_LIB': 'xadmin', # choose either 'xadmin' or 'admin'; the two cannot be
                       # used at the same time (see the docs for plain admin usage).
                       # With xadmin, admin classes inherit from object,
                       # are registered via xadmin.site.register,
                       # and live in adminx.py instead of admin.py
'USE_SQLITE3': False,
'EMAIL_AS_USERNAME': False, # use the email address as the username
'USE_YUN_STORAGE': False,
'ENABLE_SOCIAL_AUTH': False, # enable third-party (social) login
}
# With EMAIL_AS_USERNAME: True, the email address is used as the username.
# In practice this just sets user.username = email; keep any other display names in the profile.
# https://github.com/dabapps/django-email-as-username
#
# from emailusernames.utils import create_user, create_superuser
#
# create_user('[email protected]', 'password')
# Of course you can also simply set user.username to the email yourself and it works.
# create_superuser('[email protected]', 'password')
# from emailusernames.utils import get_user, user_exists
#
# user = get_user('[email protected]')
# ...
#
# if user_exists('[email protected]'):
# ...
#
#
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
AUTH_PROFILE_MODULE = 'myauth.UserProfile'
# db config
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'dev', # Or path to database file if using sqlite3.
'USER': 'root', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
if FEATURES.get('USE_SQLITE3'):
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'dev.db',
}
}
if 'test' in sys.argv:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.4/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Asia/Shanghai'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'zh-CN'
LANGUAGES = (
# ('en', 'English'),
('zh-cn', 'Simplified Chinese'),
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
LOCALE_PATHS = (
os.path.join(BASE_DIR, 'locale'),
)
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, 'medias') # use an absolute path when not using cloud storage
if FEATURES.get('USE_YUN_STORAGE'):
MEDIA_ROOT = 'medias' # cloud storage expects a relative path like this
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(BASE_DIR, 'statics') # use an absolute path when not using cloud storage
if FEATURES.get('USE_YUN_STORAGE'):
STATIC_ROOT = 'statics' # cloud storage expects a relative path like this
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static_dev"),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
'pipeline.finders.PipelineFinder',
'pipeline.finders.CachedFileFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '@lba)zukbwoe1yx#!3g5!9i_ti-6dyw=!3zmfl@kh31e2(=6^0'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
'custom_django_partial.middleware.HttpResponseNotAllowedMiddleware',
'djangomako.middleware.MakoMiddleware',
'pipeline.middleware.MinifyHTMLMiddleware',
)
# Django's default TEMPLATE_CONTEXT_PROCESSORS
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
)
ROOT_URLCONF = 'django-scaffold.urls'
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'django-scaffold.wsgi.application'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'pipeline',
'south',
'ecstatic',
'myauth', # custom auth/permission code lives here
'app', # the small default demo that ships with the clone
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
from . import logsettings
LOGGING = logsettings.get_logger_config(debug=DEBUG)
# memcached
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
"KEY_FUNCTION": "utils.memcache.safe_key",
"KEY_PREFIX": "memcached_default",
"TIMEOUT": str(60 * 3),
"LOCATION": [
"localhost:11211"
],
},
}
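# Usage sketch (standard Django cache API; KEY_FUNCTION above only changes how keys are built):
#   from django.core.cache import cache
#   cache.set('greeting', 'hello', 60)
#   cache.get('greeting')  # -> 'hello' until the 60 s timeout expires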
# FEATURES
# enable the plain Django admin
if FEATURES.get('ADMIN_LIB', '') == 'admin':
if DEBUG or FEATURES.get('ENABLE_DJANGO_ADMIN_SITE'):
INSTALLED_APPS += (
'django.contrib.admin',
)
# enable xadmin
if FEATURES.get('ADMIN_LIB', '') == 'xadmin':
if DEBUG or FEATURES.get('ENABLE_DJANGO_ADMIN_SITE'):
INSTALLED_APPS += (
'xadmin',
'crispy_forms',
)
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
ECSTATIC_MANIFEST_FILE = os.path.join(BASE_DIR, 'staticmanifest.json')
# personal settings, or settings you do not want committed to git
if FEATURES.get('USE_YUN_STORAGE'):
# Qiniu is used by default
# Doc: https://github.com/duoduo369/django-qiniu-storage
QINIU_ACCESS_KEY = ''
QINIU_SECRET_KEY = ''
QINIU_BUCKET_NAME = ''
QINIU_BUCKET_DOMAIN = ''
# the next two lines store static files on Qiniu cloud without a hash suffix
DEFAULT_FILE_STORAGE = 'qiniustorage.backends.QiniuMediaStorage'
# STATICFILES_STORAGE = 'qiniustorage.backends.QiniuStaticStorage'
# the following line adds a hash suffix on Qiniu cloud; note it applies to static files only
STATICFILES_STORAGE = 'custom_django_partial.storages.QiniuCachedStaticStorage'
if FEATURES.get('EMAIL_AS_USERNAME'):
AUTHENTICATION_BACKENDS = (
'myauth.backends.EmailAuthBackend',
)
# add any additional AUTHENTICATION_BACKENDS here
# if you need a very complex AUTHENTICATION_BACKENDS ordering,
# rewrite this tuple directly, but mind the EMAIL_AS_USERNAME FEATURE:
# when that FEATURE is enabled, turn off Django's default backend and authenticate by email
# AUTHENTICATION_BACKENDS = (
# 'emailusernames.backends.EmailAuthBackend',
# # Uncomment the following to make Django tests pass:
# # 'django.contrib.auth.backends.ModelBackend',
# )
#
# if you have other AUTHENTICATION_BACKENDS settings, override them at the bottom of settings.py
if FEATURES.get('ENABLE_SOCIAL_AUTH'):
if FEATURES.get('EMAIL_AS_USERNAME'):
assert 0, u'EMAIL_AS_USERNAME is enabled; please configure the backends below yourself'
INSTALLED_APPS += (
'social_auth',
)
####################### oauth ###################################
AUTHENTICATION_BACKENDS = (
# uncomment the backends below to enable the corresponding oauth providers
#'social_auth.backends.contrib.douban.Douban2Backend',
#'social_auth.backends.contrib.qq.QQBackend',
#'social_auth.backends.contrib.weibo.WeiboBackend',
#'social_auth.backends.contrib.renren.RenRenBackend',
#'social_auth.backends.contrib.baidu.BaiduBackend',
#'social_auth.backends.contrib.weixin.WeixinBackend',
'django.contrib.auth.backends.ModelBackend',
# when using EMAIL_AS_USERNAME, comment out django.contrib.auth.backends.ModelBackend,
# uncomment the line below, and remove the assert 0 above
# 'myauth.backends.EmailAuthBackend',
)
TEMPLATE_CONTEXT_PROCESSORS += (
'django.contrib.auth.context_processors.auth',
# in templates, login can use "{% url socialauth_begin 'douban-oauth2' %}"
'social_auth.context_processors.social_auth_by_type_backends',
'social_auth.context_processors.social_auth_login_redirect',
)
SOCIAL_AUTH_PIPELINE = (
'social.pipeline.social_auth.social_details',
'social.pipeline.social_auth.social_uid',
'social.pipeline.social_auth.auth_allowed',
'social.pipeline.partial.save_status_to_session',
'social.pipeline.social_auth.save_authentication_user_detail_to_session',
)
# Note that SOCIAL_AUTH_PIPELINE does not use some of the pipelines from the docs,
# because those pipelines create a new Django user on a fresh auth, which cannot be
# handled when the email is used as the username. The registration flow is therefore
# cut short in the default SOCIAL_AUTH_PIPELINE (sometimes you want the user to bind
# an email address after the third-party login succeeds and redirects back).
# django-social-auth is overridden so that, at SOCIAL_AUTH_AUTHENTICATION_SUCCESS_URL,
# everything returned by the third party is stored in the session as
# 'authentication_user_detail' (request.session.get('authentication_user_detail')),
# and you can then create the user from that information to meet your own signup needs.
SOCIAL_AUTH_DISCONNECT_PIPELINE = (
# Verifies that the social association can be disconnected from the current
# user (ensure that the user login mechanism is not compromised by this
# disconnection).
'social.pipeline.disconnect.allowed_to_disconnect',
# Collects the social associations to disconnect.
'social.pipeline.disconnect.get_entries',
# Revoke any access_token when possible.
'social.pipeline.disconnect.revoke_tokens',
# Removes the social associations.
'social.pipeline.disconnect.disconnect'
)
SOCIAL_AUTH_LOGIN_URL = '/login-url'
SOCIAL_AUTH_LOGIN_ERROR_URL = '/login-error'
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/logged-in'
SOCIAL_AUTH_NEW_USER_REDIRECT_URL = '/new-users-redirect-url'
SOCIAL_AUTH_NEW_ASSOCIATION_REDIRECT_URL = '/new-association-redirect-url'
SOCIAL_AUTH_BACKEND_ERROR_URL = '/new-error-url'
SOCIAL_AUTH_AUTHENTICATION_SUCCESS_URL = '/authentication_success_url'
SOCIAL_AUTH_WEIBO_KEY = ''
SOCIAL_AUTH_WEIBO_SECRET = ''
SOCIAL_AUTH_QQ_KEY = ''
SOCIAL_AUTH_QQ_SECRET = ''
SOCIAL_AUTH_DOUBAN_OAUTH2_KEY = ''
SOCIAL_AUTH_DOUBAN_OAUTH2_SECRET = ''
SOCIAL_AUTH_RENREN_KEY = ''
SOCIAL_AUTH_RENREN_SECRET = ''
SOCIAL_AUTH_BAIDU_KEY = ''
SOCIAL_AUTH_BAIDU_SECRET = ''
SOCIAL_AUTH_WEIXIN_KEY = ''
SOCIAL_AUTH_WEIXIN_SECRET = ''
SOCIAL_AUTH_WEIXIN_SCOPE = ['snsapi_login',]
try:
from .local_settings import *
except ImportError:
pass
| {
"content_hash": "29462fff3f1f4fffa81c8e5b8ecd68a9",
"timestamp": "",
"source": "github",
"line_count": 385,
"max_line_length": 112,
"avg_line_length": 32.994805194805195,
"alnum_prop": 0.6804691805085413,
"repo_name": "duoduo369/django-scaffold",
"id": "ac58327b9de9333e7b812404bdb9a3ffe85fc1b2",
"size": "13834",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django-scaffold/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3932"
},
{
"name": "JavaScript",
"bytes": "349401"
},
{
"name": "Python",
"bytes": "34394"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from injector import exceptions, graph, injector
class Dependencies(object):
""" A factory for setting up and building an Injector instance. """
def __init__(self):
self._factories = dict()
def register_value(self, name, value):
"""
Bind a value to a name. The Injector will always return the value as-is.
:param name: A string naming the dependency (e.g. 'db-host-name')
:param value: Any value (e.g. 'master.postgres.internal')
"""
self.register_factory(name, lambda: value)
def register_factory(self, name, factory, dependencies=None):
""" Binds a factory to a name. The injector will call the factory function once
(if the name is ever used), and always return the value that the factory returns.
The factory will be called with the dependencies (if any listed) as arguments.
:param name: A string naming the dependency (e.g. 'db-connection')
:param factory: A factory function to create the dependency
:param dependencies: (optional) A list of dependencies of the factory function
"""
self._check_name(name)
self._factories[name] = (factory, dependencies)
def _check_name(self, name):
if not name or not isinstance(name, str):
raise exceptions.BadNameException("Bad name: {!r}".format(name))
if name in self._factories:
raise exceptions.DuplicateNameException("Duplicate name: {}".format(name))
def _make_dependency_graph(self):
return graph.DependencyGraph({
name: dependencies or []
for name, (_, dependencies) in self._factories.items()
})
def _check_injector_state(self):
dependency_graph = self._make_dependency_graph()
if dependency_graph.has_missing_dependencies():
raise exceptions.MissingDependencyException()
if dependency_graph.has_circular_dependencies():
raise exceptions.CircularDependencyException()
def build_injector(self):
""" Builds an injector instance that can be used to inject dependencies.
Also checks for common errors (missing dependencies and circular dependencies).
:return: Injector
"""
self._check_injector_state()
return injector.Injector(self._factories)
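# Usage sketch (hypothetical names; how values are later retrieved depends on the Injector
# class defined elsewhere in this package):
if __name__ == '__main__':
    deps = Dependencies()
    deps.register_value('db-host-name', 'master.postgres.internal')
    deps.register_factory('db-connection',
                          lambda host: 'connection to ' + host,
                          dependencies=['db-host-name'])
    built_injector = deps.build_injector()  # raises on missing or circular dependencies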
| {
"content_hash": "45ff309c60b8ce5026bfd42b0d799042",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 89,
"avg_line_length": 40.355932203389834,
"alnum_prop": 0.6551868962620747,
"repo_name": "jmikkola/dep_injector",
"id": "cc6931bfb24cca213a19455fdd829d20a2dde0d4",
"size": "2381",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "injector/dependencies.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6791"
},
{
"name": "Python",
"bytes": "20806"
}
],
"symlink_target": ""
} |
import itertools
from django.contrib.admin.utils import NestedObjects
from django.db import router
def related_objects(obj):
collector = NestedObjects(using=router.db_for_write(obj))
collector.collect([obj])
def flatten(elem):
if isinstance(elem, list):
return itertools.chain.from_iterable(map(flatten, elem))
elif obj != elem:
return (elem,)
return ()
return flatten(collector.nested())
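# Usage sketch (hypothetical model instance): iterate over everything that would be
# cascade-deleted along with the given object, excluding the object itself.
#   for related in related_objects(some_model_instance):
#       print(type(related), related.pk)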
| {
"content_hash": "102cfb68f99c93f17de6cd87f793fbe2",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 68,
"avg_line_length": 26.88235294117647,
"alnum_prop": 0.6673960612691466,
"repo_name": "AccentDesign/Accent_SoftDelete",
"id": "79bae60db035d8f967dfc5055e18707f807eb859",
"size": "457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "soft_delete/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "193"
},
{
"name": "Python",
"bytes": "21192"
}
],
"symlink_target": ""
} |
BOT_NAME = 'tbp_scraper'
SPIDER_MODULES = ['tbp_scraper.spiders']
NEWSPIDER_MODULE = 'tbp_scraper.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'tbp_scraper (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'tbp_scraper.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'tbp_scraper.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'tbp_scraper.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| {
"content_hash": "9618c69c517cb13027a7d2fe9e840dc5",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 109,
"avg_line_length": 34.58227848101266,
"alnum_prop": 0.7675695461200586,
"repo_name": "nathanpucheril/CalExam",
"id": "db4004ab86dae3df6b84a177164f318e42be00ae",
"size": "3168",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Scrapy Attempt/tbp_scraper/tbp_scraper/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5666"
},
{
"name": "HTML",
"bytes": "131123"
},
{
"name": "JavaScript",
"bytes": "58666"
},
{
"name": "Python",
"bytes": "16397"
}
],
"symlink_target": ""
} |
import os
import pytest
import re
import uuid
from subprocess import call
from tests.common.test_vector import TestDimension
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.skip import SkipIfS3, SkipIfIsilon
# Number of tables to create per thread
NUM_TBLS_PER_THREAD = 10
# Each client will get a different test id.
TEST_IDS = xrange(0, 10)
# Simple stress test for DDL operations. Attempts to create, cache,
# uncache, then drop many different tables in parallel.
class TestDdlStress(ImpalaTestSuite):
@classmethod
def get_workload(self):
return 'targeted-stress'
@classmethod
def add_test_dimensions(cls):
super(TestDdlStress, cls).add_test_dimensions()
cls.TestMatrix.add_dimension(TestDimension('test_id', *TEST_IDS))
cls.TestMatrix.add_constraint(lambda v: v.get_value('exec_option')['batch_size'] == 0)
cls.TestMatrix.add_constraint(lambda v:
v.get_value('table_format').file_format == 'text' and\
v.get_value('table_format').compression_codec == 'none')
@SkipIfS3.caching
@SkipIfIsilon.caching
@pytest.mark.stress
def test_create_cache_many_tables(self, vector):
self.client.set_configuration(vector.get_value('exec_option'))
self.client.execute("create database if not exists ddl_stress_testdb")
self.client.execute("use ddl_stress_testdb")
# Since this test runs concurrently, UUIDs generated by separate processes may
# (arguably) not be guaranteed to be unique. To be certain, we add the test ID to the
# tables we create.
test_id = vector.vector_values[0].value
tbl_uniquifier = "proc_%s_%s" % (test_id, str(uuid.uuid4()).replace('-', ''))
for i in xrange(NUM_TBLS_PER_THREAD):
tbl_name = "tmp_%s_%s" % (tbl_uniquifier, i)
# Create a partitioned and unpartitioned table
self.client.execute("create table %s (i int)" % tbl_name)
self.client.execute("create table %s_part (i int) partitioned by (j int)" %\
tbl_name)
# Add some data to each
self.client.execute("insert overwrite table %s select int_col from "\
"functional.alltypestiny" % tbl_name)
self.client.execute("insert overwrite table %s_part partition(j) "\
"values (1, 1), (2, 2), (3, 3), (4, 4), (4, 4)" % tbl_name)
# Cache the data the unpartitioned table
self.client.execute("alter table %s set cached in 'testPool'" % tbl_name)
# Cache, uncache, then re-cache the data in the partitioned table.
self.client.execute("alter table %s_part set cached in 'testPool'" % tbl_name)
self.client.execute("alter table %s_part set uncached" % tbl_name)
self.client.execute("alter table %s_part set cached in 'testPool'" % tbl_name)
# Drop the tables, this should remove the cache requests.
self.client.execute("drop table %s" % tbl_name)
self.client.execute("drop table %s_part" % tbl_name)
| {
"content_hash": "764e1c4bb425a8ddd03671579086e998",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 90,
"avg_line_length": 44.015151515151516,
"alnum_prop": 0.6919104991394148,
"repo_name": "scalingdata/Impala",
"id": "76309fdfa4624e2c5b5d2c087738d04abd925fda",
"size": "3527",
"binary": false,
"copies": "1",
"ref": "refs/heads/rocana-master",
"path": "tests/stress/test_ddl_stress.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "69445"
},
{
"name": "C++",
"bytes": "5890891"
},
{
"name": "CMake",
"bytes": "89845"
},
{
"name": "CSS",
"bytes": "86925"
},
{
"name": "Groff",
"bytes": "1633"
},
{
"name": "HTML",
"bytes": "56"
},
{
"name": "Java",
"bytes": "3280226"
},
{
"name": "Lex",
"bytes": "21429"
},
{
"name": "PLSQL",
"bytes": "3066"
},
{
"name": "PLpgSQL",
"bytes": "393"
},
{
"name": "Python",
"bytes": "1526425"
},
{
"name": "SQLPL",
"bytes": "187"
},
{
"name": "Shell",
"bytes": "145481"
},
{
"name": "Thrift",
"bytes": "240246"
},
{
"name": "Yacc",
"bytes": "78633"
}
],
"symlink_target": ""
} |
import requests
url = "https://api.pdf.co/v1/file/upload/url"
# You can also upload your own file into PDF.co and use it as url. Check "Upload File" samples for code snippets: https://github.com/bytescout/pdf-co-api-samples/tree/master/File%20Upload/
payload = {'name': 'sample.pdf',
'url': 'https://bytescout-com.s3.amazonaws.com/files/demo-files/cloud-api/pdf-split/sample.pdf'}
files = [
]
headers = {
'x-api-key': '{{x-api-key}}'
}
response = requests.request("POST", url, headers=headers, json = payload, files = files)
print(response.text.encode('utf8'))
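# A possible follow-up (sketch; the exact response fields are not shown in this sample and
# depend on the PDF.co API): check the HTTP status and decode the JSON body before use.
#   if response.ok:
#       result = response.json()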
| {
"content_hash": "724cd0f240cc0b647d5c92aa691db5c7",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 192,
"avg_line_length": 33.64705882352941,
"alnum_prop": 0.7027972027972028,
"repo_name": "bytescout/ByteScout-SDK-SourceCode",
"id": "c483a174bffac529464fd0461d8ee8205fc390f3",
"size": "572",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PDF.co Web API/File Upload for Small Files (up to 50KB)/Upload Small File from Url/Python/UploadSmallFileFromUrl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP.NET",
"bytes": "364116"
},
{
"name": "Apex",
"bytes": "243500"
},
{
"name": "Batchfile",
"bytes": "151832"
},
{
"name": "C",
"bytes": "224568"
},
{
"name": "C#",
"bytes": "12909855"
},
{
"name": "C++",
"bytes": "440474"
},
{
"name": "CSS",
"bytes": "56817"
},
{
"name": "Classic ASP",
"bytes": "46655"
},
{
"name": "Dockerfile",
"bytes": "776"
},
{
"name": "Gherkin",
"bytes": "3386"
},
{
"name": "HTML",
"bytes": "17276296"
},
{
"name": "Java",
"bytes": "1483408"
},
{
"name": "JavaScript",
"bytes": "3033610"
},
{
"name": "PHP",
"bytes": "838746"
},
{
"name": "Pascal",
"bytes": "398090"
},
{
"name": "PowerShell",
"bytes": "715204"
},
{
"name": "Python",
"bytes": "703542"
},
{
"name": "QMake",
"bytes": "880"
},
{
"name": "TSQL",
"bytes": "3080"
},
{
"name": "VBA",
"bytes": "383773"
},
{
"name": "VBScript",
"bytes": "1504410"
},
{
"name": "Visual Basic .NET",
"bytes": "9489450"
}
],
"symlink_target": ""
} |
import re
import typing
from collections import defaultdict
import hermes.language as lng
from hermes.tag.pos import PartOfSpeech
from hermes.types import TOKEN, PART_OF_SPEECH, LANGUAGE
from .span import Span
def return_none():
"""
Simple method that returns None for defaultdict to allow pickling
:return: None
"""
return None
class HString(Span):
"""
"""
def __init__(self, document: 'Document', start: int, end: int) -> None:
super(HString, self).__init__(start, end)
self._document = document
self._attributes = defaultdict(return_none)
self._tokens = []
def is_empty(self):
return self.start == self.end
def __len__(self):
return len(self._document.content)
def __lt__(self, other):
return self.start < other.start or (self.start == other.start and self.end < other.end)
@property
def content(self) -> str:
return self._document.content[self.start:self.end] if self._document and not self.is_empty() else ""
def __contains__(self, item) -> bool:
return item in self._attributes
def __getitem__(self, attribute) -> typing.Any:
"""
Gets the value of the given attribute
:param attribute: The attribute whose value should be returned
:return: The value of the attribute or None if the attribute is not present
"""
if isinstance(attribute, slice):
return HString(self, attribute.start, attribute.stop)
if attribute in self._attributes:
return self._attributes[attribute]
return None
def __setitem__(self, attribute, value) -> None:
"""
Sets the value of the given the attribute on this HString
:param attribute: The attribute to set
:param value: The value of the attribute
:return: None
"""
if value is None:
if attribute in self._attributes:
del self._attributes[attribute]
else:
self._attributes[attribute] = value
def __delitem__(self, key) -> typing.Any:
del (self._attributes[key])
def __getslice__(self, i, j):
return HString(self._document, i, j)
def __unicode__(self) -> str:
return self.content
def __str__(self) -> str:
return self.content
def __repr__(self) -> str:
return self.content
def has(self, attribute):
return attribute in self._attributes
@staticmethod
def union(*hstr):
if hstr:
start = min(hstr).start
end = max(hstr).end
return HString(hstr[0].document, start, end)
return HString(None, 0, 0)
def token_length(self):
return len(self.tokens())
def lemma(self):
if 'lemma' in self._attributes:
return self['lemma']
tkn = self.tokens()
if len(tkn) == 0:
return self.lower()
return " ".join([t.lemma() for t in tkn])
def is_stopword(self) -> bool:
return self.language().is_stopword(self)
def language(self) -> lng.Language:
if LANGUAGE in self._attributes:
return self._attributes[LANGUAGE]
return self._document.language() if self._document else lng.UNKNOWN
def rstrip(self, stripper=lambda x: x.is_stopword()):
if self.is_empty():
return self
for tkn in reversed(self.tokens()):
if not stripper(tkn):
return HString(self.document, self.start, tkn.end)
return HString(None, 0, 0)
def lstrip(self, stripper=lambda x: x.is_stopword()):
if self.is_empty():
return self
for tkn in self.tokens():
if not stripper(tkn):
return HString(self.document, tkn.start, self.end)
return HString(None, 0, 0)
def strip(self, stripper=lambda x: x.is_stopword()):
ls = self.lstrip(stripper)
return ls.rstrip(stripper) if len(ls.tokens()) > 1 else ls
def find(self, string, start=0) -> 'HString':
idx = self.content.find(string, start)
if idx >= 0:
return HString(self._document, idx, idx + len(string))
return HString(None, 0, 0)
def re_find(self, pattern, start=0) -> 'HString':
if isinstance(pattern, str):
regex = re.compile(pattern)
r = regex.search(self.content, start)
else:
r = pattern.search(self.content, start)
if r:
return HString(self._document, r.start(), r.end())
return HString(None, 0, 0)
def annotation(self, annotation_type) -> typing.List['Annotation']:
return self._document.annotation(annotation_type, start=self.start, end=self.end) if self._document else []
@property
def document(self) -> 'Document':
return self._document
def pos(self) -> PartOfSpeech:
p = self[PART_OF_SPEECH]
return p if p else PartOfSpeech.guess(self)
def tokens(self) -> typing.List['Annotation']:
if not self._tokens:
self._tokens = self.annotation(TOKEN)
return self._tokens
@property
def attributes(self) -> typing.Dict[str, typing.Any]:
return self._attributes
def lower(self) -> str:
return self.content.lower()
def upper(self) -> str:
return self.content.upper()
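# Usage sketch (hypothetical; assumes a hermes Document whose constructor is not shown here
# and which provides `content` plus token annotations):
#   doc = ...                    # some hermes Document
#   hs = HString(doc, 0, 5)      # span over the first five characters
#   hs.lower(); hs.tokens(); hs.find('quick'); hs['lemma']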
| {
"content_hash": "3f9d56e69a5146a3e8590ce850b7119a",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 115,
"avg_line_length": 30.602272727272727,
"alnum_prop": 0.5930189379873747,
"repo_name": "dbracewell/pyHermes",
"id": "b171b1b1ec2a4dfe5f6cd3396b09b729bacecc0c",
"size": "5386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hermes/core/hstring.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "96939"
}
],
"symlink_target": ""
} |
"""
Test the output of `frame diagnose` for calling virtual methods
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestDiagnoseVirtualMethodCall(TestBase):
mydir = TestBase.compute_mydir(__file__)
@skipUnlessDarwin
@skipIfDarwinEmbedded # <rdar://problem/33842388> frame diagnose doesn't work for armv7 or arm64
def test_diagnose_virtual_method_call(self):
TestBase.setUp(self)
self.build()
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
self.runCmd("run", RUN_SUCCEEDED)
self.expect("thread list", "Thread should be stopped",
substrs=['stopped'])
self.expect("frame diagnose", "Crash diagnosis was accurate", "foo")
| {
"content_hash": "3c85c6bfdd075633243f54b111ad2bfa",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 101,
"avg_line_length": 33.8,
"alnum_prop": 0.6840236686390533,
"repo_name": "endlessm/chromium-browser",
"id": "7ea42dea49c147962841443bc13d327e2c141f11",
"size": "845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/llvm/lldb/test/API/commands/frame/diagnose/virtual-method-call/TestDiagnoseDereferenceVirtualMethodCall.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import unittest
import sys
import os
ROOT_PATH = os.path.dirname(__file__)
if __name__ == '__main__':
if 'GAE_SDK' in os.environ:
SDK_PATH = os.environ['GAE_SDK']
sys.path.insert(0, SDK_PATH)
import dev_appserver
dev_appserver.fix_sys_path()
tests = unittest.TestLoader().discover(ROOT_PATH, "*tests.py")
result = unittest.TextTestRunner().run(tests)
if not result.wasSuccessful():
sys.exit(1)
| {
"content_hash": "fc00f3e17768ae405a3e7773afdb7f02",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 66,
"avg_line_length": 20.818181818181817,
"alnum_prop": 0.6157205240174672,
"repo_name": "renzon/gaecookie",
"id": "3adc1ec8e4b62082301006d70b43e2cb3a522217",
"size": "497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/testloader.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24416"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.users.analytics import update_analytics_indexes, get_count_of_active_commcare_users_in_domain, \
get_count_of_inactive_commcare_users_in_domain, get_active_commcare_users_in_domain, \
get_inactive_commcare_users_in_domain
from corehq.apps.users.dbaccessors.all_commcare_users import delete_all_users
from corehq.apps.users.models import CommCareUser, WebUser
class UserAnalyticsTest(TestCase):
@classmethod
def setUpClass(cls):
super(UserAnalyticsTest, cls).setUpClass()
delete_all_users()
cls.domain = create_domain('test')
cls.active_user = CommCareUser.create(
domain='test',
username='active',
password='secret',
is_active=True,
)
cls.active_user_2 = CommCareUser.create(
domain='test',
username='active2',
password='secret',
is_active=True,
)
cls.inactive_user = CommCareUser.create(
domain='test',
username='inactive',
password='secret',
is_active=False
)
cls.web_user = WebUser.create(
domain='test',
username='web',
password='secret',
)
update_analytics_indexes()
@classmethod
def tearDownClass(cls):
delete_all_users()
cls.domain.delete()
super(UserAnalyticsTest, cls).tearDownClass()
def test_get_count_of_active_commcare_users_in_domain(self):
self.assertEqual(2, get_count_of_active_commcare_users_in_domain('test'))
def test_get_count_of_active_commcare_users_in_domain_no_results(self):
self.assertEqual(0, get_count_of_active_commcare_users_in_domain('missing'))
def test_get_count_of_inactive_commcare_users_in_domain(self):
self.assertEqual(1, get_count_of_inactive_commcare_users_in_domain('test'))
def test_get_count_of_inactive_commcare_users_in_domain_no_results(self):
self.assertEqual(0, get_count_of_inactive_commcare_users_in_domain('missing'))
def test_get_active_commcare_users_in_domain(self):
users = get_active_commcare_users_in_domain('test')
self.assertEqual(2, len(users))
self.assertEqual(set(['active', 'active2']), set([u.username for u in users]))
def test_get_inactive_commcare_users_in_domain(self):
users = get_inactive_commcare_users_in_domain('test')
self.assertEqual(1, len(users))
self.assertEqual('inactive', users[0].username)
def test_get_active_commcare_users_in_domain_no_results(self):
self.assertEqual(0, len(get_active_commcare_users_in_domain('missing')))
def test_get_inactive_commcare_users_in_domain_no_results(self):
self.assertEqual(0, len(get_inactive_commcare_users_in_domain('missing')))
| {
"content_hash": "24168fa384729acd3f7ff3d9ee5e92bf",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 113,
"avg_line_length": 39.432432432432435,
"alnum_prop": 0.6559287183002056,
"repo_name": "qedsoftware/commcare-hq",
"id": "4cb04973b4f1c36bace243a85f1ddabe175de282",
"size": "2918",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/users/tests/test_analytics.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "508392"
},
{
"name": "HTML",
"bytes": "2869325"
},
{
"name": "JavaScript",
"bytes": "2395360"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "125298"
},
{
"name": "Python",
"bytes": "14670713"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
} |
import matplotlib.pyplot as plt
from permeability.functions import savitzky_golay
from matplotlib.ticker import LogLocator, MultipleLocator, FormatStrFormatter
from matplotlib.ticker import LogFormatter
import numpy as np
import pdb
#import matplotlib
#matplotlib.rc('font', family='Arial')
def plot_forces(z_windows, forces, fig_filename='forces.pdf',
z_units=u'\u00c5', force_units=u'kcal/mol-\u00c5', plot_mean=True,
sweep_alpha=0.5, grid=True):
"""Plot the forces from analyzing the force data
Params
------
z_windows : np.ndarary, shape=(n,)
The z_values for the forces
forces : np.ndarray, shape=(m, n)
The forces at each window from each sweep
fig_filename : str
Name of file to save, default=forces.pdf'
z_units : str
Units of z, default=u'\u00c5'
force_units : str
Force units, default=u'kcal/mol-\u00c5'
plot_mean : bool
Plot the mean value of the forces if True, default=True
sweep_alpha : float
Opacity of line for individual sweeps, default=0.5
grid : bool
Plot gridlines on major ticks if True, default=True
Returns
-------
This function saves a figure of the forces from each sweep as a function
of z position.
"""
if forces.ndim == 1: # only has data from 1 sweep
forces = forces.reshape((1, -1))
fig, ax = plt.subplots()
if plot_mean:
mean_force = np.mean(forces, axis=0)
ax.plot(z_windows, mean_force)
std_err = np.std(forces, axis=0) / np.sqrt(forces.shape[0])
ax.fill_between(z_windows, mean_force+std_err, mean_force-std_err,
facecolor='#a8a8a8', edgecolor='#a8a8a8')
for force_series in forces:
ax.plot(z_windows, force_series, alpha=sweep_alpha, zorder=0)
ax.set_xlabel(u'z [{z_units}]'.format(**locals()), fontweight='bold')
ax.set_ylabel(u'F(z) [{force_units}]'.format(**locals()), fontweight='bold')
ax.grid(grid)
zmin = z_windows[0]
plt.xlim(zmin,-zmin)
ax.tick_params(axis='both', which='major', pad=8)
fig.tight_layout()
fig.savefig('{fig_filename}'.format(**locals()))
plt.show()
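# Example call (sketch with synthetic data; array shapes follow the docstring above):
#   z = np.linspace(-30.0, 30.0, 61)   # window centres in Angstroms
#   f = np.random.randn(5, 61)         # 5 sweeps x 61 windows
#   plot_forces(z, f, fig_filename='forces.pdf')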
def plot_free_energy_z(z_windows, free_energy, fig_filename='delta_G.pdf',
z_units=u'\u00c5', energy_units=u'kcal/mol', plot_mean=True,
sweep_alpha=0.5, grid=True):
"""Plot the free energy profile
Params
------
z_windows : np.ndarary, shape=(n,)
The z_values for the free energy profile
free_energy : np.ndarray, shape=(m, n)
The free_energy at each window from each sweep
fig_filename : str
Name of file to save, default=delta_G.pdf'
z_units : str
Units of z, default=u'\u00c5'
energy_units : str
Energy units, default=u'kcal/mol'
plot_mean : bool
Plot the mean value of the free energy if True, default=True
sweep_alpha : float
Opacity of line for individual sweeps, default=0.5
grid : bool
Plot gridlines on major ticks if True, default=True
Returns
-------
This function saves a figure of the free energy from each sweep as a
function of z position.
"""
if free_energy.ndim == 1: # only has data from 1 sweep
free_energy = free_energy.reshape((1, -1))
fig, ax = plt.subplots()
if plot_mean:
mean_free_energy = np.mean(free_energy, axis=0)
ax.plot(z_windows, mean_free_energy)
std_err = np.std(free_energy, axis=0) / np.sqrt(free_energy.shape[0])
ax.fill_between(z_windows, mean_free_energy+std_err,
mean_free_energy-std_err,
facecolor='#a8a8a8', edgecolor='#a8a8a8')
for free_energy_series in free_energy:
ax.plot(z_windows, free_energy_series, alpha=sweep_alpha, zorder=0)
ax.set_xlabel(u'z [{z_units}]'.format(**locals()), fontweight='bold')
ax.set_ylabel(u'$\Delta$G(z) [{energy_units}]'.format(**locals()), fontweight='bold')
ax.grid(grid)
zmin = z_windows[0]
plt.xlim(zmin,-zmin)
ax.tick_params(axis='both', which='major', pad=8)
fig.tight_layout()
fig.savefig(fig_filename)
plt.show()
def plot_timeseries(time, forces, time_units='ps', force_units=u'kcal/mol-\u00c5',
grid=True, fig_filename='force_timeseries.png'):
    """Plot the raw force timeseries for each window with a Savitzky-Golay smoothed overlay."""
    fig, ax = plt.subplots()
for force_series in forces.T:
ax.plot(time, force_series,zorder=0)
smoothdata = savitzky_golay(force_series, 15001, 3)
ax.plot(time, smoothdata)
ax.set_xlabel(u'time [{time_units}]'.format(**locals()), fontweight='bold')
ax.set_ylabel(u'F_z(z) [{force_units}]'.format(**locals()), fontweight='bold')
ax.grid(grid,color='c')
ax.tick_params(axis='both', which='major', pad=8)
fig.tight_layout()
fig.savefig('{fig_filename}'.format(**locals()))
plt.show()
def plot_rot_acfs_time(time, racfs, time_units='ps', normalize=True, grid=True,
fig_filename='racf_per_window.png'):
    """Plot rotational autocorrelation functions (every other window) vs. time."""
    fig, ax = plt.subplots()
for i, racf in enumerate(racfs):
if normalize:
racf /= racf[0]
if np.mod(i,2)==0: # just to make the graph less crowded
ax.plot(time, racf)
ax.set_xlabel('t [{0}]'.format(time_units), fontweight='bold')
ax.set_ylabel(r'$\langle\Theta$(t)$\Theta$(0)$\rangle$', fontweight='bold')
plt.xlim(time[0],time[-1])
ax.grid(grid)
ax.tick_params(axis='both', which='major', pad=8)
fig.tight_layout()
fig.savefig(fig_filename)
def plot_force_acfs_time(time, facfs, time_units='ps', normalize=True, grid=True,
fig_filename='acf_per_window.png'):
    """Plot force autocorrelation functions (every other window) on a log time axis."""
    fig, ax = plt.subplots()
for i, facf in enumerate(facfs):
if normalize:
facf /= facf[0]
if np.mod(i,2)==0: # just to make the graph less crowded
ax.semilogx(time, facf)
ax.set_xlabel('t [{0}]'.format(time_units), fontweight='bold')
ax.set_ylabel(r'$\langle\Delta$F(t)$\Delta$F(0)$\rangle$', fontweight='bold')
plt.xlim(time[0],time[-1])
ax.grid(grid)
ax.tick_params(axis='both', which='major', pad=8)
fig.tight_layout()
fig.savefig(fig_filename)
def plot_int_acfs_time(time, int_facfs, time_units='ps', grid=True,
fig_filename='int_acf_per_window.png'):
    """Plot the running integrals of the force autocorrelation functions (every other window)."""
    fig, ax = plt.subplots()
for i, int_facf in enumerate(int_facfs):
if np.mod(i,2)==0: # just to make the graph less crowded
ax.loglog(time, int_facf)
ax.set_xlabel('t [{0}]'.format(time_units), fontweight='bold')
ax.set_ylabel(r"$\int_0^t\langle\Delta$F(t')$\Delta$F(0)$\rangle$dt'", fontweight='bold')
plt.xlim(time[0],time[-1])
ax.grid(grid)
ax.tick_params(axis='both', which='major', pad=8)
fig.tight_layout()
fig.savefig(fig_filename)
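# Illustrative helper, not part of the original module: a common estimator
# turns the integrated force autocorrelation functions plotted above into a
# local diffusion coefficient via D(z) = (kB*T)**2 / (long-time plateau of the
# running integral). Whether this repository uses exactly that estimator is an
# assumption, as is the plateau index chosen below.
def diffusion_from_int_facf(int_facfs, T, kB=1.9872041e-3, plateau_index=-1):
    """Return one D(z) estimate per window from the integrated FACFs."""
    int_facfs = np.asarray(int_facfs)
    plateaus = int_facfs[:, plateau_index]
    return (kB * T) ** 2 / plateaus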
def plot_resistance_z(z_windows, resist, resist_err, z_units=u'\u00c5', Res_units=u's/cm\u00b2',
fig_filename='res_z.pdf', grid=True, sys_name=None, figax=(None,None),
sweep_alpha=0.5, savefig=False, addlegend=False):
"""Plot the diffusion coefficient as a function of z-position.
Resistant input is in 1e-5 s/cm2
"""
if figax == (None, None):
fig, ax = plt.subplots()
else:
fig, ax = figax
line, = ax.plot(z_windows, resist, label=sys_name)
#line, = ax.semilogy(z_windows, resist, label=sys_name)
#resist_err *= 0.85
#ax.fill_between(z_windows, resist+resist_err,
# resist-resist_err,
# facecolor=line.get_color(), edgecolor=line.get_color(), alpha=0.2)
ax.set_xlabel(u'z [{0}]'.format(z_units), fontweight='bold')
ax.set_ylabel(u'R(z) [{0}]'.format(Res_units), fontweight='bold')
ax.grid(grid)
zmin = z_windows[0]
plt.xlim(zmin,-zmin)
ax.tick_params(axis='both', which='major', pad=8)
for label in ax.get_yticklabels()[::2]:
label.set_visible(False)
if addlegend:
ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=2, mode="expand", borderaxespad=0., fontsize='medium')
if savefig:
fig.tight_layout()
fig.savefig(fig_filename, bbox_inches='tight')
return fig, ax
def plot_diffusion_coefficient_z(z_windows, diffusion_coeff, diffusion_coeff_err,
z_units=u'\u00c5', D_units=u'cm\u00b2/s', fig_filename='d_z.pdf',
grid=True):
"""Plot the diffusion coefficient as a function of z-position.
"""
zmin = z_windows[0]
fig, ax = plt.subplots()
ax.plot(z_windows, diffusion_coeff)
ax.plot([zmin, zmin+10],[3.86e-5, 3.86e-5],linestyle='--', color='r')
ax.plot([-zmin-10, -zmin],[3.86e-5, 3.86e-5],linestyle='--', color='r')
ax.fill_between(z_windows, diffusion_coeff+diffusion_coeff_err,
diffusion_coeff-diffusion_coeff_err,
facecolor='#a8a8a8', edgecolor='#a8a8a8')
ax.set_xlabel(u'z [{0}]'.format(z_units), fontweight='bold')
ax.set_ylabel(u'D(z) [{0}]'.format(D_units), fontweight='bold')
plt.ylim(0,3e-4)
plt.xlim(zmin,-zmin)
ax.tick_params(axis='both', which='major', pad=8)
ax.grid(grid)
fig.tight_layout()
fig.savefig(fig_filename)
def plot_sym_diffusion_coefficient_z(z_windows, diffusion_coeff, diffusion_coeff_err,
z_units=u'\u00c5', D_units=u'cm\u00b2/s', fig_filename='d-sym_z.pdf',
grid=True, sys_name=None, figax=(None,None), savefig=False, addlegend=False):
"""Plot the diffusion coefficient as a function of z-position.
"""
if figax == (None, None):
fig, ax = plt.subplots()
else:
fig, ax = figax
line, = ax.semilogy(z_windows, diffusion_coeff, label=sys_name)
# from Raabe and Sadus, JCP, 2012
zmin = z_windows[0]
ax.plot([zmin, zmin+10],[3.86e-5, 3.86e-5],linestyle='--', color='r')
ax.plot([-zmin-10, -zmin],[3.86e-5, 3.86e-5],linestyle='--', color='r')
ax.fill_between(z_windows, diffusion_coeff+diffusion_coeff_err,
diffusion_coeff-diffusion_coeff_err,
facecolor=line.get_color(), edgecolor=line.get_color(), alpha=0.2)
ax.set_xlabel(u'z [{0}]'.format(z_units), fontweight='bold')
ax.set_ylabel(u'D(z) [{0}]'.format(D_units), fontweight='bold')
if addlegend:
ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=2, mode="expand", borderaxespad=0., fontsize='medium')
plt.xlim(zmin,-zmin)
ax.tick_params(axis='both', which='major', pad=8)
ax.grid(grid)
if savefig:
fig.tight_layout()
fig.savefig(fig_filename, bbox_inches='tight')
return fig, ax
def plot_symmetrized_free_energy(z_windows, delta_G, delta_G_err, z_units=u'\u00c5',
energy_units=u'kcal/mol', fig_filename='delG-sym.pdf', grid=True, sys_name=None,
figax=(None,None), savefig=False, addlegend=False):
"""Plot symmetrized delta G
Params
------
z_windows : np.ndarray, shape=(n,)
The location of the windows in z
delta_G : np.ndarray, shape=(n,)
The symmetrized free energy profile, in energy units
delta_G_err : np.ndarray, shape=(n,)
The error in the symmetrized free energy profile, in energy units
z_units : str
The units of the z-values in z_windows
energy_units : str
The units of delta_G
fig_filename : str
The name of the figure file to write
grid : bool
Draw gridlines on major ticks if True
Returns
-------
    None. This function draws a figure of the symmetrized free energy profile
    and saves it to disk.
"""
if figax == (None, None):
fig, ax = plt.subplots()
else:
fig, ax = figax
line, = ax.plot(z_windows, delta_G, label=sys_name)
ax.fill_between(z_windows, delta_G+delta_G_err,
delta_G-delta_G_err,
facecolor=line.get_color(), edgecolor=line.get_color(), alpha=0.2)
ax.set_xlabel(u'z [{0}]'.format(z_units), fontweight='bold')
ax.set_ylabel(u'$\Delta$G(z) [{0}]'.format(energy_units), fontweight='bold')
ax.grid(grid)
if addlegend:
ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=2, mode="expand", borderaxespad=0., fontsize='medium')
zmin = z_windows[0]
plt.xlim(zmin,-zmin)
ax.tick_params(axis='both', which='major', pad=8)
if savefig:
ax.set_ylim(bottom=0)
fig.tight_layout()
fig.savefig(fig_filename, bbox_inches='tight')
return fig, ax
def plot_sym_exp_free_energy(z_windows, dG, dG_err, diffz, diffz_err,
resist, resist_err, T, kB=1.9872041e-3, z_units=u'\u00c5',
fig_filename='expdelG-sym.pdf', grid=True, addlegend=False):
"""Plot symmetrized delta G
Params
------
z_windows : np.ndarray, shape=(n,)
The location of the windows in z
dG : np.ndarray, shape=(n,)
The symmetrized free energy profile, in energy units
dG_err : np.ndarray, shape=(n,)
The error in the symmetrized free energy profile, in energy units
diffz : np.ndarray, shape=(n,)
The diffusion coefficient profile
diffz_err : np.ndarray, shape=(n,)
        The error in the diffusion profile
resist : np.ndarray, shape=(n,)
The resistance profile
resist_err : np.ndarray, shape=(n,)
The error in the resistance profile
T : float
Temperature
kB : float
Boltzmann constant
z_units : str
The units of the z-values in z_windows
fig_filename : str
The name of the figure file to write
grid : bool
Draw gridlines on major ticks if True
Returns
-------
    None. This function draws a figure comparing exp(beta*dG(z)), 1/D(z), and
    R(z), and saves it to disk.
"""
# purely for testing purposes:
#resist_err *=0.9
#dG_err *= 0.9
fig, ax = plt.subplots()
expdG = np.exp(dG/(kB*T))
expdGerr = np.exp(dG / (kB*T)) * dG_err / (kB*T)
line, = ax.semilogy(z_windows, expdG, label=u'exp(\u03B2\u0394G(z))') # dimensionless
ax.fill_between(z_windows, expdG+expdGerr,
expdG-expdGerr,
facecolor=line.get_color(), edgecolor=line.get_color(), alpha=0.2)
invdiffz = 1/diffz
invdifferr = diffz_err/(diffz**2)
line, = ax.semilogy(z_windows, invdiffz, label='D(z)$^{-1}$') # s/cm2 \u207b\u00b9
ax.fill_between(z_windows, invdiffz+invdifferr,
invdiffz-invdifferr,
facecolor=line.get_color(), edgecolor=line.get_color(), alpha=0.2)
#ax.semilogy(z_windows, np.exp(delta_G/(kB*T))/diff_sym) # dimensionless
#err = np.exp(delta_G) * delta_G_err
#val = np.exp(delta_G)
#ax.plot(z_windows, np.exp(dG/(kB*T))/diffz)
line, = ax.plot(z_windows, resist, label='R(z)')
ax.fill_between(z_windows, resist+resist_err,
resist-resist_err,
facecolor=line.get_color(), edgecolor=line.get_color(), alpha=0.2)
#print(resist-resist_err, resist, resist_err)
#ax.fill_between(z_windows, np.exp(delta_G),
# np.exp(delta_G-delta_G_err),
# facecolor='#a8a8a8', edgecolor='#a8a8a8')
ax.set_xlabel(u'z [{0}]'.format(z_units), fontweight='bold')
#ax.set_ylabel(u'1/D(z), exp(\u03B2G(z))', fontweight='bold')
ax.grid(grid)
zmin = z_windows[0]
plt.xlim(zmin,-zmin)
ax.tick_params(axis='both', which='major', pad=8)
for label in ax.get_yticklabels()[::2]:
label.set_visible(False)
l = ax.legend(loc=8,fontsize='medium')
for text in l.get_texts():
text.set_family('Arial')
fig.tight_layout()
fig.savefig(fig_filename)
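# Illustrative helper, not part of the original module: the resistance profile
# R(z) plotted above determines the overall permeability through
# 1/P = integral of R(z) dz, so a trapezoidal estimate over the sampled
# windows can be sketched as below; unit conversions are left to the caller.
def permeability_from_resistance(z_windows, resist):
    """Return P = 1 / (trapezoidal integral of R(z) over z)."""
    return 1.0 / np.trapz(resist, z_windows)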
| {
"content_hash": "863811ce38608b152ecb8e31bd48bbd8",
"timestamp": "",
"source": "github",
"line_count": 404,
"max_line_length": 97,
"avg_line_length": 38.5990099009901,
"alnum_prop": 0.6138258304476081,
"repo_name": "tcmoore3/permeability",
"id": "1b6c72045d0a508ce85f566e92f479dd19f52a35",
"size": "15594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "permeability/plotting.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42557"
}
],
"symlink_target": ""
} |
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_identity_info
except ImportError:
bt_identity_info = sys.modules["onshape_client.oas.models.bt_identity_info"]
try:
from onshape_client.oas.models import bt_rbac_role_info
except ImportError:
bt_rbac_role_info = sys.modules["onshape_client.oas.models.bt_rbac_role_info"]
class RoleMapEntry(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
      allowed_values (dict): The key is the tuple path to the attribute;
          for a top-level attribute this is (var_name,). The value is a dict
          mapping a capitalized description of the allowed value to that
          value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute;
          for a top-level attribute this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"identities": ([bt_identity_info.BTIdentityInfo],), # noqa: E501
"role": (bt_rbac_role_info.BTRbacRoleInfo,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"identities": "identities", # noqa: E501
"role": "role", # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""role_map_entry.RoleMapEntry - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
identities ([bt_identity_info.BTIdentityInfo]): [optional] # noqa: E501
role (bt_rbac_role_info.BTRbacRoleInfo): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
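# Illustrative usage, not part of the generated module: attributes are passed
# as keyword arguments and checked against openapi_types(); the payload below
# is made up and only shows the composed model classes referenced above.
# entry = RoleMapEntry(
#     identities=[bt_identity_info.BTIdentityInfo()],
#     role=bt_rbac_role_info.BTRbacRoleInfo(),
# )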
| {
"content_hash": "eae0bd82f91d7c32c83a54f91c794daa",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 84,
"avg_line_length": 33.58169934640523,
"alnum_prop": 0.5895289996107435,
"repo_name": "onshape-public/onshape-clients",
"id": "3be2e802decf4bf03819a3afa2f65b2631413fb1",
"size": "5155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/onshape_client/oas/models/role_map_entry.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4873"
},
{
"name": "Go",
"bytes": "59674"
},
{
"name": "HTML",
"bytes": "3851790"
},
{
"name": "JavaScript",
"bytes": "2217"
},
{
"name": "Makefile",
"bytes": "559"
},
{
"name": "Python",
"bytes": "7560009"
},
{
"name": "Shell",
"bytes": "3475"
},
{
"name": "TypeScript",
"bytes": "1412661"
}
],
"symlink_target": ""
} |
from alembic import util, command, config
import argparse
import inspect
class AlembicCommandLine(object):
prog = None
description = None
allowed_commands = None
def __init__(self, prog=None, description=None, allowed_commands=None):
if prog is not None:
self.prog = prog
if description is not None:
self.description = description
if allowed_commands is not None:
self.allowed_commands = allowed_commands
self.parser = self.generate_options()
def add_command_options(self, parser, positional, kwargs):
if 'template' in kwargs:
parser.add_argument("-t", "--template",
default='generic',
type=str,
help="Setup template for use with 'init'")
if 'message' in kwargs:
parser.add_argument("-m", "--message",
type=str,
help="Message string to use with 'revision'")
if 'sql' in kwargs:
parser.add_argument("--sql",
action="store_true",
help="Don't emit SQL to database - dump to "
"standard output/file instead")
if 'tag' in kwargs:
parser.add_argument("--tag",
type=str,
help="Arbitrary 'tag' name - can be used by "
"custom env.py scripts.")
if 'autogenerate' in kwargs:
parser.add_argument("--autogenerate",
action="store_true",
help="Populate revision script with candidate "
"migration operations, based on comparison "
"of database to model.")
# "current" command
if 'head_only' in kwargs:
parser.add_argument("--head-only",
action="store_true",
help="Only show current version and "
"whether or not this is the head revision.")
if 'rev_range' in kwargs:
parser.add_argument("-r", "--rev-range",
action="store",
help="Specify a revision range; "
"format is [start]:[end]")
positional_help = {
'directory': "location of scripts directory",
'revision': "revision identifier"
}
for arg in positional:
parser.add_argument(arg, help=positional_help.get(arg))
def add_options(self, parser):
parser.add_argument("-c", "--config",
type=str,
default="alembic.ini",
help="Alternate config file")
parser.add_argument("-n", "--name",
type=str,
default="alembic",
help="Name of section in .ini file to "
"use for Alembic config")
parser.add_argument("-x", action="append",
help="Additional arguments consumed by "
"custom env.py scripts, e.g. -x "
"setting1=somesetting -x setting2=somesetting")
def generate_options(self):
parser = argparse.ArgumentParser(prog=self.prog)
self.add_options(parser)
subparsers = parser.add_subparsers()
for fn, name, doc, positional, kwarg in self.get_commands():
subparser = subparsers.add_parser(name, help=doc)
self.add_command_options(subparser, positional, kwarg)
subparser.set_defaults(cmd=(fn, positional, kwarg))
return parser
def get_commands(self):
cmds = []
for fn in [getattr(command, n) for n in dir(command)]:
if (inspect.isfunction(fn) and
fn.__name__[0] != '_' and
fn.__module__ == 'alembic.command'):
if (self.allowed_commands and
fn.__name__ not in self.allowed_commands):
continue
spec = inspect.getargspec(fn)
if spec[3]:
positional = spec[0][1:-len(spec[3])]
kwarg = spec[0][-len(spec[3]):]
else:
positional = spec[0][1:]
kwarg = []
cmds.append((fn, fn.__name__, fn.__doc__, positional, kwarg))
return cmds
def get_config(self, options):
return config.Config(file_=options.config,
ini_section=options.name,
cmd_opts=options)
def run_cmd(self, config, options):
fn, positional, kwarg = options.cmd
try:
fn(config, *[getattr(options, k) for k in positional],
**dict((k, getattr(options, k)) for k in kwarg))
except util.CommandError as e:
util.err(str(e))
def main(self, argv=None):
options = self.parser.parse_args(argv)
if not hasattr(options, "cmd"):
# see http://bugs.python.org/issue9253, argparse
# behavior changed incompatibly in py3.3
self.parser.error("too few arguments")
else:
self.run_cmd(self.get_config(options), options)
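# Illustrative subclass, not part of the original module: restricting which
# alembic commands are exposed is done through the class attributes. The
# program name below is made up; the command names are standard alembic
# commands that get_commands() will pick up from alembic.command.
class UpgradeOnlyCommandLine(AlembicCommandLine):
    prog = 'example-db-manage'
    description = 'Run a restricted set of alembic commands.'
    allowed_commands = ('upgrade', 'downgrade', 'current', 'history')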
if __name__ == '__main__':
cmdline = AlembicCommandLine()
cmdline.main()
| {
"content_hash": "81830fd95d4b225923d39e31ac7e3820",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 77,
"avg_line_length": 39.359154929577464,
"alnum_prop": 0.4877437824297728,
"repo_name": "SandyWalsh/stacktach-winchester",
"id": "74315d90853ccb50d6e8d6018b567f14aa463a3b",
"size": "5589",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "winchester/db/alembic_command.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
"""Tools for using Python's :mod:`json` module with BSON documents.
This module provides two helper methods `dumps` and `loads` that wrap the
native :mod:`json` methods and provide explicit BSON conversion to and from
json. This allows for specialized encoding and decoding of BSON documents
into `Mongo Extended JSON
<http://www.mongodb.org/display/DOCS/Mongo+Extended+JSON>`_'s *Strict*
mode. This lets you encode / decode BSON documents to JSON even when
they use special BSON types.
Example usage (serialization):
.. doctest::
>>> from bson import Binary, Code
>>> from bson.json_util import dumps
>>> dumps([{'foo': [1, 2]},
... {'bar': {'hello': 'world'}},
... {'code': Code("function x() { return 1; }")},
... {'bin': Binary("\x01\x02\x03\x04")}])
'[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }", "$scope": {}}}, {"bin": {"$binary": "AQIDBA==", "$type": "00"}}]'
Example usage (deserialization):
.. doctest::
>>> from bson.json_util import loads
>>> loads('[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$scope": {}, "$code": "function x() { return 1; }"}}, {"bin": {"$type": "00", "$binary": "AQIDBA=="}}]')
[{u'foo': [1, 2]}, {u'bar': {u'hello': u'world'}}, {u'code': Code('function x() { return 1; }', {})}, {u'bin': Binary('...', 0)}]
Alternatively, you can manually pass the `default` to :func:`json.dumps`.
It won't handle :class:`~bson.binary.Binary` and :class:`~bson.code.Code`
instances (as they are extended strings you can't provide custom defaults),
but it will be faster as there is less recursion.
.. versionchanged:: 2.8
The output format for :class:`~bson.timestamp.Timestamp` has changed from
'{"t": <int>, "i": <int>}' to '{"$timestamp": {"t": <int>, "i": <int>}}'.
This new format will be decoded to an instance of
:class:`~bson.timestamp.Timestamp`. The old format will continue to be
decoded to a python dict as before. Encoding to the old format is no longer
supported as it was never correct and loses type information.
Added support for $numberLong and $undefined - new in MongoDB 2.6 - and
parsing $date in ISO-8601 format.
.. versionchanged:: 2.7
Preserves order when rendering SON, Timestamp, Code, Binary, and DBRef
instances.
.. versionchanged:: 2.3
Added dumps and loads helpers to automatically handle conversion to and
from json and supports :class:`~bson.binary.Binary` and
:class:`~bson.code.Code`
"""
import base64
import calendar
import collections
import datetime
import json
import re
import uuid
from bson import EPOCH_AWARE, RE_TYPE, SON
from bson.binary import Binary
from bson.code import Code
from bson.dbref import DBRef
from bson.int64 import Int64
from bson.max_key import MaxKey
from bson.min_key import MinKey
from bson.objectid import ObjectId
from bson.regex import Regex
from bson.timestamp import Timestamp
from bson.tz_util import utc
from bson.py3compat import PY3, iteritems, string_type, text_type
_RE_OPT_TABLE = {
"i": re.I,
"l": re.L,
"m": re.M,
"s": re.S,
"u": re.U,
"x": re.X,
}
def dumps(obj, *args, **kwargs):
"""Helper function that wraps :class:`json.dumps`.
Recursive function that handles all BSON types including
:class:`~bson.binary.Binary` and :class:`~bson.code.Code`.
.. versionchanged:: 2.7
Preserves order when rendering SON, Timestamp, Code, Binary, and DBRef
instances.
"""
return json.dumps(_json_convert(obj), *args, **kwargs)
def loads(s, *args, **kwargs):
"""Helper function that wraps :class:`json.loads`.
Automatically passes the object_hook for BSON type conversion.
"""
kwargs['object_hook'] = lambda dct: object_hook(dct)
return json.loads(s, *args, **kwargs)
def _json_convert(obj):
"""Recursive helper method that converts BSON types so they can be
converted into json.
"""
if hasattr(obj, 'iteritems') or hasattr(obj, 'items'): # PY3 support
return SON(((k, _json_convert(v)) for k, v in iteritems(obj)))
elif hasattr(obj, '__iter__') and not isinstance(obj, (text_type, bytes)):
return list((_json_convert(v) for v in obj))
try:
return default(obj)
except TypeError:
return obj
def object_hook(dct):
if "$oid" in dct:
return ObjectId(str(dct["$oid"]))
if "$ref" in dct:
return DBRef(dct["$ref"], dct["$id"], dct.get("$db", None))
if "$date" in dct:
dtm = dct["$date"]
# mongoexport 2.6 and newer
if isinstance(dtm, string_type):
aware = datetime.datetime.strptime(
dtm[:23], "%Y-%m-%dT%H:%M:%S.%f").replace(tzinfo=utc)
offset = dtm[23:]
if not offset or offset == 'Z':
# UTC
return aware
else:
if len(offset) == 5:
# Offset from mongoexport is in format (+|-)HHMM
secs = (int(offset[1:3]) * 3600 + int(offset[3:]) * 60)
elif ':' in offset and len(offset) == 6:
# RFC-3339 format (+|-)HH:MM
hours, minutes = offset[1:].split(':')
secs = (int(hours) * 3600 + int(minutes) * 60)
else:
# Not RFC-3339 compliant or mongoexport output.
raise ValueError("invalid format for offset")
if offset[0] == "-":
secs *= -1
return aware - datetime.timedelta(seconds=secs)
# mongoexport 2.6 and newer, time before the epoch (SERVER-15275)
elif isinstance(dtm, collections.Mapping):
secs = float(dtm["$numberLong"]) / 1000.0
# mongoexport before 2.6
else:
secs = float(dtm) / 1000.0
return EPOCH_AWARE + datetime.timedelta(seconds=secs)
if "$regex" in dct:
flags = 0
# PyMongo always adds $options but some other tools may not.
for opt in dct.get("$options", ""):
flags |= _RE_OPT_TABLE.get(opt, 0)
return Regex(dct["$regex"], flags)
if "$minKey" in dct:
return MinKey()
if "$maxKey" in dct:
return MaxKey()
if "$binary" in dct:
if isinstance(dct["$type"], int):
dct["$type"] = "%02x" % dct["$type"]
subtype = int(dct["$type"], 16)
if subtype >= 0xffffff80: # Handle mongoexport values
subtype = int(dct["$type"][6:], 16)
return Binary(base64.b64decode(dct["$binary"].encode()), subtype)
if "$code" in dct:
return Code(dct["$code"], dct.get("$scope"))
if "$uuid" in dct:
return uuid.UUID(dct["$uuid"])
if "$undefined" in dct:
return None
if "$numberLong" in dct:
return Int64(dct["$numberLong"])
if "$timestamp" in dct:
tsp = dct["$timestamp"]
return Timestamp(tsp["t"], tsp["i"])
return dct
def default(obj):
# We preserve key order when rendering SON, DBRef, etc. as JSON by
# returning a SON for those types instead of a dict.
if isinstance(obj, ObjectId):
return {"$oid": str(obj)}
if isinstance(obj, DBRef):
return _json_convert(obj.as_doc())
if isinstance(obj, datetime.datetime):
# TODO share this code w/ bson.py?
if obj.utcoffset() is not None:
obj = obj - obj.utcoffset()
millis = int(calendar.timegm(obj.timetuple()) * 1000 +
obj.microsecond / 1000)
return {"$date": millis}
if isinstance(obj, (RE_TYPE, Regex)):
flags = ""
if obj.flags & re.IGNORECASE:
flags += "i"
if obj.flags & re.LOCALE:
flags += "l"
if obj.flags & re.MULTILINE:
flags += "m"
if obj.flags & re.DOTALL:
flags += "s"
if obj.flags & re.UNICODE:
flags += "u"
if obj.flags & re.VERBOSE:
flags += "x"
if isinstance(obj.pattern, text_type):
pattern = obj.pattern
else:
pattern = obj.pattern.decode('utf-8')
return SON([("$regex", pattern), ("$options", flags)])
if isinstance(obj, MinKey):
return {"$minKey": 1}
if isinstance(obj, MaxKey):
return {"$maxKey": 1}
if isinstance(obj, Timestamp):
return {"$timestamp": SON([("t", obj.time), ("i", obj.inc)])}
if isinstance(obj, Code):
return SON([('$code', str(obj)), ('$scope', obj.scope)])
if isinstance(obj, Binary):
return SON([
('$binary', base64.b64encode(obj).decode()),
('$type', "%02x" % obj.subtype)])
if PY3 and isinstance(obj, bytes):
return SON([
('$binary', base64.b64encode(obj).decode()),
('$type', "00")])
if isinstance(obj, uuid.UUID):
return {"$uuid": obj.hex}
raise TypeError("%r is not JSON serializable" % obj)
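def _example_round_trip():
    # Illustrative only, not part of the original module: shows that the dumps
    # and loads helpers defined above carry BSON types through JSON intact.
    doc = SON([("_id", ObjectId()), ("ts", Timestamp(1412180887, 1))])
    restored = loads(dumps(doc))
    assert isinstance(restored["_id"], ObjectId)
    assert isinstance(restored["ts"], Timestamp)
    return restored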
| {
"content_hash": "1036a28a634abaf800a520825e2956d0",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 176,
"avg_line_length": 36.75308641975309,
"alnum_prop": 0.581793752099429,
"repo_name": "llvtt/mongo-python-driver",
"id": "5ac43c36786a8405de3d87a971d5ddf23cd25950",
"size": "9510",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bson/json_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "168405"
},
{
"name": "Python",
"bytes": "1307704"
}
],
"symlink_target": ""
} |
import logging
import os
from test import LisaTest
TESTS_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
TESTS_CONF = os.path.join(TESTS_DIRECTORY, "smoke_test.config")
class SFreq(LisaTest):
"""Tests for SchedFreq framework"""
@classmethod
def setUpClass(cls, *args, **kwargs):
super(SFreq, cls)._init(TESTS_CONF, *args, **kwargs)
def test_regression(self):
"""Check that there is not regression on energy"""
# TODO
# vim :set tabstop=4 shiftwidth=4 expandtab
| {
"content_hash": "137c97223dad82e4ff18e4cfb5cec85f",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 63,
"avg_line_length": 25.75,
"alnum_prop": 0.6776699029126214,
"repo_name": "JaviMerino/lisa",
"id": "a6cc83cce691c7e203c0fe1c40decf872908452b",
"size": "1152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sfreq/smoke_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "6979934"
},
{
"name": "Python",
"bytes": "334420"
}
],
"symlink_target": ""
} |
from newsfeedsDataLayer.models import Artical
from newsfeedsDataLayer.models import User_id
#import os
#User_id().insertUser(1)
#User_id().updateKeywords(1, "iPhone 苹果 谷歌 鲍尔默 长角 ZDNetXPVistaMaryJoFoley15 小米 电商 互联网 百度 永福 易信")
#User_id().updateKeywords(11, "百度 永福 易信 ")
#RelatedReading().insertRelatedReading("1461be38528df0efea5a0bb48ed57da5")
#result = Artical().recommendation(2)
#Artical().everyClassificationRecommendation(1)
#result = Artical().recommendation(11,"NewsfeedsSoftware/spiders/spiderStartupController_package/Configuration.xml")
#print "********************************"
#for re in result:
# print re["title"]
#print User_id().getLastUserId()
#User_id().readArtiaclThenAppendKeywords(1, "65958f7f7e5c8256de240c60ec368594")
#print str(os.getcwd())+"1111111111111111111111111"
'''
def getUserKeyWord(user_id):
keyword = User_id.objects.filter(userID = user_id).values()[0]["keywords"]
keywordSet = dict()
for key in keyword.strip().split('$'):
if key !="":
word = key.split('-')
count = word[1].split('?')
keywordSet[word[0]] = count[0]
for key in keywordSet:
print key,keywordSet[key]
getUserKeyWord(11)
''' | {
"content_hash": "771a00a6190c5715cf139faedf7c9c35",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 116,
"avg_line_length": 38.03125,
"alnum_prop": 0.6803615447822514,
"repo_name": "zhaishuai/NewsfeedsService",
"id": "69dcb103d936dc77b5bec90e5209438b0c7a9c82",
"size": "1350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "NewsfeedsService/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1245"
},
{
"name": "JavaScript",
"bytes": "80280"
},
{
"name": "Python",
"bytes": "216331"
}
],
"symlink_target": ""
} |
from p4app import P4Mininet
from camus import CamusApp
from camus_topo import SingleSwitchTopo, FatTreeTopo, getPort
import subprocess
import sys
import time
K = 4
itch_app = CamusApp('spec.p4', ingress_name='MyIngress')
itch_app.generateQueryPipeline('itch_camus.p4')
topo = FatTreeTopo(K)
topo.subscribe('h_0_0_0', 'add_order.shares = 1')
topo.subscribe('h_0_0_1', 'add_order.price = 2')
topo.subscribe('h_2_1_0', 'add_order.stock = "GOOGL"')
net = P4Mininet(program='itch.p4', topo=topo)
net.start()
# Core switches should forward all traffic down
for core in range(int(topo.core_count)):
sw = net.get('cs_%d' % core)
sw.insertTableEntry(table_name='MyIngress.packet_direction',
default_action=True,
action_name='MyIngress.direction_is_down')
non_core_switches = [(t, pod, x) for t in ['as', 'es'] for pod in range(int(topo.pod_count)) for x in range(int(topo.edge_count/topo.pod_count))]
# Add rules for detecting whether a packet is going up or down the tree
for sw_type,pod,x in non_core_switches:
sw = net.get('%s_%d_%d' % (sw_type, pod, x))
port = None
for sw2 in topo.upstream_for_sw[sw.name]:
link = topo.linkInfo(sw.name, sw2)
port = getPort(link, sw.name)
sw.insertTableEntry(table_name='MyIngress.packet_direction',
match_fields={'standard_metadata.ingress_port': port},
action_name='MyIngress.direction_is_down')
if port is not None:
sw.insertTableEntry(table_name='MyIngress.forward_up',
default_action=True,
action_name='MyIngress.ipv4_forward',
action_params={'port': port})
hosts = [(pod, edge, host) for pod in range(int(K)) for edge in range(int(K/2)) for host in range(int(K/2))]
# IPv4 routing
for pod,edge,host in hosts:
hops = []
host_name = 'h_%d_%d_%d'% (pod, edge, host)
edge_sw = net.get('es_%d_%d' % (pod, edge))
edge_port = getPort(topo.linkInfo(edge_sw.name, host_name), edge_sw.name)
hops.append((edge_sw, edge_port))
for aggr in range(int(topo.aggr_count / topo.pod_count)):
aggr_sw = net.get('as_%d_%d' % (pod, aggr))
port = getPort(topo.linkInfo(edge_sw.name, aggr_sw.name), aggr_sw.name)
hops.append((aggr_sw, port))
for core in range(int((K/2)*aggr), int((K/2)*(aggr+1))):
core_sw = net.get('cs_%d' % core)
port = getPort(topo.linkInfo(aggr_sw.name, core_sw.name), core_sw.name)
hops.append((core_sw, port))
edge_sw.insertTableEntry(table_name='MyEgress.rewrite_dst',
match_fields={'standard_metadata.egress_port': edge_port},
action_name='MyEgress.set_dst',
action_params={'mac': '00:00:00:%02x:%02x:%02x' % (pod, edge, host+1),
'ip': '10.%d.%d.%d' % (pod, edge, host+1)})
for sw,port in hops:
sw.insertTableEntry(table_name='MyIngress.ipv4_lpm',
match_fields={'hdr.ipv4.dstAddr': ["10.%d.%d.%d" % (pod, edge, host+1), 32]},
action_name='MyIngress.ipv4_forward',
action_params={'port': port})
# Compile rules and install them on each switch
for sw_name in topo.switches():
rules = topo.rules_for_sw[sw_name]
if not rules: continue
runtime_config = itch_app.compileRules(rules=rules, ingress_name='MyIngress')
sw = net.get(sw_name)
for entry in runtime_config.entries():
sw.insertTableEntry(**entry)
for mgid,ports in runtime_config.mcastGroups().items():
sw.addMulticastGroup(mgid=mgid, ports=ports)
print("populated")
#net.pingAll()
h1, h2, h3, h4 = net.get('h_0_0_0'), net.get('h_0_0_1'), net.get('h_2_1_0'), net.get('h_3_1_1')
subscriber1 = h1.popen('./subscriber.py 1234', stdout=sys.stdout, stderr=sys.stdout)
subscriber2 = h2.popen('./subscriber.py 1234', stdout=sys.stdout, stderr=sys.stdout)
subscriber3 = h3.popen('./subscriber.py 1234', stdout=sys.stdout, stderr=sys.stdout)
time.sleep(0.4)
print("started subscribers")
h4.cmd('./publisher.py 10.255.255.255 1234')
time.sleep(0.4)
print("shutting down")
subscriber1.terminate()
subscriber2.terminate()
subscriber3.terminate()
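# Hypothetical sketch, not from this repository: subscriber.py is launched
# above but its source is not shown here, so the UDP listener below is only an
# assumption about what it might contain.
# import socket, sys
# sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# sock.bind(('', int(sys.argv[1])))
# while True:
#     data, addr = sock.recvfrom(2048)
#     print("received %d bytes from %s" % (len(data), addr))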
| {
"content_hash": "58c3a913722ad37493512f3f5ce8a5e8",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 145,
"avg_line_length": 37.1025641025641,
"alnum_prop": 0.6120709513936881,
"repo_name": "theojepsen/p4app",
"id": "1e620eb5948ed177a431c0731d2300e94650aa67",
"size": "4341",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/itch.p4app/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1801"
},
{
"name": "Python",
"bytes": "50828"
},
{
"name": "Shell",
"bytes": "2916"
}
],
"symlink_target": ""
} |
"""cell.bin.cell"""
from kombu import Connection
from .base import Command, Option
from cell import Agent
from cell.utils import instantiate
__all__ = ['cell', 'main']
DEFAULT_BROKER_URL = 'amqp://guest:guest@localhost:5672//'
class cell(Command):
args = '<agent object names>'
option_list = (
Option('-i', '--id',
default=None, action='store', dest='id',
help='Id of the agent (or automatically generated).'),
Option('-l', '--loglevel',
default=None, action='store', dest='loglevel',
help='Loglevel (CRITICAL/ERROR/WARNING/INFO/DEBUG).'),
Option('-f', '--logfile',
default=None, action='store', dest='logfile',
help='Logfile. Default is stderr.'),
Option('-b', '--broker',
default=DEFAULT_BROKER_URL, action='store', dest='broker',
help='Broker URL. Default is %s' % (DEFAULT_BROKER_URL, )),
)
def run(self, *actors, **kwargs):
if not actors:
self.exit_usage('No actor specified')
actors = [instantiate(actor) for actor in list(actors)]
connection = Connection(kwargs.get('broker'))
agent = Agent(connection, actors=actors, id=kwargs.get('id'))
agent.run_from_commandline(loglevel=kwargs.get('loglevel'),
logfile=kwargs.get('logfile'))
def main(argv=None):
return cell().execute_from_commandline(argv)
if __name__ == '__main__':
main()
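# Illustrative invocation (the actor path is hypothetical):
#   python -m cell.bin.cell examples.hello.HelloActor -l INFO -b amqp://guest:guest@localhost:5672//
# Each dotted path is resolved with instantiate() and the resulting actors are
# handed to an Agent bound to the given broker connection.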
| {
"content_hash": "b42017a664af0f8e8296148d5f09193f",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 72,
"avg_line_length": 29.551020408163264,
"alnum_prop": 0.5994475138121547,
"repo_name": "celery/cell",
"id": "67edb6079bd6ea06e0fcfc7b02530295eb2baf56",
"size": "1448",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cell/bin/cell.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1495"
},
{
"name": "Python",
"bytes": "158962"
},
{
"name": "Shell",
"bytes": "2154"
}
],
"symlink_target": ""
} |
import socket
s = socket.socket()
host = socket.gethostname()
port = 1024
s.bind((host, port))
s.listen(5)
while True:
c, addr = s.accept()
print 'Got connection from', addr
c.send('Thank you for connecting')
c.close()
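# Hypothetical matching client, not part of this file: connects to the same
# host/port and prints the greeting sent by the loop above.
# import socket
# c = socket.socket()
# c.connect((socket.gethostname(), 1024))
# print c.recv(1024)
# c.close()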
| {
"content_hash": "eaf90f23c1beea336b3cbb16786dd6ae",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 38,
"avg_line_length": 17,
"alnum_prop": 0.6470588235294118,
"repo_name": "Great-Li-Xin/PythonDev",
"id": "2521609265559ef5dfde86d3fab65cb5bb08485b",
"size": "238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "NetWork/Server.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "15519"
},
{
"name": "ColdFusion",
"bytes": "11470"
},
{
"name": "HTML",
"bytes": "9727"
},
{
"name": "Java",
"bytes": "9296"
},
{
"name": "Mako",
"bytes": "824"
},
{
"name": "PHP",
"bytes": "8114"
},
{
"name": "Python",
"bytes": "487811"
}
],
"symlink_target": ""
} |
"""Tests for ceilometer/storage/impl_mongodb.py
.. note::
In order to run the tests against another MongoDB server set the
environment variable CEILOMETER_TEST_MONGODB_URL to point to a MongoDB
server before running the tests.
"""
from ceilometer.alarm.storage import impl_mongodb as impl_mongodb_alarm
from ceilometer.event.storage import impl_mongodb as impl_mongodb_event
from ceilometer.storage import base
from ceilometer.storage import impl_mongodb
from ceilometer.tests import base as test_base
from ceilometer.tests import db as tests_db
from ceilometer.tests.storage import test_storage_scenarios
@tests_db.run_with('mongodb')
class MongoDBConnection(tests_db.TestBase,
tests_db.MixinTestsWithBackendScenarios):
def test_connection_pooling(self):
test_conn = impl_mongodb.Connection(self.db_manager.url)
self.assertEqual(self.conn.conn, test_conn.conn)
def test_replica_set(self):
url = self.db_manager._url + '?replicaSet=foobar'
conn = impl_mongodb.Connection(url)
self.assertTrue(conn.conn)
def test_recurse_sort_keys(self):
sort_keys = ['k1', 'k2', 'k3']
marker = {'k1': 'v1', 'k2': 'v2', 'k3': 'v3'}
flag = '$lt'
ret = impl_mongodb.Connection._recurse_sort_keys(sort_keys=sort_keys,
marker=marker,
flag=flag)
expect = {'k3': {'$lt': 'v3'}, 'k2': {'eq': 'v2'}, 'k1': {'eq': 'v1'}}
self.assertEqual(expect, ret)
@tests_db.run_with('mongodb')
class MongoDBTestMarkerBase(test_storage_scenarios.DBTestBase,
tests_db.MixinTestsWithBackendScenarios):
    # NOTE(Fengqian): All three of these test cases are the same for the
    # resource and meter collections. Alarm pagination is set up in
    # AlarmTestPagination.
def test_get_marker(self):
marker_pairs = {'user_id': 'user-id-4'}
ret = impl_mongodb.Connection._get_marker(self.conn.db.resource,
marker_pairs)
self.assertEqual('project-id-4', ret['project_id'])
def test_get_marker_None(self):
marker_pairs = {'user_id': 'user-id-foo'}
try:
ret = impl_mongodb.Connection._get_marker(self.conn.db.resource,
marker_pairs)
self.assertEqual('project-id-foo', ret['project_id'])
except base.NoResultFound:
self.assertTrue(True)
def test_get_marker_multiple(self):
try:
marker_pairs = {'project_id': 'project-id'}
ret = impl_mongodb.Connection._get_marker(self.conn.db.resource,
marker_pairs)
self.assertEqual('project-id-foo', ret['project_id'])
except base.MultipleResultsFound:
self.assertTrue(True)
@tests_db.run_with('mongodb')
class IndexTest(tests_db.TestBase,
tests_db.MixinTestsWithBackendScenarios):
def _test_ttl_index_absent(self, conn, coll_name, ttl_opt):
# create a fake index and check it is deleted
coll = getattr(conn.db, coll_name)
index_name = '%s_ttl' % coll_name
self.CONF.set_override(ttl_opt, -1, group='database')
conn.upgrade()
self.assertNotIn(index_name, coll.index_information())
self.CONF.set_override(ttl_opt, 456789, group='database')
conn.upgrade()
self.assertEqual(456789,
coll.index_information()
[index_name]['expireAfterSeconds'])
def test_meter_ttl_index_absent(self):
self._test_ttl_index_absent(self.conn, 'meter',
'metering_time_to_live')
def test_event_ttl_index_absent(self):
self._test_ttl_index_absent(self.event_conn, 'event',
'event_time_to_live')
def _test_ttl_index_present(self, conn, coll_name, ttl_opt):
coll = getattr(conn.db, coll_name)
self.CONF.set_override(ttl_opt, 456789, group='database')
conn.upgrade()
index_name = '%s_ttl' % coll_name
self.assertEqual(456789,
coll.index_information()
[index_name]['expireAfterSeconds'])
self.CONF.set_override(ttl_opt, -1, group='database')
conn.upgrade()
self.assertNotIn(index_name, coll.index_information())
def test_meter_ttl_index_present(self):
self._test_ttl_index_present(self.conn, 'meter',
'metering_time_to_live')
def test_event_ttl_index_present(self):
self._test_ttl_index_present(self.event_conn, 'event',
'event_time_to_live')
@tests_db.run_with('mongodb')
class AlarmTestPagination(test_storage_scenarios.AlarmTestBase,
tests_db.MixinTestsWithBackendScenarios):
def test_alarm_get_marker(self):
self.add_some_alarms()
marker_pairs = {'name': 'red-alert'}
ret = impl_mongodb.Connection._get_marker(self.alarm_conn.db.alarm,
marker_pairs=marker_pairs)
self.assertEqual('test.one', ret['rule']['meter_name'])
def test_alarm_get_marker_None(self):
self.add_some_alarms()
try:
marker_pairs = {'name': 'user-id-foo'}
ret = impl_mongodb.Connection._get_marker(self.alarm_conn.db.alarm,
marker_pairs)
self.assertEqual('meter_name-foo', ret['rule']['meter_name'])
except base.NoResultFound:
self.assertTrue(True)
def test_alarm_get_marker_multiple(self):
self.add_some_alarms()
try:
marker_pairs = {'user_id': 'me'}
ret = impl_mongodb.Connection._get_marker(self.alarm_conn.db.alarm,
marker_pairs)
self.assertEqual('counter-name-foo', ret['rule']['meter_name'])
except base.MultipleResultsFound:
self.assertTrue(True)
class CapabilitiesTest(test_base.BaseTestCase):
# Check the returned capabilities list, which is specific to each DB
# driver
def test_capabilities(self):
expected_capabilities = {
'meters': {'pagination': False,
'query': {'simple': True,
'metadata': True,
'complex': False}},
'resources': {'pagination': False,
'query': {'simple': True,
'metadata': True,
'complex': False}},
'samples': {'pagination': False,
'query': {'simple': True,
'metadata': True,
'complex': True}},
'statistics': {'pagination': False,
'groupby': True,
'query': {'simple': True,
'metadata': True,
'complex': False},
'aggregation': {'standard': True,
'selectable': {
'max': True,
'min': True,
'sum': True,
'avg': True,
'count': True,
'stddev': True,
'cardinality': True}}
},
}
actual_capabilities = impl_mongodb.Connection.get_capabilities()
self.assertEqual(expected_capabilities, actual_capabilities)
def test_event_capabilities(self):
expected_capabilities = {
'events': {'query': {'simple': True}},
}
actual_capabilities = impl_mongodb_event.Connection.get_capabilities()
self.assertEqual(expected_capabilities, actual_capabilities)
def test_alarm_capabilities(self):
expected_capabilities = {
'alarms': {'query': {'simple': True,
'complex': True},
'history': {'query': {'simple': True,
'complex': True}}},
}
actual_capabilities = impl_mongodb_alarm.Connection.get_capabilities()
self.assertEqual(expected_capabilities, actual_capabilities)
def test_storage_capabilities(self):
expected_capabilities = {
'storage': {'production_ready': True},
}
actual_capabilities = (impl_mongodb.Connection.
get_storage_capabilities())
self.assertEqual(expected_capabilities, actual_capabilities)
| {
"content_hash": "47e31c765cfa0483264aee61f8e3aa10",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 79,
"avg_line_length": 42.59154929577465,
"alnum_prop": 0.5283289241622575,
"repo_name": "yanheven/ceilometer",
"id": "28f6776cae32931ac253b65213fcd41366cc55f3",
"size": "9673",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ceilometer/tests/storage/test_impl_mongodb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2969045"
},
{
"name": "Shell",
"bytes": "4227"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Link.subscribe'
db.delete_column('projects_link', 'subscribe')
# Deleting field 'Link.subscription'
db.delete_column('projects_link', 'subscription_id')
def backwards(self, orm):
# Adding field 'Link.subscribe'
db.add_column('projects_link', 'subscribe', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
# Adding field 'Link.subscription'
db.add_column('projects_link', 'subscription', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['subscriber.Subscription'], null=True, blank=True), keep_default=False)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'projects.link': {
'Meta': {'object_name': 'Link'},
'blog': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.Project']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'projects.project': {
'Meta': {'object_name': 'Project'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'featured_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'projects_following'", 'blank': 'True', 'to': "orm['users.Profile']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'inactive': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'long_description': ('django.db.models.fields.TextField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'owners': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'projects_owned'", 'symmetrical': 'False', 'to': "orm['users.Profile']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'team_members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['users.Profile']", 'symmetrical': 'False', 'blank': 'True'}),
'topics': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['topics.Topic']", 'symmetrical': 'False'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
},
'topics.topic': {
'Meta': {'object_name': 'Topic'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'draft': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'long_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
'users.profile': {
'Meta': {'object_name': 'Profile'},
'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'bio': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'featured_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['projects']
| {
"content_hash": "4dbad13b3c960cfe2a287dcff2fe1fae",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 195,
"avg_line_length": 72.6910569105691,
"alnum_prop": 0.5558662342019909,
"repo_name": "mozilla/betafarm",
"id": "b06677dd07958d8023da0c3d5fbbe36da85808a5",
"size": "8959",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/projects/migrations/0012_auto__del_field_link_subscribe__del_field_link_subscription.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "127124"
},
{
"name": "HTML",
"bytes": "222114"
},
{
"name": "JavaScript",
"bytes": "38349"
},
{
"name": "Python",
"bytes": "1829931"
},
{
"name": "Shell",
"bytes": "1213"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.utils.html import format_html, format_html_join
from wagtail.core.blocks import CharBlock
from wagtail.core.blocks.list_block import ListBlock
from omni_blocks.blocks.struct_blocks import BasicCardBlock, FlowBlock
class BasicCardGridBlock(ListBlock):
"""Block for displaying a grid of cards."""
def __init__(self, **kwargs):
"""
        Instantiates a BasicCardBlock instance,
        then passes it to the super class for list rendering.
:param kwargs: Default keyword args
:type kwargs: {}
"""
child_block = BasicCardBlock(required=True)
super(BasicCardGridBlock, self).__init__(child_block, **kwargs)
class Meta(object):
"""Wagtail properties."""
icon = "form"
label = "Basic Card Grid"
template = "blocks/basic_card_grid_block.html"
class FlowListBlock(ListBlock):
"""Block for displaying lists of data points."""
def __init__(self, **kwargs):
"""
        Instantiates a FlowBlock instance, then passes it to the super
class for list rendering.
:param kwargs: Default keyword args
:type kwargs: {}
"""
child_block = FlowBlock(required=True)
super(FlowListBlock, self).__init__(child_block, **kwargs)
class Meta(object):
"""Wagtail properties."""
icon = "list-ul"
label = "Flow List"
template = "blocks/flow_list_block.html"
class ULBlock(ListBlock):
"""Block for displaying an unordered of rich text."""
def __init__(self, **kwargs):
"""
        Instantiates a CharBlock instance,
then passes it to the super class for list rendering.
:param kwargs: Default keyword args
:type kwargs: {}
"""
child_block = CharBlock(required=True)
super(ULBlock, self).__init__(child_block, **kwargs)
class Meta(object):
"""Wagtail properties."""
icon = "list-ul"
label = "Unordered List"
def render_basic(self, value, context=None):
child_list = []
for child_value in value:
child_list.append((self.child_block.render(child_value, context=context),))
children = format_html_join("\n", "<li>{0}</li>", child_list)
return format_html('<ul class="written_content_list">{0}</ul>', children)
class OLBlock(ULBlock):
"""Block for displaying an ordered of rich text."""
class Meta(object):
"""Wagtail properties."""
icon = "list-ol"
label = "Ordered List"
def render_basic(self, value, context=None):
child_list = []
for child_value in value:
child_list.append((self.child_block.render(child_value, context=context),))
children = format_html_join("\n", "<li>{0}</li>", child_list)
return format_html('<ol class="written_content_list">{0}</ol>', children)
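# Illustrative usage, not part of this module: the blocks above are intended
# to be mounted in a Wagtail StreamField; the field and block names below are
# made up.
# from wagtail.core.fields import StreamField
# body = StreamField([
#     ('card_grid', BasicCardGridBlock()),
#     ('bullet_list', ULBlock()),
#     ('numbered_list', OLBlock()),
# ])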
| {
"content_hash": "7336aaabe7c70eb67b4be8d7371d062c",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 87,
"avg_line_length": 29.948979591836736,
"alnum_prop": 0.6149914821124361,
"repo_name": "omni-digital/omni-blocks",
"id": "b9cd89a2c51493e311ddda8119b1631ef2f556aa",
"size": "2935",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "omni_blocks/blocks/list_blocks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5776"
},
{
"name": "Makefile",
"bytes": "1336"
},
{
"name": "Python",
"bytes": "34057"
}
],
"symlink_target": ""
} |
import logging
import os
import re
from webkitpy.common.memoized import memoized
from webkitpy.common.system.deprecated_logging import log
from webkitpy.common.system.executive import Executive, ScriptError
from .commitmessage import CommitMessage
from .scm import AuthenticationError, SCM, commit_error_handler
from .svn import SVN, SVNRepository
_log = logging.getLogger(__name__)
def run_command(*args, **kwargs):
# FIXME: This should not be a global static.
# New code should use Executive.run_command directly instead
return Executive().run_command(*args, **kwargs)
class AmbiguousCommitError(Exception):
def __init__(self, num_local_commits, working_directory_is_clean):
self.num_local_commits = num_local_commits
self.working_directory_is_clean = working_directory_is_clean
class Git(SCM, SVNRepository):
# Git doesn't appear to document error codes, but seems to return
# 1 or 128, mostly.
ERROR_FILE_IS_MISSING = 128
executable_name = 'git'
def __init__(self, cwd, **kwargs):
SCM.__init__(self, cwd, **kwargs)
self._check_git_architecture()
def _machine_is_64bit(self):
import platform
        # This is only tested on Mac.
if not platform.mac_ver()[0]:
return False
# platform.architecture()[0] can be '64bit' even if the machine is 32bit:
# http://mail.python.org/pipermail/pythonmac-sig/2009-September/021648.html
# Use the sysctl command to find out what the processor actually supports.
return self.run(['sysctl', '-n', 'hw.cpu64bit_capable']).rstrip() == '1'
def _executable_is_64bit(self, path):
# Again, platform.architecture() fails us. On my machine
# git_bits = platform.architecture(executable=git_path, bits='default')[0]
# git_bits is just 'default', meaning the call failed.
file_output = self.run(['file', path])
return re.search('x86_64', file_output)
def _check_git_architecture(self):
if not self._machine_is_64bit():
return
# We could path-search entirely in python or with
# which.py (http://code.google.com/p/which), but this is easier:
git_path = self.run(['which', self.executable_name]).rstrip()
if self._executable_is_64bit(git_path):
return
webkit_dev_thread_url = "https://lists.webkit.org/pipermail/webkit-dev/2010-December/015287.html"
log("Warning: This machine is 64-bit, but the git binary (%s) does not support 64-bit.\nInstall a 64-bit git for better performance, see:\n%s\n" % (git_path, webkit_dev_thread_url))
def _run_git(self, command_args, **kwargs):
full_command_args = [self.executable_name] + command_args
full_kwargs = kwargs
if not 'cwd' in full_kwargs:
full_kwargs['cwd'] = self.checkout_root
return self.run(full_command_args, **full_kwargs)
@classmethod
def in_working_directory(cls, path, executive=None):
try:
executive = executive or Executive()
return executive.run_command([cls.executable_name, 'rev-parse', '--is-inside-work-tree'], cwd=path, error_handler=Executive.ignore_error).rstrip() == "true"
except OSError, e:
            # The Windows bots seem to throw a WindowsError when git isn't installed.
return False
def find_checkout_root(self, path):
# "git rev-parse --show-cdup" would be another way to get to the root
checkout_root = self._run_git(['rev-parse', '--show-toplevel'], cwd=(path or "./")).strip()
if not self._filesystem.isabs(checkout_root): # Sometimes git returns relative paths
checkout_root = self._filesystem.join(path, checkout_root)
return checkout_root
def to_object_name(self, filepath):
# FIXME: This can't be the right way to append a slash.
root_end_with_slash = self._filesystem.join(self.find_checkout_root(self._filesystem.dirname(filepath)), '')
# FIXME: This seems to want some sort of rel_path instead?
return filepath.replace(root_end_with_slash, '')
@classmethod
def read_git_config(cls, key, cwd=None):
# FIXME: This should probably use cwd=self.checkout_root.
# Pass --get-all for cases where the config has multiple values
# Pass the cwd if provided so that we can handle the case of running webkit-patch outside of the working directory.
# FIXME: This should use an Executive.
return run_command([cls.executable_name, "config", "--get-all", key], error_handler=Executive.ignore_error, cwd=cwd).rstrip('\n')
@staticmethod
def commit_success_regexp():
return "^Committed r(?P<svn_revision>\d+)$"
def discard_local_commits(self):
self._run_git(['reset', '--hard', self.remote_branch_ref()])
def local_commits(self):
return self._run_git(['log', '--pretty=oneline', 'HEAD...' + self.remote_branch_ref()]).splitlines()
def rebase_in_progress(self):
return self._filesystem.exists(self.absolute_path(self._filesystem.join('.git', 'rebase-apply')))
def working_directory_is_clean(self):
return self._run_git(['diff', 'HEAD', '--no-renames', '--name-only']) == ""
def clean_working_directory(self):
# Could run git clean here too, but that wouldn't match working_directory_is_clean
self._run_git(['reset', '--hard', 'HEAD'])
# Aborting rebase even though this does not match working_directory_is_clean
if self.rebase_in_progress():
self._run_git(['rebase', '--abort'])
def status_command(self):
        # git status returns non-zero when there are changes, so we use git diff --name-status HEAD instead.
# No file contents printed, thus utf-8 autodecoding in self.run is fine.
return [self.executable_name, "diff", "--name-status", "--no-renames", "HEAD"]
def _status_regexp(self, expected_types):
return '^(?P<status>[%s])\t(?P<filename>.+)$' % expected_types
def add_list(self, paths, return_exit_code=False):
return self._run_git(["add"] + paths, return_exit_code=return_exit_code)
def delete_list(self, paths):
return self._run_git(["rm", "-f"] + paths)
def exists(self, path):
return_code = self._run_git(["show", "HEAD:%s" % path], return_exit_code=True, decode_output=False)
return return_code != self.ERROR_FILE_IS_MISSING
def _branch_from_ref(self, ref):
return ref.replace('refs/heads/', '')
def _current_branch(self):
return self._branch_from_ref(self._run_git(['symbolic-ref', '-q', 'HEAD']).strip())
def _upstream_branch(self):
current_branch = self._current_branch()
return self._branch_from_ref(self.read_git_config('branch.%s.merge' % current_branch, cwd=self.checkout_root).strip())
def merge_base(self, git_commit):
if git_commit:
# Rewrite UPSTREAM to the upstream branch
if 'UPSTREAM' in git_commit:
upstream = self._upstream_branch()
if not upstream:
raise ScriptError(message='No upstream/tracking branch set.')
git_commit = git_commit.replace('UPSTREAM', upstream)
# Special-case <refname>.. to include working copy changes, e.g., 'HEAD....' shows only the diffs from HEAD.
if git_commit.endswith('....'):
return git_commit[:-4]
if '..' not in git_commit:
git_commit = git_commit + "^.." + git_commit
return git_commit
return self.remote_merge_base()
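    # Illustration (comments added for clarity, not in the original source):
    # merge_base() maps the --git-commit argument onto the base or range that
    # changed_files() and create_patch() diff against, e.g.:
    #   None              -> the remote merge base (merge-base of the remote branch and HEAD)
    #   "abc123"          -> "abc123^..abc123"          (just that commit)
    #   "HEAD...."        -> "HEAD"                     (working copy changes only)
    #   "UPSTREAM..HEAD"  -> "<upstream-branch>..HEAD"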
def changed_files(self, git_commit=None):
# FIXME: --diff-filter could be used to avoid the "extract_filenames" step.
status_command = [self.executable_name, 'diff', '-r', '--name-status', "--no-renames", "--no-ext-diff", "--full-index", self.merge_base(git_commit)]
# FIXME: I'm not sure we're returning the same set of files that SVN.changed_files is.
# Added (A), Copied (C), Deleted (D), Modified (M), Renamed (R)
return self.run_status_and_extract_filenames(status_command, self._status_regexp("ADM"))
def _changes_files_for_commit(self, git_commit):
# --pretty="format:" makes git show not print the commit log header,
changed_files = self._run_git(["show", "--pretty=format:", "--name-only", git_commit]).splitlines()
# instead it just prints a blank line at the top, so we skip the blank line:
return changed_files[1:]
def changed_files_for_revision(self, revision):
commit_id = self.git_commit_from_svn_revision(revision)
return self._changes_files_for_commit(commit_id)
def revisions_changing_file(self, path, limit=5):
        # raise a script error if path does not exist, to match the behavior of the svn implementation.
if not self._filesystem.exists(path):
raise ScriptError(message="Path %s does not exist." % path)
# git rev-list head --remove-empty --limit=5 -- path would be equivalent.
commit_ids = self._run_git(["log", "--remove-empty", "--pretty=format:%H", "-%s" % limit, "--", path]).splitlines()
return filter(lambda revision: revision, map(self.svn_revision_from_git_commit, commit_ids))
def conflicted_files(self):
# We do not need to pass decode_output for this diff command
# as we're passing --name-status which does not output any data.
status_command = [self.executable_name, 'diff', '--name-status', '--no-renames', '--diff-filter=U']
return self.run_status_and_extract_filenames(status_command, self._status_regexp("U"))
def added_files(self):
return self.run_status_and_extract_filenames(self.status_command(), self._status_regexp("A"))
def deleted_files(self):
return self.run_status_and_extract_filenames(self.status_command(), self._status_regexp("D"))
@staticmethod
def supports_local_commits():
return True
def display_name(self):
return "git"
def svn_revision(self, path):
_log.debug('Running git.head_svn_revision... (Temporary logging message)')
git_log = self._run_git(['log', '-25', path])
match = re.search("^\s*git-svn-id:.*@(?P<svn_revision>\d+)\ ", git_log, re.MULTILINE)
if not match:
return ""
return str(match.group('svn_revision'))
def prepend_svn_revision(self, diff):
revision = self.head_svn_revision()
if not revision:
return diff
return "Subversion Revision: " + revision + '\n' + diff
def create_patch(self, git_commit=None, changed_files=None):
"""Returns a byte array (str()) representing the patch file.
Patch files are effectively binary since they may contain
files of multiple different encodings."""
# Put code changes at the top of the patch and layout tests
# at the bottom, this makes for easier reviewing.
config_path = self._filesystem.dirname(self._filesystem.path_to_module('webkitpy.common.config'))
order_file = self._filesystem.join(config_path, 'orderfile')
order = ""
if self._filesystem.exists(order_file):
order = "-O%s" % order_file
command = [self.executable_name, 'diff', '--binary', '--no-color', "--no-ext-diff", "--full-index", "--no-renames", order, self.merge_base(git_commit), "--"]
if changed_files:
command += changed_files
return self.prepend_svn_revision(self.run(command, decode_output=False, cwd=self.checkout_root))
def _run_git_svn_find_rev(self, arg):
# git svn find-rev always exits 0, even when the revision or commit is not found.
return self._run_git(['svn', 'find-rev', arg]).rstrip()
def _string_to_int_or_none(self, string):
try:
return int(string)
except ValueError, e:
return None
@memoized
def git_commit_from_svn_revision(self, svn_revision):
git_commit = self._run_git_svn_find_rev('r%s' % svn_revision)
if not git_commit:
# FIXME: Alternatively we could offer to update the checkout? Or return None?
raise ScriptError(message='Failed to find git commit for revision %s, your checkout likely needs an update.' % svn_revision)
return git_commit
@memoized
def svn_revision_from_git_commit(self, git_commit):
svn_revision = self._run_git_svn_find_rev(git_commit)
return self._string_to_int_or_none(svn_revision)
def contents_at_revision(self, path, revision):
"""Returns a byte array (str()) containing the contents
of path @ revision in the repository."""
return self._run_git(["show", "%s:%s" % (self.git_commit_from_svn_revision(revision), path)], decode_output=False)
def diff_for_revision(self, revision):
git_commit = self.git_commit_from_svn_revision(revision)
return self.create_patch(git_commit)
def diff_for_file(self, path, log=None):
return self._run_git(['diff', 'HEAD', '--no-renames', '--', path])
def show_head(self, path):
return self._run_git(['show', 'HEAD:' + self.to_object_name(path)], decode_output=False)
def committer_email_for_revision(self, revision):
git_commit = self.git_commit_from_svn_revision(revision)
committer_email = self._run_git(["log", "-1", "--pretty=format:%ce", git_commit])
# Git adds an extra @repository_hash to the end of every committer email, remove it:
return committer_email.rsplit("@", 1)[0]
def apply_reverse_diff(self, revision):
# Assume the revision is an svn revision.
git_commit = self.git_commit_from_svn_revision(revision)
# I think this will always fail due to ChangeLogs.
self._run_git(['revert', '--no-commit', git_commit], error_handler=Executive.ignore_error)
def revert_files(self, file_paths):
self._run_git(['checkout', 'HEAD'] + file_paths)
def _assert_can_squash(self, working_directory_is_clean):
squash = Git.read_git_config('webkit-patch.commit-should-always-squash', cwd=self.checkout_root)
should_squash = squash and squash.lower() == "true"
if not should_squash:
# Only warn if there are actually multiple commits to squash.
num_local_commits = len(self.local_commits())
if num_local_commits > 1 or (num_local_commits > 0 and not working_directory_is_clean):
raise AmbiguousCommitError(num_local_commits, working_directory_is_clean)
def commit_with_message(self, message, username=None, password=None, git_commit=None, force_squash=False, changed_files=None):
# Username is ignored during Git commits.
working_directory_is_clean = self.working_directory_is_clean()
if git_commit:
# Special-case HEAD.. to mean working-copy changes only.
if git_commit.upper() == 'HEAD..':
if working_directory_is_clean:
raise ScriptError(message="The working copy is not modified. --git-commit=HEAD.. only commits working copy changes.")
self.commit_locally_with_message(message)
return self._commit_on_branch(message, 'HEAD', username=username, password=password)
# Need working directory changes to be committed so we can checkout the merge branch.
if not working_directory_is_clean:
# FIXME: webkit-patch land will modify the ChangeLogs to correct the reviewer.
# That will modify the working-copy and cause us to hit this error.
# The ChangeLog modification could be made to modify the existing local commit.
raise ScriptError(message="Working copy is modified. Cannot commit individual git_commits.")
return self._commit_on_branch(message, git_commit, username=username, password=password)
if not force_squash:
self._assert_can_squash(working_directory_is_clean)
self._run_git(['reset', '--soft', self.remote_merge_base()])
self.commit_locally_with_message(message)
return self.push_local_commits_to_server(username=username, password=password)
def _commit_on_branch(self, message, git_commit, username=None, password=None):
branch_name = self._current_branch()
commit_ids = self.commit_ids_from_commitish_arguments([git_commit])
# We want to squash all this branch's commits into one commit with the proper description.
# We do this by doing a "merge --squash" into a new commit branch, then dcommitting that.
MERGE_BRANCH_NAME = 'webkit-patch-land'
self.delete_branch(MERGE_BRANCH_NAME)
# We might be in a directory that's present in this branch but not in the
# trunk. Move up to the top of the tree so that git commands that expect a
# valid CWD won't fail after we check out the merge branch.
# FIXME: We should never be using chdir! We can instead pass cwd= to run_command/self.run!
self._filesystem.chdir(self.checkout_root)
# Stuff our change into the merge branch.
# We wrap in a try...finally block so if anything goes wrong, we clean up the branches.
commit_succeeded = True
try:
self._run_git(['checkout', '-q', '-b', MERGE_BRANCH_NAME, self.remote_branch_ref()])
for commit in commit_ids:
# We're on a different branch now, so convert "head" to the branch name.
commit = re.sub(r'(?i)head', branch_name, commit)
# FIXME: Once changed_files and create_patch are modified to separately handle each
# commit in a commit range, commit each cherry pick so they'll get dcommitted separately.
self._run_git(['cherry-pick', '--no-commit', commit])
self._run_git(['commit', '-m', message])
output = self.push_local_commits_to_server(username=username, password=password)
except Exception, e:
log("COMMIT FAILED: " + str(e))
output = "Commit failed."
commit_succeeded = False
finally:
# And then swap back to the original branch and clean up.
self.clean_working_directory()
self._run_git(['checkout', '-q', branch_name])
self.delete_branch(MERGE_BRANCH_NAME)
return output
def svn_commit_log(self, svn_revision):
svn_revision = self.strip_r_from_svn_revision(svn_revision)
return self._run_git(['svn', 'log', '-r', svn_revision])
def last_svn_commit_log(self):
return self._run_git(['svn', 'log', '--limit=1'])
def svn_blame(self, path):
return self._run_git(['svn', 'blame', path])
# Git-specific methods:
def _branch_ref_exists(self, branch_ref):
return self._run_git(['show-ref', '--quiet', '--verify', branch_ref], return_exit_code=True) == 0
def delete_branch(self, branch_name):
if self._branch_ref_exists('refs/heads/' + branch_name):
self._run_git(['branch', '-D', branch_name])
def remote_merge_base(self):
return self._run_git(['merge-base', self.remote_branch_ref(), 'HEAD']).strip()
def remote_branch_ref(self):
# Use references so that we can avoid collisions, e.g. we don't want to operate on refs/heads/trunk if it exists.
remote_branch_refs = Git.read_git_config('svn-remote.svn.fetch', cwd=self.checkout_root)
if not remote_branch_refs:
remote_master_ref = 'refs/remotes/origin/master'
if not self._branch_ref_exists(remote_master_ref):
raise ScriptError(message="Can't find a branch to diff against. svn-remote.svn.fetch is not in the git config and %s does not exist" % remote_master_ref)
return remote_master_ref
# FIXME: What's the right behavior when there are multiple svn-remotes listed?
# For now, just use the first one.
first_remote_branch_ref = remote_branch_refs.split('\n')[0]
return first_remote_branch_ref.split(':')[1]
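    # Illustration (comment added for clarity, not in the original source):
    # 'svn-remote.svn.fetch' holds values of the form "<svn-path>:<refname>",
    # e.g. "trunk:refs/remotes/origin/master"; the code above keeps only the
    # part after ':' and falls back to refs/remotes/origin/master when no
    # svn-remote is configured.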
def commit_locally_with_message(self, message):
self._run_git(['commit', '--all', '-F', '-'], input=message)
def push_local_commits_to_server(self, username=None, password=None):
dcommit_command = ['svn', 'dcommit']
if (not username or not password) and not self.has_authorization_for_realm(SVN.svn_server_realm):
raise AuthenticationError(SVN.svn_server_host, prompt_for_password=True)
if username:
dcommit_command.extend(["--username", username])
output = self._run_git(dcommit_command, error_handler=commit_error_handler, input=password)
return output
# This function supports the following argument formats:
# no args : rev-list trunk..HEAD
# A..B : rev-list A..B
# A...B : error!
# A B : [A, B] (different from git diff, which would use "rev-list A..B")
def commit_ids_from_commitish_arguments(self, args):
if not len(args):
args.append('%s..HEAD' % self.remote_branch_ref())
commit_ids = []
for commitish in args:
if '...' in commitish:
raise ScriptError(message="'...' is not supported (found in '%s'). Did you mean '..'?" % commitish)
elif '..' in commitish:
commit_ids += reversed(self._run_git(['rev-list', commitish]).splitlines())
else:
# Turn single commits or branch or tag names into commit ids.
commit_ids += self._run_git(['rev-parse', '--revs-only', commitish]).splitlines()
return commit_ids
def commit_message_for_local_commit(self, commit_id):
commit_lines = self._run_git(['cat-file', 'commit', commit_id]).splitlines()
# Skip the git headers.
first_line_after_headers = 0
for line in commit_lines:
first_line_after_headers += 1
if line == "":
break
return CommitMessage(commit_lines[first_line_after_headers:])
def files_changed_summary_for_commit(self, commit_id):
return self._run_git(['diff-tree', '--shortstat', '--no-renames', '--no-commit-id', commit_id])
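# Illustrative usage sketch (added here for context, not part of WebKit's
# source). It assumes webkitpy is importable and that SCM.__init__ supplies
# default Executive/FileSystem helpers when only cwd is given:
#
#   if Git.in_working_directory("/path/to/WebKit"):
#       scm = Git("/path/to/WebKit")
#       print scm.changed_files()      # e.g. ['Tools/Scripts/foo.py']
#       patch = scm.create_patch()     # byte string, prefixed with
#                                      # "Subversion Revision: NNNN" when known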
| {
"content_hash": "dddf61a851bb56b537d5352c761e5fad",
"timestamp": "",
"source": "github",
"line_count": 467,
"max_line_length": 189,
"avg_line_length": 47.8372591006424,
"alnum_prop": 0.6335720680393913,
"repo_name": "leighpauls/k2cro4",
"id": "f68823871ff4f766d640b3658b771322e7a7fc39",
"size": "23935",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "third_party/WebKit/Tools/Scripts/webkitpy/common/checkout/scm/git.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "3062"
},
{
"name": "AppleScript",
"bytes": "25392"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "68131038"
},
{
"name": "C",
"bytes": "242794338"
},
{
"name": "C#",
"bytes": "11024"
},
{
"name": "C++",
"bytes": "353525184"
},
{
"name": "Common Lisp",
"bytes": "3721"
},
{
"name": "D",
"bytes": "1931"
},
{
"name": "Emacs Lisp",
"bytes": "1639"
},
{
"name": "F#",
"bytes": "4992"
},
{
"name": "FORTRAN",
"bytes": "10404"
},
{
"name": "Java",
"bytes": "3845159"
},
{
"name": "JavaScript",
"bytes": "39146656"
},
{
"name": "Lua",
"bytes": "13768"
},
{
"name": "Matlab",
"bytes": "22373"
},
{
"name": "Objective-C",
"bytes": "21887598"
},
{
"name": "PHP",
"bytes": "2344144"
},
{
"name": "Perl",
"bytes": "49033099"
},
{
"name": "Prolog",
"bytes": "2926122"
},
{
"name": "Python",
"bytes": "39863959"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Racket",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "304063"
},
{
"name": "Scheme",
"bytes": "14853"
},
{
"name": "Shell",
"bytes": "9195117"
},
{
"name": "Tcl",
"bytes": "1919771"
},
{
"name": "Verilog",
"bytes": "3092"
},
{
"name": "Visual Basic",
"bytes": "1430"
},
{
"name": "eC",
"bytes": "5079"
}
],
"symlink_target": ""
} |
"""Fast Gradient Boosting decision trees for classification and regression."""
# Author: Nicolas Hug
from abc import ABC, abstractmethod
from collections.abc import Iterable
from functools import partial
from numbers import Real, Integral
import warnings
import numpy as np
from timeit import default_timer as time
from ..._loss.loss import (
_LOSSES,
BaseLoss,
HalfBinomialLoss,
HalfMultinomialLoss,
HalfPoissonLoss,
PinballLoss,
)
from ...base import BaseEstimator, RegressorMixin, ClassifierMixin, is_classifier
from ...utils import check_random_state, resample, compute_sample_weight
from ...utils.validation import (
check_is_fitted,
check_consistent_length,
_check_sample_weight,
)
from ...utils._param_validation import Interval, StrOptions
from ...utils._openmp_helpers import _openmp_effective_n_threads
from ...utils.multiclass import check_classification_targets
from ...metrics import check_scoring
from ...model_selection import train_test_split
from ...preprocessing import LabelEncoder
from ._gradient_boosting import _update_raw_predictions
from .common import Y_DTYPE, X_DTYPE, G_H_DTYPE
from .binning import _BinMapper
from .grower import TreeGrower
_LOSSES = _LOSSES.copy()
# TODO(1.3): Remove "binary_crossentropy" and "categorical_crossentropy"
_LOSSES.update(
{
"poisson": HalfPoissonLoss,
"quantile": PinballLoss,
"binary_crossentropy": HalfBinomialLoss,
"categorical_crossentropy": HalfMultinomialLoss,
}
)
def _update_leaves_values(loss, grower, y_true, raw_prediction, sample_weight):
"""Update the leaf values to be predicted by the tree.
Update equals:
loss.fit_intercept_only(y_true - raw_prediction)
This is only applied if loss.need_update_leaves_values is True.
    Note: It only works if the loss is a function of the residual, as is the
case for AbsoluteError and PinballLoss. Otherwise, one would need to get
the minimum of loss(y_true, raw_prediction + x) in x. A few examples:
- AbsoluteError: median(y_true - raw_prediction).
- PinballLoss: quantile(y_true - raw_prediction).
See also notes about need_update_leaves_values in BaseLoss.
"""
# TODO: Ideally this should be computed in parallel over the leaves using something
# similar to _update_raw_predictions(), but this requires a cython version of
# median().
for leaf in grower.finalized_leaves:
indices = leaf.sample_indices
if sample_weight is None:
sw = None
else:
sw = sample_weight[indices]
update = loss.fit_intercept_only(
y_true=y_true[indices] - raw_prediction[indices],
sample_weight=sw,
)
leaf.value = grower.shrinkage * update
# Note that the regularization is ignored here
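# Worked illustration (added for clarity, not part of scikit-learn): for a loss
# whose optimal constant is the median of the residuals (e.g. AbsoluteError),
# the per-leaf update above amounts to, ignoring sample weights:
#
#   residuals = y_true[leaf.sample_indices] - raw_prediction[leaf.sample_indices]
#   leaf.value = grower.shrinkage * np.median(residuals)
#
# i.e. the shrunk median of the residuals that fall into that leaf.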
class BaseHistGradientBoosting(BaseEstimator, ABC):
"""Base class for histogram-based gradient boosting estimators."""
_parameter_constraints: dict = {
"loss": [BaseLoss],
"learning_rate": [Interval(Real, 0, None, closed="neither")],
"max_iter": [Interval(Integral, 1, None, closed="left")],
"max_leaf_nodes": [Interval(Integral, 2, None, closed="left"), None],
"max_depth": [Interval(Integral, 1, None, closed="left"), None],
"min_samples_leaf": [Interval(Integral, 1, None, closed="left")],
"l2_regularization": [Interval(Real, 0, None, closed="left")],
"monotonic_cst": ["array-like", None],
"interaction_cst": [Iterable, None],
"n_iter_no_change": [Interval(Integral, 1, None, closed="left")],
"validation_fraction": [
Interval(Real, 0, 1, closed="neither"),
Interval(Integral, 1, None, closed="left"),
None,
],
"tol": [Interval(Real, 0, None, closed="left")],
"max_bins": [Interval(Integral, 2, 255, closed="both")],
"categorical_features": ["array-like", None],
"warm_start": ["boolean"],
"early_stopping": [StrOptions({"auto"}), "boolean"],
"scoring": [str, callable, None],
"verbose": ["verbose"],
"random_state": ["random_state"],
}
@abstractmethod
def __init__(
self,
loss,
*,
learning_rate,
max_iter,
max_leaf_nodes,
max_depth,
min_samples_leaf,
l2_regularization,
max_bins,
categorical_features,
monotonic_cst,
interaction_cst,
warm_start,
early_stopping,
scoring,
validation_fraction,
n_iter_no_change,
tol,
verbose,
random_state,
):
self.loss = loss
self.learning_rate = learning_rate
self.max_iter = max_iter
self.max_leaf_nodes = max_leaf_nodes
self.max_depth = max_depth
self.min_samples_leaf = min_samples_leaf
self.l2_regularization = l2_regularization
self.max_bins = max_bins
self.monotonic_cst = monotonic_cst
self.interaction_cst = interaction_cst
self.categorical_features = categorical_features
self.warm_start = warm_start
self.early_stopping = early_stopping
self.scoring = scoring
self.validation_fraction = validation_fraction
self.n_iter_no_change = n_iter_no_change
self.tol = tol
self.verbose = verbose
self.random_state = random_state
def _validate_parameters(self):
"""Validate parameters passed to __init__.
The parameters that are directly passed to the grower are checked in
TreeGrower."""
if self.monotonic_cst is not None and self.n_trees_per_iteration_ != 1:
raise ValueError(
"monotonic constraints are not supported for multiclass classification."
)
def _finalize_sample_weight(self, sample_weight, y):
"""Finalize sample weight.
Used by subclasses to adjust sample_weights. This is useful for implementing
class weights.
"""
return sample_weight
def _check_categories(self, X):
"""Check and validate categorical features in X
Return
------
is_categorical : ndarray of shape (n_features,) or None, dtype=bool
Indicates whether a feature is categorical. If no feature is
categorical, this is None.
known_categories : list of size n_features or None
The list contains, for each feature:
- an array of shape (n_categories,) with the unique cat values
- None if the feature is not categorical
None if no feature is categorical.
"""
if self.categorical_features is None:
return None, None
categorical_features = np.asarray(self.categorical_features)
if categorical_features.size == 0:
return None, None
if categorical_features.dtype.kind not in ("i", "b"):
raise ValueError(
"categorical_features must be an array-like of "
"bools or array-like of ints."
)
n_features = X.shape[1]
# check for categorical features as indices
if categorical_features.dtype.kind == "i":
if (
np.max(categorical_features) >= n_features
or np.min(categorical_features) < 0
):
raise ValueError(
"categorical_features set as integer "
"indices must be in [0, n_features - 1]"
)
is_categorical = np.zeros(n_features, dtype=bool)
is_categorical[categorical_features] = True
else:
if categorical_features.shape[0] != n_features:
raise ValueError(
"categorical_features set as a boolean mask "
"must have shape (n_features,), got: "
f"{categorical_features.shape}"
)
is_categorical = categorical_features
if not np.any(is_categorical):
return None, None
# compute the known categories in the training data. We need to do
# that here instead of in the BinMapper because in case of early
# stopping, the mapper only gets a fraction of the training data.
known_categories = []
for f_idx in range(n_features):
if is_categorical[f_idx]:
categories = np.unique(X[:, f_idx])
missing = np.isnan(categories)
if missing.any():
categories = categories[~missing]
if categories.size > self.max_bins:
raise ValueError(
f"Categorical feature at index {f_idx} is "
"expected to have a "
f"cardinality <= {self.max_bins}"
)
if (categories >= self.max_bins).any():
raise ValueError(
f"Categorical feature at index {f_idx} is "
"expected to be encoded with "
f"values < {self.max_bins}"
)
else:
categories = None
known_categories.append(categories)
return is_categorical, known_categories
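    # Example (added for clarity, not part of scikit-learn): with n_features=3
    # and categorical_features=[1], this returns
    #   is_categorical   = array([False,  True, False])
    #   known_categories = [None, <unique non-NaN values of X[:, 1]>, None]
    # provided column 1 has at most max_bins categories, all encoded < max_bins.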
def _check_interaction_cst(self, n_features):
"""Check and validation for interaction constraints."""
if self.interaction_cst is None:
return None
if not (
isinstance(self.interaction_cst, Iterable)
and all(isinstance(x, Iterable) for x in self.interaction_cst)
):
raise ValueError(
"Interaction constraints must be None or an iterable of iterables, "
f"got: {self.interaction_cst!r}."
)
invalid_indices = [
x
for cst_set in self.interaction_cst
for x in cst_set
if not (isinstance(x, Integral) and 0 <= x < n_features)
]
if invalid_indices:
raise ValueError(
"Interaction constraints must consist of integer indices in [0,"
f" n_features - 1] = [0, {n_features - 1}], specifying the position of"
f" features, got invalid indices: {invalid_indices!r}"
)
constraints = [set(group) for group in self.interaction_cst]
# Add all not listed features as own group by default.
rest = set(range(n_features)) - set().union(*constraints)
if len(rest) > 0:
constraints.append(rest)
return constraints
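    # Example (added for clarity, not part of scikit-learn): with n_features=5
    # and interaction_cst=[{0, 1}], this returns [{0, 1}, {2, 3, 4}]; features
    # not listed in any constraint group are gathered into one extra group.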
def fit(self, X, y, sample_weight=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,) default=None
Weights of training data.
.. versionadded:: 0.23
Returns
-------
self : object
Fitted estimator.
"""
self._validate_params()
fit_start_time = time()
acc_find_split_time = 0.0 # time spent finding the best splits
acc_apply_split_time = 0.0 # time spent splitting nodes
acc_compute_hist_time = 0.0 # time spent computing histograms
# time spent predicting X for gradient and hessians update
acc_prediction_time = 0.0
X, y = self._validate_data(X, y, dtype=[X_DTYPE], force_all_finite=False)
y = self._encode_y(y)
check_consistent_length(X, y)
# Do not create unit sample weights by default to later skip some
# computation
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=np.float64)
# TODO: remove when PDP supports sample weights
self._fitted_with_sw = True
sample_weight = self._finalize_sample_weight(sample_weight, y)
rng = check_random_state(self.random_state)
# When warm starting, we want to re-use the same seed that was used
# the first time fit was called (e.g. for subsampling or for the
# train/val split).
if not (self.warm_start and self._is_fitted()):
self._random_seed = rng.randint(np.iinfo(np.uint32).max, dtype="u8")
self._validate_parameters()
# used for validation in predict
n_samples, self._n_features = X.shape
self.is_categorical_, known_categories = self._check_categories(X)
# Encode constraints into a list of sets of features indices (integers).
interaction_cst = self._check_interaction_cst(self._n_features)
# we need this stateful variable to tell raw_predict() that it was
# called from fit() (this current method), and that the data it has
# received is pre-binned.
# predicting is faster on pre-binned data, so we want early stopping
# predictions to be made on pre-binned data. Unfortunately the _scorer
# can only call predict() or predict_proba(), not raw_predict(), and
# there's no way to tell the scorer that it needs to predict binned
# data.
self._in_fit = True
        # `_openmp_effective_n_threads` is used to take cgroups CPU quotas
        # into account when determining the maximum number of threads to use.
n_threads = _openmp_effective_n_threads()
if isinstance(self.loss, str):
self._loss = self._get_loss(sample_weight=sample_weight)
elif isinstance(self.loss, BaseLoss):
self._loss = self.loss
if self.early_stopping == "auto":
self.do_early_stopping_ = n_samples > 10000
else:
self.do_early_stopping_ = self.early_stopping
# create validation data if needed
self._use_validation_data = self.validation_fraction is not None
if self.do_early_stopping_ and self._use_validation_data:
# stratify for classification
# instead of checking predict_proba, loss.n_classes >= 2 would also work
stratify = y if hasattr(self._loss, "predict_proba") else None
# Save the state of the RNG for the training and validation split.
# This is needed in order to have the same split when using
# warm starting.
if sample_weight is None:
X_train, X_val, y_train, y_val = train_test_split(
X,
y,
test_size=self.validation_fraction,
stratify=stratify,
random_state=self._random_seed,
)
sample_weight_train = sample_weight_val = None
else:
# TODO: incorporate sample_weight in sampling here, as well as
# stratify
(
X_train,
X_val,
y_train,
y_val,
sample_weight_train,
sample_weight_val,
) = train_test_split(
X,
y,
sample_weight,
test_size=self.validation_fraction,
stratify=stratify,
random_state=self._random_seed,
)
else:
X_train, y_train, sample_weight_train = X, y, sample_weight
X_val = y_val = sample_weight_val = None
# Bin the data
# For ease of use of the API, the user-facing GBDT classes accept the
# parameter max_bins, which doesn't take into account the bin for
# missing values (which is always allocated). However, since max_bins
# isn't the true maximal number of bins, all other private classes
# (binmapper, histbuilder...) accept n_bins instead, which is the
# actual total number of bins. Everywhere in the code, the
# convention is that n_bins == max_bins + 1
n_bins = self.max_bins + 1 # + 1 for missing values
self._bin_mapper = _BinMapper(
n_bins=n_bins,
is_categorical=self.is_categorical_,
known_categories=known_categories,
random_state=self._random_seed,
n_threads=n_threads,
)
X_binned_train = self._bin_data(X_train, is_training_data=True)
if X_val is not None:
X_binned_val = self._bin_data(X_val, is_training_data=False)
else:
X_binned_val = None
# Uses binned data to check for missing values
has_missing_values = (
(X_binned_train == self._bin_mapper.missing_values_bin_idx_)
.any(axis=0)
.astype(np.uint8)
)
if self.verbose:
print("Fitting gradient boosted rounds:")
n_samples = X_binned_train.shape[0]
# First time calling fit, or no warm start
if not (self._is_fitted() and self.warm_start):
# Clear random state and score attributes
self._clear_state()
# initialize raw_predictions: those are the accumulated values
# predicted by the trees for the training data. raw_predictions has
# shape (n_samples, n_trees_per_iteration) where
# n_trees_per_iterations is n_classes in multiclass classification,
# else 1.
# self._baseline_prediction has shape (1, n_trees_per_iteration)
self._baseline_prediction = self._loss.fit_intercept_only(
y_true=y_train, sample_weight=sample_weight_train
).reshape((1, -1))
raw_predictions = np.zeros(
shape=(n_samples, self.n_trees_per_iteration_),
dtype=self._baseline_prediction.dtype,
order="F",
)
raw_predictions += self._baseline_prediction
# predictors is a matrix (list of lists) of TreePredictor objects
# with shape (n_iter_, n_trees_per_iteration)
self._predictors = predictors = []
# Initialize structures and attributes related to early stopping
self._scorer = None # set if scoring != loss
raw_predictions_val = None # set if scoring == loss and use val
self.train_score_ = []
self.validation_score_ = []
if self.do_early_stopping_:
# populate train_score and validation_score with the
# predictions of the initial model (before the first tree)
if self.scoring == "loss":
# we're going to compute scoring w.r.t the loss. As losses
# take raw predictions as input (unlike the scorers), we
# can optimize a bit and avoid repeating computing the
# predictions of the previous trees. We'll re-use
# raw_predictions (as it's needed for training anyway) for
# evaluating the training loss, and create
# raw_predictions_val for storing the raw predictions of
# the validation data.
if self._use_validation_data:
raw_predictions_val = np.zeros(
shape=(X_binned_val.shape[0], self.n_trees_per_iteration_),
dtype=self._baseline_prediction.dtype,
order="F",
)
raw_predictions_val += self._baseline_prediction
self._check_early_stopping_loss(
raw_predictions=raw_predictions,
y_train=y_train,
sample_weight_train=sample_weight_train,
raw_predictions_val=raw_predictions_val,
y_val=y_val,
sample_weight_val=sample_weight_val,
n_threads=n_threads,
)
else:
self._scorer = check_scoring(self, self.scoring)
# _scorer is a callable with signature (est, X, y) and
# calls est.predict() or est.predict_proba() depending on
# its nature.
# Unfortunately, each call to _scorer() will compute
# the predictions of all the trees. So we use a subset of
# the training set to compute train scores.
# Compute the subsample set
(
X_binned_small_train,
y_small_train,
sample_weight_small_train,
) = self._get_small_trainset(
X_binned_train, y_train, sample_weight_train, self._random_seed
)
self._check_early_stopping_scorer(
X_binned_small_train,
y_small_train,
sample_weight_small_train,
X_binned_val,
y_val,
sample_weight_val,
)
begin_at_stage = 0
# warm start: this is not the first time fit was called
else:
# Check that the maximum number of iterations is not smaller
# than the number of iterations from the previous fit
if self.max_iter < self.n_iter_:
raise ValueError(
"max_iter=%d must be larger than or equal to "
"n_iter_=%d when warm_start==True" % (self.max_iter, self.n_iter_)
)
# Convert array attributes to lists
self.train_score_ = self.train_score_.tolist()
self.validation_score_ = self.validation_score_.tolist()
# Compute raw predictions
raw_predictions = self._raw_predict(X_binned_train, n_threads=n_threads)
if self.do_early_stopping_ and self._use_validation_data:
raw_predictions_val = self._raw_predict(
X_binned_val, n_threads=n_threads
)
else:
raw_predictions_val = None
if self.do_early_stopping_ and self.scoring != "loss":
# Compute the subsample set
(
X_binned_small_train,
y_small_train,
sample_weight_small_train,
) = self._get_small_trainset(
X_binned_train, y_train, sample_weight_train, self._random_seed
)
# Get the predictors from the previous fit
predictors = self._predictors
begin_at_stage = self.n_iter_
# initialize gradients and hessians (empty arrays).
# shape = (n_samples, n_trees_per_iteration).
gradient, hessian = self._loss.init_gradient_and_hessian(
n_samples=n_samples, dtype=G_H_DTYPE, order="F"
)
for iteration in range(begin_at_stage, self.max_iter):
if self.verbose:
iteration_start_time = time()
print(
"[{}/{}] ".format(iteration + 1, self.max_iter), end="", flush=True
)
# Update gradients and hessians, inplace
# Note that self._loss expects shape (n_samples,) for
# n_trees_per_iteration = 1 else shape (n_samples, n_trees_per_iteration).
if self._loss.constant_hessian:
self._loss.gradient(
y_true=y_train,
raw_prediction=raw_predictions,
sample_weight=sample_weight_train,
gradient_out=gradient,
n_threads=n_threads,
)
else:
self._loss.gradient_hessian(
y_true=y_train,
raw_prediction=raw_predictions,
sample_weight=sample_weight_train,
gradient_out=gradient,
hessian_out=hessian,
n_threads=n_threads,
)
# Append a list since there may be more than 1 predictor per iter
predictors.append([])
# 2-d views of shape (n_samples, n_trees_per_iteration_) or (n_samples, 1)
# on gradient and hessian to simplify the loop over n_trees_per_iteration_.
if gradient.ndim == 1:
g_view = gradient.reshape((-1, 1))
h_view = hessian.reshape((-1, 1))
else:
g_view = gradient
h_view = hessian
# Build `n_trees_per_iteration` trees.
for k in range(self.n_trees_per_iteration_):
grower = TreeGrower(
X_binned=X_binned_train,
gradients=g_view[:, k],
hessians=h_view[:, k],
n_bins=n_bins,
n_bins_non_missing=self._bin_mapper.n_bins_non_missing_,
has_missing_values=has_missing_values,
is_categorical=self.is_categorical_,
monotonic_cst=self.monotonic_cst,
interaction_cst=interaction_cst,
max_leaf_nodes=self.max_leaf_nodes,
max_depth=self.max_depth,
min_samples_leaf=self.min_samples_leaf,
l2_regularization=self.l2_regularization,
shrinkage=self.learning_rate,
n_threads=n_threads,
)
grower.grow()
acc_apply_split_time += grower.total_apply_split_time
acc_find_split_time += grower.total_find_split_time
acc_compute_hist_time += grower.total_compute_hist_time
if self._loss.need_update_leaves_values:
_update_leaves_values(
loss=self._loss,
grower=grower,
y_true=y_train,
raw_prediction=raw_predictions[:, k],
sample_weight=sample_weight_train,
)
predictor = grower.make_predictor(
binning_thresholds=self._bin_mapper.bin_thresholds_
)
predictors[-1].append(predictor)
# Update raw_predictions with the predictions of the newly
# created tree.
tic_pred = time()
_update_raw_predictions(raw_predictions[:, k], grower, n_threads)
toc_pred = time()
acc_prediction_time += toc_pred - tic_pred
should_early_stop = False
if self.do_early_stopping_:
if self.scoring == "loss":
# Update raw_predictions_val with the newest tree(s)
if self._use_validation_data:
for k, pred in enumerate(self._predictors[-1]):
raw_predictions_val[:, k] += pred.predict_binned(
X_binned_val,
self._bin_mapper.missing_values_bin_idx_,
n_threads,
)
should_early_stop = self._check_early_stopping_loss(
raw_predictions=raw_predictions,
y_train=y_train,
sample_weight_train=sample_weight_train,
raw_predictions_val=raw_predictions_val,
y_val=y_val,
sample_weight_val=sample_weight_val,
n_threads=n_threads,
)
else:
should_early_stop = self._check_early_stopping_scorer(
X_binned_small_train,
y_small_train,
sample_weight_small_train,
X_binned_val,
y_val,
sample_weight_val,
)
if self.verbose:
self._print_iteration_stats(iteration_start_time)
# maybe we could also early stop if all the trees are stumps?
if should_early_stop:
break
if self.verbose:
duration = time() - fit_start_time
n_total_leaves = sum(
predictor.get_n_leaf_nodes()
for predictors_at_ith_iteration in self._predictors
for predictor in predictors_at_ith_iteration
)
n_predictors = sum(
len(predictors_at_ith_iteration)
for predictors_at_ith_iteration in self._predictors
)
print(
"Fit {} trees in {:.3f} s, ({} total leaves)".format(
n_predictors, duration, n_total_leaves
)
)
print(
"{:<32} {:.3f}s".format(
"Time spent computing histograms:", acc_compute_hist_time
)
)
print(
"{:<32} {:.3f}s".format(
"Time spent finding best splits:", acc_find_split_time
)
)
print(
"{:<32} {:.3f}s".format(
"Time spent applying splits:", acc_apply_split_time
)
)
print(
"{:<32} {:.3f}s".format("Time spent predicting:", acc_prediction_time)
)
self.train_score_ = np.asarray(self.train_score_)
self.validation_score_ = np.asarray(self.validation_score_)
del self._in_fit # hard delete so we're sure it can't be used anymore
return self
def _is_fitted(self):
return len(getattr(self, "_predictors", [])) > 0
def _clear_state(self):
"""Clear the state of the gradient boosting model."""
for var in ("train_score_", "validation_score_"):
if hasattr(self, var):
delattr(self, var)
def _get_small_trainset(self, X_binned_train, y_train, sample_weight_train, seed):
"""Compute the indices of the subsample set and return this set.
For efficiency, we need to subsample the training set to compute scores
with scorers.
"""
# TODO: incorporate sample_weights here in `resample`
subsample_size = 10000
if X_binned_train.shape[0] > subsample_size:
indices = np.arange(X_binned_train.shape[0])
stratify = y_train if is_classifier(self) else None
indices = resample(
indices,
n_samples=subsample_size,
replace=False,
random_state=seed,
stratify=stratify,
)
X_binned_small_train = X_binned_train[indices]
y_small_train = y_train[indices]
if sample_weight_train is not None:
sample_weight_small_train = sample_weight_train[indices]
else:
sample_weight_small_train = None
X_binned_small_train = np.ascontiguousarray(X_binned_small_train)
return (X_binned_small_train, y_small_train, sample_weight_small_train)
else:
return X_binned_train, y_train, sample_weight_train
def _check_early_stopping_scorer(
self,
X_binned_small_train,
y_small_train,
sample_weight_small_train,
X_binned_val,
y_val,
sample_weight_val,
):
"""Check if fitting should be early-stopped based on scorer.
Scores are computed on validation data or on training data.
"""
if is_classifier(self):
y_small_train = self.classes_[y_small_train.astype(int)]
if sample_weight_small_train is None:
self.train_score_.append(
self._scorer(self, X_binned_small_train, y_small_train)
)
else:
self.train_score_.append(
self._scorer(
self,
X_binned_small_train,
y_small_train,
sample_weight=sample_weight_small_train,
)
)
if self._use_validation_data:
if is_classifier(self):
y_val = self.classes_[y_val.astype(int)]
if sample_weight_val is None:
self.validation_score_.append(self._scorer(self, X_binned_val, y_val))
else:
self.validation_score_.append(
self._scorer(
self, X_binned_val, y_val, sample_weight=sample_weight_val
)
)
return self._should_stop(self.validation_score_)
else:
return self._should_stop(self.train_score_)
def _check_early_stopping_loss(
self,
raw_predictions,
y_train,
sample_weight_train,
raw_predictions_val,
y_val,
sample_weight_val,
n_threads=1,
):
"""Check if fitting should be early-stopped based on loss.
Scores are computed on validation data or on training data.
"""
self.train_score_.append(
-self._loss(
y_true=y_train,
raw_prediction=raw_predictions,
sample_weight=sample_weight_train,
n_threads=n_threads,
)
)
if self._use_validation_data:
self.validation_score_.append(
-self._loss(
y_true=y_val,
raw_prediction=raw_predictions_val,
sample_weight=sample_weight_val,
n_threads=n_threads,
)
)
return self._should_stop(self.validation_score_)
else:
return self._should_stop(self.train_score_)
def _should_stop(self, scores):
"""
Return True (do early stopping) if the last n scores aren't better
than the (n-1)th-to-last score, up to some tolerance.
"""
reference_position = self.n_iter_no_change + 1
if len(scores) < reference_position:
return False
# A higher score is always better. Higher tol means that it will be
        # harder for subsequent iterations to be considered an improvement upon
# the reference score, and therefore it is more likely to early stop
# because of the lack of significant improvement.
reference_score = scores[-reference_position] + self.tol
recent_scores = scores[-reference_position + 1 :]
recent_improvements = [score > reference_score for score in recent_scores]
return not any(recent_improvements)
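    # Standalone illustration (added for clarity, not part of scikit-learn) of
    # the rule above with n_iter_no_change=2 and tol=0.0 (higher is better):
    #   scores    = [0.80, 0.85, 0.851, 0.850]
    #   reference = scores[-3] + 0.0                       # 0.85
    #   stop      = not any(s > reference for s in scores[-2:])
    # Here 0.851 > 0.85, so an improvement was seen and training continues.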
def _bin_data(self, X, is_training_data):
"""Bin data X.
If is_training_data, then fit the _bin_mapper attribute.
Else, the binned data is converted to a C-contiguous array.
"""
description = "training" if is_training_data else "validation"
if self.verbose:
print(
"Binning {:.3f} GB of {} data: ".format(X.nbytes / 1e9, description),
end="",
flush=True,
)
tic = time()
if is_training_data:
X_binned = self._bin_mapper.fit_transform(X) # F-aligned array
else:
X_binned = self._bin_mapper.transform(X) # F-aligned array
# We convert the array to C-contiguous since predicting is faster
# with this layout (training is faster on F-arrays though)
X_binned = np.ascontiguousarray(X_binned)
toc = time()
if self.verbose:
duration = toc - tic
print("{:.3f} s".format(duration))
return X_binned
def _print_iteration_stats(self, iteration_start_time):
"""Print info about the current fitting iteration."""
log_msg = ""
predictors_of_ith_iteration = [
predictors_list
for predictors_list in self._predictors[-1]
if predictors_list
]
n_trees = len(predictors_of_ith_iteration)
max_depth = max(
predictor.get_max_depth() for predictor in predictors_of_ith_iteration
)
n_leaves = sum(
predictor.get_n_leaf_nodes() for predictor in predictors_of_ith_iteration
)
if n_trees == 1:
log_msg += "{} tree, {} leaves, ".format(n_trees, n_leaves)
else:
log_msg += "{} trees, {} leaves ".format(n_trees, n_leaves)
log_msg += "({} on avg), ".format(int(n_leaves / n_trees))
log_msg += "max depth = {}, ".format(max_depth)
if self.do_early_stopping_:
if self.scoring == "loss":
factor = -1 # score_ arrays contain the negative loss
name = "loss"
else:
factor = 1
name = "score"
log_msg += "train {}: {:.5f}, ".format(name, factor * self.train_score_[-1])
if self._use_validation_data:
log_msg += "val {}: {:.5f}, ".format(
name, factor * self.validation_score_[-1]
)
iteration_time = time() - iteration_start_time
log_msg += "in {:0.3f}s".format(iteration_time)
print(log_msg)
def _raw_predict(self, X, n_threads=None):
"""Return the sum of the leaves values over all predictors.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
n_threads : int, default=None
Number of OpenMP threads to use. `_openmp_effective_n_threads` is called
            to determine the effective number of threads to use, which takes cgroups CPU
            quotas into account. See the docstring of `_openmp_effective_n_threads`
for details.
Returns
-------
raw_predictions : array, shape (n_samples, n_trees_per_iteration)
The raw predicted values.
"""
is_binned = getattr(self, "_in_fit", False)
if not is_binned:
X = self._validate_data(
X, dtype=X_DTYPE, force_all_finite=False, reset=False
)
check_is_fitted(self)
if X.shape[1] != self._n_features:
raise ValueError(
"X has {} features but this estimator was trained with "
"{} features.".format(X.shape[1], self._n_features)
)
n_samples = X.shape[0]
raw_predictions = np.zeros(
shape=(n_samples, self.n_trees_per_iteration_),
dtype=self._baseline_prediction.dtype,
order="F",
)
raw_predictions += self._baseline_prediction
# We intentionally decouple the number of threads used at prediction
# time from the number of threads used at fit time because the model
# can be deployed on a different machine for prediction purposes.
n_threads = _openmp_effective_n_threads(n_threads)
self._predict_iterations(
X, self._predictors, raw_predictions, is_binned, n_threads
)
return raw_predictions
def _predict_iterations(self, X, predictors, raw_predictions, is_binned, n_threads):
"""Add the predictions of the predictors to raw_predictions."""
if not is_binned:
(
known_cat_bitsets,
f_idx_map,
) = self._bin_mapper.make_known_categories_bitsets()
for predictors_of_ith_iteration in predictors:
for k, predictor in enumerate(predictors_of_ith_iteration):
if is_binned:
predict = partial(
predictor.predict_binned,
missing_values_bin_idx=self._bin_mapper.missing_values_bin_idx_,
n_threads=n_threads,
)
else:
predict = partial(
predictor.predict,
known_cat_bitsets=known_cat_bitsets,
f_idx_map=f_idx_map,
n_threads=n_threads,
)
raw_predictions[:, k] += predict(X)
def _staged_raw_predict(self, X):
"""Compute raw predictions of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
Yields
------
raw_predictions : generator of ndarray of shape \
(n_samples, n_trees_per_iteration)
The raw predictions of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
"""
X = self._validate_data(X, dtype=X_DTYPE, force_all_finite=False, reset=False)
check_is_fitted(self)
if X.shape[1] != self._n_features:
raise ValueError(
"X has {} features but this estimator was trained with "
"{} features.".format(X.shape[1], self._n_features)
)
n_samples = X.shape[0]
raw_predictions = np.zeros(
shape=(n_samples, self.n_trees_per_iteration_),
dtype=self._baseline_prediction.dtype,
order="F",
)
raw_predictions += self._baseline_prediction
# We intentionally decouple the number of threads used at prediction
# time from the number of threads used at fit time because the model
# can be deployed on a different machine for prediction purposes.
n_threads = _openmp_effective_n_threads()
for iteration in range(len(self._predictors)):
self._predict_iterations(
X,
self._predictors[iteration : iteration + 1],
raw_predictions,
is_binned=False,
n_threads=n_threads,
)
yield raw_predictions.copy()
def _compute_partial_dependence_recursion(self, grid, target_features):
"""Fast partial dependence computation.
Parameters
----------
grid : ndarray, shape (n_samples, n_target_features)
The grid points on which the partial dependence should be
evaluated.
target_features : ndarray, shape (n_target_features)
The set of target features for which the partial dependence
should be evaluated.
Returns
-------
averaged_predictions : ndarray, shape \
(n_trees_per_iteration, n_samples)
The value of the partial dependence function on each grid point.
"""
if getattr(self, "_fitted_with_sw", False):
raise NotImplementedError(
"{} does not support partial dependence "
"plots with the 'recursion' method when "
"sample weights were given during fit "
"time.".format(self.__class__.__name__)
)
grid = np.asarray(grid, dtype=X_DTYPE, order="C")
averaged_predictions = np.zeros(
(self.n_trees_per_iteration_, grid.shape[0]), dtype=Y_DTYPE
)
for predictors_of_ith_iteration in self._predictors:
for k, predictor in enumerate(predictors_of_ith_iteration):
predictor.compute_partial_dependence(
grid, target_features, averaged_predictions[k]
)
# Note that the learning rate is already accounted for in the leaves
# values.
return averaged_predictions
def _more_tags(self):
return {"allow_nan": True}
@abstractmethod
def _get_loss(self, sample_weight):
pass
@abstractmethod
def _encode_y(self, y=None):
pass
@property
def n_iter_(self):
"""Number of iterations of the boosting process."""
check_is_fitted(self)
return len(self._predictors)
class HistGradientBoostingRegressor(RegressorMixin, BaseHistGradientBoosting):
"""Histogram-based Gradient Boosting Regression Tree.
This estimator is much faster than
:class:`GradientBoostingRegressor<sklearn.ensemble.GradientBoostingRegressor>`
for big datasets (n_samples >= 10 000).
This estimator has native support for missing values (NaNs). During
training, the tree grower learns at each split point whether samples
with missing values should go to the left or right child, based on the
potential gain. When predicting, samples with missing values are
    assigned to the left or right child accordingly. If no missing values
were encountered for a given feature during training, then samples with
missing values are mapped to whichever child has the most samples.
This implementation is inspired by
`LightGBM <https://github.com/Microsoft/LightGBM>`_.
Read more in the :ref:`User Guide <histogram_based_gradient_boosting>`.
.. versionadded:: 0.21
Parameters
----------
loss : {'squared_error', 'absolute_error', 'poisson', 'quantile'}, \
default='squared_error'
The loss function to use in the boosting process. Note that the
"squared error" and "poisson" losses actually implement
"half least squares loss" and "half poisson deviance" to simplify the
computation of the gradient. Furthermore, "poisson" loss internally
uses a log-link and requires ``y >= 0``.
"quantile" uses the pinball loss.
.. versionchanged:: 0.23
Added option 'poisson'.
.. versionchanged:: 1.1
Added option 'quantile'.
quantile : float, default=None
If loss is "quantile", this parameter specifies which quantile to be estimated
and must be between 0 and 1.
learning_rate : float, default=0.1
The learning rate, also known as *shrinkage*. This is used as a
multiplicative factor for the leaves values. Use ``1`` for no
shrinkage.
max_iter : int, default=100
The maximum number of iterations of the boosting process, i.e. the
maximum number of trees.
max_leaf_nodes : int or None, default=31
The maximum number of leaves for each tree. Must be strictly greater
than 1. If None, there is no maximum limit.
max_depth : int or None, default=None
The maximum depth of each tree. The depth of a tree is the number of
edges to go from the root to the deepest leaf.
Depth isn't constrained by default.
min_samples_leaf : int, default=20
The minimum number of samples per leaf. For small datasets with less
than a few hundred samples, it is recommended to lower this value
since only very shallow trees would be built.
l2_regularization : float, default=0
The L2 regularization parameter. Use ``0`` for no regularization
(default).
max_bins : int, default=255
The maximum number of bins to use for non-missing values. Before
training, each feature of the input array `X` is binned into
integer-valued bins, which allows for a much faster training stage.
Features with a small number of unique values may use less than
``max_bins`` bins. In addition to the ``max_bins`` bins, one more bin
is always reserved for missing values. Must be no larger than 255.
categorical_features : array-like of {bool, int} of shape (n_features) \
or shape (n_categorical_features,), default=None
Indicates the categorical features.
- None : no feature will be considered categorical.
- boolean array-like : boolean mask indicating categorical features.
- integer array-like : integer indices indicating categorical
features.
For each categorical feature, there must be at most `max_bins` unique
        categories, and each categorical value must be in [0, max_bins - 1].
During prediction, categories encoded as a negative value are treated as
missing values.
Read more in the :ref:`User Guide <categorical_support_gbdt>`.
.. versionadded:: 0.24
monotonic_cst : array-like of int of shape (n_features), default=None
Indicates the monotonic constraint to enforce on each feature.
- 1: monotonic increase
- 0: no constraint
- -1: monotonic decrease
Read more in the :ref:`User Guide <monotonic_cst_gbdt>`.
.. versionadded:: 0.23
interaction_cst : iterable of iterables of int, default=None
Specify interaction constraints, i.e. sets of features which can
only interact with each other in child nodes splits.
Each iterable materializes a constraint by the set of indices of
the features that are allowed to interact with each other.
If there are more features than specified in these constraints,
they are treated as if they were specified as an additional set.
For instance, with 5 features in total, `interaction_cst=[{0, 1}]`
is equivalent to `interaction_cst=[{0, 1}, {2, 3, 4}]`,
and specifies that each branch of a tree will either only split
on features 0 and 1 or only split on features 2, 3 and 4.
.. versionadded:: 1.2
warm_start : bool, default=False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble. For results to be valid, the
estimator should be re-trained on the same data only.
See :term:`the Glossary <warm_start>`.
early_stopping : 'auto' or bool, default='auto'
If 'auto', early stopping is enabled if the sample size is larger than
10000. If True, early stopping is enabled, otherwise early stopping is
disabled.
.. versionadded:: 0.23
scoring : str or callable or None, default='loss'
Scoring parameter to use for early stopping. It can be a single
string (see :ref:`scoring_parameter`) or a callable (see
:ref:`scoring`). If None, the estimator's default scorer is used. If
``scoring='loss'``, early stopping is checked w.r.t the loss value.
Only used if early stopping is performed.
validation_fraction : int or float or None, default=0.1
Proportion (or absolute size) of training data to set aside as
validation data for early stopping. If None, early stopping is done on
the training data. Only used if early stopping is performed.
n_iter_no_change : int, default=10
Used to determine when to "early stop". The fitting process is
stopped when none of the last ``n_iter_no_change`` scores are better
than the ``n_iter_no_change - 1`` -th-to-last one, up to some
tolerance. Only used if early stopping is performed.
tol : float, default=1e-7
The absolute tolerance to use when comparing scores during early
stopping. The higher the tolerance, the more likely we are to early
stop: higher tolerance means that it will be harder for subsequent
iterations to be considered an improvement upon the reference score.
verbose : int, default=0
The verbosity level. If not zero, print some information about the
fitting process.
random_state : int, RandomState instance or None, default=None
Pseudo-random number generator to control the subsampling in the
binning process, and the train/validation data split if early stopping
is enabled.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
do_early_stopping_ : bool
Indicates whether early stopping is used during training.
n_iter_ : int
The number of iterations as selected by early stopping, depending on
the `early_stopping` parameter. Otherwise it corresponds to max_iter.
n_trees_per_iteration_ : int
        The number of trees that are built at each iteration. For regressors,
this is always 1.
train_score_ : ndarray, shape (n_iter_+1,)
The scores at each iteration on the training data. The first entry
is the score of the ensemble before the first iteration. Scores are
computed according to the ``scoring`` parameter. If ``scoring`` is
not 'loss', scores are computed on a subset of at most 10 000
samples. Empty if no early stopping.
validation_score_ : ndarray, shape (n_iter_+1,)
The scores at each iteration on the held-out validation data. The
first entry is the score of the ensemble before the first iteration.
Scores are computed according to the ``scoring`` parameter. Empty if
no early stopping or if ``validation_fraction`` is None.
is_categorical_ : ndarray, shape (n_features, ) or None
Boolean mask for the categorical features. ``None`` if there are no
categorical features.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
GradientBoostingRegressor : Exact gradient boosting method that does not
        scale as well on datasets with a large number of samples.
sklearn.tree.DecisionTreeRegressor : A decision tree regressor.
RandomForestRegressor : A meta-estimator that fits a number of decision
tree regressors on various sub-samples of the dataset and uses
averaging to improve the statistical performance and control
over-fitting.
AdaBoostRegressor : A meta-estimator that begins by fitting a regressor
on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
Examples
--------
>>> from sklearn.ensemble import HistGradientBoostingRegressor
>>> from sklearn.datasets import load_diabetes
>>> X, y = load_diabetes(return_X_y=True)
>>> est = HistGradientBoostingRegressor().fit(X, y)
>>> est.score(X, y)
0.92...
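
    An additional sketch (illustrative, not taken from the upstream docs):
    estimating the 90th percentile of the target with the pinball loss.
    >>> q_est = HistGradientBoostingRegressor(loss="quantile", quantile=0.9)
    >>> q_est = q_est.fit(X, y)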
"""
_parameter_constraints: dict = {
**BaseHistGradientBoosting._parameter_constraints,
"loss": [
StrOptions({"squared_error", "absolute_error", "poisson", "quantile"}),
BaseLoss,
],
"quantile": [Interval(Real, 0, 1, closed="both"), None],
}
def __init__(
self,
loss="squared_error",
*,
quantile=None,
learning_rate=0.1,
max_iter=100,
max_leaf_nodes=31,
max_depth=None,
min_samples_leaf=20,
l2_regularization=0.0,
max_bins=255,
categorical_features=None,
monotonic_cst=None,
interaction_cst=None,
warm_start=False,
early_stopping="auto",
scoring="loss",
validation_fraction=0.1,
n_iter_no_change=10,
tol=1e-7,
verbose=0,
random_state=None,
):
super(HistGradientBoostingRegressor, self).__init__(
loss=loss,
learning_rate=learning_rate,
max_iter=max_iter,
max_leaf_nodes=max_leaf_nodes,
max_depth=max_depth,
min_samples_leaf=min_samples_leaf,
l2_regularization=l2_regularization,
max_bins=max_bins,
monotonic_cst=monotonic_cst,
interaction_cst=interaction_cst,
categorical_features=categorical_features,
early_stopping=early_stopping,
warm_start=warm_start,
scoring=scoring,
validation_fraction=validation_fraction,
n_iter_no_change=n_iter_no_change,
tol=tol,
verbose=verbose,
random_state=random_state,
)
self.quantile = quantile
def predict(self, X):
"""Predict values for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape (n_samples,)
The predicted values.
"""
check_is_fitted(self)
# Return inverse link of raw predictions after converting
# shape (n_samples, 1) to (n_samples,)
return self._loss.link.inverse(self._raw_predict(X).ravel())
def staged_predict(self, X):
"""Predict regression target for each iteration.
        This method allows monitoring (i.e. determining the error on a test set)
after each stage.
.. versionadded:: 0.24
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
Yields
------
y : generator of ndarray of shape (n_samples,)
The predicted values of the input samples, for each iteration.
"""
for raw_predictions in self._staged_raw_predict(X):
yield self._loss.link.inverse(raw_predictions.ravel())
def _encode_y(self, y):
# Just convert y to the expected dtype
self.n_trees_per_iteration_ = 1
y = y.astype(Y_DTYPE, copy=False)
if self.loss == "poisson":
# Ensure y >= 0 and sum(y) > 0
if not (np.all(y >= 0) and np.sum(y) > 0):
raise ValueError(
"loss='poisson' requires non-negative y and sum(y) > 0."
)
return y
def _get_loss(self, sample_weight):
if self.loss == "quantile":
return _LOSSES[self.loss](
sample_weight=sample_weight, quantile=self.quantile
)
else:
return _LOSSES[self.loss](sample_weight=sample_weight)
class HistGradientBoostingClassifier(ClassifierMixin, BaseHistGradientBoosting):
"""Histogram-based Gradient Boosting Classification Tree.
This estimator is much faster than
:class:`GradientBoostingClassifier<sklearn.ensemble.GradientBoostingClassifier>`
for big datasets (n_samples >= 10 000).
This estimator has native support for missing values (NaNs). During
training, the tree grower learns at each split point whether samples
with missing values should go to the left or right child, based on the
potential gain. When predicting, samples with missing values are
assigned to the left or right child consequently. If no missing values
were encountered for a given feature during training, then samples with
missing values are mapped to whichever child has the most samples.
This implementation is inspired by
`LightGBM <https://github.com/Microsoft/LightGBM>`_.
Read more in the :ref:`User Guide <histogram_based_gradient_boosting>`.
.. versionadded:: 0.21
Parameters
----------
loss : {'log_loss', 'auto', 'binary_crossentropy', 'categorical_crossentropy'}, \
default='log_loss'
The loss function to use in the boosting process.
For binary classification problems, 'log_loss' is also known as logistic loss,
binomial deviance or binary crossentropy. Internally, the model fits one tree
per boosting iteration and uses the logistic sigmoid function (expit) as
inverse link function to compute the predicted positive class probability.
For multiclass classification problems, 'log_loss' is also known as multinomial
deviance or categorical crossentropy. Internally, the model fits one tree per
boosting iteration and per class and uses the softmax function as inverse link
function to compute the predicted probabilities of the classes.
.. deprecated:: 1.1
The loss arguments 'auto', 'binary_crossentropy' and
'categorical_crossentropy' were deprecated in v1.1 and will be removed in
version 1.3. Use `loss='log_loss'` which is equivalent.
learning_rate : float, default=0.1
The learning rate, also known as *shrinkage*. This is used as a
multiplicative factor for the leaves values. Use ``1`` for no
shrinkage.
max_iter : int, default=100
The maximum number of iterations of the boosting process, i.e. the
maximum number of trees for binary classification. For multiclass
classification, `n_classes` trees per iteration are built.
max_leaf_nodes : int or None, default=31
The maximum number of leaves for each tree. Must be strictly greater
than 1. If None, there is no maximum limit.
max_depth : int or None, default=None
The maximum depth of each tree. The depth of a tree is the number of
edges to go from the root to the deepest leaf.
Depth isn't constrained by default.
min_samples_leaf : int, default=20
The minimum number of samples per leaf. For small datasets with less
than a few hundred samples, it is recommended to lower this value
since only very shallow trees would be built.
l2_regularization : float, default=0
The L2 regularization parameter. Use 0 for no regularization.
max_bins : int, default=255
The maximum number of bins to use for non-missing values. Before
training, each feature of the input array `X` is binned into
integer-valued bins, which allows for a much faster training stage.
Features with a small number of unique values may use less than
``max_bins`` bins. In addition to the ``max_bins`` bins, one more bin
is always reserved for missing values. Must be no larger than 255.
categorical_features : array-like of {bool, int} of shape (n_features) \
or shape (n_categorical_features,), default=None
Indicates the categorical features.
- None : no feature will be considered categorical.
- boolean array-like : boolean mask indicating categorical features.
- integer array-like : integer indices indicating categorical
features.
For each categorical feature, there must be at most `max_bins` unique
        categories, and each categorical value must be in [0, max_bins - 1].
During prediction, categories encoded as a negative value are treated as
missing values.
Read more in the :ref:`User Guide <categorical_support_gbdt>`.
.. versionadded:: 0.24
monotonic_cst : array-like of int of shape (n_features), default=None
Indicates the monotonic constraint to enforce on each feature.
- 1: monotonic increase
- 0: no constraint
- -1: monotonic decrease
        The constraints are only valid for binary classification and hold
over the probability of the positive class.
Read more in the :ref:`User Guide <monotonic_cst_gbdt>`.
.. versionadded:: 0.23
interaction_cst : iterable of iterables of int, default=None
Specify interaction constraints, i.e. sets of features which can
only interact with each other in child nodes splits.
Each iterable materializes a constraint by the set of indices of
the features that are allowed to interact with each other.
If there are more features than specified in these constraints,
they are treated as if they were specified as an additional set.
For instance, with 5 features in total, `interaction_cst=[{0, 1}]`
is equivalent to `interaction_cst=[{0, 1}, {2, 3, 4}]`,
and specifies that each branch of a tree will either only split
on features 0 and 1 or only split on features 2, 3 and 4.
.. versionadded:: 1.2
warm_start : bool, default=False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble. For results to be valid, the
estimator should be re-trained on the same data only.
See :term:`the Glossary <warm_start>`.
early_stopping : 'auto' or bool, default='auto'
If 'auto', early stopping is enabled if the sample size is larger than
10000. If True, early stopping is enabled, otherwise early stopping is
disabled.
.. versionadded:: 0.23
scoring : str or callable or None, default='loss'
Scoring parameter to use for early stopping. It can be a single
string (see :ref:`scoring_parameter`) or a callable (see
:ref:`scoring`). If None, the estimator's default scorer
is used. If ``scoring='loss'``, early stopping is checked
w.r.t the loss value. Only used if early stopping is performed.
validation_fraction : int or float or None, default=0.1
Proportion (or absolute size) of training data to set aside as
validation data for early stopping. If None, early stopping is done on
the training data. Only used if early stopping is performed.
n_iter_no_change : int, default=10
Used to determine when to "early stop". The fitting process is
stopped when none of the last ``n_iter_no_change`` scores are better
than the ``n_iter_no_change - 1`` -th-to-last one, up to some
tolerance. Only used if early stopping is performed.
tol : float, default=1e-7
The absolute tolerance to use when comparing scores. The higher the
tolerance, the more likely we are to early stop: higher tolerance
means that it will be harder for subsequent iterations to be
considered an improvement upon the reference score.
verbose : int, default=0
The verbosity level. If not zero, print some information about the
fitting process.
random_state : int, RandomState instance or None, default=None
Pseudo-random number generator to control the subsampling in the
binning process, and the train/validation data split if early stopping
is enabled.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form `{class_label: weight}`.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as `n_samples / (n_classes * np.bincount(y))`.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if `sample_weight` is specified.
.. versionadded:: 1.2
Attributes
----------
classes_ : array, shape = (n_classes,)
Class labels.
do_early_stopping_ : bool
Indicates whether early stopping is used during training.
n_iter_ : int
The number of iterations as selected by early stopping, depending on
the `early_stopping` parameter. Otherwise it corresponds to max_iter.
n_trees_per_iteration_ : int
        The number of trees that are built at each iteration. This is equal to 1
for binary classification, and to ``n_classes`` for multiclass
classification.
train_score_ : ndarray, shape (n_iter_+1,)
The scores at each iteration on the training data. The first entry
is the score of the ensemble before the first iteration. Scores are
computed according to the ``scoring`` parameter. If ``scoring`` is
not 'loss', scores are computed on a subset of at most 10 000
samples. Empty if no early stopping.
validation_score_ : ndarray, shape (n_iter_+1,)
The scores at each iteration on the held-out validation data. The
first entry is the score of the ensemble before the first iteration.
Scores are computed according to the ``scoring`` parameter. Empty if
no early stopping or if ``validation_fraction`` is None.
is_categorical_ : ndarray, shape (n_features, ) or None
Boolean mask for the categorical features. ``None`` if there are no
categorical features.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
GradientBoostingClassifier : Exact gradient boosting method that does not
        scale as well on datasets with a large number of samples.
sklearn.tree.DecisionTreeClassifier : A decision tree classifier.
RandomForestClassifier : A meta-estimator that fits a number of decision
tree classifiers on various sub-samples of the dataset and uses
averaging to improve the predictive accuracy and control over-fitting.
AdaBoostClassifier : A meta-estimator that begins by fitting a classifier
on the original dataset and then fits additional copies of the
classifier on the same dataset where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers
focus more on difficult cases.
Examples
--------
>>> from sklearn.ensemble import HistGradientBoostingClassifier
>>> from sklearn.datasets import load_iris
>>> X, y = load_iris(return_X_y=True)
>>> clf = HistGradientBoostingClassifier().fit(X, y)
>>> clf.score(X, y)
1.0
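
    An additional sketch (illustrative, not taken from the upstream docs):
    predicted class probabilities have shape ``(n_samples, n_classes)``.
    >>> clf.predict_proba(X[:5]).shape
    (5, 3)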
"""
# TODO(1.3): Remove "binary_crossentropy", "categorical_crossentropy", "auto"
_parameter_constraints: dict = {
**BaseHistGradientBoosting._parameter_constraints,
"loss": [
StrOptions(
{
"log_loss",
"binary_crossentropy",
"categorical_crossentropy",
"auto",
},
deprecated={
"auto",
"binary_crossentropy",
"categorical_crossentropy",
},
),
BaseLoss,
],
"class_weight": [dict, StrOptions({"balanced"}), None],
}
def __init__(
self,
loss="log_loss",
*,
learning_rate=0.1,
max_iter=100,
max_leaf_nodes=31,
max_depth=None,
min_samples_leaf=20,
l2_regularization=0.0,
max_bins=255,
categorical_features=None,
monotonic_cst=None,
interaction_cst=None,
warm_start=False,
early_stopping="auto",
scoring="loss",
validation_fraction=0.1,
n_iter_no_change=10,
tol=1e-7,
verbose=0,
random_state=None,
class_weight=None,
):
super(HistGradientBoostingClassifier, self).__init__(
loss=loss,
learning_rate=learning_rate,
max_iter=max_iter,
max_leaf_nodes=max_leaf_nodes,
max_depth=max_depth,
min_samples_leaf=min_samples_leaf,
l2_regularization=l2_regularization,
max_bins=max_bins,
categorical_features=categorical_features,
monotonic_cst=monotonic_cst,
interaction_cst=interaction_cst,
warm_start=warm_start,
early_stopping=early_stopping,
scoring=scoring,
validation_fraction=validation_fraction,
n_iter_no_change=n_iter_no_change,
tol=tol,
verbose=verbose,
random_state=random_state,
)
self.class_weight = class_weight
def _finalize_sample_weight(self, sample_weight, y):
"""Adjust sample_weights with class_weights."""
if self.class_weight is None:
return sample_weight
expanded_class_weight = compute_sample_weight(self.class_weight, y)
if sample_weight is not None:
return sample_weight * expanded_class_weight
else:
return expanded_class_weight
def predict(self, X):
"""Predict classes for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape (n_samples,)
The predicted classes.
"""
# TODO: This could be done in parallel
encoded_classes = np.argmax(self.predict_proba(X), axis=1)
return self.classes_[encoded_classes]
def staged_predict(self, X):
"""Predict classes at each iteration.
        This method allows monitoring (i.e. determining the error on a test set)
after each stage.
.. versionadded:: 0.24
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
Yields
------
y : generator of ndarray of shape (n_samples,)
The predicted classes of the input samples, for each iteration.
"""
for proba in self.staged_predict_proba(X):
encoded_classes = np.argmax(proba, axis=1)
yield self.classes_.take(encoded_classes, axis=0)
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input samples.
Returns
-------
p : ndarray, shape (n_samples, n_classes)
The class probabilities of the input samples.
"""
raw_predictions = self._raw_predict(X)
return self._loss.predict_proba(raw_predictions)
def staged_predict_proba(self, X):
"""Predict class probabilities at each iteration.
        This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
Yields
------
y : generator of ndarray of shape (n_samples,)
The predicted class probabilities of the input samples,
for each iteration.
"""
for raw_predictions in self._staged_raw_predict(X):
yield self._loss.predict_proba(raw_predictions)
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input samples.
Returns
-------
decision : ndarray, shape (n_samples,) or \
(n_samples, n_trees_per_iteration)
            The raw predicted values (i.e. the sum of the trees' leaves) for
each sample. n_trees_per_iteration is equal to the number of
classes in multiclass classification.
"""
decision = self._raw_predict(X)
if decision.shape[1] == 1:
decision = decision.ravel()
return decision
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
        This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
Yields
------
decision : generator of ndarray of shape (n_samples,) or \
(n_samples, n_trees_per_iteration)
The decision function of the input samples, which corresponds to
            the raw values predicted from the trees of the ensemble. The
            classes correspond to those in the attribute :term:`classes_`.
"""
for staged_decision in self._staged_raw_predict(X):
if staged_decision.shape[1] == 1:
staged_decision = staged_decision.ravel()
yield staged_decision
def _encode_y(self, y):
        # Encode classes into 0 ... n_classes - 1 and set the attributes
        # classes_ and n_trees_per_iteration_.
check_classification_targets(y)
label_encoder = LabelEncoder()
encoded_y = label_encoder.fit_transform(y)
self.classes_ = label_encoder.classes_
n_classes = self.classes_.shape[0]
# only 1 tree for binary classification. For multiclass classification,
# we build 1 tree per class.
self.n_trees_per_iteration_ = 1 if n_classes <= 2 else n_classes
encoded_y = encoded_y.astype(Y_DTYPE, copy=False)
return encoded_y
def _get_loss(self, sample_weight):
# TODO(1.3): Remove "auto", "binary_crossentropy", "categorical_crossentropy"
if self.loss in ("auto", "binary_crossentropy", "categorical_crossentropy"):
warnings.warn(
f"The loss '{self.loss}' was deprecated in v1.1 and will be removed in "
"version 1.3. Use 'log_loss' which is equivalent.",
FutureWarning,
)
if self.loss in ("log_loss", "auto"):
if self.n_trees_per_iteration_ == 1:
return HalfBinomialLoss(sample_weight=sample_weight)
else:
return HalfMultinomialLoss(
sample_weight=sample_weight, n_classes=self.n_trees_per_iteration_
)
if self.loss == "categorical_crossentropy":
if self.n_trees_per_iteration_ == 1:
raise ValueError(
f"loss='{self.loss}' is not suitable for a binary classification "
"problem. Please use loss='log_loss' instead."
)
else:
return HalfMultinomialLoss(
sample_weight=sample_weight, n_classes=self.n_trees_per_iteration_
)
if self.loss == "binary_crossentropy":
if self.n_trees_per_iteration_ > 1:
raise ValueError(
f"loss='{self.loss}' is not defined for multiclass "
f"classification with n_classes={self.n_trees_per_iteration_}, "
"use loss='log_loss' instead."
)
else:
return HalfBinomialLoss(sample_weight=sample_weight)
| {
"content_hash": "74b7f4e9f92a5a820ecaf5152e57411c",
"timestamp": "",
"source": "github",
"line_count": 1953,
"max_line_length": 88,
"avg_line_length": 40.48489503328213,
"alnum_prop": 0.5829106959920067,
"repo_name": "lesteve/scikit-learn",
"id": "470020dbd492bb827640ebe3ee2aef737afade5e",
"size": "79067",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "42335"
},
{
"name": "C++",
"bytes": "147316"
},
{
"name": "Cython",
"bytes": "668702"
},
{
"name": "Makefile",
"bytes": "1644"
},
{
"name": "Python",
"bytes": "10429018"
},
{
"name": "Shell",
"bytes": "42936"
}
],
"symlink_target": ""
} |
"""
Database level utilities for PyTimeline.
"""
from pymongo.database import Database as PymongoDatabase
from pytimeline.collection import Collection
class Database(PymongoDatabase):
"""
A Mongo database.
"""
def __getattr__(self, name):
"""
Get a collection of this database.
"""
return Collection(self, name)
def create_collection(self, name, options=None, **kwargs):
"""
Explicitly create a collection in this database.
        This method masks the original method and raises
:class:`NotImplementedError`.
"""
raise NotImplementedError
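# Illustrative usage sketch (hypothetical names; `connection` stands for a
# pymongo-compatible connection object and is not provided by this module):
#
#   db = Database(connection, "timeline")
#   events = db.events              # __getattr__ returns a pytimeline Collection
#   db.create_collection("events")  # always raises NotImplementedError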
| {
"content_hash": "d4eca83193ffb8e8377649abaa63f872",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 62,
"avg_line_length": 21.266666666666666,
"alnum_prop": 0.6394984326018809,
"repo_name": "tadasv/pytimeline",
"id": "9af135e987de3bd3251b80acb95bcac9fe39ca64",
"size": "638",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pytimeline/database.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36296"
},
{
"name": "Shell",
"bytes": "5639"
}
],
"symlink_target": ""
} |
import sys
from airflow.hooks.mssql_hook import MsSqlHook
from airflow import configuration
from airflow import models
from airflow.utils import db
from hovertools import command_line
from .hooklib.basehooktest import BaseHookTest
# This intercepts the sys.exit() calls made by the Click library.
# If this is not done, all nosetests fail.
sys.exit = lambda *x: None
TMP_REPO_DIR = 'tmp'
class MsSqlHookTest(BaseHookTest):
def __init__(self, *args, **kwargs):
super(MsSqlHookTest, self).__init__('hooktests/hooks/specs/mssql.yaml',
*args,
**kwargs)
def setUp(self):
super(MsSqlHookTest, self).setUp()
command_line.cli(['--repo', TMP_REPO_DIR, 'refresh', 'mssql_hook_test'])
configuration.load_test_config()
db.merge_conn(
models.Connection(
conn_id='mssql_hook_test', conn_type='mssql',
host='localhost', port=1433, login='SA',
password=u'secret123_', schema='master'))
self.db_hook = MsSqlHook(mssql_conn_id='mssql_hook_test', schema='master')
def tearDown(self):
pass
def test_records(self):
statement = "select * from master.dbo.sysprocesses"
rows = self.db_hook.get_records(statement)
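    # A hypothetical companion check (not part of the original suite); it assumes
    # BaseHookTest ultimately derives from unittest.TestCase and uses the generic
    # DbApiHook.get_first helper to smoke-test connectivity with a one-row query.
    def test_get_first(self):
        statement = "select @@version"
        row = self.db_hook.get_first(statement)
        self.assertIsNotNone(row)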
| {
"content_hash": "d610a2dc584c64bf48d276b119e629ad",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 82,
"avg_line_length": 34.2,
"alnum_prop": 0.6030701754385965,
"repo_name": "gtoonstra/airflow-hovercraft",
"id": "9199fb3282f426f98c0b59a9f38cb3d00a6eb2a5",
"size": "1935",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hooktests/hooks/test_mssql_hook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "13882"
},
{
"name": "Python",
"bytes": "43367"
},
{
"name": "Shell",
"bytes": "16335"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'trucoGemSite.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('truco.urls'), name='truco')
)
| {
"content_hash": "4f93578d7401f4160ceb357d6379f483",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 57,
"avg_line_length": 30.181818181818183,
"alnum_prop": 0.6355421686746988,
"repo_name": "emmanuel-santos/GEM",
"id": "8ed953443b3dcb134cace959149910e0bc5bf522",
"size": "332",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trucoGemSite/trucoGemSite/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3742"
},
{
"name": "HTML",
"bytes": "13598"
},
{
"name": "JavaScript",
"bytes": "1295"
},
{
"name": "Makefile",
"bytes": "487"
},
{
"name": "Python",
"bytes": "98063"
}
],
"symlink_target": ""
} |
import argparse
import _pickle as pickle
import io
import os
import brica1
import cherrypy
import msgpack
import numpy as np
from PIL import Image
from PIL import ImageOps
from cognitive import interpreter
from ml.cnn_feature_extractor import CnnFeatureExtractor
from config import BRICA_CONFIG_FILE
from config.model import CNN_FEATURE_EXTRACTOR, CAFFE_MODEL, MODEL_TYPE
import logging
import logging.config
from config.log import CHERRYPY_ACCESS_LOG, CHERRYPY_ERROR_LOG, LOGGING, APP_KEY, INBOUND_KEY, OUTBOUND_KEY
from cognitive.service import AgentService
from tool.result_logger import ResultLogger
import keras
logging.config.dictConfig(LOGGING)
inbound_logger = logging.getLogger(INBOUND_KEY)
app_logger = logging.getLogger(APP_KEY)
outbound_logger = logging.getLogger(OUTBOUND_KEY)
def unpack(payload, depth_image_count=1, depth_image_dim=32*32):
dat = msgpack.unpackb(payload)
image = []
for i in range(depth_image_count):
image.append(Image.open(io.BytesIO(bytearray(dat[b'image'][i]))))
depth = []
for i in range(depth_image_count):
d = (Image.open(io.BytesIO(bytearray(dat[b'depth'][i]))))
depth.append(np.array(ImageOps.grayscale(d)).reshape(depth_image_dim))
reward = dat[b'reward']
observation = {"image": image, "depth": depth}
rotation = dat[b'rotation']
movement = dat[b'movement']
return reward, observation, rotation, movement
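# Illustrative sketch (hypothetical client-side code; the byte strings are
# placeholders): a payload that `unpack` can decode is a msgpack map with one
# encoded image and one encoded depth frame per `depth_image_count`, e.g.
#
#   payload = msgpack.packb({
#       b'image': [png_bytes],
#       b'depth': [depth_png_bytes],  # grayscale, 32x32 pixels when flattened
#       b'reward': 0.0,
#       b'rotation': 0.0,
#       b'movement': 0.0,
#   })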
def unpack_reset(payload):
dat = msgpack.unpackb(payload)
reward = dat[b'reward']
success = dat[b'success']
failure = dat[b'failure']
elapsed = dat[b'elapsed']
finished = dat[b'finished']
return reward, success, failure, elapsed, finished
use_gpu = int(os.getenv('GPU', '-1'))
depth_image_dim = 32 * 32
depth_image_count = 1
image_feature_dim = 256 * 6 * 6
image_feature_count = 1
feature_output_dim = (depth_image_dim * depth_image_count) + (image_feature_dim * image_feature_count)
class Root(object):
def __init__(self, **kwargs):
if os.path.exists(CNN_FEATURE_EXTRACTOR):
app_logger.info("loading... {}".format(CNN_FEATURE_EXTRACTOR))
self.feature_extractor = pickle.load(open(CNN_FEATURE_EXTRACTOR, 'rb'))
app_logger.info("done")
else:
self.feature_extractor = CnnFeatureExtractor(use_gpu, CAFFE_MODEL, MODEL_TYPE, image_feature_dim)
pickle.dump(self.feature_extractor, open(CNN_FEATURE_EXTRACTOR, 'wb'))
app_logger.info("pickle.dump finished")
self.agent_service = AgentService(BRICA_CONFIG_FILE, self.feature_extractor)
self.result_logger = ResultLogger()
@cherrypy.expose()
def flush(self, identifier):
self.agent_service.initialize(identifier)
@cherrypy.expose
def create(self, identifier):
body = cherrypy.request.body.read()
reward, observation, rotation, movement = unpack(body)
inbound_logger.info('reward: {}, depth: {}, rotation: {}, movement: {}'
.format(reward, observation['depth'], rotation, movement))
feature = self.feature_extractor.feature(observation)
self.result_logger.initialize()
result = self.agent_service.create(reward, feature, identifier)
outbound_logger.info('action: {}'.format(result))
return str(result)
@cherrypy.expose
def step(self, identifier):
body = cherrypy.request.body.read()
reward, observation, rotation, movement = unpack(body)
inbound_logger.info('reward: {}, depth: {}, rotation: {}, movement: {}'
.format(reward, observation['depth'], rotation, movement))
result = self.agent_service.step(reward, observation, identifier)
self.result_logger.step()
outbound_logger.info('result: {}'.format(result))
return str(result)
@cherrypy.expose
def reset(self, identifier):
body = cherrypy.request.body.read()
reward, success, failure, elapsed, finished = unpack_reset(body)
inbound_logger.info('reward: {}, success: {}, failure: {}, elapsed: {}'.format(
reward, success, failure, elapsed))
result = self.agent_service.reset(reward, identifier)
self.result_logger.report(success, failure, finished)
outbound_logger.info('result: {}'.format(result))
return str(result)
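    # Illustrative client sketch (hypothetical; assumes CherryPy's default
    # dispatcher so the identifier is taken from the URL path and the msgpack
    # payload is sent as the raw request body):
    #
    #   requests.post("http://localhost:8765/step/agent-1", data=payload)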
def main(args):
cherrypy.config.update({'server.socket_host': args.host, 'server.socket_port': args.port, 'log.screen': False,
'log.access_file': CHERRYPY_ACCESS_LOG, 'log.error_file': CHERRYPY_ERROR_LOG})
cherrypy.quickstart(Root())
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='LIS Backend')
parser.add_argument('--host', default='localhost', type=str, help='Server hostname')
parser.add_argument('--port', default=8765, type=int, help='Server port number')
args = parser.parse_args()
main(args)
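# Usage sketch (hypothetical values; the GPU environment variable is read above
# and passed to CnnFeatureExtractor):
#
#   GPU=0 python server.py --host 0.0.0.0 --port 8765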
| {
"content_hash": "07213e4e04b06d801226419e899e1cc0",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 114,
"avg_line_length": 34.65734265734266,
"alnum_prop": 0.6684826472962067,
"repo_name": "tosanai/wbai_hackathon_2017",
"id": "8a35a908ebefd75634308e54874e5b8f262e5c26",
"size": "4980",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "agent/server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "323143"
},
{
"name": "JavaScript",
"bytes": "629"
},
{
"name": "Python",
"bytes": "70142"
},
{
"name": "ShaderLab",
"bytes": "1090"
},
{
"name": "Shell",
"bytes": "323"
}
],
"symlink_target": ""
} |
""" PyTorch {{cookiecutter.modelname}} model. """
{% if cookiecutter.is_encoder_decoder_model == "False" %}
import math
import os
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from typing import Optional, Tuple, Union
from ...activations import ACT2FN
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel, SequenceSummary
from ...pytorch_utils import (
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from ...utils import logging
from .configuration_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.camelcase_modelname}}Config
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "{{cookiecutter.checkpoint_identifier}}"
_CONFIG_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Config"
_TOKENIZER_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Tokenizer"
{{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST = [
"{{cookiecutter.checkpoint_identifier}}",
# See all {{cookiecutter.modelname}} models at https://huggingface.co/models?filter={{cookiecutter.lowercase_modelname}}
]
def load_tf_weights_in_{{cookiecutter.lowercase_modelname}}(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
        # which are not required for using a pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
return model
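# Illustrative usage sketch (hypothetical paths; this loader is normally driven
# by the TF-to-PyTorch conversion script rather than called directly):
#
#   config = {{cookiecutter.camelcase_modelname}}Config.from_json_file("config.json")
#   model = {{cookiecutter.camelcase_modelname}}Model(config)
#   model = load_tf_weights_in_{{cookiecutter.lowercase_modelname}}(model, config, "model.ckpt")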
# Copied from transformers.models.bert.modeling_bert.BertEmbeddings with Bert->{{cookiecutter.camelcase_modelname}}
class {{cookiecutter.camelcase_modelname}}Embeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
self.register_buffer(
"token_type_ids",
torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device),
persistent=False,
)
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
        # If token_type_ids is not provided (which usually happens when it is auto-generated),
        # fall back to the all-zeros buffer registered in the constructor. This lets users
        # trace the model without passing token_type_ids and solves issue #5664.
if token_type_ids is None:
if hasattr(self, "token_type_ids"):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->{{cookiecutter.camelcase_modelname}}
class {{cookiecutter.camelcase_modelname}}SelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = position_embedding_type or getattr(config, "position_embedding_type", "absolute")
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.is_decoder = config.is_decoder
def transpose_for_scores(self, x):
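        # Reshape (batch, seq_len, all_head_size) into
        # (batch, num_attention_heads, seq_len, attention_head_size) so that
        # attention scores can be computed independently for each head.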
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_layer = past_key_value[0]
value_layer = past_key_value[1]
attention_mask = encoder_attention_mask
elif is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_layer, value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in {{cookiecutter.camelcase_modelname}}Model forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
if self.is_decoder:
outputs = outputs + (past_key_value,)
return outputs
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->{{cookiecutter.camelcase_modelname}}
class {{cookiecutter.camelcase_modelname}}SelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->{{cookiecutter.camelcase_modelname}}
class {{cookiecutter.camelcase_modelname}}Attention(nn.Module):
def __init__(self, config, position_embedding_type=None):
super().__init__()
self.self = {{cookiecutter.camelcase_modelname}}SelfAttention(config, position_embedding_type=position_embedding_type)
self.output = {{cookiecutter.camelcase_modelname}}SelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->{{cookiecutter.camelcase_modelname}}
class {{cookiecutter.camelcase_modelname}}Intermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->{{cookiecutter.camelcase_modelname}}
class {{cookiecutter.camelcase_modelname}}Output(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->{{cookiecutter.camelcase_modelname}}
class {{cookiecutter.camelcase_modelname}}Layer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = {{cookiecutter.camelcase_modelname}}Attention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
self.crossattention = {{cookiecutter.camelcase_modelname}}Attention(config, position_embedding_type="absolute")
self.intermediate = {{cookiecutter.camelcase_modelname}}Intermediate(config)
self.output = {{cookiecutter.camelcase_modelname}}Output(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
if self.is_decoder:
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
else:
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
cross_attn_present_key_value = None
if self.is_decoder and encoder_hidden_states is not None:
assert hasattr(
self, "crossattention"
), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
# cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
cross_attn_past_key_value,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
# add cross-attn cache to positions 3,4 of present_key_value tuple
cross_attn_present_key_value = cross_attention_outputs[-1]
present_key_value = present_key_value + cross_attn_present_key_value
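        # Apply the feed-forward block in chunks of `chunk_size_feed_forward`
        # along the sequence dimension (dim 1) to reduce peak memory usage.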
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
# if decoder, return the attn key/values as the last output
if self.is_decoder:
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->{{cookiecutter.camelcase_modelname}}
class {{cookiecutter.camelcase_modelname}}Encoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([{{cookiecutter.camelcase_modelname}}Layer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
next_decoder_cache = () if use_cache else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->{{cookiecutter.camelcase_modelname}}
class {{cookiecutter.camelcase_modelname}}PredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{{cookiecutter.camelcase_modelname}}
class {{cookiecutter.camelcase_modelname}}LMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = {{cookiecutter.camelcase_modelname}}PredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->{{cookiecutter.camelcase_modelname}}
class {{cookiecutter.camelcase_modelname}}OnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = {{cookiecutter.camelcase_modelname}}LMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class {{cookiecutter.camelcase_modelname}}PreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = {{cookiecutter.camelcase_modelname}}Config
load_tf_weights = load_tf_weights_in_{{cookiecutter.lowercase_modelname}}
base_model_prefix = "{{cookiecutter.lowercase_modelname}}"
supports_gradient_checkpointing = True
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, {{cookiecutter.camelcase_modelname}}Encoder):
module.gradient_checkpointing = value
{{cookiecutter.uppercase_modelname}}_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config ([`~{{cookiecutter.camelcase_modelname}}Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
{{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`{{cookiecutter.camelcase_modelname}}Tokenizer`].
See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range `[0, config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert *input_ids* indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare {{cookiecutter.modelname}} Model transformer outputting raw hidden-states without any specific head on top.",
{{cookiecutter.uppercase_modelname}}_START_DOCSTRING,
)
class {{cookiecutter.camelcase_modelname}}Model({{cookiecutter.camelcase_modelname}}PreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well
as a decoder, in which case a layer of cross-attention is added between
the self-attention layers, following the architecture described in [Attention is
all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani,
Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as a decoder the model needs to be initialized with the
    `is_decoder` argument of the configuration set to `True`.
    To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder`
argument and `add_cross_attention` set to `True`; an
`encoder_hidden_states` is then expected as an input to the forward pass.
"""
def __init__(self, config):
super().__init__(config)
self.config = config
self.embeddings = {{cookiecutter.camelcase_modelname}}Embeddings(config)
self.encoder = {{cookiecutter.camelcase_modelname}}Encoder(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPastAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask
is used in the cross-attention if the model is configured as a decoder.
Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids`
(those that don't have their past key value states given to this model) of shape `(batch_size, 1)`
instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up
decoding (see `past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
# If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
if not return_dict:
return (sequence_output,) + encoder_outputs[1:]
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=sequence_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
@add_start_docstrings("""{{cookiecutter.modelname}} Model with a `language modeling` head on top. """, {{cookiecutter.uppercase_modelname}}_START_DOCSTRING)
class {{cookiecutter.camelcase_modelname}}ForMaskedLM({{cookiecutter.camelcase_modelname}}PreTrainedModel):
def __init__(self, config):
super().__init__(config)
if config.is_decoder:
logger.warning(
"If you want to use `{{cookiecutter.camelcase_modelname}}ForMaskedLM` make sure `config.is_decoder=False` for "
"bi-directional self-attention."
)
self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config)
self.cls = {{cookiecutter.camelcase_modelname}}OnlyMLMHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss.
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring)
Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels
in `[0, ..., config.vocab_size]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.{{cookiecutter.lowercase_modelname}}(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
effective_batch_size = input_shape[0]
# add a dummy token
assert self.config.pad_token_id is not None, "The PAD token should be defined for generation"
attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
dummy_token = torch.full(
(effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
)
input_ids = torch.cat([input_ids, dummy_token], dim=1)
return {"input_ids": input_ids, "attention_mask": attention_mask}
@add_start_docstrings(
"""{{cookiecutter.modelname}} Model with a `language modeling` head on top for CLM fine-tuning. """, {{cookiecutter.uppercase_modelname}}_START_DOCSTRING
)
class {{cookiecutter.camelcase_modelname}}ForCausalLM({{cookiecutter.camelcase_modelname}}PreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
logger.warning("If you want to use `{{cookiecutter.camelcase_modelname}}ForCausalLM` as a standalone, add `is_decoder=True.`")
self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config)
self.cls = {{cookiecutter.camelcase_modelname}}OnlyMLMHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2
tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional
tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two
additional tensors are only required when the model is used as a decoder in a Sequence to Sequence
model.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential
decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids`
(those that don't have their past key value states given to this model) of shape `(batch_size, 1)`
instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
`[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
                ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up
decoding (see `past_key_values`).
Returns:
Example:
```python
>>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, {{cookiecutter.camelcase_modelname}}ForCausalLM, {{cookiecutter.camelcase_modelname}}Config
>>> import torch
>>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}')
>>> config = {{cookiecutter.camelcase_modelname}}Config.from_pretrained("{{cookiecutter.checkpoint_identifier}}")
>>> config.is_decoder = True
>>> model = {{cookiecutter.camelcase_modelname}}ForCausalLM.from_pretrained('{{cookiecutter.checkpoint_identifier}}', config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.{{cookiecutter.lowercase_modelname}}(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
lm_loss = None
if labels is not None:
# we are doing next-token prediction; shift prediction scores and input ids by one
shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
labels = labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[1:]
return ((lm_loss,) + output) if lm_loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=lm_loss,
logits=prediction_scores,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_shape)
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}
def _reorder_cache(self, past, beam_idx):
reordered_past = ()
for layer_past in past:
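            # the cached cross-attention states (layer_past[2:]) come from the encoder and are identical
            # across beams of the same batch element, so only the self-attention states are re-indexed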
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],)
return reordered_past
class {{cookiecutter.camelcase_modelname}}ClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
self.config = config
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = ACT2FN[self.config.hidden_act](x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@add_start_docstrings(
"""{{cookiecutter.modelname}} Model transformer with a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """,
{{cookiecutter.uppercase_modelname}}_START_DOCSTRING,
)
class {{cookiecutter.camelcase_modelname}}ForSequenceClassification({{cookiecutter.camelcase_modelname}}PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config)
self.classifier = {{cookiecutter.camelcase_modelname}}ClassificationHead(config)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss.
Indices should be in `[0, ..., config.num_labels - 1]`.
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.{{cookiecutter.lowercase_modelname}}(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""{{cookiecutter.modelname}} Model with a multiple choice classification head on top (a linear layer on top of
the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """,
{{cookiecutter.uppercase_modelname}}_START_DOCSTRING,
)
class {{cookiecutter.camelcase_modelname}}ForMultipleChoice({{cookiecutter.camelcase_modelname}}PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config)
self.sequence_summary = SequenceSummary(config)
self.classifier = nn.Linear(config.hidden_size, 1)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss.
Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension
of the input tensors. (See `input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.{{cookiecutter.lowercase_modelname}}(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
pooled_output = self.sequence_summary(sequence_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""{{cookiecutter.modelname}} Model with a token classification head on top (a linear layer on top of
the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
{{cookiecutter.uppercase_modelname}}_START_DOCSTRING,
)
class {{cookiecutter.camelcase_modelname}}ForTokenClassification({{cookiecutter.camelcase_modelname}}PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss.
Indices should be in `[0, ..., config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.{{cookiecutter.lowercase_modelname}}(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""{{cookiecutter.modelname}} Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """,
{{cookiecutter.uppercase_modelname}}_START_DOCSTRING,
)
class {{cookiecutter.camelcase_modelname}}ForQuestionAnswering({{cookiecutter.camelcase_modelname}}PreTrainedModel):
def __init__(self, config):
super().__init__(config)
config.num_labels = 2
self.num_labels = config.num_labels
self.{{cookiecutter.lowercase_modelname}} = {{cookiecutter.camelcase_modelname}}Model(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
                Positions outside of the sequence are not taken into account for computing the loss.
end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
                Positions outside of the sequence are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.{{cookiecutter.lowercase_modelname}}(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
total_loss = None
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, splitting adds an extra dimension, so squeeze it away
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[1:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
{% else %}
import math
import copy
import random
from typing import Optional, Tuple, List, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...utils import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
Seq2SeqQuestionAnsweringModelOutput,
Seq2SeqSequenceClassifierOutput,
CausalLMOutputWithCrossAttentions
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.camelcase_modelname}}Config
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "{{cookiecutter.checkpoint_identifier}}"
_CONFIG_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Config"
_TOKENIZER_FOR_DOC = "{{cookiecutter.camelcase_modelname}}Tokenizer"
{{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST = [
"{{cookiecutter.checkpoint_identifier}}",
# See all {{cookiecutter.modelname}} models at https://huggingface.co/models?filter={{cookiecutter.lowercase_modelname}}
]
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
"""
Shift input ids one token to the right.
"""
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
shifted_input_ids[:, 0] = decoder_start_token_id
assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0):
"""
    Make causal mask used for uni-directional (causal) self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min))
mask_cond = torch.arange(mask.size(-1))
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
def _expand_mask(
mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None
):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min)
class {{cookiecutter.camelcase_modelname}}LearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, num_embeddings: int, embedding_dim: int):
super().__init__(num_embeddings, embedding_dim)
def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0):
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
bsz, seq_len = input_ids_shape[:2]
positions = torch.arange(
past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
)
return super().forward(positions)
class {{cookiecutter.camelcase_modelname}}Attention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads})."
self.scaling = self.head_dim ** -0.5
self.is_decoder = is_decoder
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, embed_dim = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
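        # fold the head dimension into the batch dimension so that a single torch.bmm call
        # computes attention for all heads at once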
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if layer_head_mask is not None:
if layer_head_mask.size() != (self.num_heads,):
raise ValueError(
f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
)
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
            # this operation is a bit awkward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights has to be reshaped
            # twice and reused in the following computation
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}"
)
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped, past_key_value
class {{cookiecutter.camelcase_modelname}}EncoderLayer(nn.Module):
def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = {{cookiecutter.camelcase_modelname}}Attention(
embed_dim=self.embed_dim,
num_heads=config.encoder_attention_heads,
dropout=config.attention_dropout,
)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
layer_head_mask: torch.Tensor,
output_attentions: bool = False,
):
"""
Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape *(batch, seq_len, embed_dim)*
attention_mask (`torch.FloatTensor`): attention mask of size
*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
*(config.encoder_attention_heads,)*.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states, attn_weights, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
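        # guard against fp16 overflow by clamping extreme activation values after the residual additions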
if hidden_states.dtype == torch.float16 and (torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()):
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
class {{cookiecutter.camelcase_modelname}}DecoderLayer(nn.Module):
def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = {{cookiecutter.camelcase_modelname}}Attention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.encoder_attn = {{cookiecutter.camelcase_modelname}}Attention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
cross_layer_head_mask: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = True,
):
"""
Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape *(batch, seq_len, embed_dim)*
attention_mask (`torch.FloatTensor`): attention mask of size
*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
            encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape *(batch, seq_len, embed_dim)*
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                *(decoder_attention_heads,)*.
cross_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
size *(decoder_attention_heads,)*.
past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
# Self Attention
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
# add present self-attn cache to positions 1,2 of present_key_value tuple
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
past_key_value=self_attn_past_key_value,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Cross-Attention Block
cross_attn_present_key_value = None
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
# cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
layer_head_mask=cross_layer_head_mask,
past_key_value=cross_attn_past_key_value,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# add cross-attn to positions 3,4 of present_key_value tuple
present_key_value = present_key_value + cross_attn_present_key_value
# Fully Connected
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
if use_cache:
outputs += (present_key_value,)
return outputs
# Copied from transformers.models.bart.modeling_bart.BartClassificationHead with Bart->{{cookiecutter.camelcase_modelname}}
class {{cookiecutter.camelcase_modelname}}ClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(
self,
input_dim: int,
inner_dim: int,
num_classes: int,
pooler_dropout: float,
):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
def forward(self, hidden_states: torch.Tensor):
hidden_states = self.dropout(hidden_states)
hidden_states = self.dense(hidden_states)
hidden_states = torch.tanh(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.out_proj(hidden_states)
return hidden_states
class {{cookiecutter.camelcase_modelname}}PreTrainedModel(PreTrainedModel):
config_class = {{cookiecutter.camelcase_modelname}}Config
base_model_prefix = "model"
supports_gradient_checkpointing = True
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, ({{cookiecutter.camelcase_modelname}}Decoder, {{cookiecutter.camelcase_modelname}}Encoder)):
module.gradient_checkpointing = value
{{cookiecutter.uppercase_modelname}}_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic
    methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config ([`~{{cookiecutter.camelcase_modelname}}Config`]):
Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model
weights.
"""
{{cookiecutter.uppercase_modelname}}_GENERATION_EXAMPLE = r"""
Summarization example:
```python
>>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, {{cookiecutter.camelcase_modelname}}ForConditionalGeneration
>>> model = {{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}')
>>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}')
>>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='pt')
>>> # Generate Summary
>>> summary_ids = model.generate(inputs['input_ids'], num_beams=4, max_length=5)
    >>> print(tokenizer.decode(summary_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=False))
```
"""
{{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for
details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Provide for translation and summarization training. By default, the model will create this tensor by
shifting the `input_ids` to the right, following the paper.
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will
also be used by default.
If you want to change padding behavior, you should read [`modeling_{{cookiecutter.lowercase_modelname}}._prepare_decoder_attention_mask`] and
modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
information on the default strategy.
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
        encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
            Tuple consisting of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`).
            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of
            hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the
            decoder.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors
of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids`
(those that don't have their past key value states given to this model) of shape `(batch_size, 1)`
            instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated
            vectors than the model's internal embedding lookup matrix.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds`
have to be input (see `past_key_values`). This is useful if you want more control over how to convert
`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds`
takes the value of `inputs_embeds`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up
decoding (see `past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
{{cookiecutter.uppercase_modelname}}_STANDALONE_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
            Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for
details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
class {{cookiecutter.camelcase_modelname}}Encoder({{cookiecutter.camelcase_modelname}}PreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`{{cookiecutter.camelcase_modelname}}EncoderLayer`].
Args:
config: {{cookiecutter.camelcase_modelname}}Config
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, embed_tokens: Optional[nn.Embedding] = None):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
embed_dim = config.d_model
self.padding_idx = config.pad_token_id
self.max_source_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
self.embed_positions = {{cookiecutter.camelcase_modelname}}LearnedPositionalEmbedding(
config.max_position_embeddings,
embed_dim,
)
self.layers = nn.ModuleList([{{cookiecutter.camelcase_modelname}}EncoderLayer(config) for _ in range(config.encoder_layers)])
self.layernorm_embedding = nn.LayerNorm(embed_dim)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`]
for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert `input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
embed_pos = self.embed_positions(input_shape)
hidden_states = inputs_embeds + embed_pos
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# expand attention_mask
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
# check if head_mask has a correct number of layers specified if desired
if head_mask is not None:
assert head_mask.size()[0] == (
len(self.layers)
), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop): # skip the layer
layer_outputs = (None, None)
else:
if self.gradient_checkpointing and self.training:
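                    # `torch.utils.checkpoint.checkpoint` only re-feeds the positional tensors it receives on the
                    # backward pass, so the wrapper below closes over `output_attentions` instead of passing it
                    # as a keyword argument.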
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(encoder_layer),
hidden_states,
attention_mask,
(head_mask[idx] if head_mask is not None else None),
)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
class {{cookiecutter.camelcase_modelname}}Decoder({{cookiecutter.camelcase_modelname}}PreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`{{cookiecutter.camelcase_modelname}}DecoderLayer`]
Args:
config: {{cookiecutter.camelcase_modelname}}Config
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, embed_tokens: Optional[nn.Embedding] = None):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
self.embed_positions = {{cookiecutter.camelcase_modelname}}LearnedPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
)
self.layers = nn.ModuleList([{{cookiecutter.camelcase_modelname}}DecoderLayer(config) for _ in range(config.decoder_layers)])
self.layernorm_embedding = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, value):
self.embed_tokens = value
# Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = None
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(
input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length
).to(self.device)
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
combined_attention_mask = (
expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
)
return combined_attention_mask
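    # For intuition: with a target length of 3 whose last position is padding, the combined additive mask built
    # above (0 = attend, large negative value = masked) looks roughly like
    #
    #     [[ 0, -inf, -inf ],
    #      [ 0,    0, -inf ],
    #      [ 0,    0, -inf ]]
    #
    # broadcast to `[bsz, 1, tgt_seq_len, src_seq_len]` before being added to the attention scores.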
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`]
for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
                Mask to avoid performing cross-attention on padding token indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2
tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional
tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential
decoding.
If `past_key_values` are used, the user can optionally input only the last
`decoder_input_ids` (those that don't have their past key value states given to this model) of
shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size,
                sequence_length)`.
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded
                representation. This is useful if you want more control over how to convert `input_ids` indices
                into associated vectors than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
attention_mask = self._prepare_decoder_attention_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length)
# expand encoder attention mask
if encoder_hidden_states is not None and encoder_attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
# embed positions
positions = self.embed_positions(input_shape, past_key_values_length)
hidden_states = inputs_embeds + positions
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
next_decoder_cache = () if use_cache else None
# check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
if attn_mask is not None:
assert attn_mask.size()[0] == (
len(self.layers)
), f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (hidden_states,)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop):
continue
past_key_value = past_key_values[idx] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning("`use_cache = True` is incompatible with gradient checkpointing`. Setting `use_cache = False`...")
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, output_attentions, use_cache)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(decoder_layer),
hidden_states,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
head_mask[idx] if head_mask is not None else None,
cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
None,
)
else:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
cross_layer_head_mask=(cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None),
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
if not return_dict:
return tuple(
v
for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
)
@add_start_docstrings(
"The bare {{cookiecutter.modelname}} Model outputting raw hidden-states without any specific head on top.",
{{cookiecutter.uppercase_modelname}}_START_DOCSTRING,
)
class {{cookiecutter.camelcase_modelname}}Model({{cookiecutter.camelcase_modelname}}PreTrainedModel):
def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config):
super().__init__(config)
padding_idx, vocab_size = config.pad_token_id, config.vocab_size
self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
self.encoder = {{cookiecutter.camelcase_modelname}}Encoder(config, self.shared)
self.decoder = {{cookiecutter.camelcase_modelname}}Decoder(config, self.shared)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, value):
self.shared = value
self.encoder.embed_tokens = self.shared
self.decoder.embed_tokens = self.shared
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
@add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=Seq2SeqModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
# decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
past_key_values=past_key_values,
inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
@add_start_docstrings(
"The {{cookiecutter.modelname}} Model with a language modeling head. Can be used for summarization.", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING
)
class {{cookiecutter.camelcase_modelname}}ForConditionalGeneration({{cookiecutter.camelcase_modelname}}PreTrainedModel):
base_model_prefix = "model"
_keys_to_ignore_on_load_missing = [
r"final_logits_bias",
r"encoder\.version",
r"decoder\.version",
r"lm_head\.weight",
]
def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config):
super().__init__(config)
self.model = {{cookiecutter.camelcase_modelname}}Model(config)
self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_encoder(self):
return self.model.get_encoder()
def get_decoder(self):
return self.model.get_decoder()
def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:
new_embeddings = super().resize_token_embeddings(new_num_tokens)
self._resize_final_logits_bias(new_num_tokens)
return new_embeddings
def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
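        # Keeps the `final_logits_bias` buffer in sync with the embedding size: shrinking truncates the bias,
        # while growing pads the new positions with zeros so freshly added tokens start unbiased.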
old_num_tokens = self.final_logits_bias.shape[-1]
if new_num_tokens <= old_num_tokens:
new_bias = self.final_logits_bias[:, :new_num_tokens]
else:
extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
self.register_buffer("final_logits_bias", new_bias)
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
@add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
@add_end_docstrings({{cookiecutter.uppercase_modelname}}_GENERATION_EXAMPLE)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in
            `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100`
            are ignored (masked), the loss is only computed for the tokens with labels in
            `[0, ..., config.vocab_size]`.
Returns:
Conditional generation example:
```python
>>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, {{cookiecutter.camelcase_modelname}}ForConditionalGeneration
>>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}')
>>> TXT = "My friends are <mask> but they eat too many carbs."
>>> model = {{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}')
>>> input_ids = tokenizer([TXT], return_tensors='pt')['input_ids']
>>> logits = model(input_ids).logits
>>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
>>> probs = logits[0, masked_index].softmax(dim=0)
>>> values, predictions = probs.topk(5)
>>> tokenizer.decode(predictions).split()
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
if use_cache:
logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
use_cache = False
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
encoder_outputs=encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (lm_logits,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return Seq2SeqLMOutput(
loss=masked_lm_loss,
logits=lm_logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
def prepare_inputs_for_generation(
self,
decoder_input_ids,
past=None,
attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
use_cache=None,
encoder_outputs=None,
**kwargs
):
# cut decoder_input_ids if past is used
if past is not None:
decoder_input_ids = decoder_input_ids[:, -1:]
return {
"input_ids": None, # encoder_outputs is defined. input_ids not needed
"encoder_outputs": encoder_outputs,
"past_key_values": past,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
"use_cache": use_cache, # change this to avoid caching (presumably for debugging)
}
@staticmethod
def _reorder_cache(past, beam_idx):
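        # Called by `generate()` during beam search: `beam_idx` holds, for every beam slot, the index of the beam
        # it was expanded from, so each cached key/value tensor is re-gathered along its batch*beam axis.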
reordered_past = ()
for layer_past in past:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
@add_start_docstrings(
"""
    {{cookiecutter.camelcase_modelname}} model with a sequence classification head on top (a linear layer on top of the pooled output) e.g. for GLUE
tasks.
""",
{{cookiecutter.uppercase_modelname}}_START_DOCSTRING,
)
class {{cookiecutter.camelcase_modelname}}ForSequenceClassification({{cookiecutter.camelcase_modelname}}PreTrainedModel):
def __init__(self, config: {{cookiecutter.camelcase_modelname}}Config, **kwargs):
super().__init__(config, **kwargs)
self.model = {{cookiecutter.camelcase_modelname}}Model(config)
self.classification_head = {{cookiecutter.camelcase_modelname}}ClassificationHead(
config.d_model,
config.d_model,
config.num_labels,
config.classifier_dropout,
)
self.model._init_weights(self.classification_head.dense)
self.model._init_weights(self.classification_head.out_proj)
@add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=Seq2SeqSequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
encoder_outputs=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in
            `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed
            (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
if input_ids is None and inputs_embeds is not None:
raise NotImplementedError(
f"Passing input embeddings is currently not supported for {self.__class__.__name__}"
)
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
encoder_outputs=encoder_outputs,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0] # last hidden state
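        # Pool the decoder output by taking the hidden state at the final <eos> token of each sequence; this
        # pooled representation is what `classification_head` receives below (BART-style sentence pooling).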
eos_mask = input_ids.eq(self.config.eos_token_id)
if len(torch.unique_consecutive(eos_mask.sum(1))) > 1:
raise ValueError("All examples must have the same number of <eos> tokens.")
sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[
:, -1, :
]
logits = self.classification_head(sentence_representation)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.config.num_labels == 1:
self.config.problem_type = "regression"
elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.config.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return Seq2SeqSequenceClassifierOutput(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
@add_start_docstrings(
"""
{{cookiecutter.modelname}} Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
{{cookiecutter.uppercase_modelname}}_START_DOCSTRING,
)
class {{cookiecutter.camelcase_modelname}}ForQuestionAnswering({{cookiecutter.camelcase_modelname}}PreTrainedModel):
def __init__(self, config):
super().__init__(config)
config.num_labels = 2
self.num_labels = config.num_labels
self.model = {{cookiecutter.camelcase_modelname}}Model(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.model._init_weights(self.qa_outputs)
@add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=Seq2SeqQuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
encoder_outputs=None,
start_positions=None,
end_positions=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (*sequence_length*). Positions outside of the sequence
are not taken into account for computing the loss.
end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (*sequence_length*). Positions outside of the sequence
are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if start_positions is not None and end_positions is not None:
use_cache = False
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
encoder_outputs=encoder_outputs,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (
start_logits,
end_logits,
) + outputs[1:]
return ((total_loss,) + output) if total_loss is not None else output
return Seq2SeqQuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
# Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->{{cookiecutter.camelcase_modelname}}
class {{cookiecutter.camelcase_modelname}}DecoderWrapper({{cookiecutter.camelcase_modelname}}PreTrainedModel):
"""
This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
used in combination with the [`EncoderDecoderModel`] framework.
"""
def __init__(self, config):
super().__init__(config)
self.decoder = {{cookiecutter.camelcase_modelname}}Decoder(config)
def forward(self, *args, **kwargs):
return self.decoder(*args, **kwargs)
# Copied from transformers.models.bart.modeling_bart.BartForCausalLM with Bart->{{cookiecutter.camelcase_modelname}}
class {{cookiecutter.camelcase_modelname}}ForCausalLM({{cookiecutter.camelcase_modelname}}PreTrainedModel):
def __init__(self, config):
config = copy.deepcopy(config)
config.is_decoder = True
config.is_encoder_decoder = False
super().__init__(config)
self.model = {{cookiecutter.camelcase_modelname}}DecoderWrapper(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.model.decoder.embed_tokens
def set_input_embeddings(self, value):
self.model.decoder.embed_tokens = value
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def set_decoder(self, decoder):
self.model.decoder = decoder
def get_decoder(self):
return self.model.decoder
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`~{{cookiecutter.camelcase_modelname}}Tokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`]
for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
                in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids`
(those that don't have their past key value states given to this model) of shape `(batch_size, 1)`
instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in
                `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to
                `-100` are ignored (masked), the loss is only computed for the tokens with labels in
                `[0, ..., config.vocab_size]`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up
decoding (see `past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
Returns:
Example:
```python
>>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, {{cookiecutter.camelcase_modelname}}ForCausalLM
>>> tokenizer = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('facebook/bart-large')
>>> model = {{cookiecutter.camelcase_modelname}}ForCausalLM.from_pretrained('facebook/bart-large', add_cross_attention=False)
>>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
```
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model.decoder(
input_ids=input_ids,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
head_mask=head_mask,
cross_attn_head_mask=cross_attn_head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
logits = self.lm_head(outputs[0])
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, use_cache=None, **kwargs):
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_ids.shape)
if past:
input_ids = input_ids[:, -1:]
# first step, decoder_cached_states are empty
return {
"input_ids": input_ids, # encoder_outputs is defined. input_ids not needed
"attention_mask": attention_mask,
"past_key_values": past,
"use_cache": use_cache,
}
@staticmethod
def _reorder_cache(past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
{% endif -%}
| {
"content_hash": "372520643969b2bcd861805bd11c9c55",
"timestamp": "",
"source": "github",
"line_count": 3338,
"max_line_length": 390,
"avg_line_length": 46.27471539844218,
"alnum_prop": 0.6340077040106173,
"repo_name": "huggingface/transformers",
"id": "9e2154901aa6967409478415d102bfeb16be48ad",
"size": "155116",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6021"
},
{
"name": "C++",
"bytes": "12959"
},
{
"name": "Cuda",
"bytes": "175419"
},
{
"name": "Dockerfile",
"bytes": "18218"
},
{
"name": "Jsonnet",
"bytes": "937"
},
{
"name": "Makefile",
"bytes": "3430"
},
{
"name": "Python",
"bytes": "35742012"
},
{
"name": "Shell",
"bytes": "30374"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Inc',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
('number', models.IntegerField(default=0)),
],
options={
},
bases=(models.Model,),
),
]
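# A sketch of how this initial migration would typically be applied, assuming the app is installed under the
# label "count" (taken from the migration's path, count/migrations/0001_initial.py):
#
#     python manage.py migrate count
#
# after which e.g. `Inc.objects.create()` persists a row with an auto `id` and `number` defaulting to 0.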
| {
"content_hash": "ec5e06451f8d5197d5d35692ae667d43",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 114,
"avg_line_length": 23.954545454545453,
"alnum_prop": 0.5351043643263758,
"repo_name": "matthewbentley/counter",
"id": "4cf4fc5c31642eaffbca983832ed7408679abacc",
"size": "551",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "count/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6140"
},
{
"name": "Shell",
"bytes": "396"
}
],
"symlink_target": ""
} |
"""Constants for the Toon integration."""
from datetime import timedelta
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_CONNECTIVITY,
DEVICE_CLASS_PROBLEM,
)
from homeassistant.components.sensor import DEVICE_CLASS_POWER, DEVICE_CLASS_TEMPERATURE
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ICON,
ATTR_NAME,
ATTR_UNIT_OF_MEASUREMENT,
ENERGY_KILO_WATT_HOUR,
POWER_WATT,
TEMP_CELSIUS,
UNIT_PERCENTAGE,
)
DOMAIN = "toon"
CONF_AGREEMENT = "agreement"
CONF_AGREEMENT_ID = "agreement_id"
CONF_CLOUDHOOK_URL = "cloudhook_url"
CONF_MIGRATE = "migrate"
DEFAULT_SCAN_INTERVAL = timedelta(seconds=300)
DEFAULT_MAX_TEMP = 30.0
DEFAULT_MIN_TEMP = 6.0
CURRENCY_EUR = "EUR"
VOLUME_CM3 = "CM3"
VOLUME_M3 = "M3"
ATTR_DEFAULT_ENABLED = "default_enabled"
ATTR_INVERTED = "inverted"
ATTR_MEASUREMENT = "measurement"
ATTR_SECTION = "section"
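# The dictionaries below are declarative entity descriptions: each key is assumed to be used as the unique id
# suffix of an entity, with ATTR_SECTION/ATTR_MEASUREMENT pointing the platform code (not shown here) at the
# matching value in the Toon API data, and the remaining ATTR_* entries feeding the usual Home Assistant entity
# attributes (name, icon, device class, unit of measurement, default-enabled flag).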
BINARY_SENSOR_ENTITIES = {
"thermostat_info_boiler_connected_None": {
ATTR_NAME: "Boiler Module Connection",
ATTR_SECTION: "thermostat",
ATTR_MEASUREMENT: "boiler_module_connected",
ATTR_INVERTED: False,
ATTR_DEVICE_CLASS: DEVICE_CLASS_CONNECTIVITY,
ATTR_ICON: "mdi:check-network-outline",
ATTR_DEFAULT_ENABLED: False,
},
"thermostat_info_burner_info_1": {
ATTR_NAME: "Boiler Heating",
ATTR_SECTION: "thermostat",
ATTR_MEASUREMENT: "heating",
ATTR_INVERTED: False,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:fire",
ATTR_DEFAULT_ENABLED: False,
},
"thermostat_info_burner_info_2": {
ATTR_NAME: "Hot Tap Water",
ATTR_SECTION: "thermostat",
ATTR_MEASUREMENT: "hot_tapwater",
ATTR_INVERTED: False,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:water-pump",
ATTR_DEFAULT_ENABLED: True,
},
"thermostat_info_burner_info_3": {
ATTR_NAME: "Boiler Preheating",
ATTR_SECTION: "thermostat",
ATTR_MEASUREMENT: "pre_heating",
ATTR_INVERTED: False,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:fire",
ATTR_DEFAULT_ENABLED: False,
},
"thermostat_info_burner_info_None": {
ATTR_NAME: "Boiler Burner",
ATTR_SECTION: "thermostat",
ATTR_MEASUREMENT: "burner",
ATTR_INVERTED: False,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:fire",
ATTR_DEFAULT_ENABLED: True,
},
"thermostat_info_error_found_255": {
ATTR_NAME: "Boiler Status",
ATTR_SECTION: "thermostat",
ATTR_MEASUREMENT: "error_found",
ATTR_INVERTED: False,
ATTR_DEVICE_CLASS: DEVICE_CLASS_PROBLEM,
ATTR_ICON: "mdi:alert",
ATTR_DEFAULT_ENABLED: True,
},
"thermostat_info_ot_communication_error_0": {
ATTR_NAME: "OpenTherm Connection",
ATTR_SECTION: "thermostat",
ATTR_MEASUREMENT: "opentherm_communication_error",
ATTR_INVERTED: False,
ATTR_DEVICE_CLASS: DEVICE_CLASS_PROBLEM,
ATTR_ICON: "mdi:check-network-outline",
ATTR_DEFAULT_ENABLED: False,
},
"thermostat_program_overridden": {
ATTR_NAME: "Thermostat Program Override",
ATTR_SECTION: "thermostat",
ATTR_MEASUREMENT: "program_overridden",
ATTR_INVERTED: False,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:gesture-tap",
ATTR_DEFAULT_ENABLED: True,
},
}
SENSOR_ENTITIES = {
"current_display_temperature": {
ATTR_NAME: "Temperature",
ATTR_SECTION: "thermostat",
ATTR_MEASUREMENT: "current_display_temperature",
ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_ICON: None,
ATTR_DEFAULT_ENABLED: False,
},
"gas_average": {
ATTR_NAME: "Average Gas Usage",
ATTR_SECTION: "gas_usage",
ATTR_MEASUREMENT: "average",
ATTR_UNIT_OF_MEASUREMENT: VOLUME_CM3,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:gas-cylinder",
ATTR_DEFAULT_ENABLED: True,
},
"gas_average_daily": {
ATTR_NAME: "Average Daily Gas Usage",
ATTR_SECTION: "gas_usage",
ATTR_MEASUREMENT: "day_average",
ATTR_UNIT_OF_MEASUREMENT: VOLUME_M3,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:gas-cylinder",
ATTR_DEFAULT_ENABLED: False,
},
"gas_daily_usage": {
ATTR_NAME: "Gas Usage Today",
ATTR_SECTION: "gas_usage",
ATTR_MEASUREMENT: "day_usage",
ATTR_UNIT_OF_MEASUREMENT: VOLUME_M3,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:gas-cylinder",
ATTR_DEFAULT_ENABLED: True,
},
"gas_daily_cost": {
ATTR_NAME: "Gas Cost Today",
ATTR_SECTION: "gas_usage",
ATTR_MEASUREMENT: "day_cost",
ATTR_UNIT_OF_MEASUREMENT: CURRENCY_EUR,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:gas-cylinder",
ATTR_DEFAULT_ENABLED: True,
},
"gas_meter_reading": {
ATTR_NAME: "Gas Meter",
ATTR_SECTION: "gas_usage",
ATTR_MEASUREMENT: "meter",
ATTR_UNIT_OF_MEASUREMENT: VOLUME_M3,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:gas-cylinder",
ATTR_DEFAULT_ENABLED: False,
},
"gas_value": {
ATTR_NAME: "Current Gas Usage",
ATTR_SECTION: "gas_usage",
ATTR_MEASUREMENT: "current",
ATTR_UNIT_OF_MEASUREMENT: VOLUME_CM3,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:gas-cylinder",
ATTR_DEFAULT_ENABLED: True,
},
"power_average": {
ATTR_NAME: "Average Power Usage",
ATTR_SECTION: "power_usage",
ATTR_MEASUREMENT: "average",
ATTR_UNIT_OF_MEASUREMENT: POWER_WATT,
ATTR_DEVICE_CLASS: DEVICE_CLASS_POWER,
ATTR_ICON: "mdi:power-plug",
ATTR_DEFAULT_ENABLED: False,
},
"power_average_daily": {
ATTR_NAME: "Average Daily Energy Usage",
ATTR_SECTION: "power_usage",
ATTR_MEASUREMENT: "day_average",
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:power-plug",
ATTR_DEFAULT_ENABLED: False,
},
"power_daily_cost": {
ATTR_NAME: "Energy Cost Today",
ATTR_SECTION: "power_usage",
ATTR_MEASUREMENT: "day_cost",
ATTR_UNIT_OF_MEASUREMENT: CURRENCY_EUR,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:power-plug",
ATTR_DEFAULT_ENABLED: True,
},
"power_daily_value": {
ATTR_NAME: "Energy Usage Today",
ATTR_SECTION: "power_usage",
ATTR_MEASUREMENT: "day_usage",
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:power-plug",
ATTR_DEFAULT_ENABLED: True,
},
"power_meter_reading": {
ATTR_NAME: "Electricity Meter Feed IN Tariff 1",
ATTR_SECTION: "power_usage",
ATTR_MEASUREMENT: "meter_high",
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:power-plug",
ATTR_DEFAULT_ENABLED: False,
},
"power_meter_reading_low": {
ATTR_NAME: "Electricity Meter Feed IN Tariff 2",
ATTR_SECTION: "power_usage",
ATTR_MEASUREMENT: "meter_high",
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:power-plug",
ATTR_DEFAULT_ENABLED: False,
},
"power_value": {
ATTR_NAME: "Current Power Usage",
ATTR_SECTION: "power_usage",
ATTR_MEASUREMENT: "current",
ATTR_UNIT_OF_MEASUREMENT: POWER_WATT,
ATTR_DEVICE_CLASS: DEVICE_CLASS_POWER,
ATTR_ICON: "mdi:power-plug",
ATTR_DEFAULT_ENABLED: True,
},
"solar_meter_reading_produced": {
ATTR_NAME: "Electricity Meter Feed OUT Tariff 1",
ATTR_SECTION: "power_usage",
ATTR_MEASUREMENT: "meter_produced_high",
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:power-plug",
ATTR_DEFAULT_ENABLED: False,
},
"solar_meter_reading_low_produced": {
ATTR_NAME: "Electricity Meter Feed OUT Tariff 2",
ATTR_SECTION: "power_usage",
ATTR_MEASUREMENT: "meter_produced_low",
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:power-plug",
ATTR_DEFAULT_ENABLED: False,
},
"solar_value": {
ATTR_NAME: "Current Solar Power Production",
ATTR_SECTION: "power_usage",
ATTR_MEASUREMENT: "current_solar",
ATTR_UNIT_OF_MEASUREMENT: POWER_WATT,
ATTR_DEVICE_CLASS: DEVICE_CLASS_POWER,
ATTR_ICON: "mdi:solar-power",
ATTR_DEFAULT_ENABLED: True,
},
"solar_maximum": {
ATTR_NAME: "Max Solar Power Production Today",
ATTR_SECTION: "power_usage",
ATTR_MEASUREMENT: "day_max_solar",
ATTR_UNIT_OF_MEASUREMENT: POWER_WATT,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:solar-power",
ATTR_DEFAULT_ENABLED: True,
},
"solar_produced": {
ATTR_NAME: "Solar Power Production to Grid",
ATTR_SECTION: "power_usage",
ATTR_MEASUREMENT: "current_produced",
ATTR_UNIT_OF_MEASUREMENT: POWER_WATT,
ATTR_DEVICE_CLASS: DEVICE_CLASS_POWER,
ATTR_ICON: "mdi:solar-power",
ATTR_DEFAULT_ENABLED: True,
},
"power_usage_day_produced_solar": {
ATTR_NAME: "Solar Energy Produced Today",
ATTR_SECTION: "power_usage",
ATTR_MEASUREMENT: "day_produced_solar",
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:solar-power",
ATTR_DEFAULT_ENABLED: True,
},
"power_usage_day_to_grid_usage": {
ATTR_NAME: "Energy Produced To Grid Today",
ATTR_SECTION: "power_usage",
ATTR_MEASUREMENT: "day_to_grid_usage",
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:solar-power",
ATTR_DEFAULT_ENABLED: False,
},
"power_usage_day_from_grid_usage": {
ATTR_NAME: "Energy Usage From Grid Today",
ATTR_SECTION: "power_usage",
ATTR_MEASUREMENT: "day_from_grid_usage",
ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:power-plug",
ATTR_DEFAULT_ENABLED: False,
},
"solar_average_produced": {
ATTR_NAME: "Average Solar Power Production to Grid",
ATTR_SECTION: "power_usage",
ATTR_MEASUREMENT: "average_produced",
ATTR_UNIT_OF_MEASUREMENT: POWER_WATT,
ATTR_DEVICE_CLASS: DEVICE_CLASS_POWER,
ATTR_ICON: "mdi:solar-power",
ATTR_DEFAULT_ENABLED: False,
},
"thermostat_info_current_modulation_level": {
ATTR_NAME: "Boiler Modulation Level",
ATTR_SECTION: "thermostat",
ATTR_MEASUREMENT: "current_modulation_level",
ATTR_UNIT_OF_MEASUREMENT: UNIT_PERCENTAGE,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:percent",
ATTR_DEFAULT_ENABLED: False,
},
"power_usage_current_covered_by_solar": {
ATTR_NAME: "Current Power Usage Covered By Solar",
ATTR_SECTION: "power_usage",
ATTR_MEASUREMENT: "current_covered_by_solar",
ATTR_UNIT_OF_MEASUREMENT: UNIT_PERCENTAGE,
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:solar-power",
ATTR_DEFAULT_ENABLED: True,
},
}
SWITCH_ENTITIES = {
"thermostat_holiday_mode": {
ATTR_NAME: "Holiday Mode",
ATTR_SECTION: "thermostat",
ATTR_MEASUREMENT: "holiday_mode",
ATTR_INVERTED: False,
ATTR_ICON: "mdi:airport",
ATTR_DEFAULT_ENABLED: True,
},
"thermostat_program": {
ATTR_NAME: "Thermostat Program",
ATTR_SECTION: "thermostat",
ATTR_MEASUREMENT: "program",
ATTR_INVERTED: False,
ATTR_ICON: "mdi:calendar-clock",
ATTR_DEFAULT_ENABLED: True,
},
}
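# Illustrative helper (an addition for documentation, not part of the upstream
# integration): the entity maps above are plain dicts keyed by entity key, so
# setup code can filter or build entities from them generically. Only the
# ATTR_* constants defined earlier in this module are assumed here.
def _default_enabled_keys(entity_map):
    """Return the keys of all entries flagged as enabled by default,
    e.g. _default_enabled_keys(SWITCH_ENTITIES)."""
    return [key for key, desc in entity_map.items() if desc[ATTR_DEFAULT_ENABLED]]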
| {
"content_hash": "a3b80c251d5c3589e610128e2c87063d",
"timestamp": "",
"source": "github",
"line_count": 360,
"max_line_length": 88,
"avg_line_length": 33.708333333333336,
"alnum_prop": 0.6051091882983106,
"repo_name": "mKeRix/home-assistant",
"id": "d7f403f70132b833798dd6b167336953d095a4e1",
"size": "12135",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/toon/const.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1466026"
},
{
"name": "Python",
"bytes": "4770710"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "12407"
}
],
"symlink_target": ""
} |
'''
A class for finding a vertex cut. A vertex cut is a set of nodes, ideally the smallest number, that when removed yields a disconnected graph.
Apparently this is NP-hard, so we settle for the following heuristic: iteratively remove the node that has the most edges until the graph is disconnected.
Examples:
#>>> sparse_input_sequence = [("a","b",3),("b","a",3),("a","a",3),("b","b",3),("c","c",3)]
#>>> VertexCut().work(sparse_input_sequence)
#['a']
>>> matrix = np.array([[3, 3, 1],[3, 3, 1],[1, 1, 3]])
>>> VertexCut().work(matrix,2)
[0]
'''
import numpy as np
import scipy as sp
from collections import defaultdict
import logging
import itertools
class VertexCut(object):
def work(self, matrix, minvalue):
assert(len(matrix.shape) == 2 and matrix.shape[0] == matrix.shape[1])
graph = self._load_graph_from_matrix(matrix, minvalue)
node_list = []
while self._piece_count(graph) < len(graph):
logging.debug("len(graph)={0}".format(len(graph)))
aMostConnectedNode = self._find_a_most_connected_node(graph)
#logging.debug("aMostConnectedNode={0}".format(aMostConnectedNode))
self._remove_node(graph, aMostConnectedNode)
#logging.debug("removed")
node_list.append(aMostConnectedNode)
if len(node_list) % 10 == 0:
logging.info("# nodes removed is {0}".format(len(node_list)))
return node_list
def _load_graph_from_matrix(self, matrix, minvalue):
where = np.where(matrix >= minvalue)
sparse_input_sequence = itertools.izip(where[0],where[1])
graph = self._load_graph(sparse_input_sequence,len(where[0]))
return graph
def _load_graph(self, sparse_input_sequence,len_input_sequence):
graph = defaultdict(list)
i = -1
for node1, node2 in sparse_input_sequence:
i += 1
            #don't need graph[node1] because singletons don't need to be included in the graph
if node1 != node2 :
if i % 100000 == 0:
logging.info("added {0} of {1} ({2}%)".format(i, len_input_sequence, float(i)/len_input_sequence*100.0))
graph[node1].append(node2)
#!! self._check_that_symmetric(graph)
return graph
def _check_that_symmetric(self, graph):
for node1, list in graph.iteritems():
for node2 in list:
if not node1 in graph[node2]:
raise Exception("expect symmetric graph {0}, {1}".format(node1, node2))
def _remove_node(self, graph, node1):
node2List = graph[node1]
del graph[node1]
for node2 in node2List:
graph[node2].remove(node1)
def _find_a_most_connected_node(self, graph):
best_node, best_list = max(graph.iteritems(), key=lambda pair : len(pair[1])) # find the node connected to the most other nodes
logging.debug("Removing a node with {0} connections".format(len(best_list)))
return best_node
def _piece_count(self, graph):
unassigned = set(graph.iterkeys())
pieceCount = 0
while len(unassigned) > 0:
seed = unassigned.pop()
pieceCount += 1
workList = [seed]
nextWorkList = []
while len(workList) > 0:
for node1 in workList:
for node2 in graph[node1]:
if node2 in unassigned:
unassigned.remove(node2)
nextWorkList.append(node2)
workList = nextWorkList
nextWorkList = []
logging.debug("piece_count={0}".format(pieceCount))
return pieceCount
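# Illustrative usage sketch (an addition, not part of the original module):
# mirrors the doctest in the module docstring. Edges exist wherever
# matrix[i, j] >= minvalue and i != j; the most-connected node is removed
# repeatedly, following the heuristic described above.
def _example_usage():
    matrix = np.array([[3, 3, 1], [3, 3, 1], [1, 1, 3]])
    removed = VertexCut().work(matrix, 2)  # expected: [0]
    return removed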
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
import doctest
doctest.testmod()
print "done"
| {
"content_hash": "7467307b01afdcb8f2a178d75c805fa6",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 154,
"avg_line_length": 36.65094339622642,
"alnum_prop": 0.585070785070785,
"repo_name": "MicrosoftGenomics/FaST-LMM",
"id": "8487add61654dd80e0ed7f3cab0bab24f4af76cb",
"size": "3885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fastlmm/util/VertexCut.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "884"
},
{
"name": "C",
"bytes": "353"
},
{
"name": "C++",
"bytes": "15585"
},
{
"name": "Jupyter Notebook",
"bytes": "45336"
},
{
"name": "Makefile",
"bytes": "6787"
},
{
"name": "Python",
"bytes": "1266089"
}
],
"symlink_target": ""
} |
import numpy as np
import matplotlib.pyplot as plt
from sys import argv
# Bit-reversal function
# def _bit_reverse(n):
# return int(bin(n)[:1:-1], 2)
def _bit_reverse(n, width=8):
b = '{:0{width}b}'.format(n, width=width)
return int(b[::-1], 2)
# bit_reverse = np.frompyfunc(_bit_reverse, 1, 1)
bit_reverse = np.vectorize(_bit_reverse, excluded=['width'])  # keep 'width' out of broadcasting
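# Worked example (illustrative, added for clarity): with width=3 the index
# n=3 is formatted as '011'; reversing the bit string gives '110', so
# _bit_reverse(3, width=3) == 6. bit_reverse applies the same mapping
# element-wise to an array of indices.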
# Function generating view angles for one cycle
def gen_theta_one_cycle(K, N_theta):
n = np.arange(N_theta)
width = len(bin(K))-3
# return n*K//N_theta
# return (n*K//N_theta) % K
# return bit_reverse((n*K//N_theta) % K)
return (n*K + bit_reverse((n*K//N_theta) % K, width=width)) * (np.pi/N_theta)
# Function generating view angles for multiple cycles
def gen_theta(K, N_theta, TotalNumCycles=1):
offset = 0
    thetas = np.array([], dtype=float)
for i in range(TotalNumCycles):
thetas = np.append(thetas, gen_theta_one_cycle(K, N_theta)+offset)
offset += np.pi*K
return thetas
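# Illustrative check (an addition, not in the original script): with K=4
# sub-frames and N_theta=16 views per cycle, the interlaced angles are
# strictly increasing within the cycle and stay below K*pi.
def _demo_gen_theta(K=4, N_theta=16):
    demo = gen_theta(K, N_theta)
    assert demo.shape == (N_theta,)
    assert np.all(np.diff(demo) > 0) and demo[-1] < K * np.pi
    return demo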
def calc_dropped_angles(thetas, dtheta_min, verbose=True, TotalNumCycles=1):
dthetas = thetas[1:] - thetas[:-1]
dropped_angles = np.append([False], dthetas<dtheta_min)
if verbose:
print("Minimum angle between frames %s" % np.min(dthetas))
print("Maximum angle between frames %s" % np.max(dthetas))
if TotalNumCycles > 1:
n_one_cycle = dthetas.size//TotalNumCycles
print("Dropped %s frames in the first cycle, %s frames in subsequent cycles." %
(np.sum(dropped_angles[:n_one_cycle]), np.sum(dropped_angles[n_one_cycle:2*n_one_cycle])))
print("Total number of dropped frames %s" % np.sum(dropped_angles))
return dropped_angles
if __name__ == "__main__":
if len(argv) >= 3:
K = int(argv[1])
N_theta = int(argv[2])
else:
K = 4
N_theta = 16
TotalNumCycles = 1
for i in range(3,len(argv)):
if 'n_cycles=' in argv[i]:
TotalNumCycles = int(argv[i].split('=')[-1])
thetas = gen_theta(K, N_theta, TotalNumCycles)
dtheta_min = 0
for i in range(3,len(argv)):
if 'dtheta_min=' in argv[i]:
dtheta_min = float(argv[i].split('=')[-1])
dropped_angles = calc_dropped_angles(thetas, dtheta_min, TotalNumCycles=TotalNumCycles)
if not ('nowrap' in argv):
thetas = thetas % np.pi
if 'sort' in argv:
indx = np.argsort(thetas)
thetas = thetas[indx]
if dtheta_min != 0:
dropped_angles = dropped_angles[indx]
plt.plot(thetas, 'ro--', label='Viewing angles')
if dtheta_min != 0:
plt.plot(np.arange(len(thetas))[dropped_angles], thetas[dropped_angles], 'go', label='Dropped angles')
plt.xlabel('Viewing angle index')
plt.ylabel('Viewing angle (Rad)')
plt.legend(loc='upper center', ncol=2, bbox_to_anchor=(0.5,1.12), fancybox=True, shadow=True)
plt.show()
| {
"content_hash": "2c3d8753aa3e9c25cb9315ec626a8973",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 110,
"avg_line_length": 35.75609756097561,
"alnum_prop": 0.6091405184174625,
"repo_name": "decarlof/timbir",
"id": "296c04d4efcc77f67b59d9ed6f89b5e8a8995bff",
"size": "2932",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "doc/collection/APS_32ID/interlaced/TIMBIR_angles.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "508609"
},
{
"name": "C++",
"bytes": "7786"
},
{
"name": "Makefile",
"bytes": "9349"
},
{
"name": "Shell",
"bytes": "2077"
}
],
"symlink_target": ""
} |
import collections
import copy
import warnings
from io import BytesIO, StringIO
import numpy as np
import pytest
from astropy.io import fits
from astropy.io.fits.card import _pad
from astropy.io.fits.header import _pad_length
from astropy.io.fits.util import encode_ascii
from astropy.io.fits.verify import VerifyError, VerifyWarning
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
from .conftest import FitsTestCase
def test_shallow_copy():
"""Make sure that operations on a shallow copy do not alter the original.
#4990."""
original_header = fits.Header([('a', 1), ('b', 1)])
copied_header = copy.copy(original_header)
# Modifying the original dict should not alter the copy
original_header['c'] = 100
assert 'c' not in copied_header
# and changing the copy should not change the original.
copied_header['a'] = 0
assert original_header['a'] == 1
def test_init_with_header():
"""Make sure that creating a Header from another Header makes a copy if
copy is True."""
original_header = fits.Header([('a', 10)])
new_header = fits.Header(original_header, copy=True)
original_header['a'] = 20
assert new_header['a'] == 10
new_header['a'] = 0
assert original_header['a'] == 20
def test_init_with_dict():
dict1 = {'a': 11, 'b': 12, 'c': 13, 'd': 14, 'e': 15}
h1 = fits.Header(dict1)
for i in dict1:
assert dict1[i] == h1[i]
def test_init_with_ordereddict():
    # Create a list of tuples, each consisting of a letter and its index
list1 = [(i, j) for j, i in enumerate('abcdefghijklmnopqrstuvwxyz')]
# Create an ordered dictionary and a header from this dictionary
dict1 = collections.OrderedDict(list1)
h1 = fits.Header(dict1)
    # Check that the order of the initial list is preserved
assert all(h1[val] == list1[i][1] for i, val in enumerate(h1))
class TestHeaderFunctions(FitsTestCase):
"""Test Header and Card objects."""
def test_rename_keyword(self):
"""Test renaming keyword with rename_keyword."""
header = fits.Header([('A', 'B', 'C'), ('D', 'E', 'F')])
header.rename_keyword('A', 'B')
assert 'A' not in header
assert 'B' in header
assert header[0] == 'B'
assert header['B'] == 'B'
assert header.comments['B'] == 'C'
@pytest.mark.parametrize('key', ['A', 'a'])
def test_indexing_case(self, key):
"""Check that indexing is case insensitive"""
header = fits.Header([('A', 'B', 'C'), ('D', 'E', 'F')])
assert key in header
assert header[key] == 'B'
assert header.get(key) == 'B'
assert header.index(key) == 0
assert header.comments[key] == 'C'
assert header.count(key) == 1
header.remove(key, ignore_missing=False)
def test_card_constructor_default_args(self):
"""Test Card constructor with default argument values."""
c = fits.Card()
assert '' == c.keyword
def test_card_from_bytes(self):
"""
Test loading a Card from a `bytes` object (assuming latin-1 encoding).
"""
c = fits.Card.fromstring(b"ABC = 'abc'")
assert c.keyword == 'ABC'
assert c.value == 'abc'
def test_string_value_card(self):
"""Test Card constructor with string value"""
c = fits.Card('abc', '<8 ch')
assert str(c) == _pad("ABC = '<8 ch '")
c = fits.Card('nullstr', '')
assert str(c) == _pad("NULLSTR = ''")
def test_boolean_value_card(self):
"""Test Card constructor with boolean value"""
c = fits.Card("abc", True)
assert str(c) == _pad("ABC = T")
c = fits.Card.fromstring('ABC = F')
assert c.value is False
def test_long_integer_value_card(self):
"""Test Card constructor with long integer value"""
c = fits.Card('long_int', -467374636747637647347374734737437)
assert str(c) == _pad("LONG_INT= -467374636747637647347374734737437")
def test_floating_point_value_card(self):
"""Test Card constructor with floating point value"""
c = fits.Card('floatnum', -467374636747637647347374734737437.)
if (str(c) != _pad("FLOATNUM= -4.6737463674763E+32") and
str(c) != _pad("FLOATNUM= -4.6737463674763E+032")):
assert str(c) == _pad("FLOATNUM= -4.6737463674763E+32")
def test_complex_value_card(self):
"""Test Card constructor with complex value"""
c = fits.Card('abc',
(1.2345377437887837487e88 + 6324767364763746367e-33j))
f1 = _pad("ABC = (1.23453774378878E+88, 6.32476736476374E-15)")
f2 = _pad("ABC = (1.2345377437887E+088, 6.3247673647637E-015)")
f3 = _pad("ABC = (1.23453774378878E+88, 6.32476736476374E-15)")
if str(c) != f1 and str(c) != f2:
assert str(c) == f3
def test_card_image_constructed_too_long(self):
"""Test that over-long cards truncate the comment"""
# card image constructed from key/value/comment is too long
# (non-string value)
c = fits.Card('abc', 9, 'abcde' * 20)
with pytest.warns(fits.verify.VerifyWarning):
assert (str(c) ==
"ABC = 9 "
"/ abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeab")
c = fits.Card('abc', 'a' * 68, 'abcdefg')
with pytest.warns(fits.verify.VerifyWarning):
assert str(c) == f"ABC = '{'a' * 68}'"
def test_constructor_filter_illegal_data_structures(self):
"""Test that Card constructor raises exceptions on bad arguments"""
pytest.raises(ValueError, fits.Card, ('abc',), {'value': (2, 3)})
pytest.raises(ValueError, fits.Card, 'key', [], 'comment')
def test_keyword_too_long(self):
"""Test that long Card keywords are allowed, but with a warning"""
pytest.warns(UserWarning, fits.Card, 'abcdefghi', 'long')
def test_illegal_characters_in_key(self):
"""
Test that Card constructor allows illegal characters in the keyword,
but creates a HIERARCH card.
"""
# This test used to check that a ValueError was raised, because a
# keyword like 'abc+' was simply not allowed. Now it should create a
# HIERARCH card.
with pytest.warns(AstropyUserWarning) as w:
c = fits.Card('abc+', 9)
assert len(w) == 1
assert c.image == _pad('HIERARCH abc+ = 9')
def test_add_history(self):
header = fits.Header([('A', 'B', 'C'), ('HISTORY', 1),
('HISTORY', 2), ('HISTORY', 3), ('', '', ''),
('', '', '')])
header.add_history(4)
# One of the blanks should get used, so the length shouldn't change
assert len(header) == 6
assert header.cards[4].value == 4
assert header['HISTORY'] == [1, 2, 3, 4]
assert repr(header['HISTORY']) == '1\n2\n3\n4'
header.add_history(0, after='A')
assert len(header) == 6
assert header.cards[1].value == 0
assert header['HISTORY'] == [0, 1, 2, 3, 4]
def test_add_blank(self):
header = fits.Header([('A', 'B', 'C'), ('', 1), ('', 2), ('', 3),
('', '', ''), ('', '', '')])
header.add_blank(4)
# This time a new blank should be added, and the existing blanks don't
# get used... (though this is really kinda sketchy--there's a
# distinction between truly blank cards, and cards with blank keywords
        # that isn't currently made in the code)
assert len(header) == 7
assert header.cards[6].value == 4
assert header[''] == [1, 2, 3, '', '', 4]
assert repr(header['']) == '1\n2\n3\n\n\n4'
header.add_blank(0, after='A')
assert len(header) == 8
assert header.cards[1].value == 0
assert header[''] == [0, 1, 2, 3, '', '', 4]
header[''] = 5
header[' '] = 6
assert header[''] == [0, 1, 2, 3, '', '', 4, 5, 6]
assert header[' '] == [0, 1, 2, 3, '', '', 4, 5, 6]
def test_update(self):
class FakeHeader(list):
def keys(self):
return [l[0] for l in self]
def __getitem__(self, key):
return next(l[1:] for l in self if l[0] == key)
header = fits.Header()
header.update({'FOO': ('BAR', 'BAZ')})
header.update(FakeHeader([('A', 1), ('B', 2, 'comment')]))
assert set(header.keys()) == {'FOO', 'A', 'B'}
assert header.comments['B'] == 'comment'
# test that comments are preserved
tmphdr = fits.Header()
tmphdr['HELLO'] = (1, 'this is a comment')
header.update(tmphdr)
assert set(header.keys()) == {'FOO', 'A', 'B', 'HELLO'}
assert header.comments['HELLO'] == 'this is a comment'
header.update(NAXIS1=100, NAXIS2=100)
assert set(header.keys()) == {'FOO', 'A', 'B', 'HELLO', 'NAXIS1', 'NAXIS2'}
assert set(header.values()) == {'BAR', 1, 2, 100, 100}
def test_update_comment(self):
hdul = fits.open(self.data('arange.fits'))
hdul[0].header.update({'FOO': ('BAR', 'BAZ')})
assert hdul[0].header['FOO'] == 'BAR'
assert hdul[0].header.comments['FOO'] == 'BAZ'
with pytest.raises(ValueError):
hdul[0].header.update({'FOO2': ('BAR', 'BAZ', 'EXTRA')})
hdul.writeto(self.temp('test.fits'))
hdul.close()
hdul = fits.open(self.temp('test.fits'), mode='update')
hdul[0].header.comments['FOO'] = 'QUX'
hdul.close()
hdul = fits.open(self.temp('test.fits'))
assert hdul[0].header.comments['FOO'] == 'QUX'
hdul[0].header.add_comment(0, after='FOO')
assert str(hdul[0].header.cards[-1]).strip() == 'COMMENT 0'
hdul.close()
def test_commentary_cards(self):
# commentary cards
val = "A commentary card's value has no quotes around it."
c = fits.Card("HISTORY", val)
assert str(c) == _pad('HISTORY ' + val)
val = "A commentary card has no comment."
c = fits.Card("COMMENT", val, "comment")
assert str(c) == _pad('COMMENT ' + val)
def test_commentary_card_created_by_fromstring(self):
# commentary card created by fromstring()
c = fits.Card.fromstring(
"COMMENT card has no comments. "
"/ text after slash is still part of the value.")
assert (c.value == 'card has no comments. '
'/ text after slash is still part of the value.')
assert c.comment == ''
def test_commentary_card_will_not_parse_numerical_value(self):
# commentary card will not parse the numerical value
c = fits.Card.fromstring("HISTORY (1, 2)")
assert str(c) == _pad("HISTORY (1, 2)")
def test_equal_sign_after_column8(self):
# equal sign after column 8 of a commentary card will be part of the
# string value
c = fits.Card.fromstring("HISTORY = (1, 2)")
assert str(c) == _pad("HISTORY = (1, 2)")
def test_blank_keyword(self):
c = fits.Card('', ' / EXPOSURE INFORMATION')
assert str(c) == _pad(' / EXPOSURE INFORMATION')
c = fits.Card.fromstring(str(c))
assert c.keyword == ''
assert c.value == ' / EXPOSURE INFORMATION'
def test_specify_undefined_value(self):
# this is how to specify an undefined value
c = fits.Card("undef", fits.card.UNDEFINED)
assert str(c) == _pad("UNDEF =")
def test_complex_number_using_string_input(self):
# complex number using string input
c = fits.Card.fromstring('ABC = (8, 9)')
assert str(c) == _pad("ABC = (8, 9)")
def test_fixable_non_standard_fits_card(self, capsys):
# fixable non-standard FITS card will keep the original format
c = fits.Card.fromstring('abc = + 2.1 e + 12')
assert c.value == 2100000000000.0
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert str(c) == _pad("ABC = +2.1E+12")
def test_fixable_non_fsc(self):
        # fixable non-FSC: if the card is not parsable, its value will be
        # assumed to be a string and everything after the first slash will
        # be the comment
c = fits.Card.fromstring(
"no_quote= this card's value has no quotes "
"/ let's also try the comment")
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert (str(c) == "NO_QUOTE= 'this card''s value has no quotes' "
"/ let's also try the comment ")
def test_undefined_value_using_string_input(self):
# undefined value using string input
c = fits.Card.fromstring('ABC = ')
assert str(c) == _pad("ABC =")
def test_undefined_keys_values(self):
header = fits.Header()
header['FOO'] = 'BAR'
header['UNDEF'] = None
assert list(header.values()) == ['BAR', None]
assert list(header.items()) == [('FOO', 'BAR'), ('UNDEF', None)]
def test_mislocated_equal_sign(self, capsys):
# test mislocated "=" sign
c = fits.Card.fromstring('XYZ= 100')
assert c.keyword == 'XYZ'
assert c.value == 100
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert str(c) == _pad("XYZ = 100")
def test_equal_only_up_to_column_10(self, capsys):
# the test of "=" location is only up to column 10
# This test used to check if Astropy rewrote this card to a new format,
# something like "HISTO = '= (1, 2)". But since ticket #109 if the
# format is completely wrong we don't make any assumptions and the card
# should be left alone
c = fits.Card.fromstring("HISTO = (1, 2)")
with pytest.warns(AstropyUserWarning,
match=r'header keyword is invalid'):
assert str(c) == _pad("HISTO = (1, 2)")
# Likewise this card should just be left in its original form and
# we shouldn't guess how to parse it or rewrite it.
c = fits.Card.fromstring(" HISTORY (1, 2)")
with pytest.warns(AstropyUserWarning,
match=r'header keyword is invalid'):
assert str(c) == _pad(" HISTORY (1, 2)")
def test_verify_invalid_equal_sign(self):
# verification
c = fits.Card.fromstring('ABC= a6')
with pytest.warns(AstropyUserWarning) as w:
c.verify()
err_text1 = ("Card 'ABC' is not FITS standard (equal sign not at "
"column 8)")
err_text2 = ("Card 'ABC' is not FITS standard (invalid value "
"string: 'a6'")
assert len(w) == 4
assert err_text1 in str(w[1].message)
assert err_text2 in str(w[2].message)
def test_fix_invalid_equal_sign(self):
fix_text = "Fixed 'ABC' card to meet the FITS standard."
c = fits.Card.fromstring('ABC= a6')
with pytest.warns(AstropyUserWarning, match=fix_text) as w:
c.verify('fix')
assert len(w) == 4
assert str(c) == _pad("ABC = 'a6 '")
def test_long_string_value(self):
# test long string value
c = fits.Card('abc', 'long string value ' * 10, 'long comment ' * 10)
assert (str(c) ==
"ABC = 'long string value long string value long string value long string &' "
"CONTINUE 'value long string value long string value long string value long &' "
"CONTINUE 'string value long string value long string value &' "
"CONTINUE '&' / long comment long comment long comment long comment long "
"CONTINUE '&' / comment long comment long comment long comment long comment "
"CONTINUE '' / long comment ")
def test_long_string_value_with_multiple_long_words(self):
"""
Regression test for https://github.com/astropy/astropy/issues/11298
"""
c = fits.Card('WHATEVER',
'SuperCalibrationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_'
'03)-AAABBBCCC.n.h5 SuperNavigationParameters_XXXX_YYYY'
'_ZZZZZ_KK_01_02_03)-AAABBBCCC.n.xml')
assert (str(c) ==
"WHATEVER= 'SuperCalibrationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_03)-AAABBBCCC.n&'"
"CONTINUE '.h5 &' "
"CONTINUE 'SuperNavigationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_03)-AAABBBCCC.n.&'"
"CONTINUE 'xml' ")
def test_long_unicode_string(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/1
So long as a unicode string can be converted to ASCII it should have no
different behavior in this regard from a byte string.
"""
h1 = fits.Header()
h1['TEST'] = 'abcdefg' * 30
h2 = fits.Header()
h2['TEST'] = 'abcdefg' * 30
assert str(h1) == str(h2)
def test_long_string_repr(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/193
Ensure that the __repr__() for cards represented with CONTINUE cards is
split across multiple lines (broken at each *physical* card).
"""
header = fits.Header()
header['TEST1'] = ('Regular value', 'Regular comment')
header['TEST2'] = ('long string value ' * 10, 'long comment ' * 10)
header['TEST3'] = ('Regular value', 'Regular comment')
assert (repr(header).splitlines() ==
[str(fits.Card('TEST1', 'Regular value', 'Regular comment')),
"TEST2 = 'long string value long string value long string value long string &' ",
"CONTINUE 'value long string value long string value long string value long &' ",
"CONTINUE 'string value long string value long string value &' ",
"CONTINUE '&' / long comment long comment long comment long comment long ",
"CONTINUE '&' / comment long comment long comment long comment long comment ",
"CONTINUE '' / long comment ",
str(fits.Card('TEST3', 'Regular value', 'Regular comment'))])
def test_blank_keyword_long_value(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/194
Test that a blank keyword ('') can be assigned a too-long value that is
continued across multiple cards with blank keywords, just like COMMENT
and HISTORY cards.
"""
value = 'long string value ' * 10
header = fits.Header()
header[''] = value
assert len(header) == 3
assert ' '.join(header['']) == value.rstrip()
# Ensure that this works like other commentary keywords
header['COMMENT'] = value
header['HISTORY'] = value
assert header['COMMENT'] == header['HISTORY']
assert header['COMMENT'] == header['']
def test_long_string_from_file(self):
c = fits.Card('abc', 'long string value ' * 10, 'long comment ' * 10)
hdu = fits.PrimaryHDU()
hdu.header.append(c)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
c = hdul[0].header.cards['abc']
hdul.close()
assert (str(c) ==
"ABC = 'long string value long string value long string value long string &' "
"CONTINUE 'value long string value long string value long string value long &' "
"CONTINUE 'string value long string value long string value &' "
"CONTINUE '&' / long comment long comment long comment long comment long "
"CONTINUE '&' / comment long comment long comment long comment long comment "
"CONTINUE '' / long comment ")
def test_word_in_long_string_too_long(self):
# if a word in a long string is too long, it will be cut in the middle
c = fits.Card('abc', 'longstringvalue' * 10, 'longcomment' * 10)
assert (str(c) ==
"ABC = 'longstringvaluelongstringvaluelongstringvaluelongstringvaluelongstr&'"
"CONTINUE 'ingvaluelongstringvaluelongstringvaluelongstringvaluelongstringvalu&'"
"CONTINUE 'elongstringvalue&' "
"CONTINUE '&' / longcommentlongcommentlongcommentlongcommentlongcommentlongcomme"
"CONTINUE '' / ntlongcommentlongcommentlongcommentlongcomment ")
def test_long_string_value_via_fromstring(self, capsys):
# long string value via fromstring() method
c = fits.Card.fromstring(
_pad("abc = 'longstring''s testing & ' "
"/ comments in line 1") +
_pad("continue 'continue with long string but without the "
"ampersand at the end' /") +
_pad("continue 'continue must have string value (with quotes)' "
"/ comments with ''. "))
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert (str(c) ==
"ABC = 'longstring''s testing continue with long string but without the &' "
"CONTINUE 'ampersand at the endcontinue must have string value (with quotes)&' "
"CONTINUE '' / comments in line 1 comments with ''. ")
def test_continue_card_with_equals_in_value(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/117
"""
c = fits.Card.fromstring(
_pad("EXPR = '/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_10.fits * &'") +
_pad("CONTINUE '5.87359e-12 * MWAvg(Av=0.12)&'") +
_pad("CONTINUE '&' / pysyn expression"))
assert c.keyword == 'EXPR'
assert (c.value ==
'/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_10.fits '
'* 5.87359e-12 * MWAvg(Av=0.12)')
assert c.comment == 'pysyn expression'
def test_final_continue_card_lacks_ampersand(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3282
"""
h = fits.Header()
h['SVALUE'] = 'A' * 69
assert repr(h).splitlines()[-1] == _pad("CONTINUE 'AA'")
def test_final_continue_card_ampersand_removal_on_long_comments(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3282
"""
c = fits.Card('TEST', 'long value' * 10, 'long comment &' * 10)
assert (str(c) ==
"TEST = 'long valuelong valuelong valuelong valuelong valuelong valuelong &' "
"CONTINUE 'valuelong valuelong valuelong value&' "
"CONTINUE '&' / long comment &long comment &long comment &long comment &long "
"CONTINUE '&' / comment &long comment &long comment &long comment &long comment "
"CONTINUE '' / &long comment & ")
def test_hierarch_card_creation(self):
# Test automatic upgrade to hierarch card
with pytest.warns(AstropyUserWarning, match='HIERARCH card will be created') as w:
c = fits.Card('ESO INS SLIT2 Y1FRML',
'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)')
assert len(w) == 1
assert (str(c) ==
"HIERARCH ESO INS SLIT2 Y1FRML= "
"'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)'")
# Test manual creation of hierarch card
c = fits.Card('hierarch abcdefghi', 10)
assert str(c) == _pad("HIERARCH abcdefghi = 10")
c = fits.Card('HIERARCH ESO INS SLIT2 Y1FRML',
'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)')
assert (str(c) ==
"HIERARCH ESO INS SLIT2 Y1FRML= "
"'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)'")
def test_hierarch_with_abbrev_value_indicator(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/5
"""
c = fits.Card.fromstring("HIERARCH key.META_4='calFileVersion'")
assert c.keyword == 'key.META_4'
assert c.value == 'calFileVersion'
assert c.comment == ''
def test_hierarch_not_warn(self):
"""Check that compressed image headers do not issue HIERARCH warnings.
"""
filename = fits.util.get_testdata_filepath('compressed_image.fits')
with fits.open(filename) as hdul:
header = hdul[1].header
with warnings.catch_warnings(record=True) as warning_list:
header["HIERARCH LONG KEYWORD"] = 42
assert len(warning_list) == 0
assert header["LONG KEYWORD"] == 42
assert header["HIERARCH LONG KEYWORD"] == 42
# Check that it still warns if we do not use HIERARCH
with pytest.warns(fits.verify.VerifyWarning,
match=r'greater than 8 characters'):
header["LONG KEYWORD2"] = 1
assert header["LONG KEYWORD2"] == 1
def test_hierarch_keyword_whitespace(self):
"""
Regression test for
https://github.com/spacetelescope/PyFITS/issues/6
Make sure any leading or trailing whitespace around HIERARCH
keywords is stripped from the actual keyword value.
"""
c = fits.Card.fromstring(
"HIERARCH key.META_4 = 'calFileVersion'")
assert c.keyword == 'key.META_4'
assert c.value == 'calFileVersion'
assert c.comment == ''
# Test also with creation via the Card constructor
c = fits.Card('HIERARCH key.META_4', 'calFileVersion')
assert c.keyword == 'key.META_4'
assert c.value == 'calFileVersion'
assert c.comment == ''
def test_verify_mixed_case_hierarch(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/7
Assures that HIERARCH keywords with lower-case characters and other
normally invalid keyword characters are not considered invalid.
"""
c = fits.Card('HIERARCH WeirdCard.~!@#_^$%&', 'The value', 'a comment')
# This should not raise any exceptions
c.verify('exception')
assert c.keyword == 'WeirdCard.~!@#_^$%&'
assert c.value == 'The value'
assert c.comment == 'a comment'
# Test also the specific case from the original bug report
header = fits.Header([
('simple', True),
('BITPIX', 8),
('NAXIS', 0),
('EXTEND', True, 'May contain datasets'),
('HIERARCH key.META_0', 'detRow')
])
hdu = fits.PrimaryHDU(header=header)
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
header2 = hdul[0].header
assert (str(header.cards[header.index('key.META_0')]) ==
str(header2.cards[header2.index('key.META_0')]))
def test_missing_keyword(self):
"""Test that accessing a non-existent keyword raises a KeyError."""
header = fits.Header()
# De-referencing header through the inline function should behave
# identically to accessing it in the pytest.raises context below.
pytest.raises(KeyError, lambda k: header[k], 'NAXIS')
# Test exception with message
with pytest.raises(KeyError, match=r"Keyword 'NAXIS' not found."):
header['NAXIS']
def test_hierarch_card_lookup(self):
header = fits.Header()
header['hierarch abcdefghi'] = 10
assert 'abcdefghi' in header
assert header['abcdefghi'] == 10
# This used to be assert_false, but per ticket
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/155 hierarch keywords
# should be treated case-insensitively when performing lookups
assert 'ABCDEFGHI' in header
def test_hierarch_card_delete(self):
header = fits.Header()
header['hierarch abcdefghi'] = 10
del header['hierarch abcdefghi']
def test_hierarch_card_insert_delete(self):
header = fits.Header()
with pytest.warns(fits.verify.VerifyWarning,
match=r'greater than 8 characters'):
header['abcdefghi'] = 10
header['abcdefgh'] = 10
header['abcdefg'] = 10
with pytest.warns(fits.verify.VerifyWarning,
match=r'greater than 8 characters'):
header.insert(2, ('abcdefghij', 10))
del header['abcdefghij']
with pytest.warns(fits.verify.VerifyWarning,
match=r'greater than 8 characters'):
header.insert(2, ('abcdefghij', 10))
del header[2]
assert list(header.keys())[2] == 'abcdefg'.upper()
def test_hierarch_create_and_update(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/158
Tests several additional use cases for working with HIERARCH cards.
"""
msg = 'a HIERARCH card will be created'
header = fits.Header()
with pytest.warns(VerifyWarning) as w:
header.update({'HIERARCH BLAH BLAH': 'TESTA'})
assert len(w) == 0
assert 'BLAH BLAH' in header
assert header['BLAH BLAH'] == 'TESTA'
header.update({'HIERARCH BLAH BLAH': 'TESTB'})
assert len(w) == 0
assert header['BLAH BLAH'], 'TESTB'
# Update without explicitly stating 'HIERARCH':
header.update({'BLAH BLAH': 'TESTC'})
assert len(w) == 1
assert len(header) == 1
assert header['BLAH BLAH'], 'TESTC'
# Test case-insensitivity
header.update({'HIERARCH blah blah': 'TESTD'})
assert len(w) == 1
assert len(header) == 1
assert header['blah blah'], 'TESTD'
header.update({'blah blah': 'TESTE'})
assert len(w) == 2
assert len(header) == 1
assert header['blah blah'], 'TESTE'
# Create a HIERARCH card > 8 characters without explicitly stating
# 'HIERARCH'
header.update({'BLAH BLAH BLAH': 'TESTA'})
assert len(w) == 3
assert msg in str(w[0].message)
header.update({'HIERARCH BLAH BLAH BLAH': 'TESTB'})
assert len(w) == 3
assert header['BLAH BLAH BLAH'], 'TESTB'
# Update without explicitly stating 'HIERARCH':
header.update({'BLAH BLAH BLAH': 'TESTC'})
assert len(w) == 4
assert header['BLAH BLAH BLAH'], 'TESTC'
# Test case-insensitivity
header.update({'HIERARCH blah blah blah': 'TESTD'})
assert len(w) == 4
assert header['blah blah blah'], 'TESTD'
header.update({'blah blah blah': 'TESTE'})
assert len(w) == 5
assert header['blah blah blah'], 'TESTE'
def test_short_hierarch_create_and_update(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/158
Tests several additional use cases for working with HIERARCH cards,
specifically where the keyword is fewer than 8 characters, but contains
invalid characters such that it can only be created as a HIERARCH card.
"""
msg = 'a HIERARCH card will be created'
header = fits.Header()
with pytest.warns(VerifyWarning) as w:
header.update({'HIERARCH BLA BLA': 'TESTA'})
assert len(w) == 0
assert 'BLA BLA' in header
assert header['BLA BLA'] == 'TESTA'
header.update({'HIERARCH BLA BLA': 'TESTB'})
assert len(w) == 0
assert header['BLA BLA'], 'TESTB'
# Update without explicitly stating 'HIERARCH':
header.update({'BLA BLA': 'TESTC'})
assert len(w) == 1
assert header['BLA BLA'], 'TESTC'
# Test case-insensitivity
header.update({'HIERARCH bla bla': 'TESTD'})
assert len(w) == 1
assert len(header) == 1
assert header['bla bla'], 'TESTD'
header.update({'bla bla': 'TESTE'})
assert len(w) == 2
assert len(header) == 1
assert header['bla bla'], 'TESTE'
header = fits.Header()
with pytest.warns(VerifyWarning) as w:
# Create a HIERARCH card containing invalid characters without
# explicitly stating 'HIERARCH'
header.update({'BLA BLA': 'TESTA'})
print([x.category for x in w])
assert len(w) == 1
assert msg in str(w[0].message)
header.update({'HIERARCH BLA BLA': 'TESTB'})
assert len(w) == 1
assert header['BLA BLA'], 'TESTB'
# Update without explicitly stating 'HIERARCH':
header.update({'BLA BLA': 'TESTC'})
assert len(w) == 2
assert header['BLA BLA'], 'TESTC'
# Test case-insensitivity
header.update({'HIERARCH bla bla': 'TESTD'})
assert len(w) == 2
assert len(header) == 1
assert header['bla bla'], 'TESTD'
header.update({'bla bla': 'TESTE'})
assert len(w) == 3
assert len(header) == 1
assert header['bla bla'], 'TESTE'
def test_header_setitem_invalid(self):
header = fits.Header()
def test():
header['FOO'] = ('bar', 'baz', 'qux')
pytest.raises(ValueError, test)
def test_header_setitem_1tuple(self):
header = fits.Header()
header['FOO'] = ('BAR',)
header['FOO2'] = (None,)
assert header['FOO'] == 'BAR'
assert header['FOO2'] is None
assert header[0] == 'BAR'
assert header.comments[0] == ''
assert header.comments['FOO'] == ''
def test_header_setitem_2tuple(self):
header = fits.Header()
header['FOO'] = ('BAR', 'BAZ')
header['FOO2'] = (None, None)
assert header['FOO'] == 'BAR'
assert header['FOO2'] is None
assert header[0] == 'BAR'
assert header.comments[0] == 'BAZ'
assert header.comments['FOO'] == 'BAZ'
assert header.comments['FOO2'] == ''
def test_header_set_value_to_none(self):
"""
Setting the value of a card to None should simply give that card an
undefined value. Undefined value should map to None.
"""
header = fits.Header()
header['FOO'] = 'BAR'
assert header['FOO'] == 'BAR'
header['FOO'] = None
assert header['FOO'] is None
# Create a header that contains an undefined value and a defined
# value.
hstr = "UNDEF = \nDEFINED = 42"
header = fits.Header.fromstring(hstr, sep='\n')
# Explicitly add a card with an UNDEFINED value
c = fits.Card("UNDEF2", fits.card.UNDEFINED)
header.extend([c])
# And now assign an undefined value to the header through setitem
header['UNDEF3'] = fits.card.UNDEFINED
# Tuple assignment
header.append(("UNDEF5", None, "Undefined value"), end=True)
header.append("UNDEF6")
assert header['DEFINED'] == 42
assert header['UNDEF'] is None
assert header['UNDEF2'] is None
assert header['UNDEF3'] is None
assert header['UNDEF5'] is None
assert header['UNDEF6'] is None
# Assign an undefined value to a new card
header['UNDEF4'] = None
# Overwrite an existing value with None
header["DEFINED"] = None
# All headers now should be undefined
for c in header.cards:
assert c.value == fits.card.UNDEFINED
def test_set_comment_only(self):
header = fits.Header([('A', 'B', 'C')])
header.set('A', comment='D')
assert header['A'] == 'B'
assert header.comments['A'] == 'D'
def test_header_iter(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
assert list(header) == ['A', 'C']
def test_header_slice(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
newheader = header[1:]
assert len(newheader) == 2
assert 'A' not in newheader
assert 'C' in newheader
assert 'E' in newheader
newheader = header[::-1]
assert len(newheader) == 3
assert newheader[0] == 'F'
assert newheader[1] == 'D'
assert newheader[2] == 'B'
newheader = header[::2]
assert len(newheader) == 2
assert 'A' in newheader
assert 'C' not in newheader
assert 'E' in newheader
def test_header_slice_assignment(self):
"""
Assigning to a slice should just assign new values to the cards
included in the slice.
"""
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
# Test assigning slice to the same value; this works similarly to numpy
# arrays
header[1:] = 1
assert header[1] == 1
assert header[2] == 1
# Though strings are iterable they should be treated as a scalar value
header[1:] = 'GH'
assert header[1] == 'GH'
assert header[2] == 'GH'
# Now assign via an iterable
header[1:] = ['H', 'I']
assert header[1] == 'H'
assert header[2] == 'I'
def test_header_slice_delete(self):
"""Test deleting a slice of cards from the header."""
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
del header[1:]
assert len(header) == 1
assert header[0] == 'B'
del header[:]
assert len(header) == 0
def test_wildcard_slice(self):
"""Test selecting a subsection of a header via wildcard matching."""
header = fits.Header([('ABC', 0), ('DEF', 1), ('ABD', 2)])
newheader = header['AB*']
assert len(newheader) == 2
assert newheader[0] == 0
assert newheader[1] == 2
def test_wildcard_with_hyphen(self):
"""
Regression test for issue where wildcards did not work on keywords
containing hyphens.
"""
header = fits.Header([('DATE', 1), ('DATE-OBS', 2), ('DATE-FOO', 3)])
assert len(header['DATE*']) == 3
assert len(header['DATE?*']) == 2
assert len(header['DATE-*']) == 2
def test_wildcard_slice_assignment(self):
"""Test assigning to a header slice selected via wildcard matching."""
header = fits.Header([('ABC', 0), ('DEF', 1), ('ABD', 2)])
# Test assigning slice to the same value; this works similarly to numpy
# arrays
header['AB*'] = 1
assert header[0] == 1
assert header[2] == 1
# Though strings are iterable they should be treated as a scalar value
header['AB*'] = 'GH'
assert header[0] == 'GH'
assert header[2] == 'GH'
# Now assign via an iterable
header['AB*'] = ['H', 'I']
assert header[0] == 'H'
assert header[2] == 'I'
def test_wildcard_slice_deletion(self):
"""Test deleting cards from a header that match a wildcard pattern."""
header = fits.Header([('ABC', 0), ('DEF', 1), ('ABD', 2)])
del header['AB*']
assert len(header) == 1
assert header[0] == 1
def test_header_history(self):
header = fits.Header([('ABC', 0), ('HISTORY', 1), ('HISTORY', 2),
('DEF', 3), ('HISTORY', 4), ('HISTORY', 5)])
assert header['HISTORY'] == [1, 2, 4, 5]
def test_header_clear(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
header.clear()
assert 'A' not in header
assert 'C' not in header
assert len(header) == 0
@pytest.mark.parametrize('fitsext', [fits.ImageHDU(), fits.CompImageHDU()])
def test_header_clear_write(self, fitsext):
hdulist = fits.HDUList([fits.PrimaryHDU(), fitsext])
hdulist[1].header['FOO'] = 'BAR'
hdulist[1].header.clear()
with pytest.raises(VerifyError) as err:
hdulist.writeto(self.temp('temp.fits'), overwrite=True)
err_msg = "'XTENSION' card does not exist."
assert err_msg in str(err.value)
def test_header_fromkeys(self):
header = fits.Header.fromkeys(['A', 'B'])
assert 'A' in header
assert header['A'] is None
assert header.comments['A'] == ''
assert 'B' in header
assert header['B'] is None
assert header.comments['B'] == ''
def test_header_fromkeys_with_value(self):
header = fits.Header.fromkeys(['A', 'B'], 'C')
assert 'A' in header
assert header['A'] == 'C'
assert header.comments['A'] == ''
assert 'B' in header
assert header['B'] == 'C'
assert header.comments['B'] == ''
def test_header_fromkeys_with_value_and_comment(self):
header = fits.Header.fromkeys(['A'], ('B', 'C'))
assert 'A' in header
assert header['A'] == 'B'
assert header.comments['A'] == 'C'
def test_header_fromkeys_with_duplicates(self):
header = fits.Header.fromkeys(['A', 'B', 'A'], 'C')
assert 'A' in header
assert ('A', 0) in header
assert ('A', 1) in header
assert ('A', 2) not in header
assert header[0] == 'C'
assert header['A'] == 'C'
assert header[('A', 0)] == 'C'
assert header[2] == 'C'
assert header[('A', 1)] == 'C'
def test_header_items(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
assert list(header.items()) == [('A', 'B'), ('C', 'D')]
def test_header_iterkeys(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
for a, b in zip(header.keys(), header):
assert a == b
def test_header_itervalues(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
for a, b in zip(header.values(), ['B', 'D']):
assert a == b
def test_header_keys(self):
with fits.open(self.data('arange.fits')) as hdul:
assert (list(hdul[0].header) ==
['SIMPLE', 'BITPIX', 'NAXIS', 'NAXIS1', 'NAXIS2', 'NAXIS3',
'EXTEND'])
def test_header_list_like_pop(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F'),
('G', 'H')])
last = header.pop()
assert last == 'H'
assert len(header) == 3
assert list(header) == ['A', 'C', 'E']
mid = header.pop(1)
assert mid == 'D'
assert len(header) == 2
assert list(header) == ['A', 'E']
first = header.pop(0)
assert first == 'B'
assert len(header) == 1
assert list(header) == ['E']
pytest.raises(IndexError, header.pop, 42)
def test_header_dict_like_pop(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F'),
('G', 'H')])
pytest.raises(TypeError, header.pop, 'A', 'B', 'C')
last = header.pop('G')
assert last == 'H'
assert len(header) == 3
assert list(header) == ['A', 'C', 'E']
mid = header.pop('C')
assert mid == 'D'
assert len(header) == 2
assert list(header) == ['A', 'E']
first = header.pop('A')
assert first == 'B'
assert len(header) == 1
assert list(header) == ['E']
default = header.pop('X', 'Y')
assert default == 'Y'
assert len(header) == 1
pytest.raises(KeyError, header.pop, 'X')
def test_popitem(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
keyword, value = header.popitem()
assert keyword not in header
assert len(header) == 2
keyword, value = header.popitem()
assert keyword not in header
assert len(header) == 1
keyword, value = header.popitem()
assert keyword not in header
assert len(header) == 0
pytest.raises(KeyError, header.popitem)
def test_setdefault(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
assert header.setdefault('A') == 'B'
assert header.setdefault('C') == 'D'
assert header.setdefault('E') == 'F'
assert len(header) == 3
assert header.setdefault('G', 'H') == 'H'
assert len(header) == 4
assert 'G' in header
assert header.setdefault('G', 'H') == 'H'
assert len(header) == 4
def test_update_from_dict(self):
"""
Test adding new cards and updating existing cards from a dict using
Header.update()
"""
header = fits.Header([('A', 'B'), ('C', 'D')])
header.update({'A': 'E', 'F': 'G'})
assert header['A'] == 'E'
assert header[0] == 'E'
assert 'F' in header
assert header['F'] == 'G'
assert header[-1] == 'G'
# Same as above but this time pass the update dict as keyword arguments
header = fits.Header([('A', 'B'), ('C', 'D')])
header.update(A='E', F='G')
assert header['A'] == 'E'
assert header[0] == 'E'
assert 'F' in header
assert header['F'] == 'G'
assert header[-1] == 'G'
def test_update_from_iterable(self):
"""
Test adding new cards and updating existing cards from an iterable of
cards and card tuples.
"""
header = fits.Header([('A', 'B'), ('C', 'D')])
header.update([('A', 'E'), fits.Card('F', 'G')])
assert header['A'] == 'E'
assert header[0] == 'E'
assert 'F' in header
assert header['F'] == 'G'
assert header[-1] == 'G'
def test_header_extend(self):
"""
Test extending a header both with and without stripping cards from the
extension header.
"""
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu2.header['MYKEY'] = ('some val', 'some comment')
hdu.header += hdu2.header
assert len(hdu.header) == 5
assert hdu.header[-1] == 'some val'
# Same thing, but using + instead of +=
hdu = fits.PrimaryHDU()
hdu.header = hdu.header + hdu2.header
assert len(hdu.header) == 5
assert hdu.header[-1] == 'some val'
# Directly append the other header in full--not usually a desirable
# operation when the header is coming from another HDU
hdu.header.extend(hdu2.header, strip=False)
assert len(hdu.header) == 11
assert list(hdu.header)[5] == 'XTENSION'
assert hdu.header[-1] == 'some val'
assert ('MYKEY', 1) in hdu.header
def test_header_extend_unique(self):
"""
Test extending the header with and without unique=True.
"""
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu.header['MYKEY'] = ('some val', 'some comment')
hdu2.header['MYKEY'] = ('some other val', 'some other comment')
hdu.header.extend(hdu2.header)
assert len(hdu.header) == 6
assert hdu.header[-2] == 'some val'
assert hdu.header[-1] == 'some other val'
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu.header['MYKEY'] = ('some val', 'some comment')
hdu2.header['MYKEY'] = ('some other val', 'some other comment')
hdu.header.extend(hdu2.header, unique=True)
assert len(hdu.header) == 5
assert hdu.header[-1] == 'some val'
def test_header_extend_unique_commentary(self):
"""
Test extending header with and without unique=True and commentary
cards in the header being added. Issue astropy/astropy#3967
"""
for commentary_card in ['', 'COMMENT', 'HISTORY']:
for is_unique in [True, False]:
hdu = fits.PrimaryHDU()
# Make sure we are testing the case we want.
assert commentary_card not in hdu.header
hdu2 = fits.ImageHDU()
hdu2.header[commentary_card] = 'My text'
hdu.header.extend(hdu2.header, unique=is_unique)
assert len(hdu.header) == 5
assert hdu.header[commentary_card][0] == 'My text'
def test_header_extend_update(self):
"""
Test extending the header with and without update=True.
"""
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu.header['MYKEY'] = ('some val', 'some comment')
hdu.header['HISTORY'] = 'history 1'
hdu2.header['MYKEY'] = ('some other val', 'some other comment')
hdu2.header['HISTORY'] = 'history 1'
hdu2.header['HISTORY'] = 'history 2'
hdu.header.extend(hdu2.header)
assert len(hdu.header) == 9
assert ('MYKEY', 0) in hdu.header
assert ('MYKEY', 1) in hdu.header
assert hdu.header[('MYKEY', 1)] == 'some other val'
assert len(hdu.header['HISTORY']) == 3
assert hdu.header[-1] == 'history 2'
hdu = fits.PrimaryHDU()
hdu.header['MYKEY'] = ('some val', 'some comment')
hdu.header['HISTORY'] = 'history 1'
hdu.header.extend(hdu2.header, update=True)
assert len(hdu.header) == 7
assert ('MYKEY', 0) in hdu.header
assert ('MYKEY', 1) not in hdu.header
assert hdu.header['MYKEY'] == 'some other val'
assert len(hdu.header['HISTORY']) == 2
assert hdu.header[-1] == 'history 2'
def test_header_extend_update_commentary(self):
"""
Test extending header with and without unique=True and commentary
cards in the header being added.
Though not quite the same as astropy/astropy#3967, update=True hits
the same if statement as that issue.
"""
for commentary_card in ['', 'COMMENT', 'HISTORY']:
for is_update in [True, False]:
hdu = fits.PrimaryHDU()
# Make sure we are testing the case we want.
assert commentary_card not in hdu.header
hdu2 = fits.ImageHDU()
hdu2.header[commentary_card] = 'My text'
hdu.header.extend(hdu2.header, update=is_update)
assert len(hdu.header) == 5
assert hdu.header[commentary_card][0] == 'My text'
def test_header_extend_exact(self):
"""
Test that extending an empty header with the contents of an existing
header can exactly duplicate that header, given strip=False and
end=True.
"""
header = fits.getheader(self.data('test0.fits'))
header2 = fits.Header()
header2.extend(header, strip=False, end=True)
assert header == header2
def test_header_count(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
assert header.count('A') == 1
assert header.count('C') == 1
assert header.count('E') == 1
header['HISTORY'] = 'a'
header['HISTORY'] = 'b'
assert header.count('HISTORY') == 2
pytest.raises(KeyError, header.count, 'G')
def test_header_append_use_blanks(self):
"""
Tests that blank cards can be appended, and that future appends will
use blank cards when available (unless useblanks=False)
"""
header = fits.Header([('A', 'B'), ('C', 'D')])
# Append a couple blanks
header.append()
header.append()
assert len(header) == 4
assert header[-1] == ''
assert header[-2] == ''
# New card should fill the first blank by default
header.append(('E', 'F'))
assert len(header) == 4
assert header[-2] == 'F'
assert header[-1] == ''
# This card should not use up a blank spot
header.append(('G', 'H'), useblanks=False)
assert len(header) == 5
assert header[-1] == ''
assert header[-2] == 'H'
def test_header_append_keyword_only(self):
"""
Test appending a new card with just the keyword, and no value or
comment given.
"""
header = fits.Header([('A', 'B'), ('C', 'D')])
header.append('E')
assert len(header) == 3
assert list(header)[-1] == 'E'
assert header[-1] is None
assert header.comments['E'] == ''
# Try appending a blank--normally this can be accomplished with just
# header.append(), but header.append('') should also work (and is maybe
# a little more clear)
header.append('')
assert len(header) == 4
assert list(header)[-1] == ''
assert header[''] == ''
assert header.comments[''] == ''
def test_header_insert_use_blanks(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
# Append a couple blanks
header.append()
header.append()
# Insert a new card; should use up one of the blanks
header.insert(1, ('E', 'F'))
assert len(header) == 4
assert header[1] == 'F'
assert header[-1] == ''
assert header[-2] == 'D'
# Insert a new card without using blanks
header.insert(1, ('G', 'H'), useblanks=False)
assert len(header) == 5
assert header[1] == 'H'
assert header[-1] == ''
def test_header_insert_before_keyword(self):
"""
Test that a keyword name or tuple can be used to insert new keywords.
Also tests the ``after`` keyword argument.
Regression test for https://github.com/spacetelescope/PyFITS/issues/12
"""
header = fits.Header([
('NAXIS1', 10), ('COMMENT', 'Comment 1'),
('COMMENT', 'Comment 3')])
header.insert('NAXIS1', ('NAXIS', 2, 'Number of axes'))
assert list(header.keys())[0] == 'NAXIS'
assert header[0] == 2
assert header.comments[0] == 'Number of axes'
header.insert('NAXIS1', ('NAXIS2', 20), after=True)
assert list(header.keys())[1] == 'NAXIS1'
assert list(header.keys())[2] == 'NAXIS2'
assert header[2] == 20
header.insert(('COMMENT', 1), ('COMMENT', 'Comment 2'))
assert header['COMMENT'] == ['Comment 1', 'Comment 2', 'Comment 3']
header.insert(('COMMENT', 2), ('COMMENT', 'Comment 4'), after=True)
assert header['COMMENT'] == ['Comment 1', 'Comment 2', 'Comment 3',
'Comment 4']
header.insert(-1, ('TEST1', True))
assert list(header.keys())[-2] == 'TEST1'
header.insert(-1, ('TEST2', True), after=True)
assert list(header.keys())[-1] == 'TEST2'
assert list(header.keys())[-3] == 'TEST1'
def test_remove(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
# When keyword is present in the header it should be removed.
header.remove('C')
assert len(header) == 1
assert list(header) == ['A']
assert 'C' not in header
# When keyword is not present in the header and ignore_missing is
# False, KeyError should be raised
with pytest.raises(KeyError):
header.remove('F')
# When keyword is not present and ignore_missing is True, KeyError
# will be ignored
header.remove('F', ignore_missing=True)
assert len(header) == 1
# Test for removing all instances of a keyword
header = fits.Header([('A', 'B'), ('C', 'D'), ('A', 'F')])
header.remove('A', remove_all=True)
assert 'A' not in header
assert len(header) == 1
assert list(header) == ['C']
assert header[0] == 'D'
def test_header_comments(self):
header = fits.Header([('A', 'B', 'C'), ('DEF', 'G', 'H')])
assert (repr(header.comments) ==
' A C\n'
' DEF H')
def test_comment_slices_and_filters(self):
header = fits.Header([('AB', 'C', 'D'), ('EF', 'G', 'H'),
('AI', 'J', 'K')])
s = header.comments[1:]
assert list(s) == ['H', 'K']
s = header.comments[::-1]
assert list(s) == ['K', 'H', 'D']
s = header.comments['A*']
assert list(s) == ['D', 'K']
def test_comment_slice_filter_assign(self):
header = fits.Header([('AB', 'C', 'D'), ('EF', 'G', 'H'),
('AI', 'J', 'K')])
header.comments[1:] = 'L'
assert list(header.comments) == ['D', 'L', 'L']
assert header.cards[header.index('AB')].comment == 'D'
assert header.cards[header.index('EF')].comment == 'L'
assert header.cards[header.index('AI')].comment == 'L'
header.comments[::-1] = header.comments[:]
assert list(header.comments) == ['L', 'L', 'D']
header.comments['A*'] = ['M', 'N']
assert list(header.comments) == ['M', 'L', 'N']
def test_commentary_slicing(self):
header = fits.Header()
indices = list(range(5))
for idx in indices:
header['HISTORY'] = idx
# Just a few sample slice types; this won't get all corner cases but if
# these all work we should be in good shape
assert header['HISTORY'][1:] == indices[1:]
assert header['HISTORY'][:3] == indices[:3]
assert header['HISTORY'][:6] == indices[:6]
assert header['HISTORY'][:-2] == indices[:-2]
assert header['HISTORY'][::-1] == indices[::-1]
assert header['HISTORY'][1::-1] == indices[1::-1]
assert header['HISTORY'][1:5:2] == indices[1:5:2]
# Same tests, but copy the values first; as it turns out this is
# different from just directly doing an __eq__ as in the first set of
# assertions
header.insert(0, ('A', 'B', 'C'))
header.append(('D', 'E', 'F'), end=True)
assert list(header['HISTORY'][1:]) == indices[1:]
assert list(header['HISTORY'][:3]) == indices[:3]
assert list(header['HISTORY'][:6]) == indices[:6]
assert list(header['HISTORY'][:-2]) == indices[:-2]
assert list(header['HISTORY'][::-1]) == indices[::-1]
assert list(header['HISTORY'][1::-1]) == indices[1::-1]
assert list(header['HISTORY'][1:5:2]) == indices[1:5:2]
def test_update_commentary(self):
header = fits.Header()
header['FOO'] = 'BAR'
header['HISTORY'] = 'ABC'
header['FRED'] = 'BARNEY'
header['HISTORY'] = 'DEF'
header['HISTORY'] = 'GHI'
assert header['HISTORY'] == ['ABC', 'DEF', 'GHI']
# Single value update
header['HISTORY'][0] = 'FOO'
assert header['HISTORY'] == ['FOO', 'DEF', 'GHI']
# Single value partial slice update
header['HISTORY'][1:] = 'BAR'
assert header['HISTORY'] == ['FOO', 'BAR', 'BAR']
# Multi-value update
header['HISTORY'][:] = ['BAZ', 'QUX']
assert header['HISTORY'] == ['BAZ', 'QUX', 'BAR']
def test_commentary_comparison(self):
"""
Regression test for an issue found in *writing* the regression test for
        https://github.com/astropy/astropy/issues/2363, where the list of
        values for a commentary keyword did not always compare correctly with
        other iterables.
"""
header = fits.Header()
header['HISTORY'] = 'hello world'
header['HISTORY'] = 'hello world'
header['COMMENT'] = 'hello world'
assert header['HISTORY'] != header['COMMENT']
header['COMMENT'] = 'hello world'
assert header['HISTORY'] == header['COMMENT']
def test_long_commentary_card(self):
header = fits.Header()
header['FOO'] = 'BAR'
header['BAZ'] = 'QUX'
longval = 'ABC' * 30
header['HISTORY'] = longval
header['FRED'] = 'BARNEY'
header['HISTORY'] = longval
assert len(header) == 7
assert list(header)[2] == 'FRED'
assert str(header.cards[3]) == 'HISTORY ' + longval[:72]
assert str(header.cards[4]).rstrip() == 'HISTORY ' + longval[72:]
header.set('HISTORY', longval, after='FOO')
assert len(header) == 9
assert str(header.cards[1]) == 'HISTORY ' + longval[:72]
assert str(header.cards[2]).rstrip() == 'HISTORY ' + longval[72:]
header = fits.Header()
header.update({'FOO': 'BAR'})
header.update({'BAZ': 'QUX'})
longval = 'ABC' * 30
header.add_history(longval)
header.update({'FRED': 'BARNEY'})
header.add_history(longval)
assert len(header.cards) == 7
assert header.cards[2].keyword == 'FRED'
assert str(header.cards[3]) == 'HISTORY ' + longval[:72]
assert str(header.cards[4]).rstrip() == 'HISTORY ' + longval[72:]
header.add_history(longval, after='FOO')
assert len(header.cards) == 9
assert str(header.cards[1]) == 'HISTORY ' + longval[:72]
assert str(header.cards[2]).rstrip() == 'HISTORY ' + longval[72:]
def test_totxtfile(self, home_is_temp):
header_filename = self.temp('header.txt')
with fits.open(self.data('test0.fits')) as hdul:
hdul[0].header.totextfile(header_filename)
# Check the `overwrite` flag
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
hdul[0].header.totextfile(header_filename, overwrite=False)
hdul[0].header.totextfile(header_filename, overwrite=True)
hdu = fits.ImageHDU()
hdu.header.update({'MYKEY': 'FOO'})
hdu.header.extend(hdu.header.fromtextfile(header_filename),
update=True, update_first=True)
# Write the hdu out and read it back in again--it should be recognized
# as a PrimaryHDU
hdu.writeto(self.temp('test.fits'), output_verify='ignore')
with fits.open(self.temp('test.fits')) as hdul:
assert isinstance(hdul[0], fits.PrimaryHDU)
hdu = fits.ImageHDU()
hdu.header.update({'MYKEY': 'FOO'})
hdu.header.extend(hdu.header.fromtextfile(header_filename),
update=True, update_first=True, strip=False)
assert 'MYKEY' in hdu.header
assert 'EXTENSION' not in hdu.header
assert 'SIMPLE' in hdu.header
hdu.writeto(self.temp('test.fits'), output_verify='ignore',
overwrite=True)
with fits.open(self.temp('test.fits')) as hdul2:
assert len(hdul2) == 2
assert 'MYKEY' in hdul2[1].header
def test_tofile(self, home_is_temp):
"""
Repeat test_totxtfile, but with tofile()
"""
header_filename = self.temp('header.fits')
with fits.open(self.data('test0.fits')) as hdul:
hdul[0].header.tofile(header_filename)
# Check the `overwrite` flag
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
hdul[0].header.tofile(header_filename, overwrite=False)
hdul[0].header.tofile(header_filename, overwrite=True)
hdu = fits.ImageHDU()
hdu.header.update({'MYKEY': 'FOO'})
hdu.header.extend(hdu.header.fromfile(header_filename),
update=True, update_first=True)
# Write the hdu out and read it back in again--it should be recognized
# as a PrimaryHDU
hdu.writeto(self.temp('test.fits'), output_verify='ignore')
with fits.open(self.temp('test.fits')) as hdul:
assert isinstance(hdul[0], fits.PrimaryHDU)
hdu = fits.ImageHDU()
hdu.header.update({'MYKEY': 'FOO'})
hdu.header.extend(hdu.header.fromfile(header_filename),
update=True, update_first=True, strip=False)
assert 'MYKEY' in hdu.header
assert 'EXTENSION' not in hdu.header
assert 'SIMPLE' in hdu.header
hdu.writeto(self.temp('test.fits'), output_verify='ignore',
overwrite=True)
with fits.open(self.temp('test.fits')) as hdul2:
assert len(hdul2) == 2
assert 'MYKEY' in hdul2[1].header
def test_fromfile(self):
"""Regression test for https://github.com/astropy/astropy/issues/8711
"""
filename = self.data('scale.fits')
hdr = fits.Header.fromfile(filename)
assert hdr['DATASET'] == '2MASS'
def test_header_fromtextfile(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/122
Manually write a text file containing some header cards ending with
newlines and ensure that fromtextfile can read them back in.
"""
header = fits.Header()
header['A'] = ('B', 'C')
header['B'] = ('C', 'D')
header['C'] = ('D', 'E')
with open(self.temp('test.hdr'), 'w') as f:
f.write('\n'.join(str(c).strip() for c in header.cards))
header2 = fits.Header.fromtextfile(self.temp('test.hdr'))
assert header == header2
def test_header_fromtextfile_with_end_card(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/154
        Make sure that when a Header is read from a text file, the END card
        is ignored.
"""
header = fits.Header([('A', 'B', 'C'), ('D', 'E', 'F')])
# We don't use header.totextfile here because it writes each card with
# trailing spaces to pad them out to 80 characters. But this bug only
# presents itself when each card ends immediately with a newline, and
# no trailing spaces
with open(self.temp('test.hdr'), 'w') as f:
f.write('\n'.join(str(c).strip() for c in header.cards))
f.write('\nEND')
new_header = fits.Header.fromtextfile(self.temp('test.hdr'))
assert 'END' not in new_header
assert header == new_header
def test_append_end_card(self):
"""
Regression test 2 for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/154
Manually adding an END card to a header should simply result in a
ValueError (as was the case in PyFITS 3.0 and earlier).
"""
header = fits.Header([('A', 'B', 'C'), ('D', 'E', 'F')])
def setitem(k, v):
header[k] = v
pytest.raises(ValueError, setitem, 'END', '')
pytest.raises(ValueError, header.append, 'END')
pytest.raises(ValueError, header.append, 'END', end=True)
pytest.raises(ValueError, header.insert, len(header), 'END')
pytest.raises(ValueError, header.set, 'END')
def test_invalid_end_cards(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/217
This tests the case where the END card looks like a normal card like
'END = ' and other similar oddities. As long as a card starts with END
and looks like it was intended to be the END card we allow it, but with
a warning.
"""
horig = fits.PrimaryHDU(data=np.arange(100)).header
def invalid_header(end, pad):
# Build up a goofy invalid header
# Start from a seemingly normal header
s = horig.tostring(sep='', endcard=False, padding=False)
# append the bogus end card
s += end
# add additional padding if requested
if pad:
s += ' ' * _pad_length(len(s))
# This will differ between Python versions
if isinstance(s, bytes):
return BytesIO(s)
else:
return StringIO(s)
# Basic case motivated by the original issue; it's as if the END card
# was appended by software that doesn't know to treat it specially, and
# it is given an = after it
s = invalid_header('END =', True)
with pytest.warns(AstropyUserWarning, match="Unexpected bytes trailing "
"END keyword: ' ='") as w:
h = fits.Header.fromfile(s)
assert h == horig
assert len(w) == 1
# A case similar to the last but with more spaces between END and the
# =, as though the '= ' value indicator were placed like that of a
# normal card
s = invalid_header('END = ', True)
with pytest.warns(AstropyUserWarning, match="Unexpected bytes trailing "
"END keyword: ' ='") as w:
h = fits.Header.fromfile(s)
assert h == horig
assert len(w) == 1
# END card with trailing gibberish
s = invalid_header('END$%&%^*%*', True)
with pytest.warns(AstropyUserWarning, match=r"Unexpected bytes trailing "
r"END keyword: '\$%&%\^\*%\*'") as w:
h = fits.Header.fromfile(s)
assert h == horig
assert len(w) == 1
# 'END' at the very end of a truncated file without padding; the way
# the block reader works currently this can only happen if the 'END'
# is at the very end of the file.
s = invalid_header('END', False)
with pytest.warns(AstropyUserWarning, match="Missing padding to end of "
"the FITS block") as w:
# Don't raise an exception on missing padding, but still produce a
# warning that the END card is incomplete
h = fits.Header.fromfile(s, padding=False)
assert h == horig
assert len(w) == 1
def test_invalid_characters(self):
"""
Test header with invalid characters
"""
# Generate invalid file with non-ASCII character
h = fits.Header()
h['FOO'] = 'BAR'
h['COMMENT'] = 'hello'
hdul = fits.PrimaryHDU(header=h, data=np.arange(5))
hdul.writeto(self.temp('test.fits'))
with open(self.temp('test.fits'), 'rb') as f:
out = f.read()
out = out.replace(b'hello', 'héllo'.encode('latin1'))
out = out.replace(b'BAR', 'BÀR'.encode('latin1'))
with open(self.temp('test2.fits'), 'wb') as f2:
f2.write(out)
with pytest.warns(AstropyUserWarning, match="non-ASCII characters are "
"present in the FITS file") as w:
h = fits.getheader(self.temp('test2.fits'))
assert h['FOO'] == 'B?R'
assert h['COMMENT'] == 'h?llo'
assert len(w) == 1
def test_unnecessary_move(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/125
Ensures that a header is not modified when setting the position of a
keyword that's already in its correct position.
"""
header = fits.Header([('A', 'B'), ('B', 'C'), ('C', 'D')])
header.set('B', before=2)
assert list(header) == ['A', 'B', 'C']
assert not header._modified
header.set('B', after=0)
assert list(header) == ['A', 'B', 'C']
assert not header._modified
header.set('B', before='C')
assert list(header) == ['A', 'B', 'C']
assert not header._modified
header.set('B', after='A')
assert list(header) == ['A', 'B', 'C']
assert not header._modified
header.set('B', before=2)
assert list(header) == ['A', 'B', 'C']
assert not header._modified
# 123 is well past the end, and C is already at the end, so it's in the
# right place already
header.set('C', before=123)
assert list(header) == ['A', 'B', 'C']
assert not header._modified
header.set('C', after=123)
assert list(header) == ['A', 'B', 'C']
assert not header._modified
def test_invalid_float_cards(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137"""
# Create a header containing two of the problematic cards in the test
# case where this came up:
hstr = "FOCALLEN= +1.550000000000e+002\nAPERTURE= +0.000000000000e+000"
h = fits.Header.fromstring(hstr, sep='\n')
# First the case that *does* work prior to fixing this issue
assert h['FOCALLEN'] == 155.0
assert h['APERTURE'] == 0.0
# Now if this were reserialized, would new values for these cards be
# written with repaired exponent signs?
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert (str(h.cards['FOCALLEN']) ==
_pad("FOCALLEN= +1.550000000000E+002"))
assert h.cards['FOCALLEN']._modified
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert (str(h.cards['APERTURE']) ==
_pad("APERTURE= +0.000000000000E+000"))
assert h.cards['APERTURE']._modified
assert h._modified
# This is the case that was specifically causing problems; generating
# the card strings *before* parsing the values. Also, the card strings
# really should be "fixed" before being returned to the user
h = fits.Header.fromstring(hstr, sep='\n')
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert (str(h.cards['FOCALLEN']) ==
_pad("FOCALLEN= +1.550000000000E+002"))
assert h.cards['FOCALLEN']._modified
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert (str(h.cards['APERTURE']) ==
_pad("APERTURE= +0.000000000000E+000"))
assert h.cards['APERTURE']._modified
assert h['FOCALLEN'] == 155.0
assert h['APERTURE'] == 0.0
assert h._modified
# For the heck of it, try assigning the identical values and ensure
# that the newly fixed value strings are left intact
h['FOCALLEN'] = 155.0
h['APERTURE'] = 0.0
assert (str(h.cards['FOCALLEN']) ==
_pad("FOCALLEN= +1.550000000000E+002"))
assert (str(h.cards['APERTURE']) ==
_pad("APERTURE= +0.000000000000E+000"))
def test_invalid_float_cards2(self, capsys):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/140
"""
# The example for this test requires creating a FITS file containing a
# slightly misformatted float value. I can't actually even find a way
# to do that directly through Astropy--it won't let me.
hdu = fits.PrimaryHDU()
hdu.header['TEST'] = 5.0022221e-07
hdu.writeto(self.temp('test.fits'))
# Here we manually make the file invalid
with open(self.temp('test.fits'), 'rb+') as f:
f.seek(346) # Location of the exponent 'E' symbol
f.write(encode_ascii('e'))
with fits.open(self.temp('test.fits')) as hdul, \
pytest.warns(AstropyUserWarning) as w:
hdul.writeto(self.temp('temp.fits'), output_verify='warn')
assert len(w) == 5
# The first two warnings are just the headers to the actual warning
# message (HDU 0, Card 4). I'm still not sure things like that
# should be output as separate warning messages, but that's
# something to think about...
msg = str(w[3].message)
assert "(invalid value string: '5.0022221e-07')" in msg
def test_leading_zeros(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137, part 2
Ticket https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137 also showed that in
float values like 0.001 the leading zero was unnecessarily being
stripped off when rewriting the header. Though leading zeros should be
removed from integer values to prevent misinterpretation as octal by
        python (for now Astropy will still maintain the leading zeros if no
changes are made to the value, but will drop them if changes are made).
"""
c = fits.Card.fromstring("APERTURE= +0.000000000000E+000")
assert str(c) == _pad("APERTURE= +0.000000000000E+000")
assert c.value == 0.0
c = fits.Card.fromstring("APERTURE= 0.000000000000E+000")
assert str(c) == _pad("APERTURE= 0.000000000000E+000")
assert c.value == 0.0
c = fits.Card.fromstring("APERTURE= 017")
assert str(c) == _pad("APERTURE= 017")
assert c.value == 17
def test_assign_boolean(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/123
Tests assigning Python and Numpy boolean values to keyword values.
"""
fooimg = _pad('FOO = T')
barimg = _pad('BAR = F')
h = fits.Header()
h['FOO'] = True
h['BAR'] = False
assert h['FOO'] is True
assert h['BAR'] is False
assert str(h.cards['FOO']) == fooimg
assert str(h.cards['BAR']) == barimg
h = fits.Header()
h['FOO'] = np.bool_(True)
h['BAR'] = np.bool_(False)
assert h['FOO'] is True
assert h['BAR'] is False
assert str(h.cards['FOO']) == fooimg
assert str(h.cards['BAR']) == barimg
h = fits.Header()
h.append(fits.Card.fromstring(fooimg))
h.append(fits.Card.fromstring(barimg))
assert h['FOO'] is True
assert h['BAR'] is False
assert str(h.cards['FOO']) == fooimg
assert str(h.cards['BAR']) == barimg
def test_header_method_keyword_normalization(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/149
Basically ensures that all public Header methods are case-insensitive
w.r.t. keywords.
Provides a reasonably comprehensive test of several methods at once.
"""
h = fits.Header([('abC', 1), ('Def', 2), ('GeH', 3)])
assert list(h) == ['ABC', 'DEF', 'GEH']
assert 'abc' in h
assert 'dEf' in h
assert h['geh'] == 3
# Case insensitivity of wildcards
assert len(h['g*']) == 1
h['aBc'] = 2
assert h['abc'] == 2
# ABC already existed so assigning to aBc should not have added any new
# cards
assert len(h) == 3
del h['gEh']
assert list(h) == ['ABC', 'DEF']
assert len(h) == 2
assert h.get('def') == 2
h.set('Abc', 3)
assert h['ABC'] == 3
h.set('gEh', 3, before='Abc')
assert list(h) == ['GEH', 'ABC', 'DEF']
assert h.pop('abC') == 3
assert len(h) == 2
assert h.setdefault('def', 3) == 2
assert len(h) == 2
assert h.setdefault('aBc', 1) == 1
assert len(h) == 3
assert list(h) == ['GEH', 'DEF', 'ABC']
h.update({'GeH': 1, 'iJk': 4})
assert len(h) == 4
assert list(h) == ['GEH', 'DEF', 'ABC', 'IJK']
assert h['GEH'] == 1
assert h.count('ijk') == 1
assert h.index('ijk') == 3
h.remove('Def')
assert len(h) == 3
assert list(h) == ['GEH', 'ABC', 'IJK']
def test_end_in_comment(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/142
Tests a case where the comment of a card ends with END, and is followed
by several blank cards.
"""
data = np.arange(100).reshape(10, 10)
hdu = fits.PrimaryHDU(data=data)
hdu.header['TESTKW'] = ('Test val', 'This is the END')
# Add a couple blanks after the END string
hdu.header.append()
hdu.header.append()
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'), memmap=False) as hdul:
# memmap = False to avoid leaving open a mmap to the file when we
# access the data--this causes problems on Windows when we try to
# overwrite the file later
assert 'TESTKW' in hdul[0].header
assert hdul[0].header == hdu.header
assert (hdul[0].data == data).all()
# Add blanks until the header is extended to two block sizes
while len(hdu.header) < 36:
hdu.header.append()
hdu.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as hdul:
assert 'TESTKW' in hdul[0].header
assert hdul[0].header == hdu.header
assert (hdul[0].data == data).all()
# Test parsing the same header when it's written to a text file
hdu.header.totextfile(self.temp('test.hdr'))
header2 = fits.Header.fromtextfile(self.temp('test.hdr'))
assert hdu.header == header2
def test_assign_unicode(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/134
Assigning a unicode literal as a header value should not fail silently.
If the value can be converted to ASCII then it should just work.
Otherwise it should fail with an appropriate value error.
Also tests unicode for keywords and comments.
"""
erikku = '\u30a8\u30ea\u30c3\u30af'
def assign(keyword, val):
h[keyword] = val
h = fits.Header()
h['FOO'] = 'BAR'
assert 'FOO' in h
assert h['FOO'] == 'BAR'
assert repr(h) == _pad("FOO = 'BAR '")
pytest.raises(ValueError, assign, erikku, 'BAR')
h['FOO'] = 'BAZ'
assert h['FOO'] == 'BAZ'
assert repr(h) == _pad("FOO = 'BAZ '")
pytest.raises(ValueError, assign, 'FOO', erikku)
h['FOO'] = ('BAR', 'BAZ')
assert h['FOO'] == 'BAR'
assert h.comments['FOO'] == 'BAZ'
assert repr(h) == _pad("FOO = 'BAR ' / BAZ")
pytest.raises(ValueError, assign, 'FOO', ('BAR', erikku))
pytest.raises(ValueError, assign, 'FOO', (erikku, 'BAZ'))
pytest.raises(ValueError, assign, 'FOO', (erikku, erikku))
def test_assign_non_ascii(self):
"""
First regression test for
https://github.com/spacetelescope/PyFITS/issues/37
        While test_assign_unicode ensures that `str` objects containing
        non-ASCII characters cannot be assigned to headers, this test checks
        that it is not possible to assign bytes to a header at all.
"""
h = fits.Header()
with pytest.raises(ValueError, match="Illegal value: b'Hello'."):
h.set('TEST', b'Hello')
def test_header_strip_whitespace(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/146, and
for the solution that is optional stripping of whitespace from the end
of a header value.
By default extra whitespace is stripped off, but if
`fits.conf.strip_header_whitespace` = False it should not be
stripped.
"""
h = fits.Header()
h['FOO'] = 'Bar '
assert h['FOO'] == 'Bar'
c = fits.Card.fromstring("QUX = 'Bar '")
h.append(c)
assert h['QUX'] == 'Bar'
assert h.cards['FOO'].image.rstrip() == "FOO = 'Bar '"
assert h.cards['QUX'].image.rstrip() == "QUX = 'Bar '"
with fits.conf.set_temp('strip_header_whitespace', False):
assert h['FOO'] == 'Bar '
assert h['QUX'] == 'Bar '
assert h.cards['FOO'].image.rstrip() == "FOO = 'Bar '"
assert h.cards['QUX'].image.rstrip() == "QUX = 'Bar '"
assert h['FOO'] == 'Bar'
assert h['QUX'] == 'Bar'
assert h.cards['FOO'].image.rstrip() == "FOO = 'Bar '"
assert h.cards['QUX'].image.rstrip() == "QUX = 'Bar '"
def test_keep_duplicate_history_in_orig_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/156
When creating a new HDU from an existing Header read from an existing
FITS file, if the original header contains duplicate HISTORY values
those duplicates should be preserved just as in the original header.
This bug occurred due to naivete in Header.extend.
"""
history = ['CCD parameters table ...',
' reference table oref$n951041ko_ccd.fits',
' INFLIGHT 12/07/2001 25/02/2002',
' all bias frames'] * 3
hdu = fits.PrimaryHDU()
# Add the history entries twice
for item in history:
hdu.header['HISTORY'] = item
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[0].header['HISTORY'] == history
new_hdu = fits.PrimaryHDU(header=hdu.header)
assert new_hdu.header['HISTORY'] == hdu.header['HISTORY']
new_hdu.writeto(self.temp('test2.fits'))
with fits.open(self.temp('test2.fits')) as hdul:
assert hdul[0].header['HISTORY'] == history
def test_invalid_keyword_cards(self):
"""
Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/109
Allow opening files with headers containing invalid keywords.
"""
# Create a header containing a few different types of BAD headers.
c1 = fits.Card.fromstring('CLFIND2D: contour = 0.30')
c2 = fits.Card.fromstring('Just some random text.')
c3 = fits.Card.fromstring('A' * 80)
hdu = fits.PrimaryHDU()
# This should work with some warnings
with pytest.warns(AstropyUserWarning) as w:
hdu.header.append(c1)
hdu.header.append(c2)
hdu.header.append(c3)
assert len(w) == 3
hdu.writeto(self.temp('test.fits'))
with pytest.warns(AstropyUserWarning) as w:
with fits.open(self.temp('test.fits')) as hdul:
# Merely opening the file should blast some warnings about the
# invalid keywords
assert len(w) == 3
header = hdul[0].header
assert 'CLFIND2D' in header
assert 'Just som' in header
assert 'AAAAAAAA' in header
assert header['CLFIND2D'] == ': contour = 0.30'
assert header['Just som'] == 'e random text.'
assert header['AAAAAAAA'] == 'A' * 72
# It should not be possible to assign to the invalid keywords
pytest.raises(ValueError, header.set, 'CLFIND2D', 'foo')
pytest.raises(ValueError, header.set, 'Just som', 'foo')
pytest.raises(ValueError, header.set, 'AAAAAAAA', 'foo')
def test_fix_hierarch_with_invalid_value(self, capsys):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/172
Ensures that when fixing a hierarch card it remains a hierarch card.
"""
c = fits.Card.fromstring('HIERARCH ESO DET CHIP PXSPACE = 5e6')
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
c.verify('fix')
assert str(c) == _pad('HIERARCH ESO DET CHIP PXSPACE = 5E6')
def test_assign_inf_nan(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/11
For the time being it should not be possible to assign the floating
point values inf or nan to a header value, since this is not defined by
the FITS standard.
"""
h = fits.Header()
pytest.raises(ValueError, h.set, 'TEST', float('nan'))
pytest.raises(ValueError, h.set, 'TEST', np.nan)
pytest.raises(ValueError, h.set, 'TEST', np.float32('nan'))
pytest.raises(ValueError, h.set, 'TEST', float('inf'))
pytest.raises(ValueError, h.set, 'TEST', np.inf)
def test_update_bool(self):
"""
Regression test for an issue where a value of True in a header
        could not be updated to a value of 1, and likewise for False/0.
"""
h = fits.Header([('TEST', True)])
h['TEST'] = 1
assert h['TEST'] is not True
assert isinstance(h['TEST'], int)
assert h['TEST'] == 1
h['TEST'] = np.bool_(True)
assert h['TEST'] is True
h['TEST'] = False
assert h['TEST'] is False
h['TEST'] = np.bool_(False)
assert h['TEST'] is False
h['TEST'] = 0
assert h['TEST'] is not False
assert isinstance(h['TEST'], int)
assert h['TEST'] == 0
h['TEST'] = np.bool_(False)
assert h['TEST'] is False
def test_update_numeric(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/49
Ensure that numeric values can be upcast/downcast between int, float,
and complex by assigning values that compare equal to the existing
value but are a different type.
"""
h = fits.Header()
h['TEST'] = 1
# int -> float
h['TEST'] = 1.0
assert isinstance(h['TEST'], float)
assert str(h).startswith('TEST = 1.0')
# float -> int
h['TEST'] = 1
assert isinstance(h['TEST'], int)
assert str(h).startswith('TEST = 1')
# int -> complex
h['TEST'] = 1.0+0.0j
assert isinstance(h['TEST'], complex)
assert str(h).startswith('TEST = (1.0, 0.0)')
# complex -> float
h['TEST'] = 1.0
assert isinstance(h['TEST'], float)
assert str(h).startswith('TEST = 1.0')
# float -> complex
h['TEST'] = 1.0+0.0j
assert isinstance(h['TEST'], complex)
assert str(h).startswith('TEST = (1.0, 0.0)')
# complex -> int
h['TEST'] = 1
assert isinstance(h['TEST'], int)
assert str(h).startswith('TEST = 1')
# Now the same tests but with zeros
h['TEST'] = 0
# int -> float
h['TEST'] = 0.0
assert isinstance(h['TEST'], float)
assert str(h).startswith('TEST = 0.0')
# float -> int
h['TEST'] = 0
assert isinstance(h['TEST'], int)
assert str(h).startswith('TEST = 0')
# int -> complex
h['TEST'] = 0.0+0.0j
assert isinstance(h['TEST'], complex)
assert str(h).startswith('TEST = (0.0, 0.0)')
# complex -> float
h['TEST'] = 0.0
assert isinstance(h['TEST'], float)
assert str(h).startswith('TEST = 0.0')
# float -> complex
h['TEST'] = 0.0+0.0j
assert isinstance(h['TEST'], complex)
assert str(h).startswith('TEST = (0.0, 0.0)')
# complex -> int
h['TEST'] = 0
assert isinstance(h['TEST'], int)
assert str(h).startswith('TEST = 0')
def test_newlines_in_commentary(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/51
Test data extracted from a header in an actual FITS file found in the
wild. Names have been changed to protect the innocent.
"""
# First ensure that we can't assign new keyword values with newlines in
# them
h = fits.Header()
pytest.raises(ValueError, h.set, 'HISTORY', '\n')
pytest.raises(ValueError, h.set, 'HISTORY', '\nabc')
pytest.raises(ValueError, h.set, 'HISTORY', 'abc\n')
pytest.raises(ValueError, h.set, 'HISTORY', 'abc\ndef')
test_cards = [
"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18 "
"HISTORY File modified by user ' fred' with fv on 2013-04-23T11:16:29 "
"HISTORY File modified by user ' fred' with fv on 2013-11-04T16:59:14 "
"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18\nFile modif"
"HISTORY ied by user 'wilma' with fv on 2013-04-23T11:16:29\nFile modified by use"
"HISTORY r ' fred' with fv on 2013-11-04T16:59:14 "
"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18\nFile modif"
"HISTORY ied by user 'wilma' with fv on 2013-04-23T11:16:29\nFile modified by use"
"HISTORY r ' fred' with fv on 2013-11-04T16:59:14\nFile modified by user 'wilma' "
"HISTORY with fv on 2013-04-22T21:42:18\nFile modif\nied by user 'wilma' with fv "
"HISTORY on 2013-04-23T11:16:29\nFile modified by use\nr ' fred' with fv on 2013-1"
"HISTORY 1-04T16:59:14 "
]
for card_image in test_cards:
c = fits.Card.fromstring(card_image)
if '\n' in card_image:
pytest.raises(fits.VerifyError, c.verify, 'exception')
else:
c.verify('exception')
def test_long_commentary_card_appended_to_header(self):
"""
If a HISTORY or COMMENT card with a too-long value is appended to a
        header with Header.append (as opposed to assigning to hdr['HISTORY']),
it fails verification.
Regression test for https://github.com/astropy/astropy/issues/11486
"""
header = fits.Header()
value = 'abc' * 90
# this is what Table does when saving its history metadata key to a
# FITS file
header.append(('history', value))
assert len(header.cards) == 1
# Test Card._split() directly since this was the main problem area
key, val = header.cards[0]._split()
assert key == 'HISTORY' and val == value
        # Try adding this header to an HDU and writing it to a file
hdu = fits.PrimaryHDU(header=header)
hdu.writeto(self.temp('test.fits'), overwrite=True)
def test_header_fromstring_bytes(self):
"""
Test reading a Header from a `bytes` string.
See https://github.com/astropy/astropy/issues/8706
"""
with open(self.data('test0.fits'), 'rb') as fobj:
pri_hdr_from_bytes = fits.Header.fromstring(fobj.read())
pri_hdr = fits.getheader(self.data('test0.fits'))
assert pri_hdr['NAXIS'] == pri_hdr_from_bytes['NAXIS']
assert pri_hdr == pri_hdr_from_bytes
assert pri_hdr.tostring() == pri_hdr_from_bytes.tostring()
def test_set_keyword_with_space(self):
"""
Regression test for https://github.com/astropy/astropy/issues/10479
"""
hdr = fits.Header()
hdr['KEY2 '] = 2
hdr['KEY2 '] = 4
assert len(hdr) == 1
assert hdr['KEY2'] == 4
assert hdr['KEY2 '] == 4
def test_strip(self):
hdr = fits.getheader(self.data('tb.fits'), ext=1)
hdr['FOO'] = 'bar'
hdr.strip()
assert set(hdr) == {'HISTORY', 'FOO'}
hdr = fits.getheader(self.data('tb.fits'), ext=1)
hdr['FOO'] = 'bar'
hdr = hdr.copy(strip=True)
assert set(hdr) == {'HISTORY', 'FOO'}
def test_update_invalid_card(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5408
Tests updating the value of a card that is malformatted (with an
invalid value literal).
This tests two ways of reproducing the problem, one working with a
Card object directly, and one when reading/writing a header containing
such an invalid card.
"""
card = fits.Card.fromstring('KW = INF / Comment')
card.value = 'FIXED'
assert tuple(card) == ('KW', 'FIXED', 'Comment')
card.verify('fix')
assert tuple(card) == ('KW', 'FIXED', 'Comment')
card = fits.Card.fromstring('KW = INF')
hdu = fits.PrimaryHDU()
# This is a loophole to write a header containing a malformatted card
card._verified = True
hdu.header.append(card)
hdu.header.tofile(self.temp('bogus.fits'))
with fits.open(self.temp('bogus.fits')) as hdul:
hdul[0].header['KW'] = -1
hdul.writeto(self.temp('bogus_fixed.fits'))
with fits.open(self.temp('bogus_fixed.fits')) as hdul:
assert hdul[0].header['KW'] == -1
def test_index_numpy_int(self):
header = fits.Header([('A', 'FOO'), ('B', 2), ('C', 'BAR')])
idx = np.int8(2)
assert header[idx] == 'BAR'
header[idx] = 'BAZ'
assert header[idx] == 'BAZ'
header.insert(idx, ('D', 42))
assert header[idx] == 42
header.add_comment('HELLO')
header.add_comment('WORLD')
assert header['COMMENT'][np.int64(1)] == 'WORLD'
header.append(('C', 'BAZBAZ'))
assert header[('C', np.int16(0))] == 'BAZ'
assert header[('C', np.uint32(1))] == 'BAZBAZ'
def test_header_data_size(self):
"""
Tests data size calculation (w/o padding) given a Header.
"""
hdu = fits.PrimaryHDU()
header = hdu.header
assert header.data_size == 0
header['BITPIX'] = 32
header['NAXIS'] = 2
header['NAXIS1'] = 100
header['NAXIS2'] = 100
assert header.data_size == 40000
assert header.data_size_padded == 40320
class TestRecordValuedKeywordCards(FitsTestCase):
"""
Tests for handling of record-valued keyword cards as used by the
`FITS WCS distortion paper
<https://www.atnf.csiro.au/people/mcalabre/WCS/dcs_20040422.pdf>`__.
These tests are derived primarily from the release notes for PyFITS 1.4 (in
    which this feature was first introduced).
Note that extra leading spaces in the `value` fields should be parsed on input,
but will be stripped in the cards.
"""
def setup_method(self):
super().setup_method()
self._test_header = fits.Header()
self._test_header.set('DP1', 'NAXIS: 2')
self._test_header.set('DP1', 'AXIS.1: 1')
self._test_header.set('DP1', 'AXIS.2: 2')
self._test_header.set('DP1', 'NAUX: 2')
self._test_header.set('DP1', 'AUX.1.COEFF.0: 0')
self._test_header.set('DP1', 'AUX.1.POWER.0: 1')
self._test_header.set('DP1', 'AUX.1.COEFF.1: 0.00048828125')
self._test_header.set('DP1', 'AUX.1.POWER.1: 1')
def test_initialize_rvkc(self):
"""
Test different methods for initializing a card that should be
recognized as a RVKC
"""
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.field_specifier == 'NAXIS'
assert c.comment == 'A comment'
c = fits.Card.fromstring("DP1 = 'NAXIS: 2.1'")
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.1
assert c.field_specifier == 'NAXIS'
c = fits.Card.fromstring("DP1 = 'NAXIS: a'")
assert c.keyword == 'DP1'
assert c.value == 'NAXIS: a'
assert c.field_specifier is None
c = fits.Card('DP1', 'NAXIS: 2')
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.field_specifier == 'NAXIS'
c = fits.Card('DP1', 'NAXIS: 2.0')
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.field_specifier == 'NAXIS'
c = fits.Card('DP1', 'NAXIS: a')
assert c.keyword == 'DP1'
assert c.value == 'NAXIS: a'
assert c.field_specifier is None
c = fits.Card('DP1.NAXIS', 2)
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.field_specifier == 'NAXIS'
c = fits.Card('DP1.NAXIS', 2.0)
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.field_specifier == 'NAXIS'
with pytest.warns(fits.verify.VerifyWarning):
c = fits.Card('DP1.NAXIS', 'a')
assert c.keyword == 'DP1.NAXIS'
assert c.value == 'a'
assert c.field_specifier is None
def test_parse_field_specifier(self):
"""
        Tests that the field_specifier can be accessed from a card read from a
string before any other attributes are accessed.
"""
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.field_specifier == 'NAXIS'
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.comment == 'A comment'
def test_update_field_specifier(self):
"""
Test setting the field_specifier attribute and updating the card image
to reflect the new value.
"""
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.field_specifier == 'NAXIS'
c.field_specifier = 'NAXIS1'
assert c.field_specifier == 'NAXIS1'
assert c.keyword == 'DP1.NAXIS1'
assert c.value == 2.0
assert c.comment == 'A comment'
assert str(c).rstrip() == "DP1 = 'NAXIS1: 2' / A comment"
    def test_field_specifier_case_sensitivity(self):
"""
The keyword portion of an RVKC should still be case-insensitive, but
the field-specifier portion should be case-sensitive.
"""
header = fits.Header()
header.set('abc.def', 1)
header.set('abc.DEF', 2)
assert header['abc.def'] == 1
assert header['ABC.def'] == 1
assert header['aBc.def'] == 1
assert header['ABC.DEF'] == 2
assert 'ABC.dEf' not in header
def test_get_rvkc_by_index(self):
"""
Returning a RVKC from a header via index lookup should return the
float value of the card.
"""
assert self._test_header[0] == 2.0
assert isinstance(self._test_header[0], float)
assert self._test_header[1] == 1.0
assert isinstance(self._test_header[1], float)
def test_get_rvkc_by_keyword(self):
"""
Returning a RVKC just via the keyword name should return the full value
string of the first card with that keyword.
This test was changed to reflect the requirement in ticket
https://aeon.stsci.edu/ssb/trac/pyfits/ticket/184--previously it required
_test_header['DP1'] to return the parsed float value.
"""
assert self._test_header['DP1'] == 'NAXIS: 2'
def test_get_rvkc_by_keyword_and_field_specifier(self):
"""
Returning a RVKC via the full keyword/field-specifier combination
should return the floating point value associated with the RVKC.
"""
assert self._test_header['DP1.NAXIS'] == 2.0
assert isinstance(self._test_header['DP1.NAXIS'], float)
assert self._test_header['DP1.AUX.1.COEFF.1'] == 0.00048828125
def test_access_nonexistent_rvkc(self):
"""
Accessing a nonexistent RVKC should raise an IndexError for
index-based lookup, or a KeyError for keyword lookup (like a normal
card).
"""
pytest.raises(IndexError, lambda x: self._test_header[x], 8)
# Test exception with message
with pytest.raises(KeyError, match=r"Keyword 'DP1\.AXIS\.3' not found."):
self._test_header['DP1.AXIS.3']
def test_update_rvkc(self):
"""A RVKC can be updated either via index or keyword access."""
self._test_header[0] = 3
assert self._test_header['DP1.NAXIS'] == 3.0
assert isinstance(self._test_header['DP1.NAXIS'], float)
self._test_header['DP1.AXIS.1'] = 1.1
assert self._test_header['DP1.AXIS.1'] == 1.1
def test_update_rvkc_2(self):
"""Regression test for an issue that appeared after SVN r2412."""
h = fits.Header()
h['D2IM1.EXTVER'] = 1
assert h['D2IM1.EXTVER'] == 1.0
h['D2IM1.EXTVER'] = 2
assert h['D2IM1.EXTVER'] == 2.0
def test_raw_keyword_value(self):
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.rawkeyword == 'DP1'
assert c.rawvalue == 'NAXIS: 2'
c = fits.Card('DP1.NAXIS', 2)
assert c.rawkeyword == 'DP1'
assert c.rawvalue == 'NAXIS: 2.0'
c = fits.Card('DP1.NAXIS', 2.0)
assert c.rawkeyword == 'DP1'
assert c.rawvalue == 'NAXIS: 2.0'
def test_rvkc_insert_after(self):
"""
It should be possible to insert a new RVKC after an existing one
specified by the full keyword/field-specifier combination."""
self._test_header.set('DP1', 'AXIS.3: 1', 'a comment',
after='DP1.AXIS.2')
assert self._test_header[3] == 1
assert self._test_header['DP1.AXIS.3'] == 1
def test_rvkc_delete(self):
"""
Deleting a RVKC should work as with a normal card by using the full
        keyword/field-specifier combination.
"""
del self._test_header['DP1.AXIS.1']
assert len(self._test_header) == 7
assert list(self._test_header)[0] == 'DP1.NAXIS'
assert self._test_header[0] == 2
assert list(self._test_header)[1] == 'DP1.AXIS.2'
# Perform a subsequent delete to make sure all the index mappings were
# updated
del self._test_header['DP1.AXIS.2']
assert len(self._test_header) == 6
assert list(self._test_header)[0] == 'DP1.NAXIS'
assert self._test_header[0] == 2
assert list(self._test_header)[1] == 'DP1.NAUX'
assert self._test_header[1] == 2
def test_pattern_matching_keys(self):
"""Test the keyword filter strings with RVKCs."""
cl = self._test_header['DP1.AXIS.*']
assert isinstance(cl, fits.Header)
assert ([str(c).strip() for c in cl.cards] ==
["DP1 = 'AXIS.1: 1'",
"DP1 = 'AXIS.2: 2'"])
cl = self._test_header['DP1.N*']
assert ([str(c).strip() for c in cl.cards] ==
["DP1 = 'NAXIS: 2'",
"DP1 = 'NAUX: 2'"])
cl = self._test_header['DP1.AUX...']
assert ([str(c).strip() for c in cl.cards] ==
["DP1 = 'AUX.1.COEFF.0: 0'",
"DP1 = 'AUX.1.POWER.0: 1'",
"DP1 = 'AUX.1.COEFF.1: 0.00048828125'",
"DP1 = 'AUX.1.POWER.1: 1'"])
cl = self._test_header['DP?.NAXIS']
assert ([str(c).strip() for c in cl.cards] ==
["DP1 = 'NAXIS: 2'"])
cl = self._test_header['DP1.A*S.*']
assert ([str(c).strip() for c in cl.cards] ==
["DP1 = 'AXIS.1: 1'",
"DP1 = 'AXIS.2: 2'"])
def test_pattern_matching_key_deletion(self):
"""Deletion by filter strings should work."""
del self._test_header['DP1.A*...']
assert len(self._test_header) == 2
assert list(self._test_header)[0] == 'DP1.NAXIS'
assert self._test_header[0] == 2
assert list(self._test_header)[1] == 'DP1.NAUX'
assert self._test_header[1] == 2
def test_successive_pattern_matching(self):
"""
A card list returned via a filter string should be further filterable.
"""
cl = self._test_header['DP1.A*...']
assert ([str(c).strip() for c in cl.cards] ==
["DP1 = 'AXIS.1: 1'",
"DP1 = 'AXIS.2: 2'",
"DP1 = 'AUX.1.COEFF.0: 0'",
"DP1 = 'AUX.1.POWER.0: 1'",
"DP1 = 'AUX.1.COEFF.1: 0.00048828125'",
"DP1 = 'AUX.1.POWER.1: 1'"])
cl2 = cl['*.*AUX...']
assert ([str(c).strip() for c in cl2.cards] ==
["DP1 = 'AUX.1.COEFF.0: 0'",
"DP1 = 'AUX.1.POWER.0: 1'",
"DP1 = 'AUX.1.COEFF.1: 0.00048828125'",
"DP1 = 'AUX.1.POWER.1: 1'"])
def test_rvkc_in_cardlist_keys(self):
"""
The CardList.keys() method should return full keyword/field-spec values
for RVKCs.
"""
cl = self._test_header['DP1.AXIS.*']
assert list(cl) == ['DP1.AXIS.1', 'DP1.AXIS.2']
def test_rvkc_in_cardlist_values(self):
"""
The CardList.values() method should return the values of all RVKCs as
floating point values.
"""
cl = self._test_header['DP1.AXIS.*']
assert list(cl.values()) == [1.0, 2.0]
def test_rvkc_value_attribute(self):
"""
Individual card values should be accessible by the .value attribute
(which should return a float).
"""
cl = self._test_header['DP1.AXIS.*']
assert cl.cards[0].value == 1.0
assert isinstance(cl.cards[0].value, float)
def test_overly_permissive_parsing(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/183
Ensures that cards with standard commentary keywords are never treated
as RVKCs. Also ensures that cards not strictly matching the RVKC
pattern are not treated as such.
"""
h = fits.Header()
h['HISTORY'] = 'AXIS.1: 2'
h['HISTORY'] = 'AXIS.2: 2'
assert 'HISTORY.AXIS' not in h
assert 'HISTORY.AXIS.1' not in h
assert 'HISTORY.AXIS.2' not in h
assert h['HISTORY'] == ['AXIS.1: 2', 'AXIS.2: 2']
# This is an example straight out of the ticket where everything after
# the '2012' in the date value was being ignored, allowing the value to
# successfully be parsed as a "float"
h = fits.Header()
h['HISTORY'] = 'Date: 2012-09-19T13:58:53.756061'
assert 'HISTORY.Date' not in h
assert str(h.cards[0]) == _pad('HISTORY Date: 2012-09-19T13:58:53.756061')
c = fits.Card.fromstring(
" 'Date: 2012-09-19T13:58:53.756061'")
assert c.keyword == ''
assert c.value == "'Date: 2012-09-19T13:58:53.756061'"
assert c.field_specifier is None
h = fits.Header()
h['FOO'] = 'Date: 2012-09-19T13:58:53.756061'
assert 'FOO.Date' not in h
assert (str(h.cards[0]) ==
_pad("FOO = 'Date: 2012-09-19T13:58:53.756061'"))
def test_overly_aggressive_rvkc_lookup(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/184
Ensures that looking up a RVKC by keyword only (without the
field-specifier) in a header returns the full string value of that card
without parsing it as a RVKC. Also ensures that a full field-specifier
is required to match a RVKC--a partial field-specifier that doesn't
explicitly match any record-valued keyword should result in a KeyError.
"""
c1 = fits.Card.fromstring("FOO = 'AXIS.1: 2'")
c2 = fits.Card.fromstring("FOO = 'AXIS.2: 4'")
h = fits.Header([c1, c2])
assert h['FOO'] == 'AXIS.1: 2'
assert h[('FOO', 1)] == 'AXIS.2: 4'
assert h['FOO.AXIS.1'] == 2.0
assert h['FOO.AXIS.2'] == 4.0
assert 'FOO.AXIS' not in h
assert 'FOO.AXIS.' not in h
assert 'FOO.' not in h
pytest.raises(KeyError, lambda: h['FOO.AXIS'])
pytest.raises(KeyError, lambda: h['FOO.AXIS.'])
pytest.raises(KeyError, lambda: h['FOO.'])
def test_fitsheader_script(self):
"""Tests the basic functionality of the `fitsheader` script."""
from astropy.io.fits.scripts import fitsheader
        # Can an extension be specified by the EXTNAME keyword?
hf = fitsheader.HeaderFormatter(self.data('zerowidth.fits'))
output = hf.parse(extensions=['AIPS FQ'])
assert "EXTNAME = 'AIPS FQ" in output
assert "BITPIX" in output
# Can we limit the display to one specific keyword?
output = hf.parse(extensions=['AIPS FQ'], keywords=['EXTNAME'])
assert "EXTNAME = 'AIPS FQ" in output
assert "BITPIX =" not in output
assert len(output.split('\n')) == 3
# Can we limit the display to two specific keywords?
output = hf.parse(extensions=[1],
keywords=['EXTNAME', 'BITPIX'])
assert "EXTNAME =" in output
assert "BITPIX =" in output
assert len(output.split('\n')) == 4
# Can we use wildcards for keywords?
output = hf.parse(extensions=[1], keywords=['NAXIS*'])
assert "NAXIS =" in output
assert "NAXIS1 =" in output
assert "NAXIS2 =" in output
hf.close()
        # Can an extension be specified by the EXTNAME+EXTVER keywords?
hf = fitsheader.HeaderFormatter(self.data('test0.fits'))
assert "EXTNAME = 'SCI" in hf.parse(extensions=['SCI,2'])
hf.close()
# Can we print the original header before decompression?
hf = fitsheader.HeaderFormatter(self.data('comp.fits'))
assert "XTENSION= 'IMAGE" in hf.parse(extensions=[1],
compressed=False)
assert "XTENSION= 'BINTABLE" in hf.parse(extensions=[1],
compressed=True)
hf.close()
def test_fitsheader_compressed_from_primary_image_ext(self):
"""Regression test for issue https://github.com/astropy/astropy/issues/7312"""
data = np.arange(2*2, dtype=np.int8).reshape((2, 2))
phdu = fits.PrimaryHDU(data=data)
chdu = fits.CompImageHDU(data=phdu.data, header=phdu.header)
chdu.writeto(self.temp('tmp2.fits'), overwrite=True)
with fits.open(self.temp('tmp2.fits')) as hdul:
assert 'XTENSION' not in hdul[1].header
assert 'PCOUNT' not in hdul[1].header
assert 'GCOUNT' not in hdul[1].header
def test_fitsheader_table_feature(self):
"""Tests the `--table` feature of the `fitsheader` script."""
from astropy.io import fits
from astropy.io.fits.scripts import fitsheader
test_filename = self.data('zerowidth.fits')
formatter = fitsheader.TableHeaderFormatter(test_filename)
with fits.open(test_filename) as fitsobj:
# Does the table contain the expected number of rows?
mytable = formatter.parse([0])
assert len(mytable) == len(fitsobj[0].header)
# Repeat the above test when multiple HDUs are requested
mytable = formatter.parse(extensions=['AIPS FQ', 2, "4"])
assert len(mytable) == (len(fitsobj['AIPS FQ'].header)
+ len(fitsobj[2].header)
+ len(fitsobj[4].header))
# Can we recover the filename and extension name from the table?
mytable = formatter.parse(extensions=['AIPS FQ'])
assert np.all(mytable['filename'] == test_filename)
assert np.all(mytable['hdu'] == 'AIPS FQ')
assert mytable['value'][mytable['keyword'] == "EXTNAME"] == "AIPS FQ"
# Can we specify a single extension/keyword?
mytable = formatter.parse(extensions=['AIPS FQ'],
keywords=['EXTNAME'])
assert len(mytable) == 1
assert mytable['hdu'][0] == "AIPS FQ"
assert mytable['keyword'][0] == "EXTNAME"
assert mytable['value'][0] == "AIPS FQ"
# Is an incorrect extension dealt with gracefully?
mytable = formatter.parse(extensions=['DOES_NOT_EXIST'])
assert mytable is None
# Is an incorrect keyword dealt with gracefully?
mytable = formatter.parse(extensions=['AIPS FQ'],
keywords=['DOES_NOT_EXIST'])
assert mytable is None
formatter.close()
@pytest.mark.parametrize('mode', ['wb', 'wb+', 'ab', 'ab+'])
def test_hdu_writeto_mode(self, mode):
with open(self.temp('mode.fits'), mode=mode) as ff:
hdu = fits.ImageHDU(data=np.ones(5))
hdu.writeto(ff)
def test_subclass():
"""Check that subclasses don't get ignored on slicing and copying."""
class MyHeader(fits.Header):
def append(self, card, *args, **kwargs):
if isinstance(card, tuple) and len(card) == 2:
# Just for our checks we add a comment if there is none.
card += ('no comment',)
return super().append(card, *args, **kwargs)
my_header = MyHeader((('a', 1., 'first'),
('b', 2., 'second'),
('c', 3.,)))
assert my_header.comments['a'] == 'first'
assert my_header.comments['b'] == 'second'
assert my_header.comments['c'] == 'no comment'
slice_ = my_header[1:]
assert type(slice_) is MyHeader
assert slice_.comments['b'] == 'second'
assert slice_.comments['c'] == 'no comment'
selection = my_header['c*']
assert type(selection) is MyHeader
assert selection.comments['c'] == 'no comment'
copy_ = my_header.copy()
assert type(copy_) is MyHeader
assert copy_.comments['b'] == 'second'
assert copy_.comments['c'] == 'no comment'
my_header.extend((('d', 4.),))
assert my_header.comments['d'] == 'no comment'
| {
"content_hash": "f7c11e82cfa7f42c99ccbf9a9a3d703b",
"timestamp": "",
"source": "github",
"line_count": 3096,
"max_line_length": 103,
"avg_line_length": 38.3640180878553,
"alnum_prop": 0.5529867396337613,
"repo_name": "larrybradley/astropy",
"id": "5488733e8e216a9caf02f7a6830e2a90fe724a0d",
"size": "118841",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "astropy/io/fits/tests/test_header.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11040101"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "78755"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52508"
},
{
"name": "Python",
"bytes": "12335716"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
} |
import os
import logging
import urlparse
import simplejson as json
import itertools
from os.path import join
from uuid import uuid4
from zipfile import ZipFile
from datetime import datetime
from lxml import etree
from shutil import rmtree
from django.utils.functional import cached_property
from .config import DATETIME_FORMAT
logger = logging.getLogger(__name__)
DATA_TEXT = ['html', 'text']
class Data(object):
"""Stores ouput data collected from set of operations, with additional
information"""
def __init__(self, *args, **kwargs):
self.uuid = kwargs.get('uuid') or kwargs.get('id')
self.task = kwargs.get('task_id')
self.url = kwargs.get('url')
self.start = kwargs.get('start') or datetime.now()
self.end = kwargs.get('end') or None
self.results = []
@property
def dict(self):
if self.end is None:
self.end = datetime.now()
result = {
'id': self.uuid,
'task': self.task,
'url': self.url,
'start': print_time(self.start),
'end': print_time(self.end),
'results': self.results,
}
return result
def update(self, **kwargs):
""" Update this object data with provided dictionary """
for key in kwargs:
self.__setattr__(key, kwargs[key])
def add_result(self, result):
self.results.append(result.dict)
@cached_property
def json(self):
""" Return as pretty JSON """
return json.dumps(self.dict, indent=2)
class Datum(object):
"""Holds ouput of a single operation, supports export to JSON.
...
extras - Holds non-result information"""
def __init__(self, content, media=None, images=None, **kwargs):
self.content = content
self.media = media or []
self.images = images or []
self.extras = kwargs
@property
def dict(self):
return self.__dict__
@property
def json(self):
return json.dumps(self.__dict__, indent=2)
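# Illustrative usage sketch (added; not part of the original module).  It
# assumes only the classes above and shows how a Data container aggregates
# the Datum results of individual operations before serialization.
def _example_collect_results():
    data = Data(task_id='task-1', url='http://example.com/page')
    data.add_result(Datum(content='<h1>Title</h1>', images=['img/logo.png']))
    data.add_result(Datum(content='Plain text block'))
    # Accessing .json freezes the end time and pretty-prints the payload
    return data.json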
def complete_url(base, link):
"""Test and complete an URL with scheme, domain, base path if missing.
If base doesn't have scheme, it will be auto added."""
url = link['url'] if isinstance(link, dict) else link
elements = urlparse.urlsplit(url)
if not elements.scheme:
url = urlparse.urljoin(base, url)
if isinstance(link, dict):
link['url'] = url
else:
link = url
return link
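# Minimal sketch (added) of how complete_url resolves scheme-less links
# against a base address; the URLs are placeholders.
def _example_complete_url():
    assert complete_url('http://example.com/a/', 'b.html') == \
        'http://example.com/a/b.html'
    link = {'url': '/about', 'text': 'About'}
    # Dict-style links are completed in place and returned
    assert complete_url('http://example.com/a/', link)['url'] == \
        'http://example.com/about'
    return link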
def get_link_info(link, make_root=False):
"""Extract basic information from a given link (as etree Element),
and return a dictionary:
{
'url': '...',
'text': '...',
}
    If the URL is invalid, the function will return None
"""
if isinstance(link, etree._Element):
href = link.get('href') if not make_root else '/'+link.get('href')
text = link.text.strip() if isinstance(link.text, basestring) else ''
if href:
return {'url': href.strip(), 'text': text}
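# Minimal sketch (added): extracting the href/text pair from a parsed anchor
# element; the markup is a placeholder.
def _example_get_link_info():
    from lxml import html as lhtml
    anchor = lhtml.fromstring('<a href="/about"> About us </a>')
    # Returns {'url': '/about', 'text': 'About us'}
    return get_link_info(anchor)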
def get_single_content(element, data_type):
"""Return the processed content of given element"""
if isinstance(element, basestring) or \
isinstance(element, etree._ElementStringResult) or \
isinstance(element, etree._ElementUnicodeResult):
return element
if data_type == 'text':
# Return element.text or ''
return etree.tounicode(element, method='text').strip()
elif data_type == 'html':
return etree.tounicode(element, pretty_print=True).strip()
def get_content(elements, data_type='html'):
"""Receive XPath result and returns appropriate content"""
# Eliminate empty string elements
items = []
if hasattr(elements, '__iter__'):
items = [get_single_content(el, data_type) for el in elements]
else:
items = get_single_content(elements, data_type)
    if data_type in DATA_TEXT and isinstance(items, list):
        # Filter out empty values without mutating the list being iterated
        items = [val for val in items if val]
return items
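# Minimal sketch (added) of pulling text content out of an XPath result with
# the helpers above; the markup is a placeholder.
def _example_get_content():
    from lxml import html as lhtml
    page = lhtml.fromstring('<div><a href="/x">Link</a><p>Hello</p></div>')
    # Returns ['Hello']
    return get_content(page.xpath('//p'), data_type='text')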
def print_time(atime=None, with_time=True):
"""Return string friendly value of given time"""
if isinstance(atime, basestring):
return atime
atime = atime or datetime.now()
try:
return atime.strftime(DATETIME_FORMAT)
except AttributeError:
pass
return ''
def get_uuid(url='', base_dir='', size=8):
""" Return whole new and unique ID and make sure not being duplicated
if base_dir is provided
url (optional) - Address of related page
base_dir (optional) - Directory path to check for duplication
size (optional) - Size of the UUID prefix
"""
netloc = urlparse.urlsplit(url).netloc
duplicated = True
while duplicated:
value = uuid4().get_hex()[:size]
uuid = '{0}-{1}'.format(value, netloc) if netloc else value
if base_dir:
duplicated = os.path.exists(join(base_dir, uuid))
else:
duplicated = False
return uuid
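# Illustrative sketch (added): generated IDs are short hex prefixes,
# optionally suffixed with the netloc of the related URL
# (e.g. '1a2b3c4d-example.com').  The URL below is a placeholder.
def _example_get_uuid():
    plain = get_uuid()
    scoped = get_uuid(url='http://example.com/page')
    return plain, scoped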
def write_storage_file(storage, file_path, content):
""" Write a file with path and content into given storage. This
merely tries to support both FileSystem and S3 storage
Arguments:
storage - Django file storage
file_path - relative path to the file
content - content of file to be written
"""
try:
mfile = storage.open(file_path, 'w')
mfile.write(content)
mfile.close()
except IOError:
        # When directories are not created automatically, an exception is raised.
# Then try to rewrite using the FileSystemStorage
location = join(storage.base_location, os.path.dirname(file_path))
if not os.path.exists(location):
os.makedirs(location)
mfile = storage.open(file_path, 'w')
mfile.write(content)
mfile.close()
return file_path
def move_to_storage(storage, source, location):
""" Move single file or whole directory to storage. Empty directory
will not be moved.
Arguments:
storage: Instance of the file storage (FileSystemStorage,...)
source: File or directory to be moved
location: Relative path where the file/dir will be placed into.
Returns:
Path of file in storage
"""
source = source.strip().rstrip('/')
if os.path.isfile(source):
saved_path = write_storage_file(
storage, join(location, os.path.basename(source)),
open(source, 'r').read())
else:
blank_size = len(source.rsplit('/', 1)[0]) + 1
for items in os.walk(source):
loc = join(location, items[0][blank_size:])
for item in items[2]:
write_storage_file(
storage, join(loc, item),
open(join(items[0], item), 'r').read())
saved_path = join(location, os.path.basename(source))
# Nuke old file/dir
try:
if os.path.isfile(source):
os.remove(source)
else:
rmtree(source)
except OSError:
logger.exception('Error when deleting: {0}'.format(source))
return saved_path
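# Hedged usage sketch (added): moving a downloaded file into a Django
# FileSystemStorage.  The paths and storage location are assumptions made
# only for illustration.
def _example_move_to_storage():
    from django.core.files.storage import FileSystemStorage
    storage = FileSystemStorage(location='/tmp/scraper-results')
    # The file ends up under '<location>/task-1/page.html' and the source
    # '/tmp/page.html' is removed afterwards.
    return move_to_storage(storage, '/tmp/page.html', 'task-1')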
class SimpleArchive(object):
""" This class provides functionalities to create and maintain archive
file, which is normally used for storing results. """
_file = None
def __init__(self, file_path='', base_dir='', *args, **kwargs):
        # Generate a new, non-duplicated file name when none is provided
if not file_path:
file_path = get_uuid(base_dir=base_dir)
self.file_path = join(base_dir, file_path)
# Create directories if not existing
location = os.path.dirname(self.file_path)
if not os.path.exists(location):
os.makedirs(location)
if os.path.exists(self.file_path):
os.remove(self.file_path)
self._file = ZipFile(self.file_path, 'w')
def write(self, file_name, content):
""" Write file with content into current archive """
self._file.writestr(file_name, content)
def finish(self):
self._file.close()
def move_to_storage(self, storage, location, remove=True):
""" Move the current archive to given location (directory) in storage.
Arguments:
storage: Instance of the file storage (FileSystemStorage,...)
location: Absolute path where the file will be placed into.
remove: Option to remove the current file after moved or not.
Returns:
Path of file in storage
"""
self.finish()
content = open(self._file.filename, 'r').read()
file_path = join(location, os.path.basename(self._file.filename))
saved_path = write_storage_file(storage, file_path, content)
# Remove file if successful
if remove and saved_path:
try:
os.remove(self._file.filename)
self._file = None
except OSError:
logger.error('Error when removing temporary file: {0}'.format(
self._file.filename))
return saved_path
def __str__(self):
dsc = self._file.filename if self._file else '_REMOVED_'
return 'SimpleArchive ({0})'.format(dsc)
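# Hedged usage sketch (added): collecting results into a zip archive and
# handing it over to a storage backend.  Names and paths are placeholders.
def _example_simple_archive():
    from django.core.files.storage import FileSystemStorage
    archive = SimpleArchive(base_dir='/tmp/scraper-archives')
    archive.write('result.json', '{"status": "ok"}')
    archive.write('page.html', '<html></html>')
    storage = FileSystemStorage(location='/tmp/scraper-results')
    # finish() is called internally before the zip is copied into storage
    return archive.move_to_storage(storage, 'task-1')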
def interval_to_list(interval):
"""Convert interval string to list of number
'1-4'
Returns:
[1, 2, 3, 4]
"""
elements = [e.strip().split('-') for e in interval.split(',')]
return [n for r in elements for n in range(int(r[0]), int(r[-1])+1)]
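# Minimal check (added) of the interval expansion described above; both
# single values and hyphenated ranges are supported.
def _example_interval_to_list():
    assert interval_to_list('1-3, 7') == [1, 2, 3, 7]
    assert interval_to_list('5') == [5]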
def generate_urls(base_url, elements=None):
"""Returns every URL base on the starting URL and other values
base_url = 'http://domain/class-{0}/?name={1}'
elements = ((1, 2), ('jane', 'john'))
Returns:
[
'http://domain/class-1/?name=jane'
'http://domain/class-1/?name=john'
'http://domain/class-2/?name=jane'
'http://domain/class-2/?name=john'
]
"""
# Convert the intervals into lists
refined = []
for element in elements:
full_list = []
for i, value in enumerate(element):
if isinstance(value, basestring) and '-' in value:
full_list.extend(interval_to_list(value))
else:
full_list.append(value)
refined.append(full_list)
for comb in itertools.product(*refined):
yield base_url.format(*comb)
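# Illustrative expansion sketch (not part of the original module); the domain
# and query values are placeholders.
def _example_generate_urls():  # pragma: no cover
    url = 'http://example.com/page-{0}/?cat={1}'
    # '1-3' is expanded by interval_to_list(); plain values pass through as-is,
    # giving six URLs in total.
    return list(generate_urls(url, elements=(('1-3',), ('books', 'news'))))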
| {
"content_hash": "b3d4d2fe8b0db10eb6f248d48c1f0c7e",
"timestamp": "",
"source": "github",
"line_count": 324,
"max_line_length": 78,
"avg_line_length": 31.99074074074074,
"alnum_prop": 0.6005788712011577,
"repo_name": "zniper/django-scraper",
"id": "1114702b02f6d56fec807550d65a2d2c911a1cf1",
"size": "10365",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scraper/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "91661"
},
{
"name": "Python",
"bytes": "77145"
}
],
"symlink_target": ""
} |
"""
This is a REST API to simulate a bowling algorithm.
Example::
from requests import post, put, get, delete
games = get('http://localhost:5000/v1/games').json()
Returns a list of games currently in the system. For more examples and the full api, see :py:mod:`app.resources.game`
"""
from flask import Flask
from flask.ext import restful
from flask.ext.sqlalchemy import SQLAlchemy
__version__ = 'v1'
def create_app():
"""Creates the flask app and configures it.
"""
app = Flask(__name__)
app.config.from_object('config')
return app
def get_db(app):
"""Creates the sqlalchemy database and returns it.
"""
return SQLAlchemy(app)
app = create_app()
db = get_db(app)
from app.resources import (
GamesResource, GameResource, RollResource)
api = restful.Api(app, prefix="/"+__version__)
api.add_resource(GamesResource, '/games')
api.add_resource(GameResource, '/games/<string:game_id>')
api.add_resource(RollResource, '/games/<string:game_id>/roll')
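# Route summary for the resources registered above (the HTTP verbs each
# resource accepts are defined in app.resources and are assumed here):
#   /v1/games                -> GamesResource (e.g. GET to list, POST to create)
#   /v1/games/<game_id>      -> GameResource  (e.g. GET a single game)
#   /v1/games/<game_id>/roll -> RollResource  (e.g. PUT to record a roll)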
if __name__ == '__main__':
app.run(debug=True)
| {
"content_hash": "04d0184bbc8da4c05b7cd6a8a6c59185",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 118,
"avg_line_length": 23.42222222222222,
"alnum_prop": 0.6821631878557874,
"repo_name": "xuru/bowling",
"id": "02e1be1d9804a0520e4c60e10021c3dbbe712135",
"size": "1054",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29106"
}
],
"symlink_target": ""
} |
from .dateAndTime import *
from .sections import *
from .subsections import * | {
"content_hash": "fda2b319b509733e38c86117d60b4260",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 26,
"avg_line_length": 25.666666666666668,
"alnum_prop": 0.7792207792207793,
"repo_name": "saraivaufc/jornalEletronico",
"id": "f52f2d3db90d92064d7ac10d70bc57d17a520e6b",
"size": "77",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "newspaper/views/services/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "49386"
},
{
"name": "CSS",
"bytes": "677320"
},
{
"name": "HTML",
"bytes": "5607416"
},
{
"name": "JavaScript",
"bytes": "213888"
},
{
"name": "PHP",
"bytes": "146128"
},
{
"name": "Python",
"bytes": "79309"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python3
from distutils.core import setup
setup(
name='dg',
version='1.1.0+git',
description='A programming language for the CPython VM',
author='pyos',
author_email='[email protected]',
url='https://github.com/pyos/dg.git',
packages=['dg'],
package_dir={'dg': '.'},
package_data={'dg': ['*.dg', '*/*.dg', '*/*.dgbundle']}
)
| {
"content_hash": "6f7a85efa0c5a134859c15a024f14380",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 60,
"avg_line_length": 27.071428571428573,
"alnum_prop": 0.5963060686015831,
"repo_name": "pyos/dg",
"id": "47fd51f739a6b644a799826a63304b06ace6b6c6",
"size": "379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1179"
}
],
"symlink_target": ""
} |
import socket
#system_ip = socket.gethostbyname(socket.getfqdn())
system_ip = "127.0.0.1"
peer_client_ip = system_ip
super_server_ip = system_ip
peer_server_ip = system_ip
super_client_ip = system_ip
debug = True #Print all activity on screen
peer_server_port = 2220 #Receiving port for statistics data
super_request_port = 2221 #Receiving port for Super peer list
peer_request_port = 2222 #Receiving port for peer list
super_server_port = 2223 #Super peer Server port
super_peer_enable = True #Enable Super peer function
peer_enable = True #Enable peer function for Super peer
peer_file = "conf/peers.conf" #List of peers
super_peer_file = "conf/super_peers.conf" #List of Super peers
log_file = "logs/peers.log" #Log file for system statistics
max_log_size = 3*1024*1024 #3MB #Maximum log file size
client_interval = 30 #Time interval for the client to send stats to other peers
super_client_interval = 2000 #Time interval for Super Peer to update peers and super peer lists
peer_client_interval = 1000 #Time interval for client to update peers and super peer lists
get_peer = "get_peer" #Get list of peers
get_super_peer = "get_super_peer" #Get list of Super peers
add_peer = "add_peer" #Add peer to Super peer
add_super_peer = "add_super_peer" #Add Super peer to Super peer
remove_peer = "remove_peer" #Remove peer from super peer
remove_super_peer = "remove_super_peer" #Remove Super peer from Super peer
| {
"content_hash": "2abf68ea1a7c508164ba4f5d0e8205dc",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 95,
"avg_line_length": 41.911764705882355,
"alnum_prop": 0.7536842105263157,
"repo_name": "cloudbansal/p2p_health",
"id": "675ce8ac287e015c6b0ebdf99fecf244bd3fcc94",
"size": "1425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conf/params.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28249"
}
],
"symlink_target": ""
} |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 5, transform = "Fisher", sigma = 0.0, exog_count = 20, ar_order = 0); | {
"content_hash": "f32eb1130ea89f8fc02553ee9df584c3",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 162,
"avg_line_length": 37.42857142857143,
"alnum_prop": 0.7022900763358778,
"repo_name": "antoinecarme/pyaf",
"id": "d0416b12a2d00df65915d4d2c51dd3cd9667e019",
"size": "262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Fisher/trend_PolyTrend/cycle_5/ar_/test_artificial_1024_Fisher_PolyTrend_5__20.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
import os
cpth = os.path.join('temp', 't005')
# make the directory if it does not exist
if not os.path.isdir(cpth):
os.makedirs(cpth)
def test_modflow_unstructured():
import flopy
mf = flopy.modflow.Modflow(version='mfusg', structured=False,
model_ws=cpth)
assert isinstance(mf, flopy.modflow.Modflow)
dis = flopy.modflow.ModflowDis(mf)
assert isinstance(dis, flopy.modflow.ModflowDis)
bas = flopy.modflow.ModflowBas(mf)
assert isinstance(bas, flopy.modflow.ModflowBas)
lpf = flopy.modflow.ModflowLpf(mf)
assert isinstance(lpf, flopy.modflow.ModflowLpf)
wel = flopy.modflow.ModflowWel(mf, stress_period_data={0: [[0, -100]]})
assert isinstance(wel, flopy.modflow.ModflowWel)
ghb = flopy.modflow.ModflowGhb(mf,
stress_period_data={0: [[1, 5.9, 1000.]]})
assert isinstance(ghb, flopy.modflow.ModflowGhb)
oc = flopy.modflow.ModflowOc(mf)
assert isinstance(oc, flopy.modflow.ModflowOc)
sms = flopy.modflow.ModflowSms(mf)
assert isinstance(sms, flopy.modflow.ModflowSms)
# write well file
wel.write_file()
assert os.path.isfile(os.path.join(cpth, '{}.wel'.format(mf.name))) is True
wel2 = flopy.modflow.ModflowWel.load(
os.path.join(cpth, '{}.wel'.format(mf.name)), mf)
assert wel2.stress_period_data[0] == wel.stress_period_data[0]
# write ghb file
ghb.write_file(check=False)
assert os.path.isfile(os.path.join(cpth, '{}.ghb'.format(mf.name))) is True
ghb2 = flopy.modflow.ModflowGhb.load(
os.path.join(cpth, '{}.ghb'.format(mf.name)), mf)
assert ghb2.stress_period_data[0] == ghb.stress_period_data[0]
return
if __name__ == '__main__':
test_modflow_unstructured()
| {
"content_hash": "d46ab12ee8813798d347dfbdbbb7088b",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 79,
"avg_line_length": 32.345454545454544,
"alnum_prop": 0.6554243957279371,
"repo_name": "aleaf/flopy",
"id": "2e6a766b527d8b9066e20b78632ce431d1ef54ee",
"size": "1817",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "autotest/t005_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "67"
},
{
"name": "Python",
"bytes": "5469342"
},
{
"name": "Shell",
"bytes": "2562"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.utils.timezone import now
from rapidsms.backends.kannel import KannelBackend
from rapidsms.backends.kannel.forms import KannelForm
from rapidsms.backends.kannel.models import DeliveryReport
from rapidsms.tests.harness import RapidTest, CreateDataMixin
class KannelFormTest(TestCase):
def test_valid_form(self):
"""Form should be valid if GET keys match configuration."""
data = {'id': '1112223333', 'text': 'hi there'}
form = KannelForm(data, backend_name='kannel-backend')
self.assertTrue(form.is_valid())
def test_invalid_form(self):
"""Form is invalid if POST keys don't match configuration."""
data = {'invalid-phone': '1112223333', 'invalid-message': 'hi there'}
form = KannelForm(data, backend_name='kannel-backend')
self.assertFalse(form.is_valid())
def test_get_incoming_data(self):
"""get_incoming_data should return matching text and connection."""
data = {'id': '1112223333', 'text': 'hi there'}
form = KannelForm(data, backend_name='kannel-backend')
form.is_valid()
incoming_data = form.get_incoming_data()
self.assertEqual(data['text'], incoming_data['text'])
self.assertEqual(data['id'],
incoming_data['connection'].identity)
self.assertEqual('kannel-backend',
incoming_data['connection'].backend.name)
class KannelViewTest(RapidTest):
urls = 'rapidsms.backends.kannel.urls'
disable_phases = True
def test_valid_response_get(self):
"""HTTP 200 should return if data is valid."""
data = {'id': '1112223333', 'text': 'hi there'}
response = self.client.get(reverse('kannel-backend'), data)
self.assertEqual(response.status_code, 200)
def test_invalid_response(self):
"""HTTP 400 should return if data is invalid."""
data = {'invalid-phone': '1112223333', 'message': 'hi there'}
response = self.client.get(reverse('kannel-backend'), data)
self.assertEqual(response.status_code, 400)
def test_valid_post_message(self):
"""Valid POSTs should pass message object to router."""
data = {'id': '1112223333', 'text': 'hi there'}
self.client.get(reverse('kannel-backend'), data)
message = self.inbound[0]
self.assertEqual(data['text'], message.text)
self.assertEqual(data['id'],
message.connection.identity)
self.assertEqual('kannel-backend',
message.connection.backend.name)
class KannelSendTest(CreateDataMixin, TestCase):
urls = 'rapidsms.backends.kannel.urls'
def test_outgoing_keys(self):
"""Outgoing POST data should contain the proper keys."""
message = self.create_outgoing_message()
config = {
"sendsms_url": "http://127.0.0.1:13013/cgi-bin/sendsms",
"sendsms_params": {"smsc": "usb0-modem",
"from": "+SIMphonenumber",
"username": "rapidsms",
"password": "CHANGE-ME"},
"coding": 0,
"charset": "ascii",
"encode_errors": "ignore",
}
backend = KannelBackend(None, "kannel", **config)
kwargs = backend.prepare_request(1, message.text,
[message.connections[0].identity], {})
data = kwargs['params']
self.assertEqual(config['sendsms_params']['smsc'], data['smsc'])
self.assertEqual(config['sendsms_params']['from'], data['from'])
self.assertEqual(config['sendsms_params']['username'],
data['username'])
self.assertEqual(config['sendsms_params']['password'],
data['password'])
self.assertEqual(message.connection.identity, data['to'])
self.assertEqual(config['coding'], data['coding'])
self.assertEqual(config['charset'], data['charset'])
self.assertEqual(message.text, data['text'].decode(data['charset']))
def test_outgoing_unicode_characters(self):
"""Ensure outgoing messages are encoded properly."""
message = self.create_outgoing_message()
config = {
"sendsms_params": {"smsc": "usb0-modem",
"from": "+SIMphonenumber",
"username": "rapidsms",
"password": "CHANGE-ME"},
"charset": "UTF-8",
}
backend = KannelBackend(None, "kannel", **config)
kwargs = backend.prepare_request(1, message.text,
[message.connections[0].identity], {})
data = kwargs['params']
self.assertEqual(data['text'].decode('UTF-8'), message.text)
def test_delivery_report_url(self):
"""delivery_report_url config option should send Kannel proper args."""
message = self.create_outgoing_message()
config = {
"sendsms_params": {"smsc": "usb0-modem",
"from": "+SIMphonenumber",
"username": "rapidsms",
"password": "CHANGE-ME"},
"delivery_report_url": "http://localhost:8000",
}
backend = KannelBackend(None, "kannel", **config)
kwargs = backend.prepare_request(1, message.text,
[message.connections[0].identity], {})
data = kwargs['params']
self.assertEqual(31, data['dlr-mask'])
self.assertTrue("http://localhost:8000" in data['dlr-url'])
class KannelDeliveryReportTest(CreateDataMixin, TestCase):
urls = 'rapidsms.backends.kannel.urls'
def test_valid_post(self):
"""Valid delivery reports should create reports in the DB."""
msg = self.create_outgoing_message()
query = {'message_id': msg.id,
'identity': msg.connections[0].identity,
'status': 1,
'status_text': 'Success',
'smsc': 'usb0-modem',
'sms_id': self.random_string(36),
'date_sent': now()}
url = reverse('kannel-delivery-report')
response = self.client.get(url, query)
self.assertEqual(200, response.status_code)
self.assertEqual(1, DeliveryReport.objects.count())
report = DeliveryReport.objects.all()[0]
self.assertEqual(msg.id, report.message_id)
def test_invalid_post(self):
"""Invalid post data should generate a 400."""
msg = self.create_outgoing_message()
query = {'message_id': msg.id,
'identity': msg.connections[0].identity,
'status': 3,
'status_text': 'Success',
'smsc': 'usb0-modem',
'sms_id': self.random_string(36),
'date_sent': now()}
url = reverse('kannel-delivery-report')
response = self.client.get(url, query)
self.assertEqual(400, response.status_code)
| {
"content_hash": "b47e610e180e1a4ebbaee3954657479b",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 79,
"avg_line_length": 43.19277108433735,
"alnum_prop": 0.5736401673640167,
"repo_name": "peterayeni/rapidsms",
"id": "6f62924ad21b8ee0686b486fbbdb76c65c7c26f6",
"size": "7170",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "rapidsms/backends/kannel/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "27100"
},
{
"name": "HTML",
"bytes": "39126"
},
{
"name": "JavaScript",
"bytes": "16887"
},
{
"name": "Python",
"bytes": "349490"
},
{
"name": "Shell",
"bytes": "149"
}
],
"symlink_target": ""
} |
import json
from django.urls import reverse
from api.models import CreditCard, Income, Overdraft, PayType
from api.utils import serialize_money
from .base import APIBaseTest
class DebtTests(APIBaseTest):
url = reverse('get-debts')
def test_get_debts(self):
credit_card = CreditCard.objects.create(
name='First',
interest_rate=20.0,
balance=1000_00,
min_payment=10_00,
min_payment_percent=15.0,
annual_fee=100_00,
user=self.user,
)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(
response.content,
[credit_card.to_JSON()],
)
def test_credit_cards_sorted_by_interest_rate(self):
# The same for both
balance = 1000_00
min_payment = 10_00
min_payment_percent = 10.0
annual_fee = 100_00
card1 = CreditCard.objects.create(
name='One',
interest_rate=20.0,
balance=balance,
min_payment=min_payment,
min_payment_percent=min_payment_percent,
annual_fee=annual_fee,
user=self.user,
)
card2 = CreditCard.objects.create(
name='Two',
interest_rate=20.1,
balance=balance,
min_payment=min_payment,
min_payment_percent=min_payment_percent,
annual_fee=annual_fee,
user=self.user,
)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(
response.content,
[card2.to_JSON(), card1.to_JSON()],
)
self.assertEqual(self.user.get_total_debt(), serialize_money(balance + balance))
def test_debts_sorted_by_fee(self):
"""
        Monthly/annual costs should be considered
"""
card = CreditCard.objects.create(
name='One',
interest_rate=20.0,
balance=1000_00,
min_payment=10_00,
min_payment_percent=10.0,
annual_fee=100_00,
user=self.user,
)
overdraft = Overdraft.objects.create(
name='Over',
interest_rate=20.0,
balance=1000_00,
monthly_fee=9_00,
user=self.user,
)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(
response.content,
[overdraft.to_JSON(), card.to_JSON()],
)
def test_debts_sorted_properly(self):
"""
        Interest rates and fees should both be taken into consideration
"""
# An example where the higher interest rate
# will not cost more than the annual fee
card1 = CreditCard.objects.create(
name='One',
interest_rate=20.0,
balance=1000_00,
min_payment=10_00,
min_payment_percent=10.0,
annual_fee=111_00,
user=self.user,
)
card2 = CreditCard.objects.create(
name='Two',
interest_rate=21.0,
balance=1000_00,
min_payment=10_00,
min_payment_percent=10.0,
annual_fee=100_00,
user=self.user,
)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(
response.content,
[card1.to_JSON(), card2.to_JSON()],
)
def test_debts_cc_and_overdraft_sorted(self):
card = CreditCard.objects.create(
name='One',
interest_rate=20.0,
balance=1000_00,
min_payment=10_00,
min_payment_percent=10.0,
annual_fee=100_00,
user=self.user,
)
overdraft = Overdraft.objects.create(
name='Over',
interest_rate=21.0,
balance=1000_00,
monthly_fee=5_00,
user=self.user,
)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(
response.content,
[card.to_JSON(), overdraft.to_JSON()],
)
def test_timeline_with_credit_card(self):
Income.objects.create(
name='Job',
user=self.user,
pay_amount=200_00,
pay_type=PayType.MONTHLY,
)
CreditCard.objects.create(
name='One',
interest_rate=20.0,
balance=1000_00,
min_payment=10_00,
min_payment_percent=10.0,
annual_fee=100_00,
user=self.user,
)
url = reverse('get-timeline')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content)['num_months'], 7)
def test_timeline_with_credit_card_and_overdraft(self):
Income.objects.create(
name='Job',
user=self.user,
pay_amount=500_00,
pay_type=PayType.SEMI_MONTHLY,
)
CreditCard.objects.create(
name='One',
interest_rate=20.0,
balance=1000_00,
min_payment=10_00,
min_payment_percent=10.0,
annual_fee=100_00,
user=self.user,
)
Overdraft.objects.create(
name='Over',
interest_rate=20.0,
balance=1000_00,
monthly_fee=9_00,
user=self.user,
)
url = reverse('get-timeline')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content)['num_months'], 4)
def test_timeline_cannot_reduce_debt(self):
Income.objects.create(
name='Job',
user=self.user,
pay_amount=100_00,
pay_type=PayType.SEMI_MONTHLY,
)
CreditCard.objects.create(
name='One',
interest_rate=20.0,
balance=1000_00,
min_payment=10_00,
min_payment_percent=10.0,
annual_fee=100_00,
user=self.user,
)
Overdraft.objects.create(
name='Over',
interest_rate=20.0,
balance=1000_00,
monthly_fee=9_00,
user=self.user,
)
url = reverse('get-timeline')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content)['num_months'], -1)
| {
"content_hash": "27fa84e0a67f02798f0724c9f461e1a7",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 88,
"avg_line_length": 30.71818181818182,
"alnum_prop": 0.5292986090559337,
"repo_name": "Siecje/debt",
"id": "3765eef48c8430ac6844a809d1ba57acba375c3b",
"size": "6758",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "api/tests/test_debt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "65644"
}
],
"symlink_target": ""
} |
import copy
import logging
import os
import PIL.Image
import tempfile
import threading
from handlers.handler_troll_response import HandlerTrollResponse
cached_get_image_lock = threading.Lock()
def cached_get_image(source, width, height):
    # Hold the lock via a context manager so it is released even when PIL
    # raises while generating the thumbnail.
    with cached_get_image_lock:
        path = os.path.join(
            os.path.dirname(__file__),
            'imgcache',
            '%s-%s.jpg' % (width, height))
        if not os.path.exists(path):
            im = PIL.Image.open(source)
            im.thumbnail((width, height), PIL.Image.ANTIALIAS)
            im.save(path, 'JPEG')
        return path
class ReplaceImageRequestHandlerTroll(object):
def __init__(self, source_image, minimum_size, image_content_types):
self._minimum_width, self._minimum_height = minimum_size
self._image_content_types = image_content_types
self._source_image = source_image
self._logger = logging.getLogger('trollproxy')
@property
def name(self):
return "replace images"
def handle(self, remote_server_response_headers, remote_server_response_text):
        content_type = remote_server_response_headers.get('Content-Type', '')
        if content_type in self._image_content_types:
try:
filename = tempfile.mktemp()
with open(filename, 'wb') as f:
f.write(remote_server_response_text)
im = PIL.Image.open(filename)
w, h = im.size
os.unlink(filename)
if w >= self._minimum_width and h >= self._minimum_height:
headers = copy.copy(remote_server_response_headers)
headers['Content-Type'] = 'image/jpeg'
return HandlerTrollResponse(
changed=True,
new_text=open(cached_get_image(self._source_image, w, h), 'rb').read(),
new_headers=headers)
else:
print("ReplaceImageRequestHandlerTroll: image too small")
except Exception as e:
print("ReplaceImageRequestHandlerTroll: exception: %s, not changing" % repr(e)[:100])
return HandlerTrollResponse(
changed=False)
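# Illustrative construction sketch (the values below are placeholders; the real
# wiring lives elsewhere in the proxy configuration):
#   troll = ReplaceImageRequestHandlerTroll(
#       source_image='trollface.jpg',
#       minimum_size=(200, 200),
#       image_content_types=('image/jpeg', 'image/png'))
#   result = troll.handle(response_headers, response_body)
#   # result.changed is True only for sufficiently large images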
| {
"content_hash": "2a7e2b7943a3f09a450ab576a00c8aba",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 101,
"avg_line_length": 35.796875,
"alnum_prop": 0.5975556525534701,
"repo_name": "kazetkazet/trollproxy",
"id": "15284b108aac5e1fb60c65823379c375f8438923",
"size": "2291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "handlers/request_handler_trolls/replace_image.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13738"
}
],
"symlink_target": ""
} |
"""
This module contains the class `CKDio`.
"""
__all__ = ['CKDio']
from pathlib import Path, PosixPath
import h5py
import numpy as np
import xarray as xr
from moniplot.image_to_xarray import h5_to_xr
# - local functions ------------------------------
def reject_row257(xarr):
"""
Remove row 257 from DataArray or Dataset
"""
return xarr.isel(row=np.s_[0:256])
# - class definition -------------------------------
class CKDio():
"""
Read Tropomi CKD from the Static CKD product or from dynamic CKD products
Parameters
----------
ckd_dir : str, optional
Directory where the CKD files are stored,
default='/nfs/Tropomi/share/ckd'
ckd_version : int, optional
Version of the CKD, default=1
ckd_file : str, optional
        Name of the CKD file, default=None, in which case the CKD file is
        searched for in the directory ckd_dir, with ckd_version in the glob-string
Notes
-----
Not all CKD are defined or derived for all bands.
    You can request a CKD for one band or for a channel (bands: '12', '34',
    '56', '78'). Do not mix bands from different channels.
    Dynamic CKD products are not used for the Tropomi mission; only for S/W
    version 1 is a dynamic CKD product defined. That product contained the
    OCAL CKD and was not updated automatically. For version 2, all CKD are
    stored in one product, where some CKD have a time-axis to correct for any
    in-flight degradation.
Therefore, the logic to find a CKD is implemented as follows:
1) ckd_dir, defines the base directory to search for the CKD products
(see below).
2) ckd_file, defines the full path to (static) CKD product;
(version 1) any product with dynamic CKD has to be in the same
directory.
Version 1:
* Static CKD are stored in one file: glob('*_AUX_L1_CKD_*')
* Dynamic CKD are stored in two files:
- UVN, use glob('*_ICM_CKDUVN_*')
- SWIR, use glob('*_ICM_CKDSIR_*')
Version 2+:
* All CKD in one file: glob('*_AUX_L1_CKD_*')
* Dynamic CKD are empty
"""
def __init__(self, ckd_dir=None, ckd_version=1, ckd_file=None):
"""Create CKDio object.
"""
if ckd_dir is None:
ckd_dir = '/nfs/Tropomi/share/ckd'
self.ckd_version = max(1, ckd_version)
self.ckd_dyn_file = None
# define path to CKD product
if ckd_file is None:
if not Path(ckd_dir).is_dir():
raise FileNotFoundError(f'Not found CKD directory: {ckd_dir}')
self.ckd_dir = Path(ckd_dir)
glob_str = f'*_AUX_L1_CKD_*_*_00000_{self.ckd_version:02d}_*_*.h5'
if (self.ckd_dir / 'static').is_dir():
res = sorted((self.ckd_dir / 'static').glob(glob_str))
else:
res = sorted(self.ckd_dir.glob(glob_str))
if not res:
raise FileNotFoundError('Static CKD product not found')
self.ckd_file = res[-1]
else:
if not Path(ckd_file).is_file():
raise FileNotFoundError(f'Not found CKD file: {ckd_file}')
self.ckd_dir = Path(ckd_file).parent
self.ckd_file = Path(ckd_file)
# obtain path to dynamic CKD product (version 1, only)
if self.ckd_version == 1:
if (self.ckd_dir / 'dynamic').is_dir():
res = sorted((self.ckd_dir / 'dynamic').glob('*_ICM_CKDSIR_*'))
else:
res = sorted(self.ckd_dir.glob('*_ICM_CKDSIR_*'))
if res:
self.ckd_dyn_file = res[-1]
# open access to CKD product
self.fid = h5py.File(self.ckd_file, "r")
def __enter__(self):
"""Method called to initiate the context manager.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Method called when exiting the context manager.
"""
self.close()
return False # any exception is raised by the with statement.
def close(self) -> None:
"""Make sure that we close all resources.
"""
if self.fid is not None:
self.fid.close()
def creation_time(self) -> str:
"""Returns datetime when the L1b product was created.
"""
if self.ckd_version == 2:
attr = self.fid['METADATA'].attrs['production_datetime']
else:
group = PosixPath('METADATA', 'earth_explorer_header',
'fixed_header', 'source')
attr = self.fid[str(group)].attrs['Creator_Date'][0]
if isinstance(attr, bytes):
attr = attr.decode('ascii')
return attr
def creator_version(self) -> str:
"""Returns version of Tropomi L01B processor.
"""
group = PosixPath('METADATA', 'earth_explorer_header', 'fixed_header')
attr = self.fid[str(group)].attrs['File_Version']
if self.ckd_version == 1:
attr = attr[0]
if isinstance(attr, bytes):
attr = attr.decode('ascii')
return attr
@staticmethod
def __get_spectral_channel(bands: str):
"""Check bands is valid: single band or belong to one channel
Parameters
----------
bands : str
Tropomi bands [1..8] or channels ['12', '34', '56', '78'],
"""
band2channel = ['UNKNOWN', 'UV', 'UV', 'VIS', 'VIS',
'NIR', 'NIR', 'SWIR', 'SWIR']
        # Accept a single band or the two bands of one channel, nothing else
        if not 1 <= len(bands) <= 2:
            raise ValueError('read per band or channel, only')
if len(bands) == 2:
if band2channel[int(bands[0])] != band2channel[int(bands[1])]:
raise ValueError('bands should be of the same channel')
return band2channel[int(bands[0])]
def get_param(self, ds_name: str, band='7'):
"""Returns value(s) of a CKD parameter from the Static CKD product.
Parameters
----------
ds_name : str
Name of the HDF5 dataset, default='pixel_full_well'
band : str, default='7'
Band identifier '1', '2', ..., '8'
Returns
-------
numpy.ndarray or scalar
CKD parameter value
Notes
-----
Datasets of size=1 are return as scalar
Handy function for scalar HDF5 datasets, such as:
- dc_reference_temp
- dpqf_threshold
- pixel_full_well
- pixel_fw_flag_thresh
"""
if not 1 <= int(band) <= 8:
            raise ValueError('band must be between 1 and 8')
if ds_name not in self.fid[f'/BAND{band}']:
raise ValueError('dataset not available')
return self.fid[f'/BAND{band}/{ds_name}'][()]
# ---------- band or channel CKD's ----------
def dn2v_factors(self):
"""Returns digital number to Volt CKD, SWIR only.
Notes
-----
The DN2V factor has no error attached to it.
"""
return np.concatenate(
(self.fid['/BAND7/dn2v_factor_swir'][2:],
self.fid['/BAND8/dn2v_factor_swir'][2:]))
def v2c_factors(self):
"""Returns Voltage to Charge CKD, SWIR only.
Notes
-----
The V2C factor has no error attached to it.
"""
# pylint: disable=no-member
return np.concatenate(
(self.fid['/BAND7/v2c_factor_swir'].fields('value')[2:],
self.fid['/BAND8/v2c_factor_swir'].fields('value')[2:]))
# ---------- spectral-channel CKD's ----------
def __rd_dataset(self, dset_name: str, bands: str):
"""General function to read non-compound dataset into xarray::Dataset.
Parameters
----------
dset_name: str
name (including path) of the dataset as '/BAND{}/<name>'
bands : str
Tropomi bands [1..8] or channels ['12', '34', '56', '78'],
Returns
-------
xarray.Dataset
parameters of CKD with name 'dset_name'
"""
ckd_val = None
for band in bands:
# try Static-CKD product
if dset_name.format(band) in self.fid:
if ckd_val is None:
ckd_val = h5_to_xr(self.fid[dset_name.format(band)])
else:
ckd_val = xr.concat(
(ckd_val,
h5_to_xr(self.fid[dset_name.format(band)])),
dim='column')
# try Dynamic-CKD product
else:
dyn_fid = h5py.File(self.ckd_dyn_file, 'r')
if dset_name.format(band) in dyn_fid:
if ckd_val is None:
ckd_val = h5_to_xr(dyn_fid[dset_name.format(band)])
else:
ckd_val = xr.concat(
(ckd_val,
h5_to_xr(dyn_fid[dset_name.format(band)])),
dim='column')
dyn_fid.close()
if ckd_val is None:
return None
# Use NaN as FillValue
ckd_val = ckd_val.where(ckd_val != float.fromhex('0x1.ep+122'),
other=np.nan)
# combine DataArrays to Dataset
return xr.Dataset({'value': ckd_val}, attrs=ckd_val.attrs)
def __rd_datapoints(self, dset_name: str, bands: str):
"""General function to read datapoint dataset into xarray::Dataset
Parameters
----------
dset_name: str
name (including path) of the dataset as '/BAND{}/<name>'
bands : str
Tropomi bands [1..8] or channels ['12', '34', '56', '78'],
default: '78'
Returns
-------
xarray.Dataset
parameters (value and uncertainty) of CKD with name 'dset_name'
"""
ckd_val = None
ckd_err = None
for band in bands:
# try Static-CKD product
if dset_name.format(band) in self.fid:
if ckd_val is None:
ckd_val = h5_to_xr(self.fid[dset_name.format(band)],
field='value')
ckd_err = h5_to_xr(self.fid[dset_name.format(band)],
field='error')
else:
ckd_val = xr.concat(
(ckd_val, h5_to_xr(self.fid[dset_name.format(band)],
field='value')), dim='column')
ckd_err = xr.concat(
(ckd_err, h5_to_xr(self.fid[dset_name.format(band)],
field='error')), dim='column')
# try Dynamic-CKD product
else:
dyn_fid = h5py.File(self.ckd_dyn_file, 'r')
if dset_name.format(band) in dyn_fid:
if ckd_val is None:
ckd_val = h5_to_xr(dyn_fid[dset_name.format(band)],
field='value')
ckd_err = h5_to_xr(dyn_fid[dset_name.format(band)],
field='error')
else:
ckd_val = xr.concat(
(ckd_val, h5_to_xr(dyn_fid[dset_name.format(band)],
field='value')), dim='column')
ckd_err = xr.concat(
(ckd_err, h5_to_xr(dyn_fid[dset_name.format(band)],
field='error')), dim='column')
dyn_fid.close()
if ckd_val is None:
return None
# Use NaN as FillValue
ckd_val = ckd_val.where(ckd_val != float.fromhex('0x1.ep+122'),
other=np.nan)
ckd_err = ckd_err.where(ckd_err != float.fromhex('0x1.ep+122'),
other=np.nan)
# combine DataArrays to Dataset
return xr.Dataset({'value': ckd_val, 'error': ckd_err},
attrs=ckd_val.attrs)
# ---------- static CKD's ----------
def absirr(self, qvd=1, bands='78'):
"""Returns absolute irradiance responsivity.
Parameters
----------
qvd : int, default: 1
Tropomi QVD identifier. Valid values are 1 or 2
bands : str, default: '78'
Tropomi bands [1..8] or channels ['12', '34', '56', '78']
"""
try:
channel = self.__get_spectral_channel(bands)
except Exception as exc:
raise RuntimeError(exc) from exc
dset_name = '/BAND{}' + f'/abs_irr_conv_factor_qvd{qvd}'
ckd = self.__rd_datapoints(dset_name, bands)
if '7' in bands or '8' in bands:
ckd = reject_row257(ckd)
ckd.attrs["long_name"] = \
f'{channel} absolute irradiance CKD (QVD={qvd})'
return ckd.assign_coords(column=np.arange(ckd.column.size, dtype='u4'))
def absrad(self, bands='78'):
"""Returns absolute radiance responsivity.
Parameters
----------
bands : str, default: '78'
Tropomi bands [1..8] or channels ['12', '34', '56', '78']
"""
try:
channel = self.__get_spectral_channel(bands)
except Exception as exc:
raise RuntimeError(exc) from exc
dset_name = '/BAND{}/abs_rad_conv_factor'
ckd = self.__rd_datapoints(dset_name, bands)
if '7' in bands or '8' in bands:
ckd = reject_row257(ckd)
ckd.attrs["long_name"] = f'{channel} absolute radiance CKD'
return ckd.assign_coords(column=np.arange(ckd.column.size, dtype='u4'))
def memory(self):
"""Returns memory CKD, SWIR only.
"""
column = None
ckd_parms = ['mem_lin_neg_swir', 'mem_lin_pos_swir',
'mem_qua_neg_swir', 'mem_qua_pos_swir']
ckd = xr.Dataset()
ckd.attrs["long_name"] = 'SWIR memory CKD'
for key in ckd_parms:
dset_name = f'/BAND7/{key}'
ckd_val = h5_to_xr(self.fid[dset_name], field='value')
ckd_err = h5_to_xr(self.fid[dset_name], field='error')
dset_name = f'/BAND8/{key}'
ckd_val = xr.concat(
(ckd_val, h5_to_xr(self.fid[dset_name], field='value')),
dim='column')
if column is None:
column = np.arange(ckd_val.column.size, dtype='u4')
ckd_val = ckd_val.assign_coords(column=column)
ckd_err = xr.concat(
(ckd_err, h5_to_xr(self.fid[dset_name], field='error')),
dim='column')
ckd_err = ckd_err.assign_coords(column=column)
ckd[key.replace('swir', 'value')] = reject_row257(ckd_val)
ckd[key.replace('swir', 'error')] = reject_row257(ckd_err)
return ckd
def noise(self, bands='78'):
"""Returns readout-noise CKD, SWIR only.
Parameters
----------
bands : str, default: '78'
Tropomi bands [1..8] or channels ['12', '34', '56', '78']
"""
dset_name = '/BAND{}/readout_noise_swir'
ckd = reject_row257(self.__rd_dataset(dset_name, bands))
ckd.attrs["long_name"] = 'SWIR readout-noise CKD'
return ckd.assign_coords(column=np.arange(ckd.column.size, dtype='u4'))
def prnu(self, bands='78'):
"""Returns Pixel Response Non-Uniformity (PRNU).
Parameters
----------
bands : str, default: '78'
Tropomi bands [1..8] or channels ['12', '34', '56', '78']
"""
try:
channel = self.__get_spectral_channel(bands)
except Exception as exc:
raise RuntimeError(exc) from exc
ckd = self.__rd_datapoints('/BAND{}/PRNU', bands)
if '7' in bands or '8' in bands:
ckd = reject_row257(ckd)
ckd.attrs["long_name"] = f'{channel} PRNU CKD'
return ckd.assign_coords(column=np.arange(ckd.column.size, dtype='u4'))
def relirr(self, qvd=1, bands='78'):
"""Returns relative irradiance correction.
Parameters
----------
bands : str, default: '78'
Tropomi bands [1..8] or channels ['12', '34', '56', '78']
qvd : int
Tropomi QVD identifier. Valid values are 1 or 2, default: 1
Returns
-------
dict
CKD for relative irradiance correction as dictionaries with keys:
- band: Tropomi spectral band ID
- mapping_cols: coarse irregular mapping of the columns
- mapping_rows: coarse irregular mapping of the rows
- cheb_coefs: Chebyshev parameters for elevation and azimuth \
for pixels on a coarse irregular grid
"""
try:
_ = self.__get_spectral_channel(bands)
except Exception as exc:
raise RuntimeError(exc) from exc
res = ()
for band in bands:
ckd = {}
ckd['band'] = int(band)
dsname = f'/BAND{band}/rel_irr_coarse_mapping_vert'
ckd['mapping_rows'] = self.fid[dsname][:].astype(int)
dsname = f'/BAND{band}/rel_irr_coarse_mapping_hor'
# pylint: disable=no-member
mapping_hor = self.fid[dsname][:].astype(int)
mapping_hor[mapping_hor > 1000] -= 2**16
ckd['mapping_cols'] = mapping_hor
dsname = f'/BAND{band}/rel_irr_coarse_func_cheb_qvd{qvd}'
ckd['cheb_coefs'] = self.fid[dsname]['coefs'][:]
res += (ckd,)
return res
def saa(self) -> dict:
"""Returns definition of the SAA region.
"""
saa_region = {}
saa_region['lat'] = self.fid['saa_latitude'][:]
saa_region['lon'] = self.fid['saa_longitude'][:]
return saa_region
def wavelength(self, bands='78'):
"""Returns wavelength CKD.
Parameters
----------
bands : str, default: '78'
Tropomi bands [1..8] or channels ['12', '34', '56', '78']
Notes
-----
The wavelength CKD has no error attached to it.
"""
try:
channel = self.__get_spectral_channel(bands)
except Exception as exc:
raise RuntimeError(exc) from exc
dset_name = '/BAND{}/wavelength_map'
ckd = self.__rd_datapoints(dset_name, bands)
if '7' in bands or '8' in bands:
ckd = reject_row257(ckd)
ckd.attrs["long_name"] = f'{channel} wavelength CKD'
return ckd.assign_coords(column=np.arange(ckd.column.size, dtype='u4'))
# ---------- static or dynamic CKD's ----------
def darkflux(self, bands='78'):
"""Returns dark-flux CKD, SWIR only.
Parameters
----------
bands : str, default: '78'
Tropomi SWIR bands '7', '8' or both '78'
"""
dset_name = '/BAND{}/long_term_swir'
ckd = reject_row257(self.__rd_datapoints(dset_name, bands))
ckd.attrs["long_name"] = 'SWIR dark-flux CKD'
return ckd.assign_coords(column=np.arange(ckd.column.size, dtype='u4'))
def offset(self, bands='78'):
"""Returns offset CKD, SWIR only.
Parameters
----------
bands : str, default: '78'
Tropomi SWIR bands '7', '8' or both '78'
"""
dset_name = '/BAND{}/analog_offset_swir'
ckd = reject_row257(self.__rd_datapoints(dset_name, bands))
ckd.attrs["long_name"] = 'SWIR offset CKD'
return ckd.assign_coords(column=np.arange(ckd.column.size, dtype='u4'))
def pixel_quality(self, bands='78'):
"""Returns detector pixel-quality mask (float [0, 1]), SWIR only.
Parameters
----------
bands : str, default: '78'
Tropomi SWIR bands '7', '8' or both '78'
"""
dset_name = '/BAND{}/dpqf_map'
ckd = reject_row257(self.__rd_dataset(dset_name, bands))
ckd.attrs["long_name"] = 'SWIR pixel-quality CKD'
return ckd.assign_coords(column=np.arange(ckd.column.size, dtype='u4'))
def dpqf(self, threshold=None, bands='78'):
"""Returns detector pixel-quality flags (boolean), SWIR only.
Parameters
----------
threshold: float, optional
Value between [0..1], default is to read the threshold from CKD
bands : str, default='78'
Tropomi SWIR bands '7', '8', or both '78'
Returns
-------
numpy ndarray
"""
dpqf = None
if threshold is None:
threshold = self.fid['/BAND7/dpqf_threshold'][:]
# try Static-CKD product
if '/BAND7/dpqf_map' in self.fid:
if bands == '7':
dpqf = self.fid['/BAND7/dpqf_map'][:-1, :] < threshold
elif bands == '8':
dpqf = self.fid['/BAND8/dpqf_map'][:-1, :] < threshold
elif bands == '78':
dpqf_b7 = self.fid['/BAND7/dpqf_map'][:-1, :]
dpqf_b8 = self.fid['/BAND8/dpqf_map'][:-1, :]
dpqf = np.hstack((dpqf_b7, dpqf_b8)) < threshold
else:
# try Dynamic-CKD product
with h5py.File(self.ckd_dyn_file, 'r') as fid:
if bands == '7':
dpqf = fid['/BAND7/dpqf_map'][:-1, :] < threshold
elif bands == '8':
dpqf = fid['/BAND8/dpqf_map'][:-1, :] < threshold
elif bands == '78':
dpqf_b7 = fid['/BAND7/dpqf_map'][:-1, :]
dpqf_b8 = fid['/BAND8/dpqf_map'][:-1, :]
dpqf = np.hstack((dpqf_b7, dpqf_b8)) < threshold
return dpqf
def saturation(self):
"""Returns pixel-saturation values (pre-offset), SWIR only.
"""
ckd_val = None
dset_name = '/BAND{}/saturation_preoffset'
ckd_file = (self.ckd_dir / 'OCAL'
/ 'ckd.saturation_preoffset.detector4.nc')
with h5py.File(ckd_file, 'r') as fid:
ckd_val = xr.concat((h5_to_xr(fid[dset_name.format(7)]),
h5_to_xr(fid[dset_name.format(8)])),
dim='column')
ckd = xr.Dataset({'value': ckd_val}, attrs=ckd_val.attrs)
ckd = reject_row257(ckd)
ckd.attrs["long_name"] = 'SWIR pixel-saturation CKD (pre-offset)'
return ckd.assign_coords(column=np.arange(ckd.column.size, dtype='u4'))
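# Illustrative usage sketch (the directory is the documented default; which CKD
# product is picked up depends on ckd_dir and ckd_version as described in the
# class docstring):
#   with CKDio(ckd_dir='/nfs/Tropomi/share/ckd', ckd_version=2) as ckd:
#       prnu = ckd.prnu(bands='78')        # xarray.Dataset with value & error
#       bad_pixels = ckd.dpqf(bands='78')  # boolean numpy array, SWIR only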
| {
"content_hash": "d7ce8f7ec14abde3ffaf1a184c4ef0d9",
"timestamp": "",
"source": "github",
"line_count": 639,
"max_line_length": 79,
"avg_line_length": 35.328638497652584,
"alnum_prop": 0.5128682170542636,
"repo_name": "rmvanhees/pys5p",
"id": "b9ff41d6a8458a7d603e64f7129ff466f7c8d68b",
"size": "22776",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/pys5p/ckd_io.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "176223"
}
],
"symlink_target": ""
} |
"""Create a shop with article and order sequences.
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.services.shop.sequence.service import create_sequence
from byceps.services.shop.sequence.transfer.models import Purpose
from byceps.services.shop.shop import service as shop_service
from byceps.util.system import get_config_filename_from_env_or_exit
from _util import app_context
@click.command()
@click.argument('shop_id',)
@click.argument('title')
@click.argument('email_config_id',)
@click.argument('article_prefix')
@click.argument('order_prefix')
def execute(shop_id, title, email_config_id, article_prefix, order_prefix):
shop = shop_service.create_shop(shop_id, title, email_config_id)
create_sequence(shop.id, Purpose.article, article_prefix)
create_sequence(shop.id, Purpose.order, order_prefix)
click.secho('Done.', fg='green')
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
| {
"content_hash": "b0afeacc7b49c583bbd27f11a04dbdde",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 75,
"avg_line_length": 30.885714285714286,
"alnum_prop": 0.7419056429232193,
"repo_name": "m-ober/byceps",
"id": "cdafe642d2d56b5e9efc3a390a3747abfd7fb57c",
"size": "1104",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/create_shop.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "38499"
},
{
"name": "Dockerfile",
"bytes": "1302"
},
{
"name": "HTML",
"bytes": "369989"
},
{
"name": "JavaScript",
"bytes": "9483"
},
{
"name": "Python",
"bytes": "1152996"
}
],
"symlink_target": ""
} |
from unittest import TestCase
from django.template import Context, Engine
from django.template.base import TextNode, VariableNode
from django.utils import six
class NodelistTest(TestCase):
@classmethod
def setUpClass(cls):
cls.engine = Engine()
super(NodelistTest, cls).setUpClass()
def test_for(self):
template = self.engine.from_string('{% for i in 1 %}{{ a }}{% endfor %}')
vars = template.nodelist.get_nodes_by_type(VariableNode)
self.assertEqual(len(vars), 1)
def test_if(self):
template = self.engine.from_string('{% if x %}{{ a }}{% endif %}')
vars = template.nodelist.get_nodes_by_type(VariableNode)
self.assertEqual(len(vars), 1)
def test_ifequal(self):
template = self.engine.from_string('{% ifequal x y %}{{ a }}{% endifequal %}')
vars = template.nodelist.get_nodes_by_type(VariableNode)
self.assertEqual(len(vars), 1)
def test_ifchanged(self):
template = self.engine.from_string('{% ifchanged x %}{{ a }}{% endifchanged %}')
vars = template.nodelist.get_nodes_by_type(VariableNode)
self.assertEqual(len(vars), 1)
class TextNodeTest(TestCase):
def test_textnode_repr(self):
engine = Engine()
for temptext, reprtext in [
("Hello, world!", "<TextNode: u'Hello, world!'>"),
("One\ntwo.", "<TextNode: u'One\\ntwo.'>"),
]:
template = engine.from_string(temptext)
texts = template.nodelist.get_nodes_by_type(TextNode)
if six.PY3:
reprtext = reprtext.replace("u'", "'")
self.assertEqual(repr(texts[0]), reprtext)
class ErrorIndexTest(TestCase):
"""
Checks whether index of error is calculated correctly in
template debugger in for loops. Refs ticket #5831
"""
def test_correct_exception_index(self):
tests = [
('{% load bad_tag %}{% for i in range %}{% badsimpletag %}{% endfor %}', (38, 56)),
(
'{% load bad_tag %}{% for i in range %}{% for j in range %}'
'{% badsimpletag %}{% endfor %}{% endfor %}',
(58, 76)
),
(
'{% load bad_tag %}{% for i in range %}{% badsimpletag %}'
'{% for j in range %}Hello{% endfor %}{% endfor %}',
(38, 56)
),
(
'{% load bad_tag %}{% for i in range %}{% for j in five %}'
'{% badsimpletag %}{% endfor %}{% endfor %}',
(38, 57)
),
('{% load bad_tag %}{% for j in five %}{% badsimpletag %}{% endfor %}', (18, 37)),
]
context = Context({
'range': range(5),
'five': 5,
})
engine = Engine(debug=True, libraries={'bad_tag': 'template_tests.templatetags.bad_tag'})
for source, expected_error_source_index in tests:
template = engine.from_string(source)
try:
template.render(context)
except (RuntimeError, TypeError) as e:
debug = e.template_debug
self.assertEqual((debug['start'], debug['end']), expected_error_source_index)
| {
"content_hash": "0b78b8b594d6f22f765a6d452489ea7a",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 97,
"avg_line_length": 38.172413793103445,
"alnum_prop": 0.5245408009635651,
"repo_name": "yephper/django",
"id": "7786ef2a4a28499c20a575f036f404752610ec0d",
"size": "3321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/template_tests/test_nodelist.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "1538"
},
{
"name": "CSS",
"bytes": "1697381"
},
{
"name": "HTML",
"bytes": "390772"
},
{
"name": "Java",
"bytes": "588"
},
{
"name": "JavaScript",
"bytes": "3172126"
},
{
"name": "Makefile",
"bytes": "134"
},
{
"name": "PHP",
"bytes": "19336"
},
{
"name": "Python",
"bytes": "13365273"
},
{
"name": "Shell",
"bytes": "837"
},
{
"name": "Smarty",
"bytes": "133"
}
],
"symlink_target": ""
} |
bind = "127.0.0.1:8000"
workers = 5
worker_class = 'gevent'
# worker_class = 'egg:gunicorn#gevent'
# Logging
loglevel = 'critical'
acces_logfile = "access.log"
error_logfile = "error.log"
# enable_stdio_inheritance = True
timeout = 360
| {
"content_hash": "04fde0c4ee762f3b9307f0608116ede9",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 38,
"avg_line_length": 23.6,
"alnum_prop": 0.7076271186440678,
"repo_name": "geodashio/geodash-server",
"id": "a8e55067ba8761d6e80e34db5d717c2d6150f116",
"size": "236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gunicorn.conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "611770"
},
{
"name": "HTML",
"bytes": "2290561"
},
{
"name": "JavaScript",
"bytes": "7775309"
},
{
"name": "Python",
"bytes": "36279"
},
{
"name": "Ruby",
"bytes": "1183"
},
{
"name": "Shell",
"bytes": "1043"
}
],
"symlink_target": ""
} |
"""Render library code for accessing tf_mesh_renderer.
"""
import geometry
import ops
import render_utils
import tensorflow as tf
import tensorflow_addons as tfa
from tf_mesh_renderer.mesh_renderer import rasterize_triangles
def sobel_fg_alpha(idepth, beta=5.0):
"""Computes foreground alpha with sobel edges.
Alphas will be low when there are strong sobel edges.
Args:
idepth: [B, H, W, 1] inverse depth tensor.
beta: (float) Higher the beta, higher the sensitivity to the idepth edges.
Returns:
alpha: [B, H, W, 1] alpha visibility.
"""
# Compute Sobel edges and their magnitude.
sobel_components = tf.image.sobel_edges(idepth)
sobel_mag_components = sobel_components**2
sobel_mag_square = tf.math.reduce_sum(sobel_mag_components, axis=-1)
sobel_mag = tf.sqrt(sobel_mag_square + 1e-06)
# Compute alphas from sobel edge magnitudes.
alpha = tf.exp(-1.0 * beta * sobel_mag)
return alpha
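# Minimal shape sketch for sobel_fg_alpha (illustrative only; the input tensor
# below is random):
#   idepth = tf.random.uniform([1, 64, 64, 1])
#   alpha = sobel_fg_alpha(idepth, beta=10.0)  # -> [1, 64, 64, 1], values in (0, 1]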
def render(input_rgbd, input_pose, input_intrinsics,
target_pose, target_intrinsics,
alpha_threshold=0.3):
"""Renders rgbd to target view, also generating mask.
Args:
input_rgbd: [B, H, W, 4] an input RGBD (either the initial RGBD or output of
a previous render_refine)
input_pose: [B, 3, 4] pose of input_rgbd
input_intrinsics: [B, 4] camera intrinsics of input_rgbd
target_pose: [B, 3, 4] pose of the view to be generated
    target_intrinsics: [B, 4] camera intrinsics of the output view
    alpha_threshold: (float) cutoff on the sobel-based alpha below which pixels
      are treated as holes in the rendered mask
Returns:
[...., height, width, 4] Rendered RGB-D image at the target view.
[...., height, width, 1] Mask at the target view. The mask is 0 where holes
were introduced by the renderer.
"""
# Limit the range of disparity to avoid division by zero or negative values.
min_disparity = 1e-6
max_disparity = 1e5
rgb = input_rgbd[Ellipsis, :-1]
disparity = tf.clip_by_value(input_rgbd[Ellipsis, -1:], min_disparity, max_disparity)
# This returns [B, H, W, 1]
alpha = sobel_fg_alpha(disparity, beta=10.0)
# Make the alpha hard.
mask = tf.cast(tf.greater(alpha, alpha_threshold), dtype=tf.float32)
# Now we'll render RGB and mask from the target view:
rgb_and_mask = tf.concat([rgb, mask], axis=-1)
target_rgb_and_mask, target_disparity = render_channels(
rgb_and_mask, disparity,
input_pose, input_intrinsics,
target_pose, target_intrinsics)
# Multiply by mask.
rgb, mask = tf.split(target_rgb_and_mask, [3, 1], axis=-1)
rgbd = tf.concat([rgb, target_disparity], axis=-1)
return rgbd * mask, mask * mask
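# Illustrative call sketch for render() (shapes only; the pose and intrinsics
# values below are placeholders):
#   rgbd = tf.random.uniform([1, 160, 256, 4], 0.1, 1.0)
#   pose = tf.eye(3, 4, batch_shape=[1])
#   intrinsics = tf.constant([[0.8, 1.28, 0.5, 0.5]])
#   out_rgbd, mask = render(rgbd, pose, intrinsics, pose, intrinsics)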
def render_channels(
channels, disparity,
source_pose, source_intrinsics,
target_pose, target_intrinsics):
"""Render channels from new target position, given disparity.
Args:
channels: [B, H, W, C] Channels to render
disparity: [B, H, W, 1] Inverse depth
source_pose: [B, 3, 4] reference camera pose
source_intrinsics: [B, 4] reference intrinsics
target_pose: [B, 3, 4] target camera pose
target_intrinsics: [B, 4] target intrinsics
Returns:
[B, H, W, C] Rendered channels at the target view.
[B, H, W, 1] Rendered disparity at the target view.
"""
(batch_size, height, width, channel_count) = channels.get_shape().as_list()
# Relative pose maps source to target pose.
relative_pose = geometry.mat34_product(
target_pose, geometry.mat34_pose_inverse(source_pose))
# Project source image into 3D mesh.
vertices = render_utils.create_vertices_intrinsics(
disparity[Ellipsis, 0], source_intrinsics)
# Depth of each point from target camera.
target_depths = geometry.mat34_transform(relative_pose, vertices)[Ellipsis, -1:]
# Add target-view depths as an extra vertex attribute.
attributes = tf.reshape(channels, (batch_size, width * height, channel_count))
attributes = tf.concat([attributes, target_depths], -1)
# Get triangles,
triangles = render_utils.create_triangles(height, width)
num_triangles = triangles.shape[0]
triangles = tf.convert_to_tensor(triangles, tf.int32)
# Camera matrices.
target_perspective = render_utils.perspective_from_intrinsics(
target_intrinsics)
relative_pose = geometry.mat34_to_mat44(relative_pose)
proj_matrix = tf.matmul(target_perspective, relative_pose)
# Zero background value for channels, large background value for depth.
background = [0.0] * channel_count + [1000.0]
# Render with mesh_renderer library
output = rasterize_triangles.rasterize(
vertices, attributes, triangles, proj_matrix, width, height, background)
output_channels, output_depths = tf.split(output, [channel_count, 1], axis=-1)
output_disparity = tf.math.divide_no_nan(
1.0, tf.clip_by_value(output_depths, 1.0 / 100.0, 1.0 / 0.01))
return (output_channels, output_disparity)
| {
"content_hash": "f03e7579a4421cda35c3d8f4c82b4230",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 87,
"avg_line_length": 36.38931297709924,
"alnum_prop": 0.6981329976924691,
"repo_name": "google-research/google-research",
"id": "4ecd1e5796407e8f202a9eced209e6ecd7f04b90",
"size": "5375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "infinite_nature/render.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
from .test_sandpaper import (SandPaperTest,)
from .rules import *
| {
"content_hash": "1553727ddba8edee8656aa4af49a6e5b",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 44,
"avg_line_length": 33,
"alnum_prop": 0.7727272727272727,
"repo_name": "stephen-bunn/sandpaper",
"id": "2d5868eca97aab15d6a4b2da677398e59eee6452",
"size": "221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55861"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('joins', '0007_auto_20170111_1359'),
]
operations = [
migrations.AddField(
model_name='join',
name='ref_if',
field=models.CharField(default='abra', max_length=120),
),
]
| {
"content_hash": "f35d9aad224cfdb4f3ddaa37f48d9bae",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 67,
"avg_line_length": 21.61111111111111,
"alnum_prop": 0.5861182519280206,
"repo_name": "micbuz/project2",
"id": "df04697a7121ffd06b985ba053adc6ea6f616eac",
"size": "462",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "boot/joins/migrations/0008_join_ref_if.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "91640"
},
{
"name": "HTML",
"bytes": "15283"
},
{
"name": "JavaScript",
"bytes": "196154"
},
{
"name": "Python",
"bytes": "35881"
}
],
"symlink_target": ""
} |
from google.cloud import aiplatform_v1
async def sample_create_hyperparameter_tuning_job():
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
# Initialize request argument(s)
hyperparameter_tuning_job = aiplatform_v1.HyperparameterTuningJob()
hyperparameter_tuning_job.display_name = "display_name_value"
hyperparameter_tuning_job.study_spec.metrics.metric_id = "metric_id_value"
hyperparameter_tuning_job.study_spec.metrics.goal = "MINIMIZE"
hyperparameter_tuning_job.study_spec.parameters.double_value_spec.min_value = 0.96
hyperparameter_tuning_job.study_spec.parameters.double_value_spec.max_value = 0.962
hyperparameter_tuning_job.study_spec.parameters.parameter_id = "parameter_id_value"
hyperparameter_tuning_job.max_trial_count = 1609
hyperparameter_tuning_job.parallel_trial_count = 2128
hyperparameter_tuning_job.trial_job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value"
request = aiplatform_v1.CreateHyperparameterTuningJobRequest(
parent="parent_value",
hyperparameter_tuning_job=hyperparameter_tuning_job,
)
# Make the request
response = await client.create_hyperparameter_tuning_job(request=request)
# Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_CreateHyperparameterTuningJob_async]
| {
"content_hash": "85f5e339846ca73d2c9c6ce868fbb41b",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 107,
"avg_line_length": 44.38709677419355,
"alnum_prop": 0.7630813953488372,
"repo_name": "googleapis/python-aiplatform",
"id": "edf9bcc23060b6353bc8da3570b7a37aa8f51836",
"size": "2422",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_hyperparameter_tuning_job_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
} |
import io
import os
import sys
from typing import TextIO
import pytest
from _pytest.store import StoreKey
fault_handler_stderr_key = StoreKey[TextIO]()
def pytest_addoption(parser):
help = (
"Dump the traceback of all threads if a test takes "
"more than TIMEOUT seconds to finish.\n"
"Not available on Windows."
)
parser.addini("faulthandler_timeout", help, default=0.0)
def pytest_configure(config):
import faulthandler
if not faulthandler.is_enabled():
# faulthhandler is not enabled, so install plugin that does the actual work
# of enabling faulthandler before each test executes.
config.pluginmanager.register(FaultHandlerHooks(), "faulthandler-hooks")
else:
from _pytest.warnings import _issue_warning_captured
# Do not handle dumping to stderr if faulthandler is already enabled, so warn
# users that the option is being ignored.
timeout = FaultHandlerHooks.get_timeout_config_value(config)
if timeout > 0:
_issue_warning_captured(
pytest.PytestConfigWarning(
"faulthandler module enabled before pytest configuration step, "
"'faulthandler_timeout' option ignored"
),
config.hook,
stacklevel=2,
)
class FaultHandlerHooks:
"""Implements hooks that will actually install fault handler before tests execute,
as well as correctly handle pdb and internal errors."""
def pytest_configure(self, config):
import faulthandler
stderr_fd_copy = os.dup(self._get_stderr_fileno())
config._store[fault_handler_stderr_key] = open(stderr_fd_copy, "w")
faulthandler.enable(file=config._store[fault_handler_stderr_key])
def pytest_unconfigure(self, config):
import faulthandler
faulthandler.disable()
# close our dup file installed during pytest_configure
# re-enable the faulthandler, attaching it to the default sys.stderr
# so we can see crashes after pytest has finished, usually during
# garbage collection during interpreter shutdown
config._store[fault_handler_stderr_key].close()
del config._store[fault_handler_stderr_key]
faulthandler.enable(file=self._get_stderr_fileno())
@staticmethod
def _get_stderr_fileno():
try:
return sys.stderr.fileno()
except (AttributeError, io.UnsupportedOperation):
# pytest-xdist monkeypatches sys.stderr with an object that is not an actual file.
# https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors
# This is potentially dangerous, but the best we can do.
return sys.__stderr__.fileno()
@staticmethod
def get_timeout_config_value(config):
return float(config.getini("faulthandler_timeout") or 0.0)
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_protocol(self, item):
timeout = self.get_timeout_config_value(item.config)
stderr = item.config._store[fault_handler_stderr_key]
if timeout > 0 and stderr is not None:
import faulthandler
faulthandler.dump_traceback_later(timeout, file=stderr)
try:
yield
finally:
faulthandler.cancel_dump_traceback_later()
else:
yield
@pytest.hookimpl(tryfirst=True)
def pytest_enter_pdb(self):
"""Cancel any traceback dumping due to timeout before entering pdb.
"""
import faulthandler
faulthandler.cancel_dump_traceback_later()
@pytest.hookimpl(tryfirst=True)
def pytest_exception_interact(self):
"""Cancel any traceback dumping due to an interactive exception being
raised.
"""
import faulthandler
faulthandler.cancel_dump_traceback_later()
| {
"content_hash": "04845d59fee6ae433a73f2e011c2a873",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 94,
"avg_line_length": 34.91150442477876,
"alnum_prop": 0.6524714828897339,
"repo_name": "alfredodeza/pytest",
"id": "8d723c206cb8d9f9a3a5f02e116a89a937cc6cbe",
"size": "3945",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/_pytest/faulthandler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "837013"
}
],
"symlink_target": ""
} |
import os
import subprocess
def test_native():
p = subprocess.Popen(['make', '-B'], cwd=os.path.dirname(__file__))
assert p.wait() == 0 # systems which have `make` have SUCCESS==0
| {
"content_hash": "35850270745367aad74601108a9febcb",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 71,
"avg_line_length": 24,
"alnum_prop": 0.6354166666666666,
"repo_name": "bjodah/chemreac",
"id": "0af575cd9a05349440b096816e0d6b3f51d99c13",
"size": "217",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests-native/test_native_chemreac.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C++",
"bytes": "31135"
},
{
"name": "CSS",
"bytes": "275"
},
{
"name": "HTML",
"bytes": "1347"
},
{
"name": "Makefile",
"bytes": "2269"
},
{
"name": "Mako",
"bytes": "37265"
},
{
"name": "Python",
"bytes": "236335"
},
{
"name": "Shell",
"bytes": "16959"
}
],
"symlink_target": ""
} |
try:
from osgeo import osr, ogr, gdal
except ImportError:
import osr, ogr, gdal
import string
import sys
#############################################################################
def GeomType2Name( type ):
if type == ogr.wkbUnknown:
return 'wkbUnknown'
elif type == ogr.wkbPoint:
return 'wkbPoint'
elif type == ogr.wkbLineString:
return 'wkbLineString'
elif type == ogr.wkbPolygon:
return 'wkbPolygon'
elif type == ogr.wkbMultiPoint:
return 'wkbMultiPoint'
elif type == ogr.wkbMultiLineString:
return 'wkbMultiLineString'
elif type == ogr.wkbMultiPolygon:
return 'wkbMultiPolygon'
elif type == ogr.wkbGeometryCollection:
return 'wkbGeometryCollection'
elif type == ogr.wkbNone:
return 'wkbNone'
elif type == ogr.wkbLinearRing:
return 'wkbLinearRing'
else:
return 'wkbUnknown'
#############################################################################
def Esc(x):
return gdal.EscapeString( x, gdal.CPLES_XML )
#############################################################################
def Usage():
print('Usage: ogr2vrt.py [-relative] [-schema] [-feature_count] [-extent]')
print(' in_datasource out_vrtfile [layers]')
print('')
sys.exit(1)
#############################################################################
# Argument processing.
infile = None
outfile = None
layer_list = []
relative = "0"
schema=0
feature_count=0
extent=0
argv = gdal.GeneralCmdLineProcessor( sys.argv )
if argv is None:
sys.exit( 0 )
i = 1
while i < len(argv):
arg = argv[i]
if arg == '-relative':
relative = "1"
elif arg == '-schema':
schema = 1
elif arg == '-feature_count':
feature_count = 1
elif arg == '-extent':
extent = 1
elif arg[0] == '-':
Usage()
elif infile is None:
infile = arg
elif outfile is None:
outfile = arg
else:
layer_list.append( arg )
i = i + 1
if outfile is None:
Usage()
if schema and feature_count:
sys.stderr.write('Ignoring -feature_count when used with -schema.\n')
feature_count = 0
if schema and extent:
sys.stderr.write('Ignoring -extent when used with -schema.\n')
extent = 0
#############################################################################
# Open the datasource to read.
src_ds = ogr.Open( infile, update = 0 )
if schema:
infile = '@dummy@'
if len(layer_list) == 0:
for layer in src_ds:
layer_list.append( layer.GetLayerDefn().GetName() )
#############################################################################
# Start the VRT file.
vrt = '<OGRVRTDataSource>\n'
#############################################################################
# Process each source layer.
for name in layer_list:
layer = src_ds.GetLayerByName(name)
layerdef = layer.GetLayerDefn()
vrt += ' <OGRVRTLayer name="%s">\n' % Esc(name)
vrt += ' <SrcDataSource relativeToVRT="%s" shared="%d">%s</SrcDataSource>\n' \
% (relative,not schema,Esc(infile))
if schema:
vrt += ' <SrcLayer>@dummy@</SrcLayer>\n'
else:
vrt += ' <SrcLayer>%s</SrcLayer>\n' % Esc(name)
# Historic format for mono-geometry layers
if layerdef.GetGeomFieldCount() == 0:
vrt += ' <GeometryType>wkbNone</GeometryType>\n'
elif layerdef.GetGeomFieldCount() == 1:
vrt += ' <GeometryType>%s</GeometryType>\n' \
% GeomType2Name(layerdef.GetGeomType())
srs = layer.GetSpatialRef()
if srs is not None:
vrt += ' <LayerSRS>%s</LayerSRS>\n' \
% (Esc(srs.ExportToWkt()))
if extent:
(xmin, xmax, ymin, ymax) = layer.GetExtent()
vrt += ' <ExtentXMin>%.15g</ExtentXMin>\n' % xmin
vrt += ' <ExtentYMin>%.15g</ExtentYMin>\n' % ymin
vrt += ' <ExtentXMax>%.15g</ExtentXMax>\n' % xmax
vrt += ' <ExtentYMax>%.15g</ExtentYMax>\n' % ymax
# New format for multi-geometry field support
else:
for fld_index in range(layerdef.GetGeomFieldCount()):
src_fd = layerdef.GetGeomFieldDefn( fld_index )
vrt += ' <GeometryField name="%s">\n' % src_fd.GetName()
vrt += ' <GeometryType>%s</GeometryType>\n' \
% GeomType2Name(src_fd.GetType())
srs = src_fd.GetSpatialRef()
if srs is not None:
vrt += ' <SRS>%s</SRS>\n' \
% (Esc(srs.ExportToWkt()))
if extent:
(xmin, xmax, ymin, ymax) = layer.GetExtent(geom_field = fld_index)
vrt += ' <ExtentXMin>%.15g</ExtentXMin>\n' % xmin
vrt += ' <ExtentYMin>%.15g</ExtentYMin>\n' % ymin
vrt += ' <ExtentXMax>%.15g</ExtentXMax>\n' % xmax
vrt += ' <ExtentYMax>%.15g</ExtentYMax>\n' % ymax
vrt += ' </GeometryField>\n'
# Process all the fields.
for fld_index in range(layerdef.GetFieldCount()):
src_fd = layerdef.GetFieldDefn( fld_index )
if src_fd.GetType() == ogr.OFTInteger:
type = 'Integer'
elif src_fd.GetType() == ogr.OFTString:
type = 'String'
elif src_fd.GetType() == ogr.OFTReal:
type = 'Real'
elif src_fd.GetType() == ogr.OFTStringList:
type = 'StringList'
elif src_fd.GetType() == ogr.OFTIntegerList:
type = 'IntegerList'
elif src_fd.GetType() == ogr.OFTRealList:
type = 'RealList'
elif src_fd.GetType() == ogr.OFTBinary:
type = 'Binary'
elif src_fd.GetType() == ogr.OFTDate:
type = 'Date'
elif src_fd.GetType() == ogr.OFTTime:
type = 'Time'
elif src_fd.GetType() == ogr.OFTDateTime:
type = 'DateTime'
else:
type = 'String'
vrt += ' <Field name="%s" type="%s"' \
% (Esc(src_fd.GetName()), type)
if not schema:
vrt += ' src="%s"' % Esc(src_fd.GetName())
if src_fd.GetWidth() > 0:
vrt += ' width="%d"' % src_fd.GetWidth()
if src_fd.GetPrecision() > 0:
vrt += ' precision="%d"' % src_fd.GetPrecision()
vrt += '/>\n'
if feature_count:
vrt += ' <FeatureCount>%d</FeatureCount>\n' % layer.GetFeatureCount()
vrt += ' </OGRVRTLayer>\n'
vrt += '</OGRVRTDataSource>\n'
#############################################################################
# Write vrt
open(outfile,'w').write(vrt)
| {
"content_hash": "ebd862380ad11a1783fbc76c09056d03",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 85,
"avg_line_length": 31.037037037037038,
"alnum_prop": 0.497464200477327,
"repo_name": "tilemapjp/OSGeo.GDAL.Xamarin",
"id": "b28304050f8be714dc772d777905244cd562d2e6",
"size": "8380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gdal-1.11.0/swig/python/samples/ogr2vrt.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Bison",
"bytes": "43427"
},
{
"name": "C",
"bytes": "9827430"
},
{
"name": "C#",
"bytes": "1363593"
},
{
"name": "C++",
"bytes": "37431441"
},
{
"name": "Java",
"bytes": "745423"
},
{
"name": "JavaScript",
"bytes": "77251"
},
{
"name": "Makefile",
"bytes": "134911"
},
{
"name": "Objective-C",
"bytes": "42595"
},
{
"name": "OpenEdge ABL",
"bytes": "28024"
},
{
"name": "PHP",
"bytes": "106999"
},
{
"name": "Perl",
"bytes": "17266"
},
{
"name": "Perl6",
"bytes": "37287"
},
{
"name": "Prolog",
"bytes": "68"
},
{
"name": "Python",
"bytes": "975080"
},
{
"name": "Ruby",
"bytes": "2563"
},
{
"name": "Shell",
"bytes": "749856"
},
{
"name": "Smalltalk",
"bytes": "422"
},
{
"name": "TeX",
"bytes": "344"
},
{
"name": "Visual Basic",
"bytes": "49037"
}
],
"symlink_target": ""
} |
from unittest.mock import patch
import pytest
import sys
from Hologram.Network.Modem.BG96 import BG96
from UtilClasses import ModemResult
sys.path.append(".")
sys.path.append("..")
sys.path.append("../..")
def mock_write(modem, message):
return True
def mock_read(modem):
return True
def mock_readline(modem, timeout=None, hide=False):
return ""
def mock_open_serial_port(modem, device_name=None):
return True
def mock_close_serial_port(modem):
return True
def mock_detect_usable_serial_port(modem, stop_on_first=True):
return "/dev/ttyUSB0"
@pytest.fixture
def no_serial_port(monkeypatch):
monkeypatch.setattr(BG96, "_read_from_serial_port", mock_read)
monkeypatch.setattr(BG96, "_readline_from_serial_port", mock_readline)
monkeypatch.setattr(BG96, "_write_to_serial_port_and_flush", mock_write)
monkeypatch.setattr(BG96, "openSerialPort", mock_open_serial_port)
monkeypatch.setattr(BG96, "closeSerialPort", mock_close_serial_port)
monkeypatch.setattr(BG96, "detect_usable_serial_port", mock_detect_usable_serial_port)
def test_init_BG96_no_args(no_serial_port):
modem = BG96()
assert modem.timeout == 1
assert modem.socket_identifier == 0
assert modem.chatscript_file.endswith("/chatscripts/default-script")
assert modem._at_sockets_available
@patch.object(BG96, "set")
@patch.object(BG96, "command")
@patch.object(BG96, "_is_pdp_context_active")
def test_close_socket(mock_pdp, mock_command, mock_set, no_serial_port):
modem = BG96()
modem.socket_identifier = 1
mock_set.return_value = (ModemResult.OK, None)
mock_command.return_value = (ModemResult.OK, None)
mock_pdp.return_value = True
modem.close_socket()
mock_set.assert_called_with("+QIACT", "0", timeout=30)
mock_command.assert_called_with("+QICLOSE", 1)
| {
"content_hash": "29e8e371dc7cf0ea13b50af3674a69df",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 90,
"avg_line_length": 27.818181818181817,
"alnum_prop": 0.7151416122004357,
"repo_name": "hologram-io/hologram-python",
"id": "681d1e09a44984bd8e35cf8ea50c17933b8856e2",
"size": "2064",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/Modem/test_BG96.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "136"
},
{
"name": "Python",
"bytes": "223458"
},
{
"name": "Shell",
"bytes": "8209"
}
],
"symlink_target": ""
} |
import argparse
from lib.httplog import HTTPLog, LogError, InitError, ConfigError
from lib.filter import Filter, InitErr, ConfErr
from datetime import datetime
__title__ = "httpdefender"
__author__ = "Leslie.A.Cordell"
__version__ = "1.0"
__year__ = datetime.now().year
def arguments():
"""
Set up command-line arguments using argparse, simple options
"""
parser = argparse.ArgumentParser(description='Run the httpdefender script.')
parser.add_argument("-c", help="the absolute path of the apache config file. Default; /etc/httpd/conf/httpd.conf")
parser.add_argument("-l", help="the name of the logfile defined in the config file. Default; logs/access_log")
parser.add_argument("-p", help="the fullpath of the log in question. Default; /var/log/httpd/access_log")
parser.add_argument("-o", help="where to output the results to. Default; blacklist.off")
parser.add_argument("-r", help="what the results should be i.e time, host, request, agent. Default; host")
parser.add_argument("-f", help="which signature python config file to be used. Default; signatures_conf.py")
return parser.parse_args()
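# Illustrative invocation (hypothetical paths; the defaults listed above apply
# when a flag is omitted):
#   python httpdefender.py -c /etc/httpd/conf/httpd.conf -r host -o blacklist.off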
if __name__ == "__main__":
print __title__
banner = "by %s %s" % (__author__, __year__)
print banner
print "=" * len(banner) + "\n"
# Get the arguments
args = arguments()
# Set up some constants
CONF = args.c or "/etc/httpd/conf/httpd.conf"
LOG = args.l or "logs/access_log"
FULLPATH = args.p or "/var/log/httpd/access_log"
OUTPUT = args.o or "blacklist.off"
RESULTS = args.r or "host"
SIGNATURE_CONF = args.f or "signatures_conf.py"
log = False
try:
print "opening config file %s..." % CONF
log = HTTPLog(conf=CONF, log=LOG, fullpath=FULLPATH)
except LogError as err:
print err
exit(1)
except InitError as err:
print err
exit(1)
except ConfigError as err:
print err
exit(1)
if log:
_filter = False
try:
print "setting up filter from config file %s..." % SIGNATURE_CONF
_filter = Filter(log, SIGNATURE_CONF)
except InitErr as err:
print err
exit(1)
except ConfErr as err:
print err
exit(1)
try:
print "running filters for '%s' keyword" % RESULTS
_filter.runfilter(RESULTS)
except ConfErr as err:
print err
exit(1)
if len(_filter.matches):
try:
print "writing output to %s..." % OUTPUT
            _blacklist = open(OUTPUT, "w+")
            _blacklist.write('\n'.join(_filter.matches))
            _blacklist.close()
except Exception as err:
print err
exit(1)
print "httpdefender completed with %s matches!" % len(_filter.matches)
| {
"content_hash": "4906c20aa0a30cff47b916c47c8932a2",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 118,
"avg_line_length": 34.46987951807229,
"alnum_prop": 0.6011883956658511,
"repo_name": "nonapod/httpdefender",
"id": "39a1cbd898f21df89ed3734120aea110d3276352",
"size": "5033",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "httpdefender.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27148"
}
],
"symlink_target": ""
} |
import urllib
import urllib2
import smtplib
from sys import argv
import socket
import threading
import time
username = argv[2]
password = argv[3]
# Flags to represent what color the user wants to be notified about
BLACK_FLAG = False
GRAY_FLAG = False
# URLs to the product page
black_url = "https://play.google.com/store/devices/details/Moto_360_Black_Leather?id=motorola_moto_360_leather_black"
gray_url = "https://play.google.com/store/devices/details/Moto_360_Gray_Leather?id=motorola_moto_360_leather_gray"
# HTML element only present when the watch is NOT available
not_available = '<div class="not-available">'
sender = username
receivers = argv[4]
black_available_message = """Subject: Black Moto360 is Available!
Hello, {0} the Moto360 in black is currently available!!!!!!
Grab it here: goo.gl/Vm6pzw""".format(argv[2])
gray_available_message = """Subject: Gray Moto360 is Available!
Hello, {0} the Moto360 in gray is currently available!!!!!!
Grab it here: goo.gl/1FJM8p""".format(argv[2])
if argv[1] == "black" or argv[1] == "Black":
req = urllib2.Request(black_url)
BLACK_FLAG = True
if argv[1] == "gray" or argv[1] == "Gray":
req = urllib2.Request(gray_url)
GRAY_FLAG = True
# Check page for availability
while True:
response = urllib2.urlopen(req)
page = response.read()
# If available (not-available div is NOT on page), send the user an
# email to notify them.
if not not_available in page:
# watch is available, change message to correspond with the right color
if BLACK_FLAG: message = black_available_message
if GRAY_FLAG: message = gray_available_message
# send email
try:
server = smtplib.SMTP('smtp.gmail.com:587')
server.starttls()
server.login(username,password)
server.sendmail(sender, receivers, message)
server.quit()
print "Successfully sent email"
except smtplib.SMTPException:
print "Error: unable to send email"
except socket.error:
print "Socket Error: unable to send email"
print "{0} Moto360 is available right now!".format(argv[1])
else:
print "{0} Moto360 not available right now".format(argv[1])
# Repeat every x seconds
time.sleep(float(argv[5]))
| {
"content_hash": "beb11119533b95e56e6cc857ead08340",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 117,
"avg_line_length": 27.6875,
"alnum_prop": 0.7069977426636569,
"repo_name": "jonsimington/moto360_notifier",
"id": "d0cb69ccceb7ac7a6b4a0595d492d1ef2735571e",
"size": "2215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moto360_notifier.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2215"
}
],
"symlink_target": ""
} |
"""
This module is written in a DynamicProtocol style and takes care of our WEB Server configuration.
"""
from zope.interface import implementer
from NetCatKS.Config.api.interfaces import IWeb
from NetCatKS.Config.api.implementers.configuration.mixin import MixinSharedConfig
from NetCatKS.Components.common.factory import RegisterAsFactory
__author__ = 'dimd'
@implementer(IWeb)
class WEB(MixinSharedConfig):
"""
    A class representing our base WEB configuration;
    we have simple setters and getters.
"""
def __init__(self):
"""
        In our constructor we have default values for each
        property. Note: the default web method is "GET".
:return: void
"""
super(WEB, self).__init__()
self.service_name = 'Default WEB Server'
self.port = 8000
self.__http_methods = ['GET']
self.__www_root = ''
@property
def http_methods(self):
"""
        A getter for the available HTTP methods; by default there is only "GET".
        If you need more methods, you have to edit your config file.
:return: list
"""
return self.__http_methods
@http_methods.setter
def http_methods(self, methods):
"""
A setter for an allowed http methods
:param methods:
:type methods: list
:return: void
"""
self.__http_methods = methods
@property
def www_root(self):
"""
A web root getter
:return: str
"""
return self.__www_root
@www_root.setter
def www_root(self, root):
"""
A web root setter
:param root:
:type root: str
:return: void
"""
self.__www_root = root
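# Illustrative usage sketch (hypothetical values, not part of the original module):
#   web = WEB()
#   web.http_methods = ['GET', 'POST']
#   web.www_root = '/var/www/html'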
RegisterAsFactory(WEB).register()
| {
"content_hash": "ee2a361b91372138598a185c3e7bca8c",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 93,
"avg_line_length": 20.202247191011235,
"alnum_prop": 0.5939933259176863,
"repo_name": "dimddev/NetCatKS",
"id": "ca088c878ff860e3993f8c03eef6200bb7c5de40",
"size": "1798",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "NetCatKS/Config/api/implementers/configuration/web/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "182697"
}
],
"symlink_target": ""
} |
"""
The STS Job Manager.
This tool creates STS Jobs and records each job's state.
"""
import argparse
import json
import logging
import os
import time
from datetime import datetime
from typing import Dict, List, Optional
from google.cloud import bigquery, monitoring_v3
from constants import schemas
from constants.status import (KNOWN_STATUSES, STATUS,
sts_operation_status_to_table_status)
from lib.options import STSJobManagerOptions
from lib.services import Services
from lib.table_util import get_table_identifier, get_table_ref
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(os.environ.get("LOGLEVEL", "INFO").upper())
class Job:
def __init__(self, data):
self.prefix: str = data.prefix
self.status: str = data.status
self.job_name: str = data.job_name
self.last_updated: datetime = data.last_updated
self.operation_results: Optional[dict] = getattr(
data, 'operation_results', None)
def run_query(query: str, params: Optional[List], services: Services,
options: STSJobManagerOptions):
"""
Runs a given query with optional params.
"""
job_config = bigquery.QueryJobConfig()
job_config.query_parameters = params if params else []
return services.bigquery.query(
query,
location=options.bigquery_options.dataset_location,
job_config=job_config
)
def get_jobs_by_prefix(services: Services, options: STSJobManagerOptions) \
-> Dict[str, Job]:
"""
Retrieves jobs from the database and returns them in a key-value format
where the `key` is the prefix and the value is a `Job` object.
"""
table = get_table_identifier(
services, options.bigquery_options,
options.bigquery_options.table_name['job'])
# API does not support table names for preparameterized queries
# https://cloud.google.com/bigquery/docs/parameterized-queries
query = f"""
SELECT *
FROM `{table}`
""" # nosec
results = run_query(query, None, services, options)
prefixToStatus: Dict[str, Job] = {}
for row in results:
prefixToStatus[row.prefix] = Job(row)
return prefixToStatus
def set_prefixes_to_status(prefixes: List[str], status: str,
services: Services, options: STSJobManagerOptions):
"""
Sets a list of prefixes to a given status in the database.
"""
logger.info(f'Updating {len(prefixes)} prefixes to `{status}` status')
table = get_table_identifier(
services, options.bigquery_options,
options.bigquery_options.table_name['job'])
# API does not support table names for preparameterized queries
# https://cloud.google.com/bigquery/docs/parameterized-queries
query = f"""
UPDATE `{table}`
SET status = @status, last_updated = CURRENT_TIMESTAMP()
WHERE prefix IN UNNEST(@prefixes)
"""
params = [
bigquery.ScalarQueryParameter("status", "STRING", status),
bigquery.ArrayQueryParameter("prefixes", "STRING", prefixes)
]
run_query(query, params, services, options).result()
def set_job_name(prefix: str, job_name: str, services: Services,
options: STSJobManagerOptions):
"""
Set's a prefix's transfer operation job name in the database.
"""
logger.info(
f'Updating the prefix `{prefix}` with job name `{job_name}`...')
table = get_table_identifier(
services, options.bigquery_options,
options.bigquery_options.table_name['job'])
# API does not support table names for preparameterized queries
# https://cloud.google.com/bigquery/docs/parameterized-queries
query = f"""
UPDATE `{table}`
SET job_name = @job_name, last_updated = CURRENT_TIMESTAMP()
WHERE prefix = @prefix
"""
params = [
bigquery.ScalarQueryParameter("prefix", "STRING", prefix),
bigquery.ScalarQueryParameter("job_name", "STRING", job_name)
]
run_query(query, params, services, options).result()
logger.info(
f'...updated the prefix `{prefix}` with job name `{job_name}`.')
def insert_history(rows: List[object], services: Services,
options: STSJobManagerOptions):
"""
Inserts a list of rows into the job history table.
Each object provided in the list matches the `JOB_HISTORY` schema
"""
logger.info(f'Inserting {len(rows)} row(s) into the history table')
table_ref = get_table_ref(
services.bigquery, options.bigquery_options,
options.bigquery_options.table_name['job_history'])
errors = services.bigquery.insert_rows(
table_ref, rows, selected_fields=schemas.JOB_HISTORY)
if errors:
logger.error('errors were found:')
for row in errors:
logger.error(row)
raise Exception('Error inserting one or more rows')
def get_latest_operation_by_prefix(services: Services,
options: STSJobManagerOptions):
"""
    Gets the latest transfer operation corresponding to a prefix.
Returns a key-value object where the key is a prefix and the value is a
[TransferOperation](https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferOperations#resource-transferoperation).
"""
job_filter = json.dumps({"project_id": services.bigquery.project})
request = services.sts.transferOperations().list(
name='transferOperations', filter=job_filter, pageSize=256)
latest_operation_by_prefix: Dict[str, dict] = {}
operation_to_prefix: Dict[str, str] = {}
while request is not None:
response = request.execute()
if not response:
break
for operation in response['operations']:
transfer_spec = operation['metadata']['transferSpec']
if 'objectConditions' not in transfer_spec:
continue
object_conditions = transfer_spec['objectConditions']
if 'includePrefixes' not in object_conditions:
continue
if 'gcsDataSource' not in operation['metadata']['transferSpec']:
continue
if 'gcsDataSink' not in operation['metadata']['transferSpec']:
continue
if options.source_bucket != operation['metadata']['transferSpec'][
'gcsDataSource']['bucketName']:
continue
if options.destination_bucket != operation['metadata'][
'transferSpec']['gcsDataSink']['bucketName']:
continue
for prefix in object_conditions['includePrefixes']:
operation_to_set_for_prefix = None
if prefix not in latest_operation_by_prefix:
# The prefix does not have an operation, let's use this one
operation_to_set_for_prefix = operation
elif 'endTime' not in operation['metadata'] or \
'endTime' not in latest_operation_by_prefix[prefix][
'metadata']:
# if end time is not available, use the start time
if operation['metadata']['startTime'] > \
latest_operation_by_prefix[prefix]['metadata'][
'startTime']:
latest_operation_by_prefix[prefix] = operation
elif operation['metadata']['endTime'] > \
latest_operation_by_prefix[prefix]['metadata'][
'endTime']:
# This operation is newer than the assigned operation
operation_to_set_for_prefix = operation
# Set the operation for the prefix
if operation_to_set_for_prefix:
# unreference existing operation to prefix, if exists
operation_to_prefix.pop(operation['name'], None)
latest_operation_by_prefix[prefix] = operation
operation_to_prefix[operation['name']] = prefix
request = services.sts.transferOperations().list_next(
previous_request=request, previous_response=response)
# If the latest transferOperation is from a deleted job, we should not
# consider the operation for state management
deleted_job_request = services.sts.transferJobs().list(
filter=json.dumps({
"project_id": services.bigquery.project,
"jobStatuses": ["DELETED"]
}), pageSize=256)
while deleted_job_request is not None:
deleted_job_response = deleted_job_request.execute()
if not deleted_job_response:
break
for transferJob in deleted_job_response['transferJobs']:
if 'latestOperationName' not in transferJob:
continue
operation_to_remove = transferJob['latestOperationName']
prefix = operation_to_prefix.pop(operation_to_remove, None)
if prefix:
latest_operation_by_prefix.pop(prefix, None)
deleted_job_request = services.sts.transferJobs().list_next(
previous_request=deleted_job_request,
previous_response=deleted_job_response)
return latest_operation_by_prefix
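# get_latest_operation_by_prefix returns a mapping shaped roughly like the
# following (hypothetical values):
#   {"data/prefix-a/": {"name": "transferOperations/123...", "metadata": {...}}}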
def manage_state(services: Services, options: STSJobManagerOptions):
"""
Gathers all prefix information from both STS and the database, then updates
the corresponding rows where necessary.
"""
logger.info('Checking state...')
# jobs from the database
jobs = get_jobs_by_prefix(services, options)
# transfer operations from STS
latest_operation_by_prefix = get_latest_operation_by_prefix(
services, options)
history_rows: List[object] = []
job_status_to_update: Dict[str, List[str]] = {
STATUS.DONE: [],
STATUS.ERROR: [],
STATUS.PAUSED: [],
STATUS.RUNNING: [],
STATUS.WAITING: []
}
def append_history(job: Job, operation_results: object):
history_rows.append({
'prefix': job.prefix,
'status': job.status,
'job_name': job.job_name,
'operation_results': json.dumps(operation_results),
'timestamp': datetime.now()
})
for prefix in jobs:
if prefix in latest_operation_by_prefix:
operation_status = \
latest_operation_by_prefix[prefix]['metadata']['status']
expected_status = jobs[prefix].status
actual_status = sts_operation_status_to_table_status(
operation_status)
actual_job_name = latest_operation_by_prefix[prefix]['name']
if actual_status != expected_status:
# Capture the history for running jobs
logger.info(
f'Status for prefix `{prefix}` has changed from \
`{expected_status}` to `{actual_status}`')
jobs[prefix].status = actual_status
job_status_to_update[actual_status].append(prefix)
append_history(
jobs[prefix], latest_operation_by_prefix[prefix])
elif actual_status == STATUS.RUNNING:
# Capture the history for running jobs
append_history(
jobs[prefix], latest_operation_by_prefix[prefix])
if actual_job_name != jobs[prefix].job_name:
set_job_name(prefix, actual_job_name, services, options)
# sleep to avoid rate limiting
# https://cloud.google.com/bigquery/quotas#standard_tables
time.sleep(2)
# Assign the latest `operation_results`
jobs[prefix].operation_results = latest_operation_by_prefix[prefix]
if history_rows:
insert_history(history_rows, services, options)
for status in job_status_to_update:
if job_status_to_update[status]:
set_prefixes_to_status(
job_status_to_update[status], status, services, options)
# sleep to avoid rate limiting
# https://cloud.google.com/bigquery/quotas#standard_tables
time.sleep(2)
logger.info('...state is up to date.')
return jobs
def run_jobs(count: int, services: Services, options: STSJobManagerOptions):
"""
    Pulls pending prefixes from the database and either creates a new transfer
    operation or resumes an existing one.
The `manage_state` function will handle the updates in the job statuses;
this keeps DML usage to a minimum
"""
table = get_table_identifier(
services, options.bigquery_options,
options.bigquery_options.table_name['job'])
# API does not support table names for preparameterized queries
# https://cloud.google.com/bigquery/docs/parameterized-queries
query = f"""
SELECT *
FROM `{table}`
WHERE status IN UNNEST(@statuses)
LIMIT @count
""" # nosec
pending_statuses = [STATUS.WAITING, STATUS.PAUSED]
tryable_statuses = [STATUS.WAITING, STATUS.PAUSED, STATUS.ERROR]
statuses = pending_statuses if options.no_retry_on_job_error \
else tryable_statuses
params = [
bigquery.ArrayQueryParameter("statuses", "STRING", statuses),
bigquery.ScalarQueryParameter("count", "INT64", count),
]
results = run_query(query, params, services, options)
for row in results:
job = Job(row)
if job.status == STATUS.PAUSED:
operation_request = services.sts.transferOperations().resume(
name=job.job_name, body={})
operation_request.execute()
logger.info(f'Resumed `{job.prefix}` (job name: {job.job_name}).')
else:
utc_now = datetime.utcnow()
if job.status == STATUS.ERROR:
logger.error(
f'Retrying errored prefix `{job.prefix}`. \
Previous failed job: {job.job_name}')
transfer_job_body = {
'description': f'Created via STS Job Manager - {job.prefix}',
'project_id': services.bigquery.project,
'transfer_spec': {
'object_conditions': {
'include_prefixes': [
job.prefix
]
},
'transfer_options': {
'overwrite_objects_already_existing_in_sink':
options.overwrite_dest_objects
},
'gcs_data_source': {
'bucket_name': options.source_bucket
},
'gcs_data_sink': {
'bucket_name': options.destination_bucket
}
},
'schedule': {
"schedule_start_date": {
"year": utc_now.year,
"month": utc_now.month,
"day": utc_now.day
},
"schedule_end_date": {
"year": utc_now.year,
"month": utc_now.month,
"day": utc_now.day
}
},
'status': 'ENABLED'
}
request = services.sts.transferJobs().create(
body=transfer_job_body)
response = request.execute()
logger.info(
f'Created new transfer job for `{job.prefix}`: ({response}).')
return True
def determine_stalled_jobs(jobs: Dict[str, Job], last_jobs: Dict[str, Job]) \
-> List[Job]:
stalled_jobs: List[Job] = []
for prefix in jobs:
if prefix not in last_jobs:
continue
current_job = jobs[prefix]
last_job = last_jobs[prefix]
if current_job.status != STATUS.RUNNING or \
last_job.status != STATUS.RUNNING:
continue
if not current_job.operation_results or not last_job.operation_results:
continue
current_counters = \
current_job.operation_results['metadata']['counters']
last_counters = last_job.operation_results['metadata']['counters']
if current_counters and last_counters:
has_changed = False
for key in current_counters:
if key not in last_counters or \
current_counters[key] != last_counters[key]:
has_changed = True
break
if not has_changed:
stalled_jobs.append(current_job)
return stalled_jobs
def manage_jobs(jobs: Dict[str, Job], last_jobs: Dict[str, Job],
services: Services, options: STSJobManagerOptions):
"""
    Determines the number of new operations to spin up, then spins them up.
"""
def num_new_jobs_to_run():
pending_job_count = 0
current_running_jobs = 0
for prefix in jobs:
if jobs[prefix].status == STATUS.RUNNING:
current_running_jobs += 1
elif jobs[prefix].status == STATUS.WAITING or \
jobs[prefix].status == STATUS.PAUSED:
pending_job_count += 1
elif not options.no_retry_on_job_error and \
jobs[prefix].status == STATUS.ERROR:
pending_job_count += 1
if options.allow_new_jobs_when_stalled:
stalled_count = len(determine_stalled_jobs(jobs, last_jobs))
current_running_jobs = max(0, current_running_jobs - stalled_count)
max_number_jobs_available_to_run = \
options.max_concurrent_jobs - current_running_jobs
double_current_job_count = current_running_jobs * 2
if not pending_job_count:
logger.info('No jobs available to run')
return 0
elif current_running_jobs > options.max_concurrent_jobs:
logger.info(f'Will not create any new jobs - too many are running \
(current = {current_running_jobs}, \
max = {options.max_concurrent_jobs})')
return 0
elif current_running_jobs == 0 and \
max_number_jobs_available_to_run > 0:
logger.info(
'Will prepare initial job, as no other jobs are running')
return 1
else:
logger.info('Ramping up job count')
return min(max_number_jobs_available_to_run,
double_current_job_count)
logger.info('Managing jobs...')
count = num_new_jobs_to_run()
if not count:
logger.info('...no new jobs to run.')
return
logger.info(f'...spinning up to {count} new job(s)...')
run_jobs(count, services, options)
logger.info('...done running jobs.')
def publish_heartbeat(jobs: Dict[str, Job], last_jobs: Dict[str, Job],
services: Services, options: STSJobManagerOptions,
monitoring_types=monitoring_v3.types):
"""
Publishes status heartbeats
"""
def publish_timeseries_heartbeat(name: str, value: int, services: Services,
project_name: str,
monitoring_types=monitoring_v3.types):
logger.info(f'Preparing heartbeat for `{name}` (value: {value})...')
series = monitoring_types.TimeSeries()
series.metric.type = name
point = series.points.add()
point.value.int64_value = value
point.interval.end_time.seconds = int(time.time())
services.monitoring.create_time_series(project_name, [series])
logger.info(f'...published heartbeat `{name}`.')
p = options.stackdriver_project if options.stackdriver_project \
else services.bigquery.project
monitoring_project_name = services.monitoring.project_path(p)
logger.info(f'Preparing heartbeats for `{monitoring_project_name}`...')
status_count: Dict[str, int] = {}
stalled_count = 0
# Ensure known statuses are published, even if 0
for status in KNOWN_STATUSES:
status_count[status] = 0
# Gather raw status counts
for prefix in jobs:
job = jobs[prefix]
# status could be unknown
if job.status not in status_count:
status_count[job.status] = 0
status_count[job.status] += 1
for status in status_count:
name = f'custom.googleapis.com/sts_job_manager/status/{status}'
count = status_count[status]
publish_timeseries_heartbeat(
name, count, services, monitoring_project_name, monitoring_types)
for job in determine_stalled_jobs(jobs, last_jobs):
logger.warn(f'Job `{job.job_name}` appears to be stalled.')
stalled_count += 1
# Publish stalled count
stalled_metric = 'custom.googleapis.com/sts_job_manager/metrics/stalled'
publish_timeseries_heartbeat(
stalled_metric, stalled_count, services, monitoring_project_name,
monitoring_types)
logger.info('...done publishing heartbeats.')
def interval(services: Services, options: STSJobManagerOptions):
"""
The main state and job running interval.
This runs the main lifecycle of this application.
"""
interval_count = 0
last_state_check = 0.0
last_manage_jobs = 0.0
last_jobs: Dict[str, Job] = {}
jobs: Dict[str, Job] = {}
while True:
logger.info(f'Running main interval #{interval_count}...')
start = time.time()
job_timeout = start - last_manage_jobs >= options.job_interval
metrics_timeout = start - last_state_check >= options.metrics_interval
if job_timeout or metrics_timeout:
last_jobs = jobs
jobs = manage_state(services, options)
last_state_check = time.time()
if job_timeout:
manage_jobs(jobs, last_jobs, services, options)
# Regather metrics
jobs = manage_state(services, options)
last_manage_jobs = time.time()
if options.publish_heartbeat:
try:
publish_heartbeat(jobs, last_jobs, services, options)
except Exception as e:
logger.error('Failed to publish heartbeat:')
logger.exception(e)
delta = time.time() - start + options.sleep_timeout
logger.info(f'...done running main interval #{interval_count}.\n')
if delta > 0:
time.sleep(delta)
interval_count += 1
def main(options: STSJobManagerOptions):
"""
The main function.
"""
logger.info('Initializing STS Job Manager.')
services = Services()
interval(services, options)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
options = STSJobManagerOptions()
options.setup_arg_parser(parser)
args = parser.parse_args()
options.assign_from_parsed_args(args)
main(options)
| {
"content_hash": "c6b40381935373f59ebfd0af65255552",
"timestamp": "",
"source": "github",
"line_count": 683,
"max_line_length": 136,
"avg_line_length": 33.84040995607614,
"alnum_prop": 0.5910093886557348,
"repo_name": "GoogleCloudPlatform/professional-services",
"id": "c8b8b377f5ebc8879cf9ed9ed656270e6e721aa1",
"size": "23713",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tools/sts-job-manager/sts_job_manager.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "117994"
},
{
"name": "C++",
"bytes": "174"
},
{
"name": "CSS",
"bytes": "13405"
},
{
"name": "Component Pascal",
"bytes": "798"
},
{
"name": "Dockerfile",
"bytes": "15093"
},
{
"name": "Go",
"bytes": "352968"
},
{
"name": "HCL",
"bytes": "204776"
},
{
"name": "HTML",
"bytes": "1229668"
},
{
"name": "Java",
"bytes": "338810"
},
{
"name": "JavaScript",
"bytes": "59905"
},
{
"name": "Jinja",
"bytes": "60083"
},
{
"name": "Makefile",
"bytes": "14129"
},
{
"name": "Python",
"bytes": "2250081"
},
{
"name": "Scala",
"bytes": "978327"
},
{
"name": "Shell",
"bytes": "109299"
},
{
"name": "Smarty",
"bytes": "19839"
},
{
"name": "TypeScript",
"bytes": "147194"
}
],
"symlink_target": ""
} |
def MoveBodyRandomize():
if MoveBodyRandom==1:
i01.setHandSpeed("left", 0.8, 0.8, 0.8, 0.8, 0.8, 0.8)
i01.setHandSpeed("right", 0.8, 0.8, 0.8, 0.8, 0.8, 0.8)
i01.setArmSpeed("right", 0.5, 0.5, 0.5, 0.5)
i01.setArmSpeed("left", 0.5, 0.5, 0.5, 0.5)
i01.setHeadSpeed(0.5, 0.5)
rollneck.setSpeed(0.5)
i01.setTorsoSpeed(0.5, 0.35, 0.5)
i01.moveHead(random.randint(70,100), random.randint(60,90))
rollneck.moveTo(random.randint(50,130))
i01.moveArm("left",random.randint(0,6),random.randint(78,90),random.randint(20,28),random.randint(12,17))
i01.moveArm("right",random.randint(0,6),random.randint(78,90),random.randint(20,28),random.randint(12,17))
i01.moveHand("left",random.randint(50,92),random.randint(28,130),random.randint(28,100),random.randint(28,110),random.randint(28,110),random.randint(20,40))
i01.moveHand("right",random.randint(50,92),random.randint(28,130),random.randint(28,110),random.randint(28,110),random.randint(28,110),random.randint(120,160))
i01.moveTorso(random.randint(85,95),random.randint(85,95),random.randint(85,95))
MoveBodyTimer = Runtime.start("MoveBodyTimer","Clock")
MoveBodyTimer.setInterval(random.randint(600,1200))
def MoveBody(timedata):
MoveBodyRandomize()
MoveBodyTimer.setInterval(random.randint(5000,20000))
def MoveBodyStopped():
if MoveBodyRandom==1:
MoveBodyTimer.addListener("pulse", python.name, "MoveBody")
MoveBodyTimer.addListener("clockStopped", python.name, "MoveBodyStopped")
def MoveBodyStart():
MoveBodyRandomize()
MoveBodyTimer.addListener("pulse", python.name, "MoveBody")
MoveBodyTimer.addListener("clockStopped", python.name, "MoveBodyStopped")
MoveBodyTimer.addListener("clockStarted", python.name, "MoveBodyStart")
| {
"content_hash": "9f78d15577608aa3bc60ac507ef50937",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 164,
"avg_line_length": 48.888888888888886,
"alnum_prop": 0.7227272727272728,
"repo_name": "MyRobotLab/pyrobotlab",
"id": "82cc56de1f847f4572035ed445a71fbca3477abd",
"size": "1760",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "home/hairygael/GESTURES/MoveBodyRandomize.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1827"
},
{
"name": "C",
"bytes": "126258"
},
{
"name": "C++",
"bytes": "373018"
},
{
"name": "Java",
"bytes": "156911"
},
{
"name": "Processing",
"bytes": "17022"
},
{
"name": "Python",
"bytes": "3309101"
},
{
"name": "Shell",
"bytes": "4635"
},
{
"name": "VBA",
"bytes": "11115"
}
],
"symlink_target": ""
} |
import logging
import pytest
import shlex
import time
from tests.common.test_result_verifier import *
from subprocess import call
from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.test_vector import *
from tests.common.test_dimensions import ALL_NODES_ONLY
from tests.common.impala_test_suite import *
from tests.common.skip import SkipIfS3, SkipIfIsilon
# Tests to validate HDFS partitioning.
class TestPartitioning(ImpalaTestSuite):
TEST_DBS = ['hdfs_partitioning', 'bool_partitions']
@classmethod
def get_workload(self):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestPartitioning, cls).add_test_dimensions()
cls.TestMatrix.add_dimension(create_single_exec_option_dimension())
# There is no reason to run these tests using all dimensions.
cls.TestMatrix.add_constraint(lambda v:\
v.get_value('table_format').file_format == 'text' and\
v.get_value('table_format').compression_codec == 'none')
@classmethod
def setup_class(cls):
super(TestPartitioning, cls).setup_class()
map(cls.cleanup_db, cls.TEST_DBS)
cls.hdfs_client.delete_file_dir("test-warehouse/all_insert_partition_col_types/",\
recursive=True)
@classmethod
def teardown_class(cls):
map(cls.cleanup_db, cls.TEST_DBS)
super(TestPartitioning, cls).teardown_class()
@pytest.mark.execute_serially
def test_partition_col_types(self, vector):
self.execute_query("create database hdfs_partitioning");
self.run_test_case('QueryTest/partition-col-types', vector,
use_db='hdfs_partitioning')
# Missing Coverage: Impala deals with boolean partitions created by Hive on a non-hdfs
# filesystem.
@SkipIfS3.hive
@SkipIfIsilon.hive
@pytest.mark.execute_serially
def test_boolean_partitions(self, vector):
# This test takes about a minute to complete due to the Hive commands that are
# executed. To cut down on runtime, limit the test to exhaustive exploration
# strategy.
if self.exploration_strategy() != 'exhaustive': pytest.skip()
db_name = 'bool_partitions'
tbl_name = 'tbl'
self.execute_query("create database " + db_name)
self.execute_query("use " + db_name)
self.execute_query("create table %s (i int) partitioned by (b boolean)" % tbl_name)
# Insert some data using Hive. Due to HIVE-6590, Hive may create multiple
# partitions, mapping to the same boolean literal value.
# For example, Hive may create partitions: /b=FALSE and /b=false, etc
call(["hive", "-e", "INSERT OVERWRITE TABLE %s.%s PARTITION(b=false) SELECT 1 from "\
"functional.alltypes limit 1" % (db_name, tbl_name)])
call(["hive", "-e", "INSERT OVERWRITE TABLE %s.%s PARTITION(b=FALSE) SELECT 2 from "\
"functional.alltypes limit 1" % (db_name, tbl_name)])
call(["hive", "-e", "INSERT OVERWRITE TABLE %s.%s PARTITION(b=true) SELECT 10 from "\
"functional.alltypes limit 1" % (db_name, tbl_name)])
# Update the Impala metadata
self.execute_query("refresh " + tbl_name)
# List the partitions. Show table stats returns 1 row for each partition + 1 summary
# row
result = self.execute_query("show table stats %s" % tbl_name)
assert len(result.data) == 3 + 1
# Verify Impala properly merges the results of the bad Hive metadata.
assert '13' == self.execute_scalar("select sum(i) from %s" % tbl_name);
assert '10' == self.execute_scalar("select sum(i) from %s where b=true" % tbl_name)
assert '3' == self.execute_scalar("select sum(i) from %s where b=false" % tbl_name)
# INSERT into a boolean column is disabled in Impala due to this Hive bug.
try:
self.execute_query("insert into %s partition(bool_col=true) select 1" % tbl_name)
except ImpalaBeeswaxException, e:
assert 'AnalysisException: INSERT into table with BOOLEAN partition column (%s) '\
'is not supported: %s.%s' % ('b', db_name, tbl_name) in str(e)
| {
"content_hash": "c50ced179d43207e6222d115431c6168",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 89,
"avg_line_length": 41.4375,
"alnum_prop": 0.6993464052287581,
"repo_name": "scalingdata/Impala",
"id": "906c94bec99904418fe4b9795363107299e1e303",
"size": "4599",
"binary": false,
"copies": "1",
"ref": "refs/heads/rocana-master",
"path": "tests/query_test/test_partitioning.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "69445"
},
{
"name": "C++",
"bytes": "5890891"
},
{
"name": "CMake",
"bytes": "89845"
},
{
"name": "CSS",
"bytes": "86925"
},
{
"name": "Groff",
"bytes": "1633"
},
{
"name": "HTML",
"bytes": "56"
},
{
"name": "Java",
"bytes": "3280226"
},
{
"name": "Lex",
"bytes": "21429"
},
{
"name": "PLSQL",
"bytes": "3066"
},
{
"name": "PLpgSQL",
"bytes": "393"
},
{
"name": "Python",
"bytes": "1526425"
},
{
"name": "SQLPL",
"bytes": "187"
},
{
"name": "Shell",
"bytes": "145481"
},
{
"name": "Thrift",
"bytes": "240246"
},
{
"name": "Yacc",
"bytes": "78633"
}
],
"symlink_target": ""
} |
from __future__ import division
__author__ = 'Christoph Statz'
import logging
from maui.backend import context
from visitor import ParallelVisitInstrumentation, VisitInstrumentation
from .adapter import InSituDataAdapter
class MauiInSitu(object):
def __init__(self, fields, identifier, *args, **kwargs):
self.logger = logging.getLogger(__name__)
self.logger.debug("Instantiating InSitu Visualization")
description = "Maui Framework In-Situ Visualization."
if hasattr(context, 'comm'):
instrumentation = ParallelVisitInstrumentation(identifier, description, *args, **kwargs)
else:
instrumentation = VisitInstrumentation(identifier, description, *args, **kwargs)
self.__instrumentation = instrumentation
self.__data_adapter = InSituDataAdapter(fields)
for name in self.__data_adapter.meshes:
mesh_type = self.__data_adapter.meshes[name]['mesh_type']
dimension = self.__data_adapter.meshes[name]['dimension']
mesh_kwargs = dict()
try:
mesh_kwargs = self.__data_adapter.meshes[name]['kwargs']
except KeyError:
self.logger.warn("No metadata provided for mesh %s!" % name)
if isinstance(self.__data_adapter.meshes[name]['domain_number'], dict):
for key in self.__data_adapter.meshes[name]['domain_number']:
domain = self.__data_adapter.meshes[name]['domain_number'][key]
dp = self.__data_adapter.meshes[name]['data_provider'][key]
self.__instrumentation.register_mesh(name, dp, mesh_type, dimension, domain=domain, number_of_domains=self.__data_adapter.meshes[name]['number_of_domains'], **mesh_kwargs)
else:
self.__instrumentation.register_mesh(name, None, mesh_type, dimension, domain='omit', number_of_domains=self.__data_adapter.meshes[name]['number_of_domains'], **mesh_kwargs)
for name in self.__data_adapter.variables:
self.logger.debug(name)
var_type = self.__data_adapter.variables[name]['var_type']
var_centering = self.__data_adapter.variables[name]['centering']
mesh_name = self.__data_adapter.variables[name]['mesh_name']
var_kwargs = dict()
try:
var_kwargs = self.__data_adapter.variables[name]['kwargs']
except KeyError:
self.logger.warn("No metadata provided for variable %s!" % name)
if isinstance(self.__data_adapter.variables[name]['domain_number'], dict):
for key in self.__data_adapter.variables[name]['domain_number']:
domain = self.__data_adapter.variables[name]['domain_number'][key]
dp = self.__data_adapter.variables[name]['data_provider'][key]
self.__instrumentation.register_variable(name, mesh_name, dp, var_type, var_centering, domain=domain, **var_kwargs)
else:
self.__instrumentation.register_variable(name, mesh_name, None, var_type, var_centering, domain='omit', **var_kwargs)
def register_curve(self, name, data_provider):
self.__instrumentation.register_curve(name, data_provider)
def register_expression(self, name, expr, var_type, **kwargs):
self.__instrumentation.register_expression(name, expr, var_type, **kwargs)
def register_ui_command(self, name, function, args):
self.__instrumentation.register_ui_command(name, function, args)
def register_ui_value(self, name, function, args):
self.__instrumentation.register_ui_value(name, function, args)
def register_ui_state(self, name, function, args):
self.__instrumentation.register_ui_state(name, function, args)
def register_ui_set_int(self, name, function):
self.__instrumentation.register_ui_set_int(name, function)
def register_ui_set_string(self, name, function):
self.__instrumentation.register_ui_set_string(name, function)
def register_command(self, name, function, args):
self.__instrumentation.register_generic_command(name, function, args)
def run(self):
self.__instrumentation.run()
def step_wrapper(self, step):
return self.__instrumentation.step_wrapper(step)
| {
"content_hash": "e5de4e4b7aaa64a4a49622ccf686f0fb",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 191,
"avg_line_length": 44.78350515463917,
"alnum_prop": 0.6406537753222836,
"repo_name": "cstatz/maui-insitu",
"id": "f5899e9777e6825beeefdcfa344025d1bcf8e563",
"size": "4391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mauiinsitu/output.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "10326"
}
],
"symlink_target": ""
} |
BOT_NAME = 'ScrapySVGTodoJuegos'
SPIDER_MODULES = ['ScrapySVGTodoJuegos.spiders']
NEWSPIDER_MODULE = 'ScrapySVGTodoJuegos.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'ScrapySVGTodoJuegos (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'ScrapySVGTodoJuegos.middlewares.ScrapysvgtodojuegosSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'ScrapySVGTodoJuegos.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'ScrapySVGTodoJuegos.pipelines.ScrapysvgtodojuegosPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| {
"content_hash": "0d4b8faf602fb45102b5bad2e6965c50",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 109,
"avg_line_length": 35.620253164556964,
"alnum_prop": 0.7768301350390903,
"repo_name": "lKaza/VideoJuegosFisicosChile",
"id": "75a697aa7423b727fd6284e5064f2ddbdc79d800",
"size": "3258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ScrapySVGTodoJuegos/ScrapySVGTodoJuegos/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47014"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import django
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.db import IntegrityError, models, transaction
from django.db.models.query import QuerySet
from django.template.defaultfilters import slugify as default_slugify
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext
from taggit.utils import _get_field
try:
from unidecode import unidecode
except ImportError:
unidecode = lambda tag: tag
try:
from django.contrib.contenttypes.fields import GenericForeignKey
except ImportError: # django < 1.7
from django.contrib.contenttypes.generic import GenericForeignKey
try:
atomic = transaction.atomic
except AttributeError:
from contextlib import contextmanager
@contextmanager
def atomic(using=None):
sid = transaction.savepoint(using=using)
try:
yield
except IntegrityError:
transaction.savepoint_rollback(sid, using=using)
raise
else:
transaction.savepoint_commit(sid, using=using)
# Default tag states
default_states = (
(0, _('Published')), (1, _('Hidden')),
)
def get_states():
"""
    Returns the tag states: settings.TAG_STATES if defined, otherwise the defaults.
"""
if hasattr(settings, 'TAG_STATES'):
states = settings.TAG_STATES
else:
states = default_states
return states
@python_2_unicode_compatible
class TagBase(models.Model):
name = models.CharField(verbose_name=_('Name'), unique=True, max_length=100)
slug = models.SlugField(verbose_name=_('Slug'), unique=True, max_length=100)
def __str__(self):
return self.name
class Meta:
abstract = True
def save(self, *args, **kwargs):
if not self.pk and not self.slug:
self.slug = self.slugify(self.name)
from django.db import router
using = kwargs.get("using") or router.db_for_write(
type(self), instance=self)
# Make sure we write to the same db for all attempted writes,
# with a multi-master setup, theoretically we could try to
# write and rollback on different DBs
kwargs["using"] = using
        # Be opportunistic and try to save the tag; this should work for
# most cases ;)
try:
with atomic(using=using):
res = super(TagBase, self).save(*args, **kwargs)
return res
except IntegrityError:
pass
# Now try to find existing slugs with similar names
slugs = set(
self.__class__._default_manager
.filter(slug__startswith=self.slug)
.values_list('slug', flat=True)
)
i = 1
while True:
slug = self.slugify(self.name, i)
if slug not in slugs:
self.slug = slug
                # We purposely ignore concurrency issues here for now.
                # (That is, until we find a nice solution...)
return super(TagBase, self).save(*args, **kwargs)
i += 1
else:
return super(TagBase, self).save(*args, **kwargs)
def slugify(self, tag, i=None):
slug = default_slugify(unidecode(tag))
if i is not None:
slug += "_%d" % i
return slug
class Tag(TagBase):
state = models.PositiveSmallIntegerField(default=0, choices=get_states(), )
class Meta:
verbose_name = _("Tag")
verbose_name_plural = _("Tags")
@python_2_unicode_compatible
class ItemBase(models.Model):
def __str__(self):
return ugettext("%(object)s tagged with %(tag)s") % {
"object": self.content_object,
"tag": self.tag
}
class Meta:
abstract = True
@classmethod
def tag_model(cls):
return _get_field(cls, 'tag').rel.to
@classmethod
def tag_relname(cls):
return _get_field(cls, 'tag').rel.related_name
@classmethod
def lookup_kwargs(cls, instance):
return {
'content_object': instance
}
@classmethod
def bulk_lookup_kwargs(cls, instances):
return {
"content_object__in": instances,
}
class TaggedItemBase(ItemBase):
tag = models.ForeignKey(Tag, related_name="%(app_label)s_%(class)s_items")
class Meta:
abstract = True
@classmethod
def tags_for(cls, model, instance=None, **extra_filters):
kwargs = extra_filters or {}
if instance is not None:
kwargs.update({
'%s__content_object' % cls.tag_relname(): instance
})
return cls.tag_model().objects.filter(**kwargs)
kwargs.update({
'%s__content_object__isnull' % cls.tag_relname(): False
})
return cls.tag_model().objects.filter(**kwargs).distinct()
class GenericTaggedItemBase(ItemBase):
object_id = models.IntegerField(verbose_name=_('Object id'), db_index=True)
content_type = models.ForeignKey(
ContentType,
verbose_name=_('Content type'),
related_name="%(app_label)s_%(class)s_tagged_items"
)
content_object = GenericForeignKey()
class Meta:
abstract = True
@classmethod
def lookup_kwargs(cls, instance):
return {
'object_id': instance.pk,
'content_type': ContentType.objects.get_for_model(instance)
}
@classmethod
def bulk_lookup_kwargs(cls, instances):
if isinstance(instances, QuerySet):
# Can do a real object_id IN (SELECT ..) query.
return {
"object_id__in": instances,
"content_type": ContentType.objects.get_for_model(instances.model),
}
else:
# TODO: instances[0], can we assume there are instances.
return {
"object_id__in": [instance.pk for instance in instances],
"content_type": ContentType.objects.get_for_model(instances[0]),
}
@classmethod
def tags_for(cls, model, instance=None, **extra_filters):
ct = ContentType.objects.get_for_model(model)
kwargs = {
"%s__content_type" % cls.tag_relname(): ct
}
if instance is not None:
kwargs["%s__object_id" % cls.tag_relname()] = instance.pk
if extra_filters:
kwargs.update(extra_filters)
return cls.tag_model().objects.filter(**kwargs).distinct()
class TaggedItem(GenericTaggedItemBase, TaggedItemBase):
class Meta:
verbose_name = _("Tagged Item")
verbose_name_plural = _("Tagged Items")
if django.VERSION >= (1, 5):
index_together = [
["content_type", "object_id"],
]
| {
"content_hash": "c41f0f42d7ea23e9d2c92b94d422adc0",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 83,
"avg_line_length": 30.722466960352424,
"alnum_prop": 0.5891884141095498,
"repo_name": "eugena/django-taggit",
"id": "e38428f23fdc605286fc6b3cda083c907ab90e6f",
"size": "6974",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "taggit/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "92514"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('argus', '0013_client'),
]
operations = [
migrations.AddField(
model_name='astu',
name='is_online',
field=models.BooleanField(default=False),
),
]
| {
"content_hash": "cd6375211fd2f0e0e6a5d5e86a61ea05",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 53,
"avg_line_length": 20.333333333333332,
"alnum_prop": 0.5819672131147541,
"repo_name": "dehu4ka/lna",
"id": "312cd3231e7cfa47154646e6d5448080590e7750",
"size": "439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "argus/migrations/0014_astu_is_online.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7385"
},
{
"name": "HTML",
"bytes": "75367"
},
{
"name": "JavaScript",
"bytes": "106914"
},
{
"name": "Python",
"bytes": "391076"
},
{
"name": "Shell",
"bytes": "4196"
}
],
"symlink_target": ""
} |
from datetime import datetime
import getpass
import sys
import os
root_dir = os.path.dirname(os.path.abspath(__file__))
proj_name = os.path.split(root_dir)[1]
if os.path.isfile(os.path.normpath(root_dir + os.path.sep + 'version.txt')):
open('version.txt', 'w').close()
f = open('version.txt', 'w')
f.write('Version information file of project ' + proj_name + ', don\'t edit this file manually.\r\n')
f.write('Check the latest version, go to: https://github.com/lowitty/xtool\r\n')
if 1 < len(sys.argv):
f.write('Version:\t' + sys.argv[1].strip() + '\r\n')
f.write('Author: \t' + getpass.getuser() + '\r\n')
f.write('Release:\t' + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
f.flush()
f.close() | {
"content_hash": "58adafc663bd4f8367b8b36f30eeaf0b",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 101,
"avg_line_length": 37.10526315789474,
"alnum_prop": 0.6553191489361702,
"repo_name": "lowitty/xtool",
"id": "fb6f23f68a2450db152c0f789bef4bab2f2fb320",
"size": "749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".version.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "162529"
}
],
"symlink_target": ""
} |
from django.views.generic import TemplateView
from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns(
'',
url(r'^$', TemplateView.as_view(template_name='contest/index.html'),
name='index'),
url(r'rules$', TemplateView.as_view(template_name='contest/rules.html'),
name='rules'),
url(r'^who_got_capped$', 'tictactoe.contest.views.who_got_capped'),
url(r'^upload$', 'tictactoe.contest.views.upload'),
url(r'^ranking$', 'tictactoe.contest.views.ranking'),
url(r'^entries$', 'tictactoe.contest.views.entries'),
url(r'^entries/(?P<uid>\d+)$', 'tictactoe.contest.views.entries'),
url(r'^entry/(?P<id>\d+)$', 'tictactoe.contest.views.entry'),
url(r'^fight/(?P<id>\d+)$', 'tictactoe.contest.views.fight'),
url(r'^login/$', 'django.contrib.auth.views.login', name='login'),
url(r'^logout$', 'django.contrib.auth.views.logout',
{'next_page': '/'}, name='logout'),
url(r'^admin/', include(admin.site.urls)),
)
| {
"content_hash": "4f0558e2754fedc7cfaf26ff110b7cb8",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 76,
"avg_line_length": 39.69230769230769,
"alnum_prop": 0.6472868217054264,
"repo_name": "Motiejus/tictactoe",
"id": "1f172fbd915e24e098d31e7f01ddce10cda0d33a",
"size": "1032",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tictactoe/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2248"
},
{
"name": "JavaScript",
"bytes": "3137"
},
{
"name": "Python",
"bytes": "35588"
}
],
"symlink_target": ""
} |
'''
Configuration object
====================
The :class:`Config` object is an instance of a modified Python ConfigParser.
See the `ConfigParser documentation
<http://docs.python.org/library/configparser.html>`_ for more information.
Kivy has a configuration file which determines the default settings. In
order to change these settings, you can alter this file manually or use
the Config object. Please see the :ref:`Configure Kivy` section for more
information.
Applying configurations
-----------------------
Configuration options control the initialization of the :class:`~kivy.app.App`.
In order to avoid situations where the config settings do not work or are not
applied before window creation (like setting an initial window size),
:meth:`Config.set <kivy.config.ConfigParser.set>` should be used before
importing any other Kivy modules. Ideally, this means setting them right at
the start of your main.py script.
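For example, a minimal sketch that sets the initial window size before any
other Kivy import (the specific values are illustrative)::
    from kivy.config import Config
    Config.set('graphics', 'width', '800')
    Config.set('graphics', 'height', '600')
    # only now import the rest of Kivy
    from kivy.app import App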
Alternatively, you can save these settings permanently using
:meth:`Config.set <ConfigParser.set>` then
:meth:`Config.write <ConfigParser.write>`. In this case, you will need to
restart the app for the changes to take effect. Note that this approach will
affect all Kivy apps system wide.
Usage of the Config object
--------------------------
To read a configuration token from a particular section::
>>> from kivy.config import Config
>>> Config.getint('kivy', 'show_fps')
0
Change the configuration and save it::
>>> Config.set('postproc', 'retain_time', '50')
>>> Config.write()
For information on configuring your :class:`~kivy.app.App`, please see the
:ref:`Application configuration` section.
.. versionchanged:: 1.7.1
The ConfigParser should work correctly with utf-8 now. The values are
converted from ascii to unicode only when needed. The method get() returns
utf-8 strings.
.. _configuration-tokens:
Available configuration tokens
------------------------------
.. |log_levels| replace:: 'debug', 'info', 'warning', 'error' or 'critical'
:kivy:
`desktop`: int, 0 or 1
This option controls desktop OS specific features, such as enabling
        a draggable scroll-bar in scroll views, disabling bubbles in
        TextInput, etc. 0 is disabled, 1 is enabled.
`exit_on_escape`: int, 0 or 1
Enables exiting kivy when escape is pressed.
0 is disabled, 1 is enabled.
`pause_on_minimize`: int, 0 or 1
If set to `1`, the main loop is paused and the `on_pause` event
is dispatched when the window is minimized. This option is intended
for desktop use only. Defaults to `0`.
`keyboard_layout`: string
Identifier of the layout to use.
`keyboard_mode`: string
        Specifies the keyboard mode to use. It can be one of the following:
* '' - Let Kivy choose the best option for your current platform.
* 'system' - real keyboard.
* 'dock' - one virtual keyboard docked to a screen side.
* 'multi' - one virtual keyboard for every widget request.
* 'systemanddock' - virtual docked keyboard plus input from real
keyboard.
* 'systemandmulti' - analogous.
`log_dir`: string
Path of log directory.
`log_enable`: int, 0 or 1
Activate file logging. 0 is disabled, 1 is enabled.
`log_level`: string, one of |log_levels|
Set the minimum log level to use.
`log_name`: string
Format string to use for the filename of log file.
`window_icon`: string
Path of the window icon. Use this if you want to replace the default
pygame icon.
:postproc:
`double_tap_distance`: float
Maximum distance allowed for a double tap, normalized inside the range
0 - 1000.
`double_tap_time`: int
Time allowed for the detection of double tap, in milliseconds.
`ignore`: list of tuples
List of regions where new touches are ignored.
This configuration token can be used to resolve hotspot problems
with DIY hardware. The format of the list must be::
ignore = [(xmin, ymin, xmax, ymax), ...]
All the values must be inside the range 0 - 1.
`jitter_distance`: int
Maximum distance for jitter detection, normalized inside the range 0
- 1000.
`jitter_ignore_devices`: string, separated with commas
List of devices to ignore from jitter detection.
`retain_distance`: int
If the touch moves more than is indicated by retain_distance, it will
not be retained. Argument should be an int between 0 and 1000.
`retain_time`: int
Time allowed for a retain touch, in milliseconds.
`triple_tap_distance`: float
Maximum distance allowed for a triple tap, normalized inside the range
0 - 1000.
`triple_tap_time`: int
Time allowed for the detection of triple tap, in milliseconds.
:graphics:
    `borderless`: int, one of 0 or 1
If set to `1`, removes the window border/decoration.
    `window_state`: string, one of 'visible', 'hidden', 'maximized' \
or 'minimized'
Sets the window state, defaults to 'visible'. This option is available
only for the SDL2 window provider and it should be used on desktop
OSes.
`fbo`: string, one of 'hardware', 'software' or 'force-hardware'
Selects the FBO backend to use.
`fullscreen`: int or string, one of 0, 1, 'fake' or 'auto'
Activate fullscreen. If set to `1`, a resolution of `width`
times `height` pixels will be used.
If set to `auto`, your current display's resolution will be
used instead. This is most likely what you want.
If you want to place the window in another display,
use `fake`, or set the `borderless` option from the graphics section,
then adjust `width`, `height`, `top` and `left`.
`height`: int
Height of the :class:`~kivy.core.window.Window`, not used if
`fullscreen` is set to `auto`.
`left`: int
Left position of the :class:`~kivy.core.window.Window`.
`maxfps`: int, defaults to 60
Maximum FPS allowed.
        .. warning::
Setting maxfps to 0 will lead to max CPU usage.
    `multisamples`: int, defaults to 2
Sets the `MultiSample Anti-Aliasing (MSAA)
<http://en.wikipedia.org/wiki/Multisample_anti-aliasing>`_ level.
Increasing this value results in smoother graphics but at the cost of
processing time.
.. note::
This feature is limited by device hardware support and will have no
effect on devices which do not support the level of MSAA requested.
`position`: string, one of 'auto' or 'custom'
Position of the window on your display. If `auto` is used, you have no
control of the initial position: `top` and `left` are ignored.
`show_cursor`: int, one of 0 or 1
Set whether or not the cursor is shown on the window.
`top`: int
Top position of the :class:`~kivy.core.window.Window`.
`resizable`: int, one of 0 or 1
If 0, the window will have a fixed size. If 1, the window will be
resizable.
`rotation`: int, one of 0, 90, 180 or 270
Rotation of the :class:`~kivy.core.window.Window`.
`width`: int
Width of the :class:`~kivy.core.window.Window`, not used if
`fullscreen` is set to `auto`.
`minimum_width`: int
Minimum width to restrict the window to. (sdl2 only)
    `minimum_height`: int
Minimum height to restrict the window to. (sdl2 only)
:input:
You can create new input devices using this syntax::
# example of input provider instance
yourid = providerid,parameters
# example for tuio provider
default = tuio,127.0.0.1:3333
mytable = tuio,192.168.0.1:3334
.. seealso::
Check the providers in kivy.input.providers for the syntax to use
inside the configuration file.
:widgets:
`scroll_distance`: int
Default value of the
:attr:`~kivy.uix.scrollview.ScrollView.scroll_distance`
property used by the :class:`~kivy.uix.scrollview.ScrollView` widget.
Check the widget documentation for more information.
`scroll_friction`: float
Default value of the
:attr:`~kivy.uix.scrollview.ScrollView.scroll_friction`
property used by the :class:`~kivy.uix.scrollview.ScrollView` widget.
Check the widget documentation for more information.
`scroll_timeout`: int
Default value of the
:attr:`~kivy.uix.scrollview.ScrollView.scroll_timeout`
property used by the :class:`~kivy.uix.scrollview.ScrollView` widget.
Check the widget documentation for more information.
`scroll_stoptime`: int
Default value of the
:attr:`~kivy.uix.scrollview.ScrollView.scroll_stoptime`
property used by the :class:`~kivy.uix.scrollview.ScrollView` widget.
Check the widget documentation for more information.
.. deprecated:: 1.7.0
Please use
:class:`~kivy.uix.scrollview.ScrollView.effect_cls` instead.
`scroll_moves`: int
Default value of the
:attr:`~kivy.uix.scrollview.ScrollView.scroll_moves`
property used by the :class:`~kivy.uix.scrollview.ScrollView` widget.
Check the widget documentation for more information.
.. deprecated:: 1.7.0
Please use
:class:`~kivy.uix.scrollview.ScrollView.effect_cls` instead.
:modules:
You can activate modules with this syntax::
modulename =
Anything after the = will be passed to the module as arguments.
Check the specific module's documentation for a list of accepted
arguments.
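    For example, enabling the `monitor` module with no extra arguments would
    look like this (a minimal sketch; any module shipped with your Kivy
    install works the same way)::
        monitor =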
.. versionchanged:: 1.9.0
`borderless` and `window_state` have been added to the graphics section.
The `fake` setting of the `fullscreen` option has been deprecated,
use the `borderless` option instead.
`pause_on_minimize` has been added to the kivy section.
.. versionchanged:: 1.8.0
    `systemanddock` and `systemandmulti` have been added as possible values for
`keyboard_mode` in the kivy section. `exit_on_escape` has been added
to the kivy section.
.. versionchanged:: 1.2.0
`resizable` has been added to graphics section.
.. versionchanged:: 1.1.0
tuio no longer listens by default. Window icons are not copied to
    the user directory anymore. You can still set a new window icon by using the
``window_icon`` config setting.
.. versionchanged:: 1.0.8
`scroll_timeout`, `scroll_distance` and `scroll_friction` have been added.
`list_friction`, `list_trigger_distance` and `list_friction_bound`
have been removed. `keyboard_type` and `keyboard_layout` have been
removed from the widget. `keyboard_mode` and `keyboard_layout` have
been added to the kivy section.
'''
__all__ = ('Config', 'ConfigParser')
try:
from ConfigParser import ConfigParser as PythonConfigParser
except ImportError:
from configparser import RawConfigParser as PythonConfigParser
from os import environ
from os.path import exists
from kivy import kivy_config_fn
from kivy.logger import Logger, logger_config_update
from collections import OrderedDict
from kivy.utils import platform
from kivy.compat import PY2, string_types
from weakref import ref
_is_rpi = exists('/opt/vc/include/bcm_host.h')
# Version number of current configuration format
KIVY_CONFIG_VERSION = 14
Config = None
'''The default Kivy configuration object. This is a :class:`ConfigParser`
instance with the :attr:`~kivy.config.ConfigParser.name` set to 'kivy'.
.. code-block:: python
Config = ConfigParser(name='kivy')
'''
class ConfigParser(PythonConfigParser, object):
'''Enhanced ConfigParser class that supports the addition of default
sections and default values.
By default, the kivy ConfigParser instance, :attr:`~kivy.config.Config`,
is named `'kivy'` and the ConfigParser instance used by the
:meth:`App.build_settings <~kivy.app.App.build_settings>` method is named
`'app'`.
:Parameters:
`name`: string
The name of the instance. See :attr:`name`. Defaults to `''`.
.. versionchanged:: 1.9.0
Each ConfigParser can now be :attr:`named <name>`. You can get the
ConfigParser associated with a name using :meth:`get_configparser`.
In addition, you can now control the config values with
:class:`~kivy.properties.ConfigParserProperty`.
.. versionadded:: 1.0.7
'''
def __init__(self, name=''):
PythonConfigParser.__init__(self)
self._sections = OrderedDict()
self.filename = None
self._callbacks = []
self.name = name
def add_callback(self, callback, section=None, key=None):
'''Add a callback to be called when a specific section or key has
changed. If you don't specify a section or key, it will call the
callback for all section/key changes.
Callbacks will receive 3 arguments: the section, key and value.
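        For example (a minimal sketch; the callback name is illustrative)::
            def on_maxfps(section, key, value):
                print('graphics.maxfps is now', value)
            Config.add_callback(on_maxfps, 'graphics', 'maxfps')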
.. versionadded:: 1.4.1
'''
if section is None and key is not None:
raise Exception('You cannot specify a key without a section')
self._callbacks.append((callback, section, key))
def remove_callback(self, callback, section=None, key=None):
'''Removes a callback added with :meth:`add_callback`.
:meth:`remove_callback` must be called with the same parameters as
:meth:`add_callback`.
Raises a `ValueError` if not found.
.. versionadded:: 1.9.0
'''
self._callbacks.remove((callback, section, key))
def _do_callbacks(self, section, key, value):
for callback, csection, ckey in self._callbacks:
if csection is not None and csection != section:
continue
elif ckey is not None and ckey != key:
continue
callback(section, key, value)
def read(self, filename):
'''Read only one filename. In contrast to the original ConfigParser of
Python, this one is able to read only one file at a time. The last
read file will be used for the :meth:`write` method.
.. versionchanged:: 1.9.0
:meth:`read` now calls the callbacks if read changed any values.
'''
if not isinstance(filename, string_types):
raise Exception('Only one filename is accepted ({})'.format(
string_types.__name__))
self.filename = filename
        # If we try to open the configuration file directly in utf-8,
        # we correctly get the unicode values by default.
        # But when we try to save it again, all the values we didn't change
        # are still unicode, and the PythonConfigParser internals then do
        # a str() conversion -> fail.
        # Instead, we currently do the conversion to utf-8 when values are
        # "get()", but we internally store them in ascii.
#with codecs.open(filename, 'r', encoding='utf-8') as f:
# self.readfp(f)
old_vals = {sect: {k: v for k, v in self.items(sect)} for sect in
self.sections()}
PythonConfigParser.read(self, filename)
# when reading new file, sections/keys are only increased, not removed
f = self._do_callbacks
for section in self.sections():
if section not in old_vals: # new section
for k, v in self.items(section):
f(section, k, v)
continue
old_keys = old_vals[section]
for k, v in self.items(section): # just update new/changed keys
if k not in old_keys or v != old_keys[k]:
f(section, k, v)
def set(self, section, option, value):
'''Functions similarly to PythonConfigParser's set method, except that
the value is implicitly converted to a string.
'''
e_value = value
if not isinstance(value, string_types):
# might be boolean, int, etc.
e_value = str(value)
if PY2:
if isinstance(value, unicode):
e_value = value.encode('utf-8')
ret = PythonConfigParser.set(self, section, option, e_value)
self._do_callbacks(section, option, value)
return ret
def setall(self, section, keyvalues):
'''Sets multiple key-value pairs in a section. keyvalues should be a
dictionary containing the key-value pairs to be set.
'''
for key, value in keyvalues.items():
self.set(section, key, value)
def get(self, section, option, **kwargs):
value = PythonConfigParser.get(self, section, option, **kwargs)
if PY2:
if type(value) is str:
return value.decode('utf-8')
return value
def setdefaults(self, section, keyvalues):
'''Set multiple key-value defaults in a section. keyvalues should be
a dictionary containing the new key-value defaults.
'''
self.adddefaultsection(section)
for key, value in keyvalues.items():
self.setdefault(section, key, value)
def setdefault(self, section, option, value):
'''Set the default value for an option in the specified section.
'''
if self.has_option(section, option):
return
self.set(section, option, value)
def getdefault(self, section, option, defaultvalue):
'''Get the value of an option in the specified section. If not found,
it will return the default value.
'''
if not self.has_section(section):
return defaultvalue
if not self.has_option(section, option):
return defaultvalue
return self.get(section, option)
def getdefaultint(self, section, option, defaultvalue):
'''Get the value of an option in the specified section. If not found,
it will return the default value. The value will always be
returned as an integer.
.. versionadded:: 1.6.0
'''
return int(self.getdefault(section, option, defaultvalue))
def adddefaultsection(self, section):
'''Add a section if the section is missing.
'''
if self.has_section(section):
return
self.add_section(section)
def write(self):
'''Write the configuration to the last file opened using the
:meth:`read` method.
Return True if the write finished successfully, False otherwise.
'''
if self.filename is None:
return False
try:
with open(self.filename, 'w') as fd:
PythonConfigParser.write(self, fd)
except IOError:
Logger.exception('Unable to write the config <%s>' % self.filename)
return False
return True
def update_config(self, filename, overwrite=False):
'''Upgrade the configuration based on a new default config file.
Overwrite any existing values if overwrite is True.
'''
pcp = PythonConfigParser()
pcp.read(filename)
confset = self.setall if overwrite else self.setdefaults
for section in pcp.sections():
confset(section, dict(pcp.items(section)))
self.write()
@staticmethod
def _register_named_property(name, widget_ref, *largs):
''' Called by the ConfigParserProperty to register a property which
was created with a config name instead of a config object.
When a ConfigParser with this name is later created, the properties
are then notified that this parser now exists so they can use it.
If the parser already exists, the property is notified here. See
:meth:`~kivy.properties.ConfigParserProperty.set_config`.
:Parameters:
`name`: a non-empty string
The name of the ConfigParser that is associated with the
property. See :attr:`name`.
`widget_ref`: 2-tuple.
The first element is a reference to the widget containing the
property, the second element is the name of the property. E.g.:
class House(Widget):
address = ConfigParserProperty('', 'info', 'street',
'directory')
Then, the first element is a ref to a House instance, and the
second is `'address'`.
'''
configs = ConfigParser._named_configs
try:
config, props = configs[name]
except KeyError:
configs[name] = (None, [widget_ref])
return
props.append(widget_ref)
if config:
config = config()
widget = widget_ref[0]()
if config and widget: # associate this config with property
widget.property(widget_ref[1]).set_config(config)
@staticmethod
def get_configparser(name):
'''Returns the :class:`ConfigParser` instance whose name is `name`, or
None if not found.
:Parameters:
`name`: string
The name of the :class:`ConfigParser` instance to return.
'''
try:
config = ConfigParser._named_configs[name][0]
return config() if config else None
except KeyError:
return None
# keys are configparser names, values are 2-tuple of (ref(configparser),
# widget_ref), where widget_ref is same as in _register_named_property
_named_configs = {}
_name = ''
@property
def name(self):
''' The name associated with this ConfigParser instance, if not `''`.
Defaults to `''`. It can be safely changed dynamically or set to `''`.
When a ConfigParser is given a name, that config object can be
retrieved using :meth:`get_configparser`. In addition, that config
instance can also be used with a
:class:`~kivy.properties.ConfigParserProperty` instance that set its
`config` value to this name.
Setting more than one ConfigParser with the same name will raise a
`ValueError`.
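        For example (a minimal sketch)::
            myconfig = ConfigParser(name='myapp')
            assert ConfigParser.get_configparser('myapp') is myconfig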
'''
return self._name
@name.setter
def name(self, value):
old_name = self._name
if value is old_name:
return
self._name = value
configs = ConfigParser._named_configs
if old_name: # disconnect this parser from previously connected props
_, props = configs.get(old_name, (None, []))
for widget, prop in props:
widget = widget()
if widget:
widget.property(prop).set_config(None)
configs[old_name] = (None, props)
if not value:
return
# if given new name, connect it with property that used this name
try:
config, props = configs[value]
except KeyError:
configs[value] = (ref(self), [])
return
if config is not None:
raise ValueError('A parser named {} already exists'.format(value))
for widget, prop in props:
widget = widget()
if widget:
widget.property(prop).set_config(self)
configs[value] = (ref(self), props)
if not environ.get('KIVY_DOC_INCLUDE'):
#
# Read, analyse configuration file
# Support upgrade of older config file versions
#
# Create default configuration
Config = ConfigParser(name='kivy')
Config.add_callback(logger_config_update, 'kivy', 'log_level')
    # Read the config file if it exists
if (exists(kivy_config_fn) and
'KIVY_USE_DEFAULTCONFIG' not in environ and
'KIVY_NO_CONFIG' not in environ):
try:
Config.read(kivy_config_fn)
except Exception as e:
            Logger.exception('Core: error while reading local configuration')
version = Config.getdefaultint('kivy', 'config_version', 0)
# Add defaults section
Config.adddefaultsection('kivy')
Config.adddefaultsection('graphics')
Config.adddefaultsection('input')
Config.adddefaultsection('postproc')
Config.adddefaultsection('widgets')
Config.adddefaultsection('modules')
# Upgrade default configuration until we have the current version
need_save = False
if version != KIVY_CONFIG_VERSION and 'KIVY_NO_CONFIG' not in environ:
Logger.warning('Config: Older configuration version detected'
' ({0} instead of {1})'.format(
version, KIVY_CONFIG_VERSION))
Logger.warning('Config: Upgrading configuration in progress.')
need_save = True
while version < KIVY_CONFIG_VERSION:
Logger.debug('Config: Upgrading from %d to %d' %
(version, version + 1))
if version == 0:
# log level
Config.setdefault('kivy', 'keyboard_repeat_delay', '300')
Config.setdefault('kivy', 'keyboard_repeat_rate', '30')
Config.setdefault('kivy', 'log_dir', 'logs')
Config.setdefault('kivy', 'log_enable', '1')
Config.setdefault('kivy', 'log_level', 'info')
Config.setdefault('kivy', 'log_name', 'kivy_%y-%m-%d_%_.txt')
Config.setdefault('kivy', 'window_icon', '')
# default graphics parameters
Config.setdefault('graphics', 'display', '-1')
Config.setdefault('graphics', 'fullscreen', 'no')
Config.setdefault('graphics', 'height', '600')
Config.setdefault('graphics', 'left', '0')
Config.setdefault('graphics', 'maxfps', '0')
Config.setdefault('graphics', 'multisamples', '2')
Config.setdefault('graphics', 'position', 'auto')
Config.setdefault('graphics', 'rotation', '0')
Config.setdefault('graphics', 'show_cursor', '1')
Config.setdefault('graphics', 'top', '0')
Config.setdefault('graphics', 'vsync', '1')
Config.setdefault('graphics', 'width', '800')
# input configuration
Config.setdefault('input', 'mouse', 'mouse')
# activate native input provider in configuration
            # from 1.0.9, don't activate mactouch by default, or apps are
            # unusable.
if platform == 'win':
Config.setdefault('input', 'wm_touch', 'wm_touch')
Config.setdefault('input', 'wm_pen', 'wm_pen')
elif platform == 'linux':
probesysfs = 'probesysfs'
if _is_rpi:
probesysfs += ',provider=hidinput'
Config.setdefault('input', '%(name)s', probesysfs)
# input postprocessing configuration
Config.setdefault('postproc', 'double_tap_distance', '20')
Config.setdefault('postproc', 'double_tap_time', '250')
Config.setdefault('postproc', 'ignore', '[]')
Config.setdefault('postproc', 'jitter_distance', '0')
Config.setdefault('postproc', 'jitter_ignore_devices',
'mouse,mactouch,')
Config.setdefault('postproc', 'retain_distance', '50')
Config.setdefault('postproc', 'retain_time', '0')
            # default configuration for keyboard repetition
Config.setdefault('widgets', 'keyboard_layout', 'qwerty')
Config.setdefault('widgets', 'keyboard_type', '')
Config.setdefault('widgets', 'list_friction', '10')
Config.setdefault('widgets', 'list_friction_bound', '20')
Config.setdefault('widgets', 'list_trigger_distance', '5')
elif version == 1:
Config.remove_option('graphics', 'vsync')
Config.set('graphics', 'maxfps', '60')
elif version == 2:
            # this version automatically copied the window icon into the user
            # directory, but that behaviour is not used anymore. Users can
            # still change the window icon by editing the config.
pass
elif version == 3:
# add token for scrollview
Config.setdefault('widgets', 'scroll_timeout', '55')
Config.setdefault('widgets', 'scroll_distance', '20')
Config.setdefault('widgets', 'scroll_friction', '1.')
# remove old list_* token
Config.remove_option('widgets', 'list_friction')
Config.remove_option('widgets', 'list_friction_bound')
Config.remove_option('widgets', 'list_trigger_distance')
elif version == 4:
Config.remove_option('widgets', 'keyboard_type')
Config.remove_option('widgets', 'keyboard_layout')
# add keyboard token
Config.setdefault('kivy', 'keyboard_mode', '')
Config.setdefault('kivy', 'keyboard_layout', 'qwerty')
elif version == 5:
Config.setdefault('graphics', 'resizable', '1')
elif version == 6:
            # add scroll_stoptime and scroll_moves tokens for scrollview
Config.setdefault('widgets', 'scroll_stoptime', '300')
Config.setdefault('widgets', 'scroll_moves', '5')
elif version == 7:
# desktop bool indicating whether to use desktop specific features
is_desktop = int(platform in ('win', 'macosx', 'linux'))
Config.setdefault('kivy', 'desktop', is_desktop)
Config.setdefault('postproc', 'triple_tap_distance', '20')
Config.setdefault('postproc', 'triple_tap_time', '375')
elif version == 8:
if Config.getint('widgets', 'scroll_timeout') == 55:
Config.set('widgets', 'scroll_timeout', '250')
elif version == 9:
Config.setdefault('kivy', 'exit_on_escape', '1')
elif version == 10:
Config.set('graphics', 'fullscreen', '0')
Config.setdefault('graphics', 'borderless', '0')
elif version == 11:
Config.setdefault('kivy', 'pause_on_minimize', '0')
elif version == 12:
Config.setdefault('graphics', 'window_state', 'visible')
elif version == 13:
Config.setdefault('graphics', 'minimum_width', '0')
Config.setdefault('graphics', 'minimum_height', '0')
# elif version == 1:
# # add here the command for upgrading from configuration 0 to 1
else:
# for future.
break
# Pass to the next version
version += 1
    # Indicate to the Config that we've upgraded to the latest version.
Config.set('kivy', 'config_version', KIVY_CONFIG_VERSION)
# Now, activate log file
Logger.logfile_activated = bool(Config.getint('kivy', 'log_enable'))
    # If no configuration exists, write the default one.
if ((not exists(kivy_config_fn) or need_save) and
'KIVY_NO_CONFIG' not in environ):
try:
Config.filename = kivy_config_fn
Config.write()
except Exception as e:
Logger.exception('Core: Error while saving default config file')
| {
"content_hash": "920af4fb2fff595f342b51c43435b999",
"timestamp": "",
"source": "github",
"line_count": 807,
"max_line_length": 79,
"avg_line_length": 38.69268897149938,
"alnum_prop": 0.6201441152922338,
"repo_name": "vitorio/kivy",
"id": "b478ae7d4bc0bed305b7ffa0728603ff8b27de0e",
"size": "31225",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kivy/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "329293"
},
{
"name": "Emacs Lisp",
"bytes": "9695"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "19384"
},
{
"name": "Makefile",
"bytes": "4201"
},
{
"name": "Objective-C",
"bytes": "14779"
},
{
"name": "Python",
"bytes": "3574220"
},
{
"name": "VimL",
"bytes": "1123"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from .models import Category, Forum, TopicType, Topic
from .models import Post, LBForumUserProfile
admin.site.register(Category)
def update_forum_state_info(modeladmin, request, queryset):
for forum in queryset:
forum.update_state_info()
update_forum_state_info.short_description = _("Update forum state info")
class ForumAdmin(admin.ModelAdmin):
list_display = ('name', 'slug', 'category', 'num_topics', 'num_posts',)
list_filter = ('category',)
raw_id_fields = ('admins', 'last_post')
actions = [update_forum_state_info]
admin.site.register(Forum, ForumAdmin)
class TopicTypeAdmin(admin.ModelAdmin):
list_display = ('forum', 'name', 'slug', 'description', )
list_filter = ('forum',)
admin.site.register(TopicType, TopicTypeAdmin)
class PostInline(admin.TabularInline):
model = Post
def update_topic_state_info(modeladmin, request, queryset):
for topic in queryset:
topic.update_state_info()
update_topic_state_info.short_description = _("Update topic state info")
def update_topic_attr_as_not(modeladmin, request, queryset, attr):
for topic in queryset:
if attr == 'sticky':
topic.sticky = not topic.sticky
elif attr == 'close':
topic.closed = not topic.closed
elif attr == 'hide':
topic.hidden = not topic.hidden
topic.save()
def sticky_unsticky_topic(modeladmin, request, queryset):
update_topic_attr_as_not(modeladmin, request, queryset, 'sticky')
sticky_unsticky_topic.short_description = _("sticky/unsticky topics")
def close_unclose_topic(modeladmin, request, queryset):
update_topic_attr_as_not(modeladmin, request, queryset, 'close')
close_unclose_topic.short_description = _("close/unclose topics")
def hide_unhide_topic(modeladmin, request, queryset):
update_topic_attr_as_not(modeladmin, request, queryset, 'hide')
hide_unhide_topic.short_description = _("hide/unhide topics")
class TopicAdmin(admin.ModelAdmin):
list_display = (
'subject', 'forum', 'topic_type', 'posted_by', 'sticky', 'closed',
'hidden', 'level', 'num_views', 'num_replies', 'created_on', 'updated_on', )
list_filter = ('forum', 'sticky', 'closed', 'hidden', 'level')
search_fields = ('subject', 'posted_by__username', )
# inlines = (PostInline, )
raw_id_fields = ('posted_by', 'post', 'last_post')
actions = [update_topic_state_info, sticky_unsticky_topic, close_unclose_topic, hide_unhide_topic]
admin.site.register(Topic, TopicAdmin)
class PostAdmin(admin.ModelAdmin):
list_display = (
'topic', 'posted_by', 'poster_ip',
'created_on', 'updated_on', )
search_fields = ('topic__subject', 'posted_by__username', 'message', )
raw_id_fields = ('topic', 'posted_by', 'attachments', 'last_updated_by')
actions = ['delete_model']
def get_actions(self, request):
actions = super(PostAdmin, self).get_actions(request)
del actions['delete_selected']
return actions
def delete_model(self, request, obj):
for o in obj.all():
topic = o.topic
o.delete()
topic.update_state_info()
delete_model.short_description = 'Delete posts'
admin.site.register(Post, PostAdmin)
class LBForumUserProfileAdmin(admin.ModelAdmin):
list_display = ('user', 'nickname', 'bio',)
search_fields = ('user__username', 'nickname', )
raw_id_fields = ('user',)
admin.site.register(LBForumUserProfile, LBForumUserProfileAdmin)
| {
"content_hash": "9757dc072a0d0f89b4ea910bcad2e873",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 102,
"avg_line_length": 32.711711711711715,
"alnum_prop": 0.6722665932250069,
"repo_name": "JiaruZhang/Five",
"id": "186ffb8e2664f60a8964a3a953fb120dba30c0b2",
"size": "3655",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lbforum/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "99227"
},
{
"name": "HTML",
"bytes": "165128"
},
{
"name": "JavaScript",
"bytes": "7623"
},
{
"name": "Python",
"bytes": "108883"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class CandlestickValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
def __init__(
self, plotly_name="candlestick", parent_name="layout.template.data", **kwargs
):
super(CandlestickValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Candlestick"),
data_docs=kwargs.pop(
"data_docs",
"""
""",
),
**kwargs
)
| {
"content_hash": "655d45bd9649ecf66d397a9442093dfb",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 85,
"avg_line_length": 31.22222222222222,
"alnum_prop": 0.5640569395017794,
"repo_name": "plotly/python-api",
"id": "4444c316e253fd8a3e08f605687915585520a455",
"size": "562",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/template/data/_candlestick.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
"""rockmylight URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url, patterns
from django.contrib import admin
urlpatterns = patterns(
'',
url(r'^$', 'rockmylight.rml.views.main', name='main'),
url(r'^jam_session/(?P<session_id>[0-9]+)/', 'rockmylight.rml.views.jam',
name='jam'),
url(r'^api/dj/(?P<session_id>[0-9]+)/$', 'rockmylight.rml.views.api_dj',
name='api_dj'),
url(r'^about/', 'rockmylight.rml.views.about',
name='about'),
url(r'^admin/', include(admin.site.urls)),
)
| {
"content_hash": "2cf7a22742b142fabbd0cf81fcdf98a9",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 77,
"avg_line_length": 38.62068965517241,
"alnum_prop": 0.65625,
"repo_name": "RockMyLight/django-rml",
"id": "b50f1a749c8d189bb98e6415a8cd83c7faa4e52a",
"size": "1120",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rockmylight/rockmylight/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "169514"
},
{
"name": "HTML",
"bytes": "17167"
},
{
"name": "JavaScript",
"bytes": "62850"
},
{
"name": "Python",
"bytes": "7372"
}
],
"symlink_target": ""
} |
import unittest
import pytest
import json
import os
import vodka.config
import vodka.log
import vodka.bartender
import vodka.plugins
import vodka.app
import vodka
from click.testing import CliRunner
HOME = os.path.join(os.path.dirname(__file__), "resources", "test_bartender_app")
@vodka.plugin.register("test_bartender_a")
class Plugin(vodka.plugins.PluginBase):
pass
class SimPromptConfigurator(vodka.bartender.ClickConfigurator):
"""
We make a configurator that simulates user input for
    the bartender config test. Values will be sent to prompts
    in order.
"""
values = [
# add application,
"test_bartender_app",
# application home,
HOME,
# application from module,
"",
# dont add another application
"skip",
# add plugin type
"test_bartender_a",
# dont add another plugin
"skip",
]
def prompt(self, msg, default=None, *args, **kwargs):
if not hasattr(self, "counter"):
self.counter = 0
# default value is provided, use that
if default != "skip" and default != "." and default != "":
return default
r = self.values[self.counter]
self.counter += 1
return r
# override so config cli will use our new sim prompt configurator
vodka.bartender.ClickConfigurator = SimPromptConfigurator
class TestBartender(unittest.TestCase):
"""
Tests bartender CLI
"""
@pytest.fixture(autouse=True)
def setup(self, tmpdir):
self.tmpdir = tmpdir
self.appdir = tmpdir.mkdir("app")
self.config_file = tmpdir.join("config.json")
self.cli = CliRunner()
def test_newapp(self):
"""
Tests the newapp command which should generate a blank app
structure at the provided directory
"""
p = str(self.appdir)
r = self.cli.invoke(vodka.bartender.newapp, ["--path=%s" % p])
self.assertEqual(r.exit_code, 0)
self.assertEqual(os.path.exists(os.path.join(p, "application.py")), True)
self.assertEqual(os.path.exists(os.path.join(p, "plugins", "example.py")), True)
def test_check_config(self):
"""
Test the check_config command
"""
# create config to check, this should always validate
self.config_file.write(json.dumps({"logging": vodka.log.default_config()}))
# run check_config
r = self.cli.invoke(
vodka.bartender.check_config, ["--config=%s" % str(self.tmpdir)]
)
# assert no errors
self.assertEqual(r.exit_code, 0)
# assert output
self.assertEqual(
str(r.output),
"Checking config at %s for errors ...\n0 config ERRORS, 0 config WARNINGS\n"
% str(self.tmpdir),
)
def test_config(self):
"""
Test the config command
This will will generate a config based on user input and default values
"""
        # run config (this will use SimPromptConfigurator)
r = self.cli.invoke(
vodka.bartender.config, ["--config=%s/config.json" % str(self.tmpdir)]
)
vodka.log.set_loggers(vodka.log.default_config())
print(r.output)
# assert no errors
self.assertEqual(r.exit_code, 0)
cfg = vodka.config.Config(read=str(self.tmpdir))
expected = {
"apps": {
"test_bartender_app": {"enabled": True, "home": HOME, "module": ""}
},
"plugins": [
{
"async": "thread",
"enabled": True,
"start_manual": False,
"name": "test_bartender_a",
"type": "test_bartender_a",
}
],
}
self.assertEqual("apps" in cfg.data, True)
self.assertEqual("plugins" in cfg.data, True)
# note: because other tests may register applications we
# cannot directly compare the entire content of "apps"
self.assertEqual(
expected["apps"]["test_bartender_app"],
cfg.data["apps"]["test_bartender_app"],
)
self.assertEqual(expected["plugins"], cfg.data["plugins"])
| {
"content_hash": "37dc6e7e8ea40074d745bab6f5bb34aa",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 88,
"avg_line_length": 27.222929936305732,
"alnum_prop": 0.5744033692091718,
"repo_name": "20c/vodka",
"id": "27b5d12f129d1912245556fd4227175f6577fc23",
"size": "4274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_bartender.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "260"
},
{
"name": "Python",
"bytes": "118007"
}
],
"symlink_target": ""
} |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# pandas display options
pd.options.display.max_rows = 1000
pd.options.display.max_columns = 25
pd.options.display.width = 1000
######################################################################
# data
########################################################################
# read in datafile
data = pd.read_csv('data/nasdaq.csv', index_col=0, parse_dates=True)
data = data.dropna(axis=0) # ditch nulls
data = data[['Open']] # ditch the stuff not being used
data['Open'] = pd.to_numeric(data['Open'], errors='coerce') # convert string to numbers
# switch to log data
data['logNASDAQ'] = np.log(data['Open'])
# log returns
data['logReturns'] = data['logNASDAQ'] - data['logNASDAQ'].shift(1)
# drop rows with NaN values (the shift above leaves a NaN in the first row)
data = data.dropna()
def rolling_stats(w):
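    # pandas .rolling() uses a trailing window; shifting the results forward
    # by half the window length means each row sees statistics computed over
    # a window that ended roughly half a window earlier.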
window_shift = int(w/2)
data['total'] = data.rolling(window=w).sum()['logReturns'].shift(window_shift)
data['std'] = data.rolling(window=w).std()['logReturns'].shift(window_shift)
data['kurtosis'] = data.rolling(window=w).kurt()['logReturns'].shift(window_shift)
data['mean'] = data.rolling(window=w).mean()['logReturns'].shift(window_shift)
data['skew'] = data.rolling(window=w).skew()['logReturns'].shift(window_shift)
rolling_stats(21)
# sort by log return: worst days at top, best days at bottom
sorted_data = data.sort_values(by='logReturns', ascending=True)
best_days = sorted_data.tail(50)
worst_days = sorted_data.head(50)
plt.figure(figsize=(15,15))
plt.suptitle("Best/Worst one day returns vs Probability Asymmetry in the month leading up to it")
plt.subplot(1, 2, 1)
plt.ylim(-1.0, 1.0)
plt.xlim(-.15, .15)
plt.scatter(best_days['logReturns'], best_days['skew'])
plt.xlabel('Best Log Returns')
plt.ylabel('Skew')
plt.subplot(1,2,2)
plt.ylim(-1.0, 1.0)
plt.xlim(-.15, .15)
plt.scatter(worst_days['logReturns'], worst_days['skew'])
plt.xlabel('Worst Log Returns')
plt.ylabel('Skew')
plt.savefig("RollingSkew.png")
plt.show()
| {
"content_hash": "7cff70c10a5e63edacaf837e2b8dfe03",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 97,
"avg_line_length": 28.2027027027027,
"alnum_prop": 0.6473406804024916,
"repo_name": "timestocome/Test-stock-prediction-algorithms",
"id": "688e817c5a2cc288ddc9342d70db5660886df83e",
"size": "2243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Misc experiments/RollingStatsBeforeLargeReturnDays.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "455373"
},
{
"name": "Python",
"bytes": "306406"
}
],
"symlink_target": ""
} |
from faint import *
#start
bindk(key.arrow_up, zoom_in, mod.ctrl) # Binds zoom_in to the Ctrl+Up
bindk(key.arrow_down, zoom_out, mod.ctrl) # Binds zoom_out to Ctrl+Down
bindk(key.backspace, auto_crop) # Binds auto_crop to back-space
| {
"content_hash": "3d9911b332b89e649625f9f9413678aa",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 71,
"avg_line_length": 40,
"alnum_prop": 0.7166666666666667,
"repo_name": "lukas-ke/faint-graphics-editor",
"id": "21ae13709d2bf89cc4548bf90b356208bad07431",
"size": "240",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "help/example_py/python_bind_keys_bindk.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "49581"
},
{
"name": "C++",
"bytes": "3170874"
},
{
"name": "Emacs Lisp",
"bytes": "13474"
},
{
"name": "HTML",
"bytes": "26096"
},
{
"name": "NSIS",
"bytes": "2088"
},
{
"name": "Python",
"bytes": "537915"
}
],
"symlink_target": ""
} |
'''
Individual stages of the pipeline implemented as functions from
input files to output files.
The run_stage function knows everything about submitting jobs and, given
the state parameter, has full access to the state of the pipeline, such
as config, options, DRMAA and the logger.
'''
from utils import safe_make_dir
from runner import run_stage
import os
class Stages(object):
def __init__(self, state):
self.state = state
def original_mzml(self, output):
'''original mzml files'''
def resample(self, mzml_in, mzml_out):
'''Resample MZML file to new sampling rate'''
cores = self.state.config.get_stage_option('resample', 'cores')
rate = self.state.config.get_stage_option('resample', 'rate')
command = "Resampler -sampling_rate {rate} -threads {cores} -in {mzml_in} -out {mzml_out}".format(rate=rate, cores=cores, mzml_in=mzml_in, mzml_out=mzml_out)
run_stage(self.state, 'resample', command)
def noise_filter_sgolay(self, mzml_in, mzml_out):
'''Filter noise using Savitzky Golay'''
cores = self.state.config.get_stage_option('noise_filter_sgolay', 'cores')
command = "NoiseFilterSGolay -threads {cores} -in {mzml_in} -out {mzml_out}".format(cores=cores, mzml_in=mzml_in, mzml_out=mzml_out)
run_stage(self.state, 'noise_filter_sgolay', command)
def baseline_filter(self, mzml_in, mzml_out):
'''Executes the top-hat filter to remove the baseline of an MS experiment.'''
cores = self.state.config.get_stage_option('baseline_filter', 'cores')
command = "BaselineFilter -threads {cores} -in {mzml_in} -out {mzml_out}".format(cores=cores, mzml_in=mzml_in, mzml_out=mzml_out)
run_stage(self.state, 'baseline_filter', command)
def peak_picker_hires(self, mzml_in, mzml_out):
'''Executes the peak picking with high_res algorithm'''
cores = self.state.config.get_stage_option('baseline_filter', 'cores')
command = "PeakPickerHiRes -threads {cores} -in {mzml_in} -out {mzml_out}".format(cores=cores, mzml_in=mzml_in, mzml_out=mzml_out)
run_stage(self.state, 'peak_picker_hires', command)
def feature_finder_centroid(self, mzml_in, feature_xml_out):
'''The feature detection application for quantitation (centroided).'''
cores = self.state.config.get_stage_option('feature_finder_centroid', 'cores')
command = "FeatureFinderCentroided -threads {cores} -in {mzml_in} -out {feature_out}".format(cores=cores, mzml_in=mzml_in, feature_out=feature_xml_out)
run_stage(self.state, 'feature_finder_centroid', command)
| {
"content_hash": "9586b6d08e49f85b220f750fa3fbeab3",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 165,
"avg_line_length": 52.52,
"alnum_prop": 0.682025894897182,
"repo_name": "bjpop/twin_ion_pipeline",
"id": "364784bec9b2ab59e3425afb2534c492b196c0da",
"size": "2626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/stages.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "15790"
}
],
"symlink_target": ""
} |
import time
from ._timer import Timer
class AsyncTimer(Timer):
"""A timer instance, which awaits the wrapped function."""
def _wrapped_factory(self, func):
"""Report an execution time for each function run.
:param func (function): A function to wrap for measurement.
:returns function: A measured function, which will be awaited.
"""
async def wrapped(*args, **kwargs):
self.time_start = time.time()
ret = await func(*args, **kwargs)
self.__exit__(None, None, None)
return ret
return wrapped
| {
"content_hash": "f2a2065ab825658b6319a3e4bc7b5cc1",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 70,
"avg_line_length": 27.40909090909091,
"alnum_prop": 0.6019900497512438,
"repo_name": "Intel471/prom-stats",
"id": "6cd18cddae079e3df01f35fd4f1af7e250604bd5",
"size": "603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "promstats/_async.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "555"
},
{
"name": "Python",
"bytes": "27203"
}
],
"symlink_target": ""
} |
"""This is a script for fixing XGBoost boosting trees which have negative scores.
We calculate the most negative leaf value, and append one more tree to the model
which adds the abs() of this smallest value, meaning we always get a positive score,
but relatively the scores will not change.
"""
import json
import sys
import argparse
import logging
def find_min(tree):
"""Finds the minimum leaf value in a tree
Parameters
----------
tree : dict
parsed model
"""
if 'leaf' in tree.keys():
return tree['leaf']
else:
mapped = list(map(lambda t: find_min(t), tree['children']))
return min(mapped)
# finds the first feature in a tree, we then use this in the split condition
# it doesn't matter which feature we use, as both of the leaves will add the same value
def find_first_feature(tree):
"""Finds the first feature in a tree, we then use this in the split condition
It doesn't matter which feature we use, as both of the leaves will add the same value
Parameters
----------
tree : dict
parsed model
"""
if 'split' in tree.keys():
return tree['split']
elif 'children' in tree.keys():
return find_first_feature(tree['children'][0])
else:
raise Exception("Unable to find any features")
def create_correction_tree(correction_value, feature_to_split_on):
"""Creates new tree with the given correction amount
Parameters
----------
correction_value : float
leaf values for new tree
feature_to_split_on : string
feature name for the new tree
"""
return {
"children": [
{
"leaf": correction_value,
"nodeid": 1
},
{
"leaf": correction_value,
"nodeid": 2
}
],
"depth": 0,
"missing": 1,
"no": 2,
"nodeid": 0,
"split": feature_to_split_on,
"split_condition": 1,
"yes": 1
}
def fix_tree(trees):
"""Calculate and return a tree that will provide a positive final score
Parameters
----------
trees : dict
trees from model
"""
summed_min_leafs = sum(map(lambda t: find_min(t), trees))
correction_value = abs(summed_min_leafs)
logging.info("Correction value: {}".format(correction_value))
if summed_min_leafs < 0:
feature_to_split_on = find_first_feature(trees[0])
        # define an extra tree that produces a positive value so that the sum of all the trees is >= 0
extra_tree = create_correction_tree(correction_value, feature_to_split_on)
return extra_tree
else:
logging.info("Not modifying tree, scores are already positive")
return None
#
def process(in_file, out_file):
"""Fixes input model and writes to output model
Parameters
----------
in_file : file
model json file to read
out_file : file
model json file to write
"""
with in_file as i:
model = json.load(i)
inner_model = 'definition' in model
if inner_model:
definition_string = model['definition']
else:
inner_model = False
definition_string = model['model']['model']['definition']
# parse the escaped string to a list of trees
trees = json.loads(definition_string)
correction_tree = fix_tree(trees)
if correction_tree is not None:
trees.append(correction_tree)
# replace the definition and handle both json variants
if inner_model:
model['definition'] = json.dumps(trees)
else:
model['model']['model']['definition'] = json.dumps(trees)
# save it to a new file
with out_file as o:
json.dump(model, o)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description="""Model fixr adds a tree to
the model with a positive leaf score
equal to the abs sum of the min leafs of the other trees.""")
parser.add_argument('-i', '--input',
action='store', nargs='?',
help='Filename for the input model',
type=argparse.FileType('r'), default='model.json')
parser.add_argument('-o', '--output',
action='store', nargs='?',
help='Filename for the modified model',
type=argparse.FileType('w'), default='model-fixed.json')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
process(args.input, args.output)
| {
"content_hash": "d906fe95aac23a809b9d7a02c8ee0cc5",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 101,
"avg_line_length": 31.461038961038962,
"alnum_prop": 0.5700722394220846,
"repo_name": "o19s/elasticsearch-learning-to-rank",
"id": "ddf4564077f327e04e11981c9a78548bb234f405",
"size": "4868",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scripts/xgboost_model_fixr.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "867832"
},
{
"name": "Python",
"bytes": "25006"
}
],
"symlink_target": ""
} |
import pytest
import kevlar
from kevlar.readgraph import ReadGraph
from kevlar.tests import data_file
@pytest.mark.parametrize('partfile,edges,strictedges', [
('connectivity-1311.augfastq', 30, 11),
('connectivity-1541.augfastq', 31, 12),
])
def test_populate(partfile, edges, strictedges):
with kevlar.open(data_file(partfile), 'r') as fh:
reader = kevlar.parse_augmented_fastx(fh)
reads = list(reader)
rg = ReadGraph()
rg.load(reads)
rg.populate_edges()
assert rg.number_of_edges() == pytest.approx(edges, 1)
rg = ReadGraph()
rg.load(reads)
rg.populate_edges(strict=True)
assert rg.number_of_edges() == pytest.approx(strictedges, 1)
| {
"content_hash": "1f2233232a1b2d2a847303f4c2e4c351",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 64,
"avg_line_length": 31.681818181818183,
"alnum_prop": 0.6857962697274032,
"repo_name": "dib-lab/kevlar",
"id": "fb629e6497a684f23caf2641690922cf68a11ba2",
"size": "1068",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kevlar/tests/test_readgraph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3342"
},
{
"name": "C++",
"bytes": "16738"
},
{
"name": "Dockerfile",
"bytes": "1538"
},
{
"name": "Makefile",
"bytes": "2648"
},
{
"name": "Python",
"bytes": "488299"
},
{
"name": "Shell",
"bytes": "4576"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "fmovies.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| {
"content_hash": "6a805daa4f47e2c19d5572e3744761b7",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 77,
"avg_line_length": 37.285714285714285,
"alnum_prop": 0.6206896551724138,
"repo_name": "pieromarini/fmovies",
"id": "8cf0ec16df65f9e7eac022001af4fdbed3525a97",
"size": "805",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "47715"
},
{
"name": "HTML",
"bytes": "9361"
},
{
"name": "JavaScript",
"bytes": "97996"
},
{
"name": "Python",
"bytes": "14597"
}
],
"symlink_target": ""
} |
def extractNightwaterBar(item):
'''
Parser for 'nightwater.bar'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
| {
"content_hash": "9247477acd596551ac57e0e891f1031d",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 25.714285714285715,
"alnum_prop": 0.6240740740740741,
"repo_name": "fake-name/ReadableWebProxy",
"id": "b1446d3219986bed89a950f17942c9ce8d2fddf7",
"size": "541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractNightwaterBar.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
class Shell:
"""
Executor of shell commands on OpenWRT instances via an RPC Proxy
"""
def __init__(self, rpc_proxy):
self._rpc = rpc_proxy
def execute(self, command):
"""
        Executes a shell command and returns its standard output as a str.
"""
return getattr(self._rpc.sys, 'exec')(command)
def call(self, command):
"""
        Executes a shell command and returns its exit code.
"""
return self._rpc.sys.call(command) | {
"content_hash": "e73e5606aa44962c580964053ba27ac0",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 90,
"avg_line_length": 27.57894736842105,
"alnum_prop": 0.5877862595419847,
"repo_name": "Jumpscale/openwrt-remote-manager",
"id": "2702f85357624753c5584ccb458fd94ede63f392",
"size": "525",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openwrt/core/shell.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30185"
}
],
"symlink_target": ""
} |
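A minimal way to exercise the Shell wrapper above is with a stand-in proxy; the DummyProxy and _DummySys names below are illustrative assumptions rather than part of the real OpenWRT RPC client, and the sketch assumes the Shell class from openwrt/core/shell.py is available. The setattr/getattr indirection is needed because exec is a reserved word in Python 2, which is also why Shell.execute looks the method up with getattr instead of dot access.
# Hedged usage sketch: a fake proxy exposing the assumed `sys` namespace with
# `exec` (run a command, return its stdout) and `call` (return its exit code).
class _DummySys(object):
    def call(self, command):
        return 0                                  # pretend every command exits cleanly

_dummy_sys = _DummySys()
# 'exec' is attached with setattr because it cannot be written as a plain method name in Python 2.
setattr(_dummy_sys, 'exec', lambda command: 'dummy output of: ' + command)

class DummyProxy(object):
    sys = _dummy_sys

shell = Shell(DummyProxy())
print(shell.execute('uptime'))                    # -> 'dummy output of: uptime'
print(shell.call('true'))                         # -> 0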
from django.core import serializers
from django.http import HttpResponse
from django.shortcuts import render
from django.views.decorators.http import require_http_methods
from django.views.generic import ListView
from models import *
# Create your views here.
def index(request):
locations = Location.objects.all()
response_data = serializers.serialize("json", locations)
response = HttpResponse(response_data)
return response
| {
"content_hash": "b33dd69c1d6ca4ee3c5c344b0c533e93",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 61,
"avg_line_length": 34.15384615384615,
"alnum_prop": 0.7905405405405406,
"repo_name": "ColusLife/ColusLifeDemo",
"id": "634954141c423eb9eb0125a62b849122d7ba69c7",
"size": "444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Server/coluslife/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1309"
},
{
"name": "HTML",
"bytes": "9001"
},
{
"name": "JavaScript",
"bytes": "18022"
},
{
"name": "Python",
"bytes": "10846"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from django.views import generic
import fetcher
class TopSubmissions(generic.ListView):
template_name = 'redditreader/top.html'
context_object_name = 'submissions'
    def get_queryset(self):
return fetcher.top()
def top(request, subreddit):
if subreddit.startswith('r/'):
subreddit = subreddit[2:]
    return render(request, 'redditreader/top.html', {'submissions': fetcher.top()})
"content_hash": "554b61075769af2135a2ba17a294bbfb",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 40,
"avg_line_length": 22,
"alnum_prop": 0.7593582887700535,
"repo_name": "orlenko/FBBot",
"id": "9c0d92905ecbf67016fb65dfa14b156247a86f0e",
"size": "374",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/FBBot/redditreader/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "126"
},
{
"name": "JavaScript",
"bytes": "2775"
},
{
"name": "Python",
"bytes": "30632"
},
{
"name": "Shell",
"bytes": "29"
}
],
"symlink_target": ""
} |
import ast
import sys
def astppdump(node):
def _format(node, indent):
#print node, len(indent)
if isinstance(node, ast.AST):
namelen = " "*(len(node.__class__.__name__)) + " "
fields = []
for a,b in ast.iter_fields(node):
fieldlen = len(a)*" "
fields.append((a, _format(b, indent+namelen+fieldlen+" ")))
fieldstr = (",\n"+indent+namelen).join('%s=%s' % (field[0],field[1].lstrip()) for field in fields)
return indent+node.__class__.__name__ + "(%s)" % fieldstr
elif isinstance(node, list):
elems = (',\n').join(_format(x, indent+" ") for x in node)
return indent+"[%s]" % elems.lstrip()
elif isinstance(node, long): # L suffix depends on 32/64 python, and skulpt is ~30 because of number precision in js
return indent+str(node)
return indent+repr(node)
if not isinstance(node, ast.AST):
raise TypeError('expected AST, got %r' % node.__class__.__name__)
return _format(node, "")
if __name__ == "__main__":
print astppdump(ast.parse(open(sys.argv[1]).read(), sys.argv[1]))
| {
"content_hash": "d3c4eee2c5ebd8befbcd1fffbf489200",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 124,
"avg_line_length": 43.18518518518518,
"alnum_prop": 0.5497427101200686,
"repo_name": "ArcherSys/ArcherSys",
"id": "579b018c246d8710996cec6318310e7615d35841",
"size": "1166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skulpt/test/astppdump.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
def fancy(string):
return '=== %s ===' % string.capitalize()
| {
"content_hash": "f676c07a0a443b4a30aa17ba43a83557",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 45,
"avg_line_length": 32.5,
"alnum_prop": 0.5692307692307692,
"repo_name": "Thom1729/YAML-Macros",
"id": "4dd678f78f5bf9a6b2dbe28db8d5ae769701917a",
"size": "65",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/basic/example_macros.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23111"
}
],
"symlink_target": ""
} |
import datetime
import flask
import pytest
from webargs import fields
from webargs.flaskparser import parser
from filteralchemy import operators
from filteralchemy import formatters
from filteralchemy import Filter, FilterSet
@pytest.fixture
def app():
return flask.Flask(__name__)
class TestFilters:
@pytest.fixture
def ModelFilterSet(self, session, engine, models):
def modulo(query, model, attr, value):
return query.filter(model.sales % value == 0)
class ModelFilterSet(FilterSet):
class Meta:
model = models.Album
query = session.query(models.Album)
operators = (operators.Equal, operators.In)
parser = parser
genre = Filter(fields.Str(), operator=operators.Like)
sales__modulo = Filter(fields.Str(), operator=modulo)
return ModelFilterSet
@pytest.fixture
def albums(self, models, session):
albums = [
models.Album(
name='A Night at the Opera',
date=datetime.date(1975, 11, 21),
sales=12000000,
genre='rock',
),
models.Album(
name='The Works',
date=datetime.date(1984, 2, 27),
sales=5000000,
genre='synth',
),
]
for album in albums:
session.add(album)
session.commit()
return albums
def test_filter_none(self, app, albums, session, ModelFilterSet):
with app.test_request_context('/'):
query = ModelFilterSet().filter()
assert set(query.all()) == set(albums)
def test_filter_equal(self, app, albums, session, ModelFilterSet):
with app.test_request_context('/?name=The Works'):
query = ModelFilterSet().filter()
assert query.count() == 1
assert query.first() == albums[1]
def test_filter_equal_date(self, app, albums, session, ModelFilterSet):
with app.test_request_context('/?date=1984-02-27'):
query = ModelFilterSet().filter()
assert query.count() == 1
assert query.first() == albums[1]
def test_filter_in(self, app, albums, session, ModelFilterSet):
with app.test_request_context('/?sales__in=12000000&sales__in=5000000'):
query = ModelFilterSet().filter()
assert set(query.all()) == set(albums)
def test_declared_filter(self, app, albums, session, ModelFilterSet):
with app.test_request_context('/?genre=syn%'):
query = ModelFilterSet().filter()
assert query.count() == 1
assert query.first() == albums[1]
def test_custom_filter(self, app, albums, session, ModelFilterSet):
with app.test_request_context('/?sales__modulo=3000000'):
query = ModelFilterSet().filter()
assert query.count() == 1
assert query.first() == albums[0]
def test_override_query(self, app, models, albums, session, ModelFilterSet):
with app.test_request_context('/?sales__in=5000000&sales__in=12000000'):
query = session.query(models.Album).filter(models.Album.name == 'The Works')
query = ModelFilterSet(query).filter()
assert query.count() == 1
assert query.first() == albums[1]
def test_custom_formatter(self, app, albums, models, session):
class ModelFilterSet(FilterSet):
class Meta:
model = models.Album
query = session.query(models.Album)
formatter = formatters.JsonApiFormatter()
operators = (operators.Equal, operators.NotEqual)
parser = parser
with app.test_request_context('/?filter[name][ne]=The Works'):
query = ModelFilterSet().filter()
assert query.count() == 1
assert query.first() == albums[0]
| {
"content_hash": "979f5918eda51a7405318351900b21d0",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 88,
"avg_line_length": 37.714285714285715,
"alnum_prop": 0.5843434343434344,
"repo_name": "jmcarp/filteralchemy",
"id": "458164541ddab37ffded0d606a360ac3b54bfee8",
"size": "3985",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/test_integration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17794"
}
],
"symlink_target": ""
} |
"""Test Fully Kiosk Browser services."""
from unittest.mock import MagicMock
from homeassistant.components.fully_kiosk.const import (
ATTR_URL,
DOMAIN,
SERVICE_LOAD_URL,
)
from homeassistant.const import ATTR_DEVICE_ID
from homeassistant.core import HomeAssistant
from homeassistant.helpers import device_registry as dr
from tests.common import MockConfigEntry
async def test_services(
hass: HomeAssistant,
mock_fully_kiosk: MagicMock,
init_integration: MockConfigEntry,
) -> None:
"""Test the Fully Kiosk Browser services."""
device_registry = dr.async_get(hass)
device_entry = device_registry.async_get_device(
identifiers={(DOMAIN, "abcdef-123456")}
)
assert device_entry
await hass.services.async_call(
DOMAIN,
SERVICE_LOAD_URL,
{ATTR_DEVICE_ID: [device_entry.id], ATTR_URL: "https://example.com"},
blocking=True,
)
assert len(mock_fully_kiosk.loadUrl.mock_calls) == 1
| {
"content_hash": "a772510d81be7adf390aa594d2a1d602",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 77,
"avg_line_length": 27.11111111111111,
"alnum_prop": 0.6997950819672131,
"repo_name": "nkgilley/home-assistant",
"id": "e3b63dad341a8d4425da9a60af7ec390ca86a614",
"size": "976",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/fully_kiosk/test_services.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "51597279"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
from django.conf import settings
import pyconde.attendees.validators
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('sponsorship', '__first__'),
('conference', '__first__'),
]
operations = [
migrations.CreateModel(
name='DietaryPreference',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=30, verbose_name='Name')),
],
),
migrations.CreateModel(
name='Purchase',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('company_name', models.CharField(max_length=100, verbose_name='Company', blank=True)),
('first_name', models.CharField(max_length=250, verbose_name='First name')),
('last_name', models.CharField(max_length=250, verbose_name='Last name')),
('email', models.EmailField(max_length=254, verbose_name='E-mail')),
('street', models.CharField(max_length=100, verbose_name='Street and house number')),
('zip_code', models.CharField(max_length=20, verbose_name='Zip code')),
('city', models.CharField(max_length=100, verbose_name='City')),
('country', models.CharField(max_length=100, verbose_name='Country')),
('vat_id', models.CharField(max_length=16, verbose_name='VAT-ID', blank=True)),
('date_added', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Date (added)')),
('state', models.CharField(default='incomplete', max_length=25, verbose_name='Status', choices=[('incomplete', 'Purchase incomplete'), ('new', 'new'), ('invoice_created', 'invoice created'), ('payment_received', 'payment received'), ('canceled', 'canceled')])),
('comments', models.TextField(verbose_name='Comments', blank=True)),
('payment_method', models.CharField(default='invoice', max_length=20, verbose_name='Payment method', choices=[('invoice', 'Invoice'), ('creditcard', 'Credit card'), ('elv', 'ELV')])),
('payment_transaction', models.CharField(max_length=255, verbose_name='Transaction ID', blank=True)),
('payment_total', models.FloatField(null=True, verbose_name='Payment total', blank=True)),
('exported', models.BooleanField(default=False, verbose_name='Exported')),
('invoice_number', models.IntegerField(null=True, verbose_name='Invoice number', blank=True)),
('invoice_filename', models.CharField(max_length=255, null=True, verbose_name='Invoice filename', blank=True)),
('conference', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, verbose_name='conference', to='conference.Conference', null=True)),
('user', models.ForeignKey(verbose_name='User', to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'verbose_name': 'Purchase',
'verbose_name_plural': 'Purchases',
},
),
migrations.CreateModel(
name='Ticket',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date_added', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Date (added)')),
('canceled', models.BooleanField(default=False, verbose_name='Canceled')),
],
options={
'ordering': ('ticket_type__tutorial_ticket', 'ticket_type__product_number'),
},
),
migrations.CreateModel(
name='TicketType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('product_number', models.IntegerField(help_text='Will be created when you save the first time.', verbose_name='Product number', blank=True)),
('name', models.CharField(max_length=50, verbose_name='Name')),
('fee', models.FloatField(default=0, verbose_name='Fee')),
('max_purchases', models.PositiveIntegerField(default=0, help_text='0 means no limit', verbose_name='Max. purchases')),
('is_active', models.BooleanField(default=False, verbose_name='Is active')),
('is_on_desk_active', models.BooleanField(default=False, verbose_name='Allow on desk purchase')),
('date_valid_from', models.DateTimeField(verbose_name='Sale start')),
('date_valid_to', models.DateTimeField(verbose_name='Sale end')),
('valid_on', models.DateField(blank=True, null=True, verbose_name='Valid on', validators=[pyconde.attendees.validators.during_conference])),
('tutorial_ticket', models.BooleanField(default=False, verbose_name='Tutorial ticket')),
('remarks', models.TextField(verbose_name='Remarks', blank=True)),
('allow_editing', models.NullBooleanField(verbose_name='Allow editing')),
('editable_fields', models.TextField(verbose_name='Editable fields', blank=True)),
('editable_until', models.DateTimeField(null=True, verbose_name='Editable until', blank=True)),
('prevent_invoice', models.BooleanField(default=False, help_text='If checked, a purchase, that contains only tickets of ticket types where this is checked, will not be send to the user. This can be useful for e.g. sponsor tickets', verbose_name='Conditionally prevent invoice to user')),
('conference', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, verbose_name='conference', to='conference.Conference', null=True)),
('content_type', models.ForeignKey(verbose_name='Ticket to generate', to='contenttypes.ContentType')),
],
options={
'ordering': ('tutorial_ticket', 'product_number', 'vouchertype_needed'),
'verbose_name': 'Ticket type',
'verbose_name_plural': 'Ticket type',
},
),
migrations.CreateModel(
name='TShirtSize',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('size', models.CharField(max_length=100, verbose_name='Size')),
('sort', models.IntegerField(default=999, verbose_name='Sort order')),
('conference', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, verbose_name='conference', to='conference.Conference', null=True)),
],
options={
'ordering': ('sort',),
'verbose_name': 'T-Shirt size',
'verbose_name_plural': 'T-Shirt sizes',
},
),
migrations.CreateModel(
name='Voucher',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('code', models.CharField(help_text='Can be left blank, code will be created when you save.', max_length=12, verbose_name='Code', blank=True)),
('remarks', models.CharField(max_length=254, verbose_name='Remarks', blank=True)),
('date_valid', models.DateTimeField(help_text='The voucher is valid until this date', verbose_name='Date (valid)')),
('is_used', models.BooleanField(default=False, verbose_name='Is used')),
],
options={
'verbose_name': 'Voucher',
'verbose_name_plural': 'Vouchers',
},
),
migrations.CreateModel(
name='VoucherType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50, verbose_name='voucher type')),
('conference', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, verbose_name='conference', to='conference.Conference', null=True)),
],
options={
'verbose_name': 'voucher type',
'verbose_name_plural': 'voucher types',
},
),
migrations.CreateModel(
name='SIMCardTicket',
fields=[
('ticket_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='attendees.Ticket')),
('first_name', models.CharField(max_length=250, verbose_name='First name')),
('last_name', models.CharField(max_length=250, verbose_name='Last name')),
('date_of_birth', models.DateField(verbose_name='Date of birth')),
('gender', models.CharField(max_length=6, verbose_name='Gender', choices=[('female', 'female'), ('male', 'male')])),
('hotel_name', models.CharField(help_text='Name of your hotel or host for your stay.', max_length=100, verbose_name='Host', blank=True)),
('email', models.EmailField(max_length=254, verbose_name='E-mail')),
('street', models.CharField(max_length=100, verbose_name='Street and house number of host')),
('zip_code', models.CharField(max_length=20, verbose_name='Zip code of host')),
('city', models.CharField(max_length=100, verbose_name='City of host')),
('country', models.CharField(max_length=100, verbose_name='Country of host')),
('phone', models.CharField(help_text='Please supply the phone number of your hotel or host.', max_length=100, verbose_name='Host phone number')),
('sim_id', models.CharField(help_text='The IMSI of the SIM Card associated with this account.', max_length=20, verbose_name='IMSI', blank=True)),
],
options={
'verbose_name': 'SIM Card',
'verbose_name_plural': 'SIM Cards',
},
bases=('attendees.ticket',),
),
migrations.CreateModel(
name='SupportTicket',
fields=[
('ticket_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='attendees.Ticket')),
],
options={
'verbose_name': 'Support Ticket',
'verbose_name_plural': 'Support Tickets',
},
bases=('attendees.ticket',),
),
migrations.CreateModel(
name='VenueTicket',
fields=[
('ticket_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='attendees.Ticket')),
('first_name', models.CharField(max_length=250, verbose_name='First name', blank=True)),
('last_name', models.CharField(max_length=250, verbose_name='Last name', blank=True)),
('organisation', models.CharField(max_length=100, verbose_name='Organization', blank=True)),
('dietary_preferences', models.ManyToManyField(to='attendees.DietaryPreference', verbose_name='Dietary preferences', blank=True)),
('shirtsize', models.ForeignKey(verbose_name='Desired T-Shirt size', blank=True, to='attendees.TShirtSize', null=True)),
('sponsor', models.ForeignKey(verbose_name='Sponsor', blank=True, to='sponsorship.Sponsor', null=True)),
],
options={
'verbose_name': 'Conference Ticket',
'verbose_name_plural': 'Conference Tickets',
},
bases=('attendees.ticket',),
),
migrations.AddField(
model_name='voucher',
name='type',
field=models.ForeignKey(verbose_name='voucher type', to='attendees.VoucherType', null=True),
),
migrations.AddField(
model_name='tickettype',
name='vouchertype_needed',
field=models.ForeignKey(verbose_name='voucher type needed', blank=True, to='attendees.VoucherType', null=True),
),
migrations.AddField(
model_name='ticket',
name='purchase',
field=models.ForeignKey(to='attendees.Purchase'),
),
migrations.AddField(
model_name='ticket',
name='ticket_type',
field=models.ForeignKey(verbose_name='Ticket type', to='attendees.TicketType'),
),
migrations.AddField(
model_name='ticket',
name='user',
field=models.ForeignKey(related_name='attendees_ticket_tickets', blank=True, to=settings.AUTH_USER_MODEL, null=True),
),
migrations.AddField(
model_name='venueticket',
name='voucher',
field=models.ForeignKey(verbose_name='Voucher', blank=True, to='attendees.Voucher', null=True),
),
migrations.AlterUniqueTogether(
name='tickettype',
unique_together=set([('product_number', 'conference')]),
),
]
| {
"content_hash": "8b569361a60e10233c7f2647affa159a",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 303,
"avg_line_length": 61.46606334841629,
"alnum_prop": 0.5899587750294464,
"repo_name": "pysv/djep",
"id": "7a5820ad668075aee39623ff10ffc9c58d7c35f4",
"size": "13608",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pyconde/attendees/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "254575"
},
{
"name": "HTML",
"bytes": "726728"
},
{
"name": "JavaScript",
"bytes": "225740"
},
{
"name": "Makefile",
"bytes": "3999"
},
{
"name": "Puppet",
"bytes": "2679"
},
{
"name": "Python",
"bytes": "683293"
},
{
"name": "Roff",
"bytes": "202148"
},
{
"name": "Ruby",
"bytes": "181"
},
{
"name": "Shell",
"bytes": "1393"
}
],
"symlink_target": ""
} |
"""
Views for managing volumes.
"""
import json
import re
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon import tabs
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.usage import quotas
from openstack_dashboard.dashboards.admin.volumes \
.volumes import forms as project_forms
from openstack_dashboard.dashboards.admin.volumes \
.volumes import tables as project_tables
from openstack_dashboard.dashboards.admin.volumes \
.volumes import tabs as project_tabs
class DetailView(tabs.TabView):
tab_group_class = project_tabs.VolumeDetailTabs
template_name = 'admin/volumes/volumes/detail.html'
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
volume = self.get_data()
table = project_tables.VolumesTable(self.request)
context["volume"] = volume
context["url"] = self.get_redirect_url()
context["actions"] = table.render_row_actions(volume)
return context
@memoized.memoized_method
def get_data(self):
try:
volume_id = self.kwargs['volume_id']
volume = cinder.volume_get(self.request, volume_id)
for att in volume.attachments:
att['instance'] = api.nova.server_get(self.request,
att['server_id'])
except Exception:
redirect = self.get_redirect_url()
exceptions.handle(self.request,
_('Unable to retrieve volume details.'),
redirect=redirect)
return volume
def get_redirect_url(self):
return reverse('horizon:admin:volumes:index')
def get_tabs(self, request, *args, **kwargs):
volume = self.get_data()
return self.tab_group_class(request, volume=volume, **kwargs)
class CreateView(forms.ModalFormView):
form_class = project_forms.CreateForm
template_name = 'admin/volumes/volumes/create.html'
success_url = reverse_lazy('horizon:admin:volumes:volumes_tab')
def get_context_data(self, **kwargs):
context = super(CreateView, self).get_context_data(**kwargs)
try:
context['usages'] = quotas.tenant_limit_usages(self.request)
except Exception:
exceptions.handle(self.request)
return context
class IncreaseVolumeView(forms.ModalFormView):
form_class = project_forms.IncreaseForm
template_name = 'admin/volumes/volumes/increase.html'
success_url = reverse_lazy('horizon:admin:volumes:index')
def get_object(self):
#try:
endpoints = api.base.url_for(self.request, 'volume')
expression = r'https?://(.+?):.+?'
host = re.match(expression,endpoints).groups()
cloud_size = api.device.get_colud_disk_size(self.request, host=host)
loads_data = json.loads(cloud_size.text)
content = eval(loads_data.get('content'))
volumes = filter(lambda x: x["vg_tags"] == "cinder_volume", content)
if volumes:
volumes = volumes.pop()
vg_size = re.findall(r"[a-zA-Z]{1}", volumes['vg_size'])
return volumes['vg_size'][:-len(vg_size)]
#except Exception:
# exceptions.handle(self.request, _("Unable request the size of current volume group."))
def get_context_data(self, **kwargs):
context = super(IncreaseVolumeView, self).get_context_data(**kwargs)
context['usages'] = quotas.tenant_limit_usages(self.request)
return context
def get_initial(self):
orig_size = self.get_object()
return {'orig_size': orig_size}
class ExtendView(forms.ModalFormView):
form_class = project_forms.ExtendForm
template_name = 'admin/volumes/volumes/extend.html'
success_url = reverse_lazy("horizon:admin:volumes:index")
def get_object(self):
if not hasattr(self, "_object"):
volume_id = self.kwargs['volume_id']
try:
self._object = cinder.volume_get(self.request, volume_id)
except Exception:
self._object = None
exceptions.handle(self.request,
_('Unable to retrieve volume information.'))
return self._object
def get_context_data(self, **kwargs):
context = super(ExtendView, self).get_context_data(**kwargs)
context['volume'] = self.get_object()
try:
usages = quotas.tenant_limit_usages(self.request)
usages['gigabytesUsed'] = (usages['gigabytesUsed']
- context['volume'].size)
context['usages'] = usages
except Exception:
exceptions.handle(self.request)
return context
def get_initial(self):
volume = self.get_object()
return {'id': self.kwargs['volume_id'],
'name': volume.name,
'orig_size': volume.size}
class CreateSnapshotView(forms.ModalFormView):
form_class = project_forms.CreateSnapshotForm
template_name = 'admin/volumes/volumes/create_snapshot.html'
success_url = reverse_lazy('horizon:admin:volumes:snapshots_tab')
def get_context_data(self, **kwargs):
context = super(CreateSnapshotView, self).get_context_data(**kwargs)
context['volume_id'] = self.kwargs['volume_id']
try:
volume = cinder.volume_get(self.request, context['volume_id'])
if (volume.status == 'in-use'):
context['attached'] = True
context['form'].set_warning(_("This volume is currently "
"attached to an instance. "
"In some cases, creating a "
"snapshot from an attached "
"volume can result in a "
"corrupted snapshot."))
context['usages'] = quotas.tenant_limit_usages(self.request)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve volume information.'))
return context
def get_initial(self):
return {'volume_id': self.kwargs["volume_id"]}
class UploadToImageView(forms.ModalFormView):
form_class = project_forms.UploadToImageForm
template_name = 'admin/volumes/volumes/upload_to_image.html'
success_url = reverse_lazy("horizon:admin:volumes:index")
@memoized.memoized_method
def get_data(self):
try:
volume_id = self.kwargs['volume_id']
volume = cinder.volume_get(self.request, volume_id)
except Exception:
error_message = _(
'Unable to retrieve volume information for volume: "%s"') \
% volume_id
exceptions.handle(self.request,
error_message,
redirect=self.success_url)
return volume
def get_context_data(self, **kwargs):
context = super(UploadToImageView, self).get_context_data(**kwargs)
context['volume'] = self.get_data()
return context
def get_initial(self):
volume = self.get_data()
return {'id': self.kwargs['volume_id'],
'name': volume.name,
'status': volume.status}
class UpdateView(forms.ModalFormView):
form_class = project_forms.UpdateForm
template_name = 'admin/volumes/volumes/update.html'
success_url = reverse_lazy("horizon:admin:volumes:index")
def get_object(self):
if not hasattr(self, "_object"):
vol_id = self.kwargs['volume_id']
try:
self._object = cinder.volume_get(self.request, vol_id)
except Exception:
msg = _('Unable to retrieve volume.')
url = reverse('horizon:admin:volumes:index')
exceptions.handle(self.request, msg, redirect=url)
return self._object
def get_context_data(self, **kwargs):
context = super(UpdateView, self).get_context_data(**kwargs)
context['volume'] = self.get_object()
return context
def get_initial(self):
volume = self.get_object()
return {'volume_id': self.kwargs["volume_id"],
'name': volume.name,
'description': volume.description}
class EditAttachmentsView(tables.DataTableView, forms.ModalFormView):
table_class = project_tables.AttachmentsTable
form_class = project_forms.AttachForm
template_name = 'admin/volumes/volumes/attach.html'
success_url = reverse_lazy("horizon:admin:volumes:index")
@memoized.memoized_method
def get_object(self):
volume_id = self.kwargs['volume_id']
try:
return cinder.volume_get(self.request, volume_id)
except Exception:
self._object = None
exceptions.handle(self.request,
_('Unable to retrieve volume information.'))
def get_data(self):
attachments = []
volume = self.get_object()
instance = self.get_initial()
if volume is not None and instance is not None:
for ins in instance['instances']:
for att in volume.attachments:
if att['server_id'] == ins.id and att['instance_name'] == ins.name:
att['volume_name'] = getattr(volume, 'name', att['device'])
att['status'] = ins.status
attachments.append(att)
return attachments
def get_initial(self):
try:
instances, has_more = api.nova.server_list(self.request)
except Exception:
instances = []
exceptions.handle(self.request,
_("Unable to retrieve attachment information."))
return {'volume': self.get_object(),
'instances': instances}
@memoized.memoized_method
def get_form(self):
form_class = self.get_form_class()
return super(EditAttachmentsView, self).get_form(form_class)
def get_context_data(self, **kwargs):
context = super(EditAttachmentsView, self).get_context_data(**kwargs)
context['form'] = self.get_form()
volume = self.get_object()
if volume and volume.status == 'available':
context['show_attach'] = True
del context['table']
else:
context['show_attach'] = False
context['volume'] = volume
if self.request.is_ajax():
context['hide'] = True
return context
def get(self, request, *args, **kwargs):
# Table action handling
handled = self.construct_tables()
if handled:
return handled
return self.render_to_response(self.get_context_data(**kwargs))
def post(self, request, *args, **kwargs):
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
else:
return self.get(request, *args, **kwargs)
class RetypeView(forms.ModalFormView):
form_class = project_forms.RetypeForm
template_name = 'admin/volumes/volumes/retype.html'
success_url = reverse_lazy("horizon:admin:volumes:index")
@memoized.memoized_method
def get_data(self):
try:
volume_id = self.kwargs['volume_id']
volume = cinder.volume_get(self.request, volume_id)
except Exception:
error_message = _(
'Unable to retrieve volume information for volume: "%s"') \
% volume_id
exceptions.handle(self.request,
error_message,
redirect=self.success_url)
return volume
def get_context_data(self, **kwargs):
context = super(RetypeView, self).get_context_data(**kwargs)
context['volume'] = self.get_data()
return context
def get_initial(self):
volume = self.get_data()
return {'id': self.kwargs['volume_id'],
'name': volume.name,
'volume_type': volume.volume_type}
class UpdateStatusView(forms.ModalFormView):
form_class = project_forms.UpdateStatus
template_name = 'admin/volumes/volumes/update_status.html'
success_url = reverse_lazy('horizon:admin:volumes:index')
def get_context_data(self, **kwargs):
context = super(UpdateStatusView, self).get_context_data(**kwargs)
context["volume_id"] = self.kwargs['volume_id']
return context
@memoized.memoized_method
def get_data(self):
try:
volume_id = self.kwargs['volume_id']
volume = cinder.volume_get(self.request, volume_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve volume details.'),
redirect=self.success_url)
return volume
def get_initial(self):
volume = self.get_data()
return {'volume_id': self.kwargs["volume_id"],
'status': volume.status}
| {
"content_hash": "2b5acbbca796cff2f890e2141caed82a",
"timestamp": "",
"source": "github",
"line_count": 367,
"max_line_length": 99,
"avg_line_length": 36.80108991825613,
"alnum_prop": 0.5915148822745446,
"repo_name": "xuweiliang/Codelibrary",
"id": "f2db258a14150a6ddcaa0d79168f1e76638ba18c",
"size": "14111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/admin/volumes/volumes/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "134284"
},
{
"name": "HTML",
"bytes": "830844"
},
{
"name": "JavaScript",
"bytes": "2421484"
},
{
"name": "Makefile",
"bytes": "4934"
},
{
"name": "Python",
"bytes": "17185807"
},
{
"name": "Shell",
"bytes": "9144"
}
],
"symlink_target": ""
} |
import sqlalchemy
from datetime import datetime
from ml_buff.database import DeclarativeBase
from sqlalchemy.dialects.postgresql import ARRAY
from sqlalchemy.orm import relationship
from ml_buff.models.feature import Feature
class FeatureValue(DeclarativeBase):
__tablename__ = 'feature_values'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
value = sqlalchemy.Column(ARRAY(sqlalchemy.Float))
feature_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('features.id'))
feature = relationship("Feature")
input_data_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('input_data.id'))
input_data = relationship("InputData")
created_at = sqlalchemy.Column(sqlalchemy.DateTime, default=datetime.now)
updated_at = sqlalchemy.Column(sqlalchemy.DateTime, default=datetime.now, onupdate=datetime.now)
def __init__(self, value, feature, input_data):
self.value = value
self.feature = feature
self.input_data = input_data
| {
"content_hash": "61b49350375639a8957a4333849917b7",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 98,
"avg_line_length": 41.208333333333336,
"alnum_prop": 0.7775530839231547,
"repo_name": "tinenbruno/ml-buff",
"id": "b00ff6991e44beccfe0729478ecd9e769ecb1c18",
"size": "989",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ml_buff/models/feature_value.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15550"
}
],
"symlink_target": ""
} |
""" Cisco_IOS_XR_shellutil_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR shellutil package configuration.
This module contains definitions
for the following management objects\:
host\-names\: Container Schema for hostname configuration
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class HostNames(object):
"""
Container Schema for hostname configuration
.. attribute:: host_name
Configure system's hostname
**type**\: str
"""
_prefix = 'shellutil-cfg'
_revision = '2015-10-12'
def __init__(self):
self.host_name = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-shellutil-cfg:host-names'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.host_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_shellutil_cfg as meta
return meta._meta_table['HostNames']['meta_info']
| {
"content_hash": "d745569d4ff96f669c75c1dabc94ef9a",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 87,
"avg_line_length": 20.492537313432837,
"alnum_prop": 0.6649672250546249,
"repo_name": "111pontes/ydk-py",
"id": "97dd50049c1a2077d1b04eebe960b32944608245",
"size": "1373",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_shellutil_cfg.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "7226"
},
{
"name": "Python",
"bytes": "446117948"
}
],
"symlink_target": ""
} |
import numpy as np
import pysam
import re
from collections import defaultdict
from Bio import SeqIO
import time
class State:
on_on = 0
on_off = 1
off_on = 2
off_off = 3
def make_freq_csv_perc(freq_dict, read_max, ref_max, out):
#print(freq_dict)
freq_cnt = np.zeros(shape=(ref_max + 1, read_max + 1), dtype=float)
total = 0
for key, val in freq_dict.items():
ref_len = key[0]
read_len = key[1]
letter = key[2]
freq_cnt[ref_len][read_len] += val
total += val
freq_cnt = np.divide(freq_cnt, float(total))
with open(out,'w') as f:
np.savetxt(f, freq_cnt.astype(float), fmt='%2.5f', delimiter=',')
def make_freq_csv(freq_dict, read_max, ref_max, out):
#print(freq_dict)
freq_cnt = np.zeros(shape=(ref_max + 1, read_max + 1), dtype=int)
for key, val in freq_dict.items():
ref_len = key[0]
read_len = key[1]
letter = key[2]
freq_cnt[ref_len][read_len] += val
with open(out,'w') as f:
np.savetxt(f, freq_cnt.astype(int), fmt='%i', delimiter=',')
def make_test_csv(regions_dict, read_max, out):
size = len(regions_dict)
freq_cnt = np.zeros(shape=(size, read_max + 4), dtype=int)
cnt = 0
for key, val in regions_dict.items():
if(cnt >= size):
break
length = key[1] - key[0]
for i in [x for x in val if x <= read_max]:
freq_cnt[cnt][i] += 1
freq_cnt[cnt][read_max + 1] = length
freq_cnt[cnt][read_max + 2] = key[0]
freq_cnt[cnt][read_max + 3] = key[1]
cnt += 1
with open(out,'w') as f:
np.savetxt(f, freq_cnt.astype(int), fmt='%i', delimiter=',')
def genome_preprocessing(reference_file):
with open(reference_file) as f:
content = f.readlines()
content = [x for x in content if x[0] != '>']
content = [x.strip() for x in content]
genome=''.join(content)
return genome
def get_cigar_string(cigar_string):
cigar_components = re.findall(r'(\d+)([A-Z]{1})', cigar_string)
cigar = ''
for comp in cigar_components:
number = comp[0]
letter = comp[1]
for i in range(int(number)):
cigar+=letter
return cigar
def readerrors(ref, reads):
freq_dict = {}
regions_dict = defaultdict(list)
#track max homopolymer size
ref_max = 0
read_max = 0
fasta_sequences = SeqIO.parse(open(ref),'fasta')
    indent = 0 # global offset along the reference
for seq in fasta_sequences:
seq_id, sequence = seq.id, str(seq.seq) + 'X'
content = pysam.AlignmentFile(reads)
for r in content.fetch(until_eof=True):
            # if the read did not map to the reference
if r.cigarstring is None:
continue
            # if the read did not map to this chromosome
if(content.getrname(r.reference_id) != seq_id):
continue
cigar = get_cigar_string(r.cigarstring)
#set up necessary variables
#current pointers, they show us which positions we are looking at
sequence_pointer = r.pos
reference_pointer = r.pos + indent
read_pointer = 0
cigar_pointer = 0
#length of homopolymers
read_homopolymer = 0
ref_homopolymer = 0
#coordinates of homopolymers
ref_begin = 0
ref_end = 0
#homopolymer letters. if they exist, they should never be different (except if one of them is empty)
read_letter = ''
ref_letter = ''
#read sequence
read = r.seq + 'X'
#starting state
state = State.off_off
while(cigar_pointer < len(cigar)):
if(cigar[cigar_pointer] == 'M'):
#not a read nor a reference has a detected homopolymer
if(state == State.off_off):
if(read[read_pointer] == read[read_pointer + 1]):
read_letter = read[read_pointer]
read_homopolymer += 1
state = State.on_off
if(sequence[sequence_pointer] == read_letter):
ref_homopolymer += 1
ref_letter = read_letter
state = State.on_on
ref_begin = reference_pointer
elif(sequence[sequence_pointer] == sequence[sequence_pointer + 1]):
ref_homopolymer += 1
ref_letter = sequence[sequence_pointer]
state = State.off_on
ref_begin = reference_pointer
if(read[read_pointer] == ref_letter):
read_letter = ref_letter
read_homopolymer += 1
state = State.on_on
reference_pointer += 1
read_pointer += 1
cigar_pointer += 1
sequence_pointer += 1
# we have a homopolymer in the read, but not in the reference(or ref homopolymer ended)
elif(state == State.on_off):
#if homopolymer in read continues
if(read[read_pointer] == read_letter):
read_homopolymer += 1
#if we didn't found a homopolymer in the reference by now, check
if(ref_homopolymer == 0 and sequence[sequence_pointer] == read_letter):
ref_letter = read_letter
ref_homopolymer += 1
state = State.on_on
ref_begin = reference_pointer
reference_pointer += 1
read_pointer += 1
cigar_pointer += 1
sequence_pointer += 1
else:
#here we don't update pointers so we can check them in off_off state
state = State.off_off
data = (ref_homopolymer, read_homopolymer, read_letter)
if(freq_dict.has_key(data)):
freq_dict[data] += 1
else:
freq_dict[data] = 1
ref_coordinates = (ref_begin, ref_end)
regions_dict[ref_coordinates].append(read_homopolymer)
ref_max = max(ref_max, ref_homopolymer)
read_max = max(read_max, read_homopolymer)
ref_begin = 0
ref_end = 0
read_homopolymer = 0
ref_homopolymer = 0
read_letter = ''
ref_letter = ''
# we have a homopolymer in the reference, but not in the read (or read homopolymer ended)
elif(state == State.off_on):
#if homopolymer in reference continues
if(sequence[sequence_pointer] == ref_letter):
ref_homopolymer += 1
if(read_homopolymer == 0 and read[read_pointer] == ref_letter):
read_letter = ref_letter
read_homopolymer += 1
state = State.on_on
reference_pointer += 1
read_pointer += 1
cigar_pointer += 1
sequence_pointer += 1
else:
#here we don't update pointers so we can check them in off_off state
state = State.off_off
ref_end = reference_pointer
data = (ref_homopolymer, read_homopolymer, ref_letter)
if(freq_dict.has_key(data)):
freq_dict[data] += 1
else:
freq_dict[data] = 1
ref_coordinates = (ref_begin, ref_end)
regions_dict[ref_coordinates].append(read_homopolymer)
ref_max = max(ref_max, ref_homopolymer)
read_max = max(read_max, read_homopolymer)
ref_begin = 0
ref_end = 0
read_homopolymer = 0
ref_homopolymer = 0
read_letter = ''
ref_letter = ''
elif(state == State.on_on):
#print("ON_ON")
# if read homopolymer continues
if(read[read_pointer] == read_letter and sequence[sequence_pointer] == ref_letter):
read_homopolymer += 1
ref_homopolymer += 1
reference_pointer += 1
read_pointer += 1
cigar_pointer += 1
sequence_pointer += 1
elif(read[read_pointer] == read_letter):
read_homopolymer += 1
state = State.on_off
ref_end = reference_pointer
reference_pointer += 1
read_pointer += 1
cigar_pointer += 1
sequence_pointer += 1
elif(sequence[sequence_pointer] == ref_letter):
ref_homopolymer += 1
state = State.off_on
reference_pointer += 1
read_pointer += 1
cigar_pointer += 1
sequence_pointer += 1
else:
state = State.off_off
ref_end = reference_pointer
data = (ref_homopolymer, read_homopolymer, read_letter)
if(freq_dict.has_key(data)):
freq_dict[data] += 1
else:
freq_dict[data] = 1
ref_coordinates = (ref_begin, ref_end)
regions_dict[ref_coordinates].append(read_homopolymer)
ref_max = max(ref_max, ref_homopolymer)
read_max = max(read_max, read_homopolymer)
ref_begin = 0
ref_end = 0
read_homopolymer = 0
ref_homopolymer = 0
read_letter = ''
ref_letter = ''
elif(cigar[cigar_pointer] == 'I'):
if(state == State.off_off):
#print("OFF_OFF")
if(read[read_pointer] == read[read_pointer + 1]):
read_letter = read[read_pointer]
read_homopolymer += 1
state = State.on_off
read_pointer += 1
cigar_pointer += 1
# we have a homopolymer in the read, but not in the reference(or ref homopolymer ended)
elif(state == State.on_off):
#print("ON_OFF")
if(read[read_pointer] == read_letter):
read_homopolymer += 1
read_pointer += 1
cigar_pointer += 1
else:
state = State.off_off
data = (ref_homopolymer, read_homopolymer, read_letter)
if(freq_dict.has_key(data)):
freq_dict[data] += 1
else:
freq_dict[data] = 1
ref_coordinates = (ref_begin, ref_end)
regions_dict[ref_coordinates].append(read_homopolymer)
ref_max = max(ref_max, ref_homopolymer)
read_max = max(read_max, read_homopolymer)
ref_begin = 0
ref_end = 0
read_homopolymer = 0
ref_homopolymer = 0
read_letter = ''
ref_letter = ''
elif(state == State.off_on):
#print("OFF_ON")
if(read_homopolymer == 0 and read[read_pointer] == ref_letter):
read_letter = ref_letter
read_homopolymer += 1
state = State.on_on
read_pointer += 1
cigar_pointer += 1
elif(state == State.on_on):
#print("ON_ON")
if(read[read_pointer] == read_letter):
read_homopolymer += 1
else:
state = State.off_on
read_pointer += 1
cigar_pointer += 1
elif(cigar[cigar_pointer] == 'D'):
if(state == State.off_off):
#print("OFF_OFF")
if(sequence[sequence_pointer] == sequence[sequence_pointer + 1]):
ref_homopolymer += 1
ref_letter = sequence[sequence_pointer]
ref_begin = reference_pointer
state = State.off_on
reference_pointer += 1
cigar_pointer += 1
sequence_pointer += 1
# we have a homopolymer in the read, but not in the reference(or ref homopolymer ended)
elif(state == State.on_off):
#print("ON_OFF")
if(ref_homopolymer == 0 and sequence[sequence_pointer] == read_letter):
ref_letter = read_letter
ref_homopolymer += 1
state = State.on_on
ref_begin = reference_pointer
reference_pointer += 1
cigar_pointer += 1
sequence_pointer += 1
elif(state == State.off_on):
                        #print("OFF_ON")
if(sequence[sequence_pointer] == ref_letter):
ref_homopolymer += 1
reference_pointer += 1
cigar_pointer += 1
sequence_pointer += 1
else:
state = State.off_off
ref_end = reference_pointer
data = (ref_homopolymer, read_homopolymer, ref_letter)
if(freq_dict.has_key(data)):
freq_dict[data] += 1
else:
freq_dict[data] = 1
ref_coordinates = (ref_begin, ref_end)
regions_dict[ref_coordinates].append(read_homopolymer)
ref_max = max(ref_max, ref_homopolymer)
read_max = max(read_max, read_homopolymer)
ref_begin = 0
ref_end = 0
read_homopolymer = 0
ref_homopolymer = 0
read_letter = ''
ref_letter = ''
elif(state == State.on_on):
#print("ON_ON")
if(sequence[sequence_pointer] == ref_letter):
ref_homopolymer += 1
else:
state = State.on_off
ref_end = reference_pointer
cigar_pointer += 1
reference_pointer += 1
sequence_pointer += 1
else:
# S
read_pointer += 1
cigar_pointer += 1
indent += len(sequence)
return read_max, ref_max, regions_dict, freq_dict | {
"content_hash": "cf5ec8756d3ed357b2dfc84dca8bc296",
"timestamp": "",
"source": "github",
"line_count": 437,
"max_line_length": 103,
"avg_line_length": 26.931350114416475,
"alnum_prop": 0.5989463845696321,
"repo_name": "lucka318/master_thesis",
"id": "6be7aeb0f3725b78667d02909bb7b377d885be65",
"size": "11769",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "readerrors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25569"
}
],
"symlink_target": ""
} |
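The homopolymer state machine above walks an expanded CIGAR string one character at a time; the standalone sketch below is an equivalent (but rewritten) version of get_cigar_string that makes the expansion step easy to verify. The sample CIGAR '3M1I2D2S' is invented for illustration.
# Equivalent rewrite of get_cigar_string for illustration: each "<count><op>" pair
# in a CIGAR string is unrolled into <count> repeated operation letters.
import re

def expand_cigar(cigar_string):
    cigar = ''
    for number, letter in re.findall(r'(\d+)([A-Z]{1})', cigar_string):
        cigar += letter * int(number)
    return cigar

print(expand_cigar('3M1I2D2S'))   # -> 'MMMIDDSS'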
"""
This script will test highgui's cvWaitKey(int) function
"""
# name of this test and it's requirements
TESTNAME = "cvWaitKey"
REQUIRED = ["cvShowImage"]
# needed for sys.exit(int) and .works file handling
import os
import sys
import works
# path to imagefiles we need
PREFIX=os.environ["top_srcdir"]+"/tests/python/testdata/images/"
# check requirements and delete old flag file, if it exists
if not works.check_files(REQUIRED, TESTNAME):
sys.exit(77)
# import the necessary things for OpenCV
import python
from python.highgui import *
# request some user input
print "(INFO) Press anykey within the next 20 seconds to 'PASS' this test."
# create a dummy window which reacts on cvWaitKey()
cvNamedWindow(TESTNAME, CV_WINDOW_AUTOSIZE)
# display an image
cvShowImage(TESTNAME, cvLoadImage(PREFIX+"cvWaitKey.jpg"))
# wait 20 seconds using cvWaitKey(20000),
# return 'FAIL' if no key has been pressed.
if cvWaitKey(20000) == -1:
print "(ERROR) No key pressed, remarking as 'FAIL'."
sys.exit(1)
#create flag file for the following tests
works.set_file(TESTNAME)
# return 0 ('PASS')
sys.exit(0)
| {
"content_hash": "3d26ac1179c7a533aeee2908932a60f7",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 76,
"avg_line_length": 24.622222222222224,
"alnum_prop": 0.7445848375451264,
"repo_name": "guc-cs/Campus-Vision",
"id": "760d6b4fc1e75bc9e4ee3a91c8602d9c4ca53ffd",
"size": "1131",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "opencv-1.1.0/tests/python/highgui/cvWaitKey.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "689318"
},
{
"name": "C++",
"bytes": "19526064"
},
{
"name": "CSS",
"bytes": "868"
},
{
"name": "Gnuplot",
"bytes": "526"
},
{
"name": "HTML",
"bytes": "914116"
},
{
"name": "Inno Setup",
"bytes": "17560"
},
{
"name": "M",
"bytes": "43160"
},
{
"name": "MATLAB",
"bytes": "29967"
},
{
"name": "Makefile",
"bytes": "719871"
},
{
"name": "Objective-C",
"bytes": "8958"
},
{
"name": "Python",
"bytes": "684932"
},
{
"name": "Shell",
"bytes": "513214"
}
],
"symlink_target": ""
} |
from django.urls import path, include
from rest_framework import routers
from . import api_views
api_router = routers.DefaultRouter()
api_router.register(
r'tracked_words',
api_views.TrackedWordsViewSet,
basename='tracked-words')
urlpatterns = [
path(r'', include(api_router.urls)),
]
| {
"content_hash": "59ecedd0c9edfa71e387d080a2a727b9",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 40,
"avg_line_length": 20.333333333333332,
"alnum_prop": 0.7245901639344262,
"repo_name": "aehlke/manabi",
"id": "5f83ebf965c1541f788ba46bd70cfc713d4a069e",
"size": "305",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manabi/apps/word_tracking/api_urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "60000"
},
{
"name": "HTML",
"bytes": "287098"
},
{
"name": "JavaScript",
"bytes": "260813"
},
{
"name": "Jinja",
"bytes": "152668"
},
{
"name": "PowerShell",
"bytes": "935"
},
{
"name": "Python",
"bytes": "5129354"
},
{
"name": "Ruby",
"bytes": "5722"
},
{
"name": "SCSS",
"bytes": "25268"
},
{
"name": "Shell",
"bytes": "3041"
}
],
"symlink_target": ""
} |
import pandas
from qstkutil import DataAccess as da
import numpy as np
import math
import qstkutil.qsdateutil as du
import datetime as dt
import qstkutil.DataAccess as da
"""
Accepts a list of symbols along with start and end date
Returns the Event Matrix which is a pandas Datamatrix
Event matrix has the following structure :
|IBM |GOOG|XOM |MSFT| GS | JP |
(d1)|nan |nan | 1 |nan |nan | 1 |
(d2)|nan | 1 |nan |nan |nan |nan |
(d3)| 1 |nan | 1 |nan | 1 |nan |
(d4)|nan | 1 |nan | 1 |nan |nan |
...................................
...................................
Also, d1 = start date
nan = no information about any event.
1 = status bit(positively confirms the event occurence)
"""
# Get the data from the data store
storename = "Yahoo" # get data from our daily prices source
# Available field names: open, close, high, low, close, actual_close, volume
closefield = "close"
volumefield = "volume"
window = 10
def findEvents(symbols, startday,endday,verbose=False):
timeofday=dt.timedelta(hours=16)
timestamps = du.getNYSEdays(startday,endday,timeofday)
dataobj = da.DataAccess('Yahoo')
if verbose:
print __name__ + " reading data"
close = dataobj.get_data(timestamps, symbols, closefield)
close = (close.fillna()).fillna(method='backfill')
if verbose:
print __name__ + " finding events"
for symbol in symbols:
close[symbol][close[symbol]>= 1.0] = np.NAN
for i in range(1,len(close[symbol])):
if np.isnan(close[symbol][i-1]) and close[symbol][i] < 1.0 :#(i-1)th was > $1, and (i)th is <$1
close[symbol][i] = 1.0 #overwriting the price by the bit
close[symbol][close[symbol]< 1.0] = np.NAN
return close
| {
"content_hash": "baf3459b0b14943a4909d2514840f606",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 104,
"avg_line_length": 36.82608695652174,
"alnum_prop": 0.6505312868949232,
"repo_name": "avistous/QSTK",
"id": "3552a4a00af3fc55ddf36333ef0320e4ef985320",
"size": "2069",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "qstkstudy/Events.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5708"
},
{
"name": "CSS",
"bytes": "127790"
},
{
"name": "Groff",
"bytes": "36352"
},
{
"name": "HTML",
"bytes": "18435650"
},
{
"name": "Java",
"bytes": "8096"
},
{
"name": "JavaScript",
"bytes": "21455"
},
{
"name": "Makefile",
"bytes": "2590"
},
{
"name": "Python",
"bytes": "1693403"
},
{
"name": "Shell",
"bytes": "4070"
},
{
"name": "TeX",
"bytes": "1018533"
}
],
"symlink_target": ""
} |