Dataset columns (⌀ marks columns that may be null):
hexsha: string (length 40)
size: int64 (1 to 1.03M)
ext: string (10 classes)
lang: string (1 class)
max_stars_repo_path: string (length 3 to 239)
max_stars_repo_name: string (length 5 to 130)
max_stars_repo_head_hexsha: string (length 40 to 78)
max_stars_repo_licenses: sequence (length 1 to 10)
max_stars_count: int64 (1 to 191k) ⌀
max_stars_repo_stars_event_min_datetime: string (length 24) ⌀
max_stars_repo_stars_event_max_datetime: string (length 24) ⌀
max_issues_repo_path: string (length 3 to 239)
max_issues_repo_name: string (length 5 to 130)
max_issues_repo_head_hexsha: string (length 40 to 78)
max_issues_repo_licenses: sequence (length 1 to 10)
max_issues_count: int64 (1 to 67k) ⌀
max_issues_repo_issues_event_min_datetime: string (length 24) ⌀
max_issues_repo_issues_event_max_datetime: string (length 24) ⌀
max_forks_repo_path: string (length 3 to 239)
max_forks_repo_name: string (length 5 to 130)
max_forks_repo_head_hexsha: string (length 40 to 78)
max_forks_repo_licenses: sequence (length 1 to 10)
max_forks_count: int64 (1 to 105k) ⌀
max_forks_repo_forks_event_min_datetime: string (length 24) ⌀
max_forks_repo_forks_event_max_datetime: string (length 24) ⌀
content: string (length 1 to 1.03M)
avg_line_length: float64 (1 to 958k)
max_line_length: int64 (1 to 1.03M)
alphanum_fraction: float64 (0 to 1)
4a21ea3f7be2160152511d231c86be018a965ae7 | 527 | py | Python | parsec/commands/histories/delete_history.py | abretaud/parsec | 8ebcafac34b5d6df45de4cecc882b129bb604170 | ["Apache-2.0"] | null | null | null | parsec/commands/histories/delete_history.py | abretaud/parsec | 8ebcafac34b5d6df45de4cecc882b129bb604170 | ["Apache-2.0"] | null | null | null | parsec/commands/histories/delete_history.py | abretaud/parsec | 8ebcafac34b5d6df45de4cecc882b129bb604170 | ["Apache-2.0"] | null | null | null |
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, dict_output, _arg_split
@click.command('delete_history')
@click.argument("history_id", type=str)
@click.option(
"--purge",
help="if ``True``, also purge (permanently delete) the history",
is_flag=True
)
@pass_context
@custom_exception
@dict_output
def cli(ctx, history_id, purge=False):
"""Delete a history.
Output:
"""
return ctx.gi.histories.delete_history(history_id, purge=purge)
| 21.08 | 71 | 0.732448 |
4a21eabb6a2a377243ea6d4737044c7ed6f810d4 | 2,922 | py | Python | blog/user/views.py | znf896/Django-React- | 44bfbffc8f6a6fa13e001f3fc4b42005afa426bc | ["MIT"] | 1 | 2021-05-10T15:29:00.000Z | 2021-05-10T15:29:00.000Z | blog/user/views.py | znf896/Django-React- | 44bfbffc8f6a6fa13e001f3fc4b42005afa426bc | ["MIT"] | 7 | 2020-09-07T12:44:21.000Z | 2022-02-26T18:35:20.000Z | blog/user/views.py | znf896/Django-React- | 44bfbffc8f6a6fa13e001f3fc4b42005afa426bc | ["MIT"] | null | null | null |
# Create your views here.
from django.http import HttpRequest, HttpResponse, JsonResponse, HttpResponseBadRequest
import simplejson
from .models import User
import jwt
from datetime import datetime
from blog.settings import SECRET_KEY
import bcrypt
AUTHOR_VER = 60 * 60 * 8
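# token lifetime: 60 * 60 * 8 = 28,800 seconds, i.e. eight hours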
def gen_token(user_id):
ret = jwt.encode(
{'user_id': user_id,
'exp': int(datetime.now().timestamp()) + AUTHOR_VER
}, SECRET_KEY
)
return ret.decode()
# user authentication decorator
def authoration(view):
def wrapper(req: HttpRequest):
token = req.META['HTTP_JWT']
payload = jwt.decode(token, SECRET_KEY)
timestamp = payload['exp']
try:
# check the expiry timestamp
if timestamp:
user_id = payload['user_id']
user = User.objects.get(pk=user_id)
req.user = user # inject the user object into the request
return view(req)
except Exception as e:
print(e)
return HttpResponse(status=401) # HttpResponse takes `status`, not `status_code`
return wrapper
@authoration # test = authoration(test)
def test(req: HttpRequest):
return HttpResponse(b'jwt test')
def login(req: HttpRequest):
ret = simplejson.loads(req.body) # {'password': 'abc', 'email': '[email protected]'}
email = ret['email']
password = ret['password']
print('~~~~~~~')
print('!!!!!!!', email, password)
try:
user = User.objects.get(email=email) # query the database
if not user: # no such account
return HttpResponseBadRequest()
if not bcrypt.checkpw(password.encode(), user.password.encode()): # wrong password
return HttpResponseBadRequest()
return JsonResponse(
{'user': {'user_id': user.id,
'name': user.name,
'email': user.name,
}, 'token': gen_token(user.id)
}
)
except Exception as e:
print(e)
return HttpResponseBadRequest()
def register(reg: HttpRequest):
# {'name': 'znf', 'password': '[email protected]', 'email': 'abc'}
playload = simplejson.loads(reg.body)
print(playload, type(playload))
try:
email = playload['email']
# check whether this email is already registered
query = User.objects.filter(email=email)
print(query, type(query), query.query)
if query:
return HttpResponseBadRequest()
name = playload['name']
password = playload['password']
print(email, name, password)
# ORM operations: create and populate the new user
user = User()
user.email = email
user.name = name
user.password = bcrypt.hashpw(password.encode(), bcrypt.gensalt()).decode() # store as str so checkpw() can re-encode it at login
try:
user.save()
return JsonResponse({"userid": user.id, 'token': gen_token(user.id)})
except Exception as e:
print(e)
return HttpResponseBadRequest()
except Exception as e:
print(e)
return HttpResponseBadRequest()
| 28.096154 | 88 | 0.578029 |
4a21eb8afcb395e3804302185844f4ffbc838ed2 | 847 | py | Python | src/sheets/spreadsheet.py | y3rsh/lawrencetrailhawks-treasury | 53418df0543daa62add686ca15c46ceadf98c3d8 | ["MIT"] | null | null | null | src/sheets/spreadsheet.py | y3rsh/lawrencetrailhawks-treasury | 53418df0543daa62add686ca15c46ceadf98c3d8 | ["MIT"] | null | null | null | src/sheets/spreadsheet.py | y3rsh/lawrencetrailhawks-treasury | 53418df0543daa62add686ca15c46ceadf98c3d8 | ["MIT"] | null | null | null |
import os
import gspread
from oauth2client.service_account import ServiceAccountCredentials
def get_sheet(worksheet_name, worksheet_title):
# use creds to create a client to interact with the Google Drive API
scope = ['https://spreadsheets.google.com/feeds']
creds = ServiceAccountCredentials.from_json_keyfile_name(os.environ['GSERVICEJSON'], scope)
client = gspread.authorize(creds)
# Find a workbook by name and open the first sheet
# Make sure you use the right name here.
sheet = client.open(worksheet_name).worksheet(worksheet_title)
# Extract and print all of the values
list_of_hashes = sheet.get_all_records()
#remove_characters(list_of_hashes)
#print(list_of_hashes)
return list_of_hashes
def remove_characters(data):
for val in data:
for key, value in val.items():
val[key] = val[key].strip('$')
| 35.291667 | 93 | 0.765053 |
4a21ebcf22d9bed7d52354619074e0ff4034f6be | 31,217 | py | Python | formats/base.py | wyim-pgl/jcvi | f79ead2fb30a80ead4e3b9602e0bc4a256995864 | ["BSD-2-Clause"] | 2 | 2019-02-22T12:56:39.000Z | 2020-03-02T03:51:45.000Z | formats/base.py | wyim-pgl/jcvi | f79ead2fb30a80ead4e3b9602e0bc4a256995864 | ["BSD-2-Clause"] | null | null | null | formats/base.py | wyim-pgl/jcvi | f79ead2fb30a80ead4e3b9602e0bc4a256995864 | ["BSD-2-Clause"] | 1 | 2021-05-10T17:26:43.000Z | 2021-05-10T17:26:43.000Z |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import os.path as op
import math
import sys
import logging
from itertools import groupby, islice, cycle, izip
from Bio import SeqIO
from jcvi.apps.base import OptionParser, ActionDispatcher, sh, debug, need_update, \
mkdir, popen
debug()
FastaExt = ("fasta", "fa", "fna", "cds", "pep", "faa", "fsa", "seq", "nt", "aa")
FastqExt = ("fastq", "fq")
class BaseFile (object):
def __init__(self, filename):
self.filename = filename
if filename:
logging.debug("Load file `{0}`".format(filename))
class LineFile (BaseFile, list):
"""
Generic file parser for line-based files
"""
def __init__(self, filename, comment=None, load=False):
super(LineFile, self).__init__(filename)
if load:
fp = must_open(filename)
self.lines = [l.strip() for l in fp if l[0]!=comment]
logging.debug("Load {0} lines from `{1}`.".\
format(len(self.lines), filename))
class DictFile (BaseFile, dict):
"""
Generic file parser for multi-column files, keyed by a particular index.
"""
def __init__(self, filename, keypos=0, valuepos=1, delimiter=None,
strict=True, keycast=None, cast=None):
super(DictFile, self).__init__(filename)
fp = must_open(filename)
ncols = max(keypos, valuepos) + 1
thiscols = 0
for lineno, row in enumerate(fp):
row = row.rstrip()
atoms = row.split(delimiter)
thiscols = len(atoms)
if thiscols < ncols:
action = "Aborted" if strict else "Skipped"
msg = "Must contain >= {0} columns. {1}.\n".format(ncols, action)
msg += " --> Line {0}: {1}".format(lineno + 1, row)
logging.error(msg)
if strict:
sys.exit(1)
else:
continue
key = atoms[keypos]
value = atoms[valuepos] if (valuepos is not None) else atoms
if keycast:
key = keycast(key)
if cast:
value = cast(value)
self[key] = value
assert thiscols, "File empty"
self.ncols = thiscols
logging.debug("Imported {0} records from `{1}`.".\
format(len(self), filename))
class SetFile (BaseFile, set):
def __init__(self, filename, column=-1, delimiter=None):
super(SetFile, self).__init__(filename)
fp = open(filename)
for row in fp:
if not row.strip():
continue
keys = [x.strip() for x in row.split(delimiter)]
if column >= 0:
keys = [keys[column]]
self.update(keys)
class FileShredder (object):
"""
Same as rm -f *
"""
def __init__(self, filelist, verbose=True):
filelist = [x for x in filelist if x and op.exists(x)]
cmd = "rm -rf {0}".format(" ".join(filelist))
sh(cmd, log=verbose)
class FileMerger (object):
"""
Same as cat * > filename
"""
def __init__(self, filelist, outfile):
self.filelist = filelist
self.outfile = outfile
self.ingz = filelist[0].endswith(".gz")
self.outgz = outfile.endswith(".gz")
def merge(self, checkexists=False):
outfile = self.outfile
if checkexists and not need_update(self.filelist, outfile):
logging.debug("File `{0}` exists. Merge skipped.".format(outfile))
return
files = " ".join(self.filelist)
ingz, outgz = self.ingz, self.outgz
if ingz and outgz: # can merge gz files directly
cmd = "cat {0} > {1}".format(files, outfile)
sh(cmd)
else:
cmd = "zcat" if self.ingz else "cat"
cmd += " " + files
sh(cmd, outfile=outfile)
return outfile
class FileSplitter (object):
def __init__(self, filename, outputdir=None, format="fasta", mode="cycle"):
self.filename = filename
self.outputdir = outputdir
self.mode = mode
format = format or self._guess_format(filename)
logging.debug("format is %s" % format)
if format in ("fasta", "fastq"):
self.klass = "seqio"
elif format == "clust":
self.klass = "clust"
else:
self.klass = "txt"
self.format = format
mkdir(outputdir)
def _open(self, filename):
if self.klass == "seqio":
handle = SeqIO.parse(open(filename), self.format)
elif self.klass == "clust":
from jcvi.apps.uclust import ClustFile
handle = iter(ClustFile(filename))
else:
handle = open(filename)
return handle
@property
def num_records(self):
handle = self._open(self.filename)
return sum(1 for x in handle)
def _guess_format(self, filename):
root, ext = op.splitext(filename)
ext = ext.strip(".")
if ext in FastaExt:
format = "fasta"
elif ext in FastqExt:
format = "fastq"
else:
format = "txt"
return format
def _batch_iterator(self, N=1):
"""Returns N lists of records.
This can be used on any iterator, for example to batch up
SeqRecord objects from Bio.SeqIO.parse(...), or to batch
Alignment objects from Bio.AlignIO.parse(...), or simply
lines from a file handle.
This is a generator function, and it returns lists of the
entries from the supplied iterator. Each list will have
batch_size entries, although the final list may be shorter.
"""
batch_size = math.ceil(self.num_records / float(N))
handle = self._open(self.filename)
while True:
batch = list(islice(handle, batch_size))
if not batch:
break
yield batch
@classmethod
def get_names(cls, filename, N):
root, ext = op.splitext(op.basename(filename))
names = []
pad0 = len(str(int(N - 1)))
for i in xrange(N):
name = "{0}_{1:0{2}d}{3}".format(root, i, pad0, ext)
names.append(name)
return names
def write(self, fw, batch):
if self.klass == "seqio":
SeqIO.write(batch, fw, self.format)
elif self.klass == "clust":
for b in batch:
print >> fw, b
else:
for line in batch:
fw.write(line)
return len(batch)
def split(self, N, force=False):
"""
There are two modes of splitting the records
- batch: splitting is sequentially to records/N chunks
- cycle: placing each record in the splitted files and cycles
use `cycle` if the len of the record is not evenly distributed
"""
mode = self.mode
assert mode in ("batch", "cycle", "optimal")
logging.debug("set split mode=%s" % mode)
self.names = self.__class__.get_names(self.filename, N)
if self.outputdir:
self.names = [op.join(self.outputdir, x) for x in self.names]
if not need_update(self.filename, self.names) and not force:
logging.error("file %s already existed, skip file splitting" % \
self.names[0])
return
filehandles = [open(x, "w") for x in self.names]
if mode == "batch":
for batch, fw in zip(self._batch_iterator(N), filehandles):
count = self.write(fw, batch)
logging.debug("write %d records to %s" % (count, fw.name))
elif mode == "cycle":
handle = self._open(self.filename)
for record, fw in izip(handle, cycle(filehandles)):
count = self.write(fw, [record])
elif mode == "optimal":
"""
This mode is based on Longest Processing Time (LPT) algorithm:
A simple, often-used algorithm is the LPT algorithm (Longest
Processing Time) which sorts the jobs by its processing time and
then assigns them to the machine with the earliest end time so far.
This algorithm achieves an upper bound of 4/3 - 1/(3m) OPT.
Citation: <http://en.wikipedia.org/wiki/Multiprocessor_scheduling>
"""
endtime = [0] * N
handle = self._open(self.filename)
for record in handle:
mt, mi = min((x, i) for (i, x) in enumerate(endtime))
fw = filehandles[mi]
count = self.write(fw, [record])
endtime[mi] += len(record)
for fw in filehandles:
fw.close()
def longest_unique_prefix(query, targets, remove_self=True):
"""
Find the longest unique prefix for filename, when compared against a list of
filenames. Useful to simplify file names in a pool of files. See usage in
formats.fasta.pool().
"""
query = op.basename(query)
targets = [op.basename(x) for x in targets]
prefix_lengths = [len(op.commonprefix([query, name])) for name in targets]
if remove_self and len(query) in prefix_lengths:
prefix_lengths.remove(len(query))
longest_length = max(prefix_lengths)
return query[:longest_length + 1]
def check_exists(filename, oappend=False):
"""
Avoid overwriting some files accidentally.
"""
if op.exists(filename):
if oappend:
return oappend
logging.error("`{0}` found, overwrite (Y/N)?".format(filename))
overwrite = (raw_input() == 'Y')
else:
overwrite = True
return overwrite
def must_open(filename, mode="r", checkexists=False, skipcheck=False, \
oappend=False):
"""
Accepts filename and returns filehandle.
Checks on multiple files, stdin/stdout/stderr, .gz or .bz2 file.
"""
if isinstance(filename, list):
assert "r" in mode
if filename[0].endswith(".gz") or filename[0].endswith(".bz2"):
filename = " ".join(filename) # allow opening multiple gz/bz2 files
else:
import fileinput
return fileinput.input(filename)
if filename.startswith("s3://"):
from jcvi.utils.aws import pull_from_s3
filename = pull_from_s3(filename)
if filename in ("-", "stdin"):
assert "r" in mode
fp = sys.stdin
elif filename == "stdout":
assert "w" in mode
fp = sys.stdout
elif filename == "stderr":
assert "w" in mode
fp = sys.stderr
elif filename == "tmp" and mode == "w":
from tempfile import NamedTemporaryFile
fp = NamedTemporaryFile(delete=False)
elif filename.endswith(".gz"):
if 'r' in mode:
cmd = "zcat {0}".format(filename)
fp = popen(cmd, debug=False)
elif 'w' in mode:
import gzip
fp = gzip.open(filename, mode)
elif filename.endswith(".bz2"):
if 'r' in mode:
cmd = "bzcat {0}".format(filename)
fp = popen(cmd, debug=False)
elif 'w' in mode:
import bz2
fp = bz2.BZ2File(filename, mode)
else:
if checkexists:
assert mode == "w"
overwrite = (not op.exists(filename)) if skipcheck \
else check_exists(filename, oappend)
if overwrite:
if oappend:
fp = open(filename, "a")
else:
fp = open(filename, "w")
else:
logging.debug("File `{0}` already exists. Skipped."\
.format(filename))
return None
else:
fp = open(filename, mode)
return fp
bash_shebang = "#!/bin/bash"
python_shebang = """#!/usr/bin/env python
# -*- coding: UTF-8 -*-"""
def write_file(filename, contents, meta=None, skipcheck=False, append=False, tee=False):
if not meta:
suffix = filename.rsplit(".", 1)[-1]
if suffix == "sh":
meta = "run script"
elif suffix == "py":
meta = "python script"
else:
meta = "file"
meta_choices = ("file", "run script", "python script")
assert meta in meta_choices, "meta must be one of {0}".\
format("|".join(meta_choices))
contents = contents.strip()
shebang = "\n"
if "script" in meta:
if not append:
if meta == "run script":
shebang = bash_shebang
elif meta == "python script":
shebang = python_shebang
contents = "\n\n".join((shebang, contents))
fw = must_open(filename, "w", checkexists=True, skipcheck=skipcheck, oappend=append)
if fw:
print >> fw, contents
fw.close()
if tee:
print >> sys.stderr, contents
fileop = "appended" if append else "written"
message = "{0} {1} to `{2}`.".format(meta, fileop, filename)
logging.debug(message.capitalize())
if meta == "run script" and not append:
sh("chmod u+x {0}".format(filename))
def read_until(handle, start):
# read each line until a certain start, then puts the start tag back
while 1:
pos = handle.tell()
line = handle.readline()
if not line:
break
if line.startswith(start):
handle.seek(pos)
return
def read_block(handle, signal):
"""
Useful for reading block-like file formats, for example FASTA or OBO files;
such files usually start with some signal, and the lines between two signals
form a record
"""
signal_len = len(signal)
it = (x[1] for x in groupby(handle,
key=lambda row: row.strip()[:signal_len] == signal))
found_signal = False
for header in it:
header = list(header)
for h in header[:-1]:
h = h.strip()
if h[:signal_len] != signal:
continue
yield h, [] # Header only, no contents
header = header[-1].strip()
if header[:signal_len] != signal:
continue
found_signal = True
seq = list(s.strip() for s in it.next())
yield header, seq
if not found_signal:
handle.seek(0)
seq = list(s.strip() for s in handle)
yield None, seq
def is_number(s, cast=float):
"""
Check if a string is a number. Use cast=int to check if s is an integer.
"""
try:
cast(s) # for int, long and float
except ValueError:
return False
return True
def get_number(s, cast=int):
"""
Try to get a number out of a string, and cast it.
"""
import string
d = "".join(x for x in str(s) if x in string.digits)
return cast(d)
def flexible_cast(s):
if is_number(s, cast=int):
return int(s)
elif is_number(s, cast=float):
return float(s)
return s
def main():
actions = (
('pairwise', 'convert a list of IDs into all pairs'),
('split', 'split large file into N chunks'),
('reorder', 'reorder columns in tab-delimited files'),
('flatten', 'convert a list of IDs into one per line'),
('group', 'group elements in a table based on key (groupby) column'),
('setop', 'set operations on files'),
('join', 'join tabular-like files based on common column'),
('subset', 'subset tabular-like files based on common column'),
('truncate', 'remove lines from end of file'),
('append', 'append a column with fixed value'),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def pairwise(args):
"""
%prog pairwise ids
Convert a list of IDs into all pairs.
"""
from itertools import combinations
p = OptionParser(pairwise.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
idsfile, = args
ids = SetFile(idsfile)
ids = sorted(ids)
fw = open(idsfile + ".pairs", "w")
for a, b in combinations(ids, 2):
print >> fw, "\t".join((a, b))
fw.close()
def append(args):
"""
%prog append csvfile [tag]
Append a column with fixed value. If tag is missing then just append the
filename.
"""
p = OptionParser(append.__doc__)
p.set_sep()
p.set_outfile()
opts, args = p.parse_args(args)
nargs = len(args)
if nargs not in (1, 2):
sys.exit(not p.print_help())
csvfile = args[0]
tag = args[1] if nargs == 2 else csvfile
fp = must_open(csvfile)
fw = must_open(opts.outfile, "w")
for row in fp:
row = row.rstrip("\r\n")
row = opts.sep.join((row, tag))
print >> fw, row
def truncate(args):
"""
%prog truncate linecount filename
Remove linecount lines from the end of the file in-place. Borrowed from:
<http://superuser.com/questions/127786/how-to-remove-the-last-2-lines-of-a-very-large-file>
"""
p = OptionParser(truncate.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
number, filename = args
number = int(number)
count = 0
f = open(filename, "r+b")
f.seek(0, os.SEEK_END)
while f.tell() > 0:
f.seek(-1, os.SEEK_CUR)
char = f.read(1)
if char == '\n':
count += 1
if count == number + 1:
f.truncate()
print >> sys.stderr, "Removed {0} lines from end of file".format(number)
return number
f.seek(-1, os.SEEK_CUR)
if count < number + 1:
print >> sys.stderr, "No change: requested removal would leave empty file"
return -1
def flatten(args):
"""
%prog flatten filename > ids
Convert a list of IDs (say, multiple IDs per line) and move them into one
per line.
For example, convert this, to this:
A,B,C | A
1 | B
a,4 | C
| 1
| a
| 4
If multi-column file with multiple elements per column, zip then flatten like so:
A,B,C 2,10,gg | A,2
1,3 4 | B,10
| C,gg
| 1,4
| 3,na
"""
from itertools import izip_longest
p = OptionParser(flatten.__doc__)
p.set_sep(sep=",")
p.add_option("--zipflatten", default=None, dest="zipsep",
help="Specify if columns of the file should be zipped before" +
" flattening. If so, specify delimiter separating column elements" +
" [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
tabfile, = args
zipsep = opts.zipsep
fp = must_open(tabfile)
for row in fp:
if zipsep:
row = row.rstrip()
atoms = row.split(opts.sep)
frows = []
for atom in atoms:
frows.append(atom.split(zipsep))
print "\n".join([zipsep.join(x) for x in list(izip_longest(*frows, fillvalue="na"))])
else:
print row.strip().replace(opts.sep, "\n")
def group(args):
"""
%prog group tabfile > tabfile.grouped
Given a tab-delimited file, either group all elements within the file or
group the elements in the value column(s) based on the key (groupby) column
For example, convert this | into this
---------------------------------------
a 2 3 4 | a,2,3,4,5,6
a 5 6 | b,7,8
b 7 8 | c,9,10,11
c 9 |
c 10 11 |
If grouping by a particular column,
convert this | into this:
---------------------------------------------
a 2 3 4 | a 2,5 3,6 4
a 5 6 | b 7 8
b 7 8 | c 9,10 11
c 9 |
c 10 11 |
By default, it uniqifies all the grouped elements
"""
from jcvi.utils.cbook import AutoVivification
from jcvi.utils.grouper import Grouper
p = OptionParser(group.__doc__)
p.set_sep()
p.add_option("--groupby", default=None, type='int',
help="Default column to groupby [default: %default]")
p.add_option("--groupsep", default=',',
help="Separator to join the grouped elements [default: `%default`]")
p.add_option("--nouniq", default=False, action="store_true",
help="Do not uniqify the grouped elements [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
tabfile, = args
sep = opts.sep
groupby = opts.groupby
groupsep = opts.groupsep
cols = []
grouper = AutoVivification() if groupby is not None else Grouper()
fp = must_open(tabfile)
for row in fp:
row = row.rstrip()
atoms = row.split(sep)
if groupby is not None:
if len(cols) < len(atoms):
cols = [x for x in xrange(len(atoms))]
if groupby not in cols:
logging.error("groupby col index `{0}` is out of range".format(groupby))
sys.exit()
key = atoms[groupby]
for col in cols:
if col == groupby:
continue
if not grouper[key][col]:
grouper[key][col] = [] if opts.nouniq else set()
if col < len(atoms):
if groupsep in atoms[col]:
for atom in atoms[col].split(groupsep):
if opts.nouniq:
grouper[key][col].append(atom)
else:
grouper[key][col].add(atom)
else:
if opts.nouniq:
grouper[key][col].append(atoms[col])
else:
grouper[key][col].add(atoms[col])
else:
grouper.join(*atoms)
for key in grouper:
if groupby is not None:
line = []
for col in cols:
if col == groupby:
line.append(key)
elif col in grouper[key].keys():
line.append(groupsep.join(grouper[key][col]))
else:
line.append("na")
print sep.join(line)
else:
print groupsep.join(key)
def reorder(args):
"""
%prog reorder tabfile 1,2,4,3 > newtabfile
Reorder columns in tab-delimited files. The above syntax will print out a
new file with col-1,2,4,3 from the old file.
"""
import csv
p = OptionParser(reorder.__doc__)
p.set_sep()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
tabfile, order = args
sep = opts.sep
order = [int(x) - 1 for x in order.split(",")]
reader = csv.reader(must_open(tabfile), delimiter=sep)
writer = csv.writer(sys.stdout, delimiter=sep)
for row in reader:
newrow = [row[x] for x in order]
writer.writerow(newrow)
def split(args):
"""
%prog split file outdir N
Split file into N records. This allows splitting FASTA/FASTQ/TXT file
properly at boundary of records. Split is useful for parallelization
on input chunks.
Option --mode controls how the records are broken into chunks.
1. batch - chunk records sequentially, 1-100 in file 1, 101-200 in file 2, etc.
2. cycle - chunk records in Round Robin fashion
3. optimal - try to make split file of roughly similar sizes, using LPT
algorithm. This is the default.
"""
p = OptionParser(split.__doc__)
mode_choices = ("batch", "cycle", "optimal")
p.add_option("--all", default=False, action="store_true",
help="split all records [default: %default]")
p.add_option("--mode", default="optimal", choices=mode_choices,
help="Mode when splitting records [default: %default]")
p.add_option("--format", choices=("fasta", "fastq", "txt", "clust"),
help="input file format [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
filename, outdir, N = args
fs = FileSplitter(filename, outputdir=outdir,
format=opts.format, mode=opts.mode)
if opts.all:
logging.debug("option -all override N")
N = fs.num_records
else:
N = int(N)
assert N > 0, "N must be > 0"
logging.debug("split file into %d chunks" % N)
fs.split(N)
return fs
def join(args):
"""
%prog join file1.txt(pivotfile) file2.txt ..
Join tabular-like files based on common column.
--column specifies the column index to pivot on.
Use comma to separate multiple values if the pivot column is different
in each file. Maintain the order in the first file.
--sep specifies the column separators, default to tab.
Use comma to separate multiple values if the column separator is different
in each file.
"""
from jcvi.utils.iter import flatten
p = OptionParser(join.__doc__)
p.add_option("--column", default="0",
help="0-based column id, multiple values allowed [default: %default]")
p.set_sep(multiple=True)
p.add_option("--noheader", default=False, action="store_true",
help="Do not print header [default: %default]")
p.add_option("--na", default="na",
help="Value for unjoined data [default: %default]")
p.add_option("--keysep", default=",",
help="specify separator joining multiple elements in the key column"
+ " of the pivot file [default: %default]")
p.set_outfile()
opts, args = p.parse_args(args)
nargs = len(args)
keysep = opts.keysep
if len(args) < 2:
sys.exit(not p.print_help())
na = opts.na
c = opts.column
if "," in c:
cc = [int(x) for x in c.split(",")]
else:
cc = [int(c)] * nargs
assert len(cc) == nargs, "Column index number != File number"
s = opts.sep
if "," in s:
ss = [x for x in s.split(",")]
else:
ss = [s] * nargs
assert len(ss) == nargs, "column separator number != File number"
# Maintain the first file line order, and combine other files into it
pivotfile = args[0]
files = [DictFile(f, keypos=c, valuepos=None, delimiter=s) \
for f, c, s in zip(args, cc, ss)]
otherfiles = files[1:]
header = "\t".join(flatten([op.basename(x.filename)] * x.ncols \
for x in files))
fp = must_open(pivotfile)
fw = must_open(opts.outfile, "w")
if not opts.noheader:
print >> fw, header
for row in fp:
row = row.rstrip()
atoms = row.split(ss[0])
newrow = atoms
key = atoms[cc[0]]
keys = key.split(keysep) if keysep in key else [key]
for d in otherfiles:
drows = list()
for key in keys:
drows.append(d.get(key, [na] * d.ncols))
drow = [keysep.join(x) for x in list(zip(*drows))]
newrow += drow
print >> fw, "\t".join(newrow)
def subset(args):
"""
%prog subset file1.txt(pivotfile) file2.txt ..
subset tabular-like file1 based on common column with file 2.
Normally file1 should have unique row entries.
If more than one file2 are provided, they must have same column separators.
Multiple file2's will be concatenated in the output.
--column specifies the column index (0-based) to pivot on.
Use comma to separate multiple values if the pivot column is different
in each file. Maintain the order in the first file.
--sep specifies the column separators, default to tab.
Use comma to separate multiple values if the column separator is different
in each file.
"""
p = OptionParser(subset.__doc__)
p.add_option("--column", default="0",
help="0-based column id, multiple values allowed [default: %default]")
p.set_sep(multiple=True)
p.add_option("--pivot", default=1, type="int",
help="1 for using order in file1, 2 for using order in \
file2 [default: %default]")
p.set_outfile()
opts, args = p.parse_args(args)
nargs = len(args)
if len(args) < 2:
sys.exit(not p.print_help())
c = opts.column
if "," in c:
cc = [int(x) for x in c.split(",")]
assert len(set(cc[1:])) == 1, \
"Multiple file2's must have same column index."
cc = cc[0:2]
else:
cc = [int(c)] * 2
s = opts.sep
if "," in s:
ss = [x for x in s.split(",")]
assert len(set(cc[1:])) == 1, \
"Multiple file2's must have same column separator."
ss = ss[0:2]
else:
ss = [s] * 2
if nargs > 2:
file2 = FileMerger(args[1:], outfile="concatenatedFile2").merge()
else:
file2 = args[1]
newargs = [args[0], file2]
files = [DictFile(f, keypos=c, valuepos=None, delimiter=s) \
for f, c, s in zip(newargs, cc, ss)]
pivot = 0 if opts.pivot==1 else 1
fp = open(newargs[pivot])
fw = must_open(opts.outfile, "w")
for row in fp:
row = row.rstrip()
atoms = row.split(ss[pivot])
key = atoms[cc[pivot]]
d = files[1-pivot]
if key in d:
print >> fw, ss[0].join(files[0][key])
if nargs > 2:
FileShredder([file2])
def setop(args):
"""
%prog setop "fileA & fileB" > newfile
Perform set operations, but on files instead of sets. The files (fileA and fileB) contain
list of ids. The operator is one of the four:
|: union (elements found in either file)
&: intersection (elements found in both)
-: difference (elements in fileA but not in fileB)
^: symmetric difference (elements found in either set but not both)
Please quote the argument to avoid shell interpreting | and &.
"""
from jcvi.utils.natsort import natsorted
p = OptionParser(setop.__doc__)
p.add_option("--column", default=0, type="int",
help="The column to extract, 0-based, -1 to disable [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
statement, = args
fa, op, fb = statement.split()
assert op in ('|', '&', '-', '^')
column = opts.column
fa = SetFile(fa, column=column)
fb = SetFile(fb, column=column)
if op == '|':
t = fa | fb
elif op == '&':
t = fa & fb
elif op == '-':
t = fa - fb
elif op == '^':
t = fa ^ fb
for x in natsorted(t):
print x
if __name__ == '__main__':
main()
| 30.366732 | 97 | 0.551334 |
4a21ec55e38a00aed65b6c883517237a9855c49c | 6,090 | py | Python | make_photo_gallery.py | Tapyr/tapyr | 4235fba6dce169fe747cce4d17d88dcf4a3f9f1d | ["BSD-3-Clause"] | 6 | 2016-12-10T17:51:10.000Z | 2021-10-11T07:51:48.000Z | make_photo_gallery.py | Tapyr/tapyr | 4235fba6dce169fe747cce4d17d88dcf4a3f9f1d | ["BSD-3-Clause"] | null | null | null | make_photo_gallery.py | Tapyr/tapyr | 4235fba6dce169fe747cce4d17d88dcf4a3f9f1d | ["BSD-3-Clause"] | 3 | 2020-03-29T07:37:03.000Z | 2021-01-21T16:08:40.000Z |
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2014 Mag. Christian Tanzer. All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. [email protected]
# ****************************************************************************
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
# make_photo_gallery
#
# Purpose
# Make a photo gallery
#
# Revision Dates
# 7-May-2008 (CT) Creation
# 8-May-2008 (CT) `-year` added
# 8-May-2008 (CT) Set `ImageFile.MAXBLOCK` to avoid IOError during `save`
# 20-Mar-2009 (CT) `convert_one` factored and `-add_to_dir` added
# 1-Dec-2009 (CT) Ignore `__getslice__` warnings
# 14-Jan-2011 (CT) `-format`, `-color`, `-x_off`, and `-y_off` added
# 16-Jun-2013 (CT) Use `TFL.CAO`, not `TFL.Command_Line`
# 1-Oct-2014 (CT) Add `fix_rotation`
# ««revision-date»»···
#--
from _CAL.Date import Date
from _TFL import TFL
from _TFL import sos
from _TFL.Filename import *
from _TFL.Regexp import Regexp, re
import _TFL.CAO
import _TFL.FCM
import warnings
warnings.filterwarnings ("ignore", module = "^PIL.*")
warnings.filterwarnings \
( "ignore", "in 3.x, __getslice__ has been removed; use __getitem__")
from PIL import Image, ImageDraw, ImageFile, ImageFont, ExifTags
try :
import plumbum
except ImportError :
plumbum = None
### http://mail.python.org/pipermail/image-sig/1999-August/000816.html
### to avoid exception
### IOError: encoder error -2 when writing image file
ImageFile.MAXBLOCK = 1000000 # default is 64k
_rotate_pat = Regexp (r"Rotate\s+(?P<angle>\d+)\s+CW")
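### Illustrative note: `exiftool -Orientation` prints lines such as
### "Orientation : Rotate 90 CW"; the pattern above captures the angle
### (here 90) so it can be passed to `jpegtran -rotate` in fix_rotation.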
def convert_one \
( src, name, i_size, t_size, holder, year, font, imp, thp
, format, color, x_off, y_off
, temp_dir = None
) :
f_src = fix_rotation (src, temp_dir)
im = Image.open (f_src)
th = im.copy ()
im.thumbnail (i_size, Image.ANTIALIAS)
th.thumbnail (t_size, Image.ANTIALIAS)
if holder :
xo = x_off if x_off > 0 else im.size [0] + x_off
yo = y_off if y_off > 0 else im.size [1] + y_off
draw = ImageDraw.Draw (im)
draw.text \
((xo, yo), "(C) %s %s" % (year, holder), fill = color, font = font)
print (name, im.size, th.size)
im.save (imp, format, progressive = True)
th.save (thp, format, progressive = True)
# end def convert_one
def fix_rotation (src, temp_dir) :
result = src
if plumbum is not None :
pbl = plumbum.local
rot_pat = _rotate_pat
jt_rot = pbl ["jpegtran"] \
["-copy", "all", "-perfect", "-optimize", "-rotate"]
xt = pbl ["exiftool"] ["-Orientation"]
orient = xt (src)
if rot_pat.search (orient) :
angle = rot_pat.angle
result = Filename (Filename (src).base_ext, default_dir = temp_dir).name
cmd = jt_rot [str (angle), src] > result
cmd ()
return result
# end def fix_rotation
def _main (cmd) :
font = ImageFont.load_default ()
color = cmd.color
fmt = cmd.format
ext = fmt.lower ()
if ext == "jpeg" :
ext = "jpg"
holder = cmd.photographer
x_off = cmd.x_off
y_off = cmd.y_off
year = cmd.year
i_size = cmd.i_size, cmd.i_size
t_size = cmd.t_size, cmd.t_size
td = sos.expanded_path (cmd.target_dir)
with TFL.temp_dir () as temp_dir :
if cmd.add_to_dir :
if not sos.path.isdir (td) :
print ("Making directory %s" % (td, ))
sos.mkdir_p (td)
for src in cmd.argv [1:] :
src, name = src.split ("=")
if not name :
name = src
name = Filename (name).base
imp = sos.path.join (td, "%s_im.%s" % (name, ext))
thp = sos.path.join (td, "%s_th.%s" % (name, ext))
convert_one \
( src, name, i_size, t_size, holder, year, font, imp, thp
, fmt, color, x_off, y_off
, temp_dir
)
else :
td_im = sos.path.join (td, "im")
td_th = sos.path.join (td, "th")
for x in td_im, td_th :
if not sos.path.isdir (x) :
print ("Making directory %s" % (x, ))
sos.mkdir_p (x)
pid = cmd.start_pid
for src in sorted (sos.expanded_globs (* cmd.argv [1:])) :
pid += 1
name = "%04d.%s" % (pid, ext)
imp = sos.path.join (td_im, name)
thp = sos.path.join (td_th, name)
convert_one \
( src, name, i_size, t_size, holder, year, font, imp, thp
, fmt, color, x_off, y_off
, temp_dir
)
# end def _main
today = Date ()
year = today.year
_Command = TFL.CAO.Cmd \
( handler = _main
, args =
( "target_dir:P?Directory to put gallery into"
, "picture:P?Name of picture(s) to convert and put into `target_dir`"
)
, opts =
( "add_to_dir:B"
"?Add pictures to existing directory "
"(no `im` and `th` subdirectories)"
, "color:S=white?Color to use for copyright notice"
, "format:S=JPEG?Image format used for output"
, "i_size:I=800?Size of images in gallery (larger dimension)"
, "photographer:S?Name of photographer"
, "start_pid:I=0?Start value for picture count"
, "t_size:I=150?Size of thumbnails in gallery (larger dimension)"
, "x_off:I=5?X offset of copyright notice"
, "y_off:I=-15?Y offset of copyright notice"
, "-year:I=%s?Year for copyright" % (year, )
)
, min_args = 2
)
if __name__ == "__main__" :
_Command ()
### __END__ make_photo_gallery
| 34.40678 | 84 | 0.534647 |
4a21eccbbad657de37602bc0cccb202df3e04148 | 2,292 | py | Python | airflow/operators/pig_operator.py | rubeshdcube/incubator-airflow | 5419fbb78a2ea2388456c356d2f899ea1991b2de | ["Apache-2.0"] | 6 | 2016-04-20T20:40:43.000Z | 2022-02-20T10:32:00.000Z | airflow/operators/pig_operator.py | curest0x1021/incubator-airflow | e6d3160a061dbaa6042d524095dcd1cbc15e0bcd | ["Apache-2.0"] | 8 | 2017-09-07T22:20:35.000Z | 2021-05-14T17:35:27.000Z | airflow/operators/pig_operator.py | curest0x1021/incubator-airflow | e6d3160a061dbaa6042d524095dcd1cbc15e0bcd | ["Apache-2.0"] | 9 | 2017-08-24T15:47:44.000Z | 2022-02-14T03:30:49.000Z |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from airflow.hooks.pig_hook import PigCliHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class PigOperator(BaseOperator):
"""
Executes pig script.
:param pig: the pig latin script to be executed
:type pig: string
:param pig_cli_conn_id: reference to the Hive database
:type pig_cli_conn_id: string
:param pigparams_jinja_translate: when True, pig params-type templating
${var} gets translated into jinja-type templating {{ var }}. Note that
you may want to use this along with the
``DAG(user_defined_macros=myargs)`` parameter. View the DAG
object documentation for more details.
:type pigparams_jinja_translate: boolean
"""
template_fields = ('pig',)
template_ext = ('.pig', '.piglatin',)
ui_color = '#f0e4ec'
@apply_defaults
def __init__(
self, pig,
pig_cli_conn_id='pig_cli_default',
pigparams_jinja_translate=False,
*args, **kwargs):
super(PigOperator, self).__init__(*args, **kwargs)
self.pigparams_jinja_translate = pigparams_jinja_translate
self.pig = pig
self.pig_cli_conn_id = pig_cli_conn_id
def get_hook(self):
return PigCliHook(pig_cli_conn_id=self.pig_cli_conn_id)
def prepare_template(self):
if self.pigparams_jinja_translate:
self.pig = re.sub(
"(\$([a-zA-Z_][a-zA-Z0-9_]*))", "{{ \g<2> }}", self.pig)
def execute(self, context):
logging.info('Executing: ' + self.pig)
self.hook = self.get_hook()
self.hook.run_cli(pig=self.pig)
def on_kill(self):
self.hook.kill()
| 32.742857 | 78 | 0.679319 |
4a21ee2903b0ad595960b9cc8e2b40960d26304c | 1,155 | py | Python | setup.py | gurneesh/harvey | 393308bfc2a833ddbbfe7aca4ddf157a7593aa73 | ["MIT"] | null | null | null | setup.py | gurneesh/harvey | 393308bfc2a833ddbbfe7aca4ddf157a7593aa73 | ["MIT"] | null | null | null | setup.py | gurneesh/harvey | 393308bfc2a833ddbbfe7aca4ddf157a7593aa73 | ["MIT"] | null | null | null |
import setuptools
with open('README.md', 'r') as fh:
long_description = fh.read()
REQUIREMENTS = [
'flask == 1.*', # TODO: bump to v2 after thorough testing
'requests == 2.*',
'requests_unixsocket == 0.2.*',
'slackclient == 2.*',
'python-dotenv == 0.17.*',
]
DEV_REQUIREMENTS = [
'coveralls == 3.*',
'flake8',
'mock == 4.*',
'pytest == 6.*',
'pytest-cov == 2.*',
]
setuptools.setup(
name='harvey-ci',
version='0.12.0',
description='Your personal CI/CD and Docker orchestration platform.',
long_description=long_description,
long_description_content_type="text/markdown",
url='http://github.com/justintime50/harvey',
author='Justintime50',
license='MIT',
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=REQUIREMENTS,
extras_require={
'dev': DEV_REQUIREMENTS,
},
entry_points={
'console_scripts': ['harvey-ci=harvey.app:main'],
},
python_requires='>=3.6',
)
| 25.108696 | 73 | 0.606061 |
4a21ee3d0605529f089e67d4ae57631a2004a642 | 1,368 | py | Python | metricbeat/tests/system/test_kafka.py | wklken/beats | 60e8999da198f1c8c4242c8afc77e39a82b6e47f | ["Apache-2.0"] | 16 | 2018-08-22T03:29:31.000Z | 2021-09-05T14:01:10.000Z | metricbeat/tests/system/test_kafka.py | wklken/beats | 60e8999da198f1c8c4242c8afc77e39a82b6e47f | ["Apache-2.0"] | 3 | 2020-05-29T13:53:51.000Z | 2021-06-01T22:19:56.000Z | metricbeat/tests/system/test_kafka.py | andyhao567/beats | 242524e6d8e6b157ad1c2e1516dc49fd353fa895 | ["Apache-2.0"] | 6 | 2018-10-31T06:55:01.000Z | 2021-02-06T18:50:04.000Z |
import os
import metricbeat
import unittest
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest
class KafkaTest(metricbeat.BaseTest):
COMPOSE_SERVICES = ['kafka']
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
def test_partition(self):
"""
kafka partition metricset test
"""
self.create_topic()
self.render_config_template(modules=[{
"name": "kafka",
"metricsets": ["partition"],
"hosts": self.get_hosts(),
"period": "1s"
}])
proc = self.start_beat()
self.wait_until(lambda: self.output_lines() > 0, max_timeout=20)
proc.check_kill_and_wait()
output = self.read_output_json()
self.assertTrue(len(output) >= 1)
evt = output[0]
print(evt)
self.assert_fields_are_documented(evt)
def create_topic(self):
from kafka import KafkaProducer
producer = KafkaProducer(bootstrap_servers=self.get_hosts()[0],
retries=20, retry_backoff_ms=500)
producer.send('foobar', b'some_message_bytes')
def get_hosts(self):
return [self.compose_hosts()[0] + ':' +
os.getenv('KAFKA_PORT', '9092')]
class Kafka_0_10_2_Test(KafkaTest):
COMPOSE_SERVICES = ['kafka_0_10_2']
| 27.36 | 74 | 0.61769 |
4a21ee6a79cd156c7dd2b5ea54b8343632b359ce | 1,535 | py | Python | topsim/algorithms/scheduling.py | top-sim/topsim | 90cb3cff2612ced3d51f94fe852dc814dcca7730 | ["MIT"] | 2 | 2022-03-30T01:19:20.000Z | 2022-03-30T02:53:51.000Z | topsim/algorithms/scheduling.py | firewood1996/topsim | 90cb3cff2612ced3d51f94fe852dc814dcca7730 | ["MIT"] | 15 | 2020-10-21T08:35:12.000Z | 2022-01-20T07:55:24.000Z | topsim/algorithms/scheduling.py | firewood1996/topsim | 90cb3cff2612ced3d51f94fe852dc814dcca7730 | ["MIT"] | 1 | 2021-11-02T14:21:05.000Z | 2021-11-02T14:21:05.000Z |
"""
Algorithm presents the abstract base class for any Scheduling algorithm.
"""
from abc import ABC, abstractmethod
class Algorithm(ABC):
"""
Abstract base class for all Scheduling Algorithms (used in the dynamic
allocation by the 'scheduler').
The Algorithm base class only requires the single `run()` method to be
implemented; the `to_df` method may simply be left as a stub.
Notes
-----
It is important to note that the simulation will run with an 'incorrect' algorithm. An algorithm is 'incorrect' if it attempts to:
- Allocate to a machine that is already occupied
- Schedule a task to a machine that has already been scheduled.
These will raise RuntimeErrors.
It is also important for the algorithm to take into account
"""
def __init__(self):
self.name = "AbstractAlgorithm"
@abstractmethod
def run(self, cluster, clock, plan, schedule):
"""
Parameters
----------
cluster: :py:obj:`~topsim.core.cluster.Cluster`
The cluster object for the simulation.
clock: int
Current simulation time (usually generated by)
plan
schedule
Returns
-------
"""
pass
@abstractmethod
def to_df(self):
"""
Produce a Pandas DataFrame object to return current state of the
scheduling algorithm
Returns
-------
df : pandas.DataFrame
DataFrame with current state
"""
| 23.615385 | 135 | 0.616287 |
4a21eeb258ad5f918c06009c0830d960dcb1c5ba | 8,878 | py | Python | pandapower/results_bus.py | suzannejanssen/pandapower | 8d0d422c28924c85e774e0e357e4abff86ff3c55 | ["BSD-3-Clause"] | null | null | null | pandapower/results_bus.py | suzannejanssen/pandapower | 8d0d422c28924c85e774e0e357e4abff86ff3c55 | ["BSD-3-Clause"] | null | null | null | pandapower/results_bus.py | suzannejanssen/pandapower | 8d0d422c28924c85e774e0e357e4abff86ff3c55 | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2018 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import numpy as np
from pandapower.auxiliary import _sum_by_group
from pandapower.idx_bus import VM, VA, PD, QD, LAM_P, LAM_Q, BASE_KV
from pandapower.idx_gen import PG, QG
def _set_buses_out_of_service(ppc):
disco = np.where(ppc["bus"][:, 1] == 4)[0]
ppc["bus"][disco, VM] = np.nan
ppc["bus"][disco, VA] = np.nan
ppc["bus"][disco, PD] = 0
ppc["bus"][disco, QD] = 0
def _get_bus_v_results(net, ppc):
ac = net["_options"]["ac"]
bus_idx = _get_bus_idx(net)
if ac:
net["res_bus"]["vm_pu"] = ppc["bus"][bus_idx][:, VM]
# voltage angles
net["res_bus"]["va_degree"] = ppc["bus"][bus_idx][:, VA]
def _get_bus_idx(net):
bus_lookup = net["_pd2ppc_lookups"]["bus"]
ppi = net["bus"].index.values
bus_idx = bus_lookup[ppi]
return bus_idx
def _get_opf_marginal_prices(net, ppc):
bus_idx = _get_bus_idx(net)
net["res_bus"]["lam_p"] = ppc["bus"][bus_idx][:, LAM_P]
net["res_bus"]["lam_q"] = ppc["bus"][bus_idx][:, LAM_Q]
def _get_bus_results(net, ppc, bus_pq):
ac = net["_options"]["ac"]
mode = net["_options"]["mode"]
# write sum of p and q values to bus
net["res_bus"]["p_mw"].values[:] = bus_pq[:, 0]
if ac:
net["res_bus"]["q_mvar"].values[:] = bus_pq[:, 1]
# opf variables
if mode == "opf":
_get_opf_marginal_prices(net, ppc)
# update index in res bus bus
net["res_bus"].index = net["bus"].index
def write_voltage_dependend_load_results(net, p, q, b):
l = net["load"]
_is_elements = net["_is_elements"]
if len(l) > 0:
load_is = _is_elements["load"]
scaling = l["scaling"].values
bus_lookup = net["_pd2ppc_lookups"]["bus"]
lidx = bus_lookup[l["bus"].values]
voltage_depend_loads = net["_options"]["voltage_depend_loads"]
cz = l["const_z_percent"].values / 100.
ci = l["const_i_percent"].values / 100.
cp = 1. - (cz + ci)
# constant power
pl = l["p_mw"].values * scaling * load_is * cp
net["res_load"]["p_mw"] = pl
p = np.hstack([p, pl])
ql = l["q_mvar"].values * scaling * load_is * cp
net["res_load"]["q_mvar"] = ql
q = np.hstack([q, ql])
b = np.hstack([b, l["bus"].values])
if voltage_depend_loads:
# constant impedance and constant current
vm_l = net["_ppc"]["bus"][lidx,7]
volt_depend = ci * vm_l + cz * vm_l ** 2
pl = l["p_mw"].values * scaling * load_is * volt_depend
net["res_load"]["p_mw"] += pl
p = np.hstack([p, pl])
ql = l["q_mvar"].values * scaling * load_is * volt_depend
net["res_load"]["q_mvar"] += ql
q = np.hstack([q, ql])
b = np.hstack([b, l["bus"].values])
return p, q, b
def write_pq_results_to_element(net, ppc, element):
"""
get p_mw and q_mvar for a specific pq element ("load", "sgen"...).
This function basically writes values element table to res_element table
:param net: pandapower net
:param element: element name (str)
:return:
"""
# info from net
_is_elements = net["_is_elements"]
ac = net["_options"]["ac"]
# info element
el_data = net[element]
res_ = "res_%s"%element
ctrl_ = "%s_controllable"%element
is_controllable = False
if ctrl_ in _is_elements:
controlled_elements = net[element][net._is_elements[ctrl_]].index
gen_idx = net._pd2ppc_lookups[ctrl_][controlled_elements]
gen_sign = 1 if element == "sgen" else -1
is_controllable = True
# Wards and xwards have different names in their element table, but not in res table. Also no scaling -> Fix...
p_mw = "ps_mw" if element in ["ward", "xward"] else "p_mw"
q_mvar = "qs_mvar" if element in ["ward", "xward"] else "q_mvar"
scaling = el_data["scaling"].values if element not in ["ward", "xward"] else 1.0
element_in_service = _is_elements[element]
# P result in kw to element
net[res_]["p_mw"].values[:] = el_data[p_mw].values * scaling * element_in_service
if is_controllable:
net[res_]["p_mw"].loc[controlled_elements] = ppc["gen"][gen_idx, PG] * gen_sign
if ac:
# Q result in kvar to element
net[res_]["q_mvar"].values[:] = el_data[q_mvar].values * scaling * element_in_service
if is_controllable:
net[res_]["q_mvar"].loc[controlled_elements] = ppc["gen"][gen_idx, QG] * gen_sign
return net
def get_p_q_b(net, element):
ac = net["_options"]["ac"]
res_ = "res_" + element
# bus values are needed for stacking
b = net[element]["bus"].values
p = net[res_]["p_mw"]
q = net[res_]["q_mvar"] if ac else np.zeros_like(p)
return p, q, b
def _get_p_q_results(net, ppc, bus_lookup_aranged):
bus_pq = np.zeros(shape=(len(net["bus"].index), 2), dtype=np.float)
b, p, q = np.array([]), np.array([]), np.array([])
ac = net["_options"]["ac"]
if net["_options"]["voltage_depend_loads"] and ac:
# voltage dependend loads need special treatment here
p, q, b = write_voltage_dependend_load_results(net, p, q, b)
elements = ["sgen", "storage", "ward", "xward"]
else:
elements = ["load", "sgen", "storage", "ward", "xward"]
for element in elements:
if len(net[element]):
write_pq_results_to_element(net, ppc, element)
p_el, q_el, bus_el = get_p_q_b(net, element)
if element == "sgen":
p = np.hstack([p, -p_el])
q = np.hstack([q, -q_el])
else:
p = np.hstack([p, p_el])
q = np.hstack([q, q_el])
b = np.hstack([b, bus_el])
if not ac:
q = np.zeros(len(p))
# sum pq results from every element to be written to net['bus'] later on
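# illustrative (assuming _sum_by_group groups by its first argument):
# buses [3, 3, 5] with p = [10.0, 5.0, 2.0] collapse to b_pp = [3, 5]
# and vp = [15.0, 2.0]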
b_pp, vp, vq = _sum_by_group(b.astype(int), p, q)
b_ppc = bus_lookup_aranged[b_pp]
bus_pq[b_ppc, 0] = vp
bus_pq[b_ppc, 1] = vq
return bus_pq
def _get_shunt_results(net, ppc, bus_lookup_aranged, bus_pq):
ac = net["_options"]["ac"]
b, p, q = np.array([]), np.array([]), np.array([])
_is_elements = net["_is_elements"]
bus_lookup = net["_pd2ppc_lookups"]["bus"]
s = net["shunt"]
if len(s) > 0:
sidx = bus_lookup[s["bus"].values]
shunt_is = _is_elements["shunt"]
u_shunt = ppc["bus"][sidx, VM]
step = s["step"]
v_ratio = (ppc["bus"][sidx, BASE_KV] / net["shunt"]["vn_kv"].values) ** 2
u_shunt = np.nan_to_num(u_shunt)
p_shunt = u_shunt ** 2 * net["shunt"]["p_mw"].values * shunt_is * v_ratio * step
net["res_shunt"]["p_mw"].values[:] = p_shunt
p = np.hstack([p, p_shunt])
if ac:
net["res_shunt"]["vm_pu"].values[:] = u_shunt
q_shunt = u_shunt ** 2 * net["shunt"]["q_mvar"].values * shunt_is * v_ratio * step
net["res_shunt"]["q_mvar"].values[:] = q_shunt
q = np.hstack([q, q_shunt])
b = np.hstack([b, s["bus"].values])
w = net["ward"]
if len(w) > 0:
widx = bus_lookup[w["bus"].values]
ward_is = _is_elements["ward"]
u_ward = ppc["bus"][widx, VM]
u_ward = np.nan_to_num(u_ward)
p_ward = u_ward ** 2 * net["ward"]["pz_mw"].values * ward_is
net["res_ward"]["p_mw"].values[:] = net["res_ward"]["p_mw"].values + p_ward
p = np.hstack([p, p_ward])
if ac:
net["res_ward"]["vm_pu"].values[:] = u_ward
q_ward = u_ward ** 2 * net["ward"]["qz_mvar"].values * ward_is
net["res_ward"]["q_mvar"].values[:] = net["res_ward"]["q_mvar"].values + q_ward
q = np.hstack([q, q_ward])
b = np.hstack([b, w["bus"].values])
xw = net["xward"]
if len(xw) > 0:
widx = bus_lookup[xw["bus"].values]
xward_is = _is_elements["xward"]
u_xward = ppc["bus"][widx, VM]
u_xward = np.nan_to_num(u_xward)
p_xward = u_xward ** 2 * net["xward"]["pz_mw"].values * xward_is
net["res_xward"]["p_mw"].values[:] = net["res_xward"]["p_mw"].values + p_xward
p = np.hstack([p, p_xward])
if ac:
net["res_xward"]["vm_pu"].values[:] = u_xward
q_xward = u_xward ** 2 * net["xward"]["qz_mvar"].values * xward_is
net["res_xward"]["q_mvar"].values[:] = net["res_xward"]["q_mvar"].values + q_xward
q = np.hstack([q, q_xward])
b = np.hstack([b, xw["bus"].values])
if not ac:
q = np.zeros(len(p))
b_pp, vp, vq = _sum_by_group(b.astype(int), p, q)
b_ppc = bus_lookup_aranged[b_pp]
bus_pq[b_ppc, 0] += vp
if ac:
bus_pq[b_ppc, 1] += vq
| 34.410853 | 115 | 0.575693 |
4a21f020550ea6baf20b0fb77cdb7e73dc22c703 | 12,962 | py | Python | tools/tcpstates.py | Birch-san/bcc | b374be886b555ead8feaad9ec2d86ccd39d748dd | ["Apache-2.0"] | 1 | 2021-07-07T12:38:15.000Z | 2021-07-07T12:38:15.000Z | tools/tcpstates.py | Birch-san/bcc | b374be886b555ead8feaad9ec2d86ccd39d748dd | ["Apache-2.0"] | null | null | null | tools/tcpstates.py | Birch-san/bcc | b374be886b555ead8feaad9ec2d86ccd39d748dd | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @lint-avoid-python-3-compatibility-imports
#
# tcpstates Trace the TCP session state changes with durations.
# For Linux, uses BCC, BPF. Embedded C.
#
# USAGE: tcpstates [-h] [-C] [-S] [interval [count]]
#
# This uses the sock:inet_sock_set_state tracepoint, added to Linux 4.16.
# Linux 4.16 also adds more state transitions so that they can be traced.
#
# Copyright 2018 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 20-Mar-2018 Brendan Gregg Created this.
from __future__ import print_function
from bcc import BPF
import argparse
from socket import inet_ntop, AF_INET, AF_INET6
from struct import pack
import ctypes as ct
from time import strftime, time
from os import getuid
# arguments
examples = """examples:
./tcpstates # trace all TCP state changes
./tcpstates -t # include timestamp column
./tcpstates -T # include time column (HH:MM:SS)
./tcpstates -w # wider columns (fit IPv6)
./tcpstates -stT # csv output, with times & timestamps
./tcpstates -Y # log events to the systemd journal
./tcpstates -L 80 # only trace local port 80
./tcpstates -L 80,81 # only trace local ports 80 and 81
./tcpstates -D 80 # only trace remote port 80
"""
parser = argparse.ArgumentParser(
description="Trace TCP session state changes and durations",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-T", "--time", action="store_true",
help="include time column on output (HH:MM:SS)")
parser.add_argument("-t", "--timestamp", action="store_true",
help="include timestamp on output (seconds)")
parser.add_argument("-w", "--wide", action="store_true",
help="wide column output (fits IPv6 addresses)")
parser.add_argument("-s", "--csv", action="store_true",
help="comma separated values output")
parser.add_argument("-L", "--localport",
help="comma-separated list of local ports to trace.")
parser.add_argument("-D", "--remoteport",
help="comma-separated list of remote ports to trace.")
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
parser.add_argument("-Y", "--journal", action="store_true",
help="log session state changes to the systemd journal")
args = parser.parse_args()
debug = 0
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#define KBUILD_MODNAME "foo"
#include <linux/tcp.h>
#include <net/sock.h>
#include <bcc/proto.h>
BPF_HASH(last, struct sock *, u64);
// separate data structs for ipv4 and ipv6
struct ipv4_data_t {
u64 ts_us;
u64 skaddr;
u32 saddr;
u32 daddr;
u64 span_us;
u32 pid;
u32 ports;
u32 oldstate;
u32 newstate;
char task[TASK_COMM_LEN];
};
BPF_PERF_OUTPUT(ipv4_events);
struct ipv6_data_t {
u64 ts_us;
u64 skaddr;
unsigned __int128 saddr;
unsigned __int128 daddr;
u64 span_us;
u32 pid;
u32 ports;
u32 oldstate;
u32 newstate;
char task[TASK_COMM_LEN];
};
BPF_PERF_OUTPUT(ipv6_events);
struct id_t {
u32 pid;
char task[TASK_COMM_LEN];
};
TRACEPOINT_PROBE(sock, inet_sock_set_state)
{
if (args->protocol != IPPROTO_TCP)
return 0;
u32 pid = bpf_get_current_pid_tgid() >> 32;
// sk is used as a UUID
struct sock *sk = (struct sock *)args->skaddr;
// lport is either used in a filter here, or later
u16 lport = args->sport;
FILTER_LPORT
// dport is either used in a filter here, or later
u16 dport = args->dport;
FILTER_DPORT
// calculate delta
u64 *tsp, delta_us;
tsp = last.lookup(&sk);
if (tsp == 0)
delta_us = 0;
else
delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;
if (args->family == AF_INET) {
struct ipv4_data_t data4 = {
.span_us = delta_us,
.oldstate = args->oldstate,
.newstate = args->newstate };
data4.skaddr = (u64)args->skaddr;
data4.ts_us = bpf_ktime_get_ns() / 1000;
__builtin_memcpy(&data4.saddr, args->saddr, sizeof(data4.saddr));
__builtin_memcpy(&data4.daddr, args->daddr, sizeof(data4.daddr));
// a workaround until data4 compiles with separate lport/dport
data4.ports = dport + ((0ULL + lport) << 32);
data4.pid = pid;
bpf_get_current_comm(&data4.task, sizeof(data4.task));
ipv4_events.perf_submit(args, &data4, sizeof(data4));
} else /* 6 */ {
struct ipv6_data_t data6 = {
.span_us = delta_us,
.oldstate = args->oldstate,
.newstate = args->newstate };
data6.skaddr = (u64)args->skaddr;
data6.ts_us = bpf_ktime_get_ns() / 1000;
__builtin_memcpy(&data6.saddr, args->saddr_v6, sizeof(data6.saddr));
__builtin_memcpy(&data6.daddr, args->daddr_v6, sizeof(data6.daddr));
// a workaround until data6 compiles with separate lport/dport
data6.ports = dport + ((0ULL + lport) << 32);
data6.pid = pid;
bpf_get_current_comm(&data6.task, sizeof(data6.task));
ipv6_events.perf_submit(args, &data6, sizeof(data6));
}
u64 ts = bpf_ktime_get_ns();
last.update(&sk, &ts);
return 0;
}
"""
if (not BPF.tracepoint_exists("sock", "inet_sock_set_state")):
print("ERROR: tracepoint sock:inet_sock_set_state missing "
"(added in Linux 4.16). Exiting")
exit()
# code substitutions
if args.remoteport:
dports = [int(dport) for dport in args.remoteport.split(',')]
dports_if = ' && '.join(['dport != %d' % dport for dport in dports])
bpf_text = bpf_text.replace('FILTER_DPORT',
'if (%s) { last.delete(&sk); return 0; }' % dports_if)
if args.localport:
lports = [int(lport) for lport in args.localport.split(',')]
lports_if = ' && '.join(['lport != %d' % lport for lport in lports])
bpf_text = bpf_text.replace('FILTER_LPORT',
'if (%s) { last.delete(&sk); return 0; }' % lports_if)
bpf_text = bpf_text.replace('FILTER_DPORT', '')
bpf_text = bpf_text.replace('FILTER_LPORT', '')
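# Placeholders left over when no -L/-D filter was requested become no-ops,
# so the probe traces every local and remote port.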
if debug or args.ebpf:
print(bpf_text)
if args.ebpf:
exit()
# event data
TASK_COMM_LEN = 16 # linux/sched.h
class Data_ipv4(ct.Structure):
_fields_ = [
("ts_us", ct.c_ulonglong),
("skaddr", ct.c_ulonglong),
("saddr", ct.c_uint),
("daddr", ct.c_uint),
("span_us", ct.c_ulonglong),
("pid", ct.c_uint),
("ports", ct.c_uint),
("oldstate", ct.c_uint),
("newstate", ct.c_uint),
("task", ct.c_char * TASK_COMM_LEN)
]
class Data_ipv6(ct.Structure):
_fields_ = [
("ts_us", ct.c_ulonglong),
("skaddr", ct.c_ulonglong),
("saddr", (ct.c_ulonglong * 2)),
("daddr", (ct.c_ulonglong * 2)),
("span_us", ct.c_ulonglong),
("pid", ct.c_uint),
("ports", ct.c_uint),
("oldstate", ct.c_uint),
("newstate", ct.c_uint),
("task", ct.c_char * TASK_COMM_LEN)
]
#
# Setup output formats
#
# Don't change the default output (next 2 lines): this fits in 80 chars. I
# know it doesn't have NS or UIDs etc. I know. If you really, really, really
# need to add columns, columns that solve real actual problems, I'd start by
# adding an extended mode (-x) to include those columns.
#
header_string = "%-16s %-5s %-10.10s %s%-15s %-5s %-15s %-5s %-11s -> %-11s %s"
format_string = ("%-16x %-5d %-10.10s %s%-15s %-5d %-15s %-5d %-11s " +
"-> %-11s %.3f")
if args.wide:
header_string = ("%-16s %-5s %-16.16s %-2s %-26s %-5s %-26s %-5s %-11s " +
"-> %-11s %s")
format_string = ("%-16x %-5d %-16.16s %-2s %-26s %-5s %-26s %-5d %-11s " +
"-> %-11s %.3f")
if args.csv:
header_string = "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s"
format_string = "%x,%d,%s,%s,%s,%s,%s,%d,%s,%s,%.3f"
if args.journal:
try:
from systemd import journal
except ImportError:
print("ERROR: Journal logging requires the systemd.journal module")
exit(1)
def tcpstate2str(state):
# from include/net/tcp_states.h:
tcpstate = {
1: "ESTABLISHED",
2: "SYN_SENT",
3: "SYN_RECV",
4: "FIN_WAIT1",
5: "FIN_WAIT2",
6: "TIME_WAIT",
7: "CLOSE",
8: "CLOSE_WAIT",
9: "LAST_ACK",
10: "LISTEN",
11: "CLOSING",
12: "NEW_SYN_RECV",
}
if state in tcpstate:
return tcpstate[state]
else:
return str(state)
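# Note: event.ports packs the local (source) port in the upper 32 bits and the
# remote (destination) port in the lower 32 bits, mirroring the BPF code above.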
def journal_fields(event, addr_family):
addr_pfx = 'IPV4'
if addr_family == AF_INET6:
addr_pfx = 'IPV6'
fields = {
# Standard fields described in systemd.journal-fields(7). journal.send
# will fill in CODE_LINE, CODE_FILE, and CODE_FUNC for us. If we're
# root and specify OBJECT_PID, systemd-journald will add other OBJECT_*
# fields for us.
'SYSLOG_IDENTIFIER': 'tcpstates',
'PRIORITY': 5,
'_SOURCE_REALTIME_TIMESTAMP': time() * 1000000,
'OBJECT_PID': str(event.pid),
'OBJECT_COMM': event.task.decode('utf-8', 'replace'),
# Custom fields, aka "stuff we sort of made up".
        # For IPv6 the ctypes address fields are already 16-byte buffers; only
        # IPv4 addresses need to be packed from a 32-bit integer.
        'OBJECT_' + addr_pfx + '_SOURCE_ADDRESS': inet_ntop(
            addr_family,
            event.saddr if addr_family == AF_INET6 else pack("I", event.saddr)),
        'OBJECT_TCP_SOURCE_PORT': str(event.ports >> 32),
        'OBJECT_' + addr_pfx + '_DESTINATION_ADDRESS': inet_ntop(
            addr_family,
            event.daddr if addr_family == AF_INET6 else pack("I", event.daddr)),
        'OBJECT_TCP_DESTINATION_PORT': str(event.ports & 0xffffffff),
'OBJECT_TCP_OLD_STATE': tcpstate2str(event.oldstate),
'OBJECT_TCP_NEW_STATE': tcpstate2str(event.newstate),
'OBJECT_TCP_SPAN_TIME': str(event.span_us)
}
msg_format_string = (u"%(OBJECT_COMM)s " +
u"%(OBJECT_" + addr_pfx + "_SOURCE_ADDRESS)s " +
u"%(OBJECT_TCP_SOURCE_PORT)s → " +
u"%(OBJECT_" + addr_pfx + "_DESTINATION_ADDRESS)s " +
u"%(OBJECT_TCP_DESTINATION_PORT)s " +
u"%(OBJECT_TCP_OLD_STATE)s → %(OBJECT_TCP_NEW_STATE)s")
fields['MESSAGE'] = msg_format_string % (fields)
if getuid() == 0:
del fields['OBJECT_COMM'] # Handled by systemd-journald
return fields
# process event
def print_ipv4_event(cpu, data, size):
event = ct.cast(data, ct.POINTER(Data_ipv4)).contents
global start_ts
if args.time:
if args.csv:
print("%s," % strftime("%H:%M:%S"), end="")
else:
print("%-8s " % strftime("%H:%M:%S"), end="")
if args.timestamp:
if start_ts == 0:
start_ts = event.ts_us
delta_s = (float(event.ts_us) - start_ts) / 1000000
if args.csv:
print("%.6f," % delta_s, end="")
else:
print("%-9.6f " % delta_s, end="")
print(format_string % (event.skaddr, event.pid, event.task.decode('utf-8', 'replace'),
"4" if args.wide or args.csv else "",
inet_ntop(AF_INET, pack("I", event.saddr)), event.ports >> 32,
inet_ntop(AF_INET, pack("I", event.daddr)), event.ports & 0xffffffff,
tcpstate2str(event.oldstate), tcpstate2str(event.newstate),
float(event.span_us) / 1000))
if args.journal:
journal.send(**journal_fields(event, AF_INET))
def print_ipv6_event(cpu, data, size):
event = ct.cast(data, ct.POINTER(Data_ipv6)).contents
global start_ts
if args.time:
if args.csv:
print("%s," % strftime("%H:%M:%S"), end="")
else:
print("%-8s " % strftime("%H:%M:%S"), end="")
if args.timestamp:
if start_ts == 0:
start_ts = event.ts_us
delta_s = (float(event.ts_us) - start_ts) / 1000000
if args.csv:
print("%.6f," % delta_s, end="")
else:
print("%-9.6f " % delta_s, end="")
print(format_string % (event.skaddr, event.pid, event.task.decode('utf-8', 'replace'),
"6" if args.wide or args.csv else "",
inet_ntop(AF_INET6, event.saddr), event.ports >> 32,
inet_ntop(AF_INET6, event.daddr), event.ports & 0xffffffff,
tcpstate2str(event.oldstate), tcpstate2str(event.newstate),
float(event.span_us) / 1000))
if args.journal:
journal.send(**journal_fields(event, AF_INET6))
# initialize BPF
b = BPF(text=bpf_text)
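# Loading the text compiles the program; BCC auto-attaches the
# TRACEPOINT_PROBE(sock, inet_sock_set_state) handler defined in bpf_text.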
# header
if args.time:
if args.csv:
print("%s," % ("TIME"), end="")
else:
print("%-8s " % ("TIME"), end="")
if args.timestamp:
if args.csv:
print("%s," % ("TIME(s)"), end="")
else:
print("%-9s " % ("TIME(s)"), end="")
print(header_string % ("SKADDR", "C-PID", "C-COMM",
"IP" if args.wide or args.csv else "",
"LADDR", "LPORT", "RADDR", "RPORT",
"OLDSTATE", "NEWSTATE", "MS"))
start_ts = 0
# read events
b["ipv4_events"].open_perf_buffer(print_ipv4_event, page_cnt=64)
b["ipv6_events"].open_perf_buffer(print_ipv6_event, page_cnt=64)
while 1:
try:
b.perf_buffer_poll()
except KeyboardInterrupt:
exit()
| 32.982188 | 102 | 0.61094 |
4a21f0935d54f91bbeac7c6c4509ce6ed9bcd698 | 8,457 | py | Python | docs/conf.py | globocom/dbaas-zabbix | 3d38c522abcbaac26a6702101b0754b037332dba | [
"BSD-3-Clause"
] | 3 | 2017-01-24T10:56:39.000Z | 2019-07-23T12:19:29.000Z | docs/conf.py | globocom/dbaas-zabbix | 3d38c522abcbaac26a6702101b0754b037332dba | [
"BSD-3-Clause"
] | 5 | 2017-02-08T16:17:13.000Z | 2019-10-10T16:34:56.000Z | docs/conf.py | globocom/dbaas-zabbix | 3d38c522abcbaac26a6702101b0754b037332dba | [
"BSD-3-Clause"
] | 1 | 2019-10-10T16:32:27.000Z | 2019-10-10T16:32:27.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
# The importable package name uses an underscore (hyphens are not valid in
# Python module names).
import dbaas_zabbix
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'dbaas-zabbix'
copyright = u'2014, Felippe Raposo'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = dbaas_zabbix.__version__
# The full version, including alpha/beta/rc tags.
release = dbaas_zabbix.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'dbaas-zabbixdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'dbaas-zabbix.tex',
u'dbaas-zabbix Documentation',
u'Felippe Raposo', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'dbaas-zabbix',
u'dbaas-zabbix Documentation',
[u'Felippe Raposo'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'dbaas-zabbix',
u'dbaas-zabbix Documentation',
u'Felippe Raposo',
'dbaas-zabbix',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False | 30.752727 | 76 | 0.716093 |
4a21f0a97567cb67bf322139d09d8bf56fe824d7 | 91 | py | Python | authentication/urls.py | Rasel-Al-Mamun/Fiction-Django | b5f25b84abdabc62cf82af7cdfe63fd45f265ded | [
"MIT"
] | null | null | null | authentication/urls.py | Rasel-Al-Mamun/Fiction-Django | b5f25b84abdabc62cf82af7cdfe63fd45f265ded | [
"MIT"
] | null | null | null | authentication/urls.py | Rasel-Al-Mamun/Fiction-Django | b5f25b84abdabc62cf82af7cdfe63fd45f265ded | [
"MIT"
] | null | null | null | from . import views
from django.urls import path
app_name = 'auth'
urlpatterns = [
]
| 10.111111 | 28 | 0.681319 |
4a21f0c5eda434f87e7c58ac57241bea45b2894c | 279 | py | Python | src/rating/processing/bisect.py | alterway/processing-operator | f959bb023ff0b549897ee426a17a096b9437ab71 | [
"Apache-2.0"
] | null | null | null | src/rating/processing/bisect.py | alterway/processing-operator | f959bb023ff0b549897ee426a17a096b9437ab71 | [
"Apache-2.0"
] | null | null | null | src/rating/processing/bisect.py | alterway/processing-operator | f959bb023ff0b549897ee426a17a096b9437ab71 | [
"Apache-2.0"
] | null | null | null | import bisect
def get_closest_configs_bisect(timestamp, timestamps):
timestamps_len = len(timestamps)
if timestamps_len == 1:
return 0
index = bisect.bisect_left(timestamps, timestamp)
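    # bisect_left returns the insertion point; when timestamp is greater than
    # every entry, fall back to the last index.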
if index == timestamps_len:
return index - 1
return index | 27.9 | 54 | 0.702509 |
4a21f3279034131e287608aa7f238be08a6231f6 | 986 | py | Python | project4github/largest_digit.py | chinkaih319/SC101 | 25c179c96e0a2bbc4e47768c029ee4bf49e06245 | [
"MIT"
] | null | null | null | project4github/largest_digit.py | chinkaih319/SC101 | 25c179c96e0a2bbc4e47768c029ee4bf49e06245 | [
"MIT"
] | null | null | null | project4github/largest_digit.py | chinkaih319/SC101 | 25c179c96e0a2bbc4e47768c029ee4bf49e06245 | [
"MIT"
] | null | null | null | """
File: largest_digit.py
Name:
----------------------------------
This file recursively prints the biggest digit in
5 different integers, 12345, 281, 6, -111, -9453
If your implementation is correct, you should see
5, 8, 6, 1, 9 on Console.
"""
def main():
print(find_largest_digit(12345)) # 5
print(find_largest_digit(281)) # 8
print(find_largest_digit(6)) # 6
print(find_largest_digit(-111)) # 1
print(find_largest_digit(-9453)) # 9
def find_largest_digit(n):
"""
:param n:
:return:
"""
time = 0
bs = 0
return helper(n, time, bs)
def helper(n, time, bs):
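    # n is scanned digit by digit: time is the position of the digit currently
    # examined (0 = least significant) and bs is the biggest digit seen so far.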
    if 0 <= n < 10:
return n
else:
if n < 10 ** (time+1):
if n < 0:
return helper(-n, time, bs)
else:
first = n // (10 ** time)
if first > bs:
return first
else:
return bs
else:
sq = n//(10 ** time) - (n//(10 ** (time + 1))) * 10
if sq > bs:
bs = sq
time += 1
return helper(n, time, bs)
if __name__ == '__main__':
main()
| 18.603774 | 54 | 0.558824 |
4a21f32ebbdece2a011fd78d4e334f0837e9fc76 | 8,402 | py | Python | torch_geometric/graphgym/contrib/layer/generalconv.py | Kenneth-Schroeder/pytorch_geometric | f7ec9e964bfae1ce5fb21d9b2b30e9e717bf8e24 | [
"MIT"
] | 12,651 | 2017-10-28T15:14:24.000Z | 2021-09-12T07:22:57.000Z | torch_geometric/graphgym/contrib/layer/generalconv.py | Kenneth-Schroeder/pytorch_geometric | f7ec9e964bfae1ce5fb21d9b2b30e9e717bf8e24 | [
"MIT"
] | 2,472 | 2017-10-30T23:38:47.000Z | 2021-09-12T06:41:44.000Z | torch_geometric/graphgym/contrib/layer/generalconv.py | Kenneth-Schroeder/pytorch_geometric | f7ec9e964bfae1ce5fb21d9b2b30e9e717bf8e24 | [
"MIT"
] | 2,363 | 2017-12-01T13:25:05.000Z | 2021-09-12T07:23:09.000Z | import torch
import torch.nn as nn
from torch.nn import Parameter
from torch_scatter import scatter_add
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.utils import add_remaining_self_loops
from torch_geometric.nn.inits import glorot, zeros
from torch_geometric.graphgym.config import cfg
class GeneralConvLayer(MessagePassing):
r"""General GNN layer
"""
def __init__(self, in_channels, out_channels, improved=False, cached=False,
bias=True, **kwargs):
super(GeneralConvLayer, self).__init__(aggr=cfg.gnn.agg, **kwargs)
self.in_channels = in_channels
self.out_channels = out_channels
self.improved = improved
self.cached = cached
self.normalize = cfg.gnn.normalize_adj
self.weight = Parameter(torch.Tensor(in_channels, out_channels))
if cfg.gnn.self_msg == 'concat':
self.weight_self = Parameter(
torch.Tensor(in_channels, out_channels))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
glorot(self.weight)
if cfg.gnn.self_msg == 'concat':
glorot(self.weight_self)
zeros(self.bias)
self.cached_result = None
self.cached_num_edges = None
@staticmethod
def norm(edge_index, num_nodes, edge_weight=None, improved=False,
dtype=None):
if edge_weight is None:
edge_weight = torch.ones((edge_index.size(1), ), dtype=dtype,
device=edge_index.device)
fill_value = 1 if not improved else 2
edge_index, edge_weight = add_remaining_self_loops(
edge_index, edge_weight, fill_value, num_nodes)
row, col = edge_index
deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
deg_inv_sqrt = deg.pow(-0.5)
deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0
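        # Symmetric (GCN-style) normalization: each edge weight becomes
        # w_ij / sqrt(deg(i) * deg(j)), with self-loops added above.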
return edge_index, deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]
def forward(self, x, edge_index, edge_weight=None, edge_feature=None):
""""""
if cfg.gnn.self_msg == 'concat':
x_self = torch.matmul(x, self.weight_self)
x = torch.matmul(x, self.weight)
if self.cached and self.cached_result is not None:
if edge_index.size(1) != self.cached_num_edges:
raise RuntimeError(
'Cached {} number of edges, but found {}. Please '
'disable the caching behavior of this layer by removing '
'the `cached=True` argument in its constructor.'.format(
self.cached_num_edges, edge_index.size(1)))
if not self.cached or self.cached_result is None:
self.cached_num_edges = edge_index.size(1)
if self.normalize:
edge_index, norm = self.norm(edge_index, x.size(self.node_dim),
edge_weight, self.improved,
x.dtype)
else:
norm = edge_weight
self.cached_result = edge_index, norm
edge_index, norm = self.cached_result
x_msg = self.propagate(edge_index, x=x, norm=norm,
edge_feature=edge_feature)
if cfg.gnn.self_msg == 'none':
return x_msg
elif cfg.gnn.self_msg == 'add':
return x_msg + x
elif cfg.gnn.self_msg == 'concat':
return x_msg + x_self
else:
raise ValueError('self_msg {} not defined'.format(
cfg.gnn.self_msg))
def message(self, x_j, norm, edge_feature):
if edge_feature is None:
return norm.view(-1, 1) * x_j if norm is not None else x_j
else:
return norm.view(-1, 1) * (
x_j + edge_feature) if norm is not None else (x_j +
edge_feature)
def update(self, aggr_out):
if self.bias is not None:
aggr_out = aggr_out + self.bias
return aggr_out
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,
self.out_channels)
class GeneralEdgeConvLayer(MessagePassing):
r"""General GNN layer, with edge features
"""
def __init__(self, in_channels, out_channels, edge_dim,
improved=False, cached=False, bias=True, **kwargs):
super(GeneralEdgeConvLayer, self).__init__(aggr=cfg.gnn.agg, **kwargs)
self.in_channels = in_channels
self.out_channels = out_channels
self.improved = improved
self.cached = cached
self.normalize = cfg.gnn.normalize_adj
self.msg_direction = cfg.gnn.msg_direction
if self.msg_direction == 'single':
self.linear_msg = nn.Linear(in_channels + edge_dim,
out_channels, bias=False)
else:
self.linear_msg = nn.Linear(in_channels * 2 + edge_dim,
out_channels, bias=False)
if cfg.gnn.self_msg == 'concat':
self.linear_self = nn.Linear(in_channels, out_channels, bias=False)
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
zeros(self.bias)
self.cached_result = None
self.cached_num_edges = None
@staticmethod
def norm(edge_index, num_nodes, edge_weight=None, improved=False,
dtype=None):
if edge_weight is None:
edge_weight = torch.ones((edge_index.size(1), ), dtype=dtype,
device=edge_index.device)
fill_value = 1 if not improved else 2
edge_index, edge_weight = add_remaining_self_loops(
edge_index, edge_weight, fill_value, num_nodes)
row, col = edge_index
deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
deg_inv_sqrt = deg.pow(-0.5)
deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0
return edge_index, deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]
def forward(self, x, edge_index, edge_weight=None, edge_feature=None):
if self.cached and self.cached_result is not None:
if edge_index.size(1) != self.cached_num_edges:
raise RuntimeError(
'Cached {} number of edges, but found {}. Please '
'disable the caching behavior of this layer by removing '
'the `cached=True` argument in its constructor.'.format(
self.cached_num_edges, edge_index.size(1)))
if not self.cached or self.cached_result is None:
self.cached_num_edges = edge_index.size(1)
if self.normalize:
edge_index, norm = self.norm(edge_index, x.size(self.node_dim),
edge_weight, self.improved,
x.dtype)
else:
norm = edge_weight
self.cached_result = edge_index, norm
edge_index, norm = self.cached_result
x_msg = self.propagate(edge_index, x=x, norm=norm,
edge_feature=edge_feature)
if cfg.gnn.self_msg == 'concat':
x_self = self.linear_self(x)
return x_self + x_msg
elif cfg.gnn.self_msg == 'add':
return x + x_msg
else:
return x_msg
def message(self, x_i, x_j, norm, edge_feature):
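        # msg_direction 'both' builds the message from receiver (x_i), sender
        # (x_j) and edge features; 'single' uses only the sender and edge features.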
if self.msg_direction == 'both':
x_j = torch.cat((x_i, x_j, edge_feature), dim=-1)
else:
x_j = torch.cat((x_j, edge_feature), dim=-1)
x_j = self.linear_msg(x_j)
return norm.view(-1, 1) * x_j if norm is not None else x_j
def update(self, aggr_out):
if self.bias is not None:
aggr_out = aggr_out + self.bias
return aggr_out
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,
self.out_channels)
| 38.365297 | 79 | 0.580576 |
4a21f415874f39402c1cec6c6d51b94e95489831 | 22,103 | py | Python | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/ipv4trafficendpoint_ccf0ac687ab3e96bf323237e4242c33d.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 20 | 2019-05-07T01:59:14.000Z | 2022-02-11T05:24:47.000Z | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/ipv4trafficendpoint_ccf0ac687ab3e96bf323237e4242c33d.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 60 | 2019-04-03T18:59:35.000Z | 2022-02-22T12:05:05.000Z | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/ipv4trafficendpoint_ccf0ac687ab3e96bf323237e4242c33d.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 13 | 2019-05-20T10:48:31.000Z | 2021-10-06T07:45:44.000Z | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class Ipv4TrafficEndPoint(Base):
"""NOT DEFINED
The Ipv4TrafficEndPoint class encapsulates a list of ipv4TrafficEndPoint resources that are managed by the user.
A list of resources can be retrieved from the server using the Ipv4TrafficEndPoint.find() method.
The list can be managed by using the Ipv4TrafficEndPoint.add() and Ipv4TrafficEndPoint.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'ipv4TrafficEndPoint'
_SDM_ATT_MAP = {
'ArpViaInterface': 'arpViaInterface',
'CustomIpHeaderLength': 'customIpHeaderLength',
'CustomIpHeaderValue': 'customIpHeaderValue',
'CustomIpProtocol': 'customIpProtocol',
'DestinationPort': 'destinationPort',
'EnableVlan': 'enableVlan',
'GatewayMac': 'gatewayMac',
'IpAddress': 'ipAddress',
'IpMask': 'ipMask',
'IpProtocol': 'ipProtocol',
'Ipv4Dscp': 'ipv4Dscp',
'Ipv4Ecn': 'ipv4Ecn',
'MacAddress': 'macAddress',
'Name': 'name',
'ProtocolInterface': 'protocolInterface',
'RangeSize': 'rangeSize',
'SourcePort': 'sourcePort',
'UdpDestination': 'udpDestination',
'UdpSource': 'udpSource',
'VlanCount': 'vlanCount',
'VlanId': 'vlanId',
'VlanPriority': 'vlanPriority',
}
_SDM_ENUM_MAP = {
'ipProtocol': ['custom', 'tcp', 'udp'],
}
def __init__(self, parent, list_op=False):
super(Ipv4TrafficEndPoint, self).__init__(parent, list_op)
@property
def ArpViaInterface(self):
# type: () -> bool
"""
Returns
-------
- bool: If selected, ARP request is conveyed through an Interface.
"""
return self._get_attribute(self._SDM_ATT_MAP['ArpViaInterface'])
@ArpViaInterface.setter
def ArpViaInterface(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['ArpViaInterface'], value)
@property
def CustomIpHeaderLength(self):
# type: () -> int
"""
Returns
-------
- number: The Custom IPv4 Header Length value. The default value is 1.
"""
return self._get_attribute(self._SDM_ATT_MAP['CustomIpHeaderLength'])
@CustomIpHeaderLength.setter
def CustomIpHeaderLength(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['CustomIpHeaderLength'], value)
@property
def CustomIpHeaderValue(self):
# type: () -> str
"""
Returns
-------
- str: The Custom IPv4 Header Value. The default value is 00
"""
return self._get_attribute(self._SDM_ATT_MAP['CustomIpHeaderValue'])
@CustomIpHeaderValue.setter
def CustomIpHeaderValue(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['CustomIpHeaderValue'], value)
@property
def CustomIpProtocol(self):
# type: () -> str
"""
Returns
-------
- str: Specify the custom IP Protocol for the Source Traffic Endpoints.
"""
return self._get_attribute(self._SDM_ATT_MAP['CustomIpProtocol'])
@CustomIpProtocol.setter
def CustomIpProtocol(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['CustomIpProtocol'], value)
@property
def DestinationPort(self):
# type: () -> str
"""
Returns
-------
- str: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['DestinationPort'])
@DestinationPort.setter
def DestinationPort(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['DestinationPort'], value)
@property
def EnableVlan(self):
# type: () -> bool
"""
Returns
-------
- bool: Select this check box to make VLAN available.
"""
return self._get_attribute(self._SDM_ATT_MAP['EnableVlan'])
@EnableVlan.setter
def EnableVlan(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['EnableVlan'], value)
@property
def GatewayMac(self):
# type: () -> str
"""
Returns
-------
- str: The Gateway MAC address of the source traffic endpoint. The default value is 00 00 00 00 00 00.
"""
return self._get_attribute(self._SDM_ATT_MAP['GatewayMac'])
@GatewayMac.setter
def GatewayMac(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['GatewayMac'], value)
@property
def IpAddress(self):
# type: () -> str
"""
Returns
-------
- str: Specify the IPv4 address of the Source Traffic Endpoint. The default value is 0.0.0.0.
"""
return self._get_attribute(self._SDM_ATT_MAP['IpAddress'])
@IpAddress.setter
def IpAddress(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['IpAddress'], value)
@property
def IpMask(self):
# type: () -> int
"""
Returns
-------
- number: Specify the Mask value. The default value is 24.
"""
return self._get_attribute(self._SDM_ATT_MAP['IpMask'])
@IpMask.setter
def IpMask(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['IpMask'], value)
@property
def IpProtocol(self):
# type: () -> str
"""
Returns
-------
- str(custom | tcp | udp): Click the IP Protocol type to be used.
"""
return self._get_attribute(self._SDM_ATT_MAP['IpProtocol'])
@IpProtocol.setter
def IpProtocol(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['IpProtocol'], value)
@property
def Ipv4Dscp(self):
# type: () -> str
"""
Returns
-------
- str: The priority specified for the IP address. The default value is 0.
"""
return self._get_attribute(self._SDM_ATT_MAP['Ipv4Dscp'])
@Ipv4Dscp.setter
def Ipv4Dscp(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Ipv4Dscp'], value)
@property
def Ipv4Ecn(self):
# type: () -> str
"""
Returns
-------
- str: The ECN value specified for the IP address.
"""
return self._get_attribute(self._SDM_ATT_MAP['Ipv4Ecn'])
@Ipv4Ecn.setter
def Ipv4Ecn(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Ipv4Ecn'], value)
@property
def MacAddress(self):
# type: () -> str
"""
Returns
-------
- str: The MAC Address of the source traffic endpoint. The default value is 00 00 00 00 00 00.
"""
return self._get_attribute(self._SDM_ATT_MAP['MacAddress'])
@MacAddress.setter
def MacAddress(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['MacAddress'], value)
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: The name of the Traffic endpoint. It is an auto-populated field but can be customized for convenience.
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def ProtocolInterface(self):
# type: () -> str
"""
Returns
-------
- str(None | /api/v1/sessions/1/ixnetwork/vport/.../interface): NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['ProtocolInterface'])
@ProtocolInterface.setter
def ProtocolInterface(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['ProtocolInterface'], value)
@property
def RangeSize(self):
# type: () -> int
"""
Returns
-------
- number: Specify the size of the traffic range.
"""
return self._get_attribute(self._SDM_ATT_MAP['RangeSize'])
@RangeSize.setter
def RangeSize(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['RangeSize'], value)
@property
def SourcePort(self):
# type: () -> str
"""
Returns
-------
- str: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['SourcePort'])
@SourcePort.setter
def SourcePort(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['SourcePort'], value)
@property
def UdpDestination(self):
# type: () -> str
"""
Returns
-------
- str: Specify the UDP Destination. The default value is 1.
"""
return self._get_attribute(self._SDM_ATT_MAP['UdpDestination'])
@UdpDestination.setter
def UdpDestination(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['UdpDestination'], value)
@property
def UdpSource(self):
# type: () -> str
"""
Returns
-------
- str: Specify the UDP Source. The default value is 1.
"""
return self._get_attribute(self._SDM_ATT_MAP['UdpSource'])
@UdpSource.setter
def UdpSource(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['UdpSource'], value)
@property
def VlanCount(self):
# type: () -> int
"""
Returns
-------
- number: Specify the VLAN count. The default value is 1.
"""
return self._get_attribute(self._SDM_ATT_MAP['VlanCount'])
@VlanCount.setter
def VlanCount(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['VlanCount'], value)
@property
def VlanId(self):
# type: () -> str
"""
Returns
-------
- str: Specify the VLAN ID (Outer and Inner).
"""
return self._get_attribute(self._SDM_ATT_MAP['VlanId'])
@VlanId.setter
def VlanId(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['VlanId'], value)
@property
def VlanPriority(self):
# type: () -> str
"""
Returns
-------
- str: Specify the VLAN Priority (Outer and Inner).
"""
return self._get_attribute(self._SDM_ATT_MAP['VlanPriority'])
@VlanPriority.setter
def VlanPriority(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['VlanPriority'], value)
def update(self, ArpViaInterface=None, CustomIpHeaderLength=None, CustomIpHeaderValue=None, CustomIpProtocol=None, DestinationPort=None, EnableVlan=None, GatewayMac=None, IpAddress=None, IpMask=None, IpProtocol=None, Ipv4Dscp=None, Ipv4Ecn=None, MacAddress=None, Name=None, ProtocolInterface=None, RangeSize=None, SourcePort=None, UdpDestination=None, UdpSource=None, VlanCount=None, VlanId=None, VlanPriority=None):
# type: (bool, int, str, str, str, bool, str, str, int, str, str, str, str, str, str, int, str, str, str, int, str, str) -> Ipv4TrafficEndPoint
"""Updates ipv4TrafficEndPoint resource on the server.
Args
----
- ArpViaInterface (bool): If selected, ARP request is conveyed through an Interface.
- CustomIpHeaderLength (number): The Custom IPv4 Header Length value. The default value is 1.
- CustomIpHeaderValue (str): The Custom IPv4 Header Value. The default value is 00
- CustomIpProtocol (str): Specify the custom IP Protocol for the Source Traffic Endpoints.
- DestinationPort (str): NOT DEFINED
- EnableVlan (bool): Select this check box to make VLAN available.
- GatewayMac (str): The Gateway MAC address of the source traffic endpoint. The default value is 00 00 00 00 00 00.
- IpAddress (str): Specify the IPv4 address of the Source Traffic Endpoint. The default value is 0.0.0.0.
- IpMask (number): Specify the Mask value. The default value is 24.
- IpProtocol (str(custom | tcp | udp)): Click the IP Protocol type to be used.
- Ipv4Dscp (str): The priority specified for the IP address. The default value is 0.
- Ipv4Ecn (str): The ECN value specified for the IP address.
- MacAddress (str): The MAC Address of the source traffic endpoint. The default value is 00 00 00 00 00 00.
- Name (str): The name of the Traffic endpoint. It is an auto-populated field but can be customized for convenience.
- ProtocolInterface (str(None | /api/v1/sessions/1/ixnetwork/vport/.../interface)): NOT DEFINED
- RangeSize (number): Specify the size of the traffic range.
- SourcePort (str): NOT DEFINED
- UdpDestination (str): Specify the UDP Destination. The default value is 1.
- UdpSource (str): Specify the UDP Source. The default value is 1.
- VlanCount (number): Specify the VLAN count. The default value is 1.
- VlanId (str): Specify the VLAN ID (Outer and Inner).
- VlanPriority (str): Specify the VLAN Priority (Outer and Inner).
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, ArpViaInterface=None, CustomIpHeaderLength=None, CustomIpHeaderValue=None, CustomIpProtocol=None, DestinationPort=None, EnableVlan=None, GatewayMac=None, IpAddress=None, IpMask=None, IpProtocol=None, Ipv4Dscp=None, Ipv4Ecn=None, MacAddress=None, Name=None, ProtocolInterface=None, RangeSize=None, SourcePort=None, UdpDestination=None, UdpSource=None, VlanCount=None, VlanId=None, VlanPriority=None):
# type: (bool, int, str, str, str, bool, str, str, int, str, str, str, str, str, str, int, str, str, str, int, str, str) -> Ipv4TrafficEndPoint
"""Adds a new ipv4TrafficEndPoint resource on the server and adds it to the container.
Args
----
- ArpViaInterface (bool): If selected, ARP request is conveyed through an Interface.
- CustomIpHeaderLength (number): The Custom IPv4 Header Length value. The default value is 1.
- CustomIpHeaderValue (str): The Custom IPv4 Header Value. The default value is 00
- CustomIpProtocol (str): Specify the custom IP Protocol for the Source Traffic Endpoints.
- DestinationPort (str): NOT DEFINED
- EnableVlan (bool): Select this check box to make VLAN available.
- GatewayMac (str): The Gateway MAC address of the source traffic endpoint. The default value is 00 00 00 00 00 00.
- IpAddress (str): Specify the IPv4 address of the Source Traffic Endpoint. The default value is 0.0.0.0.
- IpMask (number): Specify the Mask value. The default value is 24.
- IpProtocol (str(custom | tcp | udp)): Click the IP Protocol type to be used.
- Ipv4Dscp (str): The priority specified for the IP address. The default value is 0.
- Ipv4Ecn (str): The ECN value specified for the IP address.
- MacAddress (str): The MAC Address of the source traffic endpoint. The default value is 00 00 00 00 00 00.
- Name (str): The name of the Traffic endpoint. It is an auto-populated field but can be customized for convenience.
- ProtocolInterface (str(None | /api/v1/sessions/1/ixnetwork/vport/.../interface)): NOT DEFINED
- RangeSize (number): Specify the size of the traffic range.
- SourcePort (str): NOT DEFINED
- UdpDestination (str): Specify the UDP Destination. The default value is 1.
- UdpSource (str): Specify the UDP Source. The default value is 1.
- VlanCount (number): Specify the VLAN count. The default value is 1.
- VlanId (str): Specify the VLAN ID (Outer and Inner).
- VlanPriority (str): Specify the VLAN Priority (Outer and Inner).
Returns
-------
- self: This instance with all currently retrieved ipv4TrafficEndPoint resources using find and the newly added ipv4TrafficEndPoint resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained ipv4TrafficEndPoint resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, ArpViaInterface=None, CustomIpHeaderLength=None, CustomIpHeaderValue=None, CustomIpProtocol=None, DestinationPort=None, EnableVlan=None, GatewayMac=None, IpAddress=None, IpMask=None, IpProtocol=None, Ipv4Dscp=None, Ipv4Ecn=None, MacAddress=None, Name=None, ProtocolInterface=None, RangeSize=None, SourcePort=None, UdpDestination=None, UdpSource=None, VlanCount=None, VlanId=None, VlanPriority=None):
# type: (bool, int, str, str, str, bool, str, str, int, str, str, str, str, str, str, int, str, str, str, int, str, str) -> Ipv4TrafficEndPoint
"""Finds and retrieves ipv4TrafficEndPoint resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve ipv4TrafficEndPoint resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all ipv4TrafficEndPoint resources from the server.
Args
----
- ArpViaInterface (bool): If selected, ARP request is conveyed through an Interface.
- CustomIpHeaderLength (number): The Custom IPv4 Header Length value. The default value is 1.
- CustomIpHeaderValue (str): The Custom IPv4 Header Value. The default value is 00
- CustomIpProtocol (str): Specify the custom IP Protocol for the Source Traffic Endpoints.
- DestinationPort (str): NOT DEFINED
- EnableVlan (bool): Select this check box to make VLAN available.
- GatewayMac (str): The Gateway MAC address of the source traffic endpoint. The default value is 00 00 00 00 00 00.
- IpAddress (str): Specify the IPv4 address of the Source Traffic Endpoint. The default value is 0.0.0.0.
- IpMask (number): Specify the Mask value. The default value is 24.
- IpProtocol (str(custom | tcp | udp)): Click the IP Protocol type to be used.
- Ipv4Dscp (str): The priority specified for the IP address. The default value is 0.
- Ipv4Ecn (str): The ECN value specified for the IP address.
- MacAddress (str): The MAC Address of the source traffic endpoint. The default value is 00 00 00 00 00 00.
- Name (str): The name of the Traffic endpoint. It is an auto-populated field but can be customized for convenience.
- ProtocolInterface (str(None | /api/v1/sessions/1/ixnetwork/vport/.../interface)): NOT DEFINED
- RangeSize (number): Specify the size of the traffic range.
- SourcePort (str): NOT DEFINED
- UdpDestination (str): Specify the UDP Destination. The default value is 1.
- UdpSource (str): Specify the UDP Source. The default value is 1.
- VlanCount (number): Specify the VLAN count. The default value is 1.
- VlanId (str): Specify the VLAN ID (Outer and Inner).
- VlanPriority (str): Specify the VLAN Priority (Outer and Inner).
Returns
-------
- self: This instance with matching ipv4TrafficEndPoint resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of ipv4TrafficEndPoint data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the ipv4TrafficEndPoint resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
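# Minimal usage sketch (hypothetical names; assumes an established
# ixnetwork_restpy session whose protocols node exposes this resource list):
#   ep = protocols.Ipv4TrafficEndPoint.add(IpAddress='10.0.0.1', RangeSize=10)
#   matches = protocols.Ipv4TrafficEndPoint.find(Name='^ep1$')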
| 42.587669 | 420 | 0.64032 |
4a21f45043d55d317110d69215b26852191a4417 | 9,645 | py | Python | src/fileseq/utils.py | justinfx/fileseq | 9ec049373af37ec21a21d2a5564deb344a96f97f | [
"MIT"
] | 69 | 2019-09-25T13:08:14.000Z | 2022-03-21T07:47:24.000Z | src/fileseq/utils.py | justinfx/fileseq | 9ec049373af37ec21a21d2a5564deb344a96f97f | [
"MIT"
] | 28 | 2020-02-19T04:58:59.000Z | 2021-10-21T02:40:01.000Z | src/fileseq/utils.py | justinfx/fileseq | 9ec049373af37ec21a21d2a5564deb344a96f97f | [
"MIT"
] | 11 | 2020-04-14T10:22:06.000Z | 2021-12-06T09:49:00.000Z | #! /usr/bin/env python
"""
utils - General tools of use to fileseq operations.
"""
from __future__ import absolute_import, division
from builtins import bytes
from builtins import next
from builtins import range
from builtins import object
import future.utils as futils
import decimal
from itertools import chain, count, islice
import os
import sys
from fileseq import exceptions
FILESYSTEM_ENCODING = sys.getfilesystemencoding() or 'utf-8'
def quantize(number, decimal_places, rounding=decimal.ROUND_HALF_EVEN):
"""
Round a decimal value to given number of decimal places
Args:
number (decimal.Decimal): Decimal number to round
decimal_places (int): Number of decimal places in return value
rounding (str): decimal.Decimal rounding mode. See rounding argument of
https://docs.python.org/2/library/decimal.html#decimal.Context
Returns:
decimal.Decimal:
"""
quantize_exponent = decimal.Decimal(1).scaleb(-decimal_places)
return number.quantize(quantize_exponent, rounding=rounding)
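# Example: quantize(decimal.Decimal("1.2345"), 2) -> Decimal("1.23")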
def lenRange(start, stop, step=1):
"""
Get the length of values for a given range
Args:
start (int):
stop (int):
step (int):
"""
if not step:
raise ValueError('step argument must not be zero')
if step > 0:
result = (stop - start + step - 1) // step
else:
result = (stop - start + step + 1) // step
return max(0, result)
class xrange2(object):
"""
An itertools-based replacement for xrange which does
not exhibit the OverflowError issue on some platforms,
when a value exceeds a C long size.
Provides the features of an islice, with the added support
for checking the length of the range.
"""
__slots__ = ['_len', '_islice']
def __init__(self, start, stop=None, step=1):
if stop is None:
start, stop = 0, start
self._len = lenRange(start, stop, step)
self._islice = islice(count(start, step), self._len)
def __len__(self):
return self._len
def __next__(self):
return next(self._islice)
def __iter__(self):
return self._islice.__iter__()
# Issue #44
# On Windows platform, it is possible for xrange to get an
# OverflowError if a value passed to xrange exceeds the size of a C long.
# Switch to an alternate implementation.
if os.name == 'nt':
xrange = range = xrange2
else:
xrange = range
def xfrange(start, stop, step=1, maxSize=-1):
"""
Returns a generator that yields the frames from start to stop, inclusive.
In other words it adds or subtracts a frame, as necessary, to return the
stop value as well, if the stepped range would touch that value.
Args:
start (int):
stop (int):
step (int): Note that the sign will be ignored
maxSize (int):
Returns:
generator:
Raises:
:class:`fileseq.exceptions.MaxSizeException`: if size is exceeded
"""
if not step:
raise ValueError('xfrange() step argument must not be zero')
start, stop, step = normalizeFrames([start, stop, step])
if start <= stop:
step = abs(step)
else:
step = -abs(step)
if isinstance(start, futils.integer_types):
size = (stop - start) // step + 1
else:
size = int((stop - start) / step) + 1
if maxSize >= 0 and size > maxSize:
raise exceptions.MaxSizeException(
"Size %d > %s (MAX_FRAME_SIZE)" % (size, maxSize))
# because an xrange is an odd object all its own, we wrap it in a
# generator expression to get a proper Generator
if isinstance(start, futils.integer_types):
offset = step // abs(step)
return (f for f in range(start, stop + offset, step))
else:
return (start + i * step for i in range(size))
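# Example: list(xfrange(1, 10, 2)) yields [1, 3, 5, 7, 9]; the stop value is
# included whenever the step lands on it exactly.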
def normalizeFrame(frame):
"""
Convert a frame number to the most appropriate type - the most compact type
that doesn't affect precision, for example numbers that convert exactly
to integer values will be converted to int
Args:
frame (int, float, decimal.Decimal, str): frame number to normalize
Returns:
frame (int, float, or decimal.Decimal):
"""
if frame is None:
return None
elif isinstance(frame, futils.integer_types):
return frame
elif isinstance(frame, float):
frame_int = int(frame)
if frame == frame_int:
return frame_int
return frame
elif isinstance(frame, decimal.Decimal):
frame_int = int(frame)
if frame == frame_int:
return frame_int
return frame.normalize()
else:
try:
return int(frame)
except ValueError:
try:
frame = decimal.Decimal(frame)
except decimal.DecimalException:
return frame
else:
return normalizeFrame(frame)
def normalizeFrames(frames):
"""
Convert a sequence of frame numbers to the most appropriate type for the
overall sequence, where all members of the result are of the same type.
Args:
frames (iterable of int, float, decimal.Decimal, or str):
frame numbers to normalize
Returns:
frames (iterable of int, float, or decimal.Decimal):
"""
# Normalise all frame values and find their type
frames = [normalizeFrame(frame) for frame in frames]
frame_types = set(type(frame) for frame in frames)
# Determine best overall type for frames
if float in frame_types:
FrameType = float
elif decimal.Decimal in frame_types:
FrameType = decimal.Decimal
else:
FrameType = int
if len(frame_types) == 1:
return frames
# Convert all frames to chosen type
frames = [FrameType(frame) for frame in frames]
# Ensure all decimal frames have same exponent
if FrameType is decimal.Decimal:
maximum_decimal_places = max(
-frame.as_tuple().exponent for frame in frames
)
frames = [quantize(frame, maximum_decimal_places) for frame in frames]
return frames
def unique(seen, *iterables):
"""
Get the unique items in iterables while preserving order. Note that this
mutates the seen set provided only when the returned generator is used.
Args:
seen (set): either an empty set, or the set of things already seen
*iterables: one or more iterable lists to chain together
Returns:
generator:
"""
_add = seen.add
# return a generator of the unique items and the set of the seen items
# the seen set will mutate when the generator is iterated over
return (i for i in chain(*iterables) if i not in seen and not _add(i))
def pad(number, width=0, decimal_places=None):
"""
Return the zero-padded string of a given number.
Args:
number (int, float, or decimal.Decimal): the number to pad
width (int): width for zero padding the integral component
decimal_places (int): number of decimal places to use in frame range
Returns:
str:
"""
# Make the common case fast. Truncate to integer value as USD does.
# https://graphics.pixar.com/usd/docs/api/_usd__page__value_clips.html
# See _DeriveClipTimeString for formating of templateAssetPath
# https://github.com/PixarAnimationStudios/USD/blob/release/pxr/usd/usd/clipSetDefinition.cpp
if decimal_places == 0:
return futils.native_str(number).partition(".")[0].zfill(width)
# USD ultimately uses vsnprintf to format floats for templateAssetPath:
# _DeriveClipTimeString -> TfStringPrintf -> ArchVStringPrintf -> ArchVsnprintf -> vsnprintf
# Since glibc 2.17 the printf family of functions rounds floats using the
# current IEEE rounding mode, by default bankers' rounding (FE_TONEAREST).
# See https://sourceware.org/bugzilla/show_bug.cgi?id=5044 and man(3) fegetround
# Also https://www.exploringbinary.com/inconsistent-rounding-of-printed-floating-point-numbers/
if decimal_places is not None:
if not isinstance(number, decimal.Decimal):
number = decimal.Decimal(number)
number = quantize(number, decimal_places, decimal.ROUND_HALF_EVEN)
number = futils.native_str(number)
parts = number.split(".", 1)
parts[0] = parts[0].zfill(width)
return ".".join(parts)
def _getPathSep(path):
"""
Abstracts returning the appropriate path separator
for the given path string.
This implementation always returns ``os.sep``
Abstracted to make test mocking easier.
Args:
path (str): A path to check for the most common sep
Returns:
str:
"""
return os.sep
_STR_TYPES = frozenset((futils.text_type, futils.binary_type))
def asString(obj):
"""
Ensure an object is either explicitly str or unicode
and not some derived type that can change semantics.
If the object is unicode, return unicode.
Otherwise return the string conversion of the object.
Args:
obj: Object to return as str or unicode
Returns:
str or unicode:
"""
typ = type(obj)
# explicit type check as faster path
if typ in _STR_TYPES:
if not futils.PY2 and typ is futils.binary_type:
obj = os.fsdecode(obj)
return obj
# derived type check
elif isinstance(obj, bytes):
if futils.PY2:
obj = bytes(obj)
else:
obj = obj.decode(FILESYSTEM_ENCODING)
else:
obj = futils.text_type(obj)
return futils.native(obj)
| 29.138973 | 99 | 0.654536 |
4a21f55dbae7f5efc8428b682f48677b50551b4d | 71 | py | Python | py_tdlib/constructors/secret_chat_state_pending.py | Mr-TelegramBot/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 24 | 2018-10-05T13:04:30.000Z | 2020-05-12T08:45:34.000Z | py_tdlib/constructors/secret_chat_state_pending.py | MrMahdi313/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 3 | 2019-06-26T07:20:20.000Z | 2021-05-24T13:06:56.000Z | py_tdlib/constructors/secret_chat_state_pending.py | MrMahdi313/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 5 | 2018-10-05T14:29:28.000Z | 2020-08-11T15:04:10.000Z | from ..factory import Type
class secretChatStatePending(Type):
pass
| 11.833333 | 35 | 0.788732 |
4a21f6232cb09ce96806fd53d5e832c2f6ff0878 | 26,589 | py | Python | src/transformers/__init__.py | SoumyaBarikeri/transformers | 996c6e113404000f50444287aa8a31a174ebd92f | [
"Apache-2.0"
] | 1 | 2021-08-07T06:06:45.000Z | 2021-08-07T06:06:45.000Z | src/transformers/__init__.py | SoumyaBarikeri/transformers | 996c6e113404000f50444287aa8a31a174ebd92f | [
"Apache-2.0"
] | null | null | null | src/transformers/__init__.py | SoumyaBarikeri/transformers | 996c6e113404000f50444287aa8a31a174ebd92f | [
"Apache-2.0"
] | 2 | 2021-05-31T08:50:50.000Z | 2022-01-26T13:14:58.000Z | # flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
__version__ = "3.3.0"
# Work around to update TensorFlow's absl.logging threshold which alters the
# default Python logging output behavior when present.
# see: https://github.com/abseil/abseil-py/issues/99
# and: https://github.com/tensorflow/tensorflow/issues/26691#issuecomment-500369493
try:
import absl.logging
except ImportError:
pass
else:
absl.logging.set_verbosity("info")
absl.logging.set_stderrthreshold("info")
absl.logging._warn_preinit_stderr = False
# Integrations: this needs to come before other ml imports
# in order to allow any 3rd-party code to initialize properly
from .integrations import ( # isort:skip
is_comet_available,
is_optuna_available,
is_ray_available,
is_tensorboard_available,
is_wandb_available,
)
# Configurations
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig
from .configuration_auto import ALL_PRETRAINED_CONFIG_ARCHIVE_MAP, CONFIG_MAPPING, AutoConfig
from .configuration_bart import BartConfig
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig
from .configuration_bert_generation import BertGenerationConfig
from .configuration_camembert import CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CamembertConfig
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .configuration_distilbert import DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig
from .configuration_dpr import DPR_PRETRAINED_CONFIG_ARCHIVE_MAP, DPRConfig
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig
from .configuration_encoder_decoder import EncoderDecoderConfig
from .configuration_flaubert import FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, FlaubertConfig
from .configuration_fsmt import FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP, FSMTConfig
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .configuration_gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config
from .configuration_layoutlm import LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMConfig
from .configuration_longformer import LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .configuration_marian import MarianConfig
from .configuration_mbart import MBartConfig
from .configuration_mmbt import MMBTConfig
from .configuration_mobilebert import MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileBertConfig
from .configuration_openai import OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OpenAIGPTConfig
from .configuration_pegasus import PegasusConfig
from .configuration_rag import RagConfig
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
from .configuration_retribert import RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RetriBertConfig
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig
from .configuration_t5 import T5_PRETRAINED_CONFIG_ARCHIVE_MAP, T5Config
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .configuration_utils import PretrainedConfig
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig
from .configuration_xlm_roberta import XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
from .data import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadV1Processor,
SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
is_sklearn_available,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
# Files and general utilities
from .file_utils import (
CONFIG_NAME,
MODEL_CARD_NAME,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
add_end_docstrings,
add_start_docstrings,
cached_path,
is_apex_available,
is_datasets_available,
is_faiss_available,
is_psutil_available,
is_py3nvml_available,
is_tf_available,
is_torch_available,
is_torch_tpu_available,
)
from .hf_argparser import HfArgumentParser
# Model Cards
from .modelcard import ModelCard
# TF 2.0 <=> PyTorch conversion utilities
from .modeling_tf_pytorch_utils import (
convert_tf_weight_name_to_pt_weight_name,
load_pytorch_checkpoint_in_tf2_model,
load_pytorch_model_in_tf2_model,
load_pytorch_weights_in_tf2_model,
load_tf2_checkpoint_in_pytorch_model,
load_tf2_model_in_pytorch_model,
load_tf2_weights_in_pytorch_model,
)
# Pipelines
from .pipelines import (
Conversation,
ConversationalPipeline,
CsvPipelineDataFormat,
FeatureExtractionPipeline,
FillMaskPipeline,
JsonPipelineDataFormat,
NerPipeline,
PipedPipelineDataFormat,
Pipeline,
PipelineDataFormat,
QuestionAnsweringPipeline,
SummarizationPipeline,
Text2TextGenerationPipeline,
TextClassificationPipeline,
TextGenerationPipeline,
TokenClassificationPipeline,
TranslationPipeline,
ZeroShotClassificationPipeline,
pipeline,
)
# Retriever
from .retrieval_rag import RagRetriever
# Tokenizers
from .tokenization_albert import AlbertTokenizer
from .tokenization_auto import TOKENIZER_MAPPING, AutoTokenizer
from .tokenization_bart import BartTokenizer, BartTokenizerFast
from .tokenization_bert import BasicTokenizer, BertTokenizer, BertTokenizerFast, WordpieceTokenizer
from .tokenization_bert_generation import BertGenerationTokenizer
from .tokenization_bert_japanese import BertJapaneseTokenizer, CharacterTokenizer, MecabTokenizer
from .tokenization_bertweet import BertweetTokenizer
from .tokenization_camembert import CamembertTokenizer
from .tokenization_ctrl import CTRLTokenizer
from .tokenization_distilbert import DistilBertTokenizer, DistilBertTokenizerFast
from .tokenization_dpr import (
DPRContextEncoderTokenizer,
DPRContextEncoderTokenizerFast,
DPRQuestionEncoderTokenizer,
DPRQuestionEncoderTokenizerFast,
DPRReaderTokenizer,
DPRReaderTokenizerFast,
)
from .tokenization_electra import ElectraTokenizer, ElectraTokenizerFast
from .tokenization_flaubert import FlaubertTokenizer
from .tokenization_fsmt import FSMTTokenizer
from .tokenization_funnel import FunnelTokenizer, FunnelTokenizerFast
from .tokenization_gpt2 import GPT2Tokenizer, GPT2TokenizerFast
from .tokenization_layoutlm import LayoutLMTokenizer, LayoutLMTokenizerFast
from .tokenization_longformer import LongformerTokenizer, LongformerTokenizerFast
from .tokenization_lxmert import LxmertTokenizer, LxmertTokenizerFast
from .tokenization_mbart import MBartTokenizer
from .tokenization_mobilebert import MobileBertTokenizer, MobileBertTokenizerFast
from .tokenization_openai import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from .tokenization_pegasus import PegasusTokenizer
from .tokenization_phobert import PhobertTokenizer
from .tokenization_rag import RagTokenizer
from .tokenization_reformer import ReformerTokenizer
from .tokenization_retribert import RetriBertTokenizer, RetriBertTokenizerFast
from .tokenization_roberta import RobertaTokenizer, RobertaTokenizerFast
from .tokenization_t5 import T5Tokenizer
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer, TransfoXLTokenizerFast
from .tokenization_utils import PreTrainedTokenizer
from .tokenization_utils_base import (
BatchEncoding,
CharSpan,
PreTrainedTokenizerBase,
SpecialTokensMixin,
TensorType,
TokenSpan,
)
from .tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_xlm import XLMTokenizer
from .tokenization_xlm_roberta import XLMRobertaTokenizer
from .tokenization_xlnet import SPIECE_UNDERLINE, XLNetTokenizer
# Trainer
from .trainer_utils import EvalPrediction, set_seed
from .training_args import TrainingArguments
from .training_args_tf import TFTrainingArguments
from .utils import logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
if is_sklearn_available():
from .data import glue_compute_metrics, xnli_compute_metrics
# Modeling
if is_torch_available():
# Benchmarks
from .benchmark.benchmark import PyTorchBenchmark
from .benchmark.benchmark_args import PyTorchBenchmarkArguments
from .data.data_collator import (
DataCollator,
DataCollatorForLanguageModeling,
DataCollatorForNextSentencePrediction,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSOP,
DataCollatorWithPadding,
default_data_collator,
)
from .data.datasets import (
GlueDataset,
GlueDataTrainingArguments,
LineByLineTextDataset,
LineByLineWithSOPTextDataset,
SquadDataset,
SquadDataTrainingArguments,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .generation_utils import top_k_top_p_filtering
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
from .modeling_auto import (
MODEL_FOR_CAUSAL_LM_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_PRETRAINING_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForMultipleChoice,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoModelWithLMAndDebiasHead,
)
from .modeling_bart import (
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BartForConditionalGeneration,
BartForQuestionAnswering,
BartForSequenceClassification,
BartModel,
PretrainedBartModel,
)
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
from .modeling_bert_generation import (
BertGenerationDecoder,
BertGenerationEncoder,
load_tf_weights_in_bert_generation,
)
from .modeling_camembert import (
CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
CamembertForCausalLM,
CamembertForMaskedLM,
CamembertForMultipleChoice,
CamembertForQuestionAnswering,
CamembertForSequenceClassification,
CamembertForTokenClassification,
CamembertModel,
)
from .modeling_ctrl import CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
from .modeling_dpr import (
DPRContextEncoder,
DPRPretrainedContextEncoder,
DPRPretrainedQuestionEncoder,
DPRPretrainedReader,
DPRQuestionEncoder,
DPRReader,
)
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
from .modeling_encoder_decoder import EncoderDecoderModel
from .modeling_flaubert import (
FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from .modeling_fsmt import FSMTForConditionalGeneration, FSMTModel, PretrainedFSMTModel
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
load_tf_weights_in_funnel,
)
from .modeling_gpt2 import (
GPT2_PRETRAINED_MODEL_ARCHIVE_LIST,
GPT2DoubleHeadsModel,
GPT2LMHeadModel,
GPT2Model,
GPT2PreTrainedModel,
load_tf_weights_in_gpt2,
GPT2DoubleHeadsModelEqualisingLoss,
GPT2DoubleHeadsModelCosineDistLoss,
GPT2DoubleHeadsModelCustomClassifier,
GPT2DoubleHeadsModelSoftDebiasing,
GPT2DoubleHeadsModelHardDebiasing,
GPT2DoubleHeadsModelReligion2EqLoss,
)
from .modeling_layoutlm import (
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMForMaskedLM,
LayoutLMForTokenClassification,
LayoutLMModel,
)
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerSelfAttention,
)
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
from .modeling_marian import MarianMTModel
from .modeling_mbart import MBartForConditionalGeneration
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
from .modeling_openai import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
OpenAIGPTPreTrainedModel,
load_tf_weights_in_openai_gpt,
)
from .modeling_pegasus import PegasusForConditionalGeneration
from .modeling_rag import RagModel, RagSequenceForGeneration, RagTokenForGeneration
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
)
from .modeling_retribert import RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RetriBertModel, RetriBertPreTrainedModel
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
)
from .modeling_t5 import (
T5_PRETRAINED_MODEL_ARCHIVE_LIST,
T5ForConditionalGeneration,
T5Model,
T5PreTrainedModel,
load_tf_weights_in_t5,
)
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
from .modeling_utils import Conv1D, PreTrainedModel, apply_chunking_to_forward, prune_layer
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
)
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
# Optimization
from .optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from .tokenization_marian import MarianTokenizer
# Trainer
from .trainer import EvalPrediction, Trainer, set_seed, torch_distributed_zero_first
# TensorFlow
if is_tf_available():
from .benchmark.benchmark_args_tf import TensorFlowBenchmarkArguments
# Benchmarks
from .benchmark.benchmark_tf import TensorFlowBenchmark
from .generation_tf_utils import tf_top_k_top_p_filtering
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
from .modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
TF_MODEL_WITH_LM_HEAD_MAPPING,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForMultipleChoice,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
)
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
from .modeling_tf_camembert import (
TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCamembertForMaskedLM,
TFCamembertForMultipleChoice,
TFCamembertForQuestionAnswering,
TFCamembertForSequenceClassification,
TFCamembertForTokenClassification,
TFCamembertModel,
)
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
from .modeling_tf_flaubert import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
from .modeling_tf_gpt2 import (
TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGPT2DoubleHeadsModel,
TFGPT2LMHeadModel,
TFGPT2MainLayer,
TFGPT2Model,
TFGPT2PreTrainedModel,
)
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForQuestionAnswering,
TFLongformerModel,
TFLongformerSelfAttention,
)
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
from .modeling_tf_openai import (
TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFOpenAIGPTDoubleHeadsModel,
TFOpenAIGPTLMHeadModel,
TFOpenAIGPTMainLayer,
TFOpenAIGPTModel,
TFOpenAIGPTPreTrainedModel,
)
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
from .modeling_tf_t5 import (
TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST,
TFT5ForConditionalGeneration,
TFT5Model,
TFT5PreTrainedModel,
)
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
from .modeling_tf_utils import TFPreTrainedModel, TFSequenceSummary, TFSharedEmbeddings, shape_list
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
)
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
# Optimization
from .optimization_tf import AdamWeightDecay, GradientAccumulator, WarmUp, create_optimizer
# Trainer
from .trainer_tf import TFTrainer
if not is_tf_available() and not is_torch_available():
logger.warning(
"Neither PyTorch nor TensorFlow >= 2.0 have been found."
"Models won't be available and only tokenizers, configuration"
"and file/data utilities can be used."
)
| 35.499332 | 117 | 0.760427 |
4a21f6be64f592263dd23f2ec21509df09450bce | 791 | py | Python | test/test_grompp.py | bioexcel/virtualscreening | e973958e012e38f99b0c8ed2b798c5e5a7f72b22 | [
"Apache-2.0"
] | 3 | 2020-02-17T11:11:08.000Z | 2021-12-03T18:54:47.000Z | test/test_grompp.py | bioexcel/virtualscreening | e973958e012e38f99b0c8ed2b798c5e5a7f72b22 | [
"Apache-2.0"
] | 1 | 2019-12-05T15:32:50.000Z | 2019-12-10T16:13:08.000Z | test/test_grompp.py | bioexcel/virtualscreening | e973958e012e38f99b0c8ed2b798c5e5a7f72b22 | [
"Apache-2.0"
] | 2 | 2019-09-26T20:21:14.000Z | 2021-07-10T04:37:31.000Z | from os.path import join as opj
from test import fixtures as fx
from gromacs_wrapper.grompp import Grompp
class TestGrompp(object):
def setUp(self):
fx.test_setup(self,'grompp')
def tearDown(self):
fx.test_teardown(self)
def test_launch(self):
output_tpr_path = opj(self.properties['path'], self.properties['output_tpr_path'])
returncode = Grompp(input_gro_path=opj(self.data_dir, self.properties['input_gro_path']),
input_top_zip_path=opj(self.data_dir, self.properties['input_top_zip_path']),
output_tpr_path=output_tpr_path,
properties=self.properties).launch()
assert fx.exe_success(returncode)
assert fx.not_empty(output_tpr_path)
| 35.954545 | 107 | 0.656131 |
4a21f70bbf38998b13ceefdcfd94e54a814ffcb9 | 706 | py | Python | util/chplenv/chplenv.py | MayukhSobo/chapel | c64476af40e5b49689983ac172fa201deb133af9 | [
"ECL-2.0",
"Apache-2.0"
] | 1,602 | 2015-01-06T11:26:31.000Z | 2022-03-30T06:17:21.000Z | util/chplenv/chplenv.py | sthagen/chapel | 888fcc282385f31fe866511e3652c4e88b7721a1 | [
"ECL-2.0",
"Apache-2.0"
] | 11,789 | 2015-01-05T04:50:15.000Z | 2022-03-31T23:39:19.000Z | util/chplenv/chplenv.py | sthagen/chapel | 888fcc282385f31fe866511e3652c4e88b7721a1 | [
"ECL-2.0",
"Apache-2.0"
] | 498 | 2015-01-08T18:58:18.000Z | 2022-03-20T15:37:45.000Z | import chpl_cpu
import chpl_atomics
import chpl_aux_filesys
import chpl_bin_subdir
import chpl_make
import chpl_platform
import chpl_comm
import chpl_comm_debug
import chpl_comm_segment
import chpl_comm_substrate
import chpl_compiler
import chpl_gasnet
import chpl_gmp
import chpl_hwloc
import chpl_jemalloc
import chpl_launcher
import chpl_libfabric
import chpl_llvm
import chpl_locale_model
import chpl_gpu
import chpl_arch
import chpl_mem
import chpl_qthreads
import chpl_re2
import chpl_tasks
import chpl_timers
import chpl_unwind
import chpl_lib_pic
import chpl_sanitizers
# General purpose helpers
import chpl_home_utils
import chpl_python_version
import compiler_utils
import overrides
import utils
| 19.611111 | 26 | 0.896601 |
4a21f7ae0a7049aa0e0ef42f49ae239149ad8ea3 | 1,123 | py | Python | src/models/classifier/__init__.py | alexmlamb/SPUDT | 5d4ff32c9e37a485c176d3e68c58723e544972e5 | [
"MIT"
] | null | null | null | src/models/classifier/__init__.py | alexmlamb/SPUDT | 5d4ff32c9e37a485c176d3e68c58723e544972e5 | [
"MIT"
] | null | null | null | src/models/classifier/__init__.py | alexmlamb/SPUDT | 5d4ff32c9e37a485c176d3e68c58723e544972e5 | [
"MIT"
] | null | null | null | # Model inspired from: https://github.com/szagoruyko/wide-residual-networks
from .train import train
from common.loaders import images
def parse_args(parser):
parser.add_argument('--dataset', type=str, default='mnist')
parser.add_argument('--dataset-loc', type=str, default='.')
parser.add_argument('--h-dim', type=int, default=64)
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--nesterov', type=bool, default=True)
parser.add_argument('--weight-decay', type=float, default=5e-4)
parser.add_argument('--depth', type=float, default=16)
parser.add_argument('--widen-factor', type=float, default=8)
parser.add_argument('--dropRate', type=float, default=0.4)
def execute(args):
print(args)
train_loader, valid_loader, test_loader, shape, nc = \
getattr(images, args.dataset)(args.dataset_loc, args.train_batch_size, args.test_batch_size, args.valid_split)
args.nc = nc
args.loader = (train_loader, valid_loader, test_loader)
args.shape = shape
train(args)
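# Editorial note: execute() above also reads args.train_batch_size,
# args.test_batch_size and args.valid_split, which parse_args() does not
# register here, so they are presumably added by a shared top-level parser.
# Hypothetical wiring, for illustration only:
#   parser = argparse.ArgumentParser()
#   parse_args(parser)
#   execute(parser.parse_args())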
| 40.107143 | 118 | 0.707035 |
4a21f7d37d0e39fc4c5a191df087a53ee43ba73a | 545 | py | Python | core/management/commands/init_admin.py | HiroshiFuu/django-rest-drf-yasg-boilerplate | 93221b2dbca0635eb42a18096e805b00f36ff9c1 | [
"Apache-2.0"
] | null | null | null | core/management/commands/init_admin.py | HiroshiFuu/django-rest-drf-yasg-boilerplate | 93221b2dbca0635eb42a18096e805b00f36ff9c1 | [
"Apache-2.0"
] | null | null | null | core/management/commands/init_admin.py | HiroshiFuu/django-rest-drf-yasg-boilerplate | 93221b2dbca0635eb42a18096e805b00f36ff9c1 | [
"Apache-2.0"
] | null | null | null | from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
class Command(BaseCommand):
help = "Initilize admin user"
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
if User.objects.all().count() == 0:
User.objects.create_superuser(username='admin', password='password', email='[email protected]', first_name='Hao', last_name='FENG')
print('admin user created')
else:
print('admin user already exists') | 34.0625 | 148 | 0.66422 |
4a21f9405c27f4bcfe005b80c08d74f5b04c1d29 | 4,203 | py | Python | modyz-streamlit/app1.py | KiranKhanna721/study-easy | 8ad33bbb958282ccdabcb0622e988d5a96ae1d2f | [
"MIT"
] | null | null | null | modyz-streamlit/app1.py | KiranKhanna721/study-easy | 8ad33bbb958282ccdabcb0622e988d5a96ae1d2f | [
"MIT"
] | null | null | null | modyz-streamlit/app1.py | KiranKhanna721/study-easy | 8ad33bbb958282ccdabcb0622e988d5a96ae1d2f | [
"MIT"
] | null | null | null | from langdetect import detect
from modzy import ApiClient
import streamlit as st
SECRET_KEY = 'modzy modelops are incredibles'
API_URL = "https://app.modzy.com/api"
API_KEY ="BLHlhckkavs13Oz3TZqm.MCwXnXb3KaSlLybyEXoP"
client = ApiClient(base_url=API_URL, api_key=API_KEY)
def text_topic_modeling(input_text):
job = client.jobs.submit_text('m8z2mwe3pt', '0.0.1', {'input.txt': input_text})
result = client.results.block_until_complete(job, timeout=None)
    return result
def sentimentanalysis(input_text):
job = client.jobs.submit_text('ed542963de', '1.0.1', {'input.txt': input_text})
result = client.results.block_until_complete(job, timeout=None)
return (result['results']['job']['results.json']['data']['result'])
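# Editorial sketch (not part of the original app): the helpers in this file all
# follow the same submit -> block_until_complete -> extract pattern, which could
# be factored out once. The helper name and `key` parameter are hypothetical.
def _run_modzy_model(model_id, version, input_text, key=None):
    job = client.jobs.submit_text(model_id, version, {'input.txt': input_text})
    result = client.results.block_until_complete(job, timeout=None)
    data = result['results']['job']['results.json']
    return data if key is None else data[key]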
def languageanalysis(input_text):
return detect(input_text)
def languageTranslation(lang,input_text):
if lang == 'ru':
job = client.jobs.submit_text('5b98cvxsd2', '0.0.1', {'input.txt': input_text})
result = client.results.block_until_complete(job, timeout=None)
input_text = result['results']['job']['results.json']['text']
elif lang == 'en':
input_text = input_text
elif lang == 'ar':
job = client.jobs.submit_text('i2gapn1wh7', '0.0.2', {'input.txt': input_text})
result = client.results.block_until_complete(job, timeout=None)
input_text = result['results']['job']['results.json']['text']
elif lang == 'ko':
job = client.jobs.submit_text('hprfkvdbgt', '0.0.2', {'input.txt': input_text})
result = client.results.block_until_complete(job, timeout=None)
input_text = result['results']['job']['results.json']['text']
elif lang == 'tr':
job = client.jobs.submit_text('ydai26qxaa', '0.0.2', {'input.txt': input_text})
result = client.results.block_until_complete(job, timeout=None)
input_text = result['results']['job']['results.json']['text']
elif lang == 'id':
job = client.jobs.submit_text('wn6xe6bizs', '0.0.2', {'input.txt': input_text})
result = client.results.block_until_complete(job, timeout=None)
input_text = result['results']['job']['results.json']['text']
elif lang == 'fa':
job = client.jobs.submit_text('u54lgh7rag', '0.0.2', {'input.txt': input_text})
result = client.results.block_until_complete(job, timeout=None)
input_text = result['results']['job']['results.json']['text']
elif lang == 'zh-cn':
job = client.jobs.submit_text('24ntd2cn93', '0.0.2', {'input.txt': input_text})
result = client.results.block_until_complete(job, timeout=None)
input_text = result['results']['job']['results.json']['text']
elif lang == 'ur':
job = client.jobs.submit_text('vay0g6tavv', '0.0.2', {'input.txt': input_text})
result = client.results.block_until_complete(job, timeout=None)
input_text = result['results']['job']['results.json']['text']
return input_text
def textsummaries(input_text):
job = client.jobs.submit_text('rs2qqwbjwb', '0.0.2', {'input.txt': input_text})
result = client.results.block_until_complete(job, timeout=None)
return(result['results']['job']['results.json']['summary'])
def app():
input_text = st.text_input('Text')
submit = st.button('Submit')
if submit:
if input_text is not None:
st.write("Orginal Text : "+input_text)
lang = languageanalysis(input_text)
input_text = languageTranslation(lang,input_text)
data_lang = lang
st.write("Language : "+data_lang)
data_inputtext = input_text
st.write("English text : "+data_inputtext)
data_texttopic = text_topic_modeling(input_text)
st.write('Topics : ')
st.write(data_texttopic)
data_textsummary = textsummaries(input_text)
st.write("Summary : "+data_textsummary)
data_sentiment = sentimentanalysis(input_text)
st.write("Sentiment : ")
st.write(data_sentiment)
| 51.256098 | 88 | 0.626695 |
4a21f989038a760f70e112311126a813a683bdf5 | 6,263 | py | Python | mimo/abstractions.py | MichaelLutter/mimo | 8a6a770ee90cbd6fd5cc12141d19442a3477af2c | [
"MIT"
] | null | null | null | mimo/abstractions.py | MichaelLutter/mimo | 8a6a770ee90cbd6fd5cc12141d19442a3477af2c | [
"MIT"
] | null | null | null | mimo/abstractions.py | MichaelLutter/mimo | 8a6a770ee90cbd6fd5cc12141d19442a3477af2c | [
"MIT"
] | null | null | null | import abc
import copy
import numpy as np
from future.utils import with_metaclass
from mimo.util.text import progprint_xrange
# Base classes
class Distribution(with_metaclass(abc.ABCMeta, object)):
@abc.abstractmethod
def rvs(self, size=[]):
# random variates (samples)
pass
@abc.abstractmethod
def log_likelihood(self, x):
"""
log likelihood (either log probability mass function or log probability
density function) of x, which has the same type as the output of rvs()
"""
pass
@abc.abstractmethod
def mean(self):
pass
@abc.abstractmethod
def mode(self):
pass
@abc.abstractmethod
def log_partition(self):
pass
@abc.abstractmethod
def entropy(self):
pass
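# Editorial sketch (not part of mimo): a minimal concrete subclass showing how
# the Distribution interface above is typically filled in; the Gaussian here is
# purely illustrative.
class _ExampleGaussian(Distribution):
    def __init__(self, mu=0., sigma=1.):
        self.mu, self.sigma = mu, sigma
    def rvs(self, size=[]):
        return np.random.normal(self.mu, self.sigma, size)
    def log_likelihood(self, x):
        x = np.asarray(x)
        return -0.5 * ((x - self.mu) / self.sigma) ** 2 \
            - np.log(self.sigma * np.sqrt(2. * np.pi))
    def mean(self):
        return self.mu
    def mode(self):
        return self.mu
    def log_partition(self):
        raise NotImplementedError  # not needed for this illustration
    def entropy(self):
        return 0.5 * np.log(2. * np.pi * np.e * self.sigma ** 2)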
class BayesianDistribution(with_metaclass(abc.ABCMeta, Distribution)):
def empirical_bayes(self, data):
"""
(optional) set hyperparameters via empirical bayes
e.g. treat argument as a pseudo-dataset for exponential family
"""
raise NotImplementedError
# Algorithm interfaces for inference in distributions
class GibbsSampling(with_metaclass(abc.ABCMeta, BayesianDistribution)):
@abc.abstractmethod
def resample(self, data=[]):
pass
def copy_sample(self):
"""
return an object copy suitable for making lists of posterior samples
(override this method to prevent copying shared structures into each sample)
"""
return copy.deepcopy(self)
def resample_and_copy(self):
self.resample()
return self.copy_sample()
class MeanField(with_metaclass(abc.ABCMeta, BayesianDistribution)):
@abc.abstractmethod
def expected_log_likelihood(self, x):
pass
@abc.abstractmethod
def meanfieldupdate(self, data, weights):
pass
def get_vlb(self):
raise NotImplementedError
class MeanFieldSVI(with_metaclass(abc.ABCMeta, BayesianDistribution)):
@abc.abstractmethod
def meanfield_sgdstep(self, expected_suff_stats, weights, prob, stepsize):
pass
class MaxLikelihood(with_metaclass(abc.ABCMeta, Distribution)):
@abc.abstractmethod
def max_likelihood(self, data, weights=None):
"""
sets the parameters set to their maximum likelihood values given the
(weighted) data
"""
pass
@property
def num_parameters(self):
raise NotImplementedError
class MAP(with_metaclass(abc.ABCMeta, BayesianDistribution)):
@abc.abstractmethod
def MAP(self, data, weights=None):
"""
sets the parameters to their MAP values given the (weighted) data
analogous to max_likelihood but includes hyperparameters
"""
pass
# Models
class Model(with_metaclass(abc.ABCMeta, object)):
@abc.abstractmethod
def add_data(self, data):
pass
@abc.abstractmethod
def generate(self, keep=True, **kwargs):
"""
Like a distribution's rvs, but this also fills in latent state over
data and keeps references to the data.
"""
pass
def rvs(self, *args, **kwargs):
return self.generate(*args, keep=False, **kwargs)[0] # 0th component is data, not latent stuff
# Algorithm interfaces for inference in models
class ModelGibbsSampling(with_metaclass(abc.ABCMeta, Model)):
@abc.abstractmethod
def resample_model(self): # TODO niter?
pass
def copy_sample(self):
"""
return an object copy suitable for making lists of posterior samples
(override this method to prevent copying shared structures into each sample)
"""
return copy.deepcopy(self)
def resample_and_copy(self):
self.resample_model()
return self.copy_sample()
class ModelMeanField(with_metaclass(abc.ABCMeta, Model)):
@abc.abstractmethod
def meanfield_coordinate_descent_step(self):
# returns variational lower bound after update, if available
pass
def meanfield_coordinate_descent(self, tol=1e-1, maxiter=250,
progprint=False, **kwargs):
# NOTE: doesn't re-initialize!
scores = []
step_iterator = range(maxiter) if not progprint else progprint_xrange(
maxiter)
for _ in step_iterator:
scores.append(self.meanfield_coordinate_descent_step(**kwargs))
if scores[-1] is not None and len(scores) > 1:
if np.abs(scores[-1] - scores[-2]) < tol:
return scores
print(
'WARNING: meanfield_coordinate_descent hit maxiter of %d' % maxiter)
return scores
class ModelMeanFieldSVI(with_metaclass(abc.ABCMeta, Model)):
@abc.abstractmethod
def meanfield_sgdstep(self, minibatch, prob, stepsize):
pass
class _EMBase(with_metaclass(abc.ABCMeta, Model)):
@abc.abstractmethod
def log_likelihood(self):
# returns a log likelihood number on attached data
pass
def _EM_fit(self, method, tol=1e-1, maxiter=100, progprint=False):
# NOTE: doesn't re-initialize!
likes = []
step_iterator = range(maxiter) if not progprint else progprint_xrange(
maxiter)
for _ in step_iterator:
method()
likes.append(self.log_likelihood())
if len(likes) > 1:
if likes[-1] - likes[-2] < tol:
return likes
elif likes[-1] < likes[-2]:
# probably oscillation, do one more
method()
likes.append(self.log_likelihood())
return likes
print('WARNING: EM_fit reached maxiter of %d' % maxiter)
return likes
class ModelEM(with_metaclass(abc.ABCMeta, _EMBase)):
def EM_fit(self, tol=1e-1, maxiter=100):
return self._EM_fit(self.EM_step, tol=tol, maxiter=maxiter)
@abc.abstractmethod
def EM_step(self):
pass
class ModelMAPEM(with_metaclass(abc.ABCMeta, _EMBase)):
def MAP_EM_fit(self, tol=1e-1, maxiter=100):
return self._EM_fit(self.MAP_EM_step, tol=tol, maxiter=maxiter)
@abc.abstractmethod
def MAP_EM_step(self):
pass
| 28.729358 | 103 | 0.643781 |
4a21fa0e328b8baecf4b4e00d94feefbfe78d3bc | 68 | py | Python | ORCSchlange/__main__.py | Fabianexe/ORC-Schlange | c94ad41622ddcea2bc25a59c5debcfe4c823d9c7 | [
"Apache-2.0"
] | null | null | null | ORCSchlange/__main__.py | Fabianexe/ORC-Schlange | c94ad41622ddcea2bc25a59c5debcfe4c823d9c7 | [
"Apache-2.0"
] | null | null | null | ORCSchlange/__main__.py | Fabianexe/ORC-Schlange | c94ad41622ddcea2bc25a59c5debcfe4c823d9c7 | [
"Apache-2.0"
] | null | null | null | from ORCSchlange import main
if __name__ == "__main__":
main()
| 13.6 | 28 | 0.691176 |
4a21fb6866a37bf0546019a5293a39580da3e87a | 9,753 | py | Python | test/functional/mining_pos_reorg.py | OasisCoinTeam/oasis-1 | a5e996144bf484db751c5feb8ed38c94ab317ca5 | [
"MIT"
] | 10 | 2018-10-07T14:04:48.000Z | 2019-07-14T15:48:05.000Z | test/functional/mining_pos_reorg.py | OasisCoinTeam/oasis-1 | a5e996144bf484db751c5feb8ed38c94ab317ca5 | [
"MIT"
] | 3 | 2019-01-18T22:23:23.000Z | 2020-02-15T19:34:13.000Z | test/functional/mining_pos_reorg.py | OasisCoinTeam/oasis-1 | a5e996144bf484db751c5feb8ed38c94ab317ca5 | [
"MIT"
] | 14 | 2018-12-24T18:33:29.000Z | 2022-03-08T06:26:14.000Z | #!/usr/bin/env python3
# Copyright (c) 2019 The OASIS developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import OasisTestFramework
from test_framework.util import (
sync_blocks,
assert_equal,
assert_raises_rpc_error,
connect_nodes_bi,
connect_nodes_clique,
disconnect_nodes,
set_node_times,
DecimalAmt,
)
class ReorgStakeTest(OasisTestFramework):
def set_test_params(self):
self.num_nodes = 3
# node 0 and 1 stake the blocks, node 2 makes the zerocoin spends
def setup_chain(self):
# Start with PoS cache: 330 blocks
self._initialize_chain(toPosPhase=True)
self.enable_mocktime()
def setup_network(self):
# connect all nodes between each other
self.setup_nodes()
connect_nodes_clique(self.nodes)
self.sync_all()
def log_title(self):
title = "*** Starting %s ***" % self.__class__.__name__
underline = "-" * len(title)
description = "Tests reorganisation for PoS blocks."
self.log.info("\n\n%s\n%s\n%s\n", title, underline, description)
def disconnect_all(self):
self.log.info("Disconnecting nodes...")
for i in range(self.num_nodes):
for j in range(self.num_nodes):
if j != i:
disconnect_nodes(self.nodes[i], j)
self.log.info("Nodes disconnected")
def get_tot_balance(self, nodeid):
wi = self.nodes[nodeid].getwalletinfo()
return wi['balance'] + wi['immature_balance']
def run_test(self):
def findUtxoInList(txid, vout, utxo_list):
for x in utxo_list:
if x["txid"] == txid and x["vout"] == vout:
return True, x
return False, None
# Stake with node 0 and node 1 up to public spend activation (400)
# 70 blocks: 5 blocks each (x7)
self.log.info("Staking 70 blocks to reach public spends activation...")
set_node_times(self.nodes, self.mocktime)
for i in range(7):
for peer in range(2):
for nblock in range(5):
self.mocktime = self.generate_pos(peer, self.mocktime)
sync_blocks(self.nodes)
block_time_0 = block_time_1 = self.mocktime
self.log.info("Blocks staked.")
# Check balances
self.log.info("Checking balances...")
initial_balance = [self.get_tot_balance(i) for i in range(self.num_nodes)]
# --nodes 0, 1: 62 pow blocks + 55 pos blocks
assert_equal(initial_balance[0], DecimalAmt(250.0 * (62 + 55)))
assert_equal(initial_balance[1], DecimalAmt(250.0 * (62 + 55)))
# --node 2: 62 pow blocks + 20 pos blocks - zc minted - zcfee
assert_equal(initial_balance[2], DecimalAmt(250.0 * (62 + 20) - 6666 - 0.08))
assert_equal(self.nodes[2].getzerocoinbalance()['Total'], DecimalAmt(6666))
self.log.info("Balances ok.")
# create the raw zerocoin spend txes
addy = self.nodes[2].getnewaddress()
self.log.info("Creating the raw zerocoin public spends...")
mints = self.nodes[2].listmintedzerocoins(True, True)
tx_A0 = self.nodes[2].createrawzerocoinspend(mints[0]["serial hash"], addy)
tx_A1 = self.nodes[2].createrawzerocoinspend(mints[1]["serial hash"], addy)
# Spending same coins to different recipients to get different txids
new_addy = "yAVWM5urwaTyhiuFQHP2aP47rdZsLUG5PH"
tx_B0 = self.nodes[2].createrawzerocoinspend(mints[0]["serial hash"], new_addy)
tx_B1 = self.nodes[2].createrawzerocoinspend(mints[1]["serial hash"], new_addy)
# Disconnect nodes
minted_amount = mints[0]["denomination"] + mints[1]["denomination"]
self.disconnect_all()
# Stake one block with node-0 and save the stake input
self.log.info("Staking 1 block with node 0...")
initial_unspent_0 = self.nodes[0].listunspent()
self.nodes[0].generate(1)
block_time_0 += 60
set_node_times(self.nodes, block_time_0)
last_block = self.nodes[0].getblock(self.nodes[0].getbestblockhash())
assert(len(last_block["tx"]) > 1) # a PoS block has at least two txes
coinstake_txid = last_block["tx"][1]
coinstake_tx = self.nodes[0].getrawtransaction(coinstake_txid, True)
assert(coinstake_tx["vout"][0]["scriptPubKey"]["hex"] == "") # first output of coinstake is empty
stakeinput = coinstake_tx["vin"][0]
# The stake input was unspent 1 block ago, now it's not
res, utxo = findUtxoInList(stakeinput["txid"], stakeinput["vout"], initial_unspent_0)
assert (res and utxo["spendable"])
res, utxo = findUtxoInList(stakeinput["txid"], stakeinput["vout"], self.nodes[0].listunspent())
assert (not res or not utxo["spendable"])
self.log.info("Coinstake input %s...%s-%d is no longer spendable." % (
stakeinput["txid"][:9], stakeinput["txid"][-4:], stakeinput["vout"]))
# Relay zerocoin spends
self.nodes[0].sendrawtransaction(tx_A0)
self.nodes[0].sendrawtransaction(tx_A1)
# Stake 10 more blocks with node-0 and check balances
self.log.info("Staking 10 more blocks with node 0...")
for i in range(10):
block_time_0 = self.generate_pos(0, block_time_0)
expected_balance_0 = initial_balance[0] + DecimalAmt(11 * 250.0)
assert_equal(self.get_tot_balance(0), expected_balance_0)
self.log.info("Balance for node 0 checks out.")
# Connect with node 2, sync and check zerocoin balance
self.log.info("Reconnecting node 0 and node 2")
connect_nodes_bi(self.nodes, 0, 2)
sync_blocks([self.nodes[i] for i in [0, 2]])
self.log.info("Resetting zerocoin mints on node 2")
self.nodes[2].resetmintzerocoin(True)
assert_equal(self.get_tot_balance(2), initial_balance[2] + DecimalAmt(minted_amount))
assert_equal(self.nodes[2].getzerocoinbalance()['Total'], DecimalAmt(6666-minted_amount))
self.log.info("Balance for node 2 checks out.")
# Double spending txes not possible
assert_raises_rpc_error(-26, "bad-txns-invalid-zxos",
self.nodes[0].sendrawtransaction, tx_B0)
assert_raises_rpc_error(-26, "bad-txns-invalid-zxos",
self.nodes[0].sendrawtransaction, tx_B1)
# verify that the stakeinput can't be spent
stakeinput_tx_json = self.nodes[0].getrawtransaction(stakeinput["txid"], True)
stakeinput_amount = float(stakeinput_tx_json["vout"][int(stakeinput["vout"])]["value"])
rawtx_unsigned = self.nodes[0].createrawtransaction(
[{"txid": stakeinput["txid"], "vout": int(stakeinput["vout"])}],
{"xxncEuJK27ygNh7imNfaX8JV6ZQUnoBqzN": (stakeinput_amount-0.01)})
rawtx = self.nodes[0].signrawtransaction(rawtx_unsigned)
assert(rawtx["complete"])
try:
self.nodes[0].sendrawtransaction(rawtx["hex"])
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if e.error["code"] not in [-26, -25]:
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if ([x for x in ["bad-txns-inputs-spent", "Missing inputs"] if x in e.error['message']] == []):
raise e
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
self.log.info("GOOD: v2 spend was not possible.")
# Spend tx_B0 and tx_B1 on the other chain
self.nodes[1].sendrawtransaction(tx_B0)
self.nodes[1].sendrawtransaction(tx_B1)
# Stake 12 blocks with node-1
set_node_times(self.nodes, block_time_1)
self.log.info("Staking 12 blocks with node 1...")
for i in range(12):
block_time_1 = self.generate_pos(1, block_time_1)
expected_balance_1 = initial_balance[1] + DecimalAmt(12 * 250.0)
assert_equal(self.get_tot_balance(1), expected_balance_1)
self.log.info("Balance for node 1 checks out.")
# re-connect and sync nodes and check that node-0 and node-2 get on the other chain
new_best_hash = self.nodes[1].getbestblockhash()
self.log.info("Connecting and syncing nodes...")
set_node_times(self.nodes, block_time_1)
connect_nodes_clique(self.nodes)
sync_blocks(self.nodes)
for i in [0, 2]:
assert_equal(self.nodes[i].getbestblockhash(), new_best_hash)
# check balance of node-0
assert_equal(self.get_tot_balance(0), initial_balance[0])
self.log.info("Balance for node 0 checks out.")
# check that NOW the original stakeinput is present and spendable
res, utxo = findUtxoInList(stakeinput["txid"], stakeinput["vout"], self.nodes[0].listunspent())
assert (res and utxo["spendable"])
self.log.info("Coinstake input %s...%s-%d is spendable again." % (
stakeinput["txid"][:9], stakeinput["txid"][-4:], stakeinput["vout"]))
self.nodes[0].sendrawtransaction(rawtx["hex"])
self.nodes[1].generate(1)
sync_blocks(self.nodes)
res, utxo = findUtxoInList(stakeinput["txid"], stakeinput["vout"], self.nodes[0].listunspent())
assert (not res or not utxo["spendable"])
if __name__ == '__main__':
ReorgStakeTest().main() | 46.665072 | 116 | 0.636317 |
4a21fc4ca03b5c70e7ea0acb2c16e9af3897afa4 | 1,167 | py | Python | userbot/plugins/gps.py | thecyberbyte-tech/Secktor-Userbot | 5ede9c98e4480ec48ad5dd114a5bf2da3df6dc3f | [
"MIT"
] | null | null | null | userbot/plugins/gps.py | thecyberbyte-tech/Secktor-Userbot | 5ede9c98e4480ec48ad5dd114a5bf2da3df6dc3f | [
"MIT"
] | null | null | null | userbot/plugins/gps.py | thecyberbyte-tech/Secktor-Userbot | 5ede9c98e4480ec48ad5dd114a5bf2da3df6dc3f | [
"MIT"
] | null | null | null | # Credits ;- @mrconfused
from geopy.geocoders import Nominatim
from userbot.utils import admin_cmd
from telethon.tl import types
from userbot import CMD_HELP
@borg.on(admin_cmd(pattern="gps ?(.*)"))
async def gps(event):
if event.fwd_from:
return
reply_to_id = event.message
if event.reply_to_msg_id:
reply_to_id = await event.get_reply_message()
input_str = event.pattern_match.group(1)
if not input_str:
return await event.edit("Boss ! Give A Place To Search 😔 !.")
await event.edit("Finding This Location In Maps Server.....")
geolocator = Nominatim(user_agent="secktor USERBOT")
geoloc = geolocator.geocode(input_str)
if geoloc:
lon = geoloc.longitude
lat = geoloc.latitude
await reply_to_id.reply(
input_str,
file=types.InputMediaGeoPoint(
types.InputGeoPoint(
lat, lon
)
)
)
await event.delete()
else:
await event.edit("i coudn't find it")
CMD_HELP.update({"gps": "`.gps` <location name> :\
\nUSAGE: Sends the map location (pin) for the given place name.\
"
})
| 25.933333 | 69 | 0.61868 |
4a21fcaa55dc4eb0e3b0e5341f990164f95fed1a | 2,780 | py | Python | lnt/server/db/search.py | llvm/lnt | 77e0a25f996a5363e23f701c0d995525a5c6484a | [
"Apache-2.0"
] | 19 | 2019-01-15T03:04:00.000Z | 2021-12-08T00:09:01.000Z | lnt/server/db/search.py | llvm/lnt | 77e0a25f996a5363e23f701c0d995525a5c6484a | [
"Apache-2.0"
] | 5 | 2019-04-11T06:22:18.000Z | 2021-09-13T17:41:14.000Z | lnt/server/db/search.py | llvm/lnt | 77e0a25f996a5363e23f701c0d995525a5c6484a | [
"Apache-2.0"
] | 21 | 2019-02-10T02:47:55.000Z | 2022-03-31T14:16:36.000Z | import re
def _naive_search_for_run(session, ts, query, num_results, default_machine):
"""
This 'naive' search doesn't rely on any indexes so can be used without
full-text search enabled. This does make it less clever however.
It is able to match queries for machine names and order numbers
(specifically llvm_project_revision numbers). The revision numbers may be
partial and may be preceded by '#' or 'r'. Any other non-integer tokens are
considered to be partial matches for a machine name; any machine that
contains ALL of the tokens will be searched.
"""
order_re = re.compile(r'[r#]?(\d+)')
machine_queries = []
order_queries = []
# First, tokenize the query string.
for q in query.split(' '):
if not q:
# Prune zero-length tokens
continue
m = order_re.match(q)
if m:
order_queries.append(int(m.group(1)))
else:
machine_queries.append(q)
if not machine_queries and not default_machine:
# No machines to query: no matches. We can't query all machines, we'd
# end up doing a full table scan and that is not scalable.
return []
machines = []
if not machine_queries:
machines = [default_machine]
else:
for m in session.query(ts.Machine).all():
if all(q in m.name for q in machine_queries):
machines.append(m.id)
if not machines:
return []
llvm_project_revision_idx = [i
for i, f in enumerate(ts.Order.fields)
if f.name == 'llvm_project_revision'][0]
llvm_project_revision_col = \
ts.Order.fields[llvm_project_revision_idx].column
q = session.query(ts.Run) \
.filter(ts.Run.machine_id.in_(machines)) \
.filter(ts.Run.order_id == ts.Order.id) \
.filter(llvm_project_revision_col.isnot(None))
if order_queries:
oq = '%' + str(order_queries[0]) + '%'
q = q.filter(llvm_project_revision_col.like(oq))
return q.order_by(ts.Run.id.desc()).limit(num_results).all()
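# Editorial sketch (not part of LNT): how the tokenizer above splits a query
# into revision-number and machine-name tokens; the sample query is made up.
def _split_query_example(query):
    order_re = re.compile(r'[r#]?(\d+)')
    orders, machines = [], []
    for tok in query.split(' '):
        if not tok:
            continue
        m = order_re.match(tok)
        if m:
            orders.append(int(m.group(1)))
        else:
            machines.append(tok)
    return orders, machines
# _split_query_example("clang-cmake r351409 #351500")
# -> ([351409, 351500], ['clang-cmake'])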
def search(session, ts, query,
num_results=8, default_machine=None):
"""
Performs a textual search for a run. The exact syntax supported depends on
the engine used to perform the search; see _naive_search_for_run for the
minimum supported syntax.
ts: TestSuite object
query: Textual query string
num_results: Number of results to return
default_machine: If no machines were specified (only orders), return
results from this machine.
Returns a list of Run objects.
"""
return _naive_search_for_run(session, ts, query,
num_results, default_machine)
| 33.902439 | 79 | 0.635971 |
4a21fd70e2181f6388fe357c63bec0d8b00df1c3 | 3,221 | py | Python | diagnostics/RMHD/plot_kspectrum.py | ykawazura/calliope | 343b72a0930d70332172a5d87a579b0f8dcced66 | [
"MIT"
] | 2 | 2022-02-04T19:27:11.000Z | 2022-02-05T05:37:38.000Z | diagnostics/RMHD/plot_kspectrum.py | ykawazura/calliope | 343b72a0930d70332172a5d87a579b0f8dcced66 | [
"MIT"
] | null | null | null | diagnostics/RMHD/plot_kspectrum.py | ykawazura/calliope | 343b72a0930d70332172a5d87a579b0f8dcced66 | [
"MIT"
] | 2 | 2022-02-03T10:45:48.000Z | 2022-02-03T10:48:28.000Z | # -*- coding: utf-8 -*-
from load import *
from fft import *
from plots import *
print('\nplotting kspectrum\n')
outdir = './fig_kspectrum/'
upe2_bin = sum_negative_kz2d(upe2_bin)
bpe2_bin = sum_negative_kz2d(bpe2_bin)
if nlz == nkz:
kp_end = np.argmin(np.abs(kpbin - kpbin.max()*2./3.))
if not is2D:
kz_end = np.argmin(np.abs(kz[1:int(nkz/2)] - kz[1:int(nkz/2)].max()*2./3.))
else:
kp_end = kpbin.size - 1
kz_end = int(nkz/2)
#--------------------------------------------------------#
# plot 1D spectra #
#--------------------------------------------------------#
# kprp spectrum
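# (Editorial note: the third curve below is a k^(-5/3) reference slope,
#  normalized to start at the first plotted point of the E_{\delta B_\perp}
#  spectrum; it corresponds to the '-5/3' legend entry.)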
ys = [
np.sum(upe2_bin [final_idx, :, 1:kp_end], axis=0),
np.sum(bpe2_bin [final_idx, :, 1:kp_end], axis=0),
kpbin[1:kp_end]**(-5./3.)/kpbin[1]**(-5./3.)*np.sum(bpe2_bin[final_idx,:,1:kp_end], axis=0)[0]
]
xs = [
kpbin[1:kp_end],
kpbin[1:kp_end],
kpbin[1:kp_end]
]
ls = [
'',
'',
'k--',
]
legends = [
r'$E_{u_\+}$',
r'$E_{\delta B_\+}$',
r'-5/3',
]
plot_log1d_many(xs, ys, xlab='$k_\+ L_\+$', legends=legends, ls=ls, legendloc='lower left', title=r'$t = %.2E $' % tt[final_idx], ylab='', term=True, save=outdir+'kprp_spectra.pdf')
# kz spectrum
if not is2D:
ys = [
np.sum(upe2_bin [final_idx, 1:kz_end, :kp_end], axis=1),
np.sum(bpe2_bin [final_idx, 1:kz_end, :kp_end], axis=1),
]
xs = [
kz[1:kz_end],
kz[1:kz_end],
]
ls = [
'',
'',
]
legends = [
r'$E_{u_\+}$',
r'$E_{\delta B_\+}$',
]
plot_log1d_many(xs, ys, xlab='$'+kzlab+'$', legends=legends, ls=ls, legendloc='lower left', title=r'$t = %.2E $' % tt[final_idx], ylab='', term=True, save=outdir+'kz_spectra.pdf')
#--------------------------------------------------------#
# plot 2D spectra #
#--------------------------------------------------------#
if not is2D:
plot_log2d(upe2_bin[final_idx, 1:kz_end, 1:kp_end], kpbin[1:kp_end], kz[1:kz_end], xlab='$k_\+ L_\+$', ylab='$'+kzlab+'$',
title=r'$E_{u_{\+}}$' + ' $(t = $ %.2E' % tt[final_idx] + '$)$', save=outdir + 'upe2.pdf')
plot_log2d(bpe2_bin[final_idx, 1:kz_end, 1:kp_end], kpbin[1:kp_end], kz[1:kz_end], xlab='$k_\+ L_\+$', ylab='$'+kzlab+'$',
title=r'$E_{\delta B_\+}$' + ' $(t = $ %.2E' % tt[final_idx] + '$)$', save=outdir + 'bpe2.pdf')
#------------------#
# output ascii #
#------------------#
np.savetxt(outdir + 'Ekprp.txt' , np.column_stack((kpbin[:kp_end],
np.sum(upe2_bin [final_idx,:kz_end,:kp_end], axis=0),
np.sum(bpe2_bin [final_idx,:kz_end,:kp_end], axis=0),
)), fmt='%E')
if not is2D:
np.savetxt(outdir + 'Ekz.txt' , np.column_stack((kz[:kz_end],
np.sum(upe2_bin [final_idx,:kz_end,:kp_end], axis=1),
np.sum(bpe2_bin [final_idx,:kz_end,:kp_end], axis=1),
)), fmt='%E')
del upe2_bin
del bpe2_bin
| 35.788889 | 181 | 0.451723 |
4a21fed9fe9c470531f2020af790f8e83631f9c0 | 7,699 | py | Python | test/test_nt_misc.py | TOMOTON/rdflib | 388e47258c14adbf796172e61be629f0f5c34709 | [
"BSD-3-Clause"
] | 2 | 2021-02-06T17:36:05.000Z | 2021-04-21T07:33:39.000Z | test/test_nt_misc.py | pragya16067/rdflib | 6b5bd37ccc67bdec62d2e36d174eb7933b5020b2 | [
"BSD-3-Clause"
] | null | null | null | test/test_nt_misc.py | pragya16067/rdflib | 6b5bd37ccc67bdec62d2e36d174eb7933b5020b2 | [
"BSD-3-Clause"
] | null | null | null | import unittest
import logging
import os
import re
from rdflib import Graph, Literal, URIRef
from rdflib.plugins.parsers import ntriples
from urllib.request import urlopen
log = logging.getLogger(__name__)
class NTTestCase(unittest.TestCase):
def testIssue859(self):
graphA = Graph()
graphB = Graph()
graphA.parse("test/nt/quote-01.nt", format="ntriples")
graphB.parse("test/nt/quote-02.nt", format="ntriples")
for subjectA, predicateA, objA in graphA:
for subjectB, predicateB, objB in graphB:
self.assertEqual(subjectA, subjectB)
self.assertEqual(predicateA, predicateB)
self.assertEqual(objA, objB)
def testIssue78(self):
g = Graph()
g.add((URIRef("foo"), URIRef("foo"), Literal(u"R\u00E4ksm\u00F6rg\u00E5s")))
s = g.serialize(format="nt")
self.assertEqual(type(s), bytes)
self.assertTrue(r"R\u00E4ksm\u00F6rg\u00E5s".encode("latin-1") in s)
def testIssue146(self):
g = Graph()
g.add((URIRef("foo"), URIRef("foo"), Literal("test\n", lang="en")))
s = g.serialize(format="nt").strip()
self.assertEqual(s, '<foo> <foo> "test\\n"@en .'.encode("latin-1"))
def test_sink(self):
s = ntriples.Sink()
self.assertTrue(s.length == 0)
s.triple(None, None, None)
self.assertTrue(s.length == 1)
def test_nonvalidating_unquote(self):
safe = """<http://example.org/alice/foaf.rdf#me> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://xmlns.com/foaf/0.1/Person> <http://example.org/alice/foaf1.rdf> ."""
ntriples.validate = False
res = ntriples.unquote(safe)
self.assertTrue(isinstance(res, str))
def test_validating_unquote(self):
quot = """<http://example.org/alice/foaf.rdf#me> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://xmlns.com/foaf/0.1/Person> <http://example.org/alice/foaf1.rdf> ."""
ntriples.validate = True
res = ntriples.unquote(quot)
# revert to default
ntriples.validate = False
log.debug("restype %s" % type(res))
def test_validating_unquote_raises(self):
ntriples.validate = True
uniquot = """<http://www.w3.org/People/Berners-Lee/card#cm> <http://xmlns.com/foaf/0.1/name> "R\\u00E4ksm\\u00F6rg\\u00E5s" <http://www.w3.org/People/Berners-Lee/card> ."""
self.assertRaises(ntriples.ParseError, ntriples.unquote, uniquot)
uniquot = """<http://www.w3.org/People/Berners-Lee/card#cm> <http://xmlns.com/foaf/0.1/name> "R\\\\u00E4ksm\\u00F6rg\\u00E5s" <http://www.w3.org/People/Berners-Lee/card> ."""
self.assertRaises(ntriples.ParseError, ntriples.unquote, uniquot)
# revert to default
ntriples.validate = False
def test_nonvalidating_uriquote(self):
ntriples.validate = False
safe = """<http://example.org/alice/foaf.rdf#me> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://xmlns.com/foaf/0.1/Person> <http://example.org/alice/foaf1.rdf> ."""
res = ntriples.uriquote(safe)
self.assertTrue(res == safe)
def test_validating_uriquote(self):
ntriples.validate = True
uniquot = """<http://www.w3.org/People/Berners-Lee/card#cm> <http://xmlns.com/foaf/0.1/name> "R\\u00E4ksm\\u00F6rg\\u00E5s" <http://www.w3.org/People/Berners-Lee/card> ."""
res = ntriples.uriquote(uniquot)
# revert to default
ntriples.validate = False
self.assertEqual(res, uniquot)
def test_NTriplesParser_fpath(self):
fpath = "test/nt/" + os.listdir("test/nt")[0]
p = ntriples.NTriplesParser()
self.assertRaises(ntriples.ParseError, p.parse, fpath)
def test_NTriplesParser_parsestring(self):
p = ntriples.NTriplesParser()
data = 3
self.assertRaises(ntriples.ParseError, p.parsestring, data)
fname = "test/nt/lists-02.nt"
with open(fname, "r") as f:
data = f.read()
p = ntriples.NTriplesParser()
res = p.parsestring(data)
self.assertTrue(res == None)
def test_w3_ntriple_variants(self):
uri = "file:///" + os.getcwd() + "/test/nt/test.ntriples"
parser = ntriples.NTriplesParser()
u = urlopen(uri)
sink = parser.parse(u)
u.close()
# ATM we are only really interested in any exceptions thrown
self.assertTrue(sink is not None)
def test_bad_line(self):
data = (
"""<http://example.org/resource32> 3 <http://example.org/datatype1> .\n"""
)
p = ntriples.NTriplesParser()
self.assertRaises(ntriples.ParseError, p.parsestring, data)
def test_cover_eat(self):
data = (
"""<http://example.org/resource32> 3 <http://example.org/datatype1> .\n"""
)
p = ntriples.NTriplesParser()
p.line = data
self.assertRaises(
ntriples.ParseError, p.eat, re.compile("<http://example.org/datatype1>")
)
def test_cover_subjectobjectliteral(self):
# data = '''<http://example.org/resource32> 3 <http://example.org/datatype1> .\n'''
p = ntriples.NTriplesParser()
p.line = "baz"
self.assertRaises(ntriples.ParseError, p.subject)
self.assertRaises(ntriples.ParseError, p.object)
# p.line = '"baz"@fr^^<http://example.org/datatype1>'
# self.assertRaises(ntriples.ParseError, p.literal)
class BNodeContextTestCase(unittest.TestCase):
def test_bnode_shared_across_instances(self):
my_sink = FakeSink()
bnode_context = dict()
p = ntriples.NTriplesParser(my_sink, bnode_context=bnode_context)
p.parsestring('''
_:0 <http://purl.obolibrary.org/obo/RO_0002350> <http://www.gbif.org/species/0000001> .
''')
q = ntriples.NTriplesParser(my_sink, bnode_context=bnode_context)
q.parsestring('''
_:0 <http://purl.obolibrary.org/obo/RO_0002350> <http://www.gbif.org/species/0000002> .
''')
self.assertEqual(len(my_sink.subs), 1)
def test_bnode_distinct_across_instances(self):
my_sink = FakeSink()
p = ntriples.NTriplesParser(my_sink)
p.parsestring('''
_:0 <http://purl.obolibrary.org/obo/RO_0002350> <http://www.gbif.org/species/0000001> .
''')
q = ntriples.NTriplesParser(my_sink)
q.parsestring('''
_:0 <http://purl.obolibrary.org/obo/RO_0002350> <http://www.gbif.org/species/0000002> .
''')
self.assertEqual(len(my_sink.subs), 2)
def test_bnode_distinct_across_parse(self):
my_sink = FakeSink()
p = ntriples.NTriplesParser(my_sink)
p.parsestring('''
_:0 <http://purl.obolibrary.org/obo/RO_0002350> <http://www.gbif.org/species/0000001> .
''', bnode_context=dict())
p.parsestring('''
_:0 <http://purl.obolibrary.org/obo/RO_0002350> <http://www.gbif.org/species/0000002> .
''', bnode_context=dict())
self.assertEqual(len(my_sink.subs), 2)
def test_bnode_shared_across_parse(self):
my_sink = FakeSink()
p = ntriples.NTriplesParser(my_sink)
p.parsestring('''
_:0 <http://purl.obolibrary.org/obo/RO_0002350> <http://www.gbif.org/species/0000001> .
''')
p.parsestring('''
_:0 <http://purl.obolibrary.org/obo/RO_0002350> <http://www.gbif.org/species/0000002> .
''')
self.assertEqual(len(my_sink.subs), 1)
class FakeSink(object):
def __init__(self):
self.subs = set()
def triple(self, s, p, o):
self.subs.add(s)
if __name__ == "__main__":
unittest.main()
| 38.113861 | 183 | 0.621769 |
4a21feeabc8e30b6ff43f946464501e905f96efd | 15,426 | py | Python | src/emuvim/test/unittests/test_resourcemodel.py | PedroPCardoso/fogbed | 11d9c8ce6ccd32ee71fbb77d719cc322dd9515da | [
"Apache-2.0"
] | null | null | null | src/emuvim/test/unittests/test_resourcemodel.py | PedroPCardoso/fogbed | 11d9c8ce6ccd32ee71fbb77d719cc322dd9515da | [
"Apache-2.0"
] | null | null | null | src/emuvim/test/unittests/test_resourcemodel.py | PedroPCardoso/fogbed | 11d9c8ce6ccd32ee71fbb77d719cc322dd9515da | [
"Apache-2.0"
] | null | null | null | """
Copyright (c) 2015 SONATA-NFV
ALL RIGHTS RESERVED.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
This work has been performed in the framework of the SONATA project,
funded by the European Commission under Grant number 671517 through
the Horizon 2020 and 5G-PPP programmes. The authors would like to
acknowledge the contributions of their colleagues of the SONATA
partner consortium (www.sonata-nfv.eu).
"""
import time
import os
import unittest
from emuvim.test.base import SimpleTestTopology
from emuvim.dcemulator.resourcemodel import BaseResourceModel, ResourceFlavor, NotEnoughResourcesAvailable, ResourceModelRegistrar
from emuvim.dcemulator.resourcemodel.upb.simple import UpbSimpleCloudDcRM, UpbOverprovisioningCloudDcRM, UpbDummyRM
class testResourceModel(SimpleTestTopology):
"""
Test the general resource model API and functionality.
"""
def testBaseResourceModelApi(self):
"""
        Test the bare API without a real resource model.
:return:
"""
r = BaseResourceModel()
# check if default flavors are there
self.assertTrue(len(r._flavors) == 5)
# check addFlavor functionality
f = ResourceFlavor("test", {"testmetric": 42})
r.addFlavour(f)
self.assertTrue("test" in r._flavors)
self.assertTrue(r._flavors.get("test").get("testmetric") == 42)
def testAddRmToDc(self):
"""
Test is allocate/free is called when a RM is added to a DC.
:return:
"""
# create network
self.createNet(nswitches=0, ndatacenter=1, nhosts=2, ndockers=0)
# setup links
self.net.addLink(self.dc[0], self.h[0])
self.net.addLink(self.h[1], self.dc[0])
# add resource model
r = BaseResourceModel()
self.dc[0].assignResourceModel(r)
# start Mininet network
self.startNet()
# check number of running nodes
self.assertTrue(len(self.getContainernetContainers()) == 0)
self.assertTrue(len(self.net.hosts) == 2)
self.assertTrue(len(self.net.switches) == 1)
# check resource model and resource model registrar
self.assertTrue(self.dc[0]._resource_model is not None)
self.assertTrue(len(self.net.rm_registrar.resource_models) == 1)
# check if alloc was called during startCompute
self.assertTrue(len(r._allocated_compute_instances) == 0)
self.dc[0].startCompute("tc1")
time.sleep(1)
self.assertTrue(len(r._allocated_compute_instances) == 1)
# check if free was called during stopCompute
self.dc[0].stopCompute("tc1")
self.assertTrue(len(r._allocated_compute_instances) == 0)
# check connectivity by using ping
self.assertTrue(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
# stop Mininet network
self.stopNet()
def createDummyContainerObject(name, flavor):
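    """Build a stand-in for an emulated container: it only records the
    CPU/memory limits pushed down via updateCpuLimit()/updateMemoryLimit(),
    so allocation results can be checked without starting real containers."""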
class DummyContainer(object):
def __init__(self):
            # take default values from son-emu
self.resources = dict(
cpu_period = -1,
cpu_quota = -1,
mem_limit = -1,
memswap_limit = -1
)
#self.cpu_period = self.resources['cpu_period']
#self.cpu_quota = self.resources['cpu_quota']
#self.mem_limit = self.resources['mem_limit']
#self.memswap_limit = self.resources['memswap_limit']
def updateCpuLimit(self, cpu_period, cpu_quota):
self.resources['cpu_period'] = cpu_period
self.resources['cpu_quota'] = cpu_quota
def updateMemoryLimit(self, mem_limit):
self.resources['mem_limit'] = mem_limit
d = DummyContainer()
d.name = name
d.flavor_name = flavor
return d
class testUpbSimpleCloudDcRM(SimpleTestTopology):
"""
Test the UpbSimpleCloudDc resource model.
"""
def testAllocationComputations(self):
"""
Test the allocation procedures and correct calculations.
:return:
"""
# config
E_CPU = 1.0
MAX_CU = 100
E_MEM = 512
MAX_MU = 2048
# create dummy resource model environment
reg = ResourceModelRegistrar(dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM)
rm = UpbSimpleCloudDcRM(max_cu=MAX_CU, max_mu=MAX_MU)
reg.register("test_dc", rm)
c1 = createDummyContainerObject("c1", flavor="tiny")
rm.allocate(c1) # calculate allocation
self.assertEqual(float(c1.resources['cpu_quota']) / c1.resources['cpu_period'], E_CPU / MAX_CU * 0.5) # validate compute result
self.assertEqual(float(c1.resources['mem_limit']/1024/1024), float(E_MEM) / MAX_MU * 32) # validate memory result
c2 = createDummyContainerObject("c2", flavor="small")
rm.allocate(c2) # calculate allocation
self.assertEqual(float(c2.resources['cpu_quota']) / c2.resources['cpu_period'], E_CPU / MAX_CU * 1) # validate compute result
self.assertEqual(float(c2.resources['mem_limit']/1024/1024), float(E_MEM) / MAX_MU * 128) # validate memory result
c3 = createDummyContainerObject("c3", flavor="medium")
rm.allocate(c3) # calculate allocation
self.assertEqual(float(c3.resources['cpu_quota']) / c3.resources['cpu_period'], E_CPU / MAX_CU * 4) # validate compute result
self.assertEqual(float(c3.resources['mem_limit']/1024/1024), float(E_MEM) / MAX_MU * 256) # validate memory result
c4 = createDummyContainerObject("c4", flavor="large")
rm.allocate(c4) # calculate allocation
self.assertEqual(float(c4.resources['cpu_quota']) / c4.resources['cpu_period'], E_CPU / MAX_CU * 8) # validate compute result
self.assertEqual(float(c4.resources['mem_limit']/1024/1024), float(E_MEM) / MAX_MU * 512) # validate memory result
c5 = createDummyContainerObject("c5", flavor="xlarge")
rm.allocate(c5) # calculate allocation
self.assertEqual(float(c5.resources['cpu_quota']) / c5.resources['cpu_period'], E_CPU / MAX_CU * 16) # validate compute result
self.assertEqual(float(c5.resources['mem_limit']/1024/1024), float(E_MEM) / MAX_MU * 1024) # validate memory result
def testAllocationCpuLimit(self):
"""
Test CPU allocation limit
:return:
"""
# config
E_CPU = 1.0
MAX_CU = 40
E_MEM = 512
MAX_MU = 4096
# create dummy resource model environment
reg = ResourceModelRegistrar(dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM)
rm = UpbSimpleCloudDcRM(max_cu=MAX_CU, max_mu=MAX_MU)
reg.register("test_dc", rm)
        # test over-provisioning exception
exception = False
try:
c6 = createDummyContainerObject("c6", flavor="xlarge")
c7 = createDummyContainerObject("c7", flavor="xlarge")
c8 = createDummyContainerObject("c8", flavor="xlarge")
c9 = createDummyContainerObject("c9", flavor="xlarge")
rm.allocate(c6) # calculate allocation
rm.allocate(c7) # calculate allocation
rm.allocate(c8) # calculate allocation
rm.allocate(c9) # calculate allocation
except NotEnoughResourcesAvailable as e:
self.assertIn("Not enough compute", e.message)
exception = True
self.assertTrue(exception)
def testAllocationMemLimit(self):
"""
Test MEM allocation limit
:return:
"""
# config
E_CPU = 1.0
MAX_CU = 500
E_MEM = 512
MAX_MU = 2048
# create dummy resource model environment
reg = ResourceModelRegistrar(dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM)
rm = UpbSimpleCloudDcRM(max_cu=MAX_CU, max_mu=MAX_MU)
reg.register("test_dc", rm)
        # test over-provisioning exception
exception = False
try:
c6 = createDummyContainerObject("c6", flavor="xlarge")
c7 = createDummyContainerObject("c7", flavor="xlarge")
c8 = createDummyContainerObject("c8", flavor="xlarge")
rm.allocate(c6) # calculate allocation
rm.allocate(c7) # calculate allocation
rm.allocate(c8) # calculate allocation
except NotEnoughResourcesAvailable as e:
self.assertIn("Not enough memory", e.message)
exception = True
self.assertTrue(exception)
def testFree(self):
"""
Test the free procedure.
:return:
"""
# config
E_CPU = 1.0
MAX_CU = 100
# create dummy resource model environment
reg = ResourceModelRegistrar(dc_emulation_max_cpu=1.0, dc_emulation_max_mem=512)
rm = UpbSimpleCloudDcRM(max_cu=100, max_mu=100)
reg.register("test_dc", rm)
c1 = createDummyContainerObject("c6", flavor="tiny")
rm.allocate(c1) # calculate allocation
self.assertTrue(rm.dc_alloc_cu == 0.5)
rm.free(c1)
self.assertTrue(rm.dc_alloc_cu == 0)
@unittest.skipIf(os.environ.get("SON_EMU_IN_DOCKER") is not None,
"skipping test when running inside Docker container")
def testInRealTopo(self):
"""
        Start a real container and check if limitations are really passed down to Containernet.
:return:
"""
# create network
self.createNet(nswitches=0, ndatacenter=1, nhosts=2, ndockers=0)
# setup links
self.net.addLink(self.dc[0], self.h[0])
self.net.addLink(self.h[1], self.dc[0])
# add resource model
r = UpbSimpleCloudDcRM(max_cu=100, max_mu=100)
self.dc[0].assignResourceModel(r)
# start Mininet network
self.startNet()
# check number of running nodes
self.assertTrue(len(self.getContainernetContainers()) == 0)
self.assertTrue(len(self.net.hosts) == 2)
self.assertTrue(len(self.net.switches) == 1)
# check resource model and resource model registrar
self.assertTrue(self.dc[0]._resource_model is not None)
self.assertTrue(len(self.net.rm_registrar.resource_models) == 1)
# check if alloc was called during startCompute
self.assertTrue(len(r._allocated_compute_instances) == 0)
tc1 = self.dc[0].startCompute("tc1", flavor_name="tiny")
time.sleep(1)
self.assertTrue(len(r._allocated_compute_instances) == 1)
# check if there is a real limitation set for containers cgroup
# deactivated for now, seems not to work in docker-in-docker setup used in CI
self.assertEqual(float(tc1.resources['cpu_quota'])/tc1.resources['cpu_period'], 0.005)
# check if free was called during stopCompute
self.dc[0].stopCompute("tc1")
self.assertTrue(len(r._allocated_compute_instances) == 0)
# check connectivity by using ping
self.assertTrue(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
# stop Mininet network
self.stopNet()
class testUpbOverprovisioningCloudDcRM(SimpleTestTopology):
"""
Test the UpbOverprovisioningCloudDc resource model.
"""
def testAllocationComputations(self):
"""
Test the allocation procedures and correct calculations.
:return:
"""
# config
E_CPU = 1.0
MAX_CU = 3
E_MEM = 512
MAX_MU = 2048
# create dummy resource model environment
reg = ResourceModelRegistrar(dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM)
rm = UpbOverprovisioningCloudDcRM(max_cu=MAX_CU, max_mu=MAX_MU)
reg.register("test_dc", rm)
c1 = createDummyContainerObject("c1", flavor="small")
rm.allocate(c1) # calculate allocation
self.assertAlmostEqual(float(c1.resources['cpu_quota']) / c1.resources['cpu_period'], E_CPU / MAX_CU * 1.0, places=5)
self.assertAlmostEqual(float(c1.resources['mem_limit']/1024/1024), float(E_MEM) / MAX_MU * 128)
self.assertAlmostEqual(rm.cpu_op_factor, 1.0)
c2 = createDummyContainerObject("c2", flavor="small")
rm.allocate(c2) # calculate allocation
self.assertAlmostEqual(float(c2.resources['cpu_quota']) / c2.resources['cpu_period'], E_CPU / MAX_CU * 1.0, places=5)
self.assertAlmostEqual(float(c2.resources['mem_limit']/1024/1024), float(E_MEM) / MAX_MU * 128)
self.assertAlmostEqual(rm.cpu_op_factor, 1.0)
c3 = createDummyContainerObject("c3", flavor="small")
rm.allocate(c3) # calculate allocation
self.assertAlmostEqual(float(c3.resources['cpu_quota']) / c3.resources['cpu_period'], E_CPU / MAX_CU * 1.0, places=5)
self.assertAlmostEqual(float(c3.resources['mem_limit']/1024/1024), float(E_MEM) / MAX_MU * 128)
self.assertAlmostEqual(rm.cpu_op_factor, 1.0)
# from this container onwards, we should go to over provisioning mode:
c4 = createDummyContainerObject("c4", flavor="small")
rm.allocate(c4) # calculate allocation
self.assertAlmostEqual(float(c4.resources['cpu_quota']) / c4.resources['cpu_period'], E_CPU / MAX_CU * (float(3) / 4), places=5)
self.assertAlmostEqual(float(c4.resources['mem_limit']/1024/1024), float(E_MEM) / MAX_MU * 128, places=5)
self.assertAlmostEqual(rm.cpu_op_factor, 0.75)
c5 = createDummyContainerObject("c5", flavor="small")
rm.allocate(c5) # calculate allocation
self.assertAlmostEqual(float(c5.resources['cpu_quota']) / c5.resources['cpu_period'], E_CPU / MAX_CU * (float(3) / 5), places=5)
self.assertAlmostEqual(float(c5.resources['mem_limit']/1024/1024), float(E_MEM) / MAX_MU * 128)
self.assertAlmostEqual(rm.cpu_op_factor, 0.6)
class testUpbDummyRM(SimpleTestTopology):
"""
Test the UpbDummyRM resource model.
"""
def testAllocationComputations(self):
"""
Test the allocation procedures and correct calculations.
:return:
"""
# config
E_CPU = 1.0
MAX_CU = 3
E_MEM = 512
MAX_MU = 2048
# create dummy resource model environment
reg = ResourceModelRegistrar(dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM)
rm = UpbDummyRM(max_cu=MAX_CU, max_mu=MAX_MU)
reg.register("test_dc", rm)
c1 = createDummyContainerObject("c1", flavor="small")
rm.allocate(c1) # calculate allocation
self.assertEqual(len(rm._allocated_compute_instances), 1)
c2 = createDummyContainerObject("c2", flavor="small")
rm.allocate(c2) # calculate allocation
self.assertEqual(len(rm._allocated_compute_instances), 2)
| 41.245989 | 137 | 0.654868 |
4a21ff063180f88cc7e4269a01fd7e0275c70e87 | 4,561 | py | Python | test/mitmproxy/proxy2/layers/http/test_http_version_interop.py | itsintern/mitmproxy | b7efe9b2d4b986933f904912324b770dfb3e3da4 | [
"MIT"
] | 2 | 2022-03-21T16:47:15.000Z | 2022-03-24T11:38:12.000Z | test/mitmproxy/proxy2/layers/http/test_http_version_interop.py | Zer0Power/mitmproxy | b7efe9b2d4b986933f904912324b770dfb3e3da4 | [
"MIT"
] | null | null | null | test/mitmproxy/proxy2/layers/http/test_http_version_interop.py | Zer0Power/mitmproxy | b7efe9b2d4b986933f904912324b770dfb3e3da4 | [
"MIT"
] | null | null | null | from typing import Tuple
import h2.config
import h2.connection
import h2.events
from mitmproxy.http import HTTPFlow
from mitmproxy.proxy.protocol.http import HTTPMode
from mitmproxy.proxy2.commands import CloseConnection, OpenConnection, SendData
from mitmproxy.proxy2.context import Context, Server
from mitmproxy.proxy2.events import DataReceived
from mitmproxy.proxy2.layers import http
from test.mitmproxy.proxy2.layers.http.hyper_h2_test_helpers import FrameFactory
from test.mitmproxy.proxy2.layers.http.test_http2 import example_request_headers, example_response_headers, make_h2
from test.mitmproxy.proxy2.tutils import Placeholder, Playbook, reply
h2f = FrameFactory()
def event_types(events):
return [type(x) for x in events]
def h2_client(tctx: Context) -> Tuple[h2.connection.H2Connection, Playbook]:
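    """Drive the proxy playbook through the HTTP/2 connection preface and
    SETTINGS exchange, returning the client-side h2 connection together with
    the playbook for further assertions."""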
tctx.client.alpn = b"h2"
playbook = Playbook(http.HttpLayer(tctx, HTTPMode.regular))
conn = h2.connection.H2Connection()
conn.initiate_connection()
server_preamble = Placeholder(bytes)
assert (
playbook
<< SendData(tctx.client, server_preamble)
)
assert event_types(conn.receive_data(server_preamble())) == [h2.events.RemoteSettingsChanged]
settings_ack = Placeholder(bytes)
assert (
playbook
>> DataReceived(tctx.client, conn.data_to_send())
<< SendData(tctx.client, settings_ack)
)
assert event_types(conn.receive_data(settings_ack())) == [h2.events.SettingsAcknowledged]
return conn, playbook
def test_h2_to_h1(tctx):
"""Test HTTP/2 -> HTTP/1 request translation"""
server = Placeholder(Server)
flow = Placeholder(HTTPFlow)
conn, playbook = h2_client(tctx)
conn.send_headers(1, example_request_headers, end_stream=True)
response = Placeholder(bytes)
assert (
playbook
>> DataReceived(tctx.client, conn.data_to_send())
<< http.HttpRequestHeadersHook(flow)
>> reply()
<< http.HttpRequestHook(flow)
>> reply()
<< OpenConnection(server)
>> reply(None)
<< SendData(server, b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
>> DataReceived(server, b"HTTP/1.1 200 OK\r\nContent-Length: 12\r\n\r\n")
<< http.HttpResponseHeadersHook(flow)
>> reply()
>> DataReceived(server, b"Hello World!")
<< http.HttpResponseHook(flow)
<< CloseConnection(server)
>> reply(to=-2)
<< SendData(tctx.client, response)
)
events = conn.receive_data(response())
assert event_types(events) == [
h2.events.ResponseReceived, h2.events.DataReceived, h2.events.DataReceived, h2.events.StreamEnded
]
resp: h2.events.ResponseReceived = events[0]
body: h2.events.DataReceived = events[1]
assert resp.headers == [(b':status', b'200'), (b'content-length', b'12')]
assert body.data == b"Hello World!"
def test_h1_to_h2(tctx):
"""Test HTTP/1 -> HTTP/2 request translation"""
server = Placeholder(Server)
flow = Placeholder(HTTPFlow)
playbook = Playbook(http.HttpLayer(tctx, HTTPMode.regular))
conf = h2.config.H2Configuration(client_side=False)
conn = h2.connection.H2Connection(conf)
conn.initiate_connection()
request = Placeholder(bytes)
assert (
playbook
>> DataReceived(tctx.client, b"GET http://example.com/ HTTP/1.1\r\nHost: example.com\r\n\r\n")
<< http.HttpRequestHeadersHook(flow)
>> reply()
<< http.HttpRequestHook(flow)
>> reply()
<< OpenConnection(server)
>> reply(None, side_effect=make_h2)
<< SendData(server, request)
)
events = conn.receive_data(request())
assert event_types(events) == [
h2.events.RemoteSettingsChanged, h2.events.RequestReceived, h2.events.StreamEnded
]
conn.send_headers(1, example_response_headers)
conn.send_data(1, b"Hello World!", end_stream=True)
settings_ack = Placeholder(bytes)
assert (
playbook
>> DataReceived(server, conn.data_to_send())
<< http.HttpResponseHeadersHook(flow)
<< SendData(server, settings_ack)
>> reply(to=-2)
<< http.HttpResponseHook(flow)
>> reply()
<< SendData(tctx.client, b"HTTP/1.1 200 OK\r\n\r\nHello World!")
<< CloseConnection(tctx.client)
)
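    # 9-byte HTTP/2 frame header: length 0, type 0x04 (SETTINGS), flags 0x01 (ACK), stream 0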
assert settings_ack() == b'\x00\x00\x00\x04\x01\x00\x00\x00\x00'
| 35.084615 | 115 | 0.653585 |
4a21ff1f809e8a338db7ed6d1960549ba1adae4a | 135 | py | Python | utils/__init__.py | tamnguyenvan/lipreading | 37f7fc4840cacad9767beba0452cfcc194a2ba1f | [
"Apache-2.0"
] | null | null | null | utils/__init__.py | tamnguyenvan/lipreading | 37f7fc4840cacad9767beba0452cfcc194a2ba1f | [
"Apache-2.0"
] | null | null | null | utils/__init__.py | tamnguyenvan/lipreading | 37f7fc4840cacad9767beba0452cfcc194a2ba1f | [
"Apache-2.0"
] | null | null | null | from .dataset import LRWDataset
from .dataset_lrw1000 import LRW1000_Dataset
from .dataset import AVDataset
from .cvtransforms import * | 33.75 | 44 | 0.851852 |
4a21ff762dcaa1e842347a88e852fa68e44a2f8b | 129,367 | py | Python | Lib/test/test_os.py | ekhavana/cpython | a0e3d2dd09346b01e7d29a35ed31ed28041570b1 | [
"PSF-2.0"
] | null | null | null | Lib/test/test_os.py | ekhavana/cpython | a0e3d2dd09346b01e7d29a35ed31ed28041570b1 | [
"PSF-2.0"
] | null | null | null | Lib/test/test_os.py | ekhavana/cpython | a0e3d2dd09346b01e7d29a35ed31ed28041570b1 | [
"PSF-2.0"
] | null | null | null | # As a test suite for the os module, this is woefully inadequate, but this
# does add tests for a few functions which have been determined to be more
# portable than they had been thought to be.
import asynchat
import asyncore
import codecs
import contextlib
import decimal
import errno
import fractions
import getpass
import itertools
import locale
import mmap
import os
import pickle
import shutil
import signal
import socket
import stat
import subprocess
import sys
import sysconfig
import time
import unittest
import uuid
import warnings
from test import support
try:
import threading
except ImportError:
threading = None
try:
import resource
except ImportError:
resource = None
try:
import fcntl
except ImportError:
fcntl = None
try:
import _winapi
except ImportError:
_winapi = None
try:
import grp
groups = [g.gr_gid for g in grp.getgrall() if getpass.getuser() in g.gr_mem]
if hasattr(os, 'getgid'):
process_gid = os.getgid()
if process_gid not in groups:
groups.append(process_gid)
except ImportError:
groups = []
try:
import pwd
all_users = [u.pw_uid for u in pwd.getpwall()]
except (ImportError, AttributeError):
all_users = []
try:
from _testcapi import INT_MAX, PY_SSIZE_T_MAX
except ImportError:
INT_MAX = PY_SSIZE_T_MAX = sys.maxsize
from test.support.script_helper import assert_python_ok
from test.support import unix_shell
root_in_posix = False
if hasattr(os, 'geteuid'):
root_in_posix = (os.geteuid() == 0)
# Detect whether we're on a Linux system that uses the (now outdated
# and unmaintained) linuxthreads threading library. There's an issue
# when combining linuxthreads with a failed execv call: see
# http://bugs.python.org/issue4970.
if hasattr(sys, 'thread_info') and sys.thread_info.version:
USING_LINUXTHREADS = sys.thread_info.version.startswith("linuxthreads")
else:
USING_LINUXTHREADS = False
# Issue #14110: Some tests fail on FreeBSD if the user is in the wheel group.
HAVE_WHEEL_GROUP = sys.platform.startswith('freebsd') and os.getgid() == 0
@contextlib.contextmanager
def ignore_deprecation_warnings(msg_regex, quiet=False):
with support.check_warnings((msg_regex, DeprecationWarning), quiet=quiet):
yield
def requires_os_func(name):
return unittest.skipUnless(hasattr(os, name), 'requires os.%s' % name)
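# Minimal os.PathLike implementation used to exercise path-like argument
# handling; __fspath__ re-raises the wrapped value when it is an exception.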
class _PathLike(os.PathLike):
def __init__(self, path=""):
self.path = path
def __str__(self):
return str(self.path)
def __fspath__(self):
if isinstance(self.path, BaseException):
raise self.path
else:
return self.path
def create_file(filename, content=b'content'):
with open(filename, "xb", 0) as fp:
fp.write(content)
# Tests creating TESTFN
class FileTests(unittest.TestCase):
def setUp(self):
if os.path.lexists(support.TESTFN):
os.unlink(support.TESTFN)
tearDown = setUp
def test_access(self):
f = os.open(support.TESTFN, os.O_CREAT|os.O_RDWR)
os.close(f)
self.assertTrue(os.access(support.TESTFN, os.W_OK))
def test_closerange(self):
first = os.open(support.TESTFN, os.O_CREAT|os.O_RDWR)
# We must allocate two consecutive file descriptors, otherwise
# it will mess up other file descriptors (perhaps even the three
# standard ones).
second = os.dup(first)
try:
retries = 0
while second != first + 1:
os.close(first)
retries += 1
if retries > 10:
# XXX test skipped
self.skipTest("couldn't allocate two consecutive fds")
first, second = second, os.dup(second)
finally:
os.close(second)
# close a fd that is open, and one that isn't
os.closerange(first, first + 2)
self.assertRaises(OSError, os.write, first, b"a")
@support.cpython_only
def test_rename(self):
path = support.TESTFN
old = sys.getrefcount(path)
self.assertRaises(TypeError, os.rename, path, 0)
new = sys.getrefcount(path)
self.assertEqual(old, new)
def test_read(self):
with open(support.TESTFN, "w+b") as fobj:
fobj.write(b"spam")
fobj.flush()
fd = fobj.fileno()
os.lseek(fd, 0, 0)
s = os.read(fd, 4)
self.assertEqual(type(s), bytes)
self.assertEqual(s, b"spam")
@support.cpython_only
# Skip the test on 32-bit platforms: the number of bytes must fit in a
# Py_ssize_t type
@unittest.skipUnless(INT_MAX < PY_SSIZE_T_MAX,
"needs INT_MAX < PY_SSIZE_T_MAX")
@support.bigmemtest(size=INT_MAX + 10, memuse=1, dry_run=False)
def test_large_read(self, size):
self.addCleanup(support.unlink, support.TESTFN)
create_file(support.TESTFN, b'test')
# Issue #21932: Make sure that os.read() does not raise an
# OverflowError for size larger than INT_MAX
with open(support.TESTFN, "rb") as fp:
data = os.read(fp.fileno(), size)
# The test does not try to read more than 2 GB at once because the
        # operating system is free to return fewer bytes than requested.
self.assertEqual(data, b'test')
def test_write(self):
# os.write() accepts bytes- and buffer-like objects but not strings
fd = os.open(support.TESTFN, os.O_CREAT | os.O_WRONLY)
self.assertRaises(TypeError, os.write, fd, "beans")
os.write(fd, b"bacon\n")
os.write(fd, bytearray(b"eggs\n"))
os.write(fd, memoryview(b"spam\n"))
os.close(fd)
with open(support.TESTFN, "rb") as fobj:
self.assertEqual(fobj.read().splitlines(),
[b"bacon", b"eggs", b"spam"])
def write_windows_console(self, *args):
retcode = subprocess.call(args,
# use a new console to not flood the test output
creationflags=subprocess.CREATE_NEW_CONSOLE,
# use a shell to hide the console window (SW_HIDE)
shell=True)
self.assertEqual(retcode, 0)
@unittest.skipUnless(sys.platform == 'win32',
'test specific to the Windows console')
def test_write_windows_console(self):
# Issue #11395: the Windows console returns an error (12: not enough
# space error) on writing into stdout if stdout mode is binary and the
# length is greater than 66,000 bytes (or less, depending on heap
# usage).
code = "print('x' * 100000)"
self.write_windows_console(sys.executable, "-c", code)
self.write_windows_console(sys.executable, "-u", "-c", code)
def fdopen_helper(self, *args):
fd = os.open(support.TESTFN, os.O_RDONLY)
f = os.fdopen(fd, *args)
f.close()
def test_fdopen(self):
fd = os.open(support.TESTFN, os.O_CREAT|os.O_RDWR)
os.close(fd)
self.fdopen_helper()
self.fdopen_helper('r')
self.fdopen_helper('r', 100)
def test_replace(self):
TESTFN2 = support.TESTFN + ".2"
self.addCleanup(support.unlink, support.TESTFN)
self.addCleanup(support.unlink, TESTFN2)
create_file(support.TESTFN, b"1")
create_file(TESTFN2, b"2")
os.replace(support.TESTFN, TESTFN2)
self.assertRaises(FileNotFoundError, os.stat, support.TESTFN)
with open(TESTFN2, 'r') as f:
self.assertEqual(f.read(), "1")
def test_open_keywords(self):
f = os.open(path=__file__, flags=os.O_RDONLY, mode=0o777,
dir_fd=None)
os.close(f)
def test_symlink_keywords(self):
symlink = support.get_attribute(os, "symlink")
try:
symlink(src='target', dst=support.TESTFN,
target_is_directory=False, dir_fd=None)
except (NotImplementedError, OSError):
pass # No OS support or unprivileged user
# Test attributes on return values from os.*stat* family.
class StatAttributeTests(unittest.TestCase):
def setUp(self):
self.fname = support.TESTFN
self.addCleanup(support.unlink, self.fname)
create_file(self.fname, b"ABC")
@unittest.skipUnless(hasattr(os, 'stat'), 'test needs os.stat()')
def check_stat_attributes(self, fname):
result = os.stat(fname)
# Make sure direct access works
self.assertEqual(result[stat.ST_SIZE], 3)
self.assertEqual(result.st_size, 3)
# Make sure all the attributes are there
members = dir(result)
for name in dir(stat):
if name[:3] == 'ST_':
attr = name.lower()
if name.endswith("TIME"):
def trunc(x): return int(x)
else:
def trunc(x): return x
self.assertEqual(trunc(getattr(result, attr)),
result[getattr(stat, name)])
self.assertIn(attr, members)
# Make sure that the st_?time and st_?time_ns fields roughly agree
# (they should always agree up to around tens-of-microseconds)
for name in 'st_atime st_mtime st_ctime'.split():
floaty = int(getattr(result, name) * 100000)
nanosecondy = getattr(result, name + "_ns") // 10000
self.assertAlmostEqual(floaty, nanosecondy, delta=2)
try:
result[200]
self.fail("No exception raised")
except IndexError:
pass
# Make sure that assignment fails
try:
result.st_mode = 1
self.fail("No exception raised")
except AttributeError:
pass
try:
result.st_rdev = 1
self.fail("No exception raised")
except (AttributeError, TypeError):
pass
try:
result.parrot = 1
self.fail("No exception raised")
except AttributeError:
pass
# Use the stat_result constructor with a too-short tuple.
try:
result2 = os.stat_result((10,))
self.fail("No exception raised")
except TypeError:
pass
# Use the constructor with a too-long tuple.
try:
result2 = os.stat_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))
except TypeError:
pass
def test_stat_attributes(self):
self.check_stat_attributes(self.fname)
def test_stat_attributes_bytes(self):
try:
fname = self.fname.encode(sys.getfilesystemencoding())
except UnicodeEncodeError:
self.skipTest("cannot encode %a for the filesystem" % self.fname)
self.check_stat_attributes(fname)
def test_stat_result_pickle(self):
result = os.stat(self.fname)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
p = pickle.dumps(result, proto)
self.assertIn(b'stat_result', p)
if proto < 4:
self.assertIn(b'cos\nstat_result\n', p)
unpickled = pickle.loads(p)
self.assertEqual(result, unpickled)
@unittest.skipUnless(hasattr(os, 'statvfs'), 'test needs os.statvfs()')
def test_statvfs_attributes(self):
try:
result = os.statvfs(self.fname)
except OSError as e:
# On AtheOS, glibc always returns ENOSYS
if e.errno == errno.ENOSYS:
self.skipTest('os.statvfs() failed with ENOSYS')
# Make sure direct access works
self.assertEqual(result.f_bfree, result[3])
# Make sure all the attributes are there.
members = ('bsize', 'frsize', 'blocks', 'bfree', 'bavail', 'files',
'ffree', 'favail', 'flag', 'namemax')
for value, member in enumerate(members):
self.assertEqual(getattr(result, 'f_' + member), result[value])
# Make sure that assignment really fails
try:
result.f_bfree = 1
self.fail("No exception raised")
except AttributeError:
pass
try:
result.parrot = 1
self.fail("No exception raised")
except AttributeError:
pass
# Use the constructor with a too-short tuple.
try:
result2 = os.statvfs_result((10,))
self.fail("No exception raised")
except TypeError:
pass
# Use the constructor with a too-long tuple.
try:
result2 = os.statvfs_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))
except TypeError:
pass
@unittest.skipUnless(hasattr(os, 'statvfs'),
"need os.statvfs()")
def test_statvfs_result_pickle(self):
try:
result = os.statvfs(self.fname)
except OSError as e:
# On AtheOS, glibc always returns ENOSYS
if e.errno == errno.ENOSYS:
self.skipTest('os.statvfs() failed with ENOSYS')
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
p = pickle.dumps(result, proto)
self.assertIn(b'statvfs_result', p)
if proto < 4:
self.assertIn(b'cos\nstatvfs_result\n', p)
unpickled = pickle.loads(p)
self.assertEqual(result, unpickled)
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
def test_1686475(self):
# Verify that an open file can be stat'ed
try:
os.stat(r"c:\pagefile.sys")
except FileNotFoundError:
self.skipTest(r'c:\pagefile.sys does not exist')
except OSError as e:
self.fail("Could not stat pagefile.sys")
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
@unittest.skipUnless(hasattr(os, "pipe"), "requires os.pipe()")
def test_15261(self):
# Verify that stat'ing a closed fd does not cause crash
r, w = os.pipe()
try:
os.stat(r) # should not raise error
finally:
os.close(r)
os.close(w)
with self.assertRaises(OSError) as ctx:
os.stat(r)
self.assertEqual(ctx.exception.errno, errno.EBADF)
def check_file_attributes(self, result):
self.assertTrue(hasattr(result, 'st_file_attributes'))
self.assertTrue(isinstance(result.st_file_attributes, int))
self.assertTrue(0 <= result.st_file_attributes <= 0xFFFFFFFF)
@unittest.skipUnless(sys.platform == "win32",
"st_file_attributes is Win32 specific")
def test_file_attributes(self):
# test file st_file_attributes (FILE_ATTRIBUTE_DIRECTORY not set)
result = os.stat(self.fname)
self.check_file_attributes(result)
self.assertEqual(
result.st_file_attributes & stat.FILE_ATTRIBUTE_DIRECTORY,
0)
# test directory st_file_attributes (FILE_ATTRIBUTE_DIRECTORY set)
dirname = support.TESTFN + "dir"
os.mkdir(dirname)
self.addCleanup(os.rmdir, dirname)
result = os.stat(dirname)
self.check_file_attributes(result)
self.assertEqual(
result.st_file_attributes & stat.FILE_ATTRIBUTE_DIRECTORY,
stat.FILE_ATTRIBUTE_DIRECTORY)
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
def test_access_denied(self):
# Default to FindFirstFile WIN32_FIND_DATA when access is
# denied. See issue 28075.
# os.environ['TEMP'] should be located on a volume that
# supports file ACLs.
fname = os.path.join(os.environ['TEMP'], self.fname)
self.addCleanup(support.unlink, fname)
create_file(fname, b'ABC')
# Deny the right to [S]YNCHRONIZE on the file to
# force CreateFile to fail with ERROR_ACCESS_DENIED.
DETACHED_PROCESS = 8
subprocess.check_call(
# bpo-30584: Use security identifier *S-1-5-32-545 instead
# of localized "Users" to not depend on the locale.
['icacls.exe', fname, '/deny', '*S-1-5-32-545:(S)'],
creationflags=DETACHED_PROCESS
)
result = os.stat(fname)
self.assertNotEqual(result.st_size, 0)
class UtimeTests(unittest.TestCase):
def setUp(self):
self.dirname = support.TESTFN
self.fname = os.path.join(self.dirname, "f1")
self.addCleanup(support.rmtree, self.dirname)
os.mkdir(self.dirname)
create_file(self.fname)
def restore_float_times(state):
with ignore_deprecation_warnings('stat_float_times'):
os.stat_float_times(state)
# ensure that st_atime and st_mtime are float
with ignore_deprecation_warnings('stat_float_times'):
old_float_times = os.stat_float_times(-1)
self.addCleanup(restore_float_times, old_float_times)
os.stat_float_times(True)
def support_subsecond(self, filename):
# Heuristic to check if the filesystem supports timestamp with
# subsecond resolution: check if float and int timestamps are different
st = os.stat(filename)
return ((st.st_atime != st[7])
or (st.st_mtime != st[8])
or (st.st_ctime != st[9]))
def _test_utime(self, set_time, filename=None):
if not filename:
filename = self.fname
support_subsecond = self.support_subsecond(filename)
if support_subsecond:
# Timestamp with a resolution of 1 microsecond (10^-6).
#
# The resolution of the C internal function used by os.utime()
# depends on the platform: 1 sec, 1 us, 1 ns. Writing a portable
# test with a resolution of 1 ns requires more work:
# see the issue #15745.
atime_ns = 1002003000 # 1.002003 seconds
mtime_ns = 4005006000 # 4.005006 seconds
else:
# use a resolution of 1 second
atime_ns = 5 * 10**9
mtime_ns = 8 * 10**9
set_time(filename, (atime_ns, mtime_ns))
st = os.stat(filename)
if support_subsecond:
self.assertAlmostEqual(st.st_atime, atime_ns * 1e-9, delta=1e-6)
self.assertAlmostEqual(st.st_mtime, mtime_ns * 1e-9, delta=1e-6)
else:
self.assertEqual(st.st_atime, atime_ns * 1e-9)
self.assertEqual(st.st_mtime, mtime_ns * 1e-9)
self.assertEqual(st.st_atime_ns, atime_ns)
self.assertEqual(st.st_mtime_ns, mtime_ns)
def test_utime(self):
def set_time(filename, ns):
# test the ns keyword parameter
os.utime(filename, ns=ns)
self._test_utime(set_time)
@staticmethod
def ns_to_sec(ns):
# Convert a number of nanosecond (int) to a number of seconds (float).
# Round towards infinity by adding 0.5 nanosecond to avoid rounding
# issue, os.utime() rounds towards minus infinity.
return (ns * 1e-9) + 0.5e-9
def test_utime_by_indexed(self):
# pass times as floating point seconds as the second indexed parameter
def set_time(filename, ns):
atime_ns, mtime_ns = ns
atime = self.ns_to_sec(atime_ns)
mtime = self.ns_to_sec(mtime_ns)
# test utimensat(timespec), utimes(timeval), utime(utimbuf)
# or utime(time_t)
os.utime(filename, (atime, mtime))
self._test_utime(set_time)
def test_utime_by_times(self):
def set_time(filename, ns):
atime_ns, mtime_ns = ns
atime = self.ns_to_sec(atime_ns)
mtime = self.ns_to_sec(mtime_ns)
# test the times keyword parameter
os.utime(filename, times=(atime, mtime))
self._test_utime(set_time)
@unittest.skipUnless(os.utime in os.supports_follow_symlinks,
"follow_symlinks support for utime required "
"for this test.")
def test_utime_nofollow_symlinks(self):
def set_time(filename, ns):
# use follow_symlinks=False to test utimensat(timespec)
# or lutimes(timeval)
os.utime(filename, ns=ns, follow_symlinks=False)
self._test_utime(set_time)
@unittest.skipUnless(os.utime in os.supports_fd,
"fd support for utime required for this test.")
def test_utime_fd(self):
def set_time(filename, ns):
with open(filename, 'wb', 0) as fp:
# use a file descriptor to test futimens(timespec)
# or futimes(timeval)
os.utime(fp.fileno(), ns=ns)
self._test_utime(set_time)
@unittest.skipUnless(os.utime in os.supports_dir_fd,
"dir_fd support for utime required for this test.")
def test_utime_dir_fd(self):
def set_time(filename, ns):
dirname, name = os.path.split(filename)
dirfd = os.open(dirname, os.O_RDONLY)
try:
# pass dir_fd to test utimensat(timespec) or futimesat(timeval)
os.utime(name, dir_fd=dirfd, ns=ns)
finally:
os.close(dirfd)
self._test_utime(set_time)
def test_utime_directory(self):
def set_time(filename, ns):
# test calling os.utime() on a directory
os.utime(filename, ns=ns)
self._test_utime(set_time, filename=self.dirname)
def _test_utime_current(self, set_time):
# Get the system clock
current = time.time()
# Call os.utime() to set the timestamp to the current system clock
set_time(self.fname)
if not self.support_subsecond(self.fname):
delta = 1.0
else:
# On Windows, the usual resolution of time.time() is 15.6 ms
delta = 0.020
st = os.stat(self.fname)
msg = ("st_time=%r, current=%r, dt=%r"
% (st.st_mtime, current, st.st_mtime - current))
self.assertAlmostEqual(st.st_mtime, current,
delta=delta, msg=msg)
def test_utime_current(self):
def set_time(filename):
# Set to the current time in the new way
os.utime(self.fname)
self._test_utime_current(set_time)
def test_utime_current_old(self):
def set_time(filename):
# Set to the current time in the old explicit way.
os.utime(self.fname, None)
self._test_utime_current(set_time)
def get_file_system(self, path):
if sys.platform == 'win32':
root = os.path.splitdrive(os.path.abspath(path))[0] + '\\'
import ctypes
kernel32 = ctypes.windll.kernel32
buf = ctypes.create_unicode_buffer("", 100)
ok = kernel32.GetVolumeInformationW(root, None, 0,
None, None, None,
buf, len(buf))
if ok:
return buf.value
# return None if the filesystem is unknown
def test_large_time(self):
# Many filesystems are limited to the year 2038. At least, the test
        # passes with the NTFS filesystem.
if self.get_file_system(self.dirname) != "NTFS":
self.skipTest("requires NTFS")
large = 5000000000 # some day in 2128
os.utime(self.fname, (large, large))
self.assertEqual(os.stat(self.fname).st_mtime, large)
def test_utime_invalid_arguments(self):
# seconds and nanoseconds parameters are mutually exclusive
with self.assertRaises(ValueError):
os.utime(self.fname, (5, 5), ns=(5, 5))
from test import mapping_tests
class EnvironTests(mapping_tests.BasicTestMappingProtocol):
"""check that os.environ object conform to mapping protocol"""
type2test = None
def setUp(self):
self.__save = dict(os.environ)
if os.supports_bytes_environ:
self.__saveb = dict(os.environb)
for key, value in self._reference().items():
os.environ[key] = value
def tearDown(self):
os.environ.clear()
os.environ.update(self.__save)
if os.supports_bytes_environ:
os.environb.clear()
os.environb.update(self.__saveb)
def _reference(self):
return {"KEY1":"VALUE1", "KEY2":"VALUE2", "KEY3":"VALUE3"}
def _empty_mapping(self):
os.environ.clear()
return os.environ
# Bug 1110478
@unittest.skipUnless(unix_shell and os.path.exists(unix_shell),
'requires a shell')
def test_update2(self):
os.environ.clear()
os.environ.update(HELLO="World")
with os.popen("%s -c 'echo $HELLO'" % unix_shell) as popen:
value = popen.read().strip()
self.assertEqual(value, "World")
@unittest.skipUnless(unix_shell and os.path.exists(unix_shell),
'requires a shell')
def test_os_popen_iter(self):
with os.popen("%s -c 'echo \"line1\nline2\nline3\"'"
% unix_shell) as popen:
it = iter(popen)
self.assertEqual(next(it), "line1\n")
self.assertEqual(next(it), "line2\n")
self.assertEqual(next(it), "line3\n")
self.assertRaises(StopIteration, next, it)
# Verify environ keys and values from the OS are of the
# correct str type.
def test_keyvalue_types(self):
for key, val in os.environ.items():
self.assertEqual(type(key), str)
self.assertEqual(type(val), str)
def test_items(self):
for key, value in self._reference().items():
self.assertEqual(os.environ.get(key), value)
# Issue 7310
def test___repr__(self):
"""Check that the repr() of os.environ looks like environ({...})."""
env = os.environ
self.assertEqual(repr(env), 'environ({{{}}})'.format(', '.join(
'{!r}: {!r}'.format(key, value)
for key, value in env.items())))
def test_get_exec_path(self):
defpath_list = os.defpath.split(os.pathsep)
test_path = ['/monty', '/python', '', '/flying/circus']
test_env = {'PATH': os.pathsep.join(test_path)}
saved_environ = os.environ
try:
os.environ = dict(test_env)
# Test that defaulting to os.environ works.
self.assertSequenceEqual(test_path, os.get_exec_path())
self.assertSequenceEqual(test_path, os.get_exec_path(env=None))
finally:
os.environ = saved_environ
# No PATH environment variable
self.assertSequenceEqual(defpath_list, os.get_exec_path({}))
# Empty PATH environment variable
self.assertSequenceEqual(('',), os.get_exec_path({'PATH':''}))
# Supplied PATH environment variable
self.assertSequenceEqual(test_path, os.get_exec_path(test_env))
if os.supports_bytes_environ:
# env cannot contain 'PATH' and b'PATH' keys
try:
# ignore BytesWarning warning
with warnings.catch_warnings(record=True):
mixed_env = {'PATH': '1', b'PATH': b'2'}
except BytesWarning:
# mixed_env cannot be created with python -bb
pass
else:
self.assertRaises(ValueError, os.get_exec_path, mixed_env)
# bytes key and/or value
self.assertSequenceEqual(os.get_exec_path({b'PATH': b'abc'}),
['abc'])
self.assertSequenceEqual(os.get_exec_path({b'PATH': 'abc'}),
['abc'])
self.assertSequenceEqual(os.get_exec_path({'PATH': b'abc'}),
['abc'])
@unittest.skipUnless(os.supports_bytes_environ,
"os.environb required for this test.")
def test_environb(self):
# os.environ -> os.environb
value = 'euro\u20ac'
try:
value_bytes = value.encode(sys.getfilesystemencoding(),
'surrogateescape')
except UnicodeEncodeError:
msg = "U+20AC character is not encodable to %s" % (
sys.getfilesystemencoding(),)
self.skipTest(msg)
os.environ['unicode'] = value
self.assertEqual(os.environ['unicode'], value)
self.assertEqual(os.environb[b'unicode'], value_bytes)
# os.environb -> os.environ
value = b'\xff'
os.environb[b'bytes'] = value
self.assertEqual(os.environb[b'bytes'], value)
value_str = value.decode(sys.getfilesystemencoding(), 'surrogateescape')
self.assertEqual(os.environ['bytes'], value_str)
# On FreeBSD < 7 and OS X < 10.6, unsetenv() doesn't return a value (issue
# #13415).
@support.requires_freebsd_version(7)
@support.requires_mac_ver(10, 6)
def test_unset_error(self):
if sys.platform == "win32":
# an environment variable is limited to 32,767 characters
key = 'x' * 50000
self.assertRaises(ValueError, os.environ.__delitem__, key)
else:
# "=" is not allowed in a variable name
key = 'key='
self.assertRaises(OSError, os.environ.__delitem__, key)
def test_key_type(self):
missing = 'missingkey'
self.assertNotIn(missing, os.environ)
with self.assertRaises(KeyError) as cm:
os.environ[missing]
self.assertIs(cm.exception.args[0], missing)
self.assertTrue(cm.exception.__suppress_context__)
with self.assertRaises(KeyError) as cm:
del os.environ[missing]
self.assertIs(cm.exception.args[0], missing)
self.assertTrue(cm.exception.__suppress_context__)
class WalkTests(unittest.TestCase):
"""Tests for os.walk()."""
# Wrapper to hide minor differences between os.walk and os.fwalk
# to tests both functions with the same code base
def walk(self, top, **kwargs):
if 'follow_symlinks' in kwargs:
kwargs['followlinks'] = kwargs.pop('follow_symlinks')
return os.walk(top, **kwargs)
def setUp(self):
join = os.path.join
self.addCleanup(support.rmtree, support.TESTFN)
# Build:
# TESTFN/
# TEST1/ a file kid and two directory kids
# tmp1
# SUB1/ a file kid and a directory kid
# tmp2
# SUB11/ no kids
# SUB2/ a file kid and a dirsymlink kid
# tmp3
# SUB21/ not readable
# tmp5
# link/ a symlink to TESTFN.2
# broken_link
# broken_link2
# broken_link3
# TEST2/
# tmp4 a lone file
self.walk_path = join(support.TESTFN, "TEST1")
self.sub1_path = join(self.walk_path, "SUB1")
self.sub11_path = join(self.sub1_path, "SUB11")
sub2_path = join(self.walk_path, "SUB2")
sub21_path = join(sub2_path, "SUB21")
tmp1_path = join(self.walk_path, "tmp1")
tmp2_path = join(self.sub1_path, "tmp2")
tmp3_path = join(sub2_path, "tmp3")
tmp5_path = join(sub21_path, "tmp3")
self.link_path = join(sub2_path, "link")
t2_path = join(support.TESTFN, "TEST2")
tmp4_path = join(support.TESTFN, "TEST2", "tmp4")
broken_link_path = join(sub2_path, "broken_link")
broken_link2_path = join(sub2_path, "broken_link2")
broken_link3_path = join(sub2_path, "broken_link3")
# Create stuff.
os.makedirs(self.sub11_path)
os.makedirs(sub2_path)
os.makedirs(sub21_path)
os.makedirs(t2_path)
for path in tmp1_path, tmp2_path, tmp3_path, tmp4_path, tmp5_path:
with open(path, "x") as f:
f.write("I'm " + path + " and proud of it. Blame test_os.\n")
if support.can_symlink():
os.symlink(os.path.abspath(t2_path), self.link_path)
os.symlink('broken', broken_link_path, True)
os.symlink(join('tmp3', 'broken'), broken_link2_path, True)
os.symlink(join('SUB21', 'tmp5'), broken_link3_path, True)
self.sub2_tree = (sub2_path, ["SUB21", "link"],
["broken_link", "broken_link2", "broken_link3",
"tmp3"])
else:
self.sub2_tree = (sub2_path, [], ["tmp3"])
os.chmod(sub21_path, 0)
try:
os.listdir(sub21_path)
except PermissionError:
self.addCleanup(os.chmod, sub21_path, stat.S_IRWXU)
else:
os.chmod(sub21_path, stat.S_IRWXU)
os.unlink(tmp5_path)
os.rmdir(sub21_path)
del self.sub2_tree[1][:1]
def test_walk_topdown(self):
# Walk top-down.
all = list(self.walk(self.walk_path))
self.assertEqual(len(all), 4)
# We can't know which order SUB1 and SUB2 will appear in.
# Not flipped: TESTFN, SUB1, SUB11, SUB2
# flipped: TESTFN, SUB2, SUB1, SUB11
flipped = all[0][1][0] != "SUB1"
all[0][1].sort()
all[3 - 2 * flipped][-1].sort()
all[3 - 2 * flipped][1].sort()
self.assertEqual(all[0], (self.walk_path, ["SUB1", "SUB2"], ["tmp1"]))
self.assertEqual(all[1 + flipped], (self.sub1_path, ["SUB11"], ["tmp2"]))
self.assertEqual(all[2 + flipped], (self.sub11_path, [], []))
self.assertEqual(all[3 - 2 * flipped], self.sub2_tree)
def test_walk_prune(self, walk_path=None):
if walk_path is None:
walk_path = self.walk_path
# Prune the search.
all = []
for root, dirs, files in self.walk(walk_path):
all.append((root, dirs, files))
# Don't descend into SUB1.
if 'SUB1' in dirs:
# Note that this also mutates the dirs we appended to all!
dirs.remove('SUB1')
self.assertEqual(len(all), 2)
self.assertEqual(all[0],
(str(walk_path), ["SUB2"], ["tmp1"]))
all[1][-1].sort()
all[1][1].sort()
self.assertEqual(all[1], self.sub2_tree)
def test_file_like_path(self):
self.test_walk_prune(_PathLike(self.walk_path))
def test_walk_bottom_up(self):
# Walk bottom-up.
all = list(self.walk(self.walk_path, topdown=False))
self.assertEqual(len(all), 4, all)
# We can't know which order SUB1 and SUB2 will appear in.
# Not flipped: SUB11, SUB1, SUB2, TESTFN
# flipped: SUB2, SUB11, SUB1, TESTFN
flipped = all[3][1][0] != "SUB1"
all[3][1].sort()
all[2 - 2 * flipped][-1].sort()
all[2 - 2 * flipped][1].sort()
self.assertEqual(all[3],
(self.walk_path, ["SUB1", "SUB2"], ["tmp1"]))
self.assertEqual(all[flipped],
(self.sub11_path, [], []))
self.assertEqual(all[flipped + 1],
(self.sub1_path, ["SUB11"], ["tmp2"]))
self.assertEqual(all[2 - 2 * flipped],
self.sub2_tree)
def test_walk_symlink(self):
if not support.can_symlink():
self.skipTest("need symlink support")
# Walk, following symlinks.
walk_it = self.walk(self.walk_path, follow_symlinks=True)
for root, dirs, files in walk_it:
if root == self.link_path:
self.assertEqual(dirs, [])
self.assertEqual(files, ["tmp4"])
break
else:
self.fail("Didn't follow symlink with followlinks=True")
def test_walk_bad_dir(self):
# Walk top-down.
errors = []
walk_it = self.walk(self.walk_path, onerror=errors.append)
root, dirs, files = next(walk_it)
self.assertEqual(errors, [])
dir1 = 'SUB1'
path1 = os.path.join(root, dir1)
path1new = os.path.join(root, dir1 + '.new')
os.rename(path1, path1new)
try:
roots = [r for r, d, f in walk_it]
self.assertTrue(errors)
self.assertNotIn(path1, roots)
self.assertNotIn(path1new, roots)
for dir2 in dirs:
if dir2 != dir1:
self.assertIn(os.path.join(root, dir2), roots)
finally:
os.rename(path1new, path1)
@unittest.skipUnless(hasattr(os, 'fwalk'), "Test needs os.fwalk()")
class FwalkTests(WalkTests):
"""Tests for os.fwalk()."""
def walk(self, top, **kwargs):
for root, dirs, files, root_fd in self.fwalk(top, **kwargs):
yield (root, dirs, files)
def fwalk(self, *args, **kwargs):
return os.fwalk(*args, **kwargs)
def _compare_to_walk(self, walk_kwargs, fwalk_kwargs):
"""
compare with walk() results.
"""
walk_kwargs = walk_kwargs.copy()
fwalk_kwargs = fwalk_kwargs.copy()
for topdown, follow_symlinks in itertools.product((True, False), repeat=2):
walk_kwargs.update(topdown=topdown, followlinks=follow_symlinks)
fwalk_kwargs.update(topdown=topdown, follow_symlinks=follow_symlinks)
expected = {}
for root, dirs, files in os.walk(**walk_kwargs):
expected[root] = (set(dirs), set(files))
for root, dirs, files, rootfd in self.fwalk(**fwalk_kwargs):
self.assertIn(root, expected)
self.assertEqual(expected[root], (set(dirs), set(files)))
def test_compare_to_walk(self):
kwargs = {'top': support.TESTFN}
self._compare_to_walk(kwargs, kwargs)
def test_dir_fd(self):
try:
fd = os.open(".", os.O_RDONLY)
walk_kwargs = {'top': support.TESTFN}
fwalk_kwargs = walk_kwargs.copy()
fwalk_kwargs['dir_fd'] = fd
self._compare_to_walk(walk_kwargs, fwalk_kwargs)
finally:
os.close(fd)
def test_yields_correct_dir_fd(self):
# check returned file descriptors
for topdown, follow_symlinks in itertools.product((True, False), repeat=2):
args = support.TESTFN, topdown, None
for root, dirs, files, rootfd in self.fwalk(*args, follow_symlinks=follow_symlinks):
# check that the FD is valid
os.fstat(rootfd)
# redundant check
os.stat(rootfd)
# check that listdir() returns consistent information
self.assertEqual(set(os.listdir(rootfd)), set(dirs) | set(files))
def test_fd_leak(self):
# Since we're opening a lot of FDs, we must be careful to avoid leaks:
# we both check that calling fwalk() a large number of times doesn't
# yield EMFILE, and that the minimum allocated FD hasn't changed.
minfd = os.dup(1)
os.close(minfd)
for i in range(256):
for x in self.fwalk(support.TESTFN):
pass
newfd = os.dup(1)
self.addCleanup(os.close, newfd)
self.assertEqual(newfd, minfd)
class BytesWalkTests(WalkTests):
"""Tests for os.walk() with bytes."""
def walk(self, top, **kwargs):
if 'follow_symlinks' in kwargs:
kwargs['followlinks'] = kwargs.pop('follow_symlinks')
for broot, bdirs, bfiles in os.walk(os.fsencode(top), **kwargs):
root = os.fsdecode(broot)
dirs = list(map(os.fsdecode, bdirs))
files = list(map(os.fsdecode, bfiles))
yield (root, dirs, files)
bdirs[:] = list(map(os.fsencode, dirs))
bfiles[:] = list(map(os.fsencode, files))
@unittest.skipUnless(hasattr(os, 'fwalk'), "Test needs os.fwalk()")
class BytesFwalkTests(FwalkTests):
"""Tests for os.walk() with bytes."""
def fwalk(self, top='.', *args, **kwargs):
for broot, bdirs, bfiles, topfd in os.fwalk(os.fsencode(top), *args, **kwargs):
root = os.fsdecode(broot)
dirs = list(map(os.fsdecode, bdirs))
files = list(map(os.fsdecode, bfiles))
yield (root, dirs, files, topfd)
bdirs[:] = list(map(os.fsencode, dirs))
bfiles[:] = list(map(os.fsencode, files))
class MakedirTests(unittest.TestCase):
def setUp(self):
os.mkdir(support.TESTFN)
def test_makedir(self):
base = support.TESTFN
path = os.path.join(base, 'dir1', 'dir2', 'dir3')
os.makedirs(path) # Should work
path = os.path.join(base, 'dir1', 'dir2', 'dir3', 'dir4')
os.makedirs(path)
# Try paths with a '.' in them
self.assertRaises(OSError, os.makedirs, os.curdir)
path = os.path.join(base, 'dir1', 'dir2', 'dir3', 'dir4', 'dir5', os.curdir)
os.makedirs(path)
path = os.path.join(base, 'dir1', os.curdir, 'dir2', 'dir3', 'dir4',
'dir5', 'dir6')
os.makedirs(path)
def test_mode(self):
with support.temp_umask(0o002):
base = support.TESTFN
parent = os.path.join(base, 'dir1')
path = os.path.join(parent, 'dir2')
os.makedirs(path, 0o555)
self.assertTrue(os.path.exists(path))
self.assertTrue(os.path.isdir(path))
if os.name != 'nt':
self.assertEqual(stat.S_IMODE(os.stat(path).st_mode), 0o555)
self.assertEqual(stat.S_IMODE(os.stat(parent).st_mode), 0o775)
def test_exist_ok_existing_directory(self):
path = os.path.join(support.TESTFN, 'dir1')
mode = 0o777
old_mask = os.umask(0o022)
os.makedirs(path, mode)
self.assertRaises(OSError, os.makedirs, path, mode)
self.assertRaises(OSError, os.makedirs, path, mode, exist_ok=False)
os.makedirs(path, 0o776, exist_ok=True)
os.makedirs(path, mode=mode, exist_ok=True)
os.umask(old_mask)
# Issue #25583: A drive root could raise PermissionError on Windows
os.makedirs(os.path.abspath('/'), exist_ok=True)
def test_exist_ok_s_isgid_directory(self):
path = os.path.join(support.TESTFN, 'dir1')
S_ISGID = stat.S_ISGID
mode = 0o777
old_mask = os.umask(0o022)
try:
existing_testfn_mode = stat.S_IMODE(
os.lstat(support.TESTFN).st_mode)
try:
os.chmod(support.TESTFN, existing_testfn_mode | S_ISGID)
except PermissionError:
raise unittest.SkipTest('Cannot set S_ISGID for dir.')
if (os.lstat(support.TESTFN).st_mode & S_ISGID != S_ISGID):
raise unittest.SkipTest('No support for S_ISGID dir mode.')
# The os should apply S_ISGID from the parent dir for us, but
# this test need not depend on that behavior. Be explicit.
os.makedirs(path, mode | S_ISGID)
# http://bugs.python.org/issue14992
# Should not fail when the bit is already set.
os.makedirs(path, mode, exist_ok=True)
# remove the bit.
os.chmod(path, stat.S_IMODE(os.lstat(path).st_mode) & ~S_ISGID)
# May work even when the bit is not already set when demanded.
os.makedirs(path, mode | S_ISGID, exist_ok=True)
finally:
os.umask(old_mask)
def test_exist_ok_existing_regular_file(self):
base = support.TESTFN
path = os.path.join(support.TESTFN, 'dir1')
f = open(path, 'w')
f.write('abc')
f.close()
self.assertRaises(OSError, os.makedirs, path)
self.assertRaises(OSError, os.makedirs, path, exist_ok=False)
self.assertRaises(OSError, os.makedirs, path, exist_ok=True)
os.remove(path)
def tearDown(self):
path = os.path.join(support.TESTFN, 'dir1', 'dir2', 'dir3',
'dir4', 'dir5', 'dir6')
# If the tests failed, the bottom-most directory ('../dir6')
# may not have been created, so we look for the outermost directory
# that exists.
while not os.path.exists(path) and path != support.TESTFN:
path = os.path.dirname(path)
os.removedirs(path)
@unittest.skipUnless(hasattr(os, 'chown'), "Test needs chown")
class ChownFileTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
os.mkdir(support.TESTFN)
def test_chown_uid_gid_arguments_must_be_index(self):
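        # uid/gid must be integer-like (accepted by operator.index()):
        # floats, complex, Decimal and Fraction must raise TypeError, while
        # -1 means "leave unchanged" and must be accepted.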
stat = os.stat(support.TESTFN)
uid = stat.st_uid
gid = stat.st_gid
for value in (-1.0, -1j, decimal.Decimal(-1), fractions.Fraction(-2, 2)):
self.assertRaises(TypeError, os.chown, support.TESTFN, value, gid)
self.assertRaises(TypeError, os.chown, support.TESTFN, uid, value)
self.assertIsNone(os.chown(support.TESTFN, uid, gid))
self.assertIsNone(os.chown(support.TESTFN, -1, -1))
@unittest.skipUnless(len(groups) > 1, "test needs more than one group")
def test_chown(self):
gid_1, gid_2 = groups[:2]
uid = os.stat(support.TESTFN).st_uid
os.chown(support.TESTFN, uid, gid_1)
gid = os.stat(support.TESTFN).st_gid
self.assertEqual(gid, gid_1)
os.chown(support.TESTFN, uid, gid_2)
gid = os.stat(support.TESTFN).st_gid
self.assertEqual(gid, gid_2)
@unittest.skipUnless(root_in_posix and len(all_users) > 1,
"test needs root privilege and more than one user")
def test_chown_with_root(self):
uid_1, uid_2 = all_users[:2]
gid = os.stat(support.TESTFN).st_gid
os.chown(support.TESTFN, uid_1, gid)
uid = os.stat(support.TESTFN).st_uid
self.assertEqual(uid, uid_1)
os.chown(support.TESTFN, uid_2, gid)
uid = os.stat(support.TESTFN).st_uid
self.assertEqual(uid, uid_2)
@unittest.skipUnless(not root_in_posix and len(all_users) > 1,
"test needs non-root account and more than one user")
def test_chown_without_permission(self):
uid_1, uid_2 = all_users[:2]
gid = os.stat(support.TESTFN).st_gid
with self.assertRaises(PermissionError):
os.chown(support.TESTFN, uid_1, gid)
os.chown(support.TESTFN, uid_2, gid)
@classmethod
def tearDownClass(cls):
os.rmdir(support.TESTFN)
class RemoveDirsTests(unittest.TestCase):
def setUp(self):
os.makedirs(support.TESTFN)
def tearDown(self):
support.rmtree(support.TESTFN)
def test_remove_all(self):
dira = os.path.join(support.TESTFN, 'dira')
os.mkdir(dira)
dirb = os.path.join(dira, 'dirb')
os.mkdir(dirb)
os.removedirs(dirb)
self.assertFalse(os.path.exists(dirb))
self.assertFalse(os.path.exists(dira))
self.assertFalse(os.path.exists(support.TESTFN))
def test_remove_partial(self):
dira = os.path.join(support.TESTFN, 'dira')
os.mkdir(dira)
dirb = os.path.join(dira, 'dirb')
os.mkdir(dirb)
create_file(os.path.join(dira, 'file.txt'))
os.removedirs(dirb)
self.assertFalse(os.path.exists(dirb))
self.assertTrue(os.path.exists(dira))
self.assertTrue(os.path.exists(support.TESTFN))
def test_remove_nothing(self):
dira = os.path.join(support.TESTFN, 'dira')
os.mkdir(dira)
dirb = os.path.join(dira, 'dirb')
os.mkdir(dirb)
create_file(os.path.join(dirb, 'file.txt'))
with self.assertRaises(OSError):
os.removedirs(dirb)
self.assertTrue(os.path.exists(dirb))
self.assertTrue(os.path.exists(dira))
self.assertTrue(os.path.exists(support.TESTFN))
class DevNullTests(unittest.TestCase):
def test_devnull(self):
with open(os.devnull, 'wb', 0) as f:
f.write(b'hello')
f.close()
with open(os.devnull, 'rb') as f:
self.assertEqual(f.read(), b'')
class URandomTests(unittest.TestCase):
def test_urandom_length(self):
self.assertEqual(len(os.urandom(0)), 0)
self.assertEqual(len(os.urandom(1)), 1)
self.assertEqual(len(os.urandom(10)), 10)
self.assertEqual(len(os.urandom(100)), 100)
self.assertEqual(len(os.urandom(1000)), 1000)
def test_urandom_value(self):
data1 = os.urandom(16)
self.assertIsInstance(data1, bytes)
data2 = os.urandom(16)
self.assertNotEqual(data1, data2)
def get_urandom_subprocess(self, count):
code = '\n'.join((
'import os, sys',
'data = os.urandom(%s)' % count,
'sys.stdout.buffer.write(data)',
'sys.stdout.buffer.flush()'))
out = assert_python_ok('-c', code)
stdout = out[1]
        self.assertEqual(len(stdout), count)
return stdout
def test_urandom_subprocess(self):
data1 = self.get_urandom_subprocess(16)
data2 = self.get_urandom_subprocess(16)
self.assertNotEqual(data1, data2)
@unittest.skipUnless(hasattr(os, 'getrandom'), 'need os.getrandom()')
class GetRandomTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
try:
os.getrandom(1)
except OSError as exc:
if exc.errno == errno.ENOSYS:
# Python compiled on a more recent Linux version
# than the current Linux kernel
raise unittest.SkipTest("getrandom() syscall fails with ENOSYS")
else:
raise
def test_getrandom_type(self):
data = os.getrandom(16)
self.assertIsInstance(data, bytes)
self.assertEqual(len(data), 16)
def test_getrandom0(self):
empty = os.getrandom(0)
self.assertEqual(empty, b'')
def test_getrandom_random(self):
self.assertTrue(hasattr(os, 'GRND_RANDOM'))
        # Don't test os.getrandom(1, os.GRND_RANDOM) so as not to consume
        # the scarce /dev/random resource
def test_getrandom_nonblock(self):
# The call must not fail. Check also that the flag exists
try:
os.getrandom(1, os.GRND_NONBLOCK)
except BlockingIOError:
# System urandom is not initialized yet
pass
def test_getrandom_value(self):
data1 = os.getrandom(16)
data2 = os.getrandom(16)
self.assertNotEqual(data1, data2)
# os.urandom() doesn't use a file descriptor when it is implemented with the
# getentropy() function, the getrandom() function or the getrandom() syscall
OS_URANDOM_DONT_USE_FD = (
sysconfig.get_config_var('HAVE_GETENTROPY') == 1
or sysconfig.get_config_var('HAVE_GETRANDOM') == 1
or sysconfig.get_config_var('HAVE_GETRANDOM_SYSCALL') == 1)
@unittest.skipIf(OS_URANDOM_DONT_USE_FD,
                 "os.urandom() does not use a file descriptor")
class URandomFDTests(unittest.TestCase):
@unittest.skipUnless(resource, "test requires the resource module")
def test_urandom_failure(self):
# Check urandom() failing when it is not able to open /dev/random.
# We spawn a new process to make the test more robust (if getrlimit()
# failed to restore the file descriptor limit after this, the whole
# test suite would crash; this actually happened on the OS X Tiger
# buildbot).
code = """if 1:
import errno
import os
import resource
soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (1, hard_limit))
try:
os.urandom(16)
except OSError as e:
assert e.errno == errno.EMFILE, e.errno
else:
raise AssertionError("OSError not raised")
"""
assert_python_ok('-c', code)
def test_urandom_fd_closed(self):
# Issue #21207: urandom() should reopen its fd to /dev/urandom if
# closed.
code = """if 1:
import os
import sys
import test.support
os.urandom(4)
with test.support.SuppressCrashReport():
os.closerange(3, 256)
sys.stdout.buffer.write(os.urandom(4))
"""
rc, out, err = assert_python_ok('-Sc', code)
def test_urandom_fd_reopened(self):
# Issue #21207: urandom() should detect its fd to /dev/urandom
# changed to something else, and reopen it.
self.addCleanup(support.unlink, support.TESTFN)
create_file(support.TESTFN, b"x" * 256)
code = """if 1:
import os
import sys
import test.support
os.urandom(4)
with test.support.SuppressCrashReport():
for fd in range(3, 256):
try:
os.close(fd)
except OSError:
pass
else:
# Found the urandom fd (XXX hopefully)
break
os.closerange(3, 256)
with open({TESTFN!r}, 'rb') as f:
new_fd = f.fileno()
# Issue #26935: posix allows new_fd and fd to be equal but
# some libc implementations have dup2 return an error in this
# case.
if new_fd != fd:
os.dup2(new_fd, fd)
sys.stdout.buffer.write(os.urandom(4))
sys.stdout.buffer.write(os.urandom(4))
""".format(TESTFN=support.TESTFN)
rc, out, err = assert_python_ok('-Sc', code)
self.assertEqual(len(out), 8)
self.assertNotEqual(out[0:4], out[4:8])
rc, out2, err2 = assert_python_ok('-Sc', code)
self.assertEqual(len(out2), 8)
self.assertNotEqual(out2, out)
@contextlib.contextmanager
def _execvpe_mockup(defpath=None):
"""
Stubs out execv and execve functions when used as context manager.
Records exec calls. The mock execv and execve functions always raise an
exception as they would normally never return.
"""
# A list of tuples containing (function name, first arg, args)
# of calls to execv or execve that have been made.
calls = []
def mock_execv(name, *args):
calls.append(('execv', name, args))
raise RuntimeError("execv called")
def mock_execve(name, *args):
calls.append(('execve', name, args))
raise OSError(errno.ENOTDIR, "execve called")
try:
orig_execv = os.execv
orig_execve = os.execve
orig_defpath = os.defpath
os.execv = mock_execv
os.execve = mock_execve
if defpath is not None:
os.defpath = defpath
yield calls
finally:
os.execv = orig_execv
os.execve = orig_execve
os.defpath = orig_defpath
class ExecTests(unittest.TestCase):
@unittest.skipIf(USING_LINUXTHREADS,
"avoid triggering a linuxthreads bug: see issue #4970")
def test_execvpe_with_bad_program(self):
self.assertRaises(OSError, os.execvpe, 'no such app-',
['no such app-'], None)
def test_execv_with_bad_arglist(self):
self.assertRaises(ValueError, os.execv, 'notepad', ())
self.assertRaises(ValueError, os.execv, 'notepad', [])
self.assertRaises(ValueError, os.execv, 'notepad', ('',))
self.assertRaises(ValueError, os.execv, 'notepad', [''])
def test_execvpe_with_bad_arglist(self):
self.assertRaises(ValueError, os.execvpe, 'notepad', [], None)
self.assertRaises(ValueError, os.execvpe, 'notepad', [], {})
self.assertRaises(ValueError, os.execvpe, 'notepad', [''], {})
@unittest.skipUnless(hasattr(os, '_execvpe'),
"No internal os._execvpe function to test.")
def _test_internal_execvpe(self, test_type):
program_path = os.sep + 'absolutepath'
if test_type is bytes:
program = b'executable'
fullpath = os.path.join(os.fsencode(program_path), program)
native_fullpath = fullpath
arguments = [b'progname', 'arg1', 'arg2']
else:
program = 'executable'
arguments = ['progname', 'arg1', 'arg2']
fullpath = os.path.join(program_path, program)
if os.name != "nt":
native_fullpath = os.fsencode(fullpath)
else:
native_fullpath = fullpath
env = {'spam': 'beans'}
# test os._execvpe() with an absolute path
with _execvpe_mockup() as calls:
self.assertRaises(RuntimeError,
os._execvpe, fullpath, arguments)
self.assertEqual(len(calls), 1)
self.assertEqual(calls[0], ('execv', fullpath, (arguments,)))
# test os._execvpe() with a relative path:
# os.get_exec_path() returns defpath
with _execvpe_mockup(defpath=program_path) as calls:
self.assertRaises(OSError,
os._execvpe, program, arguments, env=env)
self.assertEqual(len(calls), 1)
self.assertSequenceEqual(calls[0],
('execve', native_fullpath, (arguments, env)))
# test os._execvpe() with a relative path:
# os.get_exec_path() reads the 'PATH' variable
with _execvpe_mockup() as calls:
env_path = env.copy()
if test_type is bytes:
env_path[b'PATH'] = program_path
else:
env_path['PATH'] = program_path
self.assertRaises(OSError,
os._execvpe, program, arguments, env=env_path)
self.assertEqual(len(calls), 1)
self.assertSequenceEqual(calls[0],
('execve', native_fullpath, (arguments, env_path)))
def test_internal_execvpe_str(self):
self._test_internal_execvpe(str)
if os.name != "nt":
self._test_internal_execvpe(bytes)
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
class Win32ErrorTests(unittest.TestCase):
def setUp(self):
try:
os.stat(support.TESTFN)
except FileNotFoundError:
exists = False
except OSError as exc:
exists = True
self.fail("file %s must not exist; os.stat failed with %s"
% (support.TESTFN, exc))
else:
self.fail("file %s must not exist" % support.TESTFN)
def test_rename(self):
self.assertRaises(OSError, os.rename, support.TESTFN, support.TESTFN+".bak")
def test_remove(self):
self.assertRaises(OSError, os.remove, support.TESTFN)
def test_chdir(self):
self.assertRaises(OSError, os.chdir, support.TESTFN)
def test_mkdir(self):
self.addCleanup(support.unlink, support.TESTFN)
with open(support.TESTFN, "x") as f:
self.assertRaises(OSError, os.mkdir, support.TESTFN)
def test_utime(self):
self.assertRaises(OSError, os.utime, support.TESTFN, None)
def test_chmod(self):
self.assertRaises(OSError, os.chmod, support.TESTFN, 0)
class TestInvalidFD(unittest.TestCase):
singles = ["fchdir", "dup", "fdopen", "fdatasync", "fstat",
"fstatvfs", "fsync", "tcgetpgrp", "ttyname"]
#singles.append("close")
#We omit close because it doesn't raise an exception on some platforms
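    # Generate a test_<name> method for each function above; each one simply
    # calls check() with a bad file descriptor and expects EBADF.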
def get_single(f):
def helper(self):
if hasattr(os, f):
self.check(getattr(os, f))
return helper
for f in singles:
locals()["test_"+f] = get_single(f)
def check(self, f, *args):
try:
f(support.make_bad_fd(), *args)
except OSError as e:
self.assertEqual(e.errno, errno.EBADF)
else:
self.fail("%r didn't raise an OSError with a bad file descriptor"
% f)
@unittest.skipUnless(hasattr(os, 'isatty'), 'test needs os.isatty()')
def test_isatty(self):
self.assertEqual(os.isatty(support.make_bad_fd()), False)
@unittest.skipUnless(hasattr(os, 'closerange'), 'test needs os.closerange()')
def test_closerange(self):
fd = support.make_bad_fd()
# Make sure none of the descriptors we are about to close are
# currently valid (issue 6542).
for i in range(10):
try: os.fstat(fd+i)
except OSError:
pass
else:
break
if i < 2:
raise unittest.SkipTest(
"Unable to acquire a range of invalid file descriptors")
self.assertEqual(os.closerange(fd, fd + i-1), None)
@unittest.skipUnless(hasattr(os, 'dup2'), 'test needs os.dup2()')
def test_dup2(self):
self.check(os.dup2, 20)
@unittest.skipUnless(hasattr(os, 'fchmod'), 'test needs os.fchmod()')
def test_fchmod(self):
self.check(os.fchmod, 0)
@unittest.skipUnless(hasattr(os, 'fchown'), 'test needs os.fchown()')
def test_fchown(self):
self.check(os.fchown, -1, -1)
@unittest.skipUnless(hasattr(os, 'fpathconf'), 'test needs os.fpathconf()')
def test_fpathconf(self):
self.check(os.pathconf, "PC_NAME_MAX")
self.check(os.fpathconf, "PC_NAME_MAX")
@unittest.skipUnless(hasattr(os, 'ftruncate'), 'test needs os.ftruncate()')
def test_ftruncate(self):
self.check(os.truncate, 0)
self.check(os.ftruncate, 0)
@unittest.skipUnless(hasattr(os, 'lseek'), 'test needs os.lseek()')
def test_lseek(self):
self.check(os.lseek, 0, 0)
@unittest.skipUnless(hasattr(os, 'read'), 'test needs os.read()')
def test_read(self):
self.check(os.read, 1)
@unittest.skipUnless(hasattr(os, 'readv'), 'test needs os.readv()')
def test_readv(self):
buf = bytearray(10)
self.check(os.readv, [buf])
@unittest.skipUnless(hasattr(os, 'tcsetpgrp'), 'test needs os.tcsetpgrp()')
    def test_tcsetpgrp(self):
self.check(os.tcsetpgrp, 0)
@unittest.skipUnless(hasattr(os, 'write'), 'test needs os.write()')
def test_write(self):
self.check(os.write, b" ")
@unittest.skipUnless(hasattr(os, 'writev'), 'test needs os.writev()')
def test_writev(self):
self.check(os.writev, [b'abc'])
def test_inheritable(self):
self.check(os.get_inheritable)
self.check(os.set_inheritable, True)
@unittest.skipUnless(hasattr(os, 'get_blocking'),
'needs os.get_blocking() and os.set_blocking()')
def test_blocking(self):
self.check(os.get_blocking)
self.check(os.set_blocking, True)
class LinkTests(unittest.TestCase):
def setUp(self):
self.file1 = support.TESTFN
self.file2 = os.path.join(support.TESTFN + "2")
def tearDown(self):
for file in (self.file1, self.file2):
if os.path.exists(file):
os.unlink(file)
def _test_link(self, file1, file2):
create_file(file1)
os.link(file1, file2)
with open(file1, "r") as f1, open(file2, "r") as f2:
self.assertTrue(os.path.sameopenfile(f1.fileno(), f2.fileno()))
def test_link(self):
self._test_link(self.file1, self.file2)
def test_link_bytes(self):
self._test_link(bytes(self.file1, sys.getfilesystemencoding()),
bytes(self.file2, sys.getfilesystemencoding()))
def test_unicode_name(self):
try:
os.fsencode("\xf1")
except UnicodeError:
raise unittest.SkipTest("Unable to encode for this platform.")
self.file1 += "\xf1"
self.file2 = self.file1 + "2"
self._test_link(self.file1, self.file2)
@unittest.skipIf(sys.platform == "win32", "Posix specific tests")
class PosixUidGidTests(unittest.TestCase):
@unittest.skipUnless(hasattr(os, 'setuid'), 'test needs os.setuid()')
def test_setuid(self):
if os.getuid() != 0:
self.assertRaises(OSError, os.setuid, 0)
self.assertRaises(OverflowError, os.setuid, 1<<32)
@unittest.skipUnless(hasattr(os, 'setgid'), 'test needs os.setgid()')
def test_setgid(self):
if os.getuid() != 0 and not HAVE_WHEEL_GROUP:
self.assertRaises(OSError, os.setgid, 0)
self.assertRaises(OverflowError, os.setgid, 1<<32)
@unittest.skipUnless(hasattr(os, 'seteuid'), 'test needs os.seteuid()')
def test_seteuid(self):
if os.getuid() != 0:
self.assertRaises(OSError, os.seteuid, 0)
self.assertRaises(OverflowError, os.seteuid, 1<<32)
@unittest.skipUnless(hasattr(os, 'setegid'), 'test needs os.setegid()')
def test_setegid(self):
if os.getuid() != 0 and not HAVE_WHEEL_GROUP:
self.assertRaises(OSError, os.setegid, 0)
self.assertRaises(OverflowError, os.setegid, 1<<32)
@unittest.skipUnless(hasattr(os, 'setreuid'), 'test needs os.setreuid()')
def test_setreuid(self):
if os.getuid() != 0:
self.assertRaises(OSError, os.setreuid, 0, 0)
self.assertRaises(OverflowError, os.setreuid, 1<<32, 0)
self.assertRaises(OverflowError, os.setreuid, 0, 1<<32)
@unittest.skipUnless(hasattr(os, 'setreuid'), 'test needs os.setreuid()')
def test_setreuid_neg1(self):
# Needs to accept -1. We run this in a subprocess to avoid
# altering the test runner's process state (issue8045).
subprocess.check_call([
sys.executable, '-c',
'import os,sys;os.setreuid(-1,-1);sys.exit(0)'])
@unittest.skipUnless(hasattr(os, 'setregid'), 'test needs os.setregid()')
def test_setregid(self):
if os.getuid() != 0 and not HAVE_WHEEL_GROUP:
self.assertRaises(OSError, os.setregid, 0, 0)
self.assertRaises(OverflowError, os.setregid, 1<<32, 0)
self.assertRaises(OverflowError, os.setregid, 0, 1<<32)
@unittest.skipUnless(hasattr(os, 'setregid'), 'test needs os.setregid()')
def test_setregid_neg1(self):
# Needs to accept -1. We run this in a subprocess to avoid
# altering the test runner's process state (issue8045).
subprocess.check_call([
sys.executable, '-c',
'import os,sys;os.setregid(-1,-1);sys.exit(0)'])
@unittest.skipIf(sys.platform == "win32", "Posix specific tests")
class Pep383Tests(unittest.TestCase):
def setUp(self):
if support.TESTFN_UNENCODABLE:
self.dir = support.TESTFN_UNENCODABLE
elif support.TESTFN_NONASCII:
self.dir = support.TESTFN_NONASCII
else:
self.dir = support.TESTFN
self.bdir = os.fsencode(self.dir)
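        # Collect candidate non-ASCII filenames as bytes, skipping any that
        # the filesystem encoding cannot represent on this platform.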
bytesfn = []
def add_filename(fn):
try:
fn = os.fsencode(fn)
except UnicodeEncodeError:
return
bytesfn.append(fn)
add_filename(support.TESTFN_UNICODE)
if support.TESTFN_UNENCODABLE:
add_filename(support.TESTFN_UNENCODABLE)
if support.TESTFN_NONASCII:
add_filename(support.TESTFN_NONASCII)
if not bytesfn:
self.skipTest("couldn't create any non-ascii filename")
self.unicodefn = set()
os.mkdir(self.dir)
try:
for fn in bytesfn:
support.create_empty_file(os.path.join(self.bdir, fn))
fn = os.fsdecode(fn)
if fn in self.unicodefn:
raise ValueError("duplicate filename")
self.unicodefn.add(fn)
except:
shutil.rmtree(self.dir)
raise
def tearDown(self):
shutil.rmtree(self.dir)
def test_listdir(self):
expected = self.unicodefn
found = set(os.listdir(self.dir))
self.assertEqual(found, expected)
# test listdir without arguments
current_directory = os.getcwd()
try:
os.chdir(os.sep)
self.assertEqual(set(os.listdir()), set(os.listdir(os.sep)))
finally:
os.chdir(current_directory)
def test_open(self):
for fn in self.unicodefn:
f = open(os.path.join(self.dir, fn), 'rb')
f.close()
@unittest.skipUnless(hasattr(os, 'statvfs'),
"need os.statvfs()")
def test_statvfs(self):
# issue #9645
for fn in self.unicodefn:
# should not fail with file not found error
fullname = os.path.join(self.dir, fn)
os.statvfs(fullname)
def test_stat(self):
for fn in self.unicodefn:
os.stat(os.path.join(self.dir, fn))
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
class Win32KillTests(unittest.TestCase):
def _kill(self, sig):
# Start sys.executable as a subprocess and communicate from the
# subprocess to the parent that the interpreter is ready. When it
# becomes ready, send *sig* via os.kill to the subprocess and check
# that the return code is equal to *sig*.
import ctypes
from ctypes import wintypes
import msvcrt
# Since we can't access the contents of the process' stdout until the
# process has exited, use PeekNamedPipe to see what's inside stdout
# without waiting. This is done so we can tell that the interpreter
# is started and running at a point where it could handle a signal.
PeekNamedPipe = ctypes.windll.kernel32.PeekNamedPipe
PeekNamedPipe.restype = wintypes.BOOL
PeekNamedPipe.argtypes = (wintypes.HANDLE, # Pipe handle
ctypes.POINTER(ctypes.c_char), # stdout buf
wintypes.DWORD, # Buffer size
ctypes.POINTER(wintypes.DWORD), # bytes read
ctypes.POINTER(wintypes.DWORD), # bytes avail
ctypes.POINTER(wintypes.DWORD)) # bytes left
msg = "running"
proc = subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write('{}');"
"sys.stdout.flush();"
"input()".format(msg)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
self.addCleanup(proc.stdout.close)
self.addCleanup(proc.stderr.close)
self.addCleanup(proc.stdin.close)
count, max = 0, 100
while count < max and proc.poll() is None:
# Create a string buffer to store the result of stdout from the pipe
buf = ctypes.create_string_buffer(len(msg))
# Obtain the text currently in proc.stdout
# Bytes read/avail/left are left as NULL and unused
rslt = PeekNamedPipe(msvcrt.get_osfhandle(proc.stdout.fileno()),
buf, ctypes.sizeof(buf), None, None, None)
self.assertNotEqual(rslt, 0, "PeekNamedPipe failed")
if buf.value:
self.assertEqual(msg, buf.value.decode())
break
time.sleep(0.1)
count += 1
else:
self.fail("Did not receive communication from the subprocess")
os.kill(proc.pid, sig)
self.assertEqual(proc.wait(), sig)
def test_kill_sigterm(self):
# SIGTERM doesn't mean anything special, but make sure it works
self._kill(signal.SIGTERM)
def test_kill_int(self):
# os.kill on Windows can take an int which gets set as the exit code
self._kill(100)
def _kill_with_event(self, event, name):
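        # A one-byte shared mmap acts as a flag: win_console_handler.py sets
        # it once its console control handler is installed, so we know it is
        # safe to send the event.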
tagname = "test_os_%s" % uuid.uuid1()
m = mmap.mmap(-1, 1, tagname)
m[0] = 0
# Run a script which has console control handling enabled.
proc = subprocess.Popen([sys.executable,
os.path.join(os.path.dirname(__file__),
"win_console_handler.py"), tagname],
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
# Let the interpreter startup before we send signals. See #3137.
count, max = 0, 100
while count < max and proc.poll() is None:
if m[0] == 1:
break
time.sleep(0.1)
count += 1
else:
# Forcefully kill the process if we weren't able to signal it.
os.kill(proc.pid, signal.SIGINT)
self.fail("Subprocess didn't finish initialization")
os.kill(proc.pid, event)
# proc.send_signal(event) could also be done here.
# Allow time for the signal to be passed and the process to exit.
time.sleep(0.5)
if not proc.poll():
# Forcefully kill the process if we weren't able to signal it.
os.kill(proc.pid, signal.SIGINT)
self.fail("subprocess did not stop on {}".format(name))
@unittest.skip("subprocesses aren't inheriting Ctrl+C property")
def test_CTRL_C_EVENT(self):
from ctypes import wintypes
import ctypes
# Make a NULL value by creating a pointer with no argument.
NULL = ctypes.POINTER(ctypes.c_int)()
SetConsoleCtrlHandler = ctypes.windll.kernel32.SetConsoleCtrlHandler
SetConsoleCtrlHandler.argtypes = (ctypes.POINTER(ctypes.c_int),
wintypes.BOOL)
SetConsoleCtrlHandler.restype = wintypes.BOOL
# Calling this with NULL and FALSE causes the calling process to
# handle Ctrl+C, rather than ignore it. This property is inherited
# by subprocesses.
SetConsoleCtrlHandler(NULL, 0)
self._kill_with_event(signal.CTRL_C_EVENT, "CTRL_C_EVENT")
def test_CTRL_BREAK_EVENT(self):
self._kill_with_event(signal.CTRL_BREAK_EVENT, "CTRL_BREAK_EVENT")
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
class Win32ListdirTests(unittest.TestCase):
"""Test listdir on Windows."""
def setUp(self):
self.created_paths = []
for i in range(2):
dir_name = 'SUB%d' % i
dir_path = os.path.join(support.TESTFN, dir_name)
file_name = 'FILE%d' % i
file_path = os.path.join(support.TESTFN, file_name)
os.makedirs(dir_path)
with open(file_path, 'w') as f:
f.write("I'm %s and proud of it. Blame test_os.\n" % file_path)
self.created_paths.extend([dir_name, file_name])
self.created_paths.sort()
def tearDown(self):
shutil.rmtree(support.TESTFN)
def test_listdir_no_extended_path(self):
"""Test when the path is not an "extended" path."""
# unicode
self.assertEqual(
sorted(os.listdir(support.TESTFN)),
self.created_paths)
# bytes
self.assertEqual(
sorted(os.listdir(os.fsencode(support.TESTFN))),
[os.fsencode(path) for path in self.created_paths])
def test_listdir_extended_path(self):
"""Test when the path starts with '\\\\?\\'."""
# See: http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath
# unicode
path = '\\\\?\\' + os.path.abspath(support.TESTFN)
self.assertEqual(
sorted(os.listdir(path)),
self.created_paths)
# bytes
path = b'\\\\?\\' + os.fsencode(os.path.abspath(support.TESTFN))
self.assertEqual(
sorted(os.listdir(path)),
[os.fsencode(path) for path in self.created_paths])
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
@support.skip_unless_symlink
class Win32SymlinkTests(unittest.TestCase):
filelink = 'filelinktest'
filelink_target = os.path.abspath(__file__)
dirlink = 'dirlinktest'
dirlink_target = os.path.dirname(filelink_target)
missing_link = 'missing link'
def setUp(self):
assert os.path.exists(self.dirlink_target)
assert os.path.exists(self.filelink_target)
assert not os.path.exists(self.dirlink)
assert not os.path.exists(self.filelink)
assert not os.path.exists(self.missing_link)
def tearDown(self):
if os.path.exists(self.filelink):
os.remove(self.filelink)
if os.path.exists(self.dirlink):
os.rmdir(self.dirlink)
if os.path.lexists(self.missing_link):
os.remove(self.missing_link)
def test_directory_link(self):
os.symlink(self.dirlink_target, self.dirlink)
self.assertTrue(os.path.exists(self.dirlink))
self.assertTrue(os.path.isdir(self.dirlink))
self.assertTrue(os.path.islink(self.dirlink))
self.check_stat(self.dirlink, self.dirlink_target)
def test_file_link(self):
os.symlink(self.filelink_target, self.filelink)
self.assertTrue(os.path.exists(self.filelink))
self.assertTrue(os.path.isfile(self.filelink))
self.assertTrue(os.path.islink(self.filelink))
self.check_stat(self.filelink, self.filelink_target)
def _create_missing_dir_link(self):
'Create a "directory" link to a non-existent target'
linkname = self.missing_link
if os.path.lexists(linkname):
os.remove(linkname)
target = r'c:\\target does not exist.29r3c740'
assert not os.path.exists(target)
target_is_dir = True
os.symlink(target, linkname, target_is_dir)
def test_remove_directory_link_to_missing_target(self):
self._create_missing_dir_link()
# For compatibility with Unix, os.remove will check the
# directory status and call RemoveDirectory if the symlink
# was created with target_is_dir==True.
os.remove(self.missing_link)
@unittest.skip("currently fails; consider for improvement")
def test_isdir_on_directory_link_to_missing_target(self):
self._create_missing_dir_link()
# consider having isdir return true for directory links
self.assertTrue(os.path.isdir(self.missing_link))
@unittest.skip("currently fails; consider for improvement")
def test_rmdir_on_directory_link_to_missing_target(self):
self._create_missing_dir_link()
# consider allowing rmdir to remove directory links
os.rmdir(self.missing_link)
def check_stat(self, link, target):
self.assertEqual(os.stat(link), os.stat(target))
self.assertNotEqual(os.lstat(link), os.stat(link))
bytes_link = os.fsencode(link)
self.assertEqual(os.stat(bytes_link), os.stat(target))
self.assertNotEqual(os.lstat(bytes_link), os.stat(bytes_link))
def test_12084(self):
level1 = os.path.abspath(support.TESTFN)
level2 = os.path.join(level1, "level2")
level3 = os.path.join(level2, "level3")
self.addCleanup(support.rmtree, level1)
os.mkdir(level1)
os.mkdir(level2)
os.mkdir(level3)
file1 = os.path.abspath(os.path.join(level1, "file1"))
create_file(file1)
orig_dir = os.getcwd()
try:
os.chdir(level2)
link = os.path.join(level2, "link")
os.symlink(os.path.relpath(file1), "link")
self.assertIn("link", os.listdir(os.getcwd()))
# Check os.stat calls from the same dir as the link
self.assertEqual(os.stat(file1), os.stat("link"))
# Check os.stat calls from a dir below the link
os.chdir(level1)
self.assertEqual(os.stat(file1),
os.stat(os.path.relpath(link)))
# Check os.stat calls from a dir above the link
os.chdir(level3)
self.assertEqual(os.stat(file1),
os.stat(os.path.relpath(link)))
finally:
os.chdir(orig_dir)
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
class Win32JunctionTests(unittest.TestCase):
junction = 'junctiontest'
junction_target = os.path.dirname(os.path.abspath(__file__))
def setUp(self):
assert os.path.exists(self.junction_target)
assert not os.path.exists(self.junction)
def tearDown(self):
if os.path.exists(self.junction):
# os.rmdir delegates to Windows' RemoveDirectoryW,
# which removes junction points safely.
os.rmdir(self.junction)
def test_create_junction(self):
_winapi.CreateJunction(self.junction_target, self.junction)
self.assertTrue(os.path.exists(self.junction))
self.assertTrue(os.path.isdir(self.junction))
# Junctions are not recognized as links.
self.assertFalse(os.path.islink(self.junction))
def test_unlink_removes_junction(self):
_winapi.CreateJunction(self.junction_target, self.junction)
self.assertTrue(os.path.exists(self.junction))
os.unlink(self.junction)
self.assertFalse(os.path.exists(self.junction))
@support.skip_unless_symlink
class NonLocalSymlinkTests(unittest.TestCase):
def setUp(self):
r"""
Create this structure:
base
\___ some_dir
"""
os.makedirs('base/some_dir')
def tearDown(self):
shutil.rmtree('base')
def test_directory_link_nonlocal(self):
"""
The symlink target should resolve relative to the link, not relative
to the current directory.
Then, link base/some_link -> base/some_dir and ensure that some_link
is resolved as a directory.
In issue13772, it was discovered that directory detection failed if
the symlink target was not specified relative to the current
directory, which was a defect in the implementation.
"""
src = os.path.join('base', 'some_link')
os.symlink('some_dir', src)
assert os.path.isdir(src)
class FSEncodingTests(unittest.TestCase):
def test_nop(self):
self.assertEqual(os.fsencode(b'abc\xff'), b'abc\xff')
self.assertEqual(os.fsdecode('abc\u0141'), 'abc\u0141')
def test_identity(self):
# assert fsdecode(fsencode(x)) == x
for fn in ('unicode\u0141', 'latin\xe9', 'ascii'):
try:
bytesfn = os.fsencode(fn)
except UnicodeEncodeError:
continue
self.assertEqual(os.fsdecode(bytesfn), fn)
class DeviceEncodingTests(unittest.TestCase):
def test_bad_fd(self):
# Return None when an fd doesn't actually exist.
self.assertIsNone(os.device_encoding(123456))
@unittest.skipUnless(os.isatty(0) and (sys.platform.startswith('win') or
(hasattr(locale, 'nl_langinfo') and hasattr(locale, 'CODESET'))),
'test requires a tty and either Windows or nl_langinfo(CODESET)')
def test_device_encoding(self):
encoding = os.device_encoding(0)
self.assertIsNotNone(encoding)
self.assertTrue(codecs.lookup(encoding))
class PidTests(unittest.TestCase):
@unittest.skipUnless(hasattr(os, 'getppid'), "test needs os.getppid")
def test_getppid(self):
p = subprocess.Popen([sys.executable, '-c',
'import os; print(os.getppid())'],
stdout=subprocess.PIPE)
stdout, _ = p.communicate()
# We are the parent of our subprocess
self.assertEqual(int(stdout), os.getpid())
def test_waitpid(self):
args = [sys.executable, '-c', 'pass']
# Add an implicit test for PyUnicode_FSConverter().
pid = os.spawnv(os.P_NOWAIT, _PathLike(args[0]), args)
status = os.waitpid(pid, 0)
self.assertEqual(status, (pid, 0))
class SpawnTests(unittest.TestCase):
def create_args(self, *, with_env=False, use_bytes=False):
self.exitcode = 17
filename = support.TESTFN
self.addCleanup(support.unlink, filename)
if not with_env:
code = 'import sys; sys.exit(%s)' % self.exitcode
else:
self.env = dict(os.environ)
            # create a unique key
self.key = str(uuid.uuid4())
self.env[self.key] = self.key
# read the variable from os.environ to check that it exists
code = ('import sys, os; magic = os.environ[%r]; sys.exit(%s)'
% (self.key, self.exitcode))
with open(filename, "w") as fp:
fp.write(code)
args = [sys.executable, filename]
if use_bytes:
args = [os.fsencode(a) for a in args]
self.env = {os.fsencode(k): os.fsencode(v)
for k, v in self.env.items()}
return args
@requires_os_func('spawnl')
def test_spawnl(self):
args = self.create_args()
exitcode = os.spawnl(os.P_WAIT, args[0], *args)
self.assertEqual(exitcode, self.exitcode)
@requires_os_func('spawnle')
def test_spawnle(self):
args = self.create_args(with_env=True)
exitcode = os.spawnle(os.P_WAIT, args[0], *args, self.env)
self.assertEqual(exitcode, self.exitcode)
@requires_os_func('spawnlp')
def test_spawnlp(self):
args = self.create_args()
exitcode = os.spawnlp(os.P_WAIT, args[0], *args)
self.assertEqual(exitcode, self.exitcode)
@requires_os_func('spawnlpe')
def test_spawnlpe(self):
args = self.create_args(with_env=True)
exitcode = os.spawnlpe(os.P_WAIT, args[0], *args, self.env)
self.assertEqual(exitcode, self.exitcode)
@requires_os_func('spawnv')
def test_spawnv(self):
args = self.create_args()
exitcode = os.spawnv(os.P_WAIT, args[0], args)
self.assertEqual(exitcode, self.exitcode)
@requires_os_func('spawnve')
def test_spawnve(self):
args = self.create_args(with_env=True)
exitcode = os.spawnve(os.P_WAIT, args[0], args, self.env)
self.assertEqual(exitcode, self.exitcode)
@requires_os_func('spawnvp')
def test_spawnvp(self):
args = self.create_args()
exitcode = os.spawnvp(os.P_WAIT, args[0], args)
self.assertEqual(exitcode, self.exitcode)
@requires_os_func('spawnvpe')
def test_spawnvpe(self):
args = self.create_args(with_env=True)
exitcode = os.spawnvpe(os.P_WAIT, args[0], args, self.env)
self.assertEqual(exitcode, self.exitcode)
@requires_os_func('spawnv')
def test_nowait(self):
args = self.create_args()
pid = os.spawnv(os.P_NOWAIT, args[0], args)
result = os.waitpid(pid, 0)
self.assertEqual(result[0], pid)
status = result[1]
if hasattr(os, 'WIFEXITED'):
self.assertTrue(os.WIFEXITED(status))
self.assertEqual(os.WEXITSTATUS(status), self.exitcode)
else:
self.assertEqual(status, self.exitcode << 8)
@requires_os_func('spawnve')
def test_spawnve_bytes(self):
# Test bytes handling in parse_arglist and parse_envlist (#28114)
args = self.create_args(with_env=True, use_bytes=True)
exitcode = os.spawnve(os.P_WAIT, args[0], args, self.env)
self.assertEqual(exitcode, self.exitcode)
@requires_os_func('spawnl')
def test_spawnl_noargs(self):
args = self.create_args()
self.assertRaises(ValueError, os.spawnl, os.P_NOWAIT, args[0])
self.assertRaises(ValueError, os.spawnl, os.P_NOWAIT, args[0], '')
@requires_os_func('spawnle')
def test_spawnle_noargs(self):
args = self.create_args()
self.assertRaises(ValueError, os.spawnle, os.P_NOWAIT, args[0], {})
self.assertRaises(ValueError, os.spawnle, os.P_NOWAIT, args[0], '', {})
@requires_os_func('spawnv')
def test_spawnv_noargs(self):
args = self.create_args()
self.assertRaises(ValueError, os.spawnv, os.P_NOWAIT, args[0], ())
self.assertRaises(ValueError, os.spawnv, os.P_NOWAIT, args[0], [])
self.assertRaises(ValueError, os.spawnv, os.P_NOWAIT, args[0], ('',))
self.assertRaises(ValueError, os.spawnv, os.P_NOWAIT, args[0], [''])
@requires_os_func('spawnve')
def test_spawnve_noargs(self):
args = self.create_args()
self.assertRaises(ValueError, os.spawnve, os.P_NOWAIT, args[0], (), {})
self.assertRaises(ValueError, os.spawnve, os.P_NOWAIT, args[0], [], {})
self.assertRaises(ValueError, os.spawnve, os.P_NOWAIT, args[0], ('',), {})
self.assertRaises(ValueError, os.spawnve, os.P_NOWAIT, args[0], [''], {})
# The introduction of this TestCase caused at least two different errors on
# *nix buildbots. Temporarily skip this to let the buildbots move along.
@unittest.skip("Skip due to platform/environment differences on *NIX buildbots")
@unittest.skipUnless(hasattr(os, 'getlogin'), "test needs os.getlogin")
class LoginTests(unittest.TestCase):
def test_getlogin(self):
user_name = os.getlogin()
self.assertNotEqual(len(user_name), 0)
@unittest.skipUnless(hasattr(os, 'getpriority') and hasattr(os, 'setpriority'),
"needs os.getpriority and os.setpriority")
class ProgramPriorityTests(unittest.TestCase):
"""Tests for os.getpriority() and os.setpriority()."""
def test_set_get_priority(self):
base = os.getpriority(os.PRIO_PROCESS, os.getpid())
os.setpriority(os.PRIO_PROCESS, os.getpid(), base + 1)
try:
new_prio = os.getpriority(os.PRIO_PROCESS, os.getpid())
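            # nice values are capped at 19, so if we started there the
            # increment cannot take effect; skip rather than fail.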
if base >= 19 and new_prio <= 19:
raise unittest.SkipTest("unable to reliably test setpriority "
"at current nice level of %s" % base)
else:
self.assertEqual(new_prio, base + 1)
finally:
try:
os.setpriority(os.PRIO_PROCESS, os.getpid(), base)
except OSError as err:
if err.errno != errno.EACCES:
raise
if threading is not None:
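    # Minimal asyncore-based test server: it accepts a single connection,
    # greets it with "220 ready" and records every byte received so the
    # sendfile tests can verify exactly what was transmitted.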
class SendfileTestServer(asyncore.dispatcher, threading.Thread):
class Handler(asynchat.async_chat):
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
self.in_buffer = []
self.closed = False
self.push(b"220 ready\r\n")
def handle_read(self):
data = self.recv(4096)
self.in_buffer.append(data)
def get_data(self):
return b''.join(self.in_buffer)
def handle_close(self):
self.close()
self.closed = True
def handle_error(self):
raise
def __init__(self, address):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.host, self.port = self.socket.getsockname()[:2]
self.handler_instance = None
self._active = False
self._active_lock = threading.Lock()
# --- public API
@property
def running(self):
return self._active
def start(self):
assert not self.running
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def stop(self):
assert self.running
self._active = False
self.join()
def wait(self):
# wait for handler connection to be closed, then stop the server
while not getattr(self.handler_instance, "closed", False):
time.sleep(0.001)
self.stop()
# --- internals
def run(self):
self._active = True
self.__flag.set()
while self._active and asyncore.socket_map:
self._active_lock.acquire()
asyncore.loop(timeout=0.001, count=1)
self._active_lock.release()
asyncore.close_all()
def handle_accept(self):
conn, addr = self.accept()
self.handler_instance = self.Handler(conn)
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise
@unittest.skipUnless(threading is not None, "test needs threading module")
@unittest.skipUnless(hasattr(os, 'sendfile'), "test needs os.sendfile()")
class TestSendfile(unittest.TestCase):
DATA = b"12345abcde" * 16 * 1024 # 160 KB
SUPPORT_HEADERS_TRAILERS = not sys.platform.startswith("linux") and \
not sys.platform.startswith("solaris") and \
not sys.platform.startswith("sunos")
requires_headers_trailers = unittest.skipUnless(SUPPORT_HEADERS_TRAILERS,
'requires headers and trailers support')
@classmethod
def setUpClass(cls):
cls.key = support.threading_setup()
create_file(support.TESTFN, cls.DATA)
@classmethod
def tearDownClass(cls):
support.threading_cleanup(*cls.key)
support.unlink(support.TESTFN)
def setUp(self):
self.server = SendfileTestServer((support.HOST, 0))
self.server.start()
self.client = socket.socket()
self.client.connect((self.server.host, self.server.port))
self.client.settimeout(1)
# synchronize by waiting for "220 ready" response
self.client.recv(1024)
self.sockno = self.client.fileno()
self.file = open(support.TESTFN, 'rb')
self.fileno = self.file.fileno()
def tearDown(self):
self.file.close()
self.client.close()
if self.server.running:
self.server.stop()
def sendfile_wrapper(self, sock, file, offset, nbytes, headers=[], trailers=[]):
"""A higher level wrapper representing how an application is
supposed to use sendfile().
"""
while 1:
try:
if self.SUPPORT_HEADERS_TRAILERS:
return os.sendfile(sock, file, offset, nbytes, headers,
trailers)
else:
return os.sendfile(sock, file, offset, nbytes)
except OSError as err:
if err.errno == errno.ECONNRESET:
# disconnected
raise
elif err.errno in (errno.EAGAIN, errno.EBUSY):
                    # we have to retry sending the data
continue
else:
raise
def test_send_whole_file(self):
# normal send
total_sent = 0
offset = 0
nbytes = 4096
while total_sent < len(self.DATA):
sent = self.sendfile_wrapper(self.sockno, self.fileno, offset, nbytes)
if sent == 0:
break
offset += sent
total_sent += sent
self.assertTrue(sent <= nbytes)
self.assertEqual(offset, total_sent)
self.assertEqual(total_sent, len(self.DATA))
self.client.shutdown(socket.SHUT_RDWR)
self.client.close()
self.server.wait()
data = self.server.handler_instance.get_data()
self.assertEqual(len(data), len(self.DATA))
self.assertEqual(data, self.DATA)
def test_send_at_certain_offset(self):
# start sending a file at a certain offset
total_sent = 0
offset = len(self.DATA) // 2
must_send = len(self.DATA) - offset
nbytes = 4096
while total_sent < must_send:
sent = self.sendfile_wrapper(self.sockno, self.fileno, offset, nbytes)
if sent == 0:
break
offset += sent
total_sent += sent
self.assertTrue(sent <= nbytes)
self.client.shutdown(socket.SHUT_RDWR)
self.client.close()
self.server.wait()
data = self.server.handler_instance.get_data()
expected = self.DATA[len(self.DATA) // 2:]
self.assertEqual(total_sent, len(expected))
self.assertEqual(len(data), len(expected))
self.assertEqual(data, expected)
def test_offset_overflow(self):
# specify an offset > file size
offset = len(self.DATA) + 4096
try:
sent = os.sendfile(self.sockno, self.fileno, offset, 4096)
except OSError as e:
# Solaris can raise EINVAL if offset >= file length, ignore.
if e.errno != errno.EINVAL:
raise
else:
self.assertEqual(sent, 0)
self.client.shutdown(socket.SHUT_RDWR)
self.client.close()
self.server.wait()
data = self.server.handler_instance.get_data()
self.assertEqual(data, b'')
def test_invalid_offset(self):
with self.assertRaises(OSError) as cm:
os.sendfile(self.sockno, self.fileno, -1, 4096)
self.assertEqual(cm.exception.errno, errno.EINVAL)
def test_keywords(self):
# Keyword arguments should be supported
os.sendfile(out=self.sockno, offset=0, count=4096,
**{'in': self.fileno})
if self.SUPPORT_HEADERS_TRAILERS:
os.sendfile(self.sockno, self.fileno, offset=0, count=4096,
headers=(), trailers=(), flags=0)
# --- headers / trailers tests
@requires_headers_trailers
def test_headers(self):
total_sent = 0
sent = os.sendfile(self.sockno, self.fileno, 0, 4096,
headers=[b"x" * 512])
total_sent += sent
offset = 4096
nbytes = 4096
while 1:
sent = self.sendfile_wrapper(self.sockno, self.fileno,
offset, nbytes)
if sent == 0:
break
total_sent += sent
offset += sent
expected_data = b"x" * 512 + self.DATA
self.assertEqual(total_sent, len(expected_data))
self.client.close()
self.server.wait()
data = self.server.handler_instance.get_data()
self.assertEqual(hash(data), hash(expected_data))
@requires_headers_trailers
def test_trailers(self):
TESTFN2 = support.TESTFN + "2"
file_data = b"abcdef"
self.addCleanup(support.unlink, TESTFN2)
create_file(TESTFN2, file_data)
with open(TESTFN2, 'rb') as f:
os.sendfile(self.sockno, f.fileno(), 0, len(file_data),
trailers=[b"1234"])
self.client.close()
self.server.wait()
data = self.server.handler_instance.get_data()
self.assertEqual(data, b"abcdef1234")
@requires_headers_trailers
@unittest.skipUnless(hasattr(os, 'SF_NODISKIO'),
'test needs os.SF_NODISKIO')
def test_flags(self):
try:
os.sendfile(self.sockno, self.fileno, 0, 4096,
flags=os.SF_NODISKIO)
except OSError as err:
if err.errno not in (errno.EBUSY, errno.EAGAIN):
raise
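# Probe for working extended-attribute support by actually setting a user.*
# attribute on a scratch file; os.setxattr() may exist yet still fail on
# filesystems without xattr support.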
def supports_extended_attributes():
if not hasattr(os, "setxattr"):
return False
try:
with open(support.TESTFN, "xb", 0) as fp:
try:
os.setxattr(fp.fileno(), b"user.test", b"")
except OSError:
return False
finally:
support.unlink(support.TESTFN)
return True
@unittest.skipUnless(supports_extended_attributes(),
"no non-broken extended attribute support")
# Kernels < 2.6.39 don't respect setxattr flags.
@support.requires_linux_version(2, 6, 39)
class ExtendedAttributeTests(unittest.TestCase):
def _check_xattrs_str(self, s, getxattr, setxattr, removexattr, listxattr, **kwargs):
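        # Exercise getxattr/setxattr/listxattr/removexattr end to end; s()
        # converts attribute names so the same checks run for both str and
        # bytes names.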
fn = support.TESTFN
self.addCleanup(support.unlink, fn)
create_file(fn)
with self.assertRaises(OSError) as cm:
getxattr(fn, s("user.test"), **kwargs)
self.assertEqual(cm.exception.errno, errno.ENODATA)
init_xattr = listxattr(fn)
self.assertIsInstance(init_xattr, list)
setxattr(fn, s("user.test"), b"", **kwargs)
xattr = set(init_xattr)
xattr.add("user.test")
self.assertEqual(set(listxattr(fn)), xattr)
self.assertEqual(getxattr(fn, b"user.test", **kwargs), b"")
setxattr(fn, s("user.test"), b"hello", os.XATTR_REPLACE, **kwargs)
self.assertEqual(getxattr(fn, b"user.test", **kwargs), b"hello")
with self.assertRaises(OSError) as cm:
setxattr(fn, s("user.test"), b"bye", os.XATTR_CREATE, **kwargs)
self.assertEqual(cm.exception.errno, errno.EEXIST)
with self.assertRaises(OSError) as cm:
setxattr(fn, s("user.test2"), b"bye", os.XATTR_REPLACE, **kwargs)
self.assertEqual(cm.exception.errno, errno.ENODATA)
setxattr(fn, s("user.test2"), b"foo", os.XATTR_CREATE, **kwargs)
xattr.add("user.test2")
self.assertEqual(set(listxattr(fn)), xattr)
removexattr(fn, s("user.test"), **kwargs)
with self.assertRaises(OSError) as cm:
getxattr(fn, s("user.test"), **kwargs)
self.assertEqual(cm.exception.errno, errno.ENODATA)
xattr.remove("user.test")
self.assertEqual(set(listxattr(fn)), xattr)
self.assertEqual(getxattr(fn, s("user.test2"), **kwargs), b"foo")
setxattr(fn, s("user.test"), b"a"*1024, **kwargs)
self.assertEqual(getxattr(fn, s("user.test"), **kwargs), b"a"*1024)
removexattr(fn, s("user.test"), **kwargs)
many = sorted("user.test{}".format(i) for i in range(100))
for thing in many:
setxattr(fn, thing, b"x", **kwargs)
self.assertEqual(set(listxattr(fn)), set(init_xattr) | set(many))
def _check_xattrs(self, *args, **kwargs):
self._check_xattrs_str(str, *args, **kwargs)
support.unlink(support.TESTFN)
self._check_xattrs_str(os.fsencode, *args, **kwargs)
support.unlink(support.TESTFN)
def test_simple(self):
self._check_xattrs(os.getxattr, os.setxattr, os.removexattr,
os.listxattr)
def test_lpath(self):
self._check_xattrs(os.getxattr, os.setxattr, os.removexattr,
os.listxattr, follow_symlinks=False)
def test_fds(self):
def getxattr(path, *args):
with open(path, "rb") as fp:
return os.getxattr(fp.fileno(), *args)
def setxattr(path, *args):
with open(path, "wb", 0) as fp:
os.setxattr(fp.fileno(), *args)
def removexattr(path, *args):
with open(path, "wb", 0) as fp:
os.removexattr(fp.fileno(), *args)
def listxattr(path, *args):
with open(path, "rb") as fp:
return os.listxattr(fp.fileno(), *args)
self._check_xattrs(getxattr, setxattr, removexattr, listxattr)
@unittest.skipUnless(hasattr(os, 'get_terminal_size'), "requires os.get_terminal_size")
class TermsizeTests(unittest.TestCase):
def test_does_not_crash(self):
"""Check if get_terminal_size() returns a meaningful value.
There's no easy portable way to actually check the size of the
terminal, so let's check if it returns something sensible instead.
"""
try:
size = os.get_terminal_size()
except OSError as e:
if sys.platform == "win32" or e.errno in (errno.EINVAL, errno.ENOTTY):
# Under win32 a generic OSError can be thrown if the
# handle cannot be retrieved
self.skipTest("failed to query terminal size")
raise
self.assertGreaterEqual(size.columns, 0)
self.assertGreaterEqual(size.lines, 0)
def test_stty_match(self):
"""Check if stty returns the same results
stty actually tests stdin, so get_terminal_size is invoked on
stdin explicitly. If stty succeeded, then get_terminal_size()
should work too.
"""
try:
size = subprocess.check_output(['stty', 'size']).decode().split()
except (FileNotFoundError, subprocess.CalledProcessError):
self.skipTest("stty invocation failed")
expected = (int(size[1]), int(size[0])) # reversed order
try:
actual = os.get_terminal_size(sys.__stdin__.fileno())
except OSError as e:
if sys.platform == "win32" or e.errno in (errno.EINVAL, errno.ENOTTY):
# Under win32 a generic OSError can be thrown if the
# handle cannot be retrieved
self.skipTest("failed to query terminal size")
raise
self.assertEqual(expected, actual)
class OSErrorTests(unittest.TestCase):
def setUp(self):
class Str(str):
pass
self.bytes_filenames = []
self.unicode_filenames = []
if support.TESTFN_UNENCODABLE is not None:
decoded = support.TESTFN_UNENCODABLE
else:
decoded = support.TESTFN
self.unicode_filenames.append(decoded)
self.unicode_filenames.append(Str(decoded))
if support.TESTFN_UNDECODABLE is not None:
encoded = support.TESTFN_UNDECODABLE
else:
encoded = os.fsencode(support.TESTFN)
self.bytes_filenames.append(encoded)
self.bytes_filenames.append(bytearray(encoded))
self.bytes_filenames.append(memoryview(encoded))
self.filenames = self.bytes_filenames + self.unicode_filenames
def test_oserror_filename(self):
funcs = [
(self.filenames, os.chdir,),
(self.filenames, os.chmod, 0o777),
(self.filenames, os.lstat,),
(self.filenames, os.open, os.O_RDONLY),
(self.filenames, os.rmdir,),
(self.filenames, os.stat,),
(self.filenames, os.unlink,),
]
if sys.platform == "win32":
funcs.extend((
(self.bytes_filenames, os.rename, b"dst"),
(self.bytes_filenames, os.replace, b"dst"),
(self.unicode_filenames, os.rename, "dst"),
(self.unicode_filenames, os.replace, "dst"),
(self.unicode_filenames, os.listdir, ),
))
else:
funcs.extend((
(self.filenames, os.listdir,),
(self.filenames, os.rename, "dst"),
(self.filenames, os.replace, "dst"),
))
if hasattr(os, "chown"):
funcs.append((self.filenames, os.chown, 0, 0))
if hasattr(os, "lchown"):
funcs.append((self.filenames, os.lchown, 0, 0))
if hasattr(os, "truncate"):
funcs.append((self.filenames, os.truncate, 0))
if hasattr(os, "chflags"):
funcs.append((self.filenames, os.chflags, 0))
if hasattr(os, "lchflags"):
funcs.append((self.filenames, os.lchflags, 0))
if hasattr(os, "chroot"):
funcs.append((self.filenames, os.chroot,))
if hasattr(os, "link"):
if sys.platform == "win32":
funcs.append((self.bytes_filenames, os.link, b"dst"))
funcs.append((self.unicode_filenames, os.link, "dst"))
else:
funcs.append((self.filenames, os.link, "dst"))
if hasattr(os, "listxattr"):
funcs.extend((
(self.filenames, os.listxattr,),
(self.filenames, os.getxattr, "user.test"),
(self.filenames, os.setxattr, "user.test", b'user'),
(self.filenames, os.removexattr, "user.test"),
))
if hasattr(os, "lchmod"):
funcs.append((self.filenames, os.lchmod, 0o777))
if hasattr(os, "readlink"):
if sys.platform == "win32":
funcs.append((self.unicode_filenames, os.readlink,))
else:
funcs.append((self.filenames, os.readlink,))
for filenames, func, *func_args in funcs:
for name in filenames:
try:
if isinstance(name, (str, bytes)):
func(name, *func_args)
else:
with self.assertWarnsRegex(DeprecationWarning, 'should be'):
func(name, *func_args)
except OSError as err:
self.assertIs(err.filename, name, str(func))
except UnicodeDecodeError:
pass
else:
self.fail("No exception thrown by {}".format(func))
class CPUCountTests(unittest.TestCase):
def test_cpu_count(self):
cpus = os.cpu_count()
if cpus is not None:
self.assertIsInstance(cpus, int)
self.assertGreater(cpus, 0)
else:
self.skipTest("Could not determine the number of CPUs")
class FDInheritanceTests(unittest.TestCase):
def test_get_set_inheritable(self):
fd = os.open(__file__, os.O_RDONLY)
self.addCleanup(os.close, fd)
self.assertEqual(os.get_inheritable(fd), False)
os.set_inheritable(fd, True)
self.assertEqual(os.get_inheritable(fd), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_get_inheritable_cloexec(self):
fd = os.open(__file__, os.O_RDONLY)
self.addCleanup(os.close, fd)
self.assertEqual(os.get_inheritable(fd), False)
# clear FD_CLOEXEC flag
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
self.assertEqual(os.get_inheritable(fd), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_set_inheritable_cloexec(self):
fd = os.open(__file__, os.O_RDONLY)
self.addCleanup(os.close, fd)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
fcntl.FD_CLOEXEC)
os.set_inheritable(fd, True)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
0)
def test_open(self):
fd = os.open(__file__, os.O_RDONLY)
self.addCleanup(os.close, fd)
self.assertEqual(os.get_inheritable(fd), False)
@unittest.skipUnless(hasattr(os, 'pipe'), "need os.pipe()")
def test_pipe(self):
rfd, wfd = os.pipe()
self.addCleanup(os.close, rfd)
self.addCleanup(os.close, wfd)
self.assertEqual(os.get_inheritable(rfd), False)
self.assertEqual(os.get_inheritable(wfd), False)
def test_dup(self):
fd1 = os.open(__file__, os.O_RDONLY)
self.addCleanup(os.close, fd1)
fd2 = os.dup(fd1)
self.addCleanup(os.close, fd2)
self.assertEqual(os.get_inheritable(fd2), False)
@unittest.skipUnless(hasattr(os, 'dup2'), "need os.dup2()")
def test_dup2(self):
fd = os.open(__file__, os.O_RDONLY)
self.addCleanup(os.close, fd)
# inheritable by default
fd2 = os.open(__file__, os.O_RDONLY)
try:
os.dup2(fd, fd2)
self.assertEqual(os.get_inheritable(fd2), True)
finally:
os.close(fd2)
# force non-inheritable
fd3 = os.open(__file__, os.O_RDONLY)
try:
os.dup2(fd, fd3, inheritable=False)
self.assertEqual(os.get_inheritable(fd3), False)
finally:
os.close(fd3)
@unittest.skipUnless(hasattr(os, 'openpty'), "need os.openpty()")
def test_openpty(self):
master_fd, slave_fd = os.openpty()
self.addCleanup(os.close, master_fd)
self.addCleanup(os.close, slave_fd)
self.assertEqual(os.get_inheritable(master_fd), False)
self.assertEqual(os.get_inheritable(slave_fd), False)
class PathTConverterTests(unittest.TestCase):
# tuples of (function name, allows fd arguments, additional arguments to
# function, cleanup function)
functions = [
('stat', True, (), None),
('lstat', False, (), None),
('access', False, (os.F_OK,), None),
('chflags', False, (0,), None),
('lchflags', False, (0,), None),
('open', False, (0,), getattr(os, 'close', None)),
]
def test_path_t_converter(self):
str_filename = support.TESTFN
if os.name == 'nt':
bytes_fspath = bytes_filename = None
else:
bytes_filename = support.TESTFN.encode('ascii')
bytes_fspath = _PathLike(bytes_filename)
fd = os.open(_PathLike(str_filename), os.O_WRONLY|os.O_CREAT)
self.addCleanup(support.unlink, support.TESTFN)
self.addCleanup(os.close, fd)
int_fspath = _PathLike(fd)
str_fspath = _PathLike(str_filename)
for name, allow_fd, extra_args, cleanup_fn in self.functions:
with self.subTest(name=name):
try:
fn = getattr(os, name)
except AttributeError:
continue
for path in (str_filename, bytes_filename, str_fspath,
bytes_fspath):
if path is None:
continue
with self.subTest(name=name, path=path):
result = fn(path, *extra_args)
if cleanup_fn is not None:
cleanup_fn(result)
with self.assertRaisesRegex(
TypeError, 'should be string, bytes'):
fn(int_fspath, *extra_args)
if allow_fd:
result = fn(fd, *extra_args) # should not fail
if cleanup_fn is not None:
cleanup_fn(result)
else:
with self.assertRaisesRegex(
TypeError,
'os.PathLike'):
fn(fd, *extra_args)
@unittest.skipUnless(hasattr(os, 'get_blocking'),
'needs os.get_blocking() and os.set_blocking()')
class BlockingTests(unittest.TestCase):
def test_blocking(self):
fd = os.open(__file__, os.O_RDONLY)
self.addCleanup(os.close, fd)
self.assertEqual(os.get_blocking(fd), True)
os.set_blocking(fd, False)
self.assertEqual(os.get_blocking(fd), False)
os.set_blocking(fd, True)
self.assertEqual(os.get_blocking(fd), True)
class ExportsTests(unittest.TestCase):
def test_os_all(self):
self.assertIn('open', os.__all__)
self.assertIn('walk', os.__all__)
class TestScandir(unittest.TestCase):
check_no_resource_warning = support.check_no_resource_warning
def setUp(self):
self.path = os.path.realpath(support.TESTFN)
self.bytes_path = os.fsencode(self.path)
self.addCleanup(support.rmtree, self.path)
os.mkdir(self.path)
def create_file(self, name="file.txt"):
path = self.bytes_path if isinstance(name, bytes) else self.path
filename = os.path.join(path, name)
create_file(filename, b'python')
return filename
def get_entries(self, names):
entries = dict((entry.name, entry)
for entry in os.scandir(self.path))
self.assertEqual(sorted(entries.keys()), names)
return entries
def assert_stat_equal(self, stat1, stat2, skip_fields):
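        # When skip_fields is true (Windows), compare st_* attributes one by
        # one and ignore st_dev/st_ino/st_nlink, which a cached DirEntry stat
        # does not reliably provide there.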
if skip_fields:
for attr in dir(stat1):
if not attr.startswith("st_"):
continue
if attr in ("st_dev", "st_ino", "st_nlink"):
continue
self.assertEqual(getattr(stat1, attr),
getattr(stat2, attr),
(stat1, stat2, attr))
else:
self.assertEqual(stat1, stat2)
def check_entry(self, entry, name, is_dir, is_file, is_symlink):
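        # Cross-check every DirEntry accessor against fresh os.stat()/lstat()
        # results for the same path.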
self.assertIsInstance(entry, os.DirEntry)
self.assertEqual(entry.name, name)
self.assertEqual(entry.path, os.path.join(self.path, name))
self.assertEqual(entry.inode(),
os.stat(entry.path, follow_symlinks=False).st_ino)
entry_stat = os.stat(entry.path)
self.assertEqual(entry.is_dir(),
stat.S_ISDIR(entry_stat.st_mode))
self.assertEqual(entry.is_file(),
stat.S_ISREG(entry_stat.st_mode))
self.assertEqual(entry.is_symlink(),
os.path.islink(entry.path))
entry_lstat = os.stat(entry.path, follow_symlinks=False)
self.assertEqual(entry.is_dir(follow_symlinks=False),
stat.S_ISDIR(entry_lstat.st_mode))
self.assertEqual(entry.is_file(follow_symlinks=False),
stat.S_ISREG(entry_lstat.st_mode))
self.assert_stat_equal(entry.stat(),
entry_stat,
os.name == 'nt' and not is_symlink)
self.assert_stat_equal(entry.stat(follow_symlinks=False),
entry_lstat,
os.name == 'nt')
def test_attributes(self):
link = hasattr(os, 'link')
symlink = support.can_symlink()
dirname = os.path.join(self.path, "dir")
os.mkdir(dirname)
filename = self.create_file("file.txt")
if link:
os.link(filename, os.path.join(self.path, "link_file.txt"))
if symlink:
os.symlink(dirname, os.path.join(self.path, "symlink_dir"),
target_is_directory=True)
os.symlink(filename, os.path.join(self.path, "symlink_file.txt"))
names = ['dir', 'file.txt']
if link:
names.append('link_file.txt')
if symlink:
names.extend(('symlink_dir', 'symlink_file.txt'))
entries = self.get_entries(names)
entry = entries['dir']
self.check_entry(entry, 'dir', True, False, False)
entry = entries['file.txt']
self.check_entry(entry, 'file.txt', False, True, False)
if link:
entry = entries['link_file.txt']
self.check_entry(entry, 'link_file.txt', False, True, False)
if symlink:
entry = entries['symlink_dir']
self.check_entry(entry, 'symlink_dir', True, False, True)
entry = entries['symlink_file.txt']
self.check_entry(entry, 'symlink_file.txt', False, True, True)
def get_entry(self, name):
path = self.bytes_path if isinstance(name, bytes) else self.path
entries = list(os.scandir(path))
self.assertEqual(len(entries), 1)
entry = entries[0]
self.assertEqual(entry.name, name)
return entry
def create_file_entry(self, name='file.txt'):
filename = self.create_file(name=name)
return self.get_entry(os.path.basename(filename))
def test_current_directory(self):
filename = self.create_file()
old_dir = os.getcwd()
try:
os.chdir(self.path)
            # call scandir() without an argument: it must list the contents
            # of the current directory
entries = dict((entry.name, entry) for entry in os.scandir())
self.assertEqual(sorted(entries.keys()),
[os.path.basename(filename)])
finally:
os.chdir(old_dir)
def test_repr(self):
entry = self.create_file_entry()
self.assertEqual(repr(entry), "<DirEntry 'file.txt'>")
def test_fspath_protocol(self):
entry = self.create_file_entry()
self.assertEqual(os.fspath(entry), os.path.join(self.path, 'file.txt'))
def test_fspath_protocol_bytes(self):
bytes_filename = os.fsencode('bytesfile.txt')
bytes_entry = self.create_file_entry(name=bytes_filename)
fspath = os.fspath(bytes_entry)
self.assertIsInstance(fspath, bytes)
self.assertEqual(fspath,
os.path.join(os.fsencode(self.path),bytes_filename))
def test_removed_dir(self):
path = os.path.join(self.path, 'dir')
os.mkdir(path)
entry = self.get_entry('dir')
os.rmdir(path)
        # On POSIX, the is_dir() result depends on whether scandir() filled d_type or not
if os.name == 'nt':
self.assertTrue(entry.is_dir())
self.assertFalse(entry.is_file())
self.assertFalse(entry.is_symlink())
if os.name == 'nt':
self.assertRaises(FileNotFoundError, entry.inode)
# don't fail
entry.stat()
entry.stat(follow_symlinks=False)
else:
self.assertGreater(entry.inode(), 0)
self.assertRaises(FileNotFoundError, entry.stat)
self.assertRaises(FileNotFoundError, entry.stat, follow_symlinks=False)
def test_removed_file(self):
entry = self.create_file_entry()
os.unlink(entry.path)
self.assertFalse(entry.is_dir())
        # On POSIX, the is_dir() result depends on whether scandir() filled d_type or not
if os.name == 'nt':
self.assertTrue(entry.is_file())
self.assertFalse(entry.is_symlink())
if os.name == 'nt':
self.assertRaises(FileNotFoundError, entry.inode)
# don't fail
entry.stat()
entry.stat(follow_symlinks=False)
else:
self.assertGreater(entry.inode(), 0)
self.assertRaises(FileNotFoundError, entry.stat)
self.assertRaises(FileNotFoundError, entry.stat, follow_symlinks=False)
def test_broken_symlink(self):
if not support.can_symlink():
return self.skipTest('cannot create symbolic link')
filename = self.create_file("file.txt")
os.symlink(filename,
os.path.join(self.path, "symlink.txt"))
entries = self.get_entries(['file.txt', 'symlink.txt'])
entry = entries['symlink.txt']
os.unlink(filename)
self.assertGreater(entry.inode(), 0)
self.assertFalse(entry.is_dir())
self.assertFalse(entry.is_file()) # broken symlink returns False
self.assertFalse(entry.is_dir(follow_symlinks=False))
self.assertFalse(entry.is_file(follow_symlinks=False))
self.assertTrue(entry.is_symlink())
self.assertRaises(FileNotFoundError, entry.stat)
# don't fail
entry.stat(follow_symlinks=False)
def test_bytes(self):
self.create_file("file.txt")
path_bytes = os.fsencode(self.path)
entries = list(os.scandir(path_bytes))
self.assertEqual(len(entries), 1, entries)
entry = entries[0]
self.assertEqual(entry.name, b'file.txt')
self.assertEqual(entry.path,
os.fsencode(os.path.join(self.path, 'file.txt')))
@unittest.skipUnless(os.listdir in os.supports_fd,
'fd support for listdir required for this test.')
def test_fd(self):
self.assertIn(os.scandir, os.supports_fd)
self.create_file('file.txt')
expected_names = ['file.txt']
if support.can_symlink():
os.symlink('file.txt', os.path.join(self.path, 'link'))
expected_names.append('link')
fd = os.open(self.path, os.O_RDONLY)
try:
with os.scandir(fd) as it:
entries = list(it)
names = [entry.name for entry in entries]
self.assertEqual(sorted(names), expected_names)
self.assertEqual(names, os.listdir(fd))
for entry in entries:
self.assertEqual(entry.path, entry.name)
self.assertEqual(os.fspath(entry), entry.name)
self.assertEqual(entry.is_symlink(), entry.name == 'link')
if os.stat in os.supports_dir_fd:
st = os.stat(entry.name, dir_fd=fd)
self.assertEqual(entry.stat(), st)
st = os.stat(entry.name, dir_fd=fd, follow_symlinks=False)
self.assertEqual(entry.stat(follow_symlinks=False), st)
finally:
os.close(fd)
def test_empty_path(self):
self.assertRaises(FileNotFoundError, os.scandir, '')
def test_consume_iterator_twice(self):
self.create_file("file.txt")
iterator = os.scandir(self.path)
entries = list(iterator)
self.assertEqual(len(entries), 1, entries)
        # check that consuming the iterator twice doesn't raise an exception
entries2 = list(iterator)
self.assertEqual(len(entries2), 0, entries2)
def test_bad_path_type(self):
for obj in [1.234, {}, []]:
self.assertRaises(TypeError, os.scandir, obj)
def test_close(self):
self.create_file("file.txt")
self.create_file("file2.txt")
iterator = os.scandir(self.path)
next(iterator)
iterator.close()
# multiple closes
iterator.close()
with self.check_no_resource_warning():
del iterator
def test_context_manager(self):
self.create_file("file.txt")
self.create_file("file2.txt")
with os.scandir(self.path) as iterator:
next(iterator)
with self.check_no_resource_warning():
del iterator
def test_context_manager_close(self):
self.create_file("file.txt")
self.create_file("file2.txt")
with os.scandir(self.path) as iterator:
next(iterator)
iterator.close()
def test_context_manager_exception(self):
self.create_file("file.txt")
self.create_file("file2.txt")
with self.assertRaises(ZeroDivisionError):
with os.scandir(self.path) as iterator:
next(iterator)
1/0
with self.check_no_resource_warning():
del iterator
def test_resource_warning(self):
self.create_file("file.txt")
self.create_file("file2.txt")
iterator = os.scandir(self.path)
next(iterator)
with self.assertWarns(ResourceWarning):
del iterator
support.gc_collect()
# exhausted iterator
iterator = os.scandir(self.path)
list(iterator)
with self.check_no_resource_warning():
del iterator
class TestPEP519(unittest.TestCase):
# Abstracted so it can be overridden to test pure Python implementation
# if a C version is provided.
fspath = staticmethod(os.fspath)
def test_return_bytes(self):
for b in b'hello', b'goodbye', b'some/path/and/file':
self.assertEqual(b, self.fspath(b))
def test_return_string(self):
for s in 'hello', 'goodbye', 'some/path/and/file':
self.assertEqual(s, self.fspath(s))
def test_fsencode_fsdecode(self):
for p in "path/like/object", b"path/like/object":
pathlike = _PathLike(p)
self.assertEqual(p, self.fspath(pathlike))
self.assertEqual(b"path/like/object", os.fsencode(pathlike))
self.assertEqual("path/like/object", os.fsdecode(pathlike))
def test_pathlike(self):
self.assertEqual('#feelthegil', self.fspath(_PathLike('#feelthegil')))
self.assertTrue(issubclass(_PathLike, os.PathLike))
self.assertTrue(isinstance(_PathLike(), os.PathLike))
def test_garbage_in_exception_out(self):
vapor = type('blah', (), {})
for o in int, type, os, vapor():
self.assertRaises(TypeError, self.fspath, o)
def test_argument_required(self):
self.assertRaises(TypeError, self.fspath)
def test_bad_pathlike(self):
# __fspath__ returns a value other than str or bytes.
self.assertRaises(TypeError, self.fspath, _PathLike(42))
# __fspath__ attribute that is not callable.
c = type('foo', (), {})
c.__fspath__ = 1
self.assertRaises(TypeError, self.fspath, c())
# __fspath__ raises an exception.
self.assertRaises(ZeroDivisionError, self.fspath,
_PathLike(ZeroDivisionError()))
# Only test if the C version is provided, otherwise TestPEP519 already tested
# the pure Python implementation.
if hasattr(os, "_fspath"):
class TestPEP519PurePython(TestPEP519):
"""Explicitly test the pure Python implementation of os.fspath()."""
fspath = staticmethod(os._fspath)
if __name__ == "__main__":
unittest.main()
| 37.23863 | 101 | 0.595005 |
4a22013f7c1f8e84fa1006b847fd38a6e09d9136 | 2,389 | py | Python | app/soc/views/helper/templates.py | jamslevy/gsoc | e995e1a8d34e0291ab988ba501ae4efc61f9516d | [
"Apache-2.0"
] | 1 | 2016-05-09T14:43:53.000Z | 2016-05-09T14:43:53.000Z | app/soc/views/helper/templates.py | jamslevy/gsoc | e995e1a8d34e0291ab988ba501ae4efc61f9516d | [
"Apache-2.0"
] | null | null | null | app/soc/views/helper/templates.py | jamslevy/gsoc | e995e1a8d34e0291ab988ba501ae4efc61f9516d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for manipulating templates.
"""
__authors__ = [
'"Todd Larsen" <[email protected]>',
'"Pawel Solyga" <[email protected]>'
]
def makeSiblingTemplatesList(templates, new_template_file,
default_template=None):
"""Converts template paths into a list of "sibling" templates.
Args:
templates: search list of templates (or just a single template not in a
list) from which template paths will be extracted (discarding the final
template file name of each template)
new_template_file: new "sibling" template file to append to each extracted
template path
default_template: a default template (or a list of them) to append to the
end of the generated "sibling" template paths; default is None
Returns:
A list of potential "sibling" templates named by new_template_file located
in the paths of the templates in the supplied list. For example, from:
['foo/bar/the_old_template.html', 'foo/the_old_template.html']
to:
['foo/bar/some_new_template.html', 'foo/some_new_template.html']
"""
if not isinstance(templates, (list, tuple)):
templates = [templates]
if default_template is None:
default_template = []
if not isinstance(default_template, (list, tuple)):
default_template = [default_template]
sibling_templates = [
'%s/%s' % (t.rsplit('/', 1)[0], new_template_file) for t in templates]
return sibling_templates + default_template
def unescape(html):
"""Returns the given HTML with ampersands, quotes and carets decoded.
"""
if not isinstance(html, basestring):
html = str(html)
  # str.replace() returns a new string, so the result must be reassigned
  html = html.replace(''', "'").replace('<', '<')
  html = html.replace('>', '>').replace('"', '"').replace('&', '&')
return html
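# Illustrative round-trip for unescape() (hypothetical input, not part of the
# original module):
#
#   unescape('<b>"Fish & Chips"</b>')
#   -> '<b>"Fish & Chips"</b>'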
| 34.128571 | 78 | 0.701549 |
4a2201d4e6342f3633b1f53b7c76ba1712de1244 | 4,197 | py | Python | mentalist/view/substitution.py | qkum/mentalist | fb6944f5f60d0964a085606abcdf6ad0dad8905f | [
"MIT"
] | 1,293 | 2017-11-07T15:36:24.000Z | 2022-03-30T06:28:34.000Z | mentalist/view/substitution.py | qkum/mentalist | fb6944f5f60d0964a085606abcdf6ad0dad8905f | [
"MIT"
] | 22 | 2018-01-17T12:49:55.000Z | 2021-12-24T03:45:08.000Z | mentalist/view/substitution.py | qkum/mentalist | fb6944f5f60d0964a085606abcdf6ad0dad8905f | [
"MIT"
] | 215 | 2017-11-07T19:55:20.000Z | 2022-03-30T06:28:36.000Z | import tkinter as Tk
from functools import partial
from .base import BaseNode
from .main import center_window
from .const import SUBSTITUTION_CHECKS, SPECIAL_TYPES
from .. import model
class SubstitutionNode(BaseNode):
'''Substitute one character for another
'''
def __init__(self, controller, master=None, **kwargs):
BaseNode.__init__(self, controller, master=master, title='Substitution', **kwargs)
self.case_popup = None
self.sp_case = None
def add_upper_button(self):
mb = Tk.Menubutton(self.upper_frame, text=' + ', relief='raised', font=('Helvetica', '14'))
mb.menu = Tk.Menu(mb, tearoff=0)
mb['menu'] = mb.menu
label = 'No Substitution'
mb.menu.add_command(label=label, command=partial(self.controller.add_attr, label=label, node_view=self, attr_class=model.NothingMutatorAttr))
mb.menu.add_command(label='Replace All Instances...', command=partial(self.open_sub_popup, 'All'))
mb.menu.add_command(label='Replace First Instance...', command=partial(self.open_sub_popup, 'First'))
mb.menu.add_command(label='Replace Last Instance...', command=partial(self.open_sub_popup, 'Last'))
mb.pack(side='left', fill='x', padx=10, pady=5)
def open_sub_popup(self, type_):
'''Opens popup for defining the characters to substitute
type_: 'All', 'First', or 'Last'
'''
self.sub_popup = Tk.Toplevel()
self.sub_popup.withdraw()
self.sub_popup.title('Replace {}'.format(type_))
self.sub_popup.resizable(width=False, height=False)
frame = Tk.Frame(self.sub_popup)
        lb = Tk.Label(frame, text='Select Substitution Checks')
lb.pack(fill='both', side='top')
# Create a checkbox for each possible character substitution
box = Tk.Frame(frame)
self.chk_subs = []
max_column_checks = 15
for v in range(len(SUBSTITUTION_CHECKS)):
val = SUBSTITUTION_CHECKS[v]
var = Tk.IntVar()
tmp = Tk.Checkbutton(box, text=val, relief=Tk.FLAT, variable=var,
font=('Courier', '14'))
self.chk_subs.append(var)
# Split the checks into columns so the window isn't too tall
tmp.grid(row=v % max_column_checks, column=v // max_column_checks,
sticky='W', padx=10)
box.pack(fill='both', side='top', padx=30, pady=20)
box_type = Tk.Frame(frame)
self.sub_type = Tk.IntVar()
for i, val in enumerate(SPECIAL_TYPES):
tmp = Tk.Radiobutton(box_type, text=val, relief=Tk.FLAT, variable=self.sub_type, value=i)
tmp.pack(fill='both', side='left')
box_type.pack(fill='both', side='top', padx=30, pady=20)
btn_box = Tk.Frame(frame)
btn_cancel = Tk.Button(btn_box, text='Cancel', command=self.cancel_sub_popup)
btn_cancel.pack(side='right', padx=10, pady=20)
btn_ok = Tk.Button(btn_box, text='Ok', command=partial(self.on_ok_sub_popup, type_))
btn_ok.pack(side='left', padx=10, pady=20)
btn_box.pack()
frame.pack(fill='both', padx=40, pady=10)
center_window(self.sub_popup, self.main.master)
self.sub_popup.focus_set()
def cancel_sub_popup(self, *args):
if self.sub_popup:
self.sub_popup.destroy()
self.sub_popup = None
def on_ok_sub_popup(self, type_, *args):
'''OK in substitution popup was selected, create the attribute
type_: 'All', 'First', or 'Last'
'''
checked_vals = [SUBSTITUTION_CHECKS[i] for i in range(len(SUBSTITUTION_CHECKS)) if self.chk_subs[i].get() == 1]
if len(checked_vals) > 0:
special_type = SPECIAL_TYPES[self.sub_type.get()]
label = 'Replace {}: {} ({})'.format(type_, ', '.join(checked_vals),
special_type)
self.controller.add_attr(label=label, node_view=self, attr_class=model.SubstitutionAttr, type_=type_, checked_vals=checked_vals, all_together=special_type=='All together')
self.cancel_sub_popup()
| 46.120879 | 183 | 0.627353 |
4a22023c68e05bd941096efad3c7f93687b5e08c | 1,609 | py | Python | milk2/milk2.py | jasonhuh/UASCO-Solutions | dfcd574d12b574f396ac041dc1b33c20e9282e5c | [
"MIT"
] | null | null | null | milk2/milk2.py | jasonhuh/UASCO-Solutions | dfcd574d12b574f396ac041dc1b33c20e9282e5c | [
"MIT"
] | null | null | null | milk2/milk2.py | jasonhuh/UASCO-Solutions | dfcd574d12b574f396ac041dc1b33c20e9282e5c | [
"MIT"
] | null | null | null | """
ID: jasonhu5
LANG: PYTHON3
TASK: milk2
"""
def solve(ar):
# Combine overlapping events
def merge_events(ar):
ar = sorted(ar, key=lambda x: x[0]) # Sort by start time
stack = [ar[0]]
for _, event in enumerate(ar[1:]):
top = stack[-1]
if top[1] >= event[0]: # Events overlap
if top[1] < event[1]:
stack.pop()
stack.append((top[0], event[1]))
else:
stack.append(event)
return stack
finals = merge_events(ar)
max_range, max_space = finals[0][1] - finals[0][0], 0
for i, event in enumerate(finals[1:], 1):
max_range = max(max_range, event[1] - event[0])
max_space = max(max_space, event[0] - finals[i-1][1])
return (max_range, max_space)
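# Worked example (values borrowed from test_simple() below, for illustration):
#   (300, 1000), (700, 1200), (1500, 2100) merge into (300, 1200), (1500, 2100);
#   the longest merged interval is 1200 - 300 = 900 and the longest gap between
#   merged intervals is 1500 - 1200 = 300, so solve() returns (900, 300).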
def test_simple():
assert solve([(100, 200)]) == (100, 0)
assert solve([(300, 1000), (700, 1200), (1500, 2100)]) == (900, 300)
assert solve([(2, 3), (4, 5), (6, 7), (8, 9), (10, 11), (12, 13), \
(14, 15), (16, 17), (18, 19), (1, 20)]) == (19, 0)
assert solve([(100, 200), (200, 400), (400, 800), (800, 1600), (50, 100), \
(1700, 3200)]) == (1550, 100)
if __name__ == '__main__':
test_simple()
fin = open('milk2.in', 'r')
fout = open('milk2.out', 'w')
N = int(fin.readline().strip())
ar = []
for _ in range(N):
start, finish = map(int, fin.readline().strip().split())
ar.append((start, finish))
ans = solve(ar)
fout.write('{} {}\n'.format(ans[0], ans[1]))
fout.close()
| 32.836735 | 79 | 0.497825 |
4a220372594bc419c6e7de1c95df55a49cc8c5a0 | 12,520 | py | Python | Train_faceNet_model/Train+FaceNet+model/tf_slim/nets/inception_v3_test.py | yxing0225/Masked_Face_Detection-Recognition | 126c5bebcefb5b5fbbb105007f32574cb9359064 | [
"Apache-2.0"
] | 307 | 2019-06-28T00:25:39.000Z | 2022-03-27T15:56:15.000Z | Train_faceNet_model/Train+FaceNet+model/tf_slim/nets/inception_v3_test.py | yxing0225/Masked_Face_Detection-Recognition | 126c5bebcefb5b5fbbb105007f32574cb9359064 | [
"Apache-2.0"
] | 17 | 2019-07-26T08:21:40.000Z | 2022-02-25T02:53:23.000Z | Train_faceNet_model/Train+FaceNet+model/tf_slim/nets/inception_v3_test.py | yxing0225/Masked_Face_Detection-Recognition | 126c5bebcefb5b5fbbb105007f32574cb9359064 | [
"Apache-2.0"
] | 95 | 2019-06-30T05:44:07.000Z | 2022-03-28T04:38:52.000Z | # coding=utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for nets.inception_v3."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tf_slim import model_analyzer
from tf_slim.nets import inception_v3
from tf_slim.ops import variables as variables_lib
from tf_slim.ops.arg_scope import arg_scope
# pylint:disable=g-direct-tensorflow-import
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# pylint:enable=g-direct-tensorflow-import
def setUpModule():
tf.disable_eager_execution()
class InceptionV3Test(test.TestCase):
def testBuildClassificationNetwork(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, end_points = inception_v3.inception_v3(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('Predictions' in end_points)
self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
[batch_size, num_classes])
def testBuildBaseNetwork(self):
batch_size = 5
height, width = 299, 299
inputs = random_ops.random_uniform((batch_size, height, width, 3))
final_endpoint, end_points = inception_v3.inception_v3_base(inputs)
self.assertTrue(final_endpoint.op.name.startswith('InceptionV3/Mixed_7c'))
self.assertListEqual(final_endpoint.get_shape().as_list(),
[batch_size, 8, 8, 2048])
expected_endpoints = [
'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'MaxPool_3a_3x3',
'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3', 'Mixed_5b',
'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'
]
self.assertItemsEqual(end_points.keys(), expected_endpoints)
def testBuildOnlyUptoFinalEndpoint(self):
batch_size = 5
height, width = 299, 299
endpoints = [
'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'MaxPool_3a_3x3',
'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3', 'Mixed_5b',
'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'
]
for index, endpoint in enumerate(endpoints):
with ops.Graph().as_default():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
out_tensor, end_points = inception_v3.inception_v3_base(
inputs, final_endpoint=endpoint)
self.assertTrue(
out_tensor.op.name.startswith('InceptionV3/' + endpoint))
self.assertItemsEqual(endpoints[:index + 1], end_points)
def testBuildAndCheckAllEndPointsUptoMixed7c(self):
batch_size = 5
height, width = 299, 299
inputs = random_ops.random_uniform((batch_size, height, width, 3))
_, end_points = inception_v3.inception_v3_base(
inputs, final_endpoint='Mixed_7c')
endpoints_shapes = {
'Conv2d_1a_3x3': [batch_size, 149, 149, 32],
'Conv2d_2a_3x3': [batch_size, 147, 147, 32],
'Conv2d_2b_3x3': [batch_size, 147, 147, 64],
'MaxPool_3a_3x3': [batch_size, 73, 73, 64],
'Conv2d_3b_1x1': [batch_size, 73, 73, 80],
'Conv2d_4a_3x3': [batch_size, 71, 71, 192],
'MaxPool_5a_3x3': [batch_size, 35, 35, 192],
'Mixed_5b': [batch_size, 35, 35, 256],
'Mixed_5c': [batch_size, 35, 35, 288],
'Mixed_5d': [batch_size, 35, 35, 288],
'Mixed_6a': [batch_size, 17, 17, 768],
'Mixed_6b': [batch_size, 17, 17, 768],
'Mixed_6c': [batch_size, 17, 17, 768],
'Mixed_6d': [batch_size, 17, 17, 768],
'Mixed_6e': [batch_size, 17, 17, 768],
'Mixed_7a': [batch_size, 8, 8, 1280],
'Mixed_7b': [batch_size, 8, 8, 2048],
'Mixed_7c': [batch_size, 8, 8, 2048]
}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testModelHasExpectedNumberOfParameters(self):
batch_size = 5
height, width = 299, 299
inputs = random_ops.random_uniform((batch_size, height, width, 3))
with arg_scope(inception_v3.inception_v3_arg_scope()):
inception_v3.inception_v3_base(inputs)
total_params, _ = model_analyzer.analyze_vars(
variables_lib.get_model_variables())
self.assertAlmostEqual(21802784, total_params)
def testBuildEndPoints(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = random_ops.random_uniform((batch_size, height, width, 3))
_, end_points = inception_v3.inception_v3(inputs, num_classes)
self.assertTrue('Logits' in end_points)
logits = end_points['Logits']
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('AuxLogits' in end_points)
aux_logits = end_points['AuxLogits']
self.assertListEqual(aux_logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('Mixed_7c' in end_points)
pre_pool = end_points['Mixed_7c']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 8, 8, 2048])
self.assertTrue('PreLogits' in end_points)
pre_logits = end_points['PreLogits']
self.assertListEqual(pre_logits.get_shape().as_list(),
[batch_size, 1, 1, 2048])
def testBuildEndPointsWithDepthMultiplierLessThanOne(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = random_ops.random_uniform((batch_size, height, width, 3))
_, end_points = inception_v3.inception_v3(inputs, num_classes)
endpoint_keys = [
key for key in end_points.keys()
if key.startswith('Mixed') or key.startswith('Conv')
]
_, end_points_with_multiplier = inception_v3.inception_v3(
inputs, num_classes, scope='depth_multiplied_net', depth_multiplier=0.5)
for key in endpoint_keys:
original_depth = end_points[key].get_shape().as_list()[3]
new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
self.assertEqual(0.5 * original_depth, new_depth)
def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = random_ops.random_uniform((batch_size, height, width, 3))
_, end_points = inception_v3.inception_v3(inputs, num_classes)
endpoint_keys = [
key for key in end_points.keys()
if key.startswith('Mixed') or key.startswith('Conv')
]
_, end_points_with_multiplier = inception_v3.inception_v3(
inputs, num_classes, scope='depth_multiplied_net', depth_multiplier=2.0)
for key in endpoint_keys:
original_depth = end_points[key].get_shape().as_list()[3]
new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
self.assertEqual(2.0 * original_depth, new_depth)
def testRaiseValueErrorWithInvalidDepthMultiplier(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = random_ops.random_uniform((batch_size, height, width, 3))
with self.assertRaises(ValueError):
_ = inception_v3.inception_v3(inputs, num_classes, depth_multiplier=-0.1)
with self.assertRaises(ValueError):
_ = inception_v3.inception_v3(inputs, num_classes, depth_multiplier=0.0)
def testHalfSizeImages(self):
batch_size = 5
height, width = 150, 150
num_classes = 1000
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, end_points = inception_v3.inception_v3(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_7c']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 3, 3, 2048])
def testUnknownImageShape(self):
ops.reset_default_graph()
batch_size = 2
height, width = 299, 299
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.cached_session() as sess:
inputs = array_ops.placeholder(
dtypes.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception_v3.inception_v3(inputs, num_classes)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_7c']
feed_dict = {inputs: input_np}
variables.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 8, 2048])
def testUnknownBatchSize(self):
batch_size = 1
height, width = 299, 299
num_classes = 1000
inputs = array_ops.placeholder(dtypes.float32, (None, height, width, 3))
logits, _ = inception_v3.inception_v3(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
self.assertListEqual(logits.get_shape().as_list(), [None, num_classes])
images = random_ops.random_uniform((batch_size, height, width, 3))
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEqual(output.shape, (batch_size, num_classes))
def testEvaluation(self):
batch_size = 2
height, width = 299, 299
num_classes = 1000
eval_inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = inception_v3.inception_v3(
eval_inputs, num_classes, is_training=False)
predictions = math_ops.argmax(logits, 1)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(predictions)
self.assertEqual(output.shape, (batch_size,))
def testTrainEvalWithReuse(self):
train_batch_size = 5
eval_batch_size = 2
height, width = 150, 150
num_classes = 1000
train_inputs = random_ops.random_uniform(
(train_batch_size, height, width, 3))
inception_v3.inception_v3(train_inputs, num_classes)
eval_inputs = random_ops.random_uniform((eval_batch_size, height, width, 3))
logits, _ = inception_v3.inception_v3(
eval_inputs, num_classes, is_training=False, reuse=True)
predictions = math_ops.argmax(logits, 1)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(predictions)
self.assertEqual(output.shape, (eval_batch_size,))
def testLogitsNotSqueezed(self):
num_classes = 25
images = random_ops.random_uniform([1, 299, 299, 3])
logits, _ = inception_v3.inception_v3(
images, num_classes=num_classes, spatial_squeeze=False)
with self.cached_session() as sess:
variables.global_variables_initializer().run()
logits_out = sess.run(logits)
self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
if __name__ == '__main__':
test.main()
| 39.746032 | 80 | 0.692093 |
4a220501b117daae1426ce85f57c2b902a8b2b36 | 7,775 | py | Python | report_stats_on_json_kg.py | RTXteam/RTX-KG2 | 2e4affbd423550e5b2456f97da07184a4833d66a | [
"MIT"
] | 3 | 2021-11-09T19:41:40.000Z | 2021-12-26T21:51:38.000Z | report_stats_on_json_kg.py | RTXteam/RTX-KG2 | 2e4affbd423550e5b2456f97da07184a4833d66a | [
"MIT"
] | 190 | 2021-05-22T01:25:49.000Z | 2022-03-20T05:05:37.000Z | report_stats_on_json_kg.py | RTXteam/RTX-KG2 | 2e4affbd423550e5b2456f97da07184a4833d66a | [
"MIT"
] | 1 | 2021-05-26T22:51:26.000Z | 2021-05-26T22:51:26.000Z | #!/usr/bin/env python3
'''Prints a JSON overview report of a JSON knowledge graph in Biolink format, to the given output file.
Usage: report_stats_on_json_kg.py [--useSimplifiedPredicates] <inputKGFile.json> <outputStatsFile.json>
The input file can be optionally gzipped (specify with the .gz extension).
'''
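# Example invocation (hypothetical file names, shown for illustration only):
#   python3 report_stats_on_json_kg.py --useSimplifiedPredicates kg2.json.gz kg2-stats.json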
__author__ = 'Stephen Ramsey'
__copyright__ = 'Oregon State University'
__credits__ = ['Stephen Ramsey', 'Erica Wood', 'Veronica Flores']
__license__ = 'MIT'
__version__ = '0.1.0'
__maintainer__ = ''
__email__ = ''
__status__ = 'Prototype'
import argparse
import collections
import datetime
import gzip
import json
import kg2_util
import shutil
import sys
import tempfile
def make_arg_parser():
    arg_parser = argparse.ArgumentParser(description='report_stats_on_json_kg.py: reports statistics on a JSON knowledge graph in Biolink format')
arg_parser.add_argument('inputFile', type=str)
arg_parser.add_argument('outputFile', type=str)
arg_parser.add_argument('--useSimplifiedPredicates', dest='use_simplified_predicates', action='store_true', default=False)
return arg_parser
def get_prefix_from_curie_id(curie_id: str):
assert ':' in curie_id
return curie_id.split(':')[0]
def get_nodes_with_none_category(nodes: list):
return [node for node in nodes if
node['category_label'] is None or node['category_label'] == 'unknown category']
def count_nodes_by_curie_prefix(nodes: list):
return collections.Counter([get_prefix_from_curie_id(node['id']) for node in nodes])
def count_nodes_by_curie_prefix_given_no_category(nodes: list):
return count_nodes_by_curie_prefix(get_nodes_with_none_category(nodes))
def count_nodes_by_category(nodes: list):
return collections.Counter([node['category_label'] for node in nodes])
def count_nodes_by_source(nodes: list):
return collections.Counter([node['knowledge_source'] for node in nodes])
def count_number_of_nodes_by_source_and_category(nodes: list):
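    """Return a dict keyed by each node's knowledge source, mapping the source to a
    collections.Counter of its category labels, e.g. {'some-source': Counter({'gene': 2})}
    (the key and label shown here are illustrative only)."""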
fulldict = {}
sourcedict = collections.Counter([node['knowledge_source'] for node in nodes])
sourcecatdict = {}
categorylist = []
for source in sourcedict:
categorylist = []
for node in nodes:
if node['knowledge_source'] == source:
categorylist.append(node['category_label'])
sourcecatdict.update({source: categorylist})
    for source_name in sourcecatdict:
        sourcecount = collections.Counter(sourcecatdict.get(source_name))
        fulldict.update({source_name: sourcecount})
return fulldict
def count_edges_by_source(edges: list):
ret_data = None
if type(edges[0]['knowledge_source']) == str:
ret_data = collections.Counter([edge['knowledge_source'] for edge in edges])
else:
assert type(edges[0]['knowledge_source'] == list)
provby_list = []
for edge in edges:
provby_list += edge['knowledge_source']
ret_data = collections.Counter(provby_list)
return ret_data
def count_edges_by_predicate_curie(edges: list):
curie_field = 'original_predicate' if not args.use_simplified_predicates else 'predicate'
return collections.Counter([edge[curie_field] for edge in edges])
def count_edges_by_predicate_type(edges: list):
label_field = 'relation_label' if not args.use_simplified_predicates else 'predicate_label'
return collections.Counter([edge[label_field] for edge in edges])
def count_edges_by_predicate_curie_prefix(edges: list):
curie_field = 'original_predicate' if not args.use_simplified_predicates else 'predicate'
return collections.Counter([get_prefix_from_curie_id(edge[curie_field]) for edge in edges])
def count_predicates_by_predicate_curie_prefix(edges: list):
curie_field = 'original_predicate' if not args.use_simplified_predicates else 'predicate'
unique_relation_curies = set([edge[curie_field] for edge in edges])
return collections.Counter([get_prefix_from_curie_id(curie) for curie in unique_relation_curies])
def count_types_of_pairs_of_curies_for_xrefs(edges: list):
prefix_pairs_list = list()
for edge in edges:
if edge['relation_label'] == 'xref' or edge['relation_label'] == 'close_match':
subject_curie = edge['subject']
subject_prefix = get_prefix_from_curie_id(subject_curie)
object_curie = edge['object']
object_prefix = get_prefix_from_curie_id(object_curie)
key = subject_prefix + '---' + object_prefix
prefix_pairs_list.append(key)
return collections.Counter(prefix_pairs_list)
def count_types_of_pairs_of_curies_for_equivs(edges: list):
prefix_pairs_list = list()
for edge in edges:
if edge['relation_label'] == kg2_util.EDGE_LABEL_OWL_SAME_AS:
subject_curie = edge['subject']
subject_prefix = get_prefix_from_curie_id(subject_curie)
object_curie = edge['object']
object_prefix = get_prefix_from_curie_id(object_curie)
key = subject_prefix + '---' + object_prefix
prefix_pairs_list.append(key)
return collections.Counter(prefix_pairs_list)
if __name__ == '__main__':
args = make_arg_parser().parse_args()
input_file_name = args.inputFile
if not input_file_name.endswith('.gz'):
input_file = open(input_file_name, 'r')
graph = json.load(input_file)
else:
input_file = gzip.GzipFile(input_file_name, 'r')
graph = json.loads(input_file.read().decode('utf-8'))
if 'nodes' not in graph:
print("WARNING: 'nodes' property is missing from the input JSON.", file=sys.stderr)
    nodes = graph.get('nodes', [])
for n in nodes[::-1]: # search for build info node starting at end
if n["name"] == "KG2:Build": # should be the first node accessed
nodes.remove(n) # remove it so stats aren't reported
break
if 'edges' not in graph:
print("WARNING: 'edges' property is missing from the input JSON.", file=sys.stderr)
edges = graph.get('edges', [])
stats = {'_number_of_nodes': len(nodes), # underscore is to make sure it sorts to the top of the report
'_number_of_edges': len(edges), # underscore is to make sure it sorts to the top of the report
'_report_datetime': datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
'number_of_nodes_by_curie_prefix': dict(count_nodes_by_curie_prefix(nodes)),
'number_of_nodes_without_category__by_curie_prefix': dict(count_nodes_by_curie_prefix_given_no_category(nodes)),
'number_of_nodes_by_category_label': dict(count_nodes_by_category(nodes)),
'number_of_nodes_by_source': dict(count_nodes_by_source(nodes)),
'number_of_edges_by_predicate_curie': dict(count_edges_by_predicate_curie(edges)),
'number_of_edges_by_predicate_type': dict(count_edges_by_predicate_type(edges)),
'number_of_edges_by_predicate_curie_prefixes': dict(count_edges_by_predicate_curie_prefix(edges)),
'number_of_predicates_by_predicate_curie_prefixes': dict(count_predicates_by_predicate_curie_prefix(edges)),
'number_of_edges_by_source': dict(count_edges_by_source(edges)),
'types_of_pairs_of_curies_for_xrefs': dict(count_types_of_pairs_of_curies_for_xrefs(edges)),
'types_of_pairs_of_curies_for_equivs': dict(count_types_of_pairs_of_curies_for_equivs(edges)),
'number_of_nodes_by_source_and_category': dict(count_number_of_nodes_by_source_and_category(nodes))}
temp_output_file = tempfile.mkstemp(prefix='kg2-')[1]
with open(temp_output_file, 'w') as outfile:
json.dump(stats, outfile, indent=4, sort_keys=True)
shutil.move(temp_output_file, args.outputFile)
| 42.486339 | 126 | 0.720129 |
4a22055c05768cad7b118754ca41fc27214ebb80 | 3,114 | py | Python | gitea_api/models/dismiss_pull_review_options.py | r7l/python-gitea-api | 31d3dba27ea7e551e2048a1230c4ab4d73365006 | [
"MIT"
] | 1 | 2022-02-09T23:43:26.000Z | 2022-02-09T23:43:26.000Z | gitea_api/models/dismiss_pull_review_options.py | r7l/python-gitea-api | 31d3dba27ea7e551e2048a1230c4ab4d73365006 | [
"MIT"
] | null | null | null | gitea_api/models/dismiss_pull_review_options.py | r7l/python-gitea-api | 31d3dba27ea7e551e2048a1230c4ab4d73365006 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Gitea API.
This documentation describes the Gitea API. # noqa: E501
OpenAPI spec version: 1.16.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DismissPullReviewOptions(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'message': 'str'
}
attribute_map = {
'message': 'message'
}
def __init__(self, message=None): # noqa: E501
"""DismissPullReviewOptions - a model defined in Swagger""" # noqa: E501
self._message = None
self.discriminator = None
if message is not None:
self.message = message
@property
def message(self):
"""Gets the message of this DismissPullReviewOptions. # noqa: E501
:return: The message of this DismissPullReviewOptions. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this DismissPullReviewOptions.
:param message: The message of this DismissPullReviewOptions. # noqa: E501
:type: str
"""
self._message = message
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DismissPullReviewOptions, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DismissPullReviewOptions):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 28.054054 | 83 | 0.563263 |
4a22065024f098e88d909e90f93566a9572e31fa | 3,198 | py | Python | google/cloud/asset_v1/types/__init__.py | Abdur-rahmaanJ/python-asset | c79d51d31dd04a6cf2b903d91259c093bf200010 | [
"Apache-2.0"
] | null | null | null | google/cloud/asset_v1/types/__init__.py | Abdur-rahmaanJ/python-asset | c79d51d31dd04a6cf2b903d91259c093bf200010 | [
"Apache-2.0"
] | null | null | null | google/cloud/asset_v1/types/__init__.py | Abdur-rahmaanJ/python-asset | c79d51d31dd04a6cf2b903d91259c093bf200010 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .asset_service import (
AnalyzeIamPolicyLongrunningMetadata,
AnalyzeIamPolicyLongrunningRequest,
AnalyzeIamPolicyLongrunningResponse,
AnalyzeIamPolicyRequest,
AnalyzeIamPolicyResponse,
AnalyzeMoveRequest,
AnalyzeMoveResponse,
BatchGetAssetsHistoryRequest,
BatchGetAssetsHistoryResponse,
BigQueryDestination,
CreateFeedRequest,
DeleteFeedRequest,
ExportAssetsRequest,
ExportAssetsResponse,
Feed,
FeedOutputConfig,
GcsDestination,
GcsOutputResult,
GetFeedRequest,
IamPolicyAnalysisOutputConfig,
IamPolicyAnalysisQuery,
ListAssetsRequest,
ListAssetsResponse,
ListFeedsRequest,
ListFeedsResponse,
MoveAnalysis,
MoveAnalysisResult,
MoveImpact,
OutputConfig,
OutputResult,
PartitionSpec,
PubsubDestination,
SearchAllIamPoliciesRequest,
SearchAllIamPoliciesResponse,
SearchAllResourcesRequest,
SearchAllResourcesResponse,
UpdateFeedRequest,
ContentType,
)
from .assets import (
Asset,
AttachedResource,
ConditionEvaluation,
IamPolicyAnalysisResult,
IamPolicyAnalysisState,
IamPolicySearchResult,
Resource,
ResourceSearchResult,
TemporalAsset,
TimeWindow,
VersionedResource,
)
__all__ = (
"AnalyzeIamPolicyLongrunningMetadata",
"AnalyzeIamPolicyLongrunningRequest",
"AnalyzeIamPolicyLongrunningResponse",
"AnalyzeIamPolicyRequest",
"AnalyzeIamPolicyResponse",
"AnalyzeMoveRequest",
"AnalyzeMoveResponse",
"BatchGetAssetsHistoryRequest",
"BatchGetAssetsHistoryResponse",
"BigQueryDestination",
"CreateFeedRequest",
"DeleteFeedRequest",
"ExportAssetsRequest",
"ExportAssetsResponse",
"Feed",
"FeedOutputConfig",
"GcsDestination",
"GcsOutputResult",
"GetFeedRequest",
"IamPolicyAnalysisOutputConfig",
"IamPolicyAnalysisQuery",
"ListAssetsRequest",
"ListAssetsResponse",
"ListFeedsRequest",
"ListFeedsResponse",
"MoveAnalysis",
"MoveAnalysisResult",
"MoveImpact",
"OutputConfig",
"OutputResult",
"PartitionSpec",
"PubsubDestination",
"SearchAllIamPoliciesRequest",
"SearchAllIamPoliciesResponse",
"SearchAllResourcesRequest",
"SearchAllResourcesResponse",
"UpdateFeedRequest",
"ContentType",
"Asset",
"AttachedResource",
"ConditionEvaluation",
"IamPolicyAnalysisResult",
"IamPolicyAnalysisState",
"IamPolicySearchResult",
"Resource",
"ResourceSearchResult",
"TemporalAsset",
"TimeWindow",
"VersionedResource",
)
| 26.429752 | 74 | 0.730457 |
4a2206e795477bb8b2367786fbb990a095120fca | 2,563 | py | Python | aleph/logic/resolver.py | nabla-c0d3/aleph | d0e4e04e23cb7ee3971298e33ccb1c5171ae0779 | [
"MIT"
] | 2 | 2021-01-09T17:27:23.000Z | 2021-01-09T17:27:25.000Z | aleph/logic/resolver.py | nabla-c0d3/aleph | d0e4e04e23cb7ee3971298e33ccb1c5171ae0779 | [
"MIT"
] | null | null | null | aleph/logic/resolver.py | nabla-c0d3/aleph | d0e4e04e23cb7ee3971298e33ccb1c5171ae0779 | [
"MIT"
] | null | null | null | # Bulk object resolver.
# The purpose of this module is to quickly load objects of different
# types from the backend. It's typically used by the API serialiser
# to ensure that nested objects are loaded only once.
#
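# Illustrative usage (a minimal sketch; `serializer`, `role_id` and `entity_id`
# below are hypothetical, not part of this module):
#
#   queue(serializer, Role, role_id)            # register lookups; duplicates merge
#   queue(serializer, Entity, entity_id, schema='Thing')
#   resolve(serializer)                         # one batched cache/index fetch
#   role = get(serializer, Role, role_id)       # read back the loaded object
#
# The stub (`serializer` above) can be any object that accepts new attributes,
# since the resolver stores its queue and cache on it.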
import logging
from normality import stringify
from collections import defaultdict
from aleph.core import cache
from aleph.model import Role, Collection, Alert, Entity, EntitySet, EntitySetItem
from aleph.logic.roles import get_role
from aleph.logic.alerts import get_alert
from aleph.logic.entitysets import get_entityset, get_entitysetitem
from aleph.index.collections import get_collection
from aleph.index.entities import entities_by_ids
log = logging.getLogger(__name__)
LOADERS = {
Role: get_role,
Collection: get_collection,
Alert: get_alert,
EntitySet: get_entityset,
EntitySetItem: get_entitysetitem,
}
def _instrument_stub(stub):
if not hasattr(stub, "_rx_queue"):
stub._rx_queue = set()
if not hasattr(stub, "_rx_cache"):
stub._rx_cache = {}
def queue(stub, clazz, key, schema=None):
"""Notify the resolver associated with `stub` that the given object
needs to be retrieved. Multiple calls with the same object signature
will be merged."""
_instrument_stub(stub)
key = stringify(key)
if key is None:
return
stub._rx_queue.add((clazz, key, schema))
def resolve(stub):
_instrument_stub(stub)
cache_keys = {}
schemata = {}
for clazz, key, schema in stub._rx_queue:
if (clazz, key) in stub._rx_cache:
continue
cid = cache.object_key(clazz, key)
cache_keys[cid] = (clazz, key)
schemata[cid] = schema
keys = list(cache_keys.keys())
queries = defaultdict(list)
for cid, value in cache.get_many_complex(keys):
clazz, key = cache_keys.get(cid)
if value is None:
# log.info("MISS [%s]: %s", clazz.__name__, key)
if clazz == Entity:
queries[schemata.get(cid)].append(key)
loader = LOADERS.get(clazz)
if loader is not None:
value = loader(key)
stub._rx_cache[(clazz, key)] = value
for schema, ids in queries.items():
for entity in entities_by_ids(ids, schemata=schema, cached=True):
stub._rx_cache[(Entity, entity.get("id"))] = entity
def get(stub, clazz, key):
"""Retrieve an object that has been loaded (or None)."""
_instrument_stub(stub)
key = stringify(key)
if key is None:
return
return stub._rx_cache.get((clazz, key))
| 30.879518 | 81 | 0.672649 |
4a2207259cdc5fae2e34c8d285f4399b293cef6d | 38,158 | py | Python | salt/states/esxi.py | nielsk/salt | be5d400d903e68d99c216fd63a7146d86a64a55d | [
"Apache-2.0"
] | null | null | null | salt/states/esxi.py | nielsk/salt | be5d400d903e68d99c216fd63a7146d86a64a55d | [
"Apache-2.0"
] | null | null | null | salt/states/esxi.py | nielsk/salt | be5d400d903e68d99c216fd63a7146d86a64a55d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Manage VMware ESXi Hosts.
.. versionadded:: 2015.8.4
Dependencies
============
- pyVmomi Python Module
- ESXCLI
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original ESXi State
Module was developed against.
ESXCLI
------
Currently, about a third of the functions used in the vSphere Execution Module require
the ESXCLI package be installed on the machine running the Proxy Minion process.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
.. note::
Be aware that some functionality in this state module may depend on the
type of license attached to the ESXi host.
For example, certain services are only available to manipulate service state
or policies with a VMware vSphere Enterprise or Enterprise Plus license, while
others are available with a Standard license. The ``ntpd`` service is restricted
to an Enterprise Plus license, while ``ssh`` is available via the Standard
license.
Please see the `vSphere Comparison`_ page for more information.
.. _vSphere Comparison: https://www.vmware.com/products/vsphere/compare
About
-----
This state module was written to be used in conjunction with Salt's
:mod:`ESXi Proxy Minion <salt.proxy.esxi>`. For a tutorial on how to use Salt's
ESXi Proxy Minion, please refer to the
:ref:`ESXi Proxy Minion Tutorial <tutorial-esxi-proxy>` for
configuration examples, dependency installation instructions, how to run remote
execution functions against ESXi hosts via a Salt Proxy Minion, and a larger state
example.
'''
# Import Python Libs
from __future__ import absolute_import
import logging
# Import Salt Libs
import salt.ext.six as six
import salt.utils.files
from salt.exceptions import CommandExecutionError
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
return 'esxi.cmd' in __salt__
def coredump_configured(name, enabled, dump_ip, host_vnic='vmk0', dump_port=6500):
'''
Ensures a host's core dump configuration.
name
Name of the state.
enabled
Sets whether or not ESXi core dump collection should be enabled.
This is a boolean value set to ``True`` or ``False`` to enable
or disable core dumps.
Note that ESXi requires that the core dump must be enabled before
any other parameters may be set. This also affects the ``changes``
results in the state return dictionary. If ``enabled`` is ``False``,
we can't obtain any previous settings to compare other state variables,
resulting in many ``old`` references returning ``None``.
Once ``enabled`` is ``True`` the ``changes`` dictionary comparisons
        will be more accurate. This is due to the way the system coredump
network configuration command returns data.
dump_ip
The IP address of host that will accept the dump.
host_vnic
Host VNic port through which to communicate. Defaults to ``vmk0``.
dump_port
TCP port to use for the dump. Defaults to ``6500``.
Example:
.. code-block:: yaml
configure-host-coredump:
esxi.coredump_configured:
- enabled: True
- dump_ip: 'my-coredump-ip.example.com'
'''
ret = {'name': name,
'result': False,
'changes': {},
'comment': ''}
esxi_cmd = 'esxi.cmd'
enabled_msg = 'ESXi requires that the core dump must be enabled ' \
'before any other parameters may be set.'
host = __pillar__['proxy']['host']
current_config = __salt__[esxi_cmd]('get_coredump_network_config').get(host)
error = current_config.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
current_config = current_config.get('Coredump Config')
current_enabled = current_config.get('enabled')
# Configure coredump enabled state, if there are changes.
if current_enabled != enabled:
enabled_changes = {'enabled': {'old': current_enabled, 'new': enabled}}
# Only run the command if not using test=True
if not __opts__['test']:
response = __salt__[esxi_cmd]('coredump_network_enable',
enabled=enabled).get(host)
error = response.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
# Allow users to disable core dump, but then return since
# nothing else can be set if core dump is disabled.
if not enabled:
ret['result'] = True
ret['comment'] = enabled_msg
ret['changes'].update(enabled_changes)
return ret
ret['changes'].update(enabled_changes)
elif not enabled:
# If current_enabled and enabled match, but are both False,
# We must return before configuring anything. This isn't a
# failure as core dump may be disabled intentionally.
ret['result'] = True
ret['comment'] = enabled_msg
return ret
# Test for changes with all remaining configurations. The changes flag is used
# To detect changes, and then set_coredump_network_config is called one time.
changes = False
current_ip = current_config.get('ip')
if current_ip != dump_ip:
ret['changes'].update({'dump_ip':
{'old': current_ip,
'new': dump_ip}})
changes = True
current_vnic = current_config.get('host_vnic')
if current_vnic != host_vnic:
ret['changes'].update({'host_vnic':
{'old': current_vnic,
'new': host_vnic}})
changes = True
current_port = current_config.get('port')
if current_port != str(dump_port):
ret['changes'].update({'dump_port':
{'old': current_port,
'new': str(dump_port)}})
changes = True
# Only run the command if not using test=True and changes were detected.
if not __opts__['test'] and changes is True:
response = __salt__[esxi_cmd]('set_coredump_network_config',
dump_ip=dump_ip,
host_vnic=host_vnic,
dump_port=dump_port).get(host)
if response.get('success') is False:
msg = response.get('stderr')
if not msg:
msg = response.get('stdout')
ret['comment'] = 'Error: {0}'.format(msg)
return ret
ret['result'] = True
if ret['changes'] == {}:
ret['comment'] = 'Core Dump configuration is already in the desired state.'
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Core dump configuration will change.'
return ret
def password_present(name, password):
'''
Ensures the given password is set on the ESXi host. Passwords cannot be obtained from
host, so if a password is set in this state, the ``vsphere.update_host_password``
function will always run (except when using test=True functionality) and the state's
changes dictionary will always be populated.
The username for which the password will change is the same username that is used to
authenticate against the ESXi host via the Proxy Minion. For example, if the pillar
definition for the proxy username is defined as ``root``, then the username that the
password will be updated for via this state is ``root``.
name
Name of the state.
password
The new password to change on the host.
Example:
.. code-block:: yaml
configure-host-password:
esxi.password_present:
- password: 'new-bad-password'
'''
ret = {'name': name,
'result': True,
'changes': {'old': 'unknown',
'new': '********'},
'comment': 'Host password was updated.'}
esxi_cmd = 'esxi.cmd'
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Host password will change.'
return ret
else:
try:
__salt__[esxi_cmd]('update_host_password',
new_password=password)
except CommandExecutionError as err:
ret['result'] = False
ret['comment'] = 'Error: {0}'.format(err)
return ret
return ret
def ntp_configured(name,
service_running,
ntp_servers=None,
service_policy=None,
service_restart=False,
update_datetime=False):
'''
Ensures a host's NTP server configuration such as setting NTP servers, ensuring the
NTP daemon is running or stopped, or restarting the NTP daemon for the ESXi host.
name
Name of the state.
service_running
Ensures the running state of the ntp daemon for the host. Boolean value where
``True`` indicates that ntpd should be running and ``False`` indicates that it
should be stopped.
ntp_servers
A list of servers that should be added to the ESXi host's NTP configuration.
service_policy
The policy to set for the NTP service.
.. note::
When setting the service policy to ``off`` or ``on``, you *must* quote the
setting. If you don't, the yaml parser will set the string to a boolean,
which will cause trouble checking for stateful changes and will error when
trying to set the policy on the ESXi host.
service_restart
If set to ``True``, the ntp daemon will be restarted, regardless of its previous
running state. Default is ``False``.
update_datetime
If set to ``True``, the date/time on the given host will be updated to UTC.
Default setting is ``False``. This option should be used with caution since
network delays and execution delays can result in time skews.
Example:
.. code-block:: yaml
configure-host-ntp:
esxi.ntp_configured:
- service_running: True
- ntp_servers:
- 192.174.1.100
- 192.174.1.200
- service_policy: 'on'
- service_restart: True
'''
ret = {'name': name,
'result': False,
'changes': {},
'comment': ''}
esxi_cmd = 'esxi.cmd'
host = __pillar__['proxy']['host']
ntpd = 'ntpd'
ntp_config = __salt__[esxi_cmd]('get_ntp_config').get(host)
ntp_running = __salt__[esxi_cmd]('get_service_running',
service_name=ntpd).get(host)
error = ntp_running.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
ntp_running = ntp_running.get(ntpd)
# Configure NTP Servers for the Host
if ntp_servers and set(ntp_servers) != set(ntp_config):
# Only run the command if not using test=True
if not __opts__['test']:
response = __salt__[esxi_cmd]('set_ntp_config',
ntp_servers=ntp_servers).get(host)
error = response.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
# Set changes dictionary for ntp_servers
ret['changes'].update({'ntp_servers':
{'old': ntp_config,
'new': ntp_servers}})
# Configure service_running state
if service_running != ntp_running:
# Only run the command if not using test=True
if not __opts__['test']:
            # Start ntpd if service_running=True
            if service_running is True:
response = __salt__[esxi_cmd]('service_start',
service_name=ntpd).get(host)
error = response.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
# Stop ntpd if service_running=False
else:
response = __salt__[esxi_cmd]('service_stop',
service_name=ntpd).get(host)
error = response.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
ret['changes'].update({'service_running':
{'old': ntp_running,
'new': service_running}})
# Configure service_policy
if service_policy:
current_service_policy = __salt__[esxi_cmd]('get_service_policy',
service_name=ntpd).get(host)
error = current_service_policy.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
current_service_policy = current_service_policy.get(ntpd)
if service_policy != current_service_policy:
# Only run the command if not using test=True
if not __opts__['test']:
response = __salt__[esxi_cmd]('set_service_policy',
service_name=ntpd,
service_policy=service_policy).get(host)
error = response.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
ret['changes'].update({'service_policy':
{'old': current_service_policy,
'new': service_policy}})
# Update datetime, if requested.
if update_datetime:
# Only run the command if not using test=True
if not __opts__['test']:
response = __salt__[esxi_cmd]('update_host_datetime').get(host)
error = response.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
ret['changes'].update({'update_datetime':
{'old': '',
'new': 'Host datetime was updated.'}})
# Restart ntp_service if service_restart=True
if service_restart:
# Only run the command if not using test=True
if not __opts__['test']:
response = __salt__[esxi_cmd]('service_restart',
service_name=ntpd).get(host)
error = response.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
ret['changes'].update({'service_restart':
{'old': '',
'new': 'NTP Daemon Restarted.'}})
ret['result'] = True
if ret['changes'] == {}:
ret['comment'] = 'NTP is already in the desired state.'
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'NTP state will change.'
return ret
def vmotion_configured(name, enabled, device='vmk0'):
'''
Configures a host's VMotion properties such as enabling VMotion and setting
the device VirtualNic that VMotion will use.
name
Name of the state.
enabled
Ensures whether or not VMotion should be enabled on a host as a boolean
value where ``True`` indicates that VMotion should be enabled and ``False``
indicates that VMotion should be disabled.
device
The device that uniquely identifies the VirtualNic that will be used for
VMotion for the host. Defaults to ``vmk0``.
Example:
.. code-block:: yaml
configure-vmotion:
esxi.vmotion_configured:
- enabled: True
- device: sample-device
'''
ret = {'name': name,
'result': False,
'changes': {},
'comment': ''}
esxi_cmd = 'esxi.cmd'
host = __pillar__['proxy']['host']
current_vmotion_enabled = __salt__[esxi_cmd]('get_vmotion_enabled').get(host)
current_vmotion_enabled = current_vmotion_enabled.get('VMotion Enabled')
# Configure VMotion Enabled state, if changed.
if enabled != current_vmotion_enabled:
# Only run the command if not using test=True
if not __opts__['test']:
# Enable VMotion if enabled=True
if enabled is True:
response = __salt__[esxi_cmd]('vmotion_enable',
device=device).get(host)
error = response.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
# Disable VMotion if enabled=False
else:
response = __salt__[esxi_cmd]('vmotion_disable').get(host)
error = response.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
ret['changes'].update({'enabled':
{'old': current_vmotion_enabled,
'new': enabled}})
ret['result'] = True
if ret['changes'] == {}:
ret['comment'] = 'VMotion configuration is already in the desired state.'
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'VMotion configuration will change.'
return ret
def vsan_configured(name, enabled, add_disks_to_vsan=False):
'''
Configures a host's VSAN properties such as enabling or disabling VSAN, or
adding VSAN-eligible disks to the VSAN system for the host.
name
Name of the state.
enabled
Ensures whether or not VSAN should be enabled on a host as a boolean
value where ``True`` indicates that VSAN should be enabled and ``False``
indicates that VSAN should be disabled.
add_disks_to_vsan
If set to ``True``, any VSAN-eligible disks for the given host will be added
to the host's VSAN system. Default is ``False``.
Example:
.. code-block:: yaml
configure-host-vsan:
esxi.vsan_configured:
- enabled: True
- add_disks_to_vsan: True
'''
ret = {'name': name,
'result': False,
'changes': {},
'comment': ''}
esxi_cmd = 'esxi.cmd'
host = __pillar__['proxy']['host']
current_vsan_enabled = __salt__[esxi_cmd]('get_vsan_enabled').get(host)
error = current_vsan_enabled.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
current_vsan_enabled = current_vsan_enabled.get('VSAN Enabled')
# Configure VSAN Enabled state, if changed.
if enabled != current_vsan_enabled:
# Only run the command if not using test=True
if not __opts__['test']:
# Enable VSAN if enabled=True
if enabled is True:
response = __salt__[esxi_cmd]('vsan_enable').get(host)
error = response.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
# Disable VSAN if enabled=False
else:
response = __salt__[esxi_cmd]('vsan_disable').get(host)
error = response.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
ret['changes'].update({'enabled':
{'old': current_vsan_enabled,
'new': enabled}})
# Add any eligible disks to VSAN, if requested.
if add_disks_to_vsan:
current_eligible_disks = __salt__[esxi_cmd]('get_vsan_eligible_disks').get(host)
error = current_eligible_disks.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
disks = current_eligible_disks.get('Eligible')
if disks and isinstance(disks, list):
# Only run the command if not using test=True
if not __opts__['test']:
response = __salt__[esxi_cmd]('vsan_add_disks').get(host)
error = response.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
ret['changes'].update({'add_disks_to_vsan':
{'old': '',
'new': disks}})
ret['result'] = True
if ret['changes'] == {}:
ret['comment'] = 'VSAN configuration is already in the desired state.'
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'VSAN configuration will change.'
return ret
def ssh_configured(name,
service_running,
ssh_key=None,
ssh_key_file=None,
service_policy=None,
service_restart=False,
certificate_verify=False):
'''
Manage the SSH configuration for a host including whether or not SSH is running or
the presence of a given SSH key. Note: Only one ssh key can be uploaded for root.
Uploading a second key will replace any existing key.
name
Name of the state.
service_running
Ensures whether or not the SSH service should be running on a host. Represented
as a boolean value where ``True`` indicates that SSH should be running and
        ``False`` indicates that SSH should be stopped.
In order to update SSH keys, the SSH service must be running.
ssh_key
        Public SSH key to be added to the authorized_keys file on the ESXi host. You can
use ``ssh_key`` or ``ssh_key_file``, but not both.
ssh_key_file
File containing the public SSH key to be added to the authorized_keys file on
the ESXi host. You can use ``ssh_key_file`` or ``ssh_key``, but not both.
service_policy
        The policy to set for the SSH service.
.. note::
When setting the service policy to ``off`` or ``on``, you *must* quote the
setting. If you don't, the yaml parser will set the string to a boolean,
which will cause trouble checking for stateful changes and will error when
trying to set the policy on the ESXi host.
service_restart
If set to ``True``, the SSH service will be restarted, regardless of its
previous running state. Default is ``False``.
certificate_verify
If set to ``True``, the SSL connection must present a valid certificate.
Default is ``False``.
Example:
.. code-block:: yaml
configure-host-ssh:
esxi.ssh_configured:
- service_running: True
- ssh_key_file: /etc/salt/ssh_keys/my_key.pub
- service_policy: 'on'
- service_restart: True
- certificate_verify: True
'''
ret = {'name': name,
'result': False,
'changes': {},
'comment': ''}
esxi_cmd = 'esxi.cmd'
host = __pillar__['proxy']['host']
ssh = 'ssh'
ssh_running = __salt__[esxi_cmd]('get_service_running',
service_name=ssh).get(host)
error = ssh_running.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
ssh_running = ssh_running.get(ssh)
# Configure SSH service_running state, if changed.
if service_running != ssh_running:
# Only actually run the command if not using test=True
if not __opts__['test']:
# Start SSH if service_running=True
if service_running is True:
enable = __salt__[esxi_cmd]('service_start',
service_name=ssh).get(host)
error = enable.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
# Disable SSH if service_running=False
else:
disable = __salt__[esxi_cmd]('service_stop',
service_name=ssh).get(host)
error = disable.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
ret['changes'].update({'service_running':
{'old': ssh_running,
'new': service_running}})
# If uploading an SSH key or SSH key file, see if there's a current
# SSH key and compare the current key to the key set in the state.
current_ssh_key, ssh_key_changed = None, False
if ssh_key or ssh_key_file:
current_ssh_key = __salt__[esxi_cmd]('get_ssh_key',
certificate_verify=certificate_verify)
error = current_ssh_key.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
current_ssh_key = current_ssh_key.get('key')
if current_ssh_key:
clean_current_key = _strip_key(current_ssh_key).split(' ')
if not ssh_key:
ssh_key = ''
# Open ssh key file and read in contents to create one key string
with salt.utils.files.fopen(ssh_key_file, 'r') as key_file:
for line in key_file:
if line.startswith('#'):
# Commented line
continue
ssh_key = ssh_key + line
clean_ssh_key = _strip_key(ssh_key).split(' ')
# Check that the first two list items of clean key lists are equal.
if clean_current_key[0] != clean_ssh_key[0] or clean_current_key[1] != clean_ssh_key[1]:
ssh_key_changed = True
else:
# If current_ssh_key is None, but we're setting a new key with
# either ssh_key or ssh_key_file, then we need to flag the change.
ssh_key_changed = True
# Upload SSH key, if changed.
if ssh_key_changed:
if not __opts__['test']:
# Upload key
response = __salt__[esxi_cmd]('upload_ssh_key',
ssh_key=ssh_key,
ssh_key_file=ssh_key_file,
certificate_verify=certificate_verify)
error = response.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
ret['changes'].update({'SSH Key':
{'old': current_ssh_key,
'new': ssh_key if ssh_key else ssh_key_file}})
# Configure service_policy
if service_policy:
current_service_policy = __salt__[esxi_cmd]('get_service_policy',
service_name=ssh).get(host)
error = current_service_policy.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
current_service_policy = current_service_policy.get(ssh)
if service_policy != current_service_policy:
# Only run the command if not using test=True
if not __opts__['test']:
response = __salt__[esxi_cmd]('set_service_policy',
service_name=ssh,
service_policy=service_policy).get(host)
error = response.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
ret['changes'].update({'service_policy':
{'old': current_service_policy,
'new': service_policy}})
# Restart ssh_service if service_restart=True
if service_restart:
# Only run the command if not using test=True
if not __opts__['test']:
response = __salt__[esxi_cmd]('service_restart',
service_name=ssh).get(host)
error = response.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
ret['changes'].update({'service_restart':
{'old': '',
'new': 'SSH service restarted.'}})
ret['result'] = True
if ret['changes'] == {}:
ret['comment'] = 'SSH service is already in the desired state.'
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'SSH service state will change.'
return ret
def syslog_configured(name,
syslog_configs,
firewall=True,
reset_service=True,
reset_syslog_config=False,
reset_configs=None):
'''
Ensures the specified syslog configuration parameters. By default,
this state will reset the syslog service after any new or changed
parameters are set successfully.
name
Name of the state.
syslog_configs
Name of parameter to set (corresponds to the command line switch for
esxcli without the double dashes (--))
Valid syslog_config values are ``logdir``, ``loghost``, ``logdir-unique``,
``default-rotate``, ``default-size``, and ``default-timeout``.
Each syslog_config option also needs a configuration value to set.
For example, ``loghost`` requires URLs or IP addresses to use for
logging. Multiple log servers can be specified by listing them,
comma-separated, but without spaces before or after commas
(reference: https://blogs.vmware.com/vsphere/2012/04/configuring-multiple-syslog-servers-for-esxi-5.html)
firewall
Enable the firewall rule set for syslog. Defaults to ``True``.
reset_service
After a successful parameter set, reset the service. Defaults to ``True``.
reset_syslog_config
        Resets the syslog service to its default settings. Defaults to ``False``.
If set to ``True``, default settings defined by the list of syslog configs
in ``reset_configs`` will be reset before running any other syslog settings.
reset_configs
A comma-delimited list of parameters to reset. Only runs if
``reset_syslog_config`` is set to ``True``. If ``reset_syslog_config`` is set
to ``True``, but no syslog configs are listed in ``reset_configs``, then
``reset_configs`` will be set to ``all`` by default.
See ``syslog_configs`` parameter above for a list of valid options.
Example:
.. code-block:: yaml
configure-host-syslog:
esxi.syslog_configured:
- syslog_configs:
loghost: ssl://localhost:5432,tcp://10.1.0.1:1514
default-timeout: 120
- firewall: True
- reset_service: True
- reset_syslog_config: True
- reset_configs: loghost,default-timeout
'''
ret = {'name': name,
'result': False,
'changes': {},
'comment': ''}
esxi_cmd = 'esxi.cmd'
host = __pillar__['proxy']['host']
if reset_syslog_config:
if not reset_configs:
reset_configs = 'all'
# Only run the command if not using test=True
if not __opts__['test']:
reset = __salt__[esxi_cmd]('reset_syslog_config',
syslog_config=reset_configs).get(host)
for key, val in six.iteritems(reset):
if isinstance(val, bool):
continue
if not val.get('success'):
msg = val.get('message')
if not msg:
                        msg = 'There was an error resetting a syslog config \'{0}\'. ' \
'Please check debug logs.'.format(val)
ret['comment'] = 'Error: {0}'.format(msg)
return ret
ret['changes'].update({'reset_syslog_config':
{'old': '',
'new': reset_configs}})
current_firewall = __salt__[esxi_cmd]('get_firewall_status').get(host)
error = current_firewall.get('Error')
if error:
ret['comment'] = 'Error: {0}'.format(error)
return ret
current_firewall = current_firewall.get('rulesets').get('syslog')
if current_firewall != firewall:
# Only run the command if not using test=True
if not __opts__['test']:
enabled = __salt__[esxi_cmd]('enable_firewall_ruleset',
ruleset_enable=firewall,
ruleset_name='syslog').get(host)
if enabled.get('retcode') != 0:
err = enabled.get('stderr')
out = enabled.get('stdout')
ret['comment'] = 'Error: {0}'.format(err if err else out)
return ret
ret['changes'].update({'firewall':
{'old': current_firewall,
'new': firewall}})
current_syslog_config = __salt__[esxi_cmd]('get_syslog_config').get(host)
for key, val in six.iteritems(syslog_configs):
        # The output of get_syslog_config has different keys than the keys
        # used to set syslog_config values. We need to look them up first.
try:
lookup_key = _lookup_syslog_config(key)
except KeyError:
ret['comment'] = '\'{0}\' is not a valid config variable.'.format(key)
return ret
current_val = current_syslog_config[lookup_key]
if str(current_val) != str(val):
# Only run the command if not using test=True
if not __opts__['test']:
response = __salt__[esxi_cmd]('set_syslog_config',
syslog_config=key,
config_value=val,
firewall=firewall,
reset_service=reset_service).get(host)
success = response.get(key).get('success')
if not success:
msg = response.get(key).get('message')
if not msg:
msg = 'There was an error setting syslog config \'{0}\'. ' \
'Please check debug logs.'.format(key)
ret['comment'] = msg
return ret
if not ret['changes'].get('syslog_config'):
ret['changes'].update({'syslog_config': {}})
ret['changes']['syslog_config'].update({key:
{'old': current_val,
'new': val}})
ret['result'] = True
if ret['changes'] == {}:
ret['comment'] = 'Syslog is already in the desired state.'
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Syslog state will change.'
return ret
def _lookup_syslog_config(config):
'''
Helper function that looks up syslog_config keys available from
``vsphere.get_syslog_config``.
'''
lookup = {'default-timeout': 'Default Network Retry Timeout',
'logdir': 'Local Log Output',
'default-size': 'Local Logging Default Rotation Size',
'logdir-unique': 'Log To Unique Subdirectory',
'default-rotate': 'Local Logging Default Rotations',
'loghost': 'Remote Host'}
return lookup.get(config)
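# Illustrative lookups for _lookup_syslog_config (taken from the table above):
#
#   _lookup_syslog_config('loghost')        -> 'Remote Host'
#   _lookup_syslog_config('default-rotate') -> 'Local Logging Default Rotations'
#
# syslog_configured() uses the returned key to index the dictionary returned
# by ``vsphere.get_syslog_config`` when comparing current and desired values.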
def _strip_key(key_string):
'''
Strips an SSH key string of white space and line endings and returns the new string.
key_string
The string to be stripped.
'''
    key_string = key_string.strip()
    key_string = key_string.replace('\r\n', '')
    key_string = key_string.replace('\n', '')
return key_string
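# Illustrative use of _strip_key (key material below is hypothetical):
#
#   raw = ' ssh-rsa AAAAB3NzaC1yc2E... root@example\n'
#   _strip_key(raw) -> 'ssh-rsa AAAAB3NzaC1yc2E... root@example'
#
# ssh_configured() splits the normalized string on spaces and compares the
# first two fields (key type and key body) to decide whether to upload a key.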
| 37.15482 | 113 | 0.568452 |
4a22073bc84efffe46e17ccae811a00b5bca20e6 | 15,838 | py | Python | core/forecastMod.py | champham/WrfHydroForcing | 90f1cbcc233eb007818ae159be81814e5754f233 | [
"BSD-3-Clause"
] | null | null | null | core/forecastMod.py | champham/WrfHydroForcing | 90f1cbcc233eb007818ae159be81814e5754f233 | [
"BSD-3-Clause"
] | null | null | null | core/forecastMod.py | champham/WrfHydroForcing | 90f1cbcc233eb007818ae159be81814e5754f233 | [
"BSD-3-Clause"
] | null | null | null | import datetime
import os
from core import bias_correction
from core import downscale
from core import err_handler
from core import layeringMod
from core import disaggregateMod
def process_forecasts(ConfigOptions, wrfHydroGeoMeta, inputForcingMod, suppPcpMod, MpiConfig, OutputObj):
"""
Main calling module for running realtime forecasts and re-forecasts.
:param jobMeta:
:return:
"""
# Loop through each WRF-Hydro forecast cycle being processed. Within
# each cycle, perform the following tasks:
# 1.) Loop over each output frequency
# 2.) Determine the input forcing cycle dates (both before and after)
# for temporal interpolation, downscaling, and bias correction reasons.
# 3.) If the input forcings haven't been opened and read into memory,
# open them.
# 4.) Check to see if the ESMF objects for input forcings have been
# created. If not, create them, including the regridding object.
# 5.) Regrid forcing grids for input cycle dates surrounding the
# current output timestep if they haven't been regridded.
# 6.) Perform bias correction and/or downscaling.
# 7.) Output final grids to LDASIN NetCDF files with associated
# WRF-Hydro geospatial metadata to the final output directories.
# Throughout this entire process, log progress being made into LOG
# files. Once a forecast cycle is complete, we will touch an empty
# 'WrfHydroForcing.COMPLETE' flag in the directory. This will be
# checked upon the beginning of this program to see if we
# need to process any files.
disaggregate_fun = disaggregateMod.disaggregate_factory(ConfigOptions)
for fcstCycleNum in range(ConfigOptions.nFcsts):
ConfigOptions.current_fcst_cycle = ConfigOptions.b_date_proc + datetime.timedelta(
seconds=ConfigOptions.fcst_freq * 60 * fcstCycleNum)
if ConfigOptions.first_fcst_cycle is None:
ConfigOptions.first_fcst_cycle = ConfigOptions.current_fcst_cycle
if ConfigOptions.ana_flag:
fcstCycleOutDir = ConfigOptions.output_dir + "/" + ConfigOptions.e_date_proc.strftime('%Y%m%d%H')
else:
fcstCycleOutDir = ConfigOptions.output_dir + "/" + ConfigOptions.current_fcst_cycle.strftime('%Y%m%d%H')
# put all AnA output in the same directory
if ConfigOptions.ana_flag:
if ConfigOptions.ana_out_dir is None:
ConfigOptions.ana_out_dir = fcstCycleOutDir
fcstCycleOutDir = ConfigOptions.ana_out_dir
# completeFlag = ConfigOptions.scratch_dir + "/WrfHydroForcing.COMPLETE"
completeFlag = fcstCycleOutDir + "/WrfHydroForcing.COMPLETE"
if os.path.isfile(completeFlag):
ConfigOptions.statusMsg = "Forecast Cycle: " + \
ConfigOptions.current_fcst_cycle.strftime('%Y-%m-%d %H:%M') + \
" has already completed."
err_handler.log_msg(ConfigOptions, MpiConfig)
# We have already completed processing this cycle,
# move on.
continue
if (not ConfigOptions.ana_flag) or (ConfigOptions.logFile is None):
if MpiConfig.rank == 0:
# If the cycle directory doesn't exist, create it.
if not os.path.isdir(fcstCycleOutDir):
try:
os.mkdir(fcstCycleOutDir)
except:
ConfigOptions.errMsg = "Unable to create output " \
"directory: " + fcstCycleOutDir
err_handler.err_out_screen_para(ConfigOptions.errMsg, MpiConfig)
err_handler.check_program_status(ConfigOptions, MpiConfig)
# Compose a path to a log file, which will contain information
# about this forecast cycle.
# ConfigOptions.logFile = ConfigOptions.output_dir + "/LOG_" + \
if ConfigOptions.ana_flag:
log_time = ConfigOptions.e_date_proc
else:
log_time = ConfigOptions.current_fcst_cycle
ConfigOptions.logFile = ConfigOptions.scratch_dir + "/LOG_" + ConfigOptions.nwmConfig + \
('_' if ConfigOptions.nwmConfig != "long_range" else "_mem" + str(ConfigOptions.cfsv2EnsMember)+ "_") + \
ConfigOptions.d_program_init.strftime('%Y%m%d%H%M') + \
"_" + log_time.strftime('%Y%m%d%H%M')
# Initialize the log file.
try:
err_handler.init_log(ConfigOptions, MpiConfig)
except:
err_handler.err_out_screen_para(ConfigOptions.errMsg, MpiConfig)
err_handler.check_program_status(ConfigOptions, MpiConfig)
# Log information about this forecast cycle
if MpiConfig.rank == 0:
ConfigOptions.statusMsg = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
err_handler.log_msg(ConfigOptions, MpiConfig)
ConfigOptions.statusMsg = 'Processing Forecast Cycle: ' + \
ConfigOptions.current_fcst_cycle.strftime('%Y-%m-%d %H:%M')
err_handler.log_msg(ConfigOptions, MpiConfig)
ConfigOptions.statusMsg = 'Forecast Cycle Length is: ' + \
str(ConfigOptions.cycle_length_minutes) + " minutes"
err_handler.log_msg(ConfigOptions, MpiConfig)
# MpiConfig.comm.barrier()
# Loop through each output timestep. Perform the following functions:
# 1.) Calculate all necessary input files per user options.
# 2.) Read in input forcings from GRIB/NetCDF files.
# 3.) Regrid the forcings, and temporally interpolate.
# 4.) Downscale.
# 5.) Layer, and output as necessary.
ana_factor = 1 if ConfigOptions.ana_flag is False else 0
for outStep in range(1, ConfigOptions.num_output_steps + 1):
# Reset out final grids to missing values.
OutputObj.output_local[:, :, :] = -9999.0
ConfigOptions.current_output_step = outStep
OutputObj.outDate = ConfigOptions.current_fcst_cycle + datetime.timedelta(
seconds=ConfigOptions.output_freq * 60 * outStep
)
ConfigOptions.current_output_date = OutputObj.outDate
# if AnA, adjust file date for analysis vs forecast
if ConfigOptions.ana_flag:
file_date = OutputObj.outDate - datetime.timedelta(seconds=ConfigOptions.output_freq * 60)
else:
file_date = OutputObj.outDate
# Calculate the previous output timestep. This is used in potential downscaling routines.
if outStep == ana_factor:
ConfigOptions.prev_output_date = ConfigOptions.current_output_date
else:
ConfigOptions.prev_output_date = ConfigOptions.current_output_date - datetime.timedelta(
seconds=ConfigOptions.output_freq * 60
)
if MpiConfig.rank == 0:
ConfigOptions.statusMsg = '========================================='
err_handler.log_msg(ConfigOptions, MpiConfig)
ConfigOptions.statusMsg = "Processing for output timestep: " + \
file_date.strftime('%Y-%m-%d %H:%M')
err_handler.log_msg(ConfigOptions, MpiConfig)
# MpiConfig.comm.barrier()
# Compose the expected path to the output file. Check to see if the file exists,
# if so, continue to the next time step. Also initialize our output arrays if necessary.
OutputObj.outPath = fcstCycleOutDir + "/" + file_date.strftime('%Y%m%d%H%M') + \
".LDASIN_DOMAIN1"
# MpiConfig.comm.barrier()
if os.path.isfile(OutputObj.outPath):
if MpiConfig.rank == 0:
ConfigOptions.statusMsg = "Output file: " + OutputObj.outPath + " exists. Moving " + \
" to the next output timestep."
err_handler.log_msg(ConfigOptions, MpiConfig)
err_handler.check_program_status(ConfigOptions, MpiConfig)
continue
else:
ConfigOptions.currentForceNum = 0
ConfigOptions.currentCustomForceNum = 0
# Loop over each of the input forcings specifed.
for forceKey in ConfigOptions.input_forcings:
input_forcings = inputForcingMod[forceKey]
# Calculate the previous and next input cycle files from the inputs.
input_forcings.calc_neighbor_files(ConfigOptions, OutputObj.outDate, MpiConfig)
err_handler.check_program_status(ConfigOptions, MpiConfig)
# Regrid forcings.
input_forcings.regrid_inputs(ConfigOptions, wrfHydroGeoMeta, MpiConfig)
err_handler.check_program_status(ConfigOptions, MpiConfig)
# Run check on regridded fields for reasonable values that are not missing values.
err_handler.check_forcing_bounds(ConfigOptions, input_forcings, MpiConfig)
err_handler.check_program_status(ConfigOptions, MpiConfig)
# If we are restarting a forecast cycle, re-calculate the neighboring files, and regrid the
# next set of forcings as the previous step just regridded the previous forcing.
if input_forcings.rstFlag == 1:
if input_forcings.regridded_forcings1 is not None and \
input_forcings.regridded_forcings2 is not None:
# Set the forcings back to reflect we just regridded the previous set of inputs, not the next.
input_forcings.regridded_forcings1[:, :, :] = \
input_forcings.regridded_forcings2[:, :, :]
# Re-calculate the neighbor files.
input_forcings.calc_neighbor_files(ConfigOptions, OutputObj.outDate, MpiConfig)
err_handler.check_program_status(ConfigOptions, MpiConfig)
# Regrid the forcings for the end of the window.
input_forcings.regrid_inputs(ConfigOptions, wrfHydroGeoMeta, MpiConfig)
err_handler.check_program_status(ConfigOptions, MpiConfig)
input_forcings.rstFlag = 0
# Run temporal interpolation on the grids.
input_forcings.temporal_interpolate_inputs(ConfigOptions, MpiConfig)
err_handler.check_program_status(ConfigOptions, MpiConfig)
# Run bias correction.
bias_correction.run_bias_correction(input_forcings, ConfigOptions,
wrfHydroGeoMeta, MpiConfig)
err_handler.check_program_status(ConfigOptions, MpiConfig)
# Run downscaling on grids for this output timestep.
downscale.run_downscaling(input_forcings, ConfigOptions,
wrfHydroGeoMeta, MpiConfig)
err_handler.check_program_status(ConfigOptions, MpiConfig)
# Layer in forcings from this product.
layeringMod.layer_final_forcings(OutputObj, input_forcings, ConfigOptions, MpiConfig)
err_handler.check_program_status(ConfigOptions, MpiConfig)
ConfigOptions.currentForceNum = ConfigOptions.currentForceNum + 1
if forceKey == 10:
ConfigOptions.currentCustomForceNum = ConfigOptions.currentCustomForceNum + 1
# Process supplemental precipitation if we specified in the configuration file.
if ConfigOptions.number_supp_pcp > 0:
for suppPcpKey in ConfigOptions.supp_precip_forcings:
# Like with input forcings, calculate the neighboring files to use.
suppPcpMod[suppPcpKey].calc_neighbor_files(ConfigOptions, OutputObj.outDate, MpiConfig)
err_handler.check_program_status(ConfigOptions, MpiConfig)
# Regrid the supplemental precipitation.
suppPcpMod[suppPcpKey].regrid_inputs(ConfigOptions, wrfHydroGeoMeta, MpiConfig)
err_handler.check_program_status(ConfigOptions, MpiConfig)
if suppPcpMod[suppPcpKey].regridded_precip1 is not None \
and suppPcpMod[suppPcpKey].regridded_precip2 is not None:
# if np.any(suppPcpMod[suppPcpKey].regridded_precip1) and \
# np.any(suppPcpMod[suppPcpKey].regridded_precip2):
# Run check on regridded fields for reasonable values that are not missing values.
err_handler.check_supp_pcp_bounds(ConfigOptions, suppPcpMod[suppPcpKey], MpiConfig)
err_handler.check_program_status(ConfigOptions, MpiConfig)
disaggregate_fun(input_forcings, suppPcpMod[suppPcpKey], ConfigOptions, MpiConfig)
err_handler.check_program_status(ConfigOptions, MpiConfig)
# Run temporal interpolation on the grids.
suppPcpMod[suppPcpKey].temporal_interpolate_inputs(ConfigOptions, MpiConfig)
err_handler.check_program_status(ConfigOptions, MpiConfig)
# Layer in the supplemental precipitation into the current output object.
layeringMod.layer_supplemental_forcing(OutputObj, suppPcpMod[suppPcpKey],
ConfigOptions, MpiConfig)
err_handler.check_program_status(ConfigOptions, MpiConfig)
# Call the output routines
# adjust date for AnA if necessary
if ConfigOptions.ana_flag:
OutputObj.outDate = file_date
OutputObj.output_final_ldasin(ConfigOptions, wrfHydroGeoMeta, MpiConfig)
err_handler.check_program_status(ConfigOptions, MpiConfig)
if (not ConfigOptions.ana_flag) or (fcstCycleNum == (ConfigOptions.nFcsts - 1)):
if MpiConfig.rank == 0:
ConfigOptions.statusMsg = "Forcings complete for forecast cycle: " + \
ConfigOptions.current_fcst_cycle.strftime('%Y-%m-%d %H:%M')
err_handler.log_msg(ConfigOptions, MpiConfig)
err_handler.check_program_status(ConfigOptions, MpiConfig)
if MpiConfig.rank == 0:
# Close the log file.
try:
err_handler.close_log(ConfigOptions, MpiConfig)
except:
err_handler.err_out_screen_para(ConfigOptions.errMsg, MpiConfig)
# Success.... Now touch an empty complete file for this forecast cycle to indicate
# completion in case the code is re-ran.
try:
open(completeFlag, 'a').close()
except:
ConfigOptions.errMsg = "Unable to create completion file: " + completeFlag
err_handler.log_critical(ConfigOptions, MpiConfig)
err_handler.check_program_status(ConfigOptions, MpiConfig)
| 55.767606 | 141 | 0.607211 |
4a22082210a8bab1f61f322b0436afa738a166f9 | 6,484 | py | Python | qa/rpc-tests/zkey_import_export.py | amicoin/amicoin | 84673cb24619766d77b0c1695688ef9b40e20319 | [
"Unlicense"
] | null | null | null | qa/rpc-tests/zkey_import_export.py | amicoin/amicoin | 84673cb24619766d77b0c1695688ef9b40e20319 | [
"Unlicense"
] | null | null | null | qa/rpc-tests/zkey_import_export.py | amicoin/amicoin | 84673cb24619766d77b0c1695688ef9b40e20319 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2017 The Zcash developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import sys; assert sys.version_info < (3,), ur"This script does not run under Python 3. Please use Python 2.7.x."
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_greater_than, start_nodes,\
initialize_chain_clean, connect_nodes_bi, wait_and_assert_operationid_status
import logging
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
fee = Decimal('0.0001') # constant (but can be changed within reason)
class ZkeyImportExportTest (BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 5)
def setup_network(self, split=False):
self.nodes = start_nodes(5, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
connect_nodes_bi(self.nodes,0,4)
self.is_network_split=False
self.sync_all()
def run_test(self):
[alice, bob, charlie, david, miner] = self.nodes
# the sender loses 'amount' plus fee; to_addr receives exactly 'amount'
def z_send(from_node, from_addr, to_addr, amount):
global fee
opid = from_node.z_sendmany(from_addr,
[{"address": to_addr, "amount": Decimal(amount)}], 1, fee)
wait_and_assert_operationid_status(from_node, opid)
self.sync_all()
miner.generate(1)
self.sync_all()
def verify_utxos(node, amts, zaddr):
amts.sort(reverse=True)
txs = node.z_listreceivedbyaddress(zaddr)
def cmp_confirmations_high_to_low(a, b):
return cmp(b["amount"], a["amount"])
txs.sort(cmp_confirmations_high_to_low)
print("Sorted txs", txs)
print("amts", amts)
try:
assert_equal(amts, [tx["amount"] for tx in txs])
for tx in txs:
# make sure JoinSplit keys exist and have valid values
assert_equal("jsindex" in tx, True)
assert_equal("jsoutindex" in tx, True)
assert_greater_than(tx["jsindex"], -1)
assert_greater_than(tx["jsoutindex"], -1)
except AssertionError:
logging.error(
'Expected amounts: %r; txs: %r',
amts, txs)
raise
def get_private_balance(node):
balance = node.z_gettotalbalance()
return balance['private']
def find_imported_key(node, import_zaddr):
zaddrs = node.z_listaddresses()
assert(import_zaddr in zaddrs)
return import_zaddr
# Seed Alice with some funds
alice.generate(10)
self.sync_all()
miner.generate(100)
self.sync_all()
# Shield Alice's coinbase funds to her zaddr
alice_zaddr = alice.z_getnewaddress('sprout')
res = alice.z_shieldcoinbase("*", alice_zaddr)
wait_and_assert_operationid_status(alice, res['opid'])
self.sync_all()
miner.generate(1)
self.sync_all()
# Now get a pristine z-address for receiving transfers:
bob_zaddr = bob.z_getnewaddress('sprout')
verify_utxos(bob, [], bob_zaddr)
# TODO: Verify that charlie doesn't have funds in addr
# verify_utxos(charlie, [])
        # the amount sent in each txn; each send generates a single UTXO:
amounts = map(Decimal, ['2.3', '3.7', '0.1', '0.5', '1.0', '0.19'])
# Internal test consistency assertion:
assert_greater_than(
get_private_balance(alice),
reduce(Decimal.__add__, amounts))
logging.info("Sending pre-export txns...")
for amount in amounts[0:2]:
z_send(alice, alice_zaddr, bob_zaddr, amount)
logging.info("Exporting privkey from bob...")
bob_privkey = bob.z_exportkey(bob_zaddr)
logging.info("Sending post-export txns...")
for amount in amounts[2:4]:
z_send(alice, alice_zaddr, bob_zaddr, amount)
verify_utxos(bob, amounts[:4], bob_zaddr)
# verify_utxos(charlie, [])
logging.info("Importing bob_privkey into charlie...")
# z_importkey rescan defaults to "whenkeyisnew", so should rescan here
charlie.z_importkey(bob_privkey)
ipk_zaddr = find_imported_key(charlie, bob_zaddr)
# z_importkey should have rescanned for new key, so this should pass:
verify_utxos(charlie, amounts[:4], ipk_zaddr)
# Verify idempotent behavior:
charlie.z_importkey(bob_privkey)
ipk_zaddr2 = find_imported_key(charlie, bob_zaddr)
assert_equal(ipk_zaddr, ipk_zaddr2)
# amounts should be unchanged
verify_utxos(charlie, amounts[:4], ipk_zaddr2)
logging.info("Sending post-import txns...")
for amount in amounts[4:]:
z_send(alice, alice_zaddr, bob_zaddr, amount)
verify_utxos(bob, amounts, bob_zaddr)
verify_utxos(charlie, amounts, ipk_zaddr)
verify_utxos(charlie, amounts, ipk_zaddr2)
# keep track of the fees incurred by bob (his sends)
bob_fee = Decimal(0)
# Try to reproduce zombie balance reported in #1936
# At generated zaddr, receive ZEC, and send ZEC back out. bob -> alice
for amount in amounts[:2]:
print("Sending amount from bob to alice: ", amount)
z_send(bob, bob_zaddr, alice_zaddr, amount)
bob_fee += fee
bob_balance = sum(amounts[2:]) - bob_fee
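        # Worked example with the constants above: amounts[2:] sums to
        # 0.1 + 0.5 + 1.0 + 0.19 = 1.79 and bob paid the fee twice
        # (2 * 0.0001), so bob_balance is 1.7898.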
assert_equal(bob.z_getbalance(bob_zaddr), bob_balance)
# z_import onto new node "david" (blockchain rescan, default or True?)
david.z_importkey(bob_privkey)
d_ipk_zaddr = find_imported_key(david, bob_zaddr)
# Check if amt bob spent is deducted for charlie and david
assert_equal(charlie.z_getbalance(ipk_zaddr), bob_balance)
assert_equal(david.z_getbalance(d_ipk_zaddr), bob_balance)
if __name__ == '__main__':
ZkeyImportExportTest().main()
| 38.141176 | 113 | 0.635873 |
4a2208d41e53c61e63fb0afd3d97a36f176ad0ab | 17,100 | py | Python | src/bin/rifle.py | jrha/aquilon-tools | cfbd6c29eed5facc278bbfe315199bba59f543b4 | [
"Apache-2.0"
] | null | null | null | src/bin/rifle.py | jrha/aquilon-tools | cfbd6c29eed5facc278bbfe315199bba59f543b4 | [
"Apache-2.0"
] | null | null | null | src/bin/rifle.py | jrha/aquilon-tools | cfbd6c29eed5facc278bbfe315199bba59f543b4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
##############################################################################
#
# See COPYRIGHT file in source distribution for copyright holders
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##############################################################################
import sys
import os
import re
import getopt
import signal
import tempfile
import copy
from subprocess import Popen, PIPE
CALL = os.path.basename(__file__)
CALLDIR = os.path.dirname(__file__)
CKEY = CALLDIR + "/ckey"
GETPROF = CALLDIR + "/getprof"
CCM_DIR = "/var/lib/ccm"
CCM_CURRENT_CID = CCM_DIR + "/current.cid"
CCM_PROFILE = CCM_DIR + "/profile.<CID>/profile"
##############################################################################
def usage():
""" Displays a usage message. """
print """
Syntax: %s [-ehIkv] [-o <output>] <file> [<resource_path ...>]
%s [-ehIkv] [-o <output>] -c [<resource_path ...>]
%s [-ehIkv] [-o <output>] -g {<host>|<alias>} [<resource_path ...>]
%s [-ehIkv] [-o <output>] -G <cluster> [<resource_path ...>]
Displays paths in Quattor XML or JSON host profile data where
-c uses current profile for this host instead of requiring
a pathname to an XML or JSON file
-e removes Quattor-style escaping from output
(WARNING: use with care, this tool cannot know which
elements were escaped and which ones were not)
- a single -e unescapes path components only
- a double -ee unescapes values as well
-G downloads profile for named cluster/metacluster using getprof
-g downloads profile for named host using getprof tool
-h hides structural entries that do not have values
-I do not generate list index numbers, use a hash # instead
-k colourises output by piping through to ckey
-o <output> send output to given file, instead of to stdout
     -p            if a value contains newlines, prefixes each line of the
                   value with the resource path, not just the first line
-v display values only
<file> is the XML or JSON file to parse (may be plain or gzipped)
<resource_path ...>
one or more optional resource paths to filter by
Example:
%s -c /software/components/spma
%s /var/quattor/web/htdocs/profiles/aquilon20.one-nyp.ms.com.xml.gz \\
/software/packages
%s -g ilab901.one-nyp /metadata
""" % (CALL, CALL, CALL, CALL, CALL, CALL, CALL)
return 1
##############################################################################
def unescape(s, path=False):
""" Expand Quattor escape sequence. """
if not do_unescape: return s
if do_unescape < 2 and not path: return s
#
# If this is a path, process one path component at a time
# so that we can enclose the component in braces { ... }
# if an expansion occurred
#
if path:
lst = s.split("/")
else:
lst = [s]
new_s = ""
for comp in lst:
if path and (len(new_s) == 0 or new_s[-1] != "/"): new_s += "/"
complst = re.split("(_[0-9a-f][0-9a-f])", comp)
add_s = ""
expanded = False
for atom in complst:
decode_atom = False
if re.match("_[0-9a-f][0-9a-f]", atom):
if path:
#
# Escaped characters in paths will only be unescaped
# if a printable character results and one that
# is likely to have been escaped (i.e. not letters)
#
i = int(atom[1:], 16)
if (i >= 0x20 and i <= 0x40) or \
(i >= 0x5b and i <= 0x60) or \
(i >= 0x7b and i <= 0x7e):
decode_atom = True
else:
decode_atom = True
if decode_atom:
add_s += atom[1:].decode("hex")
expanded = True
else:
add_s += atom
if not path or not expanded:
new_s += add_s
else:
new_s += "{" + add_s + "}"
return new_s
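# Illustrative behaviour of unescape() when -e is given (do_unescape >= 1):
#
#   unescape("/software/packages/perl_2dCGI", path=True)
#       -> "/software/packages/{perl-CGI}"
#
# The escaped component is decoded ("_2d" -> "-") and wrapped in braces to
# flag that an expansion occurred; values are only unescaped with -ee.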
##############################################################################
def chkwrite(output, s, xdup, xout):
""" Write output but check for lines we've been asked to duplicate. """
if xdup is not None:
for m in xdup:
if s[0:m[1]] == m[0]:
xout.write(s)
output.write(s)
##############################################################################
def walk_xml_tree(root, idx=[], strip_prof=False,
output=sys.stdout, xdup=None, xout=None):
""" Walk XML tree and output resource paths of interest. """
name = root.get('name')
if not name:
if gen_indices:
name = '/' + str(idx[-1])
else:
name = '/#'
else: name = '/' + name
text = root.text
if not text: text = ''
text = text.strip()
rpath = name
i = -1
for node in root.iterancestors():
i -= 1
s = node.get('name')
if s == None:
if gen_indices:
s = str(idx[i])
else:
s = '#'
rpath = '/' + s + rpath
s = ""
if (not hide_terminals or text) and not value_only:
pathname = unescape(rpath.encode("utf-8"), True)
s += pathname
if text:
if not value_only: s += " = "
s += unescape(text.strip().encode("utf-8")) + "\n"
elif not hide_terminals:
s += "\n"
if s:
if strip_prof and s[:9] == "/profile/": s = s[8:]
if not prefix_newlines:
chkwrite(output, s, xdup, xout)
else:
lines = s.splitlines()
s = lines.pop(0)
chkwrite(output, s + "\n", xdup, xout)
for line in lines:
s2 = pathname + " .= " + line + "\n"
if strip_prof and s2[:9] == "/profile/": s2 = s2[8:]
output.write(s2)
output.flush()
this_idx = 0
for sub in root.getchildren():
new_idx = copy.copy(idx)
new_idx.append(this_idx)
this_idx += 1
walk_xml_tree(sub, new_idx, strip_prof,
output=output, xdup=xdup, xout=xout)
def walk_dict(d, root="", node=None,
output=sys.stdout, xdup=None, xout=None):
""" Walk dictionary and output resource paths of interest. """
if root == "/": root = ""
for key in sorted(d):
if node is not None and key != node: continue
path = unescape(root + "/" + key.encode("utf-8"), True)
if type(d[key]) is unicode:
value = unescape(d[key].encode("utf-8"))
if not value_only:
if "\n" in value and prefix_newlines:
chkwrite(output, path + " = " +
("\n" + path + " .= ").join(
value.splitlines()) + "\n",
xdup, xout)
else:
chkwrite(output, path + " = " + value + "\n", xdup, xout)
else:
output.write(value + "\n")
elif type(d[key]) is dict:
if not hide_terminals and not value_only:
chkwrite(output, path + "\n", xdup, xout)
walk_dict(d[key], root=path,
output=output, xdup=xdup, xout=xout)
elif type(d[key]) is list:
for i in xrange(0, len(d[key])):
if gen_indices:
lpath = path + "/" + str(i)
else:
lpath = path + "/#"
if type(d[key][i]) is unicode:
value = unescape(d[key][i].encode("utf-8"))
if not value_only:
if "\n" in value and prefix_newlines:
chkwrite(output, lpath + " = " +
("\n" + lpath + " .= ").join(
value.splitlines()) + "\n",
xdup, xout)
else:
chkwrite(output, lpath + " = " + value + "\n",
xdup, xout)
else:
output.write(value + "\n")
elif type(d[key][i]) is dict:
if not hide_terminals and not value_only:
chkwrite(output, lpath + "\n", xdup, xout)
walk_dict(d[key][i], root=lpath,
output=output, xdup=xdup, xout=xout)
##############################################################################
def current_profile():
""" Return name of current host profile. """
if debug: sys.stderr.write("%s: locating current profile in %s\n" % \
(CALL, CCM_DIR))
with open(CCM_CURRENT_CID, "r") as f:
cid = f.read().strip()
filename = CCM_PROFILE.replace("<CID>", cid)
if os.path.exists(filename + ".json"):
return filename + ".json"
return filename + ".xml"
def get_profile(host, cluster = False):
""" Download profile to temporary file and return tempfile handle. """
cmd = [GETPROF, host]
if cluster: cmd.insert(1, "-C")
if debug:
cmd.insert(1, "-D")
sys.stderr.write("%s: launching '%s'\n" % (CALL, " ".join(cmd)))
tempfh = tempfile.NamedTemporaryFile(prefix="tmp.%s." % CALL)
pipe = Popen(cmd, stdout=tempfh)
rc = pipe.wait()
if rc != 0: raise RuntimeError("'%s' returned exit status %d" % \
(" ".join(cmd), rc))
return tempfh
def get_xml_elements(tree, path):
""" Return elements in a particular XML path. """
xpath = ''
for comp in path.split('/')[1:]:
if comp == '*' or comp == '': xpath += '/*[@name]'
else: xpath += '/*[@name="%s"]' % comp
if debug: sys.stderr.write("%s: searching for XML elements: %s\n" % \
(CALL, xpath))
return tree.xpath(xpath)
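# Illustrative xpath strings built by get_xml_elements():
#
#   "/software/packages" -> '/*[@name="software"]/*[@name="packages"]'
#   "/software/*"        -> '/*[@name="software"]/*[@name]'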
##############################################################################
def main(args=sys.argv, outfile=sys.stdout, xdup=None, xout=None):
"""
Main program entry point. If run as 'rifle', then all of the default
parameters are used. Otherwise, parameters may be overridden:
args = list of command-line arguments
outfile = file object to write the output
xdup = optional list of resource paths to duplicate
xout = optional file object to write duplicated resource paths to
"""
global debug, hide_terminals, value_only, do_unescape
global prefix_newlines, gen_indices
if args == sys.argv:
#
# Can only use signal() if this is the main thread, and not a
# module function executed from another program
#
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
#
# Parse command-line arguments
#
try:
opts, args = getopt.getopt(args[1:], "cDeG:g:hIko:pv")
except getopt.GetoptError as err:
print "%s: %s" % (CALL, str(err))
return 1
debug = hide_terminals = value_only = False
prefix_newlines = ckey = outopen = False
gen_indices = True
do_unescape = 0
fname = None
for o, a in opts:
if o == "-c": fname = current_profile()
elif o == "-D": debug = True
elif o == "-e": do_unescape += 1
elif o == "-G":
tempfh = get_profile(a, cluster = True)
fname = tempfh.name
elif o == "-g":
tempfh = get_profile(a)
fname = tempfh.name
elif o == "-h": hide_terminals = True
elif o == "-I": gen_indices = False
elif o == "-k": ckey = True
elif o == "-o":
outfile = open(a, "w")
outopen = True
elif o == "-p": prefix_newlines = True
elif o == "-v": value_only = True
if fname == None:
if len(args) < 1: return usage()
fname = args[0]
args.pop(0)
if not os.path.exists(fname):
sys.stderr.write("%s: file not found: %s\n" % (CALL, fname))
return 1
if xdup is None:
xout = None
else:
#
# Normalise xdup list
#
newdup = []
for m in xdup:
newdup.append((m + " ", len(m)+1))
xdup = newdup
#
# Redirect stdout to ckey if -k was given
#
if ckey:
pipe = Popen([CKEY], stdin=PIPE, stdout=outfile)
output = pipe.stdin
else:
pipe = None
output = outfile
#
# Process file
#
if debug: sys.stderr.write("%s: opening %s\n" % (CALL, fname))
if fname[-5:] != ".json" and fname[-8:] != ".json.gz":
#
# Parse XML
#
try:
import ms.version
ms.version.addpkg('lxml', '2.3.2')
except:
pass
from lxml import etree
tree = etree.parse(fname)
if len(args) == 0:
root = tree.getroot()
walk_xml_tree(root, strip_prof=True,
output=output, xdup=xdup, xout=xout)
else:
for path in args:
if path[0] != '/': path = '/' + path
elst = get_xml_elements(tree, path)
strip_prof = True
if len(elst) == 0 and path[:9] != "/profile/":
path = "/profile" + path
elst = get_xml_elements(tree, path)
elif path[:9] == "/profile/":
strip_prof = False
for root in elst:
walk_xml_tree(root, strip_prof=strip_prof,
output=output, xdup=xdup, xout=xout)
else:
#
# Parse JSON
#
import json
if fname[-3:] == ".gz":
import gzip
f = gzip.open(fname)
else:
f = open(fname)
try:
jsdata = json.load(f)
if len(args) == 0:
#
# Display entire file
#
walk_dict(jsdata, output=output, xdup=xdup, xout=xout)
else:
#
# Display only specific paths, first check to see if any
# paths use wildcards and expand those now
#
new_args = []
lpath = ""
for path in args:
if path[-2:] == "/*": path = path[:-2]
if "*" in path:
if path[0] == "/": path = path[1:]
d = jsdata
lst = path.split("/")
path_found = True
if len(lst) > 1:
for comp in lst[:-1]:
if comp == "*":
for comp in d:
rpath = path[len(lpath)+2:]
args.append("%s/%s/%s" % \
(lpath, comp, rpath))
else:
if comp not in d:
path_found = False
else:
d = d[comp]
lpath += "/" + comp
else:
new_args.append(path)
#
# Walk tree for each expanded path
#
for path in new_args:
if path[0] == "/": path = path[1:]
d = jsdata
lst = path.split("/")
path_found = True
if len(lst) > 1:
for comp in lst[:-1]:
if comp not in d: path_found = False
else: d = d[comp]
if path_found and type(d) is dict:
walk_dict(d, root="/" + "/".join(lst[:-1]),
node=lst[-1], output=output)
finally:
f.close()
if pipe: pipe.communicate('')
if outopen: outfile.close()
return 0
if __name__ == "__main__":
retval = main()
exit(retval)
| 33.595285 | 80 | 0.460643 |
4a220a20e4f204d62199ef36efb1c09c65e970db | 547 | py | Python | docs/fund_info.py | txqzzz/xingqi_fund | d5cb59759a713ccddd95b6ba6e09fc8f0dae9d84 | [
"MIT"
] | null | null | null | docs/fund_info.py | txqzzz/xingqi_fund | d5cb59759a713ccddd95b6ba6e09fc8f0dae9d84 | [
"MIT"
] | null | null | null | docs/fund_info.py | txqzzz/xingqi_fund | d5cb59759a713ccddd95b6ba6e09fc8f0dae9d84 | [
"MIT"
] | null | null | null | from typing import List, Any, Union
fund_id: List[Union[str, Any]] = ['000834', '519674', '160225', '007301', '161631', '000148', '007531', '007874',
'161725', '161726', '161720', '006020', '110003', '110022', '161028', '320007',
'007824', '001593', '001594', '008087', '501010', '166002', '006229', '002697',
'001156', '006768', '005224', '001938', '002316', '513600', '510900', '159920',
'008975', '005911']
| 68.375 | 113 | 0.464351 |
4a220a466fdcfa14be15986d352cafa55f1a679f | 739 | py | Python | integration/airflow/openlineage/airflow/extractors/__init__.py | kedar-cz/OpenLineage | bd75b53c84fd9655f593c4f161e15c14785eb93e | [
"Apache-2.0"
] | 1 | 2021-11-19T15:00:39.000Z | 2021-11-19T15:00:39.000Z | integration/airflow/openlineage/airflow/extractors/__init__.py | kedar-cz/OpenLineage | bd75b53c84fd9655f593c4f161e15c14785eb93e | [
"Apache-2.0"
] | null | null | null | integration/airflow/openlineage/airflow/extractors/__init__.py | kedar-cz/OpenLineage | bd75b53c84fd9655f593c4f161e15c14785eb93e | [
"Apache-2.0"
] | 1 | 2021-09-07T04:16:02.000Z | 2021-09-07T04:16:02.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from openlineage.airflow.extractors.extractors import Extractors
from openlineage.airflow.extractors.base import BaseExtractor, StepMetadata
__all__ = [Extractors, BaseExtractor, StepMetadata]
| 43.470588 | 75 | 0.788904 |
4a220ab75433d967bc55e921cdf77cb77697b3bd | 277 | py | Python | md4c/types.py | Exahilosys/md4c | 3c37ba8892870af212108c546a6249cea0c4199e | [
"MIT"
] | null | null | null | md4c/types.py | Exahilosys/md4c | 3c37ba8892870af212108c546a6249cea0c4199e | [
"MIT"
] | null | null | null | md4c/types.py | Exahilosys/md4c | 3c37ba8892870af212108c546a6249cea0c4199e | [
"MIT"
] | null | null | null | import ctypes
__all__ = ('char', 'char_p', 'size', 'offset', 'enum', 'void', 'ires', 'vres')
char = ctypes.c_char
char_p = ctypes.c_char_p
size = ctypes.c_uint
offset = ctypes.c_uint
enum = ctypes.c_uint
void = ctypes.c_void_p
ires = ctypes.c_int
vres = void
| 9.551724 | 78 | 0.65704 |
4a220acc41a46e1d5f13169a01754f4721c58de8 | 465 | py | Python | scripts/wanderer_self_test.py | IRASatUC/turtle_roomba | e562de7a875e3f732e80002f658174f6e0496cba | [
"MIT"
] | null | null | null | scripts/wanderer_self_test.py | IRASatUC/turtle_roomba | e562de7a875e3f732e80002f658174f6e0496cba | [
"MIT"
] | null | null | null | scripts/wanderer_self_test.py | IRASatUC/turtle_roomba | e562de7a875e3f732e80002f658174f6e0496cba | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import absolute_import, print_function
import numpy as np
import math
import random
import time
import rospy
import tf
from geometry_msgs.msg import Point, Pose, Twist
from wanderer import Wanderer
if __name__ == "__main__":
rospy.init_node("wanderer_test", anonymous=True, log_level=rospy.DEBUG)
wanderertester = Wanderer()
rospy.on_shutdown(wanderertester.clean_shutdown)
wanderertester.self_test()
rospy.spin()
| 23.25 | 73 | 0.797849 |
4a220ae2c4938810e88f0a475494c6f3817a3487 | 13,973 | py | Python | build/PureCloudPlatformClientV2/models/o_auth_client_request.py | cjohnson-ctl/platform-client-sdk-python | 38ce53bb8012b66e8a43cc8bd6ff00cf6cc99100 | [
"MIT"
] | 10 | 2019-02-22T00:27:08.000Z | 2021-09-12T23:23:44.000Z | build/PureCloudPlatformClientV2/models/o_auth_client_request.py | cjohnson-ctl/platform-client-sdk-python | 38ce53bb8012b66e8a43cc8bd6ff00cf6cc99100 | [
"MIT"
] | 5 | 2018-06-07T08:32:00.000Z | 2021-07-28T17:37:26.000Z | build/PureCloudPlatformClientV2/models/o_auth_client_request.py | cjohnson-ctl/platform-client-sdk-python | 38ce53bb8012b66e8a43cc8bd6ff00cf6cc99100 | [
"MIT"
] | 6 | 2020-04-09T17:43:07.000Z | 2022-02-17T08:48:05.000Z | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class OAuthClientRequest(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
OAuthClientRequest - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'name': 'str',
'access_token_validity_seconds': 'int',
'description': 'str',
'registered_redirect_uri': 'list[str]',
'role_ids': 'list[str]',
'authorized_grant_type': 'str',
'scope': 'list[str]',
'role_divisions': 'list[RoleDivision]',
'state': 'str',
'date_to_delete': 'datetime'
}
self.attribute_map = {
'name': 'name',
'access_token_validity_seconds': 'accessTokenValiditySeconds',
'description': 'description',
'registered_redirect_uri': 'registeredRedirectUri',
'role_ids': 'roleIds',
'authorized_grant_type': 'authorizedGrantType',
'scope': 'scope',
'role_divisions': 'roleDivisions',
'state': 'state',
'date_to_delete': 'dateToDelete'
}
self._name = None
self._access_token_validity_seconds = None
self._description = None
self._registered_redirect_uri = None
self._role_ids = None
self._authorized_grant_type = None
self._scope = None
self._role_divisions = None
self._state = None
self._date_to_delete = None
@property
def name(self):
"""
Gets the name of this OAuthClientRequest.
The name of the OAuth client.
:return: The name of this OAuthClientRequest.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this OAuthClientRequest.
The name of the OAuth client.
:param name: The name of this OAuthClientRequest.
:type: str
"""
self._name = name
@property
def access_token_validity_seconds(self):
"""
Gets the access_token_validity_seconds of this OAuthClientRequest.
The number of seconds, between 5mins and 48hrs, until tokens created with this client expire. If this field is omitted, a default of 24 hours will be applied.
:return: The access_token_validity_seconds of this OAuthClientRequest.
:rtype: int
"""
return self._access_token_validity_seconds
@access_token_validity_seconds.setter
def access_token_validity_seconds(self, access_token_validity_seconds):
"""
Sets the access_token_validity_seconds of this OAuthClientRequest.
The number of seconds, between 5mins and 48hrs, until tokens created with this client expire. If this field is omitted, a default of 24 hours will be applied.
:param access_token_validity_seconds: The access_token_validity_seconds of this OAuthClientRequest.
:type: int
"""
self._access_token_validity_seconds = access_token_validity_seconds
@property
def description(self):
"""
Gets the description of this OAuthClientRequest.
:return: The description of this OAuthClientRequest.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this OAuthClientRequest.
:param description: The description of this OAuthClientRequest.
:type: str
"""
self._description = description
@property
def registered_redirect_uri(self):
"""
Gets the registered_redirect_uri of this OAuthClientRequest.
List of allowed callbacks for this client. For example: https://myap.example.com/auth/callback
:return: The registered_redirect_uri of this OAuthClientRequest.
:rtype: list[str]
"""
return self._registered_redirect_uri
@registered_redirect_uri.setter
def registered_redirect_uri(self, registered_redirect_uri):
"""
Sets the registered_redirect_uri of this OAuthClientRequest.
List of allowed callbacks for this client. For example: https://myap.example.com/auth/callback
:param registered_redirect_uri: The registered_redirect_uri of this OAuthClientRequest.
:type: list[str]
"""
self._registered_redirect_uri = registered_redirect_uri
@property
def role_ids(self):
"""
Gets the role_ids of this OAuthClientRequest.
Deprecated. Use roleDivisions instead.
:return: The role_ids of this OAuthClientRequest.
:rtype: list[str]
"""
return self._role_ids
@role_ids.setter
def role_ids(self, role_ids):
"""
Sets the role_ids of this OAuthClientRequest.
Deprecated. Use roleDivisions instead.
:param role_ids: The role_ids of this OAuthClientRequest.
:type: list[str]
"""
self._role_ids = role_ids
@property
def authorized_grant_type(self):
"""
Gets the authorized_grant_type of this OAuthClientRequest.
The OAuth Grant/Client type supported by this client. Code Authorization Grant/Client type - Preferred client type where the Client ID and Secret are required to create tokens. Used where the secret can be secured. PKCE-Enabled Code Authorization grant type - Code grant type which requires PKCE challenge and verifier to create tokens. Used in public clients for increased security. Implicit grant type - Client ID only is required to create tokens. Used in browser and mobile apps where the secret can not be secured. SAML2-Bearer extension grant type - SAML2 assertion provider for user authentication at the token endpoint. Client Credential grant type - Used to created access tokens that are tied only to the client.
:return: The authorized_grant_type of this OAuthClientRequest.
:rtype: str
"""
return self._authorized_grant_type
@authorized_grant_type.setter
def authorized_grant_type(self, authorized_grant_type):
"""
Sets the authorized_grant_type of this OAuthClientRequest.
The OAuth Grant/Client type supported by this client. Code Authorization Grant/Client type - Preferred client type where the Client ID and Secret are required to create tokens. Used where the secret can be secured. PKCE-Enabled Code Authorization grant type - Code grant type which requires PKCE challenge and verifier to create tokens. Used in public clients for increased security. Implicit grant type - Client ID only is required to create tokens. Used in browser and mobile apps where the secret can not be secured. SAML2-Bearer extension grant type - SAML2 assertion provider for user authentication at the token endpoint. Client Credential grant type - Used to created access tokens that are tied only to the client.
:param authorized_grant_type: The authorized_grant_type of this OAuthClientRequest.
:type: str
"""
allowed_values = ["CODE", "TOKEN", "SAML2BEARER", "PASSWORD", "CLIENT_CREDENTIALS"]
if authorized_grant_type.lower() not in map(str.lower, allowed_values):
# print("Invalid value for authorized_grant_type -> " + authorized_grant_type)
self._authorized_grant_type = "outdated_sdk_version"
else:
self._authorized_grant_type = authorized_grant_type
@property
def scope(self):
"""
Gets the scope of this OAuthClientRequest.
The scope requested by this client. Scopes only apply to clients not using the client_credential grant
:return: The scope of this OAuthClientRequest.
:rtype: list[str]
"""
return self._scope
@scope.setter
def scope(self, scope):
"""
Sets the scope of this OAuthClientRequest.
The scope requested by this client. Scopes only apply to clients not using the client_credential grant
:param scope: The scope of this OAuthClientRequest.
:type: list[str]
"""
self._scope = scope
@property
def role_divisions(self):
"""
Gets the role_divisions of this OAuthClientRequest.
Set of roles and their corresponding divisions associated with this client. Roles and divisions only apply to clients using the client_credential grant
:return: The role_divisions of this OAuthClientRequest.
:rtype: list[RoleDivision]
"""
return self._role_divisions
@role_divisions.setter
def role_divisions(self, role_divisions):
"""
Sets the role_divisions of this OAuthClientRequest.
Set of roles and their corresponding divisions associated with this client. Roles and divisions only apply to clients using the client_credential grant
:param role_divisions: The role_divisions of this OAuthClientRequest.
:type: list[RoleDivision]
"""
self._role_divisions = role_divisions
@property
def state(self):
"""
Gets the state of this OAuthClientRequest.
The state of the OAuth client. Active: The OAuth client can be used to create access tokens. This is the default state. Disabled: Access tokens created by the client are invalid and new ones cannot be created. Inactive: Access tokens cannot be created with this OAuth client and it will be deleted.
:return: The state of this OAuthClientRequest.
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""
Sets the state of this OAuthClientRequest.
The state of the OAuth client. Active: The OAuth client can be used to create access tokens. This is the default state. Disabled: Access tokens created by the client are invalid and new ones cannot be created. Inactive: Access tokens cannot be created with this OAuth client and it will be deleted.
:param state: The state of this OAuthClientRequest.
:type: str
"""
allowed_values = ["active", "disabled", "inactive"]
if state.lower() not in map(str.lower, allowed_values):
# print("Invalid value for state -> " + state)
self._state = "outdated_sdk_version"
else:
self._state = state
@property
def date_to_delete(self):
"""
Gets the date_to_delete of this OAuthClientRequest.
The time at which this client will be deleted. Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z
:return: The date_to_delete of this OAuthClientRequest.
:rtype: datetime
"""
return self._date_to_delete
@date_to_delete.setter
def date_to_delete(self, date_to_delete):
"""
Sets the date_to_delete of this OAuthClientRequest.
The time at which this client will be deleted. Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z
:param date_to_delete: The date_to_delete of this OAuthClientRequest.
:type: datetime
"""
self._date_to_delete = date_to_delete
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 37.461126 | 731 | 0.648966 |
4a220b6bce4fa4e938a78f5bdbdf78fcaa742ff4 | 9,706 | py | Python | accelerator/methods.py | sebras/berkeman-acceldev | 72efd8f3f8d4a4f4bf71612f1d9703fd89fd48e4 | [
"Apache-2.0"
] | null | null | null | accelerator/methods.py | sebras/berkeman-acceldev | 72efd8f3f8d4a4f4bf71612f1d9703fd89fd48e4 | [
"Apache-2.0"
] | null | null | null | accelerator/methods.py | sebras/berkeman-acceldev | 72efd8f3f8d4a4f4bf71612f1d9703fd89fd48e4 | [
"Apache-2.0"
] | 1 | 2020-02-15T17:09:16.000Z | 2020-02-15T17:09:16.000Z | ############################################################################
# #
# Copyright (c) 2017 eBay Inc. #
# Modifications copyright (c) 2018-2019 Carl Drougge #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
############################################################################
from __future__ import print_function
from __future__ import division
import os
import datetime
from time import time
from collections import defaultdict
from importlib import import_module
from accelerator.compat import iteritems, itervalues, first_value
from accelerator.compat import NoneType, unicode, long
from accelerator.extras import DotDict, OptionString, OptionEnum, OptionDefault, RequiredOption
from accelerator.runner import new_runners
from accelerator.setupfile import _sorted_set
class MethodLoadException(Exception):
def __init__(self, lst):
Exception.__init__(self, 'Failed to load ' + ', '.join(lst))
self.module_list = lst
class Methods(object):
def __init__(self, package_list, configfilename):
self.package_list = package_list
self.db = {}
for package in self.package_list:
try:
package_mod = import_module(package)
if not hasattr(package_mod, "__file__"):
raise ImportError("no __file__")
except ImportError:
raise Exception("Failed to import %s, maybe missing __init__.py?" % (package,))
if not package_mod.__file__:
raise Exception("%s has no __file__, maybe missing __init__.py?" % (package,))
confname = os.path.join(os.path.dirname(package_mod.__file__), configfilename)
tmp = read_method_conf(confname)
for x in tmp:
if x in self.db:
print("METHOD: ERROR, method \"%s\" defined both in \"%s\" and \"%s\"!" % (
x, package, self.db[x]['package']))
exit(1)
for x in tmp.values():
x['package'] = os.path.basename(package)
self.db.update(tmp)
# build dependency tree for all methods
self.deptree = {}
for method in self.db:
self.deptree[method] = self._build_dep_tree(method, tree={})
self.link = {k: v.get('link') for k, v in iteritems(self.db)}
def _build_dep_tree(self, method, tree={}):
if method not in self.db:
raise Exception("Method %r doesn't exist" % method)
dependencies = self.db[method].get('dep', [])
tree.setdefault(method, {'dep' : dependencies, 'level' : -1, 'method' : method})
if not dependencies:
tree[method]['level'] = 0
else:
for dep in dependencies:
self._build_dep_tree(dep, tree=tree)
tree[method]['level'] = max(
tree[method]['level'],
tree[dep]['level']+1,
)
return tree
def new_deptree(self, top_method):
return self._build_dep_tree(top_method, tree={})
# Collect information on methods
class SubMethods(Methods):
def __init__(self, package_list, configfilename, daemon_config):
super(SubMethods, self).__init__(package_list, configfilename)
t0 = time()
per_runner = defaultdict(list)
for key, val in iteritems(self.db):
package = val['package']
per_runner[val['version']].append((package, key))
self.runners = new_runners(daemon_config, set(per_runner))
warnings = []
failed = []
self.hash = {}
self.params = {}
self.typing = {}
for version, data in iteritems(per_runner):
runner = self.runners.get(version)
if not runner:
msg = '%%s.%%s (unconfigured interpreter %s)' % (version)
failed.extend(msg % t for t in sorted(data))
continue
w, f, h, p = runner.load_methods(package_list, data)
warnings.extend(w)
failed.extend(f)
self.hash.update(h)
self.params.update(p)
for key, params in iteritems(self.params):
self.typing[key] = options2typing(key, params.options)
params.defaults = params2defaults(params)
params.required = options2required(params.options)
def prt(a, prefix):
maxlen = (max(len(e) for e in a) + len(prefix))
line = '=' * maxlen
print()
print(line)
for e in sorted(a):
msg = prefix + e
print(msg + ' ' * (maxlen - len(msg)))
print(line)
print()
if warnings:
prt(warnings, 'WARNING: ')
if failed:
print('\033[47;31;1m')
prt(failed, 'FAILED to import ')
print('\033[m')
raise MethodLoadException(failed)
print("Updated %d methods on %d runners in %.1f seconds" % (
len(self.hash), len(per_runner), time() - t0,
))
def params2optset(self, params):
optset = set()
for optmethod, method_params in iteritems(params):
for group, d in iteritems(method_params):
filled_in = dict(self.params[optmethod].defaults[group])
filled_in.update(d)
for optname, optval in iteritems(filled_in):
optset.add('%s %s-%s %s' % (optmethod, group, optname, _reprify(optval),))
return optset
def _reprify(o):
if isinstance(o, OptionDefault):
o = o.default
if isinstance(o, (bytes, str, int, float, long, bool, NoneType)):
return repr(o)
if isinstance(o, unicode):
# not reachable in PY3, the above "str" matches
return repr(o.encode('utf-8'))
if isinstance(o, set):
return '[%s]' % (', '.join(map(_reprify, _sorted_set(o))),)
if isinstance(o, (list, tuple)):
return '[%s]' % (', '.join(map(_reprify, o)),)
if isinstance(o, dict):
return '{%s}' % (', '.join('%s: %s' % (_reprify(k), _reprify(v),) for k, v in sorted(iteritems(o))),)
if isinstance(o, (datetime.datetime, datetime.date, datetime.time, datetime.timedelta,)):
return str(o)
raise Exception('Unhandled %s in dependency resolution' % (type(o),))
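# Build the default datasets/jobs/options values a method sees when the caller omits them.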
def params2defaults(params):
d = DotDict()
for key in ('datasets', 'jobs',):
r = {}
for v in params[key]:
if isinstance(v, list):
r[v[0]] = []
else:
r[v] = None
d[key] = r
def fixup(item):
if isinstance(item, dict):
d = {k: fixup(v) for k, v in iteritems(item)}
if len(d) == 1 and first_value(d) is None and first_value(item) is not None:
return {}
return d
if isinstance(item, (list, tuple, set,)):
l = [fixup(v) for v in item]
if l == [None] and list(item) != [None]:
l = []
return type(item)(l)
if isinstance(item, (type, OptionEnum)):
return None
assert isinstance(item, (bytes, unicode, int, float, long, bool, OptionEnum, NoneType, datetime.datetime, datetime.date, datetime.time, datetime.timedelta)), type(item)
return item
def fixup0(item):
if isinstance(item, RequiredOption):
item = item.value
if isinstance(item, OptionDefault):
item = item.default
return fixup(item)
d.options = {k: fixup0(v) for k, v in iteritems(params.options)}
return d
def options2required(options):
res = set()
def chk(key, value):
if value is OptionString or isinstance(value, RequiredOption):
res.add(key)
elif isinstance(value, OptionEnum):
if None not in value._valid:
res.add(key)
elif isinstance(value, dict):
for v in itervalues(value):
chk(key, v)
elif isinstance(value, (list, tuple, set,)):
for v in value:
chk(key, v)
for key, value in iteritems(options):
chk(key, value)
return res
def options2typing(method, options):
from accelerator.job import JobWithFile
res = {}
def value2spec(value):
if isinstance(value, list):
if not value:
return
fmt = '[%s]'
value = value[0]
else:
fmt = '%s'
typ = None
if value is JobWithFile or isinstance(value, JobWithFile):
typ = 'JobWithFile'
elif isinstance(value, set):
typ = 'set'
elif value in (datetime.datetime, datetime.date, datetime.time, datetime.timedelta,):
typ = value.__name__
elif isinstance(value, (datetime.datetime, datetime.date, datetime.time, datetime.timedelta,)):
typ = type(value).__name__
if typ:
return fmt % (typ,)
def collect(key, value, path=''):
path = "%s/%s" % (path, key,)
if isinstance(value, dict):
for v in itervalues(value):
collect('*', v, path)
return
spec = value2spec(value)
assert res.get(path, spec) == spec, 'Method %s has incompatible types in options%s' % (method, path,)
res[path] = spec
for k, v in iteritems(options):
collect(k, v)
# reverse by key len, so something inside a dict always comes before
# the dict itself. (We don't currently have any dict-like types, but we
# might later.)
return sorted(([k[1:], v] for k, v in iteritems(res) if v), key=lambda i: -len(i[0]))
def read_method_conf(filename):
""" read and parse the methods.conf file """
db = {}
with open(filename) as fh:
for lineno, line in enumerate(fh, 1):
data = line.split('#')[0].split()
if not data:
continue
method = data.pop(0)
try:
version = data.pop(0)
except IndexError:
version = 'DEFAULT'
if data:
raise Exception('Trailing garbage on %s:%d: %s' % (filename, lineno, line,))
db[method] = DotDict(version=version)
return db
| 34.41844 | 170 | 0.625592 |
4a220ce6783a612ad5fdb71e3532778105df0896 | 1,441 | py | Python | src/users/tests/test_models.py | zkkamir/planning-project | f1b82194c41145272569028b36088c2e9834c72f | [
"MIT"
] | null | null | null | src/users/tests/test_models.py | zkkamir/planning-project | f1b82194c41145272569028b36088c2e9834c72f | [
"MIT"
] | null | null | null | src/users/tests/test_models.py | zkkamir/planning-project | f1b82194c41145272569028b36088c2e9834c72f | [
"MIT"
] | null | null | null | import pytest
@pytest.mark.django_db
def test_create_user(django_user_model):
"""
Test user creation.
"""
user = django_user_model.objects.create_user(
email="[email protected]", password="testtest"
)
assert django_user_model.objects.count() == 1
assert user.email == "[email protected]"
assert user.is_active is True
assert user.is_staff is False
assert user.is_superuser is False
with pytest.raises(AttributeError):
user.username
with pytest.raises(TypeError):
django_user_model.objects.create_user()
with pytest.raises(TypeError):
django_user_model.objects.create_user(email="")
with pytest.raises(ValueError):
django_user_model.objects.create_user(email="", password="foo")
@pytest.mark.django_db
def test_create_superuser(django_user_model):
"""
Test superuser creation.
"""
user = django_user_model.objects.create_superuser(
email="[email protected]", password="testtest"
)
assert django_user_model.objects.count() == 1
assert user.email == "[email protected]"
assert user.is_active is True
assert user.is_staff is True
assert user.is_superuser is True
with pytest.raises(AttributeError):
user.username
with pytest.raises(ValueError):
django_user_model.objects.create_superuser(
email="[email protected]", password="foo", is_superuser=False
)
| 31.326087 | 75 | 0.69882 |
4a2210af7fb4db99416975899d5c4de7da1d6fed | 3,048 | py | Python | docs/conf.py | tonitick/horovod | 73d860f2396321761e0f5ef6fe934130afd69094 | [
"Apache-2.0"
] | null | null | null | docs/conf.py | tonitick/horovod | 73d860f2396321761e0f5ef6fe934130afd69094 | [
"Apache-2.0"
] | 1 | 2019-07-29T10:08:33.000Z | 2019-07-29T10:08:33.000Z | docs/conf.py | tonitick/horovod | 73d860f2396321761e0f5ef6fe934130afd69094 | [
"Apache-2.0"
] | 1 | 2019-04-08T17:12:48.000Z | 2019-04-08T17:12:48.000Z | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'Horovod'
copyright = '2019, The Horovod Authors'
author = 'The Horovod Authors'
from horovod import __version__
version = __version__
# -- Mocking configuration ---------------------------------------------------
import mocks
mocks.instrument()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinxcontrib.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Autodoc configuration ---------------------------------------------------
autodoc_default_options = {
'members': None,
'member-order': 'bysource',
'special-members': '__init__',
'imported-members': None,
'undoc-members': None,
}
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# For alabaster: https://alabaster.readthedocs.io/en/latest/customization.html
#
html_theme_options = {
'logo': 'logo.png',
'description': 'Distributed training framework for TensorFlow, Keras, PyTorch, and Apache MXNet.',
'github_user': 'horovod',
'github_repo': 'horovod',
'github_button': True,
'github_type': 'star',
'github_count': 'true',
'fixed_sidebar': True,
'sidebar_collapse': True,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 31.102041 | 102 | 0.650262 |
4a22110880d10b3922ea4afb0ed19ee3af1c55c3 | 1,451 | py | Python | Python/Listas/Q22.py | Flavio-Varejao/Exercicios | 69d62d09e5ef5da4446b6bf7dccda9eae7361d96 | [
"MIT"
] | null | null | null | Python/Listas/Q22.py | Flavio-Varejao/Exercicios | 69d62d09e5ef5da4446b6bf7dccda9eae7361d96 | [
"MIT"
] | null | null | null | Python/Listas/Q22.py | Flavio-Varejao/Exercicios | 69d62d09e5ef5da4446b6bf7dccda9eae7361d96 | [
"MIT"
] | null | null | null | mouses={'1':0,'2':0,'3':0,'4':0}
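# Tally of mice per defect category: 1 = needs ball, 2 = needs cleaning, 3 = needs cable/connector replacement, 4 = broken/unusable.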
resposta=1
while resposta != "0":
opcao=input("\nEscolha uma opção:\n"+
"<1> - Necessita de esfera\n"+
"<2> - Necessita de limpeza\n"+
"<3> - Necessita trocar do cabo ou conector\n"+
"<4> - Quebrado ou inutilizado: ")
if opcao == "1":
print("\nNecessita de esfera")
mouses['1']=int(input("Digite a quantidade: "))
elif opcao == "2":
print("\nNecessita de limpeza")
mouses['2']=int(input("Digite a quantidade: "))
elif opcao == "3":
print("\nNecessita trocar do cabo ou conector")
mouses['3']=int(input("Digite a quantidade: "))
elif opcao == "4":
print("\nQuebrado ou inutilizado")
mouses['4']=int(input("Digite a quantidade: "))
else:
break
resposta=input("\nDigite <0> para sair: ")
print("\nQuantidade de mouses:",sum(mouses.values()))
print("\nSituação Quantidade Percentual")
print("1 - necessita da esfera ",mouses['1']," ",mouses['1']/sum(mouses.values()))
print("2 - necessita de limpeza ",mouses['2']," ",mouses['2']/sum(mouses.values()))
print("3 - necessita troca do cabo ou conector ",mouses['3']," ",mouses['3']/sum(mouses.values()))
print("4 - quebrado ou inutilizado ",mouses['4']," ",mouses['4']/sum(mouses.values())) | 46.806452 | 111 | 0.532047 |
4a2212c1d6a8d2c18a7f2e3573d4db93bc57aefa | 7,697 | py | Python | notifications/views.py | facundojmaero/django-notifications | 74edab0ab45d8ca3eee2d2b20c5d3f4a127ef652 | [
"BSD-3-Clause"
] | null | null | null | notifications/views.py | facundojmaero/django-notifications | 74edab0ab45d8ca3eee2d2b20c5d3f4a127ef652 | [
"BSD-3-Clause"
] | null | null | null | notifications/views.py | facundojmaero/django-notifications | 74edab0ab45d8ca3eee2d2b20c5d3f4a127ef652 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
''' Django Notifications example views '''
from distutils.version import StrictVersion # pylint: disable=no-name-in-module,import-error
from django import get_version
from django.contrib.auth.decorators import login_required
from django.forms import model_to_dict
from django.shortcuts import get_object_or_404, redirect
from django.utils.decorators import method_decorator
from django.views.generic import ListView
from notifications import settings
from notifications.utils import id2slug, slug2id
from notifications.settings import get_config
from django.views.decorators.cache import never_cache
from swapper import load_model
Notification = load_model('notifications', 'Notification')
if StrictVersion(get_version()) >= StrictVersion('1.7.0'):
from django.http import JsonResponse # noqa
else:
# Django 1.6 doesn't have a proper JsonResponse
import json
from django.http import HttpResponse # noqa
def date_handler(obj):
return obj.isoformat() if hasattr(obj, 'isoformat') else obj
def JsonResponse(data): # noqa
return HttpResponse(
json.dumps(data, default=date_handler),
content_type="application/json")
class NotificationViewList(ListView):
template_name = 'notifications/list.html'
context_object_name = 'notifications'
paginate_by = settings.get_config()['PAGINATE_BY']
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(NotificationViewList, self).dispatch(
request, *args, **kwargs)
class AllNotificationsList(NotificationViewList):
"""
Index page for authenticated user
"""
def get_queryset(self):
if settings.get_config()['SOFT_DELETE']:
qset = self.request.user.notifications.active()
else:
qset = self.request.user.notifications.all()
return qset
class UnreadNotificationsList(NotificationViewList):
def get_queryset(self):
return self.request.user.notifications.unread()
@login_required
def mark_all_as_read(request):
request.user.notifications.mark_all_as_read()
_next = request.GET.get('next')
if _next:
return redirect(_next)
return redirect('notifications:unread')
@login_required
def mark_as_read(request, slug=None):
notification_id = slug2id(slug)
notification = get_object_or_404(
Notification, recipient=request.user, id=notification_id)
notification.mark_as_read()
_next = request.GET.get('next')
if _next:
return redirect(_next)
return redirect('notifications:unread')
@login_required
def mark_as_unread(request, slug=None):
notification_id = slug2id(slug)
notification = get_object_or_404(
Notification, recipient=request.user, id=notification_id)
notification.mark_as_unread()
_next = request.GET.get('next')
if _next:
return redirect(_next)
return redirect('notifications:unread')
@login_required
def delete(request, slug=None):
notification_id = slug2id(slug)
notification = get_object_or_404(
Notification, recipient=request.user, id=notification_id)
if settings.get_config()['SOFT_DELETE']:
notification.deleted = True
notification.save()
else:
notification.delete()
_next = request.GET.get('next')
if _next:
return redirect(_next)
return redirect('notifications:all')
@never_cache
def live_unread_notification_count(request):
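    ''' Return a json with the number of unread notifications '''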
try:
user_is_authenticated = request.user.is_authenticated()
except TypeError: # Django >= 1.11
user_is_authenticated = request.user.is_authenticated
if not user_is_authenticated:
data = {
'unread_count': 0
}
else:
data = {
'unread_count': request.user.notifications.unread().count(),
}
return JsonResponse(data)
@never_cache
def live_unread_notification_list(request):
    ''' Return a json with an unread notification list '''
try:
user_is_authenticated = request.user.is_authenticated()
except TypeError: # Django >= 1.11
user_is_authenticated = request.user.is_authenticated
if not user_is_authenticated:
data = {
'unread_count': 0,
'unread_list': []
}
return JsonResponse(data)
default_num_to_fetch = get_config()['NUM_TO_FETCH']
try:
        # If the client doesn't specify a max, fall back to the configured default.
num_to_fetch = request.GET.get('max', default_num_to_fetch)
num_to_fetch = int(num_to_fetch)
if not (1 <= num_to_fetch <= 100):
num_to_fetch = default_num_to_fetch
except ValueError: # If casting to an int fails.
num_to_fetch = default_num_to_fetch
unread_list = []
for notification in request.user.notifications.unread()[0:num_to_fetch]:
struct = model_to_dict(notification)
struct['slug'] = id2slug(notification.id)
if notification.actor:
struct['actor'] = str(notification.actor)
if notification.target:
struct['target'] = str(notification.target)
if notification.action_object:
struct['action_object'] = str(notification.action_object)
if notification.data:
struct['data'] = notification.data
unread_list.append(struct)
if request.GET.get('mark_as_read'):
notification.mark_as_read()
data = {
'unread_count': request.user.notifications.unread().count(),
'unread_list': unread_list
}
return JsonResponse(data)
@never_cache
def live_all_notification_list(request):
    ''' Return a json with all notifications in a list '''
try:
user_is_authenticated = request.user.is_authenticated()
except TypeError: # Django >= 1.11
user_is_authenticated = request.user.is_authenticated
if not user_is_authenticated:
data = {
'all_count': 0,
'all_list': []
}
return JsonResponse(data)
default_num_to_fetch = get_config()['NUM_TO_FETCH']
try:
        # If the client doesn't specify a max, fall back to the configured default.
num_to_fetch = request.GET.get('max', default_num_to_fetch)
num_to_fetch = int(num_to_fetch)
if not (1 <= num_to_fetch <= 100):
num_to_fetch = default_num_to_fetch
except ValueError: # If casting to an int fails.
num_to_fetch = default_num_to_fetch
all_list = []
for notification in request.user.notifications.all()[0:num_to_fetch]:
struct = model_to_dict(notification)
struct['slug'] = id2slug(notification.id)
if notification.actor:
struct['actor'] = str(notification.actor)
if notification.target:
struct['target'] = str(notification.target)
if notification.action_object:
struct['action_object'] = str(notification.action_object)
if notification.data:
struct['data'] = notification.data
all_list.append(struct)
if request.GET.get('mark_as_read'):
notification.mark_as_read()
data = {
'all_count': request.user.notifications.count(),
'all_list': all_list
}
return JsonResponse(data)
def live_all_notification_count(request):
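    ''' Return a json with the total notification count '''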
try:
user_is_authenticated = request.user.is_authenticated()
except TypeError: # Django >= 1.11
user_is_authenticated = request.user.is_authenticated
if not user_is_authenticated:
data = {
'all_count': 0
}
else:
data = {
'all_count': request.user.notifications.count(),
}
return JsonResponse(data)
| 29.718147 | 93 | 0.67169 |
4a2212df19fd4354258808f9f5d685729bd853f0 | 1,456 | py | Python | scripts/species/data_loader.py | JDonini/Cats-and-Dogs-Classification | 1322f80536ff077ab87e5176a13ea5db242254b1 | [
"MIT"
] | 10 | 2018-11-30T08:31:09.000Z | 2020-03-30T09:34:12.000Z | scripts/species/data_loader.py | JDonini/Cats_Dogs_Classification | 1322f80536ff077ab87e5176a13ea5db242254b1 | [
"MIT"
] | 1 | 2019-10-05T14:07:09.000Z | 2019-10-05T14:07:09.000Z | scripts/species/data_loader.py | JDonini/Cats_Dogs_Classification | 1322f80536ff077ab87e5176a13ea5db242254b1 | [
"MIT"
] | 4 | 2018-04-30T05:12:12.000Z | 2018-06-21T13:56:43.000Z | import warnings
import torch
import torchvision.transforms as transforms
from torchvision import datasets
import os
import sys
sys.path.append('utils')
from config import IMG_SIZE, BATCH_SIZE, NUM_WORKERS, DATA_PATH
warnings.filterwarnings("ignore")
print("Processing Species...")
transform = {
'train': transforms.Compose([transforms.Scale(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
]),
'test': transforms.Compose([transforms.Scale(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
}
dsets = {x: datasets.ImageFolder(os.path.join(DATA_PATH, x), transform[x])
for x in ['train', 'test']}
dset_loaders = {x: torch.utils.data.DataLoader(dsets[x], batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS)
for x in ['train', 'test']}
dset_sizes = {x: len(dsets[x]) for x in ['train', 'test']}
dset_classes = dsets['train'].classes
| 36.4 | 118 | 0.513049 |
4a22135ea3474f8e428d4ef7d4cc93d49f80e52c | 871 | py | Python | test/unit/rules/resources/lmbd/test_deprecated_runtime_eol.py | tomislacker/cfn-python-lint | f209ddfef9bcc1a005adfebcfcc16220b18deddb | [
"MIT-0"
] | 1,134 | 2019-03-02T14:58:34.000Z | 2021-05-15T00:57:16.000Z | test/unit/rules/resources/lmbd/test_deprecated_runtime_eol.py | tomislacker/cfn-python-lint | f209ddfef9bcc1a005adfebcfcc16220b18deddb | [
"MIT-0"
] | 1,122 | 2019-03-03T04:27:15.000Z | 2021-05-14T20:51:16.000Z | test/unit/rules/resources/lmbd/test_deprecated_runtime_eol.py | tomislacker/cfn-python-lint | f209ddfef9bcc1a005adfebcfcc16220b18deddb | [
"MIT-0"
] | 297 | 2019-03-11T09:56:57.000Z | 2021-05-14T16:41:19.000Z | """
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
from test.unit.rules import BaseRuleTestCase
from datetime import datetime
from cfnlint.rules.resources.lmbd.DeprecatedRuntimeEol import DeprecatedRuntimeEol # pylint: disable=E0401
class TestDeprecatedRuntimeEol(BaseRuleTestCase):
"""Test Lambda Deprecated Runtime usage"""
def setUp(self):
"""Setup"""
super(TestDeprecatedRuntimeEol, self).setUp()
self.collection.register(DeprecatedRuntimeEol())
self.collection.rules[0].current_date = datetime(2019, 6, 29)
def test_file_positive(self):
"""Test Positive"""
self.helper_file_positive()
def test_file_negative(self):
"""Test failure"""
self.helper_file_negative('test/fixtures/templates/bad/resources/lambda/runtimes.yaml', 2)
| 33.5 | 107 | 0.723307 |
4a22144e5631ed1acbc39175093ae35139fa1fe2 | 1,241 | py | Python | sayhitotheworld/urls.py | RUAN-ZX/sayhitothwworld | 0258ef715484d300e43b2a193b85ab7e5a01fba4 | [
"MIT"
] | 1 | 2020-07-22T10:20:32.000Z | 2020-07-22T10:20:32.000Z | sayhitotheworld/urls.py | RUAN-ZX/sayhitothwworld | 0258ef715484d300e43b2a193b85ab7e5a01fba4 | [
"MIT"
] | null | null | null | sayhitotheworld/urls.py | RUAN-ZX/sayhitothwworld | 0258ef715484d300e43b2a193b85ab7e5a01fba4 | [
"MIT"
] | null | null | null | """sayhitotheworld URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
# urls.py is configured as follows, for media file access
from django.conf.urls import url
from django.views.static import serve
from .settings import MEDIA_ROOT
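# Project routes: the admin site, the app_sayhi app, and direct serving of uploaded media files.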
urlpatterns = [
path('Ryaninnerpeace/admin/', admin.site.urls),
path('', include('app_sayhi.urls')),
url(r'^media/(?P<path>.*)$', serve, {"document_root": MEDIA_ROOT}),
]+ static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) #4
# /avatar/{{ student.studentAvatar }} nginx design :)
| 36.5 | 77 | 0.730862 |
4a22146bd64b5129993809b06da71e5ca9d301db | 1,221 | py | Python | packages/pyright-internal/src/tests/samples/typeNarrowing11.py | sransara/pyright | 4e117682c946b60f2b24fd75a07736954b21f158 | [
"MIT"
] | 1 | 2020-12-28T16:58:24.000Z | 2020-12-28T16:58:24.000Z | packages/pyright-internal/src/tests/samples/typeNarrowing11.py | sransara/pyright | 4e117682c946b60f2b24fd75a07736954b21f158 | [
"MIT"
] | null | null | null | packages/pyright-internal/src/tests/samples/typeNarrowing11.py | sransara/pyright | 4e117682c946b60f2b24fd75a07736954b21f158 | [
"MIT"
] | null | null | null | # This sample tests the type narrowing capabilities involving
# types that have enumerated literals (bool and enums).
from enum import Enum
from typing import Literal, Union
class SomeEnum(Enum):
SOME_ENUM_VALUE1 = 1
SOME_ENUM_VALUE2 = 2
SOME_ENUM_VALUE3 = 3
def func1(a: SomeEnum) -> Literal[3]:
if a == SomeEnum.SOME_ENUM_VALUE1 or a == SomeEnum.SOME_ENUM_VALUE2:
return 3
else:
return a.value
def func2(a: SomeEnum) -> Literal[3]:
if a == SomeEnum.SOME_ENUM_VALUE1:
return 3
elif a == SomeEnum.SOME_ENUM_VALUE2:
return 3
else:
return a.value
def must_be_true(a: Literal[True]):
...
def must_be_false(a: Literal[False]):
...
def func3(a: bool):
if a == True:
must_be_true(a)
else:
must_be_false(a)
def func3(a: bool):
if not a:
must_be_false(a)
else:
must_be_true(a)
class MyEnum(Enum):
ZERO = 0
ONE = 1
def func4(x: Union[MyEnum, str]):
if x is MyEnum.ZERO:
t1: Literal["Literal[MyEnum.ZERO]"] = reveal_type(x)
elif x is MyEnum.ONE:
t2: Literal["Literal[MyEnum.ONE]"] = reveal_type(x)
else:
t3: Literal["str"] = reveal_type(x)
| 19.078125 | 72 | 0.622441 |
4a221597a950d7a2b40f7cf2c71e783e2bb28f86 | 62,321 | py | Python | selfdrive/car/hyundai/values.py | barghe/Barghe_OP | 013e2f9a69352fae0c0eff62bd247247d0219452 | [
"MIT"
] | null | null | null | selfdrive/car/hyundai/values.py | barghe/Barghe_OP | 013e2f9a69352fae0c0eff62bd247247d0219452 | [
"MIT"
] | null | null | null | selfdrive/car/hyundai/values.py | barghe/Barghe_OP | 013e2f9a69352fae0c0eff62bd247247d0219452 | [
"MIT"
] | null | null | null | from cereal import car
from selfdrive.car import dbc_dict
Ecu = car.CarParams.Ecu
class CarControllerParams:
ACCEL_MAX = 2.0
ACCEL_MIN = -3.7
STEER_MAX = 384 # 409 is the max, 255 is stock
STEER_DELTA_UP = 3
STEER_DELTA_DOWN = 5
STEER_DRIVER_ALLOWANCE = 50
STEER_DRIVER_MULTIPLIER = 2
STEER_DRIVER_FACTOR = 1
class CAR:
# genesis
GENESIS = "GENESIS 2015-2016"
GENESIS_G70 = "GENESIS G70 2018"
GENESIS_G80 = "GENESIS G80 2017"
GENESIS_EQ900 = "GENESIS EQ900 2017"
GENESIS_EQ900_L = "GENESIS EQ900 LIMOUSINE"
GENESIS_G90 = "GENESIS G90 2019"
# hyundai
ELANTRA = "HYUNDAI ELANTRA LIMITED ULTIMATE 2017"
ELANTRA_2021 = "HYUNDAI ELANTRA 2021"
ELANTRA_HEV_2021 = "HYUNDAI ELANTRA HEV 2021"
ELANTRA_GT_I30 = "HYUNDAI I30 N LINE 2019 & GT 2018 DCT"
SONATA = "HYUNDAI SONATA 2020"
SONATA_HEV = "HYUNDAI SONATA HEV 2020"
SONATA21_HEV = "HYUNDAI SONATA HEV 2021"
SONATA19 = "HYUNDAI SONATA 2019"
SONATA19_HEV = "HYUNDAI SONATA 2019 HEV"
SONATA_LF_TURBO = "HYUNDAI SONATA LF TURBO"
KONA = "HYUNDAI KONA 2019"
KONA_EV = "HYUNDAI KONA EV 2019"
KONA_HEV = "HYUNDAI KONA HEV 2019"
IONIQ = "HYUNDAI IONIQ HYBRID PREMIUM 2017"
IONIQ_EV_LTD = "HYUNDAI IONIQ ELECTRIC LIMITED 2019"
IONIQ_EV_2020 = "HYUNDAI IONIQ ELECTRIC 2020"
IONIQ_PHEV = "HYUNDAI IONIQ PHEV 2020"
SANTA_FE = "HYUNDAI SANTA FE LIMITED 2019"
SANTA_FE_2022 = "HYUNDAI SANTA FE 2022"
SANTA_FE_HEV_2022 = "HYUNDAI SANTA FE HYBRID 2022"
PALISADE = "HYUNDAI PALISADE 2020"
VELOSTER = "HYUNDAI VELOSTER 2019"
GRANDEUR_IG = "HYUNDAI GRANDEUR IG 2017"
GRANDEUR_IG_HEV = "HYUNDAI GRANDEUR IG HEV 2019"
GRANDEUR_IG_FL = "HYUNDAI GRANDEUR IG FL 2020"
GRANDEUR_IG_FL_HEV = "HYUNDAI GRANDEUR IG FL HEV 2020"
TUCSON_TL_SCC = "HYUNDAI TUCSON TL SCC 2017"
# kia
FORTE = "KIA FORTE E 2018"
K5 = "KIA K5 2019 & 2016"
K5_2021 = "KIA K5 2021"
K5_HEV = "KIA K5 HYBRID 2017 & SPORTS 2019"
SPORTAGE = "KIA SPORTAGE S 2020"
SORENTO = "KIA SORENTO GT LINE 2018"
STINGER = "KIA STINGER GT2 2018"
NIRO_EV = "KIA NIRO EV 2020 PLATINUM"
NIRO_HEV = "KIA NIRO HEV 2018"
NIRO_HEV_2021 = "KIA NIRO HEV 2021"
CEED = "KIA CEED 2019"
SELTOS = "KIA SELTOS 2021"
MOHAVE = "KIA MOHAVE 2019"
K7 = "KIA K7 2016-2019"
K7_HEV = "KIA K7 HEV 2016-2019"
K9 = "KIA K9 2016-2019"
class Buttons:
NONE = 0
RES_ACCEL = 1
SET_DECEL = 2
GAP_DIST = 3
CANCEL = 4
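# CAN fingerprints: {message ID: payload length} pairs used to identify each supported model.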
FINGERPRINTS = {
# genesis
CAR.GENESIS: [{
67: 8, 68: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 7, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 5, 897: 8, 902: 8, 903: 6, 916: 8, 1024: 2, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1287: 4, 1292: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1334: 8, 1335: 8, 1342: 6, 1345: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1378: 4, 1384: 5, 1407: 8, 1419: 8, 1427: 6, 1434: 2, 1456: 4
},{
67: 8, 68: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 7, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 5, 897: 8, 902: 8, 903: 6, 916: 8, 1024: 2, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1281: 3, 1287: 4, 1292: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1334: 8, 1335: 8, 1345: 8, 1363: 8, 1369: 8, 1370: 8, 1378: 4, 1379: 8, 1384: 5, 1407: 8, 1419: 8, 1427: 6, 1434: 2, 1456: 4
},{
67: 8, 68: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 7, 593: 8, 608: 8, 688: 5, 809: 8, 854: 7, 870: 7, 871: 8, 872: 5, 897: 8, 902: 8, 903: 6, 912: 7, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1268: 8, 1280: 1, 1281: 3, 1287: 4, 1292: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1334: 8, 1335: 8, 1345: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1378: 4, 1384: 5, 1407: 8, 1419: 8, 1427: 6, 1434: 2, 1437: 8, 1456: 4
},{
67: 8, 68: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 7, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 5, 897: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1287: 4, 1292: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1334: 8, 1335: 8, 1345: 8, 1363: 8, 1369: 8, 1370: 8, 1378: 4, 1379: 8, 1384: 5, 1407: 8, 1425: 2, 1427: 6, 1437: 8, 1456: 4
},{
67: 8, 68: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 7, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 5, 897: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1287: 4, 1292: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1334: 8, 1335: 8, 1345: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1378: 4, 1384: 5, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1437: 8, 1456: 4
}],
CAR.GENESIS_G70: [{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 544: 8, 576: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832:8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1168: 7, 1170: 8, 1173:8, 1184: 8, 1186: 2, 1191: 2, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1379: 8, 1384: 8, 1407: 8, 1419:8, 1427: 6, 1456: 4, 1470: 8, 1988: 8, 1996: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8, 2015: 8
}],
CAR.GENESIS_G80: [{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1024: 2, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1191: 2, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1434: 2, 1456: 4, 1470: 8
},{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 359: 8, 544: 8, 546: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1281: 3, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1434: 2, 1437: 8, 1456: 4, 1470: 8
},{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 8, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1193: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1437: 8, 1456: 4, 1470: 8
}],
CAR.GENESIS_EQ900: [{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 359: 8, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1281: 3, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1434: 2, 1456: 4, 1470: 8, 1988: 8, 2000: 8, 2003: 8, 2004: 8, 2005: 8, 2008: 8, 2011: 8, 2012: 8, 2013: 8
},{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 544: 8, 545: 8, 546: 8, 548: 8, 549: 8, 550: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1434: 2, 1456: 4, 1470: 8
}],
CAR.GENESIS_EQ900_L: [{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 359: 8, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1281: 3, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1434: 2, 1456: 4, 1470: 8
}],
CAR.GENESIS_G90: [{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 359: 8, 544: 8, 549: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1113: 8, 1136: 8, 1141: 8, 1142: 8, 1143: 8, 1150: 4, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1180: 8, 1184: 8, 1186: 2, 1191: 2, 1210: 8, 1265: 4, 1280: 1, 1281: 3, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1434: 2, 1456: 4, 1470: 8, 2003: 8, 2004: 8, 2011: 8, 2012: 8
}],
# hyundai
CAR.ELANTRA: [{
66: 8, 67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 897: 8, 832: 8, 899: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1170: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1314: 8, 1322: 8, 1345: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1415: 8, 1419: 8, 1425: 2, 1427: 6, 1440: 8, 1456: 4, 1472: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8, 1532: 5, 2001: 8, 2003: 8, 2004: 8, 2009: 8, 2012: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8
}],
CAR.ELANTRA_GT_I30: [{
66: 8, 67: 8, 68: 8, 127: 8, 128: 8, 129: 8, 273: 8, 274: 8, 275: 8, 339: 8, 354: 3, 356: 4, 399: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1193: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1356: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1414: 3, 1415: 8, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8, 1952: 8, 1960: 8, 1988: 8, 2000: 8, 2001: 8, 2005: 8, 2008: 8, 2009: 8, 2013: 8, 2017: 8, 2025: 8
},{
66: 8, 67: 8, 68: 8, 127: 8, 128: 8, 129: 8, 273: 8, 274: 8, 275: 8, 339: 8, 354: 3, 356: 4, 399: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 897: 8, 899: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1356: 8, 1363: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1414: 3, 1415: 8, 1419: 8, 1440: 8, 1456: 4, 1470: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8
},{
66: 8, 67: 8, 68: 8, 127: 8, 128: 8, 129: 8, 273: 8, 274: 8, 275: 8, 339: 8, 354: 3, 356: 4, 399: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 897: 8, 899: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1356: 8, 1363: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1486: 8, 1487: 8, 1491: 8, 1960: 8, 1990: 8, 1998: 8, 2000: 8, 2001: 8, 2004: 8, 2005: 8, 2008: 8, 2009: 8, 2012: 8, 2013: 8, 2015: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8
}],
CAR.SONATA: [{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 545: 8, 546: 8, 547: 8, 548: 8, 549: 8, 550: 8, 576: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 8, 865: 8, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 908: 8, 909: 8, 912: 7, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1089: 5, 1107: 5, 1108: 8, 1114: 8, 1136: 8, 1145: 8, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1170: 8, 1173: 8, 1180: 8, 1183: 8, 1184: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1268: 8, 1280: 8, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1330: 8, 1339: 8, 1342: 6, 1343: 8, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1394: 8, 1407: 8, 1419: 8, 1427: 6, 1446: 8, 1456: 4, 1460: 8, 1470: 8, 1485: 8, 1504: 3
}],
CAR.SONATA_HEV: [{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 548: 8, 576: 8, 593: 8, 688: 6, 757: 2, 832: 8, 865: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1102: 8, 1108: 8, 1114: 8, 1136: 6, 1138: 5, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1173: 8, 1180: 8, 1184: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1268: 8, 1280: 8, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1330: 8, 1339: 8, 1342: 6, 1343: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1446: 8, 1448: 8, 1456: 4, 1460: 8, 1470: 8, 1476: 8, 1535: 8
},{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 6, 757: 2, 832: 8, 865: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1102: 8, 1108: 8, 1114: 8, 1136: 6, 1138: 5, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1173: 8, 1180: 8, 1184: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1268: 8, 1280: 8, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1330: 8, 1339: 8, 1342: 6, 1343: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1446: 8, 1448: 8, 1456: 4, 1460: 8, 1470: 8, 1476: 8, 1535: 8
},{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 513: 8, 514: 8, 515: 8, 516: 8, 517: 8, 522: 8, 523: 8, 524: 8, 525: 8, 544: 8, 568: 8, 569: 8, 570: 8, 571: 8, 572: 8, 573: 8, 574: 8, 575: 8, 576: 8, 577: 8, 578: 8, 579: 8, 580: 8, 581: 8, 582: 8, 583: 8, 584: 8, 585: 8, 586: 8, 587: 8, 588: 8, 589: 8, 590: 8, 591: 8, 592: 8, 593: 8, 594: 8, 595: 8, 596: 8, 597: 8, 602: 8, 603: 8, 604: 8, 605: 8, 606: 8, 614: 8, 615: 8, 616: 8, 617: 8, 618: 8, 621: 8, 622: 8, 623: 8, 688: 6, 757: 2, 832: 8, 865: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1102: 8, 1108: 8, 1114: 8, 1136: 6, 1138: 5, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1173: 8, 1180: 8, 1184: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1268: 8, 1280: 8, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1330: 8, 1339: 8, 1342: 6, 1343: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1446: 8, 1448: 8, 1456: 4, 1460: 8, 1470: 8, 1476: 8, 1517: 8, 1518: 8, 1519: 8, 1535: 8
}],
CAR.SONATA19: [{
66: 8, 67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 447: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1253: 8, 1254: 8, 1255: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1314: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1397: 8, 1407: 8, 1415: 8, 1419: 8, 1425: 2, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1472: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8, 1532: 5, 2000: 8, 2001: 8, 2004: 8, 2005: 8, 2008: 8, 2009: 8, 2012: 8, 2013: 8, 2014: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8
}],
CAR.SONATA19_HEV: [{
68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 7, 593: 8, 688: 5, 832: 7, 881: 8, 882: 8, 897: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1151: 6, 1168: 7, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1345: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8
}],
CAR.SONATA_LF_TURBO: [{
66: 8, 67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 897: 8, 899: 8, 902: 8, 903: 6, 912: 7, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1312: 8, 1314: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1414: 3, 1415: 8, 1419: 8, 1427: 6, 1440: 8, 1456: 4, 1460: 8, 1470: 8, 1471: 8, 1472: 8, 1491: 8, 1530: 8, 1532: 5, 2016: 8, 2024: 8
},
{
64: 8, 66: 8, 67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 447: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1253: 8, 1254: 8, 1255: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1314: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1371: 8, 1407: 8, 1414: 3, 1415: 8, 1419: 8, 1425: 2, 1427: 6, 1440: 8, 1456: 4, 1460: 8, 1470: 8, 1472: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8, 2015: 8, 2024: 8
},
{
64: 8, 66: 8, 67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 447: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1253: 8, 1254: 8, 1255: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1314: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1414: 3, 1415: 8, 1419: 8, 1425: 2, 1427: 6, 1440: 8, 1456: 4, 1460: 8, 1470: 8, 1472: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8, 1905: 8, 1913: 8, 1990: 8, 1998: 8, 2006: 8, 2014: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8
}
],
CAR.KONA: [{
    67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 354: 3, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 909: 8, 916: 8, 1040: 8, 1078: 4, 1107: 5, 1136: 8, 1156: 8, 1170: 8, 1173: 8, 1191: 2, 1265: 4, 1280: 1, 1287: 4, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1384: 8, 1394: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 2004: 8, 2009: 8, 2012: 8
},{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 354: 3, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 909: 8, 916: 8, 1040: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1170: 8, 1173: 8, 1186: 2, 1191: 2, 1265: 4, 1280: 1, 1287: 4, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1384: 8, 1394: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1988: 8, 1990: 8, 1998: 8, 2001: 8, 2004: 8, 2009: 8, 2012: 8, 2015: 8
}],
CAR.KONA_EV: [{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 549: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1307: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1378: 4, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8
},{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1157: 4, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1378: 4, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8
},{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1157: 4, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1378: 4, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8
},{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1157: 4, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8
},{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 547: 8, 548: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1157: 4, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1193: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1307: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1378: 4, 1379: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8
},{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 547: 8, 548: 8, 549: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1157: 4, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1307: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1378: 4, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1535: 8, 1988: 8, 1996: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8
},{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1157: 4, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8
}],
CAR.KONA_HEV: [{
68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 547: 8, 548: 8, 549: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1138: 4, 1151: 6, 1155: 8, 1157: 4, 1164: 8, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1193: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1378: 8, 1379: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8
},{
68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 547: 8, 548: 8, 549: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1138: 4, 1151: 6, 1155: 8, 1157: 4, 1164: 8, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1193: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1378: 8, 1379: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8, 1988: 8, 1996: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8
}],
CAR.IONIQ: [{
    68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8
},{
    68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8
},{
68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8
}],
CAR.IONIQ_EV_LTD: [{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 7, 546: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1164: 8, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1379: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8, 2015: 8
},{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 7, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1168: 7, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1425: 2, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1507: 8, 1535: 8
}],
CAR.IONIQ_EV_2020: [{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 524: 8, 544: 7, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1164: 8, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1379: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8, 1988: 8, 1996: 8, 2000: 8, 2004: 8, 2005: 8, 2008: 8, 2012: 8, 2013: 8
}],
CAR.SANTA_FE: [{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1227: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1379: 8, 1384: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1456: 4, 1470: 8
},{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 6, 764: 8, 809: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1155: 8, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1180: 8, 1183: 8, 1186: 2, 1227: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1988: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8
},{
67: 8, 68: 8, 80: 4, 160: 8, 161: 8, 272: 8, 288: 4, 339: 8, 356: 8, 357: 8, 399: 8, 544: 8, 608: 8, 672: 8, 688: 5, 704: 1, 790: 8, 809: 8, 848: 8, 880: 8, 898: 8, 900: 8, 901: 8, 904: 8, 1056: 8, 1064: 8, 1065: 8, 1072: 8, 1075: 8, 1087: 8, 1088: 8, 1151: 8, 1200: 8, 1201: 8, 1232: 4, 1264: 8, 1265: 8, 1266: 8, 1296: 8, 1306: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1348: 8, 1349: 8, 1369: 8, 1370: 8, 1371: 8, 1407: 8, 1415: 8, 1419: 8, 1440: 8, 1442: 4, 1461: 8, 1470: 8
},{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1168: 7, 1170: 8, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1227: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1379: 8, 1384: 8, 1407: 8, 1427: 6, 1456: 4, 1470: 8
},{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 912: 7, 1040: 8, 1042: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1180: 8, 1183: 8, 1191: 2, 1227: 8, 1260: 8, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1456: 4, 1470: 8, 1628: 8, 1629: 8, 1630: 8, 1631: 8, 1674: 8, 1675: 8, 1676: 8, 1677: 8, 1791: 8
},{
    67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 547: 8, 548: 8, 549: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1180: 8, 1183: 8, 1186: 2, 1191: 2, 1210: 8, 1227: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1414: 3, 1419: 8, 1425: 2, 1427: 6, 1456: 4, 1470: 8
}],
CAR.PALISADE: [{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 549: 8, 576: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1123: 8, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1280: 8, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 2000: 8, 2005: 8, 2008: 8
}],
CAR.VELOSTER: [{
64: 8, 66: 8, 67: 8, 68: 8, 127: 8, 128: 8, 129: 8, 273: 8, 274: 8, 275: 8, 339: 8, 354: 3, 356: 4, 399: 8, 512: 6, 544: 8, 558: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1170: 8, 1181: 5, 1186: 2, 1191: 2, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1356: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1378: 4, 1407: 8, 1414: 3, 1415: 8, 1419: 8, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8, 1532: 5, 1872: 8, 1988: 8, 1996: 8, 2000: 8, 2001: 8, 2004: 8, 2008: 8, 2009: 8, 2012: 8, 2015: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8
}],
CAR.GRANDEUR_IG: [{
    67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1185: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1397: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1456: 4, 1470: 8
},{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 547: 8, 549: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1185: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8
},{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1185: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1456: 4, 1470: 8
},{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1185: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1456: 4, 1470: 8
},{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1156: 8, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1185: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1456: 4, 1470: 8
}],
CAR.GRANDEUR_IG_HEV: [
{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 549: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1151: 6, 1156: 8, 1157: 4, 1168: 7, 1173: 8, 1185: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8, 1988: 8, 1996: 8, 2000: 8, 2004: 8, 2005: 8, 2008: 8, 2012: 8, 2013: 8
},
{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1151: 6, 1156: 8, 1157: 4, 1168: 7, 1173: 8, 1185: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8
},
{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 546: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1151: 6, 1156: 8, 1157: 4, 1168: 7, 1173: 8, 1185: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1379: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8
}],
CAR.GRANDEUR_IG_FL: [
{67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 516: 8, 524: 8, 528: 8, 532: 8, 544: 8, 576: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 8, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1170: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 8, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8},
],
CAR.GRANDEUR_IG_FL_HEV: [
{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 865: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1108: 8, 1136: 6, 1138: 5, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8
},{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 516: 8, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 865: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1108: 8, 1136: 6, 1138: 5, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8
}
],
CAR.TUCSON_TL_SCC: [{
64: 8, 66: 8, 67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 512: 6, 544: 8, 608: 8, 790: 8, 809: 8, 832: 8, 899: 8, 902: 8, 903: 8, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1186: 2, 1191: 2, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1312: 8, 1314: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1472: 8, 1491: 8, 1530: 8
},{
64: 8, 66: 8, 67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 512: 6, 544: 8, 608: 8, 790: 8, 809: 8, 832: 8, 899: 8, 902: 8, 903: 8, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1186: 2, 1191: 2, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1312: 8, 1314: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1472: 8, 1491: 8, 1530: 8, 2015: 8, 2024: 8, 2025: 8
}],
# kia
CAR.FORTE: [{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1078: 4, 1107: 5, 1136: 8, 1156: 8, 1170: 8, 1173: 8, 1191: 2, 1225: 8, 1265: 4, 1280: 4, 1287: 4, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1384: 8, 1394: 8, 1407: 8, 1427: 6, 1456: 4, 1470: 8
}],
CAR.K5: [{
64: 8, 66: 8, 67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 447: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 6, 909: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1186: 2, 1191: 2, 1253: 8, 1254: 8, 1255: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1414: 3, 1415: 8, 1419: 8, 1425: 2, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1472: 8, 1486: 8, 1487: 8, 1491: 8, 1530: 8, 1532: 5, 1952: 8, 1960: 8, 1988: 8, 1996: 8, 2001: 8, 2004: 8, 2008: 8, 2009: 8, 2012: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8
},{
64: 8, 66: 8, 67: 8, 68: 8, 127: 8, 128: 8, 129: 8, 273: 8, 274: 8, 275: 8, 339: 8, 354: 3, 356: 4, 399: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 897: 8, 899: 8, 902: 8, 903: 6, 912: 7, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1265: 4, 1268: 8, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1356: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1472: 8, 1491: 8, 1492: 8
},{
64: 8, 66: 8, 67: 8, 68: 8, 127: 8, 128: 8, 129: 8, 273: 8, 274: 8, 275: 8, 339: 8, 354: 3, 356: 4, 399: 8, 512: 6, 544: 8, 608: 8, 790: 8, 809: 8, 832: 8, 899: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1356: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1415: 8, 1419: 8, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1472: 8, 1491: 8, 1492: 8, 1905: 8, 1913: 8, 2001: 8, 2009: 8, 2015: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8
},{
64: 8, 66: 8, 67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 512: 6, 544: 8, 608: 8, 790: 8, 809: 8, 832: 8, 899: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1236: 2, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1366: 8, 1367: 8, 1369: 8, 1371: 8, 1407: 8, 1415: 8, 1419: 8, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1472: 8, 1491: 8, 1492: 8, 2015: 8, 2024: 8, 2025: 8
},{
64: 8, 66: 8, 67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 512: 6, 544: 8, 608: 8, 625: 8, 790: 8, 809: 8, 832: 8, 899: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1236: 2, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1366: 8, 1367: 8, 1369: 8, 1371: 8, 1407: 8, 1415: 8, 1419: 8, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1472: 8, 1491: 8, 1492: 8, 2015: 8, 2024: 8, 2025: 8
},{
64: 8, 66: 8, 67: 8, 68: 8, 127: 8, 128: 8, 129: 8, 273: 8, 274: 8, 275: 8, 339: 8, 354: 3, 356: 4, 399: 8, 447: 8, 512: 6, 544: 8, 558: 8, 593: 8, 608: 8, 640: 8, 688: 5, 790: 8, 809: 8, 832: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 6, 909: 8, 912: 7, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1151: 6, 1168: 7, 1170: 8, 1186: 2, 1191: 2, 1253: 8, 1254: 8, 1255: 8, 1265: 4, 1268: 8, 1280: 1, 1282: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1356: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1414: 3, 1415: 8, 1419: 8, 1425: 2, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1472: 8, 1486: 8, 1487: 8, 1491: 8, 1492: 8, 1530: 8, 1532: 5, 1792: 8, 1872: 8, 1937: 8, 1953: 8, 1968: 8, 1988: 8, 1996: 8, 2000: 8, 2001: 8, 2004: 8, 2008: 8, 2009: 8, 2012: 8, 2015: 8, 2016: 8, 2017: 8, 2024: 8, 2025: 8
}],
CAR.K5_HEV: [{
68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 6, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1151: 6, 1168: 7, 1173: 8, 1236: 2, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8
},{
68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 909: 8, 912: 7, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1151: 6, 1168: 7, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1407: 8, 1419: 8, 1420: 8, 1425: 2, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8
}],
CAR.SPORTAGE: [{
67: 8, 68: 8, 127: 8, 273: 8, 274: 8, 275: 8, 339: 8, 356: 4, 399: 8, 447: 8, 512: 6, 544: 8, 593: 8, 608: 8, 688: 5, 790: 8, 809: 8, 832: 8, 884: 8, 897: 8, 899: 8, 902: 8, 903: 6, 909: 8, 916: 8, 1040: 8, 1078: 4, 1170: 8, 1191: 2, 1253: 8, 1254: 8, 1255: 8, 1265: 4, 1280: 1, 1282: 4, 1287: 4, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1349: 8, 1351: 8, 1353: 8, 1363: 8, 1365: 8, 1366: 8, 1367: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1440: 8, 1456: 4, 1470: 8, 1472: 8, 1486: 8, 1487: 8, 1491: 8, 1492: 8, 1530: 8
}],
CAR.SORENTO: [{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1370: 8, 1371: 8, 1384: 8, 1407: 8, 1411: 8, 1419: 8, 1425: 2, 1427: 6, 1444: 8, 1456: 4, 1470: 8, 1489: 1
}],
CAR.STINGER: [{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 359: 8, 544: 8, 576: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1281: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1379: 8, 1384: 8, 1407: 8, 1419: 8, 1425: 2, 1427: 6, 1456: 4, 1470: 8
},{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 359: 8, 544: 8, 576: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1157: 4, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1281: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1437: 8, 1456: 4, 1470: 8
},{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 358: 6, 359: 8, 544: 8, 546: 8, 576: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1157: 4, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1265: 4, 1280: 1, 1281: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1437: 8, 1456: 4, 1470: 8
}],
CAR.NIRO_EV: [{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 516: 8, 544: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1168: 7, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1193: 8, 1225: 8, 1260: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8, 1990: 8, 1998: 8
},{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 8, 1151: 6, 1157: 4, 1168: 7, 1173: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1426: 8, 1427: 6, 1429: 8, 1430: 8, 1456: 4, 1470: 8, 1473: 8, 1507: 8, 1535: 8, 1988: 8, 1996: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8, 2015: 8
}],
CAR.NIRO_HEV: [{68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 832: 8, 881: 8, 882: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8
},{
127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1173: 8, 1225: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1535: 8
}],
CAR.CEED: [{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 354: 3, 356: 4, 544: 8, 576: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1155: 8, 1157: 4, 1168: 7, 1170: 8, 1173: 8, 1183: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1384: 8, 1394: 8, 1407: 8, 1414: 3, 1427: 6, 1456: 4, 2015: 8
}],
CAR.SELTOS: [
{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 524: 8, 544: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 8, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 905: 8, 909: 8, 910: 5, 911: 5, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1102: 8, 1107: 5, 1114: 8, 1136: 8, 1145: 8, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1170: 8, 1173: 8, 1186: 2, 1191: 2, 1225: 8, 1265: 4, 1280: 8, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1379: 8, 1384: 8, 1394: 8, 1407: 8, 1419: 8, 1427: 6, 1446: 8, 1456: 4, 1470: 8, 1485: 8, 1988: 8, 1996: 8, 2000: 8, 2004: 8, 2008: 8, 2012: 8, 2015: 8
},
{67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 354: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 6, 764: 8, 809: 8, 832: 8, 854: 8, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 905: 8, 909: 8, 910: 5, 911: 5, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1114: 8, 1136: 8, 1145: 8, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1170: 8, 1173: 8, 1186: 2, 1188: 8, 1191: 2, 1225: 8, 1265: 4, 1280: 8, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1378: 8, 1379: 8, 1384: 8, 1394: 8, 1407: 8, 1414: 3, 1419: 8, 1427: 6, 1446: 8, 1456: 4, 1470: 8, 1485: 8
},
],
CAR.MOHAVE: [{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 8, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1123: 8, 1136: 8, 1145: 8, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 8, 1170: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1280: 8, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1479: 8
}],
CAR.K7: [{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 549: 8, 608: 8, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1397: 8, 1407: 8, 1419: 8, 1427: 6, 1444: 8, 1456: 4, 1470: 8
},{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 608: 8, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1444: 8, 1456: 4, 1470: 8
},{
67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 549: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1157: 4, 1162: 4, 1168: 7, 1170: 8, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1444: 8, 1456: 4, 1470: 8
  },
{67: 8, 68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1156: 8, 1168: 7, 1170: 8, 1173: 8, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1444: 8, 1456: 4, 1470: 8
}
],
CAR.K7_HEV: [{
68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 832: 8, 865: 8, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1078: 4, 1096: 8, 1102: 8, 1108: 8, 1136: 6, 1138: 5, 1151: 8, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1210: 8, 1227: 8, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1343: 8, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1379: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8
}],
CAR.K9: [{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1056: 8, 1057: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1184: 8, 1186: 2, 1191: 2, 1227: 8, 1265: 4, 1280: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1379: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8
},{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 593: 8, 608: 8, 688: 5, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 916: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1180: 8, 1184: 8, 1186: 2, 1191: 2, 1210: 8, 1227: 8, 1265: 4, 1280: 4, 1281: 4, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 4, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1434: 2, 1456: 4, 1470: 8
}],
}
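# Each fingerprint above maps a CAN message address to its payload length in bytes; the car is
# identified by finding the candidate whose map is consistent with the messages observed on the
# bus. FW_VERSIONS below is left empty, so firmware-query fingerprinting is not used here.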
FW_VERSIONS = {}
CHECKSUM = {
"crc8": [CAR.SANTA_FE, CAR.SONATA, CAR.PALISADE, CAR.SONATA_HEV, CAR.SONATA21_HEV, CAR.SELTOS, CAR.ELANTRA_2021,
CAR.ELANTRA_HEV_2021, CAR.SANTA_FE_HEV_2022, CAR.K5_2021],
"6B": [CAR.SORENTO, CAR.GENESIS, CAR.SANTA_FE_2022],
}
FEATURES = {
# Use Cluster for Gear Selection, rather than Transmission
"use_cluster_gears": {CAR.ELANTRA, CAR.KONA, CAR.ELANTRA_GT_I30, CAR.K7, CAR.GRANDEUR_IG, CAR.GRANDEUR_IG_FL},
# Use TCU Message for Gear Selection
"use_tcu_gears": {CAR.K5, CAR.SONATA19, CAR.VELOSTER, CAR.SONATA_LF_TURBO, CAR.TUCSON_TL_SCC},
# Use E_GEAR Message for Gear Selection
"use_elect_gears": {CAR.K5_HEV, CAR.IONIQ_EV_LTD, CAR.KONA_EV, CAR.KONA_HEV, CAR.SONATA_HEV, CAR.SONATA21_HEV, CAR.SONATA21_HEV, CAR.NIRO_EV, CAR.K7_HEV,
CAR.GRANDEUR_IG_HEV, CAR.GRANDEUR_IG_FL_HEV, CAR.IONIQ_EV_2020, CAR.IONIQ_PHEV, CAR.ELANTRA_HEV_2021,
CAR.NIRO_HEV, CAR.NIRO_HEV_2021, CAR.SANTA_FE_HEV_2022},
# send LFA MFA message for new HKG models
"send_lfa_mfa": {CAR.SONATA, CAR.PALISADE, CAR.SONATA_HEV, CAR.SONATA21_HEV, CAR.SANTA_FE, CAR.NIRO_EV, CAR.GRANDEUR_IG_FL, CAR.GRANDEUR_IG_FL_HEV,
CAR.KONA_EV, CAR.KONA_HEV, CAR.TUCSON_TL_SCC, CAR.ELANTRA_2021, CAR.ELANTRA_HEV_2021,
CAR.K9, CAR.GENESIS_G90, CAR.NIRO_HEV_2021, CAR.SANTA_FE_2022, CAR.SANTA_FE_HEV_2022, CAR.K5_2021,
CAR.SELTOS, CAR.MOHAVE},
# these cars use the FCA11 message for the AEB and FCW signals, all others use SCC12
"use_fca": {CAR.SONATA, CAR.ELANTRA, CAR.ELANTRA_GT_I30, CAR.STINGER, CAR.IONIQ_EV_2020, CAR.IONIQ_PHEV, CAR.KONA, CAR.KONA_EV, CAR.FORTE,
CAR.PALISADE, CAR.GENESIS_G70, CAR.SANTA_FE, CAR.SELTOS, CAR.ELANTRA_2021, CAR.ELANTRA_HEV_2021,
CAR.K9, CAR.GENESIS_G90, CAR.SANTA_FE_2022, CAR.SANTA_FE_HEV_2022, CAR.K5_2021, CAR.MOHAVE},
"has_scc13": {CAR.PALISADE, CAR.NIRO_HEV, CAR.NIRO_HEV_2021, CAR.K9, CAR.GENESIS_G90, CAR.K5_2021, CAR.MOHAVE},
"has_scc14": {CAR.PALISADE, CAR.NIRO_HEV, CAR.NIRO_HEV_2021, CAR.K9, CAR.GENESIS_G90, CAR.K5_2021, CAR.MOHAVE},
}
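# Editor's sketch (not part of the upstream file): FEATURES values are plain sets of car
# identifiers, so capability checks reduce to membership tests. A minimal helper, assuming a
# fingerprinted `candidate` name such as CAR.SONATA:
def has_feature(candidate, feature):
    # e.g. has_feature(CAR.SONATA, "use_fca") is True, so the Sonata builds its AEB/FCW
    # signals from the FCA11 message instead of SCC12.
    return feature in FEATURES and candidate in FEATURES[feature]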
HYBRID_CAR = {CAR.K5_HEV, CAR.KONA_HEV, CAR.NIRO_HEV, CAR.NIRO_HEV_2021, CAR.SONATA_HEV, CAR.SONATA21_HEV, CAR.SONATA19_HEV, CAR.K7_HEV,
CAR.GRANDEUR_IG_HEV, CAR.GRANDEUR_IG_FL_HEV, CAR.IONIQ_PHEV, CAR.ELANTRA_HEV_2021, CAR.IONIQ,
CAR.SANTA_FE_HEV_2022}
EV_CAR = {CAR.IONIQ_EV_LTD, CAR.IONIQ_EV_2020, CAR.KONA_EV, CAR.NIRO_EV}
EV_HYBRID_CAR = EV_CAR | HYBRID_CAR
DBC = {
# genesis
CAR.GENESIS: dbc_dict('hyundai_kia_generic', None),
CAR.GENESIS_G70: dbc_dict('hyundai_kia_generic', None),
CAR.GENESIS_G80: dbc_dict('hyundai_kia_generic', None),
CAR.GENESIS_EQ900: dbc_dict('hyundai_kia_generic', None),
CAR.GENESIS_EQ900_L: dbc_dict('hyundai_kia_generic', None),
CAR.GENESIS_G90: dbc_dict('hyundai_kia_generic', None),
# hyundai
CAR.ELANTRA: dbc_dict('hyundai_kia_generic', None),
CAR.ELANTRA_2021: dbc_dict('hyundai_kia_generic', None),
CAR.ELANTRA_HEV_2021: dbc_dict('hyundai_kia_generic', None),
CAR.ELANTRA_GT_I30: dbc_dict('hyundai_kia_generic', None),
CAR.SONATA: dbc_dict('hyundai_kia_generic', None),
CAR.SONATA_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.SONATA21_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.SONATA19: dbc_dict('hyundai_kia_generic', None),
CAR.SONATA19_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.SONATA_LF_TURBO: dbc_dict('hyundai_kia_generic', None),
CAR.KONA: dbc_dict('hyundai_kia_generic', None),
CAR.KONA_EV: dbc_dict('hyundai_kia_generic', None),
CAR.KONA_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.IONIQ: dbc_dict('hyundai_kia_generic', None),
CAR.IONIQ_EV_LTD: dbc_dict('hyundai_kia_generic', None),
CAR.IONIQ_PHEV: dbc_dict('hyundai_kia_generic', None),
CAR.IONIQ_EV_2020: dbc_dict('hyundai_kia_generic', None),
CAR.SANTA_FE: dbc_dict('hyundai_kia_generic', None),
CAR.SANTA_FE_2022: dbc_dict('hyundai_kia_generic', None),
CAR.SANTA_FE_HEV_2022: dbc_dict('hyundai_kia_generic', None),
CAR.PALISADE: dbc_dict('hyundai_kia_generic', None),
CAR.VELOSTER: dbc_dict('hyundai_kia_generic', None),
CAR.GRANDEUR_IG: dbc_dict('hyundai_kia_generic', None),
CAR.GRANDEUR_IG_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.GRANDEUR_IG_FL: dbc_dict('hyundai_kia_generic', None),
CAR.GRANDEUR_IG_FL_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.TUCSON_TL_SCC: dbc_dict('hyundai_kia_generic', None),
# kia
CAR.FORTE: dbc_dict('hyundai_kia_generic', None),
CAR.K5: dbc_dict('hyundai_kia_generic', None),
CAR.K5_2021: dbc_dict('hyundai_kia_generic', None),
CAR.K5_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.SPORTAGE: dbc_dict('hyundai_kia_generic', None),
CAR.SORENTO: dbc_dict('hyundai_kia_generic', None),
CAR.STINGER: dbc_dict('hyundai_kia_generic', None),
CAR.NIRO_EV: dbc_dict('hyundai_kia_generic', None),
CAR.NIRO_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.NIRO_HEV_2021: dbc_dict('hyundai_kia_generic', None),
CAR.CEED: dbc_dict('hyundai_kia_generic', None),
CAR.SELTOS: dbc_dict('hyundai_kia_generic', None),
CAR.MOHAVE: dbc_dict('hyundai_kia_generic', None),
CAR.K7: dbc_dict('hyundai_kia_generic', None),
CAR.K7_HEV: dbc_dict('hyundai_kia_generic', None),
CAR.K9: dbc_dict('hyundai_kia_generic', None),
}
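# Driver steering-torque threshold: readings above this value (in the units of the driver
# torque signal on the CAN bus) are treated as a manual steering override by the car state
# code that consumes this constant.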
STEER_THRESHOLD = 150
def main():
for member, value in vars(CAR).items():
if not member.startswith("_"):
print(value)
if __name__ == "__main__":
main()
| 143.596774 | 1,136 | 0.552591 |
4a2215c14c6e64aef1667e271eb0758b75e1dc1f | 1,134 | py | Python | exercises/0138-CopyListWithRandomPointer/copy_list_with_random_pointer.py | tqa236/leetcode-solutions | 556147981c43509a6e8a7f59f138d1ab027ebfd1 | [
"MIT"
] | 1 | 2020-09-26T15:09:25.000Z | 2020-09-26T15:09:25.000Z | exercises/0138-CopyListWithRandomPointer/copy_list_with_random_pointer.py | tqa236/leetcode-solutions | 556147981c43509a6e8a7f59f138d1ab027ebfd1 | [
"MIT"
] | null | null | null | exercises/0138-CopyListWithRandomPointer/copy_list_with_random_pointer.py | tqa236/leetcode-solutions | 556147981c43509a6e8a7f59f138d1ab027ebfd1 | [
"MIT"
] | null | null | null | class Node:
def __init__(self, x: int, next: "Node" = None, random: "Node" = None):
self.val = int(x)
self.next = next
self.random = random
class Solution:
def copyRandomList(self, head: "Node") -> "Node":
if not head:
return None
node = head
new_node = None
node_map = {}
new_node_map = {}
while node:
if not new_node:
new_node = Node(node.val, None, None)
new_head = new_node
new_node_map[node] = new_node
if node.random in new_node_map:
new_node.random = new_node_map[node.random]
if node.random not in node_map:
node_map[node.random] = [new_node]
else:
node_map[node.random].append(new_node)
if node in node_map:
for random_node in node_map[node]:
random_node.random = new_node
node = node.next
if node:
new_node.next = Node(node.val, None, None)
new_node = new_node.next
return new_head
| 32.4 | 75 | 0.518519 |
4a221682ad63daa2939630d0a95dbd8f1493eb30 | 11,547 | py | Python | modeling/backbone/xception.py | PenG-hy/DeepLabv3 | 3eda6c5b395053324251d963477c5fd26ae046dd | [
"MIT"
] | 4 | 2021-12-22T01:52:33.000Z | 2022-03-29T07:46:23.000Z | modeling/backbone/xception.py | PenG-hy/DeepLabv3 | 3eda6c5b395053324251d963477c5fd26ae046dd | [
"MIT"
] | null | null | null | modeling/backbone/xception.py | PenG-hy/DeepLabv3 | 3eda6c5b395053324251d963477c5fd26ae046dd | [
"MIT"
] | null | null | null | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from ..sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
def fixed_padding(inputs, kernel_size, dilation):
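    # Pad so that a stride-1 convolution with this (possibly dilated) kernel preserves the
    # spatial size ("SAME"-style padding): the effective kernel covers
    # kernel_size + (kernel_size - 1) * (dilation - 1) positions, and the required total
    # padding is split as evenly as possible between the leading and trailing sides.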
kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
padded_inputs = F.pad(inputs, (pad_beg, pad_end, pad_beg, pad_end))
return padded_inputs
class SeparableConv2d(nn.Module):
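    # Depthwise-separable convolution: a per-channel spatial conv (groups=inplanes), batch
    # norm, then a 1x1 pointwise conv that mixes channels. Padding is applied manually in
    # forward() via fixed_padding so dilated kernels keep the "SAME" spatial size.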
def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False, BatchNorm=None):
super(SeparableConv2d, self).__init__()
self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride, 0, dilation,
groups=inplanes, bias=bias)
self.bn = BatchNorm(inplanes)
self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)
def forward(self, x):
x = fixed_padding(x, self.conv1.kernel_size[0], dilation=self.conv1.dilation[0])
x = self.conv1(x)
x = self.bn(x)
x = self.pointwise(x)
return x
class Block(nn.Module):
def __init__(self, inplanes, planes, reps, stride=1, dilation=1, BatchNorm=None,
start_with_relu=True, grow_first=True, is_last=False):
super(Block, self).__init__()
if planes != inplanes or stride != 1:
self.skip = nn.Conv2d(inplanes, planes, 1, stride=stride, bias=False)
self.skipbn = BatchNorm(planes)
else:
self.skip = None
self.relu = nn.ReLU(inplace=True)
rep = []
filters = inplanes
if grow_first:
rep.append(self.relu)
rep.append(SeparableConv2d(inplanes, planes, 3, 1, dilation, BatchNorm=BatchNorm))
rep.append(BatchNorm(planes))
filters = planes
for i in range(reps - 1):
rep.append(self.relu)
rep.append(SeparableConv2d(filters, filters, 3, 1, dilation, BatchNorm=BatchNorm))
rep.append(BatchNorm(filters))
if not grow_first:
rep.append(self.relu)
rep.append(SeparableConv2d(inplanes, planes, 3, 1, dilation, BatchNorm=BatchNorm))
rep.append(BatchNorm(planes))
if stride != 1:
rep.append(self.relu)
rep.append(SeparableConv2d(planes, planes, 3, 2, BatchNorm=BatchNorm))
rep.append(BatchNorm(planes))
if stride == 1 and is_last:
rep.append(self.relu)
rep.append(SeparableConv2d(planes, planes, 3, 1, BatchNorm=BatchNorm))
rep.append(BatchNorm(planes))
if not start_with_relu:
rep = rep[1:]
self.rep = nn.Sequential(*rep)
def forward(self, inp):
x = self.rep(inp)
if self.skip is not None:
skip = self.skip(inp)
skip = self.skipbn(skip)
else:
skip = inp
x = x + skip
return x
class AlignedXception(nn.Module):
"""
    Modified Aligned Xception
"""
def __init__(self, output_stride, BatchNorm,
pretrained=True):
super(AlignedXception, self).__init__()
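        # output_stride is the overall downsampling factor from input to the final feature
        # map. With 16 the entry flow keeps its last stride-2 block and the exit flow uses
        # dilations (1, 2); with 8 that stride is removed and the dilations are doubled, so
        # resolution is traded for atrous (dilated) convolution instead of extra striding.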
if output_stride == 16:
entry_block3_stride = 2
middle_block_dilation = 1
exit_block_dilations = (1, 2)
elif output_stride == 8:
entry_block3_stride = 1
middle_block_dilation = 2
exit_block_dilations = (2, 4)
else:
raise NotImplementedError
# Entry flow
self.conv1 = nn.Conv2d(3, 32, 3, stride=2, padding=1, bias=False)
self.bn1 = BatchNorm(32)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(32, 64, 3, stride=1, padding=1, bias=False)
self.bn2 = BatchNorm(64)
self.block1 = Block(64, 128, reps=2, stride=2, BatchNorm=BatchNorm, start_with_relu=False)
self.block2 = Block(128, 256, reps=2, stride=2, BatchNorm=BatchNorm, start_with_relu=False,
grow_first=True)
self.block3 = Block(256, 728, reps=2, stride=entry_block3_stride, BatchNorm=BatchNorm,
start_with_relu=True, grow_first=True, is_last=True)
# Middle flow
self.block4 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block5 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block6 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block7 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block8 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block9 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block10 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block11 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block12 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block13 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block14 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block15 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block16 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block17 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block18 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
self.block19 = Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
BatchNorm=BatchNorm, start_with_relu=True, grow_first=True)
# Exit flow
self.block20 = Block(728, 1024, reps=2, stride=1, dilation=exit_block_dilations[0],
BatchNorm=BatchNorm, start_with_relu=True, grow_first=False, is_last=True)
self.conv3 = SeparableConv2d(1024, 1536, 3, stride=1, dilation=exit_block_dilations[1], BatchNorm=BatchNorm)
self.bn3 = BatchNorm(1536)
self.conv4 = SeparableConv2d(1536, 1536, 3, stride=1, dilation=exit_block_dilations[1], BatchNorm=BatchNorm)
self.bn4 = BatchNorm(1536)
self.conv5 = SeparableConv2d(1536, 2048, 3, stride=1, dilation=exit_block_dilations[1], BatchNorm=BatchNorm)
self.bn5 = BatchNorm(2048)
# Init weights
self._init_weight()
# Load pretrained model
if pretrained:
self._load_pretrained_model()
def forward(self, x):
# Entry flow
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.block1(x)
# add relu here
x = self.relu(x)
low_level_feat = x
x = self.block2(x)
x = self.block3(x)
# Middle flow
x = self.block4(x)
x = self.block5(x)
x = self.block6(x)
x = self.block7(x)
x = self.block8(x)
x = self.block9(x)
x = self.block10(x)
x = self.block11(x)
x = self.block12(x)
x = self.block13(x)
x = self.block14(x)
x = self.block15(x)
x = self.block16(x)
x = self.block17(x)
x = self.block18(x)
x = self.block19(x)
# Exit flow
x = self.block20(x)
x = self.relu(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu(x)
x = self.conv4(x)
x = self.bn4(x)
x = self.relu(x)
x = self.conv5(x)
x = self.bn5(x)
x = self.relu(x)
return x, low_level_feat
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, SynchronizedBatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _load_pretrained_model(self):
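        # Map the pretrained Xception checkpoint onto this modified architecture: pointwise
        # weights are stored as 2-D tensors and need unsqueezing to 4-D, the pretrained
        # block11 is replicated into blocks 12-19 (this variant has a longer middle flow),
        # block12 becomes block20, and the exit-flow conv/bn layers are remapped to cover
        # the extra conv5/bn5 added here.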
pretrain_dict = model_zoo.load_url('http://data.lip6.fr/cadene/pretrainedmodels/xception-b5690688.pth')
model_dict = {}
state_dict = self.state_dict()
for k, v in pretrain_dict.items():
if k in state_dict:
if 'pointwise' in k:
v = v.unsqueeze(-1).unsqueeze(-1)
if k.startswith('block11'):
model_dict[k] = v
model_dict[k.replace('block11', 'block12')] = v
model_dict[k.replace('block11', 'block13')] = v
model_dict[k.replace('block11', 'block14')] = v
model_dict[k.replace('block11', 'block15')] = v
model_dict[k.replace('block11', 'block16')] = v
model_dict[k.replace('block11', 'block17')] = v
model_dict[k.replace('block11', 'block18')] = v
model_dict[k.replace('block11', 'block19')] = v
elif k.startswith('block12'):
model_dict[k.replace('block12', 'block20')] = v
elif k.startswith('bn3'):
model_dict[k] = v
model_dict[k.replace('bn3', 'bn4')] = v
elif k.startswith('conv4'):
model_dict[k.replace('conv4', 'conv5')] = v
elif k.startswith('bn4'):
model_dict[k.replace('bn4', 'bn5')] = v
else:
model_dict[k] = v
state_dict.update(model_dict)
self.load_state_dict(state_dict)
if __name__ == "__main__":
import torch
model = AlignedXception(BatchNorm=nn.BatchNorm2d, pretrained=True, output_stride=16)
input = torch.rand(1, 3, 512, 512)
output, low_level_feat = model(input)
print(output.size())
print(low_level_feat.size())
| 39.955017 | 116 | 0.582835 |
4a2216d230203cf4d2b51f8d68075c0e3e4462d1 | 161,776 | py | Python | test/test_tensor_creation_ops.py | kulinseth/pytorch | 337c71be05f959799a305164e6edf86c686bb673 | [
"Intel"
] | null | null | null | test/test_tensor_creation_ops.py | kulinseth/pytorch | 337c71be05f959799a305164e6edf86c686bb673 | [
"Intel"
] | null | null | null | test/test_tensor_creation_ops.py | kulinseth/pytorch | 337c71be05f959799a305164e6edf86c686bb673 | [
"Intel"
] | null | null | null | import torch
import numpy as np
import sys
import math
import warnings
import unittest
from itertools import product, combinations, combinations_with_replacement, permutations
import random
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
TestCase, run_tests, do_test_empty_full, TEST_WITH_ROCM, suppress_warnings,
torch_to_numpy_dtype_dict, slowTest, TEST_SCIPY, IS_MACOS, IS_PPC,
IS_WINDOWS)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA,
onlyCPU, largeTensorTest, precisionOverride, dtypes,
onlyCUDA, skipCPUIf, dtypesIfCUDA, dtypesIfCPU, skipMeta)
from torch.testing._internal.common_dtype import (
get_all_dtypes, get_all_math_dtypes, get_all_int_dtypes, get_all_fp_dtypes, get_all_complex_dtypes
)
# TODO: refactor tri_tests_args, _compare_trilu_indices, run_additional_tri_tests
from torch.testing._internal.common_methods_invocations import (
tri_tests_args, _compare_trilu_indices, run_additional_tri_tests)
# TODO: replace with make_tensor
def _generate_input(shape, dtype, device, with_extremal):
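    # Builds a random test tensor of the given shape/dtype: float/complex tensors are scaled
    # randn values with roughly half the entries zeroed (plus nan/inf/-inf when with_extremal
    # is set), bool tensors get roughly half their entries set to True, and integer tensors
    # are drawn uniformly from [15, 100).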
if shape == ():
x = torch.tensor((), dtype=dtype, device=device)
else:
if dtype.is_floating_point or dtype.is_complex:
# work around torch.randn not being implemented for bfloat16
if dtype == torch.bfloat16:
x = torch.randn(*shape, device=device) * random.randint(30, 100)
x = x.to(torch.bfloat16)
else:
x = torch.randn(*shape, dtype=dtype, device=device) * random.randint(30, 100)
x[torch.randn(*shape) > 0.5] = 0
if with_extremal and dtype.is_floating_point:
# Use extremal values
x[torch.randn(*shape) > 0.5] = float('nan')
x[torch.randn(*shape) > 0.5] = float('inf')
x[torch.randn(*shape) > 0.5] = float('-inf')
elif with_extremal and dtype.is_complex:
x[torch.randn(*shape) > 0.5] = complex('nan')
x[torch.randn(*shape) > 0.5] = complex('inf')
x[torch.randn(*shape) > 0.5] = complex('-inf')
elif dtype == torch.bool:
x = torch.zeros(shape, dtype=dtype, device=device)
x[torch.randn(*shape) > 0.5] = True
else:
x = torch.randint(15, 100, shape, dtype=dtype, device=device)
return x
# TODO: replace with make_tensor
def _rand_shape(dim, min_size, max_size):
shape = []
for i in range(dim):
shape.append(random.randint(min_size, max_size))
return tuple(shape)
# Test suite for tensor creation ops
#
# Includes creation functions like torch.eye, random creation functions like
# torch.rand, and *like functions like torch.ones_like.
# DOES NOT INCLUDE view ops, which are tested in TestViewOps (currently in
# test_torch.py) OR numpy interop (which is also still tested in test_torch.py)
#
# See https://pytorch.org/docs/master/torch.html#creation-ops
class TestTensorCreation(TestCase):
exact_dtype = True
@onlyCPU
@dtypes(torch.float)
def test_diag_embed(self, device, dtype):
x = torch.arange(3 * 4, dtype=dtype, device=device).view(3, 4)
result = torch.diag_embed(x)
expected = torch.stack([torch.diag(r) for r in x], 0)
self.assertEqual(result, expected)
result = torch.diag_embed(x, offset=1, dim1=0, dim2=2)
expected = torch.stack([torch.diag(r, 1) for r in x], 1)
self.assertEqual(result, expected)
def test_cat_mem_overlap(self, device):
x = torch.rand((1, 3), device=device).expand((6, 3))
y = torch.rand((3, 3), device=device)
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
torch.cat([y, y], out=x)
@onlyOnCPUAndCUDA
def test_vander(self, device):
x = torch.tensor([1, 2, 3, 5], device=device)
self.assertEqual((0, 0), torch.vander(torch.tensor([]), 0).shape)
with self.assertRaisesRegex(RuntimeError, "N must be non-negative."):
torch.vander(x, N=-1)
with self.assertRaisesRegex(RuntimeError, "x must be a one-dimensional tensor."):
torch.vander(torch.stack((x, x)))
@onlyOnCPUAndCUDA
@dtypes(torch.bool, torch.uint8, torch.int8, torch.short, torch.int, torch.long,
torch.float, torch.double,
torch.cfloat, torch.cdouble)
def test_vander_types(self, device, dtype):
if dtype is torch.uint8:
# Note: no negative uint8 values
X = [[1, 2, 3, 5], [0, 1 / 3, 1, math.pi, 3 / 7]]
elif dtype is torch.bool:
# Note: see https://github.com/pytorch/pytorch/issues/37398
# for why this is necessary.
X = [[True, True, True, True], [False, True, True, True, True]]
elif dtype in [torch.cfloat, torch.cdouble]:
X = [[1 + 1j, 1 + 0j, 0 + 1j, 0 + 0j],
[2 + 2j, 3 + 2j, 4 + 3j, 5 + 4j]]
else:
X = [[1, 2, 3, 5], [-math.pi, 0, 1 / 3, 1, math.pi, 3 / 7]]
N = [None, 0, 1, 3]
increasing = [False, True]
for x, n, inc in product(X, N, increasing):
numpy_dtype = torch_to_numpy_dtype_dict[dtype]
pt_x = torch.tensor(x, device=device, dtype=dtype)
np_x = np.array(x, dtype=numpy_dtype)
pt_res = torch.vander(pt_x, increasing=inc) if n is None else torch.vander(pt_x, n, inc)
np_res = np.vander(np_x, n, inc)
self.assertEqual(
pt_res,
torch.from_numpy(np_res),
atol=1e-3,
rtol=0,
exact_dtype=False)
def test_cat_all_dtypes_and_devices(self, device):
for dt in get_all_dtypes():
x = torch.tensor([[1, 2], [3, 4]], dtype=dt, device=device)
expected1 = torch.tensor([[1, 2], [3, 4], [1, 2], [3, 4]], dtype=dt, device=device)
self.assertEqual(torch.cat((x, x), 0), expected1)
expected2 = torch.tensor([[1, 2, 1, 2], [3, 4, 3, 4]], dtype=dt, device=device)
self.assertEqual(torch.cat((x, x), 1), expected2)
def test_fill_all_dtypes_and_devices(self, device):
for dt in get_all_dtypes():
for x in [torch.tensor((10, 10), dtype=dt, device=device),
torch.empty(10000, dtype=dt, device=device)]: # large tensor
numel = x.numel()
bound = 100 if dt in (torch.uint8, torch.int8) else 2000
for n in range(-bound, bound, bound // 10):
x.fill_(n)
self.assertEqual(x, torch.tensor([n] * numel, dtype=dt, device=device))
self.assertEqual(dt, x.dtype)
def test_roll(self, device):
numbers = torch.arange(1, 9, device=device)
single_roll = numbers.roll(1, 0)
expected = torch.tensor([8, 1, 2, 3, 4, 5, 6, 7], device=device)
self.assertEqual(single_roll, expected, msg="{} did not equal expected result".format(single_roll))
roll_backwards = numbers.roll(-2, 0)
expected = torch.tensor([3, 4, 5, 6, 7, 8, 1, 2], device=device)
self.assertEqual(roll_backwards, expected, msg="{} did not equal expected result".format(roll_backwards))
data = numbers.view(2, 2, 2)
rolled = data.roll(1, 0)
expected = torch.tensor([5, 6, 7, 8, 1, 2, 3, 4], device=device).view(2, 2, 2)
self.assertEqual(expected, rolled, msg="{} did not equal expected result: {}".format(rolled, expected))
data = data.view(2, 4)
        # roll in a loop until we are back where we started
loop_rolled = data.roll(2, 0).roll(4, 1)
self.assertEqual(data, loop_rolled, msg="{} did not equal the original: {}".format(loop_rolled, data))
# multiple inverse loops
self.assertEqual(data, data.roll(-20, 0).roll(-40, 1))
self.assertEqual(torch.tensor([8, 1, 2, 3, 4, 5, 6, 7], device=device), numbers.roll(1, 0))
# test non-contiguous
# strided equivalent to numbers.as_strided(size=(4, 2), stride=(1, 4))
strided = numbers.view(2, 4).transpose(0, 1)
self.assertFalse(strided.is_contiguous(), "this test needs a non-contiguous tensor")
expected = torch.tensor([4, 8, 1, 5, 2, 6, 3, 7]).view(4, 2)
rolled = strided.roll(1, 0)
self.assertEqual(expected, rolled,
msg="non contiguous tensor rolled to {} instead of {} ".format(rolled, expected))
# test roll with no dimension specified
expected = numbers.roll(1, 0).view(2, 4)
self.assertEqual(expected, data.roll(1), msg="roll with no dims should flatten and roll.")
self.assertEqual(expected, data.roll(1, dims=None), msg="roll with no dims should flatten and roll.")
# test roll over multiple dimensions
expected = torch.tensor([[7, 8, 5, 6], [3, 4, 1, 2]], device=device)
double_rolled = data.roll(shifts=(2, -1), dims=(1, 0))
self.assertEqual(double_rolled, expected,
msg="should be able to roll over two dimensions, got {}".format(double_rolled))
self.assertRaisesRegex(RuntimeError, "required", lambda: data.roll(shifts=(), dims=()))
self.assertRaisesRegex(RuntimeError, "required", lambda: data.roll(shifts=(), dims=1))
# shifts/dims should align
self.assertRaisesRegex(RuntimeError, "align", lambda: data.roll(shifts=(1, 2), dims=(1,)))
self.assertRaisesRegex(RuntimeError, "align", lambda: data.roll(shifts=(1,), dims=(1, 2)))
# test bool tensor
t = torch.zeros(6, dtype=torch.bool, device=device)
t[0] = True
t[3] = True
self.assertEqual(torch.tensor([False, True, False, False, True, False]), t.roll(1, 0))
# test complex tensor
t = torch.tensor([1, 2 + 1j, 3.5, 4. + 2j, 5j, 6.], device=device)
t[0] = 1 + 0.5j
t[3] = 4.
expected = torch.tensor([6., 1 + 0.5j, 2 + 1j, 3.5, 4., 5j], device=device)
self.assertEqual(expected, t.roll(1, 0))
@slowTest
def test_triu_tril(self, device):
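        # gen_mask marks (with 1) the entries that triu/tril should zero out for the
        # given diagonal; `expected` below keeps the original values only where the
        # mask is 0.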
def gen_mask(shape, diagonal, device, upper):
mask = torch.zeros(*shape[-2:]).byte()
for i in range(shape[-2]):
for j in range(shape[-1]):
cond = j - i < diagonal if upper else j - i > diagonal
if cond:
mask[i, j] = 1
return mask.expand(*shape).to(device)
torch_functions = {True: torch.triu, False: torch.tril}
numpy_functions = {True: np.triu, False: np.tril}
# TODO: remove this when bool and half are supported for torch.where
def bool_half_compat_where(pred, true_tensor, false_tensor, dtype):
if dtype == torch.bool or dtype == torch.half:
return torch.where(pred.byte(), true_tensor.byte(), false_tensor.byte()).to(dtype=dtype)
else:
return torch.where(pred, true_tensor, false_tensor)
def run_test(shape, device, diagonal, dtype):
x = torch.empty(*shape, device=device, dtype=dtype).fill_(2)
for upper in [True, False]:
# normal test with mask
torch_tri_func = torch_functions[upper]
res1 = torch_tri_func(x, diagonal=diagonal)
res2 = torch.empty(0, device=device, dtype=dtype)
torch_tri_func(x, diagonal=diagonal, out=res2)
exp_mask = gen_mask(shape, diagonal, device, upper)
expected = bool_half_compat_where(exp_mask, torch.tensor(0).type_as(x), x, dtype)
self.assertEqual(res1, res2, atol=0, rtol=0)
self.assertEqual(expected, res1, atol=0, rtol=0)
# non-contiguous and expanded tensors test
if 0 not in shape:
for s in range(-len(shape), -1):
# non-contiguous tensors
x_nc = x.clone().transpose(s, s + 1)
exp_mask = gen_mask(x_nc.size(), diagonal, device, upper)
if 1 not in shape:
assert not x_nc.is_contiguous(), "x is intentionally non-contiguous"
exp_nc = bool_half_compat_where(exp_mask, torch.tensor(0).type_as(x), x_nc, dtype)
self.assertEqual(torch_tri_func(x_nc, diagonal), exp_nc, atol=0, rtol=0)
x_nc_is_contiguous = x_nc.is_contiguous()
if upper:
self.assertEqual(x_nc.triu_(diagonal), exp_nc, atol=0, rtol=0)
else:
self.assertEqual(x_nc.tril_(diagonal), exp_nc, atol=0, rtol=0)
self.assertTrue(x_nc.is_contiguous() == x_nc_is_contiguous,
"contiguity of x_nc should not be changed")
# expanded tensors
expanded_size = (x.size(0),) + x.size()
x_expanded = x.clone().expand(*expanded_size)
if x.size(0) != 1:
assert 0 in x_expanded.stride(), "x intentionally has 0 in its stride"
output = torch_tri_func(x_expanded, diagonal)
self.assertEqual(output, expected.expand(expanded_size), atol=0, rtol=0)
if x.size(0) != 1:
self.assertTrue(0 in x_expanded.stride(),
"geometry of x_expanded should be the same")
if upper:
self.assertEqual(output, x_expanded.triu_(diagonal), atol=0, rtol=0)
else:
self.assertEqual(output, x_expanded.tril_(diagonal), atol=0, rtol=0)
# numpy test
numpy_tri_func = numpy_functions[upper]
self.assertEqual(numpy_tri_func(x.to('cpu').numpy(), diagonal), res1.cpu().numpy())
diagonals = [-2, -1, 0, 1, 2]
shapes = [(3, 3), (5, 3, 3), (7, 5, 3, 3), # square matrices
(7, 3), (5, 7, 3), (7, 5, 7, 3), # fat matrices
(3, 7), (5, 3, 7), (7, 5, 3, 7), # thin matrices
(3, 0), (0, 3, 3), (3, 3, 0, 0), # no numel matrices
(3, 1), (5, 3, 1), (7, 5, 3, 1), # very fat matrices
(1, 3), (5, 1, 3), (7, 5, 1, 3), # very thin matrices
(1, 3, 3, 3), (3, 1, 3, 3, 3)] # unsqueezed batch dimensions
dtypes = [dtype for dtype in get_all_dtypes() if dtype != torch.bfloat16]
for s, d, dtype in product(shapes, diagonals, dtypes):
run_test(s, device, d, dtype)
def test_diagflat(self, device):
dtype = torch.float32
# Basic sanity test
x = torch.randn((100,), dtype=dtype, device=device)
result = torch.diagflat(x)
expected = torch.diag(x)
self.assertEqual(result, expected)
# Test offset
x = torch.randn((100,), dtype=dtype, device=device)
result = torch.diagflat(x, 17)
expected = torch.diag(x, 17)
self.assertEqual(result, expected)
# Test where input has more than one dimension
x = torch.randn((2, 3, 4), dtype=dtype, device=device)
result = torch.diagflat(x)
expected = torch.diag(x.contiguous().view(-1))
self.assertEqual(result, expected)
# Noncontig input
x = torch.randn((2, 3, 4), dtype=dtype, device=device).transpose(2, 0)
self.assertFalse(x.is_contiguous())
result = torch.diagflat(x)
expected = torch.diag(x.contiguous().view(-1))
self.assertEqual(result, expected)
# Complex number support
result = torch.diagflat(torch.ones(4, dtype=torch.complex128))
expected = torch.eye(4, dtype=torch.complex128)
self.assertEqual(result, expected)
def test_block_diag(self, device):
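        # Reference implementation: promote every input to 2-D and copy each one onto
        # the block diagonal of a zero tensor whose size is the element-wise sum of the
        # per-input shapes.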
def block_diag_workaround(*arrs):
arrs_expanded = []
for a in arrs:
if a.dim() == 2:
arrs_expanded.append(a)
elif a.dim() == 1:
arrs_expanded.append(a.expand(1, a.size(0)))
elif a.dim() == 0:
arrs_expanded.append(a.expand(1, 1))
shapes = torch.tensor([a.shape for a in arrs_expanded], device=device)
out = torch.zeros(
torch.sum(shapes, dim=0).tolist(),
dtype=arrs_expanded[0].dtype,
device=device
)
r, c = 0, 0
for i, (rr, cc) in enumerate(shapes):
out[r:r + rr, c:c + cc] = arrs_expanded[i]
r += rr
c += cc
return out
tensors = [
torch.rand((2, 2), device=device),
torch.rand((2, 3), device=device),
torch.rand(10, device=device),
torch.rand((8, 1), device=device),
torch.rand(1, device=device)[0]
]
result = torch.block_diag(*tensors)
result_check = block_diag_workaround(*tensors)
self.assertEqual(result, result_check)
tensor = torch.rand(1, device=device)[0]
result = torch.block_diag(tensor)
result_check = tensor.expand(1, 1)
self.assertEqual(result, result_check)
tensor = torch.rand(10, device=device)
result = torch.block_diag(tensor)
result_check = tensor.expand(1, tensor.size(0))
self.assertEqual(result, result_check)
result = torch.block_diag()
result_check = torch.empty(1, 0, device=device)
self.assertEqual(result, result_check)
self.assertEqual(result.device.type, 'cpu')
test_dtypes = [
torch.uint8,
torch.int8,
torch.int16,
torch.int32,
torch.int64,
torch.float32,
torch.float64,
torch.complex64,
torch.complex128
]
# Test pairs of different dtypes
for dtype1 in test_dtypes:
for dtype2 in test_dtypes:
a = torch.tensor(1, device=device, dtype=dtype1)
b = torch.tensor(2, device=device, dtype=dtype2)
result = torch.block_diag(a, b)
result_dtype = torch.result_type(a, b)
result_check = torch.tensor([[1, 0], [0, 2]], device=device, dtype=result_dtype)
self.assertEqual(result, result_check)
with self.assertRaisesRegex(
RuntimeError,
"torch.block_diag: Input tensors must have 2 or fewer dimensions. Input 1 has 3 dimensions"
):
torch.block_diag(torch.tensor(5), torch.tensor([[[6]]]))
with self.assertRaisesRegex(
RuntimeError,
"torch.block_diag: Input tensors must have 2 or fewer dimensions. Input 0 has 4 dimensions"
):
torch.block_diag(torch.tensor([[[[6]]]]))
if device != 'cpu':
with self.assertRaisesRegex(
RuntimeError,
(
"torch.block_diag: input tensors must all be on the same device."
" Input 0 is on device cpu and input 1 is on device "
)
):
torch.block_diag(torch.ones(2, 2).cpu(), torch.ones(2, 2, device=device))
@unittest.skipIf(not TEST_SCIPY, "Scipy not found")
def test_block_diag_scipy(self, device):
import scipy.linalg
scipy_tensors_list = [
[
1,
[2],
[],
[3, 4, 5],
[[], []],
[[6], [7.3]]
],
[
[[1, 2], [3, 4]],
[1]
],
[
[[4, 9], [7, 10]],
[4.6, 9.12],
[1j + 3]
],
[]
]
expected_torch_types = [
torch.float32,
torch.int64,
torch.complex64,
torch.float32
]
expected_scipy_types = [
torch.float64,
# windows scipy block_diag returns int32 types
torch.int32 if IS_WINDOWS else torch.int64,
torch.complex128,
torch.float64
]
for scipy_tensors, torch_type, scipy_type in zip(scipy_tensors_list, expected_torch_types, expected_scipy_types):
torch_tensors = [torch.tensor(t, device=device) for t in scipy_tensors]
torch_result = torch.block_diag(*torch_tensors)
self.assertEqual(torch_result.dtype, torch_type)
scipy_result = torch.tensor(
scipy.linalg.block_diag(*scipy_tensors),
device=device
)
self.assertEqual(scipy_result.dtype, scipy_type)
scipy_result = scipy_result.to(torch_type)
self.assertEqual(torch_result, scipy_result)
@onlyOnCPUAndCUDA
@dtypes(torch.float32, torch.float64)
def test_torch_complex(self, device, dtype):
real = torch.tensor([1, 2], device=device, dtype=dtype)
imag = torch.tensor([3, 4], device=device, dtype=dtype)
z = torch.complex(real, imag)
complex_dtype = torch.complex64 if dtype == torch.float32 else torch.complex128
self.assertEqual(torch.tensor([1.0 + 3.0j, 2.0 + 4.0j], dtype=complex_dtype), z)
@onlyOnCPUAndCUDA
@dtypes(torch.float32, torch.float64)
def test_torch_polar(self, device, dtype):
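        # torch.polar(abs, angle) constructs abs * (cos(angle) + 1j * sin(angle));
        # negative magnitudes are accepted here and the formula is applied as-is.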
abs = torch.tensor([1, 2, -3, -4.5, 1, 1], device=device, dtype=dtype)
angle = torch.tensor([math.pi / 2, 5 * math.pi / 4, 0, -11 * math.pi / 6, math.pi, -math.pi],
device=device, dtype=dtype)
z = torch.polar(abs, angle)
complex_dtype = torch.complex64 if dtype == torch.float32 else torch.complex128
self.assertEqual(torch.tensor([1j, -1.41421356237 - 1.41421356237j, -3,
-3.89711431703 - 2.25j, -1, -1],
dtype=complex_dtype),
z, atol=1e-5, rtol=1e-5)
@onlyOnCPUAndCUDA
@dtypes(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64,
torch.float16, torch.complex64, torch.complex128, torch.bool)
def test_torch_complex_floating_dtype_error(self, device, dtype):
for op in (torch.complex, torch.polar):
a = torch.tensor([1, 2], device=device, dtype=dtype)
b = torch.tensor([3, 4], device=device, dtype=dtype)
error = r"Expected both inputs to be Float or Double tensors but " \
r"got [A-Za-z]+ and [A-Za-z]+"
with self.assertRaisesRegex(RuntimeError, error):
op(a, b)
@onlyOnCPUAndCUDA
@dtypes(torch.float32, torch.float64)
def test_torch_complex_same_dtype_error(self, device, dtype):
def dtype_name(dtype):
return 'Float' if dtype == torch.float32 else 'Double'
for op in (torch.complex, torch.polar):
other_dtype = torch.float64 if dtype == torch.float32 else torch.float32
a = torch.tensor([1, 2], device=device, dtype=dtype)
b = torch.tensor([3, 4], device=device, dtype=other_dtype)
error = "Expected object of scalar type {} but got scalar type " \
"{} for second argument".format(dtype_name(dtype),
dtype_name(other_dtype))
with self.assertRaisesRegex(RuntimeError, error):
op(a, b)
@onlyOnCPUAndCUDA
@dtypes(torch.float32, torch.float64)
def test_torch_complex_out_dtype_error(self, device, dtype):
def dtype_name(dtype):
return 'Float' if dtype == torch.float32 else 'Double'
def complex_dtype_name(dtype):
return 'ComplexFloat' if dtype == torch.complex64 else 'ComplexDouble'
for op in (torch.complex, torch.polar):
a = torch.tensor([1, 2], device=device, dtype=dtype)
b = torch.tensor([3, 4], device=device, dtype=dtype)
out = torch.zeros(2, device=device, dtype=dtype)
expected_dtype = torch.complex64 if dtype == torch.float32 else torch.complex128
error = "Expected object of scalar type {} but got scalar type " \
"{} for argument 'out'".format(
complex_dtype_name(expected_dtype), dtype_name(dtype))
with self.assertRaisesRegex(RuntimeError, error):
op(a, b, out=out)
def test_cat_empty_legacy(self, device):
# FIXME: this is legacy behavior and should be removed
# when we support empty tensors with arbitrary sizes
dtype = torch.float32
x = torch.randn((4, 3, 32, 32), dtype=dtype, device=device)
empty = torch.randn((0,), dtype=dtype, device=device)
res1 = torch.cat([x, empty], dim=1)
res2 = torch.cat([empty, x], dim=1)
self.assertEqual(res1, res2)
res1 = torch.cat([empty, empty], dim=1)
self.assertEqual(res1, empty)
with self.assertRaisesRegex(RuntimeError,
'non-empty list of Tensors'):
torch.cat([], dim=1)
def test_cat_empty(self, device):
dtype = torch.float32
x = torch.randn((4, 3, 32, 32), dtype=dtype, device=device)
empty = torch.randn((4, 0, 32, 32), dtype=dtype, device=device)
res1 = torch.cat([x, empty], dim=1)
res2 = torch.cat([empty, x], dim=1)
self.assertEqual(res1, res2)
res1 = torch.cat([empty, empty], dim=1)
self.assertEqual(res1, empty)
# check non-legacy-behavior (sizes don't match)
empty = torch.randn((4, 0, 31, 32), dtype=dtype, device=device)
self.assertRaises(RuntimeError, lambda: torch.cat([x, empty], dim=1))
self.assertRaises(RuntimeError, lambda: torch.cat([empty, x], dim=1))
# check non-legacy-behavior (dimensions don't match)
empty = torch.randn((4, 0), dtype=dtype, device=device)
self.assertRaises(RuntimeError, lambda: torch.cat([x, empty], dim=1))
self.assertRaises(RuntimeError, lambda: torch.cat([empty, x], dim=1))
def test_cat_out(self, device):
x = torch.zeros((0), device=device)
y = torch.randn((4, 6), device=device)
with self.assertRaisesRegex(
RuntimeError, r"unsupported operation:.* input tensor 0"):
torch.cat([x, y], dim=0, out=x)
with self.assertRaisesRegex(
RuntimeError, r"unsupported operation:.* input tensor 1"):
torch.cat([x, y], dim=0, out=y)
z = torch.zeros((4, 6), device=device)
with self.assertRaisesRegex(
RuntimeError, r"unsupported operation:.* input tensor 1"):
torch.cat([y, z], out=z[:2, :])
w = y.view(-1).clone()
a = torch.cat([w[:2], w[4:6]])
b = torch.cat([w[:2], w[4:6]], out=w[6:10])
self.assertEqual(a, b)
self.assertEqual(w[:6], y.view(-1)[:6])
# Case:
# Reference: https://github.com/pytorch/pytorch/issues/49878
for dim in [0, 1]:
x = torch.zeros((10, 5, 2), device=device)
random_length = random.randint(1, 4)
y = x.narrow(dim, 0, x.shape[dim] - random_length)
val = torch.full_like(y[0], 3., device=device)
if dim == 0:
self.assertTrue(y.is_contiguous())
else:
self.assertFalse(y.is_contiguous())
torch.cat((val[None],) * y.shape[0], dim=0, out=y)
expected_y = torch.cat((val[None],) * y.shape[0], dim=0)
expected_x = torch.zeros((10, 5, 2), device=device)
if dim == 0:
expected_x[:x.shape[dim] - random_length, :, :] = expected_y
elif dim == 1:
expected_x[:, :x.shape[dim] - random_length, :] = expected_y
self.assertEqual(y, expected_y)
self.assertEqual(x, expected_x)
def test_cat_out_channels_last(self, device):
x = torch.randn((4, 3, 8, 8))
y = torch.randn(x.shape)
res1 = torch.cat((x, y))
z = res1.clone().contiguous(memory_format=torch.channels_last)
res2 = torch.cat((x, y), out=z)
self.assertEqual(res1, res2)
@onlyOnCPUAndCUDA
def test_cat_in_channels_last(self, device):
for dim in range(4):
x = torch.randn((4, 15, 8, 8), device=device)
y = torch.randn(x.shape, device=device)
res1 = torch.cat((x, y), dim=dim)
x = x.clone().contiguous(memory_format=torch.channels_last)
y = y.clone().contiguous(memory_format=torch.channels_last)
res2 = torch.cat((x, y), dim=dim)
self.assertTrue(res2.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(res1, res2)
# Size larger than grain size.
x = torch.randn((4, 15, 256, 256), device=device)
y = torch.randn(x.shape, device=device)
res1 = torch.cat((x, y), dim=dim)
x = x.clone().contiguous(memory_format=torch.channels_last)
y = y.clone().contiguous(memory_format=torch.channels_last)
res2 = torch.cat((x, y), dim=dim)
self.assertTrue(res2.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(res1, res2)
@onlyOnCPUAndCUDA
def test_cat_preserve_channels_last(self, device):
x = torch.randn((4, 3, 8, 8), device=device)
y = torch.randn(x.shape, device=device)
res1 = torch.cat((x, y))
res2 = torch.cat((x.contiguous(memory_format=torch.channels_last), y.contiguous(memory_format=torch.channels_last)))
self.assertEqual(res1, res2)
self.assertTrue(res2.is_contiguous(memory_format=torch.channels_last))
# discontiguous channels-last inputs
x = torch.arange(24, dtype=torch.float, device=device).reshape(2, 2, 3, 2).to(memory_format=torch.channels_last)
x1 = x[:, :, :2]
x2 = x[:, :, 1:]
res1 = torch.cat((x1, x2), dim=-1)
res2 = torch.cat((x1.contiguous(), x2.contiguous()), dim=-1)
self.assertEqual(res1, res2)
self.assertTrue(res1.is_contiguous(memory_format=torch.channels_last))
@onlyCUDA
def test_cat_out_memory_format(self, device):
inp_size = (4, 4, 4, 4)
expected_size = (8, 4, 4, 4)
a_cuda = torch.randn(inp_size, device=device).contiguous(memory_format=torch.channels_last)
a_cpu = torch.randn(inp_size, device='cpu').contiguous(memory_format=torch.channels_last)
b_cuda = torch.randn(inp_size, device=device).contiguous(memory_format=torch.contiguous_format)
b_cpu = torch.randn(inp_size, device='cpu').contiguous(memory_format=torch.contiguous_format)
c_cuda = torch.randn(inp_size, device=device).contiguous(memory_format=torch.channels_last)
# Case 1: if out= is the correct shape then the memory format of out= is respected
out_cuda = torch.empty(expected_size, device=device).contiguous(memory_format=torch.contiguous_format)
res1_cuda = torch.cat((a_cuda, b_cuda), out=out_cuda)
out_cpu = torch.empty(expected_size, device='cpu').contiguous(memory_format=torch.contiguous_format)
res1_cpu = torch.cat((a_cpu, b_cpu), out=out_cpu)
self.assertTrue(res1_cuda.is_contiguous(memory_format=torch.contiguous_format))
self.assertTrue(res1_cpu.is_contiguous(memory_format=torch.contiguous_format))
        # Case 2: if out= is not the correct shape then the output is resized internally
# - For the CPU variant the memory format is that of the first tensor
# - For the CUDA variant it only propagates memory format if all the tensors have
# the same memory format, otherwise it just uses contiguous_format as a default
out_cuda = torch.empty((0), device=device).contiguous(memory_format=torch.contiguous_format)
# a_cuda and b_cuda have different memory_format
res2_cuda = torch.cat((a_cuda, b_cuda), out=out_cuda)
out_cpu = torch.empty((0), device='cpu').contiguous(memory_format=torch.contiguous_format)
res2_cpu = torch.cat((a_cpu, b_cpu), out=out_cpu)
self.assertTrue(res2_cuda.is_contiguous(memory_format=torch.contiguous_format))
self.assertTrue(res2_cpu.is_contiguous(memory_format=torch.channels_last))
out_cuda = torch.empty((0), device=device).contiguous(memory_format=torch.contiguous_format)
# a_cuda and c_cuda have same memory_format
res3_cuda = torch.cat((a_cuda, c_cuda), out=out_cuda)
self.assertTrue(res3_cuda.is_contiguous(memory_format=torch.channels_last))
@onlyCUDA
@deviceCountAtLeast(2)
def test_cat_different_devices(self, devices):
cuda0 = torch.randn((3, 3), device=devices[0])
cuda1 = torch.randn((3, 3), device=devices[1])
with self.assertRaisesRegex(RuntimeError,
"Expected all tensors to be on the same device"):
torch.cat((cuda0, cuda1))
with self.assertRaisesRegex(RuntimeError,
"Expected all tensors to be on the same device"):
torch.cat((cuda0, cuda0), out=cuda1)
@onlyCUDA
def test_cat_stack_cross_devices(self, device):
cuda = torch.randn((3, 3), device=device)
cpu = torch.randn((3, 3), device='cpu')
# cat
with self.assertRaisesRegex(RuntimeError,
"Expected all tensors to be on the same device"):
torch.cat((cuda, cpu))
with self.assertRaisesRegex(RuntimeError,
"Expected all tensors to be on the same device"):
torch.cat((cpu, cuda))
# Stack
with self.assertRaisesRegex(RuntimeError,
"Expected all tensors to be on the same device"):
torch.stack((cuda, cpu))
with self.assertRaisesRegex(RuntimeError,
"Expected all tensors to be on the same device"):
torch.stack((cpu, cuda))
# TODO: reconcile with other cat tests
# TODO: Compare with a NumPy reference instead of CPU
@onlyCUDA
def test_cat(self, device):
SIZE = 10
for dim in range(-3, 3):
pos_dim = dim if dim >= 0 else 3 + dim
x = torch.rand(13, SIZE, SIZE, device=device).transpose(0, pos_dim)
y = torch.rand(17, SIZE, SIZE, device=device).transpose(0, pos_dim)
z = torch.rand(19, SIZE, SIZE, device=device).transpose(0, pos_dim)
res1 = torch.cat((x, y, z), dim)
self.assertEqual(res1.narrow(pos_dim, 0, 13), x, atol=0, rtol=0)
self.assertEqual(res1.narrow(pos_dim, 13, 17), y, atol=0, rtol=0)
self.assertEqual(res1.narrow(pos_dim, 30, 19), z, atol=0, rtol=0)
x = torch.randn(20, SIZE, SIZE, device=device)
self.assertEqual(torch.cat(torch.split(x, 7)), x)
self.assertEqual(torch.cat(torch.chunk(x, 7)), x)
y = torch.randn(1, SIZE, SIZE, device=device)
z = torch.cat([x, y])
self.assertEqual(z.size(), (21, SIZE, SIZE))
# TODO: update this test to compare against NumPy instead of CPU
@onlyCUDA
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float, torch.double)
def test_device_rounding(self, device, dtype):
# test half-to-even
a = [-5.8, -3.5, -2.3, -1.5, -0.5, 0.5, 1.5, 2.3, 3.5, 5.8]
res = [-6., -4., -2., -2., 0., 0., 2., 2., 4., 6.]
a_tensor = torch.tensor(a, device=device).round()
res_tensor = torch.tensor(res, device='cpu')
self.assertEqual(a_tensor, res_tensor)
# Note: This test failed on XLA since its test cases are created by empty_strided which
# doesn't support overlapping sizes/strides in XLA impl
@onlyOnCPUAndCUDA
def test_like_fn_stride_proparation_vs_tensoriterator_unary_op(self, device):
        # Test like functions against a tensoriterator-based unary operator (exp) to
        # make sure the tensor returned by a like function follows the same stride-propagation
        # rule as what tensoriterator does for a unary operator. The like function's output strides
        # are always computed on the CPU side, so there is no need to test the GPU here.
def compare_helper_(like_fn, t):
te = torch.exp(t)
tl = like_fn(t)
self.assertEqual(te.stride(), tl.stride())
self.assertEqual(te.size(), tl.size())
like_fns = [
lambda t, **kwargs: torch.zeros_like(t, **kwargs),
lambda t, **kwargs: torch.ones_like(t, **kwargs),
lambda t, **kwargs: torch.randint_like(t, 10, 100, **kwargs),
lambda t, **kwargs: torch.randint_like(t, 100, **kwargs),
lambda t, **kwargs: torch.randn_like(t, **kwargs),
lambda t, **kwargs: torch.rand_like(t, **kwargs),
lambda t, **kwargs: torch.full_like(t, 7, **kwargs),
lambda t, **kwargs: torch.empty_like(t, **kwargs)]
# dense non-overlapping tensor,
# non-dense non-overlapping sliced tensor
# non-dense non-overlapping gapped tensor
# non-dense non-overlapping 0 strided tensor
# non-dense overlapping general tensor
# non-dense overlapping sliced tensor
# non-dense overlapping gapped tensor
# non-dense overlapping 0 strided tensor
# non-dense overlapping equal strides
tset = (
torch.randn(4, 3, 2, device=device),
torch.randn(4, 3, 2, device=device)[:, :, ::2],
torch.empty_strided((4, 3, 2), (10, 3, 1), device=device).fill_(1.0),
torch.empty_strided((4, 3, 2), (10, 0, 3), device=device).fill_(1.0),
torch.empty_strided((4, 3, 2), (10, 1, 2), device=device).fill_(1.0),
torch.empty_strided((4, 3, 2), (4, 2, 1), device=device)[:, :, ::2].fill_(1.0),
torch.empty_strided((4, 3, 2), (10, 1, 1), device=device).fill_(1.0),
torch.empty_strided((4, 1, 1, 2), (10, 0, 0, 2), device=device).fill_(1.0),
torch.empty_strided((4, 2, 3), (10, 3, 3), device=device).fill_(1.0))
for like_fn in like_fns:
for t in tset:
for p in permutations(range(t.dim())):
tp = t.permute(p)
compare_helper_(like_fn, tp)
def _hvd_split_helper(self, torch_fn, np_fn, op_name, inputs, device, dtype, dim):
dimension_error_message = op_name + " requires a tensor with at least "
        divisibility_error_message = op_name + " attempted to split along dimension "
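        # `dim` is 1 for hsplit, 0 for vsplit and 2 for dsplit.  `bound` below is the
        # minimum number of dimensions each op accepts (1, 2 and 3 respectively), and
        # `direction` covers the special case that hsplit splits 1-D inputs along dim 0.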
for shape, arg in inputs:
direction = dim - (len(shape) == 1 and dim == 1)
bound = dim + 2 * (dim == 0) + (dim == 2)
error_expected = len(shape) < bound or (not isinstance(arg, list) and shape[direction] % arg != 0)
t = make_tensor(shape, device, dtype)
t_np = t.cpu().numpy()
if not error_expected:
self.assertEqual(torch_fn(t, arg), np_fn(t_np, arg))
else:
self.assertRaises(RuntimeError, lambda: torch_fn(t, arg))
self.assertRaises(ValueError, lambda: np_fn(t, arg))
                expected_error_message = dimension_error_message if len(shape) < bound else divisibility_error_message
self.assertRaisesRegex(RuntimeError, expected_error_message, lambda: torch_fn(t, arg))
@onlyOnCPUAndCUDA
@dtypes(torch.long, torch.float32, torch.complex64)
def test_hsplit(self, device, dtype):
inputs = (
((), 3),
((), [2, 4, 6]),
((6,), 2),
((6,), 4),
((6,), [2, 5]),
((6,), [7, 9]),
((3, 8), 4),
((3, 8), 5),
((3, 8), [1, 5]),
((3, 8), [3, 8]),
((5, 5, 5), 2),
((5, 5, 5), [1, 4]),
((5, 0, 5), 3),
((5, 5, 0), [2, 6]),
)
self._hvd_split_helper(torch.hsplit, np.hsplit, "torch.hsplit", inputs, device, dtype, 1)
@onlyOnCPUAndCUDA
@dtypes(torch.long, torch.float32, torch.complex64)
def test_vsplit(self, device, dtype):
inputs = (
((6,), 2),
((6,), 4),
((6, 5), 2),
((6, 5), 4),
((6, 5), [1, 2, 3]),
((6, 5), [1, 5, 9]),
((6, 5, 5), 2),
((6, 0, 5), 2),
((5, 0, 5), [1, 5]),
)
self._hvd_split_helper(torch.vsplit, np.vsplit, "torch.vsplit", inputs, device, dtype, 0)
@onlyOnCPUAndCUDA
@dtypes(torch.long, torch.float32, torch.complex64)
def test_dsplit(self, device, dtype):
inputs = (
((6,), 4),
((6, 6), 3),
((5, 5, 6), 2),
((5, 5, 6), 4),
((5, 5, 6), [1, 2, 3]),
((5, 5, 6), [1, 5, 9]),
((5, 5, 0), 2),
((5, 0, 6), 4),
((5, 0, 6), [1, 2, 3]),
((5, 5, 6), [1, 5, 9]),
)
self._hvd_split_helper(torch.dsplit, np.dsplit, "torch.dsplit", inputs, device, dtype, 2)
def _test_special_stacks(self, dim, at_least_dim, torch_fn, np_fn, device, dtype):
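        # `dim` is the dimension the op concatenates along and `at_least_dim` is the
        # dimensionality below which the op first promotes its inputs (which is why the
        # shape check below is relaxed when ndims < at_least_dim).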
# Test error for non-tuple argument
t = torch.randn(10)
with self.assertRaisesRegex(TypeError, "must be tuple of Tensors, not Tensor"):
torch_fn(t)
# Test error for a single array
with self.assertRaisesRegex(TypeError, "must be tuple of Tensors, not Tensor"):
torch_fn((t))
# Test 0-D
num_tensors = random.randint(1, 5)
input_t = [torch.tensor(random.uniform(0, 10), device=device, dtype=dtype) for i in range(num_tensors)]
actual = torch_fn(input_t)
expected = np_fn([input.cpu().numpy() for input in input_t])
self.assertEqual(actual, expected)
for ndims in range(1, 5):
base_shape = list(_rand_shape(ndims, min_size=1, max_size=5))
for i in range(ndims):
shape = list(base_shape)
num_tensors = random.randint(1, 5)
torch_input = []
# Create tensors with shape being different along one axis only
for param in range(num_tensors):
shape[i] = random.randint(1, 5)
torch_input.append(_generate_input(tuple(shape), dtype, device, with_extremal=False))
# Determine if input tensors have valid dimensions.
valid_dim = True
for k in range(len(torch_input) - 1):
for tdim in range(ndims):
# Test whether all tensors have the same shape except in concatenating dimension
# Unless the number of dimensions is less than the corresponding at_least function dimension
# Since the original concatenating dimension would shift after applying at_least and would no
# longer be the concatenating dimension
if (ndims < at_least_dim or tdim != dim) and torch_input[k].size()[tdim] != torch_input[k + 1].size()[tdim]:
valid_dim = False
# Special case for hstack is needed since hstack works differently when ndims is 1
if valid_dim or (torch_fn is torch.hstack and ndims == 1):
# Valid dimensions, test against numpy
np_input = [input.cpu().numpy() for input in torch_input]
actual = torch_fn(torch_input)
expected = np_fn(np_input)
self.assertEqual(actual, expected)
else:
# Invalid dimensions, test for error
with self.assertRaisesRegex(RuntimeError, "Sizes of tensors must match except in dimension"):
torch_fn(torch_input)
with self.assertRaises(ValueError):
np_input = [input.cpu().numpy() for input in torch_input]
np_fn(np_input)
@onlyOnCPUAndCUDA
@dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes(include_bfloat16=False) +
get_all_complex_dtypes()))
def test_hstack_column_stack(self, device, dtype):
ops = ((torch.hstack, np.hstack), (torch.column_stack, np.column_stack))
for torch_op, np_op in ops:
self._test_special_stacks(1, 1, torch_op, np_op, device, dtype)
# Test torch.column_stack with combinations of 1D and 2D tensors input
one_dim_tensor = torch.arange(0, 10).to(dtype=dtype, device=device)
two_dim_tensor = torch.arange(0, 100).to(dtype=dtype, device=device).reshape(10, 10)
inputs = two_dim_tensor, one_dim_tensor, two_dim_tensor, one_dim_tensor
torch_result = torch.column_stack(inputs)
np_inputs = [input.cpu().numpy() for input in inputs]
np_result = np.column_stack(np_inputs)
self.assertEqual(np_result,
torch_result)
@onlyOnCPUAndCUDA
@dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes(include_bfloat16=False) +
get_all_complex_dtypes()))
def test_vstack_row_stack(self, device, dtype):
ops = ((torch.vstack, np.vstack), (torch.row_stack, np.row_stack))
for torch_op, np_op in ops:
self._test_special_stacks(0, 2, torch_op, np_op, device, dtype)
for i in range(5):
# Test dimension change for 1D tensor of size (N) and 2D tensor of size (1, N)
n = random.randint(1, 10)
input_a = _generate_input((n,), dtype, device, with_extremal=False)
input_b = _generate_input((1, n), dtype, device, with_extremal=False)
torch_input = [input_a, input_b]
np_input = [input.cpu().numpy() for input in torch_input]
actual = torch_op(torch_input)
expected = np_op(np_input)
self.assertEqual(actual, expected)
@onlyOnCPUAndCUDA
@dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes(include_bfloat16=False) +
get_all_complex_dtypes()))
def test_dstack(self, device, dtype):
self._test_special_stacks(2, 3, torch.dstack, np.dstack, device, dtype)
for i in range(5):
# Test dimension change for 1D tensor of size (N), 2D tensor of size (1, N), and 3D tensor of size (1, N, 1)
n = random.randint(1, 10)
input_a = _generate_input((n,), dtype, device, with_extremal=False)
input_b = _generate_input((1, n), dtype, device, with_extremal=False)
input_c = _generate_input((1, n, 1), dtype, device, with_extremal=False)
torch_input = [input_a, input_b, input_c]
np_input = [input.cpu().numpy() for input in torch_input]
actual = torch.dstack(torch_input)
expected = np.dstack(np_input)
self.assertEqual(actual, expected)
# Test dimension change for 2D tensor of size (M, N) and 3D tensor of size (M, N, 1)
m = random.randint(1, 10)
n = random.randint(1, 10)
input_a = _generate_input((m, n), dtype, device, with_extremal=False)
input_b = _generate_input((m, n, 1), dtype, device, with_extremal=False)
torch_input = [input_a, input_b]
np_input = [input.cpu().numpy() for input in torch_input]
actual = torch.dstack(torch_input)
expected = np.dstack(np_input)
self.assertEqual(actual, expected)
@dtypes(torch.int32, torch.int64)
def test_large_linspace(self, device, dtype):
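        # The endpoints span almost the full integer range, so the step size
        # ((end - start) / (steps - 1)) must be computed without overflow; it is
        # strictly larger than (end - start) / steps, which is what is asserted.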
start = torch.iinfo(dtype).min
end = torch.iinfo(dtype).max & ~0xfff
steps = 15
x = torch.linspace(start, end, steps, dtype=dtype, device=device)
self.assertGreater(x[1] - x[0], (end - start) / steps)
@dtypes(torch.float32, torch.float64)
def test_unpack_double(self, device, dtype):
# Reference: https://github.com/pytorch/pytorch/issues/33111
vals = (2 ** 24 + 1, 2 ** 53 + 1,
np.iinfo(np.int64).max, np.iinfo(np.uint64).max, np.iinfo(np.uint64).max + 1,
-1e500, 1e500)
for val in vals:
t = torch.tensor(val, dtype=dtype, device=device)
a = np.array(val, dtype=torch_to_numpy_dtype_dict[dtype])
self.assertEqual(t, torch.from_numpy(a))
def _float_to_int_conversion_helper(self, vals, device, dtype):
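        # Converts the values through float32 to the target dtype with both NumPy and
        # torch and checks that the results agree.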
a = np.array(vals, dtype=np.float32).astype(torch_to_numpy_dtype_dict[dtype])
t = torch.tensor(vals, device=device, dtype=torch.float).to(dtype)
self.assertEqual(torch.from_numpy(a), t.cpu())
# Checks that float->integer casts don't produce undefined behavior errors.
# Note: In C++, casting from a floating value to an integral dtype
# is undefined if the floating point value is not within the integral
# dtype's dynamic range. This can (and should) cause undefined behavior
# errors with UBSAN. These casts are deliberate in PyTorch, however, and
# NumPy has the same behavior.
@onlyOnCPUAndCUDA
@unittest.skipIf(IS_MACOS, "Test is broken on MacOS, see https://github.com/pytorch/pytorch/issues/38752")
    @unittest.skipIf(IS_PPC, "Test is broken on PowerPC, see https://github.com/pytorch/pytorch/issues/39671")
@dtypes(torch.bool, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
def test_float_to_int_conversion_finite(self, device, dtype):
min = torch.finfo(torch.float).min
max = torch.finfo(torch.float).max
# Note: CUDA max float -> integer conversion is divergent on some dtypes
vals = (min, -2, -1.5, -.5, 0, .5, 1.5, 2, max)
if self.device_type == 'cuda':
if torch.version.hip:
# HIP min float -> int64 conversion is divergent
vals = (-2, -1.5, -.5, 0, .5, 1.5, 2)
else:
vals = (min, -2, -1.5, -.5, 0, .5, 1.5, 2)
self._float_to_int_conversion_helper(vals, device, dtype)
# Note: CUDA will fail this test on most dtypes, often dramatically.
@onlyCPU
@dtypes(torch.bool, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
def test_float_to_int_conversion_nonfinite(self, device, dtype):
vals = (float('-inf'), float('inf'), float('nan'))
self._float_to_int_conversion_helper(vals, device, dtype)
# TODO: re-enable this test
@unittest.skipIf(True, "real and imag not implemented for complex")
@onlyOnCPUAndCUDA
def test_complex_type_conversions(self, device):
dtypes = [torch.float, torch.complex64, torch.complex128]
for from_type in dtypes:
for to_type in dtypes:
from_tensor = torch.randn(4, dtype=from_type, device=device)
to_tensor = from_tensor.to(to_type)
if from_type.is_complex and not to_type.is_complex:
self.assertEqual(torch.real(from_tensor), to_tensor, exact_dtype=False)
elif not from_type.is_complex and to_type.is_complex:
self.assertEqual(from_tensor, torch.real(to_tensor), exact_dtype=False)
self.assertEqual(torch.zeros_like(torch.imag(to_tensor)), torch.imag(to_tensor), exact_dtype=False)
else:
self.assertEqual(from_tensor, to_tensor, exact_dtype=False)
@slowTest
@onlyCPU
def test_cat_big(self, device):
SIZE1 = 6500
SIZE2 = 4500
concat_list = []
concat_list.append(torch.ones((SIZE1, 1024 * 512), dtype=torch.uint8, device=device))
concat_list.append(torch.ones((SIZE2, 1024 * 512), dtype=torch.uint8, device=device))
result = torch.cat(concat_list)
self.assertEqual(result.size(0), SIZE1 + SIZE2)
@onlyCPU
def test_cat_bad_input_sizes(self, device):
x = torch.randn(2, 1, device=device)
y = torch.randn(2, 1, 1, device=device)
z = torch.randn(2, 1, 1, device=device)
self.assertRaises(RuntimeError, lambda: torch.cat([x, y, z]))
x = torch.randn(2, 1, 2, device=device)
y = torch.randn(2, 1, 1, device=device)
z = torch.randn(2, 2, 1, device=device)
self.assertRaises(RuntimeError, lambda: torch.cat([x, y, z], dim=1))
@onlyCPU
@dtypes(torch.half, torch.double, torch.int)
def test_cat2(self, device, dtype):
SIZE = 10
for dim in range(-3, 3):
pos_dim = dim if dim >= 0 else 3 + dim
x = torch.randint(low=-100, high=100, size=(13, SIZE, SIZE), device=device).to(dtype).transpose(0, pos_dim)
y = torch.randint(low=-100, high=100, size=(17, SIZE, SIZE), device=device).to(dtype).transpose(0, pos_dim)
z = torch.randint(low=-100, high=100, size=(19, SIZE, SIZE), device=device).to(dtype).transpose(0, pos_dim)
res1 = torch.cat((x, y, z), dim)
self.assertEqual(res1.narrow(pos_dim, 0, 13), x, atol=0, rtol=0)
self.assertEqual(res1.narrow(pos_dim, 13, 17), y, atol=0, rtol=0)
self.assertEqual(res1.narrow(pos_dim, 30, 19), z, atol=0, rtol=0)
x = torch.randint(low=-100, high=100, size=(20, SIZE, SIZE), device=device).to(dtype)
self.assertEqual(torch.cat(torch.split(x, 7)), x)
self.assertEqual(torch.cat(torch.chunk(x, 7)), x)
y = torch.randint(low=-100, high=100, size=(1, SIZE, SIZE), device=device).to(dtype)
z = torch.cat([x, y])
self.assertEqual(z.size(), (21, SIZE, SIZE))
self.assertRaises(RuntimeError, lambda: torch.cat([]))
self.assertRaisesRegex(TypeError, 'got None', lambda: torch.cat([x, None]))
@onlyCPU
def test_cat_scalars(self, device):
x = torch.tensor(0, device=device)
y = torch.tensor(1, device=device)
with self.assertRaisesRegex(RuntimeError, 'zero-dimensional.*cannot be concatenated'):
torch.cat([x, y])
def test_zeros_dtype_out_match(self, device):
d = torch.tensor((2, 3), device=device, dtype=torch.double)
self.assertRaises(RuntimeError, lambda: torch.zeros((2, 3), device=device, dtype=torch.float32, out=d))
# TODO: update to work on CUDA, too
@onlyCPU
def test_trilu_indices(self, device):
for test_args in tri_tests_args:
_compare_trilu_indices(self, *test_args)
run_additional_tri_tests(self, 'cpu')
# test default options
x = torch.ones(
3, 3, dtype=torch.long, device='cpu', layout=torch.strided)
self.assertEqual(
x.tril(0).nonzero().transpose(0, 1), torch.tril_indices(3, 3))
self.assertEqual(
x.triu(0).nonzero().transpose(0, 1), torch.triu_indices(3, 3))
# test stride 0 cases
x = torch.ones(
3, 1, 3, 3, dtype=torch.long, device='cpu', layout=torch.strided)
output = x.triu(2).expand(3, 3, 3, 3)
b = x.clone().expand(3, 3, 3, 3)
self.assertEqual(b.triu(2), output)
self.assertRaises(RuntimeError, lambda: b.triu_(2))
# TODO: update to work on CUDA, too
@onlyCPU
def test_stack(self, device):
for dtype in (torch.half, torch.double, torch.int):
x = torch.randint(low=-100, high=100, size=(2, 3, 4)).to(dtype)
y = torch.randint(low=-100, high=100, size=(2, 3, 4)).to(dtype)
z = torch.randint(low=-100, high=100, size=(2, 3, 4)).to(dtype)
for dim in range(4):
res = torch.stack((x, y, z), dim)
res_neg = torch.stack((x, y, z), dim - 4)
expected_size = x.size()[:dim] + (3,) + x.size()[dim:]
self.assertEqual(res, res_neg)
self.assertEqual(res.size(), expected_size)
self.assertEqual(res.select(dim, 0), x, atol=0, rtol=0)
self.assertEqual(res.select(dim, 1), y, atol=0, rtol=0)
self.assertEqual(res.select(dim, 2), z, atol=0, rtol=0)
# TODO: update to work on CUDA, too
@onlyCPU
def test_stack_out(self, device):
for dtype in (torch.half, torch.double, torch.int):
x = torch.randint(low=-100, high=100, size=(2, 3, 4)).to(dtype)
y = torch.randint(low=-100, high=100, size=(2, 3, 4)).to(dtype)
z = torch.randint(low=-100, high=100, size=(2, 3, 4)).to(dtype)
for dim in range(4):
expected_size = x.size()[:dim] + (3,) + x.size()[dim:]
res_out = x.new(expected_size)
res_neg_out = x.new(expected_size)
res_out_dp = res_out.data_ptr()
res_out_neg_dp = res_neg_out.data_ptr()
torch.stack((x, y, z), dim, out=res_out)
torch.stack((x, y, z), dim - 4, out=res_neg_out)
self.assertEqual(res_out, res_neg_out)
self.assertEqual(res_out.size(), expected_size)
self.assertEqual(res_out_dp, res_out.data_ptr())
self.assertEqual(res_out_neg_dp, res_neg_out.data_ptr())
self.assertEqual(res_out.select(dim, 0), x, atol=0, rtol=0)
self.assertEqual(res_out.select(dim, 1), y, atol=0, rtol=0)
self.assertEqual(res_out.select(dim, 2), z, atol=0, rtol=0)
def test_repeat_interleave(self, device):
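        # With a single tensor argument, repeat_interleave repeats each element by its
        # own value, e.g. [0, 1, 2, 3] -> [1, 2, 2, 3, 3, 3].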
x = torch.tensor([0, 1, 2, 3], device=device)
expected = torch.tensor([1, 2, 2, 3, 3, 3], device=device)
self.assertEqual(torch.repeat_interleave(x), expected)
with self.assertRaises(RuntimeError):
torch.repeat_interleave(torch.arange(4, device=device).reshape(2, 2))
with self.assertRaises(RuntimeError):
torch.repeat_interleave(torch.arange(4.0, device=device))
with self.assertRaises(RuntimeError):
torch.repeat_interleave(torch.tensor([1, 2, -1, 3, 4], device=device))
y = torch.tensor([[1, 2], [3, 4]], device=device)
y1_v1 = torch.repeat_interleave(y, 2)
y1_v2 = torch.repeat_interleave(y, torch.tensor(2, device=device))
y1_v3 = torch.repeat_interleave(y, torch.tensor([2], device=device))
y1_expect = torch.tensor([1, 1, 2, 2, 3, 3, 4, 4], device=device)
self.assertEqual(y1_v1, y1_expect)
self.assertEqual(y1_v2, y1_expect)
self.assertEqual(y1_v3, y1_expect)
y2 = torch.repeat_interleave(y, 3, dim=1)
y2_expect = torch.tensor([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]], device=device)
self.assertEqual(y2, y2_expect)
y3 = torch.repeat_interleave(y, torch.tensor([1, 2], device=device), dim=0)
y3_expect = torch.tensor([[1, 2],
[3, 4],
[3, 4]], device=device)
self.assertEqual(y3, y3_expect)
with self.assertRaises(RuntimeError):
torch.repeat_interleave(y, torch.tensor([1, 2, 3], device=device), dim=0)
with self.assertRaises(RuntimeError):
torch.repeat_interleave(y, torch.arange(9, device=device).reshape(3, 3), dim=0)
# test zero sized dimension
x = torch.zeros((5, 0), device=device)
y = torch.repeat_interleave(x, repeats=3, dim=1)
self.assertEqual(y, x.new_zeros(5, 0, device=device))
x = torch.tensor([], dtype=torch.int64, device=device)
y = torch.repeat_interleave(x, x)
self.assertEqual(y, x)
    # TODO: update to work on CUDA, too
@onlyCPU
def test_new_methods_requires_grad(self, device):
size = (10,)
test_cases = [
# method name, args
('new_full', [size, 1]),
('new_empty', [size]),
('new_zeros', [size]),
('new_ones', [size]),
]
for method_name, args in test_cases:
x = torch.randn(size)
for requires_grad in [True, False]:
x_new = x.__getattribute__(method_name)(*args, requires_grad=requires_grad)
self.assertEqual(x_new.requires_grad, requires_grad)
x = torch.randint(10, size)
with self.assertRaisesRegex(
RuntimeError,
r'Only Tensors of floating point and complex dtype can require gradients'):
x_new = x.__getattribute__(method_name)(*args, requires_grad=True)
# TODO: update to work on CUDA, too?
@onlyCPU
def test_tensor_from_sequence(self, device):
class MockSequence(object):
def __init__(self, lst):
self.lst = lst
def __len__(self):
return len(self.lst)
def __getitem__(self, item):
raise TypeError
class GoodMockSequence(MockSequence):
def __getitem__(self, item):
return self.lst[item]
bad_mock_seq = MockSequence([1.0, 2.0, 3.0])
good_mock_seq = GoodMockSequence([1.0, 2.0, 3.0])
with self.assertRaisesRegex(ValueError, 'could not determine the shape'):
torch.tensor(bad_mock_seq)
self.assertEqual(torch.tensor([1.0, 2.0, 3.0]), torch.tensor(good_mock_seq))
# TODO: update to work on CUDA, too?
@onlyCPU
def test_simple_scalar_cast(self, device):
ok = [torch.tensor([1.5]), torch.zeros(1, 1, 1, 1)]
ok_values = [1.5, 0]
not_ok = map(torch.Tensor, [[], [1, 2], [[1, 2], [3, 4]]])
for tensor, value in zip(ok, ok_values):
self.assertEqual(int(tensor), int(value))
self.assertEqual(float(tensor), float(value))
self.assertEqual(complex(tensor), complex(value))
self.assertEqual(complex(torch.tensor(1.5j)), 1.5j)
for tensor in not_ok:
self.assertRaises(ValueError, lambda: int(tensor))
self.assertRaises(ValueError, lambda: float(tensor))
self.assertRaises(ValueError, lambda: complex(tensor))
self.assertRaises(RuntimeError, lambda: float(torch.tensor(1.5j)))
self.assertRaises(RuntimeError, lambda: int(torch.tensor(1.5j)))
# TODO: update to work on CUDA, too?
@onlyCPU
def test_offset_scalar_cast(self, device):
x = torch.tensor([1., 2., 3.])
y = x[2:]
self.assertEqual(int(y), 3)
def test_meshgrid(self, device):
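        # Each returned grid has a shape equal to the concatenation of the input
        # lengths (a 0-D input contributes size 1), i.e. (1, 3, 2) here.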
a = torch.tensor(1, device=device)
b = torch.tensor([1, 2, 3], device=device)
c = torch.tensor([1, 2], device=device)
grid_a, grid_b, grid_c = torch.meshgrid([a, b, c])
self.assertEqual(grid_a.shape, torch.Size([1, 3, 2]))
self.assertEqual(grid_b.shape, torch.Size([1, 3, 2]))
self.assertEqual(grid_c.shape, torch.Size([1, 3, 2]))
grid_a2, grid_b2, grid_c2 = torch.meshgrid(a, b, c)
self.assertEqual(grid_a2.shape, torch.Size([1, 3, 2]))
self.assertEqual(grid_b2.shape, torch.Size([1, 3, 2]))
self.assertEqual(grid_c2.shape, torch.Size([1, 3, 2]))
expected_grid_a = torch.ones(1, 3, 2, dtype=torch.int64, device=device)
expected_grid_b = torch.tensor([[[1, 1],
[2, 2],
[3, 3]]], device=device)
expected_grid_c = torch.tensor([[[1, 2],
[1, 2],
[1, 2]]], device=device)
self.assertTrue(grid_a.equal(expected_grid_a))
self.assertTrue(grid_b.equal(expected_grid_b))
self.assertTrue(grid_c.equal(expected_grid_c))
self.assertTrue(grid_a2.equal(expected_grid_a))
self.assertTrue(grid_b2.equal(expected_grid_b))
self.assertTrue(grid_c2.equal(expected_grid_c))
def test_cartesian_prod(self, device):
a = torch.tensor([1], device=device)
b = torch.tensor([1, 2, 3], device=device)
c = torch.tensor([1, 2], device=device)
prod = torch.cartesian_prod(a, b, c)
expected = torch.tensor(list(product([a], b, c)), device=device)
self.assertEqual(expected, prod)
# test 0 size input
d = torch.empty(0, dtype=b.dtype, device=device)
prod = torch.cartesian_prod(a, b, c, d)
expected = torch.empty(0, 4, dtype=b.dtype, device=device)
self.assertEqual(expected, prod)
# test single input
prod = torch.cartesian_prod(b)
self.assertEqual(b, prod)
def test_combinations(self, device):
a = torch.tensor([1, 2, 3], device=device)
c = torch.combinations(a, r=1)
expected = torch.tensor(list(combinations(a, r=1)), device=device)
self.assertEqual(c, expected)
c = torch.combinations(a, r=1, with_replacement=True)
expected = torch.tensor(list(combinations_with_replacement(a, r=1)), device=device)
self.assertEqual(c, expected)
c = torch.combinations(a)
expected = torch.tensor(list(combinations(a, r=2)), device=device)
self.assertEqual(c, expected)
c = torch.combinations(a, with_replacement=True)
expected = torch.tensor(list(combinations_with_replacement(a, r=2)), device=device)
self.assertEqual(c, expected)
c = torch.combinations(a, r=3)
expected = torch.tensor(list(combinations(a, r=3)), device=device)
self.assertEqual(c, expected)
c = torch.combinations(a, r=4)
expected = torch.empty(0, 4, dtype=a.dtype, device=device)
self.assertEqual(c, expected)
c = torch.combinations(a, r=5)
expected = torch.empty(0, 5, dtype=a.dtype, device=device)
self.assertEqual(c, expected)
        # test empty input
a = torch.empty(0, device=device)
c1 = torch.combinations(a)
c2 = torch.combinations(a, with_replacement=True)
expected = torch.empty(0, 2, dtype=a.dtype, device=device)
self.assertEqual(c1, expected)
self.assertEqual(c2, expected)
def test_linlogspace_mem_overlap(self, device):
x = torch.rand(1, device=device).expand(10)
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
torch.linspace(1, 10, 10, out=x)
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
torch.logspace(1, 10, 10, out=x)
def test_ctor_with_numpy_array(self, device):
correct_dtypes = [
np.double,
np.float,
np.float16,
np.int64,
np.int32,
np.int16,
np.int8,
np.uint8,
np.bool,
]
incorrect_byteorder = '>' if sys.byteorder == 'little' else '<'
incorrect_dtypes = [incorrect_byteorder + t for t in ['d', 'f']]
for dtype in correct_dtypes:
array = np.array([1, 2, 3, 4], dtype=dtype)
# Upcast
tensor = torch.DoubleTensor(array).to(device)
for i in range(len(array)):
self.assertEqual(tensor[i], array[i])
# Downcast (sometimes)
tensor = torch.FloatTensor(array).to(device)
for i in range(len(array)):
self.assertEqual(tensor[i], array[i])
tensor = torch.HalfTensor(array).to(device)
for i in range(len(array)):
self.assertEqual(tensor[i], array[i])
@dtypes(torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
def test_random(self, device, dtype):
# This test is flaky with p<=(2/(ub-lb))^200=6e-36
t = torch.empty(200, dtype=dtype, device=device)
lb = 1
ub = 4
t.fill_(-1)
t.random_(lb, ub)
self.assertEqual(t.min(), lb)
self.assertEqual(t.max(), ub - 1)
t.fill_(-1)
t.random_(ub)
self.assertEqual(t.min(), 0)
self.assertEqual(t.max(), ub - 1)
def test_random_bool(self, device):
size = 2000
t = torch.empty(size, dtype=torch.bool, device=device)
t.fill_(False)
t.random_()
self.assertEqual(t.min(), False)
self.assertEqual(t.max(), True)
self.assertTrue(0.4 < (t.eq(True)).to(torch.int).sum().item() / size < 0.6)
t.fill_(True)
t.random_()
self.assertEqual(t.min(), False)
self.assertEqual(t.max(), True)
self.assertTrue(0.4 < (t.eq(True)).to(torch.int).sum().item() / size < 0.6)
def test_random_from_to_bool(self, device):
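        # random_(from, to) samples from [from, to); for bool both `from` and `to - 1`
        # must lie within [0, 1] and `to` must be greater than `from`, anything else raises.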
size = 2000
int64_min_val = torch.iinfo(torch.int64).min
int64_max_val = torch.iinfo(torch.int64).max
min_val = 0
max_val = 1
froms = [int64_min_val, -42, min_val - 1, min_val, max_val, max_val + 1, 42]
tos = [-42, min_val - 1, min_val, max_val, max_val + 1, 42, int64_max_val]
for from_ in froms:
for to_ in tos:
t = torch.empty(size, dtype=torch.bool, device=device)
if to_ > from_:
if not (min_val <= from_ <= max_val):
self.assertRaisesRegex(
RuntimeError,
"from is out of bounds",
lambda: t.random_(from_, to_)
)
elif not (min_val <= (to_ - 1) <= max_val):
self.assertRaisesRegex(
RuntimeError,
"to - 1 is out of bounds",
lambda: t.random_(from_, to_)
)
else:
t.random_(from_, to_)
range_ = to_ - from_
delta = 1
self.assertTrue(from_ <= t.to(torch.int).min() < (from_ + delta))
self.assertTrue((to_ - delta) <= t.to(torch.int).max() < to_)
else:
self.assertRaisesRegex(
RuntimeError,
"random_ expects 'from' to be less than 'to', but got from=" + str(from_) + " >= to=" + str(to_),
lambda: t.random_(from_, to_)
)
@dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes()))
def test_random_full_range(self, device, dtype):
size = 2000
alpha = 0.1
int64_min_val = torch.iinfo(torch.int64).min
int64_max_val = torch.iinfo(torch.int64).max
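        # fp_limit is the largest integer magnitude the floating-point dtype represents
        # exactly (2 ** (number of stored mantissa bits + 1)).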
if dtype == torch.double:
fp_limit = 2**53
elif dtype == torch.float:
fp_limit = 2**24
elif dtype == torch.half:
fp_limit = 2**11
elif dtype == torch.bfloat16:
fp_limit = 2**8
else:
fp_limit = 0
t = torch.empty(size, dtype=dtype, device=device)
if dtype in [torch.float, torch.double, torch.half, torch.bfloat16]:
from_ = int(max(-fp_limit, int64_min_val))
to_inc_ = int(min(fp_limit, int64_max_val))
else:
from_ = int(max(torch.iinfo(dtype).min, int64_min_val))
to_inc_ = int(min(torch.iinfo(dtype).max, int64_max_val))
range_ = to_inc_ - from_ + 1
t.random_(from_, None)
delta = max(1, alpha * range_)
self.assertTrue(from_ <= t.to(torch.double).min() < (from_ + delta))
self.assertTrue((to_inc_ - delta) < t.to(torch.double).max() <= to_inc_)
@dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes()))
def test_random_from_to(self, device, dtype):
size = 2000
alpha = 0.1
int64_min_val = torch.iinfo(torch.int64).min
int64_max_val = torch.iinfo(torch.int64).max
if dtype in [torch.float, torch.double, torch.half]:
min_val = int(max(torch.finfo(dtype).min, int64_min_val))
max_val = int(min(torch.finfo(dtype).max, int64_max_val))
froms = [min_val, -42, 0, 42]
tos = [-42, 0, 42, max_val >> 1]
elif dtype == torch.bfloat16:
min_val = int64_min_val
max_val = int64_max_val
froms = [min_val, -42, 0, 42]
tos = [-42, 0, 42, max_val >> 1]
elif dtype == torch.uint8:
min_val = torch.iinfo(dtype).min
max_val = torch.iinfo(dtype).max
froms = [int64_min_val, -42, min_val - 1, min_val, 42, max_val, max_val + 1]
tos = [-42, min_val - 1, min_val, 42, max_val, max_val + 1, int64_max_val]
elif dtype == torch.int64:
min_val = int64_min_val
max_val = int64_max_val
froms = [min_val, -42, 0, 42]
tos = [-42, 0, 42, max_val]
else:
min_val = torch.iinfo(dtype).min
max_val = torch.iinfo(dtype).max
froms = [int64_min_val, min_val - 1, min_val, -42, 0, 42, max_val, max_val + 1]
tos = [min_val - 1, min_val, -42, 0, 42, max_val, max_val + 1, int64_max_val]
if dtype == torch.double:
fp_limit = 2**53
elif dtype == torch.float:
fp_limit = 2**24
elif dtype == torch.half:
fp_limit = 2**11
elif dtype == torch.bfloat16:
fp_limit = 2**8
else:
fp_limit = 0
for from_ in froms:
for to_ in tos:
t = torch.empty(size, dtype=dtype, device=device)
if to_ > from_:
if not (min_val <= from_ <= max_val):
self.assertRaisesRegex(
RuntimeError,
"from is out of bounds",
lambda: t.random_(from_, to_)
)
elif not (min_val <= (to_ - 1) <= max_val):
self.assertRaisesRegex(
RuntimeError,
"to - 1 is out of bounds",
lambda: t.random_(from_, to_)
)
else:
if dtype.is_floating_point and (
not (-fp_limit <= from_ <= fp_limit) or not (-fp_limit <= (to_ - 1) <= fp_limit)):
if not (-fp_limit <= from_ <= fp_limit):
self.assertWarnsRegex(UserWarning, "from is out of bounds",
lambda: t.random_(from_, to_))
if not (-fp_limit <= (to_ - 1) <= fp_limit):
self.assertWarnsRegex(UserWarning, "to - 1 is out of bounds",
lambda: t.random_(from_, to_))
else:
t.random_(from_, to_)
range_ = to_ - from_
delta = max(1, alpha * range_)
if dtype == torch.bfloat16:
# Less strict checks because of rounding errors
# TODO investigate rounding errors
self.assertTrue(from_ <= t.to(torch.double).min() < (from_ + delta))
self.assertTrue((to_ - delta) < t.to(torch.double).max() <= to_)
else:
self.assertTrue(from_ <= t.to(torch.double).min() < (from_ + delta))
self.assertTrue((to_ - delta) <= t.to(torch.double).max() < to_)
else:
self.assertRaisesRegex(
RuntimeError,
"random_ expects 'from' to be less than 'to', but got from=" + str(from_) + " >= to=" + str(to_),
lambda: t.random_(from_, to_)
)
@dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes()))
def test_random_to(self, device, dtype):
size = 2000
alpha = 0.1
int64_min_val = torch.iinfo(torch.int64).min
int64_max_val = torch.iinfo(torch.int64).max
if dtype in [torch.float, torch.double, torch.half]:
min_val = int(max(torch.finfo(dtype).min, int64_min_val))
max_val = int(min(torch.finfo(dtype).max, int64_max_val))
tos = [-42, 0, 42, max_val >> 1]
elif dtype == torch.bfloat16:
min_val = int64_min_val
max_val = int64_max_val
tos = [-42, 0, 42, max_val >> 1]
elif dtype == torch.uint8:
min_val = torch.iinfo(dtype).min
max_val = torch.iinfo(dtype).max
tos = [-42, min_val - 1, min_val, 42, max_val, max_val + 1, int64_max_val]
elif dtype == torch.int64:
min_val = int64_min_val
max_val = int64_max_val
tos = [-42, 0, 42, max_val]
else:
min_val = torch.iinfo(dtype).min
max_val = torch.iinfo(dtype).max
tos = [min_val - 1, min_val, -42, 0, 42, max_val, max_val + 1, int64_max_val]
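        # random_(to_) uses an implicit lower bound of 0.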
from_ = 0
for to_ in tos:
t = torch.empty(size, dtype=dtype, device=device)
if to_ > from_:
if not (min_val <= (to_ - 1) <= max_val):
self.assertRaisesRegex(
RuntimeError,
"to - 1 is out of bounds",
lambda: t.random_(from_, to_)
)
else:
t.random_(to_)
range_ = to_ - from_
delta = max(1, alpha * range_)
if dtype == torch.bfloat16:
# Less strict checks because of rounding errors
# TODO investigate rounding errors
self.assertTrue(from_ <= t.to(torch.double).min() < (from_ + delta))
self.assertTrue((to_ - delta) < t.to(torch.double).max() <= to_)
else:
self.assertTrue(from_ <= t.to(torch.double).min() < (from_ + delta))
self.assertTrue((to_ - delta) <= t.to(torch.double).max() < to_)
else:
self.assertRaisesRegex(
RuntimeError,
"random_ expects 'from' to be less than 'to', but got from=" + str(from_) + " >= to=" + str(to_),
lambda: t.random_(from_, to_)
)
@dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes()))
def test_random_default(self, device, dtype):
size = 2000
alpha = 0.1
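        # The checks below require all samples of random_() with no arguments to
        # lie in [0, to_inc], where to_inc is 2**mantissa_bits for floating point
        # dtypes and the dtype's maximum for integral dtypes.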
if dtype == torch.float:
to_inc = 1 << 24
elif dtype == torch.double:
to_inc = 1 << 53
elif dtype == torch.half:
to_inc = 1 << 11
elif dtype == torch.bfloat16:
to_inc = 1 << 8
else:
to_inc = torch.iinfo(dtype).max
t = torch.empty(size, dtype=dtype, device=device)
t.random_()
self.assertTrue(0 <= t.to(torch.double).min() < alpha * to_inc)
self.assertTrue((to_inc - alpha * to_inc) < t.to(torch.double).max() <= to_inc)
# TODO: this test should be updated
@onlyOnCPUAndCUDA
def test_empty_full(self, device):
torch_device = torch.device(device)
device_type = torch_device.type
if device_type == 'cpu':
do_test_empty_full(self, get_all_math_dtypes('cpu'), torch.strided, torch_device)
if device_type == 'cuda':
do_test_empty_full(self, get_all_math_dtypes('cpu'), torch.strided, None)
do_test_empty_full(self, get_all_math_dtypes('cpu'), torch.strided, torch_device)
# TODO: this test should be updated
@suppress_warnings
@onlyOnCPUAndCUDA
@deviceCountAtLeast(1)
def test_tensor_device(self, devices):
device_type = torch.device(devices[0]).type
if device_type == 'cpu':
self.assertEqual('cpu', torch.tensor(5).device.type)
self.assertEqual('cpu',
torch.ones((2, 3), dtype=torch.float32, device='cpu').device.type)
self.assertEqual('cpu',
torch.ones((2, 3), dtype=torch.float32, device='cpu:0').device.type)
self.assertEqual('cpu',
torch.tensor(torch.ones((2, 3), dtype=torch.float32), device='cpu:0').device.type)
self.assertEqual('cpu', torch.tensor(np.random.randn(2, 3), device='cpu').device.type)
if device_type == 'cuda':
self.assertEqual('cuda:0', str(torch.tensor(5).cuda(0).device))
self.assertEqual('cuda:0', str(torch.tensor(5).cuda('cuda:0').device))
self.assertEqual('cuda:0',
str(torch.tensor(5, dtype=torch.int64, device=0).device))
self.assertEqual('cuda:0',
str(torch.tensor(5, dtype=torch.int64, device='cuda:0').device))
self.assertEqual('cuda:0',
str(torch.tensor(torch.ones((2, 3), dtype=torch.float32), device='cuda:0').device))
self.assertEqual('cuda:0', str(torch.tensor(np.random.randn(2, 3), device='cuda:0').device))
for device in devices:
with torch.cuda.device(device):
device_string = 'cuda:' + str(torch.cuda.current_device())
self.assertEqual(device_string,
str(torch.tensor(5, dtype=torch.int64, device='cuda').device))
with self.assertRaises(RuntimeError):
torch.tensor(5).cuda('cpu')
with self.assertRaises(RuntimeError):
torch.tensor(5).cuda('cpu:0')
if len(devices) > 1:
self.assertEqual('cuda:1', str(torch.tensor(5).cuda(1).device))
self.assertEqual('cuda:1', str(torch.tensor(5).cuda('cuda:1').device))
self.assertEqual('cuda:1',
str(torch.tensor(5, dtype=torch.int64, device=1).device))
self.assertEqual('cuda:1',
str(torch.tensor(5, dtype=torch.int64, device='cuda:1').device))
self.assertEqual('cuda:1',
str(torch.tensor(torch.ones((2, 3), dtype=torch.float32),
device='cuda:1').device))
self.assertEqual('cuda:1',
str(torch.tensor(np.random.randn(2, 3), device='cuda:1').device))
# TODO: this test should be updated
@onlyOnCPUAndCUDA
def test_as_strided_neg(self, device):
error = r'as_strided: Negative strides are not supported at the ' \
r'moment, got strides: \[-?[0-9]+(, -?[0-9]+)*\]'
with self.assertRaisesRegex(RuntimeError, error):
torch.as_strided(torch.ones(3, 3, device=device), (1, 1), (2, -1))
with self.assertRaisesRegex(RuntimeError, error):
torch.as_strided(torch.ones(14, device=device), (2,), (-11,))
# TODO: this test should be updated
def test_zeros(self, device):
res1 = torch.zeros(100, 100, device=device)
res2 = torch.tensor((), device=device)
torch.zeros(100, 100, device=device, out=res2)
self.assertEqual(res1, res2)
boolTensor = torch.zeros(2, 2, device=device, dtype=torch.bool)
expected = torch.tensor([[False, False], [False, False]],
device=device, dtype=torch.bool)
self.assertEqual(boolTensor, expected)
halfTensor = torch.zeros(1, 1, device=device, dtype=torch.half)
expected = torch.tensor([[0.]], device=device, dtype=torch.float16)
self.assertEqual(halfTensor, expected)
bfloat16Tensor = torch.zeros(1, 1, device=device, dtype=torch.bfloat16)
expected = torch.tensor([[0.]], device=device, dtype=torch.bfloat16)
self.assertEqual(bfloat16Tensor, expected)
complexTensor = torch.zeros(2, 2, device=device, dtype=torch.complex64)
expected = torch.tensor([[0., 0.], [0., 0.]], device=device, dtype=torch.complex64)
self.assertEqual(complexTensor, expected)
# TODO: this test should be updated
def test_zeros_out(self, device):
shape = (3, 4)
out = torch.zeros(shape, device=device)
torch.zeros(shape, device=device, out=out)
# change the dtype, layout, device
with self.assertRaises(RuntimeError):
torch.zeros(shape, device=device, dtype=torch.int64, out=out)
with self.assertRaises(RuntimeError):
torch.zeros(shape, device=device, layout=torch.sparse_coo, out=out)
# leave them the same
self.assertEqual(torch.zeros(shape, device=device),
torch.zeros(shape, device=device, dtype=out.dtype, out=out))
self.assertEqual(torch.zeros(shape, device=device),
torch.zeros(shape, device=device, layout=torch.strided, out=out))
self.assertEqual(torch.zeros(shape, device=device),
torch.zeros(shape, device=device, out=out))
# TODO: this test should be updated
def test_ones(self, device):
res1 = torch.ones(100, 100, device=device)
res2 = torch.tensor((), device=device)
torch.ones(100, 100, device=device, out=res2)
self.assertEqual(res1, res2)
# test boolean tensor
res1 = torch.ones(1, 2, device=device, dtype=torch.bool)
expected = torch.tensor([[True, True]], device=device, dtype=torch.bool)
self.assertEqual(res1, expected)
# TODO: this test should be updated
@onlyCPU
def test_constructor_dtypes(self, device):
default_type = torch.tensor([]).type()
self.assertIs(torch.tensor([]).dtype, torch.get_default_dtype())
self.assertIs(torch.uint8, torch.ByteTensor.dtype)
self.assertIs(torch.float32, torch.FloatTensor.dtype)
self.assertIs(torch.float64, torch.DoubleTensor.dtype)
torch.set_default_tensor_type('torch.FloatTensor')
self.assertIs(torch.float32, torch.get_default_dtype())
self.assertIs(torch.FloatStorage, torch.Storage)
torch.set_default_dtype(torch.float64)
self.assertIs(torch.float64, torch.get_default_dtype())
self.assertIs(torch.DoubleStorage, torch.Storage)
torch.set_default_tensor_type(torch.FloatTensor)
self.assertIs(torch.float32, torch.get_default_dtype())
self.assertIs(torch.FloatStorage, torch.Storage)
if torch.cuda.is_available():
torch.set_default_tensor_type(torch.cuda.FloatTensor)
self.assertIs(torch.float32, torch.get_default_dtype())
self.assertIs(torch.float32, torch.cuda.FloatTensor.dtype)
self.assertIs(torch.cuda.FloatStorage, torch.Storage)
torch.set_default_dtype(torch.float64)
self.assertIs(torch.float64, torch.get_default_dtype())
self.assertIs(torch.cuda.DoubleStorage, torch.Storage)
# don't support integral or sparse default types.
self.assertRaises(TypeError, lambda: torch.set_default_tensor_type('torch.IntTensor'))
self.assertRaises(TypeError, lambda: torch.set_default_dtype(torch.int64))
# don't allow passing dtype to set_default_tensor_type
self.assertRaises(TypeError, lambda: torch.set_default_tensor_type(torch.float32))
torch.set_default_tensor_type(default_type)
# TODO: this test should be updated
@onlyCPU
def test_constructor_device_legacy(self, device):
self.assertRaises(RuntimeError, lambda: torch.FloatTensor(device='cuda'))
self.assertRaises(RuntimeError, lambda: torch.FloatTensor(torch.Size([2, 3, 4]), device='cuda'))
self.assertRaises(RuntimeError, lambda: torch.FloatTensor((2.0, 3.0), device='cuda'))
self.assertRaises(RuntimeError, lambda: torch.Tensor(device='cuda'))
self.assertRaises(RuntimeError, lambda: torch.Tensor(torch.Size([2, 3, 4]), device='cuda'))
self.assertRaises(RuntimeError, lambda: torch.Tensor((2.0, 3.0), device='cuda'))
# Tensor constructor/new with Tensor argument shouldn't work with device specified
i = torch.tensor([1], device='cpu')
self.assertRaises(RuntimeError, lambda: torch.Tensor(i, device='cpu'))
self.assertRaises(RuntimeError, lambda: i.new(i, device='cpu'))
self.assertRaises(RuntimeError, lambda: torch.Tensor(i, device='cuda'))
self.assertRaises(RuntimeError, lambda: i.new(i, device='cuda'))
x = torch.randn((3,), device='cpu')
self.assertRaises(RuntimeError, lambda: x.new(device='cuda'))
self.assertRaises(RuntimeError, lambda: x.new(torch.Size([2, 3, 4]), device='cuda'))
self.assertRaises(RuntimeError, lambda: x.new((2.0, 3.0), device='cuda'))
if torch.cuda.is_available():
self.assertRaises(RuntimeError, lambda: torch.cuda.FloatTensor(device='cpu'))
self.assertRaises(RuntimeError, lambda: torch.cuda.FloatTensor(torch.Size([2, 3, 4]), device='cpu'))
self.assertRaises(RuntimeError, lambda: torch.cuda.FloatTensor((2.0, 3.0), device='cpu'))
# Tensor constructor/new with Tensor argument shouldn't work with device specified
i = torch.tensor([1], device='cuda')
self.assertRaises(RuntimeError, lambda: torch.Tensor(i, device='cuda'))
self.assertRaises(RuntimeError, lambda: i.new(i, device='cuda'))
self.assertRaises(RuntimeError, lambda: torch.Tensor(i, device='cpu'))
self.assertRaises(RuntimeError, lambda: i.new(i, device='cpu'))
default_type = torch.Tensor().type()
torch.set_default_tensor_type(torch.cuda.FloatTensor)
self.assertRaises(RuntimeError, lambda: torch.Tensor(device='cpu'))
self.assertRaises(RuntimeError, lambda: torch.Tensor(torch.Size([2, 3, 4]), device='cpu'))
self.assertRaises(RuntimeError, lambda: torch.Tensor((2.0, 3.0), device='cpu'))
torch.set_default_tensor_type(torch.cuda.FloatTensor)
torch.set_default_tensor_type(default_type)
x = torch.randn((3,), device='cuda')
self.assertRaises(RuntimeError, lambda: x.new(device='cpu'))
self.assertRaises(RuntimeError, lambda: x.new(torch.Size([2, 3, 4]), device='cpu'))
self.assertRaises(RuntimeError, lambda: x.new((2.0, 3.0), device='cpu'))
# TODO: this test should be updated
@suppress_warnings
@onlyCPU
def test_tensor_factory(self, device):
# TODO: This test probably doesn't make too much sense now that
# torch.tensor has been established for a while; it makes more
# sense to test the legacy behavior in terms of the new behavior
expected = torch.Tensor([1, 1])
# test data
res1 = torch.tensor([1, 1])
self.assertEqual(res1, expected, exact_dtype=False)
res1 = torch.tensor([1, 1], dtype=torch.int)
self.assertEqual(res1, expected, exact_dtype=False)
self.assertIs(torch.int, res1.dtype)
# test copy
res2 = torch.tensor(expected)
self.assertEqual(res2, expected)
res2[1] = 2
self.assertEqual(expected, torch.ones_like(expected))
res2 = torch.tensor(expected, dtype=torch.int)
        self.assertEqual(res2, expected, exact_dtype=False)
        self.assertIs(torch.int, res2.dtype)
# test copy with numpy
for dtype in [np.float64, np.int64, np.int8, np.uint8]:
a = np.array([5.]).astype(dtype)
res1 = torch.tensor(a)
self.assertEqual(5., res1[0].item())
a[0] = 7.
self.assertEqual(5., res1[0].item())
# test boolean tensor
a = torch.tensor([True, True, False, True, True], dtype=torch.bool)
b = torch.tensor([-1, -1.1, 0, 1, 1.1], dtype=torch.bool)
self.assertEqual(a, b)
c = torch.tensor([-0.1, -1.1, 0, 1, 0.1], dtype=torch.bool)
self.assertEqual(a, c)
d = torch.tensor((-.3, 0, .3, 1, 3 / 7), dtype=torch.bool)
e = torch.tensor((True, False, True, True, True), dtype=torch.bool)
self.assertEqual(e, d)
f = torch.tensor((-1, 0, -1.1, 1, 1.1), dtype=torch.bool)
self.assertEqual(e, f)
int64_max = torch.iinfo(torch.int64).max
int64_min = torch.iinfo(torch.int64).min
float64_max = torch.finfo(torch.float64).max
float64_min = torch.finfo(torch.float64).min
g_1 = torch.tensor((float('nan'), 0, int64_min, int64_max, int64_min - 1), dtype=torch.bool)
self.assertEqual(e, g_1)
g_2 = torch.tensor((int64_max + 1, 0, (int64_max + 1) * 2, (int64_max + 1) * 2 + 1, float64_min), dtype=torch.bool)
self.assertEqual(e, g_2)
g_3 = torch.tensor((float64_max, 0, float64_max + 1, float64_min - 1, float64_max + 1e291), dtype=torch.bool)
self.assertEqual(e, g_3)
h = torch.tensor([True, False, False, True, False, True, True], dtype=torch.bool)
i = torch.tensor([1e-323, 1e-324, 0j, 1e-323j, 1e-324j, 1 + 2j, -1j], dtype=torch.bool)
self.assertEqual(h, i)
j = torch.tensor((True, True, True, True), dtype=torch.bool)
k = torch.tensor((1e323, -1e323, float('inf'), -float('inf')), dtype=torch.bool)
self.assertEqual(j, k)
# TODO: this test should be updated
@suppress_warnings
@onlyCPU
def test_tensor_factory_copy_var(self, device):
def check_copy(copy, is_leaf, requires_grad, data_ptr=None):
if data_ptr is None:
                data_ptr = copy.data_ptr()
self.assertEqual(copy, source, exact_dtype=False)
self.assertTrue(copy.is_leaf == is_leaf)
self.assertTrue(copy.requires_grad == requires_grad)
            self.assertTrue(copy.data_ptr() == data_ptr)
source = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
# test torch.tensor()
check_copy(torch.tensor(source), True, False)
check_copy(torch.tensor(source, requires_grad=False), True, False)
check_copy(torch.tensor(source, requires_grad=True), True, True)
# test tensor.new_tensor()
copy = torch.randn(1)
check_copy(copy.new_tensor(source), True, False)
check_copy(copy.new_tensor(source, requires_grad=False), True, False)
check_copy(copy.new_tensor(source, requires_grad=True), True, True)
# test torch.as_tensor()
        check_copy(torch.as_tensor(source), source.is_leaf, source.requires_grad, source.data_ptr())  # not a copy
check_copy(torch.as_tensor(source, dtype=torch.float), False, True) # copy and keep the graph
# TODO: this test should be updated
@onlyCPU
def test_tensor_factory_type_inference(self, device):
def test_inference(default_dtype):
saved_dtype = torch.get_default_dtype()
torch.set_default_dtype(default_dtype)
default_complex_dtype = torch.complex64 if default_dtype == torch.float32 else torch.complex128
self.assertIs(default_dtype, torch.tensor(()).dtype)
self.assertIs(default_dtype, torch.tensor(5.).dtype)
self.assertIs(torch.int64, torch.tensor(5).dtype)
self.assertIs(torch.bool, torch.tensor(True).dtype)
self.assertIs(torch.int32, torch.tensor(5, dtype=torch.int32).dtype)
self.assertIs(default_dtype, torch.tensor(((7, 5), (9, 5.))).dtype)
self.assertIs(default_dtype, torch.tensor(((5., 5), (3, 5))).dtype)
self.assertIs(torch.int64, torch.tensor(((5, 3), (3, 5))).dtype)
self.assertIs(default_complex_dtype, torch.tensor(((5, 3 + 2j), (3, 5 + 4j))).dtype)
self.assertIs(torch.float64, torch.tensor(np.array(())).dtype)
self.assertIs(torch.float64, torch.tensor(np.array(5.)).dtype)
if np.array(5).dtype == np.int64: # np long, which can be 4 bytes (e.g. on windows)
self.assertIs(torch.int64, torch.tensor(np.array(5)).dtype)
else:
self.assertIs(torch.int32, torch.tensor(np.array(5)).dtype)
self.assertIs(torch.uint8, torch.tensor(np.array(3, dtype=np.uint8)).dtype)
self.assertIs(default_dtype, torch.tensor(((7, np.array(5)), (np.array(9), 5.))).dtype)
self.assertIs(torch.float64, torch.tensor(((7, 5), (9, np.array(5.)))).dtype)
self.assertIs(torch.int64, torch.tensor(((5, np.array(3)), (np.array(3), 5))).dtype)
torch.set_default_dtype(saved_dtype)
test_inference(torch.float64)
test_inference(torch.float32)
# TODO: this test should be updated
@suppress_warnings
@onlyCPU
def test_new_tensor(self, device):
expected = torch.autograd.Variable(torch.ByteTensor([1, 1]))
# test data
res1 = expected.new_tensor([1, 1])
self.assertEqual(res1, expected)
res1 = expected.new_tensor([1, 1], dtype=torch.int)
self.assertEqual(res1, expected, exact_dtype=False)
self.assertIs(torch.int, res1.dtype)
# test copy
res2 = expected.new_tensor(expected)
self.assertEqual(res2, expected)
res2[1] = 2
self.assertEqual(expected, torch.ones_like(expected))
res2 = expected.new_tensor(expected, dtype=torch.int)
self.assertEqual(res2, expected, exact_dtype=False)
self.assertIs(torch.int, res2.dtype)
# test copy with numpy
a = np.array([5.])
res1 = torch.tensor(a)
res1 = res1.new_tensor(a)
self.assertEqual(5., res1[0].item())
a[0] = 7.
self.assertEqual(5., res1[0].item())
if torch.cuda.device_count() >= 2:
expected = expected.cuda(1)
res1 = expected.new_tensor([1, 1])
self.assertEqual(res1.get_device(), expected.get_device())
res1 = expected.new_tensor([1, 1], dtype=torch.int)
self.assertIs(torch.int, res1.dtype)
self.assertEqual(res1.get_device(), expected.get_device())
res2 = expected.new_tensor(expected)
self.assertEqual(res2.get_device(), expected.get_device())
res2 = expected.new_tensor(expected, dtype=torch.int)
            self.assertIs(torch.int, res2.dtype)
self.assertEqual(res2.get_device(), expected.get_device())
res2 = expected.new_tensor(expected, dtype=torch.int, device=0)
            self.assertIs(torch.int, res2.dtype)
self.assertEqual(res2.get_device(), 0)
res1 = expected.new_tensor(1)
self.assertEqual(res1.get_device(), expected.get_device())
res1 = expected.new_tensor(1, dtype=torch.int)
self.assertIs(torch.int, res1.dtype)
self.assertEqual(res1.get_device(), expected.get_device())
# TODO: this test should be updated
@onlyCPU
def test_as_tensor(self, device):
# from python data
x = [[0, 1], [2, 3]]
self.assertEqual(torch.tensor(x), torch.as_tensor(x))
self.assertEqual(torch.tensor(x, dtype=torch.float32), torch.as_tensor(x, dtype=torch.float32))
# python data with heterogeneous types
z = [0, 'torch']
with self.assertRaisesRegex(TypeError, "invalid data type"):
torch.tensor(z)
torch.as_tensor(z)
# python data with self-referential lists
z = [0]
z += [z]
with self.assertRaisesRegex(TypeError, "self-referential lists are incompatible"):
torch.tensor(z)
torch.as_tensor(z)
z = [[1, 2], z]
with self.assertRaisesRegex(TypeError, "self-referential lists are incompatible"):
torch.tensor(z)
torch.as_tensor(z)
# from tensor (doesn't copy unless type is different)
y = torch.tensor(x)
self.assertIs(y, torch.as_tensor(y))
self.assertIsNot(y, torch.as_tensor(y, dtype=torch.float32))
if torch.cuda.is_available():
self.assertIsNot(y, torch.as_tensor(y, device='cuda'))
y_cuda = y.to('cuda')
self.assertIs(y_cuda, torch.as_tensor(y_cuda))
self.assertIs(y_cuda, torch.as_tensor(y_cuda, device='cuda'))
# doesn't copy
for dtype in [np.float64, np.int64, np.int8, np.uint8]:
n = np.random.rand(5, 6).astype(dtype)
n_astensor = torch.as_tensor(n)
self.assertEqual(torch.tensor(n), n_astensor)
n_astensor[0][0] = 25.7
self.assertEqual(torch.tensor(n), n_astensor)
# changing dtype causes copy
n = np.random.rand(5, 6).astype(np.float32)
n_astensor = torch.as_tensor(n, dtype=torch.float64)
self.assertEqual(torch.tensor(n, dtype=torch.float64), n_astensor)
n_astensor[0][1] = 250.8
self.assertNotEqual(torch.tensor(n, dtype=torch.float64), n_astensor)
# changing device causes copy
if torch.cuda.is_available():
n = np.random.randn(5, 6)
n_astensor = torch.as_tensor(n, device='cuda')
self.assertEqual(torch.tensor(n, device='cuda'), n_astensor)
n_astensor[0][2] = 250.9
self.assertNotEqual(torch.tensor(n, device='cuda'), n_astensor)
# TODO: this test should be updated
@suppress_warnings
def test_range(self, device):
res1 = torch.range(0, 1, device=device)
res2 = torch.tensor((), device=device)
torch.range(0, 1, device=device, out=res2)
self.assertEqual(res1, res2, atol=0, rtol=0)
# Check range for non-contiguous tensors.
x = torch.zeros(2, 3, device=device)
torch.range(0, 3, device=device, out=x.narrow(1, 1, 2))
res2 = torch.tensor(((0, 0, 1), (0, 2, 3)), device=device, dtype=torch.float32)
self.assertEqual(x, res2, atol=1e-16, rtol=0)
# Check negative
res1 = torch.tensor((1, 0), device=device, dtype=torch.float32)
res2 = torch.tensor((), device=device)
torch.range(1, 0, -1, device=device, out=res2)
self.assertEqual(res1, res2, atol=0, rtol=0)
# Equal bounds
res1 = torch.ones(1, device=device)
res2 = torch.tensor((), device=device)
torch.range(1, 1, -1, device=device, out=res2)
self.assertEqual(res1, res2, atol=0, rtol=0)
torch.range(1, 1, 1, device=device, out=res2)
self.assertEqual(res1, res2, atol=0, rtol=0)
# TODO: this test should be updated
def test_range_warning(self, device):
with warnings.catch_warnings(record=True) as w:
torch.range(0, 10, device=device)
self.assertEqual(len(w), 1)
# TODO: this test should be updated
@onlyCPU
def test_arange(self, device):
res = torch.tensor(range(10000))
res1 = torch.arange(0, 10000) # Use a larger number so vectorized code can be triggered
res2 = torch.tensor([], dtype=torch.int64)
torch.arange(0, 10000, out=res2)
self.assertEqual(res, res1, atol=0, rtol=0)
self.assertEqual(res, res2, atol=0, rtol=0)
# Vectorization on non-contiguous tensors
res = torch.rand(3, 3, 300000).to(torch.int64)
res = res.permute(2, 0, 1)
torch.arange(0, 300000 * 3 * 3, out=res)
self.assertEqual(res.flatten(), torch.arange(0, 300000 * 3 * 3))
# Check arange with only one argument
res1 = torch.arange(10)
res2 = torch.arange(0, 10)
self.assertEqual(res1, res2, atol=0, rtol=0)
# Check arange for non-contiguous tensors.
x = torch.zeros(2, 3)
torch.arange(0, 4, out=x.narrow(1, 1, 2))
res2 = torch.tensor(((0., 0., 1.), (0., 2., 3.)))
self.assertEqual(x, res2, atol=1e-16, rtol=0)
# Check negative
res1 = torch.tensor((1., 0.))
res2 = torch.tensor([])
torch.arange(1, -1, -1, out=res2)
self.assertEqual(res1, res2, atol=0, rtol=0)
# Equal bounds
res1 = torch.ones(1)
res2 = torch.tensor([])
torch.arange(1, 0, -1, out=res2)
self.assertEqual(res1, res2, atol=0, rtol=0)
torch.arange(1, 2, 1, out=res2)
self.assertEqual(res1, res2, atol=0, rtol=0)
# FloatTensor
res1 = torch.arange(0.6, 0.89, 0.1, out=torch.FloatTensor())
self.assertEqual(res1, [0.6, 0.7, 0.8])
res1 = torch.arange(1, 10, 0.3, out=torch.FloatTensor())
self.assertEqual(res1.size(0), 30)
self.assertEqual(res1[0], 1)
self.assertEqual(res1[29], 9.7)
# DoubleTensor
res1 = torch.arange(0.6, 0.89, 0.1, out=torch.DoubleTensor())
self.assertEqual(res1, [0.6, 0.7, 0.8])
res1 = torch.arange(1, 10, 0.3, out=torch.DoubleTensor())
self.assertEqual(res1.size(0), 30)
self.assertEqual(res1[0], 1)
self.assertEqual(res1[29], 9.7)
# Bool Input matching numpy semantics
r = torch.arange(True)
self.assertEqual(r[0], 0)
r2 = torch.arange(False)
self.assertEqual(len(r2), 0)
self.assertEqual(r.dtype, torch.int64)
self.assertEqual(r2.dtype, torch.int64)
# Check that it's exclusive
r = torch.arange(0, 5)
self.assertEqual(r.min(), 0)
self.assertEqual(r.max(), 4)
self.assertEqual(r.numel(), 5)
r = torch.arange(0, 5, 2)
self.assertEqual(r.min(), 0)
self.assertEqual(r.max(), 4)
self.assertEqual(r.numel(), 3)
r1 = torch.arange(0, 5 + 1e-6)
# NB: without the dtype, we'll infer output type to be int64
r2 = torch.arange(0, 5, dtype=torch.float32)
r3 = torch.arange(0, 5 - 1e-6)
self.assertEqual(r1[:-1], r2, atol=0, rtol=0)
self.assertEqual(r2, r3, atol=0, rtol=0)
r1 = torch.arange(10, -1 + 1e-6, -1)
# NB: without the dtype, we'll infer output type to be int64
r2 = torch.arange(10, -1, -1, dtype=torch.float32)
r3 = torch.arange(10, -1 - 1e-6, -1)
self.assertEqual(r1, r2, atol=0, rtol=0)
self.assertEqual(r2, r3[:-1], atol=0, rtol=0)
# Test Rounding Errors
line = torch.zeros(size=(1, 49))
self.assertWarnsRegex(UserWarning, 'The out tensor will be resized',
lambda: torch.arange(-1, 1, 2. / 49, dtype=torch.float32, out=line))
self.assertEqual(line.shape, [50])
x = torch.empty(1).expand(10)
self.assertRaises(RuntimeError, lambda: torch.arange(10, out=x))
msg = "unsupported range"
self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(0, float('inf')))
self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(float('inf')))
for device in torch.testing.get_all_device_types():
self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(-5, float('nan'), device=device))
# check with step size
self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(0, float('-inf'), -1, device=device))
self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(0, float('inf'), device=device))
self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(float('-inf'), 10, device=device))
self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(float('nan'), 10, device=device))
self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(float('inf'), device=device))
self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(float('nan'), device=device))
self.assertRaisesRegex(
RuntimeError, "overflow",
lambda: torch.arange(1.175494351e-38, 3.402823466e+38, device=device))
# check that it holds a consistent output shape on precision-cornered step sizes
d = torch.arange(-4.0, 4.0, 0.01, dtype=torch.float32, device=device)
self.assertEqual(d.shape[0], 800)
# TODO: this test should be updated
@onlyCPU
def test_arange_inference(self, device):
saved_dtype = torch.get_default_dtype()
torch.set_default_dtype(torch.float32)
# end only
self.assertIs(torch.float32, torch.arange(1.).dtype)
self.assertIs(torch.float32, torch.arange(torch.tensor(1.)).dtype)
self.assertIs(torch.float32, torch.arange(torch.tensor(1., dtype=torch.float64)).dtype)
self.assertIs(torch.int64, torch.arange(1).dtype)
self.assertIs(torch.int64, torch.arange(torch.tensor(1)).dtype)
self.assertIs(torch.int64, torch.arange(torch.tensor(1, dtype=torch.int16)).dtype)
# start, end, [step]
self.assertIs(torch.float32, torch.arange(1., 3).dtype)
self.assertIs(torch.float32, torch.arange(torch.tensor(1., dtype=torch.float64), 3).dtype)
self.assertIs(torch.float32, torch.arange(1, 3.).dtype)
self.assertIs(torch.float32, torch.arange(torch.tensor(1, dtype=torch.int16), torch.tensor(3.)).dtype)
self.assertIs(torch.float32, torch.arange(1, 3, 1.).dtype)
self.assertIs(torch.float32,
torch.arange(torch.tensor(1),
torch.tensor(3, dtype=torch.int16),
torch.tensor(1., dtype=torch.float64)).dtype)
self.assertIs(torch.int64, torch.arange(1, 3).dtype)
self.assertIs(torch.int64, torch.arange(torch.tensor(1), 3).dtype)
self.assertIs(torch.int64, torch.arange(torch.tensor(1), torch.tensor(3, dtype=torch.int16)).dtype)
self.assertIs(torch.int64, torch.arange(1, 3, 1).dtype)
self.assertIs(torch.int64,
torch.arange(torch.tensor(1),
torch.tensor(3),
torch.tensor(1, dtype=torch.int16)).dtype)
torch.set_default_dtype(saved_dtype)
# cannot call storage() on meta tensor
@skipMeta
def test_empty_strided(self, device):
for shape in [(2, 3, 4), (0, 2, 0)]:
            # Some of these cases are pretty strange; we just verify that if
            # as_strided allows them, then empty_strided does as well.
for strides in [(12, 4, 1), (2, 4, 6), (0, 0, 0)]:
empty_strided = torch.empty_strided(shape, strides, device=device)
# as_strided checks the storage size is big enough to support such a strided tensor;
# instead of repeating this calculation, we just use empty_strided which does the same
# calculation when setting the storage size.
as_strided = torch.empty(empty_strided.storage().size(),
device=device).as_strided(shape, strides)
self.assertEqual(empty_strided.shape, as_strided.shape)
self.assertEqual(empty_strided.stride(), as_strided.stride())
def test_new_empty_strided(self, device):
def _test(sizes, strides, dtype):
x = torch.zeros(5, 5, dtype=dtype, device=device)
result = x.new_empty_strided(sizes, strides)
expected = torch.empty_strided(sizes, strides, dtype=x.dtype, device=x.device)
self.assertEqual(result.shape, expected.shape)
self.assertEqual(result.stride(), expected.stride())
self.assertEqual(result.dtype, expected.dtype)
self.assertEqual(result.device, expected.device)
_test([2, 3], [3, 1], torch.float)
_test([5, 3], [0, 1], torch.int)
_test([], [], torch.float)
# Some really weird cases
for shape in [(2, 3, 4), (0, 2, 0)]:
for strides in [(12, 4, 1), (2, 4, 6), (0, 0, 0)]:
_test(shape, strides, torch.float)
def test_strided_mismatched_stride_shape(self, device):
for shape, strides in [((1, ), ()), ((1, 2), (1, ))]:
with self.assertRaisesRegex(RuntimeError, "mismatch in length of strides and shape"):
torch.tensor(0.42, device=device).as_strided(shape, strides)
with self.assertRaisesRegex(RuntimeError, "mismatch in length of strides and shape"):
torch.tensor(0.42, device=device).as_strided_(shape, strides)
def test_empty_tensor_props(self, device):
sizes = [(0,), (0, 3), (5, 0), (5, 0, 3, 0, 2), (0, 3, 0, 2), (0, 5, 0, 2, 0)]
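        # Zero-element tensors should still report the requested shape, be
        # contiguous, and have the same strides as a tensor whose zero-sized
        # dims are replaced by 1.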
for size in sizes:
x = torch.empty(tuple(size), device=device)
self.assertEqual(size, x.shape)
self.assertTrue(x.is_contiguous())
size_ones_instead_of_zeros = (x if x != 0 else 1 for x in size)
y = torch.empty(tuple(size_ones_instead_of_zeros), device=device)
self.assertEqual(x.stride(), y.stride())
def test_eye(self, device):
for dtype in get_all_dtypes():
if dtype == torch.bfloat16:
continue
# Test the RuntimeError is raised when either m or n is a negative number
for n, m in ((-1, 1), (1, -1), (-1, -1)):
with self.assertRaisesRegex(RuntimeError, 'must be greater or equal to'):
torch.eye(n, m, device=device, dtype=dtype)
# Test when the `m` parameter is not provided
for n in (3, 5, 7):
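                # Compare torch.eye(n) against a reference identity built by
                # filling the main diagonal of an n x n zero matrix.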
res1 = torch.eye(n, device=device, dtype=dtype)
naive_eye = torch.zeros(n, n, dtype=dtype, device=device)
naive_eye.diagonal(dim1=-2, dim2=-1).fill_(1)
self.assertEqual(naive_eye, res1)
# Check eye_out outputs
res2 = torch.empty(0, device=device, dtype=dtype)
torch.eye(n, out=res2)
self.assertEqual(res1, res2)
for n, m in product([3, 5, 7], repeat=2):
# Construct identity using diagonal and fill
res1 = torch.eye(n, m, device=device, dtype=dtype)
naive_eye = torch.zeros(n, m, dtype=dtype, device=device)
naive_eye.diagonal(dim1=-2, dim2=-1).fill_(1)
self.assertEqual(naive_eye, res1)
# Check eye_out outputs
res2 = torch.empty(0, device=device, dtype=dtype)
torch.eye(n, m, out=res2)
self.assertEqual(res1, res2)
@precisionOverride({torch.float: 1e-8, torch.double: 1e-10})
@dtypes(*(get_all_fp_dtypes(include_half=False, include_bfloat16=False) +
get_all_complex_dtypes()))
def test_linspace_vs_numpy(self, device, dtype):
start = -0.0316082797944545745849609375 + (0.8888888888j if dtype.is_complex else 0)
end = .0315315723419189453125 + (0.444444444444j if dtype.is_complex else 0)
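        # Compare torch.linspace elementwise against np.linspace, including the
        # exact endpoints, for a range of step counts.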
for steps in [1, 2, 3, 5, 11, 256, 257, 2**22]:
t = torch.linspace(start, end, steps, device=device, dtype=dtype)
a = np.linspace(start, end, steps, dtype=torch_to_numpy_dtype_dict[dtype])
t = t.cpu()
self.assertEqual(t, torch.from_numpy(a))
self.assertTrue(t[0].item() == a[0])
self.assertTrue(t[steps - 1].item() == a[steps - 1])
def _test_linspace_logspace_complex_helper(self, torch_fn, np_fn, device, dtype):
start = torch.randn(1, dtype=dtype).item()
end = (start + torch.randn(1, dtype=dtype) + random.randint(5, 15)).item()
def test_fn(torch_fn, numpy_fn, steps):
t = torch_fn(start, end, steps, device=device)
a = numpy_fn(start, end, steps, dtype=torch_to_numpy_dtype_dict[dtype])
t = t.cpu()
self.assertEqual(t, torch.from_numpy(a))
for steps in [1, 2, 3, 5, 11, 256, 257, 2**22]:
test_fn(torch.linspace, np.linspace, steps)
@dtypes(torch.complex64)
def test_linspace_vs_numpy_complex(self, device, dtype):
self._test_linspace_logspace_complex_helper(torch.linspace, np.linspace,
device, dtype)
@dtypes(torch.complex64)
def test_logspace_vs_numpy_complex(self, device, dtype):
self._test_linspace_logspace_complex_helper(torch.logspace, np.logspace,
device, dtype)
@precisionOverride({torch.float: 1e-6, torch.double: 1e-10})
@dtypes(*get_all_fp_dtypes(include_half=False, include_bfloat16=False))
def test_logspace_vs_numpy(self, device, dtype):
start = -0.0316082797944545745849609375
end = .0315315723419189453125
for steps in [1, 2, 3, 5, 11, 256, 257, 2**22]:
t = torch.logspace(start, end, steps, device=device, dtype=dtype)
a = np.logspace(start, end, steps, dtype=torch_to_numpy_dtype_dict[dtype])
t = t.cpu()
self.assertEqual(t, torch.from_numpy(a))
self.assertEqual(t[0], a[0])
self.assertEqual(t[steps - 1], a[steps - 1])
def _linspace_logspace_warning_helper(self, op, device, dtype):
with self.assertWarnsOnceRegex(UserWarning, "Not providing a value for .+"):
op(0, 10, device=device, dtype=dtype)
@dtypes(torch.float)
def test_linspace_steps_warning(self, device, dtype):
self._linspace_logspace_warning_helper(torch.linspace, device, dtype)
@dtypes(torch.float)
def test_logspace_steps_warning(self, device, dtype):
self._linspace_logspace_warning_helper(torch.logspace, device, dtype)
@onlyCUDA
@largeTensorTest('16GB')
def test_range_factories_64bit_indexing(self, device):
bigint = 2 ** 31 + 1
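        # bigint exceeds 2**31 elements, forcing the factory kernels down the
        # 64-bit indexing path.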
t = torch.arange(bigint, dtype=torch.long, device=device)
self.assertEqual(t[-1].item(), bigint - 1)
del t
t = torch.linspace(0, 1, bigint, dtype=torch.float, device=device)
self.assertEqual(t[-1].item(), 1)
del t
t = torch.logspace(0, 1, bigint, 2, dtype=torch.float, device=device)
self.assertEqual(t[-1].item(), 2)
del t
@onlyOnCPUAndCUDA
def test_tensor_ctor_device_inference(self, device):
torch_device = torch.device(device)
values = torch.tensor((1, 2, 3), device=device)
# Tests tensor and as_tensor
        # Note: warnings are suppressed below (torch.tensor on an existing
        # tensor emits a copy-construct warning)
for op in (torch.tensor, torch.as_tensor):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.assertEqual(op(values).device, torch_device)
self.assertEqual(op(values, dtype=torch.float64).device, torch_device)
if self.device_type == 'cuda':
with torch.cuda.device(device):
self.assertEqual(op(values.cpu()).device, torch.device('cpu'))
# Tests sparse ctor
indices = torch.tensor([[0, 1, 1],
[2, 0, 1],
[2, 1, 0]], device=device)
sparse_size = (3, 3, 3)
sparse_default = torch.sparse_coo_tensor(indices, values, sparse_size)
self.assertEqual(sparse_default.device, torch_device)
sparse_with_dtype = torch.sparse_coo_tensor(indices, values, sparse_size, dtype=torch.float64)
self.assertEqual(sparse_with_dtype.device, torch_device)
if self.device_type == 'cuda':
with torch.cuda.device(device):
sparse_with_dtype = torch.sparse_coo_tensor(indices.cpu(), values.cpu(),
sparse_size, dtype=torch.float64)
self.assertEqual(sparse_with_dtype.device, torch.device('cpu'))
@onlyOnCPUAndCUDA
@precisionOverride({torch.bfloat16: 5e-2, torch.half: 1e-3})
@unittest.skipIf(not TEST_SCIPY, "Scipy not found")
@dtypesIfCUDA(torch.float, torch.double, torch.bfloat16, torch.half, torch.long)
@dtypesIfCPU(torch.float, torch.double, torch.long)
def test_signal_window_functions(self, device, dtype):
import scipy.signal as signal
def test(name, kwargs):
torch_method = getattr(torch, name + '_window')
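            # Non-floating-point dtypes are expected to be rejected with a
            # RuntimeError before any window is computed.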
if not dtype.is_floating_point:
with self.assertRaisesRegex(RuntimeError, r'floating point'):
torch_method(3, dtype=dtype)
return
for size in [0, 1, 2, 5, 10, 50, 100, 1024, 2048]:
for periodic in [True, False]:
res = torch_method(size, periodic=periodic, **kwargs, device=device, dtype=dtype)
# NB: scipy always returns a float64 result
ref = torch.from_numpy(signal.get_window((name, *(kwargs.values())), size, fftbins=periodic))
self.assertEqual(res, ref, exact_dtype=False)
with self.assertRaisesRegex(RuntimeError, r'not implemented for sparse types'):
torch_method(3, layout=torch.sparse_coo)
self.assertTrue(torch_method(3, requires_grad=True).requires_grad)
self.assertFalse(torch_method(3).requires_grad)
for window in ['hann', 'hamming', 'bartlett', 'blackman']:
test(window, kwargs={})
for num_test in range(50):
test('kaiser', kwargs={'beta': random.random() * 30})
def test_tensor_factories_empty(self, device):
# ensure we can create empty tensors from each factory function
shapes = [(5, 0, 1), (0,), (0, 0, 1, 0, 2, 0, 0)]
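        # Every factory below should accept zero-sized shapes and return a
        # tensor with exactly that shape.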
for shape in shapes:
for dt in get_all_dtypes():
self.assertEqual(shape, torch.zeros(shape, device=device, dtype=dt).shape)
self.assertEqual(shape, torch.zeros_like(torch.zeros(shape, device=device, dtype=dt)).shape)
self.assertEqual(shape, torch.full(shape, 3, device=device, dtype=dt).shape)
self.assertEqual(shape, torch.full_like(torch.zeros(shape, device=device, dtype=dt), 3).shape)
self.assertEqual(shape, torch.ones(shape, device=device, dtype=dt).shape)
self.assertEqual(shape, torch.ones_like(torch.zeros(shape, device=device, dtype=dt)).shape)
self.assertEqual(shape, torch.empty(shape, device=device, dtype=dt).shape)
self.assertEqual(shape, torch.empty_like(torch.zeros(shape, device=device, dtype=dt)).shape)
self.assertEqual(shape, torch.empty_strided(shape, (0,) * len(shape), device=device, dtype=dt).shape)
if dt == torch.bool:
self.assertEqual(shape, torch.randint(2, shape, device=device, dtype=dt).shape)
self.assertEqual(shape, torch.randint_like(torch.zeros(shape, device=device, dtype=dt), 2).shape)
elif dt.is_complex:
self.assertRaises(RuntimeError, lambda: torch.randint(6, shape, device=device, dtype=dt).shape)
else:
self.assertEqual(shape, torch.randint(6, shape, device=device, dtype=dt).shape)
self.assertEqual(shape, torch.randint_like(torch.zeros(shape, device=device, dtype=dt), 6).shape)
if dt not in {torch.double, torch.float, torch.half, torch.bfloat16, torch.complex64, torch.complex128}:
self.assertRaises(RuntimeError, lambda: torch.rand(shape, device=device, dtype=dt).shape)
if dt == torch.double or dt == torch.float or dt.is_complex:
self.assertEqual(shape, torch.randn(shape, device=device, dtype=dt).shape)
self.assertEqual(shape, torch.randn_like(torch.zeros(shape, device=device, dtype=dt)).shape)
self.assertEqual((0,), torch.arange(0, device=device).shape)
self.assertEqual((0, 0), torch.eye(0, device=device).shape)
self.assertEqual((0, 0), torch.eye(0, 0, device=device).shape)
self.assertEqual((5, 0), torch.eye(5, 0, device=device).shape)
self.assertEqual((0, 5), torch.eye(0, 5, device=device).shape)
self.assertEqual((0,), torch.linspace(1, 1, 0, device=device).shape)
self.assertEqual((0,), torch.logspace(1, 1, 0, device=device).shape)
self.assertEqual((0,), torch.randperm(0, device=device).shape)
self.assertEqual((0,), torch.bartlett_window(0, device=device).shape)
self.assertEqual((0,), torch.bartlett_window(0, periodic=False, device=device).shape)
self.assertEqual((0,), torch.hamming_window(0, device=device).shape)
self.assertEqual((0,), torch.hann_window(0, device=device).shape)
self.assertEqual((0,), torch.kaiser_window(0, device=device).shape)
self.assertEqual((1, 1, 0), torch.tensor([[[]]], device=device).shape)
self.assertEqual((1, 1, 0), torch.as_tensor([[[]]], device=device).shape)
@onlyCUDA
def test_tensor_factory_gpu_type_inference(self, device):
saved_type = torch.tensor([]).type()
torch.set_default_tensor_type(torch.cuda.DoubleTensor)
torch.set_default_dtype(torch.float32)
self.assertIs(torch.float32, torch.tensor(0.).dtype)
self.assertEqual(torch.device(device), torch.tensor(0.).device)
torch.set_default_dtype(torch.float64)
self.assertIs(torch.float64, torch.tensor(0.).dtype)
self.assertEqual(torch.device(device), torch.tensor(0.).device)
torch.set_default_tensor_type(saved_type)
@onlyCUDA
def test_tensor_factory_gpu_type(self, device):
saved_type = torch.tensor([]).type()
torch.set_default_tensor_type(torch.cuda.FloatTensor)
x = torch.zeros((5, 5))
self.assertIs(torch.float32, x.dtype)
self.assertTrue(x.is_cuda)
torch.set_default_tensor_type(torch.cuda.DoubleTensor)
x = torch.zeros((5, 5))
self.assertIs(torch.float64, x.dtype)
self.assertTrue(x.is_cuda)
torch.set_default_tensor_type(saved_type)
@skipCPUIf(True, 'compares device with cpu')
@dtypes(torch.int, torch.long, torch.float, torch.double)
def test_arange_device_vs_cpu(self, device, dtype):
cpu_tensor = torch.arange(0, 10, dtype=dtype, device='cpu')
device_tensor = torch.arange(0, 10, dtype=dtype, device=device)
self.assertEqual(cpu_tensor, device_tensor)
def test_arange_bfloat16(self, device):
ref_tensor = torch.tensor([0, 1, 2, 3], dtype=torch.bfloat16, device=device)
bfloat16_tensor = torch.arange(0, 4, dtype=torch.bfloat16, device=device)
self.assertEqual(ref_tensor, bfloat16_tensor)
# step=2
ref_tensor = torch.tensor([0, 2, 4], dtype=torch.bfloat16, device=device)
bfloat16_tensor = torch.arange(0, 6, step=2, dtype=torch.bfloat16, device=device)
self.assertEqual(ref_tensor, bfloat16_tensor)
@dtypes(*get_all_dtypes(include_bool=False, include_half=False))
@dtypesIfCUDA(*get_all_dtypes(include_bool=False, include_half=True))
def test_linspace(self, device, dtype):
_from = random.random()
to = _from + random.random()
res1 = torch.linspace(_from, to, 137, device=device, dtype=dtype)
res2 = torch.tensor((), device=device, dtype=dtype)
torch.linspace(_from, to, 137, dtype=dtype, out=res2)
self.assertEqual(res1, res2, atol=0, rtol=0)
# small tensor
self.assertEqual(torch.linspace(10, 20, 11, device=device, dtype=dtype),
torch.tensor(list(range(10, 21)), device=device, dtype=dtype))
# large tensor
if dtype not in (torch.int8, torch.uint8):
self.assertEqual(torch.linspace(10, 2000, 1991, device=device, dtype=dtype),
torch.tensor(list(range(10, 2001)), device=device, dtype=dtype))
# Vectorization on non-contiguous tensors
if dtype not in (torch.int8, torch.uint8): # int8 and uint8 are too small for this test
res = torch.rand(3, 3, 1000, device=device).to(dtype)
res = res.permute(2, 0, 1)
torch.linspace(0, 1000 * 3 * 3, 1000 * 3 * 3, out=res)
self.assertEqual(res.flatten(), torch.linspace(0, 1000 * 3 * 3, 1000 * 3 * 3, device=device, dtype=dtype))
self.assertRaises(RuntimeError, lambda: torch.linspace(0, 1, -1, device=device, dtype=dtype))
# steps = 1
self.assertEqual(torch.linspace(0, 1, 1, device=device, dtype=dtype),
torch.zeros(1, device=device, dtype=dtype), atol=0, rtol=0)
# steps = 0
self.assertEqual(torch.linspace(0, 1, 0, device=device, dtype=dtype).numel(), 0, atol=0, rtol=0)
if dtype == torch.float:
            # passed dtype can't be safely cast to the inferred dtype
with self.assertRaisesRegex(RuntimeError, r"torch.linspace\(\): inferred dtype"):
torch.linspace(0, 1j, 5, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, r"torch.linspace\(\): inferred dtype"):
torch.linspace(0j, 1, 5, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, r"torch.linspace\(\): inferred dtype"):
torch.linspace(0j, 1j, 5, device=device, dtype=dtype)
# Check linspace for generating the correct output for each dtype.
start = 0 if dtype == torch.uint8 else -100
expected_lin = torch.tensor([start + .5 * i for i in range(401)], device=device, dtype=torch.double)
actual_lin = torch.linspace(start, start + 200, 401, device=device, dtype=dtype)
# If on GPU, allow for minor error depending on dtype.
tol = 0.
if device != 'cpu':
if dtype == torch.half:
tol = 1e-1
elif dtype == torch.float:
tol = 1e-5
elif dtype == torch.double:
tol = 1e-10
self.assertEqual(expected_lin.to(dtype), actual_lin, atol=tol, rtol=0)
# Check linspace for generating with start > end.
self.assertEqual(torch.linspace(2, 0, 3, device=device, dtype=dtype),
torch.tensor((2, 1, 0), device=device, dtype=dtype),
atol=0, rtol=0)
# Check for race condition (correctness when applied on a large tensor).
if dtype not in (torch.int8, torch.uint8, torch.int16, torch.half, torch.bfloat16):
y = torch.linspace(0, 999999 + (999999j if dtype.is_complex else 0),
1000000, device=device, dtype=dtype)
if dtype.is_complex:
cond = torch.logical_and(y[:-1].real < y[1:].real, y[:-1].imag < y[1:].imag)
else:
cond = y[:-1] < y[1:]
correct = all(cond)
self.assertTrue(correct)
# Check linspace for non-contiguous tensors.
x = torch.zeros(2, 3, device=device, dtype=dtype)
y = torch.linspace(0, 3, 4, out=x.narrow(1, 1, 2), dtype=dtype)
self.assertEqual(x, torch.tensor(((0, 0, 1), (0, 2, 3)), device=device, dtype=dtype), atol=0, rtol=0)
def _test_linspace_logspace_deduction_helper(self, fn, device):
for start, end in [(1, 2), (1., 2), (1., -2.), (1j, 2j), (0., 2j), (1j, 2)]:
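            # A complex start or end should force a complex result dtype;
            # otherwise float32 is expected.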
dtype = torch.float32
if isinstance(start, complex) or isinstance(end, complex):
dtype = torch.cfloat
self.assertEqual(fn(start, end, steps=100, device=device).dtype, dtype)
def test_linspace_deduction(self, device):
# Test deduction from input parameters.
self._test_linspace_logspace_deduction_helper(torch.linspace, device)
def test_logspace_deduction(self, device):
# Test deduction from input parameters.
self._test_linspace_logspace_deduction_helper(torch.logspace, device)
# The implementation of linspace+logspace goes through a different path
# when the steps arg is equal to 0 or 1. For other values of `steps`
# they call specialized linspace (or logspace) kernels.
LINSPACE_LOGSPACE_SPECIAL_STEPS = [0, 1]
# NOTE [Linspace+Logspace precision override]
# Our Linspace and logspace torch.half CUDA kernels are not very precise.
# Since linspace/logspace are deterministic, we can compute an expected
# amount of error (by testing without a precision override), adding a tiny
# amount (EPS) to that, and using that value as the override.
LINSPACE_LOGSPACE_EXTRA_EPS = 1e-5
# Compares linspace device vs. cpu
def _test_linspace(self, device, dtype, steps):
a = torch.linspace(0, 10, steps=steps, dtype=dtype, device=device)
b = torch.linspace(0, 10, steps=steps)
self.assertEqual(a, b, exact_dtype=False)
# See NOTE [Linspace+Logspace precision override]
@skipCPUIf(True, "compares with CPU")
@precisionOverride({torch.half: 0.0039 + LINSPACE_LOGSPACE_EXTRA_EPS})
@dtypes(*(get_all_fp_dtypes() + get_all_complex_dtypes()))
def test_linspace_device_vs_cpu(self, device, dtype):
self._test_linspace(device, dtype, steps=10)
@skipCPUIf(True, "compares with CPU")
@dtypes(*(get_all_fp_dtypes() + get_all_complex_dtypes()))
def test_linspace_special_steps(self, device, dtype):
for steps in self.LINSPACE_LOGSPACE_SPECIAL_STEPS:
self._test_linspace(device, dtype, steps=steps)
# Compares logspace device vs cpu
def _test_logspace(self, device, dtype, steps):
a = torch.logspace(1, 1.1, steps=steps, dtype=dtype, device=device)
b = torch.logspace(1, 1.1, steps=steps)
self.assertEqual(a, b, exact_dtype=False)
# Compares logspace device vs cpu
def _test_logspace_base2(self, device, dtype, steps):
a = torch.logspace(1, 1.1, steps=steps, base=2, dtype=dtype, device=device)
b = torch.logspace(1, 1.1, steps=steps, base=2)
self.assertEqual(a, b, exact_dtype=False)
# See NOTE [Linspace+Logspace precision override]
@skipCPUIf(True, "compares with CPU")
@precisionOverride({torch.half: 0.025 + LINSPACE_LOGSPACE_EXTRA_EPS})
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float, torch.double)
def test_logspace_device_vs_cpu(self, device, dtype):
self._test_logspace(device, dtype, steps=10)
# See NOTE [Linspace+Logspace precision override]
@skipCPUIf(True, "compares with CPU")
@precisionOverride({torch.half: 0.0201 + LINSPACE_LOGSPACE_EXTRA_EPS})
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float, torch.double)
def test_logspace_base2(self, device, dtype):
self._test_logspace_base2(device, dtype, steps=10)
@skipCPUIf(True, "compares with CPU")
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float, torch.double)
def test_logspace_special_steps(self, device, dtype):
for steps in self.LINSPACE_LOGSPACE_SPECIAL_STEPS:
self._test_logspace(device, dtype, steps=steps)
self._test_logspace_base2(device, dtype, steps=steps)
@dtypes(*get_all_dtypes(include_bool=False, include_half=False, include_complex=False))
@dtypesIfCUDA(*((get_all_int_dtypes() + [torch.float32, torch.float16, torch.bfloat16])
if TEST_WITH_ROCM
else get_all_dtypes(include_bool=False, include_half=True, include_complex=False)))
def test_logspace(self, device, dtype):
_from = random.random()
to = _from + random.random()
res1 = torch.logspace(_from, to, 137, device=device, dtype=dtype)
res2 = torch.tensor((), device=device, dtype=dtype)
torch.logspace(_from, to, 137, device=device, dtype=dtype, out=res2)
self.assertEqual(res1, res2, atol=0, rtol=0)
self.assertRaises(RuntimeError, lambda: torch.logspace(0, 1, -1, device=device, dtype=dtype))
self.assertEqual(torch.logspace(0, 1, 1, device=device, dtype=dtype),
torch.ones(1, device=device, dtype=dtype), atol=0, rtol=0)
if dtype == torch.float:
            # passed dtype can't be safely cast to the inferred dtype
with self.assertRaisesRegex(RuntimeError, r"torch.logspace\(\): inferred dtype"):
torch.logspace(0, 1j, 5, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, r"torch.logspace\(\): inferred dtype"):
torch.logspace(0j, 1, 5, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, r"torch.logspace\(\): inferred dtype"):
torch.logspace(0j, 1j, 5, device=device, dtype=dtype)
# Check precision - start, stop and base are chosen to avoid overflow
# steps is chosen so that step size is not subject to rounding error
# a tolerance is needed for gpu tests due to differences in computation
atol = None
rtol = None
if self.device_type == 'cpu':
atol = 0
rtol = 0
self.assertEqual(torch.tensor([2. ** (i / 8.) for i in range(49)], device=device, dtype=dtype),
torch.logspace(0, 6, steps=49, base=2, device=device, dtype=dtype),
atol=atol, rtol=rtol)
# Check non-default base=2
self.assertEqual(torch.logspace(1, 1, 1, 2, device=device, dtype=dtype),
torch.ones(1, device=device, dtype=dtype) * 2)
self.assertEqual(torch.logspace(0, 2, 3, 2, device=device, dtype=dtype),
torch.tensor((1, 2, 4), device=device, dtype=dtype))
        # Check logspace for generating with start > end.
self.assertEqual(torch.logspace(1, 0, 2, device=device, dtype=dtype),
torch.tensor((10, 1), device=device, dtype=dtype), atol=0, rtol=0)
        # Check logspace for non-contiguous tensors.
x = torch.zeros(2, 3, device=device, dtype=dtype)
y = torch.logspace(0, 3, 4, base=2, device=device, dtype=dtype, out=x.narrow(1, 1, 2))
self.assertEqual(x, torch.tensor(((0, 1, 2), (0, 4, 8)), device=device, dtype=dtype), atol=0, rtol=0)
@onlyOnCPUAndCUDA
@dtypes(torch.half, torch.float, torch.double)
def test_full_inference(self, device, dtype):
size = (2, 2)
prev_default = torch.get_default_dtype()
torch.set_default_dtype(dtype)
# Tests bool fill value inference
t = torch.full(size, True)
self.assertEqual(t.dtype, torch.bool)
# Tests integer fill value inference
t = torch.full(size, 1)
self.assertEqual(t.dtype, torch.long)
# Tests float fill value inference
t = torch.full(size, 1.)
self.assertEqual(t.dtype, dtype)
# Tests complex inference
t = torch.full(size, (1 + 1j))
ctype = torch.complex128 if dtype is torch.double else torch.complex64
self.assertEqual(t.dtype, ctype)
torch.set_default_dtype(prev_default)
def test_full_out(self, device):
size = (5,)
o = torch.empty(size, device=device, dtype=torch.long)
# verifies dtype/out conflict throws a RuntimeError
with self.assertRaises(RuntimeError):
torch.full(o.shape, 1., dtype=torch.float, out=o)
# verifies out dtype overrides inference
self.assertEqual(torch.full(o.shape, 1., out=o).dtype, o.dtype)
self.assertEqual(torch.full(size, 1, out=o).dtype, o.dtype)
    # Check that the warning about a non-writable numpy array is suppressed
    # when a copy of the array is created.
    # See issue #47160.
def test_tensor_from_non_writable_numpy(self, device):
with warnings.catch_warnings(record=True) as w:
a = np.arange(5.)
a.flags.writeable = False
t = torch.tensor(a)
self.assertEqual(len(w), 0)
# Class for testing random tensor creation ops, like torch.randint
class TestRandomTensorCreation(TestCase):
exact_dtype = True
# TODO: add torch.complex64, torch.complex128
@dtypes(torch.float, torch.double)
def test_normal(self, device, dtype):
def helper(self, device, dtype, ptype, t_transform, std_transform):
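            # ptype converts a python scalar into the sampled point type (real
            # or complex), t_transform extracts the component under test from
            # the samples, and std_transform rescales the expected standard
            # deviation accordingly.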
q = torch.empty(100, 100, dtype=dtype, device=device)
q.normal_()
self.assertEqual(t_transform(q).mean(), 0, atol=0.2, rtol=0)
self.assertEqual(t_transform(q).std(), std_transform(1), atol=0.2, rtol=0)
q.normal_(2, 3)
self.assertEqual(t_transform(q).mean(), 2, atol=0.3, rtol=0)
self.assertEqual(t_transform(q).std(), std_transform(3), atol=0.3, rtol=0)
q = torch.empty(100, 100, dtype=dtype, device=device)
q_row1 = q[0:1].clone()
q[99:100].normal_()
self.assertEqual(t_transform(q[99:100]).mean(), 0, atol=0.2, rtol=0)
self.assertEqual(t_transform(q[99:100]).std(), std_transform(1), atol=0.2, rtol=0)
self.assertEqual(t_transform(q[0:1]).clone(), t_transform(q_row1))
mean = torch.empty(100, 100, dtype=dtype, device=device)
mean[:50].fill_(ptype(0))
mean[50:].fill_(ptype(1))
std = torch.empty(100, 100, dtype=torch.float, device=device)
std[:, :50] = 4
std[:, 50:] = 1
r = torch.normal(mean)
self.assertEqual(r.dtype, dtype)
self.assertEqual(str(r.device), device)
self.assertEqual(t_transform(r[:50]).mean(), 0, atol=0.2, rtol=0)
self.assertEqual(t_transform(r[50:]).mean(), 1, atol=0.2, rtol=0)
self.assertEqual(t_transform(r).std(), std_transform(1), atol=0.2, rtol=0)
r.fill_(42)
r = torch.normal(mean, 3)
self.assertEqual(r.dtype, dtype)
self.assertEqual(str(r.device), device)
self.assertEqual(t_transform(r[:50]).mean(), 0, atol=0.2, rtol=0)
self.assertEqual(t_transform(r[50:]).mean(), 1, atol=0.2, rtol=0)
self.assertEqual(t_transform(r).std(), std_transform(3), atol=0.2, rtol=0)
r.fill_(42)
torch.normal(mean, 3, out=r)
self.assertEqual(r.dtype, dtype)
self.assertEqual(str(r.device), device)
self.assertEqual(t_transform(r[:50]).mean(), 0, atol=0.2, rtol=0)
self.assertEqual(t_transform(r[50:]).mean(), 1, atol=0.2, rtol=0)
self.assertEqual(t_transform(r).std(), std_transform(3), atol=0.2, rtol=0)
r.fill_(42)
r = torch.normal(2, std)
self.assertFalse(r.dtype.is_complex)
self.assertEqual(str(r.device), device)
self.assertEqual(r.mean(), 2, atol=0.2, rtol=0)
self.assertEqual(r[:, :50].std(), 4, atol=0.3, rtol=0)
self.assertEqual(r[:, 50:].std(), 1, atol=0.2, rtol=0)
r.fill_(42)
torch.normal(2, std, out=r)
self.assertFalse(r.dtype.is_complex)
self.assertEqual(str(r.device), device)
self.assertEqual(r.mean(), 2, atol=0.2, rtol=0)
self.assertEqual(r[:, :50].std(), 4, atol=0.3, rtol=0)
self.assertEqual(r[:, 50:].std(), 1, atol=0.2, rtol=0)
r.fill_(42)
r = torch.normal(mean, std)
self.assertEqual(r.dtype, dtype)
self.assertEqual(str(r.device), device)
self.assertEqual(t_transform(r[:50]).mean(), 0, atol=0.2, rtol=0)
self.assertEqual(t_transform(r[50:]).mean(), 1, atol=0.2, rtol=0)
self.assertEqual(t_transform(r[:, :50]).std(), std_transform(4), atol=0.3, rtol=0)
self.assertEqual(t_transform(r[:, 50:]).std(), std_transform(1), atol=0.2, rtol=0)
r.fill_(42)
torch.normal(mean, std, out=r)
self.assertEqual(r.dtype, dtype)
self.assertEqual(str(r.device), device)
self.assertEqual(t_transform(r[:50]).mean(), 0, atol=0.2, rtol=0)
self.assertEqual(t_transform(r[50:]).mean(), 1, atol=0.2, rtol=0)
self.assertEqual(t_transform(r[:, :50]).std(), std_transform(4), atol=0.3, rtol=0)
self.assertEqual(t_transform(r[:, 50:]).std(), std_transform(1), atol=0.2, rtol=0)
r.fill_(42)
r = torch.normal(2, 3, (100, 100), dtype=dtype, device=device)
self.assertEqual(r.dtype, dtype)
self.assertEqual(str(r.device), device)
self.assertEqual(t_transform(r).mean(), 2, atol=0.3, rtol=0)
self.assertEqual(t_transform(r).std(), std_transform(3), atol=0.3, rtol=0)
r.fill_(42)
torch.normal(2, 3, (100, 100), dtype=dtype, device=device, out=r)
self.assertEqual(r.dtype, dtype)
self.assertEqual(str(r.device), device)
self.assertEqual(t_transform(r).mean(), 2, atol=0.3, rtol=0)
self.assertEqual(t_transform(r).std(), std_transform(3), atol=0.3, rtol=0)
# float std 0 with float mean
r.fill_(42)
torch.normal(2, 0, (10, 10), dtype=dtype, device=device, out=r)
self.assertEqual(r.dtype, dtype)
self.assertEqual(str(r.device), device)
self.assertTrue(r.eq(2).all())
# float std 0 with tensor mean
r.fill_(42)
mean_rand = torch.randn(10, 10, dtype=dtype, device=device)
torch.normal(mean_rand, 0, out=r)
self.assertEqual(r.dtype, dtype)
self.assertEqual(str(r.device), device)
self.assertEqual(mean_rand, r, atol=0, rtol=0)
# tensor std 0 with float mean
r.fill_(42)
std_zeros = torch.zeros(10, 10, dtype=dtype, device=device)
torch.normal(2, std_zeros, out=r)
self.assertEqual(r.dtype, dtype)
self.assertEqual(str(r.device), device)
self.assertTrue(r.eq(2).all())
# tensor std 0 with tensor mean
r.fill_(42)
torch.normal(mean_rand, std_zeros, out=r)
self.assertEqual(r.dtype, dtype)
self.assertEqual(str(r.device), device)
self.assertEqual(mean_rand, r, atol=0, rtol=0)
if dtype.is_complex:
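            # For complex dtypes the real and imaginary parts are checked separately;
            # each part is expected to carry std/sqrt(2) of the requested standard
            # deviation, which is why std_transform divides by math.sqrt(2) below.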
helper(self, device, dtype, lambda x: complex(x, x),
lambda t: torch.real(t).to(torch.float), lambda mean: mean / math.sqrt(2))
helper(self, device, dtype, lambda x: complex(x, x),
lambda t: torch.imag(t).to(torch.float), lambda mean: mean / math.sqrt(2))
self.assertRaisesRegex(
RuntimeError, "normal expects standard deviation to be non-complex",
lambda: torch.normal(0, torch.empty(100, 100, dtype=dtype, device=device)))
out = torch.empty(100, 100, dtype=dtype, device=device)
self.assertRaisesRegex(
RuntimeError, "normal expects standard deviation to be non-complex",
lambda: torch.normal(0, torch.empty(100, 100, dtype=dtype, device=device), out=out))
else:
helper(self, device, dtype, lambda x: x, lambda t: t, lambda mean: mean)
# Ensure that normal raises appropriate error when `std` < 0
def test_normal_std_error(self, device):
a = torch.tensor(0, dtype=torch.float32, device=device)
std = torch.tensor(-1, dtype=torch.float32, device=device)
for input in [0, a]:
with self.assertRaisesRegex(RuntimeError, r'normal_ expects std >= 0.0'):
torch.normal(input, -1, (10,))
with self.assertRaisesRegex(RuntimeError, r'normal expects all elements of std >= 0.0'):
torch.normal(input, std)
@dtypes(torch.float, torch.double, torch.half)
@dtypesIfCUDA(torch.float, torch.double, torch.half, torch.bfloat16)
def test_uniform_from_to(self, device, dtype):
size = 2000
alpha = 0.1
float_min = torch.finfo(torch.float).min
float_max = torch.finfo(torch.float).max
double_min = torch.finfo(torch.double).min
double_max = torch.finfo(torch.double).max
if dtype == torch.bfloat16:
min_val = -3.389531389251535e+38
max_val = 3.389531389251535e+38
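            # These literals are approximately the largest finite bfloat16 magnitude,
            # (2 - 2**-7) * 2**127 ~= 3.3895e38, written out explicitly for this dtype.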
else:
min_val = torch.finfo(dtype).min
max_val = torch.finfo(dtype).max
values = [double_min, float_min, -42, 0, 42, float_max, double_max]
for from_ in values:
for to_ in values:
t = torch.empty(size, dtype=dtype, device=device)
if not (min_val <= from_ <= max_val) or not (min_val <= to_ <= max_val):
pass
elif to_ < from_:
self.assertRaisesRegex(
RuntimeError,
"uniform_ expects to return",
lambda: t.uniform_(from_, to_)
)
elif to_ - from_ > max_val:
self.assertRaisesRegex(
RuntimeError,
"uniform_ expects to-from",
lambda: t.uniform_(from_, to_)
)
else:
t.uniform_(from_, to_)
range_ = to_ - from_
if not (dtype == torch.bfloat16) and not (
dtype == torch.half and device == 'cpu') and not torch.isnan(t).all():
delta = alpha * range_
double_t = t.to(torch.double)
if range_ == 0:
self.assertTrue(double_t.min() == from_)
self.assertTrue(double_t.max() == to_)
elif dtype == torch.half:
self.assertTrue(from_ <= double_t.min() <= (from_ + delta))
self.assertTrue((to_ - delta) <= double_t.max() <= to_)
else:
self.assertTrue(from_ <= double_t.min() <= (from_ + delta))
self.assertTrue((to_ - delta) <= double_t.max() < to_)
def test_random_neg_values(self, device):
SIZE = 10
signed_dtypes = [torch.double, torch.float, torch.long, torch.int, torch.short]
for dtype in signed_dtypes:
res = torch.rand(SIZE, SIZE).to(device=device, dtype=dtype)
res.random_(-10, -1)
self.assertLessEqual(res.max().item(), 9)
self.assertGreaterEqual(res.min().item(), -10)
# TODO: this test should be updated
@onlyCPU
def test_randint_inference(self, device):
size = (2, 1)
for args in [(3,), (1, 3)]: # (low,) and (low, high)
self.assertIs(torch.int64, torch.randint(*args, size=size).dtype)
self.assertIs(torch.int64, torch.randint(*args, size=size, layout=torch.strided).dtype)
self.assertIs(torch.int64, torch.randint(*args, size=size, generator=torch.default_generator).dtype)
self.assertIs(torch.float32, torch.randint(*args, size=size, dtype=torch.float32).dtype)
out = torch.empty(size, dtype=torch.float32)
self.assertIs(torch.float32, torch.randint(*args, size=size, out=out).dtype)
self.assertIs(torch.float32, torch.randint(*args, size=size, out=out, dtype=torch.float32).dtype)
out = torch.empty(size, dtype=torch.int64)
self.assertIs(torch.int64, torch.randint(*args, size=size, out=out).dtype)
self.assertIs(torch.int64, torch.randint(*args, size=size, out=out, dtype=torch.int64).dtype)
# TODO: this test should be updated
@onlyCPU
def test_randint(self, device):
SIZE = 100
def seed(generator):
if generator is None:
torch.manual_seed(123456)
else:
generator.manual_seed(123456)
return generator
for generator in (None, torch.Generator()):
generator = seed(generator)
res1 = torch.randint(0, 6, (SIZE, SIZE), generator=generator)
res2 = torch.empty((), dtype=torch.int64)
generator = seed(generator)
torch.randint(0, 6, (SIZE, SIZE), generator=generator, out=res2)
generator = seed(generator)
res3 = torch.randint(6, (SIZE, SIZE), generator=generator)
res4 = torch.empty((), dtype=torch.int64)
generator = seed(generator)
torch.randint(6, (SIZE, SIZE), out=res4, generator=generator)
self.assertEqual(res1, res2)
self.assertEqual(res1, res3)
self.assertEqual(res1, res4)
self.assertEqual(res2, res3)
self.assertEqual(res2, res4)
self.assertEqual(res3, res4)
self.assertTrue((res1 < 6).all().item())
self.assertTrue((res1 >= 0).all().item())
@dtypes(torch.half, torch.float, torch.bfloat16, torch.double,
torch.complex64, torch.complex128)
def test_randn(self, device, dtype):
SIZE = 100
for size in [0, SIZE]:
torch.manual_seed(123456)
res1 = torch.randn(size, size, dtype=dtype, device=device)
res2 = torch.tensor([], dtype=dtype, device=device)
torch.manual_seed(123456)
torch.randn(size, size, out=res2)
self.assertEqual(res1, res2)
@dtypes(torch.float, torch.double, torch.complex64, torch.complex128)
def test_rand(self, device, dtype):
SIZE = 100
for size in [0, SIZE]:
torch.manual_seed(123456)
res1 = torch.rand(size, size, dtype=dtype, device=device)
res2 = torch.tensor([], dtype=dtype, device=device)
torch.manual_seed(123456)
torch.rand(size, size, out=res2)
self.assertEqual(res1, res2)
def test_randperm(self, device):
if device == 'cpu' or device == 'meta':
rng_device = None
else:
# TODO: This won't actually work for non-CUDA device
# see https://github.com/pytorch/pytorch/issues/54282
rng_device = [device]
        # Test core functionality. On CUDA, different values of n take different
        # code paths
for n in (5, 100, 50000, 100000):
# Ensure both integer and floating-point numbers are tested. Half follows an execution path that is
# different from others on CUDA.
for dtype in (torch.long, torch.half, torch.float):
if n > 2049 and dtype == torch.half: # Large n for torch.half will raise an exception, do not test here.
continue
with torch.random.fork_rng(devices=rng_device):
res1 = torch.randperm(n, dtype=dtype, device=device)
res2 = torch.empty(0, dtype=dtype, device=device)
torch.randperm(n, out=res2, dtype=dtype, device=device)
self.assertEqual(res1, res2, atol=0, rtol=0)
self.assertEqual(res1.sort().values.long(), torch.arange(n, device=device))
# Default type is long
for n in (100, 10000):
self.assertEqual(torch.randperm(n, device=device).dtype, torch.long)
# randperm of 0 elements is an empty tensor
res1 = torch.randperm(0)
res2 = torch.tensor(5, dtype=dtype, device=device)
torch.randperm(0, out=res2)
self.assertEqual(res1.numel(), 0)
self.assertEqual(res2.numel(), 0)
# Test exceptions when n is too large for a floating point type
for dtype, small_n, large_n in ((torch.uint8, 2**8, 2**8 + 1),
(torch.half, 2**11 + 1, 2**11 + 2),
(torch.float, 2**24 + 1, 2**24 + 2),
(torch.double, 2**25, # 2**53 + 1 is too large to run
2**53 + 2)):
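            # small_n keeps the largest generated value (n - 1) exactly representable as an
            # integer in the dtype, so no exception is expected; large_n is the first size
            # where it no longer is, so randperm should raise.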
res = torch.empty(0, dtype=dtype, device=device)
torch.randperm(small_n, out=res) # No exception expected
self.assertRaises(RuntimeError, lambda: torch.randperm(large_n, out=res, device=device))
# Test non-contiguous tensors
for n in (4, 5, 6, 10, 20):
non_contiguous_tensor = torch.zeros((2, 3), dtype=torch.long, device=device).t()
self.assertFalse(non_contiguous_tensor.is_contiguous())
with torch.random.fork_rng(devices=rng_device):
res = torch.randperm(n, dtype=torch.long, device=device)
torch.randperm(n, out=non_contiguous_tensor)
self.assertEqual(non_contiguous_tensor, res)
self.assertEqual(res.sort().values.long(), torch.arange(n, device=device))
# Test exceptions when device and generator types are incompatible
@onlyCUDA
def test_randperm_device_compatibility(self, device):
cuda_gen = torch.Generator(device='cuda')
cpu_gen = torch.Generator(device='cpu')
# n=0 is a special case that we don't need to use generator, thus no error even if
# device and generator don't match
torch.randperm(0, device='cuda:0', generator=torch.Generator(device='cuda:1'))
if torch.cuda.device_count() > 1:
torch.randperm(0, device='cuda:1', generator=torch.Generator(device='cuda:0'))
torch.randperm(0, device='cuda', generator=torch.Generator(device='cpu'))
torch.randperm(0, device='cpu', generator=torch.Generator(device='cuda'))
for n in (1, 3, 100, 30000):
torch.randperm(n, device='cuda', generator=torch.Generator(device='cuda:0'))
torch.randperm(n, device='cuda:0', generator=torch.Generator(device='cuda'))
# For cuda:0 to match cuda:1, we are making consistent device type matching
# behavior just like torch.randint. Longer term, generator should ignore
# device ordinal, since it's not used anyway.
torch.randint(low=0, high=n + 1, size=(1,), device="cuda:0", generator=torch.Generator(device='cuda:1'))
torch.randperm(n, device='cuda:0', generator=torch.Generator(device='cuda:1'))
if torch.cuda.device_count() > 1:
torch.randint(low=0, high=n + 1, size=(1,), device="cuda:1", generator=torch.Generator(device='cuda:0'))
torch.randperm(n, device='cuda:1', generator=torch.Generator(device='cuda:0'))
regex = 'Expected a .* device type for generator but found .*'
cuda_t = torch.tensor(n, device='cuda')
self.assertRaisesRegex(RuntimeError, regex, lambda: torch.randperm(n, device='cuda', generator=cpu_gen))
self.assertRaisesRegex(RuntimeError, regex, lambda: torch.randperm(n, device='cuda', generator=cpu_gen, out=cuda_t))
cpu_t = torch.tensor(n, device='cpu')
self.assertRaisesRegex(RuntimeError, regex, lambda: torch.randperm(n, device='cpu', generator=cuda_gen))
self.assertRaisesRegex(RuntimeError, regex, lambda: torch.randperm(n, device='cpu', generator=cuda_gen, out=cpu_t))
self.assertRaisesRegex(RuntimeError, regex, lambda: torch.randperm(n, generator=cuda_gen)) # implicitly on CPU
# Class for testing *like ops, like torch.ones_like
class TestLikeTensorCreation(TestCase):
exact_dtype = True
# TODO: this test should be updated
def test_ones_like(self, device):
expected = torch.ones(100, 100, device=device)
res1 = torch.ones_like(expected)
self.assertEqual(res1, expected)
# test boolean tensor
expected = torch.tensor([True, True], device=device, dtype=torch.bool)
res1 = torch.ones_like(expected)
self.assertEqual(res1, expected)
# TODO: this test should be updated
@onlyCPU
def test_empty_like(self, device):
x = torch.autograd.Variable(torch.tensor([]))
y = torch.autograd.Variable(torch.randn(4, 4))
z = torch.autograd.Variable(torch.IntTensor([1, 2, 3]))
for a in (x, y, z):
self.assertEqual(torch.empty_like(a).shape, a.shape)
self.assertEqualTypeString(torch.empty_like(a), a)
def test_zeros_like(self, device):
expected = torch.zeros((100, 100,), device=device)
res1 = torch.zeros_like(expected)
self.assertEqual(res1, expected)
@deviceCountAtLeast(2)
def test_zeros_like_multiple_device(self, devices):
expected = torch.zeros(100, 100, device=devices[0])
x = torch.randn(100, 100, device=devices[1], dtype=torch.float32)
output = torch.zeros_like(x)
self.assertEqual(output, expected)
@deviceCountAtLeast(2)
def test_ones_like_multiple_device(self, devices):
expected = torch.ones(100, 100, device=devices[0])
x = torch.randn(100, 100, device=devices[1], dtype=torch.float32)
output = torch.ones_like(x)
self.assertEqual(output, expected)
# Full-like precedence is the explicit dtype then the dtype of the "like"
# tensor.
@onlyOnCPUAndCUDA
def test_full_like_inference(self, device):
size = (2, 2)
like = torch.empty((5,), device=device, dtype=torch.long)
self.assertEqual(torch.full_like(like, 1.).dtype, torch.long)
self.assertEqual(torch.full_like(like, 1., dtype=torch.complex64).dtype,
torch.complex64)
instantiate_device_type_tests(TestTensorCreation, globals())
instantiate_device_type_tests(TestRandomTensorCreation, globals())
instantiate_device_type_tests(TestLikeTensorCreation, globals())
if __name__ == '__main__':
run_tests()
| 46.823734 | 132 | 0.589673 |
4a22174da326db0be109b35e27cd1bf611b9c53c | 30,939 | py | Python | tests/test_plugin.py | queilawithaQ/lightning | 78064f8773685238c780d065edb037090c62b47f | ["MIT"] | null | null | null | tests/test_plugin.py | queilawithaQ/lightning | 78064f8773685238c780d065edb037090c62b47f | ["MIT"] | null | null | null | tests/test_plugin.py | queilawithaQ/lightning | 78064f8773685238c780d065edb037090c62b47f | ["MIT"] | null | null | null | from collections import OrderedDict
from fixtures import * # noqa: F401,F403
from flaky import flaky # noqa: F401
from lightning import RpcError, Millisatoshi
from utils import DEVELOPER, only_one, sync_blockheight, TIMEOUT, wait_for, TEST_NETWORK
import json
import os
import pytest
import re
import sqlite3
import subprocess
import time
import unittest
def test_option_passthrough(node_factory, directory):
""" Ensure that registering options works.
First attempts without the plugin and then with the plugin.
"""
plugin_path = os.path.join(os.getcwd(), 'contrib/plugins/helloworld.py')
help_out = subprocess.check_output([
'lightningd/lightningd',
'--lightning-dir={}'.format(directory),
'--help'
]).decode('utf-8')
assert('--greeting' not in help_out)
help_out = subprocess.check_output([
'lightningd/lightningd',
'--lightning-dir={}'.format(directory),
'--plugin={}'.format(plugin_path),
'--help'
]).decode('utf-8')
assert('--greeting' in help_out)
# Now try to see if it gets accepted, would fail to start if the
# option didn't exist
n = node_factory.get_node(options={'plugin': plugin_path, 'greeting': 'Ciao'})
n.stop()
def test_millisatoshi_passthrough(node_factory):
""" Ensure that Millisatoshi arguments and return work.
"""
plugin_path = os.path.join(os.getcwd(), 'tests/plugins/millisatoshis.py')
n = node_factory.get_node(options={'plugin': plugin_path, 'log-level': 'io'})
# By keyword
ret = n.rpc.call('echo', {'msat': Millisatoshi(17), 'not_an_msat': '22msat'})['echo_msat']
assert type(ret) == Millisatoshi
assert ret == Millisatoshi(17)
# By position
ret = n.rpc.call('echo', [Millisatoshi(18), '22msat'])['echo_msat']
assert type(ret) == Millisatoshi
assert ret == Millisatoshi(18)
def test_rpc_passthrough(node_factory):
"""Starting with a plugin exposes its RPC methods.
First check that the RPC method appears in the help output and
then try to call it.
"""
plugin_path = os.path.join(os.getcwd(), 'contrib/plugins/helloworld.py')
n = node_factory.get_node(options={'plugin': plugin_path, 'greeting': 'Ciao'})
# Make sure that the 'hello' command that the helloworld.py plugin
# has registered is available.
cmd = [hlp for hlp in n.rpc.help()['help'] if 'hello' in hlp['command']]
assert(len(cmd) == 1)
# Make sure usage message is present.
assert only_one(n.rpc.help('hello')['help'])['command'] == 'hello [name]'
# While we're at it, let's check that helloworld.py is logging
# correctly via the notifications plugin->lightningd
assert n.daemon.is_in_log('Plugin helloworld.py initialized')
# Now try to call it and see what it returns:
greet = n.rpc.hello(name='World')
assert(greet == "Ciao World")
with pytest.raises(RpcError):
n.rpc.fail()
def test_plugin_dir(node_factory):
"""--plugin-dir works"""
plugin_dir = os.path.join(os.getcwd(), 'contrib/plugins')
node_factory.get_node(options={'plugin-dir': plugin_dir, 'greeting': 'Mars'})
def test_plugin_slowinit(node_factory):
"""Tests that the 'plugin' RPC command times out if plugin doesnt respond"""
n = node_factory.get_node()
with pytest.raises(RpcError, match="Timed out while waiting for plugin response"):
n.rpc.plugin_start(os.path.join(os.getcwd(), "tests/plugins/slow_init.py"))
# It's not actually configured yet, see what happens;
    # make sure 'rescan' and 'list' controls don't crash
n.rpc.plugin_rescan()
n.rpc.plugin_list()
def test_plugin_command(node_factory):
"""Tests the 'plugin' RPC command"""
n = node_factory.get_node()
# Make sure that the 'hello' command from the helloworld.py plugin
# is not available.
cmd = [hlp for hlp in n.rpc.help()["help"] if "hello" in hlp["command"]]
assert(len(cmd) == 0)
# Add the 'contrib/plugins' test dir
n.rpc.plugin_startdir(directory=os.path.join(os.getcwd(), "contrib/plugins"))
# Make sure that the 'hello' command from the helloworld.py plugin
# is now available.
cmd = [hlp for hlp in n.rpc.help()["help"] if "hello" in hlp["command"]]
assert(len(cmd) == 1)
    # Make sure 'rescan' and 'list' subcommands don't crash
n.rpc.plugin_rescan()
n.rpc.plugin_list()
# Make sure the plugin behaves normally after stop and restart
assert("Successfully stopped helloworld.py." == n.rpc.plugin_stop(plugin="helloworld.py")[''])
n.daemon.wait_for_log(r"Killing plugin: helloworld.py")
n.rpc.plugin_start(plugin=os.path.join(os.getcwd(), "contrib/plugins/helloworld.py"))
n.daemon.wait_for_log(r"Plugin helloworld.py initialized")
assert("Hello world" == n.rpc.call(method="hello"))
# Now stop the helloworld plugin
assert("Successfully stopped helloworld.py." == n.rpc.plugin_stop(plugin="helloworld.py")[''])
n.daemon.wait_for_log(r"Killing plugin: helloworld.py")
# Make sure that the 'hello' command from the helloworld.py plugin
# is not available anymore.
cmd = [hlp for hlp in n.rpc.help()["help"] if "hello" in hlp["command"]]
assert(len(cmd) == 0)
# Test that we cannot start a plugin with 'dynamic' set to False in
# getmanifest
with pytest.raises(RpcError, match=r"Not a dynamic plugin"):
n.rpc.plugin_start(plugin=os.path.join(os.getcwd(), "tests/plugins/static.py"))
# Test that we cannot stop a started plugin with 'dynamic' flag set to
# False
n2 = node_factory.get_node(options={
"plugin": os.path.join(os.getcwd(), "tests/plugins/static.py")
})
with pytest.raises(RpcError, match=r"static.py cannot be managed when lightningd is up"):
n2.rpc.plugin_stop(plugin="static.py")
# Test that we don't crash when starting a broken plugin
with pytest.raises(RpcError, match=r"Timed out while waiting for plugin response"):
n2.rpc.plugin_start(plugin=os.path.join(os.getcwd(), "tests/plugins/broken.py"))
def test_plugin_disable(node_factory):
"""--disable-plugin works"""
plugin_dir = os.path.join(os.getcwd(), 'contrib/plugins')
# We need plugin-dir before disable-plugin!
n = node_factory.get_node(options=OrderedDict([('plugin-dir', plugin_dir),
('disable-plugin',
'{}/helloworld.py'
.format(plugin_dir))]))
with pytest.raises(RpcError):
n.rpc.hello(name='Sun')
# Also works by basename.
n = node_factory.get_node(options=OrderedDict([('plugin-dir', plugin_dir),
('disable-plugin',
'helloworld.py')]))
with pytest.raises(RpcError):
n.rpc.hello(name='Sun')
def test_plugin_hook(node_factory, executor):
"""The helloworld plugin registers a htlc_accepted hook.
The hook will sleep for a few seconds and log a
message. `lightningd` should wait for the response and only then
complete the payment.
"""
l1, l2 = node_factory.line_graph(2, opts={'plugin': os.path.join(os.getcwd(), 'contrib/plugins/helloworld.py')})
start_time = time.time()
f = executor.submit(l1.pay, l2, 100000)
l2.daemon.wait_for_log(r'on_htlc_accepted called')
# The hook will sleep for 20 seconds before answering, so `f`
# should take at least that long.
f.result()
end_time = time.time()
assert(end_time >= start_time + 20)
def test_plugin_connect_notifications(node_factory):
""" test 'connect' and 'disconnect' notifications
"""
l1, l2 = node_factory.get_nodes(2, opts={'plugin': os.path.join(os.getcwd(), 'contrib/plugins/helloworld.py')})
l1.connect(l2)
l1.daemon.wait_for_log(r'Received connect event')
l2.daemon.wait_for_log(r'Received connect event')
l2.rpc.disconnect(l1.info['id'])
l1.daemon.wait_for_log(r'Received disconnect event')
l2.daemon.wait_for_log(r'Received disconnect event')
def test_failing_plugins(directory):
fail_plugins = [
os.path.join(os.getcwd(), 'contrib/plugins/fail/failtimeout.py'),
os.path.join(os.getcwd(), 'contrib/plugins/fail/doesnotexist.py'),
]
for p in fail_plugins:
with pytest.raises(subprocess.CalledProcessError):
subprocess.check_output([
'lightningd/lightningd',
'--lightning-dir={}'.format(directory),
'--plugin={}'.format(p),
'--help',
])
def test_pay_plugin(node_factory):
l1, l2 = node_factory.line_graph(2)
inv = l2.rpc.invoice(123000, 'label', 'description', 3700)
res = l1.rpc.pay(bolt11=inv['bolt11'])
assert res['status'] == 'complete'
with pytest.raises(RpcError, match=r'missing required parameter'):
l1.rpc.call('pay')
# Make sure usage messages are present.
msg = 'pay bolt11 [msatoshi] [label] [riskfactor] [maxfeepercent] '\
'[retry_for] [maxdelay] [exemptfee]'
if DEVELOPER:
msg += ' [use_shadow]'
assert only_one(l1.rpc.help('pay')['help'])['command'] == msg
def test_plugin_connected_hook(node_factory):
""" l1 uses the reject plugin to reject connections.
l1 is configured to accept connections from l2, but not from l3.
"""
opts = [{'plugin': os.path.join(os.getcwd(), 'tests/plugins/reject.py')}, {}, {}]
l1, l2, l3 = node_factory.get_nodes(3, opts=opts)
l1.rpc.reject(l3.info['id'])
l2.connect(l1)
l1.daemon.wait_for_log(r"{} is allowed".format(l2.info['id']))
assert len(l1.rpc.listpeers(l2.info['id'])['peers']) == 1
l3.connect(l1)
l1.daemon.wait_for_log(r"{} is in reject list".format(l3.info['id']))
# FIXME: this error occurs *after* connection, so we connect then drop.
l3.daemon.wait_for_log(r"openingd-chan#1: peer_in WIRE_ERROR")
l3.daemon.wait_for_log(r"You are in reject list")
def check_disconnect():
peers = l1.rpc.listpeers(l3.info['id'])['peers']
return peers == [] or not peers[0]['connected']
wait_for(check_disconnect)
def test_async_rpcmethod(node_factory, executor):
"""This tests the async rpcmethods.
It works in conjunction with the `asynctest` plugin which stashes
requests and then resolves all of them on the fifth call.
"""
l1 = node_factory.get_node(options={'plugin': os.path.join(os.getcwd(), 'tests/plugins/asynctest.py')})
results = []
for i in range(10):
results.append(executor.submit(l1.rpc.asyncqueue))
time.sleep(3)
# None of these should have returned yet
assert len([r for r in results if r.done()]) == 0
# This last one triggers the release and all results should be 42,
# since the last number is returned for all
l1.rpc.asyncflush(42)
assert [r.result() for r in results] == [42] * len(results)
@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "Only sqlite3 implements the db_write_hook currently")
def test_db_hook(node_factory, executor):
"""This tests the db hook."""
dbfile = os.path.join(node_factory.directory, "dblog.sqlite3")
l1 = node_factory.get_node(options={'plugin': os.path.join(os.getcwd(), 'tests/plugins/dblog.py'),
'dblog-file': dbfile})
# It should see the db being created, and sometime later actually get
# initted.
# This precedes startup, so needle already past
assert l1.daemon.is_in_log(r'plugin-dblog.py: deferring \d+ commands')
l1.daemon.logsearch_start = 0
l1.daemon.wait_for_log('plugin-dblog.py: replaying pre-init data:')
l1.daemon.wait_for_log('plugin-dblog.py: CREATE TABLE version \\(version INTEGER\\)')
l1.daemon.wait_for_log("plugin-dblog.py: initialized.* 'startup': True")
l1.stop()
# Databases should be identical.
db1 = sqlite3.connect(os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, 'lightningd.sqlite3'))
db2 = sqlite3.connect(dbfile)
assert [x for x in db1.iterdump()] == [x for x in db2.iterdump()]
def test_utf8_passthrough(node_factory, executor):
l1 = node_factory.get_node(options={'plugin': os.path.join(os.getcwd(), 'tests/plugins/utf8.py'),
'log-level': 'io'})
# This works because Python unmangles.
res = l1.rpc.call('utf8', ['ナンセンス 1杯'])
assert '\\u' not in res['utf8']
assert res['utf8'] == 'ナンセンス 1杯'
# Now, try native.
out = subprocess.check_output(['cli/lightning-cli',
'--network={}'.format(TEST_NETWORK),
'--lightning-dir={}'
.format(l1.daemon.lightning_dir),
'utf8', 'ナンセンス 1杯']).decode('utf-8')
assert '\\u' not in out
assert out == '{\n "utf8": "ナンセンス 1杯"\n}\n'
def test_invoice_payment_hook(node_factory):
""" l1 uses the reject-payment plugin to reject invoices with odd preimages.
"""
opts = [{}, {'plugin': os.path.join(os.getcwd(), 'tests/plugins/reject_some_invoices.py')}]
l1, l2 = node_factory.line_graph(2, opts=opts)
# This one works
inv1 = l2.rpc.invoice(123000, 'label', 'description', preimage='1' * 64)
l1.rpc.pay(inv1['bolt11'])
l2.daemon.wait_for_log('label=label')
l2.daemon.wait_for_log('msat=')
l2.daemon.wait_for_log('preimage=' + '1' * 64)
# This one will be rejected.
inv2 = l2.rpc.invoice(123000, 'label2', 'description', preimage='0' * 64)
with pytest.raises(RpcError):
l1.rpc.pay(inv2['bolt11'])
pstatus = l1.rpc.call('paystatus', [inv2['bolt11']])['pay'][0]
assert pstatus['attempts'][0]['failure']['data']['failcodename'] == 'WIRE_TEMPORARY_NODE_FAILURE'
l2.daemon.wait_for_log('label=label2')
l2.daemon.wait_for_log('msat=')
l2.daemon.wait_for_log('preimage=' + '0' * 64)
def test_invoice_payment_hook_hold(node_factory):
""" l1 uses the hold_invoice plugin to delay invoice payment.
"""
opts = [{}, {'plugin': os.path.join(os.getcwd(), 'tests/plugins/hold_invoice.py'), 'holdtime': TIMEOUT / 2}]
l1, l2 = node_factory.line_graph(2, opts=opts)
inv1 = l2.rpc.invoice(123000, 'label', 'description', preimage='1' * 64)
l1.rpc.pay(inv1['bolt11'])
def test_openchannel_hook(node_factory, bitcoind):
""" l2 uses the reject_odd_funding_amounts plugin to reject some openings.
"""
opts = [{}, {'plugin': os.path.join(os.getcwd(), 'tests/plugins/reject_odd_funding_amounts.py')}]
l1, l2 = node_factory.line_graph(2, fundchannel=False, opts=opts)
# Get some funds.
addr = l1.rpc.newaddr()['bech32']
txid = bitcoind.rpc.sendtoaddress(addr, 10)
numfunds = len(l1.rpc.listfunds()['outputs'])
bitcoind.generate_block(1, txid)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) > numfunds)
# Even amount: works.
l1.rpc.fundchannel(l2.info['id'], 100000)
# Make sure plugin got all the vars we expect
l2.daemon.wait_for_log('reject_odd_funding_amounts.py: 11 VARS')
l2.daemon.wait_for_log('reject_odd_funding_amounts.py: channel_flags=1')
l2.daemon.wait_for_log('reject_odd_funding_amounts.py: channel_reserve_satoshis=1000000msat')
l2.daemon.wait_for_log('reject_odd_funding_amounts.py: dust_limit_satoshis=546000msat')
l2.daemon.wait_for_log('reject_odd_funding_amounts.py: feerate_per_kw=7500')
l2.daemon.wait_for_log('reject_odd_funding_amounts.py: funding_satoshis=100000000msat')
l2.daemon.wait_for_log('reject_odd_funding_amounts.py: htlc_minimum_msat=0msat')
l2.daemon.wait_for_log('reject_odd_funding_amounts.py: id={}'.format(l1.info['id']))
l2.daemon.wait_for_log('reject_odd_funding_amounts.py: max_accepted_htlcs=483')
l2.daemon.wait_for_log('reject_odd_funding_amounts.py: max_htlc_value_in_flight_msat=18446744073709551615msat')
l2.daemon.wait_for_log('reject_odd_funding_amounts.py: push_msat=0msat')
l2.daemon.wait_for_log('reject_odd_funding_amounts.py: to_self_delay=5')
# Close it.
txid = l1.rpc.close(l2.info['id'])['txid']
bitcoind.generate_block(1, txid)
wait_for(lambda: [c['state'] for c in only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['channels']] == ['ONCHAIN'])
# Odd amount: fails
l1.connect(l2)
with pytest.raises(RpcError, match=r"I don't like odd amounts"):
l1.rpc.fundchannel(l2.info['id'], 100001)
@unittest.skipIf(not DEVELOPER, "without DEVELOPER=1, gossip v slow")
def test_htlc_accepted_hook_fail(node_factory):
"""Send payments from l1 to l2, but l2 just declines everything.
l2 is configured with a plugin that'll hook into htlc_accepted and
always return failures. The same should also work for forwarded
htlcs in the second half.
"""
l1, l2, l3 = node_factory.line_graph(3, opts=[
{},
{'plugin': os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')},
{}
], wait_for_announce=True)
# This must fail
phash = l2.rpc.invoice(1000, "lbl", "desc")['payment_hash']
route = l1.rpc.getroute(l2.info['id'], 1000, 1)['route']
    # We shouldn't use the `pay` command here because l2 rejects with WIRE_TEMPORARY_NODE_FAILURE,
    # so it would be excluded when l1 tries another pay attempt.
    # Note that if the destination is excluded, the route result is undefined.
l1.rpc.sendpay(route, phash)
with pytest.raises(RpcError) as excinfo:
l1.rpc.waitsendpay(phash)
assert excinfo.value.error['data']['failcode'] == 0x2002
assert excinfo.value.error['data']['erring_index'] == 1
# And the invoice must still be unpaid
inv = l2.rpc.listinvoices("lbl")['invoices']
assert len(inv) == 1 and inv[0]['status'] == 'unpaid'
# Now try with forwarded HTLCs: l2 should still fail them
# This must fail
inv = l3.rpc.invoice(1000, "lbl", "desc")['bolt11']
with pytest.raises(RpcError):
l1.rpc.pay(inv)
# And the invoice must still be unpaid
inv = l3.rpc.listinvoices("lbl")['invoices']
assert len(inv) == 1 and inv[0]['status'] == 'unpaid'
@unittest.skipIf(not DEVELOPER, "without DEVELOPER=1, gossip v slow")
def test_htlc_accepted_hook_resolve(node_factory):
"""l3 creates an invoice, l2 knows the preimage and will shortcircuit.
"""
l1, l2, l3 = node_factory.line_graph(3, opts=[
{},
{'plugin': os.path.join(os.getcwd(), 'tests/plugins/shortcircuit.py')},
{}
], wait_for_announce=True)
inv = l3.rpc.invoice(msatoshi=1000, label="lbl", description="desc", preimage="00" * 32)['bolt11']
l1.rpc.pay(inv)
# And the invoice must still be unpaid
inv = l3.rpc.listinvoices("lbl")['invoices']
assert len(inv) == 1 and inv[0]['status'] == 'unpaid'
def test_htlc_accepted_hook_direct_restart(node_factory, executor):
"""l2 restarts while it is pondering what to do with an HTLC.
"""
l1, l2 = node_factory.line_graph(2, opts=[
{'may_reconnect': True},
{'may_reconnect': True,
'plugin': os.path.join(os.getcwd(), 'tests/plugins/hold_htlcs.py')}
])
i1 = l2.rpc.invoice(msatoshi=1000, label="direct", description="desc")['bolt11']
f1 = executor.submit(l1.rpc.pay, i1)
l2.daemon.wait_for_log(r'Holding onto an incoming htlc for 10 seconds')
needle = l2.daemon.logsearch_start
l2.restart()
# Now it should try again, *after* initializing.
# This may be before "Server started with public key" swallowed by restart()
l2.daemon.logsearch_start = needle + 1
l2.daemon.wait_for_log(r'hold_htlcs.py initializing')
l2.daemon.wait_for_log(r'Holding onto an incoming htlc for 10 seconds')
f1.result()
@unittest.skipIf(not DEVELOPER, "without DEVELOPER=1, gossip v slow")
def test_htlc_accepted_hook_forward_restart(node_factory, executor):
"""l2 restarts while it is pondering what to do with an HTLC.
"""
l1, l2, l3 = node_factory.line_graph(3, opts=[
{'may_reconnect': True},
{'may_reconnect': True,
'plugin': os.path.join(os.getcwd(), 'tests/plugins/hold_htlcs.py')},
{'may_reconnect': True},
], wait_for_announce=True)
i1 = l3.rpc.invoice(msatoshi=1000, label="direct", description="desc")['bolt11']
f1 = executor.submit(l1.rpc.dev_pay, i1, use_shadow=False)
l2.daemon.wait_for_log(r'Holding onto an incoming htlc for 10 seconds')
needle = l2.daemon.logsearch_start
l2.restart()
# Now it should try again, *after* initializing.
# This may be before "Server started with public key" swallowed by restart()
l2.daemon.logsearch_start = needle + 1
l2.daemon.wait_for_log(r'hold_htlcs.py initializing')
l2.daemon.wait_for_log(r'Holding onto an incoming htlc for 10 seconds')
# Grab the file where the plugin wrote the onion and read it in for some
# additional checks
logline = l2.daemon.wait_for_log(r'Onion written to')
fname = re.search(r'Onion written to (.*\.json)', logline).group(1)
onion = json.load(open(fname))
assert onion['type'] == 'tlv'
assert re.match(r'^11020203e80401..0608................$', onion['payload'])
assert len(onion['shared_secret']) == 64
assert onion['forward_amount'] == '1000msat'
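    # A serialized onion is 1 version byte + 33-byte ephemeral pubkey + 1300 bytes of hop
    # payloads + a 32-byte HMAC; the plugin sees it hex-encoded, hence the factor of 2.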
assert len(onion['next_onion']) == 2 * (1300 + 32 + 33 + 1)
f1.result()
def test_warning_notification(node_factory):
""" test 'warning' notifications
"""
l1 = node_factory.get_node(options={'plugin': os.path.join(os.getcwd(), 'tests/plugins/pretend_badlog.py')}, allow_broken_log=True)
# 1. test 'warn' level
event = "Test warning notification(for unusual event)"
l1.rpc.call('pretendbad', {'event': event, 'level': 'warn'})
    # ensure an unusual log_entry was produced by the 'pretendbad' method
l1.daemon.wait_for_log('plugin-pretend_badlog.py: Test warning notification\\(for unusual event\\)')
# now wait for notification
l1.daemon.wait_for_log('plugin-pretend_badlog.py: Received warning')
l1.daemon.wait_for_log('plugin-pretend_badlog.py: level: warn')
l1.daemon.wait_for_log('plugin-pretend_badlog.py: time: *')
l1.daemon.wait_for_log('plugin-pretend_badlog.py: source: plugin-pretend_badlog.py')
l1.daemon.wait_for_log('plugin-pretend_badlog.py: log: Test warning notification\\(for unusual event\\)')
# 2. test 'error' level, steps like above
event = "Test warning notification(for broken event)"
l1.rpc.call('pretendbad', {'event': event, 'level': 'error'})
l1.daemon.wait_for_log(r'\*\*BROKEN\*\* plugin-pretend_badlog.py: Test warning notification\(for broken event\)')
l1.daemon.wait_for_log('plugin-pretend_badlog.py: Received warning')
l1.daemon.wait_for_log('plugin-pretend_badlog.py: level: error')
l1.daemon.wait_for_log('plugin-pretend_badlog.py: time: *')
l1.daemon.wait_for_log('plugin-pretend_badlog.py: source: plugin-pretend_badlog.py')
l1.daemon.wait_for_log('plugin-pretend_badlog.py: log: Test warning notification\\(for broken event\\)')
@unittest.skipIf(not DEVELOPER, "needs to deactivate shadow routing")
def test_invoice_payment_notification(node_factory):
"""
Test the 'invoice_payment' notification
"""
opts = [{}, {"plugin": os.path.join(os.getcwd(), "contrib/plugins/helloworld.py")}]
l1, l2 = node_factory.line_graph(2, opts=opts)
msats = 12345
preimage = '1' * 64
label = "a_descriptive_label"
inv1 = l2.rpc.invoice(msats, label, 'description', preimage=preimage)
l1.rpc.dev_pay(inv1['bolt11'], use_shadow=False)
l2.daemon.wait_for_log(r"Received invoice_payment event for label {},"
" preimage {}, and amount of {}msat"
.format(label, preimage, msats))
def test_channel_opened_notification(node_factory):
"""
Test the 'channel_opened' notification sent at channel funding success.
"""
opts = [{}, {"plugin": os.path.join(os.getcwd(), "tests/plugins/misc_notifications.py")}]
amount = 10**6
l1, l2 = node_factory.line_graph(2, fundchannel=True, fundamount=amount,
opts=opts)
l2.daemon.wait_for_log(r"A channel was opened to us by {}, "
"with an amount of {}*"
.format(l1.info["id"], amount))
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_forward_event_notification(node_factory, bitcoind, executor):
""" test 'forward_event' notifications
"""
amount = 10**8
disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
l1, l2, l3 = node_factory.line_graph(3, opts=[
{},
{'plugin': os.path.join(os.getcwd(), 'tests/plugins/forward_payment_status.py')},
{}
], wait_for_announce=True)
l4 = node_factory.get_node()
l5 = node_factory.get_node(disconnect=disconnects)
l2.openchannel(l4, 10**6, wait_for_announce=False)
l2.openchannel(l5, 10**6, wait_for_announce=True)
bitcoind.generate_block(5)
wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 8)
payment_hash13 = l3.rpc.invoice(amount, "first", "desc")['payment_hash']
route = l1.rpc.getroute(l3.info['id'], amount, 1)['route']
# status: offered -> settled
l1.rpc.sendpay(route, payment_hash13)
l1.rpc.waitsendpay(payment_hash13)
# status: offered -> failed
route = l1.rpc.getroute(l4.info['id'], amount, 1)['route']
payment_hash14 = "f" * 64
with pytest.raises(RpcError):
l1.rpc.sendpay(route, payment_hash14)
l1.rpc.waitsendpay(payment_hash14)
# status: offered -> local_failed
payment_hash15 = l5.rpc.invoice(amount, 'onchain_timeout', 'desc')['payment_hash']
fee = amount * 10 // 1000000 + 1
c12 = l1.get_channel_scid(l2)
c25 = l2.get_channel_scid(l5)
route = [{'msatoshi': amount + fee - 1,
'id': l2.info['id'],
'delay': 12,
'channel': c12},
{'msatoshi': amount - 1,
'id': l5.info['id'],
'delay': 5,
'channel': c25}]
executor.submit(l1.rpc.sendpay, route, payment_hash15)
l5.daemon.wait_for_log('permfail')
l5.wait_for_channel_onchain(l2.info['id'])
l2.bitcoin.generate_block(1)
l2.daemon.wait_for_log(' to ONCHAIN')
l5.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US .* after 6 blocks')
bitcoind.generate_block(6)
l2.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
l2.daemon.wait_for_log('Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US')
l5.daemon.wait_for_log('Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC')
bitcoind.generate_block(100)
sync_blockheight(bitcoind, [l2])
stats = l2.rpc.listforwards()['forwards']
assert len(stats) == 3
plugin_stats = l2.rpc.call('listforwards_plugin')['forwards']
assert len(plugin_stats) == 6
# use stats to build what we expect went to plugin.
expect = stats[0].copy()
# First event won't have conclusion.
del expect['resolved_time']
expect['status'] = 'offered'
assert plugin_stats[0] == expect
expect = stats[0].copy()
assert plugin_stats[1] == expect
expect = stats[1].copy()
del expect['resolved_time']
expect['status'] = 'offered'
assert plugin_stats[2] == expect
expect = stats[1].copy()
assert plugin_stats[3] == expect
expect = stats[2].copy()
del expect['failcode']
del expect['failreason']
expect['status'] = 'offered'
assert plugin_stats[4] == expect
expect = stats[2].copy()
assert plugin_stats[5] == expect
def test_plugin_deprecated_relpath(node_factory):
"""Test that we can use old-style relative plugin paths with deprecated-apis"""
l1 = node_factory.get_node(options={'plugin-dir': 'contrib/plugins',
'plugin': 'tests/plugins/millisatoshis.py',
'allow-deprecated-apis': True})
plugins = l1.rpc.plugin_list()['plugins']
assert ('helloworld.py', True) in [(os.path.basename(p['name']), p['active']) for p in plugins]
assert ('millisatoshis.py', True) in [(os.path.basename(p['name']), p['active']) for p in plugins]
assert l1.daemon.is_in_log('DEPRECATED WARNING.*plugin-dir={}'
.format(os.path.join(os.getcwd(),
'contrib/plugins')))
assert l1.daemon.is_in_log('DEPRECATED WARNING.*plugin={}'
.format(os.path.join(os.getcwd(),
'tests/plugins/millisatoshis.py')))
def test_sendpay_notifications(node_factory, bitcoind):
""" test 'sendpay_success' and 'sendpay_failure' notifications
"""
amount = 10**8
opts = [{'plugin': os.path.join(os.getcwd(), 'tests/plugins/sendpay_notifications.py')},
{},
{'may_reconnect': False}]
l1, l2, l3 = node_factory.line_graph(3, opts=opts, wait_for_announce=True)
chanid23 = l2.get_channel_scid(l3)
payment_hash1 = l3.rpc.invoice(amount, "first", "desc")['payment_hash']
payment_hash2 = l3.rpc.invoice(amount, "second", "desc")['payment_hash']
route = l1.rpc.getroute(l3.info['id'], amount, 1)['route']
l1.rpc.sendpay(route, payment_hash1)
response1 = l1.rpc.waitsendpay(payment_hash1)
l2.rpc.close(chanid23, 1)
l1.rpc.sendpay(route, payment_hash2)
with pytest.raises(RpcError) as err:
l1.rpc.waitsendpay(payment_hash2)
results = l1.rpc.call('listsendpays_plugin')
assert len(results['sendpay_success']) == 1
assert len(results['sendpay_failure']) == 1
assert results['sendpay_success'][0] == response1
assert results['sendpay_failure'][0] == err.value.error
def test_rpc_command_hook(node_factory):
"""Test the `sensitive_command` hook"""
plugin = os.path.join(os.getcwd(), "tests/plugins/rpc_command.py")
l1 = node_factory.get_node(options={"plugin": plugin})
# Usage of "sendpay" has been restricted by the plugin
with pytest.raises(RpcError, match=r"You cannot do this"):
l1.rpc.call("sendpay")
# The plugin replaces a call made for the "invoice" command
invoice = l1.rpc.invoice(10**6, "test_side", "test_input")
decoded = l1.rpc.decodepay(invoice["bolt11"])
assert decoded["description"] == "A plugin modified this description"
# The plugin sends a custom response to "listfunds"
funds = l1.rpc.listfunds()
assert funds[0] == "Custom result"
# Test command redirection to a plugin
l1.rpc.call('help', [0])
# Test command which removes plugin itself!
l1.rpc.plugin_stop('rpc_command.py')
| 39.563939 | 135 | 0.659459 |
4a22174f05051bca63d41ffdea87e13639a0e25c | 4,983 | py | Python | torchOnVideo/super_resolution/SOF_VSR/train_model.py | torchOnVideo/torchOnVideo | aa07d5661f772eca027ecc6b79e14bd68a515aa1 | [
"MIT"
] | 2 | 2021-03-19T08:05:06.000Z | 2021-05-22T21:54:10.000Z | torchOnVideo/super_resolution/SOF_VSR/train_model.py | torchOnVideo/torchOnVideo | aa07d5661f772eca027ecc6b79e14bd68a515aa1 | [
"MIT"
] | null | null | null | torchOnVideo/super_resolution/SOF_VSR/train_model.py | torchOnVideo/torchOnVideo | aa07d5661f772eca027ecc6b79e14bd68a515aa1 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
from torch.utils.data import DataLoader
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import os
from ..SOF_VSR import SOF_VSR
from ..models import SOFVSR, OFRnet, SRnet
from torchOnVideo.datasets.CVDL.super_resolution import TrainSOFVSR
from torchOnVideo.losses import OFR_loss
class TrainModel(SOF_VSR):
def __init__(self, model=None, train_set=None, train_dir='../../db/CVDL_SOFVSR_traindata',
train_data_loader=None, loss=None, checkpoint=None, start_epoch=0, use_start_epoch_checkpoint=False,
output_dir="../../outputs/CVDL_SOFVSR",
scale = 4, patch_size=32, degradation='BI',
epochs=20, batch_size=32, shuffle=True, num_workers=4,
n_iters=200000,
optimizer=None, lr=1e-3, milestone=[80000, 16000],
scheduler=None, gpu_mode=False,
epoch_display_step=1, batch_display_step=1,
run_validation=False, val_dir="../../db/f16_vnlnet_valdata", val_set=None, val_loader=None):
super(TrainModel, self).__init__(scale=scale)
self.degradation = degradation
self.gpu_mode = gpu_mode
print('==> Building training set ')
if train_set is None:
self.train_set = TrainSOFVSR(trainset_dir=train_dir, scale=scale, patch_size=patch_size, n_iters=n_iters,
batch_size=batch_size, degradation=degradation)
else:
self.train_set = train_set
print('==> Building training data loader ')
if train_data_loader is None:
self.train_loader = DataLoader(dataset=self.train_set, batch_size=batch_size, shuffle=shuffle,
num_workers=num_workers)
else:
self.train_loader = train_data_loader
print('==> Building model ')
if model is None:
self.model = SOFVSR(scale=scale)
else:
self.model = model
print('==> Building optimizer ')
if optimizer is None:
self.optimizer = optim.Adam(self.model.parameters(), lr=lr)
else:
self.optimizer = optimizer
print('==> Building scheduler ')
if scheduler is None:
self.scheduler = lr_scheduler.MultiStepLR(self.optimizer, milestones=milestone, gamma=0.01)
else:
self.scheduler = scheduler
        if loss is None:
self.criterion = nn.MSELoss(size_average=False)
else:
self.criterion = loss
self.max_step = self.train_loader.__len__()
def __call__(self, *args, **kwargs):
self.model.train()
print('==> Training has started ')
loss_list = []
for idx_iter, (LR, HR) in enumerate(self.train_loader):
self.scheduler.step()
# data
b, n_frames, h_lr, w_lr = LR.size()
idx_center = (n_frames - 1) // 2
LR, HR = Variable(LR), Variable(HR)
if self.gpu_mode:
LR = LR.cuda()
HR = HR.cuda()
LR = LR.view(b, -1, 1, h_lr, w_lr)
HR = HR.view(b, -1, 1, h_lr * self.scale, w_lr * self.scale)
# inference
flow_L1, flow_L2, flow_L3, SR = self.model(LR)
# loss
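            # The objective combines an MSE super-resolution loss on the center frame with
            # optical-flow reconstruction (OFR) losses at three pyramid levels for every
            # non-center frame (weighted 0.1 / 0.2 / 1.0), the OFR term scaled by 0.01 and
            # averaged over the non-center frames.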
loss_SR = self.criterion(SR, HR[:, idx_center, :, :, :])
            # keep the accumulated OFR loss on the same device as the rest of the computation
            loss_OFR = torch.zeros(1).cuda() if self.gpu_mode else torch.zeros(1)
for i in range(n_frames):
if i != idx_center:
loss_L1 = OFR_loss(F.avg_pool2d(LR[:, i, :, :, :], kernel_size=2),
F.avg_pool2d(LR[:, idx_center, :, :, :], kernel_size=2),
flow_L1[i])
loss_L2 = OFR_loss(LR[:, i, :, :, :], LR[:, idx_center, :, :, :], flow_L2[i])
loss_L3 = OFR_loss(HR[:, i, :, :, :], HR[:, idx_center, :, :, :], flow_L3[i])
loss_OFR = loss_OFR + loss_L3 + 0.2 * loss_L2 + 0.1 * loss_L1
loss = loss_SR + 0.01 * loss_OFR / (n_frames - 1)
loss_list.append(loss.data.cpu())
# backwards
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# save checkpoint
if idx_iter % 5000 == 0:
print('Iteration---%6d, loss---%f' % (idx_iter + 1, np.array(loss_list).mean()))
save_path = 'log/' + self.degradation + '_x' + str(self.scale)
save_name = self.degradation + '_x' + str(self.scale) + '_iter' + str(idx_iter) + '.pth'
if not os.path.exists(save_path):
os.mkdir(save_path)
torch.save(self.model.state_dict(), save_path + '/' + save_name)
loss_list = []
| 38.038168 | 117 | 0.554686 |
4a22189b4204d1c416525a653d94f4fc026ebb68 | 559 | py | Python | sheetfu/__init__.py | shilik/sheetfu | 3b77e27fe3295f3168c8361f495eb873c2ac3bf3 | ["MIT"] | 1 | 2020-01-04T14:37:27.000Z | 2020-01-04T14:37:27.000Z | sheetfu/__init__.py | shilik/sheetfu | 3b77e27fe3295f3168c8361f495eb873c2ac3bf3 | ["MIT"] | null | null | null | sheetfu/__init__.py | shilik/sheetfu | 3b77e27fe3295f3168c8361f495eb873c2ac3bf3 | ["MIT"] | null | null | null | # -*- coding: utf-8 -*-
"""
sheetfu
~~~~~~~
A python library to interact with Google Sheets.
:copyright: © 2018 by Social Point Labs.
:license: MIT, see LICENSE for more details.
"""
# Important! Never update this version manually. The automatic semantic-releases library takes care of updating it #
# Manually changing this number could result in unexpected behaviour #
__version__ = "1.5.0"
from sheetfu.client import SpreadsheetApp
from sheetfu.modules.table import Table
from sheetfu.modules.table_selector import TableSelector
| 25.409091 | 116 | 0.733453 |
4a221941b9c5473e8ef3a19a3f3e4f79aa5387f9 | 10,786 | py | Python | tests/coworks/tech/test_ms.py | sidneyarcidiacono/coworks | 7f51b83e8699ced991d16a5a43ad19e569b6e814 | ["MIT"] | null | null | null | tests/coworks/tech/test_ms.py | sidneyarcidiacono/coworks | 7f51b83e8699ced991d16a5a43ad19e569b6e814 | ["MIT"] | null | null | null | tests/coworks/tech/test_ms.py | sidneyarcidiacono/coworks | 7f51b83e8699ced991d16a5a43ad19e569b6e814 | ["MIT"] | null | null | null | from coworks.coworks import ApiResponse
from tests.coworks.ms import *
class ParamMS(TechMicroService):
value = "123"
def token_authorizer(self, token):
return True
@entry
def get(self, str):
return str
@entry
def get_concat(self, str1, str2):
return str1 + str2
@entry
def get_value(self):
return self.value
@entry
def put_value(self, value=None):
self.value = value
return self.value
@entry
def get_param(self, str1, param1='default1', param2='default2'):
return str1 + str(param1) + param2
@entry
def post_params(self, **kwargs):
return {
'keys': [k for k in kwargs.keys()],
}
class TupleReturnedMS(TechMS):
@entry
def get(self):
return 'ok', 200
@entry
def get_json(self):
return {'value': 'ok'}, 200
@entry
def get_resp(self, str):
return ApiResponse(str, 200)
@entry
def get_error(self, str):
return str, 300
@entry
def get_tuple(self, str):
return str, 200, {'x-test': 'true'}
class AmbiguousMS(TechMS):
@entry
def get(self, uid):
return uid, 200
@entry
def post_test(self):
return {'value': 'ok'}, 200
class TestClass:
def test_request_arg(self):
app = SimpleMS()
with app.test_client() as c:
response = c.get('/', headers={'Accept': 'text/plain', 'Authorization': 'token'})
assert response.status_code == 200
assert response.get_data(as_text=True) == "get"
assert 'Content-Type' in response.headers
assert response.headers['Content-Type'] == 'application/json'
assert 'Content-Length' in response.headers
assert response.headers['Content-Length'] == str(len(response.get_data(as_text=True)))
response = c.post('/', headers={'Authorization': 'token'})
assert response.status_code == 405
response = c.get('/get1', headers={'Authorization': 'token'})
assert response.status_code == 404
response = c.get('/content', headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.get_data(as_text=True) == "get content"
response = c.get('/content/3', headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.get_data(as_text=True) == "get content with 3"
response = c.get('/content/3/other', headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.get_data(as_text=True) == "get content with 3 and other"
response = c.post('/content', json={"other": 'other'}, headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.get_data(as_text=True) == "post content without value but other"
response = c.post('/content/3', json={"other": 'other'}, headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.get_data(as_text=True) == "post content with 3 and other"
response = c.post('/content/3', json="other", headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.get_data(as_text=True) == "post content with 3 and other"
response = c.post('/content/3', json={"other": 'other', "value": 5}, headers={'Authorization': 'token'})
assert response.status_code == 400
def test_request_kwargs(self):
app = SimpleMS()
with app.test_client() as c:
response = c.get('/kwparam1?value=5', headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.get_data(as_text=True) == "get **param with only 5"
response = c.get('/kwparam1?other=other&value=5', headers={'Authorization': 'token'})
assert response.status_code == 400
response = c.get('/kwparam1?value=5', headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.get_data(as_text=True) == "get **param with only 5"
response = c.get('/kwparam1', json={"other": 'other', "value": 5}, headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.get_data(as_text=True) == "get **param with only 0"
response = c.get('/kwparam2?other=other&value=5', headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.get_data(as_text=True) == "get **param with 5 and ['other']"
response = c.get('/kwparam2', json={"other": 'other', "value": 5}, headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.get_data(as_text=True) == "get **param with 0 and []"
response = c.put('/kwparam2', json={"other": 'other', "value": 5}, headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.get_data(as_text=True) == "get **param with 5 and ['other']"
response = c.put('/kwparam2?other=other&value=5', headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.get_data(as_text=True) == "get **param with 5 and ['other']"
response = c.get('/extended/content', headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.get_data(as_text=True) == "hello world"
def test_request_form_data(self):
"""normal API call."""
app = ParamMS()
with app.test_client() as c:
files = {
'template': io.BytesIO(b"hello {{ world_name }}"),
}
data = {
'key': 'value',
'template': (files['template'], 'template.j2'),
}
response = c.post('/params', content_type='multipart/form-data', data=data,
headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.is_json
assert 'keys' in response.json
assert response.json['keys'] == ['key', 'template']
def test_parameterized(self):
app = ParamMS()
with app.test_client() as c:
response = c.get('/123', headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.get_data(as_text=True) == '123'
response = c.get('/concat/123/456', headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.get_data(as_text=True) == '123456'
response = c.get('/value', headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.get_data(as_text=True) == '123'
response = c.put("/value", json={'value': "456"}, headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.get_data(as_text=True) == '456'
response = c.get("/value", headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.get_data(as_text=True) == '456'
response = c.get('/param/test1', headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.get_data(as_text=True) == 'test1default1default2'
response = c.get('/param/test1?param1=value1', headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.get_data(as_text=True) == 'test1value1default2'
response = c.get('/param/test1?param2=value2', headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.get_data(as_text=True) == 'test1default1value2'
response = c.get('/param/test1?param1=value1¶m2=value2', headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.get_data(as_text=True) == 'test1value1value2'
response = c.get('/param/test1?param1=value1¶m1=value2', headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.get_data(as_text=True) == "test1['value1', 'value2']default2"
def test_slug_parameterized(self):
app = ParamMS()
with app.test_client() as c:
response = c.get('/123', headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.get_data(as_text=True) == '123'
response = c.get('/concat/123/456', headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.get_data(as_text=True) == '123456'
def test_tuple_returned(self):
app = TupleReturnedMS()
with app.test_client() as c:
headers = {'Accept': 'text/plain', 'Authorization': 'token'}
response = c.get('/', headers=headers)
assert response.status_code == 200
assert response.get_data(as_text=True) == 'ok'
assert response.headers['content-type'] == 'application/json'
response = c.get('/json', headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.json['value'] == 'ok'
assert response.headers['content-type'] == 'application/json'
response = c.get('/resp/ok', headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.get_data(as_text=True) == 'ok'
assert response.headers['content-type'] == 'application/json'
response = c.get('/tuple/test', headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.headers['content-type'] == 'application/json'
assert response.headers['x-test'] == 'true'
assert response.get_data(as_text=True) == 'test'
def test_entry_not_unique(self):
app = AmbiguousMS()
with app.test_request_context():
assert '/test' in app.routes
with app.test_client() as c:
response = c.get('/123', headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.get_data(as_text=True) == '123'
response = c.post('/test', headers={'Authorization': 'token'})
assert response.status_code == 200
assert response.json == {'value': "ok"}
| 45.70339 | 116 | 0.589746 |
4a2219d787dadfb374bc1b6490512cd3922c5c59 | 6,850 | py | Python | riboviz/utils.py | acope3/riboviz | 03a4f13b2d833b8650ebf33bdce81fe2639eb9cf | ["Apache-2.0"] | null | null | null | riboviz/utils.py | acope3/riboviz | 03a4f13b2d833b8650ebf33bdce81fe2639eb9cf | ["Apache-2.0"] | null | null | null | riboviz/utils.py | acope3/riboviz | 03a4f13b2d833b8650ebf33bdce81fe2639eb9cf | ["Apache-2.0"] | null | null | null | """
Useful functions.
"""
import os
import os.path
import numpy as np
import pandas as pd
def value_in_dict(key, dictionary, allow_false_empty=False):
"""
Check that a value is in a dictionary and the value is not
``None``.
If dictionary is::
{
"A":1,
"B":None,
"C":{},"D":[],
"E":[1],
"F":True,
"G":False
}
then:
* ``value_in_dict("A", dictionary)`` is ``True``
* ``value_in_dict("B", dictionary)`` is ``False``
* ``value_in_dict("C", dictionary)`` is ``False``
* ``value_in_dict("D", dictionary)`` is ``False``
* ``value_in_dict("E", dictionary)`` is ``True``
* ``value_in_dict("F", dictionary)`` is ``True``
* ``value_in_dict("G", dictionary)`` is ``False``
* ``value_in_dict("A", dictionary, True)`` is ``True``
* ``value_in_dict("B", dictionary, True)`` is ``False``
* ``value_in_dict("C", dictionary, True)`` is ``True``
* ``value_in_dict("D", dictionary, True)`` is ``True``
* ``value_in_dict("E", dictionary, True)`` is ``True``
* ``value_in_dict("F", dictionary, True)`` is ``True``
* ``value_in_dict("G", dictionary, True)`` is ``True``
:param key: Key
:type key: -
:param dictionary: Dictionary
:type dictionary: dict
:param allow_false_empty: Allow ``False``, empty string, \
``list`` or ``dict`` to be considered as an existing value
:type allow_false_empty: bool
:return: ``True`` or ``False``
:rtype: bool
"""
is_in = key in dictionary and dictionary[key] is not None
if not allow_false_empty:
is_in = is_in and bool(dictionary[key])
return is_in
def list_to_str(lst):
"""
Convert list to space-delimited string.
:param lst: list
:type lst: list
:return: list as string
:rtype: str or unicode
"""
return ' '.join(map(str, lst))
def get_file_ext(file_name):
"""
Given a file name return full file extension, everything after the
first ``.`` in the file name. For example, given
``example.fastq.gz`` return ``fastq.gz``, given ``example.fastq``
return ``fastq``, given ``example`` return ``''``. The extension
is returned in lower-case.
:param file_name: File name
    :type file_name: str or unicode
:return: Extension
:rtype: str or unicode
"""
file_type = ".".join(os.path.basename(file_name).split(".")[1:])
return file_type.lower()
def equal_file_names(file1, file2):
"""
Compare local names of two files each of which must exist and be a
file.
:param file1: File name
:type file1: str or unicode
:param file2: File name
:type file2: str or unicode
    :raise AssertionError: If files do not exist, are not files or
their names differ
"""
local_file1 = os.path.split(file1)[1].lower()
local_file2 = os.path.split(file2)[1].lower()
    assert os.path.exists(file1) and os.path.isfile(file1),\
        "File %s does not exist or is not a file" % file1
    assert os.path.exists(file2) and os.path.isfile(file2),\
        "File %s does not exist or is not a file" % file2
assert local_file1 == local_file2,\
"Unequal file names: %s, %s" % (local_file1, local_file2)
def equal_file_sizes(file1, file2):
"""
Compare sizes of two files.
:param file1: File name
:type file1: str or unicode
:param file2: File name
:type file2: str or unicode
:raise AssertionError: If the file sizes differ
:raise Exception: If problems arise when accessing the files
"""
stat1 = os.stat(file1)
stat2 = os.stat(file2)
assert stat1.st_size == stat2.st_size,\
"Unequal file sizes: %s, %s" % (file1, file2)
def equal_dataframes(data1, data2, tolerance=0.0001):
"""
Compare two Pandas data frames for equality. The data frames are
expected to be two dimensional i.e. rows and columns.
The data frames are compared column-by-column:
* ``float64`` columns are converted to numpy arrays then tested for
equality to within the given tolerance using
``numpy.allclose``. This is used instead of
``pandas.testing.assert_frame_equal`` as there is an issue with
how that function handles precision (see
'pandas.testing.assert_frame_equal doesn't do precision
according to the doc' #25068,
https://github.com/pandas-dev/pandas/issues/25068). In addition,
``NAN`` values are considered to be equal.
* All other columns (``object``, ``int64``, ``bool``,
``datetime64``, ``timedelta``) are compared for exact equality
using ``pandas.core.series.Series.equals``.
:param data1: dataframe
:type data1: pandas.core.frame.DataFrame
:param data2: dataframe
:type data2: pandas.core.frame.DataFrame
:param tolerance: Tolerance for floating point comparisons
:type tolerance: float
:raise AssertionError: If the data frames differ in their content
"""
assert data1.shape == data2.shape,\
"Unequal shape: %s, %s"\
% (str(data1.shape), str(data2.shape))
assert set(data1.columns) == set(data2.columns),\
"Unequal column names: %s, %s"\
% (str(data1.columns), str(data2.columns))
for column in data1.columns:
column1 = data1[column]
column2 = data2[column]
if column1.dtype in (int, float) and column2.dtype in (int, float):
column_data1 = column1.to_numpy()
column_data2 = column2.to_numpy()
assert np.allclose(column_data1,
column_data2,
rtol=0,
atol=tolerance,
equal_nan=True),\
"Unequal column values: %s" % column
else:
assert column1.equals(column2),\
"Unequal column values: %s" % column
def equal_tsv(file1, file2, tolerance=0.0001, comment="#"):
"""
Compare two tab-separated (TSV) files for equality. This function
uses :py:func:`equal_dataframes`.
:param file1: File name
:type file1: str or unicode
:param file2: File name
:type file2: str or unicode
:param tolerance: Tolerance for floating point comparisons
:type tolerance: float
:param comment: Comment prefix
:type comment: str or unicode
:raise AssertionError: If files differ in their contents
:raise Exception: If problems arise when loading the files
"""
data1 = pd.read_csv(file1, sep="\t", comment=comment)
data2 = pd.read_csv(file2, sep="\t", comment=comment)
try:
equal_dataframes(data1, data2, tolerance)
except AssertionError as error:
# Add file names to error message.
message = error.args[0]
message += " in file: " + str(file1) + ":" + str(file2)
error.args = (message,)
raise
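# Illustrative use of equal_tsv (hypothetical file names, not part of the original
# module): compare two pipeline outputs that should agree to within 1e-4.
#     equal_tsv("run_a/tpms.tsv", "run_b/tpms.tsv", tolerance=0.0001)
# On mismatch an AssertionError is raised naming the offending column and both files.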
| 33.578431 | 75 | 0.619854 |
4a2219e935a46db2a10cd0664ade7e8df859ef97 | 625 | py | Python | project/data/scrapers/ambitiouskitchen.py | bmogyorodi/recipe_search | 0e7fa4b961342b6c37f36f444337109836618938 | [
"BSD-3-Clause"
] | 1 | 2021-08-13T08:33:09.000Z | 2021-08-13T08:33:09.000Z | project/data/scrapers/ambitiouskitchen.py | bmogyorodi/recipe_search | 0e7fa4b961342b6c37f36f444337109836618938 | [
"BSD-3-Clause"
] | null | null | null | project/data/scrapers/ambitiouskitchen.py | bmogyorodi/recipe_search | 0e7fa4b961342b6c37f36f444337109836618938 | [
"BSD-3-Clause"
] | 2 | 2021-08-13T08:33:35.000Z | 2022-02-21T19:42:23.000Z | from ._generic import RootSitemapScraper
class AmbitiousKitchenScraper(RootSitemapScraper):
"""
A scraper for ambitiouskitchen.com
"""
NAME = "ambitiouskitchen"
RECIPE_URL_FORMAT = "https://www.ambitiouskitchen.com/{id}/"
# e.g. https://www.ambitiouskitchen.com/healthy-white-chicken-chili/
# Recipes are not distinct from any other post, all have only a slug
RECIPE_URL_RE = r"https://www.ambitiouskitchen.com/(?P<id>[^/]+)/?$"
SITEMAPS_ROOT_URL = "https://www.ambitiouskitchen.com/sitemap.xml"
SITEMAP_URL_RE = r"https://www.ambitiouskitchen.com/sitemap-pt-post-\d{4}-\d{2}.xml"
| 36.764706 | 88 | 0.7104 |
4a221a76950c9089348ea067f0687386643517e9 | 1,045 | py | Python | deep-rl/lib/python2.7/site-packages/OpenGL/raw/GL/ATI/separate_stencil.py | ShujaKhalid/deep-rl | 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | [
"MIT"
] | 210 | 2016-04-09T14:26:00.000Z | 2022-03-25T18:36:19.000Z | deep-rl/lib/python2.7/site-packages/OpenGL/raw/GL/ATI/separate_stencil.py | ShujaKhalid/deep-rl | 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | [
"MIT"
] | 72 | 2016-09-04T09:30:19.000Z | 2022-03-27T17:06:53.000Z | deep-rl/lib/python2.7/site-packages/OpenGL/raw/GL/ATI/separate_stencil.py | ShujaKhalid/deep-rl | 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | [
"MIT"
] | 64 | 2016-04-09T14:26:49.000Z | 2022-03-21T11:19:47.000Z | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_ATI_separate_stencil'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_ATI_separate_stencil',error_checker=_errors._error_checker)
GL_STENCIL_BACK_FAIL_ATI=_C('GL_STENCIL_BACK_FAIL_ATI',0x8801)
GL_STENCIL_BACK_FUNC_ATI=_C('GL_STENCIL_BACK_FUNC_ATI',0x8800)
GL_STENCIL_BACK_PASS_DEPTH_FAIL_ATI=_C('GL_STENCIL_BACK_PASS_DEPTH_FAIL_ATI',0x8802)
GL_STENCIL_BACK_PASS_DEPTH_PASS_ATI=_C('GL_STENCIL_BACK_PASS_DEPTH_PASS_ATI',0x8803)
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLint,_cs.GLuint)
def glStencilFuncSeparateATI(frontfunc,backfunc,ref,mask):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLenum)
def glStencilOpSeparateATI(face,sfail,dpfail,dppass):pass
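# The two wrappers above expose the ATI_separate_stencil entry points
# (glStencilFuncSeparateATI / glStencilOpSeparateATI); the GL_STENCIL_BACK_*
# constants are the matching GLenum state values.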
| 43.541667 | 117 | 0.831579 |
4a221ad8e36673e26fd65dfb44de0ad5828d99f7 | 3,317 | py | Python | test/functional/mining_getblocktemplate_longpoll.py | sirlanceoflompoc/karmacoin | 3a75016399f75c27f97856f842915b5c7c4e8fb6 | [
"MIT"
] | null | null | null | test/functional/mining_getblocktemplate_longpoll.py | sirlanceoflompoc/karmacoin | 3a75016399f75c27f97856f842915b5c7c4e8fb6 | [
"MIT"
] | null | null | null | test/functional/mining_getblocktemplate_longpoll.py | sirlanceoflompoc/karmacoin | 3a75016399f75c27f97856f842915b5c7c4e8fb6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Karmacoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test longpolling with getblocktemplate."""
from decimal import Decimal
from test_framework.test_framework import KarmacoinTestFramework
from test_framework.util import get_rpc_proxy, random_transaction
import threading
class LongpollThread(threading.Thread):
def __init__(self, node):
threading.Thread.__init__(self)
# query current longpollid
template = node.getblocktemplate({'rules': ['segwit']})
self.longpollid = template['longpollid']
# create a new connection to the node, we can't use the same
# connection from two threads
self.node = get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
def run(self):
self.node.getblocktemplate({'longpollid': self.longpollid, 'rules': ['segwit']})
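# LongpollThread.run() blocks inside getblocktemplate until the node reports a
# template change (a new block or a new mempool transaction); the tests below use
# this to detect when the long poll has returned.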
class GetBlockTemplateLPTest(KarmacoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.log.info("Warning: this test will take about 70 seconds in the best case. Be patient.")
self.nodes[0].generate(10)
template = self.nodes[0].getblocktemplate({'rules': ['segwit']})
longpollid = template['longpollid']
# longpollid should not change between successive invocations if nothing else happens
template2 = self.nodes[0].getblocktemplate({'rules': ['segwit']})
assert template2['longpollid'] == longpollid
# Test 1: test that the longpolling wait if we do nothing
thr = LongpollThread(self.nodes[0])
thr.start()
# check that thread still lives
thr.join(5) # wait 5 seconds or until thread exits
assert thr.is_alive()
# Test 2: test that longpoll will terminate if another node generates a block
self.nodes[1].generate(1) # generate a block on another node
        # check that the thread exits now that a new block has been found
thr.join(5) # wait 5 seconds or until thread exits
assert not thr.is_alive()
# Test 3: test that longpoll will terminate if we generate a block ourselves
thr = LongpollThread(self.nodes[0])
thr.start()
        self.nodes[0].generate(1)  # generate a block on our own node this time
thr.join(5) # wait 5 seconds or until thread exits
assert not thr.is_alive()
# Test 4: test that introducing a new transaction into the mempool will terminate the longpoll
thr = LongpollThread(self.nodes[0])
thr.start()
# generate a random transaction and submit it
min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"]
# min_relay_fee is fee per 1000 bytes, which should be more than enough.
(txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), min_relay_fee, Decimal("0.001"), 20)
# after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned
thr.join(60 + 20)
assert not thr.is_alive()
if __name__ == '__main__':
GetBlockTemplateLPTest().main()
| 43.644737 | 112 | 0.685258 |
4a221afeae99e04de23f03669453b340a1cd2450 | 1,889 | py | Python | gym-uds-server.py | tiberiu92/gym-uds-api | 65ff4a4368197ce43e954d66ed0daa31a93236af | [
"MIT"
] | 1 | 2018-06-29T10:31:23.000Z | 2018-06-29T10:31:23.000Z | gym-uds-server.py | tiberiu92/gym-uds-api | 65ff4a4368197ce43e954d66ed0daa31a93236af | [
"MIT"
] | null | null | null | gym-uds-server.py | tiberiu92/gym-uds-api | 65ff4a4368197ce43e954d66ed0daa31a93236af | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import os
import time
from concurrent import futures
import grpc
import gym
import gym_uds_pb2
import gym_uds_pb2_grpc
import numpy as np
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
class Environment(gym_uds_pb2_grpc.EnvironmentServicer):
def __init__(self, env_id):
self.env = gym.make(env_id)
def Reset(self, empty_request, context):
observation = self.env.reset()
observation_pb = gym_uds_pb2.Observation(data=observation.ravel(), shape=observation.shape)
return gym_uds_pb2.State(observation=observation_pb, reward=0.0, done=False)
def Step(self, action_request, context):
observation, reward, done, _ = self.env.step(action_request.value)
assert type(observation) is np.ndarray
observation_pb = gym_uds_pb2.Observation(data=observation.ravel(), shape=observation.shape)
return gym_uds_pb2.State(observation=observation_pb, reward=reward, done=done)
def Sample(self, empty_request, context):
action = self.env.action_space.sample()
return gym_uds_pb2.Action(value=action)
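# The Environment servicer above bridges a single Gym environment to gRPC:
# observations are flattened with ravel() and sent together with their original
# shape so the client can reconstruct the array on its side.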
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('id', help='the id of the gym environment to simulate')
parser.add_argument(
'filepath',
nargs='?',
default='unix:///tmp/gym-uds-socket',
help='a unique filepath where the server will bind')
args = parser.parse_args()
try:
os.remove(args.filepath)
except FileNotFoundError:
pass
server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
gym_uds_pb2_grpc.add_EnvironmentServicer_to_server(Environment(args.id), server)
server.add_insecure_port(args.filepath)
server.start()
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
| 30.967213 | 99 | 0.707253 |
4a221b32106f31c980bb3572da64784a9844a0c3 | 2,264 | py | Python | cohesity_management_sdk/models/couchbase_connect_params.py | nick6655/management-sdk-python | 88e792cb83e5c24a22af495b220c145d0c45841d | [
"Apache-2.0"
] | 18 | 2019-09-24T17:35:53.000Z | 2022-03-25T08:08:47.000Z | cohesity_management_sdk/models/couchbase_connect_params.py | nick6655/management-sdk-python | 88e792cb83e5c24a22af495b220c145d0c45841d | [
"Apache-2.0"
] | 18 | 2019-03-29T19:32:29.000Z | 2022-01-03T23:16:45.000Z | cohesity_management_sdk/models/couchbase_connect_params.py | nick6655/management-sdk-python | 88e792cb83e5c24a22af495b220c145d0c45841d | [
"Apache-2.0"
] | 16 | 2019-02-27T06:54:12.000Z | 2021-11-16T18:10:24.000Z | # -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
class CouchbaseConnectParams(object):
"""Implementation of the 'CouchbaseConnectParams' model.
Specifies an Object containing information about a registered couchbase
source.
Attributes:
carrier_direct_port (int): Specifies the Carrier direct/sll port.
http_direct_port (int): Specifies the HTTP direct/sll port.
requires_ssl (bool): Specifies whether this cluster allows connection
through SSL only.
seeds (list of string): Specifies the Seeds of this Couchbase Cluster.
"""
# Create a mapping from Model property names to API property names
_names = {
"carrier_direct_port": 'carrierDirectPort',
"http_direct_port": 'httpDirectPort',
"requires_ssl": 'requiresSsl',
"seeds":'seeds'
}
def __init__(self,
carrier_direct_port=None,
http_direct_port=None,
requires_ssl=None,
seeds=None):
"""Constructor for the CouchbaseConnectParams class"""
# Initialize members of the class
self.carrier_direct_port = carrier_direct_port
self.http_direct_port = http_direct_port
self.requires_ssl = requires_ssl
self.seeds = seeds
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
carrier_direct_port = dictionary.get('carrierDirectPort')
http_direct_port = dictionary.get('httpDirectPort')
requires_ssl = dictionary.get('requiresSsl')
seeds = dictionary.get('seeds')
# Return an object of this model
return cls(carrier_direct_port,
http_direct_port,
requires_ssl,
seeds)
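    # Illustrative round-trip (hypothetical values, not part of the generated SDK):
    #     params = CouchbaseConnectParams.from_dictionary(
    #         {'carrierDirectPort': 11207, 'httpDirectPort': 8091,
    #          'requiresSsl': True, 'seeds': ['cb-node-1.example.com']})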
| 31.444444 | 81 | 0.636926 |
4a221b46cffa440adc8d81dcc9dea389237194d0 | 358 | py | Python | runtests/mpi/tests/test_benchmark.py | pastewka/runtests | 9b6d4806e3b662fac2f266a8aadf79ce1caca134 | [
"BSD-2-Clause"
] | 3 | 2019-11-04T10:50:20.000Z | 2019-11-13T13:03:20.000Z | card_test/venv/Lib/site-packages/runtests/mpi/tests/test_benchmark.py | latedude2/P3_image_processing | 69ce6de8f6c962f961edb83e6974d60e86343faa | [
"MIT"
] | 9 | 2018-03-13T20:59:26.000Z | 2021-02-24T15:34:40.000Z | card_test/venv/Lib/site-packages/runtests/mpi/tests/test_benchmark.py | latedude2/P3_image_processing | 69ce6de8f6c962f961edb83e6974d60e86343faa | [
"MIT"
] | 1 | 2019-08-05T06:43:28.000Z | 2019-08-05T06:43:28.000Z | from runtests.mpi import MPITest
import pytest
import time
def test_benchmark1(benchmark):
comm = benchmark.comm
with benchmark("test 1"):
time.sleep((1+comm.rank)*0.25)
@pytest.mark.parametrize('x', [1, 2])
def test_benchmark2(benchmark, x):
comm = benchmark.comm
with benchmark("test 2"):
time.sleep((1+comm.rank)*0.25)
| 21.058824 | 38 | 0.673184 |
4a221bccb3e610591fe0a0320ee0958fb2a38885 | 11,256 | py | Python | util.py | baronrustamov/bulka | fe19fa993e0d1fa013b83bc08705c70cd26d84aa | [
"MIT"
] | null | null | null | util.py | baronrustamov/bulka | fe19fa993e0d1fa013b83bc08705c70cd26d84aa | [
"MIT"
] | null | null | null | util.py | baronrustamov/bulka | fe19fa993e0d1fa013b83bc08705c70cd26d84aa | [
"MIT"
] | 1 | 2021-06-07T14:45:57.000Z | 2021-06-07T14:45:57.000Z | import logging
import threading
from collections import namedtuple
from functools import wraps
import requests
from bs4 import BeautifulSoup
from fuzzywuzzy import process, fuzz
from requests import Session
from telegram import ParseMode, Update
from telegram.ext import CallbackContext
from const import USER_AGENT
ARROW_CHARACTER = '➜'
GITHUB_URL = "https://github.com/"
DEFAULT_REPO_OWNER = 'python-telegram-bot'
DEFAULT_REPO_NAME = 'python-telegram-bot'
DEFAULT_REPO = f'{DEFAULT_REPO_OWNER}/{DEFAULT_REPO_NAME}'
# Require x non-command messages between each /rules etc.
RATE_LIMIT_SPACING = 2
def get_reply_id(update):
if update.message and update.message.reply_to_message:
return update.message.reply_to_message.message_id
return None
def reply_or_edit(update, context, text):
chat_data = context.chat_data
if update.edited_message:
chat_data[update.edited_message.message_id].edit_text(text,
parse_mode=ParseMode.HTML,
disable_web_page_preview=True)
else:
issued_reply = get_reply_id(update)
if issued_reply:
chat_data[update.message.message_id] = context.bot.sendMessage(update.message.chat_id, text,
reply_to_message_id=issued_reply,
parse_mode=ParseMode.HTML,
disable_web_page_preview=True)
else:
chat_data[update.message.message_id] = update.message.reply_text(text,
parse_mode=ParseMode.HTML,
disable_web_page_preview=True)
def get_text_not_in_entities(html):
soup = BeautifulSoup(html, 'html.parser')
return ' '.join(soup.find_all(text=True, recursive=False))
def build_menu(buttons,
n_cols,
header_buttons=None,
footer_buttons=None):
menu = [buttons[i:i + n_cols] for i in range(0, len(buttons), n_cols)]
if header_buttons:
menu.insert(0, header_buttons)
if footer_buttons:
menu.append(footer_buttons)
return menu
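# Illustrative use of build_menu (the buttons list is hypothetical): lay a list of
# InlineKeyboardButton objects out two per row for an InlineKeyboardMarkup:
#     InlineKeyboardMarkup(build_menu(buttons, n_cols=2))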
def rate_limit_tracker(update: Update, context: CallbackContext):
data = context.chat_data.get('rate_limit', {})
for key in data.keys():
data[key] += 1
def rate_limit(f):
"""
Rate limit command so that RATE_LIMIT_SPACING non-command messages are
required between invocations.
"""
@wraps(f)
def wrapper(update, context, *args, **kwargs):
# Get rate limit data
try:
data = context.chat_data['rate_limit']
except KeyError:
data = context.chat_data['rate_limit'] = {}
# If we have not seen two non-command messages since last of type `f`
if data.get(f, RATE_LIMIT_SPACING) < RATE_LIMIT_SPACING:
logging.debug('Ignoring due to rate limit!')
return
data[f] = 0
return f(update, context, *args, **kwargs)
return wrapper
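# Illustrative use of rate_limit (handler name is hypothetical): a decorated
# command handler only fires again after RATE_LIMIT_SPACING non-command messages:
#     @rate_limit
#     def rules_command(update, context): ...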
def truncate_str(str, max):
return (str[:max] + '…') if len(str) > max else str
Issue = namedtuple('Issue', 'type, owner, repo, number, url, title, author')
Commit = namedtuple('Commit', 'owner, repo, sha, url, title, author')
class GitHubIssues:
def __init__(self, default_owner=DEFAULT_REPO_OWNER, default_repo=DEFAULT_REPO_NAME):
self.s = Session()
self.s.headers.update({'user-agent': USER_AGENT})
self.base_url = 'https://api.github.com/'
self.default_owner = default_owner
self.default_repo = default_repo
self.logger = logging.getLogger(self.__class__.__qualname__)
self.etag = None
self.issues = {}
self.issues_lock = threading.Lock()
def set_auth(self, client_id, client_secret):
self.s.auth = (client_id, client_secret)
def _get_json(self, url, data=None, headers=None):
# Add base_url if needed
url = url if url.startswith('https://') else self.base_url + url
self.logger.info('Getting %s', url)
try:
r = self.s.get(url, params=data, headers=headers)
except requests.exceptions.RequestException as e:
            self.logger.exception('While getting %s with data %s', url, data)
return False, None, (None, None)
self.logger.debug('status_code=%d', r.status_code)
if not r.ok:
self.logger.error('Not OK: %s', r.text)
# Only try .json() if we actually got new data
return r.ok, None if r.status_code == 304 else r.json(), (r.headers, r.links)
def pretty_format(self, thing, short=False, short_with_title=False, title_max_length=15):
if isinstance(thing, Issue):
return self.pretty_format_issue(thing,
short=short,
short_with_title=short_with_title,
title_max_length=title_max_length)
return self.pretty_format_commit(thing,
short=short,
short_with_title=short_with_title,
title_max_length=title_max_length)
def pretty_format_issue(self, issue, short=False, short_with_title=False, title_max_length=15):
# PR OwnerIfNotDefault/RepoIfNotDefault#9999: Title by Author
# OwnerIfNotDefault/RepoIfNotDefault#9999 if short=True
s = (f'{"" if issue.owner == self.default_owner else issue.owner + "/"}'
f'{"" if issue.repo == self.default_repo else issue.repo}'
f'#{issue.number}')
if short:
return s
elif short_with_title:
return f'{s}: {truncate_str(issue.title, title_max_length)}'
return f'{issue.type} {s}: {issue.title} by {issue.author}'
def pretty_format_commit(self, commit, short=False, short_with_title=False, title_max_length=15):
# Commit OwnerIfNotDefault/RepoIfNotDefault@abcdf123456789: Title by Author
# OwnerIfNotDefault/RepoIfNotDefault@abcdf123456789 if short=True
s = (f'{"" if commit.owner == self.default_owner else commit.owner + "/"}'
f'{"" if commit.repo == self.default_repo else commit.repo}'
f'@{commit.sha[:7]}')
if short:
return s
elif short_with_title:
return f'{s}: {truncate_str(commit.title, title_max_length)}'
return f'Commit {s}: {commit.title} by {commit.author}'
def get_issue(self,
number: int,
owner=None,
repo=None):
# Other owner or repo than default?
if owner is not None or repo is not None:
owner = owner or self.default_owner
repo = repo or self.default_repo
ok, data, _ = self._get_json(f'repos/{owner}/{repo}/issues/{number}')
# Return issue directly, or unknown if not found
return Issue(type=('PR' if 'pull_request' in data else 'Issue') if ok else '',
owner=owner,
repo=repo,
number=number,
url=data['html_url'] if ok else f'https://github.com/{owner}/{repo}/issues/{number}',
title=data['title'] if ok else 'Unknown',
author=data['user']['login'] if ok else 'Unknown')
# Look the issue up, or if not found, fall back on above code
try:
return self.issues[number]
except KeyError:
return self.get_issue(number, owner=self.default_owner, repo=self.default_repo)
def get_commit(self,
sha: int,
owner=None,
repo=None):
owner = owner or self.default_owner
repo = repo or self.default_repo
ok, data, _ = self._get_json(f'repos/{owner}/{repo}/commits/{sha}')
return Commit(owner=owner,
repo=repo,
sha=sha,
url=data['html_url'] if ok else f'https://github.com/{owner}/{repo}/commits/{sha}',
title=data['commit']['message'].partition('\n')[0] if ok else 'Unknown',
author=data['commit']['author']['name'] if ok else 'Unknown')
def _job(self, url, job_queue, first=True):
logging.debug('Getting issues from %s', url)
# Load 100 issues
# We pass the ETag if we have one (not called from init_issues)
ok, data, (headers, links) = self._get_json(url, {
'per_page': 100,
'state': 'all'
}, {'If-None-Match': self.etag} if self.etag else None)
if ok and data:
# Add to issue cache
# Acquire lock so we don't add while a func (like self.search) is iterating over it
with self.issues_lock:
for issue in data:
self.issues[issue['number']] = Issue(type='PR' if 'pull_request' in issue else 'Issue',
owner=self.default_owner,
repo=self.default_repo,
url=issue['html_url'],
number=issue['number'],
title=issue['title'],
author=issue['user']['login'])
elif not ok:
# Retry in 5 sec
job_queue.run_once(lambda _: self._job(url, job_queue), 5)
return
# If more issues
if 'next' in links:
# Process next page after 5 sec to not get rate-limited
job_queue.run_once(lambda _: self._job(links['next']['url'], job_queue), 5)
# No more issues
else:
# In 10 min check if the 100 first issues changed, and update them in our cache if needed
job_queue.run_once(lambda _: self._job(links['first']['url'], job_queue, first=True), 10 * 60)
# If this is on page one (first) then we wanna save the header
if first:
self.etag = headers['etag']
def init_issues(self, job_queue):
self._job(f'repos/{self.default_owner}/{self.default_repo}/issues', job_queue, first=True)
def search(self, query):
def processor(x):
if isinstance(x, Issue):
x = x.title
return x.strip().lower()
# We don't care about the score, so return first element
# This must not happen while updating the self.issues dict so acquire the lock
with self.issues_lock:
return [result[0] for result in process.extract(query, self.issues, scorer=fuzz.partial_ratio,
processor=processor, limit=1000)]
github_issues = GitHubIssues()
| 42.315789 | 110 | 0.565387 |
4a221c02052a92cd77593b96ba4dd2c0bda1529b | 19,819 | py | Python | v1_7_0/dx_operations_vdb.py | mcbrune/delphixpy-automation | f986dbf69809748a8c9721a19663c6f6fb66fc3c | [
"MIT"
] | 2 | 2017-01-18T20:27:33.000Z | 2017-07-25T14:23:29.000Z | v1_7_0/dx_operations_vdb.py | mcbrune/delphixpy-automation | f986dbf69809748a8c9721a19663c6f6fb66fc3c | [
"MIT"
] | null | null | null | v1_7_0/dx_operations_vdb.py | mcbrune/delphixpy-automation | f986dbf69809748a8c9721a19663c6f6fb66fc3c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Corey Brune - Oct 2016
#This script starts or stops a VDB
#requirements
#pip install docopt delphixpy
#The below doc follows the POSIX compliant standards and allows us to use
#this doc to also define our arguments for the script.
"""List all VDBs or Start, stop, enable, disable a VDB
Usage:
    dx_operations_vdb.py (--vdb <name> [--stop | --start | --enable | --disable] | --list)
[-d <identifier> | --engine <identifier> | --all]
[--debug] [--parallel <n>] [--poll <n>]
[--config <path_to_file>] [--logdir <path_to_file>]
dx_operations_vdb.py -h | --help | -v | --version
List all VDBs, start, stop, enable, disable a VDB
Examples:
dx_operations_vdb.py -d landsharkengine --vdb testvdb --stop
    dx_operations_vdb.py --vdb testvdb --start
Options:
--vdb <name> Name of the VDB to stop or start
  --start                   Start the VDB
--stop Stop the VDB
--list List all databases from an engine
--enable Enable the VDB
--disable Disable the VDB
-d <identifier> Identifier of Delphix engine in dxtools.conf.
--engine <type> Alt Identifier of Delphix engine in dxtools.conf.
--all Run against all engines.
--debug Enable debug logging
--parallel <n> Limit number of jobs to maxjob
--poll <n> The number of seconds to wait between job polls
[default: 10]
--config <path_to_file> The path to the dxtools.conf file
[default: ./dxtools.conf]
--logdir <path_to_file> The path to the logfile you want to use.
[default: ./dx_operations_vdb.log]
-h --help Show this screen.
-v --version Show version.
"""
VERSION="v.0.0.002"
from docopt import docopt
import logging
from os.path import basename
import signal
import sys
import time
import traceback
import json
from multiprocessing import Process
from time import sleep, time
from delphixpy.v1_7_0.delphix_engine import DelphixEngine
from delphixpy.v1_7_0.exceptions import HttpError, JobError
from delphixpy.v1_7_0 import job_context
from delphixpy.v1_7_0.web import database, host, job, source
from delphixpy.v1_7_0.exceptions import RequestError, JobError, HttpError
class dlpxException(Exception):
def __init__(self, message):
self.message = message
def vdb_operation(engine, server, jobs, vdb_name, operation):
"""
Function to start, stop, enable or disable a VDB
"""
print_debug(engine['hostname'] + ': Searching for ' + vdb_name +
' reference.\n')
vdb_obj = find_obj_by_name(engine, server, source, vdb_name)
try:
if vdb_obj:
if operation == 'start':
source.start(server, vdb_obj.reference)
elif operation == 'stop':
source.stop(server, vdb_obj.reference)
elif operation == 'enable':
source.enable(server, vdb_obj.reference)
elif operation == 'disable':
source.disable(server, vdb_obj.reference)
jobs[engine['hostname']] = server.last_job
except (RequestError, HttpError, JobError, AttributeError), e:
raise dlpxException('An error occurred while performing ' +
operation + ' on ' + vdb_name + '.:%s\n' % (e))
def list_databases(engine, server, jobs):
"""
Function to list all databases for a given engine
"""
try:
databases = database.get_all(server)
for db in databases:
if db.provision_container == None:
db.provision_container = 'dSource'
print 'name = ', str(db.name), '\n', 'current timeflow = ', \
str(db.current_timeflow), '\n', 'provision container = ', \
str(db.provision_container), '\n', 'processor = ', \
str(db.processor), '\n'
except (RequestError, HttpError, JobError, AttributeError), e:
print 'An error occurred while listing databases on ' + \
engine['ip_address'] + '.:%s\n' % (e)
def find_obj_by_name(engine, server, f_class, obj_name):
"""
Function to find objects by name and object class, and return object's
reference as a string
You might use this function to find objects like groups.
"""
print_debug(engine["hostname"] + ": Searching objects in the " +
f_class.__name__ + " class\n for one named \"" +
obj_name + "\"")
obj_ref = ''
all_objs = f_class.get_all(server)
try:
for obj in all_objs:
if obj.name == obj_name:
print_debug(engine["hostname"] + ": Found a match " +
str(obj.reference))
return obj
#If the code reaches here, the object was not found.
raise dlpxException('Object %s not found in %s\n' % (obj_name,
engine['ip_address']))
except (RequestError, HttpError, JobError, AttributeError), e:
raise dlpxException('Object %s not found in %s' % (obj_name,
engine['ip_address']))
def get_config(config_file_path):
"""
This function reads in the dxtools.conf file
"""
#First test to see that the file is there and we can open it
try:
config_file = open(config_file_path).read()
except:
print_error("Was unable to open " + config_file_path +
". Please check the path and permissions, then try again.")
sys.exit(1)
#Now parse the file contents as json and turn them into a python
# dictionary, throw an error if it isn't proper json
try:
config = json.loads(config_file)
except:
print_error("Was unable to read " + config_file_path +
" as json. Please check file in a json formatter and " +
"try again.")
sys.exit(1)
#Create a dictionary of engines (removing the data node from the
# dxtools.json, for easier parsing)
delphix_engines = {}
for each in config['data']:
delphix_engines[each['hostname']] = each
print_debug(delphix_engines)
return delphix_engines
def logging_est(logfile_path):
"""
Establish Logging
"""
global debug
logging.basicConfig(filename=logfile_path,format='%(levelname)s:%(asctime)s:%(message)s', level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')
print_info("Welcome to " + basename(__file__) + ", version " + VERSION)
global logger
debug = arguments['--debug']
logger = logging.getLogger()
if debug == True:
logger.setLevel(10)
print_info("Debug Logging is enabled.")
def job_mode(server):
"""
This function tells Delphix how to execute jobs, based on the
single_thread variable at the beginning of the file
"""
#Synchronously (one at a time)
if single_thread == True:
job_m = job_context.sync(server)
print_debug("These jobs will be executed synchronously")
#Or asynchronously
else:
job_m = job_context.async(server)
print_debug("These jobs will be executed asynchronously")
return job_m
def job_wait():
"""
This job stops all work in the thread/process until jobs are completed.
"""
    #Grab all the jobs on the server (the last 25, by default)
all_jobs = job.get_all(server)
#For each job in the list, check to see if it is running (not ended)
for jobobj in all_jobs:
if not (jobobj.job_state in ["CANCELED", "COMPLETED", "FAILED"]):
print_debug("Waiting for " + jobobj.reference + " (currently: " +
jobobj.job_state +
") to finish running against the container")
#If so, wait
job_context.wait(server,jobobj.reference)
def on_exit(sig, func=None):
"""
This function helps us end cleanly and with exit codes
"""
print_info("Shutdown Command Received")
print_info("Shutting down " + basename(__file__))
sys.exit(0)
def print_debug(print_obj):
"""
Call this function with a log message to prefix the message with DEBUG
"""
try:
if debug == True:
print "DEBUG: " + str(print_obj)
logging.debug(str(print_obj))
except:
pass
def print_error(print_obj):
"""
Call this function with a log message to prefix the message with ERROR
"""
print "ERROR: " + str(print_obj)
logging.error(str(print_obj))
def print_info(print_obj):
"""
Call this function with a log message to prefix the message with INFO
"""
print "INFO: " + str(print_obj)
logging.info(str(print_obj))
def print_warning(print_obj):
"""
Call this function with a log message to prefix the message with WARNING
"""
print "WARNING: " + str(print_obj)
logging.warning(str(print_obj))
def run_async(func):
"""
http://code.activestate.com/recipes/576684-simple-threading-decorator/
run_async(func)
function decorator, intended to make "func" run in a separate
thread (asynchronously).
Returns the created Thread object
E.g.:
@run_async
def task1():
do_something
@run_async
def task2():
do_something_too
t1 = task1()
t2 = task2()
...
t1.join()
t2.join()
"""
from threading import Thread
from functools import wraps
@wraps(func)
def async_func(*args, **kwargs):
func_hl = Thread(target = func, args = args, kwargs = kwargs)
func_hl.start()
return func_hl
return async_func
@run_async
def main_workflow(engine):
"""
This function actually runs the jobs.
Use the @run_async decorator to run this function asynchronously.
    This allows us to run against multiple Delphix Engines simultaneously
"""
#Pull out the values from the dictionary for this engine
engine_address = engine["ip_address"]
engine_username = engine["username"]
engine_password = engine["password"]
#Establish these variables as empty for use later
jobs = {}
#Setup the connection to the Delphix Engine
server = serversess(engine_address, engine_username, engine_password)
try:
if arguments['--vdb']:
#Get the database reference we are copying from the database name
database_obj = find_obj_by_name(engine, server, database,
arguments['--vdb'])
except dlpxException, e:
print '\nERROR: %s\n' % (e.message)
sys.exit(1)
thingstodo = ["thingtodo"]
#reset the running job count before we begin
i = 0
with job_mode(server):
while (len(jobs) > 0 or len(thingstodo)> 0):
if len(thingstodo)> 0:
if arguments['--start']:
vdb_operation(engine, server, jobs, database_name, 'start')
elif arguments['--stop']:
vdb_operation(engine, server, jobs, database_name, 'stop')
elif arguments['--enable']:
vdb_operation(engine, server, jobs, database_name,
'enable')
elif arguments['--disable']:
vdb_operation(engine, server, jobs, database_name,
'disable')
elif arguments['--list']:
list_databases(engine, server, jobs)
thingstodo.pop()
#get all the jobs, then inspect them
i = 0
for j in jobs.keys():
job_obj = job.get(server, jobs[j])
print_debug(job_obj)
print_info(engine["hostname"] + ": VDB Operations: " +
job_obj.job_state)
if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
#If the job is in a non-running state, remove it from the
# running jobs list.
del jobs[j]
else:
#If the job is in a running state, increment the running
# job count.
i += 1
print_info(engine["hostname"] + ": " + str(i) + " jobs running. ")
#If we have running jobs, pause before repeating the checks.
if len(jobs) > 0:
sleep(float(arguments['--poll']))
def run_job(engine):
"""
This function runs the main_workflow aynchronously against all the servers
specified
"""
#Create an empty list to store threads we create.
threads = []
#If the --all argument was given, run against every engine in dxtools.conf
if arguments['--all']:
print_info("Executing against all Delphix Engines in the dxtools.conf")
#For each server in the dxtools.conf...
for delphix_engine in dxtools_objects:
engine = dxtools_objects[delphix_engine]
#Create a new thread and add it to the list.
threads.append(main_workflow(engine))
else:
#Else if the --engine argument was given, test to see if the engine
# exists in dxtools.conf
if arguments['--engine']:
try:
engine = dxtools_objects[arguments['--engine']]
print_info("Executing against Delphix Engine: " +
arguments['--engine'])
except:
print_error("Delphix Engine \"" + arguments['--engine'] +
"\" cannot be found in " + config_file_path)
print_error("Please check your value and try again. Exiting")
sys.exit(1)
#Else if the -d argument was given, test to see if the engine exists
# in dxtools.conf
elif arguments['-d']:
try:
engine = dxtools_objects[arguments['-d']]
print_info("Executing against Delphix Engine: " +
arguments['-d'])
except:
print_error("Delphix Engine \"" + arguments['-d'] +
"\" cannot be found in " + config_file_path)
print_error("Please check your value and try again. Exiting")
sys.exit(1)
else:
#Else search for a default engine in the dxtools.conf
for delphix_engine in dxtools_objects:
if dxtools_objects[delphix_engine]['default'] == 'true':
engine = dxtools_objects[delphix_engine]
print_info("Executing against the default Delphix Engine "
"in the dxtools.conf: " +
dxtools_objects[delphix_engine]['hostname'])
break
if engine == None:
print_error("No default engine found. Exiting")
sys.exit(1)
#run the job against the engine
threads.append(main_workflow(engine))
#For each thread in the list...
for each in threads:
#join them back together so that we wait for all threads to complete
# before moving on
each.join()
def serversess(f_engine_address, f_engine_username, f_engine_password):
"""
Function to setup the session with the Delphix Engine
"""
server_session= DelphixEngine(f_engine_address, f_engine_username,
f_engine_password, "DOMAIN")
return server_session
def set_exit_handler(func):
"""
This function helps us set the correct exit code
"""
signal.signal(signal.SIGTERM, func)
def time_elapsed():
"""
This function calculates the time elapsed since the beginning of the script.
Call this anywhere you want to note the progress in terms of time
"""
elapsed_minutes = round((time() - time_start)/60, +1)
return elapsed_minutes
def update_jobs_dictionary(engine, server, jobs):
"""
This function checks each job in the dictionary and updates its status or
removes it if the job is complete.
Return the number of jobs still running.
"""
#Establish the running jobs counter, as we are about to update the count
# from the jobs report.
i = 0
#get all the jobs, then inspect them
for j in jobs.keys():
job_obj = job.get(server, jobs[j])
print_debug(engine["hostname"] + ": " + str(job_obj))
print_info(engine["hostname"] + ": " + j.name + ": " +
job_obj.job_state)
if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
#If the job is in a non-running state, remove it from the running
# jobs list.
del jobs[j]
else:
#If the job is in a running state, increment the running job count.
i += 1
return i
def main(argv):
#We want to be able to call on these variables anywhere in the script.
global single_thread
global usebackup
global time_start
global config_file_path
global database_name
global host_name
global dxtools_objects
try:
logging_est(arguments['--logdir'])
print_debug(arguments)
time_start = time()
engine = None
single_thread = False
config_file_path = arguments['--config']
#Parse the dxtools.conf and put it into a dictionary
dxtools_objects = get_config(config_file_path)
database_name = arguments['--vdb']
#This is the function that will handle processing main_workflow for
# all the servers.
run_job(engine)
elapsed_minutes = time_elapsed()
print_info("script took " + str(elapsed_minutes) +
" minutes to get this far.")
#Here we handle what we do when the unexpected happens
except SystemExit as e:
"""
This is what we use to handle our sys.exit(#)
"""
sys.exit(e)
except HttpError as e:
"""
We use this exception handler when our connection to Delphix fails
"""
print_error("Connection failed to the Delphix Engine")
print_error( "Please check the ERROR message below")
print_error(e.message)
sys.exit(2)
except JobError as e:
"""
We use this exception handler when a job fails in Delphix so that we have actionable data
"""
print_error("A job failed in the Delphix Engine")
print_error(e.job)
elapsed_minutes = time_elapsed()
print_info(basename(__file__) + " took " + str(elapsed_minutes) + " minutes to get this far.")
sys.exit(3)
except KeyboardInterrupt:
"""
We use this exception handler to gracefully handle ctrl+c exits
"""
print_debug("You sent a CTRL+C to interrupt the process")
elapsed_minutes = time_elapsed()
print_info(basename(__file__) + " took " + str(elapsed_minutes) + " minutes to get this far.")
except:
"""
Everything else gets caught here
"""
print_error(sys.exc_info()[0])
print_error(traceback.format_exc())
elapsed_minutes = time_elapsed()
print_info(basename(__file__) + " took " + str(elapsed_minutes) + " minutes to get this far.")
sys.exit(1)
if __name__ == "__main__":
#Grab our arguments from the doc at the top of the script
arguments = docopt(__doc__, version=basename(__file__) + " " + VERSION)
#Feed our arguments to the main function, and off we go!
main(arguments)
| 34.467826 | 142 | 0.593219 |
4a221c98e4d01e520edcfb2a9246e5af5d8bda4d | 251 | py | Python | Tupla revisao/tupla.py | Hipparcus/Python-Learning | a3bd5787ceb67f20a0a053e3db4cf77a18e12112 | [
"MIT"
] | null | null | null | Tupla revisao/tupla.py | Hipparcus/Python-Learning | a3bd5787ceb67f20a0a053e3db4cf77a18e12112 | [
"MIT"
] | null | null | null | Tupla revisao/tupla.py | Hipparcus/Python-Learning | a3bd5787ceb67f20a0a053e3db4cf77a18e12112 | [
"MIT"
] | null | null | null | palavras = ('programacao','nomes','legal','que bacana','yeaaah','astronomia')
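# For each (Portuguese) word in the tuple above, print the vowels it contains.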
for i in palavras:
print(f"Na palavra {i} há as vogais", end=' ')
for j in i:
if j.lower() in ('aeiou'):
print (f"{j}", end=',')
print("\n") | 31.375 | 77 | 0.537849 |
4a221d236564bfaa226c07e4893068f9dee66c78 | 25,638 | py | Python | sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/aio/operations/_diagnostic_operations.py | JianpingChen/azure-sdk-for-python | 3072fc8c0366287fbaea1b02493a50259c3248a2 | [
"MIT"
] | 3 | 2020-06-23T02:25:27.000Z | 2021-09-07T18:48:11.000Z | sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/aio/operations/_diagnostic_operations.py | JianpingChen/azure-sdk-for-python | 3072fc8c0366287fbaea1b02493a50259c3248a2 | [
"MIT"
] | 510 | 2019-07-17T16:11:19.000Z | 2021-08-02T08:38:32.000Z | sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/aio/operations/_diagnostic_operations.py | JianpingChen/azure-sdk-for-python | 3072fc8c0366287fbaea1b02493a50259c3248a2 | [
"MIT"
] | 5 | 2019-09-04T12:51:37.000Z | 2020-09-16T07:28:40.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DiagnosticOperations:
"""DiagnosticOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.apimanagement.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_service(
self,
resource_group_name: str,
service_name: str,
filter: Optional[str] = None,
top: Optional[int] = None,
skip: Optional[int] = None,
**kwargs
) -> AsyncIterable["_models.DiagnosticCollection"]:
"""Lists all diagnostics of the API Management service instance.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param filter: | Field | Usage | Supported operators | Supported
functions |</br>|-------------|-------------|-------------|-------------|</br>| name |
filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith |</br>.
:type filter: str
:param top: Number of records to return.
:type top: int
:param skip: Number of records to skip.
:type skip: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DiagnosticCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.apimanagement.models.DiagnosticCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiagnosticCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_service.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int', minimum=1)
if skip is not None:
query_parameters['$skip'] = self._serialize.query("skip", skip, 'int', minimum=0)
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('DiagnosticCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_service.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/diagnostics'} # type: ignore
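    # Illustrative call (resource names are hypothetical): iterate the async pager,
    # optionally server-side filtered on the diagnostic name:
    #     async for diag in client.diagnostic.list_by_service(
    #             "my-resource-group", "my-apim-service",
    #             filter="startswith(name,'applicationinsights')"):
    #         print(diag.name)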
async def get_entity_tag(
self,
resource_group_name: str,
service_name: str,
diagnostic_id: str,
**kwargs
) -> bool:
"""Gets the entity state (Etag) version of the Diagnostic specified by its identifier.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param diagnostic_id: Diagnostic identifier. Must be unique in the current API Management
service instance.
:type diagnostic_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get_entity_tag.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'diagnosticId': self._serialize.url("diagnostic_id", diagnostic_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
if cls:
return cls(pipeline_response, None, response_headers)
return 200 <= response.status_code <= 299
get_entity_tag.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/diagnostics/{diagnosticId}'} # type: ignore
async def get(
self,
resource_group_name: str,
service_name: str,
diagnostic_id: str,
**kwargs
) -> "_models.DiagnosticContract":
"""Gets the details of the Diagnostic specified by its identifier.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param diagnostic_id: Diagnostic identifier. Must be unique in the current API Management
service instance.
:type diagnostic_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DiagnosticContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.DiagnosticContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiagnosticContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'diagnosticId': self._serialize.url("diagnostic_id", diagnostic_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('DiagnosticContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/diagnostics/{diagnosticId}'} # type: ignore
async def create_or_update(
self,
resource_group_name: str,
service_name: str,
diagnostic_id: str,
parameters: "_models.DiagnosticContract",
if_match: Optional[str] = None,
**kwargs
) -> "_models.DiagnosticContract":
"""Creates a new Diagnostic or updates an existing one.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param diagnostic_id: Diagnostic identifier. Must be unique in the current API Management
service instance.
:type diagnostic_id: str
:param parameters: Create parameters.
:type parameters: ~azure.mgmt.apimanagement.models.DiagnosticContract
:param if_match: ETag of the Entity. Not required when creating an entity, but required when
updating an entity.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DiagnosticContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.DiagnosticContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiagnosticContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'diagnosticId': self._serialize.url("diagnostic_id", diagnostic_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DiagnosticContract')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 200:
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('DiagnosticContract', pipeline_response)
if response.status_code == 201:
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('DiagnosticContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/diagnostics/{diagnosticId}'} # type: ignore
async def update(
self,
resource_group_name: str,
service_name: str,
diagnostic_id: str,
if_match: str,
parameters: "_models.DiagnosticContract",
**kwargs
) -> "_models.DiagnosticContract":
"""Updates the details of the Diagnostic specified by its identifier.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param diagnostic_id: Diagnostic identifier. Must be unique in the current API Management
service instance.
:type diagnostic_id: str
:param if_match: ETag of the Entity. ETag should match the current entity state from the header
response of the GET request or it should be * for unconditional update.
:type if_match: str
:param parameters: Diagnostic Update parameters.
:type parameters: ~azure.mgmt.apimanagement.models.DiagnosticContract
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DiagnosticContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.DiagnosticContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiagnosticContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'diagnosticId': self._serialize.url("diagnostic_id", diagnostic_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DiagnosticContract')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('DiagnosticContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/diagnostics/{diagnosticId}'} # type: ignore
async def delete(
self,
resource_group_name: str,
service_name: str,
diagnostic_id: str,
if_match: str,
**kwargs
) -> None:
"""Deletes the specified Diagnostic.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param diagnostic_id: Diagnostic identifier. Must be unique in the current API Management
service instance.
:type diagnostic_id: str
:param if_match: ETag of the Entity. ETag should match the current entity state from the header
response of the GET request or it should be * for unconditional update.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'diagnosticId': self._serialize.url("diagnostic_id", diagnostic_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/diagnostics/{diagnosticId}'} # type: ignore
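    # Hypothetical usage sketch (the client object and its attribute name are assumptions,
    # not defined in this generated file):
    #   ops = client.diagnostic          # an instance of this async operations class
    #   diag = await ops.get("my-rg", "my-apim", "applicationinsights")
    #   await ops.update("my-rg", "my-apim", "applicationinsights", if_match="*", parameters=diag)
    #   await ops.delete("my-rg", "my-apim", "applicationinsights", if_match="*")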
| 51.689516 | 208 | 0.661713 |
4a221d248637a20e62d695f12e02037197d13976 | 616 | py | Python | examples/assign.py | LayneInNL/py2flows | 5ecb555c64350cb13c3885a78fe89a40994e9d0e | [
"Apache-2.0"
] | 3 | 2022-03-21T12:10:37.000Z | 2022-03-24T13:31:19.000Z | examples/assign.py | LayneInNL/py2flows | 5ecb555c64350cb13c3885a78fe89a40994e9d0e | [
"Apache-2.0"
] | 1 | 2022-03-17T02:09:37.000Z | 2022-03-17T10:08:14.000Z | examples/assign.py | LayneInNL/py2flows | 5ecb555c64350cb13c3885a78fe89a40994e9d0e | [
"Apache-2.0"
] | 1 | 2022-03-21T12:10:18.000Z | 2022-03-21T12:10:18.000Z | # Copyright 2022 Layne Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
a = b.a = c[1] = 1
a, *b = 1, 2
| 36.235294 | 75 | 0.725649 |
4a221eca901c24d51311a894ad9a257ae367f04d | 772 | py | Python | api/tacticalrmm/logs/migrations/0009_auto_20201110_1431.py | BaDTaG/tacticalrmm | 7bdd8c4626e0629d393edb5dec2541150d1802ef | [
"MIT"
] | 1 | 2021-01-19T20:39:02.000Z | 2021-01-19T20:39:02.000Z | api/tacticalrmm/logs/migrations/0009_auto_20201110_1431.py | BaDTaG/tacticalrmm | 7bdd8c4626e0629d393edb5dec2541150d1802ef | [
"MIT"
] | null | null | null | api/tacticalrmm/logs/migrations/0009_auto_20201110_1431.py | BaDTaG/tacticalrmm | 7bdd8c4626e0629d393edb5dec2541150d1802ef | [
"MIT"
] | null | null | null | # Generated by Django 3.1.2 on 2020-11-10 14:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('logs', '0008_auto_20201110_1431'),
]
operations = [
migrations.AlterField(
model_name='auditlog',
name='action',
field=models.CharField(choices=[('login', 'User Login'), ('failed_login', 'Failed User Login'), ('delete', 'Delete Object'), ('modify', 'Modify Object'), ('add', 'Add Object'), ('view', 'View Object'), ('check_run', 'Check Run'), ('task_run', 'Task Run'), ('agent_install', 'Agent Install'), ('remote_session', 'Remote Session'), ('execute_script', 'Execute Script'), ('execute_command', 'Execute Command')], max_length=100),
),
]
| 40.631579 | 437 | 0.623057 |
4a221f5ee0288e18fb56178d4892d0740a3a5782 | 4,619 | py | Python | src/embit/script.py | jimmysong/embit | 36299bd0fe123d6e3a5318a9f9acfd16564c26c1 | [
"MIT"
] | 2 | 2021-03-26T22:41:55.000Z | 2021-05-27T17:38:53.000Z | src/embit/script.py | jimmysong/embit | 36299bd0fe123d6e3a5318a9f9acfd16564c26c1 | [
"MIT"
] | null | null | null | src/embit/script.py | jimmysong/embit | 36299bd0fe123d6e3a5318a9f9acfd16564c26c1 | [
"MIT"
] | 2 | 2021-03-27T10:16:21.000Z | 2021-06-07T18:01:03.000Z | from .networks import NETWORKS
from . import base58
from . import bech32
from . import hashes
from . import compact
from .base import EmbitBase, EmbitError
import io
SIGHASH_ALL = 1
class Script(EmbitBase):
def __init__(self, data):
self.data = data
def address(self, network=NETWORKS["main"]):
script_type = self.script_type()
data = self.data
if script_type is None:
raise ValueError("This type of script doesn't have address representation")
if script_type == "p2pkh":
d = network["p2pkh"] + data[3:23]
return base58.encode_check(d)
if script_type == "p2sh":
d = network["p2sh"] + data[2:22]
return base58.encode_check(d)
if script_type in ["p2wpkh", "p2wsh"]:
ver = data[0]
# FIXME: should be one of OP_N
if ver > 0:
ver = ver % 0x50
return bech32.encode(network["bech32"], ver, data[2:])
# we should never get here
raise ValueError("Unsupported script type")
def script_type(self):
data = self.data
# OP_DUP OP_HASH160 <20:hash160(pubkey)> OP_EQUALVERIFY OP_CHECKSIG
if len(data) == 25 and data[:3] == b"\x76\xa9\x14" and data[-2:] == b"\x88\xac":
return "p2pkh"
# OP_HASH160 <20:hash160(script)> OP_EQUAL
if len(data) == 23 and data[:2] == b"\xa9\x14" and data[-1] == 0x87:
return "p2sh"
# 0 <20:hash160(pubkey)>
if len(data) == 22 and data[:2] == b"\x00\x14":
return "p2wpkh"
# 0 <32:sha256(script)>
if len(data) == 34 and data[:2] == b"\x00\x20":
return "p2wsh"
# unknown type
return None
def write_to(self, stream):
res = stream.write(compact.to_bytes(len(self.data)))
res += stream.write(self.data)
return res
@classmethod
def read_from(cls, stream):
l = compact.read_from(stream)
data = stream.read(l)
if len(data) != l:
raise ValueError("Cant read %d bytes" % l)
return cls(data)
def __eq__(self, other):
return self.data == other.data
def __ne__(self, other):
return self.data != other.data
class Witness(EmbitBase):
def __init__(self, items):
self.items = items[:]
def write_to(self, stream):
res = stream.write(compact.to_bytes(len(self.items)))
for item in self.items:
res += stream.write(compact.to_bytes(len(item)))
res += stream.write(item)
return res
@classmethod
def read_from(cls, stream):
num = compact.read_from(stream)
items = []
for i in range(num):
l = compact.read_from(stream)
data = stream.read(l)
items.append(data)
return cls(items)
def p2pkh(pubkey):
"""Return Pay-To-Pubkey-Hash ScriptPubkey"""
return Script(b"\x76\xa9\x14" + hashes.hash160(pubkey.sec()) + b"\x88\xac")
def p2sh(script):
"""Return Pay-To-Script-Hash ScriptPubkey"""
return Script(b"\xa9\x14" + hashes.hash160(script.data) + b"\x87")
def p2wpkh(pubkey):
"""Return Pay-To-Witness-Pubkey-Hash ScriptPubkey"""
return Script(b"\x00\x14" + hashes.hash160(pubkey.sec()))
def p2wsh(script):
"""Return Pay-To-Witness-Pubkey-Hash ScriptPubkey"""
return Script(b"\x00\x20" + hashes.sha256(script.data))
def p2pkh_from_p2wpkh(script):
"""Convert p2wpkh to p2pkh script"""
return Script(b"\x76\xa9" + script.serialize()[2:] + b"\x88\xac")
def multisig(m: int, pubkeys):
if m <= 0 or m > 16:
raise ValueError("m must be between 1 and 16")
n = len(pubkeys)
if n < m or n > 16:
raise ValueError("Number of pubkeys must be between %d and 16" % m)
data = bytes([80 + m])
for pubkey in pubkeys:
sec = pubkey.sec()
data += bytes([len(sec)]) + sec
# OP_m <len:pubkey> ... <len:pubkey> OP_n OP_CHECKMULTISIG
data += bytes([80 + n, 0xAE])
return Script(data)
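# Rough sketch of the script produced by multisig() for a hypothetical 2-of-3 setup
# with 33-byte compressed pubkeys pk1..pk3 (values are illustrative):
#   OP_2 <33:pk1> <33:pk2> <33:pk3> OP_3 OP_CHECKMULTISIG
#   i.e. bytes([0x52]) + b"\x21" + pk1 + b"\x21" + pk2 + b"\x21" + pk3 + bytes([0x53, 0xae])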
def address_to_scriptpubkey(addr):
pass
def script_sig_p2pkh(signature, pubkey):
sec = pubkey.sec()
der = signature.serialize() + bytes([SIGHASH_ALL])
data = compact.to_bytes(len(der)) + der + compact.to_bytes(len(sec)) + sec
return Script(data)
def script_sig_p2sh(redeem_script):
"""Creates scriptsig for p2sh"""
# FIXME: implement for legacy p2sh as well
return Script(redeem_script.serialize())
def witness_p2wpkh(signature, pubkey):
return Witness([signature.serialize() + bytes([SIGHASH_ALL]), pubkey.sec()])
| 29.050314 | 88 | 0.598398 |
4a221fa002e94b1d958f31ec37c854a6a9a5b2a3 | 3,301 | py | Python | code/MMServerEngine/others/table/table.py | xuerong/MMServerEngine | f11c34680ea56645e91bab9ef02a808ee2e1730d | [
"Apache-2.0"
] | 9 | 2016-09-14T11:27:25.000Z | 2020-11-06T06:33:33.000Z | code/MMServerEngine/others/table/table.py | wangxianglong3/MMServerEngine | d3bf90da536ab84efefba2c7128ba88695153495 | [
"Apache-2.0"
] | null | null | null | code/MMServerEngine/others/table/table.py | wangxianglong3/MMServerEngine | d3bf90da536ab84efefba2c7128ba88695153495 | [
"Apache-2.0"
] | 7 | 2016-09-14T11:27:24.000Z | 2019-11-04T08:30:10.000Z | #!/usr/bin/python
#-*- coding: utf-8 -*-
import xlrd
import os
import sys
import shutil
from xlrd import xldate_as_tuple
from datetime import date,datetime
table_file_name = sys.argv[1]
data = xlrd.open_workbook(table_file_name)
def createJavaFile(name,content):
java_class_path = "com/table/"+name+".java"
#删除旧文件
if os.path.exists(java_class_path):
os.remove(java_class_path)
newJavaFile = open(java_class_path,"wb")
newJavaFile.write(content)
newJavaFile.close()
shutil.copy(java_class_path, "../../src/main/java/"+java_class_path)
typeStrs = {"int":"int","long":"long","String":"String","string":"String","float":"float","double":"double","date":"java.sql.Timestamp"}
def createValueByType(type,cell):
cellStr = str(cell.value)
if (cell.ctype == 3):
date_value = xlrd.xldate_as_tuple(cell.value,0)
cellStr = str(date_value[0])+"-"+str(date_value[1])+"-"+str(date_value[2])+" "+str(date_value[3])+":"+str(date_value[4])+":"+str(date_value[5])
if type == "int":
return "(int)"+cellStr
elif type == "long":
return "(long)"+cellStr
elif type == "String":
return "\""+cellStr+"\""
elif type == "float":
return "(float)"+cellStr
elif type == "double":
return "(double)"+cellStr
elif type == "java.sql.Timestamp":
return "java.sql.Timestamp.valueOf(\""+cellStr+"\")"
else:
return ""
tables = data.sheets()
for table in tables:
nrows = table.nrows
if nrows <2:
continue
start = "package com.table;\n//Auto Generate File, Do NOT Modify!!!!!!!!!!!!!!!\npublic final class "+table.name+"{\n"
sb = ""
getSet = ""
constructor = "\tpublic "+table.name+"("
constructorContent = ""
dataStr = "\tpublic static final "+table.name+"[] datas={"
error = 0
names = table.row_values(0)
types = table.row_values(1)
i = 0
for cell in names:
name = str(cell)
typeStr = str(types[i])
if not typeStrs.has_key(typeStr):
            print table.name + " is not a data table, skipping"
error = 1
break;
type = typeStrs.get(typeStr)
sb = sb +"\tprivate "+type+" "+name+";\n"
if i!=0:
constructor = constructor+","
constructor = constructor+type+" "+name
constructorContent = constructorContent+"\t\tthis."+name+"="+name+";\n"
getSet = getSet+"\tpublic "+type+" get"+name.capitalize()+"(){return "+name+";}\n"
getSet = getSet+"\tpublic void set"+name.capitalize()+"("+type+" "+name+"){this."+name+"="+name+";}\n"
i=i+1
if error == 1:
continue;
nrows = table.nrows
for i in range(nrows):
if i<2:
continue;
record = table.row_values(i)
if i>2:
dataStr=dataStr+","
dataStr = dataStr+"\n\t\tnew "+table.name+"("
k = 0
for cell in record:
if k>0:
dataStr=dataStr+","
dataStr=dataStr+createValueByType(typeStrs.get(str(types[k])),table.cell(i,k))
k = k+1
dataStr=dataStr+")"
dataStr=dataStr+"\n\t};"
constructor = constructor+"){\n"+constructorContent+"\t}"
sb=start+dataStr+"\n"+sb+"\n"+constructor+"\n"+getSet+"}"
createJavaFile(table.name,sb)
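# Rough shape of the generated Java for a hypothetical sheet "Item" with columns
# id:int and name:String (illustrative only; actual output depends on the workbook):
#   public final class Item{
#       public static final Item[] datas={
#           new Item((int)1.0,"sword"),
#           new Item((int)2.0,"shield")};
#       private int id;
#       private String name;
#       public Item(int id,String name){ ... }
#       public int getId(){return id;}
#       public void setId(int id){this.id=id;}
#       ...
#   }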
| 31.141509 | 151 | 0.581036 |
4a22202d4b5d48a5582e13ed20560c8a6ffc60bf | 1,662 | py | Python | Gem/PythonTests/Automated/test_suites/periodic/NonMaterialAssetsExcludedInBrowser_test.py | incisor/o3de-atomtest | 026fef06827bf0dd559510882df5cb426ab00a99 | [
"Apache-2.0",
"MIT"
] | 2 | 2021-07-18T11:20:41.000Z | 2022-02-01T20:17:50.000Z | Gem/PythonTests/Automated/test_suites/periodic/NonMaterialAssetsExcludedInBrowser_test.py | incisor/o3de-atomtest | 026fef06827bf0dd559510882df5cb426ab00a99 | [
"Apache-2.0",
"MIT"
] | 5 | 2021-07-14T02:24:07.000Z | 2021-10-04T21:24:35.000Z | Gem/PythonTests/Automated/test_suites/periodic/NonMaterialAssetsExcludedInBrowser_test.py | incisor/o3de-atomtest | 026fef06827bf0dd559510882df5cb426ab00a99 | [
"Apache-2.0",
"MIT"
] | 7 | 2021-07-06T18:21:14.000Z | 2021-12-06T09:12:40.000Z | """
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import os
import pytest
from Automated.atom_utils import hydra_test_utils as hydra
TEST_DIRECTORY = os.path.dirname(__file__)
LOG_MONITOR_TIMEOUT = 40
@pytest.mark.parametrize("project", ["AtomTest"])
@pytest.mark.parametrize("launcher_platform", ["windows_generic"])
class TestNonMaterialAssetsExcludedInBrowser(object):
@pytest.mark.parametrize("exe_file_name", ["MaterialEditor"])
def test_MaterialBrowser_NonMaterialAssets_ExcludedInBrowser(
self, request, workspace, project, launcher_platform, generic_launcher, exe_file_name
):
"""
Please review the hydra script run by this test for more specific test info.
        Test to verify that non-Material-based assets are excluded from the Asset Browser.
"""
unexpected_lines = [
"Trace::Assert",
"Trace::Error",
"Traceback (most recent call last):",
"Expected item not found in folder",
"Excluded item found in folder",
"Atom MaterialEditor asset path not found in browser: ",
]
hydra.launch_and_validate_results(
request,
TEST_DIRECTORY,
generic_launcher,
"NonMaterialAssetsExcludedInBrowser_test_case.py",
timeout=LOG_MONITOR_TIMEOUT,
expected_lines=None,
unexpected_lines=unexpected_lines,
halt_on_unexpected=True,
log_file_name="MaterialEditor.log",
)
| 34.625 | 97 | 0.6787 |
4a222067165b4cd2dc5dd855429d29466be4271c | 1,465 | py | Python | bars3d_demo.py | kingslair/MatPlotLib | 66d1accf1a049b901dece69d18edadafbf4b687f | [
"MIT"
] | null | null | null | bars3d_demo.py | kingslair/MatPlotLib | 66d1accf1a049b901dece69d18edadafbf4b687f | [
"MIT"
] | null | null | null | bars3d_demo.py | kingslair/MatPlotLib | 66d1accf1a049b901dece69d18edadafbf4b687f | [
"MIT"
] | null | null | null | from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.animation as animation
import time
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
op_array = np.array([])
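# Assumed format of bars_3d.txt, inferred from the parsing in animate() below:
# one "x,y,z" triple of integers per line, e.g.
#   1,4,2
#   2,7,5
#   3,1,9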
def animate(i):
pullData = open("bars_3d.txt","r").read()
dataArray = pullData.split('\n')
xar = []
yar = []
zar = []
for eachLine in dataArray:
if len(eachLine)>1:
x,y,z = eachLine.split(',')
xar.append(int(x))
yar.append(int(y))
zar.append(int(z))
#print (xar)
#print (yar)
#print (zar)
#for x1,y1,z1 in xar,yar,zar:
#print (x1)
#print (y1)
#print (z1)
#cs = [c] * len(xs)
#cs[0] = 'c'
ax.clear()
ax.grid(zorder=0)
#ax.bar(xar, yar, zs=zar, zdir='y', color="blue", alpha=1)
ax.scatter(xar, yar, zs=zar)
'''for c, z in zip(['r'], [10]):
xs = np.arange(20)
#xs =[1]
ys = np.random.rand(20)
#ys = [4]
# You can provide either a single color or an array. To demonstrate this,
# the first bar of each set will be colored cyan.
cs = [c] * len(xs)
cs[0] = 'c'
ax.bar(xs, ys, zs=z, zdir='y', color=cs, alpha=0.8)
#ax.bar(xs, ys, zs=z, zdir='z')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')'''
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ani = animation.FuncAnimation(fig, animate, interval=1000)
plt.show()
| 24.416667 | 77 | 0.565188 |
4a2220d2400c1620e374e02acf264bf5dace5983 | 6,550 | py | Python | SQLNet/scripts/model/modules/order_predict.py | Bhaskers-Blu-Org2/EMNLP2019-Adjective-Knowledge-for-Text-to-SQL | 246f52ee70d2eeb776fe99597712b57bca3883c7 | [
"MIT"
] | 5 | 2019-11-15T11:02:31.000Z | 2020-05-09T09:32:26.000Z | SQLNet/scripts/model/modules/order_predict.py | Bhaskers-Blu-Org2/EMNLP2019-Adjective-Knowledge-for-Text-to-SQL | 246f52ee70d2eeb776fe99597712b57bca3883c7 | [
"MIT"
] | 1 | 2020-04-07T09:20:51.000Z | 2020-04-07T09:20:51.000Z | SQLNet/scripts/model/modules/order_predict.py | microsoft/EMNLP2019-Adjective-Knowledge-for-Text-to-SQL | 246f52ee70d2eeb776fe99597712b57bca3883c7 | [
"MIT"
] | 7 | 2020-01-01T02:22:23.000Z | 2021-11-05T04:49:19.000Z | import json
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from net_utils import run_lstm, col_name_encode
class OrderPredictor(nn.Module):
def __init__(self, N_word, N_h, N_depth, gpu, feats_format=""):
super(OrderPredictor, self).__init__()
self.N_h = N_h
self.gpu = gpu
        self.q_lstm = nn.LSTM(input_size=N_word, hidden_size=N_h // 2,  # floor division keeps hidden_size an int on Python 3
num_layers=N_depth, batch_first=True,
dropout=0.3, bidirectional=True)
        self.col_lstm = nn.LSTM(input_size=N_word, hidden_size=N_h // 2,
num_layers=N_depth, batch_first=True,
dropout=0.3, bidirectional=True)
self.gby_num_h = nn.Linear(N_h, N_h)
self.gby_num_l = nn.Linear(N_h, N_h)
self.gby_num_out = nn.Sequential(nn.Tanh(), nn.Linear(N_h, 2))
self.q_att = nn.Linear(N_h, N_h)
self.col_out_q = nn.Linear(N_h, N_h)
self.col_out_c = nn.Linear(N_h, N_h)
self.col_out = nn.Sequential(nn.Tanh(), nn.Linear(N_h, 1))
self.agg_att = nn.Linear(N_h, N_h)
self.agg_out_q = nn.Linear(N_h, N_h)
self.agg_out_c = nn.Linear(N_h, N_h)
self.agg_out = nn.Sequential(nn.Tanh(), nn.Linear(N_h, 6)) #to 5
self.dat_att = nn.Linear(N_h, N_h)
self.dat_out_q = nn.Linear(N_h, N_h)
self.dat_out_c = nn.Linear(N_h, N_h)
self.dat_out = nn.Sequential(nn.Tanh(), nn.Linear(N_h, 5)) #for 4 desc/asc limit/none combinations
self.dat_out_dirc = nn.Linear(50, 50)
self.dat_out_dirc_out = nn.Sequential(nn.Tanh(), nn.Linear(50, 5)) #for 4 desc/asc limit/none combinations
self.softmax = nn.Softmax() #dim=1
self.CE = nn.CrossEntropyLoss()
self.log_softmax = nn.LogSoftmax()
self.mlsml = nn.MultiLabelSoftMarginLoss()
self.bce_logit = nn.BCEWithLogitsLoss()
self.sigm = nn.Sigmoid()
if gpu:
self.cuda()
self.feats_format = feats_format
def forward(self, perm, st, ed, q_emb_var, q_len, col_emb_var, col_len, col_num, col_name_len, q_seq, col_seq, emb_layer, train=True):
max_q_len = max(q_len)
max_col_len = max(col_len)
B = len(q_len)
q_enc, _ = run_lstm(self.q_lstm, q_emb_var, q_len)
col_enc, _ = col_name_encode(col_emb_var, col_name_len, col_len, self.col_lstm)
# Predict number
gby_num_att = torch.bmm(col_enc, self.gby_num_h(q_enc).transpose(1, 2))
for idx, num in enumerate(col_len):
if num < max_col_len:
gby_num_att[idx, num:, :] = -100
for idx, num in enumerate(q_len):
if num < max_q_len:
gby_num_att[idx, :, num:] = -100
gby_num_att_val = self.softmax(gby_num_att.view((-1, max_q_len))).view(B, -1, max_q_len)
gby_num_K = (q_enc.unsqueeze(1) * gby_num_att_val.unsqueeze(3)).sum(2).sum(1)
ody_num_score = self.gby_num_out(self.gby_num_l(gby_num_K))
# Predict columns.
att_val_qc = torch.bmm(col_enc, self.q_att(q_enc).transpose(1, 2))
for idx, num in enumerate(q_len):
if num < max_q_len:
att_val_qc[idx, :, num:] = -100
att_prob_qc = self.softmax(att_val_qc.view((-1, max_q_len))).view(B, -1, max_q_len)
# q_weighted: (B, max_col_len, hid_dim)
q_weighted = (q_enc.unsqueeze(1) * att_prob_qc.unsqueeze(3)).sum(2)
# Compute prediction scores
# self.col_out.squeeze(): (B, max_col_len)
col_score = self.col_out(self.col_out_q(q_weighted) + self.col_out_c(col_enc)).squeeze()
for idx, num in enumerate(col_len):
if num < max_col_len:
col_score[idx, num:] = -100
# Predict aggregation
agg_att_val = torch.bmm(col_enc, self.agg_att(q_enc).transpose(1, 2))
for idx, num in enumerate(col_len):
if num < max_col_len:
agg_att_val[idx, num:, :] = -100
for idx, num in enumerate(q_len):
if num < max_q_len:
agg_att_val[idx, :, num:] = -100
agg_att = self.softmax(agg_att_val.view((-1, max_q_len))).view(B, -1, max_q_len)
# q_weighted_num: (B, hid_dim)
q_weighted_agg = (q_enc.unsqueeze(1) * agg_att.unsqueeze(3)).sum(2).sum(1)
# self.col_num_out: (B, 4)
agg_score = self.agg_out(self.agg_out_q(q_weighted_agg))
# Predict desc asc limit
dat_att_val = torch.bmm(col_enc, self.dat_att(q_enc).transpose(1, 2))
for idx, num in enumerate(col_len):
if num < max_col_len:
dat_att_val[idx, num:, :] = -100
for idx, num in enumerate(q_len):
if num < max_q_len:
dat_att_val[idx, :, num:] = -100
dat_att = self.softmax(dat_att_val.view((-1, max_q_len))).view(B, -1, max_q_len)
# q_weighted_num: (B, hid_dim)
q_weighted_dat = (q_enc.unsqueeze(1) * dat_att.unsqueeze(3)).sum(2).sum(1)
# self.col_num_out: (B, 4)
col_scores = col_score.data.cpu().numpy()
chosen_col_gt = [np.argmax(col_scores[b]) for b in range(B)]
assert B == ed - st
dirc_vecs = torch.zeros([B, 50])
zero_feats = torch.zeros([50])
if self.gpu:
dirc_vecs = dirc_vecs.cuda()
zero_feats = zero_feats.cuda()
dirc_vecs = Variable(dirc_vecs, requires_grad=False)
for b in range(st, ed):
idx = perm[b]
gt_col = chosen_col_gt[b - st]
dirc_feat = emb_layer.get_direction_feature(max_q_len, idx, gt_col, train)
if self.feats_format == 'direct':
# [max_len] (-1/0/1)
mask = (att_prob_qc[b - st, gt_col] * dirc_feat[0])
mask_i = mask.cpu().data.numpy()[0]
if mask_i > 0:
dirc_vec = dirc_feat[1]
elif mask_i < 0:
dirc_vec = dirc_feat[2]
else:
dirc_vec = zero_feats
dirc_vec = Variable(dirc_vec, requires_grad=False)
else:
# [max_len, len(feats)]
dirc_vec = torch.matmul(att_prob_qc[b - st, gt_col].unsqueeze(0), dirc_feat).squeeze()
dirc_vecs[b - st] = dirc_vec
dat_score = self.dat_out(self.dat_out_q(q_weighted_dat)) + \
self.dat_out_dirc_out(self.dat_out_dirc(dirc_vecs))
score = (ody_num_score, col_score, agg_score, dat_score)
return score
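    # Shape summary for forward() above (B = batch size): ody_num_score is (B, 2),
    # col_score is (B, max_col_len), agg_score is (B, 6), dat_score is (B, 5).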
| 40.9375 | 138 | 0.593282 |
4a2221dcc1dbbee761d960e48a54e00ed4c67ca9 | 1,851 | py | Python | augpathlib/repo_patch.py | tmsincomb/augpathlib | ed9c0edff540741fca866780a3d043a3b7644f08 | [
"MIT"
] | null | null | null | augpathlib/repo_patch.py | tmsincomb/augpathlib | ed9c0edff540741fca866780a3d043a3b7644f08 | [
"MIT"
] | null | null | null | augpathlib/repo_patch.py | tmsincomb/augpathlib | ed9c0edff540741fca866780a3d043a3b7644f08 | [
"MIT"
] | null | null | null | import git
class _Repo(git.Repo): # FIXME should we subclass Repo for this or patch ??
""" monkey patching """
def getRef(self, ref_name):
for ref in self.refs:
if ref.name == ref_name:
return ref
else:
raise ValueError(f'No ref with name: {ref_name}')
# monkey patch git.Repo
git.Repo.getRef = _Repo.getRef
class _Reference(git.Reference):
""" monkey patching """
def __enter__(self):
""" Checkout the ref for this head.
`git stash --all` beforehand and restore during __exit__.
If the ref is the same, then the stash step still happens.
If you need to modify the uncommitted state of a repo this
is not the tool you should use. """
if not self.is_valid():
            raise exc.InvalidRefError(f'Not a valid ref: {self.name}')  # `exc` is assumed to be augpathlib's exceptions module; it is not imported in this snippet
self.__original_branch = self.repo.active_branch
self.__stash = self.repo.git.stash('--all') # always stash
if self.__stash == 'No local changes to save':
self.__stash = None
if self == self.__original_branch:
return self
self.checkout()
return self
def __exit__(self, exc_type, exc_value, traceback):
_stash = self.repo.git.stash('--all') # always stash on the way out as well
if _stash == 'No local changes to save':
stash = 'stash@{0}'
else:
stash = "stash@{1}"
if self.__original_branch != self:
self.__original_branch.checkout()
# TODO check to make sure no other stashes were pushed on top
if self.__stash is not None:
self.repo.git.stash('pop', stash)
self.__stash = None
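# Hypothetical usage sketch (assumes a repo with a branch named "feature"):
#   repo = git.Repo(".")
#   with repo.getRef("feature"):
#       ...  # "feature" is checked out here; uncommitted changes were stashed
#   # on exit the original branch is restored and the stash is popped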
# monkey patch git.Reference
git.Reference.__enter__ = _Reference.__enter__
git.Reference.__exit__ = _Reference.__exit__
| 28.921875 | 84 | 0.612102 |
4a2222d1ffe1c59b24e53ba72f1adff8ae5202a8 | 2,445 | py | Python | SearchUtility_Backend/PDFTokenizer.py | ramacpr/AnyDocSearch | 1b3547f418be2fcc5e1f8ae8d83af61e7234dea3 | [
"MIT"
] | 1 | 2020-12-30T13:51:22.000Z | 2020-12-30T13:51:22.000Z | SearchUtility_Backend/PDFTokenizer.py | ramacpr/AnyDocSearch | 1b3547f418be2fcc5e1f8ae8d83af61e7234dea3 | [
"MIT"
] | null | null | null | SearchUtility_Backend/PDFTokenizer.py | ramacpr/AnyDocSearch | 1b3547f418be2fcc5e1f8ae8d83af61e7234dea3 | [
"MIT"
] | null | null | null | import time
import MyExtendedStopWords as StopWordsHelper
import MyDatabaseManager as dbManager
import PyPDFEx as PDFHelper
from nltk.tokenize import word_tokenize as WordHelper
from nltk.stem import PorterStemmer
from wordsegment import load
from SearchUtility_Backend.SearchUtilityLogger import SearchUtilityLogger
load()
class PDFTokenizer:
__stopWordObj = StopWordsHelper.ExtendedStopWord()
__StopWordsList = ""
__dbObj = None
__logger = SearchUtilityLogger.GetLoggerObj()
def __init__(self):
self.__StopWordsList = self.__stopWordObj.thestopwords()
self.__dbObj = dbManager.SqlDBManager()
def __update_word_addresses(self, pdf_file_obj, doc_id):
try:
pdf_reader = PDFHelper.PdfFileReader(pdf_file_obj)
for pageIndex in range(0, pdf_reader.numPages):
page_obj = pdf_reader.getPage(pageIndex)
# tokenizing
originalPageContent = WordHelper(page_obj.extractText().lower())
# stemming
stemmedContent = [PorterStemmer().stem(w) for w in originalPageContent]
# store the word db
positionInPage = 0
for term in stemmedContent:
if term not in self.__StopWordsList:
self.__dbObj.UpdateListing(term, doc_id, pageIndex, positionInPage)
positionInPage += 1
        except Exception:  # avoid a bare except that would also swallow KeyboardInterrupt
            self.__logger.fatal("Unexpected error in __update_word_addresses.")
def tokenize(self, pdf_file_name_list):
for pdf_file_name in pdf_file_name_list:
self.__dbObj.SetServerUpdateState(True)
doc_id = self.__dbObj.GetDocumentID(pdf_file_name)
            if doc_id == -1:  # equality, not identity: `is -1` relies on int interning
                continue
            start = time.clock()
            pdf_file_obj = None
            try:
                self.__logger.info("Updating database for file [" + str(doc_id) + "] " + pdf_file_name)
                pdf_file_obj = open(pdf_file_name, "rb")
                self.__update_word_addresses(pdf_file_obj, doc_id)
                self.__logger.info("Completed in " + str(time.clock() - start) + " seconds.")
            except Exception:
                self.__logger.fatal("Unexpected error. " + pdf_file_name + " incorrectly tokenized.")
            finally:
                self.__dbObj.SetServerUpdateState(False)
                if pdf_file_obj is not None:  # open() itself may have failed
                    pdf_file_obj.close()
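# Hypothetical usage (assumes the SqlDBManager backend and NLTK data are set up):
#   tokenizer = PDFTokenizer()
#   tokenizer.tokenize(["docs/report.pdf", "docs/manual.pdf"])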
| 41.440678 | 104 | 0.622086 |
4a222310e407a3dd9106b01210dde6ee3443a807 | 8,391 | py | Python | tests/io/test_yaml_dataset.py | lblanche/kedro | 659a47b161d452557504b07971722125a80f6294 | [
"Apache-2.0"
] | null | null | null | tests/io/test_yaml_dataset.py | lblanche/kedro | 659a47b161d452557504b07971722125a80f6294 | [
"Apache-2.0"
] | null | null | null | tests/io/test_yaml_dataset.py | lblanche/kedro | 659a47b161d452557504b07971722125a80f6294 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018-2019 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import PurePosixPath
import pandas as pd
import pytest
from fsspec.implementations.http import HTTPFileSystem
from fsspec.implementations.local import LocalFileSystem
from gcsfs import GCSFileSystem
from pandas.testing import assert_frame_equal
from s3fs.core import S3FileSystem
from kedro.io import DataSetError, YAMLDataSet
from kedro.io.core import Version
@pytest.fixture
def filepath_yaml(tmp_path):
return str(tmp_path / "test.yaml")
@pytest.fixture
def yaml_data_set(filepath_yaml, save_args):
return YAMLDataSet(filepath=filepath_yaml, save_args=save_args)
@pytest.fixture
def versioned_yaml_data_set(filepath_yaml, load_version, save_version):
return YAMLDataSet(
filepath=filepath_yaml, version=Version(load_version, save_version)
)
@pytest.fixture
def dummy_data():
return {"col1": 1, "col2": 2, "col3": 3}
class TestYAMLDataSet:
def test_save_and_load(self, yaml_data_set, dummy_data):
"""Test saving and reloading the data set."""
yaml_data_set.save(dummy_data)
reloaded = yaml_data_set.load()
assert dummy_data == reloaded
def test_exists(self, yaml_data_set, dummy_data):
"""Test `exists` method invocation for both existing and
nonexistent data set."""
assert not yaml_data_set.exists()
yaml_data_set.save(dummy_data)
assert yaml_data_set.exists()
@pytest.mark.parametrize(
"save_args", [{"k1": "v1", "index": "value"}], indirect=True
)
def test_save_extra_params(self, yaml_data_set, save_args):
"""Test overriding the default save arguments."""
for key, value in save_args.items():
assert yaml_data_set._save_args[key] == value
def test_load_missing_file(self, yaml_data_set):
"""Check the error when trying to load missing file."""
pattern = r"Failed while loading data from data set YAMLDataSet\(.*\)"
with pytest.raises(DataSetError, match=pattern):
yaml_data_set.load()
@pytest.mark.parametrize(
"filepath,instance_type",
[
("s3://bucket/file.yaml", S3FileSystem),
("file:///tmp/test.yaml", LocalFileSystem),
("/tmp/test.yaml", LocalFileSystem),
("gcs://bucket/file.yaml", GCSFileSystem),
("https://example.com/file.yaml", HTTPFileSystem),
],
)
def test_protocol_usage(self, filepath, instance_type):
data_set = YAMLDataSet(filepath=filepath)
assert isinstance(data_set._fs, instance_type)
# _strip_protocol() doesn't strip http(s) protocol
if data_set._protocol == "https":
path = filepath.split("://")[-1]
else:
path = data_set._fs._strip_protocol(filepath)
assert str(data_set._filepath) == path
assert isinstance(data_set._filepath, PurePosixPath)
def test_catalog_release(self, mocker):
fs_mock = mocker.patch("fsspec.filesystem").return_value
filepath = "test.yaml"
data_set = YAMLDataSet(filepath=filepath)
data_set.release()
fs_mock.invalidate_cache.assert_called_once_with(filepath)
def test_dataframe_support(self, yaml_data_set):
data = pd.DataFrame({"col1": [1, 2], "col2": [4, 5]})
yaml_data_set.save(data)
reloaded = yaml_data_set.load()
assert isinstance(reloaded, dict)
data_df = pd.DataFrame.from_dict(reloaded)
assert_frame_equal(data, data_df)
class TestYAMLDataSetVersioned:
def test_version_str_repr(self, load_version, save_version):
"""Test that version is in string representation of the class instance
when applicable."""
filepath = "test.yaml"
ds = YAMLDataSet(filepath=filepath)
ds_versioned = YAMLDataSet(
filepath=filepath, version=Version(load_version, save_version)
)
assert filepath in str(ds)
assert "version" not in str(ds)
assert filepath in str(ds_versioned)
ver_str = "version=Version(load={}, save='{}')".format(
load_version, save_version
)
assert ver_str in str(ds_versioned)
assert "YAMLDataSet" in str(ds_versioned)
assert "YAMLDataSet" in str(ds)
assert "protocol" in str(ds_versioned)
assert "protocol" in str(ds)
# Default save_args
assert "save_args={'default_flow_style': False}" in str(ds)
assert "save_args={'default_flow_style': False}" in str(ds_versioned)
def test_save_and_load(self, versioned_yaml_data_set, dummy_data):
"""Test that saved and reloaded data matches the original one for
the versioned data set."""
versioned_yaml_data_set.save(dummy_data)
reloaded = versioned_yaml_data_set.load()
assert dummy_data == reloaded
def test_no_versions(self, versioned_yaml_data_set):
"""Check the error if no versions are available for load."""
pattern = r"Did not find any versions for YAMLDataSet\(.+\)"
with pytest.raises(DataSetError, match=pattern):
versioned_yaml_data_set.load()
def test_exists(self, versioned_yaml_data_set, dummy_data):
"""Test `exists` method invocation for versioned data set."""
assert not versioned_yaml_data_set.exists()
versioned_yaml_data_set.save(dummy_data)
assert versioned_yaml_data_set.exists()
def test_prevent_overwrite(self, versioned_yaml_data_set, dummy_data):
"""Check the error when attempting to override the data set if the
corresponding yaml file for a given save version already exists."""
versioned_yaml_data_set.save(dummy_data)
pattern = (
r"Save path \`.+\` for YAMLDataSet\(.+\) must "
r"not exist if versioning is enabled\."
)
with pytest.raises(DataSetError, match=pattern):
versioned_yaml_data_set.save(dummy_data)
@pytest.mark.parametrize(
"load_version", ["2019-01-01T23.59.59.999Z"], indirect=True
)
@pytest.mark.parametrize(
"save_version", ["2019-01-02T00.00.00.000Z"], indirect=True
)
def test_save_version_warning(
self, versioned_yaml_data_set, load_version, save_version, dummy_data
):
"""Check the warning when saving to the path that differs from
the subsequent load path."""
pattern = (
r"Save version `{0}` did not match load version `{1}` "
r"for YAMLDataSet\(.+\)".format(save_version, load_version)
)
with pytest.warns(UserWarning, match=pattern):
versioned_yaml_data_set.save(dummy_data)
def test_http_filesystem_no_versioning(self):
pattern = r"HTTP\(s\) DataSet doesn't support versioning\."
with pytest.raises(DataSetError, match=pattern):
YAMLDataSet(
filepath="https://example.com/file.yaml", version=Version(None, None)
)
| 39.394366 | 85 | 0.687046 |
4a2223eb045882f94ff2e63780be8aac996373eb | 837 | py | Python | aoc-2021-python/day1.py | mihaicostin/advent-of-code | f6c1121e831fb55a7620369970b31654ee5e50e3 | [
"MIT"
] | 1 | 2018-12-07T13:48:24.000Z | 2018-12-07T13:48:24.000Z | aoc-2021-python/day1.py | mihaicostin/adventofcode-2018 | f6c1121e831fb55a7620369970b31654ee5e50e3 | [
"MIT"
] | null | null | null | aoc-2021-python/day1.py | mihaicostin/adventofcode-2018 | f6c1121e831fb55a7620369970b31654ee5e50e3 | [
"MIT"
] | null | null | null | # count the number of times a depth measurement increases from the previous measurement.
# (There is no measurement before the first measurement.)
count = 0
with open("day1.txt") as f:
lines = f.readlines()
prev = -1
for line in lines:
        if (int(line) > prev) and (prev != -1):
count = count + 1
prev = int(line)
print(count)
# part 2
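# Part 2 compares sums of three-measurement sliding windows: a window counts as
# an increase when its sum exceeds the sum of the window starting one position earlier.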
secondCount = 0
def window_sum(array, idx):
if idx > 1:
return array[idx] + array[idx - 1] + array[idx - 2]
return -1
with open("day1.txt") as f:
lines = f.readlines()
    numbers = [int(el) for el in lines]
for idx, val in enumerate(numbers):
if idx > 2:
a = window_sum(numbers, idx - 1)
b = window_sum(numbers, idx)
if b > a:
secondCount += 1
print(secondCount) | 23.914286 | 88 | 0.575866 |
4a22264368e1bd9e5dd5939ca743cef0bebb4616 | 4,782 | py | Python | experiments/ants3d_atlas_fine_remap_labels.py | BlueBrain/atlas-annotation | 118af9b95518a19b64a9d8008aabed557eb0f646 | [
"Apache-2.0"
] | null | null | null | experiments/ants3d_atlas_fine_remap_labels.py | BlueBrain/atlas-annotation | 118af9b95518a19b64a9d8008aabed557eb0f646 | [
"Apache-2.0"
] | 8 | 2021-11-02T17:23:22.000Z | 2022-03-02T12:29:26.000Z | experiments/ants3d_atlas_fine_remap_labels.py | BlueBrain/atlas-annotation | 118af9b95518a19b64a9d8008aabed557eb0f646 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2021, Blue Brain Project, EPFL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""3D atlases registration (after remapping labels) script."""
import logging
import sys
import numpy as np
import utils
from atlannot import load_volume
from atlannot.ants import register, transform
from atlannot.utils import remap_labels
# Parameters
description = """\
3D ANTsPy registration with atlases (after remapping labels values):
fixed = v3 atlas
moving = v2 atlas
"""
experiment_name = utils.get_script_file_name()
v2_atlas_path = utils.get_v2_atlas_fine_path()
v3_atlas_path = utils.get_v3_atlas_fine_path()
nissl_path = utils.get_nissl_path()
seed = 2 # (can also be None)
# Initialize the logger
logger = logging.getLogger(experiment_name)
script_info = """
Goal: Computing the registration between two images/volumes after randomly
remapping the labels.
Assumptions:
- The input images/volumes have the same shape.
- The input images/volumes are considered as label images.
- The registration is computed on the entire input images at once, which means
that if volumes are specified the registration is a 3D registration, and if 2D
images are specified it is a 2D registration.
Steps:
- Loading of the images
- Creation of union list containing all the labels appearing at least in one
of the two input images/volumes.
- The conversion from previous labels to new labels is done by taking, as the new
label, the position of the previous label in the union list. For example:
Union List: [0, 1002, 6, 9]
New labels: [0, 1, 2, 3]
Which means 0 stays 0 in the new volume, 1002 is becoming 1, 6 is
becoming 2, ... Obviously, there are other strategies to convert previous
labels to new ones.
- Creation of new images/volumes with corresponding new labels.
- Computation of the ANTsPY registration on the new images/volumes.
- Applying the transform found in the previous step to the initial images/volumes.
- Computation of the baseline misalignment (between inputs) and the resulting
misalignment (between the input reference and the warped moving image).
"""
def main():
"""3D atlases registration (after remapping labels)."""
# Paths
output_dir = utils.get_results_dir() / experiment_name
if not utils.can_write_to_dir(output_dir):
print("Cannot write to output directory. Stopping")
return 1
# Load data
logger.info("Loading data")
v3_atlas = load_volume(v3_atlas_path, normalize=False)
v2_atlas = load_volume(v2_atlas_path, normalize=False)
nissl_volume = load_volume(nissl_path)
# Preprocess data
logger.info("Preprocessing data")
v3_atlas_pre, v2_atlas_pre = preprocess_atlases(
v3_atlas,
v2_atlas,
)
# Registration
logger.info("Starting registration")
df = register(fixed=v3_atlas_pre, moving=v2_atlas_pre)
# Warping
logger.info("Warping volumes")
warped_atlas = transform(
v2_atlas.astype(np.float32),
df,
interpolator="genericLabel",
)
warped_atlas = warped_atlas.astype(v2_atlas.dtype)
warped_nissl = transform(nissl_volume, df)
# Write output
logger.info("Saving results")
# metadata
with open(output_dir / "description.txt", "w") as fp:
fp.write(description)
with open(output_dir / "fixed_path.txt", "w") as fp:
fp.write(str(v2_atlas_path) + "\n")
with open(output_dir / "moving_path.txt", "w") as fp:
fp.write(str(v3_atlas_path) + "\n")
with open(output_dir / "nissl_path.txt", "w") as fp:
fp.write(str(nissl_path) + "\n")
# volumes
np.save(output_dir / "warped_atlas", warped_atlas)
np.save(output_dir / "warped_nissl", warped_nissl)
np.save(output_dir / "df", df)
logger.info(f"Finished. The results were saved to {output_dir}")
def preprocess_atlases(*atlases, seed=None):
"""Preprocess atlases.
Parameters
----------
atlases : Iterable of np.ndarray
All atlases to preprocess.
Returns
-------
new_atlases : Iterable of np.ndarray
Preprocessed atlases
"""
atlases_pre, _ = remap_labels(atlases, seed=seed)
return [atlas.astype(np.float32) for atlas in atlases_pre]
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
sys.exit(main())
| 32.97931 | 78 | 0.719783 |
4a22264870ccdf95932f4a5b2fa01f6e3dbecb2c | 20,037 | py | Python | kopf/structs/references.py | asteven/kopf | 433709dc8846f4b399e98f04c843362230357225 | [
"MIT"
] | null | null | null | kopf/structs/references.py | asteven/kopf | 433709dc8846f4b399e98f04c843362230357225 | [
"MIT"
] | null | null | null | kopf/structs/references.py | asteven/kopf | 433709dc8846f4b399e98f04c843362230357225 | [
"MIT"
] | null | null | null | import asyncio
import dataclasses
import enum
import fnmatch
import re
import urllib.parse
from typing import Collection, Iterable, Iterator, List, Mapping, \
MutableMapping, NewType, Optional, Pattern, Set, Union
# A namespace specification with globs, negations, and some minimal syntax; see `match_namespace()`.
# Regexps are also supported if pre-compiled from the code, not from the CLI options as raw strings.
NamespacePattern = Union[str, Pattern]
# A specific really existing addressable namespace (at least, the one assumed to be so).
# Made as a NewType for stricter type-checking to avoid collisions with patterns and other strings.
NamespaceName = NewType('NamespaceName', str)
# A namespace reference usable in the API calls. `None` means cluster-wide API calls.
Namespace = Optional[NamespaceName]
def select_specific_namespaces(patterns: Iterable[NamespacePattern]) -> Collection[NamespaceName]:
"""
Select the namespace specifications that can be used as direct namespaces.
It is used in a fallback scenario when the namespace observation is either
disabled or not possible due to restricted permission, while the normal
operation is still possible in the very specific configured namespaces.
"""
return {
NamespaceName(pattern)
for pattern in patterns
if isinstance(pattern, str) # excl. regexps & etc.
if not('!' in pattern or '*' in pattern or '?' in pattern or ',' in pattern)
}
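# For example (values are illustrative):
#   select_specific_namespaces(["default", "team-*", "!kube-system", "ns1,ns2"])
#   -> {"default"}   # globs, negations, and comma-lists cannot be addressed directly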
def match_namespace(name: NamespaceName, pattern: NamespacePattern) -> bool:
"""
Check if the specific namespace matches a namespace specification.
Each individual namespace pattern is a string that follows some syntax:
* the pattern consists of comma-separated parts (spaces are ignored);
* each part is either an inclusive or an exclusive (negating) glob;
* each glob can have ``*`` and ``?`` placeholders for any or one symbols;
* the exclusive globs start with ``!``;
    * if the first glob is exclusive, then a preceding catch-all is implied.
A check of whether a namespace matches the individual pattern, is done by
iterating the pattern's globs left-to-right: the exclusive patterns exclude
it from the match; the first inclusive pattern does the initial match, while
the following inclusive patterns only re-match it if it was excluded before;
i.e., they do not do the full initial match.
For example, the pattern ``"myapp-*, !*-pr-*, *pr-123"``
will match ``myapp-test``, ``myapp-live``, even ``myapp-pr-123``,
but not ``myapp-pr-456`` and certainly not ``otherapp-pr-123``.
The latter one, despite it matches the last glob, is not included
because it was not matched by the initial pattern.
On the other hand, the pattern ``"!*-pr-*, *pr-123"``
(equivalent to ``"*, !*-pr-*, *pr-123"``) will match ``myapp-test``,
``myapp-live``, ``myapp-pr-123``, ``anyapp-anything``,
and even ``otherapp-pr-123`` -- though not ``myapp-pr-456``.
Unlike in the first example, the otherapp's namespace was included initially
by the first glob (the implied ``*``), and therefore could be re-matched
by the last glob ``*pr-123`` after being excluded by ``!*-pr-*``.
While these are theoretical capabilities of this pattern-matching algorithm,
it is not expected that they will be abused too much. The main intention is
to have simple one-glob patterns (either inclusive or exclusive),
only rarely followed by a single negation.
"""
# Regexps are powerful enough on their own -- we do not parse or interpret them.
if isinstance(pattern, re.Pattern):
return bool(pattern.fullmatch(name))
# The first pattern should be an inclusive one. Unless it is, prepend a catch-all pattern.
globs = [glob.strip() for glob in pattern.split(',')]
if not globs or globs[0].startswith('!'):
globs.insert(0, '*')
# Iterate and calculate: every inclusive pattern makes the namespace to match regardless,
# of the previous result; every exclusive pattern un-matches it if it was matched before.
matches = first_match = fnmatch.fnmatch(name, globs[0])
for glob in globs[1:]:
if glob.startswith('!'):
matches = matches and not fnmatch.fnmatch(name, glob.lstrip('!'))
else:
matches = matches or (first_match and fnmatch.fnmatch(name, glob))
return matches
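# A few concrete outcomes of the examples from the docstring above:
#   match_namespace(NamespaceName("myapp-pr-123"), "myapp-*, !*-pr-*, *pr-123")    -> True
#   match_namespace(NamespaceName("myapp-pr-456"), "myapp-*, !*-pr-*, *pr-123")    -> False
#   match_namespace(NamespaceName("otherapp-pr-123"), "myapp-*, !*-pr-*, *pr-123") -> False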
# Detect conventional API versions for some cases: e.g. in "myresources.v1alpha1.example.com".
# Non-conventional versions are indistinguishable from API groups ("myresources.foo1.example.com").
# See also: https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/
K8S_VERSION_PATTERN = re.compile(r'^v\d+(?:(?:alpha|beta)\d+)?$')
@dataclasses.dataclass(frozen=True, eq=False, repr=False)
class Resource:
"""
A reference to a very specific custom or built-in resource kind.
It is used to form the K8s API URLs. Generally, K8s API only needs
an API group, an API version, and a plural name of the resource.
All other names are remembered to match against resource selectors,
for logging, and for informational purposes.
"""
group: str
version: str
plural: str
kind: Optional[str] = None
singular: Optional[str] = None
shortcuts: Collection[str] = ()
categories: Collection[str] = ()
subresources: Collection[str] = ()
namespaced: Optional[bool] = None
preferred: bool = True # against conventions, but makes versionless selectors match by default.
verbs: Collection[str] = ()
def __hash__(self) -> int:
return hash((self.group, self.version, self.plural))
def __eq__(self, other: object) -> bool:
if isinstance(other, Resource):
self_tuple = (self.group, self.version, self.plural)
other_tuple = (other.group, other.version, other.plural)
return self_tuple == other_tuple
else:
return NotImplemented
def __repr__(self) -> str:
plural_main, *subs = self.plural.split('/')
name_text = f'{plural_main}.{self.version}.{self.group}'.strip('.')
subs_text = f'/{"/".join(subs)}' if subs else ''
return f'{name_text}{subs_text}'
# Mostly for tests, to be used as `@kopf.on.event(*resource, ...)`
def __iter__(self) -> Iterator[str]:
return iter((self.group, self.version, self.plural))
@property
def name(self) -> str:
return f'{self.plural}.{self.group}'.strip('.')
@property
def api_version(self) -> str:
# Strip heading/trailing slashes if group is absent (e.g. for pods).
return f'{self.group}/{self.version}'.strip('/')
def get_url(
self,
*,
server: Optional[str] = None,
namespace: Namespace = None,
name: Optional[str] = None,
subresource: Optional[str] = None,
params: Optional[Mapping[str, str]] = None,
) -> str:
if subresource is not None and name is None:
raise ValueError("Subresources can be used only with specific resources by their name.")
if not self.namespaced and namespace is not None:
raise ValueError(f"Specific namespaces are not supported for cluster-scoped resources.")
if self.namespaced and namespace is None and name is not None:
raise ValueError("Specific namespaces are required for specific namespaced resources.")
return self._build_url(server, params, [
'/api' if self.group == '' and self.version == 'v1' else '/apis',
self.group,
self.version,
'namespaces' if self.namespaced and namespace is not None else None,
namespace if self.namespaced and namespace is not None else None,
self.plural,
name,
subresource,
])
def get_version_url(
self,
*,
server: Optional[str] = None,
params: Optional[Mapping[str, str]] = None,
) -> str:
return self._build_url(server, params, [
'/api' if self.group == '' and self.version == 'v1' else '/apis',
self.group,
self.version,
])
def _build_url(
self,
server: Optional[str],
params: Optional[Mapping[str, str]],
parts: List[Optional[str]],
) -> str:
query = urllib.parse.urlencode(params, encoding='utf-8') if params else ''
path = '/'.join([part for part in parts if part])
url = path + ('?' if query else '') + query
return url if server is None else server.rstrip('/') + '/' + url.lstrip('/')
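    # Illustrative URLs produced by get_url() for hypothetical resources:
    #   Resource('kopf.dev', 'v1', 'kopfexamples', namespaced=True)
    #       .get_url(namespace=NamespaceName('default'), name='mine')
    #       -> '/apis/kopf.dev/v1/namespaces/default/kopfexamples/mine'
    #   Resource('', 'v1', 'pods', namespaced=True).get_url(namespace=NamespaceName('kube-system'))
    #       -> '/api/v1/namespaces/kube-system/pods'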
class Marker(enum.Enum):
"""
A special marker to handle all resources possible, built-in and custom.
"""
EVERYTHING = enum.auto()
# An explicit catch-all marker for positional arguments of resource selectors.
EVERYTHING = Marker.EVERYTHING
@dataclasses.dataclass(frozen=True)
class Selector:
"""
A resource specification that can match several resource kinds.
The resource specifications are not usable in K8s API calls, as the API
has no endpoints with masks or placeholders for unknown or catch-all
resource identifying parts (e.g. any API group, any API version, any name).
They are used only locally in the operator to match against the actual
resources with specific names (:class:`Resource`). The handlers are
defined with resource specifications, but are invoked with specific
resource kinds. Even if those specifications look very concrete and allow
no variations, they still remain specifications.
"""
arg1: dataclasses.InitVar[Union[None, str, Marker]] = None
arg2: dataclasses.InitVar[Union[None, str, Marker]] = None
arg3: dataclasses.InitVar[Union[None, str, Marker]] = None
argN: dataclasses.InitVar[None] = None # a runtime guard against too many positional arguments
group: Optional[str] = None
version: Optional[str] = None
kind: Optional[str] = None
plural: Optional[str] = None
singular: Optional[str] = None
shortcut: Optional[str] = None
category: Optional[str] = None
any_name: Optional[Union[str, Marker]] = None
def __post_init__(
self,
arg1: Union[None, str, Marker],
arg2: Union[None, str, Marker],
arg3: Union[None, str, Marker],
argN: None, # a runtime guard against too many positional arguments
) -> None:
# Since the class is frozen & read-only, post-creation field adjustment is done via a hack.
# This is the same hack as used in the frozen dataclasses to initialise their fields.
if argN is not None:
raise TypeError("Too many positional arguments. Max 3 positional args are accepted.")
elif arg3 is not None:
object.__setattr__(self, 'group', arg1)
object.__setattr__(self, 'version', arg2)
object.__setattr__(self, 'any_name', arg3)
elif arg2 is not None and isinstance(arg1, str) and '/' in arg1:
object.__setattr__(self, 'group', arg1.rsplit('/', 1)[0])
object.__setattr__(self, 'version', arg1.rsplit('/')[-1])
object.__setattr__(self, 'any_name', arg2)
elif arg2 is not None and arg1 == 'v1':
object.__setattr__(self, 'group', '')
object.__setattr__(self, 'version', arg1)
object.__setattr__(self, 'any_name', arg2)
elif arg2 is not None:
object.__setattr__(self, 'group', arg1)
object.__setattr__(self, 'any_name', arg2)
elif arg1 is not None and isinstance(arg1, Marker):
object.__setattr__(self, 'any_name', arg1)
elif arg1 is not None and '.' in arg1 and K8S_VERSION_PATTERN.match(arg1.split('.')[1]):
if len(arg1.split('.')) >= 3:
object.__setattr__(self, 'group', arg1.split('.', 2)[2])
object.__setattr__(self, 'version', arg1.split('.')[1])
object.__setattr__(self, 'any_name', arg1.split('.')[0])
elif arg1 is not None and '.' in arg1:
object.__setattr__(self, 'group', arg1.split('.', 1)[1])
object.__setattr__(self, 'any_name', arg1.split('.')[0])
elif arg1 is not None:
object.__setattr__(self, 'any_name', arg1)
# Verify that explicit & interpreted arguments have produced an unambiguous specification.
names = [self.kind, self.plural, self.singular, self.shortcut, self.category, self.any_name]
clean = [name for name in names if name is not None]
if len(clean) > 1:
raise TypeError(f"Ambiguous resource specification with names {clean}")
if len(clean) < 1:
raise TypeError(f"Unspecific resource with no names.")
# For reasons unknown, the singular is empty for ALL builtin resources. This does not affect
# the checks unless defined as e.g. ``singular=""``, which would match ALL builtins at once.
# Thus we prohibit it until clarified why is it so, what does it mean, how to deal with it.
if any([name == '' for name in names]):
raise TypeError("Names must not be empty strings; either None or specific strings.")
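    # A sketch of how the positional arguments are interpreted (illustrative
    # examples derived from the branches above; the resource names are hypothetical):
    #   Selector('apps', 'v1', 'deployments')  -> group='apps', version='v1', any_name='deployments'
    #   Selector('apps/v1', 'deployments')     -> group='apps', version='v1', any_name='deployments'
    #   Selector('v1', 'pods')                 -> group='', version='v1', any_name='pods'
    #   Selector('kopf.dev', 'kopfexamples')   -> group='kopf.dev', any_name='kopfexamples'
    #   Selector('pods.v1')                    -> version='v1', any_name='pods'
    #   Selector('kopfexamples.kopf.dev')      -> group='kopf.dev', any_name='kopfexamples'
    #   Selector(EVERYTHING)                   -> any_name=Marker.EVERYTHING (catch-all)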
def __repr__(self) -> str:
kwargs = {f.name: getattr(self, f.name) for f in dataclasses.fields(self)}
kwtext = ', '.join([f'{key!s}={val!r}' for key, val in kwargs.items() if val is not None])
clsname = self.__class__.__name__
return f'{clsname}({kwtext})'
@property
def is_specific(self) -> bool:
return (self.kind is not None or
self.shortcut is not None or
self.plural is not None or
self.singular is not None or
(self.any_name is not None and not isinstance(self.any_name, Marker)))
def check(self, resource: Resource) -> bool:
"""
Check if a specific resources matches this resource specification.
"""
# Core v1 events are excluded from EVERYTHING: they are implicitly produced during handling,
# and thus trigger unnecessary handling cycles (even for other resources, not for events).
return (
(self.group is None or self.group == resource.group) and
((self.version is None and resource.preferred) or self.version == resource.version) and
(self.kind is None or self.kind == resource.kind) and
(self.plural is None or self.plural == resource.plural) and
(self.singular is None or self.singular == resource.singular) and
(self.category is None or self.category in resource.categories) and
(self.shortcut is None or self.shortcut in resource.shortcuts) and
(self.any_name is None or
self.any_name == resource.kind or
self.any_name == resource.plural or
self.any_name == resource.singular or
self.any_name in resource.shortcuts or
(self.any_name is Marker.EVERYTHING and
not EVENTS.check(resource) and
not EVENTS_K8S.check(resource))))
def select(self, resources: Collection[Resource]) -> Collection[Resource]:
result = {resource for resource in resources if self.check(resource)}
# Core v1 API group's priority is hard-coded in K8s and kubectl. Do the same. For example:
# whenever "pods" is specified, and "pods.v1" & "pods.v1beta1.metrics.k8s.io" are found,
# implicitly give priority to "v1" and hide the existence of non-"v1" groups.
# But not if they are specified by categories! -- In that case, keep all resources as is.
if self.is_specific:
v1only = {resource for resource in result if resource.group == ''}
result = v1only or result
return result
# Some predefined API endpoints that we use in the framework itself (not exposed to the operators).
# Note: the CRDs are versionless: we do not look into its ``spec`` stanza, we only watch for
# the fact of changes, so the schema does not matter, any cluster-preferred API version would work.
# Note: the peering resources are either zalando.org/v1 or kopf.dev/v1; both cannot co-exist because
# they would share the names, so K8s will not let this. It is done for domain name transitioning.
CRDS = Selector('apiextensions.k8s.io', 'customresourcedefinitions')
EVENTS = Selector('v1', 'events')
EVENTS_K8S = Selector('events.k8s.io', 'events') # only for exclusion from EVERYTHING
NAMESPACES = Selector('v1', 'namespaces')
CLUSTER_PEERINGS = Selector('clusterkopfpeerings')
NAMESPACED_PEERINGS = Selector('kopfpeerings')
class Backbone(Mapping[Selector, Resource]):
"""
Actual resources used in the core (reactor & engines) of the framework.
Why? The codebase only refers to the resources by API group/version & names.
The actual resources can be different in different clusters, usually due
to different versions: e.g. "v1" vs. "v1beta1" for CRDs.
The actual backbone resources are detected in the initial cluster scanning
during the operator startup in :func:`resource_scanner`.
The backbone resources cannot be changed at runtime after they are found
for the first time -- since the core tasks are already started with those
resource definitions, and cannot be easily restarted.
This does not apply to the resources of the operator (not the framework!),
where the resources can be created, changed, and deleted at runtime easily.
"""
def __init__(self) -> None:
super().__init__()
self._items: MutableMapping[Selector, Resource] = {}
self._revised = asyncio.Condition()
self.selectors = [NAMESPACES, EVENTS, CRDS, CLUSTER_PEERINGS, NAMESPACED_PEERINGS]
def __len__(self) -> int:
return len(self._items)
def __iter__(self) -> Iterator[Selector]:
return iter(self._items)
def __getitem__(self, item: Selector) -> Resource:
return self._items[item]
async def fill(
self,
*,
resources: Iterable[Resource],
) -> None:
async with self._revised:
for resource in resources:
for spec in self.selectors:
if spec not in self._items:
if spec.check(resource):
self._items[spec] = resource
self._revised.notify_all()
async def wait_for(
self,
selector: Selector,
) -> Resource:
"""
Wait for the actual resource to be found in the cluster scanning.
The resources can be cached in-memory. Once the resource is retrieved,
it never changes in memory even if it changes in the cluster. This is
intentional -- to match with the nature of the cluster scanning,
which waits for the resources and then starts background jobs,
which are not easy to terminate without terminating the whole operator.
"""
async with self._revised:
await self._revised.wait_for(lambda: selector in self)
return self[selector]
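# A minimal usage sketch of Backbone (hypothetical snippet, not part of the
# public API): the cluster scanner fills it with the resources it discovers,
# while core tasks wait for the ones they need.
#
#   backbone = Backbone()
#   await backbone.fill(resources=scanned_resources)   # in the resource scanner
#   namespaces = await backbone.wait_for(NAMESPACES)   # in a core task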
@dataclasses.dataclass(frozen=True)
class Insights:
"""
Actual resources & namespaces served by the operator.
"""
namespaces: Set[Namespace] = dataclasses.field(default_factory=set)
resources: Set[Resource] = dataclasses.field(default_factory=set)
backbone: Backbone = dataclasses.field(default_factory=Backbone)
# Signalled when anything changes in the insights.
revised: asyncio.Condition = dataclasses.field(default_factory=asyncio.Condition)
# The flags that are set after the initial listing is finished. Not cleared afterwards.
ready_namespaces: asyncio.Event = dataclasses.field(default_factory=asyncio.Event)
ready_resources: asyncio.Event = dataclasses.field(default_factory=asyncio.Event)
| 44.725446 | 118 | 0.658182 |
4a222722b18e220e523b12f68d6b5143f3d55a84 | 1,214 | py | Python | src/recognition/recognize.py | amaanabbasi/LicensePlateDetectionRecognition | 5b44d0cab8d084dbfa7af6a4609c062bbc8c1935 | [
"CNRI-Python"
] | 2 | 2020-01-22T13:24:11.000Z | 2020-04-24T08:03:14.000Z | src/recognition/recognize.py | amaanabbasi/LicensePlateDetectionRecognition | 5b44d0cab8d084dbfa7af6a4609c062bbc8c1935 | [
"CNRI-Python"
] | 5 | 2020-01-28T23:15:25.000Z | 2022-02-10T01:24:45.000Z | src/recognition/recognize.py | amaanabbasi/LicensePlateDetectionRecognition | 5b44d0cab8d084dbfa7af6a4609c062bbc8c1935 | [
"CNRI-Python"
] | 1 | 2020-02-03T16:17:24.000Z | 2020-02-03T16:17:24.000Z | from keras.models import load_model
import cv2
import numpy as np
import matplotlib.pyplot as plt
from keras.models import model_from_json
def preprocess_img(img, flag=0):
"""
Takes in a character image, convert to gray, 28x28, add dimensions acc to
model input.
"""
# print(img.shape)
# img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# img= np.array(img)
img = cv2.resize(img, (28, 28))
if flag:
img[img < 15] = 0
img[img > 15] = 1
# img = np.expand_dims(img, axis=0)
# img = np.expand_dims(img, axis=4)
return img
def recognize_chracters(segmented_characters, t_name, t_value):
t = {}
detected_plate = []
j = 0
for segmented in segmented_characters:
segmented = preprocess_img(segmented, 1)
cv2.imwrite("segmented/" + str(j) + ".jpg", segmented)
j += 1
# cv2.waitKey(0)
# cv2.destroyAllWindows()
for i in range(len(t_name)):
template = t_value[i]
template = preprocess_img(template, 0)
diff = (template - segmented).mean()
t[t_name[i]] = diff
detected_plate.append(min(t, key=t.get))
return detected_plate
| 24.77551 | 77 | 0.603789 |
4a22287a78aa56848bfe57266b9f7d39a6105f2a | 1,663 | py | Python | tests/grammpy_test/oldapi_tests/term-nonterm-grammar-handling_tests/TerminalGetTest.py | PatrikValkovic/grammpy | 8308a1fd349bf9ea0d267360cc9a4ab20d1629e8 | [
"MIT"
] | 1 | 2021-02-04T12:41:08.000Z | 2021-02-04T12:41:08.000Z | tests/grammpy_test/oldapi_tests/term-nonterm-grammar-handling_tests/TerminalGetTest.py | PatrikValkovic/grammpy | 8308a1fd349bf9ea0d267360cc9a4ab20d1629e8 | [
"MIT"
] | 3 | 2017-07-08T16:28:52.000Z | 2020-04-23T18:06:24.000Z | tests/grammpy_test/oldapi_tests/term-nonterm-grammar-handling_tests/TerminalGetTest.py | PatrikValkovic/grammpy | 8308a1fd349bf9ea0d267360cc9a4ab20d1629e8 | [
"MIT"
] | 1 | 2021-02-04T12:41:10.000Z | 2021-02-04T12:41:10.000Z | #!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 03.08.2017 12:28
:Licence MIT
Part of grammpy
"""
from unittest import TestCase, main
from grammpy.old_api import Grammar
class TempClass:
pass
class TerminalGetTest(TestCase):
def test_getTermEmpty(self):
gr = Grammar()
self.assertIsNone(gr.get_term(TempClass))
self.assertIsNone(gr.get_term(1))
self.assertIsNone(gr.get_term('asdf'))
def test_getTermClass(self):
gr = Grammar()
gr.add_term(TempClass)
self.assertEqual(gr.get_term(TempClass).s, TempClass)
def test_getTermArray(self):
gr = Grammar()
gr.add_term([TempClass, 0, 'asdf'])
g = gr.get_term([0, 'asdf'])
for i in g:
self.assertTrue(i.s in [TempClass, 0, 'asdf'])
self.assertEqual(g[0].s, 0)
self.assertEqual(g[1].s, 'asdf')
def test_dontGetTermArray(self):
gr = Grammar()
gr.add_term([TempClass, 0, 'asdf'])
g = gr.get_term([TempClass, 'a'])
self.assertEqual(g[0].s, TempClass)
self.assertIsNone(g[1])
def test_getTermTuple(self):
gr = Grammar()
gr.add_term([TempClass, 0, 'asdf'])
g = gr.get_term((0, 'asdf'))
for i in g:
self.assertTrue(i.s in [TempClass, 0, 'asdf'])
self.assertEqual(g[0].s, 0)
self.assertEqual(g[1].s, 'asdf')
def test_dontGetTermTuple(self):
gr = Grammar()
gr.add_term([TempClass, 0, 'asdf'])
g = gr.get_term((TempClass, 'a'))
self.assertEqual(g[0].s, TempClass)
self.assertIsNone(g[1])
if __name__ == '__main__':
main() | 26.396825 | 61 | 0.593506 |
4a2229205d2e6e931c93ab61d2fc81607972de96 | 3,919 | py | Python | shop/models/ordermodel.py | bennylope/django-shop | 7e7cd743773405f193abefdb8aa30f28b17d71cd | [
"BSD-3-Clause"
] | 1 | 2015-03-23T20:40:39.000Z | 2015-03-23T20:40:39.000Z | shop/models/ordermodel.py | bennylope/django-shop | 7e7cd743773405f193abefdb8aa30f28b17d71cd | [
"BSD-3-Clause"
] | null | null | null | shop/models/ordermodel.py | bennylope/django-shop | 7e7cd743773405f193abefdb8aa30f28b17d71cd | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from distutils.version import LooseVersion
from django.conf import settings
from django.db import models
from django.db.models.signals import pre_delete
from django.utils.translation import ugettext_lazy as _
from shop.models.productmodel import Product
from shop.util.fields import CurrencyField
from shop.util.loader import load_class
import django
#==============================================================================
# Extensibility
#==============================================================================
# This overrides the various models with classes loaded from the corresponding
# setting if it exists.
# Order model
ORDER_MODEL = getattr(settings, 'SHOP_ORDER_MODEL',
'shop.models.defaults.order.Order')
Order = load_class(ORDER_MODEL, 'SHOP_ORDER_MODEL')
# Order item model
ORDERITEM_MODEL = getattr(settings, 'SHOP_ORDERITEM_MODEL',
'shop.models.defaults.orderitem.OrderItem')
OrderItem = load_class(ORDERITEM_MODEL, 'SHOP_ORDERITEM_MODEL')
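# For example (hypothetical settings; the dotted paths are illustrative only),
# a project can swap in its own classes from its settings.py:
#
#   SHOP_ORDER_MODEL = 'myshop.models.CustomOrder'
#   SHOP_ORDERITEM_MODEL = 'myshop.models.CustomOrderItem'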
# Now we clear the reference to the product from every OrderItem
def clear_products(sender, instance, using, **kwargs):
for oi in OrderItem.objects.filter(product=instance):
oi.product = None
oi.save()
if LooseVersion(django.get_version()) < LooseVersion('1.3'):
pre_delete.connect(clear_products, sender=Product)
class OrderExtraInfo(models.Model):
"""
A holder for extra textual information to attach to this order.
"""
order = models.ForeignKey(Order, related_name="extra_info",
verbose_name=_('Order'))
text = models.TextField(verbose_name=_('Extra info'))
class Meta(object):
app_label = 'shop'
verbose_name = _('Order extra info')
verbose_name_plural = _('Order extra info')
class ExtraOrderPriceField(models.Model):
"""
This will make Cart-provided extra price fields persistent since we want
to "snapshot" their statuses at the time when the order was made
"""
order = models.ForeignKey(Order, verbose_name=_('Order'))
label = models.CharField(max_length=255, verbose_name=_('Label'))
value = CurrencyField(verbose_name=_('Amount'))
# Does this represent shipping costs?
is_shipping = models.BooleanField(default=False, editable=False,
verbose_name=_('Is shipping'))
class Meta(object):
app_label = 'shop'
verbose_name = _('Extra order price field')
verbose_name_plural = _('Extra order price fields')
class ExtraOrderItemPriceField(models.Model):
"""
This will make Cart-provided extra price fields persistent since we want
to "snapshot" their statuses at the time when the order was made
"""
order_item = models.ForeignKey(OrderItem, verbose_name=_('Order item'))
label = models.CharField(max_length=255, verbose_name=_('Label'))
value = CurrencyField(verbose_name=_('Amount'))
class Meta(object):
app_label = 'shop'
verbose_name = _('Extra order item price field')
verbose_name_plural = _('Extra order item price fields')
class OrderPayment(models.Model):
"""
A class to hold basic payment information. Backends should define their own
    more complex payment types should they need to store more information
"""
order = models.ForeignKey(Order, verbose_name=_('Order'))
# How much was paid with this particular transfer
amount = CurrencyField(verbose_name=_('Amount'))
transaction_id = models.CharField(max_length=255,
verbose_name=_('Transaction ID'),
help_text=_("The transaction processor's reference"))
payment_method = models.CharField(max_length=255,
verbose_name=_('Payment method'),
            help_text=_("The payment backend used to process the purchase"))
class Meta(object):
app_label = 'shop'
verbose_name = _('Order payment')
verbose_name_plural = _('Order payments')
| 36.971698 | 79 | 0.684358 |
4a222a507050bf6a7c629e71f28f38508e0146a2 | 3,758 | py | Python | core/dbt/task/seed.py | f1fe/dbt | e943b9fc842535e958ef4fd0b8703adc91556bc6 | [
"Apache-2.0"
] | 3,156 | 2017-03-05T09:59:23.000Z | 2021-06-30T01:27:52.000Z | core/dbt/task/seed.py | f1fe/dbt | e943b9fc842535e958ef4fd0b8703adc91556bc6 | [
"Apache-2.0"
] | 2,608 | 2017-02-27T15:39:40.000Z | 2021-06-30T01:49:20.000Z | core/dbt/task/seed.py | f1fe/dbt | e943b9fc842535e958ef4fd0b8703adc91556bc6 | [
"Apache-2.0"
] | 693 | 2017-03-13T03:04:49.000Z | 2021-06-25T15:57:41.000Z | import random
from .run import ModelRunner, RunTask
from .printer import (
print_run_end_messages,
)
from dbt.contracts.results import RunStatus
from dbt.exceptions import InternalException
from dbt.graph import ResourceTypeSelector
from dbt.logger import TextOnly
from dbt.events.functions import fire_event
from dbt.events.types import (
SeedHeader, SeedHeaderSeperator, EmptyLine, PrintSeedErrorResultLine,
PrintSeedResultLine, PrintStartLine
)
from dbt.node_types import NodeType
from dbt.contracts.results import NodeStatus
class SeedRunner(ModelRunner):
def describe_node(self):
return "seed file {}".format(self.get_node_representation())
def before_execute(self):
fire_event(
PrintStartLine(
description=self.describe_node(),
index=self.node_index,
total=self.num_nodes,
report_node_data=self.node
)
)
def _build_run_model_result(self, model, context):
result = super()._build_run_model_result(model, context)
agate_result = context['load_result']('agate_table')
result.agate_table = agate_result.table
return result
def compile(self, manifest):
return self.node
def print_result_line(self, result):
model = result.node
if result.status == NodeStatus.Error:
fire_event(
PrintSeedErrorResultLine(
status=result.status,
index=self.node_index,
total=self.num_nodes,
execution_time=result.execution_time,
schema=self.node.schema,
relation=model.alias,
report_node_data=model
)
)
else:
fire_event(
PrintSeedResultLine(
status=result.message,
index=self.node_index,
total=self.num_nodes,
execution_time=result.execution_time,
schema=self.node.schema,
relation=model.alias,
report_node_data=model
)
)
class SeedTask(RunTask):
def defer_to_manifest(self, adapter, selected_uids):
# seeds don't defer
return
def raise_on_first_error(self):
return False
def get_node_selector(self):
if self.manifest is None or self.graph is None:
raise InternalException(
'manifest and graph must be set to get perform node selection'
)
return ResourceTypeSelector(
graph=self.graph,
manifest=self.manifest,
previous_state=self.previous_state,
resource_types=[NodeType.Seed],
)
def get_runner_type(self, _):
return SeedRunner
def task_end_messages(self, results):
if self.args.show:
self.show_tables(results)
print_run_end_messages(results)
def show_table(self, result):
table = result.agate_table
rand_table = table.order_by(lambda x: random.random())
schema = result.node.schema
alias = result.node.alias
header = "Random sample of table: {}.{}".format(schema, alias)
with TextOnly():
fire_event(EmptyLine())
fire_event(SeedHeader(header=header))
fire_event(SeedHeaderSeperator(len_header=len(header)))
rand_table.print_table(max_rows=10, max_columns=None)
with TextOnly():
fire_event(EmptyLine())
def show_tables(self, results):
for result in results:
if result.status != RunStatus.Error:
self.show_table(result)
| 30.803279 | 78 | 0.608302 |
4a222d4f488663708af0e650c2c5eb01cec3e1e7 | 1,373 | py | Python | shred.py | Skeen/lodextract | 1e9cdc3aa41335b6d9a0a67949bb12205aceb167 | [
"Linux-OpenIB"
] | 10 | 2017-08-25T12:03:20.000Z | 2021-08-29T22:55:15.000Z | shred.py | Skeen/lodextract | 1e9cdc3aa41335b6d9a0a67949bb12205aceb167 | [
"Linux-OpenIB"
] | null | null | null | shred.py | Skeen/lodextract | 1e9cdc3aa41335b6d9a0a67949bb12205aceb167 | [
"Linux-OpenIB"
] | 10 | 2015-08-15T04:04:32.000Z | 2021-12-28T08:18:19.000Z | #!/usr/bin/env python
import numpy as np
from PIL import Image
import crcmod
import os
crc24_func = crcmod.mkCrcFun(0x1864CFBL) # polynomial from libgcrypt
def handle_img(inf, color):
with open(inf) as f:
im = Image.open(f)
pal = im.getpalette()
pixels = np.array(im)
if pal:
pal[765], pal[766], pal[767] = color
pixels[pixels > 7] = 255
im = Image.fromarray(pixels)
im.putpalette(pal)
else:
# non-palette pictures have no transparency
im = Image.new('RGB', im.size, color)
# in case we ever want to replace colors in rgb images:
#rc, gc, bc = pixels[:,:,0], pixels[:,:,1], pixels[:,:,2]
#mask = (rc == 0) & (gc == 255) & (bc == 255)
#pixels[:,:,:3][mask] = color
im.save(inf)
def main(inf):
print "processing %s"%inf
crc = crc24_func(inf)
r = crc>>16
g = (crc&0xff00)>>8
b = crc&0xff
color = r%255,g%255,b%255 # avoid hitting special values
if os.path.isdir(inf):
for fname in os.listdir(inf):
fname = os.path.join(inf,fname)
handle_img(fname, color)
else:
handle_img(inf, color)
return True
if __name__ == '__main__':
import sys
if len(sys.argv) != 2:
print "usage: %s indir/infile"
exit(0)
ret = main(sys.argv[1])
exit(0 if ret else 1)
| 26.921569 | 68 | 0.568099 |
4a222d589eaf029a5be7b78146c818eeae6b5393 | 16,799 | py | Python | py/acmacs_py/zero_do_3.py | acorg/acmacs-py | e0bf6ff7ecfe7332980d15b50f9b6dd6f6f78de1 | [
"MIT"
] | null | null | null | py/acmacs_py/zero_do_3.py | acorg/acmacs-py | e0bf6ff7ecfe7332980d15b50f9b6dd6f6f78de1 | [
"MIT"
] | null | null | null | py/acmacs_py/zero_do_3.py | acorg/acmacs-py | e0bf6ff7ecfe7332980d15b50f9b6dd6f6f78de1 | [
"MIT"
] | null | null | null | # 0do.py v3 support, e.g. ssm report custom
import sys, os, json, subprocess, pprint, traceback
from pathlib import Path
from typing import List, Union, Callable
import acmacs
# ======================================================================
def main():
def main_commands():
return [name for name, value in vars(sys.modules["__main__"]).items() if name[0] != "_" and name != "Path" and callable(value)]
def parse_command_line():
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--command-list", action='store_true', default=False)
parser.add_argument("--help-api", action='store_true', default=False)
parser.add_argument("command", nargs='?')
args = parser.parse_args()
if args.command_list:
print("\n".join(main_commands()))
exit(0)
if args.help_api:
help(Zd)
help(Painter)
help(Snapshot)
exit(0)
if args.command:
return args.command
else:
return main_commands()[0]
command = parse_command_line()
try:
cmd = getattr(sys.modules["__main__"], command)
zd = Zd(cmd)
return cmd(zd)
except Error as err:
print(f"> {err}", file=sys.stderr)
return 1
except Exception as err:
print(f"> {type(err)}: {err}\n{traceback.format_exc()}", file=sys.stderr)
return 2
# ======================================================================
class Painter (acmacs.ChartDraw):
subtype_lineage_to_mapi_name = {"H1": "h1pdm.mapi", "H3": "h3.mapi", "BVICTORIA": "bvic.mapi", "BYAMAGATA": "byam.mapi"}
subtype_lineage_to_mapi_key = {"H1": "loc:clade-155-156-A(H1N1)2009pdm", "H3": "loc:clades-A(H3N2)-all", "BVICTORIA": "loc:clades-B/Vic", "BYAMAGATA": "loc:clades-B/Yam"}
test_antigen_size = 10
reference_antigen_size = test_antigen_size * 1.5
serum_size = test_antigen_size * 1.5
grey = "#D0D0D0"
def __init__(self, chart: acmacs.Chart, mapi_filename: Path = None, mapi_key: str = None, legend_offset: List[float] = [-10, -10]):
super().__init__(chart)
self.mapi_filename = mapi_filename
self.mapi_key = mapi_key
self.draw_reset()
self.draw_mark_with_mapi()
self.legend(offset=legend_offset)
def make(self, pdf: Path, ace: Path = None, title: bool = True, open: bool = False):
if title:
self.title(lines=["{lab} {virus-type/lineage-subset} {assay-no-hi-cap} " + f"{self.chart().projection(0).stress(recalculate=True):.4f}"], remove_all_lines=True)
self.calculate_viewport()
self.draw(pdf, open=open)
print(f">>> {pdf}")
if ace:
self.chart().export(ace)
print(f">>> {ace}")
def relax(self):
self.projection().relax()
def draw_reset(self):
pchart = self.chart()
self.modify(pchart.select_antigens(lambda ag: ag.antigen.reference()), fill="transparent", outline=self.grey, outline_width=1, size=self.reference_antigen_size)
self.modify(pchart.select_antigens(lambda ag: not ag.antigen.reference()), fill=self.grey, outline=self.grey, outline_width=1, size=self.test_antigen_size)
self.modify(pchart.select_antigens(lambda ag: ag.passage.is_egg()), shape="egg")
self.modify(pchart.select_antigens(lambda ag: bool(ag.reassortant)), rotation=0.5)
self.modify(pchart.select_all_sera(), fill="transparent", outline=self.grey, outline_width=1, size=self.serum_size)
self.modify(pchart.select_sera(lambda sr: sr.passage.is_egg()), shape="uglyegg")
def draw_mark_with_mapi(self, mark_sera: bool = True, report: bool = False):
pchart = self.chart()
marked = {"ag": [], "sr": []}
for en in self.load_mapi():
selector = en["select"]
def clade_match(clade, clades):
if clade[0] != "!":
return clade in clades
else:
return clade[1:] not in clades
def sel_ag_sr(ag_sr):
good = True
if good and selector.get("sequenced"):
good = ag_sr.sequenced()
if good and (clade := selector.get("clade")):
good = clade_match(clade, ag_sr.clades())
if good and (clade_all := selector.get("clade-all")):
good = all(clade_match(clade, ag_sr.clades()) for clade in clade_all)
if good and (aas := selector.get("amino-acid") or selector.get("amino_acid")):
good = ag_sr.sequence_aa().matches_all(aas)
return good
def sel_ag(ag):
return sel_ag_sr(ag.antigen)
def sel_sr(sr):
return sel_ag_sr(sr.serum)
selected = pchart.select_antigens(sel_ag)
marked["ag"].append({"selected": selected, "selector": selector, "modify_args": en["modify_antigens"]})
self.modify(selected, **{k: v for k, v in en["modify_antigens"].items() if v})
if mark_sera:
selected = pchart.select_sera(sel_sr)
marked["sr"].append({"selected": selected, "selector": selector, "modify_args": en["modify_sera"]})
self.modify(selected, **{k: v for k, v in en["modify_sera"].items() if v})
def report_marked(marked, names_to_report):
if names_to_report:
for ag_sr in ["ag", "sr"]:
if marked[ag_sr]:
print(f'{ag_sr.upper()} ({len(marked[ag_sr])})')
for en in marked[ag_sr]:
print(f'{en["selected"].size():6d} {en["selector"]} {en["modify_args"]}')
# reported = en["selected"].report_list(format="{AG_SR} {no0} {full_name}") # [:max_names_to_report]
reported = en["selected"].report_list(format="{ag_sr} {no0:5d} {full_name}")[:names_to_report]
for rep in reported:
print(" ", rep)
if report:
report_marked(marked=marked, names_to_report=10)
def load_mapi(self):
subtype_lineage = self.chart().subtype_lineage()
mapi_filename = self.mapi_filename or Path(os.getcwd()).parents[1].joinpath(self.subtype_lineage_to_mapi_name.get(subtype_lineage, "unknown"))
print(f">>> loading mapi from {mapi_filename}")
if mapi_filename.exists():
if not self.mapi_key:
self.mapi_key = self.subtype_lineage_to_mapi_key.get(subtype_lineage)
print(f">>> mapi key {self.mapi_key}")
if self.mapi_key:
try:
data = json.load(mapi_filename.open())[self.mapi_key]
except json.decoder.JSONDecodeError as err:
raise ErrorJSON(mapi_filename, err)
def make_mapi_entry(en: dict) -> dict:
return {
"select": en["select"],
"modify_antigens": {
"fill": en.get("fill", "").replace("{clade-pale}", ""),
"outline": en.get("outline", "").replace("{clade-pale}", ""),
"outline_width": en.get("outline_width"),
"order": en.get("order"),
"legend": en.get("legend") and acmacs.PointLegend(format=en["legend"].get("label"), show_if_none_selected=en["legend"].get("show_if_none_selected")),
},
"modify_sera": {
"outline": en.get("fill", "").replace("{clade-pale}", ""),
"outline_width": 3,
},
}
mapi_data = [make_mapi_entry(en) for en in data if en.get("N") == "antigens"]
# pprint.pprint(mapi_data)
return mapi_data
return []
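    # A sketch of a mapi entry this loader expects (an assumed minimal example
    # based on the keys read above; real files may carry more fields):
    #   {"N": "antigens",
    #    "select": {"clade": "3C.2a1b", "sequenced": true},
    #    "fill": "#FF0000", "outline": "black", "outline_width": 1,
    #    "order": "raise",
    #    "legend": {"label": "2a1b", "show_if_none_selected": false}}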
# ======================================================================
class Snapshot:
def __init__(self):
self.filename = Path("snapshot.json")
if self.filename.exists():
self.data = json.load(self.filename.open())
else:
self.data = {"sections": []}
self.current_section = None
def __del__(self):
self.save()
self.generate_html()
def save(self):
json.dump(self.data, self.filename.open("w"), indent=2)
def section(self, cmd = None):
if cmd:
for sec in self.data["sections"]:
if sec["name"] == cmd.__name__:
sec["images"] = []
self.current_section = sec
if not self.current_section:
self.current_section = {"name": cmd.__name__, "doc": cmd.__doc__, "images": []}
self.data["sections"].append(self.current_section)
return self.current_section["name"]
def number_of_images(self) -> int:
return len(self.current_section["images"])
def generate_filename(self, ace: Path, infix: bool, infix2: str = None) -> tuple[Path, Path]:
s_infix = self.section()
if infix:
s_infix += f".{self.number_of_images():02d}"
if infix2:
s_infix += f".{infix2}"
prefix = Path(ace.name)
return prefix.with_suffix(f".{s_infix}.pdf"), prefix.with_suffix(f".{s_infix}.ace")
def add_image(self, pdf: Path, ace: Path):
self.current_section["images"].append({"pdf": str(pdf), "ace": str(ace)})
def generate_html(self):
pass
# ======================================================================
class Zd:
def __init__(self, cmd):
self.mapi_key = None
self.mapi_data = None
self.snapshot_data = Snapshot()
self.chart_filename = None
self.painter = None
self.export_ace = True
self.section(cmd)
def open(self, filename: Path, chart: acmacs.Chart = None, mapi_filename: Path = None, mapi_key: str = None, legend_offset: List[float] = [-10, -10], export_ace: bool = False, open_pdf: bool = False) -> Painter:
self.chart_filename = filename
if not chart:
chart = acmacs.Chart(filename)
chart.populate_from_seqdb()
self.painter = Painter(chart=chart, mapi_filename=mapi_filename, mapi_key=mapi_key, legend_offset=legend_offset)
self.snapshot(overwrite=False, export_ace=export_ace, open=open_pdf)
return self.painter
def section(self, cmd):
self.snapshot_data.section(cmd)
def snapshot(self, overwrite: bool = True, infix: bool = True, export_ace: bool = True, open: bool = False):
pdf, ace_filename = self.snapshot_data.generate_filename(ace=self.chart_filename, infix=infix)
if overwrite or not pdf.exists():
self.painter.make(pdf=pdf, ace=ace_filename if export_ace and self.export_ace else None, open=open)
self.snapshot_data.add_image(pdf=pdf, ace=ace_filename)
return ace_filename
def snapshot_procrustes(self, secondary: Path, threshold: float = 0.3, overwrite: bool = True, infix: bool = True, open: bool = False):
pdf, ace = self.snapshot_data.generate_filename(ace=self.chart_filename, infix=infix, infix2=f"pc-{secondary.stem}")
if overwrite or not pdf.exists():
secondary_chart = acmacs.Chart(secondary)
self.painter.procrustes_arrows(common=acmacs.CommonAntigensSera(self.painter.chart(), secondary_chart), secondary_chart=secondary_chart, threshold=threshold)
self.painter.make(pdf=pdf, title=False, open=open)
self.painter.remove_procrustes_arrows()
self.painter.title(remove_all_lines=True)
self.snapshot_data.add_image(pdf=pdf, ace=ace)
    def chart_merge(self, sources: List[Path], output_infix: str = None, match: str = "strict", incremental: bool = False, combine_cheating_assays: bool = True):
first_chart = acmacs.Chart(sources[0])
last_chart = acmacs.Chart(sources[-1])
output_filename = Path(f"{last_chart.subtype_lineage()[:4].lower()}-{last_chart.assay_rbc().lower()}-{last_chart.lab().lower()}-{first_chart.date().split('-')[0]}-{last_chart.date().split('-')[-1]}{output_infix or ''}.ace")
if not output_filename.exists():
subprocess.check_call(["chart-merge",
"--match", match,
"--merge-type", "incremental" if incremental else "simple",
"--combine-cheating-assays" if combine_cheating_assays else "--no-combine-cheating-assays",
"-o", str(output_filename),
*(str(src) for src in sources)])
print(f">>> {output_filename}")
return output_filename
def glob_bash(self, pattern) -> List[Path]:
"return [Path] by matching using bash, e.g. ~/ac/whocc-tables/h3-hint-cdc/h3-hint-cdc-{2020{0[4-9],1},2021}*.ace"
return sorted(Path(fn) for fn in subprocess.check_output(f"ls -1 {pattern}", text=True, shell=True).strip().split("\n"))
def relax(self, source_filename: Path, mcb: str="none", num_optimizations: int = 1000, num_dimensions: int = 2, keep_projections: int = 10, grid: bool = True,
reorient: Union[str, Path, acmacs.Chart] = None, incremental: bool = False, populate_seqdb: bool = False,
disconnect_antigens: Callable[[acmacs.SelectionDataAntigen], bool] = None, disconnect_sera: Callable[[acmacs.SelectionDataSerum], bool] = None,
              output_infix: str = None, slurm: bool = False):
        """disconnect_antigens, disconnect_sera: optional callables selecting the points to disconnect, e.g. lambda ag: ..."""
infix = output_infix or f"{mcb}-{num_optimizations//1000}k"
result_filename = source_filename.with_suffix(f".{infix}.ace")
if not result_filename.exists():
if slurm:
if incremental:
raise Error("relax incremental is not supported with slurm=True")
reorient_args = ["--reorient", str(reorient)] if reorient else []
grid_args = ["--grid"] if grid else []
no_draw_args = ["--no-draw"]
subprocess.check_call(["slurm-relax", *no_draw_args, "-o", str(result_filename), str(source_filename), "-n", str(num_optimizations), "-d", str(num_dimensions), "-m", mcb, "-k", str(keep_projections), *grid_args, *reorient_args])
else:
chart = acmacs.Chart(source_filename)
antigens_to_disconnect = sera_to_disconnect = None
if disconnect_antigens or disconnect_sera:
if incremental:
raise Error("relax incremental cannot handle disconnected points")
print(">>> disconnecting antigens/sera", file=sys.stderr)
antigens_to_disconnect = chart.select_antigens(disconnect_antigens, report=True) if disconnect_antigens else None
sera_to_disconnect = chart.select_sera(disconnect_sera, report=True) if disconnect_sera else None
if populate_seqdb:
chart.populate_from_seqdb()
print(f">>> relaxing chart {chart.description()} in {num_dimensions}d mcb:{mcb} {num_optimizations} times")
if incremental:
chart.relax_incremental(number_of_optimizations=num_optimizations, remove_source_projection=True)
else:
chart.relax(number_of_dimensions=num_dimensions, number_of_optimizations=num_optimizations, minimum_column_basis=mcb, disconnect_antigens=antigens_to_disconnect, disconnect_sera=sera_to_disconnect)
if grid:
chart.grid_test()
chart.keep_projections(keep_projections)
if reorient:
if isinstance(reorient, (str, Path)):
reorient = acmacs.Chart(reorient)
chart.orient_to(master=reorient)
chart.export(result_filename)
print(f">>> {result_filename}")
return result_filename
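# An illustrative 0do.py script built on this module (file, import and command
# names are hypothetical): module-level functions become commands dispatched by main().
#
#   def map_current(zd):
#       "draw the latest merge"
#       zd.open(Path("h3-hi-cdc.ace"), open_pdf=True)
#       zd.snapshot_procrustes(secondary=Path("previous-map.ace"))
#
#   main()   # picks the command from the command line, defaulting to the first one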
# ======================================================================
class Error (Exception):
pass
# ----------------------------------------------------------------------
class ErrorJSON (Error):
def __init__(self, filename: Union[str,Path], err: json.decoder.JSONDecodeError):
self.message = f"{filename}:{err.lineno}:{err.colno}: {err.msg}"
def __str__(self) -> str:
return self.message
# ----------------------------------------------------------------------
| 49.119883 | 244 | 0.57301 |