repo_name (string, 7-94 chars) | repo_path (string, 4-237 chars) | repo_head_hexsha (string, 40 chars) | content (string, 10-680k chars) | apis (string, 2-840k chars) |
---|---|---|---|---|
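Each row below pairs a source file (`content`) with a machine-extracted list of API calls (`apis`). The sketch here shows how one such `apis` cell could be decoded; the helper name and the field labels are inferred from the sample rows, not documented by the dataset, so treat them as assumptions.

import ast

def parse_apis_cell(cell):
    """Decode one `apis` cell, assuming it is a well-formed Python-literal list.

    Each entry appears to be a 7-tuple: (source span, fully-qualified API name,
    name as written at the call site, argument info, argument source text,
    a boolean flag, originating import statement). Field meanings are inferred.
    """
    records = []
    for entry in ast.literal_eval(cell):
        span, full_name, call_name, arg_info, arg_text, flag, import_stmt = entry
        records.append({
            "span": span,              # (start_line, start_col, end_line, end_col)
            "api": full_name,          # e.g. 'torch.utils.data.DataLoader'
            "call": call_name,         # e.g. 'DataLoader'
            "args": arg_info,
            "arg_text": arg_text,
            "flag": flag,
            "import": import_stmt.strip(),
        })
    return records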
mark-nicholson/python-editline | examples/elCmd.py | c23f1071c4b832a92f66e2f49142e5c5f00e500d | """A generic class to build line-oriented command interpreters.
Interpreters constructed with this class obey the following conventions:
1. End of file on input is processed as the command 'EOF'.
2. A command is parsed out of each line by collecting the prefix composed
of characters in the identchars member.
3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method
is passed a single argument consisting of the remainder of the line.
4. Typing an empty line repeats the last command. (Actually, it calls the
method `emptyline', which may be overridden in a subclass.)
5. There is a predefined `help' method. Given an argument `topic', it
calls the command `help_topic'. With no arguments, it lists all topics
with defined help_ functions, broken into up to three topics; documented
commands, miscellaneous help topics, and undocumented commands.
6. The command '?' is a synonym for `help'. The command '!' is a synonym
for `shell', if a do_shell method exists.
7. If completion is enabled, completing commands will be done automatically,
and completion of command args is done by calling complete_foo() with
arguments text, line, begidx, endidx. text is the string we are matching
against; all returned matches must begin with it. line is the current
input line (lstripped), begidx and endidx are the beginning and end
indexes of the text being matched, which could be used to provide
different completion depending upon which position the argument is in.
The `default' method may be overridden to intercept commands for which there
is no do_ method.
The `completedefault' method may be overridden to intercept completions for
commands that have no complete_ method.
The data member `self.ruler' sets the character used to draw separator lines
in the help messages. If empty, no ruler line is drawn. It defaults to "=".
If the value of `self.intro' is nonempty when the cmdloop method is called,
it is printed out on interpreter startup. This value may be overridden
via an optional argument to the cmdloop() method.
The data members `self.doc_header', `self.misc_header', and
`self.undoc_header' set the headers used for the help function's
listings of documented functions, miscellaneous topics, and undocumented
functions respectively.
"""
import string, sys
__all__ = ["Cmd"]
PROMPT = '(Cmd) '
IDENTCHARS = string.ascii_letters + string.digits + '_'
class ElCmd:
"""A simple framework for writing line-oriented command interpreters.
These are often useful for test harnesses, administrative tools, and
prototypes that will later be wrapped in a more sophisticated interface.
A Cmd instance or subclass instance is a line-oriented interpreter
framework. There is no good reason to instantiate Cmd itself; rather,
it's useful as a superclass of an interpreter class you define yourself
in order to inherit Cmd's methods and encapsulate action methods.
"""
prompt = PROMPT
identchars = IDENTCHARS
ruler = '='
lastcmd = ''
intro = None
doc_leader = ""
doc_header = "Documented commands (type help <topic>):"
misc_header = "Miscellaneous help topics:"
undoc_header = "Undocumented commands:"
nohelp = "*** No help on %s"
use_rawinput = False
def __init__(self, completekey='tab', stdin=None, stdout=None):
"""Instantiate a line-oriented interpreter framework.
The optional argument 'completekey' is the readline name of a
completion key; it defaults to the Tab key. If completekey is
not None and the readline module is available, command completion
is done automatically. The optional arguments stdin and stdout
specify alternate input and output file objects; if not specified,
sys.stdin and sys.stdout are used.
"""
if stdin is not None:
self.stdin = stdin
else:
self.stdin = sys.stdin
if stdout is not None:
self.stdout = stdout
else:
self.stdout = sys.stdout
self.cmdqueue = []
self.completekey = completekey
if not self.use_rawinput and self.completekey:
try:
import editline
self.editline = editline.editline("CMD",
self.stdin, self.stdout, sys.stderr)
self.editline.rl_completer = self.complete
except ImportError:
print("Failed to import editline")
pass
def cmdloop(self, intro=None):
"""Repeatedly issue a prompt, accept input, parse an initial prefix
off the received input, and dispatch to action methods, passing them
the remainder of the line as argument.
"""
self.preloop()
try:
if intro is not None:
self.intro = intro
if self.intro:
self.stdout.write(str(self.intro)+"\n")
stop = None
while not stop:
if self.cmdqueue:
line = self.cmdqueue.pop(0)
else:
if self.use_rawinput:
try:
line = input(self.prompt)
except EOFError:
line = 'EOF'
else:
self.editline.prompt = self.prompt
line = self.editline.readline()
if not len(line):
line = 'EOF'
else:
line = line.rstrip('\r\n')
line = self.precmd(line)
stop = self.onecmd(line)
stop = self.postcmd(stop, line)
self.postloop()
finally:
pass
def precmd(self, line):
"""Hook method executed just before the command line is
interpreted, but after the input prompt is generated and issued.
"""
return line
def postcmd(self, stop, line):
"""Hook method executed just after a command dispatch is finished."""
return stop
def preloop(self):
"""Hook method executed once when the cmdloop() method is called."""
pass
def postloop(self):
"""Hook method executed once when the cmdloop() method is about to
return.
"""
pass
def parseline(self, line):
"""Parse the line into a command name and a string containing
the arguments. Returns a tuple containing (command, args, line).
'command' and 'args' may be None if the line couldn't be parsed.
"""
line = line.strip()
if not line:
return None, None, line
elif line[0] == '?':
line = 'help ' + line[1:]
elif line[0] == '!':
if hasattr(self, 'do_shell'):
line = 'shell ' + line[1:]
else:
return None, None, line
i, n = 0, len(line)
while i < n and line[i] in self.identchars: i = i+1
cmd, arg = line[:i], line[i:].strip()
return cmd, arg, line
def onecmd(self, line):
"""Interpret the argument as though it had been typed in response
to the prompt.
This may be overridden, but should not normally need to be;
see the precmd() and postcmd() methods for useful execution hooks.
The return value is a flag indicating whether interpretation of
commands by the interpreter should stop.
"""
cmd, arg, line = self.parseline(line)
if not line:
return self.emptyline()
if cmd is None:
return self.default(line)
self.lastcmd = line
if line == 'EOF' :
print("")
print("Bye")
sys.exit(0)
if cmd == '':
return self.default(line)
else:
try:
func = getattr(self, 'do_' + cmd)
except AttributeError:
return self.default(line)
return func(arg)
def emptyline(self):
"""Called when an empty line is entered in response to the prompt.
If this method is not overridden, it repeats the last nonempty
command entered.
"""
if self.lastcmd:
return self.onecmd(self.lastcmd)
def default(self, line):
"""Called on an input line when the command prefix is not recognized.
If this method is not overridden, it prints an error message and
returns.
"""
self.stdout.write('*** Unknown syntax: %s (%d)\n' % (line,len(line)))
def completedefault(self, *ignored):
"""Method called to complete an input line when no command-specific
complete_*() method is available.
By default, it returns an empty list.
"""
return []
def completenames(self, text, *ignored):
dotext = 'do_'+text
return [a[3:] for a in self.get_names() if a.startswith(dotext)]
def complete(self, text, state):
"""Return the next possible completion for 'text'.
If a command has not been entered, then complete against command list.
Otherwise try to call complete_<command> to get list of completions.
"""
if state == 0:
origline = self.editline.get_line_buffer()
line = origline.lstrip()
stripped = len(origline) - len(line)
begidx = self.editline.get_begidx() - stripped
endidx = self.editline.get_endidx() - stripped
if begidx>0:
cmd, args, foo = self.parseline(line)
if cmd == '':
compfunc = self.completedefault
else:
try:
compfunc = getattr(self, 'complete_' + cmd)
except AttributeError:
compfunc = self.completedefault
else:
compfunc = self.completenames
self.completion_matches = compfunc(text, line, begidx, endidx)
try:
return self.completion_matches[state]
except IndexError:
return None
def get_names(self):
# This method used to pull in base class attributes
# at a time dir() didn't do it yet.
return dir(self.__class__)
def complete_help(self, *args):
commands = set(self.completenames(*args))
topics = set(a[5:] for a in self.get_names()
if a.startswith('help_' + args[0]))
return list(commands | topics)
def do_help(self, arg):
'List available commands with "help" or detailed help with "help cmd".'
if arg:
# XXX check arg syntax
try:
func = getattr(self, 'help_' + arg)
except AttributeError:
try:
doc=getattr(self, 'do_' + arg).__doc__
if doc:
self.stdout.write("%s\n"%str(doc))
return
except AttributeError:
pass
self.stdout.write("%s\n"%str(self.nohelp % (arg,)))
return
func()
else:
names = self.get_names()
cmds_doc = []
cmds_undoc = []
help = {}
for name in names:
if name[:5] == 'help_':
help[name[5:]]=1
names.sort()
# There can be duplicates if routines overridden
prevname = ''
for name in names:
if name[:3] == 'do_':
if name == prevname:
continue
prevname = name
cmd=name[3:]
if cmd in help:
cmds_doc.append(cmd)
del help[cmd]
elif getattr(self, name).__doc__:
cmds_doc.append(cmd)
else:
cmds_undoc.append(cmd)
self.stdout.write("%s\n"%str(self.doc_leader))
self.print_topics(self.doc_header, cmds_doc, 15,80)
self.print_topics(self.misc_header, list(help.keys()),15,80)
self.print_topics(self.undoc_header, cmds_undoc, 15,80)
def print_topics(self, header, cmds, cmdlen, maxcol):
if cmds:
self.stdout.write("%s\n"%str(header))
if self.ruler:
self.stdout.write("%s\n"%str(self.ruler * len(header)))
self.columnize(cmds, maxcol-1)
self.stdout.write("\n")
def columnize(self, list, displaywidth=80):
"""Display a list of strings as a compact set of columns.
Each column is only as wide as necessary.
Columns are separated by two spaces (one was not legible enough).
"""
if not list:
self.stdout.write("<empty>\n")
return
nonstrings = [i for i in range(len(list))
if not isinstance(list[i], str)]
if nonstrings:
raise TypeError("list[i] not a string for i in %s"
% ", ".join(map(str, nonstrings)))
size = len(list)
if size == 1:
self.stdout.write('%s\n'%str(list[0]))
return
# Try every row count from 1 upwards
for nrows in range(1, len(list)):
ncols = (size+nrows-1) // nrows
colwidths = []
totwidth = -2
for col in range(ncols):
colwidth = 0
for row in range(nrows):
i = row + nrows*col
if i >= size:
break
x = list[i]
colwidth = max(colwidth, len(x))
colwidths.append(colwidth)
totwidth += colwidth + 2
if totwidth > displaywidth:
break
if totwidth <= displaywidth:
break
else:
nrows = len(list)
ncols = 1
colwidths = [0]
for row in range(nrows):
texts = []
for col in range(ncols):
i = row + nrows*col
if i >= size:
x = ""
else:
x = list[i]
texts.append(x)
while texts and not texts[-1]:
del texts[-1]
for col in range(len(texts)):
texts[col] = texts[col].ljust(colwidths[col])
self.stdout.write("%s\n"%str(" ".join(texts)))
class MyCmd(ElCmd,object):
def do_bleep(self, s):
print("bleep!")
def do_blob(self, s):
print("blob!")
def do_bob(self, s):
print("bob!")
def do_mods(self, s):
print(sys.modules.keys())
if __name__ == '__main__':
mc = MyCmd()
mc.cmdloop()
| [((208, 12, 208, 23), 'sys.exit', 'sys.exit', ({(208, 21, 208, 22): '(0)'}, {}), '(0)', False, 'import string, sys\n'), ((414, 14, 414, 32), 'sys.modules.keys', 'sys.modules.keys', ({}, {}), '()', False, 'import string, sys\n'), ((101, 32, 102, 56), 'editline.editline', 'editline.editline', ({(101, 50, 101, 55): '"""CMD"""', (102, 20, 102, 30): 'self.stdin', (102, 32, 102, 43): 'self.stdout', (102, 45, 102, 55): 'sys.stderr'}, {}), "('CMD', self.stdin, self.stdout, sys.stderr)", False, 'import editline\n')] |
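As a quick illustration of the do_*/help_*/complete_* conventions described in the module docstring above, a hypothetical subclass might look like the sketch below (it assumes ElCmd is in scope; the command and completion values are made up).

class GreetCmd(ElCmd, object):
    """Hypothetical subclass illustrating the naming conventions."""

    def do_greet(self, arg):
        """greet [name] -- print a friendly greeting."""
        print("Hello, %s!" % (arg or "world"))

    def help_greet(self):
        print("Usage: greet [name]")

    def complete_greet(self, text, line, begidx, endidx):
        # offered when the completion key is pressed after 'greet '
        return [n for n in ("alice", "bob", "carol") if n.startswith(text)]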
Shanu85/FCS_Project | ecommerce-website/orders/admin.py | def3437d58b4d2ff00e26c0a5ca769af66eccfad | from django.contrib import admin
from .models import Order, receiverInfo
@admin.register(Order)
class OrderAdmin(admin.ModelAdmin):
date_hierarchy = 'created_at'
list_display = ('user', 'code', 'total_price', 'shipping_status', 'created_at')
list_display_links = ('user',)
list_editable = ('shipping_status',)
list_filter = ('shipping_status', 'payment_mode', 'created_at')
list_per_page = 25
search_fields = ('user__phone_number', 'user__email', 'code')
readonly_fields = ('user','cart', 'receiver', 'payment_mode', 'shipping_status', 'code')
def total_price(self, obj):
return obj.cart.total_price
def has_add_permission(self, request):
return False
@admin.register(receiverInfo)
class receiverInfoAdmin(admin.ModelAdmin):
date_hierarchy = 'created_at'
list_display = ('id', 'full_name', 'phone_number', 'address', 'created_at')
list_display_links = ('id', 'full_name')
list_filter = ('created_at',)
list_per_page = 25
search_fields = ('full_name', 'phone_number', 'address')
readonly_fields = ('full_name', 'phone_number', 'address')
| [((6, 1, 6, 22), 'django.contrib.admin.register', 'admin.register', ({(6, 16, 6, 21): 'Order'}, {}), '(Order)', False, 'from django.contrib import admin\n'), ((23, 1, 23, 29), 'django.contrib.admin.register', 'admin.register', ({(23, 16, 23, 28): 'receiverInfo'}, {}), '(receiverInfo)', False, 'from django.contrib import admin\n')] |
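The same registration pattern generalizes to other models. A hypothetical example follows; the Coupon model and its fields are placeholders and not part of this project.

@admin.register(Coupon)  # Coupon is a hypothetical model used for illustration
class CouponAdmin(admin.ModelAdmin):
    date_hierarchy = 'created_at'
    list_display = ('code', 'discount', 'created_at')
    list_filter = ('created_at',)
    search_fields = ('code',)
    readonly_fields = ('code',)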
jeremyCtown/data-structures-and-algorithms | data_structures/linked_lists/ll-kth-from-end/ll_kth.py | d4ba8741f858fb5298f8ce560240373fb7742e20 | from node import Node
class LinkedList:
"""
initializes LL
"""
def __init__(self, iter=[]):
self.head = None
self._size = 0
for item in reversed(iter):
self.insert(item)
def __repr__(self):
"""
assumes head will have a val and we will need this
"""
return '<head> => {}'.format(self.head.val)
    def __str__(self):
        """ this is where we can see the list"""
        return 'head -> {}'.format(self.head.val if self.head else None)
def __len__(self):
"""
returns size of LL
"""
return self._size
def insert(self, val):
"""
basic insertion method for adding to front of LL
"""
self.head = Node(val, self.head)
self._size += 1
def append(self, val):
"""
appends node to the end of the LL
"""
        new_node = Node(val, None)
        if self.head is None:
            self.head = new_node
        else:
            current = self.head
            while current._next is not None:
                current = current._next
            current._next = new_node
        self._size += 1
        return new_node
def insert_before(self, val, new_val):
"""
inserts node before node at val
"""
new_node = Node(new_val)
current = self.head._next
while current._next is not None:
if current._next.val == val:
new_node._next = current._next
current._next = new_node
self._size += 1
break
current = current._next
if current._next is None:
raise ValueError("Data not in list")
def insert_after(self, val, new_val):
"""
inserts node after node at val
"""
new_node = Node(new_val)
current = self.head._next
while current._next is not None:
if current.val == val:
                new_node._next = current._next
current._next = new_node
self._size += 1
break
current = current._next
if current._next is None:
raise ValueError("Data not in list")
def kth_from_end(self, k):
"""
returns node at kth from end
"""
if self._size - k < 0:
raise AttributeError
current = self.head
for i in range(self._size - k - 1):
current = current._next
return current
| [((34, 20, 34, 40), 'node.Node', 'Node', ({(34, 25, 34, 28): 'val', (34, 30, 34, 39): 'self.head'}, {}), '(val, self.head)', False, 'from node import Node\n'), ((41, 19, 41, 34), 'node.Node', 'Node', ({(41, 24, 41, 27): 'val', (41, 29, 41, 33): 'None'}, {}), '(val, None)', False, 'from node import Node\n'), ((55, 19, 55, 32), 'node.Node', 'Node', ({(55, 24, 55, 31): 'new_val'}, {}), '(new_val)', False, 'from node import Node\n'), ((73, 19, 73, 32), 'node.Node', 'Node', ({(73, 24, 73, 31): 'new_val'}, {}), '(new_val)', False, 'from node import Node\n')] |
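A hypothetical usage of the list above (values chosen for illustration; it assumes LinkedList and Node are importable). kth_from_end(k) walks size - k - 1 links from the head, so k counts back from the tail, with the tail at k=0.

ll = LinkedList([1, 2, 3, 4, 5])    # insert() over reversed input keeps order: 1 -> 2 -> 3 -> 4 -> 5
node = ll.kth_from_end(2)           # 5 - 2 - 1 = 2 hops from the head
print(node.val)                     # -> 3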
ckamtsikis/cmssw | MuonAnalysis/MomentumScaleCalibration/test/LikelihoodPdfDBReader_cfg.py | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | import FWCore.ParameterSet.Config as cms
process = cms.Process("LIKELIHOODPDFDBREADER")
# process.load("MuonAnalysis.MomentumScaleCalibration.local_CSA08_Y_cff")
process.source = cms.Source("EmptySource",
numberEventsInRun = cms.untracked.uint32(1),
firstRun = cms.untracked.uint32(1)
)
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("Geometry.CMSCommonData.cmsIdealGeometryXML_cfi")
process.load("Geometry.CommonTopologies.globalTrackingGeometry_cfi")
process.load("RecoMuon.DetLayers.muonDetLayerGeometry_cfi")
process.load("Geometry.MuonNumbering.muonNumberingInitialization_cfi")
process.load("RecoMuon.TrackingTools.MuonServiceProxy_cff")
# process.source = cms.Source("PoolSource",
# fileNames = cms.untracked.vstring()
# )
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.poolDBESSource = cms.ESSource("PoolDBESSource",
BlobStreamerName = cms.untracked.string('TBufferBlobStreamingService'),
DBParameters = cms.PSet(
messageLevel = cms.untracked.int32(2),
authenticationPath = cms.untracked.string('/afs/cern.ch/cms/DB/conddb')
),
timetype = cms.untracked.string('runnumber'),
connect = cms.string('sqlite_file:dummy2.db'),
toGet = cms.VPSet(cms.PSet(
record = cms.string('MuScleFitLikelihoodPdfRcd'),
tag = cms.string('MuScleFitLikelihoodPdf_2_1_12')
))
)
process.LikelihoodPdfDBReaderModule = cms.EDAnalyzer(
"LikelihoodPdfDBReader"
)
process.p1 = cms.Path(process.LikelihoodPdfDBReaderModule)
| [((3, 10, 3, 46), 'FWCore.ParameterSet.Config.Process', 'cms.Process', ({(3, 22, 3, 45): '"""LIKELIHOODPDFDBREADER"""'}, {}), "('LIKELIHOODPDFDBREADER')", True, 'import FWCore.ParameterSet.Config as cms\n'), ((45, 38, 47, 1), 'FWCore.ParameterSet.Config.EDAnalyzer', 'cms.EDAnalyzer', ({(46, 4, 46, 27): '"""LikelihoodPdfDBReader"""'}, {}), "('LikelihoodPdfDBReader')", True, 'import FWCore.ParameterSet.Config as cms\n'), ((49, 13, 49, 58), 'FWCore.ParameterSet.Config.Path', 'cms.Path', ({(49, 22, 49, 57): 'process.LikelihoodPdfDBReaderModule'}, {}), '(process.LikelihoodPdfDBReaderModule)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((7, 24, 7, 47), 'FWCore.ParameterSet.Config.untracked.uint32', 'cms.untracked.uint32', ({(7, 45, 7, 46): '1'}, {}), '(1)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((8, 15, 8, 38), 'FWCore.ParameterSet.Config.untracked.uint32', 'cms.untracked.uint32', ({(8, 36, 8, 37): '1'}, {}), '(1)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((28, 12, 28, 34), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', ({(28, 32, 28, 33): '1'}, {}), '(1)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((32, 22, 32, 73), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', ({(32, 43, 32, 72): '"""TBufferBlobStreamingService"""'}, {}), "('TBufferBlobStreamingService')", True, 'import FWCore.ParameterSet.Config as cms\n'), ((37, 15, 37, 48), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', ({(37, 36, 37, 47): '"""runnumber"""'}, {}), "('runnumber')", True, 'import FWCore.ParameterSet.Config as cms\n'), ((38, 14, 38, 49), 'FWCore.ParameterSet.Config.string', 'cms.string', ({(38, 25, 38, 48): '"""sqlite_file:dummy2.db"""'}, {}), "('sqlite_file:dummy2.db')", True, 'import FWCore.ParameterSet.Config as cms\n'), ((34, 23, 34, 45), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', ({(34, 43, 34, 44): '2'}, {}), '(2)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((35, 29, 35, 79), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', ({(35, 50, 35, 78): '"""/afs/cern.ch/cms/DB/conddb"""'}, {}), "('/afs/cern.ch/cms/DB/conddb')", True, 'import FWCore.ParameterSet.Config as cms\n'), ((40, 17, 40, 56), 'FWCore.ParameterSet.Config.string', 'cms.string', ({(40, 28, 40, 55): '"""MuScleFitLikelihoodPdfRcd"""'}, {}), "('MuScleFitLikelihoodPdfRcd')", True, 'import FWCore.ParameterSet.Config as cms\n'), ((41, 14, 41, 57), 'FWCore.ParameterSet.Config.string', 'cms.string', ({(41, 25, 41, 56): '"""MuScleFitLikelihoodPdf_2_1_12"""'}, {}), "('MuScleFitLikelihoodPdf_2_1_12')", True, 'import FWCore.ParameterSet.Config as cms\n')] |
vinid/fast_fine_tuna | fast_fine_tuna/fast_fine_tuna.py | 2d128f58df0407448cdb2e179972573afa7ac636 | from transformers import AutoModel, AutoModelForSequenceClassification, AutoTokenizer, AutoConfig
from sklearn.model_selection import StratifiedKFold
import numpy as np
import torch
from fast_fine_tuna.dataset import MainDatasetDouble, MainDataset
from transformers import AdamW
from torch.utils.data import DataLoader
import os
from tqdm import tqdm
from fast_fine_tuna.models import MiniModel
from torch import nn
class FastFineTuna:
def __init__(self, model_name, tokenizer_name):
self.model_name = model_name
self.tokenizer_name = tokenizer_name
self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
def cross_validate_fit(self, texts, labels, splits=5, epochs=5, batch_size=16, learning_rate=5e-5):
config = AutoConfig.from_pretrained(self.model_name, num_labels=len(set(labels)),
finetuning_task="custom")
tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name)
texts = np.array(texts)
labels = np.array(labels)
skf = StratifiedKFold(n_splits=splits)
original = []
predicted = []
for train_index, test_index in skf.split(texts, labels):
model = AutoModelForSequenceClassification.from_pretrained(self.model_name, config=config)
X_train, X_test = texts[train_index].tolist(), texts[test_index].tolist()
y_train, y_test = labels[train_index].tolist(), labels[test_index].tolist()
# not the smartest way to do this, but faster to code up
tokenized_train = tokenizer(X_train, truncation=True, padding=True)
tokenized_test = tokenizer(X_test, truncation=True, padding=True)
train_dataset = MainDataset(tokenized_train, y_train)
test_dataset = MainDataset(tokenized_test, y_test)
model.to(self.device)
model.train()
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
optim = AdamW(model.parameters(), lr=learning_rate)
pbar = tqdm(total=epochs, position=0, leave=True)
for epoch in range(epochs):
pbar.update(1)
for batch in train_loader:
optim.zero_grad()
input_ids = batch['input_ids'].to(self.device)
attention_mask = batch['attention_mask'].to(self.device)
lab = batch['labels'].to(self.device)
outputs = model(input_ids, attention_mask=attention_mask, labels=lab)
loss = outputs[0]
loss.backward()
optim.step()
pbar.close()
model.eval()
loader = DataLoader(test_dataset, batch_size=batch_size)
original.extend(y_test)
with torch.no_grad():
for batch in loader:
input_ids = batch['input_ids'].to(self.device)
attention_mask = batch['attention_mask'].to(self.device)
outputs = model(input_ids, attention_mask=attention_mask)
predicted.extend(torch.argmax(outputs["logits"], axis=1).cpu().numpy().tolist())
del model
return original, predicted
def train_and_save(self, texts, labels, path, epochs=5, batch_size=16, learning_rate=5e-5):
config = AutoConfig.from_pretrained(self.model_name, num_labels=len(set(labels)),
finetuning_task="custom")
model = AutoModelForSequenceClassification.from_pretrained(self.model_name, config=config)
tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name)
tokenized_train = tokenizer(texts, truncation=True, padding=True)
train_dataset = MainDataset(tokenized_train, labels)
model.to(self.device)
model.train()
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
optim = AdamW(model.parameters(), lr=learning_rate)
pbar = tqdm(total=epochs, position=0, leave=True)
for epoch in range(epochs):
pbar.update(1)
for batch in train_loader:
optim.zero_grad()
input_ids = batch['input_ids'].to(self.device)
attention_mask = batch['attention_mask'].to(self.device)
lab = batch['labels'].to(self.device)
outputs = model(input_ids, attention_mask=attention_mask, labels=lab)
loss = outputs[0]
loss.backward()
optim.step()
pbar.close()
os.makedirs(path)
model.save_pretrained(path)
tokenizer.save_pretrained(path)
class DoubleFastFineTuna:
def __init__(self, model_name, tokenizer_name):
self.model_name = model_name
self.tokenizer_name = tokenizer_name
self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
def cross_validate_fit(self, texts, labels_A, labels_B, splits=5, epochs=5, batch_size=16, learning_rate=5e-5,
):
tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name)
texts = np.array(texts)
labels_A = np.array(labels_A)
labels_B = np.array(labels_B)
skf = StratifiedKFold(n_splits=splits)
original_A = []
original_B = []
predicted_A = []
predicted_B = []
for train_index, test_index in skf.split(texts, labels_A, labels_B):
model = MiniModel(self.model_name, len(set(labels_A)), len(set(labels_B)))
X_train, X_test = texts[train_index].tolist(), texts[test_index].tolist()
y_A_train, y_A_test = labels_A[train_index].tolist(), labels_A[test_index].tolist()
y_B_train, y_B_test = labels_B[train_index].tolist(), labels_B[test_index].tolist()
# not the smartest way to do this, but faster to code up
tokenized_train = tokenizer(X_train, truncation=True, padding=True)
tokenized_test = tokenizer(X_test, truncation=True, padding=True)
train_dataset = MainDatasetDouble(tokenized_train, y_A_train, y_B_train)
test_dataset = MainDatasetDouble(tokenized_test, y_A_test, y_B_test)
model.to(self.device)
model.train()
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
optim = AdamW(model.parameters(), lr=learning_rate)
pbar = tqdm(total=epochs, position=0, leave=True)
for epoch in range(epochs):
pbar.update(1)
for batch in train_loader:
optim.zero_grad()
input_ids = batch['input_ids'].to(self.device)
attention_mask = batch['attention_mask'].to(self.device)
lab_A = batch['labels_A'].to(self.device)
lab_B = batch['labels_B'].to(self.device)
outputs = model(input_ids, attention_mask=attention_mask)
loss = nn.CrossEntropyLoss()
loss_A = loss(outputs[0], lab_A)
loss_B = loss(outputs[1], lab_B)
loss = loss_A + loss_B
loss.backward()
optim.step()
pbar.close()
model.eval()
loader = DataLoader(test_dataset, batch_size=batch_size)
original_A.extend(y_A_test)
original_B.extend(y_B_test)
with torch.no_grad():
for batch in loader:
input_ids = batch['input_ids'].to(self.device)
attention_mask = batch['attention_mask'].to(self.device)
outputs = model(input_ids, attention_mask=attention_mask)
predicted_A.extend(torch.argmax(outputs[0], axis=1).cpu().numpy().tolist())
predicted_B.extend(torch.argmax(outputs[1], axis=1).cpu().numpy().tolist())
del model
return original_A, original_B, predicted_A, predicted_B
def train_and_save(self, texts, labels, path, epochs=5, batch_size=16, learning_rate=5e-5):
config = AutoConfig.from_pretrained(self.model_name, num_labels=len(set(labels)),
finetuning_task="custom")
model = AutoModelForSequenceClassification.from_pretrained(self.model_name, config=config)
tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_name)
tokenized_train = tokenizer(texts, truncation=True, padding=True)
train_dataset = MainDataset(tokenized_train, labels)
model.to(self.device)
model.train()
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
optim = AdamW(model.parameters(), lr=learning_rate)
pbar = tqdm(total=epochs, position=0, leave=True)
for epoch in range(epochs):
pbar.update(1)
for batch in train_loader:
optim.zero_grad()
input_ids = batch['input_ids'].to(self.device)
attention_mask = batch['attention_mask'].to(self.device)
lab = batch['labels'].to(self.device)
outputs = model(input_ids, attention_mask=attention_mask, labels=lab)
loss = outputs[0]
loss.backward()
optim.step()
pbar.close()
os.makedirs(path)
model.save_pretrained(path)
tokenizer.save_pretrained(path)
| [((24, 20, 24, 70), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', ({(24, 50, 24, 69): 'self.tokenizer_name'}, {}), '(self.tokenizer_name)', False, 'from transformers import AutoModel, AutoModelForSequenceClassification, AutoTokenizer, AutoConfig\n'), ((25, 16, 25, 31), 'numpy.array', 'np.array', ({(25, 25, 25, 30): 'texts'}, {}), '(texts)', True, 'import numpy as np\n'), ((26, 17, 26, 33), 'numpy.array', 'np.array', ({(26, 26, 26, 32): 'labels'}, {}), '(labels)', True, 'import numpy as np\n'), ((28, 14, 28, 46), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', (), '', False, 'from sklearn.model_selection import StratifiedKFold\n'), ((86, 16, 86, 98), 'transformers.AutoModelForSequenceClassification.from_pretrained', 'AutoModelForSequenceClassification.from_pretrained', (), '', False, 'from transformers import AutoModel, AutoModelForSequenceClassification, AutoTokenizer, AutoConfig\n'), ((87, 20, 87, 70), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', ({(87, 50, 87, 69): 'self.tokenizer_name'}, {}), '(self.tokenizer_name)', False, 'from transformers import AutoModel, AutoModelForSequenceClassification, AutoTokenizer, AutoConfig\n'), ((91, 24, 91, 60), 'fast_fine_tuna.dataset.MainDataset', 'MainDataset', ({(91, 36, 91, 51): 'tokenized_train', (91, 53, 91, 59): 'labels'}, {}), '(tokenized_train, labels)', False, 'from fast_fine_tuna.dataset import MainDatasetDouble, MainDataset\n'), ((96, 23, 96, 85), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader\n'), ((100, 15, 100, 57), 'tqdm.tqdm', 'tqdm', (), '', False, 'from tqdm import tqdm\n'), ((114, 8, 114, 25), 'os.makedirs', 'os.makedirs', ({(114, 20, 114, 24): 'path'}, {}), '(path)', False, 'import os\n'), ((129, 20, 129, 70), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', ({(129, 50, 129, 69): 'self.tokenizer_name'}, {}), '(self.tokenizer_name)', False, 'from transformers import AutoModel, AutoModelForSequenceClassification, AutoTokenizer, AutoConfig\n'), ((130, 16, 130, 31), 'numpy.array', 'np.array', ({(130, 25, 130, 30): 'texts'}, {}), '(texts)', True, 'import numpy as np\n'), ((131, 19, 131, 37), 'numpy.array', 'np.array', ({(131, 28, 131, 36): 'labels_A'}, {}), '(labels_A)', True, 'import numpy as np\n'), ((132, 19, 132, 37), 'numpy.array', 'np.array', ({(132, 28, 132, 36): 'labels_B'}, {}), '(labels_B)', True, 'import numpy as np\n'), ((134, 14, 134, 46), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', (), '', False, 'from sklearn.model_selection import StratifiedKFold\n'), ((204, 16, 204, 98), 'transformers.AutoModelForSequenceClassification.from_pretrained', 'AutoModelForSequenceClassification.from_pretrained', (), '', False, 'from transformers import AutoModel, AutoModelForSequenceClassification, AutoTokenizer, AutoConfig\n'), ((205, 20, 205, 70), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', ({(205, 50, 205, 69): 'self.tokenizer_name'}, {}), '(self.tokenizer_name)', False, 'from transformers import AutoModel, AutoModelForSequenceClassification, AutoTokenizer, AutoConfig\n'), ((209, 24, 209, 60), 'fast_fine_tuna.dataset.MainDataset', 'MainDataset', ({(209, 36, 209, 51): 'tokenized_train', (209, 53, 209, 59): 'labels'}, {}), '(tokenized_train, labels)', False, 'from fast_fine_tuna.dataset import MainDatasetDouble, MainDataset\n'), ((214, 23, 214, 85), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from 
torch.utils.data import DataLoader\n'), ((218, 15, 218, 57), 'tqdm.tqdm', 'tqdm', (), '', False, 'from tqdm import tqdm\n'), ((232, 8, 232, 25), 'os.makedirs', 'os.makedirs', ({(232, 20, 232, 24): 'path'}, {}), '(path)', False, 'import os\n'), ((18, 46, 18, 71), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((18, 22, 18, 42), 'torch.device', 'torch.device', ({(18, 35, 18, 41): '"""cuda"""'}, {}), "('cuda')", False, 'import torch\n'), ((18, 77, 18, 96), 'torch.device', 'torch.device', ({(18, 90, 18, 95): '"""cpu"""'}, {}), "('cpu')", False, 'import torch\n'), ((34, 20, 34, 102), 'transformers.AutoModelForSequenceClassification.from_pretrained', 'AutoModelForSequenceClassification.from_pretrained', (), '', False, 'from transformers import AutoModel, AutoModelForSequenceClassification, AutoTokenizer, AutoConfig\n'), ((43, 28, 43, 65), 'fast_fine_tuna.dataset.MainDataset', 'MainDataset', ({(43, 40, 43, 55): 'tokenized_train', (43, 57, 43, 64): 'y_train'}, {}), '(tokenized_train, y_train)', False, 'from fast_fine_tuna.dataset import MainDatasetDouble, MainDataset\n'), ((44, 27, 44, 62), 'fast_fine_tuna.dataset.MainDataset', 'MainDataset', ({(44, 39, 44, 53): 'tokenized_test', (44, 55, 44, 61): 'y_test'}, {}), '(tokenized_test, y_test)', False, 'from fast_fine_tuna.dataset import MainDatasetDouble, MainDataset\n'), ((49, 27, 49, 89), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader\n'), ((53, 19, 53, 61), 'tqdm.tqdm', 'tqdm', (), '', False, 'from tqdm import tqdm\n'), ((69, 21, 69, 68), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader\n'), ((124, 46, 124, 71), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((124, 22, 124, 42), 'torch.device', 'torch.device', ({(124, 35, 124, 41): '"""cuda"""'}, {}), "('cuda')", False, 'import torch\n'), ((124, 77, 124, 96), 'torch.device', 'torch.device', ({(124, 90, 124, 95): '"""cpu"""'}, {}), "('cpu')", False, 'import torch\n'), ((152, 28, 152, 84), 'fast_fine_tuna.dataset.MainDatasetDouble', 'MainDatasetDouble', ({(152, 46, 152, 61): 'tokenized_train', (152, 63, 152, 72): 'y_A_train', (152, 74, 152, 83): 'y_B_train'}, {}), '(tokenized_train, y_A_train, y_B_train)', False, 'from fast_fine_tuna.dataset import MainDatasetDouble, MainDataset\n'), ((153, 27, 153, 80), 'fast_fine_tuna.dataset.MainDatasetDouble', 'MainDatasetDouble', ({(153, 45, 153, 59): 'tokenized_test', (153, 61, 153, 69): 'y_A_test', (153, 71, 153, 79): 'y_B_test'}, {}), '(tokenized_test, y_A_test, y_B_test)', False, 'from fast_fine_tuna.dataset import MainDatasetDouble, MainDataset\n'), ((158, 27, 158, 89), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader\n'), ((162, 19, 162, 61), 'tqdm.tqdm', 'tqdm', (), '', False, 'from tqdm import tqdm\n'), ((185, 21, 185, 68), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader\n'), ((71, 17, 71, 32), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((188, 17, 188, 32), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((173, 27, 173, 48), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ({}, {}), '()', False, 'from torch import nn\n'), ((77, 37, 77, 76), 'torch.argmax', 'torch.argmax', (), '', False, 'import torch\n'), ((194, 39, 194, 71), 'torch.argmax', 'torch.argmax', (), '', False, 'import 
torch\n'), ((195, 39, 195, 71), 'torch.argmax', 'torch.argmax', (), '', False, 'import torch\n')] |
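A hypothetical invocation of FastFineTuna above (the model/tokenizer name and the toy data are placeholders; running it downloads pretrained weights and needs transformers, torch and scikit-learn installed):

tuner = FastFineTuna("bert-base-uncased", "bert-base-uncased")
texts = ["great product", "terrible service", "works as expected", "broke after a day"]
labels = [1, 0, 1, 0]
# 2-fold CV so each class appears in every fold of this tiny example
gold, pred = tuner.cross_validate_fit(texts, labels, splits=2, epochs=1, batch_size=2)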
gauravyeole/KVstoreDB | Message/Message.py | 1c7c83b158e95daec998fba62a89fa1211a05089 | # Message class Implementation
# @author: Gaurav Yeole <[email protected]>
class Message:
class Request:
def __init__(self, action="", data=None):
self.action = action
self.data = data
class Rsponse:
def __init__(self):
self.status = False
self.data = None
def __init__(self):
pass
def set_request(self):
pass
def response(self):
pass | [] |
gsnedders/presto-testo | wpt/websockets/websock_handlers/open_delay_wsh.py | a0acfbef13a3f8cae67cc7145216d31b67aa8eb4 | #!/usr/bin/python
from mod_pywebsocket import msgutil
import time
def web_socket_do_extra_handshake(request):
pass # Always accept.
def web_socket_transfer_data(request):
time.sleep(3)
msgutil.send_message(request, "line")
| [((10, 1, 10, 14), 'time.sleep', 'time.sleep', ({(10, 12, 10, 13): '(3)'}, {}), '(3)', False, 'import time\n'), ((11, 1, 11, 38), 'mod_pywebsocket.msgutil.send_message', 'msgutil.send_message', ({(11, 22, 11, 29): 'request', (11, 31, 11, 37): '"""line"""'}, {}), "(request, 'line')", False, 'from mod_pywebsocket import msgutil\n')] |
augusto-herrmann/airflow | airflow/providers/microsoft/psrp/operators/psrp.py | 7ee4295dd3f7dba4fcd763286c7823bb1707fe99 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import TYPE_CHECKING, List, Optional, Sequence
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.microsoft.psrp.hooks.psrp import PSRPHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class PSRPOperator(BaseOperator):
"""PowerShell Remoting Protocol operator.
:param psrp_conn_id: connection id
:type psrp_conn_id: str
:param command: command to execute on remote host. (templated)
:type command: str
:param powershell: powershell to execute on remote host. (templated)
:type powershell: str
"""
template_fields: Sequence[str] = (
"command",
"powershell",
)
template_fields_renderers = {"command": "powershell", "powershell": "powershell"}
ui_color = "#901dd2"
def __init__(
self,
*,
psrp_conn_id: str,
command: Optional[str] = None,
powershell: Optional[str] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
if not (command or powershell):
raise ValueError("Must provide either 'command' or 'powershell'")
self.conn_id = psrp_conn_id
self.command = command
self.powershell = powershell
def execute(self, context: "Context") -> List[str]:
with PSRPHook(self.conn_id) as hook:
ps = hook.invoke_powershell(
f"cmd.exe /c @'\n{self.command}\n'@" if self.command else self.powershell
)
if ps.had_errors:
raise AirflowException("Process failed")
return ps.output
| [((63, 13, 63, 35), 'airflow.providers.microsoft.psrp.hooks.psrp.PSRPHook', 'PSRPHook', ({(63, 22, 63, 34): 'self.conn_id'}, {}), '(self.conn_id)', False, 'from airflow.providers.microsoft.psrp.hooks.psrp import PSRPHook\n'), ((68, 18, 68, 52), 'airflow.exceptions.AirflowException', 'AirflowException', ({(68, 35, 68, 51): '"""Process failed"""'}, {}), "('Process failed')", False, 'from airflow.exceptions import AirflowException\n')] |
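A hypothetical DAG snippet using the operator above (the dag id, connection id and command are placeholders):

from datetime import datetime

from airflow import DAG
from airflow.providers.microsoft.psrp.operators.psrp import PSRPOperator

with DAG(dag_id="psrp_example", start_date=datetime(2021, 1, 1), schedule_interval=None) as dag:
    list_temp = PSRPOperator(
        task_id="list_temp",
        psrp_conn_id="my_psrp_connection",  # placeholder connection id
        command="dir C:\\Temp",             # placeholder remote command
    )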
mithro/chromium-infra | appengine/monorail/services/api_pb2_v1_helpers.py | d27ac0b230bedae4bc968515b02927cf9e17c2b7 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is govered by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Convert Monorail PB objects to API PB objects"""
import datetime
import logging
import time
from framework import framework_constants
from framework import framework_helpers
from framework import permissions
from framework import timestr
from proto import api_pb2_v1
from proto import project_pb2
from proto import tracker_pb2
from services import issue_svc
from services import project_svc
from services import user_svc
from tracker import tracker_bizobj
from tracker import tracker_helpers
def convert_project(project, config, role):
"""Convert Monorail Project PB to API ProjectWrapper PB."""
return api_pb2_v1.ProjectWrapper(
kind='monorail#project',
name=project.project_name,
externalId=project.project_name,
htmlLink='/p/%s/' % project.project_name,
summary=project.summary,
description=project.description,
role=role,
issuesConfig=convert_project_config(config))
def convert_project_config(config):
"""Convert Monorail ProjectIssueConfig PB to API ProjectIssueConfig PB."""
return api_pb2_v1.ProjectIssueConfig(
kind='monorail#projectIssueConfig',
restrictToKnown=config.restrict_to_known,
defaultColumns=config.default_col_spec.split(),
defaultSorting=config.default_sort_spec.split(),
statuses=[convert_status(s) for s in config.well_known_statuses],
labels=[convert_label(l) for l in config.well_known_labels],
prompts=[convert_template(t) for t in config.templates],
defaultPromptForMembers=config.default_template_for_developers,
defaultPromptForNonMembers=config.default_template_for_users)
def convert_status(status):
"""Convert Monorail StatusDef PB to API Status PB."""
return api_pb2_v1.Status(
status=status.status,
meansOpen=status.means_open,
description=status.status_docstring)
def convert_label(label):
"""Convert Monorail LabelDef PB to API Label PB."""
return api_pb2_v1.Label(
label=label.label,
description=label.label_docstring)
def convert_template(template):
"""Convert Monorail TemplateDef PB to API Prompt PB."""
return api_pb2_v1.Prompt(
name=template.name,
title=template.summary,
description=template.content,
titleMustBeEdited=template.summary_must_be_edited,
status=template.status,
labels=template.labels,
membersOnly=template.members_only,
defaultToMember=template.owner_defaults_to_member,
componentRequired=template.component_required)
def convert_person(user_id, cnxn, services, trap_exception=False):
"""Convert user id to API AtomPerson PB."""
if not user_id:
return None
try:
user = services.user.GetUser(cnxn, user_id)
except user_svc.NoSuchUserException as ex:
if trap_exception:
logging.warning(str(ex))
return None
else:
raise ex
days_ago = None
if user.last_visit_timestamp:
secs_ago = int(time.time()) - user.last_visit_timestamp
days_ago = secs_ago / framework_constants.SECS_PER_DAY
return api_pb2_v1.AtomPerson(
kind='monorail#issuePerson',
name=user.email,
htmlLink='https://%s/u/%d' % (framework_helpers.GetHostPort(), user_id),
last_visit_days_ago=days_ago,
email_bouncing=bool(user.email_bounce_timestamp),
vacation_message=user.vacation_message)
def convert_issue_ids(issue_ids, mar, services):
"""Convert global issue ids to API IssueRef PB."""
# missed issue ids are filtered out.
issues = services.issue.GetIssues(mar.cnxn, issue_ids)
result = []
for issue in issues:
issue_ref = api_pb2_v1.IssueRef(
issueId=issue.local_id,
projectId=issue.project_name,
kind='monorail#issueRef')
result.append(issue_ref)
return result
def convert_issueref_pbs(issueref_pbs, mar, services):
"""Convert API IssueRef PBs to global issue ids."""
if issueref_pbs:
result = []
for ir in issueref_pbs:
project_id = mar.project_id
if ir.projectId:
project = services.project.GetProjectByName(
mar.cnxn, ir.projectId)
if project:
project_id = project.project_id
try:
issue = services.issue.GetIssueByLocalID(
mar.cnxn, project_id, ir.issueId)
result.append(issue.issue_id)
except issue_svc.NoSuchIssueException:
logging.warning(
'Issue (%s:%d) does not exist.' % (ir.projectId, ir.issueId))
return result
else:
return None
def convert_issue(cls, issue, mar, services):
"""Convert Monorail Issue PB to API IssuesGetInsertResponse."""
config = services.config.GetProjectConfig(mar.cnxn, issue.project_id)
granted_perms = tracker_bizobj.GetGrantedPerms(
issue, mar.auth.effective_ids, config)
issue_project = services.project.GetProject(mar.cnxn, issue.project_id)
component_list = []
for cd in config.component_defs:
cid = cd.component_id
if cid in issue.component_ids:
component_list.append(cd.path)
cc_list = [convert_person(p, mar.cnxn, services) for p in issue.cc_ids]
cc_list = [p for p in cc_list if p is not None]
field_values_list = []
field_id_dict = {
fd.field_id: fd.field_name for fd in config.field_defs}
for fv in issue.field_values:
field_name = field_id_dict.get(fv.field_id)
if not field_name:
logging.warning('Custom field %d of project %s does not exist',
fv.field_id, issue_project.project_name)
continue
val = None
if fv.user_id:
val = _get_user_email(
services.user, mar.cnxn, fv.user_id)
elif fv.str_value:
val = fv.str_value
elif fv.int_value:
val = str(fv.int_value)
new_fv = api_pb2_v1.FieldValue(
fieldName=field_name,
fieldValue=val,
derived=fv.derived)
field_values_list.append(new_fv)
resp = cls(
kind='monorail#issue',
id=issue.local_id,
title=issue.summary,
summary=issue.summary,
projectId=issue_project.project_name,
stars=issue.star_count,
starred=services.issue_star.IsItemStarredBy(
mar.cnxn, issue.issue_id, mar.auth.user_id),
status=issue.status,
state=(api_pb2_v1.IssueState.open if
tracker_helpers.MeansOpenInProject(
tracker_bizobj.GetStatus(issue), config)
else api_pb2_v1.IssueState.closed),
labels=issue.labels,
components=component_list,
author=convert_person(issue.reporter_id, mar.cnxn, services),
owner=convert_person(issue.owner_id, mar.cnxn, services),
cc=cc_list,
updated=datetime.datetime.fromtimestamp(issue.modified_timestamp),
published=datetime.datetime.fromtimestamp(issue.opened_timestamp),
blockedOn=convert_issue_ids(issue.blocked_on_iids, mar, services),
blocking=convert_issue_ids(issue.blocking_iids, mar, services),
canComment=permissions.CanCommentIssue(
mar.auth.effective_ids, mar.perms, issue_project, issue,
granted_perms=granted_perms),
canEdit=permissions.CanEditIssue(
mar.auth.effective_ids, mar.perms, issue_project, issue,
granted_perms=granted_perms),
fieldValues=field_values_list)
if issue.closed_timestamp > 0:
resp.closed = datetime.datetime.fromtimestamp(issue.closed_timestamp)
if issue.merged_into:
resp.mergedInto=convert_issue_ids([issue.merged_into], mar, services)[0]
if issue.owner_modified_timestamp:
resp.owner_modified = datetime.datetime.fromtimestamp(
issue.owner_modified_timestamp)
if issue.status_modified_timestamp:
resp.status_modified = datetime.datetime.fromtimestamp(
issue.status_modified_timestamp)
if issue.component_modified_timestamp:
resp.component_modified = datetime.datetime.fromtimestamp(
issue.component_modified_timestamp)
return resp
def convert_comment(issue, comment, mar, services, granted_perms):
"""Convert Monorail IssueComment PB to API IssueCommentWrapper."""
can_delete = permissions.CanDelete(
mar.auth.user_id, mar.auth.effective_ids, mar.perms,
comment.deleted_by, comment.user_id, mar.project,
permissions.GetRestrictions(issue), granted_perms=granted_perms)
return api_pb2_v1.IssueCommentWrapper(
attachments=[convert_attachment(a) for a in comment.attachments],
author=convert_person(comment.user_id, mar.cnxn, services,
trap_exception=True),
canDelete=can_delete,
content=comment.content,
deletedBy=convert_person(comment.deleted_by, mar.cnxn, services,
trap_exception=True),
id=comment.sequence,
published=datetime.datetime.fromtimestamp(comment.timestamp),
updates=convert_amendments(issue, comment.amendments, mar, services),
kind='monorail#issueComment')
def convert_attachment(attachment):
"""Convert Monorail Attachment PB to API Attachment."""
return api_pb2_v1.Attachment(
attachmentId=attachment.attachment_id,
fileName=attachment.filename,
fileSize=attachment.filesize,
mimetype=attachment.mimetype,
isDeleted=attachment.deleted)
def convert_amendments(issue, amendments, mar, services):
"""Convert a list of Monorail Amendment PBs to API Update."""
result = api_pb2_v1.Update(kind='monorail#issueCommentUpdate')
for amendment in amendments:
if amendment.field == tracker_pb2.FieldID.SUMMARY:
result.summary = amendment.newvalue
elif amendment.field == tracker_pb2.FieldID.STATUS:
result.status = amendment.newvalue
elif amendment.field == tracker_pb2.FieldID.OWNER:
if len(amendment.added_user_ids) == 0:
result.owner = framework_constants.NO_USER_NAME
else:
result.owner = _get_user_email(
services.user, mar.cnxn, amendment.added_user_ids[0])
elif amendment.field == tracker_pb2.FieldID.LABELS:
result.labels = amendment.newvalue.split()
elif amendment.field == tracker_pb2.FieldID.CC:
for user_id in amendment.added_user_ids:
user_email = _get_user_email(
services.user, mar.cnxn, user_id)
result.cc.append(user_email)
for user_id in amendment.removed_user_ids:
user_email = _get_user_email(
services.user, mar.cnxn, user_id)
result.cc.append('-%s' % user_email)
elif amendment.field == tracker_pb2.FieldID.BLOCKEDON:
result.blockedOn = _append_project(
amendment.newvalue, issue.project_name)
elif amendment.field == tracker_pb2.FieldID.BLOCKING:
result.blocking = _append_project(
amendment.newvalue, issue.project_name)
elif amendment.field == tracker_pb2.FieldID.MERGEDINTO:
result.mergedInto = amendment.newvalue
elif amendment.field == tracker_pb2.FieldID.COMPONENTS:
result.components = amendment.newvalue.split()
elif amendment.field == tracker_pb2.FieldID.CUSTOM:
fv = api_pb2_v1.FieldValue()
fv.fieldName = amendment.custom_field_name
fv.fieldValue = amendment.newvalue
result.fieldValues.append(fv)
return result
def _get_user_email(user_service, cnxn, user_id):
"""Get user email."""
try:
user_email = user_service.LookupUserEmail(
cnxn, user_id)
if not user_email:
user_email = framework_constants.DELETED_USER_NAME
except user_svc.NoSuchUserException:
user_email = framework_constants.DELETED_USER_NAME
return user_email
def _append_project(issue_ids, project_name):
"""Append project name to convert <id> to <project>:<id> format."""
result = []
id_list = issue_ids.split()
for id_str in id_list:
if ':' in id_str:
result.append(id_str)
# '-' means this issue is being removed
elif id_str.startswith('-'):
result.append('-%s:%s' % (project_name, id_str[1:]))
else:
result.append('%s:%s' % (project_name, id_str))
return result
def split_remove_add(item_list):
"""Split one list of items into two: items to add and items to remove."""
list_to_add = []
list_to_remove = []
for item in item_list:
if item.startswith('-'):
list_to_remove.append(item[1:])
else:
list_to_add.append(item)
return list_to_add, list_to_remove
# TODO(sheyang): batch the SQL queries to fetch projects/issues.
def issue_global_ids(project_local_id_pairs, project_id, mar, services):
"""Find global issues ids given <project_name>:<issue_local_id> pairs."""
result = []
for pair in project_local_id_pairs:
issue_project_id = None
local_id = None
if ':' in pair:
pair_ary = pair.split(':')
project_name = pair_ary[0]
local_id = int(pair_ary[1])
project = services.project.GetProjectByName(mar.cnxn, project_name)
if not project:
raise project_svc.NoSuchProjectException(
'Project %s does not exist' % project_name)
issue_project_id = project.project_id
else:
issue_project_id = project_id
local_id = int(pair)
result.append(
services.issue.LookupIssueID(mar.cnxn, issue_project_id, local_id))
return result
def convert_group_settings(group_name, setting):
"""Convert UserGroupSettings to UserGroupSettingsWrapper."""
return api_pb2_v1.UserGroupSettingsWrapper(
groupName=group_name,
who_can_view_members=setting.who_can_view_members,
ext_group_type=setting.ext_group_type,
last_sync_time=setting.last_sync_time)
def convert_component_def(cd, mar, services):
"""Convert ComponentDef PB to Component PB."""
project_name = services.project.LookupProjectNames(
mar.cnxn, [cd.project_id])[cd.project_id]
user_ids = set()
user_ids.update(
cd.admin_ids + cd.cc_ids + [cd.creator_id] + [cd.modifier_id])
user_names_dict = services.user.LookupUserEmails(mar.cnxn, list(user_ids))
component = api_pb2_v1.Component(
componentId=cd.component_id,
projectName=project_name,
componentPath=cd.path,
description=cd.docstring,
admin=sorted([user_names_dict[uid] for uid in cd.admin_ids]),
cc=sorted([user_names_dict[uid] for uid in cd.cc_ids]),
deprecated=cd.deprecated)
if cd.created:
component.created = datetime.datetime.fromtimestamp(cd.created)
component.creator = user_names_dict[cd.creator_id]
if cd.modified:
component.modified = datetime.datetime.fromtimestamp(cd.modified)
component.modifier = user_names_dict[cd.modifier_id]
return component
def convert_component_ids(config, component_names):
"""Convert a list of component names to ids."""
component_names_lower = [name.lower() for name in component_names]
result = []
for cd in config.component_defs:
cpath = cd.path
if cpath.lower() in component_names_lower:
result.append(cd.component_id)
return result
def convert_field_values(field_values, mar, services):
"""Convert user passed in field value list to FieldValue PB, or labels."""
fv_list_add = []
fv_list_remove = []
fv_list_clear = []
label_list_add = []
label_list_remove = []
field_name_dict = {
fd.field_name: fd for fd in mar.config.field_defs}
for fv in field_values:
field_def = field_name_dict.get(fv.fieldName)
if not field_def:
logging.warning('Custom field %s of does not exist', fv.fieldName)
continue
if fv.operator == api_pb2_v1.FieldValueOperator.clear:
fv_list_clear.append(field_def.field_id)
continue
# Enum fields are stored as labels
if field_def.field_type == tracker_pb2.FieldTypes.ENUM_TYPE:
raw_val = '%s-%s' % (fv.fieldName, fv.fieldValue)
if fv.operator == api_pb2_v1.FieldValueOperator.remove:
label_list_remove.append(raw_val)
elif fv.operator == api_pb2_v1.FieldValueOperator.add:
label_list_add.append(raw_val)
else:
logging.warning('Unsupported field value operater %s', fv.operator)
else:
new_fv = tracker_pb2.FieldValue(
field_id=field_def.field_id)
if field_def.field_type == tracker_pb2.FieldTypes.USER_TYPE:
try:
new_fv.user_id = services.user.LookupUserID(mar.cnxn, fv.fieldValue)
except user_svc.NoSuchUserException:
new_fv.user_id = 0
elif field_def.field_type == tracker_pb2.FieldTypes.STR_TYPE:
new_fv.str_value = fv.fieldValue
elif field_def.field_type == tracker_pb2.FieldTypes.INT_TYPE:
new_fv.int_value = int(fv.fieldValue)
else:
logging.warning(
'Unsupported field value type %s', field_def.field_type)
if fv.operator == api_pb2_v1.FieldValueOperator.remove:
fv_list_remove.append(new_fv)
elif fv.operator == api_pb2_v1.FieldValueOperator.add:
fv_list_add.append(new_fv)
else:
logging.warning('Unsupported field value operater %s', fv.operator)
return (fv_list_add, fv_list_remove, fv_list_clear,
label_list_add, label_list_remove)
| [((58, 9, 61, 42), 'proto.api_pb2_v1.Status', 'api_pb2_v1.Status', (), '', False, 'from proto import api_pb2_v1\n'), ((67, 9, 69, 40), 'proto.api_pb2_v1.Label', 'api_pb2_v1.Label', (), '', False, 'from proto import api_pb2_v1\n'), ((75, 9, 84, 52), 'proto.api_pb2_v1.Prompt', 'api_pb2_v1.Prompt', (), '', False, 'from proto import api_pb2_v1\n'), ((157, 18, 158, 44), 'tracker.tracker_bizobj.GetGrantedPerms', 'tracker_bizobj.GetGrantedPerms', ({(158, 6, 158, 11): 'issue', (158, 13, 158, 35): 'mar.auth.effective_ids', (158, 37, 158, 43): 'config'}, {}), '(issue, mar.auth.effective_ids, config)', False, 'from tracker import tracker_bizobj\n'), ((260, 9, 265, 35), 'proto.api_pb2_v1.Attachment', 'api_pb2_v1.Attachment', (), '', False, 'from proto import api_pb2_v1\n'), ((271, 11, 271, 64), 'proto.api_pb2_v1.Update', 'api_pb2_v1.Update', (), '', False, 'from proto import api_pb2_v1\n'), ((385, 9, 389, 44), 'proto.api_pb2_v1.UserGroupSettingsWrapper', 'api_pb2_v1.UserGroupSettingsWrapper', (), '', False, 'from proto import api_pb2_v1\n'), ((121, 16, 124, 31), 'proto.api_pb2_v1.IssueRef', 'api_pb2_v1.IssueRef', (), '', False, 'from proto import api_pb2_v1\n'), ((184, 13, 187, 27), 'proto.api_pb2_v1.FieldValue', 'api_pb2_v1.FieldValue', (), '', False, 'from proto import api_pb2_v1\n'), ((220, 18, 220, 73), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', ({(220, 50, 220, 72): 'issue.closed_timestamp'}, {}), '(issue.closed_timestamp)', False, 'import datetime\n'), ((224, 26, 225, 39), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', ({(225, 8, 225, 38): 'issue.owner_modified_timestamp'}, {}), '(issue.owner_modified_timestamp)', False, 'import datetime\n'), ((227, 27, 228, 40), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', ({(228, 8, 228, 39): 'issue.status_modified_timestamp'}, {}), '(issue.status_modified_timestamp)', False, 'import datetime\n'), ((230, 30, 231, 43), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', ({(231, 8, 231, 42): 'issue.component_modified_timestamp'}, {}), '(issue.component_modified_timestamp)', False, 'import datetime\n'), ((241, 6, 241, 40), 'framework.permissions.GetRestrictions', 'permissions.GetRestrictions', ({(241, 34, 241, 39): 'issue'}, {}), '(issue)', False, 'from framework import permissions\n'), ((409, 24, 409, 67), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', ({(409, 56, 409, 66): 'cd.created'}, {}), '(cd.created)', False, 'import datetime\n'), ((412, 25, 412, 69), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', ({(412, 57, 412, 68): 'cd.modified'}, {}), '(cd.modified)', False, 'import datetime\n'), ((173, 6, 174, 62), 'logging.warning', 'logging.warning', ({(173, 22, 173, 68): '"""Custom field %d of project %s does not exist"""', (174, 22, 174, 33): 'fv.field_id', (174, 35, 174, 61): 'issue_project.project_name'}, {}), "('Custom field %d of project %s does not exist', fv.field_id,\n issue_project.project_name)", False, 'import logging\n'), ((208, 14, 208, 71), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', ({(208, 46, 208, 70): 'issue.modified_timestamp'}, {}), '(issue.modified_timestamp)', False, 'import datetime\n'), ((209, 16, 209, 71), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', ({(209, 48, 209, 70): 'issue.opened_timestamp'}, {}), '(issue.opened_timestamp)', False, 'import datetime\n'), ((212, 17, 214, 38), 'framework.permissions.CanCommentIssue', 'permissions.CanCommentIssue', 
(), '', False, 'from framework import permissions\n'), ((215, 14, 217, 38), 'framework.permissions.CanEditIssue', 'permissions.CanEditIssue', (), '', False, 'from framework import permissions\n'), ((252, 16, 252, 66), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', ({(252, 48, 252, 65): 'comment.timestamp'}, {}), '(comment.timestamp)', False, 'import datetime\n'), ((441, 6, 441, 72), 'logging.warning', 'logging.warning', ({(441, 22, 441, 57): '"""Custom field %s of does not exist"""', (441, 59, 441, 71): 'fv.fieldName'}, {}), "('Custom field %s of does not exist', fv.fieldName)", False, 'import logging\n'), ((458, 15, 459, 38), 'proto.tracker_pb2.FieldValue', 'tracker_pb2.FieldValue', (), '', False, 'from proto import tracker_pb2\n'), ((103, 19, 103, 30), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((371, 14, 372, 55), 'services.project_svc.NoSuchProjectException', 'project_svc.NoSuchProjectException', ({(372, 12, 372, 54): "('Project %s does not exist' % project_name)"}, {}), "('Project %s does not exist' % project_name)", False, 'from services import project_svc\n'), ((108, 36, 108, 67), 'framework.framework_helpers.GetHostPort', 'framework_helpers.GetHostPort', ({}, {}), '()', False, 'from framework import framework_helpers\n'), ((146, 8, 147, 73), 'logging.warning', 'logging.warning', ({(147, 12, 147, 72): "('Issue (%s:%d) does not exist.' % (ir.projectId, ir.issueId))"}, {}), "('Issue (%s:%d) does not exist.' % (ir.projectId, ir.issueId))", False, 'import logging\n'), ((201, 17, 201, 48), 'tracker.tracker_bizobj.GetStatus', 'tracker_bizobj.GetStatus', ({(201, 42, 201, 47): 'issue'}, {}), '(issue)', False, 'from tracker import tracker_bizobj\n'), ((456, 8, 456, 75), 'logging.warning', 'logging.warning', ({(456, 24, 456, 61): '"""Unsupported field value operater %s"""', (456, 63, 456, 74): 'fv.operator'}, {}), "('Unsupported field value operater %s', fv.operator)", False, 'import logging\n'), ((478, 8, 478, 75), 'logging.warning', 'logging.warning', ({(478, 24, 478, 61): '"""Unsupported field value operater %s"""', (478, 63, 478, 74): 'fv.operator'}, {}), "('Unsupported field value operater %s', fv.operator)", False, 'import logging\n'), ((470, 8, 471, 68), 'logging.warning', 'logging.warning', ({(471, 12, 471, 45): '"""Unsupported field value type %s"""', (471, 47, 471, 67): 'field_def.field_type'}, {}), "('Unsupported field value type %s', field_def.field_type)", False, 'import logging\n'), ((305, 11, 305, 34), 'proto.api_pb2_v1.FieldValue', 'api_pb2_v1.FieldValue', ({}, {}), '()', False, 'from proto import api_pb2_v1\n')] |
mhmgad/ExCut | excut/feedback/rulebased_deduction/deduction_engine_extended.py | 09e943a23207381de3c3a9e6f70015882b8ec4af | """
This module contains the rule-based inference (rulebased_deduction) engine.
"""
import itertools
from collections import defaultdict
from itertools import chain
from excut.explanations_mining.descriptions import dump_explanations_to_file
from excut.explanations_mining.descriptions_new import Description2, Atom, load_from_file
from excut.explanations_mining.explaining_engines_extended import PathBasedClustersExplainerExtended
from excut.explanations_mining.simple_miner.description_miner_extended import DescriptionMinerExtended, ExplanationStructure
from excut.kg.kg_query_interface_extended import EndPointKGQueryInterfaceExtended, KGQueryInterfaceExtended
from excut.kg.kg_indexing import Indexer
from excut.kg.utils.data_formating import n3_repr
from excut.utils.logging import logger
from excut.kg.utils.Constants import DEFUALT_AUX_RELATION
from excut.clustering import target_entities as tes
class Prediction:
"""
An object to represent the prediction of the rules
:ivar triple: the predicted triple
:ivar all_sources: all rules that predicted the same triple
"""
# def __init__(self, triple: tuple, source_description=Description(), all_sources=None):
def __init__(self, triple=None, sources=None):
self.triple = triple
        # self.source_description = source_description
self.all_sources = sources if sources else list() # sources if sources else {source_description}
def get_subject(self):
return self.triple[0]
def get_object(self):
return self.triple[2]
def get_quality(self, measure='x_coverage', method=max):
# return self.source_description.get_quality(measure)
return method([source.get_quality(measure) for source in self.all_sources])
def get_main_description(self, measure='x_coverage', method=max):
return method(self.all_sources, key=lambda d: d.get_quality(measure))
def __str__(self):
return str(self.triple) + '<<' + str(self.get_main_description())
def __repr__(self):
return "%s\t(\t%s,%s)" % (self.__class__.__name__, repr(self.triple), repr(self.all_sources))
def __eq__(self, other):
return other.triple == self.triple
def __hash__(self):
return hash(self.triple)
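# Note: two Prediction objects compare equal when they predict the same triple;
# consolidate() below merges predictions per (subject, object) so that all rules
# inferring the same fact end up together in all_sources.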
class DeductionEngine():
"""
Abstract rulebased_deduction/inference engine.
"""
def __init__(self, **kwargs):
pass
def infer(self, descriptions, recursive=False, topk=-1):
pass
class SparqlBasedDeductionEngineExtended(DeductionEngine):
"""
    Deduction engine that converts the rules to SPARQL and fires them over the KG.
    The deduction engine takes care of consolidating similar predictions.
"""
def __init__(self, kg_query_interface: KGQueryInterfaceExtended, relation=DEFUALT_AUX_RELATION, quality='x_coverage', quality_aggregation=max):
"""
:param kg_query_interface: interface for the KG.
:param relation: the relation used in the predicted triple (optional)
:param quality: objective quality measure for ranking the predictions (optional) by default
the exclusive coverage of the rules is used
        :param quality_aggregation: the method used for aggregating the score if multiple rules infer the same fact
(optional) by default max is used.
"""
super(SparqlBasedDeductionEngineExtended, self).__init__()
self.relation = relation
self.query_executer = kg_query_interface
self.quality = quality
self.quality_aggregation = quality_aggregation
self.labels_indexer=Indexer(store=kg_query_interface.type,
endpoint=kg_query_interface.endpoint,
graph= kg_query_interface.labels_graph,
identifier=kg_query_interface.labels_identifier)
def infer(self, descriptions_list, target_entities=None, min_quality=0, topk=-1, output_filepath=None,
clear_target_entities=True):
"""
Infer new facts for a giving set of descriptions
        :param descriptions_list: list of explanation/description rules
:param target_entities: entities and their labels for which predictions are generated
:param min_quality: minimum aggregated quality for the predictions
:param topk: k *distinct* highest quality predictions per entity,
:param output_filepath: predictions output file.
:param clear_target_entities: clear indexed target entities after done inference
:return: dictionary of predicted entity-clusters assignments
"""
if isinstance(descriptions_list,dict):
descriptions_list=list(itertools.chain.from_iterable(descriptions_list.values()))
if target_entities:
self.labels_indexer.index_triples(target_entities)
self.relation=target_entities.get_relation()
predictions = list(map(self._infer_single, descriptions_list))
per_entity_predictions = self.consolidate(predictions)
per_entity_predictions = self._merge_and_sort_cut(per_entity_predictions, min_quality, topk=topk)
if output_filepath:
dump_predictions_map(per_entity_predictions, output_filepath, triple_format=True, topk=topk, with_weight=True,
with_description=False, quality=self.quality)
if target_entities and clear_target_entities:
self.labels_indexer.drop()
return per_entity_predictions
def consolidate(self, predictions):
"""
Combine predictions from different rules
:param predictions: list of generated predictions
:return: combined single prediction with several sources for equivalent predictions
:rtype: dict
"""
# per_var_predictions = defaultdict(lambda: defaultdict(list))
# for p in chain.from_iterable(predictions):
# per_var_predictions[p.get_subject()][p.get_object()].append(p)
per_entity_predictions = defaultdict(lambda: defaultdict(Prediction))
for p in list(chain.from_iterable(predictions)):
cons_pred = per_entity_predictions[p.get_subject()][p.get_object()]
cons_pred.triple = p.triple
cons_pred.all_sources += p.all_sources
return per_entity_predictions
def _merge_and_sort_cut(self, per_entity_prediction, threshold=0, topk=-1):
"""
        Merge the inferred facts in case of functional predicates.
:param per_entity_prediction:
:return:
"""
def quality_method(p):
return p.get_quality(self.quality, self.quality_aggregation)
per_entity_prediction_filtered = defaultdict(list)
for sub, per_obj_predictions in per_entity_prediction.items():
# print([(k, p.triple[2], qaulity_method(p)) for k, p in per_obj_predictions.items()])
merged_predictions = list(
filter(lambda p: quality_method(p) > threshold, list(per_obj_predictions.values())))
merged_predictions.sort(key=quality_method, reverse=True)
include = topk if topk > 0 else len(merged_predictions)
per_entity_prediction_filtered[sub] = merged_predictions[:include]
return per_entity_prediction_filtered
def _infer_single(self, description: Description2):
"""
Infer new facts for the given Description
:param description:
:return:
"""
bindings = self.query_executer.get_arguments_bindings(description,
restriction_pattern=Description2(body=[Atom('?x',
self.relation,
'?z')]))
head = description.head
# only supports p(?x,CONSTANT)
predictions = [Prediction((b, head.predicate, head.object), [description]) for b in bindings]
return predictions
def dump_predictions_map(per_var_predictions, out_filepath, triple_format=True, topk=-1, with_weight=True,
with_description=False, quality='x_coverage'):
"""
    Writes the predictions to two files: the first is human-readable, the second has a
    .parsable extension and can be parsed in Python.
:param per_var_predictions:
:param out_filepath:
:param triple_format:
:param topk:
:param with_weight:
:param with_description:
:return:
"""
out_file_parsable = out_filepath + '.parsable'
out_filepath_with_type = out_filepath + ('.%s' % quality if len(quality) > 0 else '')
with open(out_filepath_with_type, 'w') as out_file:
for var, predictions in per_var_predictions.items():
if topk > 0:
predictions = predictions[:topk]
for p in predictions:
if triple_format:
# I only output normalized_coverage
out_str = n3_repr(p.triple) + ('\t%f' % p.get_quality(quality) if with_weight else '') + (
'\t%s' % p.source_description if with_description else '')
else:
out_str = str(p)
out_file.write(out_str)
out_file.write('\n')
with open(out_file_parsable + ('.%s' % quality if len(quality) > 0 else ''), 'w') as out_file:
out_file.write('\n'.join(
map(str, chain.from_iterable(map(lambda l: l[:topk] if topk > 0 else l, per_var_predictions.values())))))
return out_filepath_with_type
if __name__ == '__main__':
target_entities=tes.load_from_file('/scratch/GW/pool0/gadelrab/ExDEC/data/yago/yago_art_3_4k.tsv')
vos_executer = EndPointKGQueryInterfaceExtended('http://halimede:8890/sparql',
['http://yago-expr.org', 'http://yago-expr.org.types'],
labels_identifier='http://yago-expr.org.labels')
explainer=PathBasedClustersExplainerExtended(vos_executer, language_bias={'max_length': 4, 'structure': ExplanationStructure.TREE})
explans=explainer.explain(target_entities,
output_file='/scratch/GW/pool0/gadelrab/ExDEC/tmp/explanations_tree.txt')
ded = SparqlBasedDeductionEngineExtended(vos_executer)
per_var_predictions = ded.infer(explans, target_entities,
output_filepath='/scratch/GW/pool0/gadelrab/ExDEC/tmp/predictions_tree.tsv')
logger.info("Total variables with predictions subjects: %i", len(per_var_predictions))
| [((237, 20, 237, 102), 'excut.clustering.target_entities.load_from_file', 'tes.load_from_file', ({(237, 39, 237, 101): '"""/scratch/GW/pool0/gadelrab/ExDEC/data/yago/yago_art_3_4k.tsv"""'}, {}), "(\n '/scratch/GW/pool0/gadelrab/ExDEC/data/yago/yago_art_3_4k.tsv')", True, 'from excut.clustering import target_entities as tes\n'), ((238, 19, 240, 100), 'excut.kg.kg_query_interface_extended.EndPointKGQueryInterfaceExtended', 'EndPointKGQueryInterfaceExtended', (), '', False, 'from excut.kg.kg_query_interface_extended import EndPointKGQueryInterfaceExtended, KGQueryInterfaceExtended\n'), ((242, 14, 242, 135), 'excut.explanations_mining.explaining_engines_extended.PathBasedClustersExplainerExtended', 'PathBasedClustersExplainerExtended', (), '', False, 'from excut.explanations_mining.explaining_engines_extended import PathBasedClustersExplainerExtended\n'), ((94, 28, 97, 84), 'excut.kg.kg_indexing.Indexer', 'Indexer', (), '', False, 'from excut.kg.kg_indexing import Indexer\n'), ((166, 41, 166, 58), 'collections.defaultdict', 'defaultdict', ({(166, 53, 166, 57): 'list'}, {}), '(list)', False, 'from collections import defaultdict\n'), ((148, 22, 148, 54), 'itertools.chain.from_iterable', 'chain.from_iterable', ({(148, 42, 148, 53): 'predictions'}, {}), '(predictions)', False, 'from itertools import chain\n'), ((147, 53, 147, 76), 'collections.defaultdict', 'defaultdict', ({(147, 65, 147, 75): 'Prediction'}, {}), '(Prediction)', False, 'from collections import defaultdict\n'), ((186, 101, 188, 111), 'excut.explanations_mining.descriptions_new.Atom', 'Atom', ({(186, 106, 186, 110): '"""?x"""', (187, 106, 187, 119): 'self.relation', (188, 106, 188, 110): '"""?z"""'}, {}), "('?x', self.relation, '?z')", False, 'from excut.explanations_mining.descriptions_new import Description2, Atom, load_from_file\n'), ((220, 30, 220, 47), 'excut.kg.utils.data_formating.n3_repr', 'n3_repr', ({(220, 38, 220, 46): 'p.triple'}, {}), '(p.triple)', False, 'from excut.kg.utils.data_formating import n3_repr\n')] |
urasakikeisuke/rigidmask | dataloader/viperlist_train.py | 4bb781102218dfd11efa767e2d0ba987d9949fd1 | import torch.utils.data as data
from PIL import Image
import os
import os.path
import numpy as np
import pdb
import glob
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def dataloader(filepath):
  """Return lists of (frame t, frame t+1, flow_occ) file paths for training."""
left_fold = 'image_2/'
train = glob.glob(filepath+left_fold+'/0*.jpg')
train = sorted(train)
l0_train = []
l1_train = []
flow_train = []
for img in train:
img1 = ('%s_%s.jpg'%(img.rsplit('_',1)[0],'%05d'%(1+int(img.split('.')[0].split('_')[-1])) ))
flowp = img.replace('.jpg', '.png').replace('image_2','flow_occ')
if (img1 in train and len(glob.glob(flowp))>0 and ('01000' not in img)):
l0_train.append(img)
l1_train.append(img1)
flow_train.append(flowp)
return l0_train, l1_train, flow_train
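# Minimal usage sketch (the path is a placeholder; assumes the image_2/ + flow_occ/
# layout expected by dataloader above):
if __name__ == '__main__':
  l0, l1, flow = dataloader('/path/to/viper/train/')
  print('%d training pairs found' % len(l0))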
| [((22, 10, 22, 49), 'glob.glob', 'glob.glob', ({(22, 20, 22, 48): "filepath + left_fold + '/0*.jpg'"}, {}), "(filepath + left_fold + '/0*.jpg')", False, 'import glob\n'), ((31, 30, 31, 46), 'glob.glob', 'glob.glob', ({(31, 40, 31, 45): 'flowp'}, {}), '(flowp)', False, 'import glob\n')] |
jsosa/floodcomparison | floodcomparison/__init__.py | c6662ae9142b4e89c6c05f93adaba49c5d8e4314 | from floodcomparison.core import floodcomparison
| [] |
crim-ca/weaver | weaver/wps_restapi/quotation/quotes.py | 107fec5e19f20b77061b9405a764da911d2db8a2 | import logging
import random
from datetime import timedelta
from typing import TYPE_CHECKING
from duration import to_iso8601
from pyramid.httpexceptions import HTTPBadRequest, HTTPCreated, HTTPNotFound, HTTPOk
from weaver import sort
from weaver.config import WEAVER_CONFIGURATION_ADES, WEAVER_CONFIGURATION_EMS, get_weaver_configuration
from weaver.database import get_db
from weaver.datatype import Bill, Quote
from weaver.exceptions import ProcessNotFound, QuoteNotFound, log_unhandled_exceptions
from weaver.formats import OUTPUT_FORMAT_JSON
from weaver.processes.types import PROCESS_APPLICATION, PROCESS_WORKFLOW
from weaver.processes.wps_package import get_package_workflow_steps, get_process_location
from weaver.store.base import StoreBills, StoreQuotes
from weaver.utils import get_settings, get_weaver_url
from weaver.wps_restapi import swagger_definitions as sd
from weaver.wps_restapi.processes.processes import submit_local_job
if TYPE_CHECKING:
from weaver.datatype import Process
from weaver.typedefs import JSON
LOGGER = logging.getLogger(__name__)
def process_quote_estimator(process): # noqa: E811
# type: (Process) -> JSON
"""
Simulate quote parameters for the process execution.
:param process: instance of :class:`weaver.datatype.Process` for which to evaluate the quote.
:return: dict of {price, currency, estimatedTime} values for the process quote.
"""
# TODO: replace by some fancy ml technique or something?
price = random.uniform(0, 10) # nosec
currency = "CAD"
estimated_time = to_iso8601(timedelta(minutes=random.uniform(5, 60))) # nosec
return {"price": price, "currency": currency, "estimatedTime": estimated_time}
@sd.process_quotes_service.post(tags=[sd.TAG_BILL_QUOTE, sd.TAG_PROCESSES], renderer=OUTPUT_FORMAT_JSON,
schema=sd.PostProcessQuoteRequestEndpoint(), response_schemas=sd.post_quotes_responses)
@log_unhandled_exceptions(logger=LOGGER, message=sd.InternalServerErrorResponseSchema.description)
def request_quote(request):
"""
Request a quotation for a process.
"""
settings = get_settings(request)
weaver_config = get_weaver_configuration(settings)
if weaver_config not in [WEAVER_CONFIGURATION_ADES, WEAVER_CONFIGURATION_EMS]:
raise HTTPBadRequest("Unsupported request for configuration '{}'.".format(weaver_config))
process_id = request.matchdict.get("process_id")
process_store = get_db(request).get_store("processes")
try:
process = process_store.fetch_by_id(process_id)
except ProcessNotFound:
raise HTTPNotFound("Could not find process with specified 'process_id'.")
store = get_db(request).get_store(StoreQuotes)
process_url = get_process_location(process_id, data_source=get_weaver_url(settings))
process_type = process.type
process_params = dict()
for param in ["inputs", "outputs", "mode", "response"]:
if param in request.json:
process_params[param] = request.json.pop(param)
process_quote_info = process_quote_estimator(process)
process_quote_info.update({
"process": process_id,
"processParameters": process_params,
"location": process_url,
"user": str(request.authenticated_userid)
})
# loop workflow sub-process steps to get individual quotes
if process_type == PROCESS_WORKFLOW and weaver_config == WEAVER_CONFIGURATION_EMS:
workflow_quotes = list()
for step in get_package_workflow_steps(process_url):
# retrieve quote from provider ADES
# TODO: data source mapping
process_step_url = get_process_location(step["reference"])
process_quote_url = "{}/quotations".format(process_step_url)
subreq = request.copy()
subreq.path_info = process_quote_url
resp_json = request.invoke_subrequest(subreq).json()
quote_json = resp_json["quote"]
quote = store.save_quote(Quote(**quote_json))
workflow_quotes.append(quote.id)
process_quote_info.update({"steps": workflow_quotes})
quote = store.save_quote(Quote(**process_quote_info))
return HTTPCreated(json={"quote": quote.json()})
# single application quotes (ADES or EMS)
elif process_type == PROCESS_APPLICATION:
quote = store.save_quote(Quote(**process_quote_info))
quote_json = quote.json()
quote_json.pop("steps", None)
return HTTPCreated(json={"quote": quote_json})
# error if not handled up to this point
raise HTTPBadRequest("Unsupported quoting process type '{0}' on '{1}'.".format(process_type, weaver_config))
@sd.process_quotes_service.get(tags=[sd.TAG_BILL_QUOTE, sd.TAG_PROCESSES], renderer=OUTPUT_FORMAT_JSON,
schema=sd.ProcessQuotesEndpoint(), response_schemas=sd.get_quote_list_responses)
@sd.quotes_service.get(tags=[sd.TAG_BILL_QUOTE], renderer=OUTPUT_FORMAT_JSON,
schema=sd.QuotesEndpoint(), response_schemas=sd.get_quote_list_responses)
@log_unhandled_exceptions(logger=LOGGER, message=sd.InternalServerErrorResponseSchema.description)
def get_quote_list(request):
"""
Get list of quotes IDs.
"""
page = int(request.params.get("page", "0"))
limit = int(request.params.get("limit", "10"))
filters = {
"process_id": request.params.get("process", None) or request.matchdict.get("process_id", None),
"page": page,
"limit": limit,
"sort": request.params.get("sort", sort.SORT_CREATED),
}
store = get_db(request).get_store(StoreQuotes)
items, count = store.find_quotes(**filters)
return HTTPOk(json={
"count": count,
"page": page,
"limit": limit,
"quotes": [quote.id for quote in items]
})
@sd.process_quote_service.get(tags=[sd.TAG_BILL_QUOTE, sd.TAG_PROCESSES], renderer=OUTPUT_FORMAT_JSON,
schema=sd.ProcessQuoteEndpoint(), response_schemas=sd.get_quote_responses)
@sd.quote_service.get(tags=[sd.TAG_BILL_QUOTE], renderer=OUTPUT_FORMAT_JSON,
schema=sd.QuoteEndpoint(), response_schemas=sd.get_quote_responses)
@log_unhandled_exceptions(logger=LOGGER, message=sd.InternalServerErrorResponseSchema.description)
def get_quote_info(request):
"""
Get quote information.
"""
quote_id = request.matchdict.get("quote_id")
store = get_db(request).get_store(StoreQuotes)
try:
quote = store.fetch_by_id(quote_id)
except QuoteNotFound:
raise HTTPNotFound("Could not find quote with specified 'quote_id'.")
return HTTPOk(json={"quote": quote.json()})
@sd.process_quote_service.post(tags=[sd.TAG_BILL_QUOTE, sd.TAG_EXECUTE, sd.TAG_PROCESSES], renderer=OUTPUT_FORMAT_JSON,
schema=sd.PostProcessQuote(), response_schemas=sd.post_quote_responses)
@sd.quote_service.post(tags=[sd.TAG_BILL_QUOTE, sd.TAG_EXECUTE], renderer=OUTPUT_FORMAT_JSON,
schema=sd.PostQuote(), response_schemas=sd.post_quote_responses)
@log_unhandled_exceptions(logger=LOGGER, message=sd.InternalServerErrorResponseSchema.description)
def execute_quote(request):
"""
Execute a quoted process.
"""
quote_info = get_quote_info(request).json["quote"]
quote_bill_info = {
"quote": quote_info.get("id"),
"price": quote_info.get("price"),
"currency": quote_info.get("currency")
}
job_resp = submit_local_job(request)
job_json = job_resp.json
job_id = job_json.get("jobID")
user_id = str(request.authenticated_userid)
store = get_db(request).get_store(StoreBills)
bill = store.save_bill(Bill(user=user_id, job=job_id, **quote_bill_info))
job_json.update({"bill": bill.id})
return HTTPCreated(json=job_json)
| [((26, 9, 26, 36), 'logging.getLogger', 'logging.getLogger', ({(26, 27, 26, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((46, 1, 46, 98), 'weaver.exceptions.log_unhandled_exceptions', 'log_unhandled_exceptions', (), '', False, 'from weaver.exceptions import ProcessNotFound, QuoteNotFound, log_unhandled_exceptions\n'), ((114, 1, 114, 98), 'weaver.exceptions.log_unhandled_exceptions', 'log_unhandled_exceptions', (), '', False, 'from weaver.exceptions import ProcessNotFound, QuoteNotFound, log_unhandled_exceptions\n'), ((142, 1, 142, 98), 'weaver.exceptions.log_unhandled_exceptions', 'log_unhandled_exceptions', (), '', False, 'from weaver.exceptions import ProcessNotFound, QuoteNotFound, log_unhandled_exceptions\n'), ((160, 1, 160, 98), 'weaver.exceptions.log_unhandled_exceptions', 'log_unhandled_exceptions', (), '', False, 'from weaver.exceptions import ProcessNotFound, QuoteNotFound, log_unhandled_exceptions\n'), ((38, 12, 38, 33), 'random.uniform', 'random.uniform', ({(38, 27, 38, 28): '0', (38, 30, 38, 32): '10'}, {}), '(0, 10)', False, 'import random\n'), ((51, 15, 51, 36), 'weaver.utils.get_settings', 'get_settings', ({(51, 28, 51, 35): 'request'}, {}), '(request)', False, 'from weaver.utils import get_settings, get_weaver_url\n'), ((52, 20, 52, 54), 'weaver.config.get_weaver_configuration', 'get_weaver_configuration', ({(52, 45, 52, 53): 'settings'}, {}), '(settings)', False, 'from weaver.config import WEAVER_CONFIGURATION_ADES, WEAVER_CONFIGURATION_EMS, get_weaver_configuration\n'), ((130, 11, 135, 6), 'pyramid.httpexceptions.HTTPOk', 'HTTPOk', (), '', False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPCreated, HTTPNotFound, HTTPOk\n'), ((171, 15, 171, 40), 'weaver.wps_restapi.processes.processes.submit_local_job', 'submit_local_job', ({(171, 32, 171, 39): 'request'}, {}), '(request)', False, 'from weaver.wps_restapi.processes.processes import submit_local_job\n'), ((178, 11, 178, 37), 'pyramid.httpexceptions.HTTPCreated', 'HTTPCreated', (), '', False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPCreated, HTTPNotFound, HTTPOk\n'), ((83, 20, 83, 59), 'weaver.processes.wps_package.get_package_workflow_steps', 'get_package_workflow_steps', ({(83, 47, 83, 58): 'process_url'}, {}), '(process_url)', False, 'from weaver.processes.wps_package import get_package_workflow_steps, get_process_location\n'), ((45, 39, 45, 75), 'weaver.wps_restapi.swagger_definitions.PostProcessQuoteRequestEndpoint', 'sd.PostProcessQuoteRequestEndpoint', ({}, {}), '()', True, 'from weaver.wps_restapi import swagger_definitions as sd\n'), ((111, 38, 111, 64), 'weaver.wps_restapi.swagger_definitions.ProcessQuotesEndpoint', 'sd.ProcessQuotesEndpoint', ({}, {}), '()', True, 'from weaver.wps_restapi import swagger_definitions as sd\n'), ((113, 30, 113, 49), 'weaver.wps_restapi.swagger_definitions.QuotesEndpoint', 'sd.QuotesEndpoint', ({}, {}), '()', True, 'from weaver.wps_restapi import swagger_definitions as sd\n'), ((139, 37, 139, 62), 'weaver.wps_restapi.swagger_definitions.ProcessQuoteEndpoint', 'sd.ProcessQuoteEndpoint', ({}, {}), '()', True, 'from weaver.wps_restapi import swagger_definitions as sd\n'), ((141, 29, 141, 47), 'weaver.wps_restapi.swagger_definitions.QuoteEndpoint', 'sd.QuoteEndpoint', ({}, {}), '()', True, 'from weaver.wps_restapi import swagger_definitions as sd\n'), ((176, 27, 176, 76), 'weaver.datatype.Bill', 'Bill', (), '', False, 'from weaver.datatype import Bill, Quote\n'), ((157, 38, 157, 59), 
'weaver.wps_restapi.swagger_definitions.PostProcessQuote', 'sd.PostProcessQuote', ({}, {}), '()', True, 'from weaver.wps_restapi import swagger_definitions as sd\n'), ((159, 30, 159, 44), 'weaver.wps_restapi.swagger_definitions.PostQuote', 'sd.PostQuote', ({}, {}), '()', True, 'from weaver.wps_restapi import swagger_definitions as sd\n'), ((58, 20, 58, 35), 'weaver.database.get_db', 'get_db', ({(58, 27, 58, 34): 'request'}, {}), '(request)', False, 'from weaver.database import get_db\n'), ((62, 14, 62, 81), 'pyramid.httpexceptions.HTTPNotFound', 'HTTPNotFound', ({(62, 27, 62, 80): '"""Could not find process with specified \'process_id\'."""'}, {}), '("Could not find process with specified \'process_id\'.")', False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPCreated, HTTPNotFound, HTTPOk\n'), ((64, 12, 64, 27), 'weaver.database.get_db', 'get_db', ({(64, 19, 64, 26): 'request'}, {}), '(request)', False, 'from weaver.database import get_db\n'), ((65, 63, 65, 87), 'weaver.utils.get_weaver_url', 'get_weaver_url', ({(65, 78, 65, 86): 'settings'}, {}), '(settings)', False, 'from weaver.utils import get_settings, get_weaver_url\n'), ((86, 31, 86, 70), 'weaver.processes.wps_package.get_process_location', 'get_process_location', ({(86, 52, 86, 69): "step['reference']"}, {}), "(step['reference'])", False, 'from weaver.processes.wps_package import get_package_workflow_steps, get_process_location\n'), ((96, 33, 96, 60), 'weaver.datatype.Quote', 'Quote', ({}, {}), '(**process_quote_info)', False, 'from weaver.datatype import Bill, Quote\n'), ((104, 15, 104, 54), 'pyramid.httpexceptions.HTTPCreated', 'HTTPCreated', (), '', False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPCreated, HTTPNotFound, HTTPOk\n'), ((128, 12, 128, 27), 'weaver.database.get_db', 'get_db', ({(128, 19, 128, 26): 'request'}, {}), '(request)', False, 'from weaver.database import get_db\n'), ((148, 12, 148, 27), 'weaver.database.get_db', 'get_db', ({(148, 19, 148, 26): 'request'}, {}), '(request)', False, 'from weaver.database import get_db\n'), ((152, 14, 152, 77), 'pyramid.httpexceptions.HTTPNotFound', 'HTTPNotFound', ({(152, 27, 152, 76): '"""Could not find quote with specified \'quote_id\'."""'}, {}), '("Could not find quote with specified \'quote_id\'.")', False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPCreated, HTTPNotFound, HTTPOk\n'), ((175, 12, 175, 27), 'weaver.database.get_db', 'get_db', ({(175, 19, 175, 26): 'request'}, {}), '(request)', False, 'from weaver.database import get_db\n'), ((40, 50, 40, 71), 'random.uniform', 'random.uniform', ({(40, 65, 40, 66): '5', (40, 68, 40, 70): '60'}, {}), '(5, 60)', False, 'import random\n'), ((92, 37, 92, 56), 'weaver.datatype.Quote', 'Quote', ({}, {}), '(**quote_json)', False, 'from weaver.datatype import Bill, Quote\n'), ((101, 33, 101, 60), 'weaver.datatype.Quote', 'Quote', ({}, {}), '(**process_quote_info)', False, 'from weaver.datatype import Bill, Quote\n')] |
agustinhenze/mibs.snmplabs.com | pysnmp/CISCO-VSI-CONTROLLER-MIB.py | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | #
# PySNMP MIB module CISCO-VSI-CONTROLLER-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-VSI-CONTROLLER-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:03:33 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
ModuleCompliance, NotificationGroup, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup", "ObjectGroup")
ObjectIdentity, NotificationType, Gauge32, Bits, Unsigned32, IpAddress, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, Counter32, Counter64, iso, Integer32, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "NotificationType", "Gauge32", "Bits", "Unsigned32", "IpAddress", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "Counter32", "Counter64", "iso", "Integer32", "TimeTicks")
TextualConvention, RowStatus, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "RowStatus", "DisplayString")
ciscoVSIControllerMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 141))
if mibBuilder.loadTexts: ciscoVSIControllerMIB.setLastUpdated('9906080000Z')
if mibBuilder.loadTexts: ciscoVSIControllerMIB.setOrganization('Cisco Systems, Inc.')
class CvcControllerShelfLocation(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("internal", 1), ("external", 2))
class CvcControllerType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
namedValues = NamedValues(("par", 1), ("pnni", 2), ("lsc", 3))
cvcMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 141, 1))
cvcConfController = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 141, 1, 1))
cvcConfTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 141, 1, 1, 1), )
if mibBuilder.loadTexts: cvcConfTable.setStatus('current')
cvcConfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 141, 1, 1, 1, 1), ).setIndexNames((0, "CISCO-VSI-CONTROLLER-MIB", "cvcConfControllerID"))
if mibBuilder.loadTexts: cvcConfEntry.setStatus('current')
cvcConfControllerID = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 141, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: cvcConfControllerID.setStatus('current')
cvcConfControllerType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 141, 1, 1, 1, 1, 2), CvcControllerType()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cvcConfControllerType.setStatus('current')
cvcConfControllerShelfLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 141, 1, 1, 1, 1, 3), CvcControllerShelfLocation()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cvcConfControllerShelfLocation.setStatus('current')
cvcConfControllerLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 141, 1, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cvcConfControllerLocation.setStatus('current')
cvcConfControllerName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 141, 1, 1, 1, 1, 5), DisplayString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cvcConfControllerName.setStatus('current')
cvcConfVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 141, 1, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4095))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cvcConfVpi.setStatus('current')
cvcConfVci = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 141, 1, 1, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(32, 65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cvcConfVci.setStatus('current')
cvcConfRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 141, 1, 1, 1, 1, 8), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cvcConfRowStatus.setStatus('current')
cvcMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 141, 3))
cvcMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 141, 3, 1))
cvcMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 141, 3, 2))
cvcMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 141, 3, 1, 1)).setObjects(("CISCO-VSI-CONTROLLER-MIB", "cvcConfGroup"), ("CISCO-VSI-CONTROLLER-MIB", "cvcConfGroupExternal"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cvcMIBCompliance = cvcMIBCompliance.setStatus('current')
cvcConfGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 141, 3, 2, 1)).setObjects(("CISCO-VSI-CONTROLLER-MIB", "cvcConfControllerType"), ("CISCO-VSI-CONTROLLER-MIB", "cvcConfControllerShelfLocation"), ("CISCO-VSI-CONTROLLER-MIB", "cvcConfControllerLocation"), ("CISCO-VSI-CONTROLLER-MIB", "cvcConfControllerName"), ("CISCO-VSI-CONTROLLER-MIB", "cvcConfRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cvcConfGroup = cvcConfGroup.setStatus('current')
cvcConfGroupExternal = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 141, 3, 2, 2)).setObjects(("CISCO-VSI-CONTROLLER-MIB", "cvcConfVpi"), ("CISCO-VSI-CONTROLLER-MIB", "cvcConfVci"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cvcConfGroupExternal = cvcConfGroupExternal.setStatus('current')
mibBuilder.exportSymbols("CISCO-VSI-CONTROLLER-MIB", cvcConfTable=cvcConfTable, cvcMIBGroups=cvcMIBGroups, cvcConfControllerType=cvcConfControllerType, cvcConfVpi=cvcConfVpi, CvcControllerShelfLocation=CvcControllerShelfLocation, cvcConfControllerLocation=cvcConfControllerLocation, cvcConfController=cvcConfController, cvcConfControllerName=cvcConfControllerName, PYSNMP_MODULE_ID=ciscoVSIControllerMIB, cvcConfControllerID=cvcConfControllerID, cvcConfGroupExternal=cvcConfGroupExternal, cvcMIBCompliance=cvcMIBCompliance, cvcConfEntry=cvcConfEntry, ciscoVSIControllerMIB=ciscoVSIControllerMIB, cvcConfControllerShelfLocation=cvcConfControllerShelfLocation, cvcConfRowStatus=cvcConfRowStatus, cvcConfGroup=cvcConfGroup, CvcControllerType=CvcControllerType, cvcConfVci=cvcConfVci, cvcMIBObjects=cvcMIBObjects, cvcMIBCompliances=cvcMIBCompliances, cvcMIBConformance=cvcMIBConformance)
| [] |
AartGoossens/streamlit-activity-viewer | strava.py | b43f157d8bee596908c4f2222be9bb0d8bd9b9e8 | import base64
import os
import arrow
import httpx
import streamlit as st
import sweat
from bokeh.models.widgets import Div
APP_URL = os.environ["APP_URL"]
STRAVA_CLIENT_ID = os.environ["STRAVA_CLIENT_ID"]
STRAVA_CLIENT_SECRET = os.environ["STRAVA_CLIENT_SECRET"]
STRAVA_AUTHORIZATION_URL = "https://www.strava.com/oauth/authorize"
STRAVA_API_BASE_URL = "https://www.strava.com/api/v3"
DEFAULT_ACTIVITY_LABEL = "NO_ACTIVITY_SELECTED"
STRAVA_ORANGE = "#fc4c02"
@st.cache(show_spinner=False)
def load_image_as_base64(image_path):
with open(image_path, "rb") as f:
contents = f.read()
return base64.b64encode(contents).decode("utf-8")
def powered_by_strava_logo():
base64_image = load_image_as_base64("./static/api_logo_pwrdBy_strava_horiz_light.png")
st.markdown(
f'<img src="data:image/png;base64,{base64_image}" width="100%" alt="powered by strava">',
unsafe_allow_html=True,
)
def authorization_url():
request = httpx.Request(
method="GET",
url=STRAVA_AUTHORIZATION_URL,
params={
"client_id": STRAVA_CLIENT_ID,
"redirect_uri": APP_URL,
"response_type": "code",
"approval_prompt": "auto",
"scope": "activity:read_all"
}
)
return request.url
def login_header(header=None):
strava_authorization_url = authorization_url()
if header is None:
base = st
else:
col1, _, _, button = header
base = button
with col1:
powered_by_strava_logo()
base64_image = load_image_as_base64("./static/[email protected]")
base.markdown(
(
f"<a href=\"{strava_authorization_url}\">"
f" <img alt=\"strava login\" src=\"data:image/png;base64,{base64_image}\" width=\"100%\">"
f"</a>"
),
unsafe_allow_html=True,
)
def logout_header(header=None):
if header is None:
base = st
else:
_, col2, _, button = header
base = button
with col2:
powered_by_strava_logo()
if base.button("Log out"):
js = f"window.location.href = '{APP_URL}'"
html = f"<img src onerror=\"{js}\">"
div = Div(text=html)
st.bokeh_chart(div)
def logged_in_title(strava_auth, header=None):
if header is None:
base = st
else:
col, _, _, _ = header
base = col
first_name = strava_auth["athlete"]["firstname"]
last_name = strava_auth["athlete"]["lastname"]
col.markdown(f"*Welcome, {first_name} {last_name}!*")
@st.cache(show_spinner=False, suppress_st_warning=True)
def exchange_authorization_code(authorization_code):
response = httpx.post(
url="https://www.strava.com/oauth/token",
json={
"client_id": STRAVA_CLIENT_ID,
"client_secret": STRAVA_CLIENT_SECRET,
"code": authorization_code,
"grant_type": "authorization_code",
}
)
try:
response.raise_for_status()
except httpx.HTTPStatusError:
st.error("Something went wrong while authenticating with Strava. Please reload and try again")
st.experimental_set_query_params()
st.stop()
return
strava_auth = response.json()
return strava_auth
def authenticate(header=None, stop_if_unauthenticated=True):
query_params = st.experimental_get_query_params()
authorization_code = query_params.get("code", [None])[0]
if authorization_code is None:
authorization_code = query_params.get("session", [None])[0]
if authorization_code is None:
login_header(header=header)
if stop_if_unauthenticated:
st.stop()
return
else:
logout_header(header=header)
strava_auth = exchange_authorization_code(authorization_code)
logged_in_title(strava_auth, header)
st.experimental_set_query_params(session=authorization_code)
return strava_auth
def header():
col1, col2, col3 = st.beta_columns(3)
with col3:
strava_button = st.empty()
return col1, col2, col3, strava_button
@st.cache(show_spinner=False)
def get_activities(auth, page=1):
access_token = auth["access_token"]
response = httpx.get(
url=f"{STRAVA_API_BASE_URL}/athlete/activities",
params={
"page": page,
},
headers={
"Authorization": f"Bearer {access_token}",
},
)
return response.json()
def activity_label(activity):
if activity["name"] == DEFAULT_ACTIVITY_LABEL:
return ""
start_date = arrow.get(activity["start_date_local"])
human_readable_date = start_date.humanize(granularity=["day"])
date_string = start_date.format("YYYY-MM-DD")
return f"{activity['name']} - {date_string} ({human_readable_date})"
def select_strava_activity(auth):
col1, col2 = st.beta_columns([1, 3])
with col1:
page = st.number_input(
label="Activities page",
min_value=1,
help="The Strava API returns your activities in chunks of 30. Increment this field to go to the next page.",
)
with col2:
activities = get_activities(auth=auth, page=page)
if not activities:
st.info("This Strava account has no activities or you ran out of pages.")
st.stop()
default_activity = {"name": DEFAULT_ACTIVITY_LABEL, "start_date_local": ""}
activity = st.selectbox(
label="Select an activity",
options=[default_activity] + activities,
format_func=activity_label,
)
if activity["name"] == DEFAULT_ACTIVITY_LABEL:
st.write("No activity selected")
st.stop()
return
activity_url = f"https://www.strava.com/activities/{activity['id']}"
st.markdown(
f"<a href=\"{activity_url}\" style=\"color:{STRAVA_ORANGE};\">View on Strava</a>",
unsafe_allow_html=True
)
return activity
@st.cache(show_spinner=False, max_entries=30, allow_output_mutation=True)
def download_activity(activity, strava_auth):
with st.spinner(f"Downloading activity \"{activity['name']}\"..."):
return sweat.read_strava(activity["id"], strava_auth["access_token"])
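# Minimal sketch of how the helpers above compose into a page (assumes this module is
# executed directly via `streamlit run`; the real app may wire things differently):
if __name__ == "__main__":
    page_header = header()
    strava_auth = authenticate(header=page_header, stop_if_unauthenticated=True)
    activity = select_strava_activity(strava_auth)
    activity_data = download_activity(activity, strava_auth)
    st.write(activity_data.head())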
| [((21, 1, 21, 29), 'streamlit.cache', 'st.cache', (), '', True, 'import streamlit as st\n'), ((105, 1, 105, 55), 'streamlit.cache', 'st.cache', (), '', True, 'import streamlit as st\n'), ((159, 1, 159, 29), 'streamlit.cache', 'st.cache', (), '', True, 'import streamlit as st\n'), ((224, 1, 224, 73), 'streamlit.cache', 'st.cache', (), '', True, 'import streamlit as st\n'), ((30, 4, 33, 5), 'streamlit.markdown', 'st.markdown', (), '', True, 'import streamlit as st\n'), ((37, 14, 47, 5), 'httpx.Request', 'httpx.Request', (), '', False, 'import httpx\n'), ((107, 15, 115, 5), 'httpx.post', 'httpx.post', (), '', False, 'import httpx\n'), ((130, 19, 130, 53), 'streamlit.experimental_get_query_params', 'st.experimental_get_query_params', ({}, {}), '()', True, 'import streamlit as st\n'), ((151, 23, 151, 41), 'streamlit.beta_columns', 'st.beta_columns', ({(151, 39, 151, 40): '3'}, {}), '(3)', True, 'import streamlit as st\n'), ((162, 15, 170, 5), 'httpx.get', 'httpx.get', (), '', False, 'import httpx\n'), ((179, 17, 179, 56), 'arrow.get', 'arrow.get', ({(179, 27, 179, 55): "activity['start_date_local']"}, {}), "(activity['start_date_local'])", False, 'import arrow\n'), ((187, 17, 187, 40), 'streamlit.beta_columns', 'st.beta_columns', ({(187, 33, 187, 39): '[1, 3]'}, {}), '([1, 3])', True, 'import streamlit as st\n'), ((215, 4, 218, 5), 'streamlit.markdown', 'st.markdown', (), '', True, 'import streamlit as st\n'), ((89, 14, 89, 28), 'bokeh.models.widgets.Div', 'Div', (), '', False, 'from bokeh.models.widgets import Div\n'), ((90, 8, 90, 27), 'streamlit.bokeh_chart', 'st.bokeh_chart', ({(90, 23, 90, 26): 'div'}, {}), '(div)', True, 'import streamlit as st\n'), ((145, 8, 145, 68), 'streamlit.experimental_set_query_params', 'st.experimental_set_query_params', (), '', True, 'import streamlit as st\n'), ((154, 24, 154, 34), 'streamlit.empty', 'st.empty', ({}, {}), '()', True, 'import streamlit as st\n'), ((189, 15, 193, 9), 'streamlit.number_input', 'st.number_input', (), '', True, 'import streamlit as st\n'), ((202, 19, 206, 9), 'streamlit.selectbox', 'st.selectbox', (), '', True, 'import streamlit as st\n'), ((209, 8, 209, 40), 'streamlit.write', 'st.write', ({(209, 17, 209, 39): '"""No activity selected"""'}, {}), "('No activity selected')", True, 'import streamlit as st\n'), ((210, 8, 210, 17), 'streamlit.stop', 'st.stop', ({}, {}), '()', True, 'import streamlit as st\n'), ((226, 9, 226, 70), 'streamlit.spinner', 'st.spinner', ({(226, 20, 226, 69): 'f"""Downloading activity "{activity[\'name\']}"..."""'}, {}), '(f\'Downloading activity "{activity[\\\'name\\\']}"...\')', True, 'import streamlit as st\n'), ((227, 15, 227, 77), 'sweat.read_strava', 'sweat.read_strava', ({(227, 33, 227, 47): "activity['id']", (227, 49, 227, 76): "strava_auth['access_token']"}, {}), "(activity['id'], strava_auth['access_token'])", False, 'import sweat\n'), ((25, 11, 25, 37), 'base64.b64encode', 'base64.b64encode', ({(25, 28, 25, 36): 'contents'}, {}), '(contents)', False, 'import base64\n'), ((119, 8, 119, 102), 'streamlit.error', 'st.error', ({(119, 17, 119, 101): '"""Something went wrong while authenticating with Strava. Please reload and try again"""'}, {}), "(\n 'Something went wrong while authenticating with Strava. 
Please reload and try again'\n )", True, 'import streamlit as st\n'), ((120, 8, 120, 42), 'streamlit.experimental_set_query_params', 'st.experimental_set_query_params', ({}, {}), '()', True, 'import streamlit as st\n'), ((121, 8, 121, 17), 'streamlit.stop', 'st.stop', ({}, {}), '()', True, 'import streamlit as st\n'), ((139, 12, 139, 21), 'streamlit.stop', 'st.stop', ({}, {}), '()', True, 'import streamlit as st\n'), ((198, 12, 198, 85), 'streamlit.info', 'st.info', ({(198, 20, 198, 84): '"""This Strava account has no activities or you ran out of pages."""'}, {}), "('This Strava account has no activities or you ran out of pages.')", True, 'import streamlit as st\n'), ((199, 12, 199, 21), 'streamlit.stop', 'st.stop', ({}, {}), '()', True, 'import streamlit as st\n')] |
reap3r/nmfta-bouncer | appliance/src/ufw_interface.py | a178244dbf0b8a165aabc02a5d1ba05006f9ec22 | #!/usr/bin/env python
#shamelessly stolen from: https://gitlab.com/dhj/easyufw
# A thin wrapper over the thin wrapper that is ufw
# Usage:
# import easyufw as ufw
# ufw.disable() # disable firewall
# ufw.enable() # enable firewall
# ufw.allow() # default allow -- allow all
# ufw.allow(22) # allow port 22, any protocol
# ufw.allow(22,'tcp') # allow port 22, tcp protocol
# ufw.allow('22/tcp') # allow port 22, tcp protocol
# ufw.allow(53,'udp') # allow port 53, udp protocol
# ufw.allow(53,'udp') # allow port 53, udp protocol
# ufw.deny() # default deny -- deny all
# ufw.deny(22,'tcp') # deny port 22, tcp protocol
# ufw.delete(22) # delete rules referencing port 22
# ufw.reset() # restore defaults
# ufw.status() # return status string (default verbose=True)
# ufw.run("allow 22") # directly run command as if from command line
import ufw.frontend
import ufw.common
import gettext
progName = ufw.common.programName
gettext.install(progName)#, unicode=True) # for i18n; fixes '_' not defined
ui = ufw.frontend.UFWFrontend(False) # no dryrun -- do it live
backend = ui.backend
parse_command = ufw.frontend.parse_command
def _parse(actionstr):
# parse commands like "allow 22", "reset", "default allow"
argv = [progName]
argv.extend(actionstr.split(' ')) # generate bogus argv to parse
pr = parse_command(argv)
return pr
def run(actionstr, force=False):
# run command with an explicit force argument
pr = _parse(actionstr)
rule = pr.data.get('rule','') # commands like reset don't have a rule
iptype = pr.data.get('iptype','')
return ui.do_action(pr.action,rule,iptype,force)
def reset(force=True):
run('reset',force=force)
def enable():
ui.set_enabled(True)
def disable():
ui.set_enabled(False)
def allow(port=None, protocol=None):
# port int; protocol str ['tcp','udp']
pp = None
if port is not None:
pp = "" # port and protocol string
pp += str(port)
if protocol is not None:
pp += '/' + protocol
_allow(pp)
def _allow(pp=None):
# pp = port and protocol string ['22','22/tcp','53/udp']
# port without protocol includes all protocols
if pp is None:
run('default allow')
else:
run('allow ' + pp)
def deny(port=None, protocol=None):
# port int; protocol str ['tcp','udp']
pp = None
if port is not None:
pp = "" # port and protocol string
pp += str(port)
if protocol is not None:
pp += '/' + protocol
_deny(pp)
def _deny(pp=None):
# pp = port and protocol string
if pp is None:
run('default deny')
else:
run('deny ' + pp)
def delete(port):
# delete all rules by destination port
while _delete(port): pass # while ports deleted re-enumerate and continue
def _delete(port):
for i,rule in enumerate(backend.get_rules()):
rule_port = None
try:
rule_port = int(rule.dport)
except:
rule_port = None
if rule_port is not None and port == rule_port:
run("delete " + str(i+1), force=True)
return True # delete one rule; enumeration changes after delete
return False
def status(verbose=True):
cmd = 'status'
if verbose:
cmd += ' verbose'
return run(cmd)
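# Minimal sketch of direct use (assumes root privileges; status() is the only
# non-destructive call, so it is the safest smoke test):
if __name__ == "__main__":
    print(status())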
| [((28, 0, 28, 25), 'gettext.install', 'gettext.install', ({(28, 16, 28, 24): 'progName'}, {}), '(progName)', False, 'import gettext\n')] |
etri-city-traffic-brain/traffic-simulator | test/libsalt/test_vehicle.py | 6d5061febeaef484388b2b5aee14d9894099d98a | import libsalt
def test(salt_scenario):
libsalt.start(salt_scenario)
libsalt.setCurrentStep(25200)
step = libsalt.getCurrentStep()
while step <= 36000:
if (step % 100 == 0):
print("Simulation Step: ", step)
test_funcs()
libsalt.simulationStep()
step = libsalt.getCurrentStep()
libsalt.close()
print("Python: Simulation End!!!")
def test_funcs():
standbys = libsalt.vehicle.getStandbyVehicles()
runnings = libsalt.vehicle.getRunningVehicles()
print("#Running Vehicles: ", len(runnings))
#for vehicle in runnings:
# print("\t", vehicle.toString())
#for vehicle in standbys:
# print("\t", vehicle.toString())
# for vehicle in runnings:
# print("Running Vehicle)", vehicle.id, ":", libsalt.vehicle.getRoute(vehicle.id).toString())
# print("Running Vehicle)", vehicle.id, ":", vehicle.toString())
#print("#Standby Vehicles: ", len(standbys))
#for vehicle in standbys:
# print("Standby Vehicle)", vehicle.id, ":", libsalt.vehicle.getRouteString(vehicle.id))
#print("Standby Vehicle)", vehicle.id, ":", vehicle.toString())
if __name__ == "__main__":
salt_scenario = r"/home/mclee/project/traffic-simulator/data/dj_sample_data/2020-dj_sample.json"
test(salt_scenario)
| [((4, 4, 4, 32), 'libsalt.start', 'libsalt.start', ({(4, 18, 4, 31): 'salt_scenario'}, {}), '(salt_scenario)', False, 'import libsalt\n'), ((5, 4, 5, 33), 'libsalt.setCurrentStep', 'libsalt.setCurrentStep', ({(5, 27, 5, 32): '(25200)'}, {}), '(25200)', False, 'import libsalt\n'), ((6, 11, 6, 35), 'libsalt.getCurrentStep', 'libsalt.getCurrentStep', ({}, {}), '()', False, 'import libsalt\n'), ((14, 4, 14, 19), 'libsalt.close', 'libsalt.close', ({}, {}), '()', False, 'import libsalt\n'), ((18, 15, 18, 51), 'libsalt.vehicle.getStandbyVehicles', 'libsalt.vehicle.getStandbyVehicles', ({}, {}), '()', False, 'import libsalt\n'), ((19, 15, 19, 51), 'libsalt.vehicle.getRunningVehicles', 'libsalt.vehicle.getRunningVehicles', ({}, {}), '()', False, 'import libsalt\n'), ((11, 8, 11, 32), 'libsalt.simulationStep', 'libsalt.simulationStep', ({}, {}), '()', False, 'import libsalt\n'), ((12, 15, 12, 39), 'libsalt.getCurrentStep', 'libsalt.getCurrentStep', ({}, {}), '()', False, 'import libsalt\n')] |
davidtahim/Glyphs-Scripts | Masters/Copy Layer to Layer.py | 5ed28805b5fe03c63d904ad2f79117844c22aa44 | #MenuTitle: Copy Layer to Layer
# -*- coding: utf-8 -*-
__doc__="""
Copies one master to another master in selected glyphs.
"""
import GlyphsApp
import vanilla
import math
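# getComponentScaleX_scaleY_rotation below decomposes the component's 2x2 affine part
# [[a, b], [c, d]]: scale_x = sqrt(a^2 + b^2), scale_y = sqrt(c^2 + d^2) (negated when
# both b and c are negative, i.e. a flip), rotation = atan2(b, a) in degrees.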
def getComponentScaleX_scaleY_rotation( self ):
a = self.transform[0]
b = self.transform[1]
c = self.transform[2]
d = self.transform[3]
scale_x = math.sqrt(math.pow(a,2)+math.pow(b,2))
scale_y = math.sqrt(math.pow(c,2)+math.pow(d,2))
if (b<0 and c<0):
scale_y = scale_y * -1
rotation = math.atan2(b, a) * (180/math.pi)
return [scale_x, scale_y, rotation]
class MasterFiller( object ):
def __init__( self ):
# Window 'self.w':
windowWidth = 280
windowHeight = 155
windowWidthResize = 120 # user can resize width by this value
windowHeightResize = 0 # user can resize height by this value
self.w = vanilla.FloatingWindow(
( windowWidth, windowHeight ), # default window size
"Copy layer to layer", # window title
minSize = ( windowWidth, windowHeight ), # minimum size (for resizing)
maxSize = ( windowWidth + windowWidthResize, windowHeight + windowHeightResize ), # maximum size (for resizing)
autosaveName = "com.mekkablue.MasterFiller.mainwindow" # stores last window position and size
)
self.w.text_1 = vanilla.TextBox((15, 12+2, 120, 14), "Copy paths from", sizeStyle='small')
self.w.master_from = vanilla.PopUpButton((120, 12, -15, 17), self.GetMasterNames(), sizeStyle='small', callback=self.MasterChangeCallback)
self.w.text_2 = vanilla.TextBox((15, 32+2, 120, 14), "into selection of", sizeStyle='small')
self.w.master_into = vanilla.PopUpButton((120, 32, -15, 17), self.GetMasterNames(), sizeStyle='small', callback=self.MasterChangeCallback)
self.w.include_components = vanilla.CheckBox((15, 52+2, -100, 20), "Include components", sizeStyle='small', callback=self.SavePreferences, value=True)
self.w.include_anchors = vanilla.CheckBox((15, 52+20, -100, 20), "Include anchors", sizeStyle='small', callback=self.SavePreferences, value=True)
self.w.include_metrics = vanilla.CheckBox((15, 52+38, -100, 20), "Include metrics", sizeStyle='small', callback=self.SavePreferences, value=True)
self.w.keep_window_open = vanilla.CheckBox((15, 52+56, -100, 20), "Keep window open", sizeStyle='small', callback=self.SavePreferences, value=True)
self.w.copybutton = vanilla.Button((-80, -30, -15, -10), "Copy", sizeStyle='small', callback=self.buttonCallback)
self.w.setDefaultButton( self.w.copybutton )
# Load Settings:
if not self.LoadPreferences():
print "Note: 'Copy Layer to Layer' could not load preferences. Will resort to defaults."
self.w.open()
self.w.makeKey()
self.w.master_into.set(1)
def SavePreferences( self, sender ):
try:
Glyphs.defaults["com.mekkablue.MasterFiller.include_components"] = self.w.include_components.get()
Glyphs.defaults["com.mekkablue.MasterFiller.include_anchors"] = self.w.include_anchors.get()
Glyphs.defaults["com.mekkablue.MasterFiller.include_metrics"] = self.w.include_metrics.get()
Glyphs.defaults["com.mekkablue.MasterFiller.keep_window_open"] = self.w.keep_window_open.get()
except:
return False
return True
def LoadPreferences( self ):
try:
NSUserDefaults.standardUserDefaults().registerDefaults_(
{
"com.mekkablue.MasterFiller.include_components" : "1",
"com.mekkablue.MasterFiller.include_anchors" : "1",
"com.mekkablue.MasterFiller.include_metrics" : "1",
"com.mekkablue.MasterFiller.keep_window_open" : "1"
}
)
self.w.include_components.set( Glyphs.defaults["com.mekkablue.MasterFiller.include_components"] )
self.w.include_anchors.set( Glyphs.defaults["com.mekkablue.MasterFiller.include_anchors"] )
self.w.include_metrics.set( Glyphs.defaults["com.mekkablue.MasterFiller.include_metrics"] )
self.w.keep_window_open.set( Glyphs.defaults["com.mekkablue.MasterFiller.keep_window_open"] )
except:
return False
return True
def GetMasterNames( self ):
myMasterList = []
for i in range( len( Glyphs.currentDocument.font.masters ) ):
x = Glyphs.currentDocument.font.masters[i]
myMasterList.append( '%i: %s' % (i, x.name) )
return myMasterList
def MasterChangeCallback( self, sender ):
if self.w.master_from.get() == self.w.master_into.get():
self.w.copybutton.enable( False )
else:
self.w.copybutton.enable( True )
def copyPathsFromLayerToLayer( self, sourceLayer, targetLayer ):
"""Copies all paths from sourceLayer to targetLayer"""
num_from = len( sourceLayer.paths )
num_into = len( targetLayer.paths )
if num_into != 0:
print "- Cleaning out paths in target layer"
for i in range( num_into )[::-1]:
del targetLayer.paths[i]
if num_from > 0:
print "- Copying paths"
for thisPath in sourceLayer.paths:
newPath = GSPath()
for n in thisPath.nodes:
newNode = GSNode()
newNode.type = n.type
newNode.connection = n.connection
newNode.setPosition_( (n.x, n.y) )
newPath.addNode_( newNode )
newPath.closed = thisPath.closed
targetLayer.paths.append( newPath )
def copyComponentsFromLayerToLayer( self, sourceLayer, targetLayer ):
"""Copies all components from sourceLayer to targetLayer."""
comp_from = len( sourceLayer.components )
comp_into = len( targetLayer.components )
if comp_into != 0:
print "- Cleaning out components in target layer"
for i in range( comp_into )[::-1]:
del targetLayer.components[i]
if comp_from > 0:
print "- Copying components:"
for thisComp in sourceLayer.components:
compName = str( thisComp.componentName ) # str() probably not necessary anymore, but once fixed a problem
newComp = GSComponent( compName )
newComp.setPosition_( (thisComp.x, thisComp.y) )
ScaleX_scaleY_rotation = getComponentScaleX_scaleY_rotation(thisComp)
newComp.setScaleX_scaleY_rotation_(ScaleX_scaleY_rotation[0],ScaleX_scaleY_rotation[1],ScaleX_scaleY_rotation[2])
print "-- Component: %s" % ( compName )
targetLayer.components.append( newComp )
def copyAnchorsFromLayerToLayer( self, sourceLayer, targetLayer ):
"""Copies all anchors from sourceLayer to targetLayer."""
anch_from = len( sourceLayer.anchors )
anch_into = len( targetLayer.anchors )
if anch_into != 0:
print "- Cleaning out anchors in target layer"
sourceLayer.setAnchors_( None )
if anch_from > 0:
print "- Copying anchors from source layer:"
for thisAnchor in sourceLayer.anchors:
anchorName = thisAnchor.name
anchorPosition = NSPoint( thisAnchor.x, thisAnchor.y )
newAnchor = GSAnchor( anchorName, anchorPosition )
print "-- %s (%i, %i)" % ( anchorName, anchorPosition.x, anchorPosition.y )
targetLayer.addAnchor_( newAnchor )
def copyMetricsFromLayerToLayer( self, sourceLayer, targetLayer ):
"""Copies width of sourceLayer to targetLayer."""
sourceWidth = sourceLayer.width
if targetLayer.width != sourceWidth:
targetLayer.width = sourceWidth
print "- Copying width (%.1f)" % sourceWidth
else:
print "- Width not changed (already was %.1f)" % sourceWidth
def buttonCallback( self, sender ):
Glyphs.clearLog()
Glyphs.showMacroWindow()
print "Copy Layer to Layer Protocol:"
Font = Glyphs.font
Doc = Glyphs.currentDocument
selectedGlyphs = [ x.parent for x in Font.selectedLayers ]
index_from = self.w.master_from.get()
index_into = self.w.master_into.get()
compYesNo = self.w.include_components.get()
anchYesNo = self.w.include_anchors.get()
metrYesNo = self.w.include_metrics.get()
for thisGlyph in selectedGlyphs:
try:
print "\nProcessing", thisGlyph.name
sourcelayer = thisGlyph.layers[ index_from ]
targetlayer = thisGlyph.layers[ index_into ]
Font.disableUpdateInterface()
# copy paths:
self.copyPathsFromLayerToLayer( sourcelayer, targetlayer )
# copy components:
if compYesNo:
self.copyComponentsFromLayerToLayer( sourcelayer, targetlayer )
# copy anchors:
if anchYesNo:
self.copyAnchorsFromLayerToLayer( sourcelayer, targetlayer )
# copy metrics:
if metrYesNo:
self.copyMetricsFromLayerToLayer( sourcelayer, targetlayer )
Font.enableUpdateInterface()
except Exception, e:
print e
if not self.w.keep_window_open.get():
self.w.close()
MasterFiller()
| [] |
bjacobs1/vunit | vunit/test/unit/test_tokenizer.py | a7f7717a172855ea7852296bb768370d50cfc992 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2014-2018, Lars Asplund [email protected]
"""
Test of the general tokenizer
"""
from unittest import TestCase
from vunit.parsing.tokenizer import describe_location
from vunit.test.mock_2or3 import mock
class TestTokenizer(TestCase):
"""
Test of the general tokenizer
"""
def test_describes_single_char_location(self):
self.assertEqual(
_describe_location("""\
S
"""), """\
at filename0 line 1:
S
~""")
def test_describes_single_char_location_within(self):
self.assertEqual(
_describe_location("""\
S
"""), """\
at filename0 line 1:
S
~""")
def test_describes_multi_char_location(self):
self.assertEqual(
_describe_location("""\
S E
"""), """\
at filename0 line 1:
S E
~~~""")
def test_describes_multi_char_location_within(self):
self.assertEqual(
_describe_location("""\
S E
"""), """\
at filename0 line 1:
S E
~~~""")
def test_describes_multi_line_location(self):
self.assertEqual(
_describe_location("""\
S____
E
"""), """\
at filename0 line 1:
S____
~~~~~""")
def test_describes_multi_file_location(self):
self.assertEqual(
_describe_location("""\
S__E""", """\
SE"""), """\
from filename0 line 2:
S__E
~~~~
at filename1 line 3:
SE
~~""")
def test_describe_location_none(self):
self.assertEqual(describe_location(None),
"Unknown location")
def test_describe_missing_location(self):
self.assertEqual(describe_location((("missing.svh", (0, 0)), None)),
"Unknown location in missing.svh")
def test_describe_none_filename_location(self):
self.assertEqual(describe_location(((None, (0, 0)), None)),
"Unknown Python string")
def _describe_location(*codes):
"""
Helper to test describe_location
"""
contents = {}
location = None
for idx, code in enumerate(codes):
filename = "filename%i" % idx
contents[filename] = code
start = code.index("S")
if "E" in code:
end = code.index("E")
else:
end = start
location = ((filename, (start, end)), location)
with mock.patch("vunit.parsing.tokenizer.read_file", autospec=True) as mock_read_file:
with mock.patch("vunit.parsing.tokenizer.file_exists", autospec=True) as mock_file_exists:
def file_exists_side_effect(filename):
return filename in contents
def read_file_side_effect(filename):
return contents[filename]
mock_file_exists.side_effect = file_exists_side_effect
mock_read_file.side_effect = read_file_side_effect
retval = describe_location(location=location)
return retval
| [((113, 9, 113, 71), 'vunit.test.mock_2or3.mock.patch', 'mock.patch', (), '', False, 'from vunit.test.mock_2or3 import mock\n'), ((83, 25, 83, 48), 'vunit.parsing.tokenizer.describe_location', 'describe_location', ({(83, 43, 83, 47): 'None'}, {}), '(None)', False, 'from vunit.parsing.tokenizer import describe_location\n'), ((87, 25, 87, 75), 'vunit.parsing.tokenizer.describe_location', 'describe_location', ({(87, 43, 87, 74): "(('missing.svh', (0, 0)), None)"}, {}), "((('missing.svh', (0, 0)), None))", False, 'from vunit.parsing.tokenizer import describe_location\n'), ((91, 25, 91, 66), 'vunit.parsing.tokenizer.describe_location', 'describe_location', ({(91, 43, 91, 65): '((None, (0, 0)), None)'}, {}), '(((None, (0, 0)), None))', False, 'from vunit.parsing.tokenizer import describe_location\n'), ((114, 13, 114, 77), 'vunit.test.mock_2or3.mock.patch', 'mock.patch', (), '', False, 'from vunit.test.mock_2or3 import mock\n'), ((123, 21, 123, 57), 'vunit.parsing.tokenizer.describe_location', 'describe_location', (), '', False, 'from vunit.parsing.tokenizer import describe_location\n')] |
tbersez/Allmine | modules/star_se_SP.py | 092fb1f5abcb4fd4e40b4a25386f05ecb2dea55b | # STAR aligner single end mode, second pass
#
#    This module runs the second pass of the STAR aligner 2-pass
#    strategy. The goal is to align reads taking into account the splice
#    junctions found in the first pass.
#
# Inputs:
# - sample_trim.fastq.gz
# - splicing junction files (.tab)
#
# Output:
# - aligned reads
# - logs for follow up and debuging if needed
#   - logs for follow-up and debugging if needed
# Parameters:
# No fancy parameters needed, only the threads number is specified.
rule star_se_SP:
input:
# fake input
flag = ancient(config["REF"] + "REindexing_done.txt"),
R1 = config["TRIMMED"] + "{samples}_trim.fastq.gz",
genomeDir = ancient(config["REF"])
output:
bam = config["MAP"] + "{samples}_sorted.bam.gz"
params:
prefix = config["MAP"] + "{samples}.",
tmp = config["MAP"] + "SP/" + "{samples}_sp_STAR_TMP",
bind = config["BIND"],
cont = config["CONT"]
benchmark:
"benchmarks/star_SP/{samples}.tsv"
message : "Running STAR second pass with {input.R1}. \n"
shell:
"""
singularity exec -B {params.bind} {params.cont} \
STAR \
--runThreadN 10 \
--genomeDir {input.genomeDir} \
--readFilesIn {input.R1} \
--outSAMtype BAM SortedByCoordinate \
--outFileNamePrefix {params.prefix} \
--outStd BAM_SortedByCoordinate \
--outTmpDir {params.tmp} \
--scoreGap 0 \
--scoreGapNoncan -8 \
--scoreGapGCAG -4 \
--scoreGapATAC -8 \
--scoreGenomicLengthLog2scale -0.25 \
--scoreDelOpen -2 \
--scoreDelBase -2 \
--scoreInsOpen -2 \
--scoreInsBase -2 \
--scoreStitchSJshift 1 \
--readFilesCommand zcat | gzip --stdout > {output.bam}
"""
| [] |
runzezhang/MOOCs | Udemy/REST-Django-VueJS/C3-practice/03-demo/job_board/jobs/models.py | 8df8c7adc5af3d7b085be01ae9b6963fe33acd68 | from django.db import models
class JobOffer(models.Model):
company_name = models.CharField(max_length=50)
company_email = models.EmailField()
job_title = models.CharField(max_length=60)
job_description = models.TextField()
salary = models.PositiveIntegerField()
city = models.CharField(max_length=35)
state = models.CharField(max_length=35)
created_at = models.DateField(auto_now_add=True)
available = models.BooleanField(default=True)
def __str__(self):
return self.company_name
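# Illustrative usage sketch (added for clarity; all field values below are made up):
#
#   offer = JobOffer.objects.create(
#       company_name="Acme",
#       company_email="[email protected]",
#       job_title="Backend Developer",
#       job_description="Build and maintain the jobs API.",
#       salary=50000,
#       city="Nairobi",
#       state="Nairobi",
#   )
#   str(offer)  # -> "Acme"; created_at and available fall back to their defaults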
| [((5, 19, 5, 50), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((6, 20, 6, 39), 'django.db.models.EmailField', 'models.EmailField', ({}, {}), '()', False, 'from django.db import models\n'), ((7, 16, 7, 47), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((8, 22, 8, 40), 'django.db.models.TextField', 'models.TextField', ({}, {}), '()', False, 'from django.db import models\n'), ((9, 13, 9, 42), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ({}, {}), '()', False, 'from django.db import models\n'), ((10, 11, 10, 42), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((11, 12, 11, 43), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((12, 17, 12, 52), 'django.db.models.DateField', 'models.DateField', (), '', False, 'from django.db import models\n'), ((13, 16, 13, 49), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import models\n')] |
barbaramootian/Memes-app | memeapp/views.py | 4ffa2da997758ee4f35dc21e755e3db242b8654f | from django.shortcuts import render,redirect
from django.contrib.auth.models import User
from django.contrib import messages
from .forms import PictureUploadForm,CommentForm
from .models import Image,Profile,Likes,Comments
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate,login,logout
from django.contrib.auth.forms import UserCreationForm
from datetime import datetime
def index(request):
images=Image.objects.all()
context={'images':images}
return render(request,'memeapp/index.html',context)
def registerPage(request):
form=UserCreationForm()
if request.method == "POST":
form_results=UserCreationForm(request.POST)
if form_results.is_valid():
user =form_results.save(commit=False)
user.username=user.username.lower()
user.save()
login(request,user)
return redirect('index')
else:
            messages.error(request, 'Error occurred during registration')
context = {'reg_form':form}
return render(request, 'memeapp/auth.html',context)
def loginPage(request):
page='login'
if request.user.is_authenticated:
return redirect('index')
if request.method == "POST":
username=request.POST.get('username').lower()
password=request.POST.get('password')
try:
user=User.objects.get(username=username)
except:
messages.error(request, 'User does not exist')
user=authenticate(request,username=username,password=password)
if user is not None:
login(request,user)
return redirect('index')
else:
messages.error(request, 'Username OR Password does not exist')
context={'page':page}
return render(request, 'memeapp/auth.html', context)
def logoutUser(request):
logout(request)
return redirect('index')
@login_required(login_url='login')
def uploadPicture(request):
form = PictureUploadForm()
if request.method == "POST":
form_results = PictureUploadForm(request.POST,request.FILES)
if form_results.is_valid():
form_results.save()
return redirect('index')
context = {"form": form}
return render(request, 'memeapp/upload_picture.html', context)
@login_required(login_url='login')
def my_images(request):
current_user = request.user
images = Profile.objects.filter(user_id=current_user.id).first()
profiles = Image.objects.filter(user_id=current_user.id)
return render(request, 'memeapp/profile.html', {"profile": images,"images":profiles})
@login_required(login_url='login')
def each_image(request, id):
image = Image.objects.get(id=id)
return render(request, 'memeapp/image_details.html', {'image': image})
@login_required(login_url='login')
def like_picture(request, id):
likes = Likes.objects.filter(image_id=id).first()
if Likes.objects.filter(image_id=id, user_id=request.user.id).exists():
likes.delete()
image = Image.objects.get(id=id)
if image.likes_number == 0:
image.likes_number = 0
image.save()
else:
image.likes_number -= 1
image.save()
return redirect('/')
else:
likes = Likes(image_id=id, user_id=request.user.id)
likes.save()
image = Image.objects.get(id=id)
image.likes_number = image.likes_number + 1
image.save()
return redirect('/')
@login_required(login_url='login')
def comment(request,pk):
profile = Image.objects.get(pk=pk)
form_results = CommentForm(request.POST,instance=profile)
if request.method == "POST":
if form_results.is_valid():
user = request.user
comment= form_results.cleaned_data['comment']
comment_content = Comments(user=user, image=profile, comment=comment, created_on=datetime.now())
comment_content.save()
profile.comments_number = profile.comments_number + 1
profile.save()
return redirect('index')
else:
print('form is invalid')
else:
form_results = CommentForm
context = {'form':form_results,'image':profile}
return render(request,'memeapp/comments.html',context)
def search(request):
title = "Search"
if 'search_query' in request.GET and request.GET["search_query"]:
search_term = request.GET.get("search_query").lower()
searched_results = Image.search_image(search_term)
message = f"{search_term}"
context = {'message': message, 'results': searched_results, 'title': title}
return render(request, 'memeapp/search.html', context)
else:
messages.error(request, "You haven't searched for any term")
message = "You haven't searched for any term"
return render(request, 'memeapp/search.html', {"message": message})
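# Illustrative URLconf sketch (added for clarity; the route names 'index' and
# 'login' are the ones the redirects and login_required decorators above rely
# on, while the URL paths themselves are assumptions):
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('', views.index, name='index'),
#       path('register/', views.registerPage, name='register'),
#       path('login/', views.loginPage, name='login'),
#       path('logout/', views.logoutUser, name='logout'),
#   ]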
| [((58, 1, 58, 34), 'django.contrib.auth.decorators.login_required', 'login_required', (), '', False, 'from django.contrib.auth.decorators import login_required\n'), ((70, 1, 70, 34), 'django.contrib.auth.decorators.login_required', 'login_required', (), '', False, 'from django.contrib.auth.decorators import login_required\n'), ((77, 1, 77, 34), 'django.contrib.auth.decorators.login_required', 'login_required', (), '', False, 'from django.contrib.auth.decorators import login_required\n'), ((83, 1, 83, 34), 'django.contrib.auth.decorators.login_required', 'login_required', (), '', False, 'from django.contrib.auth.decorators import login_required\n'), ((104, 1, 104, 34), 'django.contrib.auth.decorators.login_required', 'login_required', (), '', False, 'from django.contrib.auth.decorators import login_required\n'), ((15, 11, 15, 55), 'django.shortcuts.render', 'render', ({(15, 18, 15, 25): 'request', (15, 26, 15, 46): '"""memeapp/index.html"""', (15, 47, 15, 54): 'context'}, {}), "(request, 'memeapp/index.html', context)", False, 'from django.shortcuts import render, redirect\n'), ((19, 9, 19, 27), 'django.contrib.auth.forms.UserCreationForm', 'UserCreationForm', ({}, {}), '()', False, 'from django.contrib.auth.forms import UserCreationForm\n'), ((31, 11, 31, 55), 'django.shortcuts.render', 'render', ({(31, 18, 31, 25): 'request', (31, 27, 31, 46): '"""memeapp/auth.html"""', (31, 47, 31, 54): 'context'}, {}), "(request, 'memeapp/auth.html', context)", False, 'from django.shortcuts import render, redirect\n'), ((52, 11, 52, 56), 'django.shortcuts.render', 'render', ({(52, 18, 52, 25): 'request', (52, 27, 52, 46): '"""memeapp/auth.html"""', (52, 48, 52, 55): 'context'}, {}), "(request, 'memeapp/auth.html', context)", False, 'from django.shortcuts import render, redirect\n'), ((55, 4, 55, 19), 'django.contrib.auth.logout', 'logout', ({(55, 11, 55, 18): 'request'}, {}), '(request)', False, 'from django.contrib.auth import authenticate, login, logout\n'), ((56, 11, 56, 28), 'django.shortcuts.redirect', 'redirect', ({(56, 20, 56, 27): '"""index"""'}, {}), "('index')", False, 'from django.shortcuts import render, redirect\n'), ((67, 11, 67, 66), 'django.shortcuts.render', 'render', ({(67, 18, 67, 25): 'request', (67, 27, 67, 56): '"""memeapp/upload_picture.html"""', (67, 58, 67, 65): 'context'}, {}), "(request, 'memeapp/upload_picture.html', context)", False, 'from django.shortcuts import render, redirect\n'), ((75, 11, 75, 89), 'django.shortcuts.render', 'render', ({(75, 18, 75, 25): 'request', (75, 27, 75, 49): '"""memeapp/profile.html"""', (75, 51, 75, 88): "{'profile': images, 'images': profiles}"}, {}), "(request, 'memeapp/profile.html', {'profile': images, 'images': profiles}\n )", False, 'from django.shortcuts import render, redirect\n'), ((80, 11, 80, 74), 'django.shortcuts.render', 'render', ({(80, 18, 80, 25): 'request', (80, 27, 80, 55): '"""memeapp/image_details.html"""', (80, 57, 80, 73): "{'image': image}"}, {}), "(request, 'memeapp/image_details.html', {'image': image})", False, 'from django.shortcuts import render, redirect\n'), ((122, 11, 122, 58), 'django.shortcuts.render', 'render', ({(122, 18, 122, 25): 'request', (122, 26, 122, 49): '"""memeapp/comments.html"""', (122, 50, 122, 57): 'context'}, {}), "(request, 'memeapp/comments.html', context)", False, 'from django.shortcuts import render, redirect\n'), ((21, 21, 21, 51), 'django.contrib.auth.forms.UserCreationForm', 'UserCreationForm', ({(21, 38, 21, 50): 'request.POST'}, {}), '(request.POST)', False, 'from 
django.contrib.auth.forms import UserCreationForm\n'), ((37, 16, 37, 33), 'django.shortcuts.redirect', 'redirect', ({(37, 25, 37, 32): '"""index"""'}, {}), "('index')", False, 'from django.shortcuts import render, redirect\n'), ((45, 13, 45, 70), 'django.contrib.auth.authenticate', 'authenticate', (), '', False, 'from django.contrib.auth import authenticate, login, logout\n'), ((95, 15, 95, 28), 'django.shortcuts.redirect', 'redirect', ({(95, 24, 95, 27): '"""/"""'}, {}), "('/')", False, 'from django.shortcuts import render, redirect\n'), ((102, 15, 102, 28), 'django.shortcuts.redirect', 'redirect', ({(102, 24, 102, 27): '"""/"""'}, {}), "('/')", False, 'from django.shortcuts import render, redirect\n'), ((132, 15, 132, 62), 'django.shortcuts.render', 'render', ({(132, 22, 132, 29): 'request', (132, 31, 132, 52): '"""memeapp/search.html"""', (132, 54, 132, 61): 'context'}, {}), "(request, 'memeapp/search.html', context)", False, 'from django.shortcuts import render, redirect\n'), ((134, 8, 134, 68), 'django.contrib.messages.error', 'messages.error', ({(134, 23, 134, 30): 'request', (134, 32, 134, 67): '"""You haven\'t searched for any term"""'}, {}), '(request, "You haven\'t searched for any term")', False, 'from django.contrib import messages\n'), ((136, 15, 136, 75), 'django.shortcuts.render', 'render', ({(136, 22, 136, 29): 'request', (136, 31, 136, 52): '"""memeapp/search.html"""', (136, 54, 136, 74): "{'message': message}"}, {}), "(request, 'memeapp/search.html', {'message': message})", False, 'from django.shortcuts import render, redirect\n'), ((26, 12, 26, 31), 'django.contrib.auth.login', 'login', ({(26, 18, 26, 25): 'request', (26, 26, 26, 30): 'user'}, {}), '(request, user)', False, 'from django.contrib.auth import authenticate, login, logout\n'), ((27, 19, 27, 36), 'django.shortcuts.redirect', 'redirect', ({(27, 28, 27, 35): '"""index"""'}, {}), "('index')", False, 'from django.shortcuts import render, redirect\n'), ((29, 12, 29, 72), 'django.contrib.messages.error', 'messages.error', ({(29, 27, 29, 34): 'request', (29, 36, 29, 71): '"""Error occured during registration"""'}, {}), "(request, 'Error occured during registration')", False, 'from django.contrib import messages\n'), ((42, 17, 42, 52), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', (), '', False, 'from django.contrib.auth.models import User\n'), ((47, 12, 47, 31), 'django.contrib.auth.login', 'login', ({(47, 18, 47, 25): 'request', (47, 26, 47, 30): 'user'}, {}), '(request, user)', False, 'from django.contrib.auth import authenticate, login, logout\n'), ((48, 19, 48, 36), 'django.shortcuts.redirect', 'redirect', ({(48, 28, 48, 35): '"""index"""'}, {}), "('index')", False, 'from django.shortcuts import render, redirect\n'), ((50, 12, 50, 74), 'django.contrib.messages.error', 'messages.error', ({(50, 27, 50, 34): 'request', (50, 36, 50, 73): '"""Username OR Password does not exist"""'}, {}), "(request, 'Username OR Password does not exist')", False, 'from django.contrib import messages\n'), ((65, 19, 65, 36), 'django.shortcuts.redirect', 'redirect', ({(65, 28, 65, 35): '"""index"""'}, {}), "('index')", False, 'from django.shortcuts import render, redirect\n'), ((116, 19, 116, 36), 'django.shortcuts.redirect', 'redirect', ({(116, 28, 116, 35): '"""index"""'}, {}), "('index')", False, 'from django.shortcuts import render, redirect\n'), ((44, 12, 44, 58), 'django.contrib.messages.error', 'messages.error', ({(44, 27, 44, 34): 'request', (44, 36, 44, 57): '"""User does not exist"""'}, {}), "(request, 
'User does not exist')", False, 'from django.contrib import messages\n'), ((112, 93, 112, 107), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n')] |
spraakbanken/sparv-pipeline | sparv/modules/hist/diapivot.py | 7293d42c577afdaf01ce8a936743f8b83d6eb962 | """Create diapivot annotation."""
import logging
import pickle
import xml.etree.ElementTree as etree
import sparv.util as util
from sparv import Annotation, Model, ModelOutput, Output, annotator, modelbuilder
log = logging.getLogger(__name__)
PART_DELIM1 = "^1"
# @annotator("Diapivot annotation", language=["swe-1800"])
def diapivot_annotate(out: Output = Output("<token>:hist.diapivot", description="SALDO IDs corresponding to lemgrams"),
lemgram: Annotation = Annotation("<token>:saldo.lemgram"),
model: Model = Model("hist/diapivot.pickle")):
"""Annotate each lemgram with its corresponding saldo_id according to model.
Args:
out (str, optional): Resulting annotation file.
Defaults to Output("<token>:hist.diapivot", description="SALDO IDs corresponding to lemgrams").
lemgram (str, optional): Existing lemgram annotation. Defaults to Annotation("<token>:saldo.lemgram").
model (str, optional): Crosslink model. Defaults to Model("hist/diapivot.pickle").
"""
lexicon = PivotLexicon(model)
lemgram_annotation = list(lemgram.read())
out_annotation = []
for lemgrams in lemgram_annotation:
saldo_ids = []
for lemgram in lemgrams.split(util.DELIM):
s_i = lexicon.get_exactMatch(lemgram)
if s_i:
saldo_ids += [s_i]
out_annotation.append(util.AFFIX + util.DELIM.join(set(saldo_ids)) + util.AFFIX if saldo_ids else util.AFFIX)
out.write(out_annotation)
# @modelbuilder("Diapivot model", language=["swe"])
def build_diapivot(out: ModelOutput = ModelOutput("hist/diapivot.pickle")):
"""Download diapivot XML dictionary and save as a pickle file."""
# Download diapivot.xml
xml_model = Model("hist/diapivot.xml")
xml_model.download("https://svn.spraakdata.gu.se/sb-arkiv/pub/lmf/diapivot/diapivot.xml")
# Create pickle file
xml_lexicon = read_xml(xml_model.path)
log.info("Saving cross lexicon in Pickle format")
picklex = {}
for lem in xml_lexicon:
lemgrams = []
for saldo, match in list(xml_lexicon[lem].items()):
lemgrams.append(PART_DELIM1.join([saldo, match]))
picklex[lem] = sorted(lemgrams)
out.write_pickle(picklex)
# Clean up
xml_model.remove()
################################################################################
# Auxiliaries
################################################################################
class PivotLexicon:
"""A lexicon for old swedish SALDO lookups.
It is initialized from a pickled file.
"""
def __init__(self, crossfile, verbose=True):
"""Read pickled lexicon."""
if verbose:
log.info("Reading cross lexicon: %s", crossfile)
with open(crossfile, "rb") as F:
self.lexicon = pickle.load(F)
if verbose:
log.info("OK, read %d words", len(self.lexicon))
def lookup(self, lem):
"""Lookup a word in the lexicon."""
if lem.lower() == lem:
annotation_tag_pairs = self.lexicon.get(lem, [])
else:
annotation_tag_pairs = self.lexicon.get(lem, []) + self.lexicon.get(lem.lower(), [])
return list(map(_split_val, annotation_tag_pairs))
def get_exactMatch(self, word):
"""Get only exact matches from lexicon."""
s = self.lookup(word)
if s and s[0] == "exactMatch":
return s[1]
def _split_val(key_val):
return key_val.rsplit(PART_DELIM1)[1]
def read_xml(xml):
"""Read the XML version of crosslinked lexicon."""
log.info("Reading XML lexicon")
lexicon = {}
context = etree.iterparse(xml, events=("start", "end")) # "start" needed to save reference to root element
context = iter(context)
_event, root = next(context)
for event, elem in context:
if event == "end":
if elem.tag == 'LexicalEntry':
lemma = elem.find("Lemma")
dalin, saldo = [], ''
for form in lemma.findall("FormRepresentation"):
cat = _findval(form, "category")
lem = _findval(form, "lemgram")
if cat == "modern":
saldo = lem
else:
match = _findval(form, "match")
dalin += [(lem, match)]
[lexicon.update({d: {'saldo': saldo, 'match': m}}) for (d, m) in dalin]
# Done parsing section. Clear tree to save memory
if elem.tag in ['LexicalEntry', 'frame', 'resFrame']:
root.clear()
testwords = ["tigerhjerta..nn.1",
"lågland..nn.1",
"gud..nn.1"]
util.test_lexicon(lexicon, testwords)
log.info("OK, read")
return lexicon
def _findval(elems, key):
for form in elems:
att = form.get("att", "")
if att == key:
return form.get("val")
return ""
| [((10, 6, 10, 33), 'logging.getLogger', 'logging.getLogger', ({(10, 24, 10, 32): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((16, 36, 16, 118), 'sparv.Output', 'Output', (), '', False, 'from sparv import Annotation, Model, ModelOutput, Output, annotator, modelbuilder\n'), ((17, 44, 17, 79), 'sparv.Annotation', 'Annotation', ({(17, 55, 17, 78): '"""<token>:saldo.lemgram"""'}, {}), "('<token>:saldo.lemgram')", False, 'from sparv import Annotation, Model, ModelOutput, Output, annotator, modelbuilder\n'), ((18, 37, 18, 66), 'sparv.Model', 'Model', ({(18, 43, 18, 65): '"""hist/diapivot.pickle"""'}, {}), "('hist/diapivot.pickle')", False, 'from sparv import Annotation, Model, ModelOutput, Output, annotator, modelbuilder\n'), ((44, 38, 44, 73), 'sparv.ModelOutput', 'ModelOutput', ({(44, 50, 44, 72): '"""hist/diapivot.pickle"""'}, {}), "('hist/diapivot.pickle')", False, 'from sparv import Annotation, Model, ModelOutput, Output, annotator, modelbuilder\n'), ((47, 16, 47, 42), 'sparv.Model', 'Model', ({(47, 22, 47, 41): '"""hist/diapivot.xml"""'}, {}), "('hist/diapivot.xml')", False, 'from sparv import Annotation, Model, ModelOutput, Output, annotator, modelbuilder\n'), ((110, 14, 110, 59), 'xml.etree.ElementTree.iterparse', 'etree.iterparse', (), '', True, 'import xml.etree.ElementTree as etree\n'), ((138, 4, 138, 41), 'sparv.util.test_lexicon', 'util.test_lexicon', ({(138, 22, 138, 29): 'lexicon', (138, 31, 138, 40): 'testwords'}, {}), '(lexicon, testwords)', True, 'import sparv.util as util\n'), ((82, 27, 82, 41), 'pickle.load', 'pickle.load', ({(82, 39, 82, 40): 'F'}, {}), '(F)', False, 'import pickle\n')] |
xinyang178/xbot | src/xbot/util/path.py | dad1fc67062dc6fd21802899fd68f7eb91c96569 | import os
def get_root_path():
current_path = os.path.abspath(os.path.dirname(__file__))
root_path = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.dirname(current_path)))
)
return os.path.join(root_path, "xbot")
def get_config_path():
config_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../config"))
return config_path
def get_data_path():
data_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), "../../../data/")
)
return data_path
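# Illustrative results (added for clarity): with this file at src/xbot/util/path.py,
# the relative paths above resolve roughly as follows (the absolute prefix depends
# on where the repository is checked out):
#
#   get_config_path()  # -> .../src/xbot/config
#   get_data_path()    # -> .../data at the repository root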
| [((9, 11, 9, 42), 'os.path.join', 'os.path.join', ({(9, 24, 9, 33): 'root_path', (9, 35, 9, 41): '"""xbot"""'}, {}), "(root_path, 'xbot')", False, 'import os\n'), ((5, 35, 5, 60), 'os.path.dirname', 'os.path.dirname', ({(5, 51, 5, 59): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((13, 47, 13, 72), 'os.path.dirname', 'os.path.dirname', ({(13, 63, 13, 71): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((19, 21, 19, 46), 'os.path.dirname', 'os.path.dirname', ({(19, 37, 19, 45): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((7, 40, 7, 69), 'os.path.dirname', 'os.path.dirname', ({(7, 56, 7, 68): 'current_path'}, {}), '(current_path)', False, 'import os\n')] |
HackSoftware/hackconf.bg | home/website/wagtail_hooks.py | ab3cc9fcdccf8991098553e0374103e3a241ce50 | from django.utils.html import format_html
from wagtail.wagtailcore import hooks
@hooks.register('insert_editor_js')
def enable_source():
return format_html(
"""
<script>
registerHalloPlugin('hallohtml');
</script>
"""
)
| [((5, 1, 5, 35), 'wagtail.wagtailcore.hooks.register', 'hooks.register', ({(5, 16, 5, 34): '"""insert_editor_js"""'}, {}), "('insert_editor_js')", False, 'from wagtail.wagtailcore import hooks\n'), ((7, 11, 13, 5), 'django.utils.html.format_html', 'format_html', ({(8, 8, 12, 11): '"""\n <script>\n registerHalloPlugin(\'hallohtml\');\n </script>\n """'}, {}), '(\n """\n <script>\n registerHalloPlugin(\'hallohtml\');\n </script>\n """\n )', False, 'from django.utils.html import format_html\n')] |
msgis/ngsi-timeseries-api | src/reporter/tests/test_api.py | 5cc7a8beab748cecfd5fba61740f3730361d4e31 | from conftest import QL_URL
import requests
def test_api():
api_url = "{}/".format(QL_URL)
r = requests.get('{}'.format(api_url))
assert r.status_code == 200, r.text
assert r.json() == {
"notify_url": "/v2/notify",
"subscriptions_url": "/v2/subscriptions",
"entities_url": "/v2/entities",
"types_url": "/v2/types",
"attributes_url": "/v2/attrs"
}
| [] |
dyung/llvm-zorg | zorg/buildbot/conditions/FileConditions.py | 42cd139968388b14323975647faf322c99945986 | from buildbot.process.remotecommand import RemoteCommand
from buildbot.interfaces import WorkerTooOldError
import stat
class FileExists(object):
"""I check a file existence on the worker. I return True if the file
with the given name exists, False if the file does not exist or that is
a directory.
Use me with doStepIf to make a build step conditional to existence of some
file. For example
doStepIf=FileExists('build/configure')
"""
def __init__(self, filename):
self.filename = filename
def __call__(self, step):
step.checkWorkerHasCommand('stat')
cmd = RemoteCommand('stat', {'file': self.filename})
d = step.runCommand(cmd)
d.addCallback(lambda res: self.commandComplete(cmd))
return d
def commandComplete(self, cmd):
if cmd.didFail():
return False
s = cmd.updates["stat"][-1]
filemode = s[stat.ST_MODE]
if stat.S_ISREG(filemode) or stat.S_ISLNK(filemode):
# True only if this is a file or a link and not any other file
# system object.
return True
else:
return False
class FileDoesNotExist(object):
"""I check a file existence on the worker. I return False if
the file with the given name exists or that is a directory, True if the
file does not exist.
Use me with doStepIf to make a build step conditional to nonexistence
of some file. For example
doStepIf=FileDoesNotExist('build/configure')
"""
def __init__(self, filename):
self.filename = filename
def __call__(self, step):
step.checkWorkerHasCommand('stat')
cmd = RemoteCommand('stat', {'file': self.filename})
d = step.runCommand(cmd)
d.addCallback(lambda res: self.commandComplete(cmd))
return d
def commandComplete(self, cmd):
# False if any filesystem object with the given name exists.
return cmd.didFail()
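# Illustrative sketch (added for clarity; the step class and file path are
# assumptions): the docstrings above suggest wiring a condition into a build
# step via doStepIf, e.g.
#
#   from buildbot.plugins import steps
#   configure = steps.ShellCommand(command=["./configure"],
#                                  doStepIf=FileDoesNotExist('build/configure'))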
| [((22, 14, 22, 60), 'buildbot.process.remotecommand.RemoteCommand', 'RemoteCommand', ({(22, 28, 22, 34): '"""stat"""', (22, 36, 22, 59): "{'file': self.filename}"}, {}), "('stat', {'file': self.filename})", False, 'from buildbot.process.remotecommand import RemoteCommand\n'), ((57, 14, 57, 60), 'buildbot.process.remotecommand.RemoteCommand', 'RemoteCommand', ({(57, 28, 57, 34): '"""stat"""', (57, 36, 57, 59): "{'file': self.filename}"}, {}), "('stat', {'file': self.filename})", False, 'from buildbot.process.remotecommand import RemoteCommand\n'), ((33, 11, 33, 33), 'stat.S_ISREG', 'stat.S_ISREG', ({(33, 24, 33, 32): 'filemode'}, {}), '(filemode)', False, 'import stat\n'), ((33, 37, 33, 59), 'stat.S_ISLNK', 'stat.S_ISLNK', ({(33, 50, 33, 58): 'filemode'}, {}), '(filemode)', False, 'import stat\n')] |
refaev/combat_gym | gym_combat/gym_combat/envs/main.py | f02fcf98e95a1dda29cdddd4ae271de3e18ea3bf | from matplotlib import style
from tqdm import tqdm
style.use("ggplot")
from gym_combat.envs.Arena.CState import State
from gym_combat.envs.Arena.Entity import Entity
from gym_combat.envs.Arena.Environment import Environment, Episode
from gym_combat.envs.Common.constants import *
from gym_combat.envs.Qtable import Qtable_DecisionMaker
from gym_combat.envs.DQN import DQNAgent_keras
from gym_combat.envs.Greedy import Greedy_player
import matplotlib.pyplot as plt
def print_start_of_game_info(blue_decision_maker, red_decision_maker):
print("Starting tournament!")
print("Blue player type: ", Agent_type_str[blue_decision_maker.type()])
if blue_decision_maker.path_model_to_load==None:
print("Blue player starting with no model")
else:
print("Blue player starting tournament with trained model: " , blue_decision_maker.path_model_to_load)
print("Red player type: ", Agent_type_str[red_decision_maker.type()])
if red_decision_maker.path_model_to_load==None:
print("Red player starting with no model")
else:
print("Red player starting tournament with trained model: " , red_decision_maker.path_model_to_load)
print("Number of rounds: ", NUM_OF_EPISODES)
print("~~~ GO! ~~~\n\n")
def evaluate(episode_number):
#if episode_number % EVALUATE_PLAYERS_EVERY == 0:
a = episode_number % EVALUATE_PLAYERS_EVERY
if a>=0 and a<EVALUATE_BATCH_SIZE:
EVALUATE = True
else:
EVALUATE = False
return EVALUATE
def print_states(observation_for_blue_s0, observation_for_blue_s1):
import matplotlib.pyplot as plt
plt.matshow(observation_for_blue_s0.img)
plt.show()
plt.matshow(observation_for_blue_s1.img)
plt.show()
if __name__ == '__main__':
env = Environment(IS_TRAINING)
print("Starting Blue player")
blue_decision_maker = DQNAgent_keras.DQNAgent_keras()
#blue_decision_maker = DQNAgent_keras.DQNAgent_keras(UPDATE_CONTEXT=True, path_model_to_load='conv1(6_6_1_256)_conv2(4_4_256_128)_conv3(3_3_128_128)_flatten_fc__blue_202001_ 0.95max_ -0.04avg_ -3.10min__1620558885.model')
print("Starting red player")
### Red Decision Maker
red_decision_maker = Greedy_player.Greedy_player()
env.blue_player = Entity(blue_decision_maker)
env.red_player = Entity(red_decision_maker)
print_start_of_game_info(blue_decision_maker, red_decision_maker)
NUM_OF_EPISODES = env.NUMBER_OF_EPISODES
for episode in tqdm(range(1, NUM_OF_EPISODES + 1), ascii=True, unit='episodes'):
EVALUATE = evaluate(episode)
current_episode = Episode(episode, EVALUATE, show_always=False if IS_TRAINING else True)
# set new start position for the players
env.reset_game(episode)
# get observation
observation_for_blue_s0: State = env.get_observation_for_blue()
action_blue = -1
# initialize the decision_makers for the players
blue_decision_maker.set_initial_state(observation_for_blue_s0, episode)
#red_decision_maker.set_initial_state(observation_for_red_s0, episode) # for non-greedy players
blue_won_the_game = False
red_won_the_game = False
for steps_current_game in range(1, MAX_STEPS_PER_EPISODE + 1):
##### Blue's turn! #####
observation_for_blue_s0: State = env.get_observation_for_blue()
current_episode.print_episode(env, steps_current_game)
action_blue: AgentAction = blue_decision_maker.get_action(observation_for_blue_s0, EVALUATE)
env.take_action(Color.Blue, action_blue) # take the action!
current_episode.print_episode(env, steps_current_game)
current_episode.is_terminal = (env.compute_terminal(whos_turn=Color.Blue) is not WinEnum.NoWin)
if current_episode.is_terminal:# Blue won the game!
blue_won_the_game=True
else:
##### Red's turn! #####
observation_for_red_s0: State = env.get_observation_for_red()
action_red: AgentAction = red_decision_maker.get_action(observation_for_red_s0, EVALUATE)
env.take_action(Color.Red, action_red) # take the action!
current_episode.is_terminal = (env.compute_terminal(whos_turn=Color.Red) is not WinEnum.NoWin)
                if current_episode.is_terminal: # Red won the game!
red_won_the_game = True
current_episode.print_episode(env, steps_current_game)
reward_step_blue, reward_step_red = env.handle_reward(steps_current_game)
current_episode.episode_reward_red += reward_step_red
current_episode.episode_reward_blue += reward_step_blue
observation_for_blue_s1: State = env.get_observation_for_blue()
blue_decision_maker.update_context(observation_for_blue_s0, action_blue, reward_step_blue, observation_for_blue_s1,
current_episode.is_terminal, EVALUATE)
if steps_current_game == MAX_STEPS_PER_EPISODE:
# if we exited the loop because we reached MAX_STEPS_PER_EPISODE
current_episode.is_terminal = True
if blue_won_the_game or red_won_the_game:
break
# for statistics
env.update_win_counters(steps_current_game)
env.data_for_statistics(current_episode.episode_reward_blue, current_episode.episode_reward_red, steps_current_game, blue_decision_maker.get_epsolon())
env.evaluate_info(EVALUATE, episode, steps_current_game, blue_decision_maker.get_epsolon())
if current_episode.episode_number % SAVE_STATS_EVERY == 0:
if False:#blue_decision_maker.type()== AgentType.DQN_keras or blue_decision_maker.type() == AgentType.DQN_basic:
blue_decision_maker._decision_maker.print_model(observation_for_blue_s0, episode, "conv")#env.save_folder_path)
# print info of episode:
current_episode.print_info_of_episode(env, steps_current_game, blue_decision_maker.get_epsolon(), episode)
env.end_run()
if blue_decision_maker.type() == AgentType.DQN_keras or blue_decision_maker.type() == AgentType.DQN_basic:
blue_decision_maker._decision_maker.print_model(observation_for_blue_s0, episode, env.save_folder_path)
| [((4, 0, 4, 19), 'matplotlib.style.use', 'style.use', ({(4, 10, 4, 18): '"""ggplot"""'}, {}), "('ggplot')", False, 'from matplotlib import style\n'), ((44, 4, 44, 44), 'matplotlib.pyplot.matshow', 'plt.matshow', ({(44, 16, 44, 43): 'observation_for_blue_s0.img'}, {}), '(observation_for_blue_s0.img)', True, 'import matplotlib.pyplot as plt\n'), ((45, 4, 45, 14), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((47, 4, 47, 44), 'matplotlib.pyplot.matshow', 'plt.matshow', ({(47, 16, 47, 43): 'observation_for_blue_s1.img'}, {}), '(observation_for_blue_s1.img)', True, 'import matplotlib.pyplot as plt\n'), ((48, 4, 48, 14), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((54, 10, 54, 34), 'gym_combat.envs.Arena.Environment.Environment', 'Environment', ({(54, 22, 54, 33): 'IS_TRAINING'}, {}), '(IS_TRAINING)', False, 'from gym_combat.envs.Arena.Environment import Environment, Episode\n'), ((58, 26, 58, 57), 'gym_combat.envs.DQN.DQNAgent_keras.DQNAgent_keras', 'DQNAgent_keras.DQNAgent_keras', ({}, {}), '()', False, 'from gym_combat.envs.DQN import DQNAgent_keras\n'), ((63, 25, 63, 54), 'gym_combat.envs.Greedy.Greedy_player.Greedy_player', 'Greedy_player.Greedy_player', ({}, {}), '()', False, 'from gym_combat.envs.Greedy import Greedy_player\n'), ((66, 22, 66, 49), 'gym_combat.envs.Arena.Entity.Entity', 'Entity', ({(66, 29, 66, 48): 'blue_decision_maker'}, {}), '(blue_decision_maker)', False, 'from gym_combat.envs.Arena.Entity import Entity\n'), ((67, 21, 67, 47), 'gym_combat.envs.Arena.Entity.Entity', 'Entity', ({(67, 28, 67, 46): 'red_decision_maker'}, {}), '(red_decision_maker)', False, 'from gym_combat.envs.Arena.Entity import Entity\n'), ((75, 26, 75, 96), 'gym_combat.envs.Arena.Environment.Episode', 'Episode', (), '', False, 'from gym_combat.envs.Arena.Environment import Environment, Episode\n')] |
ramongonze/libqif | libqif/core/hyper.py | 57be74a2342a303da5415a3d787855b8115e58f8 | """Hyper-distributions."""
from libqif.core.secrets import Secrets
from libqif.core.channel import Channel
from numpy import array, arange, zeros
from numpy import delete as npdelete
class Hyper:
def __init__(self, channel):
"""Hyper-distribution. To create an instance of this class it is
class it is necessary to have an instance of :py:class:`.Channel`
class. Once created an instance of :py:class:`.Hyper`, the constructor
generates the joint, outer and inner distributions.
Attributes
----------
channel : core.Channel
Channel object.
joint : numpy.ndarray
Matrix of joint distribution.
outer : numpy.ndarray
Outer distribution.
inners : numpy.ndarray
Matrix of inner distributions.
num_posteriors : int
            Number of posterior distributions that result from reducing the
            hyper-distribution, i.e., removing columns that contain only
            zeros and merging columns in which one is a linear combination
            of another.
Parameters
----------
channel : core.Channel
Channel object.
"""
self._check_types(channel)
self.channel = channel
self.joint = self._generate_joint_distribution()
self.outer, self.inners = self._generate_posteriors()
self._reduce_hyper()
self.num_posteriors = len(self.outer)
def update_prior(self, prior):
"""Update the prior distribution on set of secrets.
The number of secrets must match the current number of rows of the channel.
Parameters
----------
prior : list, numpy.ndarray
Prior distribution on the set of secrets. prior[i] is the
probability of secret named labels[i] beeing the real secret.
"""
self.channel.update_prior(prior)
self.joint = self._generate_joint_distribution()
self.outer, self.inners = self._generate_posteriors()
self._reduce_hyper()
self.num_posteriors = len(self.outer)
def _check_types(self, channel):
if type(channel) != type(Channel(Secrets(['x1','x2'], [1,0]), ['y1'], array([[1],[1]]))):
raise TypeError('The parameter \'channel\' must be a core.channel.Channel object')
def _generate_joint_distribution(self):
joint = []
channel_t = self.channel.matrix.T
for i in arange(self.channel.num_outputs):
joint.append(self.channel.secrets.prior * channel_t[i])
return array(joint).T
def _generate_posteriors(self):
joint_t = self.joint.T.copy()
outer = []
for i in arange(self.channel.num_outputs):
outer.append(joint_t[i].sum())
if outer[i] > 0:
joint_t[i] = joint_t[i]/outer[i]
return array(outer), joint_t.T
def _reduce_hyper(self):
"""Given the hyper-distribution generated by _generate_posteriors
remove columns with zeros and merge columns that are a linear
combination of others. Thus algorithm has time complexity of O(n*m^2)
where n is the number of secrets and m is the number of outputs in
the.
"""
epsilon = 10**(-6)
        # Delete inners that have 0 probability of occurring
zero_prob = self.outer < epsilon
self.outer = npdelete(self.outer, zero_prob, 0)
self.inners = npdelete(self.inners, zero_prob, 1)
delete_inner = [False] * len(self.outer)
for i in arange(self.inners.shape[1]):
for j in arange(i+1, self.inners.shape[1]):
# Check if inner i is equal to inner j
if (abs(self.inners[:,i] - self.inners[:,j]) < epsilon).sum() == self.channel.secrets.num_secrets:
delete_inner[j] = True # Delete inner j
self.outer[i] += self.outer[j] # Merge inner j into inner i
self.outer = npdelete(self.outer, delete_inner, 0)
self.inners = npdelete(self.inners, delete_inner, 1)
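# Illustrative usage sketch (added for clarity; it reuses the exact Secrets/Channel
# construction from _check_types above: two secrets with prior (1, 0) and a
# single-output channel). Variable names are arbitrary.
if __name__ == "__main__":
    example_secrets = Secrets(['x1', 'x2'], [1, 0])
    example_channel = Channel(example_secrets, ['y1'], array([[1], [1]]))
    example_hyper = Hyper(example_channel)
    print(example_hyper.outer)           # outer distribution over posteriors
    print(example_hyper.inners)          # one posterior (inner) per column
    print(example_hyper.num_posteriors)  # posteriors left after reduction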
| [((72, 17, 72, 49), 'numpy.arange', 'arange', ({(72, 24, 72, 48): 'self.channel.num_outputs'}, {}), '(self.channel.num_outputs)', False, 'from numpy import array, arange, zeros\n'), ((80, 17, 80, 49), 'numpy.arange', 'arange', ({(80, 24, 80, 48): 'self.channel.num_outputs'}, {}), '(self.channel.num_outputs)', False, 'from numpy import array, arange, zeros\n'), ((98, 21, 98, 55), 'numpy.delete', 'npdelete', ({(98, 30, 98, 40): 'self.outer', (98, 42, 98, 51): 'zero_prob', (98, 53, 98, 54): '0'}, {}), '(self.outer, zero_prob, 0)', True, 'from numpy import delete as npdelete\n'), ((99, 22, 99, 57), 'numpy.delete', 'npdelete', ({(99, 31, 99, 42): 'self.inners', (99, 44, 99, 53): 'zero_prob', (99, 55, 99, 56): '1'}, {}), '(self.inners, zero_prob, 1)', True, 'from numpy import delete as npdelete\n'), ((102, 17, 102, 45), 'numpy.arange', 'arange', ({(102, 24, 102, 44): 'self.inners.shape[1]'}, {}), '(self.inners.shape[1])', False, 'from numpy import array, arange, zeros\n'), ((109, 21, 109, 58), 'numpy.delete', 'npdelete', ({(109, 30, 109, 40): 'self.outer', (109, 42, 109, 54): 'delete_inner', (109, 56, 109, 57): '0'}, {}), '(self.outer, delete_inner, 0)', True, 'from numpy import delete as npdelete\n'), ((110, 22, 110, 60), 'numpy.delete', 'npdelete', ({(110, 31, 110, 42): 'self.inners', (110, 44, 110, 56): 'delete_inner', (110, 58, 110, 59): '1'}, {}), '(self.inners, delete_inner, 1)', True, 'from numpy import delete as npdelete\n'), ((75, 15, 75, 27), 'numpy.array', 'array', ({(75, 21, 75, 26): 'joint'}, {}), '(joint)', False, 'from numpy import array, arange, zeros\n'), ((85, 15, 85, 27), 'numpy.array', 'array', ({(85, 21, 85, 26): 'outer'}, {}), '(outer)', False, 'from numpy import array, arange, zeros\n'), ((103, 21, 103, 54), 'numpy.arange', 'arange', ({(103, 28, 103, 31): '(i + 1)', (103, 33, 103, 53): 'self.inners.shape[1]'}, {}), '(i + 1, self.inners.shape[1])', False, 'from numpy import array, arange, zeros\n'), ((66, 41, 66, 68), 'libqif.core.secrets.Secrets', 'Secrets', ({(66, 49, 66, 60): "['x1', 'x2']", (66, 62, 66, 67): '[1, 0]'}, {}), "(['x1', 'x2'], [1, 0])", False, 'from libqif.core.secrets import Secrets\n'), ((66, 78, 66, 94), 'numpy.array', 'array', ({(66, 84, 66, 93): '[[1], [1]]'}, {}), '([[1], [1]])', False, 'from numpy import array, arange, zeros\n')] |
gvashchenkolineate/gvashchenkolineate_infra_trytravis | ansible/venv/lib/python2.7/site-packages/ansible/modules/network/fortios/fortios_system_virtual_wan_link.py | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_virtual_wan_link
short_description: Configure redundant internet connections using SD-WAN (formerly virtual WAN link) in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify system feature and virtual_wan_link category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
system_virtual_wan_link:
description:
- Configure redundant internet connections using SD-WAN (formerly virtual WAN link).
default: null
type: dict
suboptions:
fail_alert_interfaces:
description:
- Physical interfaces that will be alerted.
type: list
suboptions:
name:
description:
- Physical interface name. Source system.interface.name.
required: true
type: str
fail_detect:
description:
- Enable/disable SD-WAN Internet connection status checking (failure detection).
type: str
choices:
- enable
- disable
health_check:
description:
- SD-WAN status checking or health checking. Identify a server on the Internet and determine how SD-WAN verifies that the FortiGate can
communicate with it.
type: list
suboptions:
addr_mode:
description:
- Address mode (IPv4 or IPv6).
type: str
choices:
- ipv4
- ipv6
failtime:
description:
- Number of failures before server is considered lost (1 - 3600).
type: int
http_agent:
description:
- String in the http-agent field in the HTTP header.
type: str
http_get:
description:
                            - URL used to communicate with the server if the protocol is HTTP.
type: str
http_match:
description:
- Response string expected from the server if the protocol is HTTP.
type: str
interval:
description:
- Status check interval, or the time between attempting to connect to the server (1 - 3600 sec).
type: int
members:
description:
- Member sequence number list.
type: list
suboptions:
seq_num:
description:
- Member sequence number. Source system.virtual-wan-link.members.seq-num.
type: int
name:
description:
- Status check or health check name.
required: true
type: str
packet_size:
description:
                            - Packet size of a twamp test session.
type: int
password:
description:
- Twamp controller password in authentication mode
type: str
port:
description:
- Port number used to communicate with the server over the selected protocol.
type: int
protocol:
description:
- Protocol used to determine if the FortiGate can communicate with the server.
type: str
choices:
- ping
- tcp-echo
- udp-echo
- http
- twamp
- ping6
recoverytime:
description:
- Number of successful responses received before server is considered recovered (1 - 3600).
type: int
security_mode:
description:
- Twamp controller security mode.
type: str
choices:
- none
- authentication
server:
description:
- IP address or FQDN name of the server.
type: str
sla:
description:
- Service level agreement (SLA).
type: list
suboptions:
id:
description:
- SLA ID.
required: true
type: int
jitter_threshold:
description:
- Jitter for SLA to make decision in milliseconds. (0 - 10000000).
type: int
latency_threshold:
description:
- Latency for SLA to make decision in milliseconds. (0 - 10000000).
type: int
link_cost_factor:
description:
- Criteria on which to base link selection.
type: str
choices:
- latency
- jitter
- packet-loss
packetloss_threshold:
description:
- Packet loss for SLA to make decision in percentage. (0 - 100).
type: int
threshold_alert_jitter:
description:
- Alert threshold for jitter (ms).
type: int
threshold_alert_latency:
description:
- Alert threshold for latency (ms).
type: int
threshold_alert_packetloss:
description:
- Alert threshold for packet loss (percentage).
type: int
threshold_warning_jitter:
description:
- Warning threshold for jitter (ms).
type: int
threshold_warning_latency:
description:
- Warning threshold for latency (ms).
type: int
threshold_warning_packetloss:
description:
- Warning threshold for packet loss (percentage).
type: int
update_cascade_interface:
description:
- Enable/disable update cascade interface.
type: str
choices:
- enable
- disable
update_static_route:
description:
- Enable/disable updating the static route.
type: str
choices:
- enable
- disable
load_balance_mode:
description:
- Algorithm or mode to use for load balancing Internet traffic to SD-WAN members.
type: str
choices:
- source-ip-based
- weight-based
- usage-based
- source-dest-ip-based
- measured-volume-based
members:
description:
- Physical FortiGate interfaces added to the virtual-wan-link.
type: list
suboptions:
comment:
description:
- Comments.
type: str
gateway:
description:
- The default gateway for this interface. Usually the default gateway of the Internet service provider that this interface is
connected to.
type: str
gateway6:
description:
- IPv6 gateway.
type: str
ingress_spillover_threshold:
description:
- Ingress spillover threshold for this interface (0 - 16776000 kbit/s). When this traffic volume threshold is reached, new
sessions spill over to other interfaces in the SD-WAN.
type: int
interface:
description:
- Interface name. Source system.interface.name.
type: str
priority:
description:
- Priority of the interface (0 - 4294967295). Used for SD-WAN rules or priority rules.
type: int
seq_num:
description:
- Sequence number(1-255).
type: int
source:
description:
- Source IP address used in the health-check packet to the server.
type: str
source6:
description:
- Source IPv6 address used in the health-check packet to the server.
type: str
spillover_threshold:
description:
- Egress spillover threshold for this interface (0 - 16776000 kbit/s). When this traffic volume threshold is reached, new sessions
spill over to other interfaces in the SD-WAN.
type: int
status:
description:
- Enable/disable this interface in the SD-WAN.
type: str
choices:
- disable
- enable
volume_ratio:
description:
- Measured volume ratio (this value / sum of all values = percentage of link volume, 0 - 255).
type: int
weight:
description:
- Weight of this interface for weighted load balancing. (0 - 255) More traffic is directed to interfaces with higher weights.
type: int
service:
description:
- Create SD-WAN rules or priority rules (also called services) to control how sessions are distributed to physical interfaces in the
SD-WAN.
type: list
suboptions:
addr_mode:
description:
- Address mode (IPv4 or IPv6).
type: str
choices:
- ipv4
- ipv6
bandwidth_weight:
description:
- Coefficient of reciprocal of available bidirectional bandwidth in the formula of custom-profile-1.
type: int
default:
description:
- Enable/disable use of SD-WAN as default service.
type: str
choices:
- enable
- disable
dscp_forward:
description:
- Enable/disable forward traffic DSCP tag.
type: str
choices:
- enable
- disable
dscp_forward_tag:
description:
- Forward traffic DSCP tag.
type: str
dscp_reverse:
description:
- Enable/disable reverse traffic DSCP tag.
type: str
choices:
- enable
- disable
dscp_reverse_tag:
description:
- Reverse traffic DSCP tag.
type: str
dst:
description:
- Destination address name.
type: list
suboptions:
name:
description:
- Address or address group name. Source firewall.address.name firewall.addrgrp.name.
required: true
type: str
dst_negate:
description:
- Enable/disable negation of destination address match.
type: str
choices:
- enable
- disable
dst6:
description:
- Destination address6 name.
type: list
suboptions:
name:
description:
- Address6 or address6 group name. Source firewall.address6.name firewall.addrgrp6.name.
required: true
type: str
end_port:
description:
- End destination port number.
type: int
gateway:
description:
- Enable/disable SD-WAN service gateway.
type: str
choices:
- enable
- disable
groups:
description:
- User groups.
type: list
suboptions:
name:
description:
- Group name. Source user.group.name.
required: true
type: str
health_check:
description:
- Health check. Source system.virtual-wan-link.health-check.name.
type: str
hold_down_time:
description:
- Waiting period in seconds when switching from the back-up member to the primary member (0 - 10000000).
type: int
id:
description:
- Priority rule ID (1 - 4000).
required: true
type: int
input_device:
description:
- Source interface name.
type: list
suboptions:
name:
description:
- Interface name. Source system.interface.name.
required: true
type: str
internet_service:
description:
- Enable/disable use of Internet service for application-based load balancing.
type: str
choices:
- enable
- disable
internet_service_ctrl:
description:
- Control-based Internet Service ID list.
type: list
suboptions:
id:
description:
- Control-based Internet Service ID.
required: true
type: int
internet_service_ctrl_group:
description:
- Control-based Internet Service group list.
type: list
suboptions:
name:
description:
- Control-based Internet Service group name. Source application.group.name.
required: true
type: str
internet_service_custom:
description:
- Custom Internet service name list.
type: list
suboptions:
name:
description:
- Custom Internet service name. Source firewall.internet-service-custom.name.
required: true
type: str
internet_service_custom_group:
description:
- Custom Internet Service group list.
type: list
suboptions:
name:
description:
- Custom Internet Service group name. Source firewall.internet-service-custom-group.name.
required: true
type: str
internet_service_group:
description:
- Internet Service group list.
type: list
suboptions:
name:
description:
- Internet Service group name. Source firewall.internet-service-group.name.
required: true
type: str
internet_service_id:
description:
- Internet service ID list.
type: list
suboptions:
id:
description:
- Internet service ID. Source firewall.internet-service.id.
required: true
type: int
jitter_weight:
description:
- Coefficient of jitter in the formula of custom-profile-1.
type: int
latency_weight:
description:
- Coefficient of latency in the formula of custom-profile-1.
type: int
link_cost_factor:
description:
- Link cost factor.
type: str
choices:
- latency
- jitter
- packet-loss
- inbandwidth
- outbandwidth
- bibandwidth
- custom-profile-1
link_cost_threshold:
description:
- Percentage threshold change of link cost values that will result in policy route regeneration (0 - 10000000).
type: int
member:
description:
- Member sequence number.
type: int
mode:
description:
- Control how the priority rule sets the priority of interfaces in the SD-WAN.
type: str
choices:
- auto
- manual
- priority
- sla
name:
description:
- Priority rule name.
type: str
packet_loss_weight:
description:
- Coefficient of packet-loss in the formula of custom-profile-1.
type: int
priority_members:
description:
- Member sequence number list.
type: list
suboptions:
seq_num:
description:
- Member sequence number. Source system.virtual-wan-link.members.seq-num.
type: int
protocol:
description:
- Protocol number.
type: int
quality_link:
description:
- Quality grade.
type: int
route_tag:
description:
- IPv4 route map route-tag.
type: int
sla:
description:
- Service level agreement (SLA).
type: list
suboptions:
health_check:
description:
- Virtual WAN Link health-check. Source system.virtual-wan-link.health-check.name.
type: str
id:
description:
- SLA ID.
type: int
src:
description:
- Source address name.
type: list
suboptions:
name:
description:
- Address or address group name. Source firewall.address.name firewall.addrgrp.name.
required: true
type: str
src_negate:
description:
- Enable/disable negation of source address match.
type: str
choices:
- enable
- disable
src6:
description:
- Source address6 name.
type: list
suboptions:
name:
description:
- Address6 or address6 group name. Source firewall.address6.name firewall.addrgrp6.name.
required: true
type: str
start_port:
description:
- Start destination port number.
type: int
status:
description:
- Enable/disable SD-WAN service.
type: str
choices:
- enable
- disable
tos:
description:
- Type of service bit pattern.
type: str
tos_mask:
description:
- Type of service evaluated bits.
type: str
users:
description:
- User name.
type: list
suboptions:
name:
description:
- User name. Source user.local.name.
required: true
type: str
status:
description:
- Enable/disable SD-WAN.
type: str
choices:
- disable
- enable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure redundant internet connections using SD-WAN (formerly virtual WAN link).
fortios_system_virtual_wan_link:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
system_virtual_wan_link:
fail_alert_interfaces:
-
name: "default_name_4 (source system.interface.name)"
fail_detect: "enable"
health_check:
-
addr_mode: "ipv4"
failtime: "8"
http_agent: "<your_own_value>"
http_get: "<your_own_value>"
http_match: "<your_own_value>"
interval: "12"
members:
-
seq_num: "14 (source system.virtual-wan-link.members.seq-num)"
name: "default_name_15"
packet_size: "16"
password: "<your_own_value>"
port: "18"
protocol: "ping"
recoverytime: "20"
security_mode: "none"
server: "192.168.100.40"
sla:
-
id: "24"
jitter_threshold: "25"
latency_threshold: "26"
link_cost_factor: "latency"
packetloss_threshold: "28"
threshold_alert_jitter: "29"
threshold_alert_latency: "30"
threshold_alert_packetloss: "31"
threshold_warning_jitter: "32"
threshold_warning_latency: "33"
threshold_warning_packetloss: "34"
update_cascade_interface: "enable"
update_static_route: "enable"
load_balance_mode: "source-ip-based"
members:
-
comment: "Comments."
gateway: "<your_own_value>"
gateway6: "<your_own_value>"
ingress_spillover_threshold: "42"
interface: "<your_own_value> (source system.interface.name)"
priority: "44"
seq_num: "45"
source: "<your_own_value>"
source6: "<your_own_value>"
spillover_threshold: "48"
status: "disable"
volume_ratio: "50"
weight: "51"
service:
-
addr_mode: "ipv4"
bandwidth_weight: "54"
default: "enable"
dscp_forward: "enable"
dscp_forward_tag: "<your_own_value>"
dscp_reverse: "enable"
dscp_reverse_tag: "<your_own_value>"
dst:
-
name: "default_name_61 (source firewall.address.name firewall.addrgrp.name)"
dst_negate: "enable"
dst6:
-
name: "default_name_64 (source firewall.address6.name firewall.addrgrp6.name)"
end_port: "65"
gateway: "enable"
groups:
-
name: "default_name_68 (source user.group.name)"
health_check: "<your_own_value> (source system.virtual-wan-link.health-check.name)"
hold_down_time: "70"
id: "71"
input_device:
-
name: "default_name_73 (source system.interface.name)"
internet_service: "enable"
internet_service_ctrl:
-
id: "76"
internet_service_ctrl_group:
-
name: "default_name_78 (source application.group.name)"
internet_service_custom:
-
name: "default_name_80 (source firewall.internet-service-custom.name)"
internet_service_custom_group:
-
name: "default_name_82 (source firewall.internet-service-custom-group.name)"
internet_service_group:
-
name: "default_name_84 (source firewall.internet-service-group.name)"
internet_service_id:
-
id: "86 (source firewall.internet-service.id)"
jitter_weight: "87"
latency_weight: "88"
link_cost_factor: "latency"
link_cost_threshold: "90"
member: "91"
mode: "auto"
name: "default_name_93"
packet_loss_weight: "94"
priority_members:
-
seq_num: "96 (source system.virtual-wan-link.members.seq-num)"
protocol: "97"
quality_link: "98"
route_tag: "99"
sla:
-
health_check: "<your_own_value> (source system.virtual-wan-link.health-check.name)"
id: "102"
src:
-
name: "default_name_104 (source firewall.address.name firewall.addrgrp.name)"
src_negate: "enable"
src6:
-
name: "default_name_107 (source firewall.address6.name firewall.addrgrp6.name)"
start_port: "108"
status: "enable"
tos: "<your_own_value>"
tos_mask: "<your_own_value>"
users:
-
name: "default_name_113 (source user.local.name)"
status: "disable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_system_virtual_wan_link_data(json):
option_list = ['fail_alert_interfaces', 'fail_detect', 'health_check',
'load_balance_mode', 'members', 'service',
'status']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
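# Illustrative example (not part of the original module): a key such as
# 'load_balance_mode' becomes 'load-balance-mode' before the payload is sent
# to the FortiGate API, recursing through nested dicts and lists.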
def system_virtual_wan_link(data, fos):
vdom = data['vdom']
system_virtual_wan_link_data = data['system_virtual_wan_link']
filtered_data = underscore_to_hyphen(filter_system_virtual_wan_link_data(system_virtual_wan_link_data))
return fos.set('system',
'virtual-wan-link',
data=filtered_data,
vdom=vdom)
def is_successful_status(status):
    return status['status'] == "success" or \
        (status['http_method'] == "DELETE" and status['http_status'] == 404)
def fortios_system(data, fos):
if data['system_virtual_wan_link']:
resp = system_virtual_wan_link(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"system_virtual_wan_link": {
"required": False, "type": "dict", "default": None,
"options": {
"fail_alert_interfaces": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"fail_detect": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"health_check": {"required": False, "type": "list",
"options": {
"addr_mode": {"required": False, "type": "str",
"choices": ["ipv4", "ipv6"]},
"failtime": {"required": False, "type": "int"},
"http_agent": {"required": False, "type": "str"},
"http_get": {"required": False, "type": "str"},
"http_match": {"required": False, "type": "str"},
"interval": {"required": False, "type": "int"},
"members": {"required": False, "type": "list",
"options": {
"seq_num": {"required": False, "type": "int"}
}},
"name": {"required": True, "type": "str"},
"packet_size": {"required": False, "type": "int"},
"password": {"required": False, "type": "str"},
"port": {"required": False, "type": "int"},
"protocol": {"required": False, "type": "str",
"choices": ["ping", "tcp-echo", "udp-echo",
"http", "twamp", "ping6"]},
"recoverytime": {"required": False, "type": "int"},
"security_mode": {"required": False, "type": "str",
"choices": ["none", "authentication"]},
"server": {"required": False, "type": "str"},
"sla": {"required": False, "type": "list",
"options": {
"id": {"required": True, "type": "int"},
"jitter_threshold": {"required": False, "type": "int"},
"latency_threshold": {"required": False, "type": "int"},
"link_cost_factor": {"required": False, "type": "str",
"choices": ["latency", "jitter", "packet-loss"]},
"packetloss_threshold": {"required": False, "type": "int"}
}},
"threshold_alert_jitter": {"required": False, "type": "int"},
"threshold_alert_latency": {"required": False, "type": "int"},
"threshold_alert_packetloss": {"required": False, "type": "int"},
"threshold_warning_jitter": {"required": False, "type": "int"},
"threshold_warning_latency": {"required": False, "type": "int"},
"threshold_warning_packetloss": {"required": False, "type": "int"},
"update_cascade_interface": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"update_static_route": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}},
"load_balance_mode": {"required": False, "type": "str",
"choices": ["source-ip-based", "weight-based", "usage-based",
"source-dest-ip-based", "measured-volume-based"]},
"members": {"required": False, "type": "list",
"options": {
"comment": {"required": False, "type": "str"},
"gateway": {"required": False, "type": "str"},
"gateway6": {"required": False, "type": "str"},
"ingress_spillover_threshold": {"required": False, "type": "int"},
"interface": {"required": False, "type": "str"},
"priority": {"required": False, "type": "int"},
"seq_num": {"required": False, "type": "int"},
"source": {"required": False, "type": "str"},
"source6": {"required": False, "type": "str"},
"spillover_threshold": {"required": False, "type": "int"},
"status": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"volume_ratio": {"required": False, "type": "int"},
"weight": {"required": False, "type": "int"}
}},
"service": {"required": False, "type": "list",
"options": {
"addr_mode": {"required": False, "type": "str",
"choices": ["ipv4", "ipv6"]},
"bandwidth_weight": {"required": False, "type": "int"},
"default": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"dscp_forward": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"dscp_forward_tag": {"required": False, "type": "str"},
"dscp_reverse": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"dscp_reverse_tag": {"required": False, "type": "str"},
"dst": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"dst_negate": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"dst6": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"end_port": {"required": False, "type": "int"},
"gateway": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"groups": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"health_check": {"required": False, "type": "str"},
"hold_down_time": {"required": False, "type": "int"},
"id": {"required": True, "type": "int"},
"input_device": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"internet_service": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"internet_service_ctrl": {"required": False, "type": "list",
"options": {
"id": {"required": True, "type": "int"}
}},
"internet_service_ctrl_group": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"internet_service_custom": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"internet_service_custom_group": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"internet_service_group": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"internet_service_id": {"required": False, "type": "list",
"options": {
"id": {"required": True, "type": "int"}
}},
"jitter_weight": {"required": False, "type": "int"},
"latency_weight": {"required": False, "type": "int"},
"link_cost_factor": {"required": False, "type": "str",
"choices": ["latency", "jitter", "packet-loss",
"inbandwidth", "outbandwidth", "bibandwidth",
"custom-profile-1"]},
"link_cost_threshold": {"required": False, "type": "int"},
"member": {"required": False, "type": "int"},
"mode": {"required": False, "type": "str",
"choices": ["auto", "manual", "priority",
"sla"]},
"name": {"required": False, "type": "str"},
"packet_loss_weight": {"required": False, "type": "int"},
"priority_members": {"required": False, "type": "list",
"options": {
"seq_num": {"required": False, "type": "int"}
}},
"protocol": {"required": False, "type": "int"},
"quality_link": {"required": False, "type": "int"},
"route_tag": {"required": False, "type": "int"},
"sla": {"required": False, "type": "list",
"options": {
"health_check": {"required": False, "type": "str"},
"id": {"required": False, "type": "int"}
}},
"src": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"src_negate": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"src6": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"start_port": {"required": False, "type": "int"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"tos": {"required": False, "type": "str"},
"tos_mask": {"required": False, "type": "str"},
"users": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}}
}},
"status": {"required": False, "type": "str",
"choices": ["disable", "enable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_system(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_system(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| [((1133, 13, 1134, 53), 'ansible.module_utils.basic.AnsibleModule', 'AnsibleModule', (), '', False, 'from ansible.module_utils.basic import AnsibleModule\n'), ((1155, 14, 1155, 26), 'fortiosapi.FortiOSAPI', 'FortiOSAPI', ({}, {}), '()', False, 'from fortiosapi import FortiOSAPI\n'), ((1143, 25, 1143, 56), 'ansible.module_utils.connection.Connection', 'Connection', ({(1143, 36, 1143, 55): 'module._socket_path'}, {}), '(module._socket_path)', False, 'from ansible.module_utils.connection import Connection\n'), ((1144, 18, 1144, 44), 'ansible.module_utils.network.fortios.fortios.FortiOSHandler', 'FortiOSHandler', ({(1144, 33, 1144, 43): 'connection'}, {}), '(connection)', False, 'from ansible.module_utils.network.fortios.fortios import FortiOSHandler\n')] |
victorlujan/Dise-odeSoftwarePatrones | src/Puerta.py | b9845cc1c4abdc44867c90b9e9784246e57f16b3 | from ElementoMapa import ElementoMapa
class Puerta (ElementoMapa):
def __init__(self):
self.abierta= True
self.lado2=None
self.lado1=None
def get_abierta(self):
return self.abierta
def print_cosas(self):
print("hola")
def set_abierta(self, value):
self.abierta = value
def get_lado1(self):
return self.lado1
def set_lado1(self, value):
self.lado1 = value
def get_lado2(self):
return self.lado2
def set_lado2(self, value):
self.lado2 = value
def espuerta(self):
return True
def abrir(self):
self.abierta=True
def entrar(self,habitacion):
if self.abierta==True and (self.lado1.id == habitacion.id or self.lado2.id == habitacion.id):
print("Ahora estas en la habitacion", habitacion.id)
if habitacion.hijos[0] == None:
pass
else:
if habitacion.hijos[0].activa == True:
print("La bomba ha estallado")
if self.abierta==False:
print("La puerta esta cerrada")
| [] |
Teenahshe/ponggame | pong.py | 5e4032753894ce1e1ebeb51841676aac24aa22df | """
# Step 1 - Create the App
# Step 2 - Create the Game
# Step 3 - Build the Game
# Step 4 - Run the App
"""
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty
from kivy.vector import Vector
from kivy.clock import Clock
from random import randint
class PongPaddle(Widget):
score = NumericProperty(0)
def bounce_ball(self, ball):
if self.collide_widget(ball):
ball.velocity_x *= -1
print('hello world')
class PongBall(Widget):
velocity_x = NumericProperty(0)
velocity_y = NumericProperty(0)
velocity = ReferenceListProperty(velocity_x, velocity_y)
# Latest Position of the Ball = Current Velocity + Current Position
def move(self):
self.pos = Vector(*self.velocity) + self.pos
# update() - moves the ball each frame and handles bounces and scoring
# on_touch_down() - when our finger/mouse touches the screen
# on_touch_up() - when we lift our finger off the screen after touching it
# on_touch_move() - when we drag our finger across the screen
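# Illustrative sketch (an assumption, not used by PongApp below): the same touch
# API can react to a press as well, e.g. to report where the screen was touched.
class TouchDemoWidget(Widget):
    def on_touch_down(self, touch):
        # print the touch position, then let the default handling continue
        print("screen touched at", touch.pos)
        return super().on_touch_down(touch)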
class PongGame(Widget):
ball = ObjectProperty(None)
player1 = ObjectProperty(None)
player2 = ObjectProperty(None)
def serve_ball(self):
self.ball.velocity = Vector(4, 0).rotate(randint(0, 360))
def update(self, dt):
self.ball.move()
# Bounce off top and bottom Y
if (self.ball.y < 0) or (self.ball.y > self.height - 50):
self.ball.velocity_y *= -1.1
# Bounce off left and increase th score
if self.ball.x < 0:
self.ball.velocity_x *= -1
self.player1.score += 1
# Bounce off right and increase the score
if self.ball.x > self.width - 50:
self.ball.velocity_x *= -1
self.player2.score += 1
self.player1.bounce_ball(self.ball)
self.player2.bounce_ball(self.ball)
def on_touch_move(self, touch):
        if touch.x < self.width / 4:
self.player1.center_y = touch.y
if touch.x > self.width * 3 / 4:
self.player2.center_y = touch.y
class PongApp(App):
def build(self):
game = PongGame()
game.serve_ball()
Clock.schedule_interval(game.update, 1.0 / 60.0)
return game
PongApp().run()
| [((17, 12, 17, 30), 'kivy.properties.NumericProperty', 'NumericProperty', ({(17, 28, 17, 29): '0'}, {}), '(0)', False, 'from kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty\n'), ((26, 17, 26, 35), 'kivy.properties.NumericProperty', 'NumericProperty', ({(26, 33, 26, 34): '0'}, {}), '(0)', False, 'from kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty\n'), ((27, 17, 27, 35), 'kivy.properties.NumericProperty', 'NumericProperty', ({(27, 33, 27, 34): '0'}, {}), '(0)', False, 'from kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty\n'), ((28, 15, 28, 60), 'kivy.properties.ReferenceListProperty', 'ReferenceListProperty', ({(28, 37, 28, 47): 'velocity_x', (28, 49, 28, 59): 'velocity_y'}, {}), '(velocity_x, velocity_y)', False, 'from kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty\n'), ((41, 11, 41, 31), 'kivy.properties.ObjectProperty', 'ObjectProperty', ({(41, 26, 41, 30): 'None'}, {}), '(None)', False, 'from kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty\n'), ((42, 14, 42, 34), 'kivy.properties.ObjectProperty', 'ObjectProperty', ({(42, 29, 42, 33): 'None'}, {}), '(None)', False, 'from kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty\n'), ((43, 14, 43, 34), 'kivy.properties.ObjectProperty', 'ObjectProperty', ({(43, 29, 43, 33): 'None'}, {}), '(None)', False, 'from kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty\n'), ((80, 8, 80, 56), 'kivy.clock.Clock.schedule_interval', 'Clock.schedule_interval', ({(80, 32, 80, 43): 'game.update', (80, 45, 80, 55): '(1.0 / 60.0)'}, {}), '(game.update, 1.0 / 60.0)', False, 'from kivy.clock import Clock\n'), ((32, 19, 32, 41), 'kivy.vector.Vector', 'Vector', ({(32, 26, 32, 40): '*self.velocity'}, {}), '(*self.velocity)', False, 'from kivy.vector import Vector\n'), ((46, 49, 46, 64), 'random.randint', 'randint', ({(46, 57, 46, 58): '0', (46, 60, 46, 63): '360'}, {}), '(0, 360)', False, 'from random import randint\n'), ((46, 29, 46, 41), 'kivy.vector.Vector', 'Vector', ({(46, 36, 46, 37): '4', (46, 39, 46, 40): '0'}, {}), '(4, 0)', False, 'from kivy.vector import Vector\n')] |
cyclone923/blocks-world | get_block_data/relation.py | 808127e6b4fde2a9cb499cf6934db7ff73e2f534 | class SceneRelation:
def __init__(self):
self.on_ground = set()
self.on_block = {}
self.clear = set()
def print_relation(self):
print(self.on_ground)
print(self.on_block)
print(self.clear) | [] |
EricZLou/BridgeRLAgent | bridge_RL_agent_v16.py | 78329eec5fcf320d2850f44dc33b138919fba82d | """
CS 238 Final Project: Bridge RL Agent
Eric Lou & Kimberly Tran
"""
import copy
import datetime
import numpy as np
import random
from collections import namedtuple
"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
REPRESENTATIONS OF BRIDGE
Representing a "Card" as an integer:
Cards 0 -> 12 are Club 2 -> Club 14
Cards 13 -> 25 are Diamond 2 -> Diamond 14
Cards 26 -> 38 are Heart 2 -> Heart 14
Cards 39 -> 51 are Spade 2 -> Spade 14
Jack is 11
Queen is 12
King is 13
Ace is 14
Representing a "Suit" as an integer:
n/a is -1 <-- used in a "State" where no cards have been played yet.
Clubs is 0
Diamonds is 1
Hearts is 2
Spades is 3
Representing a "State" as an opening suit and frozenset of up to 3 "Card"-s:
state = State(1, frozenset(23, 0))
We have a Diamond 12 and Club 2 with an opening suit of Diamonds.
The agent is 3rd to play a card and must play a Diamond if it has one.
Representing the MDP with a Map from a "State" to an array of length-52:
    We call this Map "weights". The array of length-52 represents the
proportion with which the agent should play each of the 52 cards given
that it is at that state.
In this example, with state = (1, set(23, 0)), weights[state] will
likely have very large values at indices 24 and 25 since a
Diamond 13 and Diamond 14 will beat the Diamond 12.
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
State = namedtuple('State', ['opening_suit', 'cards_played', 'partners_card'])
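# Illustrative helpers (an assumption, not used by the agent below): with the
# card encoding described above, suit and rank can be recovered arithmetically.
def card_suit(card):
    return card // 13      # 0 = Clubs, 1 = Diamonds, 2 = Hearts, 3 = Spades
def card_rank(card):
    return card % 13 + 2   # 2..14, where 11 = Jack, 12 = Queen, 13 = King, 14 = Ace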
"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
"
" DEFINE SOME CONSTANTS
"
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
NUM_ACTIONS = 52 # Agent can choose any card to play (only some are valid).
NUM_GAMES_TRAIN = 10000
NUM_GAMES_TEST = 10000
STATS_PER = 1000
"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
"
" RL AGENT
"
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
class BridgeAgent:
def __init__(self):
# We initialize all weights to 1 such that every card has an equal chance of being chosen.
self.weights = {}
self.weights[State(-1, frozenset(), -1)] = np.full(NUM_ACTIONS, 1.0)
for opening_suit in range(4):
for card_1 in range(52):
for card_2 in range(card_1, 52):
for card_3 in range(card_2, 52):
for card_partner in [-1, card_1, card_2, card_3]:
state = State(
opening_suit,
frozenset([card_1, card_2, card_3]),
card_partner)
self.weights[state] = np.full(NUM_ACTIONS, 1.0)
# self.alpha = 0.997 # 1,000
# self.alpha = 0.9995 # 10,000
# self.alpha = 0.99995 # 100,000
self.alpha = 0.999995 # 1,000,000
# self.alpha = 0.9999995 # 5,000,000
self.game_num = 1
"""
EXAMPLE
state = State(1, set(23, 0)) # Diamond 12, Club 2 <-- first 2 cards in round
card_played = 24 # Diamond 13 <-- 3rd card in round
If 4th card is not 25, then the agent wins. We want to incrase the proportion
with which we play 24.
ba.add_win(state, card_played)
"""
def add_win(self, state, card_played):
self.weights[state][card_played] *= (1 + 0.1 * self.alpha ** self.game_num)
"""
EXAMPLE
state = State(1, set(23, 0))
card_played = 24
If 4th card is 25 (Diamond 14), then the agent loses. We want to decrease the
proportion with which we play 24.
ba.add_loss(state, card_played)
"""
def add_loss(self, state, card_played):
self.weights[state][card_played] /= (1 + 0.1 * self.alpha ** self.game_num)
"""
EXAMPLE
state = State(1, set(23, 0))
cards_in_hand = set(0, 1, 4, 8, 11, 20, 24, 38)
The agent choose to play whichever remaining card has the highest weight.
The agent must play a Diamond if it has Diamonds. In this example, agent
will most likely play 24, which beats 23 <-- hopefully 24 has the highest
weight.
card_played = ba.play_card(state, cards_in_hand)
"""
def play_card(self, state, cards_in_hand):
# Following the EXAMPLE:
# suit = 1
suit = state.opening_suit
# valid_cards = [20, 24]
valid_cards = np.array([i for i in range(suit * 13, (suit + 1) * 13) if i in cards_in_hand])
if len(valid_cards) == 0:
valid_cards = cards_in_hand
# Choose the valid card with highest weight.
        # index_into_valid_cards = 1 since 20 has a smaller weight than 24.
# index_into_valid_cards = np.argmax(self.weights[state][valid_cards])
index_into_valid_cards = np.random.choice(np.flatnonzero(self.weights[state][valid_cards] == self.weights[state][valid_cards].max()))
# returns valid_cards[1] = 24
return valid_cards[index_into_valid_cards]
"""
    This function writes the policy at the end of the training phase.
"""
def write_policy(self, cards_in_hand, policy, filename, states_accessed):
count = 0
with open(filename + "_Last_Game.txt", 'w') as g:
g.write("Cards in Hand: {}\n\n".format(cards_in_hand))
with open(filename + ".txt", 'w') as f:
for state in self.weights:
f.write("State: suit {} | cards played {} | partner's card {}\nBest Card To Play: {}\n\n".format(state.opening_suit,
state.cards_played, state.partners_card,
policy[count]))
if state in states_accessed:
g.write("State: suit {} | cards played {} | partner's card {}\nBest Card To Play: {}\n\n".format(state.opening_suit,
state.cards_played, state.partners_card,
policy[count]))
count += 1
"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
"
" UTILITY FUNCTIONS
"
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
"""
This function deals random cards.
"""
deck = list(range(52))
def shuffle_cards():
random.shuffle(deck)
return [deck[0:13], deck[13:26], deck[26:39], deck[39:52]]
"""
This function is used by non-agents who play randomly.
"""
def play_random_card(suit, cards_in_hand):
if suit == -1:
return random.choice(cards_in_hand)
valid_cards = [i for i in range(suit * 13, (suit + 1) * 13) if i in cards_in_hand]
if len(valid_cards) == 0:
return random.choice(cards_in_hand)
return random.choice(valid_cards)
"""
This function determines the winner of the round.
"""
def determine_round_winner(suit, cards_played):
max_idx = -1
max_val = -1
for idx, card in enumerate(cards_played):
if suit * 13 <= card < (suit + 1) * 13 and card > max_val:
max_val, max_idx = card, idx
return max_idx
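# Worked example (illustrative): with opening suit 1 (Diamonds) and
# cards_played [23, 0, 24, 40], index 2 wins because 24 is the highest Diamond;
# the Club (0) and the Spade (40) do not follow suit and cannot win the round.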
"""
This function determines the declarer based on partnership with the most points.
Return: (agent_is_declarer, declarer_idx)
"""
def agent_declarer(hands):
points = count_points(hands) # determines the number of points in each hand
# agent's partnership has more points and agent is declarer
if points[0] + points[2] > points[1] + points[3] and points[2] > points[0]:
return True, 2
# agent is not declarer and agent should start the play
return False, -1
"""
This function counts the points in each hand.
Note: Ace is 12, 25, 38, 51
"""
def count_points(hands):
points = []
for hand in hands:
p = 0
for card in hand:
if card % 13 == 12:
p += 4
elif card % 13 == 11:
p += 3
elif card % 13 == 10:
p += 2
elif card % 13 == 9:
p += 1
points.append(p)
return points
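# Worked example (illustrative): a hand holding the Club Ace (12), the Diamond
# King (24) and the Spade Jack (48) counts 4 + 3 + 1 = 8 high-card points.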
"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
"
" TRACKS PERFORMANCE OF BRIDGE AGENT
"
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
class BridgeAgentRedFlags:
def __init__(self):
self.RED_FLAG_VIOLATIONS = np.zeros(3)
self.RED_FLAG_TOTAL_COUNT = np.zeros(3)
self.ALL_RED_FLAG_VIOLATIONS = np.zeros(3) # Cumulative
self.ALL_RED_FLAG_TOTAL_COUNT = np.zeros(3) # Cumulative
def clear_red_flags(self):
self.RED_FLAG_VIOLATIONS = np.zeros(3)
self.RED_FLAG_TOTAL_COUNT = np.zeros(3)
"""
This function checks if the agent plays their highest card even though the
highest card already played is higher than theirs.
"""
def highest_card(self, valid_cards, agent_valid_cards, card):
if len(agent_valid_cards) > 1 and max(valid_cards) > max(agent_valid_cards):
self.RED_FLAG_TOTAL_COUNT[0] += 1
self.ALL_RED_FLAG_TOTAL_COUNT[0] += 1
if card == max(agent_valid_cards):
self.RED_FLAG_VIOLATIONS[0] += 1
self.ALL_RED_FLAG_VIOLATIONS[0] += 1
"""
This function checks if the agent wins a round when there's three cards played already
and the agent has at least one higher card than what's been played.
"""
def higher_card(self, valid_cards, agent_valid_cards, card, cards_played, partners_cards):
if (len(cards_played) == 3 and len(agent_valid_cards) > 1 and
max(agent_valid_cards) > max(valid_cards) and
max(valid_cards) not in partners_cards
):
self.RED_FLAG_TOTAL_COUNT[1] += 1
self.ALL_RED_FLAG_TOTAL_COUNT[1] += 1
if card < max(valid_cards):
self.RED_FLAG_VIOLATIONS[1] += 1
self.ALL_RED_FLAG_VIOLATIONS[1] += 1
"""
This function checks if the agent plays a higher card even though their partner is guaranteed to win.
"""
def partner_win(self, valid_cards, agent_valid_cards, card, cards_played, partners_cards):
if (len(cards_played) == 3 and len(agent_valid_cards) > 1 and
max(valid_cards) in partners_cards
):
self.RED_FLAG_TOTAL_COUNT[2] += 1
self.ALL_RED_FLAG_TOTAL_COUNT[2] += 1
if card > max(valid_cards):
self.RED_FLAG_VIOLATIONS[2] += 1
self.ALL_RED_FLAG_VIOLATIONS[2] += 1
"""
This function checks for any red flags based on what the agent played.
"""
def assess_card_played(self, hands, card, suit, cards_played, player_idx, partners_cards):
all_valid_cards = list(range(suit * 13, (suit + 1) * 13))
valid_cards = np.array([i for i in all_valid_cards if i in cards_played])
agent_valid_cards = np.array([i for i in all_valid_cards if i in hands[player_idx]])
if suit == -1:
return
# highest card played so far is higher than agent's highest card
self.highest_card(valid_cards, agent_valid_cards, card)
# 3 cards played and agent has higher cards, does it play highest card or highest necessary card?
self.higher_card(valid_cards, agent_valid_cards, card, cards_played, partners_cards)
# 3 cards played + partner has played highest card, does agent play lowest card? do they beat their partner?
self.partner_win(valid_cards, agent_valid_cards, card, cards_played, partners_cards)
"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
"
" PLAY A SINGLE GAME OF BRIDGE
"
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""
"""
This function plays 13 rounds of 1 NT bridge and outputs a winner.
"""
def play_game(game, hands, train=False, ba=None, barf=None):
partners_cards = copy.copy(hands[0])
agents_cards = copy.copy(hands[2])
declarer, d = agent_declarer(hands)
"""
hands[0] = North's cards
hands[1] = East's cards
hands[2] = Agent's cards
hands[3] = West's cards
"""
round_winner = (d + 1) % 4 # the person to the right of the declarer starts the game
NS_Wins = 0 # used to count total wins in agent partnership
states_accessed = [] # records which states have been updated for this game
# For each round
for _ in range(13):
cards_played = []
agent_card_played = [-1, -1]
agent_state = None
agent_state_2 = None
opening_suit = -1
# Each player plays a card in order starting from round_winner
for player in range(4):
card = None
player_idx = (round_winner + player) % 4
if player_idx == 2: # Agent plays
if ba:
agent_state = State(opening_suit, frozenset(cards_played), agent_card_played[1])
states_accessed.append(agent_state)
card = ba.play_card(agent_state, hands[player_idx])
else:
card = play_random_card(opening_suit, hands[player_idx])
agent_card_played[0] = card
barf.assess_card_played(hands, card, opening_suit, cards_played, player_idx, partners_cards)
elif player_idx == 0: # if agent is declarer, they play their partner's cards
if ba and declarer:
agent_state_2 = State(opening_suit, frozenset(cards_played), agent_card_played[0])
states_accessed.append(agent_state_2)
card = ba.play_card(agent_state_2, hands[player_idx])
barf.assess_card_played(hands, card, opening_suit, cards_played, player_idx, partners_cards)
else:
card = play_random_card(opening_suit, hands[player_idx])
agent_card_played[1] = card
else: # Random bot plays
card = play_random_card(opening_suit, hands[player_idx])
# Keep track of the opening suit.
if player == 0:
opening_suit = card // 13
hands[player_idx].remove(card)
cards_played.append(card)
# Get the winning card.
round_winner = (determine_round_winner(opening_suit, cards_played) + round_winner) % 4
# Adjust the BridgeAgent weights.
# If the BridgeAgent or N wins.
if round_winner == 0 or round_winner == 2:
if ba and train:
ba.add_win(agent_state, agent_card_played[0])
if declarer:
ba.add_win(agent_state_2, agent_card_played[1])
NS_Wins += 1
else:
if ba and train:
ba.add_loss(agent_state, agent_card_played[0])
if declarer:
ba.add_loss(agent_state_2, agent_card_played[1])
# for the last game, determine and write out policy
if ba and game == (NUM_GAMES_TRAIN - 1):
policy = []
count = 0
for x in ba.weights:
y = copy.deepcopy(ba.weights[x])
            best_card = np.argmax(y)
            while best_card in x.cards_played:
                y[best_card] = -1
                best_card = np.argmax(y)
            policy.append(best_card)
count += 1
game_file = "Bridge_" + str(game + 1)
ba.write_policy(agents_cards, policy, game_file, states_accessed)
return NS_Wins
def game_summary(ba, t, iterations=NUM_GAMES_TRAIN):
with open(str(NUM_GAMES_TRAIN) + "_Game_Data_Train-" + str(t) + ".csv", 'w') as k:
k.write("game,"
"agent_wins,random_wins,diff_wins,"
"agent_rfv_a,agent_rftc_a,"
"agent_rfv_b,agent_rftc_b,"
"agent_rfv_c,agent_rftc_c,"
"random_rfv_a,random_rftc_a,"
"random_rfv_b,random_rftc_b,"
"random_rfv_c,random_rftc_c\n")
barf = BridgeAgentRedFlags()
barf_random = BridgeAgentRedFlags()
NS_Wins = [0]
NS_Wins_random = [0]
for game in range(iterations):
hands = shuffle_cards()
NS_Wins[-1] += play_game(game=game, hands=copy.deepcopy(hands), train=True, ba=ba, barf=barf)
NS_Wins_random[-1] += play_game(game=game, hands=hands, ba=None, barf=barf_random)
ba.game_num += 1
if (game + 1) % STATS_PER == 0:
print(f"{game + 1} / ", end="", flush=True)
rfv = barf.RED_FLAG_VIOLATIONS
rfv_random = barf_random.RED_FLAG_VIOLATIONS
rftc = barf.RED_FLAG_TOTAL_COUNT
rftc_random = barf_random.RED_FLAG_TOTAL_COUNT
with open(str(NUM_GAMES_TRAIN) + "_Game_Data_Train-" + str(t) + ".csv", 'a') as k:
k.write(
f"{game + 1},"
f"{NS_Wins[-1]},{NS_Wins_random[-1]},{NS_Wins[-1] - NS_Wins_random[-1]},"
f"{rfv[0]},{rftc[0]},"
f"{rfv[1]},{rftc[1]},"
f"{rfv[2]},{rftc[2]},"
f"{rfv_random[0]},{rftc_random[0]},"
f"{rfv_random[1]},{rftc_random[1]},"
f"{rfv_random[2]},{rftc_random[2]},"
f"\n")
# Cumulative statistics on red flags for every STATS_PER games.
barf.clear_red_flags()
barf_random.clear_red_flags()
NS_Wins.append(0)
NS_Wins_random.append(0)
average_win_delta = (sum(NS_Wins)-sum(NS_Wins_random)) / ((len(NS_Wins) - 1) * STATS_PER)
average_rf_ratios_agent = np.divide(barf.ALL_RED_FLAG_VIOLATIONS, barf.ALL_RED_FLAG_TOTAL_COUNT)
average_rf_ratios_random = np.divide(barf_random.ALL_RED_FLAG_VIOLATIONS, barf_random.ALL_RED_FLAG_TOTAL_COUNT)
print(f"Average Win Delta (want this to be positive): {average_win_delta}")
print(f"Average Red Flag Ratios - Agent: {average_rf_ratios_agent}")
print(f"Average Red Flag Ratios - Random: {average_rf_ratios_random}")
with open(str(NUM_GAMES_TRAIN) + "_Game_Data_Avg_Train-" + str(t) + ".csv", 'w') as m:
m.write(f"avg_win_delta,avg_rf_agent,avg_rf_random\n"
f"{average_win_delta},{average_rf_ratios_agent},{average_rf_ratios_random}\n")
return ba
def main():
start_time = datetime.datetime.now()
hands = []
# TRAINING
print(f"TRAINING on {NUM_GAMES_TRAIN} games")
ba = BridgeAgent()
ba = game_summary(ba, True)
# TESTING -- we don't change the weights here
print(f"TESTING on {NUM_GAMES_TEST} games")
game_summary(ba, False, iterations=NUM_GAMES_TEST)
end_time = datetime.datetime.now()
print("Runtime: ", end_time - start_time) # runtime
if __name__ == "__main__":
main()
| [((47, 8, 47, 78), 'collections.namedtuple', 'namedtuple', ({(47, 19, 47, 26): '"""State"""', (47, 28, 47, 77): "['opening_suit', 'cards_played', 'partners_card']"}, {}), "('State', ['opening_suit', 'cards_played', 'partners_card'])", False, 'from collections import namedtuple\n'), ((175, 4, 175, 24), 'random.shuffle', 'random.shuffle', ({(175, 19, 175, 23): 'deck'}, {}), '(deck)', False, 'import random\n'), ((190, 11, 190, 37), 'random.choice', 'random.choice', ({(190, 25, 190, 36): 'valid_cards'}, {}), '(valid_cards)', False, 'import random\n'), ((322, 21, 322, 40), 'copy.copy', 'copy.copy', ({(322, 31, 322, 39): 'hands[0]'}, {}), '(hands[0])', False, 'import copy\n'), ((323, 19, 323, 38), 'copy.copy', 'copy.copy', ({(323, 29, 323, 37): 'hands[2]'}, {}), '(hands[2])', False, 'import copy\n'), ((461, 30, 461, 100), 'numpy.divide', 'np.divide', ({(461, 40, 461, 68): 'barf.ALL_RED_FLAG_VIOLATIONS', (461, 70, 461, 99): 'barf.ALL_RED_FLAG_TOTAL_COUNT'}, {}), '(barf.ALL_RED_FLAG_VIOLATIONS, barf.ALL_RED_FLAG_TOTAL_COUNT)', True, 'import numpy as np\n'), ((462, 31, 462, 115), 'numpy.divide', 'np.divide', ({(462, 41, 462, 76): 'barf_random.ALL_RED_FLAG_VIOLATIONS', (462, 78, 462, 114): 'barf_random.ALL_RED_FLAG_TOTAL_COUNT'}, {}), '(barf_random.ALL_RED_FLAG_VIOLATIONS, barf_random.\n ALL_RED_FLAG_TOTAL_COUNT)', True, 'import numpy as np\n'), ((474, 17, 474, 40), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n'), ((488, 15, 488, 38), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n'), ((71, 51, 71, 76), 'numpy.full', 'np.full', ({(71, 59, 71, 70): 'NUM_ACTIONS', (71, 72, 71, 75): '1.0'}, {}), '(NUM_ACTIONS, 1.0)', True, 'import numpy as np\n'), ((183, 15, 183, 43), 'random.choice', 'random.choice', ({(183, 29, 183, 42): 'cards_in_hand'}, {}), '(cards_in_hand)', False, 'import random\n'), ((188, 15, 188, 43), 'random.choice', 'random.choice', ({(188, 29, 188, 42): 'cards_in_hand'}, {}), '(cards_in_hand)', False, 'import random\n'), ((245, 35, 245, 46), 'numpy.zeros', 'np.zeros', ({(245, 44, 245, 45): '3'}, {}), '(3)', True, 'import numpy as np\n'), ((246, 36, 246, 47), 'numpy.zeros', 'np.zeros', ({(246, 45, 246, 46): '3'}, {}), '(3)', True, 'import numpy as np\n'), ((247, 39, 247, 50), 'numpy.zeros', 'np.zeros', ({(247, 48, 247, 49): '3'}, {}), '(3)', True, 'import numpy as np\n'), ((248, 40, 248, 51), 'numpy.zeros', 'np.zeros', ({(248, 49, 248, 50): '3'}, {}), '(3)', True, 'import numpy as np\n'), ((251, 35, 251, 46), 'numpy.zeros', 'np.zeros', ({(251, 44, 251, 45): '3'}, {}), '(3)', True, 'import numpy as np\n'), ((252, 36, 252, 47), 'numpy.zeros', 'np.zeros', ({(252, 45, 252, 46): '3'}, {}), '(3)', True, 'import numpy as np\n'), ((299, 22, 299, 81), 'numpy.array', 'np.array', ({(299, 31, 299, 80): '[i for i in all_valid_cards if i in cards_played]'}, {}), '([i for i in all_valid_cards if i in cards_played])', True, 'import numpy as np\n'), ((300, 28, 300, 92), 'numpy.array', 'np.array', ({(300, 37, 300, 91): '[i for i in all_valid_cards if i in hands[player_idx]]'}, {}), '([i for i in all_valid_cards if i in hands[player_idx]])', True, 'import numpy as np\n'), ((399, 16, 399, 44), 'copy.deepcopy', 'copy.deepcopy', ({(399, 30, 399, 43): 'ba.weights[x]'}, {}), '(ba.weights[x])', False, 'import copy\n'), ((400, 18, 400, 30), 'numpy.argmax', 'np.argmax', ({(400, 28, 400, 29): 'y'}, {}), '(y)', True, 'import numpy as np\n'), ((403, 22, 403, 34), 'numpy.argmax', 'np.argmax', ({(403, 32, 403, 33): 'y'}, {}), 
'(y)', True, 'import numpy as np\n'), ((431, 50, 431, 70), 'copy.deepcopy', 'copy.deepcopy', ({(431, 64, 431, 69): 'hands'}, {}), '(hands)', False, 'import copy\n'), ((81, 50, 81, 75), 'numpy.full', 'np.full', ({(81, 58, 81, 69): 'NUM_ACTIONS', (81, 71, 81, 74): '1.0'}, {}), '(NUM_ACTIONS, 1.0)', True, 'import numpy as np\n')] |
chbonkie/hacs | tests/hacsbase/test_hacsbase_data.py | 81db513a0d3d1af1acf25da7b706ae62d8fdb6fa | """Data Test Suite."""
from aiogithubapi.objects import repository
import pytest
import os
from homeassistant.core import HomeAssistant
from custom_components.hacs.hacsbase.data import HacsData
from custom_components.hacs.helpers.classes.repository import HacsRepository
from custom_components.hacs.hacsbase.configuration import Configuration
from custom_components.hacs.share import get_hacs
from tests.dummy_repository import dummy_repository_base
@pytest.mark.asyncio
async def test_hacs_data_async_write1(tmpdir):
data = HacsData()
hacs = get_hacs()
repository = dummy_repository_base()
repository.data.installed = True
repository.data.installed_version = "1"
hacs.repositories = [repository]
hacs.hass = HomeAssistant()
hacs.hass.config.config_dir = tmpdir
hacs.configuration = Configuration()
await data.async_write()
@pytest.mark.asyncio
async def test_hacs_data_async_write2(tmpdir):
data = HacsData()
hacs = get_hacs()
hacs.hass = HomeAssistant()
hacs.hass.config.config_dir = tmpdir
hacs.configuration = Configuration()
hacs.system.status.background_task = False
hacs.system.disabled = False
await data.async_write()
@pytest.mark.asyncio
async def test_hacs_data_restore(tmpdir):
data = HacsData()
hacs = get_hacs()
hacs.hass = HomeAssistant()
hacs.hass.config.config_dir = tmpdir
await data.restore()
| [((16, 11, 16, 21), 'custom_components.hacs.hacsbase.data.HacsData', 'HacsData', ({}, {}), '()', False, 'from custom_components.hacs.hacsbase.data import HacsData\n'), ((17, 11, 17, 21), 'custom_components.hacs.share.get_hacs', 'get_hacs', ({}, {}), '()', False, 'from custom_components.hacs.share import get_hacs\n'), ((18, 17, 18, 40), 'tests.dummy_repository.dummy_repository_base', 'dummy_repository_base', ({}, {}), '()', False, 'from tests.dummy_repository import dummy_repository_base\n'), ((22, 16, 22, 31), 'homeassistant.core.HomeAssistant', 'HomeAssistant', ({}, {}), '()', False, 'from homeassistant.core import HomeAssistant\n'), ((24, 25, 24, 40), 'custom_components.hacs.hacsbase.configuration.Configuration', 'Configuration', ({}, {}), '()', False, 'from custom_components.hacs.hacsbase.configuration import Configuration\n'), ((30, 11, 30, 21), 'custom_components.hacs.hacsbase.data.HacsData', 'HacsData', ({}, {}), '()', False, 'from custom_components.hacs.hacsbase.data import HacsData\n'), ((31, 11, 31, 21), 'custom_components.hacs.share.get_hacs', 'get_hacs', ({}, {}), '()', False, 'from custom_components.hacs.share import get_hacs\n'), ((32, 16, 32, 31), 'homeassistant.core.HomeAssistant', 'HomeAssistant', ({}, {}), '()', False, 'from homeassistant.core import HomeAssistant\n'), ((34, 25, 34, 40), 'custom_components.hacs.hacsbase.configuration.Configuration', 'Configuration', ({}, {}), '()', False, 'from custom_components.hacs.hacsbase.configuration import Configuration\n'), ((42, 11, 42, 21), 'custom_components.hacs.hacsbase.data.HacsData', 'HacsData', ({}, {}), '()', False, 'from custom_components.hacs.hacsbase.data import HacsData\n'), ((43, 11, 43, 21), 'custom_components.hacs.share.get_hacs', 'get_hacs', ({}, {}), '()', False, 'from custom_components.hacs.share import get_hacs\n'), ((44, 16, 44, 31), 'homeassistant.core.HomeAssistant', 'HomeAssistant', ({}, {}), '()', False, 'from homeassistant.core import HomeAssistant\n')] |
marc-gav/PhiFlow | phi/math/backend/_backend.py | b6186fd1503d040997b52d49aa18cd875267c27e | from collections import namedtuple
from contextlib import contextmanager
from threading import Barrier
from typing import List, Callable
import numpy
from ._dtype import DType, combine_types
SolveResult = namedtuple('SolveResult', [
'method', 'x', 'residual', 'iterations', 'function_evaluations', 'converged', 'diverged', 'message',
])
class ComputeDevice:
"""
A physical device that can be selected to perform backend computations.
"""
def __init__(self, backend: 'Backend', name: str, device_type: str, memory: int, processor_count: int, description: str, ref=None):
self.name: str = name
""" Name of the compute device. CPUs are typically called `'CPU'`. """
self.device_type: str = device_type
""" Type of device such as `'CPU'`, `'GPU'` or `'TPU'`. """
self.memory: int = memory
""" Maximum memory of the device that can be allocated (in bytes). -1 for n/a. """
self.processor_count: int = processor_count
""" Number of CPU cores or GPU multiprocessors. -1 for n/a. """
self.description: str = description
""" Further information about the device such as driver version. """
self.ref = ref
""" (Optional) Reference to the internal device representation. """
self.backend: 'Backend' = backend
""" Backend that this device belongs to. Different backends represent the same device with different objects. """
def __repr__(self):
mem = f"{(self.memory / 1024 ** 2)} MB" if self.memory > 0 else "memory: n/a"
pro = f"{self.processor_count} processors" if self.processor_count > 0 else "processors: n/a"
descr = self.description.replace('\n', ' ')
if len(descr) > 30:
descr = descr[:28] + "..."
return f"'{self.name}' ({self.device_type}) | {mem} | {pro} | {descr}"
class Backend:
def __init__(self, name: str, default_device: ComputeDevice):
"""
Backends delegate low-level operations to a compute library or emulate them.
The methods of `Backend` form a comprehensive list of available operations.
To support a compute library, subclass `Backend` and register it by adding it to `BACKENDS`.
Args:
name: Human-readable string
default_device: `ComputeDevice` being used by default
"""
self._name = name
self._default_device = default_device
def __enter__(self):
_DEFAULT.append(self)
def __exit__(self, exc_type, exc_val, exc_tb):
_DEFAULT.pop(-1)
@property
def name(self) -> str:
return self._name
def supports(self, feature: str or Callable) -> bool:
"""
Tests if this backend supports the given feature.
Features correspond to a method of this backend that must be implemented if the feature is supported.
Possible features:
* `sparse_tensor`
* `gradients
Args:
feature: `str` or unbound Backend method, e.g. `Backend.sparse_tensor`
Returns:
Whether the feature is supported.
"""
feature = feature if isinstance(feature, str) else feature.__name__
if not hasattr(Backend, feature):
raise ValueError(f"Not a valid feature: '{feature}'")
backend_fun = getattr(Backend, feature)
impl_fun = getattr(self.__class__, feature)
return impl_fun is not backend_fun
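    # Usage sketch (illustrative): backend.supports(Backend.functional_gradient)
    # is True only when the concrete backend overrides functional_gradient.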
def prefers_channels_last(self) -> bool:
raise NotImplementedError()
@property
def precision(self) -> int:
""" Short for math.backend.get_precision() """
return get_precision()
@property
def float_type(self) -> DType:
return DType(float, self.precision)
@property
def as_registered(self) -> 'Backend':
from phi.math.backend import BACKENDS
for backend in BACKENDS:
if self.name in backend.name:
return backend
raise RuntimeError(f"Backend '{self}' is not visible.")
@property
def complex_type(self) -> DType:
return DType(complex, max(64, self.precision))
def combine_types(self, *dtypes: DType) -> DType:
return combine_types(*dtypes, fp_precision=self.precision)
def auto_cast(self, *tensors) -> list:
"""
        Determines the appropriate value type resulting from operations involving the given tensors.
This method is called by the default implementations of basic operators.
Backends can override this method to prevent unnecessary casting.
Args:
*tensors: tensors to cast and to consider when determining the common data type
Returns:
tensors cast to a common data type
"""
dtypes = [self.dtype(t) for t in tensors]
result_type = self.combine_types(*dtypes)
if result_type.kind in (int, float, complex, bool):
tensors = [self.cast(t, result_type) for t in tensors]
return tensors
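    # Example (illustrative): combining an int tensor with a float tensor casts
    # both operands to the currently selected float precision before the operation runs.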
def __str__(self):
return self.name
def __repr__(self):
return self.name
def list_devices(self, device_type: str or None = None) -> List[ComputeDevice]:
"""
Fetches information about all available compute devices this backend can use.
Implementations:
* NumPy: [`os.cpu_count`](https://docs.python.org/3/library/os.html#os.cpu_count)
* PyTorch: [`torch.cuda.get_device_properties`](https://pytorch.org/docs/stable/cuda.html#torch.cuda.get_device_properties)
* TensorFlow: `tensorflow.python.client.device_lib.list_local_devices`
* Jax: [`jax.devices`](https://jax.readthedocs.io/en/latest/jax.html#jax.devices)
Args:
device_type: (optional) Return only devices of this type, e.g. `'GPU'` or `'CPU'`. See `ComputeDevice.device_type`.
Returns:
`list` of all currently available devices.
"""
raise NotImplementedError()
def get_default_device(self) -> ComputeDevice:
return self._default_device
def set_default_device(self, device: ComputeDevice or str):
if isinstance(device, str):
devices = self.list_devices(device)
assert len(devices) >= 1, f"{self.name}: Cannot select '{device} because no device of this type is available."
device = devices[0]
self._default_device = device
def seed(self, seed: int):
raise NotImplementedError()
def is_tensor(self, x, only_native=False):
"""
An object is considered a native tensor by a backend if no internal conversion is required by backend methods.
        An object is considered a tensor (native or otherwise) by a backend if it is not a struct (e.g. tuple, list) and all methods of the backend accept it as a tensor argument.
Args:
x: object to check
only_native: If True, only accepts true native tensor representations, not Python numbers or others that are also supported as tensors (Default value = False)
Returns:
bool: whether `x` is considered a tensor by this backend
"""
raise NotImplementedError()
def as_tensor(self, x, convert_external=True):
"""
Converts a tensor-like object to the native tensor representation of this backend.
If x is a native tensor of this backend, it is returned without modification.
        If x is a Python number (numbers.Number instance), `convert_external` decides whether to convert it unless the backend cannot handle Python numbers.
        *Note:* There may be objects that are considered tensors by this backend but are not native and thus will be converted by this method.
Args:
x: tensor-like, e.g. list, tuple, Python number, tensor
convert_external: if False and `x` is a Python number that is understood by this backend, this method returns the number as-is. This can help prevent type clashes like int32 vs int64. (Default value = True)
Returns:
tensor representation of `x`
"""
raise NotImplementedError()
def is_available(self, tensor) -> bool:
"""
Tests if the value of the tensor is known and can be read at this point.
If true, `numpy(tensor)` must return a valid NumPy representation of the value.
Tensors are typically available when the backend operates in eager mode.
Args:
tensor: backend-compatible tensor
Returns:
bool
"""
raise NotImplementedError()
def numpy(self, tensor) -> numpy.ndarray:
"""
Returns a NumPy representation of the given tensor.
If `tensor` is already a NumPy array, it is returned without modification.
This method raises an error if the value of the tensor is not known at this point, e.g. because it represents a node in a graph.
Use `is_available(tensor)` to check if the value can be represented as a NumPy array.
Args:
tensor: backend-compatible tensor
Returns:
NumPy representation of the values stored in the tensor
"""
raise NotImplementedError()
def to_dlpack(self, tensor):
raise NotImplementedError()
def from_dlpack(self, capsule):
raise NotImplementedError()
def copy(self, tensor, only_mutable=False):
raise NotImplementedError()
def call(self, f: Callable, *args, name=None):
"""
Calls `f(*args)` and returns the result.
This method may be used to register internal calls with the profiler.
Usage:
choose_backend(key).call(custom_function, *args)
"""
return f(*args)
def block_until_ready(self, values):
pass
def jit_compile(self, f: Callable) -> Callable:
return NotImplemented
def functional_gradient(self, f, wrt: tuple or list, get_output: bool):
raise NotImplementedError(self)
def custom_gradient(self, f: Callable, gradient: Callable) -> Callable:
"""
Creates a function based on `f` that uses a custom gradient for backprop.
Args:
f: Forward function.
gradient: Function for backprop. Will be called as `gradient(*d_out)` to compute the gradient of `f`.
Returns:
Function with similar signature and return values as `f`. However, the returned function does not support keyword arguments.
"""
return NotImplemented
def jit_compile_grad(self, f, wrt: tuple or list, get_output: bool):
raise NotImplementedError()
def transpose(self, tensor, axes):
raise NotImplementedError()
def random_uniform(self, shape):
""" Float tensor of selected precision containing random values in the range [0, 1) """
raise NotImplementedError(self)
def random_normal(self, shape):
""" Float tensor of selected precision containing random values sampled from a normal distribution with mean 0 and std 1. """
raise NotImplementedError(self)
def stack(self, values, axis=0):
raise NotImplementedError(self)
def concat(self, values, axis):
raise NotImplementedError(self)
def pad(self, value, pad_width, mode: str = 'constant', constant_values=0):
"""
Pad a tensor with values as specified by `mode` and `constant_values`.
If the mode is not supported, returns NotImplemented.
Args:
value: tensor
pad_width: 2D tensor specifying the number of values padded to the edges of each axis in the form [[axis 0 lower, axis 0 upper], ...] including batch and component axes.
            mode: 'constant', 'boundary', 'periodic', 'symmetric' or 'reflect' (Default value = 'constant')
            constant_values: used for out-of-bounds points if mode='constant' (Default value = 0)
Returns:
padded tensor or NotImplemented
"""
raise NotImplementedError(self)
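    # Illustrative note (assumed layout): for a value shaped (batch, y, x, channel),
    # padding one cell on each spatial edge uses pad_width = [[0, 0], [1, 1], [1, 1], [0, 0]].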
def reshape(self, value, shape):
raise NotImplementedError(self)
def flip(self, value, axes: tuple or list):
slices = tuple(slice(None, None, -1 if i in axes else None) for i in range(self.ndims(value)))
return value[slices]
def sum(self, value, axis=None, keepdims=False):
raise NotImplementedError(self)
def prod(self, value, axis=None):
raise NotImplementedError(self)
def divide_no_nan(self, x, y):
"""
Computes x/y but returns 0 if y=0.
Args:
            x: numerator
            y: denominator
Returns:
"""
raise NotImplementedError(self)
def where(self, condition, x=None, y=None):
raise NotImplementedError(self)
def nonzero(self, values):
"""
Args:
values: Tensor with only spatial dimensions
Returns:
non-zero multi-indices as tensor of shape (nnz, vector)
"""
raise NotImplementedError(self)
def mean(self, value, axis=None, keepdims=False):
raise NotImplementedError(self)
def range(self, start, limit=None, delta=1, dtype: DType = DType(int, 32)):
raise NotImplementedError(self)
def zeros(self, shape, dtype: DType = None):
raise NotImplementedError(self)
def zeros_like(self, tensor):
raise NotImplementedError(self)
def ones(self, shape, dtype: DType = None):
raise NotImplementedError(self)
def ones_like(self, tensor):
raise NotImplementedError(self)
def meshgrid(self, *coordinates):
raise NotImplementedError(self)
def linspace(self, start, stop, number):
raise NotImplementedError(self)
def tensordot(self, a, a_axes: tuple or list, b, b_axes: tuple or list):
""" Multiply-sum-reduce a_axes of a with b_axes of b. """
raise NotImplementedError(self)
def matmul(self, A, b):
raise NotImplementedError(self)
def einsum(self, equation, *tensors):
raise NotImplementedError(self)
def while_loop(self, loop: Callable, values: tuple):
"""
```python
while any(values[0]):
values = loop(*values)
return values
```
This operation does not support backpropagation.
Args:
loop: Loop function, must return a `tuple` with entries equal to `values` in shape and data type.
values: Initial values of loop variables.
Returns:
Loop variables upon loop completion.
"""
raise NotImplementedError(self)
def abs(self, x):
raise NotImplementedError(self)
def sign(self, x):
raise NotImplementedError(self)
def round(self, x):
raise NotImplementedError(self)
def ceil(self, x):
raise NotImplementedError(self)
def floor(self, x):
raise NotImplementedError(self)
def max(self, x, axis=None, keepdims=False):
raise NotImplementedError(self)
def min(self, x, axis=None, keepdims=False):
raise NotImplementedError(self)
def maximum(self, a, b):
raise NotImplementedError(self)
def minimum(self, a, b):
raise NotImplementedError(self)
def clip(self, x, minimum, maximum):
raise NotImplementedError(self)
def sqrt(self, x):
raise NotImplementedError(self)
def exp(self, x):
raise NotImplementedError(self)
def conv(self, value, kernel, zero_padding=True):
"""
Convolve value with kernel.
Depending on the tensor rank, the convolution is either 1D (rank=3), 2D (rank=4) or 3D (rank=5).
Higher dimensions may not be supported.
Args:
value: tensor of shape (batch_size, in_channel, spatial...)
kernel: tensor of shape (batch_size or 1, out_channel, in_channel, spatial...)
zero_padding: If True, pads the edges of `value` with zeros so that the result has the same shape as `value`.
Returns:
Convolution result as tensor of shape (batch_size, out_channel, spatial...)
"""
raise NotImplementedError(self)
def expand_dims(self, a, axis=0, number=1):
raise NotImplementedError(self)
def shape(self, tensor):
raise NotImplementedError(self)
def staticshape(self, tensor):
raise NotImplementedError(self)
def cast(self, x, dtype: DType):
raise NotImplementedError(self)
def to_float(self, x):
"""
Converts a tensor to floating point values with precision equal to the currently set default precision.
See Also:
`Backend.precision()`.
If `x` is mutable and of the correct floating type, returns a copy of `x`.
To convert float tensors to the backend precision but leave non-float tensors untouched, use `Backend.as_tensor()`.
Args:
x: tensor of bool, int or float
Returns:
Values of `x` as float tensor
"""
return self.cast(x, self.float_type)
def to_int32(self, x):
return self.cast(x, DType(int, 32))
def to_int64(self, x):
return self.cast(x, DType(int, 64))
def to_complex(self, x):
return self.cast(x, DType(complex, max(64, min(self.precision * 2, 128))))
def batched_gather_nd(self, values, indices):
"""
Gathers values from the tensor `values` at locations `indices`.
The first dimension of `values` and `indices` is the batch dimension which must be either equal for both or one for either.
Args:
values: tensor of shape (batch, spatial..., channel)
indices: int tensor of shape (batch, any..., multi_index) where the size of multi_index is values.rank - 2.
Returns:
Gathered values as tensor of shape (batch, any..., channel)
"""
raise NotImplementedError(self)
def flatten(self, x):
return self.reshape(x, (-1,))
def std(self, x, axis=None, keepdims=False):
raise NotImplementedError(self)
def boolean_mask(self, x, mask, axis=0):
"""
Args:
x: tensor with any number of dimensions
mask: 1D mask tensor
axis: Axis index >= 0
"""
raise NotImplementedError(self)
def isfinite(self, x):
raise NotImplementedError(self)
def scatter(self, base_grid, indices, values, mode: str):
"""
Depending on `mode`, performs scatter_update or scatter_add.
Args:
base_grid: Tensor into which scatter values are inserted at indices. Tensor of shape (batch_size, spatial..., channels)
indices: Tensor of shape (batch_size or 1, update_count, index_vector)
values: Values to scatter at indices. Tensor of shape (batch_size or 1, update_count or 1, channels or 1)
mode: One of ('update', 'add')
Returns:
Copy of base_grid with values at `indices` updated by `values`.
"""
raise NotImplementedError(self)
def any(self, boolean_tensor, axis=None, keepdims=False):
raise NotImplementedError(self)
def all(self, boolean_tensor, axis=None, keepdims=False):
raise NotImplementedError(self)
def fft(self, x):
"""
Computes the n-dimensional FFT along all but the first and last dimensions.
Args:
x: tensor of dimension 3 or higher
        Returns:
            Complex tensor of the same shape as `x`
        """
raise NotImplementedError(self)
def ifft(self, k):
"""
Computes the n-dimensional inverse FFT along all but the first and last dimensions.
Args:
k: tensor of dimension 3 or higher
        Returns:
            Complex tensor of the same shape as `k`
        """
raise NotImplementedError(self)
def imag(self, x):
raise NotImplementedError(self)
def real(self, x):
raise NotImplementedError(self)
def sin(self, x):
raise NotImplementedError(self)
def cos(self, x):
raise NotImplementedError(self)
def tan(self, x):
raise NotImplementedError(self)
def log(self, x):
""" Natural logarithm """
raise NotImplementedError(self)
def log2(self, x):
raise NotImplementedError(self)
def log10(self, x):
raise NotImplementedError(self)
def dtype(self, array) -> DType:
raise NotImplementedError(self)
def tile(self, value, multiples):
"""
Repeats the tensor along each axis the number of times given by multiples.
If `multiples` has more dimensions than `value`, these dimensions are added to `value` as outer dimensions.
Args:
value: tensor
multiples: tuple or list of integers
Returns:
tile tensor
"""
raise NotImplementedError(self)
def sparse_tensor(self, indices, values, shape):
"""
        Optional feature: creates a sparse tensor from indices and values.
        Args:
            indices: tuple/list matching the dimensions (pair for matrix)
            values: Values corresponding to `indices`
            shape: Dense shape of the sparse tensor
        Returns:
            Backend-specific sparse tensor
        """
raise NotImplementedError(self)
def coordinates(self, tensor):
"""
Returns the coordinates and values of a tensor.
Args:
tensor: Sparse tensor
Returns:
            coordinates: `tuple` of tensors holding the coordinate vectors, i.e. (row, col) for matrices.
            values: Tensor holding the corresponding values
"""
raise NotImplementedError(self)
def minimize(self, method: str, f, x0, atol, max_iter, trj: bool):
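        # Implementation note: each batch entry is minimized by its own SciPy thread.
        # The two barriers below synchronize those threads with this method so that the
        # per-batch function/gradient requests are evaluated in one batched call to `fg`.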
from scipy.optimize import OptimizeResult, minimize
from threading import Thread
assert self.supports(Backend.functional_gradient)
assert len(self.staticshape(x0)) == 2 # (batch, parameters)
batch_size = self.staticshape(x0)[0]
fg = self.functional_gradient(f, [0], get_output=True)
method_description = f"SciPy {method} with {self.name}"
iterations = [0] * batch_size
function_evaluations = [0] * batch_size
xs = [None] * batch_size
final_losses = [None] * batch_size
converged = [False] * batch_size
diverged = [False] * batch_size
messages = [""] * batch_size
f_inputs = [None] * batch_size
f_b_losses = None
f_b_losses_np = None
f_grad_np = None
f_input_available = Barrier(batch_size + 1)
f_output_available = Barrier(batch_size + 1)
finished = [False] * batch_size
all_finished = False
trajectories = [[] for _ in range(batch_size)] if trj else None
threads = []
for b in range(batch_size):
def b_thread(b=b):
recent_b_losses = []
def b_fun(x: numpy.ndarray):
function_evaluations[b] += 1
f_inputs[b] = self.as_tensor(x, convert_external=True)
f_input_available.wait()
f_output_available.wait()
recent_b_losses.append(f_b_losses[b])
if final_losses[b] is None: # first evaluation
final_losses[b] = f_b_losses[b]
if trajectories is not None:
trajectories[b].append(SolveResult(method_description, x0[b], f_b_losses[b], 0, 1, False, False, ""))
return f_b_losses_np[b], f_grad_np[b]
def callback(x, *args): # L-BFGS-B only passes x but the documentation says (x, state)
iterations[b] += 1
loss = min(recent_b_losses)
recent_b_losses.clear()
final_losses[b] = loss
if trajectories is not None:
trajectories[b].append(SolveResult(method_description, x, loss, iterations[b], function_evaluations[b], False, False, ""))
res = minimize(fun=b_fun, x0=x0[b], jac=True, method=method, tol=atol[b], options={'maxiter': max_iter[b]}, callback=callback)
assert isinstance(res, OptimizeResult)
# res.nit, res.nfev
xs[b] = res.x
converged[b] = res.success
diverged[b] = res.status not in (0, 1) # 0=success
messages[b] = res.message
finished[b] = True
while not all_finished:
f_input_available.wait()
f_output_available.wait()
b_thread = Thread(target=b_thread)
threads.append(b_thread)
b_thread.start()
while True:
f_input_available.wait()
if all(finished):
all_finished = True
f_output_available.wait()
break
_, f_b_losses, f_grad = fg(self.stack(f_inputs))
f_b_losses_np = self.numpy(f_b_losses).astype(numpy.float64)
f_grad_np = self.numpy(f_grad).astype(numpy.float64)
f_output_available.wait()
for b_thread in threads:
b_thread.join() # make sure threads exit correctly
if trj:
max_trajectory_length = max([len(t) for t in trajectories])
last_points = [SolveResult(method_description, xs[b], final_losses[b], iterations[b], function_evaluations[b], converged[b], diverged[b], "") for b in range(batch_size)]
trajectories = [t[:-1] + [last_point] * (max_trajectory_length - len(t) + 1) for t, last_point in zip(trajectories, last_points)]
trajectory = []
for states in zip(*trajectories):
x = self.stack([self.to_float(state.x) for state in states])
residual = self.stack([state.residual for state in states])
iterations = [state.iterations for state in states]
function_evaluations = [state.function_evaluations for state in states]
converged = [state.converged for state in states]
diverged = [state.diverged for state in states]
trajectory.append(SolveResult(method_description, x, residual, iterations, function_evaluations, converged, diverged, messages))
return trajectory
else:
x = self.stack(xs)
residual = self.stack(final_losses)
return SolveResult(method_description, x, residual, iterations, function_evaluations, converged, diverged, messages)
def linear_solve(self, method: str, lin, y, x0, rtol, atol, max_iter, trj: bool) -> SolveResult or List[SolveResult]:
"""
Solve the system of linear equations A · x = y.
This method need not provide a gradient for the operation.
Args:
method: Which algorithm to use. One of `('auto', 'CG', 'CG-adaptive')`.
lin: Linear operation. One of
* sparse/dense matrix valid for all instances
* tuple/list of sparse/dense matrices for varying matrices along batch, must have the same nonzero locations.
* linear function A(x), must be called on all instances in parallel
y: target result of A * x. 2nd order tensor (batch, vector) or list of vectors.
x0: Initial guess of size (batch, parameters)
rtol: Relative tolerance of size (batch,)
atol: Absolute tolerance of size (batch,)
max_iter: Maximum number of iterations of size (batch,)
trj: Whether to record and return the optimization trajectory as a `List[SolveResult]`.
Returns:
result: `SolveResult` or `List[SolveResult]`, depending on `trj`.
"""
if method == 'auto':
return self.conjugate_gradient_adaptive(lin, y, x0, rtol, atol, max_iter, trj)
elif method == 'CG':
return self.conjugate_gradient(lin, y, x0, rtol, atol, max_iter, trj)
elif method == 'CG-adaptive':
return self.conjugate_gradient_adaptive(lin, y, x0, rtol, atol, max_iter, trj)
else:
raise NotImplementedError(f"Method '{method}' not supported for linear solve.")
def conjugate_gradient(self, lin, y, x0, rtol, atol, max_iter, trj: bool) -> SolveResult or List[SolveResult]:
""" Standard conjugate gradient algorithm. Signature matches to `Backend.linear_solve()`. """
# Based on "An Introduction to the Conjugate Gradient Method Without the Agonizing Pain" by Jonathan Richard Shewchuk
# symbols: dx=d, dy=q, step_size=alpha, residual_squared=delta, residual=r, y=b
method = f"Φ-Flow CG ({self.name})"
y = self.to_float(y)
x0 = self.copy(self.to_float(x0), only_mutable=True)
batch_size = self.staticshape(y)[0]
tolerance_sq = self.maximum(rtol ** 2 * self.sum(y ** 2, -1), atol ** 2)
x = x0
dx = residual = y - self.linear(lin, x)
it_counter = 0
iterations = self.zeros([batch_size], DType(int, 32))
function_evaluations = self.ones([batch_size], DType(int, 32))
residual_squared = rsq0 = self.sum(residual ** 2, -1, keepdims=True)
diverged = self.any(~self.isfinite(x), axis=(1,))
converged = self.all(residual_squared <= tolerance_sq, axis=(1,))
trajectory = [SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, "")] if trj else None
finished = converged | diverged | (iterations >= max_iter); not_finished_1 = self.to_int32(~finished) # ; active = self.to_float(self.expand_dims(not_finished_1, -1))
while ~self.all(finished):
it_counter += 1; iterations += not_finished_1
dy = self.linear(lin, dx); function_evaluations += not_finished_1
dx_dy = self.sum(dx * dy, axis=-1, keepdims=True)
step_size = self.divide_no_nan(residual_squared, dx_dy)
step_size *= self.expand_dims(self.to_float(not_finished_1), -1) # this is not really necessary but ensures batch-independence
x += step_size * dx
if it_counter % 50 == 0:
residual = y - self.linear(lin, x); function_evaluations += 1
else:
residual = residual - step_size * dy # in-place subtraction affects convergence
residual_squared_old = residual_squared
residual_squared = self.sum(residual ** 2, -1, keepdims=True)
dx = residual + self.divide_no_nan(residual_squared, residual_squared_old) * dx
diverged = self.any(residual_squared / rsq0 > 100, axis=(1,)) & (iterations >= 8)
converged = self.all(residual_squared <= tolerance_sq, axis=(1,))
if trajectory is not None:
trajectory.append(SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, ""))
x = self.copy(x)
iterations = self.copy(iterations)
finished = converged | diverged | (iterations >= max_iter); not_finished_1 = self.to_int32(~finished) # ; active = self.to_float(self.expand_dims(not_finished_1, -1))
return trajectory if trj else SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, "")
def conjugate_gradient_adaptive(self, lin, y, x0, rtol, atol, max_iter, trj: bool) -> SolveResult or List[SolveResult]:
""" Conjugate gradient algorithm with adaptive step size. Signature matches to `Backend.linear_solve()`. """
# Based on the variant described in "Methods of Conjugate Gradients for Solving Linear Systems" by Magnus R. Hestenes and Eduard Stiefel
# https://nvlpubs.nist.gov/nistpubs/jres/049/jresv49n6p409_A1b.pdf
method = f"Φ-Flow CG-adaptive ({self.name})"
y = self.to_float(y)
x0 = self.copy(self.to_float(x0), only_mutable=True)
batch_size = self.staticshape(y)[0]
tolerance_sq = self.maximum(rtol ** 2 * self.sum(y ** 2, -1), atol ** 2)
x = x0
dx = residual = y - self.linear(lin, x)
dy = self.linear(lin, dx)
iterations = self.zeros([batch_size], DType(int, 32))
function_evaluations = self.ones([batch_size], DType(int, 32))
residual_squared = rsq0 = self.sum(residual ** 2, -1, keepdims=True)
diverged = self.any(~self.isfinite(x), axis=(1,))
converged = self.all(residual_squared <= tolerance_sq, axis=(1,))
trajectory = [SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, "")] if trj else None
continue_ = ~converged & ~diverged & (iterations < max_iter)
def loop(continue_, it_counter, x, dx, dy, residual, iterations, function_evaluations, _converged, _diverged):
continue_1 = self.to_int32(continue_)
it_counter += 1
iterations += continue_1
dx_dy = self.sum(dx * dy, axis=-1, keepdims=True)
step_size = self.divide_no_nan(self.sum(dx * residual, axis=-1, keepdims=True), dx_dy)
step_size *= self.expand_dims(self.to_float(continue_1), -1) # this is not really necessary but ensures batch-independence
x += step_size * dx
# if it_counter % 50 == 0: # Not traceable since Python bool
# residual = y - self.linear(lin, x); function_evaluations += 1
# else:
residual = residual - step_size * dy # in-place subtraction affects convergence
residual_squared = self.sum(residual ** 2, -1, keepdims=True)
dx = residual - self.divide_no_nan(self.sum(residual * dy, axis=-1, keepdims=True) * dx, dx_dy)
dy = self.linear(lin, dx); function_evaluations += continue_1
diverged = self.any(residual_squared / rsq0 > 100, axis=(1,)) & (iterations >= 8)
converged = self.all(residual_squared <= tolerance_sq, axis=(1,))
if trajectory is not None:
trajectory.append(SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, ""))
x = self.copy(x)
iterations = self.copy(iterations)
continue_ = ~converged & ~diverged & (iterations < max_iter)
return continue_, it_counter, x, dx, dy, residual, iterations, function_evaluations, converged, diverged
_, _, x, _, _, residual, iterations, function_evaluations, converged, diverged =\
self.while_loop(loop, (continue_, 0, x, dx, dy, residual, iterations, function_evaluations, converged, diverged))
return trajectory if trj else SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, "")
def linear(self, lin, vector):
if callable(lin):
return lin(vector)
elif isinstance(lin, (tuple, list)):
for lin_i in lin:
lin_shape = self.staticshape(lin_i)
assert len(lin_shape) == 2
return self.stack([self.matmul(m, v) for m, v in zip(lin, self.unstack(vector))])
else:
lin_shape = self.staticshape(lin)
assert len(lin_shape) == 2, f"A must be a matrix but got shape {lin_shape}"
return self.matmul(lin, vector)
def gradients(self, y, xs: tuple or list, grad_y) -> tuple:
raise NotImplementedError(self)
def record_gradients(self, xs: tuple or list, persistent=False):
raise NotImplementedError(self)
def stop_gradient(self, value):
raise NotImplementedError(self)
def grid_sample(self, grid, spatial_dims: tuple, coordinates, extrapolation='constant'):
"""
Interpolates a regular grid at the specified coordinates.
Args:
grid: Tensor
spatial_dims: Dimension indices that correspond to coordinate vectors
coordinates: Tensor of floating grid indices.
The last dimension must match `spatial_dims`.
                The first grid point of dimension i lies at position 0, the last at grid.shape[i]-1.
extrapolation: Values to use for coordinates outside the grid.
One of `('undefined', 'zeros', 'boundary', 'periodic', 'symmetric', 'reflect')`.
Returns:
sampled values with linear interpolation
"""
return NotImplemented
def variable(self, value):
return NotImplemented
def ndims(self, tensor):
return len(self.staticshape(tensor))
def size(self, array):
return self.prod(self.shape(array))
def batch_gather(self, tensor, batches):
if isinstance(batches, int):
batches = [batches]
return tensor[batches, ...]
def unstack(self, tensor, axis=0, keepdims=False) -> tuple:
if axis < 0:
axis += len(tensor.shape)
if axis >= len(tensor.shape) or axis < 0:
raise ValueError("Illegal axis value")
result = []
for slice_idx in range(tensor.shape[axis]):
if keepdims:
component = tensor[tuple([slice(slice_idx, slice_idx + 1) if d == axis else slice(None) for d in range(len(tensor.shape))])]
else:
component = tensor[tuple([slice_idx if d == axis else slice(None) for d in range(len(tensor.shape))])]
result.append(component)
return tuple(result)
def equal(self, x, y):
""" Element-wise equality check """
raise NotImplementedError(self)
def not_equal(self, x, y):
return ~self.equal(x, y)
def greater_than(self, x, y):
x, y = self.auto_cast(x, y)
return x > y
def greater_or_equal(self, x, y):
x, y = self.auto_cast(x, y)
return x >= y
def add(self, a, b):
a, b = self.auto_cast(a, b)
return a + b
def sub(self, a, b):
a, b = self.auto_cast(a, b)
return a - b
def mul(self, a, b):
a, b = self.auto_cast(a, b)
return a * b
def div(self, numerator, denominator):
numerator, denominator = self.auto_cast(numerator, denominator)
return numerator / denominator
def pow(self, base, exp):
base, exp = self.auto_cast(base, exp)
return base ** exp
def mod(self, dividend, divisor):
dividend, divisor = self.auto_cast(dividend, divisor)
return dividend % divisor
def and_(self, a, b):
a, b = self.auto_cast(a, b)
return a & b
def or_(self, a, b):
a, b = self.auto_cast(a, b)
return a | b
def xor(self, a, b):
a, b = self.auto_cast(a, b)
return a ^ b
def floordiv(self, a, b):
a, b = self.auto_cast(a, b)
return a // b
BACKENDS = []
""" Global list of all registered backends. Register a `Backend` by adding it to the list. """
_DEFAULT = [] # [0] = global default, [1:] from 'with' blocks
_PRECISION = [32] # [0] = global precision in bits, [1:] from 'with' blocks
def choose_backend(*values, prefer_default=False) -> Backend:
"""
Selects a suitable backend to handle the given values.
This function is used by most math functions operating on `Tensor` objects to delegate the actual computations.
Args:
        *values: Values that the selected backend must be able to handle.
        prefer_default: If True, selects the default backend assuming it can handle the values, see `default_backend()`.
    Returns:
        The selected `Backend`
    Raises:
        NoBackendFound: If no registered backend can handle the given values.
    """
# --- Default Backend has priority ---
if _is_applicable(_DEFAULT[-1], values) and (prefer_default or _is_specific(_DEFAULT[-1], values)):
return _DEFAULT[-1]
# --- Filter out non-applicable ---
backends = [backend for backend in BACKENDS if _is_applicable(backend, values)]
if len(backends) == 0:
raise NoBackendFound(f"No backend found for types {[type(v).__name__ for v in values]}; registered backends are {BACKENDS}")
# --- Native tensors? ---
for backend in backends:
if _is_specific(backend, values):
return backend
return backends[0]
class NoBackendFound(Exception):
"""
Thrown by `choose_backend` if no backend can handle the given values.
"""
def __init__(self, msg):
Exception.__init__(self, msg)
def default_backend() -> Backend:
"""
The default backend is preferred by `choose_backend()`.
The default backend can be set globally using `set_global_default_backend()` and locally using `with backend:`.
Returns:
current default `Backend`
"""
return _DEFAULT[-1]
def context_backend() -> Backend or None:
"""
Returns the backend set by the inner-most surrounding `with backend:` block.
If called outside a backend context, returns `None`.
Returns:
`Backend` or `None`
"""
return _DEFAULT[-1] if len(_DEFAULT) > 1 else None
def set_global_default_backend(backend: Backend):
"""
Sets the given backend as default.
This setting can be overridden using `with backend:`.
See `default_backend()`, `choose_backend()`.
Args:
backend: `Backend` to set as default
"""
assert isinstance(backend, Backend)
_DEFAULT[0] = backend
def set_global_precision(floating_point_bits: int):
"""
    Sets the global floating point precision, which affects all registered backends.
If `floating_point_bits` is an integer, all floating point tensors created henceforth will be of the corresponding data type, float16, float32 or float64.
Operations may also convert floating point values to this precision, even if the input had a different precision.
If `floating_point_bits` is None, new tensors will default to float32 unless specified otherwise.
The output of math operations has the same precision as its inputs.
Args:
floating_point_bits: one of (16, 32, 64, None)
"""
_PRECISION[0] = floating_point_bits
def get_precision() -> int:
"""
Gets the current target floating point precision in bits.
The precision can be set globally using `set_global_precision()` or locally using `with precision(p):`.
Any Backend method may convert floating point values to this precision, even if the input had a different precision.
Returns:
16 for half, 32 for single, 64 for double
"""
return _PRECISION[-1]
@contextmanager
def precision(floating_point_bits: int):
"""
Sets the floating point precision for the local context.
Usage: `with precision(p):`
This overrides the global setting, see `set_global_precision()`.
Args:
floating_point_bits: 16 for half, 32 for single, 64 for double
"""
_PRECISION.append(floating_point_bits)
try:
yield None
finally:
_PRECISION.pop(-1)
def convert(tensor, backend: Backend = None, use_dlpack=True):
"""
Convert a Tensor to the native format of `backend`.
If the target backend can operate natively on `tensor`, returns `tensor`.
If both backends support *DLPack* and `use_dlpack=True`, uses zero-copy conversion using the DLPack library.
Else, intermediately converts `tensor` to a NumPy array.
*Warning*: This operation breaks the automatic differentiation chain.
Args:
tensor: Native tensor belonging to any registered backend.
        backend: Target backend. If `None`, uses the current default backend, see `default_backend()`.
        use_dlpack: Whether to attempt zero-copy *DLPack* conversion when both backends support it.
Returns:
Tensor belonging to `backend`.
"""
backend = backend or default_backend()
current_backend = choose_backend(tensor, prefer_default=False)
if backend.is_tensor(tensor, True) or backend is current_backend:
return tensor
if use_dlpack and current_backend.supports(Backend.to_dlpack) and backend.supports(Backend.from_dlpack):
capsule = current_backend.to_dlpack(tensor)
return backend.from_dlpack(capsule)
else:
nparray = current_backend.numpy(tensor)
return backend.as_tensor(nparray)
# Backend choice utility functions
def _is_applicable(backend, values):
for value in values:
if not backend.is_tensor(value, only_native=False):
return False
return True
def _is_specific(backend, values):
for value in values:
if backend.is_tensor(value, only_native=True):
return True
return False
# Other low-level helper functions
def combined_dim(dim1, dim2, type_str: str = 'batch'):
if dim1 is None and dim2 is None:
return None
if dim1 is None or dim1 == 1:
return dim2
if dim2 is None or dim2 == 1:
return dim1
assert dim1 == dim2, f"Incompatible {type_str} dimensions: x0 {dim1}, y {dim2}"
return dim1
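# Illustrative usage sketch (not part of the original module): the precision stack
# defined above can be exercised without a concrete Backend subclass. Anything that
# touches actual tensors additionally requires an implementation registered in
# BACKENDS (e.g. a NumPy-based backend) and is therefore omitted here.
if __name__ == '__main__':
    print(get_precision())      # 32, the global default
    with precision(64):
        print(get_precision())  # 64 inside the context
    print(get_precision())      # back to 32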
| [((11, 14, 13, 2), 'collections.namedtuple', 'namedtuple', ({(11, 25, 11, 38): '"""SolveResult"""', (11, 40, 13, 1): "['method', 'x', 'residual', 'iterations', 'function_evaluations',\n 'converged', 'diverged', 'message']"}, {}), "('SolveResult', ['method', 'x', 'residual', 'iterations',\n 'function_evaluations', 'converged', 'diverged', 'message'])", False, 'from collections import namedtuple\n'), ((680, 28, 680, 51), 'threading.Barrier', 'Barrier', ({(680, 36, 680, 50): 'batch_size + 1'}, {}), '(batch_size + 1)', False, 'from threading import Barrier\n'), ((681, 29, 681, 52), 'threading.Barrier', 'Barrier', ({(681, 37, 681, 51): 'batch_size + 1'}, {}), '(batch_size + 1)', False, 'from threading import Barrier\n'), ((724, 23, 724, 46), 'threading.Thread', 'Thread', (), '', False, 'from threading import Thread\n'), ((712, 22, 712, 142), 'scipy.optimize.minimize', 'minimize', (), '', False, 'from scipy.optimize import OptimizeResult, minimize\n')] |
dtrodrigues/bpython | bpython/curtsiesfrontend/parse.py | 143e4e55d8f5227149528a5880a32a516a40f14d | import re
from curtsies.formatstring import fmtstr, FmtStr
from curtsies.termformatconstants import (
FG_COLORS,
BG_COLORS,
colors as CURTSIES_COLORS,
)
from functools import partial
from ..lazyre import LazyReCompile
COLORS = CURTSIES_COLORS + ("default",)
CNAMES = dict(zip("krgybmcwd", COLORS))
# hack for finding the "inverse"
INVERSE_COLORS = {
CURTSIES_COLORS[idx]: CURTSIES_COLORS[
(idx + (len(CURTSIES_COLORS) // 2)) % len(CURTSIES_COLORS)
]
for idx in range(len(CURTSIES_COLORS))
}
INVERSE_COLORS["default"] = INVERSE_COLORS[CURTSIES_COLORS[0]]
def func_for_letter(letter_color_code: str, default: str = "k"):
"""Returns FmtStr constructor for a bpython-style color code"""
if letter_color_code == "d":
letter_color_code = default
elif letter_color_code == "D":
letter_color_code = default.upper()
return partial(
fmtstr,
fg=CNAMES[letter_color_code.lower()],
bold=letter_color_code.isupper(),
)
def color_for_letter(letter_color_code: str, default: str = "k"):
if letter_color_code == "d":
letter_color_code = default
return CNAMES[letter_color_code.lower()]
def parse(s):
"""Returns a FmtStr object from a bpython-formatted colored string"""
rest = s
stuff = []
while True:
if not rest:
break
start, rest = peel_off_string(rest)
stuff.append(start)
return (
sum((fs_from_match(d) for d in stuff[1:]), fs_from_match(stuff[0]))
if len(stuff) > 0
else FmtStr()
)
def fs_from_match(d):
atts = {}
if d["fg"]:
# this isn't according to spec as I understand it
if d["fg"].isupper():
d["bold"] = True
# TODO figure out why boldness isn't based on presence of \x02
color = CNAMES[d["fg"].lower()]
if color != "default":
atts["fg"] = FG_COLORS[color]
if d["bg"]:
if d["bg"] == "I":
# hack for finding the "inverse"
color = INVERSE_COLORS[color]
else:
color = CNAMES[d["bg"].lower()]
if color != "default":
atts["bg"] = BG_COLORS[color]
if d["bold"]:
atts["bold"] = True
return fmtstr(d["string"], **atts)
peel_off_string_re = LazyReCompile(
r"""(?P<colormarker>\x01
(?P<fg>[krgybmcwdKRGYBMCWD]?)
(?P<bg>[krgybmcwdKRGYBMCWDI]?)?)
(?P<bold>\x02?)
\x03
(?P<string>[^\x04]*)
\x04
(?P<rest>.*)
""",
re.VERBOSE | re.DOTALL,
)
def peel_off_string(s):
m = peel_off_string_re.match(s)
assert m, repr(s)
d = m.groupdict()
rest = d["rest"]
del d["rest"]
return d, rest
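# Illustrative usage sketch (not part of the original module). bpython color codes
# wrap text as \x01<fg>[<bg>][\x02]\x03<text>\x04; `parse` turns such a string into
# a curtsies FmtStr.
if __name__ == "__main__":
    sample = "\x01y\x03hello\x04\x01c\x03 world\x04"  # yellow "hello", cyan " world"
    print(repr(parse(sample)))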
| [((82, 11, 82, 38), 'curtsies.formatstring.fmtstr', 'fmtstr', ({(82, 18, 82, 29): "d['string']"}, {}), "(d['string'], **atts)", False, 'from curtsies.formatstring import fmtstr, FmtStr\n'), ((57, 13, 57, 21), 'curtsies.formatstring.FmtStr', 'FmtStr', ({}, {}), '()', False, 'from curtsies.formatstring import fmtstr, FmtStr\n')] |
pressler-vsc/sarpy | sarpy/io/general/nitf_elements/tres/unclass/BANDSA.py | fa6c951c42b9a7d9df2edfa53c771494cb0246fb | # -*- coding: utf-8 -*-
from ..tre_elements import TREExtension, TREElement
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
class BAND(TREElement):
def __init__(self, value):
super(BAND, self).__init__()
self.add_field('BANDPEAK', 's', 5, value)
self.add_field('BANDLBOUND', 's', 5, value)
self.add_field('BANDUBOUND', 's', 5, value)
self.add_field('BANDWIDTH', 's', 5, value)
self.add_field('BANDCALDRK', 's', 6, value)
self.add_field('BANDCALINC', 's', 5, value)
self.add_field('BANDRESP', 's', 5, value)
self.add_field('BANDASD', 's', 5, value)
self.add_field('BANDGSD', 's', 5, value)
class BANDSAType(TREElement):
def __init__(self, value):
super(BANDSAType, self).__init__()
self.add_field('ROW_SPACING', 's', 7, value)
self.add_field('ROW_SPACING_UNITS', 's', 1, value)
self.add_field('COL_SPACING', 's', 7, value)
self.add_field('COL_SPACING_UNITS', 's', 1, value)
self.add_field('FOCAL_LENGTH', 's', 6, value)
self.add_field('BANDCOUNT', 'd', 4, value)
self.add_loop('BANDs', self.BANDCOUNT, BAND, value)
class BANDSA(TREExtension):
_tag_value = 'BANDSA'
_data_type = BANDSAType
| [] |
husmen/ktrain | ktrain/graph/learner.py | 4147b0bd146deb513c6f94505908294a5163efac | from ..imports import *
from .. import utils as U
from ..core import GenLearner
class NodeClassLearner(GenLearner):
"""
```
Main class used to tune and train Keras models for node classification
Main parameters are:
model (Model): A compiled instance of keras.engine.training.Model
train_data (Iterator): a Iterator instance for training set
val_data (Iterator): A Iterator instance for validation set
```
"""
def __init__(self, model, train_data=None, val_data=None,
batch_size=U.DEFAULT_BS, eval_batch_size=U.DEFAULT_BS,
workers=1, use_multiprocessing=False):
super().__init__(model, train_data=train_data, val_data=val_data,
batch_size=batch_size, eval_batch_size=eval_batch_size,
workers=workers, use_multiprocessing=use_multiprocessing)
return
def view_top_losses(self, n=4, preproc=None, val_data=None):
"""
```
Views observations with top losses in validation set.
Typically over-ridden by Learner subclasses.
Args:
n(int or tuple): a range to select in form of int or tuple
e.g., n=8 is treated as n=(0,8)
preproc (Preprocessor): A TextPreprocessor or ImagePreprocessor.
For some data like text data, a preprocessor
is required to undo the pre-processing
to correctly view raw data.
val_data: optional val_data to use instead of self.val_data
Returns:
list of n tuples where first element is either
filepath or id of validation example and second element
is loss.
```
"""
val = self._check_val(val_data)
# get top losses and associated data
tups = self.top_losses(n=n, val_data=val, preproc=preproc)
# get multilabel status and class names
classes = preproc.get_classes() if preproc is not None else None
# iterate through losses
for tup in tups:
# get data
idx = tup[0]
loss = tup[1]
truth = tup[2]
pred = tup[3]
print('----------')
print("id:%s | loss:%s | true:%s | pred:%s)\n" % (idx, round(loss,2), truth, pred))
#print(obs)
return
def layer_output(self, layer_id, example_id=0, batch_id=0, use_val=False):
"""
```
Prints output of layer with index <layer_id> to help debug models.
Uses first example (example_id=0) from training set, by default.
```
"""
raise Exception('currently_unsupported: layer_output method is not yet supported for ' +
'graph neural networks in ktrain')
class LinkPredLearner(GenLearner):
"""
```
Main class used to tune and train Keras models for link prediction
Main parameters are:
model (Model): A compiled instance of keras.engine.training.Model
train_data (Iterator): a Iterator instance for training set
val_data (Iterator): A Iterator instance for validation set
```
"""
def __init__(self, model, train_data=None, val_data=None,
batch_size=U.DEFAULT_BS, eval_batch_size=U.DEFAULT_BS,
workers=1, use_multiprocessing=False):
super().__init__(model, train_data=train_data, val_data=val_data,
batch_size=batch_size, eval_batch_size=eval_batch_size,
workers=workers, use_multiprocessing=use_multiprocessing)
return
def view_top_losses(self, n=4, preproc=None, val_data=None):
"""
```
Views observations with top losses in validation set.
Typically over-ridden by Learner subclasses.
Args:
n(int or tuple): a range to select in form of int or tuple
e.g., n=8 is treated as n=(0,8)
preproc (Preprocessor): A TextPreprocessor or ImagePreprocessor.
For some data like text data, a preprocessor
is required to undo the pre-processing
to correctly view raw data.
val_data: optional val_data to use instead of self.val_data
Returns:
list of n tuples where first element is either
filepath or id of validation example and second element
is loss.
```
"""
val = self._check_val(val_data)
# get top losses and associated data
tups = self.top_losses(n=n, val_data=val, preproc=preproc)
# get multilabel status and class names
classes = preproc.get_classes() if preproc is not None else None
# iterate through losses
for tup in tups:
# get data
idx = tup[0]
loss = tup[1]
truth = tup[2]
pred = tup[3]
print('----------')
print("id:%s | loss:%s | true:%s | pred:%s)\n" % (idx, round(loss,2), truth, pred))
#print(obs)
return
def layer_output(self, layer_id, example_id=0, batch_id=0, use_val=False):
"""
```
Prints output of layer with index <layer_id> to help debug models.
Uses first example (example_id=0) from training set, by default.
```
"""
raise Exception('currently_unsupported: layer_output method is not yet supported for ' +
'graph neural networks in ktrain')
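# Illustrative usage sketch (commented out; dataset and model setup omitted).
# A NodeClassLearner is typically obtained via `ktrain.get_learner`, which wraps a
# compiled model and the graph data iterators, rather than being instantiated
# directly. `model`, `trn`, `val` and `preproc` below are hypothetical placeholders.
#
# learner = ktrain.get_learner(model, train_data=trn, val_data=val, batch_size=64)
# learner.fit_onecycle(1e-2, 5)
# learner.view_top_losses(n=5, preproc=preproc)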
| [] |
Thanksyy/Vega-Zero | VegaZero2VegaLite.py | dd25cb145faec047b01ca54c69ba96c56adb99f4 | __author__ = "Yuyu Luo"
import json
import pandas
class VegaZero2VegaLite(object):
def __init__(self):
pass
def parse_vegaZero(self, vega_zero):
self.parsed_vegaZero = {
'mark': '',
'data': '',
'encoding': {
'x': '',
'y': {
'aggregate': '',
'y': ''
},
'color': {
'z': ''
}
},
'transform': {
'filter': '',
'group': '',
'bin': {
'axis': '',
'type': ''
},
'sort': {
'axis': '',
'type': ''
},
'topk': ''
}
}
vega_zero_keywords = vega_zero.split(' ')
self.parsed_vegaZero['mark'] = vega_zero_keywords[vega_zero_keywords.index('mark') + 1]
self.parsed_vegaZero['data'] = vega_zero_keywords[vega_zero_keywords.index('data') + 1]
self.parsed_vegaZero['encoding']['x'] = vega_zero_keywords[vega_zero_keywords.index('x') + 1]
self.parsed_vegaZero['encoding']['y']['y'] = vega_zero_keywords[vega_zero_keywords.index('aggregate') + 2]
self.parsed_vegaZero['encoding']['y']['aggregate'] = vega_zero_keywords[vega_zero_keywords.index('aggregate') + 1]
if 'color' in vega_zero_keywords:
self.parsed_vegaZero['encoding']['color']['z'] = vega_zero_keywords[vega_zero_keywords.index('color') + 1]
if 'topk' in vega_zero_keywords:
self.parsed_vegaZero['transform']['topk'] = vega_zero_keywords[vega_zero_keywords.index('topk') + 1]
if 'sort' in vega_zero_keywords:
self.parsed_vegaZero['transform']['sort']['axis'] = vega_zero_keywords[vega_zero_keywords.index('sort') + 1]
self.parsed_vegaZero['transform']['sort']['type'] = vega_zero_keywords[vega_zero_keywords.index('sort') + 2]
if 'group' in vega_zero_keywords:
self.parsed_vegaZero['transform']['group'] = vega_zero_keywords[vega_zero_keywords.index('group') + 1]
if 'bin' in vega_zero_keywords:
self.parsed_vegaZero['transform']['bin']['axis'] = vega_zero_keywords[vega_zero_keywords.index('bin') + 1]
self.parsed_vegaZero['transform']['bin']['type'] = vega_zero_keywords[vega_zero_keywords.index('bin') + 3]
if 'filter' in vega_zero_keywords:
filter_part_token = []
for each in vega_zero_keywords[vega_zero_keywords.index('filter') + 1:]:
if each not in ['group', 'bin', 'sort', 'topk']:
filter_part_token.append(each)
else:
break
if 'between' in filter_part_token:
filter_part_token[filter_part_token.index('between') + 2] = 'and ' + filter_part_token[
filter_part_token.index('between') - 1] + ' <='
filter_part_token[filter_part_token.index('between')] = '>='
# replace 'and' -- 'or'
filter_part_token = ' '.join(filter_part_token).split()
filter_part_token = ['&' if x == 'and' else x for x in filter_part_token]
filter_part_token = ['|' if x == 'or' else x for x in filter_part_token]
if '&' in filter_part_token or '|' in filter_part_token:
final_filter_part = ''
each_conditions = []
for i in range(len(filter_part_token)):
each = filter_part_token[i]
if each != '&' and each != '|':
# ’=‘ in SQL --to--> ’==‘ in Vega-Lite
if each == '=':
each = '=='
each_conditions.append(each)
if each == '&' or each == '|' or i == len(filter_part_token) - 1:
# each = '&' or '|'
if 'like' == each_conditions[1]:
# only consider this case: '%a%'
if each_conditions[2][1] == '%' and each_conditions[2][len(each_conditions[2]) - 2] == '%':
final_filter_part += 'indexof(' + 'datum.' + each_conditions[0] + ',"' + \
each_conditions[2][2:len(each_conditions[2]) - 2] + '") != -1'
elif 'like' == each_conditions[2] and 'not' == each_conditions[1]:
if each_conditions[3][1] == '%' and each_conditions[3][len(each_conditions[3]) - 2] == '%':
final_filter_part += 'indexof(' + 'datum.' + each_conditions[0] + ',"' + \
each_conditions[3][2:len(each_conditions[3]) - 2] + '") == -1'
else:
final_filter_part += 'datum.' + ' '.join(each_conditions)
if i != len(filter_part_token) - 1:
final_filter_part += ' ' + each + ' '
each_conditions = []
self.parsed_vegaZero['transform']['filter'] = final_filter_part
else:
# only single filter condition
self.parsed_vegaZero['transform']['filter'] = 'datum.' + ' '.join(filter_part_token).strip()
return self.parsed_vegaZero
def to_VegaLite(self, vega_zero, dataframe=None):
self.VegaLiteSpec = {
'bar': {
"mark": "bar",
"encoding": {
"x": {"field": "x", "type": "nominal"},
"y": {"field": "y", "type": "quantitative"}
}
},
'arc': {
"mark": "arc",
"encoding": {
"color": {"field": "x", "type": "nominal"},
"theta": {"field": "y", "type": "quantitative"}
}
},
'line': {
"mark": "line",
"encoding": {
"x": {"field": "x", "type": "nominal"},
"y": {"field": "y", "type": "quantitative"}
}
},
'point': {
"mark": "point",
"encoding": {
"x": {"field": "x", "type": "quantitative"},
"y": {"field": "y", "type": "quantitative"}
}
}
}
VegaZero = self.parse_vegaZero(vega_zero)
# assign some vega-zero keywords to the VegaLiteSpec object
if isinstance(dataframe, pandas.core.frame.DataFrame):
self.VegaLiteSpec[VegaZero['mark']]['data'] = dict()
self.VegaLiteSpec[VegaZero['mark']]['data']['values'] = json.loads(dataframe.to_json(orient='records'))
if VegaZero['mark'] != 'arc':
self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['field'] = VegaZero['encoding']['x']
self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['field'] = VegaZero['encoding']['y']['y']
if VegaZero['encoding']['y']['aggregate'] != '' and VegaZero['encoding']['y']['aggregate'] != 'none':
self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['aggregate'] = VegaZero['encoding']['y']['aggregate']
else:
self.VegaLiteSpec[VegaZero['mark']]['encoding']['color']['field'] = VegaZero['encoding']['x']
self.VegaLiteSpec[VegaZero['mark']]['encoding']['theta']['field'] = VegaZero['encoding']['y']['y']
if VegaZero['encoding']['y']['aggregate'] != '' and VegaZero['encoding']['y']['aggregate'] != 'none':
self.VegaLiteSpec[VegaZero['mark']]['encoding']['theta']['aggregate'] = VegaZero['encoding']['y'][
'aggregate']
if VegaZero['encoding']['color']['z'] != '':
self.VegaLiteSpec[VegaZero['mark']]['encoding']['color'] = {
'field': VegaZero['encoding']['color']['z'], 'type': 'nominal'
}
        # it seems that the grouping is performed by Vega-Lite by default, in our cases.
if VegaZero['transform']['group'] != '':
pass
if VegaZero['transform']['bin']['axis'] != '':
if VegaZero['transform']['bin']['axis'] == 'x':
self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['type'] = 'temporal'
if VegaZero['transform']['bin']['type'] in ['date', 'year', 'week', 'month']:
self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['timeUnit'] = VegaZero['transform']['bin']['type']
elif VegaZero['transform']['bin']['type'] == 'weekday':
self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['timeUnit'] = 'week'
else:
print('Unknown binning step.')
if VegaZero['transform']['filter'] != '':
if 'transform' not in self.VegaLiteSpec[VegaZero['mark']]:
self.VegaLiteSpec[VegaZero['mark']]['transform'] = [{
"filter": VegaZero['transform']['filter']
}]
elif 'filter' not in self.VegaLiteSpec[VegaZero['mark']]['transform']:
self.VegaLiteSpec[VegaZero['mark']]['transform'].append({
"filter": VegaZero['transform']['filter']
})
else:
self.VegaLiteSpec[VegaZero['mark']]['transform']['filter'] += ' & ' + VegaZero['transform']['filter']
if VegaZero['transform']['topk'] != '':
if VegaZero['transform']['sort']['axis'] == 'x':
sort_field = VegaZero['encoding']['x']
elif VegaZero['transform']['sort']['axis'] == 'y':
sort_field = VegaZero['encoding']['y']['y']
else:
print('Unknown sorting field: ', VegaZero['transform']['sort']['axis'])
sort_field = VegaZero['transform']['sort']['axis']
if VegaZero['transform']['sort']['type'] == 'desc':
sort_order = 'descending'
else:
sort_order = 'ascending'
if 'transform' in self.VegaLiteSpec[VegaZero['mark']]:
current_filter = self.VegaLiteSpec[VegaZero['mark']]['transform'][0]['filter']
self.VegaLiteSpec[VegaZero['mark']]['transform'][0][
'filter'] = current_filter + ' & ' + "datum.rank <= " + str(VegaZero['transform']['topk'])
self.VegaLiteSpec[VegaZero['mark']]['transform'].insert(0, {
"window": [{
"field": sort_field,
"op": "dense_rank",
"as": "rank"
}],
"sort": [{"field": sort_field, "order": sort_order}]
})
else:
self.VegaLiteSpec[VegaZero['mark']]['transform'] = [
{
"window": [{
"field": sort_field,
"op": "dense_rank",
"as": "rank"
}],
"sort": [{"field": sort_field, "order": sort_order}]
},
{
"filter": "datum.rank <= " + str(VegaZero['transform']['topk'])
}
]
if VegaZero['transform']['sort']['axis'] != '':
if VegaZero['transform']['sort']['axis'] == 'x':
if VegaZero['transform']['sort']['type'] == 'desc':
self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['sort'] = '-x'
else:
self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['sort'] = 'x'
else:
if VegaZero['transform']['sort']['type'] == 'desc':
self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['sort'] = '-y'
else:
self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['sort'] = 'y'
return self.VegaLiteSpec[VegaZero['mark']]
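if __name__ == '__main__':
    # Illustrative usage sketch (not part of the original module); the query and its
    # column names are made up for demonstration purposes.
    example_query = ('mark bar data cars encoding x origin y aggregate count origin '
                     'transform group origin sort y desc')
    converter = VegaZero2VegaLite()
    print(json.dumps(converter.to_VegaLite(example_query), indent=2))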
| [] |
kmzbrnoI/ac-python | utils/dancer.py | 383802734e17d2a00c0b86083cf923517db02acd | """Library for executing user-defined dance."""
import logging
from typing import Any, Dict, Optional, Callable
import datetime
import ac
import ac.blocks
from ac import ACs, AC
JC = Dict[str, Any]
class DanceStartException(Exception):
pass
class Step:
"""Base class for all specific dance steps."""
def update(self, acn: AC) -> None:
pass
def on_start(self, acn: AC) -> None:
pass
def disp_str(self) -> str:
return ''
class JCNotFoundException(DanceStartException):
pass
class StepJC(Step):
"""
Process jc 'name'. If processed already, skip processing and continue.
"""
name_to_id: Dict[str, int] = {}
def __init__(self, name: str, type_: str = 'VC') -> None:
self.jc: Optional[JC] = None
self.type = type_
self.name = name
def update(self, acn: AC) -> None:
assert isinstance(acn, DanceAC)
if self.jc is None:
jcid = self.get_jc_id(self.name, acn)
self.jc = acn.pt_get(f'/jc/{jcid}?state=true')['jc']
if self.jc['state']['active']:
self.jc = None
acn.step_done()
return
result = acn.pt_put(f'/jc/{self.jc["id"]}/state', {})
if result['success']:
self.jc = None
acn.step_done()
def on_start(self, acn: AC) -> None:
self.get_jc_id(self.name, acn)
def get_jc_id(self, name: str, acn: AC) -> int:
if not StepJC.name_to_id:
jcs = acn.pt_get('/jc')['jc']
StepJC.name_to_id = {
jc['name']: jc['id']
for jc in jcs if jc['type'] == self.type
}
if name not in StepJC.name_to_id.keys():
raise JCNotFoundException(f'Jízdní cesta {self.name} neexistuje!')
return StepJC.name_to_id[name]
def disp_str(self) -> str:
return f'Stavění JC {self.name}'
class StepDelay(Step):
"""Delay any time."""
def __init__(self, delay: datetime.timedelta) -> None:
self.delay = delay
self.finish: Optional[datetime.datetime] = None
def update(self, acn: AC) -> None:
assert isinstance(acn, DanceAC)
if self.finish is None:
self.finish = datetime.datetime.now() + self.delay
if datetime.datetime.now() > self.finish:
self.finish = None
acn.step_done()
def disp_str(self) -> str:
return f'Čekání {self.delay}'
class BlockNotFoundException(DanceStartException):
pass
class StepWaitForBlock(Step):
"""Wait for specific state of any block. See examples below."""
name_to_id: Dict[str, int] = {}
def __init__(self, name: str, checker: Callable[[ac.Block], bool]) -> None:
self.name = name
self.checker = checker
self.block: Optional[ac.Block] = None
def update(self, acn: AC) -> None:
assert isinstance(acn, DanceAC)
if self.block is None:
blockid = self.get_block_id(self.name, acn)
self.block = acn.pt_get(f'/blocks/{blockid}?state=true')['block']
if self.checker(self.block):
self.block = None
acn.step_done()
else:
ac.blocks.register([self.block['id']])
def on_start(self, acn: AC) -> None:
self.get_block_id(self.name, acn)
def on_block_change(self, acn: AC, block: ac.Block) -> None:
assert isinstance(acn, DanceAC)
if self.block is None or block['id'] != self.block['id']:
return
if self.checker(block):
ac.blocks.unregister([self.block['id']])
self.block = None
acn.step_done()
def get_block_id(self, name: str, acn: AC) -> int:
if not StepWaitForBlock.name_to_id:
blocks = acn.pt_get('/blocks')['blocks']
StepWaitForBlock.name_to_id = {
block['name']: block['id'] for block in blocks
}
if name not in StepWaitForBlock.name_to_id.keys():
raise BlockNotFoundException(f"Blok {self.name} neexistuje!")
return StepWaitForBlock.name_to_id[name]
def disp_str(self) -> str:
return f'Čekání na stav bloku {self.name}'
def track_is_occupied(block: ac.Block) -> bool:
return bool(block['blockState']['state'] == 'occupied')
class DanceAC(AC):
"""This AC executes predefined steps."""
def __init__(self, id_: str, password: str,
steps: Dict[int, Step]) -> None:
AC.__init__(self, id_, password)
self.steps = steps
self.stepi = 0
def on_start(self) -> None:
logging.info('Start')
for stepi, step in self.steps.items():
try:
step.on_start(self)
except DanceStartException as e:
self.disp_error(f'Krok {stepi}: '+str(e))
self.done()
return
self.stepi = 1
self.send_step()
self.on_update()
def on_stop(self) -> None:
self.statestr = ''
self.statestr_send()
def on_update(self) -> None:
AC.on_update(self)
if not self.running():
return
if self.stepi in self.steps:
self.steps[self.stepi].update(self)
else:
logging.info('Done')
self.done()
def step_done(self) -> None:
logging.info(f'Step {self.stepi} done, '
f'going to step {self.stepi+1}...')
self.stepi += 1
self.send_step()
self.on_update()
def send_step(self) -> None:
if self.stepi in self.steps.keys():
if self.running():
description = self.steps[self.stepi].disp_str()
self.statestr = f'Aktuální krok: {self.stepi}: {description}'
self.statestr_send()
def on_block_change(self, block: ac.Block) -> None:
if (self.running() and
isinstance(self.steps[self.stepi], StepWaitForBlock)):
self.steps[self.stepi].on_block_change(self, block) # type: ignore
@ac.blocks.on_block_change()
def _on_block_change(block: ac.Block) -> None:
for acn in ACs.values():
if isinstance(acn, DanceAC):
acn.on_block_change(block)
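# Illustrative usage sketch (not part of the original module): a dance is a dict of
# numbered steps passed to DanceAC. The JC and block names below are hypothetical
# placeholders; actually starting the AC requires the surrounding ac library setup.
if __name__ == '__main__':
    example_steps: Dict[int, Step] = {
        1: StepJC('Ko1 > Ko2'),
        2: StepWaitForBlock('Track 1', track_is_occupied),
        3: StepDelay(datetime.timedelta(seconds=10)),
    }
    for number, step in example_steps.items():
        print(number, step.disp_str())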
| [((212, 1, 212, 28), 'ac.blocks.on_block_change', 'ac.blocks.on_block_change', ({}, {}), '()', False, 'import ac\n'), ((214, 15, 214, 27), 'ac.ACs.values', 'ACs.values', ({}, {}), '()', False, 'from ac import ACs, AC\n'), ((158, 8, 158, 40), 'ac.AC.__init__', 'AC.__init__', ({(158, 20, 158, 24): 'self', (158, 26, 158, 29): 'id_', (158, 31, 158, 39): 'password'}, {}), '(self, id_, password)', False, 'from ac import ACs, AC\n'), ((163, 8, 163, 29), 'logging.info', 'logging.info', ({(163, 21, 163, 28): '"""Start"""'}, {}), "('Start')", False, 'import logging\n'), ((182, 8, 182, 26), 'ac.AC.on_update', 'AC.on_update', ({(182, 21, 182, 25): 'self'}, {}), '(self)', False, 'from ac import ACs, AC\n'), ((193, 8, 194, 56), 'logging.info', 'logging.info', ({(193, 21, 194, 55): 'f"""Step {self.stepi} done, going to step {self.stepi + 1}..."""'}, {}), "(f'Step {self.stepi} done, going to step {self.stepi + 1}...')", False, 'import logging\n'), ((91, 11, 91, 34), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n'), ((131, 12, 131, 52), 'ac.blocks.unregister', 'ac.blocks.unregister', ({(131, 33, 131, 51): "[self.block['id']]"}, {}), "([self.block['id']])", False, 'import ac\n'), ((189, 12, 189, 32), 'logging.info', 'logging.info', ({(189, 25, 189, 31): '"""Done"""'}, {}), "('Done')", False, 'import logging\n'), ((90, 26, 90, 49), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n'), ((121, 16, 121, 54), 'ac.blocks.register', 'ac.blocks.register', ({(121, 35, 121, 53): "[self.block['id']]"}, {}), "([self.block['id']])", False, 'import ac\n')] |
zachwylde00/praw | praw/models/reddit/mixins/reportable.py | ad1d73e6a4a33397bbd983bdfde1a4f99ce5607d | """Provide the ReportableMixin class."""
from ....const import API_PATH
class ReportableMixin:
"""Interface for RedditBase classes that can be reported."""
def report(self, reason):
"""Report this object to the moderators of its subreddit.
:param reason: The reason for reporting.
Raises :class:`.APIException` if ``reason`` is longer than 100
characters.
Example usage:
.. code-block:: python
submission = reddit.submission(id='5or86n')
submission.report('report reason')
comment = reddit.comment(id='dxolpyc')
comment.report('report reason')
"""
self._reddit.post(
API_PATH["report"], data={"id": self.fullname, "reason": reason}
)
| [] |
TrustworthyDL/LeBA | defense/jpeg_compress.py | 3289c1330585f438dc5b931951cbb682c5513053 | from io import BytesIO
import torch
from PIL import Image
from torchvision.transforms import ToPILImage, ToTensor
def _jpeg_compression(im):
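    # Round-trip the image tensor through an in-memory JPEG (quality 75) and back.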
assert torch.is_tensor(im)
im = ToPILImage()(im)
savepath = BytesIO()
im.save(savepath, 'JPEG', quality=75)
im = Image.open(savepath)
im = ToTensor()(im)
return im | [] |
LaudateCorpus1/mellon | mellon/factories/filesystem/file.py | a7a9f6d8abf1dd03b63a94ddb4439c6cc6c2e272 | import collections
import os.path
from zope import component
from zope import interface
from zope.component.factory import Factory
from sparc.configuration import container
import mellon
@interface.implementer(mellon.IByteMellonFile)
class MellonByteFileFromFilePathAndConfig(object):
def __init__(self, file_path, config):
self.file_path = file_path
self.config = config
def __str__(self):
return "byte file at location {}".format(self.file_path)
def __iter__(self):
with open(self.file_path, 'rb') as stream:
file_ = component.createObject(u'mellon.byte_file_from_stream', stream, self.config)
for snippet in file_:
yield snippet
mellonByteFileFromFilePathAndConfigFactory = Factory(MellonByteFileFromFilePathAndConfig)
@interface.implementer(mellon.IUnicodeMellonFile)
class MellonUnicodeFileFromFilePathAndConfig(object):
def __init__(self, file_path, config):
self.file_path = file_path
self.config = config
def __str__(self):
return "Unicode file at location {}".format(self.file_path)
def __iter__(self):
_end = 0
_buffer = collections.deque()
_eof_buffer = collections.deque()
with open(str(self.file_path), 'rU') as stream:
file_ = component.createObject(u'mellon.unicode_file_from_stream', stream, self.config)
for snippet in file_:
yield snippet
mellonUnicodeFileFromFilePathAndConfigFactory = Factory(MellonUnicodeFileFromFilePathAndConfig)
@interface.implementer(mellon.IMellonFileProvider)
class MellonFileProviderForRecursiveDirectoryConfig(object):
def __init__(self, config):
"""Init
Args:
config: sparc.configuration.container.ISparcAppPyContainerConfiguration
provider with
mellon.factories.filesystem[configure.yaml:FileSystemDir]
and mellon[configure.yaml:MellonSnippet] entries.
"""
self.config = config
def __iter__(self):
base_path = container.IPyContainerConfigValue(self.config).\
get('FileSystemDir')['directory']
for d, dirs, files in os.walk(base_path):
for f in files:
path = os.path.join(d, f)
if not os.path.isfile(path):
continue
#get interface-assigned string (IPath)
path = component.createObject(u'mellon.filesystem_path', path)
if mellon.IBinaryChecker(path).check():
yield component.createObject(\
u'mellon.factories.filesystem.byte_file', path, self.config)
else:
yield component.createObject(\
u'mellon.factories.filesystem.unicode_file', path, self.config)
mellonFileProviderForRecursiveDirectoryConfigFactory = Factory(MellonFileProviderForRecursiveDirectoryConfig)
interface.alsoProvides(mellonFileProviderForRecursiveDirectoryConfigFactory, mellon.IMellonFileProviderFactory)
| [((9, 1, 9, 46), 'zope.interface.implementer', 'interface.implementer', ({(9, 23, 9, 45): 'mellon.IByteMellonFile'}, {}), '(mellon.IByteMellonFile)', False, 'from zope import interface\n'), ((24, 45, 24, 89), 'zope.component.factory.Factory', 'Factory', ({(24, 53, 24, 88): 'MellonByteFileFromFilePathAndConfig'}, {}), '(MellonByteFileFromFilePathAndConfig)', False, 'from zope.component.factory import Factory\n'), ((26, 1, 26, 49), 'zope.interface.implementer', 'interface.implementer', ({(26, 23, 26, 48): 'mellon.IUnicodeMellonFile'}, {}), '(mellon.IUnicodeMellonFile)', False, 'from zope import interface\n'), ((44, 48, 44, 95), 'zope.component.factory.Factory', 'Factory', ({(44, 56, 44, 94): 'MellonUnicodeFileFromFilePathAndConfig'}, {}), '(MellonUnicodeFileFromFilePathAndConfig)', False, 'from zope.component.factory import Factory\n'), ((46, 1, 46, 50), 'zope.interface.implementer', 'interface.implementer', ({(46, 23, 46, 49): 'mellon.IMellonFileProvider'}, {}), '(mellon.IMellonFileProvider)', False, 'from zope import interface\n'), ((77, 55, 77, 109), 'zope.component.factory.Factory', 'Factory', ({(77, 63, 77, 108): 'MellonFileProviderForRecursiveDirectoryConfig'}, {}), '(MellonFileProviderForRecursiveDirectoryConfig)', False, 'from zope.component.factory import Factory\n'), ((78, 0, 78, 111), 'zope.interface.alsoProvides', 'interface.alsoProvides', ({(78, 23, 78, 75): 'mellonFileProviderForRecursiveDirectoryConfigFactory', (78, 77, 78, 110): 'mellon.IMellonFileProviderFactory'}, {}), '(mellonFileProviderForRecursiveDirectoryConfigFactory,\n mellon.IMellonFileProviderFactory)', False, 'from zope import interface\n'), ((38, 18, 38, 37), 'collections.deque', 'collections.deque', ({}, {}), '()', False, 'import collections\n'), ((39, 22, 39, 41), 'collections.deque', 'collections.deque', ({}, {}), '()', False, 'import collections\n'), ((21, 20, 21, 96), 'zope.component.createObject', 'component.createObject', ({(21, 43, 21, 74): 'u"""mellon.byte_file_from_stream"""', (21, 76, 21, 82): 'stream', (21, 84, 21, 95): 'self.config'}, {}), "(u'mellon.byte_file_from_stream', stream, self.config)", False, 'from zope import component\n'), ((41, 20, 41, 99), 'zope.component.createObject', 'component.createObject', ({(41, 43, 41, 77): 'u"""mellon.unicode_file_from_stream"""', (41, 79, 41, 85): 'stream', (41, 87, 41, 98): 'self.config'}, {}), "(u'mellon.unicode_file_from_stream', stream, self.config)", False, 'from zope import component\n'), ((69, 23, 69, 78), 'zope.component.createObject', 'component.createObject', ({(69, 46, 69, 71): 'u"""mellon.filesystem_path"""', (69, 73, 69, 77): 'path'}, {}), "(u'mellon.filesystem_path', path)", False, 'from zope import component\n'), ((61, 20, 61, 66), 'sparc.configuration.container.IPyContainerConfigValue', 'container.IPyContainerConfigValue', ({(61, 54, 61, 65): 'self.config'}, {}), '(self.config)', False, 'from sparc.configuration import container\n'), ((70, 19, 70, 46), 'mellon.IBinaryChecker', 'mellon.IBinaryChecker', ({(70, 41, 70, 45): 'path'}, {}), '(path)', False, 'import mellon\n'), ((71, 26, 72, 84), 'zope.component.createObject', 'component.createObject', ({(72, 24, 72, 64): 'u"""mellon.factories.filesystem.byte_file"""', (72, 66, 72, 70): 'path', (72, 72, 72, 83): 'self.config'}, {}), "(u'mellon.factories.filesystem.byte_file', path, self\n .config)", False, 'from zope import component\n'), ((74, 26, 75, 87), 'zope.component.createObject', 'component.createObject', ({(75, 24, 75, 67): 'u"""mellon.factories.filesystem.unicode_file"""', (75, 69, 75, 
73): 'path', (75, 75, 75, 86): 'self.config'}, {}), "(u'mellon.factories.filesystem.unicode_file', path,\n self.config)", False, 'from zope import component\n')] |
CogSciUOS/DeepLearningToolbox | dltb/thirdparty/datasource/__init__.py | bf07578b9486d8c48e25df357bc4b9963b513b46 | """Predefined Datasources.
"""
# toolbox imports
from ...datasource import Datasource
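# Each register_instance call below binds a lookup key to the module and class that
# implement it; extra keyword arguments (e.g. section='val', aligned=True) are
# presumably forwarded to the constructor when the datasource is eventually instantiated.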
Datasource.register_instance('imagenet-val', __name__ + '.imagenet',
'ImageNet', section='val') # section='train'
Datasource.register_instance('dogsandcats', __name__ + '.dogsandcats',
'DogsAndCats')
Datasource.register_instance('widerface', __name__ + '.widerface', 'WiderFace')
Datasource.register_instance('fgnet', __name__ + '.fgnet', 'FGNet')
Datasource.register_instance('Helen', __name__ + '.helen', 'Helen')
Datasource.register_instance('lfw', __name__ + '.lfw', 'LabeledFacesInTheWild')
Datasource.register_instance('ms-celeb-1m', __name__ + '.face', 'MSCeleb1M')
Datasource.register_instance('5celeb', __name__ + '.fivecelebface',
'FiveCelebFace')
Datasource.register_instance('ffhq', __name__ + '.ffhq', 'FFHQ')
Datasource.register_instance('celeba', __name__ + '.celeba', 'CelebA')
Datasource.register_instance('celeba-aligned', __name__ + '.celeba',
'CelebA', aligned=True)
Datasource.register_class('WiderFace', __name__ + '.widerface')
| [] |
babinyurii/RECAN | tests/test_results.py | b49326b47bae22316c3776fee2f398e09a98ba96 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 22 15:58:44 2019
@author: babin
"""
posits_def = [251, 501, 751, 1001, 1251, 1501, 1751, 2001, 2251, 2501, 2751, 3001, 3215]
dist_whole_align_ref = {'AB048704.1_genotype_C_':
[0.88,
0.938,
0.914,
0.886,
0.89,
0.908,
0.938,
0.948,
0.948,
0.886,
0.852,
0.8580645161290322,
0.827906976744186],
'AB010291.1_Bj':
[0.968,
0.986,
0.946,
0.92,
0.94,
0.964,
0.95,
0.892,
0.914,
0.9359999999999999,
0.924,
0.935483870967742,
0.9255813953488372]}
dist_win_250_shift_100_ref = {'AB048704.1_genotype_C_':
[0.87,
0.9,
0.9359999999999999,
0.924,
0.944,
0.944,
0.948,
0.888,
0.868,
0.86,
0.888,
0.9,
0.908,
0.88,
0.916,
0.924,
0.94,
0.96,
0.948,
0.9319999999999999,
0.944,
0.9359999999999999,
0.96,
0.9319999999999999,
0.864,
0.8200000000000001,
0.88,
0.892,
0.88,
0.844,
0.827906976744186,
0.8608695652173913,
0.9333333333333333],
'AB010291.1_Bj': [0.95,
0.984,
0.988,
0.984,
0.98,
0.98,
0.98,
0.92,
0.896,
0.888,
0.928,
0.94,
0.96,
0.948,
0.976,
0.976,
0.968,
0.952,
0.896,
0.844,
0.86,
0.908,
0.976,
0.948,
0.916,
0.904,
0.9359999999999999,
0.948,
0.94,
0.9359999999999999,
0.9255813953488372,
0.9217391304347826,
0.8666666666666667]}
dist_whole_align_def_params_k2p = {'AB048704.1_genotype_C_':
[0.8681719101219889,
0.9351731626008992,
0.9083728156043438,
0.8750271283550077,
0.879929128403318,
0.9015597329057567,
0.9351297624958606,
0.9459250442159328,
0.9459717143364927,
0.8760802380420646,
0.8343273948904422,
0.841497348083017,
0.8033200314745574],
'AB010291.1_Bj':
[0.9671530980992109,
0.9858456107911616,
0.9438329817983037,
0.9150569322625627,
0.9372918193486423,
0.9630251291666885,
0.9481456308045444,
0.8823622232289046,
0.9077377632214376,
0.9325670957791264,
0.919398127767968,
0.9323907045444492,
0.9211964811945209]}
| [] |
SimonSuster/lxmls-toolkit | lxmls/readers/simple_data_set.py | 6a57884f8b7c98da816a60eb88593e0a1585d434 | import numpy as np
# This class generates a 2D dataset with two classes, "positive" and "negative".
# Each class follows a Gaussian distribution.
class SimpleDataSet():
    ''' A simple two-dimensional dataset for visualization purposes. The data set contains points from two Gaussians with means u_i and standard deviations std_i'''
def __init__(self,nr_examples=100,g1 = [[-5,-5],1], g2 = [[5,5],1],balance=0.5,split=[0.8,0,0.2]):
        nr_positive = int(nr_examples*balance) # number of examples of "positive" class
nr_negative = nr_examples - nr_positive # number of examples of "negative" class
self.mean1 = g1[0] # mean of positive class
self.mean2 = g2[0] # mean of negative class
self.variance1 = g1[1] #
self.variance2 = g2[1]
self.balance = balance
self.nr_points = nr_examples
X_pos_1 = np.random.normal(g1[0][0],g1[1],[nr_positive,1])
X_pos_2 = np.random.normal(g1[0][1],g1[1],[nr_positive,1])
X_pos = np.hstack([X_pos_1,X_pos_2])
X_neg_1 = np.random.normal(g2[0][0],g2[1],[nr_negative,1])
X_neg_2 = np.random.normal(g2[0][1],g2[1],[nr_negative,1])
X_neg = np.hstack([X_neg_1,X_neg_2])
        y_pos = np.zeros([nr_positive,1],dtype=int)
        y_neg = np.ones([nr_negative,1],dtype=int)
X = np.vstack([X_pos, X_neg])
y = np.vstack([y_pos, y_neg])
perm = np.random.permutation(nr_examples)
self.split = split
self.X = X[perm,:]
self.y = y[perm]
train_y,dev_y,test_y,train_X,dev_X,test_X = split_train_dev_test(self.X,self.y,split[0],split[1],split[2])
self.train_X = train_X
self.train_y = train_y
self.dev_X = dev_X
self.dev_y = dev_y
self.test_X = test_X
self.test_y = test_y
def get_name(self):
        return "Simple Data Set -- Mean1= (%.2f,%.2f) Var1 = %.2f Mean2= (%.2f,%.2f) Var2= %.2f \nNr. Points=%.2f, Balance=%.2f Train-Dev-Test (%.2f,%.2f,%.2f)"%(self.mean1[0], self.mean1[1], self.variance1, self.mean2[0], self.mean2[1], self.variance2, self.nr_points, self.balance, self.split[0],self.split[1],self.split[2])
def get_bayes_optimal(self):
params = np.zeros((3,2))
p1 = self.balance
p2 = 1.0 - self.balance
params[0,0] = -1.0/(2.0*self.variance1) * np.dot(self.mean1,self.mean1) + np.log(p1)
params[0,1] = -1.0/(2.0*self.variance2) * np.dot(self.mean2,self.mean2) + np.log(p2)
params[1,0] = 1.0/self.variance1 * self.mean1[0]
params[2,0] = 1.0/self.variance1 * self.mean1[1]
params[1,1] = 1.0/self.variance2 * self.mean2[0]
params[2,1] = 1.0/self.variance2 * self.mean2[1]
        print(params)
return params
def plot_data(self,params=np.array([]),name="Naive Bayes", print_bayes_opt = True):
import matplotlib.pyplot as plt
fig = plt.figure()
fig.suptitle(self.get_name())
axis = fig.add_subplot(1,1,1)
idx,_ = np.nonzero(self.train_y == 0)
idx2,_ = np.nonzero(self.train_y == 1)
idx3,_ = np.nonzero(self.test_y == 0)
idx4,_ = np.nonzero(self.test_y == 1)
axis.scatter(self.train_X[idx,0],self.train_X[idx,1],s=30,c="red",marker='s')
axis.scatter(self.train_X[idx2,0],self.train_X[idx2,1],s=30,c="blue",marker='s')
if(idx3.shape[0] > 0):
axis.scatter(self.test_X[idx3,0],self.test_X[idx3,1],s=30,c="red",marker='o')
if(idx4.shape[0] > 0):
axis.scatter(self.test_X[idx4,0],self.test_X[idx4,1],s=30,c="blue",marker='o')
## Plot Bayes optimal
if(print_bayes_opt):
bayes_opt_params = self.get_bayes_optimal()
self.add_line(fig,axis,bayes_opt_params, "Bayes Optimal","black")
axis.legend()
# fig.show()
return fig,axis
def add_line(self,fig,axis,params,name,colour):
x_max = np.max(self.train_X)
x_min = np.min(self.train_X)
x = np.arange(x_min,x_max,0.1,dtype = "float")
y_star = ((params[1,1]-params[1,0])*x + (params[0,1] - params[0,0]))/(params[2,0] -params[2,1])
axis.plot(x,y_star,'g--',c=colour, label=name, linewidth=2)
axis.legend()
# fig.show()
return fig,axis
def split_train_dev_test(X,y,train_per,dev_per,test_per):
if(train_per + dev_per + test_per > 1):
        print("Train Dev Test split should sum to one")
return
dim = y.shape[0]
split1 = int(dim*train_per)
if(dev_per ==0):
train_y,test_y = np.vsplit(y,[split1])
dev_y = np.array([])
train_X = X[0:split1,:]
dev_X = np.array([])
test_X = X[split1:,:]
else:
split2 = int(dim*(train_per+dev_per))
        print(split2)
train_y,dev_y,test_y = np.vsplit(y,(split1,split2))
train_X = X[0:split1,:]
dev_X = X[split1:split2,:]
test_X = X[split2:,:]
return train_y,dev_y,test_y,train_X,dev_X,test_X
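if __name__ == "__main__":
    # Quick illustrative check (not part of the original module): build the default
    # dataset and report how many points landed in each split.
    ds = SimpleDataSet()
    print(ds.get_name())
    print("train/dev/test sizes: %d/%d/%d" % (len(ds.train_y), len(ds.dev_y), len(ds.test_y)))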
| [] |
kangtastic/cryptopals | set1/c06_attack_repeating_key_xor.py | 7014a08b836b3f9ebfdc889123ccf67406738dac | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Break repeating-key XOR
#
# It is officially on, now.
#
# This challenge isn't conceptually hard, but it involves actual
# error-prone coding. The other challenges in this set are there to bring
# you up to speed. This one is there to qualify you. If you can do this
# one, you're probably just fine up to Set 6.
#
# There's a file here:
#
# http://cryptopals.com/static/challenge-data/6.txt
#
# It's been base64'd after being encrypted with repeating-key XOR.
#
# Decrypt it.
#
# Here's how:
#
# 1. Let KEYSIZE be the guessed length of the key; try values from 2 to
# (say) 40.
# 2. Write a function to compute the edit distance/Hamming distance between
# two strings. The Hamming distance is just the number of differing
# bits. The distance between:
#
# this is a test
#
# and
#
# wokka wokka!!!
#
# is 37. *Make sure your code agrees before you proceed.*
# 3. For each KEYSIZE, take the first KEYSIZE worth of bytes, and the
# second KEYSIZE worth of bytes, and find the edit distance between them.
# Normalize this result by dividing by KEYSIZE.
# 4. The KEYSIZE with the smallest normalized edit distance is probably the
# key. You could proceed perhaps with the smallest 2-3 KEYSIZE values.
# Or take 4 KEYSIZE blocks instead of 2 and average the distances.
# 5. Now that you probably know the KEYSIZE: break the ciphertext into
# blocks of KEYSIZE length.
# 6. Now transpose the blocks: make a block that is the first byte of every
# block, and a block that is the second byte of every block, and so on.
# 7. Solve each block as if it was single-character XOR. You already have
# code to do this.
# 8. For each block, the single-byte XOR key that produces the best looking
# histogram is the repeating-key XOR key byte for that block. Put them
# together and you have the key.
#
# This code is going to turn out to be surprisingly useful later on. Breaking
# repeating-key XOR ("Vigenère") statistically is obviously an academic
# exercise, a "Crypto 101" thing. But more people "know how" to break it than
# can actually break it, and a similar technique breaks something much more
# important.
#
# No, that's not a mistake.
#
# We get more tech support questions for this challenge than any of the
# other ones. We promise, there aren't any blatant errors in this text.
# In particular: the "wokka wokka!!!" edit distance really is 37.
#
import inspect
import os
import sys
from itertools import zip_longest
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(inspect.getfile(lambda: 0)))))
from util.loader import loader
from util.text import englishness, repeating_key_xor, single_byte_xor
# Lookup table for the number of 1 bits in a nibble. (Nybble, quartet, etc.)
NIBBLE_BITS = [0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4]
def likely_key_sizes(bs, lower=2, upper=40, n=3):
"""Finds a repeating-key-XOR'd ciphertext's most likely key sizes."""
sizes = {}
for size in range(lower, upper + 1):
normalized_distance = 0
for i in range(0, len(bs) - size * 2, size * 2):
bs1, bs2 = bs[i : i + size], bs[i + size : i + size * 2]
normalized_distance += hamming_distance(bs1, bs2) / 2
sizes.update({size: normalized_distance})
return sorted(sizes, key=lambda k: sizes[k])[:n]
def hamming_distance(bs1, bs2):
"""Finds the Hamming distance between two bytestrings."""
distance = 0
for b1, b2 in zip_longest(bs1, bs2, fillvalue=0):
b = b1 ^ b2
distance += NIBBLE_BITS[b >> 4] + NIBBLE_BITS[b & 0xF]
return distance
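# Illustrative self-check (not part of the original solution): the challenge text above
# claims the edit distance between these two strings is 37; hamming_distance() should agree.
def _hamming_distance_sanity_check():
    assert hamming_distance(b"this is a test", b"wokka wokka!!!") == 37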
def main():
ctext = loader("6.txt", "base64", split=False)
ptext, key, high_score = b"", b"", 0
for size in likely_key_sizes(ctext):
blocks = [ctext[i : i + size] for i in range(0, len(ctext), size)]
transposed = zip_longest(*blocks, fillvalue=0)
likely_key = b"".join(
single_byte_xor(tblock, key=True) for tblock in transposed
)
candidate = repeating_key_xor(ctext, likely_key)
score = englishness(candidate)
if score > high_score:
ptext, key, high_score = candidate, likely_key, score
print(f"Key: '{key.decode()}'")
print()
print(ptext.decode())
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
# Output:
#
# Key: 'Terminator X: Bring the noise' (29 bytes)
#
# I'm back and I'm ringin' the bell
# A rockin' on the mike while the fly girls yell
# In ecstasy in the back of me
# Well that's my DJ Deshay cuttin' all them Z's
# Hittin' hard and the girlies goin' crazy
# Vanilla's on the mike, man I'm not lazy.
#
# <remainder of output omitted>
#
| [((99, 18, 99, 52), 'itertools.zip_longest', 'zip_longest', (), '', False, 'from itertools import zip_longest\n'), ((107, 12, 107, 50), 'util.loader.loader', 'loader', (), '', False, 'from util.loader import loader\n'), ((113, 21, 113, 54), 'itertools.zip_longest', 'zip_longest', (), '', False, 'from itertools import zip_longest\n'), ((119, 20, 119, 56), 'util.text.repeating_key_xor', 'repeating_key_xor', ({(119, 38, 119, 43): 'ctext', (119, 45, 119, 55): 'likely_key'}, {}), '(ctext, likely_key)', False, 'from util.text import englishness, repeating_key_xor, single_byte_xor\n'), ((120, 16, 120, 38), 'util.text.englishness', 'englishness', ({(120, 28, 120, 37): 'candidate'}, {}), '(candidate)', False, 'from util.text import englishness, repeating_key_xor, single_byte_xor\n'), ((69, 64, 69, 90), 'inspect.getfile', 'inspect.getfile', ({(69, 80, 69, 89): '(lambda : 0)'}, {}), '(lambda : 0)', False, 'import inspect\n'), ((116, 12, 116, 45), 'util.text.single_byte_xor', 'single_byte_xor', (), '', False, 'from util.text import englishness, repeating_key_xor, single_byte_xor\n')] |
kopf-yhs/ncscos | c2nl/models/transformer.py | 8248aaad32d4d19c01d070bf0dfba7aab849ba1d | import torch
import torch.nn as nn
import torch.nn.functional as f
from prettytable import PrettyTable
from c2nl.modules.char_embedding import CharEmbedding
from c2nl.modules.embeddings import Embeddings
from c2nl.modules.highway import Highway
from c2nl.encoders.transformer import TransformerEncoder
from c2nl.decoders.transformer import TransformerDecoder
from c2nl.inputters import constants
from c2nl.modules.global_attention import GlobalAttention
from c2nl.modules.copy_generator import CopyGenerator, CopyGeneratorCriterion
from c2nl.utils.misc import sequence_mask
class Embedder(nn.Module):
def __init__(self, args):
super(Embedder, self).__init__()
self.enc_input_size = 0
self.dec_input_size = 0
# at least one of word or char embedding options should be True
assert args.use_src_word or args.use_src_char
assert args.use_tgt_word or args.use_tgt_char
self.use_src_word = args.use_src_word
self.use_tgt_word = args.use_tgt_word
if self.use_src_word:
self.src_word_embeddings = Embeddings(args.emsize,
args.src_vocab_size,
constants.PAD)
self.enc_input_size += args.emsize
if self.use_tgt_word:
self.tgt_word_embeddings = Embeddings(args.emsize,
args.tgt_vocab_size,
constants.PAD)
self.dec_input_size += args.emsize
self.use_src_char = args.use_src_char
self.use_tgt_char = args.use_tgt_char
if self.use_src_char:
assert len(args.filter_size) == len(args.nfilters)
self.src_char_embeddings = CharEmbedding(args.n_characters,
args.char_emsize,
args.filter_size,
args.nfilters)
self.enc_input_size += sum(list(map(int, args.nfilters)))
self.src_highway_net = Highway(self.enc_input_size, num_layers=2)
if self.use_tgt_char:
assert len(args.filter_size) == len(args.nfilters)
self.tgt_char_embeddings = CharEmbedding(args.n_characters,
args.char_emsize,
args.filter_size,
args.nfilters)
self.dec_input_size += sum(list(map(int, args.nfilters)))
self.tgt_highway_net = Highway(self.dec_input_size, num_layers=2)
self.use_type = args.use_code_type
if self.use_type:
self.type_embeddings = nn.Embedding(len(constants.TOKEN_TYPE_MAP),
self.enc_input_size)
self.src_pos_emb = args.src_pos_emb
self.tgt_pos_emb = args.tgt_pos_emb
self.no_relative_pos = all(v == 0 for v in args.max_relative_pos)
if self.src_pos_emb and self.no_relative_pos:
self.src_pos_embeddings = nn.Embedding(args.max_src_len,
self.enc_input_size)
if self.tgt_pos_emb:
self.tgt_pos_embeddings = nn.Embedding(args.max_tgt_len + 2,
self.dec_input_size)
self.dropout = nn.Dropout(args.dropout_emb)
def forward(self,
sequence,
sequence_char,
sequence_type=None,
mode='encoder',
step=None):
if mode == 'encoder':
word_rep = None
if self.use_src_word:
word_rep = self.src_word_embeddings(sequence.unsqueeze(2)) # B x P x d
if self.use_src_char:
char_rep = self.src_char_embeddings(sequence_char) # B x P x f
if word_rep is None:
word_rep = char_rep
else:
word_rep = torch.cat((word_rep, char_rep), 2) # B x P x d+f
word_rep = self.src_highway_net(word_rep) # B x P x d+f
if self.use_type:
type_rep = self.type_embeddings(sequence_type)
word_rep = word_rep + type_rep
if self.src_pos_emb and self.no_relative_pos:
pos_enc = torch.arange(start=0,
end=word_rep.size(1)).type(torch.LongTensor)
pos_enc = pos_enc.expand(*word_rep.size()[:-1])
if word_rep.is_cuda:
pos_enc = pos_enc.cuda()
pos_rep = self.src_pos_embeddings(pos_enc)
word_rep = word_rep + pos_rep
elif mode == 'decoder':
word_rep = None
if self.use_tgt_word:
word_rep = self.tgt_word_embeddings(sequence.unsqueeze(2)) # B x P x d
if self.use_tgt_char:
char_rep = self.tgt_char_embeddings(sequence_char) # B x P x f
if word_rep is None:
word_rep = char_rep
else:
word_rep = torch.cat((word_rep, char_rep), 2) # B x P x d+f
word_rep = self.tgt_highway_net(word_rep) # B x P x d+f
if self.tgt_pos_emb:
if step is None:
pos_enc = torch.arange(start=0,
end=word_rep.size(1)).type(torch.LongTensor)
else:
pos_enc = torch.LongTensor([step]) # used in inference time
pos_enc = pos_enc.expand(*word_rep.size()[:-1])
if word_rep.is_cuda:
pos_enc = pos_enc.cuda()
pos_rep = self.tgt_pos_embeddings(pos_enc)
word_rep = word_rep + pos_rep
else:
raise ValueError('Unknown embedder mode!')
word_rep = self.dropout(word_rep)
return word_rep
class Encoder(nn.Module):
def __init__(self,
args,
input_size):
super(Encoder, self).__init__()
self.transformer = TransformerEncoder(num_layers=args.nlayers,
d_model=input_size,
heads=args.num_head,
d_k=args.d_k,
d_v=args.d_v,
d_ff=args.d_ff,
dropout=args.trans_drop,
max_relative_positions=args.max_relative_pos,
use_neg_dist=args.use_neg_dist)
self.use_all_enc_layers = args.use_all_enc_layers
if self.use_all_enc_layers:
self.layer_weights = nn.Linear(input_size, 1, bias=False)
def count_parameters(self):
return self.transformer.count_parameters()
def forward(self,
input,
input_len):
layer_outputs, _ = self.transformer(input, input_len) # B x seq_len x h
if self.use_all_enc_layers:
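            # Combine the per-layer outputs: learn one scalar score per encoder layer and
            # take a softmax-weighted sum over all layers instead of using only the last one.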
output = torch.stack(layer_outputs, dim=2) # B x seq_len x nlayers x h
layer_scores = self.layer_weights(output).squeeze(3)
layer_scores = f.softmax(layer_scores, dim=-1)
memory_bank = torch.matmul(output.transpose(2, 3),
layer_scores.unsqueeze(3)).squeeze(3)
else:
memory_bank = layer_outputs[-1]
return memory_bank, layer_outputs
class Decoder(nn.Module):
def __init__(self, args, input_size):
super(Decoder, self).__init__()
self.input_size = input_size
self.split_decoder = args.split_decoder and args.copy_attn
if self.split_decoder:
# Following (https://arxiv.org/pdf/1808.07913.pdf), we split decoder
self.transformer_c = TransformerDecoder(
num_layers=args.nlayers,
d_model=self.input_size,
heads=args.num_head,
d_k=args.d_k,
d_v=args.d_v,
d_ff=args.d_ff,
coverage_attn=args.coverage_attn,
dropout=args.trans_drop
)
self.transformer_d = TransformerDecoder(
num_layers=args.nlayers,
d_model=self.input_size,
heads=args.num_head,
d_k=args.d_k,
d_v=args.d_v,
d_ff=args.d_ff,
dropout=args.trans_drop
)
# To accomplish eq. 19 - 21 from `https://arxiv.org/pdf/1808.07913.pdf`
self.fusion_sigmoid = nn.Sequential(
nn.Linear(self.input_size * 2, self.input_size),
nn.Sigmoid()
)
self.fusion_gate = nn.Sequential(
nn.Linear(self.input_size * 2, self.input_size),
nn.ReLU()
)
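            # As wired up in decode() below: f_t = sigmoid(W_f [copier_out; dec_out]) gates the
            # plain decoder stream, and the fused state is ReLU(W_g [copier_out; f_t * dec_out]),
            # following eqs. 19-21 of the paper referenced above.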
else:
self.transformer = TransformerDecoder(
num_layers=args.nlayers,
d_model=self.input_size,
heads=args.num_head,
d_k=args.d_k,
d_v=args.d_v,
d_ff=args.d_ff,
coverage_attn=args.coverage_attn,
dropout=args.trans_drop
)
if args.reload_decoder_state:
state_dict = torch.load(
args.reload_decoder_state, map_location=lambda storage, loc: storage
)
self.decoder.load_state_dict(state_dict)
def count_parameters(self):
if self.split_decoder:
return self.transformer_c.count_parameters() + self.transformer_d.count_parameters()
else:
return self.transformer.count_parameters()
def init_decoder(self,
src_lens,
max_src_len):
if self.split_decoder:
state_c = self.transformer_c.init_state(src_lens, max_src_len)
state_d = self.transformer_d.init_state(src_lens, max_src_len)
return state_c, state_d
else:
return self.transformer.init_state(src_lens, max_src_len)
def decode(self,
tgt_words,
tgt_emb,
memory_bank,
state,
step=None,
layer_wise_coverage=None):
if self.split_decoder:
copier_out, attns = self.transformer_c(tgt_words,
tgt_emb,
memory_bank,
state[0],
step=step,
layer_wise_coverage=layer_wise_coverage)
dec_out, _ = self.transformer_d(tgt_words,
tgt_emb,
memory_bank,
state[1],
step=step)
f_t = self.fusion_sigmoid(torch.cat([copier_out, dec_out], dim=-1))
gate_input = torch.cat([copier_out, torch.mul(f_t, dec_out)], dim=-1)
decoder_outputs = self.fusion_gate(gate_input)
else:
decoder_outputs, attns = self.transformer(tgt_words,
tgt_emb,
memory_bank,
state,
step=step,
layer_wise_coverage=layer_wise_coverage)
return decoder_outputs, attns
def forward(self,
memory_bank,
memory_len,
tgt_pad_mask,
tgt_emb):
max_mem_len = memory_bank[0].shape[1] \
if isinstance(memory_bank, list) else memory_bank.shape[1]
state = self.init_decoder(memory_len, max_mem_len)
return self.decode(tgt_pad_mask, tgt_emb, memory_bank, state)
class Transformer(nn.Module):
    """Module that generates a natural language summary for a given source code sequence."""
def __init__(self, args, tgt_dict):
""""Constructor of the class."""
super(Transformer, self).__init__()
self.name = 'Transformer'
if len(args.max_relative_pos) != args.nlayers:
assert len(args.max_relative_pos) == 1
args.max_relative_pos = args.max_relative_pos * args.nlayers
self.embedder = Embedder(args)
self.encoder = Encoder(args, self.embedder.enc_input_size)
self.decoder = Decoder(args, self.embedder.dec_input_size)
self.layer_wise_attn = args.layer_wise_attn
self.generator = nn.Linear(self.decoder.input_size, args.tgt_vocab_size)
if args.share_decoder_embeddings:
if self.embedder.use_tgt_word:
assert args.emsize == self.decoder.input_size
self.generator.weight = self.embedder.tgt_word_embeddings.word_lut.weight
self._copy = args.copy_attn
if self._copy:
self.copy_attn = GlobalAttention(dim=self.decoder.input_size,
attn_type=args.attn_type)
self.copy_generator = CopyGenerator(self.decoder.input_size,
tgt_dict,
self.generator)
self.criterion = CopyGeneratorCriterion(vocab_size=len(tgt_dict),
force_copy=args.force_copy)
else:
self.criterion = nn.CrossEntropyLoss(reduction='none')
def _run_forward_ml(self,
code_word_rep,
code_char_rep,
code_type_rep,
code_len,
summ_word_rep,
summ_char_rep,
summ_len,
tgt_seq,
src_map,
alignment,
**kwargs):
batch_size = code_len.size(0)
# embed and encode the source sequence
code_rep = self.embedder(code_word_rep,
code_char_rep,
code_type_rep,
mode='encoder')
memory_bank, layer_wise_outputs = self.encoder(code_rep, code_len) # B x seq_len x h
# embed and encode the target sequence
summ_emb = self.embedder(summ_word_rep,
summ_char_rep,
mode='decoder')
summ_pad_mask = ~sequence_mask(summ_len, max_len=summ_emb.size(1))
enc_outputs = layer_wise_outputs if self.layer_wise_attn else memory_bank
layer_wise_dec_out, attns = self.decoder(enc_outputs,
code_len,
summ_pad_mask,
summ_emb)
decoder_outputs = layer_wise_dec_out[-1]
loss = dict()
target = tgt_seq[:, 1:].contiguous()
if self._copy:
# copy_score: batch_size, tgt_len, src_len
_, copy_score, _ = self.copy_attn(decoder_outputs,
memory_bank,
memory_lengths=code_len,
softmax_weights=False)
# mask copy_attn weights here if needed
if kwargs['code_mask_rep'] is not None:
mask = kwargs['code_mask_rep'].byte().unsqueeze(1) # Make it broadcastable.
copy_score.data.masked_fill_(mask, -float('inf'))
attn_copy = f.softmax(copy_score, dim=-1)
scores = self.copy_generator(decoder_outputs, attn_copy, src_map)
scores = scores[:, :-1, :].contiguous()
ml_loss = self.criterion(scores,
alignment[:, 1:].contiguous(),
target)
else:
scores = self.generator(decoder_outputs) # `batch x tgt_len x vocab_size`
scores = scores[:, :-1, :].contiguous() # `batch x tgt_len - 1 x vocab_size`
ml_loss = self.criterion(scores.view(-1, scores.size(2)),
target.view(-1))
ml_loss = ml_loss.view(*scores.size()[:-1])
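        # Zero out the loss at padded target positions so only real tokens contribute,
        # then reduce to one (optionally re-weighted) loss value per example.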
ml_loss = ml_loss.mul(target.ne(constants.PAD).float())
ml_loss = ml_loss.sum(1) * kwargs['example_weights']
loss['ml_loss'] = ml_loss.mean()
loss['loss_per_token'] = ml_loss.div((summ_len - 1).float()).mean()
return loss
def forward(self,
code_word_rep,
code_char_rep,
code_type_rep,
code_len,
summ_word_rep,
summ_char_rep,
summ_len,
tgt_seq,
src_map,
alignment,
**kwargs):
"""
Input:
- code_word_rep: ``(batch_size, max_doc_len)``
- code_char_rep: ``(batch_size, max_doc_len, max_word_len)``
- code_len: ``(batch_size)``
- summ_word_rep: ``(batch_size, max_que_len)``
- summ_char_rep: ``(batch_size, max_que_len, max_word_len)``
- summ_len: ``(batch_size)``
- tgt_seq: ``(batch_size, max_len)``
Output:
- ``(batch_size, P_LEN)``, ``(batch_size, P_LEN)``
"""
if self.training:
return self._run_forward_ml(code_word_rep,
code_char_rep,
code_type_rep,
code_len,
summ_word_rep,
summ_char_rep,
summ_len,
tgt_seq,
src_map,
alignment,
**kwargs)
else:
return self.decode(code_word_rep,
code_char_rep,
code_type_rep,
code_len,
src_map,
alignment,
**kwargs)
def __tens2sent(self,
t,
tgt_dict,
src_vocabs):
words = []
for idx, w in enumerate(t):
widx = w[0].item()
if widx < len(tgt_dict):
words.append(tgt_dict[widx])
else:
widx = widx - len(tgt_dict)
words.append(src_vocabs[idx][widx])
return words
def __generate_sequence(self,
params,
choice='greedy',
tgt_words=None):
batch_size = params['memory_bank'].size(0)
use_cuda = params['memory_bank'].is_cuda
if tgt_words is None:
tgt_words = torch.LongTensor([constants.BOS])
if use_cuda:
tgt_words = tgt_words.cuda()
tgt_words = tgt_words.expand(batch_size).unsqueeze(1) # B x 1
tgt_chars = None
if self.embedder.use_tgt_char:
tgt_chars = params['tgt_dict'].word_to_char_ids(constants.BOS_WORD)
tgt_chars = torch.Tensor(tgt_chars.tolist()).unsqueeze(0)
tgt_chars = tgt_chars.repeat(batch_size, 1)
tgt_chars = tgt_chars.to(tgt_words).unsqueeze(1)
dec_preds = []
copy_info = []
attentions = []
dec_log_probs = []
acc_dec_outs = []
max_mem_len = params['memory_bank'][0].shape[1] \
if isinstance(params['memory_bank'], list) else params['memory_bank'].shape[1]
dec_states = self.decoder.init_decoder(params['src_len'], max_mem_len)
attns = {"coverage": None}
enc_outputs = params['layer_wise_outputs'] if self.layer_wise_attn \
else params['memory_bank']
# +1 for <EOS> token
for idx in range(params['max_len'] + 1):
tgt = self.embedder(tgt_words,
tgt_chars,
mode='decoder',
step=idx)
tgt_pad_mask = tgt_words.data.eq(constants.PAD)
layer_wise_dec_out, attns = self.decoder.decode(tgt_pad_mask,
tgt,
enc_outputs,
dec_states,
step=idx,
layer_wise_coverage=attns['coverage'])
decoder_outputs = layer_wise_dec_out[-1]
acc_dec_outs.append(decoder_outputs.squeeze(1))
if self._copy:
_, copy_score, _ = self.copy_attn(decoder_outputs,
params['memory_bank'],
memory_lengths=params['src_len'],
softmax_weights=False)
# mask copy_attn weights here if needed
if params['src_mask'] is not None:
mask = params['src_mask'].byte().unsqueeze(1) # Make it broadcastable.
copy_score.data.masked_fill_(mask, -float('inf'))
attn_copy = f.softmax(copy_score, dim=-1)
prediction = self.copy_generator(decoder_outputs,
attn_copy,
params['src_map'])
prediction = prediction.squeeze(1)
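                # Collapse copy scores: probability mass assigned to extended-vocabulary
                # entries that also exist in the target vocabulary ('blank' indices) is added
                # onto the matching target-vocabulary ids ('fill') and then zeroed out.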
for b in range(prediction.size(0)):
if params['blank'][b]:
blank_b = torch.LongTensor(params['blank'][b])
fill_b = torch.LongTensor(params['fill'][b])
if use_cuda:
blank_b = blank_b.cuda()
fill_b = fill_b.cuda()
prediction[b].index_add_(0, fill_b,
prediction[b].index_select(0, blank_b))
prediction[b].index_fill_(0, blank_b, 1e-10)
else:
prediction = self.generator(decoder_outputs.squeeze(1))
prediction = f.softmax(prediction, dim=1)
if choice == 'greedy':
tgt_prob, tgt = torch.max(prediction, dim=1, keepdim=True)
log_prob = torch.log(tgt_prob + 1e-20)
elif choice == 'sample':
tgt, log_prob = self.reinforce.sample(prediction.unsqueeze(1))
else:
assert False
dec_log_probs.append(log_prob.squeeze(1))
dec_preds.append(tgt.squeeze(1).clone())
if "std" in attns:
# std_attn: batch_size x num_heads x 1 x src_len
std_attn = torch.stack(attns["std"], dim=1)
attentions.append(std_attn.squeeze(2))
if self._copy:
mask = tgt.gt(len(params['tgt_dict']) - 1)
copy_info.append(mask.float().squeeze(1))
words = self.__tens2sent(tgt, params['tgt_dict'], params['source_vocab'])
tgt_chars = None
if self.embedder.use_tgt_char:
tgt_chars = [params['tgt_dict'].word_to_char_ids(w).tolist() for w in words]
tgt_chars = torch.Tensor(tgt_chars).to(tgt).unsqueeze(1)
words = [params['tgt_dict'][w] for w in words]
words = torch.Tensor(words).type_as(tgt)
tgt_words = words.unsqueeze(1)
return dec_preds, attentions, copy_info, dec_log_probs
def decode(self,
code_word_rep,
code_char_rep,
code_type_rep,
code_len,
src_map,
alignment,
**kwargs):
word_rep = self.embedder(code_word_rep,
code_char_rep,
code_type_rep,
mode='encoder')
memory_bank, layer_wise_outputs = self.encoder(word_rep, code_len) # B x seq_len x h
params = dict()
params['memory_bank'] = memory_bank
params['layer_wise_outputs'] = layer_wise_outputs
params['src_len'] = code_len
params['source_vocab'] = kwargs['source_vocab']
params['src_map'] = src_map
params['src_mask'] = kwargs['code_mask_rep']
params['fill'] = kwargs['fill']
params['blank'] = kwargs['blank']
params['src_dict'] = kwargs['src_dict']
params['tgt_dict'] = kwargs['tgt_dict']
params['max_len'] = kwargs['max_len']
params['src_words'] = code_word_rep
dec_preds, attentions, copy_info, _ = self.__generate_sequence(params, choice='greedy')
dec_preds = torch.stack(dec_preds, dim=1)
copy_info = torch.stack(copy_info, dim=1) if copy_info else None
# attentions: batch_size x tgt_len x num_heads x src_len
attentions = torch.stack(attentions, dim=1) if attentions else None
return {
'predictions': dec_preds,
'copy_info': copy_info,
'memory_bank': memory_bank,
'attentions': attentions
}
def count_parameters(self):
return sum(p.numel() for p in self.parameters() if p.requires_grad)
def count_encoder_parameters(self):
return self.encoder.count_parameters()
def count_decoder_parameters(self):
return self.decoder.count_parameters()
def layer_wise_parameters(self):
table = PrettyTable()
table.field_names = ["Layer Name", "Output Shape", "Param #"]
table.align["Layer Name"] = "l"
table.align["Output Shape"] = "r"
table.align["Param #"] = "r"
for name, parameters in self.named_parameters():
if parameters.requires_grad:
table.add_row([name, str(list(parameters.shape)), parameters.numel()])
return table
| [((78, 23, 78, 51), 'torch.nn.Dropout', 'nn.Dropout', ({(78, 34, 78, 50): 'args.dropout_emb'}, {}), '(args.dropout_emb)', True, 'import torch.nn as nn\n'), ((149, 27, 157, 77), 'c2nl.encoders.transformer.TransformerEncoder', 'TransformerEncoder', (), '', False, 'from c2nl.encoders.transformer import TransformerEncoder\n'), ((315, 25, 315, 80), 'torch.nn.Linear', 'nn.Linear', ({(315, 35, 315, 58): 'self.decoder.input_size', (315, 60, 315, 79): 'args.tgt_vocab_size'}, {}), '(self.decoder.input_size, args.tgt_vocab_size)', True, 'import torch.nn as nn\n'), ((602, 20, 602, 49), 'torch.stack', 'torch.stack', (), '', False, 'import torch\n'), ((624, 16, 624, 29), 'prettytable.PrettyTable', 'PrettyTable', ({}, {}), '()', False, 'from prettytable import PrettyTable\n'), ((31, 39, 33, 64), 'c2nl.modules.embeddings.Embeddings', 'Embeddings', ({(31, 50, 31, 61): 'args.emsize', (32, 50, 32, 69): 'args.src_vocab_size', (33, 50, 33, 63): 'constants.PAD'}, {}), '(args.emsize, args.src_vocab_size, constants.PAD)', False, 'from c2nl.modules.embeddings import Embeddings\n'), ((36, 39, 38, 64), 'c2nl.modules.embeddings.Embeddings', 'Embeddings', ({(36, 50, 36, 61): 'args.emsize', (37, 50, 37, 69): 'args.tgt_vocab_size', (38, 50, 38, 63): 'constants.PAD'}, {}), '(args.emsize, args.tgt_vocab_size, constants.PAD)', False, 'from c2nl.modules.embeddings import Embeddings\n'), ((45, 39, 48, 67), 'c2nl.modules.char_embedding.CharEmbedding', 'CharEmbedding', ({(45, 53, 45, 70): 'args.n_characters', (46, 53, 46, 69): 'args.char_emsize', (47, 53, 47, 69): 'args.filter_size', (48, 53, 48, 66): 'args.nfilters'}, {}), '(args.n_characters, args.char_emsize, args.filter_size, args.\n nfilters)', False, 'from c2nl.modules.char_embedding import CharEmbedding\n'), ((50, 35, 50, 77), 'c2nl.modules.highway.Highway', 'Highway', (), '', False, 'from c2nl.modules.highway import Highway\n'), ((54, 39, 57, 67), 'c2nl.modules.char_embedding.CharEmbedding', 'CharEmbedding', ({(54, 53, 54, 70): 'args.n_characters', (55, 53, 55, 69): 'args.char_emsize', (56, 53, 56, 69): 'args.filter_size', (57, 53, 57, 66): 'args.nfilters'}, {}), '(args.n_characters, args.char_emsize, args.filter_size, args.\n nfilters)', False, 'from c2nl.modules.char_embedding import CharEmbedding\n'), ((59, 35, 59, 77), 'c2nl.modules.highway.Highway', 'Highway', (), '', False, 'from c2nl.modules.highway import Highway\n'), ((71, 38, 72, 71), 'torch.nn.Embedding', 'nn.Embedding', ({(71, 51, 71, 67): 'args.max_src_len', (72, 51, 72, 70): 'self.enc_input_size'}, {}), '(args.max_src_len, self.enc_input_size)', True, 'import torch.nn as nn\n'), ((75, 38, 76, 71), 'torch.nn.Embedding', 'nn.Embedding', ({(75, 51, 75, 71): 'args.max_tgt_len + 2', (76, 51, 76, 70): 'self.dec_input_size'}, {}), '(args.max_tgt_len + 2, self.dec_input_size)', True, 'import torch.nn as nn\n'), ((160, 33, 160, 69), 'torch.nn.Linear', 'nn.Linear', (), '', True, 'import torch.nn as nn\n'), ((170, 21, 170, 54), 'torch.stack', 'torch.stack', (), '', False, 'import torch\n'), ((172, 27, 172, 58), 'torch.nn.functional.softmax', 'f.softmax', (), '', True, 'import torch.nn.functional as f\n'), ((189, 33, 198, 13), 'c2nl.decoders.transformer.TransformerDecoder', 'TransformerDecoder', (), '', False, 'from c2nl.decoders.transformer import TransformerDecoder\n'), ((199, 33, 207, 13), 'c2nl.decoders.transformer.TransformerDecoder', 'TransformerDecoder', (), '', False, 'from c2nl.decoders.transformer import TransformerDecoder\n'), ((219, 31, 228, 13), 'c2nl.decoders.transformer.TransformerDecoder', 
'TransformerDecoder', (), '', False, 'from c2nl.decoders.transformer import TransformerDecoder\n'), ((231, 25, 233, 13), 'torch.load', 'torch.load', (), '', False, 'import torch\n'), ((323, 29, 324, 70), 'c2nl.modules.global_attention.GlobalAttention', 'GlobalAttention', (), '', False, 'from c2nl.modules.global_attention import GlobalAttention\n'), ((325, 34, 327, 63), 'c2nl.modules.copy_generator.CopyGenerator', 'CopyGenerator', ({(325, 48, 325, 71): 'self.decoder.input_size', (326, 48, 326, 56): 'tgt_dict', (327, 48, 327, 62): 'self.generator'}, {}), '(self.decoder.input_size, tgt_dict, self.generator)', False, 'from c2nl.modules.copy_generator import CopyGenerator, CopyGeneratorCriterion\n'), ((331, 29, 331, 66), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', (), '', True, 'import torch.nn as nn\n'), ((380, 24, 380, 53), 'torch.nn.functional.softmax', 'f.softmax', (), '', True, 'import torch.nn.functional as f\n'), ((470, 24, 470, 57), 'torch.LongTensor', 'torch.LongTensor', ({(470, 41, 470, 56): '[constants.BOS]'}, {}), '([constants.BOS])', False, 'import torch\n'), ((603, 20, 603, 49), 'torch.stack', 'torch.stack', (), '', False, 'import torch\n'), ((605, 21, 605, 51), 'torch.stack', 'torch.stack', (), '', False, 'import torch\n'), ((211, 16, 211, 63), 'torch.nn.Linear', 'nn.Linear', ({(211, 26, 211, 45): 'self.input_size * 2', (211, 47, 211, 62): 'self.input_size'}, {}), '(self.input_size * 2, self.input_size)', True, 'import torch.nn as nn\n'), ((212, 16, 212, 28), 'torch.nn.Sigmoid', 'nn.Sigmoid', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((215, 16, 215, 63), 'torch.nn.Linear', 'nn.Linear', ({(215, 26, 215, 45): 'self.input_size * 2', (215, 47, 215, 62): 'self.input_size'}, {}), '(self.input_size * 2, self.input_size)', True, 'import torch.nn as nn\n'), ((216, 16, 216, 25), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((273, 38, 273, 78), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((522, 28, 522, 57), 'torch.nn.functional.softmax', 'f.softmax', (), '', True, 'import torch.nn.functional as f\n'), ((540, 29, 540, 57), 'torch.nn.functional.softmax', 'f.softmax', (), '', True, 'import torch.nn.functional as f\n'), ((543, 32, 543, 74), 'torch.max', 'torch.max', (), '', False, 'import torch\n'), ((544, 27, 544, 54), 'torch.log', 'torch.log', ({(544, 37, 544, 53): 'tgt_prob + 1e-20'}, {}), '(tgt_prob + 1e-20)', False, 'import torch\n'), ((554, 27, 554, 59), 'torch.stack', 'torch.stack', (), '', False, 'import torch\n'), ((96, 31, 96, 65), 'torch.cat', 'torch.cat', ({(96, 41, 96, 61): '(word_rep, char_rep)', (96, 63, 96, 64): '2'}, {}), '((word_rep, char_rep), 2)', False, 'import torch\n'), ((274, 48, 274, 71), 'torch.mul', 'torch.mul', ({(274, 58, 274, 61): 'f_t', (274, 63, 274, 70): 'dec_out'}, {}), '(f_t, dec_out)', False, 'import torch\n'), ((567, 20, 567, 39), 'torch.Tensor', 'torch.Tensor', ({(567, 33, 567, 38): 'words'}, {}), '(words)', False, 'import torch\n'), ((121, 31, 121, 65), 'torch.cat', 'torch.cat', ({(121, 41, 121, 61): '(word_rep, char_rep)', (121, 63, 121, 64): '2'}, {}), '((word_rep, char_rep), 2)', False, 'import torch\n'), ((128, 30, 128, 54), 'torch.LongTensor', 'torch.LongTensor', ({(128, 47, 128, 53): '[step]'}, {}), '([step])', False, 'import torch\n'), ((529, 34, 529, 70), 'torch.LongTensor', 'torch.LongTensor', ({(529, 51, 529, 69): "params['blank'][b]"}, {}), "(params['blank'][b])", False, 'import torch\n'), ((530, 33, 530, 68), 'torch.LongTensor', 'torch.LongTensor', ({(530, 50, 530, 67): 
"params['fill'][b]"}, {}), "(params['fill'][b])", False, 'import torch\n'), ((564, 28, 564, 51), 'torch.Tensor', 'torch.Tensor', ({(564, 41, 564, 50): 'tgt_chars'}, {}), '(tgt_chars)', False, 'import torch\n')] |
cjellick/python-agent | cattle/plugins/docker/delegate.py | 6991369e309d050a43cba770df6e8ddd758f671d | import logging
from cattle import Config
from cattle.utils import reply, popen
from .compute import DockerCompute
from cattle.agent.handler import BaseHandler
from cattle.progress import Progress
from cattle.type_manager import get_type, MARSHALLER
from . import docker_client
import subprocess
import os
import time
log = logging.getLogger('docker')
def ns_exec(pid, event):
script = os.path.join(Config.home(), 'events', event.name.split(';')[0])
cmd = ['nsenter',
'-F',
'-m',
'-u',
'-i',
'-n',
'-p',
'-t', str(pid),
'--', script]
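    # nsenter flags: -F means do not fork; -m, -u, -i, -n and -p enter the target's mount,
    # UTS, IPC, network and PID namespaces; -t names the target pid. The event handler
    # script is therefore executed inside the container's namespaces.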
marshaller = get_type(MARSHALLER)
input = marshaller.to_string(event)
data = None
env = {}
with open('/proc/{}/environ'.format(pid)) as f:
for line in f.read().split('\0'):
if not len(line):
continue
kv = line.split('=', 1)
if kv[0].startswith('CATTLE'):
env[kv[0]] = kv[1]
env['PATH'] = os.environ['PATH']
env['CATTLE_CONFIG_URL'] = Config.config_url()
for i in range(3):
p = popen(cmd,
env=env,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output, error = p.communicate(input=input)
retcode = p.poll()
if retcode == 0:
break
exists_cmd = cmd[:-1] + ['/usr/bin/test', '-e', script]
if popen(exists_cmd, env=env).wait() == 0:
break
# Sleep and try again if missing
time.sleep(1)
if retcode:
return retcode, output, None
text = []
for line in output.splitlines():
if line.startswith('{'):
data = marshaller.from_string(line)
break
text.append(line)
return retcode, ''.join(text), data
class DockerDelegate(BaseHandler):
def __init__(self):
self.compute = DockerCompute()
pass
def events(self):
return ['delegate.request']
def delegate_request(self, req=None, event=None, instanceData=None, **kw):
if instanceData.kind != 'container' or \
instanceData.get('token') is None:
return
container = self.compute.get_container(docker_client(), instanceData,
by_agent=True)
if container is None:
            log.info('Can not call [%s], container does not exist',
instanceData.uuid)
return
inspect = self.compute.inspect(container)
try:
running = inspect['State']['Running']
if not running:
log.error('Can not call [%s], container is not running',
instanceData.uuid)
return
except KeyError:
log.error('Can not call [%s], container is not running',
instanceData.uuid)
return
progress = Progress(event, parent=req)
exit_code, output, data = ns_exec(inspect['State']['Pid'], event)
if exit_code == 0:
return reply(event, data, parent=req)
else:
progress.update('Update failed', data={
'exitCode': exit_code,
'output': output
})
| [((15, 6, 15, 33), 'logging.getLogger', 'logging.getLogger', ({(15, 24, 15, 32): '"""docker"""'}, {}), "('docker')", False, 'import logging\n'), ((30, 17, 30, 37), 'cattle.type_manager.get_type', 'get_type', ({(30, 26, 30, 36): 'MARSHALLER'}, {}), '(MARSHALLER)', False, 'from cattle.type_manager import get_type, MARSHALLER\n'), ((44, 31, 44, 50), 'cattle.Config.config_url', 'Config.config_url', ({}, {}), '()', False, 'from cattle import Config\n'), ((19, 26, 19, 39), 'cattle.Config.home', 'Config.home', ({}, {}), '()', False, 'from cattle import Config\n'), ((47, 12, 51, 43), 'cattle.utils.popen', 'popen', (), '', False, 'from cattle.utils import reply, popen\n'), ((63, 8, 63, 21), 'time.sleep', 'time.sleep', ({(63, 19, 63, 20): '(1)'}, {}), '(1)', False, 'import time\n'), ((111, 19, 111, 46), 'cattle.progress.Progress', 'Progress', (), '', False, 'from cattle.progress import Progress\n'), ((115, 19, 115, 49), 'cattle.utils.reply', 'reply', (), '', False, 'from cattle.utils import reply, popen\n'), ((59, 11, 59, 37), 'cattle.utils.popen', 'popen', (), '', False, 'from cattle.utils import reply, popen\n')] |
ehickox2012/bitraider | bitraider/strategy.py | dcc695b93dc1c22415780e3f5ff9f7ee29d6988c | import sys
import pytz
#import xml.utils.iso8601
import time
import numpy
from datetime import date, datetime, timedelta
from matplotlib import pyplot as plt
from exchange import cb_exchange as cb_exchange
from exchange import CoinbaseExchangeAuth
from abc import ABCMeta, abstractmethod
class strategy(object):
    """`strategy` defines an abstract base strategy class. The minimum required to create a strategy is a class which inherits from strategy and implements the abstract trade method; backtest_strategy and helpers such as calculate_historic_data are provided by this base class.
"""
__metaclass__ = ABCMeta
    def __init__(self, name="default name", interval=5):
"""Constructor for an abstract strategy. You can modify it as needed.
\n`interval`: a.k.a timeslice the amount of time in seconds for each 'tick' default is 5
\n`name`: a string name for the strategy
"""
self.name = name
self.interval = interval
self.times_recalculated = 0
@abstractmethod
def trade(self, timeslice):
"""Perform operations on a timeslice.
\n`timeslice`: a section of trade data with time length equal to the strategy's interval, formatted as follows:
\n[time, low, high, open, close, volume]
"""
return
def backtest_strategy(self, historic_data):
"""Returns performance of a strategy vs market performance.
"""
# Reverse the data since Coinbase returns it in reverse chronological
# now historic_data strarts with the oldest entry
historic_data = list(reversed(historic_data))
earliest_time = float(historic_data[0][0])
latest_time = float(historic_data[-1][0])
start_price = float(historic_data[0][4])
end_price = float(historic_data[-1][4])
market_performance = ((end_price-start_price)/start_price)*100
print("Running simulation on historic data. This may take some time....")
for timeslice in historic_data:
# Display what percent through the data we are
idx = historic_data.index(timeslice)
percent = (float(idx)/float(len(historic_data)))*100 + 1
sys.stdout.write("\r%d%%" % percent)
sys.stdout.flush()
self.trade(timeslice)
# Calculate performance
end_amt_no_trades = (float(self.exchange.start_usd)/float(end_price)) + float(self.exchange.start_btc)
end_amt = (float(self.exchange.usd_bal)/float(end_price)) + float(self.exchange.btc_bal)
start_amt = (float(self.exchange.start_usd)/float(start_price)) + float(self.exchange.start_btc)
strategy_performance = ((end_amt-start_amt)/start_amt)*100
print("\n")
print("Times recalculated: "+str(self.times_recalculated))
print("Times bought: "+str(self.exchange.times_bought))
print("Times sold: "+str(self.exchange.times_sold))
print("The Market's performance: "+str(market_performance)+" %")
print("Strategy's performance: "+str(strategy_performance)+" %")
print("Account's ending value if no trades were made: "+str(end_amt_no_trades)+" BTC")
print("Account's ending value with this strategy: "+str(end_amt)+" BTC")
strategy_performance_vs_market = strategy_performance - market_performance
if strategy_performance > market_performance:
print("Congratulations! This strategy has beat the market by: "+str(strategy_performance_vs_market)+" %")
elif strategy_performance < market_performance:
            print("This strategy has performed: "+str(strategy_performance_vs_market)+" % worse than market.")
return strategy_performance_vs_market, strategy_performance, market_performance
@staticmethod
def calculate_historic_data(data, pivot):
"""Returns average price weighted according to volume, and the number of bitcoins traded
above and below a price point, called a pivot.\n
\npivot: the price used for returning volume above and below
        \ndata: a list of lists formatted as follows [time, low, high, open, close, volume]
\n[
\n\t["2014-11-07 22:19:28.578544+00", "0.32", "4.2", "0.35", "4.2", "12.3"],
\n\t\t...
\n]
"""
price_list = []
weights = []
if data is None:
pass
min_price = float(data[0][1])
max_price = float(data[0][2])
discrete_prices = {}
for timeslice in data:
timeslice = [float(i) for i in timeslice]
if max_price < timeslice[2]:
                max_price = timeslice[2]
if min_price > timeslice[1]:
min_price = timeslice[1]
closing_price = timeslice[4]
volume = timeslice[5]
if closing_price not in discrete_prices.keys():
discrete_prices[str(closing_price)] = volume
else:
                discrete_prices[str(closing_price)] += volume
idx = data.index(timeslice)
price_list.append(closing_price)
weights.append(volume)
fltprices = [float(i) for i in discrete_prices.keys()]
fltvolumes = [float(i) for i in discrete_prices.values()]
np_discrete_prices = numpy.array(fltprices)
np_volume_per_price = numpy.array(fltvolumes)
weighted_avg = numpy.average(np_discrete_prices, weights=np_volume_per_price)
num_above = 0
num_below = 0
num_at = 0
for key in discrete_prices.keys():
value = discrete_prices[key]
if float(key) > pivot:
num_above+=value
elif float(key) < pivot:
num_below+=value
elif float(key) == pivot:
num_at+=value
total_volume = 0.0
for volume in fltvolumes:
total_volume+=volume
fltprops = []
for volume in fltvolumes:
fltprops.append((volume/total_volume))
#print("num_below: "+str(num_below))
#print("num_above: "+str(num_above))
#print("num_at: "+str(num_at))
#print("weighted_average: "+str(weighted_avg))
#plt.title("Price distribution")
#plt.xlabel("Price (USD)")
#plt.ylabel("Volume")
#plt.bar(fltprices, fltprops)
#plt.show()
return weighted_avg, num_above, num_below
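# A minimal illustrative subclass (not part of the original module): it only records
# closing prices, but shows the one method a concrete strategy must implement.
class recording_strategy(strategy):
    def __init__(self, name="recording strategy", interval=5):
        super(recording_strategy, self).__init__(name=name, interval=interval)
        self.closes = []
    def trade(self, timeslice):
        # timeslice is [time, low, high, open, close, volume]
        self.closes.append(float(timeslice[4]))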
| [((119, 29, 119, 51), 'numpy.array', 'numpy.array', ({(119, 41, 119, 50): 'fltprices'}, {}), '(fltprices)', False, 'import numpy\n'), ((120, 30, 120, 53), 'numpy.array', 'numpy.array', ({(120, 42, 120, 52): 'fltvolumes'}, {}), '(fltvolumes)', False, 'import numpy\n'), ((121, 23, 121, 85), 'numpy.average', 'numpy.average', (), '', False, 'import numpy\n'), ((54, 12, 54, 48), 'sys.stdout.write', 'sys.stdout.write', ({(54, 29, 54, 47): "('\\r%d%%' % percent)"}, {}), "('\\r%d%%' % percent)", False, 'import sys\n'), ((55, 12, 55, 30), 'sys.stdout.flush', 'sys.stdout.flush', ({}, {}), '()', False, 'import sys\n')] |
PacktPublishing/Python-Deep-Learning-for-Beginners- | neural-networks.py | 90f110158cbf0ce02fd4d5d09e3b2034428d9992 | import numpy as np
# Perceptron
def predict_perceptron(inputs, weights):
if np.dot(inputs, weights) > 0:
return 1
else:
return 0
def predict_perceptron_proper(inputs, weights):
def step_function(input):
return 1 if input > 0 else 0
def linear_model(inputs, weights):
return np.dot(inputs, weights)
return step_function(linear_model(inputs, weights))
def neuron(inputs, weights):
def sigmoid_function(input):
return 1 / (1 + np.exp(-1 * input))
def linear_model(inputs, weights):
return np.dot(inputs, weights)
return sigmoid_function(linear_model(inputs, weights))
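# Illustrative values (assumptions; the original file leaves inputs, weights1 and
# weights2 undefined). Any arrays/scalars with compatible shapes would do.
inputs = np.array([0.5, -0.2, 0.1])
weights1 = np.array([0.4, 0.3, -0.6])
weights2 = 0.8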
neural_network = neuron(neuron(inputs, weights1), weights2)
| [((5, 7, 5, 30), 'numpy.dot', 'np.dot', ({(5, 14, 5, 20): 'inputs', (5, 22, 5, 29): 'weights'}, {}), '(inputs, weights)', True, 'import numpy as np\n'), ((16, 15, 16, 38), 'numpy.dot', 'np.dot', ({(16, 22, 16, 28): 'inputs', (16, 30, 16, 37): 'weights'}, {}), '(inputs, weights)', True, 'import numpy as np\n'), ((26, 15, 26, 38), 'numpy.dot', 'np.dot', ({(26, 22, 26, 28): 'inputs', (26, 30, 26, 37): 'weights'}, {}), '(inputs, weights)', True, 'import numpy as np\n'), ((23, 24, 23, 42), 'numpy.exp', 'np.exp', ({(23, 31, 23, 41): '(-1 * input)'}, {}), '(-1 * input)', True, 'import numpy as np\n')] |
andreasjansson/OroJaR | biggan_discovery/orojar_discover.py | ebb8c0333bbd33c063b6dd4a21a0559eb86d13e9 | """
Learns a matrix of Z-Space directions using a pre-trained BigGAN Generator.
Modified from train.py in the PyTorch BigGAN repo.
"""
import os
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim
import utils
import train_fns
from sync_batchnorm import patch_replication_callback
from torch.utils.tensorboard import SummaryWriter
from orojar import orojar
from direction_utils import visualize_directions, load_G, get_direction_padding_fn, init_wandb, download_G
from layers import fast_gram_schmidt, norm
class DataParallelLoss(nn.Module):
"""
This is simply a wrapper class to compute the OroJaR efficiently over several GPUs
"""
def __init__(self, G):
super(DataParallelLoss, self).__init__()
self.G = G
def forward(self, z, y, w, Q):
penalty = orojar(self.G, z, c=y, w=w, G_z=None, Q=Q, multiple_layers=False)
return penalty
# The main training file. Config is a dictionary specifying the configuration
# of this training run.
def run(config):
if config['wandb_entity'] is not None:
init_wandb(config, config['experiment_name'], config['wandb_entity'], 'imagenet')
if config["G_path"] is None: # Download a pre-trained G if necessary
download_G()
config["G_path"] = 'checkpoints/138k'
G, state_dict, device, experiment_name = load_G(config)
# If parallel, parallelize the GD module
if config['parallel']:
G = nn.DataParallel(DataParallelLoss(G))
if config['cross_replica']:
patch_replication_callback(G)
num_gpus = torch.cuda.device_count()
print(f'Using {num_gpus} GPUs')
# If search_space != 'all', then we need to pad the z components that we are leaving alone:
pad = get_direction_padding_fn(config)
direction_size = config['dim_z'] if config['search_space'] == 'all' else config['ndirs']
# A is our (ndirs, |z|) matrix of directions, where ndirs indicates the number of directions we want to learn
if config['load_A'] == 'coords':
print('Initializing with standard basis directions')
A = torch.nn.Parameter(torch.eye(config['ndirs'], direction_size, device=device), requires_grad=True)
elif config['load_A'] == 'random':
print('Initializing with random directions')
A = torch.nn.Parameter(torch.empty(config['ndirs'], direction_size, device=device), requires_grad=True)
torch.nn.init.kaiming_normal_(A)
else:
raise NotImplementedError
# We only learn A; G is left frozen during training:
optim = torch.optim.Adam(params=[A], lr=config['A_lr'])
# Allow for different batch sizes in G
G_batch_size = max(config['G_batch_size'], config['batch_size'])
z_, y_ = utils.prepare_z_y(G_batch_size, G.module.G.dim_z, config['n_classes'],
device=device, fp16=config['G_fp16'])
# Prepare a fixed z & y to see individual sample evolution throghout training
fixed_z, fixed_y = utils.prepare_z_y(G_batch_size, G.module.G.dim_z,
config['n_classes'], device=device,
fp16=config['G_fp16'])
fixed_z.sample_()
fixed_y.sample_()
interp_z, interp_y = utils.prepare_z_y(config["n_samples"], G.module.G.dim_z,
config['n_classes'], device=device,
fp16=config['G_fp16'])
interp_z.sample_()
interp_y.sample_()
if config['fix_class'] is not None:
y_ = y_.new_full(y_.size(), config['fix_class'])
fixed_y = fixed_y.new_full(fixed_y.size(), config['fix_class'])
interp_y = interp_y.new_full(interp_y.size(), config['fix_class'])
print('Beginning training at epoch %d...' % state_dict['epoch'])
# Train for specified number of epochs, although we mostly track G iterations.
iters_per_epoch = 1000
dummy_loader = [None] * iters_per_epoch # We don't need any real data
path_size = config['path_size']
    # Simply stores an ndirs-dimensional one-hot vector for each direction we are learning:
direction_indicators = torch.eye(config['ndirs']).to(device)
G.eval()
G.module.optim = optim
writer = SummaryWriter('%s/%s' % (config['logs_root'], experiment_name))
sample_sheet = train_fns.save_and_sample(G.module.G, None, G.module.G, z_, y_, fixed_z, fixed_y,
state_dict, config, experiment_name)
writer.add_image('samples', sample_sheet, 0)
interp_y_ = G.module.G.shared(interp_y)
norm_fn = norm
    # Make directions orthonormal via Gram-Schmidt followed by a normalization:
Q = pad(norm_fn(fast_gram_schmidt(A))) if not config["no_ortho"] else pad(A)
if config["vis_during_training"]:
print("Generating initial visualizations...")
interp_vis = visualize_directions(G.module.G, interp_z, interp_y_, path_sizes=path_size, Q=Q,
high_quality=False, npv=1)
for w_ix in range(config['ndirs']):
writer.add_video('G_ema/w%03d' % w_ix, interp_vis[w_ix], 0, fps=24)
for epoch in range(state_dict['epoch'], config['num_epochs']):
if config['pbar'] == 'mine':
pbar = utils.progress(dummy_loader, displaytype='s1k' if config['use_multiepoch_sampler'] else 'eta')
else:
pbar = tqdm(dummy_loader)
for i, _ in enumerate(pbar):
state_dict['itr'] += 1
z_.sample_()
if config['fix_class'] is None:
y_.sample_()
y = G.module.G.shared(y_)
# OroJaR taken w.r.t. w_sampled, NOT z:
w = torch.zeros((G_batch_size, config['ndirs'])) # equal to the one-hot w
penalty = G(z_, y, w=w, Q=Q.repeat(num_gpus, 1)).mean()
optim.zero_grad()
penalty.backward()
optim.step()
# re-orthogonalize A for visualizations and the next training iteration:
Q = pad(norm_fn(fast_gram_schmidt(A))) if not config["no_ortho"] else pad(A)
# Log metrics to TensorBoard/WandB:
cur_training_iter = epoch * iters_per_epoch + i
writer.add_scalar(f'Metrics/orojar', penalty.item(), cur_training_iter)
writer.add_scalar('Metrics/direction_norm', A.pow(2).mean().pow(0.5).item(), cur_training_iter)
# Save directions and log visuals:
if not (state_dict['itr'] % config['save_every']):
torch.save(A.cpu().detach(), '%s/%s/A_%06d.pt' %
(config['weights_root'], experiment_name, cur_training_iter))
if config["vis_during_training"]:
interp_vis = visualize_directions(G.module.G, interp_z, interp_y_, path_sizes=path_size, Q=Q,
high_quality=False, npv=1)
for w_ix in range(config['ndirs']):
writer.add_video('G_ema/w%03d' % w_ix, interp_vis[w_ix], cur_training_iter, fps=24)
state_dict['epoch'] += 1
def main():
# parse command line and run
parser = utils.prepare_parser()
config = vars(parser.parse_args())
print(config)
run(config)
if __name__ == '__main__':
main()
| [((46, 45, 46, 59), 'direction_utils.load_G', 'load_G', ({(46, 52, 46, 58): 'config'}, {}), '(config)', False, 'from direction_utils import visualize_directions, load_G, get_direction_padding_fn, init_wandb, download_G\n'), ((53, 15, 53, 40), 'torch.cuda.device_count', 'torch.cuda.device_count', ({}, {}), '()', False, 'import torch\n'), ((57, 10, 57, 42), 'direction_utils.get_direction_padding_fn', 'get_direction_padding_fn', ({(57, 35, 57, 41): 'config'}, {}), '(config)', False, 'from direction_utils import visualize_directions, load_G, get_direction_padding_fn, init_wandb, download_G\n'), ((70, 12, 70, 59), 'torch.optim.Adam', 'torch.optim.Adam', (), '', False, 'import torch\n'), ((74, 13, 75, 68), 'utils.prepare_z_y', 'utils.prepare_z_y', (), '', False, 'import utils\n'), ((78, 23, 80, 63), 'utils.prepare_z_y', 'utils.prepare_z_y', (), '', False, 'import utils\n'), ((84, 25, 86, 65), 'utils.prepare_z_y', 'utils.prepare_z_y', (), '', False, 'import utils\n'), ((108, 13, 108, 76), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ({(108, 27, 108, 75): "'%s/%s' % (config['logs_root'], experiment_name)"}, {}), "('%s/%s' % (config['logs_root'], experiment_name))", False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((109, 19, 110, 81), 'train_fns.save_and_sample', 'train_fns.save_and_sample', ({(109, 45, 109, 55): 'G.module.G', (109, 57, 109, 61): 'None', (109, 63, 109, 73): 'G.module.G', (109, 75, 109, 77): 'z_', (109, 79, 109, 81): 'y_', (109, 83, 109, 90): 'fixed_z', (109, 92, 109, 99): 'fixed_y', (110, 45, 110, 55): 'state_dict', (110, 57, 110, 63): 'config', (110, 65, 110, 80): 'experiment_name'}, {}), '(G.module.G, None, G.module.G, z_, y_, fixed_z,\n fixed_y, state_dict, config, experiment_name)', False, 'import train_fns\n'), ((169, 13, 169, 35), 'utils.prepare_parser', 'utils.prepare_parser', ({}, {}), '()', False, 'import utils\n'), ((33, 18, 33, 83), 'orojar.orojar', 'orojar', (), '', False, 'from orojar import orojar\n'), ((42, 8, 42, 89), 'direction_utils.init_wandb', 'init_wandb', ({(42, 19, 42, 25): 'config', (42, 27, 42, 52): "config['experiment_name']", (42, 54, 42, 76): "config['wandb_entity']", (42, 78, 42, 88): '"""imagenet"""'}, {}), "(config, config['experiment_name'], config['wandb_entity'],\n 'imagenet')", False, 'from direction_utils import visualize_directions, load_G, get_direction_padding_fn, init_wandb, download_G\n'), ((44, 8, 44, 20), 'direction_utils.download_G', 'download_G', ({}, {}), '()', False, 'from direction_utils import visualize_directions, load_G, get_direction_padding_fn, init_wandb, download_G\n'), ((122, 21, 123, 68), 'direction_utils.visualize_directions', 'visualize_directions', (), '', False, 'from direction_utils import visualize_directions, load_G, get_direction_padding_fn, init_wandb, download_G\n'), ((51, 12, 51, 41), 'sync_batchnorm.patch_replication_callback', 'patch_replication_callback', ({(51, 39, 51, 40): 'G'}, {}), '(G)', False, 'from sync_batchnorm import patch_replication_callback\n'), ((62, 31, 62, 88), 'torch.eye', 'torch.eye', (), '', False, 'import torch\n'), ((66, 8, 66, 40), 'torch.nn.init.kaiming_normal_', 'torch.nn.init.kaiming_normal_', ({(66, 38, 66, 39): 'A'}, {}), '(A)', False, 'import torch\n'), ((102, 27, 102, 53), 'torch.eye', 'torch.eye', ({(102, 37, 102, 52): "config['ndirs']"}, {}), "(config['ndirs'])", False, 'import torch\n'), ((129, 19, 129, 113), 'utils.progress', 'utils.progress', (), '', False, 'import utils\n'), ((131, 19, 131, 37), 'tqdm.tqdm', 'tqdm', ({(131, 24, 131, 36): 
'dummy_loader'}, {}), '(dummy_loader)', False, 'from tqdm import tqdm\n'), ((140, 16, 140, 60), 'torch.zeros', 'torch.zeros', ({(140, 28, 140, 59): "(G_batch_size, config['ndirs'])"}, {}), "((G_batch_size, config['ndirs']))", False, 'import torch\n'), ((65, 31, 65, 90), 'torch.empty', 'torch.empty', (), '', False, 'import torch\n'), ((118, 20, 118, 40), 'layers.fast_gram_schmidt', 'fast_gram_schmidt', ({(118, 38, 118, 39): 'A'}, {}), '(A)', False, 'from layers import fast_gram_schmidt, norm\n'), ((159, 33, 160, 80), 'direction_utils.visualize_directions', 'visualize_directions', (), '', False, 'from direction_utils import visualize_directions, load_G, get_direction_padding_fn, init_wandb, download_G\n'), ((147, 28, 147, 48), 'layers.fast_gram_schmidt', 'fast_gram_schmidt', ({(147, 46, 147, 47): 'A'}, {}), '(A)', False, 'from layers import fast_gram_schmidt, norm\n')] |
Alva789ro/Regional-Comprehensive-Economic-Partnership-RCEP-Economic-Default-Risk-Analysis | file_importer0.py | 454583f47883edae17391f101b10b38b68c9834f | import xlsxwriter
import pandas as pd
import numpy as np
import mysql.connector
australia=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Australia')
brunei=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Brunei')
cambodia=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Cambodia')
china=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='China')
indonesia=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Indonesia')
japan=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Japan')
lao=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Lao')
malaysia=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Malaysia')
myanmar=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Myanmar')
new_zeland=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='New Zeland')
philipines=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Philipines')
singapore=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Singapore')
thailand=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Thailand')
vietnam=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Vietnam')
'''
mydb = mysql.connector.connect(
host = "localhost",
user = "root",
passwd = "",
database = ""
)
mycursor = mydb.cursor()
sqlformula1 = "INSERT INTO australia VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(australia['Year'], australia['RGDP'], australia['NGDP'], australia['GDP_pc'], australia['Inflation'], australia['Unemployment_Rate'], australia['Net_LB'], australia['Account_Balance']):
mycursor.execute(sqlformula1, [a, b, c, d, e, f, g, h])
sqlformula2 = "INSERT INTO brunei VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(brunei['Year'], brunei['RGDP'], brunei['NGDP'], brunei['GDP_pc'], brunei['Inflation'], brunei['Unemployment_Rate'], brunei['Net_LB'], brunei['Account_Balance']):
mycursor.execute(sqlformula2, [a, b, c, d, e, f, g, h])
sqlformula3 = "INSERT INTO cambodia VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(cambodia['Year'], cambodia['RGDP'], cambodia['NGDP'], cambodia['GDP_pc'], cambodia['Inflation'], cambodia['Unemployment_Rate'], cambodia['Net_LB'], cambodia['Account_Balance']):
mycursor.execute(sqlformula3, [a, b, c, d, e, f, g, h])
sqlformula4 = "INSERT INTO china VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(china['Year'], china['RGDP'], china['NGDP'], china['GDP_pc'], china['Inflation'], china['Unemployment_Rate'], china['Net_LB'], china['Account_Balance']):
mycursor.execute(sqlformula4, [a, b, c, d, e, f, g, h])
sqlformula5 = "INSERT INTO indonesia VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(indonesia['Year'], indonesia['RGDP'], indonesia['NGDP'], indonesia['GDP_pc'], indonesia['Inflation'], indonesia['Unemployment_Rate'], indonesia['Net_LB'], indonesia['Account_Balance']):
mycursor.execute(sqlformula5, [a, b, c, d, e, f, g, h])
sqlformula6 = "INSERT INTO japan VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(japan['Year'], japan['RGDP'], japan['NGDP'], japan['GDP_pc'], japan['Inflation'], japan['Unemployment_Rate'], japan['Net_LB'], japan['Account_Balance']):
mycursor.execute(sqlformula6, [a, b, c, d, e, f, g, h])
sqlformula7 = "INSERT INTO lao VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(lao['Year'], lao['RGDP'], lao['NGDP'], lao['GDP_pc'], lao['Inflation'], lao['Unemployment_Rate'], lao['Net_LB'], lao['Account_Balance']):
mycursor.execute(sqlformula7, [a, b, c, d, e, f, g, h])
sqlformula8 = "INSERT INTO malaysia VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(malaysia['Year'], malaysia['RGDP'], malaysia['NGDP'], malaysia['GDP_pc'], malaysia['Inflation'], malaysia['Unemployment_Rate'], malaysia['Net_LB'], malaysia['Account_Balance']):
mycursor.execute(sqlformula8, [a, b, c, d, e, f, g, h])
sqlformula9 = "INSERT INTO myanmar VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(myanmar['Year'], myanmar['RGDP'], myanmar['NGDP'], myanmar['GDP_pc'], myanmar['Inflation'], myanmar['Unemployment_Rate'], myanmar['Net_LB'], myanmar['Account_Balance']):
mycursor.execute(sqlformula9, [a, b, c, d, e, f, g, h])
sqlformula10 = "INSERT INTO new_zeland VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(new_zeland['Year'], new_zeland['RGDP'], new_zeland['NGDP'], new_zeland['GDP_pc'], new_zeland['Inflation'], new_zeland['Unemployment_Rate'], new_zeland['Net_LB'], new_zeland['Account_Balance']):
mycursor.execute(sqlformula10, [a, b, c, d, e, f, g, h])
sqlformula11 = "INSERT INTO philipines VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(philipines['Year'], philipines['RGDP'], philipines['NGDP'], philipines['GDP_pc'], philipines['Inflation'], philipines['Unemployment_Rate'], philipines['Net_LB'], philipines['Account_Balance']):
mycursor.execute(sqlformula11, [a, b, c, d, e, f, g, h])
sqlformula12 = "INSERT INTO singapore VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(singapore['Year'], singapore['RGDP'], singapore['NGDP'], singapore['GDP_pc'], singapore['Inflation'], singapore['Unemployment_Rate'], singapore['Net_LB'], singapore['Account_Balance']):
mycursor.execute(sqlformula12, [a, b, c, d, e, f, g, h])
sqlformula13 = "INSERT INTO thailand VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(thailand['Year'], thailand['RGDP'], thailand['NGDP'], thailand['GDP_pc'], thailand['Inflation'], thailand['Unemployment_Rate'], thailand['Net_LB'], thailand['Account_Balance']):
mycursor.execute(sqlformula13, [a, b, c, d, e, f, g, h])
sqlformula14 = "INSERT INTO vietnam VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(vietnam['Year'], vietnam['RGDP'], vietnam['NGDP'], vietnam['GDP_pc'], vietnam['Inflation'], vietnam['Unemployment_Rate'], vietnam['Net_LB'], vietnam['Account_Balance']):
mycursor.execute(sqlformula14, [a, b, c, d, e, f, g, h])
'''
#mydb.commit()
| [((6, 10, 6, 101), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((7, 7, 7, 95), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((8, 9, 8, 99), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((9, 6, 9, 93), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((10, 10, 10, 101), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((11, 6, 11, 93), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((12, 4, 12, 89), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((13, 9, 13, 99), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((14, 8, 14, 97), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((15, 11, 15, 103), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((16, 11, 16, 103), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((17, 10, 17, 101), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((18, 9, 18, 99), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((19, 8, 19, 97), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n')] |
VIOOH/nile | packer/resources/bootstrap_node.py | 893802387b3891ea02aae05f39ff4aa051354f18 | #!/usr/bin/env python3
import os
import re
import glob
import boto3
import requests
import subprocess
from time import sleep
AWS_REGION = os.environ['AWS_REGION']
DEPLOY_UUID = os.environ['DEPLOY_UUID']
SERVICE_NAME = os.environ['SERVICE_NAME']
MOUNT_POINT = "/var/lib/" + SERVICE_NAME
NIC_IP = os.environ['NIC_IP']
TAG_KEY = os.environ['TAG_KEY']
def retrieve_eni_ids():
ec2 = boto3.resource('ec2')
enis = []
for eni in ec2.network_interfaces.all():
for tag in eni.tag_set:
if tag['Key'] == TAG_KEY:
if tag['Value'] == DEPLOY_UUID:
enis.append(eni.network_interface_id)
return enis if len(enis) > 0 else None
def attach_eni_ids():
c_ec2 = boto3.client('ec2')
r_ec2 = boto3.resource('ec2')
i_id = requests.get('http://169.254.169.254/latest/meta-data/instance-id').text
eni_ids = retrieve_eni_ids()
device_number = len(r_ec2.Instance(i_id).network_interfaces) + 1
    for eni_id in eni_ids:
        # Each ENI needs its own device index; attaching two at the same index fails.
        c_ec2.attach_network_interface(DeviceIndex=device_number, InstanceId=i_id, NetworkInterfaceId=eni_id)
        device_number += 1
def retrieve_ebs_ids():
ec2 = boto3.resource('ec2')
ebss = []
for volume in ec2.volumes.all():
if volume.tags is not None:
for tag in volume.tags:
if tag['Key'] == TAG_KEY:
if tag['Value'] == DEPLOY_UUID:
ebss.append(volume.volume_id)
return ebss if len(ebss) > 0 else None
def attach_ebs():
ec2 = boto3.client('ec2')
i_id = requests.get('http://169.254.169.254/latest/meta-data/instance-id').text
volume_ids = retrieve_ebs_ids()
i = 0
device_char = 'z'
while i < len(volume_ids):
v_id = volume_ids[i]
device = '/dev/xvd{0}'.format(device_char)
ec2.attach_volume(Device=device, InstanceId=i_id, VolumeId=v_id)
# Wait to ensure device is attached
sleep(3)
if not check_ebs(v_id):
prepare_ebs(v_id)
add_fstab_entries(v_id, MOUNT_POINT)
p_mount = subprocess.Popen('mount -a'.split(), stdout=subprocess.PIPE)
stdout, stderr = p_mount.communicate()
p_chown = subprocess.Popen('chown -R {0}:{0} {1}'.format(SERVICE_NAME, MOUNT_POINT).split(),
stdout=subprocess.PIPE)
stdout, stderr = p_chown.communicate()
device_char = chr(ord(device_char) - 1)
i += 1
def check_ebs(volume_id):
v_id = volume_id.replace('vol-', 'vol')
pattern = '/dev/disk/by-id/*{0}-part1'.format(v_id)
return bool(len(glob.glob(pattern)))
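# Note (assumed udev naming): on EC2 the attached volume usually appears under
# /dev/disk/by-id/ with the volume id embedded, e.g. something like
# /dev/disk/by-id/nvme-Amazon_Elastic_Block_Store_vol0123456789abcdef0-part1,
# which is why check_ebs()/prepare_ebs() glob for '*<vol-id>' style patterns.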
def prepare_ebs(volume_id):
v_id = volume_id.replace('vol-', 'vol')
pattern = '/dev/disk/by-id/*{0}'.format(v_id)
device = glob.glob(pattern)[0]
gdisk_commands = '\n'.join([
'n',
'1',
'34',
'',
'',
'w',
'Y',
''
])
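    # The joined string above feeds gdisk's interactive prompts, roughly:
    # n (new partition) -> partition number 1 -> first sector 34 ->
    # last sector (default, blank) -> type code (default, blank) -> w (write) -> Y (confirm).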
p_echo = subprocess.Popen('echo -ne {0}'.format(gdisk_commands).split(' '), stdout=subprocess.PIPE)
p_fdisk = subprocess.Popen('gdisk {0}'.format(device).split(), stdin=p_echo.stdout, stdout=subprocess.PIPE)
stdout, stderr = p_fdisk.communicate()
print(stdout)
print(stderr)
# p_partprobe = subprocess.Popen('partprobe'.split(' '), stdout=subprocess.PIPE)
# stdout, stderr = p_partprobe.communicate()
# print(stdout)
# print(stderr)
sleep(3)
pattern = '/dev/disk/by-id/*{0}-part1'.format(v_id)
partition = glob.glob(pattern)[0]
p_xfs = subprocess.Popen('mkfs.xfs {0}'.format(partition).split(), stdout=subprocess.PIPE)
stdout, stderr = p_xfs.communicate()
print(stdout)
print(stderr)
def add_fstab_entries(volume_id, mount_point):
v_id = volume_id.replace('vol-', 'vol')
pattern = '/dev/disk/by-id/*{0}-part1'.format(v_id)
partition = glob.glob(pattern)[0]
fstab_entries = [
mount_point,
'xfs',
'defaults',
'0',
'0'
]
with open('/etc/fstab', 'a') as f:
f.write('{0} {1}\n'.format(partition, ' '.join(fstab_entries)))
f.flush()
f.close()
def wait_device_ready(timeout=3):
c = 0
while c < timeout:
sleep(1)
p_ip = subprocess.Popen('ip a'.split(), stdout=subprocess.PIPE)
stdout, stderr = p_ip.communicate()
for line in stdout.decode().splitlines():
res = re.match('.*inet {0}/[0-9]{{2}}'.format(NIC_IP), line)
if res is not None:
return None
c += 1
raise Exception('Device with address {0} not ready'.format(NIC_IP))
def change_default_route():
wait_device_ready(10)
p_ip = subprocess.Popen('ip r'.split(), stdout=subprocess.PIPE)
stdout, stderr = p_ip.communicate()
r_subnet_rules = []
    for line in stdout.decode().splitlines():
res = re.match('(.* ){2}eth[0-9](?! $).*', line)
if res is not None:
subnet_rule = res.group(0)
l_subnet_rule = subnet_rule.split()
device = l_subnet_rule[2]
ip = l_subnet_rule[-1]
r_subnet_rules.append(
{
'device': device,
'ip': ip,
'subnet_rule': subnet_rule
}
)
r_default_route = ''
for line in stdout.decode().splitlines():
res = re.match('default .*', line)
if res is not None:
r_default_route = res.group(0)
break
with open('/etc/rc.local', 'a') as f:
f.write('#!/bin/bash\n\n')
rule_index = 128
default_route_device = ''
for rule in r_subnet_rules:
default_route = re.sub('eth.', rule['device'], r_default_route)
f.write('ip rule add from {0} table {1}\n'.format(rule['ip'], rule_index))
f.write('ip r add {0} table {1}\n'.format(default_route, rule_index))
f.write('ip r add {0} table {1}\n\n'.format(rule['subnet_rule'], rule_index))
if rule['ip'] == NIC_IP:
default_route_device = rule['device']
rule_index += 1
default_route = re.sub('eth.', default_route_device, r_default_route)
f.write('ip r del default\n')
f.write('ip r add {0}\n\n'.format(default_route))
f.write('exit 0\n')
f.flush()
f.close()
os.chmod('/etc/rc.local', 0o0755)
p_rc_local = subprocess.Popen('/etc/rc.local'.split(), stdout=subprocess.PIPE)
stdout, stderr = p_rc_local.communicate()
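# Illustrative /etc/rc.local produced above for one extra interface (addresses,
# table number and device name are hypothetical):
#   ip rule add from 10.0.1.25 table 128
#   ip r add default via 10.0.1.1 dev eth1 table 128
#   ip r add 10.0.1.0/24 dev eth1 proto kernel scope link src 10.0.1.25 table 128
#   ip r del default
#   ip r add default via 10.0.1.1 dev eth1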
if __name__ == '__main__':
boto3.setup_default_session(region_name=AWS_REGION)
# uses: DEPLOY_UUID, TAG_KEY
attach_eni_ids()
# uses: MOUNT_POINT, SERVICE_NAME, DEPLOY_UUID, TAG_KEY
attach_ebs()
# uses: NIC_IP
change_default_route()
| [((21, 10, 21, 31), 'boto3.resource', 'boto3.resource', ({(21, 25, 21, 30): '"""ec2"""'}, {}), "('ec2')", False, 'import boto3\n'), ((34, 12, 34, 31), 'boto3.client', 'boto3.client', ({(34, 25, 34, 30): '"""ec2"""'}, {}), "('ec2')", False, 'import boto3\n'), ((35, 12, 35, 33), 'boto3.resource', 'boto3.resource', ({(35, 27, 35, 32): '"""ec2"""'}, {}), "('ec2')", False, 'import boto3\n'), ((46, 10, 46, 31), 'boto3.resource', 'boto3.resource', ({(46, 25, 46, 30): '"""ec2"""'}, {}), "('ec2')", False, 'import boto3\n'), ((60, 10, 60, 29), 'boto3.client', 'boto3.client', ({(60, 23, 60, 28): '"""ec2"""'}, {}), "('ec2')", False, 'import boto3\n'), ((126, 4, 126, 12), 'time.sleep', 'sleep', ({(126, 10, 126, 11): '(3)'}, {}), '(3)', False, 'from time import sleep\n'), ((235, 4, 235, 37), 'os.chmod', 'os.chmod', ({(235, 13, 235, 28): '"""/etc/rc.local"""', (235, 30, 235, 36): '(493)'}, {}), "('/etc/rc.local', 493)", False, 'import os\n'), ((244, 4, 244, 55), 'boto3.setup_default_session', 'boto3.setup_default_session', (), '', False, 'import boto3\n'), ((37, 11, 37, 78), 'requests.get', 'requests.get', ({(37, 24, 37, 77): '"""http://169.254.169.254/latest/meta-data/instance-id"""'}, {}), "('http://169.254.169.254/latest/meta-data/instance-id')", False, 'import requests\n'), ((62, 11, 62, 78), 'requests.get', 'requests.get', ({(62, 24, 62, 77): '"""http://169.254.169.254/latest/meta-data/instance-id"""'}, {}), "('http://169.254.169.254/latest/meta-data/instance-id')", False, 'import requests\n'), ((74, 8, 74, 16), 'time.sleep', 'sleep', ({(74, 14, 74, 15): '(3)'}, {}), '(3)', False, 'from time import sleep\n'), ((102, 13, 102, 31), 'glob.glob', 'glob.glob', ({(102, 23, 102, 30): 'pattern'}, {}), '(pattern)', False, 'import glob\n'), ((129, 16, 129, 34), 'glob.glob', 'glob.glob', ({(129, 26, 129, 33): 'pattern'}, {}), '(pattern)', False, 'import glob\n'), ((140, 16, 140, 34), 'glob.glob', 'glob.glob', ({(140, 26, 140, 33): 'pattern'}, {}), '(pattern)', False, 'import glob\n'), ((160, 8, 160, 16), 'time.sleep', 'sleep', ({(160, 14, 160, 15): '(1)'}, {}), '(1)', False, 'from time import sleep\n'), ((184, 14, 184, 56), 're.match', 're.match', ({(184, 23, 184, 49): '"""(.* ){2}eth[0-9](?! $).*"""', (184, 51, 184, 55): 'line'}, {}), "('(.* ){2}eth[0-9](?! $).*', line)", False, 'import re\n'), ((202, 14, 202, 42), 're.match', 're.match', ({(202, 23, 202, 35): '"""default .*"""', (202, 37, 202, 41): 'line'}, {}), "('default .*', line)", False, 'import re\n'), ((226, 24, 226, 77), 're.sub', 're.sub', ({(226, 31, 226, 37): '"""eth."""', (226, 39, 226, 59): 'default_route_device', (226, 61, 226, 76): 'r_default_route'}, {}), "('eth.', default_route_device, r_default_route)", False, 'import re\n'), ((96, 20, 96, 38), 'glob.glob', 'glob.glob', ({(96, 30, 96, 37): 'pattern'}, {}), '(pattern)', False, 'import glob\n'), ((215, 28, 215, 75), 're.sub', 're.sub', ({(215, 35, 215, 41): '"""eth."""', (215, 43, 215, 57): "rule['device']", (215, 59, 215, 74): 'r_default_route'}, {}), "('eth.', rule['device'], r_default_route)", False, 'import re\n')] |
otoriocyber/Chronos | parsers/srum_parser.py | d70e22afed723c0ad4b7e449bd253e15351bada6 | import csv
import datetime
import random
import os
from parsers.parser_base import ParserBase
FILE_TIME_EPOCH = datetime.datetime(1601, 1, 1)
FILE_TIME_MICROSECOND = 10
def filetime_to_epoch_datetime(file_time):
if isinstance(file_time, int):
microseconds_since_file_time_epoch = file_time / FILE_TIME_MICROSECOND
else:
microseconds_since_file_time_epoch = int(file_time) / FILE_TIME_MICROSECOND
return FILE_TIME_EPOCH + datetime.timedelta(microseconds=microseconds_since_file_time_epoch)
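# Sanity check for the conversion above: 116444736000000000 FILETIME ticks
# (100 ns units since 1601-01-01) equal 11644473600 seconds, so
# filetime_to_epoch_datetime(116444736000000000) == datetime.datetime(1970, 1, 1).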
class SrumParser(ParserBase):
CSV_FIELDS = {
"Unknown1.csv": ["TimeStamp", "AppId", "UserId", "EndTime", "DurationMS"],
"Unknown2.csv": [],
"Unknown3.csv": [],
"Unknown4.csv": ["TimeStamp", "AppId", "UserId"],
"SruDbCheckpointTable.csv": [],
"SruDbIdMapTable.csv": [],
"Network Usage.csv": ["TimeStamp", "AppId", "UserId", "InterfaceLuid", "L2ProfileId", "BytesSent",
"BytesRecvd"],
"Network Connections.csv": [],
"Energy Usage.csv": [],
"Energy Usage(Long - Term).csv": [],
"Application Resources.csv": ["TimeStamp", "AppId", "UserId"],
"Application Resource Usage.csv": ["TimeStamp", "AppId", "UserId"]
}
PARSING_TOOL = r"Tools\ese-analyst-master\ese2csv.exe"
PARSE_COMMAND = "{parser_path} -o {output_path} -p srudb_plugin {srum_db} --plugin-args {software_hive}"
def __init__(self, temp, config):
super().__init__(config)
self.temp_result_path = temp
def parse(self, args):
srum_db, software_hive = args
output = r"{}\srum_{}".format(self.temp_result_path, random.randint(1, 1000000))
os.mkdir(output)
command = self.PARSE_COMMAND.format(parser_path=self.PARSING_TOOL, output_path=output, srum_db=srum_db,
software_hive=software_hive)
self._run_command(command)
for csv_file in os.listdir(output):
srum_records = []
full_path = os.path.join(output, csv_file)
headers = self.CSV_FIELDS.get(csv_file)
if not headers:
continue
if csv_file == "Unknown1.csv":
with open(full_path, "r") as f:
reader = csv.DictReader(f)
for line in reader:
cur_record = {}
endTime = line.get("EndTime")
duration = line.get("DurationMS")
if endTime and duration:
cur_record["time"] = filetime_to_epoch_datetime(int(endTime) - int(duration)).isoformat()
cur_record["EndTime"] = filetime_to_epoch_datetime(endTime).isoformat()
cur_record["DurationMS"] = duration
else:
cur_record["time"] = datetime.datetime(1970, 1, 1).isoformat()
cur_record["AppId"] = line.get("AppId")
cur_record["UserId"] = line.get("UserId")
srum_records.append(cur_record)
else:
with open(full_path, "r") as f:
reader = csv.DictReader(f)
for line in reader:
cur_record = {}
for header in headers:
if header == "TimeStamp":
cur_record["time"] = line.get("TimeStamp").replace(" ", "T")
line.pop("TimeStamp")
value = line.get(header)
if value:
if isinstance(value, bytes):
cur_record[header.lower().replace(" ", "_")] = value.decode()
elif str.isdigit(value):
cur_record[header.lower().replace(" ", "_")] = int(value)
else:
cur_record[header.lower().replace(" ", "_")] = value
else:
cur_record[header.lower().replace(" ", "_")] = ""
srum_records.append(cur_record)
self._write_results_list([("srum-{}".format(csv_file.split(".")[0].lower().replace(" ", "_")), srum_records)])
| [((7, 18, 7, 47), 'datetime.datetime', 'datetime.datetime', ({(7, 36, 7, 40): '1601', (7, 42, 7, 43): '1', (7, 45, 7, 46): '1'}, {}), '(1601, 1, 1)', False, 'import datetime\n'), ((16, 29, 16, 96), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((46, 8, 46, 24), 'os.mkdir', 'os.mkdir', ({(46, 17, 46, 23): 'output'}, {}), '(output)', False, 'import os\n'), ((51, 24, 51, 42), 'os.listdir', 'os.listdir', ({(51, 35, 51, 41): 'output'}, {}), '(output)', False, 'import os\n'), ((45, 61, 45, 87), 'random.randint', 'random.randint', ({(45, 76, 45, 77): '1', (45, 79, 45, 86): '1000000'}, {}), '(1, 1000000)', False, 'import random\n'), ((53, 24, 53, 54), 'os.path.join', 'os.path.join', ({(53, 37, 53, 43): 'output', (53, 45, 53, 53): 'csv_file'}, {}), '(output, csv_file)', False, 'import os\n'), ((60, 29, 60, 46), 'csv.DictReader', 'csv.DictReader', ({(60, 44, 60, 45): 'f'}, {}), '(f)', False, 'import csv\n'), ((78, 29, 78, 46), 'csv.DictReader', 'csv.DictReader', ({(78, 44, 78, 45): 'f'}, {}), '(f)', False, 'import csv\n'), ((70, 49, 70, 78), 'datetime.datetime', 'datetime.datetime', ({(70, 67, 70, 71): '1970', (70, 73, 70, 74): '1', (70, 76, 70, 77): '1'}, {}), '(1970, 1, 1)', False, 'import datetime\n')] |
Yoann-Vie/esgi-hearthstone | tests/csrf_tests/test_context_processor.py | 115d03426c7e8e80d89883b78ac72114c29bed12 | from django.http import HttpRequest
from django.middleware.csrf import _compare_salted_tokens as equivalent_tokens
from django.template.context_processors import csrf
from django.test import SimpleTestCase
class TestContextProcessor(SimpleTestCase):
def test_force_token_to_string(self):
request = HttpRequest()
test_token = '1bcdefghij2bcdefghij3bcdefghij4bcdefghij5bcdefghij6bcdefghijABCD'
request.META['CSRF_COOKIE'] = test_token
token = csrf(request).get('csrf_token')
self.assertTrue(equivalent_tokens(str(token), test_token))
| [((10, 18, 10, 31), 'django.http.HttpRequest', 'HttpRequest', ({}, {}), '()', False, 'from django.http import HttpRequest\n'), ((13, 16, 13, 29), 'django.template.context_processors.csrf', 'csrf', ({(13, 21, 13, 28): 'request'}, {}), '(request)', False, 'from django.template.context_processors import csrf\n')] |
marza-animation-planet/das | python/das/types.py | 1c7460dfdd5f138d8317c72900e90b23c0c28c7b | import sys
import das
import traceback
class ReservedNameError(Exception):
def __init__(self, name):
super(ReservedNameError, self).__init__("'%s' is a reserved name" % name)
class VersionError(Exception):
def __init__(self, msg=None, current_version=None, required_version=None):
fullmsg = "ersion error"
if required_version:
fullmsg += ": %s required" % required_version
else:
fullmsg += ": no requirements"
if current_version:
fullmsg += ", %s in use" % current_version
else:
fullmsg += ", no version info"
if msg:
fullmsg = msg + " v" + fullmsg
else:
fullmsg = "V" + fullmsg
super(VersionError, self).__init__(fullmsg)
class GlobalValidationDisabled(object):
def __init__(self, data):
super(GlobalValidationDisabled, self).__init__()
self.data = data
self.oldstate = None
def __enter__(self):
try:
self.oldstate = self.data._is_global_validation_enabled()
self.data._enable_global_validation(False)
except:
pass
return self.data
def __exit__(self, type, value, traceback):
if self.oldstate is not None:
self.data._enable_global_validation(self.oldstate)
self.oldstate = None
# Always re-raise exception
return False
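# Typical usage (illustrative sketch, field names are hypothetical): wrap bulk
# edits so per-container checks still run but the global validation callback is
# deferred until the block exits:
#
#   with GlobalValidationDisabled(data):
#      data.first_field = 1
#      data.second_field = 2   # _gvalidate() skips the global step here
#   # leaving the block restores the previous state; the next mutation
#   # (or an explicit _gvalidate()) runs global validation again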
class TypeBase(object):
@classmethod
def TransferGlobalValidator(klass, src, dst):
if isinstance(src, klass) and isinstance(dst, klass):
dst._set_validate_globally_cb(src._gvalidate)
return dst
@classmethod
def ValidateGlobally(klass, inst):
if isinstance(inst, klass):
inst._gvalidate()
return inst
def __init__(self, *args):
super(TypeBase, self).__init__()
self.__dict__["_schema_type"] = None
self.__dict__["_validate_globally_cb"] = None
self.__dict__["_global_validation_enabled"] = True
def _wrap(self, rhs):
st = self._get_schema_type()
rv = self.__class__(rhs if st is None else st._validate_self(rhs))
rv._set_schema_type(self._get_schema_type())
return rv
def _adapt_value(self, value, key=None, index=None):
return das.adapt_value(value, schema_type=self._get_schema_type(), key=key, index=index)
def _validate(self, schema_type=None):
if schema_type is None:
schema_type = self._get_schema_type()
if schema_type is not None:
schema_type.validate(self)
self._set_schema_type(schema_type)
def _gvalidate(self):
st = self._get_schema_type()
if st is not None:
# run self validation first (container validation)
st._validate_self(self)
if hasattr(self, "_is_global_validation_enabled"):
if not self._is_global_validation_enabled():
# Skip global validaton
return
gvcb = self._get_validate_globally_cb()
if gvcb is not None:
gvcb()
if hasattr(self, "_validate_globally"):
try:
getattr(self, "_validate_globally")()
except:
_, ei, tb = sys.exc_info()
ei = das.ValidationError("Global Validation Failed (%s)" % str(ei))
raise ei.__class__, ei, tb
def _get_schema_type(self):
return self.__dict__["_schema_type"]
def _set_schema_type(self, schema_type):
self.__dict__["_schema_type"] = schema_type
def _get_validate_globally_cb(self):
return self.__dict__["_validate_globally_cb"]
def _set_validate_globally_cb(self, cb):
self.__dict__["_validate_globally_cb"] = cb
def _is_global_validation_enabled(self):
return self.__dict__["_global_validation_enabled"]
def _enable_global_validation(self, on):
self.__dict__["_global_validation_enabled"] = on
class Tuple(TypeBase, tuple):
def __init__(self, *args):
# Funny, we need to declare *args here, but at the time we reach
# the core of the method, tuple is already created
# Maybe because tuple is immutable?
super(Tuple, self).__init__()
def __add__(self, y):
raise das.ValidationError("Expected a tuple of size %d, got %d" % (len(self), len(self) + len(y)))
def __getitem__(self, i):
return TypeBase.TransferGlobalValidator(self, super(Tuple, self).__getitem__(i))
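# Tuples are fixed-size containers, so concatenation is rejected, e.g. (illustrative):
#   Tuple((1, 2)) + (3,)  ->  raises das.ValidationError("Expected a tuple of size 2, got 3")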
class Sequence(TypeBase, list):
def __init__(self, *args):
TypeBase.__init__(self)
list.__init__(self, *args)
def _wrap_index(self, i, n=None, clamp=False):
if i < 0:
if n is None:
n = len(self)
ii = i + n
if ii < 0:
if clamp:
return 0
else:
raise IndexError("list index out of range")
else:
return ii
else:
return i
def __imul__(self, n):
oldlen = len(self)
super(Sequence, self).__imul__(n)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Sequence, self).__setslice__(oldlen, len(self), [])
except Exception, e:
print("das.types.Sequence.__imul__: Failed to recover sequence data (%s)" % e)
raise ec, ei, tb
return self
def __mul__(self, n):
rv = self[:]
rv.__imul__(n)
return rv
def __rmul__(self, n):
return self.__mul__(n)
def __iadd__(self, y):
n = len(self)
super(Sequence, self).__iadd__([self._adapt_value(x, index=n+i) for i, x in enumerate(y)])
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Sequence, self).__setslice__(n, len(self), [])
except Exception, e:
print("das.types.Sequence.__iadd__: Failed to recover sequence data (%s)" % e)
raise ec, ei, tb
return self
def __add__(self, y):
rv = self[:]
rv.__iadd__(y)
return rv
def __setitem__(self, i, y):
super(Sequence, self).__setitem__(i, self._adapt_value(y, index=i))
self._gvalidate()
def __getitem__(self, i):
return TypeBase.TransferGlobalValidator(self, super(Sequence, self).__getitem__(i))
def __delitem__(self, i):
ii = self._wrap_index(i, clamp=False)
item = super(Sequence, self).__getitem__(ii)
super(Sequence, self).__delitem__(i)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Sequence, self).insert(ii, item)
except Exception, e:
print("das.types.Sequence.__delitem__: Failed to recover sequence data (%s)" % e)
raise ec, ei, tb
def __iter__(self):
for item in super(Sequence, self).__iter__():
yield TypeBase.TransferGlobalValidator(self, item)
def __setslice__(self, i, j, y):
oldvals = super(Sequence, self).__getslice__(i, j)
newvals = [self._adapt_value(x, index=i+k) for k, x in enumerate(y)]
super(Sequence, self).__setslice__(i, j, newvals)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
ii = self._wrap_index(i, clamp=True)
super(Sequence, self).__setslice__(ii, ii+len(newvals), oldvals)
except Exception, e:
print("das.types.Sequence.__setslice__: Failed to recover sequence data (%s)" % e)
raise ec, ei, tb
def __getslice__(self, i, j):
return self._wrap(super(Sequence, self).__getslice__(i, j))
def __delslice__(self, i, j):
oldvals = super(Sequence, self).__getslice__(i, j)
super(Sequence, self).__delslice__(i, j)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
ii = self._wrap_index(i, clamp=True)
super(Sequence, self).__setslice__(ii, ii, oldvals)
except Exception, e:
print("das.types.Sequence.__setslice__: Failed to recover sequence data (%s)" % e)
raise ec, ei, tb
# def __contains__(self, y):
# try:
# _v = self._adapt_value(y, index=0)
# return super(Sequence, self).__contains__(_v)
# except:
# return False
def index(self, y):
return super(Sequence, self).index(self._adapt_value(y, index=0))
def insert(self, i, y):
super(Sequence, self).insert(i, self._adapt_value(y, index=i))
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Sequence, self).pop(self._wrap_index(i, n=len(self)-1, clamp=True))
except Exception, e:
print("das.types.Sequence.insert: Failed to recover sequence data (%s)" % e)
raise ec, ei, tb
def append(self, y):
n = len(self)
super(Sequence, self).append(self._adapt_value(y, index=n))
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Sequence, self).pop()
except Exception, e:
print("das.types.Sequence.append: Failed to recover sequence data (%s)" % e)
raise ec, ei, tb
def extend(self, y):
newvals = [self._adapt_value(x, index=len(self)+i) for i, x in enumerate(y)]
super(Sequence, self).extend(newvals)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Sequence, self).__setslice__(len(self) - len(newvals), len(self), [])
except Exception, e:
print("das.types.Sequence.extend: Failed to recover sequence data (%s)" % e)
raise ec, ei, tb
def pop(self, *args):
rv = super(Sequence, self).pop(*args)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
if args:
super(Sequence, self).insert(self._wrap_index(args[0], n=len(self)+1, clamp=False), rv)
else:
super(Sequence, self).append(rv)
except Exception, e:
print("das.types.Sequence.pop: Failed to recover sequence data (%s)" % e)
raise ec, ei, tb
return rv
def remove(self, y):
idx = self.index(y)
item = self[idx]
super(Sequence, self).remove(item)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Sequence, self).insert(idx, item)
except Exception, e:
print("das.types.Sequence.remove: Failed to recover sequence data (%s)" % e)
raise ec, ei, tb
class Set(TypeBase, set):
def __init__(self, args):
TypeBase.__init__(self)
set.__init__(self, args)
def __iand__(self, y):
oldvals = super(Set, self).copy()
super(Set, self).__iand__(set([self._adapt_value(x, index=i) for i, x in enumerate(y)]))
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Set, self).clear()
super(Set, self).__ior__(oldvals)
except Exception, e:
print("das.types.Set.__iand__: Failed to recover set data (%s)" % e)
raise ec, ei, tb
return self
def __and__(self, y):
rv = self.copy()
rv &= y
return rv
def __rand__(self, y):
return self.__and__(y)
def __isub__(self, y):
oldvals = super(Set, self).copy()
super(Set, self).__isub__(set([self._adapt_value(x, index=i) for i, x in enumerate(y)]))
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Set, self).clear()
super(Set, self).__ior__(oldvals)
except Exception, e:
print("das.types.Set.__isub__: Failed to recover set data (%s)" % e)
raise ec, ei, tb
return self
def __sub__(self, y):
rv = self.copy()
rv -= y
return rv
def __rsub__(self, y):
return self.__sub__(y)
def __ior__(self, y):
oldvals = super(Set, self).copy()
super(Set, self).__ior__(set([self._adapt_value(x, index=i) for i, x in enumerate(y)]))
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Set, self).clear()
super(Set, self).__ior__(oldvals)
except Exception, e:
print("das.types.Set.__ior__: Failed to recover set data (%s)" % e)
raise ec, ei, tb
return self
def __or__(self, y):
rv = self.copy()
rv |= y
return rv
def __ror__(self, y):
return self.__or__(y)
def __ixor__(self, y):
oldvals = super(Set, self).copy()
super(Set, self).__ixor__(set([self._adapt_value(x, index=i) for i, x in enumerate(y)]))
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Set, self).clear()
super(Set, self).__ior__(oldvals)
except Exception, e:
print("das.types.Set.__ixor__: Failed to recover set data (%s)" % e)
raise ec, ei, tb
return self
def __xor__(self, y):
rv = self.copy()
rv ^= y
return rv
def __rxor__(self, y):
rv = self.copy()
rv ^= y
return rv
def __cmp__(self, oth):
# base set class doesn't implement __cmp__
# but we need it for some other purpose
if len(self.symmetric_difference(oth)) == 0:
return 0
elif len(self) <= len(oth):
return -1
else:
return 1
def __iter__(self):
for item in super(Set, self).__iter__():
yield TypeBase.TransferGlobalValidator(self, item)
def clear(self):
oldvals = super(Set, self).copy()
super(Set, self).clear()
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Set, self).__ior__(oldvals)
except Exception, e:
print("das.types.Set.clear: Failed to recover set data (%s)" % e)
raise ec, ei, tb
def copy(self):
return self._wrap(self)
def add(self, e):
ae = self._adapt_value(e, index=len(self))
if ae in self:
return
super(Set, self).add(ae)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Set, self).remove(ae)
except Exception, e:
print("das.types.Set.add: Failed to recover set data (%s)" % e)
raise ec, ei, tb
def update(self, *args):
added = set()
for y in args:
lst = [self._adapt_value(x, index=i) for i, x in enumerate(y)]
for item in lst:
if item in self:
continue
super(Set, self).add(item)
added.add(item)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
for item in added:
super(Set, self).remove(item)
except Exception, e:
print("das.types.Set.update: Failed to recover set data (%s)" % e)
raise ec, ei, tb
def pop(self):
item = super(Set, self).pop()
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Set, self).add(item)
except Exception, e:
print("das.types.Set.pop: Failed to recover set data (%s)" % e)
raise ec, ei, tb
return item
def difference(self, rhs):
return self.__sub__(rhs)
def union(self, rhs):
return self.__or__(rhs)
def intersection(self, rhs):
return self.__and__(rhs)
def symmetric_difference(self, rhs):
return self.__xor__(rhs)
class Dict(TypeBase, dict):
def __init__(self, *args, **kwargs):
TypeBase.__init__(self)
dict.__init__(self, *args, **kwargs)
def _adapt_key(self, key):
st = self._get_schema_type()
return (key if st is None else das.adapt_value(key, schema_type=st.ktype))
def __setitem__(self, k, v):
k = self._adapt_key(k)
wasset = (k in self)
oldval = (self[k] if wasset else None)
super(Dict, self).__setitem__(k, self._adapt_value(v, key=k))
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
if wasset:
super(Dict, self).__setitem__(k, oldval)
else:
del(self[k])
except Exception, e:
print("das.types.Dict.__setitem__: Failed to recover dict data (%s)" % e)
raise ec, ei, tb
def __getitem__(self, k):
return TypeBase.TransferGlobalValidator(self, super(Dict, self).__getitem__(self._adapt_key(k)))
def __delitem__(self, k):
_k = self._adapt_key(k)
_v = super(Dict, self).__getitem__(_k)
super(Dict, self).__delitem__(_k)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Dict, self).__setitem__(_k, _v)
except Exception, e:
print("das.types.Dict.popitem: Failed to recover dict data (%s)" % e)
raise ec, ei, tb
# def __contains__(self, k):
# try:
# _k = self._adapt_key(k)
# return super(Dict, self).__contains__(_k)
# except:
# return False
def setdefault(self, *args):
nargs = len(args)
if nargs > 2:
raise TypeError("setdefault expected at most 2 arguments, got %d" % nargs)
if nargs == 2:
args = (args[0], self._adapt_value(args[1], key=args[0]))
super(Dict, self).setdefault(*args)
def copy(self):
return self._wrap(self)
def update(self, *args, **kwargs):
oldvals = {}
remvals = set()
if len(args) == 1:
a0 = args[0]
if hasattr(a0, "keys"):
for k in a0.keys():
k = self._adapt_key(k)
if k in self:
oldvals[k] = self[k]
else:
remvals.add(k)
self[k] = self._adapt_value(a0[k], key=k)
else:
for k, v in a0:
k = self._adapt_key(k)
if k in self:
oldvals[k] = self[k]
else:
remvals.add(k)
self[k] = self._adapt_value(v, key=k)
elif len(args) > 1:
raise Exception("update expected at most 1 arguments, got %d" % len(args))
for k, v in kwargs.iteritems():
k = self._adapt_key(k)
if k in self:
if not k in oldvals:
oldvals[k] = self[k]
else:
remvals.add(k)
self[k] = self._adapt_value(v, key=k)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
for k in remvals:
super(Dict, self).__delitem__(k)
for k, v in oldvals.iteritems():
super(Dict, self).__setitem__(k, v)
except Exception, e:
print("das.types.Dict.update: Failed to recover dict data (%s)" % e)
raise ec, ei, tb
def pop(self, k, *args):
_k = self._adapt_key(k)
_v = super(Dict, self).pop(_k, *args)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
# if _k i not defined but a default value is provided, we should not reach here
# as dict is actually unchanged
# -> no need to check if _k was a valid key
super(Dict, self).__setitem__(_k, _v)
except Exception, e:
print("das.types.Dict.popitem: Failed to recover dict data (%s)" % e)
raise ec, ei, tb
return _v
def popitem(self):
item = super(Dict, self).popitem()
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Dict, self).__setitem__(item[0], item[1])
except Exception, e:
print("das.types.Dict.popitem: Failed to recover dict data (%s)" % e)
raise ec, ei, tb
return item
def clear(self):
items = super(Dict, self).items()
super(Dict, self).clear()
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Dict, self).update(items)
except Exception, e:
print("das.types.Dict.clear: Failed to recover dict data (%s)" % e)
raise ec, ei, tb
def itervalues(self):
for v in super(Dict, self).itervalues():
yield TypeBase.TransferGlobalValidator(self, v)
def values(self):
return [x for x in self.itervalues()]
def iteritems(self):
for k, v in super(Dict, self).iteritems():
yield k, TypeBase.TransferGlobalValidator(self, v)
def items(self):
return [x for x in self.iteritems()]
class Struct(TypeBase):
def __init__(self, *args, **kwargs):
TypeBase.__init__(self)
self.__dict__["_dict"] = {}
self._update(*args, **kwargs)
def __getattr__(self, k):
try:
k = self._get_alias(k)
return TypeBase.TransferGlobalValidator(self, self._dict[k])
except KeyError:
if hasattr(self._dict, k):
# Look for an override method of the same name prefixed by '_' in current class
k2 = '_' + k
if hasattr(self, k2):
#print("Forward '%s' to %s class '%s'" % (k, self.__class__.__name__, k2))
return getattr(self, k2)
else:
#print("Forward '%s' to dict class '%s'" % (k, k))
return getattr(self._dict, k)
else:
#raise AttributeError("'Struct' has no attribute '%s' (dict %s)" % (k, "has" if hasattr(self._dict, k) else "hasn't"))
return self.__getattribute__(k)
def __setattr__(self, k, v):
# Special case for __class__ member that we may want to modify for
# to enable dynamic function set binding
if k == "__class__":
super(Struct, self).__setattr__(k, v)
else:
k = self._get_alias(k)
self._check_reserved(k)
wasset = (k in self._dict)
oldval = (self._dict[k] if wasset else None)
self._dict[k] = self._adapt_value(v, key=k)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
if wasset:
self._dict[k] = oldval
else:
del(self._dict[k])
except Exception, e:
print("das.types.Struct.__setattr__: Failed to recover struct data (%s)" % e)
raise ec, ei, tb
def __delattr__(self, k):
k = self._get_alias(k)
oldval = self._dict.get(k, None)
self._dict.__delitem__(k)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
# Note: we can reach here only if k was a valid key (otherwise __delitem__(k) would fail)
try:
self._dict[k] = oldval
except Exception, e:
print("das.types.Struct.__delattr__: Failed to recover struct data (%s)" % e)
raise ec, ei, tb
def __getitem__(self, k):
k = self._get_alias(k)
return TypeBase.TransferGlobalValidator(self, self._dict.__getitem__(k))
def __setitem__(self, k, v):
k = self._get_alias(k)
self._check_reserved(k)
wasset = (k in self._dict)
oldval = (self._dict[k] if wasset else None)
self._dict.__setitem__(k, self._adapt_value(v, key=k))
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
if wasset:
self._dict[k] = oldval
else:
del(self._dict[k])
except Exception, e:
print("das.types.Struct.__setitem__: Failed to recover struct data (%s)" % e)
raise ec, ei, tb
def __delitem__(self, k):
_k = k
k = self._get_alias(k)
oldval = self._dict.get(k, None)
self._dict.__delitem__(k)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
# Note: we can reach here only if k was a valid key (otherwise __delitem__(k) would fail)
try:
self._dict[k] = oldval
except Exception, e:
print("das.types.Struct.__delitem__: Failed to recover struct data (%s)" % e)
raise ec, ei, tb
def __contains__(self, k):
return self._dict.__contains__(self._get_alias(k))
def __cmp__(self, oth):
return self._dict.__cmp__(oth._dict if isinstance(oth, Struct) else oth)
def __eq__(self, oth):
return self._dict.__eq__(oth._dict if isinstance(oth, Struct) else oth)
def __ge__(self, oth):
return self._dict.__ge__(oth._dict if isinstance(oth, Struct) else oth)
def __le__(self, oth):
return self._dict.__le__(oth._dict if isinstance(oth, Struct) else oth)
def __gt__(self, oth):
return self._dict.__gt__(oth._dict if isinstance(oth, Struct) else oth)
def __lt__(self, oth):
return self._dict.__lt__(oth._dict if isinstance(oth, Struct) else oth)
def __iter__(self):
return self._dict.__iter__()
def __len__(self):
return self._dict.__len__()
def __str__(self):
return self._dict.__str__()
def __repr__(self):
return self._dict.__repr__()
# Override of dict.has_key
def _has_key(self, k):
return self._dict.has_key(self._get_alias(k))
# Override of dict.pop
def _pop(self, k, *args):
_k = k
k = self._get_alias(k)
oldval = self._dict.get(k, None)
retval = self._dict.pop(k, *args)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
self._dict[k] = oldval
except Exception, e:
print("das.types.Struct.pop: Failed to recover struct data (%s)" % e)
raise ec, ei, tb
return retval
# Override of dict.popitem
def _popitem(self):
k, v = self._dict.popitem()
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
self._dict[k] = v
except Exception, e:
print("das.types.Struct.popitem: Failed to recover struct data (%s)" % e)
raise ec, ei, tb
# Override of dict.clear
def _clear(self):
items = self._dict.items()
self._dict.clear()
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
self._dict.update(items)
except Exception, e:
print("das.types.Struct.clear: Failed to recover struct data (%s)" % e)
raise ec, ei, tb
# Override of dict.copy
def _copy(self):
return self._wrap(self)
# Override of dict.setdefault
def _setdefault(self, *args):
nargs = len(args)
if nargs > 2:
raise TypeError("_setdefault expected at most 2 arguments, got %d" % nargs)
if nargs >= 1:
self._check_reserved(args[0])
if nargs == 2:
args = (args[0], self._adapt_value(args[1], key=args[0]))
self._dict.setdefault(*args)
# Override of dict.update
def _update(self, *args, **kwargs):
if len(args) > 1:
raise Exception("update expected at most 1 arguments, got %d" % len(args))
oldvals = self._dict.copy()
try:
if len(args) == 1:
a0 = args[0]
if hasattr(a0, "keys"):
for k in a0.keys():
k = self._get_alias(k)
self._check_reserved(k)
self._dict[k] = self._adapt_value(a0[k], key=k)
else:
for k, v in a0:
k = self._get_alias(k)
self._check_reserved(k)
self._dict[k] = self._adapt_value(v, key=k)
for k, v in kwargs.iteritems():
k = self._get_alias(k)
self._check_reserved(k)
self._dict[k] = self._adapt_value(v, key=k)
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
self._dict.clear()
self._dict.update(oldvals)
except Exception, e:
print("das.types.Struct.update: Failed to recover struct data (%s)" % e)
raise ec, ei, tb
def _get_alias(self, k):
st = self._get_schema_type()
if st is not None and st.has_key(k):
aliasname = das.schematypes.Alias.Name(st[k])
if aliasname is not None:
# if isinstance(st[k], das.schematypes.Deprecated):
# message = ("[das] Field %s is deprecated, use %s instead" % (repr(k), repr(aliasname)))
# das.print_once(message)
return aliasname
return k
def _check_reserved(self, k):
if hasattr(self.__class__, k):
raise ReservedNameError(k)
elif hasattr(self._dict, k):
k2 = "_" + k
if hasattr(self, k2):
# don't need to create forwarding attribute (set __getattr__)
return
if k2 in self.__dict__:
if self.__dict__[k2] != getattr(self._dict, k):
raise ReservedNameError(k)
else:
msg = "[das] %s's '%s(...)' method conflicts with data field '%s', use '_%s(...)' to call it instead" % (type(self).__name__, k, k, k)
st = self._get_schema_type()
if st is not None:
n = das.get_schema_type_name(st)
if n:
msg = "[%s] %s" % (n, msg)
das.print_once(msg)
self.__dict__[k2] = getattr(self._dict, k)
def ordered_keys(self):
return filter(lambda x: x in self, self._get_schema_type().ordered_keys())
def _itervalues(self):
for v in self._dict.itervalues():
yield TypeBase.TransferGlobalValidator(self, v)
def _values(self):
return [x for x in self.itervalues()]
def _iteritems(self):
for k, v in self._dict.iteritems():
yield k, TypeBase.TransferGlobalValidator(self, v)
def _items(self):
return [x for x in self.iteritems()]
| [] |
AliabbasMerchant/fileTrackAndBackup | track.py | 8cdf97be58c69061e1f60c08f89b524d91f8c17d | #! /usr/bin/python3
from help import *
import time
# short-forms are used, so as to reduce the .json file size
# t : type - d or f
# d : directory
# f : file
# ts : timestamp
# dirs : The dictionary containing info about directory contents
# time : edit time of the file/folder
# s : size of the file/folder
# p : full path of the file/folder
# n : name of the main file/folder in the .json file
# i : info about the contents in the .json file
# folder = {'t': 'd', 's': get_size(dir_dict), 'p': full_path + '/' + entity, 'time': get_time(stats), 'dirs': dir_dict}
# file = {'t': 'f', 's': stats.st_size, 'p': full_path + '/' + entity, 'time': get_time(stats)}
# info = {'t': 'd', 's': size, 'p': base_path, 'time': get_time(stats), 'dirs': info}
# write = {'n': examine_name, 'ts': time.time(), 'i': info}
# info = {'t': 'f', 's': stats.st_size, 'p': base_path, 'time': get_time(stats)}
# write = {'n': examine_name, 'ts': time.time(), 'i': info}
no_of_files = 0
no_of_dirs = 0
examine_name = ''
save_filename = ''
_base_path = None
_ignore = False
errors = []
def get_save_config(base_path: str) -> None:
global examine_name, save_filename
examine_name = base_path.strip().split('/')[-1]
save_filename = examine_name + '.json'
if not os.path.lexists(constants.save_folder_name):
execute_bash("mkdir " + constants.save_folder_name)
def get_info_dict(sub_path: str) -> dict:
global no_of_files, no_of_dirs, _base_path, _ignore, errors
full_path = _base_path + '/' + sub_path
full_path = full_path.strip()
if full_path.endswith('/'):
full_path = full_path[:-1]
edit_dict = dict()
try:
entity_list = os.listdir(full_path)
for entity in entity_list:
ignore_it = False
if _ignore and to_be_ignored(full_path + '/' + entity): # ignoring cache temp etc files
ignore_it = True
if not ignore_it:
try:
stats = os.stat(full_path + '/' + entity)
if not os.path.islink(full_path + '/' + entity):
if os.path.isdir(full_path + '/' + entity):
no_of_dirs += 1
new_sub_path = sub_path + '/' + entity
dir_dict = get_info_dict(new_sub_path)
edit_dict[entity] = {'t': 'd', 's': get_size(dir_dict), 'p': full_path + '/' + entity,
'time': get_time(stats), 'dirs': dir_dict}
if os.path.isfile(full_path + '/' + entity):
no_of_files += 1
edit_dict[entity] = {'t': 'f', 's': stats.st_size, 'p': full_path + '/' + entity,
'time': get_time(stats)}
except FileNotFoundError:
errors.append(full_path + '/' + entity)
except PermissionError:
errors.append(full_path)
return edit_dict
def track(base_path: str, dir_path: str, output: bool = False, ignore: bool = False) -> list:
global _base_path, no_of_dirs, no_of_files, save_filename, _ignore, errors
no_of_dirs = 0
no_of_files = 0
print("Tracking...")
_base_path = base_path
_ignore = ignore
get_save_config(base_path)
if _ignore:
get_ignore_list()
if os.path.isdir(base_path):
info = get_info_dict('')
size = get_size(info)
no_of_dirs += 1
stats = os.stat(base_path)
info = {'t': 'd', 's': size, 'p': base_path, 'time': get_time(stats), 'dirs': info}
write = {'n': examine_name, 'ts': time.time(), 'i': info}
write_to_json_file(write, constants.save_folder_name + "/" + save_filename)
if output:
print("Successfully analysed the folder " + base_path)
print("Found {} folder(s)".format(no_of_dirs))
print("Found {} file(s)".format(no_of_files))
print("The directory is of size {}".format(get_size_format(size)))
print("A detailed report can be found using the 'file_tb.py print [FILE/FOLDER]' command ")
else:
no_of_files += 1
stats = os.stat(base_path)
info = {'t': 'f', 's': stats.st_size, 'p': base_path, 'time': get_time(stats)}
write = {'n': examine_name, 'ts': time.time(), 'i': info}
write_to_json_file(write, constants.save_folder_name + "/" + save_filename)
if output:
print("Successfully analysed the file")
print("The file is of size {}".format(get_size_format(stats.st_size)))
print("A detailed report can be found using the 'file_tb.py print [FILE/FOLDER]' command ")
# pp(info)
return errors
if __name__ == '__main__':
track(os.getcwd(), os.getcwd(), output=True)
| [((94, 42, 94, 53), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((106, 42, 106, 53), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n')] |
Kvarnefalk/llvm-project | clang/tools/scan-build-py/libscanbuild/analyze.py | 8b5f5798aaa24074609d151ea906d114cf5337c2 | # -*- coding: utf-8 -*-
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
""" This module implements the 'scan-build' command API.
To run the static analyzer against a build is done in multiple steps:
-- Intercept: capture the compilation command during the build,
-- Analyze: run the analyzer against the captured commands,
-- Report: create a cover report from the analyzer outputs. """
import re
import os
import os.path
import json
import logging
import multiprocessing
import tempfile
import functools
import subprocess
import contextlib
import datetime
import shutil
import glob
from collections import defaultdict
from libscanbuild import command_entry_point, compiler_wrapper, \
wrapper_environment, run_build, run_command, CtuConfig
from libscanbuild.arguments import parse_args_for_scan_build, \
parse_args_for_analyze_build
from libscanbuild.intercept import capture
from libscanbuild.report import document
from libscanbuild.compilation import split_command, classify_source, \
compiler_language
from libscanbuild.clang import get_version, get_arguments, get_triple_arch, \
ClangErrorException
from libscanbuild.shell import decode
__all__ = ['scan_build', 'analyze_build', 'analyze_compiler_wrapper']
COMPILER_WRAPPER_CC = 'analyze-cc'
COMPILER_WRAPPER_CXX = 'analyze-c++'
CTU_EXTDEF_MAP_FILENAME = 'externalDefMap.txt'
CTU_TEMP_DEFMAP_FOLDER = 'tmpExternalDefMaps'
@command_entry_point
def scan_build():
""" Entry point for scan-build command. """
args = parse_args_for_scan_build()
# will re-assign the report directory as new output
with report_directory(
args.output, args.keep_empty, args.output_format) as args.output:
# Run against a build command. there are cases, when analyzer run
# is not required. But we need to set up everything for the
# wrappers, because 'configure' needs to capture the CC/CXX values
# for the Makefile.
if args.intercept_first:
# Run build command with intercept module.
exit_code = capture(args)
# Run the analyzer against the captured commands.
if need_analyzer(args.build):
govern_analyzer_runs(args)
else:
# Run build command and analyzer with compiler wrappers.
environment = setup_environment(args)
exit_code = run_build(args.build, env=environment)
# Cover report generation and bug counting.
number_of_bugs = document(args)
# Set exit status as it was requested.
return number_of_bugs if args.status_bugs else exit_code
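# Rough command-line shape of the flow above (flag spellings assumed from the
# argument names used in this module: args.output, args.intercept_first,
# args.status_bugs):
#   scan-build -o /tmp/reports --intercept-first --status-bugs make -j4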
@command_entry_point
def analyze_build():
""" Entry point for analyze-build command. """
args = parse_args_for_analyze_build()
# will re-assign the report directory as new output
with report_directory(args.output, args.keep_empty, args.output_format) as args.output:
# Run the analyzer against a compilation db.
govern_analyzer_runs(args)
# Cover report generation and bug counting.
number_of_bugs = document(args)
# Set exit status as it was requested.
return number_of_bugs if args.status_bugs else 0
def need_analyzer(args):
""" Check the intent of the build command.
When static analyzer run against project configure step, it should be
silent and no need to run the analyzer or generate report.
To run `scan-build` against the configure step might be necessary,
when compiler wrappers are used. That's the moment when build setup
check the compiler and capture the location for the build process. """
return len(args) and not re.search(r'configure|autogen', args[0])
def prefix_with(constant, pieces):
""" From a sequence create another sequence where every second element
is from the original sequence and the odd elements are the prefix.
eg.: prefix_with(0, [1,2,3]) creates [0, 1, 0, 2, 0, 3] """
return [elem for piece in pieces for elem in [constant, piece]]
def get_ctu_config_from_args(args):
""" CTU configuration is created from the chosen phases and dir. """
return (
CtuConfig(collect=args.ctu_phases.collect,
analyze=args.ctu_phases.analyze,
dir=args.ctu_dir,
extdef_map_cmd=args.extdef_map_cmd)
if hasattr(args, 'ctu_phases') and hasattr(args.ctu_phases, 'dir')
else CtuConfig(collect=False, analyze=False, dir='', extdef_map_cmd=''))
def get_ctu_config_from_json(ctu_conf_json):
""" CTU configuration is created from the chosen phases and dir. """
ctu_config = json.loads(ctu_conf_json)
# Recover namedtuple from json when coming from analyze-cc or analyze-c++
return CtuConfig(collect=ctu_config[0],
analyze=ctu_config[1],
dir=ctu_config[2],
extdef_map_cmd=ctu_config[3])
def create_global_ctu_extdef_map(extdef_map_lines):
""" Takes iterator of individual external definition maps and creates a
global map keeping only unique names. We leave conflicting names out of
CTU.
:param extdef_map_lines: Contains the id of a definition (mangled name) and
the originating source (the corresponding AST file) name.
:type extdef_map_lines: Iterator of str.
:returns: Mangled name - AST file pairs.
:rtype: List of (str, str) tuples.
"""
mangled_to_asts = defaultdict(set)
for line in extdef_map_lines:
mangled_name, ast_file = line.strip().split(' ', 1)
mangled_to_asts[mangled_name].add(ast_file)
mangled_ast_pairs = []
for mangled_name, ast_files in mangled_to_asts.items():
if len(ast_files) == 1:
mangled_ast_pairs.append((mangled_name, next(iter(ast_files))))
return mangled_ast_pairs
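# Illustrative example (hypothetical definition ids and AST paths): for the lines
#   ['foo /ast/x86_64/a.cpp.ast', 'bar /ast/x86_64/b.cpp.ast', 'foo /ast/x86_64/c.cpp.ast']
# only ('bar', '/ast/x86_64/b.cpp.ast') is kept, because 'foo' is defined in two
# AST files and conflicting names are left out of CTU.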
def merge_ctu_extdef_maps(ctudir):
""" Merge individual external definition maps into a global one.
As the collect phase runs parallel on multiple threads, all compilation
units are separately mapped into a temporary file in CTU_TEMP_DEFMAP_FOLDER.
These definition maps contain the mangled names and the source
(AST generated from the source) which had their definition.
These files should be merged at the end into a global map file:
CTU_EXTDEF_MAP_FILENAME."""
def generate_extdef_map_lines(extdefmap_dir):
""" Iterate over all lines of input files in a determined order. """
files = glob.glob(os.path.join(extdefmap_dir, '*'))
files.sort()
for filename in files:
with open(filename, 'r') as in_file:
for line in in_file:
yield line
def write_global_map(arch, mangled_ast_pairs):
""" Write (mangled name, ast file) pairs into final file. """
extern_defs_map_file = os.path.join(ctudir, arch,
CTU_EXTDEF_MAP_FILENAME)
with open(extern_defs_map_file, 'w') as out_file:
for mangled_name, ast_file in mangled_ast_pairs:
out_file.write('%s %s\n' % (mangled_name, ast_file))
triple_arches = glob.glob(os.path.join(ctudir, '*'))
for triple_path in triple_arches:
if os.path.isdir(triple_path):
triple_arch = os.path.basename(triple_path)
extdefmap_dir = os.path.join(ctudir, triple_arch,
CTU_TEMP_DEFMAP_FOLDER)
extdef_map_lines = generate_extdef_map_lines(extdefmap_dir)
mangled_ast_pairs = create_global_ctu_extdef_map(extdef_map_lines)
write_global_map(triple_arch, mangled_ast_pairs)
# Remove all temporary files
shutil.rmtree(extdefmap_dir, ignore_errors=True)
def run_analyzer_parallel(args):
""" Runs the analyzer against the given compilation database. """
def exclude(filename, directory):
""" Return true when any excluded directory prefix the filename. """
if not os.path.isabs(filename):
            # filename is either absolute or relative to directory. Make it
            # absolute, since 'args.excludes' contains absolute paths.
filename = os.path.normpath(os.path.join(directory, filename))
return any(re.match(r'^' + exclude_directory, filename)
for exclude_directory in args.excludes)
consts = {
'clang': args.clang,
'output_dir': args.output,
'output_format': args.output_format,
'output_failures': args.output_failures,
'direct_args': analyzer_params(args),
'force_debug': args.force_debug,
'ctu': get_ctu_config_from_args(args)
}
logging.debug('run analyzer against compilation database')
with open(args.cdb, 'r') as handle:
generator = (dict(cmd, **consts)
for cmd in json.load(handle) if not exclude(
cmd['file'], cmd['directory']))
# when verbose output requested execute sequentially
pool = multiprocessing.Pool(1 if args.verbose > 2 else None)
for current in pool.imap_unordered(run, generator):
if current is not None:
# display error message from the static analyzer
for line in current['error_output']:
logging.info(line.rstrip())
pool.close()
pool.join()
def govern_analyzer_runs(args):
""" Governs multiple runs in CTU mode or runs once in normal mode. """
ctu_config = get_ctu_config_from_args(args)
# If we do a CTU collect (1st phase) we remove all previous collection
# data first.
if ctu_config.collect:
shutil.rmtree(ctu_config.dir, ignore_errors=True)
# If the user asked for a collect (1st) and analyze (2nd) phase, we do an
# all-in-one run where we deliberately remove collection data before and
# also after the run. If the user asks only for a single phase data is
# left so multiple analyze runs can use the same data gathered by a single
# collection run.
if ctu_config.collect and ctu_config.analyze:
# CTU strings are coming from args.ctu_dir and extdef_map_cmd,
# so we can leave it empty
args.ctu_phases = CtuConfig(collect=True, analyze=False,
dir='', extdef_map_cmd='')
run_analyzer_parallel(args)
merge_ctu_extdef_maps(ctu_config.dir)
args.ctu_phases = CtuConfig(collect=False, analyze=True,
dir='', extdef_map_cmd='')
run_analyzer_parallel(args)
shutil.rmtree(ctu_config.dir, ignore_errors=True)
else:
# Single runs (collect or analyze) are launched from here.
run_analyzer_parallel(args)
if ctu_config.collect:
merge_ctu_extdef_maps(ctu_config.dir)
def setup_environment(args):
""" Set up environment for build command to interpose compiler wrapper. """
environment = dict(os.environ)
environment.update(wrapper_environment(args))
environment.update({
'CC': COMPILER_WRAPPER_CC,
'CXX': COMPILER_WRAPPER_CXX,
'ANALYZE_BUILD_CLANG': args.clang if need_analyzer(args.build) else '',
'ANALYZE_BUILD_REPORT_DIR': args.output,
'ANALYZE_BUILD_REPORT_FORMAT': args.output_format,
'ANALYZE_BUILD_REPORT_FAILURES': 'yes' if args.output_failures else '',
'ANALYZE_BUILD_PARAMETERS': ' '.join(analyzer_params(args)),
'ANALYZE_BUILD_FORCE_DEBUG': 'yes' if args.force_debug else '',
'ANALYZE_BUILD_CTU': json.dumps(get_ctu_config_from_args(args))
})
return environment
@command_entry_point
def analyze_compiler_wrapper():
""" Entry point for `analyze-cc` and `analyze-c++` compiler wrappers. """
return compiler_wrapper(analyze_compiler_wrapper_impl)
def analyze_compiler_wrapper_impl(result, execution):
""" Implements analyzer compiler wrapper functionality. """
    # don't run the analyzer when the compilation fails or when it's not requested.
if result or not os.getenv('ANALYZE_BUILD_CLANG'):
return
    # check whether this is a compilation at all
compilation = split_command(execution.cmd)
if compilation is None:
return
# collect the needed parameters from environment, crash when missing
parameters = {
'clang': os.getenv('ANALYZE_BUILD_CLANG'),
'output_dir': os.getenv('ANALYZE_BUILD_REPORT_DIR'),
'output_format': os.getenv('ANALYZE_BUILD_REPORT_FORMAT'),
'output_failures': os.getenv('ANALYZE_BUILD_REPORT_FAILURES'),
'direct_args': os.getenv('ANALYZE_BUILD_PARAMETERS',
'').split(' '),
'force_debug': os.getenv('ANALYZE_BUILD_FORCE_DEBUG'),
'directory': execution.cwd,
'command': [execution.cmd[0], '-c'] + compilation.flags,
'ctu': get_ctu_config_from_json(os.getenv('ANALYZE_BUILD_CTU'))
}
# call static analyzer against the compilation
for source in compilation.files:
parameters.update({'file': source})
logging.debug('analyzer parameters %s', parameters)
current = run(parameters)
# display error message from the static analyzer
if current is not None:
for line in current['error_output']:
logging.info(line.rstrip())
@contextlib.contextmanager
def report_directory(hint, keep, output_format):
""" Responsible for the report directory.
hint -- could specify the parent directory of the output directory.
keep -- a boolean value to keep or delete the empty report directory. """
stamp_format = 'scan-build-%Y-%m-%d-%H-%M-%S-%f-'
stamp = datetime.datetime.now().strftime(stamp_format)
parent_dir = os.path.abspath(hint)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
name = tempfile.mkdtemp(prefix=stamp, dir=parent_dir)
logging.info('Report directory created: %s', name)
try:
yield name
finally:
if os.listdir(name):
if output_format != 'sarif':
# 'scan-view' currently does not support sarif format.
msg = "Run 'scan-view %s' to examine bug reports."
else:
msg = "View result at %s/results-merged.sarif."
keep = True
else:
if keep:
msg = "Report directory '%s' contains no report, but kept."
else:
msg = "Removing directory '%s' because it contains no report."
logging.warning(msg, name)
if not keep:
os.rmdir(name)
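# Illustrative result (hypothetical timestamp): with hint='/tmp/out' the context
# manager yields a freshly created directory such as
# '/tmp/out/scan-build-2023-01-01-12-00-00-000000-ab12cd3e' and removes it again
# on exit when it stayed empty and 'keep' was not requested.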
def analyzer_params(args):
""" A group of command line arguments can mapped to command
line arguments of the analyzer. This method generates those. """
result = []
if args.store_model:
result.append('-analyzer-store={0}'.format(args.store_model))
if args.constraints_model:
result.append('-analyzer-constraints={0}'.format(
args.constraints_model))
if args.internal_stats:
result.append('-analyzer-stats')
if args.analyze_headers:
result.append('-analyzer-opt-analyze-headers')
if args.stats:
result.append('-analyzer-checker=debug.Stats')
if args.maxloop:
result.extend(['-analyzer-max-loop', str(args.maxloop)])
if args.output_format:
result.append('-analyzer-output={0}'.format(args.output_format))
if args.analyzer_config:
result.extend(['-analyzer-config', args.analyzer_config])
if args.verbose >= 4:
result.append('-analyzer-display-progress')
if args.plugins:
result.extend(prefix_with('-load', args.plugins))
if args.enable_checker:
checkers = ','.join(args.enable_checker)
result.extend(['-analyzer-checker', checkers])
if args.disable_checker:
checkers = ','.join(args.disable_checker)
result.extend(['-analyzer-disable-checker', checkers])
return prefix_with('-Xclang', result)
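# Illustrative sketch (hypothetical argparse namespace): with only maxloop=4 and
# output_format='html' set, the generated analyzer arguments come out
# interleaved with '-Xclang' so they reach the clang frontend:
#   ['-Xclang', '-analyzer-max-loop', '-Xclang', '4',
#    '-Xclang', '-analyzer-output=html']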
def require(required):
""" Decorator for checking the required values in state.
It checks the required attributes in the passed state and stop when
any of those is missing. """
def decorator(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
for key in required:
if key not in args[0]:
raise KeyError('{0} not passed to {1}'.format(
key, function.__name__))
return function(*args, **kwargs)
return wrapper
return decorator
@require(['command', # entry from compilation database
'directory', # entry from compilation database
'file', # entry from compilation database
'clang', # clang executable name (and path)
'direct_args', # arguments from command line
'force_debug', # kill non debug macros
'output_dir', # where generated report files shall go
'output_format', # it's 'plist', 'html', 'plist-html', 'plist-multi-file', or 'sarif'
'output_failures', # generate crash reports or not
'ctu']) # ctu control options
def run(opts):
""" Entry point to run (or not) static analyzer against a single entry
of the compilation database.
This complex task is decomposed into smaller methods which are calling
each other in chain. If the analysis is not possible the given method
just return and break the chain.
The passed parameter is a python dictionary. Each method first check
that the needed parameters received. (This is done by the 'require'
decorator. It's like an 'assert' to check the contract between the
caller and the called method.) """
try:
command = opts.pop('command')
command = command if isinstance(command, list) else decode(command)
logging.debug("Run analyzer against '%s'", command)
opts.update(classify_parameters(command))
return arch_check(opts)
except Exception:
logging.error("Problem occurred during analysis.", exc_info=1)
return None
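# The chain of continuations used by run(), following the defaults of the
# functions defined below:
#   arch_check -> language_check -> filter_debug_flags -> dispatch_ctu ->
#   run_analyzer (or ctu_collect_phase in CTU collect mode), with
#   report_failure invoked when failure reports are requested.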
@require(['clang', 'directory', 'flags', 'file', 'output_dir', 'language',
'error_output', 'exit_code'])
def report_failure(opts):
""" Create report when analyzer failed.
The major report is the preprocessor output. The output filename generated
randomly. The compiler output also captured into '.stderr.txt' file.
And some more execution context also saved into '.info.txt' file. """
def extension():
""" Generate preprocessor file extension. """
mapping = {'objective-c++': '.mii', 'objective-c': '.mi', 'c++': '.ii'}
return mapping.get(opts['language'], '.i')
def destination():
""" Creates failures directory if not exits yet. """
failures_dir = os.path.join(opts['output_dir'], 'failures')
if not os.path.isdir(failures_dir):
os.makedirs(failures_dir)
return failures_dir
    # Classify the error type: when Clang is terminated by a signal it's a
    # 'Crash'. (Python subprocess Popen.returncode is negative when the child
    # is terminated by a signal.) Everything else is 'Other Error'.
error = 'crash' if opts['exit_code'] < 0 else 'other_error'
# Create preprocessor output file name. (This is blindly following the
# Perl implementation.)
(handle, name) = tempfile.mkstemp(suffix=extension(),
prefix='clang_' + error + '_',
dir=destination())
os.close(handle)
# Execute Clang again, but run the syntax check only.
cwd = opts['directory']
cmd = [opts['clang'], '-fsyntax-only', '-E'] + opts['flags'] + \
[opts['file'], '-o', name]
try:
cmd = get_arguments(cmd, cwd)
run_command(cmd, cwd=cwd)
except subprocess.CalledProcessError:
pass
except ClangErrorException:
pass
# write general information about the crash
with open(name + '.info.txt', 'w') as handle:
handle.write(opts['file'] + os.linesep)
handle.write(error.title().replace('_', ' ') + os.linesep)
handle.write(' '.join(cmd) + os.linesep)
handle.write(' '.join(os.uname()) + os.linesep)
handle.write(get_version(opts['clang']))
handle.close()
# write the captured output too
with open(name + '.stderr.txt', 'w') as handle:
handle.writelines(opts['error_output'])
handle.close()
@require(['clang', 'directory', 'flags', 'direct_args', 'file', 'output_dir',
'output_format'])
def run_analyzer(opts, continuation=report_failure):
""" It assembles the analysis command line and executes it. Capture the
output of the analysis and returns with it. If failure reports are
requested, it calls the continuation to generate it. """
def target():
""" Creates output file name for reports. """
if opts['output_format'] in {
'plist',
'plist-html',
'plist-multi-file'}:
(handle, name) = tempfile.mkstemp(prefix='report-',
suffix='.plist',
dir=opts['output_dir'])
os.close(handle)
return name
elif opts['output_format'] == 'sarif':
(handle, name) = tempfile.mkstemp(prefix='result-',
suffix='.sarif',
dir=opts['output_dir'])
os.close(handle)
return name
return opts['output_dir']
try:
cwd = opts['directory']
cmd = get_arguments([opts['clang'], '--analyze'] +
opts['direct_args'] + opts['flags'] +
[opts['file'], '-o', target()],
cwd)
output = run_command(cmd, cwd=cwd)
return {'error_output': output, 'exit_code': 0}
except subprocess.CalledProcessError as ex:
result = {'error_output': ex.output, 'exit_code': ex.returncode}
if opts.get('output_failures', False):
opts.update(result)
continuation(opts)
return result
except ClangErrorException as ex:
result = {'error_output': ex.error, 'exit_code': 0}
if opts.get('output_failures', False):
opts.update(result)
continuation(opts)
return result
def extdef_map_list_src_to_ast(extdef_src_list):
""" Turns textual external definition map list with source files into an
external definition map list with ast files. """
extdef_ast_list = []
for extdef_src_txt in extdef_src_list:
mangled_name, path = extdef_src_txt.split(" ", 1)
# Normalize path on windows as well
path = os.path.splitdrive(path)[1]
# Make relative path out of absolute
path = path[1:] if path[0] == os.sep else path
ast_path = os.path.join("ast", path + ".ast")
extdef_ast_list.append(mangled_name + " " + ast_path)
return extdef_ast_list
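# Illustrative sketch (hypothetical entry, POSIX paths): the absolute source
# path is turned into a relative '.ast' path under the 'ast' directory.
# >>> extdef_map_list_src_to_ast(['c:@F@foo# /home/user/project/main.cpp'])
# ['c:@F@foo# ast/home/user/project/main.cpp.ast']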
@require(['clang', 'directory', 'flags', 'direct_args', 'file', 'ctu'])
def ctu_collect_phase(opts):
""" Preprocess source by generating all data needed by CTU analysis. """
def generate_ast(triple_arch):
""" Generates ASTs for the current compilation command. """
args = opts['direct_args'] + opts['flags']
ast_joined_path = os.path.join(opts['ctu'].dir, triple_arch, 'ast',
os.path.realpath(opts['file'])[1:] +
'.ast')
ast_path = os.path.abspath(ast_joined_path)
ast_dir = os.path.dirname(ast_path)
if not os.path.isdir(ast_dir):
try:
os.makedirs(ast_dir)
except OSError:
                # In case another process has already created it.
pass
ast_command = [opts['clang'], '-emit-ast']
ast_command.extend(args)
ast_command.append('-w')
ast_command.append(opts['file'])
ast_command.append('-o')
ast_command.append(ast_path)
logging.debug("Generating AST using '%s'", ast_command)
run_command(ast_command, cwd=opts['directory'])
def map_extdefs(triple_arch):
""" Generate external definition map file for the current source. """
args = opts['direct_args'] + opts['flags']
extdefmap_command = [opts['ctu'].extdef_map_cmd]
extdefmap_command.append(opts['file'])
extdefmap_command.append('--')
extdefmap_command.extend(args)
logging.debug("Generating external definition map using '%s'",
extdefmap_command)
extdef_src_list = run_command(extdefmap_command, cwd=opts['directory'])
extdef_ast_list = extdef_map_list_src_to_ast(extdef_src_list)
extern_defs_map_folder = os.path.join(opts['ctu'].dir, triple_arch,
CTU_TEMP_DEFMAP_FOLDER)
if not os.path.isdir(extern_defs_map_folder):
try:
os.makedirs(extern_defs_map_folder)
except OSError:
                # In case another process has already created it.
pass
if extdef_ast_list:
with tempfile.NamedTemporaryFile(mode='w',
dir=extern_defs_map_folder,
delete=False) as out_file:
out_file.write("\n".join(extdef_ast_list) + "\n")
cwd = opts['directory']
cmd = [opts['clang'], '--analyze'] + opts['direct_args'] + opts['flags'] \
+ [opts['file']]
triple_arch = get_triple_arch(cmd, cwd)
generate_ast(triple_arch)
map_extdefs(triple_arch)
@require(['ctu'])
def dispatch_ctu(opts, continuation=run_analyzer):
""" Execute only one phase of 2 phases of CTU if needed. """
ctu_config = opts['ctu']
if ctu_config.collect or ctu_config.analyze:
assert ctu_config.collect != ctu_config.analyze
if ctu_config.collect:
return ctu_collect_phase(opts)
if ctu_config.analyze:
cwd = opts['directory']
cmd = [opts['clang'], '--analyze'] + opts['direct_args'] \
+ opts['flags'] + [opts['file']]
triarch = get_triple_arch(cmd, cwd)
ctu_options = ['ctu-dir=' + os.path.join(ctu_config.dir, triarch),
'experimental-enable-naive-ctu-analysis=true']
analyzer_options = prefix_with('-analyzer-config', ctu_options)
direct_options = prefix_with('-Xanalyzer', analyzer_options)
opts['direct_args'].extend(direct_options)
return continuation(opts)
@require(['flags', 'force_debug'])
def filter_debug_flags(opts, continuation=dispatch_ctu):
""" Filter out nondebug macros when requested. """
if opts.pop('force_debug'):
# lazy implementation just append an undefine macro at the end
opts.update({'flags': opts['flags'] + ['-UNDEBUG']})
return continuation(opts)
@require(['language', 'compiler', 'file', 'flags'])
def language_check(opts, continuation=filter_debug_flags):
""" Find out the language from command line parameters or file name
extension. The decision also influenced by the compiler invocation. """
accepted = frozenset({
'c', 'c++', 'objective-c', 'objective-c++', 'c-cpp-output',
'c++-cpp-output', 'objective-c-cpp-output'
})
# language can be given as a parameter...
language = opts.pop('language')
compiler = opts.pop('compiler')
# ... or find out from source file extension
if language is None and compiler is not None:
language = classify_source(opts['file'], compiler == 'c')
if language is None:
logging.debug('skip analysis, language not known')
return None
elif language not in accepted:
logging.debug('skip analysis, language not supported')
return None
else:
logging.debug('analysis, language: %s', language)
opts.update({'language': language,
'flags': ['-x', language] + opts['flags']})
return continuation(opts)
@require(['arch_list', 'flags'])
def arch_check(opts, continuation=language_check):
""" Do run analyzer through one of the given architectures. """
disabled = frozenset({'ppc', 'ppc64'})
received_list = opts.pop('arch_list')
if received_list:
# filter out disabled architectures and -arch switches
filtered_list = [a for a in received_list if a not in disabled]
if filtered_list:
            # There should be only one arch given (or the same one multiple
            # times). If multiple different archs are given, they should not
            # change the pre-processing step. But that's the only pass we have
            # before running the analyzer.
current = filtered_list.pop()
logging.debug('analysis, on arch: %s', current)
opts.update({'flags': ['-arch', current] + opts['flags']})
return continuation(opts)
else:
logging.debug('skip analysis, found not supported arch')
return None
else:
logging.debug('analysis, on default arch')
return continuation(opts)
# To get good results from the static analyzer, certain compiler options shall
# be omitted. The compiler flag filtering only affects the static analyzer run.
#
# Keys are the option names; values are the number of following arguments to skip.
IGNORED_FLAGS = {
'-c': 0, # compile option will be overwritten
'-fsyntax-only': 0, # static analyzer option will be overwritten
'-o': 1, # will set up own output file
# flags below are inherited from the perl implementation.
'-g': 0,
'-save-temps': 0,
'-install_name': 1,
'-exported_symbols_list': 1,
'-current_version': 1,
'-compatibility_version': 1,
'-init': 1,
'-e': 1,
'-seg1addr': 1,
'-bundle_loader': 1,
'-multiply_defined': 1,
'-sectorder': 3,
'--param': 1,
'--serialize-diagnostics': 1
}
def classify_parameters(command):
""" Prepare compiler flags (filters some and add others) and take out
language (-x) and architecture (-arch) flags for future processing. """
result = {
'flags': [], # the filtered compiler flags
'arch_list': [], # list of architecture flags
'language': None, # compilation language, None, if not specified
'compiler': compiler_language(command) # 'c' or 'c++'
}
# iterate on the compile options
args = iter(command[1:])
for arg in args:
# take arch flags into a separate basket
if arg == '-arch':
result['arch_list'].append(next(args))
# take language
elif arg == '-x':
result['language'] = next(args)
        # parameters which look like source files are not flags
elif re.match(r'^[^-].+', arg) and classify_source(arg):
pass
# ignore some flags
elif arg in IGNORED_FLAGS:
count = IGNORED_FLAGS[arg]
for _ in range(count):
next(args)
        # drop flags that merely enable extra warnings, but keep the -Wno-*
        # ones so unwanted warnings stay suppressed.
elif re.match(r'^-W.+', arg) and not re.match(r'^-Wno-.+', arg):
pass
# and consider everything else as compilation flag.
else:
result['flags'].append(arg)
return result
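# Illustrative sketch (hypothetical compile command): source files, '-c', '-o'
# and warning-enabling flags are filtered out, while '-arch' and '-x' values
# are captured separately. The 'compiler' value assumes compiler_language()
# reports 'c' for a plain C compiler, and classify_source() recognises 'main.c'.
# >>> classify_parameters(['cc', '-c', 'main.c', '-o', 'main.o', '-Wall',
# ...                      '-Wno-unused', '-arch', 'x86_64'])
# {'flags': ['-Wno-unused'], 'arch_list': ['x86_64'], 'language': None,
#  'compiler': 'c'}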
| [((53, 11, 53, 38), 'libscanbuild.arguments.parse_args_for_scan_build', 'parse_args_for_scan_build', ({}, {}), '()', False, 'from libscanbuild.arguments import parse_args_for_scan_build, parse_args_for_analyze_build\n'), ((81, 11, 81, 41), 'libscanbuild.arguments.parse_args_for_analyze_build', 'parse_args_for_analyze_build', ({}, {}), '()', False, 'from libscanbuild.arguments import parse_args_for_scan_build, parse_args_for_analyze_build\n'), ((129, 17, 129, 42), 'json.loads', 'json.loads', ({(129, 28, 129, 41): 'ctu_conf_json'}, {}), '(ctu_conf_json)', False, 'import json\n'), ((131, 11, 134, 50), 'libscanbuild.CtuConfig', 'CtuConfig', (), '', False, 'from libscanbuild import command_entry_point, compiler_wrapper, wrapper_environment, run_build, run_command, CtuConfig\n'), ((149, 22, 149, 38), 'collections.defaultdict', 'defaultdict', ({(149, 34, 149, 37): 'set'}, {}), '(set)', False, 'from collections import defaultdict\n'), ((230, 4, 230, 62), 'logging.debug', 'logging.debug', ({(230, 18, 230, 61): '"""run analyzer against compilation database"""'}, {}), "('run analyzer against compilation database')", False, 'import logging\n'), ((301, 11, 301, 58), 'libscanbuild.compiler_wrapper', 'compiler_wrapper', ({(301, 28, 301, 57): 'analyze_compiler_wrapper_impl'}, {}), '(analyze_compiler_wrapper_impl)', False, 'from libscanbuild import command_entry_point, compiler_wrapper, wrapper_environment, run_build, run_command, CtuConfig\n'), ((312, 18, 312, 46), 'libscanbuild.compilation.split_command', 'split_command', ({(312, 32, 312, 45): 'execution.cmd'}, {}), '(execution.cmd)', False, 'from libscanbuild.compilation import split_command, classify_source, compiler_language\n'), ((348, 17, 348, 38), 'os.path.abspath', 'os.path.abspath', ({(348, 33, 348, 37): 'hint'}, {}), '(hint)', False, 'import os\n'), ((351, 11, 351, 57), 'tempfile.mkdtemp', 'tempfile.mkdtemp', (), '', False, 'import tempfile\n'), ((353, 4, 353, 54), 'logging.info', 'logging.info', ({(353, 17, 353, 47): '"""Report directory created: %s"""', (353, 49, 353, 53): 'name'}, {}), "('Report directory created: %s', name)", False, 'import logging\n'), ((501, 4, 501, 20), 'os.close', 'os.close', ({(501, 13, 501, 19): 'handle'}, {}), '(handle)', False, 'import os\n'), ((648, 18, 648, 43), 'libscanbuild.clang.get_triple_arch', 'get_triple_arch', ({(648, 34, 648, 37): 'cmd', (648, 39, 648, 42): 'cwd'}, {}), '(cmd, cwd)', False, 'from libscanbuild.clang import get_version, get_arguments, get_triple_arch, ClangErrorException\n'), ((72, 25, 72, 39), 'libscanbuild.report.document', 'document', ({(72, 34, 72, 38): 'args'}, {}), '(args)', False, 'from libscanbuild.report import document\n'), ((87, 25, 87, 39), 'libscanbuild.report.document', 'document', ({(87, 34, 87, 38): 'args'}, {}), '(args)', False, 'from libscanbuild.report import document\n'), ((118, 8, 121, 53), 'libscanbuild.CtuConfig', 'CtuConfig', (), '', False, 'from libscanbuild import command_entry_point, compiler_wrapper, wrapper_environment, run_build, run_command, CtuConfig\n'), ((123, 13, 123, 79), 'libscanbuild.CtuConfig', 'CtuConfig', (), '', False, 'from libscanbuild import command_entry_point, compiler_wrapper, wrapper_environment, run_build, run_command, CtuConfig\n'), ((187, 31, 188, 67), 'os.path.join', 'os.path.join', ({(187, 44, 187, 50): 'ctudir', (187, 52, 187, 56): 'arch', (188, 43, 188, 66): 'CTU_EXTDEF_MAP_FILENAME'}, {}), '(ctudir, arch, CTU_EXTDEF_MAP_FILENAME)', False, 'import os\n'), ((193, 30, 193, 55), 'os.path.join', 'os.path.join', ({(193, 43, 193, 49): 
'ctudir', (193, 51, 193, 54): '"""*"""'}, {}), "(ctudir, '*')", False, 'import os\n'), ((195, 11, 195, 37), 'os.path.isdir', 'os.path.isdir', ({(195, 25, 195, 36): 'triple_path'}, {}), '(triple_path)', False, 'import os\n'), ((236, 15, 236, 68), 'multiprocessing.Pool', 'multiprocessing.Pool', ({(236, 36, 236, 67): '1 if args.verbose > 2 else None'}, {}), '(1 if args.verbose > 2 else None)', False, 'import multiprocessing\n'), ((253, 8, 253, 57), 'shutil.rmtree', 'shutil.rmtree', (), '', False, 'import shutil\n'), ((263, 26, 264, 62), 'libscanbuild.CtuConfig', 'CtuConfig', (), '', False, 'from libscanbuild import command_entry_point, compiler_wrapper, wrapper_environment, run_build, run_command, CtuConfig\n'), ((267, 26, 268, 62), 'libscanbuild.CtuConfig', 'CtuConfig', (), '', False, 'from libscanbuild import command_entry_point, compiler_wrapper, wrapper_environment, run_build, run_command, CtuConfig\n'), ((270, 8, 270, 57), 'shutil.rmtree', 'shutil.rmtree', (), '', False, 'import shutil\n'), ((282, 23, 282, 48), 'libscanbuild.wrapper_environment', 'wrapper_environment', ({(282, 43, 282, 47): 'args'}, {}), '(args)', False, 'from libscanbuild import command_entry_point, compiler_wrapper, wrapper_environment, run_build, run_command, CtuConfig\n'), ((317, 17, 317, 49), 'os.getenv', 'os.getenv', ({(317, 27, 317, 48): '"""ANALYZE_BUILD_CLANG"""'}, {}), "('ANALYZE_BUILD_CLANG')", False, 'import os\n'), ((318, 22, 318, 59), 'os.getenv', 'os.getenv', ({(318, 32, 318, 58): '"""ANALYZE_BUILD_REPORT_DIR"""'}, {}), "('ANALYZE_BUILD_REPORT_DIR')", False, 'import os\n'), ((319, 25, 319, 65), 'os.getenv', 'os.getenv', ({(319, 35, 319, 64): '"""ANALYZE_BUILD_REPORT_FORMAT"""'}, {}), "('ANALYZE_BUILD_REPORT_FORMAT')", False, 'import os\n'), ((320, 27, 320, 69), 'os.getenv', 'os.getenv', ({(320, 37, 320, 68): '"""ANALYZE_BUILD_REPORT_FAILURES"""'}, {}), "('ANALYZE_BUILD_REPORT_FAILURES')", False, 'import os\n'), ((323, 23, 323, 61), 'os.getenv', 'os.getenv', ({(323, 33, 323, 60): '"""ANALYZE_BUILD_FORCE_DEBUG"""'}, {}), "('ANALYZE_BUILD_FORCE_DEBUG')", False, 'import os\n'), ((331, 8, 331, 59), 'logging.debug', 'logging.debug', ({(331, 22, 331, 46): '"""analyzer parameters %s"""', (331, 48, 331, 58): 'parameters'}, {}), "('analyzer parameters %s', parameters)", False, 'import logging\n'), ((349, 11, 349, 37), 'os.path.exists', 'os.path.exists', ({(349, 26, 349, 36): 'parent_dir'}, {}), '(parent_dir)', False, 'import os\n'), ((350, 8, 350, 31), 'os.makedirs', 'os.makedirs', ({(350, 20, 350, 30): 'parent_dir'}, {}), '(parent_dir)', False, 'import os\n'), ((358, 11, 358, 27), 'os.listdir', 'os.listdir', ({(358, 22, 358, 26): 'name'}, {}), '(name)', False, 'import os\n'), ((370, 8, 370, 34), 'logging.warning', 'logging.warning', ({(370, 24, 370, 27): 'msg', (370, 29, 370, 33): 'name'}, {}), '(msg, name)', False, 'import logging\n'), ((420, 9, 420, 34), 'functools.wraps', 'functools.wraps', ({(420, 25, 420, 33): 'function'}, {}), '(function)', False, 'import functools\n'), ((460, 8, 460, 59), 'logging.debug', 'logging.debug', ({(460, 22, 460, 49): '"""Run analyzer against \'%s\'"""', (460, 51, 460, 58): 'command'}, {}), '("Run analyzer against \'%s\'", command)', False, 'import logging\n'), ((487, 23, 487, 67), 'os.path.join', 'os.path.join', ({(487, 36, 487, 54): "opts['output_dir']", (487, 56, 487, 66): '"""failures"""'}, {}), "(opts['output_dir'], 'failures')", False, 'import os\n'), ((507, 14, 507, 37), 'libscanbuild.clang.get_arguments', 'get_arguments', ({(507, 28, 507, 31): 'cmd', (507, 33, 507, 36): 
'cwd'}, {}), '(cmd, cwd)', False, 'from libscanbuild.clang import get_version, get_arguments, get_triple_arch, ClangErrorException\n'), ((508, 8, 508, 33), 'libscanbuild.run_command', 'run_command', (), '', False, 'from libscanbuild import command_entry_point, compiler_wrapper, wrapper_environment, run_build, run_command, CtuConfig\n'), ((559, 17, 559, 42), 'libscanbuild.run_command', 'run_command', (), '', False, 'from libscanbuild import command_entry_point, compiler_wrapper, wrapper_environment, run_build, run_command, CtuConfig\n'), ((586, 19, 586, 53), 'os.path.join', 'os.path.join', ({(586, 32, 586, 37): '"""ast"""', (586, 39, 586, 52): "path + '.ast'"}, {}), "('ast', path + '.ast')", False, 'import os\n'), ((602, 19, 602, 51), 'os.path.abspath', 'os.path.abspath', ({(602, 35, 602, 50): 'ast_joined_path'}, {}), '(ast_joined_path)', False, 'import os\n'), ((603, 18, 603, 43), 'os.path.dirname', 'os.path.dirname', ({(603, 34, 603, 42): 'ast_path'}, {}), '(ast_path)', False, 'import os\n'), ((616, 8, 616, 63), 'logging.debug', 'logging.debug', ({(616, 22, 616, 49): '"""Generating AST using \'%s\'"""', (616, 51, 616, 62): 'ast_command'}, {}), '("Generating AST using \'%s\'", ast_command)', False, 'import logging\n'), ((617, 8, 617, 55), 'libscanbuild.run_command', 'run_command', (), '', False, 'from libscanbuild import command_entry_point, compiler_wrapper, wrapper_environment, run_build, run_command, CtuConfig\n'), ((627, 8, 628, 40), 'logging.debug', 'logging.debug', ({(627, 22, 627, 69): '"""Generating external definition map using \'%s\'"""', (628, 22, 628, 39): 'extdefmap_command'}, {}), '("Generating external definition map using \'%s\'",\n extdefmap_command)', False, 'import logging\n'), ((629, 26, 629, 79), 'libscanbuild.run_command', 'run_command', (), '', False, 'from libscanbuild import command_entry_point, compiler_wrapper, wrapper_environment, run_build, run_command, CtuConfig\n'), ((631, 33, 632, 68), 'os.path.join', 'os.path.join', ({(631, 46, 631, 61): "opts['ctu'].dir", (631, 63, 631, 74): 'triple_arch', (632, 45, 632, 67): 'CTU_TEMP_DEFMAP_FOLDER'}, {}), "(opts['ctu'].dir, triple_arch, CTU_TEMP_DEFMAP_FOLDER)", False, 'import os\n'), ((703, 19, 703, 65), 'libscanbuild.compilation.classify_source', 'classify_source', ({(703, 35, 703, 47): "opts['file']", (703, 49, 703, 64): "compiler == 'c'"}, {}), "(opts['file'], compiler == 'c')", False, 'from libscanbuild.compilation import split_command, classify_source, compiler_language\n'), ((706, 8, 706, 58), 'logging.debug', 'logging.debug', ({(706, 22, 706, 57): '"""skip analysis, language not known"""'}, {}), "('skip analysis, language not known')", False, 'import logging\n'), ((742, 8, 742, 50), 'logging.debug', 'logging.debug', ({(742, 22, 742, 49): '"""analysis, on default arch"""'}, {}), "('analysis, on default arch')", False, 'import logging\n'), ((780, 20, 780, 46), 'libscanbuild.compilation.compiler_language', 'compiler_language', ({(780, 38, 780, 45): 'command'}, {}), '(command)', False, 'from libscanbuild.compilation import split_command, classify_source, compiler_language\n'), ((63, 24, 63, 37), 'libscanbuild.intercept.capture', 'capture', ({(63, 32, 63, 36): 'args'}, {}), '(args)', False, 'from libscanbuild.intercept import capture\n'), ((70, 24, 70, 62), 'libscanbuild.run_build', 'run_build', (), '', False, 'from libscanbuild import command_entry_point, compiler_wrapper, wrapper_environment, run_build, run_command, CtuConfig\n'), ((102, 29, 102, 69), 're.search', 're.search', ({(102, 39, 102, 59): 
'"""configure|autogen"""', (102, 61, 102, 68): 'args[0]'}, {}), "('configure|autogen', args[0])", False, 'import re\n'), ((177, 26, 177, 58), 'os.path.join', 'os.path.join', ({(177, 39, 177, 52): 'extdefmap_dir', (177, 54, 177, 57): '"""*"""'}, {}), "(extdefmap_dir, '*')", False, 'import os\n'), ((196, 26, 196, 55), 'os.path.basename', 'os.path.basename', ({(196, 43, 196, 54): 'triple_path'}, {}), '(triple_path)', False, 'import os\n'), ((197, 28, 198, 60), 'os.path.join', 'os.path.join', ({(197, 41, 197, 47): 'ctudir', (197, 49, 197, 60): 'triple_arch', (198, 37, 198, 59): 'CTU_TEMP_DEFMAP_FOLDER'}, {}), '(ctudir, triple_arch, CTU_TEMP_DEFMAP_FOLDER)', False, 'import os\n'), ((205, 12, 205, 60), 'shutil.rmtree', 'shutil.rmtree', (), '', False, 'import shutil\n'), ((213, 15, 213, 38), 'os.path.isabs', 'os.path.isabs', ({(213, 29, 213, 37): 'filename'}, {}), '(filename)', False, 'import os\n'), ((308, 21, 308, 53), 'os.getenv', 'os.getenv', ({(308, 31, 308, 52): '"""ANALYZE_BUILD_CLANG"""'}, {}), "('ANALYZE_BUILD_CLANG')", False, 'import os\n'), ((326, 40, 326, 70), 'os.getenv', 'os.getenv', ({(326, 50, 326, 69): '"""ANALYZE_BUILD_CTU"""'}, {}), "('ANALYZE_BUILD_CTU')", False, 'import os\n'), ((347, 12, 347, 35), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n'), ((373, 12, 373, 26), 'os.rmdir', 'os.rmdir', ({(373, 21, 373, 25): 'name'}, {}), '(name)', False, 'import os\n'), ((459, 60, 459, 75), 'libscanbuild.shell.decode', 'decode', ({(459, 67, 459, 74): 'command'}, {}), '(command)', False, 'from libscanbuild.shell import decode\n'), ((465, 8, 465, 70), 'logging.error', 'logging.error', (), '', False, 'import logging\n'), ((488, 15, 488, 42), 'os.path.isdir', 'os.path.isdir', ({(488, 29, 488, 41): 'failures_dir'}, {}), '(failures_dir)', False, 'import os\n'), ((489, 12, 489, 37), 'os.makedirs', 'os.makedirs', ({(489, 24, 489, 36): 'failures_dir'}, {}), '(failures_dir)', False, 'import os\n'), ((519, 21, 519, 47), 'libscanbuild.clang.get_version', 'get_version', ({(519, 33, 519, 46): "opts['clang']"}, {}), "(opts['clang'])", False, 'from libscanbuild.clang import get_version, get_arguments, get_triple_arch, ClangErrorException\n'), ((540, 29, 542, 69), 'tempfile.mkstemp', 'tempfile.mkstemp', (), '', False, 'import tempfile\n'), ((543, 12, 543, 28), 'os.close', 'os.close', ({(543, 21, 543, 27): 'handle'}, {}), '(handle)', False, 'import os\n'), ((583, 15, 583, 39), 'os.path.splitdrive', 'os.path.splitdrive', ({(583, 34, 583, 38): 'path'}, {}), '(path)', False, 'import os\n'), ((604, 15, 604, 37), 'os.path.isdir', 'os.path.isdir', ({(604, 29, 604, 36): 'ast_dir'}, {}), '(ast_dir)', False, 'import os\n'), ((633, 15, 633, 52), 'os.path.isdir', 'os.path.isdir', ({(633, 29, 633, 51): 'extern_defs_map_folder'}, {}), '(extern_defs_map_folder)', False, 'import os\n'), ((667, 22, 667, 47), 'libscanbuild.clang.get_triple_arch', 'get_triple_arch', ({(667, 38, 667, 41): 'cmd', (667, 43, 667, 46): 'cwd'}, {}), '(cmd, cwd)', False, 'from libscanbuild.clang import get_version, get_arguments, get_triple_arch, ClangErrorException\n'), ((709, 8, 709, 62), 'logging.debug', 'logging.debug', ({(709, 22, 709, 61): '"""skip analysis, language not supported"""'}, {}), "('skip analysis, language not supported')", False, 'import logging\n'), ((712, 8, 712, 57), 'logging.debug', 'logging.debug', ({(712, 22, 712, 46): '"""analysis, language: %s"""', (712, 48, 712, 56): 'language'}, {}), "('analysis, language: %s', language)", False, 'import logging\n'), ((734, 12, 734, 59), 
'logging.debug', 'logging.debug', ({(734, 26, 734, 49): '"""analysis, on arch: %s"""', (734, 51, 734, 58): 'current'}, {}), "('analysis, on arch: %s', current)", False, 'import logging\n'), ((739, 12, 739, 68), 'logging.debug', 'logging.debug', ({(739, 26, 739, 67): '"""skip analysis, found not supported arch"""'}, {}), "('skip analysis, found not supported arch')", False, 'import logging\n'), ((216, 40, 216, 73), 'os.path.join', 'os.path.join', ({(216, 53, 216, 62): 'directory', (216, 64, 216, 72): 'filename'}, {}), '(directory, filename)', False, 'import os\n'), ((217, 19, 217, 63), 're.match', 're.match', ({(217, 28, 217, 52): "('^' + exclude_directory)", (217, 54, 217, 62): 'filename'}, {}), "('^' + exclude_directory, filename)", False, 'import re\n'), ((233, 32, 233, 49), 'json.load', 'json.load', ({(233, 42, 233, 48): 'handle'}, {}), '(handle)', False, 'import json\n'), ((321, 23, 322, 36), 'os.getenv', 'os.getenv', ({(321, 33, 321, 59): '"""ANALYZE_BUILD_PARAMETERS"""', (322, 33, 322, 35): '""""""'}, {}), "('ANALYZE_BUILD_PARAMETERS', '')", False, 'import os\n'), ((546, 29, 548, 69), 'tempfile.mkstemp', 'tempfile.mkstemp', (), '', False, 'import tempfile\n'), ((549, 12, 549, 28), 'os.close', 'os.close', ({(549, 21, 549, 27): 'handle'}, {}), '(handle)', False, 'import os\n'), ((606, 16, 606, 36), 'os.makedirs', 'os.makedirs', ({(606, 28, 606, 35): 'ast_dir'}, {}), '(ast_dir)', False, 'import os\n'), ((635, 16, 635, 51), 'os.makedirs', 'os.makedirs', ({(635, 28, 635, 50): 'extern_defs_map_folder'}, {}), '(extern_defs_map_folder)', False, 'import os\n'), ((640, 17, 642, 58), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', (), '', False, 'import tempfile\n'), ((518, 30, 518, 40), 'os.uname', 'os.uname', ({}, {}), '()', False, 'import os\n'), ((600, 39, 600, 69), 'os.path.realpath', 'os.path.realpath', ({(600, 56, 600, 68): "opts['file']"}, {}), "(opts['file'])", False, 'import os\n'), ((668, 40, 668, 77), 'os.path.join', 'os.path.join', ({(668, 53, 668, 67): 'ctu_config.dir', (668, 69, 668, 76): 'triarch'}, {}), '(ctu_config.dir, triarch)', False, 'import os\n'), ((793, 13, 793, 38), 're.match', 're.match', ({(793, 22, 793, 32): '"""^[^-].+"""', (793, 34, 793, 37): 'arg'}, {}), "('^[^-].+', arg)", False, 'import re\n'), ((793, 43, 793, 63), 'libscanbuild.compilation.classify_source', 'classify_source', ({(793, 59, 793, 62): 'arg'}, {}), '(arg)', False, 'from libscanbuild.compilation import split_command, classify_source, compiler_language\n'), ((802, 13, 802, 36), 're.match', 're.match', ({(802, 22, 802, 30): '"""^-W.+"""', (802, 32, 802, 35): 'arg'}, {}), "('^-W.+', arg)", False, 'import re\n'), ((802, 45, 802, 71), 're.match', 're.match', ({(802, 54, 802, 65): '"""^-Wno-.+"""', (802, 67, 802, 70): 'arg'}, {}), "('^-Wno-.+', arg)", False, 'import re\n')] |
PIRXrav/pyhack | tableborder.py | af5c86fb721053d8a3e819ab772c8144a23b86bf | #!/usr/bin/env python3
# pylint: disable=C0103
# pylint: disable=R0902
# pylint: disable=R0903
# pylint: disable=R0913
"""
Définie la classe TableBorder
"""
class TableBorder:
"""
Facillite l'usage de l'UNICODE
"""
def __init__(self,
top_left, top_split, top_right,
mid_left, mid_split, mid_right,
low_left, low_split, low_right,
horizontal, vertical):
"""
Constructeur
"""
self.top_left = top_left
self.top_split = top_split
self.top_right = top_right
self.mid_left = mid_left
self.mid_split = mid_split
self.mid_right = mid_right
self.low_left = low_left
self.low_split = low_split
self.low_right = low_right
self.horizontal = horizontal
self.vertical = vertical
BORDERS = [TableBorder('+', '+', '+',\
'+', '+', '+',\
'+', '+', '+',\
'-', '|'),
TableBorder(u'\u250c', u'\u252C', u'\u2510',\
u'\u251C', u'\u253C', u'\u2524',\
u'\u2514', u'\u2534', u'\u2518',\
u'\u2500', u'\u2502'),
TableBorder(u'\u2554', u'\u2566', u'\u2557',\
u'\u2560', u'\u256C', u'\u2563',\
u'\u255a', u'\u2569', u'\u255d',\
u'\u2550', u'\u2551')
]
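# Example (illustrative, not part of the original module): drawing the top edge
# of a two-column table with the plain ASCII set (BORDERS[0]) and with the
# single-line Unicode set (BORDERS[1]).
# >>> b = BORDERS[0]
# >>> b.top_left + b.horizontal * 3 + b.top_split + b.horizontal * 3 + b.top_right
# '+---+---+'
# >>> b = BORDERS[1]
# >>> b.top_left + b.horizontal * 3 + b.top_split + b.horizontal * 3 + b.top_right
# '┌───┬───┐'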
| [] |
tkf2019/Vue-Django-SAST-Search | app/urls.py | 385af9819c608ce2d0845ed3e786777ff52b52b3 | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^register/', views.register),
url(r'^login/', views.login),
url(r'logout/', views.logout),
url(r'search/', views.search)
]
| [((6, 4, 6, 38), 'django.conf.urls.url', 'url', ({(6, 8, 6, 21): '"""^register/"""', (6, 23, 6, 37): 'views.register'}, {}), "('^register/', views.register)", False, 'from django.conf.urls import url\n'), ((7, 4, 7, 32), 'django.conf.urls.url', 'url', ({(7, 8, 7, 18): '"""^login/"""', (7, 20, 7, 31): 'views.login'}, {}), "('^login/', views.login)", False, 'from django.conf.urls import url\n'), ((8, 4, 8, 33), 'django.conf.urls.url', 'url', ({(8, 8, 8, 18): '"""logout/"""', (8, 20, 8, 32): 'views.logout'}, {}), "('logout/', views.logout)", False, 'from django.conf.urls import url\n'), ((9, 4, 9, 33), 'django.conf.urls.url', 'url', ({(9, 8, 9, 18): '"""search/"""', (9, 20, 9, 32): 'views.search'}, {}), "('search/', views.search)", False, 'from django.conf.urls import url\n')] |
Ziqqo/hasl-platform | custom_components/hasl/sensor.py | 27386314bf58626538d59c38d89249b07ed9256a | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Simple service for SL (Storstockholms Lokaltrafik)."""
import datetime
import json
import logging
from datetime import timedelta
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (ATTR_FRIENDLY_NAME, CONF_SCAN_INTERVAL,
CONF_SENSOR_TYPE, CONF_SENSORS, STATE_OFF,
STATE_ON)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import (async_track_point_in_utc_time,
async_track_utc_time_change,
track_time_interval)
from homeassistant.util import Throttle
from homeassistant.util.dt import now
from hasl import (haslapi, fpapi, tl2api, ri4api, si2api,
HASL_Error, HASL_API_Error, HASL_HTTP_Error)
__version__ = '2.2.0'
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'hasl'
# Keys used in the configuration.
CONF_RI4_KEY = 'ri4key'
CONF_SI2_KEY = 'si2key'
CONF_TL2_KEY = 'tl2key'
CONF_SITEID = 'siteid'
CONF_LINES = 'lines'
CONF_DIRECTION = 'direction'
CONF_ENABLED_SENSOR = 'sensor'
CONF_TIMEWINDOW = 'timewindow'
CONF_SENSORPROPERTY = 'property'
CONF_TRAIN_TYPE = 'train_type'
CONF_TRAFFIC_CLASS = 'traffic_class'
CONF_VERSION = 'version_sensor'
CONF_USE_MINIMIZATION = 'api_minimization'
LIST_SENSOR_TYPES = ['departures', 'status', 'trainlocation', 'comb', 'tl2']
LIST_SENSOR_PROPERTIES = ['min', 'time', 'deviations', 'refresh', 'updated']
LIST_TRAIN_TYPES = ['PT', 'RB', 'TVB', 'SB', 'LB', 'SpvC', 'TB1', 'TB2', 'TB3']
# Default values for configuration.
DEFAULT_INTERVAL = timedelta(minutes=10)
DEFAULT_TIMEWINDOW = 30
DEFAULT_DIRECTION = 0
DEFAULT_SENSORPROPERTY = 'min'
DEFAULT_TRAIN_TYPE = 'PT'
DEFAULT_TRAFFIC_CLASS = ['metro', 'train', 'local', 'tram', 'bus', 'fer']
DEFAULT_SENSORTYPE = 'departures'
DEFAULT_CACHE_FILE = '.storage/haslcache.json'
# Defining the configuration schema.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
# API Keys
vol.Optional(CONF_RI4_KEY): cv.string,
vol.Optional(CONF_SI2_KEY): cv.string,
vol.Optional(CONF_TL2_KEY): cv.string,
vol.Optional(CONF_VERSION, default=False): cv.boolean,
vol.Optional(CONF_USE_MINIMIZATION, default=True): cv.boolean,
vol.Required(CONF_SENSORS, default=[]):
vol.All(cv.ensure_list, [vol.All({
vol.Required(ATTR_FRIENDLY_NAME): cv.string,
vol.Required(CONF_SENSOR_TYPE, default=DEFAULT_SENSORTYPE):
vol.In(LIST_SENSOR_TYPES),
vol.Optional(CONF_ENABLED_SENSOR): cv.string,
vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_INTERVAL):
vol.Any(cv.time_period, cv.positive_timedelta),
vol.Optional(CONF_SITEID): cv.string,
vol.Optional(CONF_LINES, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_DIRECTION, default=DEFAULT_DIRECTION):
vol.All(vol.Coerce(int), vol.Range(min=0, max=2)),
vol.Optional(CONF_TIMEWINDOW, default=DEFAULT_TIMEWINDOW):
vol.All(vol.Coerce(int), vol.Range(min=0, max=60)),
vol.Optional(CONF_SENSORPROPERTY, default=DEFAULT_SENSORPROPERTY):
vol.In(LIST_SENSOR_PROPERTIES),
vol.Optional(CONF_TRAFFIC_CLASS, default=DEFAULT_TRAFFIC_CLASS):
vol.All(cv.ensure_list, [vol.In(DEFAULT_TRAFFIC_CLASS)]),
vol.Optional(CONF_TRAIN_TYPE, default=DEFAULT_TRAIN_TYPE):
vol.In(LIST_TRAIN_TYPES)
})]),
}, extra=vol.ALLOW_EXTRA)
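# Illustrative configuration sketch (hypothetical keys and values; the exact
# spellings of the imported Home Assistant constants such as CONF_SENSOR_TYPE
# and CONF_SCAN_INTERVAL are assumptions) matching the schema above, as it
# would appear in configuration.yaml:
#
# sensor:
#   - platform: hasl
#     ri4key: !secret sl_ri4_key
#     si2key: !secret sl_si2_key
#     tl2key: !secret sl_tl2_key
#     sensors:
#       - friendly_name: Slussen departures
#         sensor_type: comb
#         siteid: '9192'
#         lines: ['25', '26']
#         direction: 1
#         timewindow: 20
#         property: min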
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the sensors."""
if not hass.data.get(DOMAIN):
hass.data[DOMAIN] = {}
sensors = []
if config[CONF_VERSION]:
sensors.append(SLVersionSensor(hass))
_LOGGER.info("Created version sensor for HASL")
for sensorconf in config[CONF_SENSORS]:
if sensorconf[CONF_SENSOR_TYPE] == 'departures' or \
sensorconf[CONF_SENSOR_TYPE] == 'comb':
sitekey = sensorconf.get(CONF_SITEID)
si2key = config.get(CONF_SI2_KEY)
ri4key = config.get(CONF_RI4_KEY)
if sitekey and ri4key:
sensorname = sensorconf[ATTR_FRIENDLY_NAME]
sensors.append(SLDeparturesSensor(
hass,
si2key,
ri4key,
sitekey,
sensorconf.get(CONF_LINES),
sensorname,
sensorconf.get(CONF_ENABLED_SENSOR),
sensorconf.get(CONF_SCAN_INTERVAL),
sensorconf.get(CONF_DIRECTION),
sensorconf.get(CONF_TIMEWINDOW),
sensorconf.get(CONF_SENSORPROPERTY),
config.get(CONF_USE_MINIMIZATION)
))
_LOGGER.info("Created departures sensor %s...", sensorname)
else:
_LOGGER.error("Sensor %s is missing site, si2key or ri4key",
sensorconf[ATTR_FRIENDLY_NAME])
if sensorconf[CONF_SENSOR_TYPE] == 'status' or \
sensorconf[CONF_SENSOR_TYPE] == 'tl2':
tl2key = config.get(CONF_TL2_KEY)
if tl2key:
sensorname = sensorconf[ATTR_FRIENDLY_NAME]
sensors.append(SLStatusSensor(
hass,
tl2key,
sensorname,
sensorconf.get(CONF_ENABLED_SENSOR),
sensorconf.get(CONF_SCAN_INTERVAL),
sensorconf.get(CONF_TRAFFIC_CLASS),
config.get(CONF_USE_MINIMIZATION)
))
_LOGGER.info("Created status sensor %s...", sensorname)
else:
_LOGGER.error("Sensor %s is missing tl2key attribute",
sensorconf[ATTR_FRIENDLY_NAME])
if sensorconf[CONF_SENSOR_TYPE] == 'trainlocation':
train_type = sensorconf.get(CONF_TRAIN_TYPE)
if train_type:
sensorname = sensorconf[ATTR_FRIENDLY_NAME]
sensors.append(SLTrainLocationSensor(
hass,
sensorname,
train_type,
sensorconf.get(CONF_SCAN_INTERVAL),
sensorconf.get(CONF_ENABLED_SENSOR),
))
_LOGGER.info("Created train sensor %s...", sensorname)
else:
_LOGGER.error("Sensor %s is missing train_type attribute",
sensorconf[ATTR_FRIENDLY_NAME])
add_devices(sensors)
class SLTrainLocationSensor(Entity):
"""Trafic Situation Sensor."""
def __init__(self, hass, friendly_name, train_type,
interval, enabled_sensor):
self._hass = hass
self._fpapi = fpapi()
self._name = friendly_name
self._interval = interval
self._enabled_sensor = enabled_sensor
self._train_type = train_type
self._data = {}
self.update = Throttle(interval)(self._update)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
""" Return the icon for the frontend."""
return None
@property
def device_state_attributes(self):
""" Return the sensor attributes."""
return {'type': self._train_type, 'data': json.dumps(self._data)}
@property
def state(self):
""" Return the state of the sensor."""
return self._train_type
def _update(self):
if self._enabled_sensor is not None:
sensor_state = self._hass.states.get(self._enabled_sensor)
if self._enabled_sensor is None or sensor_state.state is STATE_ON:
try:
apidata = self._fpapi.request(self._train_type)
except HASL_Error as e:
_LOGGER.error("A communication error occured while "
"updating train location sensor: %s", e.details)
return
except Exception as e:
_LOGGER.error("A error occured while"
"updating train location sensor: %s", e)
return
self._data = apidata
_LOGGER.info("Update completed %s...", self._name)
class SLVersionSensor(Entity):
"""HASL Version Sensor."""
def __init__(self, hass):
self._hass = hass
self._haslapi = haslapi()
self._name = 'HASL Version'
self._version = __version__
self._py_version = self._haslapi.version()
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
""" Return the icon for the frontend."""
return None
@property
def device_state_attributes(self):
""" Return the sensor attributes."""
return {'hasl': self._version, 'pyHasl': self._py_version}
@property
def state(self):
""" Return the state of the sensor."""
return self._version + "/" + self._py_version
class SLStatusSensor(Entity):
"""Trafic Situation Sensor."""
def __init__(self, hass, tl2key, friendly_name,
enabled_sensor, interval, type,
minimization):
self._tl2api = tl2api(tl2key)
self._datakey = 'tl2_' + tl2key
self._interval = interval
self._hass = hass
self._name = friendly_name
self._enabled_sensor = enabled_sensor
self._type = type
self._sensordata = []
self._lastupdate = '-'
self._cachefile = hass.config.path(DEFAULT_CACHE_FILE)
self._minimization = minimization
if not hass.data[DOMAIN].get(self._datakey):
hass.data[DOMAIN][self._datakey] = ''
self.update = Throttle(interval)(self._update)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
""" Return the icon for the frontend."""
return 'mdi:train-car'
@property
def device_state_attributes(self):
""" Return the sensor attributes."""
return self._sensordata
@property
def state(self):
""" Return the state of the sensor."""
return self._lastupdate
def getCache(self, key):
try:
jsonFile = open(self._cachefile, 'r')
data = json.load(jsonFile)
jsonFile.close()
return data.get(key)
except:
return {}
def putCache(self, key, value):
try:
jsonFile = open(self._cachefile, 'r')
data = json.load(jsonFile)
jsonFile.close()
data[key] = value
except:
data = {'' + key + '': value}
jsonFile = open(self._cachefile, 'w')
jsonFile.write(json.dumps(data))
jsonFile.close()
def _update(self):
if self._enabled_sensor is not None:
sensor_state = self._hass.states.get(self._enabled_sensor)
if self._enabled_sensor is None or sensor_state.state is STATE_ON:
_LOGGER.info("Starting to update TL2 for %s...",
self._name)
# Object used to create our object.
newdata = {}
# Use some nice translations for the statuses etc.
statuses = {
'EventGood': 'Good',
'EventMinor': 'Minor',
'EventMajor': 'Closed',
'EventPlanned': 'Planned',
}
# Icon table used for HomeAssistant.
statusIcons = {
'EventGood': 'mdi:check',
'EventMinor': 'mdi:clock-alert-outline',
'EventMajor': 'mdi:close',
'EventPlanned': 'mdi:triangle-outline'
}
trafficTypeIcons = {
'ferry': 'mdi:ferry',
'bus': 'mdi:bus',
'tram': 'mdi:tram',
'train': 'mdi:train',
'local': 'mdi:train-variant',
'metro': 'mdi:subway-variant'
}
            # If the same API has already made this request within the
            # specified interval, use that data instead of requesting it again
            # and spare some innocent API credits from dying.
cacheage = self._hass.data[DOMAIN][self._datakey]
if not cacheage or now(self._hass.config.time_zone) \
- self._interval > cacheage or not self._minimization:
try:
apidata = self._tl2api.request()
apidata = apidata['ResponseData']['TrafficTypes']
self.putCache(self._datakey, apidata)
self._hass.data[DOMAIN][self._datakey] = \
now(self._hass.config.time_zone)
_LOGGER.info("Updated cache for %s...", self._name)
except HASL_Error as e:
_LOGGER.error("A communication error occured while "
"updating TL2 sensor: %s", e.details)
return
except Exception as e:
_LOGGER.error("A error occured while "
"updating TL4 API: %s", e)
return
else:
apidata = self.getCache(self._datakey)
_LOGGER.info("Reusing data from cache for %s...",
self._name)
# Return only the relevant portion of the results.
for response in apidata:
type = response['Type']
if self._type is None or type in self._type:
statustype = ('ferry' if type == 'fer' else type)
newdata[statustype + '_status'] = \
statuses.get(response['StatusIcon'])
newdata[statustype + '_status_icon'] = \
statusIcons.get(response['StatusIcon'])
newdata[statustype + '_icon'] = \
trafficTypeIcons.get(statustype)
for event in response['Events']:
event['Status'] = statuses.get(event['StatusIcon'])
event['StatusIcon'] = \
statusIcons.get(event['StatusIcon'])
newdata[statustype + '_events'] = response['Events']
# Attribution and update sensor data.
newdata['attribution'] = "Stockholms Lokaltrafik"
newdata['last_updated'] = \
self._hass.data[DOMAIN][self._datakey].strftime('%Y-%m-%d' +
'%H:%M:%S')
self._sensordata = newdata
self._lastupdate = newdata['last_updated']
_LOGGER.info("TL2 update completed for %s...", self._name)
class SLDeparturesSensor(Entity):
"""Departure board for one SL site."""
def __init__(self, hass, si2key, ri4key, siteid,
lines, friendly_name, enabled_sensor,
interval, direction, timewindow, sensorproperty,
minimization):
"""Initialize"""
# The table of resulttypes and the corresponding units of measure.
unit_table = {
'min': 'min',
'time': '',
'deviations': '',
'refresh': '',
'update': '',
}
if si2key:
self._si2key = si2key
self._si2api = si2api(si2key, siteid, '')
self._si2datakey = 'si2_' + si2key + '_' + siteid
self._ri4key = ri4key
self._ri4api = ri4api(ri4key, siteid, 60)
self._ri4datakey = 'ri2_' + ri4key + '_' + siteid
self._hass = hass
self._name = friendly_name
self._lines = lines
self._siteid = siteid
self._enabled_sensor = enabled_sensor
self._sensorproperty = sensorproperty
self._departure_table = []
self._deviations_table = []
self._direction = direction
self._timewindow = timewindow
self._nextdeparture_minutes = '0'
self._nextdeparture_expected = '-'
self._lastupdate = '-'
self._interval = interval
self._unit_of_measure = unit_table.get(self._sensorproperty, 'min')
self._cachefile = hass.config.path(DEFAULT_CACHE_FILE)
self._minimization = minimization
if not hass.data[DOMAIN].get(self._ri4datakey):
hass.data[DOMAIN][self._ri4datakey] = ''
if self._si2key:
if not hass.data[DOMAIN].get(self._si2datakey):
hass.data[DOMAIN][self._si2datakey] = ''
# Setup updating of the sensor.
self.update = Throttle(interval)(self._update)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
""" Return the icon for the frontend."""
if self._deviations_table:
return 'mdi:bus-alert'
return 'mdi:bus'
@property
def state(self):
""" Return number of minutes to the next departure """
        # If the sensor should return minutes to the next departure.
        if self._sensorproperty == 'min':
            if not self._departure_table:
                return '-'
            return self._departure_table[0]['time']
        # If the sensor should return the time at which the next departure occurs.
        if self._sensorproperty == 'time':
            if not self._departure_table:
                return '-'
            expected = self._departure_table[0]['expected'] or '-'
            if expected != '-':
                expected = \
                    datetime.datetime.strptime(expected,
                                               '%Y-%m-%dT%H:%M:%S')
                expected = expected.strftime('%H:%M:%S')
            return expected
        # If the sensor should return the number of deviations.
        if self._sensorproperty == 'deviations':
            return len(self._deviations_table)
        # If the sensor should return whether it is updating or not.
        if self._sensorproperty == 'refresh':
            sensor_state = None
            if self._enabled_sensor is not None:
                sensor_state = self._hass.states.get(self._enabled_sensor)
            if self._enabled_sensor is None or sensor_state.state == STATE_ON:
                return STATE_ON
            return STATE_OFF
        # If the sensor should return when it was last updated.
        if self._sensorproperty == 'updated':
            if self._lastupdate == '-':
                return '-'
            return self._lastupdate.strftime('%Y-%m-%d %H:%M:%S')
# Failsafe
return '-'
@property
def device_state_attributes(self):
""" Return the sensor attributes ."""
# Initialize the state attributes.
val = {}
# Format the next exptected time.
if self._departure_table:
expected_time = self._departure_table[0]['expected'] or '-'
expected_minutes = self._departure_table[0]['time'] or '-'
            if expected_time != '-':
expected_time = \
datetime.datetime.strptime(expected_time,
'%Y-%m-%dT%H:%M:%S')
expected_time = expected_time.strftime('%H:%M:%S')
else:
expected_time = '-'
expected_minutes = '-'
# Format the last refresh time.
refresh = self._lastupdate
        if self._lastupdate != '-':
            refresh = refresh.strftime('%Y-%m-%d %H:%M:%S')
        # Set up the unit of measure.
        if self._unit_of_measure != '':
val['unit_of_measurement'] = self._unit_of_measure
# Check if sensor is currently updating or not.
if self._enabled_sensor is not None:
sensor_state = self._hass.states.get(self._enabled_sensor)
if self._enabled_sensor is None or sensor_state.state is STATE_ON:
val['refresh_enabled'] = STATE_ON
else:
val['refresh_enabled'] = STATE_OFF
# Set values of the sensor.
val['attribution'] = 'Stockholms Lokaltrafik'
val['departures'] = self._departure_table
val['deviations'] = self._deviations_table
val['last_refresh'] = refresh
val['next_departure_minutes'] = expected_minutes
val['next_departure_time'] = expected_time
val['deviation_count'] = len(self._deviations_table)
return val
def parseDepartureTime(self, t):
""" weird time formats from the API,
do some quick and dirty conversions. """
try:
if t == 'Nu':
return 0
s = t.split()
if len(s) > 1 and s[1] == 'min':
return int(s[0])
s = t.split(':')
if len(s) > 1:
rightnow = now(self._hass.config.time_zone)
min = int(s[0]) * 60 + int(s[1]) - (rightnow.hour * 60 +
rightnow.minute)
if min < 0:
min = min + 1440
return min
except Exception:
_LOGGER.warning("Failed to parse departure time (%s) ", t)
return 0
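    # Illustrative sketch (doctest-style, hypothetical sensor instance) of the
    # formats handled above: 'Nu' means "right now", 'N min' is relative, and a
    # bare 'HH:MM' clock time is converted to minutes from now, wrapping past
    # midnight.
    # >>> sensor.parseDepartureTime('Nu')
    # 0
    # >>> sensor.parseDepartureTime('5 min')
    # 5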
def getCache(self, key):
try:
jsonFile = open(self._cachefile, 'r')
data = json.load(jsonFile)
jsonFile.close()
return data.get(key)
except:
return {}
def putCache(self, key, value):
try:
jsonFile = open(self._cachefile, 'r')
data = json.load(jsonFile)
jsonFile.close()
data[key] = value
except:
data = {'' + key + '': value}
jsonFile = open(self._cachefile, 'w')
jsonFile.write(json.dumps(data))
jsonFile.close()
def _update(self):
"""Get the departure board."""
# If using external sensor, get its value.
if self._enabled_sensor is not None:
sensor_state = self._hass.states.get(self._enabled_sensor)
        # If we don't have an external sensor, or it is ON, then proceed.
if self._enabled_sensor is None or sensor_state.state \
is STATE_ON:
self._update_ri4()
if self._si2key:
self._update_si2()
self._lastupdate = now(self._hass.config.time_zone)
def _update_ri4(self):
errorOccured = False
_LOGGER.info("Starting to update RI4 for %s...", self._name)
cacheage = self._hass.data[DOMAIN][self._ri4datakey]
if not cacheage or now(self._hass.config.time_zone) \
- self._interval > cacheage or not self._minimization:
try:
departuredata = self._ri4api.request()
departuredata = departuredata['ResponseData']
self.putCache(self._ri4datakey, departuredata)
self._hass.data[DOMAIN][self._ri4datakey] = \
now(self._hass.config.time_zone)
_LOGGER.info("Updated cache for %s...", self._name)
except HASL_Error as e:
_LOGGER.error("A communication error occured while "
"updating SI2 sensor: %s", e.details)
errorOccured = True
except Exception as e:
_LOGGER.error("A communication error occured while "
"updating RI4 API: %s", e)
errorOccured = True
else:
try:
departuredata = self.getCache(self._ri4datakey)
_LOGGER.info("Reusing data from cache for %s...",
self._name)
except Exception as e:
_LOGGER.error("A error occured while retreiving "
"cached RI4 sensor data: %s", e)
errorOccured = True
if not errorOccured:
departures = []
iconswitcher = {
'Buses': 'mdi:bus',
'Trams': 'mdi:tram',
'Ships': 'mdi:ferry',
'Metros': 'mdi:subway-variant',
'Trains': 'mdi:train',
}
for (i, traffictype) in enumerate(['Metros', 'Buses', 'Trains',
'Trams', 'Ships']):
for (idx, value) in enumerate(departuredata[traffictype]):
direction = value['JourneyDirection'] or 0
displaytime = value['DisplayTime'] or ''
destination = value['Destination'] or ''
linenumber = value['LineNumber'] or ''
expected = value['ExpectedDateTime'] or ''
groupofline = value['GroupOfLine'] or ''
icon = iconswitcher.get(traffictype, 'mdi:train-car')
if int(self._direction) == 0 or int(direction) \
== int(self._direction):
if self._lines == [] or linenumber \
in self._lines:
diff = self.parseDepartureTime(displaytime)
if diff < self._timewindow:
departures.append({
'line': linenumber,
'direction': direction,
'departure': displaytime,
'destination': destination,
'time': diff,
'expected': expected,
'type': traffictype,
'groupofline': groupofline,
'icon': icon,
})
self._departure_table = sorted(departures,
key=lambda k: k['time'])
_LOGGER.info("RI4 update completed for %s...", self._name)
def _update_si2(self):
errorOccured = False
_LOGGER.info("Starting to update SI2 for %s...", self._name)
cacheage = self._hass.data[DOMAIN][self._si2datakey]
if not cacheage or now(self._hass.config.time_zone) \
- self._interval > cacheage or not self._minimization:
try:
deviationdata = self._si2api.request()
deviationdata = deviationdata['ResponseData']
self.putCache(self._si2datakey, deviationdata)
self._hass.data[DOMAIN][self._si2datakey] = \
now(self._hass.config.time_zone)
_LOGGER.info('Updated cache for %s...', self._name)
except HASL_Error as e:
_LOGGER.error("A communication error occured while "
"updating SI2 sensor: %s", e.details)
errorOccured = True
except Exception as e:
_LOGGER.error("A error occured while "
"updating SI2 sensor: %s", e)
errorOccured = True
else:
try:
deviationdata = self.getCache(self._si2datakey)
_LOGGER.info("Reusing data from cache for %s...",
self._name)
except Exception as e:
_LOGGER.error("A error occured while retreiving "
"cached SI2 sensor: %s", e.details)
errorOccured = True
if not errorOccured:
deviations = []
for (idx, value) in enumerate(deviationdata):
deviations.append({
'updated': value['Updated'],
'title': value['Header'],
'fromDate': value['FromDateTime'],
'toDate': value['UpToDateTime'],
'details': value['Details'],
'sortOrder': value['SortOrder'],
})
self._deviations_table = \
sorted(deviations, key=lambda k: k['sortOrder'])
_LOGGER.info("SI2 update completed for %s...", self._name)
| [((26, 10, 26, 37), 'logging.getLogger', 'logging.getLogger', ({(26, 28, 26, 36): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((49, 19, 49, 40), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import timedelta\n'), ((62, 4, 62, 30), 'voluptuous.Optional', 'vol.Optional', ({(62, 17, 62, 29): 'CONF_RI4_KEY'}, {}), '(CONF_RI4_KEY)', True, 'import voluptuous as vol\n'), ((63, 4, 63, 30), 'voluptuous.Optional', 'vol.Optional', ({(63, 17, 63, 29): 'CONF_SI2_KEY'}, {}), '(CONF_SI2_KEY)', True, 'import voluptuous as vol\n'), ((64, 4, 64, 30), 'voluptuous.Optional', 'vol.Optional', ({(64, 17, 64, 29): 'CONF_TL2_KEY'}, {}), '(CONF_TL2_KEY)', True, 'import voluptuous as vol\n'), ((65, 4, 65, 45), 'voluptuous.Optional', 'vol.Optional', (), '', True, 'import voluptuous as vol\n'), ((66, 4, 66, 53), 'voluptuous.Optional', 'vol.Optional', (), '', True, 'import voluptuous as vol\n'), ((68, 4, 68, 42), 'voluptuous.Required', 'vol.Required', (), '', True, 'import voluptuous as vol\n'), ((193, 22, 193, 29), 'hasl.fpapi', 'fpapi', ({}, {}), '()', False, 'from hasl import haslapi, fpapi, tl2api, ri4api, si2api, HASL_Error, HASL_API_Error, HASL_HTTP_Error\n'), ((252, 24, 252, 33), 'hasl.haslapi', 'haslapi', ({}, {}), '()', False, 'from hasl import haslapi, fpapi, tl2api, ri4api, si2api, HASL_Error, HASL_API_Error, HASL_HTTP_Error\n'), ((284, 23, 284, 37), 'hasl.tl2api', 'tl2api', ({(284, 30, 284, 36): 'tl2key'}, {}), '(tl2key)', False, 'from hasl import haslapi, fpapi, tl2api, ri4api, si2api, HASL_Error, HASL_API_Error, HASL_HTTP_Error\n'), ((469, 23, 469, 49), 'hasl.ri4api', 'ri4api', ({(469, 30, 469, 36): 'ri4key', (469, 38, 469, 44): 'siteid', (469, 46, 469, 48): '60'}, {}), '(ri4key, siteid, 60)', False, 'from hasl import haslapi, fpapi, tl2api, ri4api, si2api, HASL_Error, HASL_API_Error, HASL_HTTP_Error\n'), ((200, 22, 200, 40), 'homeassistant.util.Throttle', 'Throttle', ({(200, 31, 200, 39): 'interval'}, {}), '(interval)', False, 'from homeassistant.util import Throttle\n'), ((215, 50, 215, 72), 'json.dumps', 'json.dumps', ({(215, 61, 215, 71): 'self._data'}, {}), '(self._data)', False, 'import json\n'), ((299, 22, 299, 40), 'homeassistant.util.Throttle', 'Throttle', ({(299, 31, 299, 39): 'interval'}, {}), '(interval)', False, 'from homeassistant.util import Throttle\n'), ((324, 19, 324, 38), 'json.load', 'json.load', ({(324, 29, 324, 37): 'jsonFile'}, {}), '(jsonFile)', False, 'import json\n'), ((334, 19, 334, 38), 'json.load', 'json.load', ({(334, 29, 334, 37): 'jsonFile'}, {}), '(jsonFile)', False, 'import json\n'), ((342, 23, 342, 39), 'json.dumps', 'json.dumps', ({(342, 34, 342, 38): 'data'}, {}), '(data)', False, 'import json\n'), ((465, 27, 465, 53), 'hasl.si2api', 'si2api', ({(465, 34, 465, 40): 'si2key', (465, 42, 465, 48): 'siteid', (465, 50, 465, 52): '""""""'}, {}), "(si2key, siteid, '')", False, 'from hasl import haslapi, fpapi, tl2api, ri4api, si2api, HASL_Error, HASL_API_Error, HASL_HTTP_Error\n'), ((497, 22, 497, 40), 'homeassistant.util.Throttle', 'Throttle', ({(497, 31, 497, 39): 'interval'}, {}), '(interval)', False, 'from homeassistant.util import Throttle\n'), ((627, 19, 627, 38), 'json.load', 'json.load', ({(627, 29, 627, 37): 'jsonFile'}, {}), '(jsonFile)', False, 'import json\n'), ((637, 19, 637, 38), 'json.load', 'json.load', ({(637, 29, 637, 37): 'jsonFile'}, {}), '(jsonFile)', False, 'import json\n'), ((645, 23, 645, 39), 'json.dumps', 'json.dumps', ({(645, 34, 645, 38): 'data'}, {}), '(data)', False, 'import json\n'), ((664, 31, 664, 63), 
'homeassistant.util.dt.now', 'now', ({(664, 35, 664, 62): 'self._hass.config.time_zone'}, {}), '(self._hass.config.time_zone)', False, 'from homeassistant.util.dt import now\n'), ((530, 20, 531, 67), 'datetime.datetime.strptime', 'datetime.datetime.strptime', ({(530, 47, 530, 75): 'self._nextdeparture_expected', (531, 47, 531, 66): '"""%Y-%m-%dT%H:%M:%S"""'}, {}), "(self._nextdeparture_expected, '%Y-%m-%dT%H:%M:%S')", False, 'import datetime\n'), ((566, 20, 567, 67), 'datetime.datetime.strptime', 'datetime.datetime.strptime', ({(566, 47, 566, 60): 'expected_time', (567, 47, 567, 66): '"""%Y-%m-%dT%H:%M:%S"""'}, {}), "(expected_time, '%Y-%m-%dT%H:%M:%S')", False, 'import datetime\n'), ((614, 27, 614, 59), 'homeassistant.util.dt.now', 'now', ({(614, 31, 614, 58): 'self._hass.config.time_zone'}, {}), '(self._hass.config.time_zone)', False, 'from homeassistant.util.dt import now\n'), ((680, 20, 680, 52), 'homeassistant.util.dt.now', 'now', ({(680, 24, 680, 51): 'self._hass.config.time_zone'}, {}), '(self._hass.config.time_zone)', False, 'from homeassistant.util.dt import now\n'), ((764, 20, 764, 52), 'homeassistant.util.dt.now', 'now', ({(764, 24, 764, 51): 'self._hass.config.time_zone'}, {}), '(self._hass.config.time_zone)', False, 'from homeassistant.util.dt import now\n'), ((396, 24, 396, 56), 'homeassistant.util.dt.now', 'now', ({(396, 28, 396, 55): 'self._hass.config.time_zone'}, {}), '(self._hass.config.time_zone)', False, 'from homeassistant.util.dt import now\n'), ((671, 27, 671, 59), 'homeassistant.util.dt.now', 'now', ({(671, 31, 671, 58): 'self._hass.config.time_zone'}, {}), '(self._hass.config.time_zone)', False, 'from homeassistant.util.dt import now\n'), ((755, 27, 755, 59), 'homeassistant.util.dt.now', 'now', ({(755, 31, 755, 58): 'self._hass.config.time_zone'}, {}), '(self._hass.config.time_zone)', False, 'from homeassistant.util.dt import now\n'), ((71, 12, 71, 44), 'voluptuous.Required', 'vol.Required', ({(71, 25, 71, 43): 'ATTR_FRIENDLY_NAME'}, {}), '(ATTR_FRIENDLY_NAME)', True, 'import voluptuous as vol\n'), ((73, 12, 73, 70), 'voluptuous.Required', 'vol.Required', (), '', True, 'import voluptuous as vol\n'), ((76, 12, 76, 45), 'voluptuous.Optional', 'vol.Optional', ({(76, 25, 76, 44): 'CONF_ENABLED_SENSOR'}, {}), '(CONF_ENABLED_SENSOR)', True, 'import voluptuous as vol\n'), ((78, 12, 78, 70), 'voluptuous.Optional', 'vol.Optional', (), '', True, 'import voluptuous as vol\n'), ((81, 12, 81, 37), 'voluptuous.Optional', 'vol.Optional', ({(81, 25, 81, 36): 'CONF_SITEID'}, {}), '(CONF_SITEID)', True, 'import voluptuous as vol\n'), ((83, 12, 83, 48), 'voluptuous.Optional', 'vol.Optional', (), '', True, 'import voluptuous as vol\n'), ((86, 12, 86, 67), 'voluptuous.Optional', 'vol.Optional', (), '', True, 'import voluptuous as vol\n'), ((89, 12, 89, 69), 'voluptuous.Optional', 'vol.Optional', (), '', True, 'import voluptuous as vol\n'), ((92, 12, 92, 77), 'voluptuous.Optional', 'vol.Optional', (), '', True, 'import voluptuous as vol\n'), ((95, 12, 95, 75), 'voluptuous.Optional', 'vol.Optional', (), '', True, 'import voluptuous as vol\n'), ((98, 12, 98, 69), 'voluptuous.Optional', 'vol.Optional', (), '', True, 'import voluptuous as vol\n'), ((74, 16, 74, 41), 'voluptuous.In', 'vol.In', ({(74, 23, 74, 40): 'LIST_SENSOR_TYPES'}, {}), '(LIST_SENSOR_TYPES)', True, 'import voluptuous as vol\n'), ((79, 16, 79, 62), 'voluptuous.Any', 'vol.Any', ({(79, 24, 79, 38): 'cv.time_period', (79, 40, 79, 61): 'cv.positive_timedelta'}, {}), '(cv.time_period, cv.positive_timedelta)', True, 'import 
voluptuous as vol\n'), ((84, 16, 84, 52), 'voluptuous.All', 'vol.All', ({(84, 24, 84, 38): 'cv.ensure_list', (84, 40, 84, 51): '[cv.string]'}, {}), '(cv.ensure_list, [cv.string])', True, 'import voluptuous as vol\n'), ((93, 16, 93, 46), 'voluptuous.In', 'vol.In', ({(93, 23, 93, 45): 'LIST_SENSOR_PROPERTIES'}, {}), '(LIST_SENSOR_PROPERTIES)', True, 'import voluptuous as vol\n'), ((99, 16, 99, 40), 'voluptuous.In', 'vol.In', ({(99, 23, 99, 39): 'LIST_TRAIN_TYPES'}, {}), '(LIST_TRAIN_TYPES)', True, 'import voluptuous as vol\n'), ((387, 31, 387, 63), 'homeassistant.util.dt.now', 'now', ({(387, 35, 387, 62): 'self._hass.config.time_zone'}, {}), '(self._hass.config.time_zone)', False, 'from homeassistant.util.dt import now\n'), ((87, 24, 87, 39), 'voluptuous.Coerce', 'vol.Coerce', ({(87, 35, 87, 38): 'int'}, {}), '(int)', True, 'import voluptuous as vol\n'), ((87, 41, 87, 64), 'voluptuous.Range', 'vol.Range', (), '', True, 'import voluptuous as vol\n'), ((90, 24, 90, 39), 'voluptuous.Coerce', 'vol.Coerce', ({(90, 35, 90, 38): 'int'}, {}), '(int)', True, 'import voluptuous as vol\n'), ((90, 41, 90, 65), 'voluptuous.Range', 'vol.Range', (), '', True, 'import voluptuous as vol\n'), ((96, 41, 96, 70), 'voluptuous.In', 'vol.In', ({(96, 48, 96, 69): 'DEFAULT_TRAFFIC_CLASS'}, {}), '(DEFAULT_TRAFFIC_CLASS)', True, 'import voluptuous as vol\n')] |
ishivvers/astro | simbad_tools.py | ff3f3b9f8ef4013157c277bbb5bf82ac1bd3287d | """
A quick library to deal with searching simbad for info
about a SN and parsing the results.
Author: Isaac Shivvers, [email protected], 2014
example SIMBAD uri query:
http://simbad.u-strasbg.fr/simbad/sim-id?output.format=ASCII&Ident=sn%201998S
"""
import re
from urllib2 import urlopen
def get_SN_info( name ):
"""
Queries simbad for SN coords, redshift, and host galaxy.
If redshift is not given for SN, attempts to resolve link to
host galaxy and report its redshift.
Returns ( (ra,dec), redshift, host_name, redshift_citation ), with
values of None inserted whenever it cannot resolve the value.
"""
simbad_uri = "http://simbad.u-strasbg.fr/simbad/sim-id?output.format=ASCII&Ident=%s"
regex_coords = "Coordinates\(FK5.+\): .+"
regex_redshift = "Redshift:\s+\d+\.\d+.+"
regex_host = "apparent\s+host\s+galaxy\s+.+?\{(.*?)\}"
result = urlopen( simbad_uri % name.replace(' ','%20') ).read()
rescoords = re.search( regex_coords, result )
resred = re.search( regex_redshift, result )
reshost = re.search( regex_host, result )
try:
cs = rescoords.group().split(':')[1].strip()
ra = cs[:12].strip()
dec = cs[12:].strip()
    except Exception:
ra,dec = None,None
try:
redshift = float(resred.group().strip('Redshift: ').split(' ')[0])
citation = resred.group().split(' ')[-1]
except AttributeError:
redshift = None
citation = None
try:
host = reshost.group().split('{')[1].split('}')[0]
except AttributeError:
host = None
if (redshift == None) and (host != None):
# get the redshift from the host galaxy
result = urlopen( simbad_uri % host.replace(' ','%20') ).read()
resred = re.search( regex_redshift, result )
try:
redshift = float(resred.group().strip('Redshift: ').split(' ')[0])
citation = resred.group().split(' ')[-1]
except AttributeError:
pass
return ((ra,dec), redshift, host, citation)
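
if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): this performs a live
    # SIMBAD query, so it needs network access; 'SN 2011fe' is only an example target.
    coords, redshift, host, citation = get_SN_info('SN 2011fe')
    print coords, redshift, host, citation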
| [((29, 16, 29, 49), 're.search', 're.search', ({(29, 27, 29, 39): 'regex_coords', (29, 41, 29, 47): 'result'}, {}), '(regex_coords, result)', False, 'import re\n'), ((30, 13, 30, 48), 're.search', 're.search', ({(30, 24, 30, 38): 'regex_redshift', (30, 40, 30, 46): 'result'}, {}), '(regex_redshift, result)', False, 'import re\n'), ((31, 14, 31, 45), 're.search', 're.search', ({(31, 25, 31, 35): 'regex_host', (31, 37, 31, 43): 'result'}, {}), '(regex_host, result)', False, 'import re\n'), ((55, 17, 55, 52), 're.search', 're.search', ({(55, 28, 55, 42): 'regex_redshift', (55, 44, 55, 50): 'result'}, {}), '(regex_redshift, result)', False, 'import re\n')] |
StanfordASL/soft-robot-control | robots/environments.py | 29ade9b7b952e25e639b42767a4f09c87a0e824a | import os
from math import cos
from math import sin
import Sofa.Core
from splib.numerics import Quat, Vec3
from sofacontrol import measurement_models
path = os.path.dirname(os.path.abspath(__file__))
class TemplateEnvironment:
def __init__(self, name='Template', rayleighMass=0.1, rayleighStiffness=0.1, dt=0.01):
self.name = name
self.robot = Sofa.Core.Node(name)
# set-up solvers
self.robot.addObject('EulerImplicitSolver', name='odesolver', firstOrder="0", rayleighMass=str(rayleighMass),
rayleighStiffness=str(rayleighStiffness))
self.robot.addObject('SparseLDLSolver', name='preconditioner')
self.robot.addObject('GenericConstraintCorrection', solverName="preconditioner")
self.actuator_list = []
self.nb_nodes = None
self.gravity = [0., -9810., 0.] # default
self.dt = dt
def get_measurement_model(self, nodes=None, pos=True, vel=True):
if nodes is None:
return measurement_models.linearModel(range(self.nb_nodes), self.nb_nodes, pos=pos, vel=vel)
else:
return measurement_models.linearModel(nodes, self.nb_nodes, pos=pos, vel=vel)
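    # Illustrative call (not part of the original code), assuming `env` is one of the
    # concrete environments below: observe the positions of two chosen nodes only,
    #   obs_model = env.get_measurement_model(nodes=[10, 20], pos=True, vel=False)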
class Trunk(TemplateEnvironment):
def __init__(self, name='Trunk', all_cables=True):
super(Trunk, self).__init__(name=name)
self.nb_nodes = 709
self.gravity = [0., 0., 9810.]
self.robot.min_force = [0.] * 8 # Without premultiplication with dt
self.robot.addObject('MeshVTKLoader', name='loader', filename=path + '/mesh/trunk.vtk')
self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container')
self.robot.addObject('TetrahedronSetTopologyModifier')
self.robot.addObject('TetrahedronSetTopologyAlgorithms')
self.robot.addObject('TetrahedronSetGeometryAlgorithms')
# Option 1:
self.robot.addObject('MechanicalObject', name='tetras', template='Vec3d', showIndices='false',
showIndicesScale='4e-5')
# Option 2: Equivalent to option 1 (we believe)
# self.robot.addObject('MechanicalObject', src='@loader')
# Gives a mass to the model
self.robot.addObject('UniformMass', totalMass=0.042)
        # Add a TetrahedronFEMForceField component which implements an elastic material model solved using the Finite
        # Element Method on tetrahedrons.
self.robot.addObject('TetrahedronFEMForceField', template='Vec3d', name='FEM', method='large',
poissonRatio=0.45,
youngModulus=450)
# Fix the base of the trunk by adding constraints in a region of interest (ROI)
self.robot.addObject('BoxROI', name='boxROI', box=[[-20, -20, 0], [20, 20, 20]], drawBoxes=False)
self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.indices', stiffness='1e12')
##########################################
# Cable #
##########################################
actuator_names = ''
length1 = 10.
length2 = 2.
lengthTrunk = 195.
pullPoint = [[0., length1, 0.], [-length1, 0., 0.], [0., -length1, 0.], [length1, 0., 0.]]
direction = Vec3(0., length2 - length1, lengthTrunk)
direction.normalize()
nbCables = 4
actuators = self.robot.addChild('actuators')
for i in range(0, nbCables):
childname = 'cableL' + str(i)
theta = 1.57 * i
q = Quat(0., 0., sin(theta / 2.), cos(theta / 2.))
position = [[0., 0., 0.]] * 20
for k in range(0, 20, 2):
v = Vec3(direction[0], direction[1] * 17.5 * (k / 2) + length1, direction[2] * 17.5 * (k / 2) + 21)
position[k] = v.rotateFromQuat(q)
v = Vec3(direction[0], direction[1] * 17.5 * (k / 2) + length1, direction[2] * 17.5 * (k / 2) + 27)
position[k + 1] = v.rotateFromQuat(q)
cableL = actuators.addChild(childname)
cableL.addObject('MechanicalObject', name='meca',
position=pullPoint[i] + [pos.toList() for pos in position])
cableL.addObject('CableConstraint', template='Vec3d', name="cable",
hasPullPoint="0",
indices=list(range(21)),
maxPositiveDisp='70',
maxDispVariation="1",
valueType='force',
minForce=self.robot.min_force[i] * self.robot.dt.value)
cableL.addObject('BarycentricMapping', name='mapping', mapForces='false', mapMasses='false')
actuator_names += childname + '/cable,'
self.actuator_list.append(cableL.cable)
if all_cables:
for i in range(0, nbCables):
childname = 'cableS' + str(i)
theta = 1.57 * i
q = Quat(0., 0., sin(theta / 2.), cos(theta / 2.))
position = [[0., 0., 0.]] * 10
for k in range(0, 9, 2):
v = Vec3(direction[0], direction[1] * 17.5 * (k / 2) + length1, direction[2] * 17.5 * (k / 2) + 21)
position[k] = v.rotateFromQuat(q)
v = Vec3(direction[0], direction[1] * 17.5 * (k / 2) + length1, direction[2] * 17.5 * (k / 2) + 27)
position[k + 1] = v.rotateFromQuat(q)
cableS = actuators.addChild(childname)
cableS.addObject('MechanicalObject', name='meca',
position=pullPoint[i] + [pos.toList() for pos in position])
cableS.addObject('CableConstraint', template='Vec3d', name="cable",
hasPullPoint="0",
indices=list(range(10)),
maxPositiveDisp='40',
maxDispVariation="1",
valueType='force',
minForce=self.robot.min_force[i + 4] * self.robot.dt.value)
cableS.addObject('BarycentricMapping', name='mapping', mapForces='false', mapMasses='false')
actuator_names += childname + '/cable,'
self.actuator_list.append(cableS.cable)
self.robot.actuator_list = self.actuator_list
##########################################
# Visualization #
##########################################
trunkVisu = self.robot.addChild('VisualModel')
trunkVisu.addObject('MeshSTLLoader', filename=path + "/mesh/trunk.stl")
trunkVisu.addObject('OglModel', template='Vec3d', color=[1., 1., 1., 0.8])
trunkVisu.addObject('BarycentricMapping')
class Trunk4Cables(Trunk):
def __init__(self, name='Trunk4Cables'):
super(Trunk4Cables, self).__init__(name=name, all_cables=False)
self.robot.min_force = [0, 0, 0, 0] # Without premultiplication with dt
class Finger(TemplateEnvironment):
def __init__(self, name='Finger'):
super(Finger, self).__init__(name=name)
self.nb_nodes = 158
self.robot.min_force = [0.] # Without premultiplication with dt
self.robot.addObject('MeshVTKLoader', name='loader', filename=path + '/mesh/finger.vtk')
self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container')
self.robot.addObject('TetrahedronSetTopologyModifier')
self.robot.addObject('TetrahedronSetTopologyAlgorithms')
self.robot.addObject('TetrahedronSetGeometryAlgorithms')
self.robot.addObject('MechanicalObject', name='tetras', template='Vec3d', showIndices='false',
showIndicesScale='4e-5')
self.robot.addObject('UniformMass', totalMass=0.075)
        # Add a TetrahedronFEMForceField component which implements an elastic material model solved using the Finite Element Method on tetrahedrons.
self.robot.addObject('TetrahedronFEMForceField', template='Vec3d', name='FEM', method='large',
poissonRatio=0.45,
youngModulus=600)
        # Fix the base of the finger by adding constraints in a region of interest (ROI)
self.robot.addObject('BoxROI', name='boxROI', box=[[-15, 0, 0], [5, 10, 15]], drawBoxes=False)
self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.indices', stiffness='1e12')
##########################################
# Cable #
##########################################
# This creates a new node in the scene. This node is appended to the finger's node.
actuators = self.robot.addChild('actuators')
cable = actuators.addChild('cable')
        # This creates a MechanicalObject, a component holding the degrees of freedom of our
        # mechanical model. In the case of a cable it is a set of positions specifying
        # the points the cable passes through.
cable.addObject('MechanicalObject', name='meca',
position=(
"-17.5 12.5 2.5 " +
"-32.5 12.5 2.5 " +
"-47.5 12.5 2.5 " +
"-62.5 12.5 2.5 " +
"-77.5 12.5 2.5 " +
"-83.5 12.5 4.5 " +
"-85.5 12.5 6.5 " +
"-85.5 12.5 8.5 " +
"-83.5 12.5 10.5 " +
"-77.5 12.5 12.5 " +
"-62.5 12.5 12.5 " +
"-47.5 12.5 12.5 " +
"-32.5 12.5 12.5 " +
"-17.5 12.5 12.5 "))
# Create a CableConstraint object with a name.
        # The indices refer to the MechanicalObject's positions.
        # The last index is where the pullPoint is connected.
cable.addObject('CableConstraint', name="cable",
indices=list(range(14)),
pullPoint="0.0 12.5 2.5", valueType='force',
minForce=self.robot.min_force[0] * self.robot.dt.value)
        # This creates a BarycentricMapping. A BarycentricMapping is a key element as it creates a bi-directional link
        # between the cable's DoFs and the finger's ones so that movements of the cable's DoFs are mapped
        # to the finger and vice versa.
cable.addObject('BarycentricMapping', name='mapping', mapForces='false', mapMasses='false')
self.actuator_list.append(cable.cable)
self.robot.actuator_list = self.actuator_list
##########################################
# Visualization #
##########################################
# In Sofa, visualization is handled by adding a rendering model.
# Create an empty child node to store this rendering model.
fingerVisu = self.robot.addChild('VisualModel')
# Add to this empty node a rendering model made of triangles and loaded from an stl file.
fingerVisu.addObject('MeshSTLLoader', filename=path + "/mesh/finger.stl")
fingerVisu.addObject('OglModel', template='Vec3d', color=[1., 1., 1., 0.8])
        # Add a BarycentricMapping to deform the rendering model in a way that follows the parent mechanical model.
fingerVisu.addObject('BarycentricMapping')
class Diamond(TemplateEnvironment):
def __init__(self, name='Diamond', totalMass=0.5, poissonRatio=0.45, youngModulus=450, rayleighMass=0.1, rayleighStiffness=0.1, dt=0.01):
super(Diamond, self).__init__(name=name, rayleighMass=rayleighMass, rayleighStiffness=rayleighStiffness, dt=dt)
self.nb_nodes = 1628
self.gravity = [0., 0., -9810.]
rotation = [90, 0.0, 0.0]
translation = [0.0, 0.0, 35]
self.robot.min_force = [0, 0, 0, 0] # Without premultiplication with dt
self.robot.addObject('MeshVTKLoader', name='loader', filename=path + "/mesh/diamond.vtu", rotation=rotation,
translation=translation)
self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container')
self.robot.addObject('TetrahedronSetTopologyModifier')
self.robot.addObject('TetrahedronSetTopologyAlgorithms')
self.robot.addObject('TetrahedronSetGeometryAlgorithms')
self.robot.addObject('MechanicalObject', template='Vec3d', name='tetras', showIndices='false',
showIndicesScale='4e-5')
self.robot.addObject('UniformMass', totalMass=totalMass, name='mass')
self.robot.addObject('TetrahedronFEMForceField', template='Vec3d',
method='large', name='forcefield',
poissonRatio=poissonRatio, youngModulus=youngModulus)
        # Fix the base of the robot by adding constraints in a region of interest (ROI)
self.robot.addObject('BoxROI', name='boxROI', box=[-15, -15, -40, 15, 15, 10], drawBoxes=True)
self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.indices', stiffness='1e12')
##########################################
# Cable #
##########################################
self.actuatorsParam = [
{'withName': 'A',
'withCableGeometry': [[0, 97, 45]],
'withAPullPointLocation': [0, 10, 30]
},
{'withName': 'B',
'withCableGeometry': [[-97, 0, 45]],
'withAPullPointLocation': [-10, 0, 30]
},
{'withName': 'C',
'withCableGeometry': [[0, -97, 45]],
'withAPullPointLocation': [0, -10, 30]
},
{'withName': 'D',
'withCableGeometry': [[97, 0, 45]],
'withAPullPointLocation': [10, 0, 30]
}
]
actuators = self.robot.addChild('actuators')
for i in range(len(self.actuatorsParam)):
cable = actuators.addChild(self.actuatorsParam[i]['withName'])
cable.addObject('MechanicalObject', position=self.actuatorsParam[i]['withCableGeometry'])
cable.addObject('CableConstraint',
name='cable',
indices=list(range(len(self.actuatorsParam[i]['withCableGeometry']))),
pullPoint=self.actuatorsParam[i]['withAPullPointLocation'],
valueType='force',
hasPullPoint=True,
minForce=self.robot.min_force[i] * self.robot.dt.value
)
cable.addObject('BarycentricMapping', name="Mapping", mapForces=False, mapMasses=False)
self.actuator_list.append(cable.cable)
self.robot.actuator_list = self.actuator_list
##########################################
# Visualization #
##########################################
diamondVisu = self.robot.addChild('VisualModel')
diamondVisu.addObject('MeshSTLLoader', filename=path + "/mesh/diamond.stl")
diamondVisu.addObject('OglModel', template='Vec3d', color=[0.7, 0.7, 0.7, 0.7], updateNormals=False)
diamondVisu.addObject('BarycentricMapping')
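
# Minimal usage sketch (not part of the original file): these environments are meant
# to be built inside a SofaPython3 scene-creation callback; the exact registration
# may differ depending on your Sofa setup.
#
#   def createScene(rootNode):
#       env = Diamond(name='Diamond', dt=0.01)
#       rootNode.addChild(env.robot)
#       return rootNode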
| [((10, 23, 10, 48), 'os.path.abspath', 'os.path.abspath', ({(10, 39, 10, 47): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((76, 20, 76, 60), 'splib.numerics.Vec3', 'Vec3', ({(76, 25, 76, 27): '0.0', (76, 29, 76, 46): 'length2 - length1', (76, 48, 76, 59): 'lengthTrunk'}, {}), '(0.0, length2 - length1, lengthTrunk)', False, 'from splib.numerics import Quat, Vec3\n'), ((31, 19, 31, 89), 'sofacontrol.measurement_models.linearModel', 'measurement_models.linearModel', (), '', False, 'from sofacontrol import measurement_models\n'), ((84, 29, 84, 44), 'math.sin', 'sin', ({(84, 33, 84, 43): 'theta / 2.0'}, {}), '(theta / 2.0)', False, 'from math import sin\n'), ((84, 46, 84, 61), 'math.cos', 'cos', ({(84, 50, 84, 60): 'theta / 2.0'}, {}), '(theta / 2.0)', False, 'from math import cos\n'), ((87, 20, 87, 115), 'splib.numerics.Vec3', 'Vec3', ({(87, 25, 87, 37): 'direction[0]', (87, 39, 87, 78): 'direction[1] * 17.5 * (k / 2) + length1', (87, 80, 87, 114): 'direction[2] * 17.5 * (k / 2) + 21'}, {}), '(direction[0], direction[1] * 17.5 * (k / 2) + length1, direction[2] * \n 17.5 * (k / 2) + 21)', False, 'from splib.numerics import Quat, Vec3\n'), ((89, 20, 89, 115), 'splib.numerics.Vec3', 'Vec3', ({(89, 25, 89, 37): 'direction[0]', (89, 39, 89, 78): 'direction[1] * 17.5 * (k / 2) + length1', (89, 80, 89, 114): 'direction[2] * 17.5 * (k / 2) + 27'}, {}), '(direction[0], direction[1] * 17.5 * (k / 2) + length1, direction[2] * \n 17.5 * (k / 2) + 27)', False, 'from splib.numerics import Quat, Vec3\n'), ((110, 33, 110, 48), 'math.sin', 'sin', ({(110, 37, 110, 47): 'theta / 2.0'}, {}), '(theta / 2.0)', False, 'from math import sin\n'), ((110, 50, 110, 65), 'math.cos', 'cos', ({(110, 54, 110, 64): 'theta / 2.0'}, {}), '(theta / 2.0)', False, 'from math import cos\n'), ((114, 24, 114, 119), 'splib.numerics.Vec3', 'Vec3', ({(114, 29, 114, 41): 'direction[0]', (114, 43, 114, 82): 'direction[1] * 17.5 * (k / 2) + length1', (114, 84, 114, 118): 'direction[2] * 17.5 * (k / 2) + 21'}, {}), '(direction[0], direction[1] * 17.5 * (k / 2) + length1, direction[2] * \n 17.5 * (k / 2) + 21)', False, 'from splib.numerics import Quat, Vec3\n'), ((116, 24, 116, 119), 'splib.numerics.Vec3', 'Vec3', ({(116, 29, 116, 41): 'direction[0]', (116, 43, 116, 82): 'direction[1] * 17.5 * (k / 2) + length1', (116, 84, 116, 118): 'direction[2] * 17.5 * (k / 2) + 27'}, {}), '(direction[0], direction[1] * 17.5 * (k / 2) + length1, direction[2] * \n 17.5 * (k / 2) + 27)', False, 'from splib.numerics import Quat, Vec3\n')] |
SimonPreissner/get-shifty | default.py | aff49220932921c77e419a34ca472b51e0b26b72 | """
This file contains meta information and default configurations of the project
"""
RSC_YEARS = [1660, 1670, 1680, 1690,
1700, 1710, 1720, 1730, 1740, 1750, 1760, 1770, 1780, 1790,
1800, 1810, 1820, 1830, 1840, 1850, 1860, 1870, 1880, 1890,
1900, 1910, 1920]
# cf. Chapter 4.4.1 of the thesis
SPACE_PAIR_SELECTION = [(1740,1750), (1750,1760),
(1680,1710), (1710,1740), (1740,1770), (1770,1800), (1800,1830), (1830,1860), (1860,1890),
(1700,1800), (1800,1900),
(1700,1900)]
COUPLING_CONFIG = { # Alternatives
# parameters passed to the GWOT object
'metric': "cosine", # 'euclidian',
'normalize_vecs': "both", # 'mean', 'whiten', 'whiten_zca'
'normalize_dists': "mean", # 'max', 'median'
'score_type': "coupling", # #TODO fill in the rest of the options in the comments
'adjust': None, # 'csls', ...
'distribs': "uniform", # 'custom', 'zipf'
'share_vocs':False, # True
'size':1000, # 100 is small, 1e4
'max_anchors':100, # used with small couplings (for projection)
# parameters to be passed to the optimizer
'opt_loss_fun': "square_loss", # 'kl_loss'
'opt_entropic': True, # False
'opt_entreg': 5e-4, # stay within the range of e-4 (originally: 1e-4)
'opt_tol': 1e-9, # no limits
'opt_round_g': False, # True
'opt_compute_accuracy': False, # True would require a test dict, but that's not implemented!
'opt_gpu': False, # GPU optimization not tested
# parameters for calling fit()
'fit_maxiter': 300, # no limits; normally converges within 150 iterations
'fit_tol': 1e-9, # no limits
'fit_plot_every': 100000, # normally 20; 'deactivate' the file spam by choosing a large value
'fit_print_every': 1, # no limits
'fit_verbose': True, # False
'fit_save_plots': None # "/my_dir/my_optimizer_plots"
}
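
# Illustrative override pattern (not part of the original file): an experiment script
# can copy these defaults and swap individual entries, e.g.
#   cfg = dict(COUPLING_CONFIG, size=100, fit_maxiter=150)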
DIST_SHAPES = ['uniform', 'zipf', 'custom']
SHIFT_EXPERIMENTS = ["all",
"unsup_bi",
"unsup_mono",
"dis_tech"] | [] |
SimonTopp/Graph-WaveNet | generate_training_data_drb.py | ef63a80cc397744667a5d27f7c410c10e3e03a4c | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import numpy as np
import os
import pandas as pd
import util
import os.path
import pandas as pd
import numpy as np
import yaml
import xarray as xr
import datetime
import pickle
def scale(dataset, std=None, mean=None):
"""
scale the data so it has a standard deviation of 1 and a mean of zero
:param dataset: [xr dataset] input or output data
:param std: [xr dataset] standard deviation if scaling test data with dims
:param mean: [xr dataset] mean if scaling test data with dims
:return: scaled data with original dims
"""
if not isinstance(std, xr.Dataset) or not isinstance(mean, xr.Dataset):
std = dataset.std(skipna=True)
mean = dataset.mean(skipna=True)
# adding small number in case there is a std of zero
scaled = (dataset - mean) / (std + 1e-10)
check_if_finite(std)
check_if_finite(mean)
return scaled, std, mean
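
# Illustrative round trip (not part of the original code): because scale() adds 1e-10
# to the standard deviation, the transform is undone with
#   unscaled = scaled * (std + 1e-10) + mean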
def sel_partition_data(dataset, start_dates, end_dates):
"""
select the data from a date range or a set of date ranges
:param dataset: [xr dataset] input or output data with date dimension
:param start_dates: [str or list] fmt: "YYYY-MM-DD"; date(s) to start period
    (can have multiple discontinuous periods)
    :param end_dates: [str or list] fmt: "YYYY-MM-DD"; date(s) to end period
    (can have multiple discontinuous periods)
:return: dataset of just those dates
"""
# if it just one date range
if isinstance(start_dates, str):
if isinstance(end_dates, str):
return dataset.sel(date=slice(start_dates, end_dates))
else:
raise ValueError("start_dates is str but not end_date")
# if it's a list of date ranges
elif isinstance(start_dates, list) or isinstance(start_dates, tuple):
if len(start_dates) == len(end_dates):
data_list = []
for i in range(len(start_dates)):
date_slice = slice(start_dates[i], end_dates[i])
data_list.append(dataset.sel(date=date_slice))
return xr.concat(data_list, dim="date")
else:
raise ValueError("start_dates and end_dates must have same length")
else:
raise ValueError("start_dates must be either str, list, or tuple")
def separate_trn_tst(
dataset,
train_start_date,
train_end_date,
val_start_date,
val_end_date,
test_start_date,
test_end_date,
):
"""
separate the train data from the test data according to the start and end
dates. This assumes your training data is in one continuous block and all
the dates that are not in the training are in the testing.
:param dataset: [xr dataset] input or output data with dims
    :param train_start_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to start
    train period (can have multiple discontinuous periods)
    :param train_end_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to end train
    period (can have multiple discontinuous periods)
    :param val_start_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to start
    validation period (can have multiple discontinuous periods)
    :param val_end_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to end
    validation period (can have multiple discontinuous periods)
    :param test_start_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to start
    test period (can have multiple discontinuous periods)
    :param test_end_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to end test
    period (can have multiple discontinuous periods)
"""
train = sel_partition_data(dataset, train_start_date, train_end_date)
val = sel_partition_data(dataset, val_start_date, val_end_date)
test = sel_partition_data(dataset, test_start_date, test_end_date)
return train, val, test
def split_into_batches(data_array, seq_len=365, offset=1):
"""
split training data into batches with size of batch_size
:param data_array: [numpy array] array of training data with dims [nseg,
ndates, nfeat]
:param seq_len: [int] length of sequences (i.e., 365)
:param offset: [float] 0-1, how to offset the batches (e.g., 0.5 means that
the first batch will be 0-365 and the second will be 182-547)
:return: [numpy array] batched data with dims [nbatches, nseg, seq_len
(batch_size), nfeat]
"""
combined = []
for i in range(int(1 / offset)):
start = int(i * offset * seq_len)
idx = np.arange(start=start, stop=data_array.shape[1] + 1, step=seq_len)
split = np.split(data_array, indices_or_sections=idx, axis=1)
# add all but the first and last batch since they will be smaller
combined.extend([s for s in split if s.shape[1] == seq_len])
combined = np.asarray(combined)
return combined
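
# Shape sketch (not part of the original code): with seq_len=365 and offset=0.5, an
# input of shape [nseg, ndates, nfeat] produces windows starting at days 0 and 182,
# stacked as [nbatches, nseg, 365, nfeat]; incomplete leading/trailing windows are
# dropped.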
def read_multiple_obs(obs_files, x_data):
"""
read and format multiple observation files. we read in the pretrain data to
make sure we have the same indexing.
:param obs_files: [list] list of filenames of observation files
    :param x_data: [xr dataset] the pretraining input data (used to align indices)
    :return: [xr dataset] the observations on the same time index
"""
obs = [x_data.sortby(["seg_id_nat", "date"])]
for filename in obs_files:
ds = xr.open_zarr(filename)
obs.append(ds)
if "site_id" in ds.variables:
del ds["site_id"]
obs = xr.merge(obs, join="left")
obs = obs[["temp_c", "discharge_cms"]]
obs = obs.rename(
{"temp_c": "seg_tave_water", "discharge_cms": "seg_outflow"}
)
return obs
def reshape_for_training(data):
"""
reshape the data for training
:param data: training data (either x or y or mask) dims: [nbatch, nseg,
len_seq, nfeat/nout]
:return: reshaped data [nbatch * nseg, len_seq, nfeat/nout]
"""
n_batch, n_seg, seq_len, n_feat = data.shape
return np.reshape(data, [n_batch * n_seg, seq_len, n_feat])
def get_exclude_start_end(exclude_grp):
"""
get the start and end dates for the exclude group
:param exclude_grp: [dict] dictionary representing the exclude group from
the exclude yml file
:return: [tuple of datetime objects] start date, end date
"""
start = exclude_grp.get("start_date")
if start:
start = datetime.datetime.strptime(start, "%Y-%m-%d")
end = exclude_grp.get("end_date")
if end:
end = datetime.datetime.strptime(end, "%Y-%m-%d")
return start, end
def convert_batch_reshape(dataset, seq_len=365, offset=1, y = False, period = np.nan):
"""
convert xarray dataset into numpy array, swap the axes, batch the array and
reshape for training
:param dataset: [xr dataset] data to be batched
:param seq_len: [int] length of sequences (i.e., 365)
:param offset: [float] 0-1, how to offset the batches (e.g., 0.5 means that
the first batch will be 0-365 and the second will be 182-547)
:return: [numpy array] batched and reshaped dataset
"""
# convert xr.dataset to numpy array
dataset = dataset.transpose("seg_id_nat", "date")
arr = dataset.to_array().values
# if the dataset is empty, just return it as is
if dataset.date.size == 0:
return arr
# before [nfeat, nseg, ndates]; after [nseg, ndates, nfeat]
# this is the order that the split into batches expects
arr = np.moveaxis(arr, 0, -1)
# batch the data
# after [nbatch, nseg, seq_len, nfeat]
batched = split_into_batches(arr, seq_len=seq_len, offset=offset)
# reshape data
# after [nseq, seq_len, nseg, nfeat]
#reshaped = reshape_for_training(batched)
reshaped = np.moveaxis(batched, [0,1,2,3], [0,2,1,3])
if y & np.isfinite(period):
reshaped = reshaped[:,-period:,...]
return reshaped
def coord_as_reshaped_array(dataset, coord_name, seq_len=365, offset=1):
# I need one variable name. It can be any in the dataset, but I'll use the
# first
first_var = next(iter(dataset.data_vars.keys()))
coord_array = xr.broadcast(dataset[coord_name], dataset[first_var])[0]
new_var_name = coord_name + "1"
dataset[new_var_name] = coord_array
reshaped_np_arr = convert_batch_reshape(
dataset[[new_var_name]], seq_len=seq_len, offset=offset
)
return reshaped_np_arr
def check_if_finite(xarr):
assert np.isfinite(xarr.to_array().values).all()
def prep_data(
obs_temper_file,
obs_flow_file,
pretrain_file,
#distfile,
train_start_date,
train_end_date,
val_start_date,
val_end_date,
test_start_date,
test_end_date,
x_vars=None,
y_vars= ["seg_tave_water", "seg_outflow"],
seq_length = 365,
offset = 1,
period = None,
primary_variable="temp",
#catch_prop_file=None,
#exclude_file=None,
#log_q=False,
out_file=None,
#segs=None,
normalize_y=False,
):
"""
prepare input and output data for DL model training read in and process
data into training and testing datasets. the training and testing data are
scaled to have a std of 1 and a mean of zero
    :param obs_temper_file: [str] temperature observations file (zarr)
    :param obs_flow_file: [str] discharge observations file (zarr)
:param pretrain_file: [str] the file with the pretraining data (SNTemp data)
:param distfile: [str] path to the distance matrix .npz file
:param train_start_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to start
    train period (can have multiple discontinuous periods)
    :param train_end_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to end train
    period (can have multiple discontinuous periods)
    :param val_start_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to start
    validation period (can have multiple discontinuous periods)
    :param val_end_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to end
    validation period (can have multiple discontinuous periods)
    :param test_start_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to start
    test period (can have multiple discontinuous periods)
    :param test_end_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to end test
    period (can have multiple discontinuous periods)
:param x_vars: [list] variables that should be used as input. If None, all
of the variables will be used
:param primary_variable: [str] which variable the model should focus on
'temp' or 'flow'. This determines the order of the variables.
:param catch_prop_file: [str] the path to the catchment properties file. If
left unfilled, the catchment properties will not be included as predictors
:param exclude_file: [str] path to exclude file
:param log_q: [bool] whether or not to take the log of discharge in training
:param out_file: [str] file to where the values will be written
:returns: training and testing data along with the means and standard
deviations of the training input and output data
'y_trn_pre': batched, scaled, and centered output data for entire
period of record of SNTemp [n_samples, seq_len, n_out]
'y_obs_trn': batched, scaled, and centered output observation data
for the training period
'y_trn_obs_std': standard deviation of the y observations training
data [n_out]
'y_trn_obs_mean': mean of the observation training data [n_out]
'y_obs_tst': un-batched, unscaled, uncentered observation data for
the test period [n_yrs, n_seg, len_seq, n_out]
'dates_ids_trn: batched dates and national seg ids for training data
[n_samples, seq_len, 2]
'dates_ids_tst: un-batched dates and national seg ids for testing
data [n_yrs, n_seg, len_seq, 2]
"""
ds_pre = xr.open_zarr(pretrain_file)
x_data = ds_pre[x_vars]
# make sure we don't have any weird input values
check_if_finite(x_data)
x_trn, x_val, x_tst = separate_trn_tst(
x_data,
train_start_date,
train_end_date,
val_start_date,
val_end_date,
test_start_date,
test_end_date,
)
x_scl, x_std, x_mean = scale(x_data)
x_trn_scl, _, _ = scale(x_trn, std=x_std, mean=x_mean)
x_val_scl, _, _ = scale(x_val, std=x_std, mean=x_mean)
x_tst_scl, _, _ = scale(x_tst, std=x_std, mean=x_mean)
y_obs = read_multiple_obs([obs_temper_file, obs_flow_file], x_data)
y_obs = y_obs[y_vars]
y_pre = ds_pre[y_vars]
y_obs_trn, y_obs_val, y_obs_tst = separate_trn_tst(
y_obs,
train_start_date,
train_end_date,
val_start_date,
val_end_date,
test_start_date,
test_end_date,
)
y_pre_trn, y_pre_val, y_pre_tst = separate_trn_tst(
y_pre,
train_start_date,
train_end_date,
val_start_date,
val_end_date,
test_start_date,
test_end_date,
)
if normalize_y:
# scale y training data and get the mean and std
y_obs_trn, y_std, y_mean = scale(y_obs_trn)
y_pre_trn, _, _ = scale(y_pre_trn, y_std, y_mean)
else:
_, y_std, y_mean = scale(y_obs_trn)
data = {
"x_train": convert_batch_reshape(x_trn_scl, offset=offset, seq_len=seq_length),
"x_val": convert_batch_reshape(x_val_scl, offset=offset, seq_len=seq_length),
"x_test": convert_batch_reshape(x_tst_scl, offset=offset, seq_len=seq_length),
"x_std": x_std.to_array().values,
"x_mean": x_mean.to_array().values,
"x_cols": np.array(x_vars),
"ids_train": coord_as_reshaped_array(x_trn, "seg_id_nat", offset=offset, seq_len=seq_length),
"dates_train": coord_as_reshaped_array(x_trn, "date", offset=offset, seq_len=seq_length),
"ids_val": coord_as_reshaped_array(x_val, "seg_id_nat", offset=offset, seq_len=seq_length),
"dates_val": coord_as_reshaped_array(x_val, "date", offset=offset, seq_len=seq_length),
"ids_test": coord_as_reshaped_array(x_tst, "seg_id_nat", offset=offset, seq_len=seq_length),
"dates_test": coord_as_reshaped_array(x_tst, "date", offset=offset, seq_len=seq_length),
"y_pre_train": convert_batch_reshape(y_pre_trn, offset=offset, seq_len=seq_length, y=True, period=period),
"y_train": convert_batch_reshape(y_obs_trn, offset=offset, seq_len=seq_length, y=True, period=period),
"y_val": convert_batch_reshape(y_obs_val, offset=offset, seq_len=seq_length, y=True, period=period),
"y_test": convert_batch_reshape(y_obs_tst, offset=offset, seq_len=seq_length, y=True, period=period),
"y_vars": np.array(y_vars),
'period': np.array([period]),
'y_pre_train_val': convert_batch_reshape(y_pre_val, offset=offset, seq_len=seq_length, y=True, period=period),
'y_pre_train_test': convert_batch_reshape(y_pre_tst, offset=offset, seq_len=seq_length, y=True, period=period),
"y_std": y_std.to_array().values,
"y_mean": y_mean.to_array().values,
}
if out_file:
if os.path.isdir(out_file) == False:
os.makedirs(out_file)
'''
np.savez_compressed(os.path.join(out_file, 'pre_train.npz'),
x=data['x_train'],
y=data['y_pre_train'])
np.savez_compressed(os.path.join(out_file,'train.npz'),
x=data['x_train'],
y=data['y_obs_train'],
)
np.savez_compressed(os.path.join(out_file, 'test.npz'),
x=data['x_test'],
y=data['y_obs_tst'],
)
np.savez_compressed(os.path.join(out_file,'val.npz'),
x=data['x_val'],
y=data['y_obs_val'],
)
'''
np.savez_compressed(os.path.join(out_file,'data.npz'), **data)
return data
def prep_adj_matrix(infile, dist_type, out_file=None):
"""
process adj matrix.
**The resulting matrix is sorted by seg_id_nat **
:param infile:
:param dist_type: [str] type of distance matrix ("upstream", "downstream" or
"updown")
:param out_file:
:return: [numpy array] processed adjacency matrix
"""
adj_matrices = np.load(infile)
adj = adj_matrices[dist_type]
adj_full = sort_dist_matrix(adj, adj_matrices["rowcolnames"])
adj = adj_full[2]
adj = np.where(np.isinf(adj), 0, adj)
adj = -adj
mean_adj = np.mean(adj[adj != 0])
std_adj = np.std(adj[adj != 0])
adj[adj != 0] = adj[adj != 0] - mean_adj
adj[adj != 0] = adj[adj != 0] / std_adj
adj[adj != 0] = 1 / (1 + np.exp(-adj[adj != 0]))
I = np.eye(adj.shape[0])
A_hat = adj.copy() + I
D = np.sum(A_hat, axis=1)
D_inv = D ** -1.0
D_inv = np.diag(D_inv)
A_hat = np.matmul(D_inv, A_hat)
if out_file:
out_dm = [adj_full[0], adj_full[1], A_hat]
with open(out_file+'.pkl', 'wb') as f:
pickle.dump(out_dm, f, protocol=2)
return adj_full[0], adj_full[1], A_hat
def sort_dist_matrix(mat, row_col_names):
"""
sort the distance matrix by seg_id_nat
:return:
"""
df = pd.DataFrame(mat, columns=row_col_names, index=row_col_names)
df = df.sort_index(axis=0)
df = df.sort_index(axis=1)
sensor_id_to_ind = {}
for i, sensor_id in enumerate(df.columns):
sensor_id_to_ind[sensor_id] = i
return row_col_names, sensor_id_to_ind, df
#check = prep_adj_matrix('../../gits/river-dl/DRB_data/distance_matrix.npz', 'upstream', 'data/DRB_gwn_full/adj_mx')
#if __name__ == "__main__":
check2 = prep_data(obs_temper_file='../../gits/river-dl/DRB_data/obs_temp_full',
obs_flow_file='../../gits/river-dl/DRB_data/obs_flow_full',
pretrain_file='../../gits/river-dl/DRB_data/uncal_sntemp_input_output',
train_start_date=['1985-10-01', '2016-10-01'],
train_end_date=['2006-09-30', '2020-09-30'],
val_start_date='2006-10-01',
val_end_date='2016-09-30',
test_start_date=['1980-10-01', '2020-10-01'],
test_end_date=['1985-09-30', '2021-09-30'],
x_vars=["seg_rain", "seg_tave_air", "seginc_swrad", "seg_length", "seginc_potet", "seg_slope", "seg_humid",
"seg_elev"],
y_vars=['seg_tave_water'],
primary_variable='temp',
seq_length=365,
period=np.nan,
offset=1,
out_file = 'data/DRB_gwn_full')
'''if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--output_dir", type=str, default="data/METR-LA", help="Output directory.")
parser.add_argument("--traffic_df_filename", type=str, default="data/metr-la.h5", help="Raw traffic readings.",)
parser.add_argument("--seq_length_x", type=int, default=12, help="Sequence Length.",)
parser.add_argument("--seq_length_y", type=int, default=12, help="Sequence Length.",)
parser.add_argument("--y_start", type=int, default=1, help="Y pred start", )
parser.add_argument("--dow", action='store_true',)
args = parser.parse_args()
if os.path.exists(args.output_dir):
reply = str(input(f'{args.output_dir} exists. Do you want to overwrite it? (y/n)')).lower().strip()
if reply[0] != 'y': exit
else:
os.makedirs(args.output_dir)
generate_train_val_test(args)
##### Reformat our inputs to match theirs.
df = pd.read_hdf("data/metr-la.h5")
seq_length_x = 12
seq_length_y = 12
y_start = 1
LAtrain = np.load('data/METR-LA/train.npz')
LAtest = np.load('data/METR-LA/test.npz')
LAval = np.load('data/METR-LA/val.npz')
LAtrain['x'].shape
LAtrain['y'].shape
LAtest['x'].shape
LAtest['y'].shape
check = np.moveaxis(data['x_train'], [0,1,2,3], [0,2,1,3])
np.savez_compressed(os.path.join(out_file, 'pre_train.npz'),
x=data['x_train'],
y=data['y_pre_train'])
np.savez_compressed(os.path.join(out_file,'train.npz'),
x=data['x_train'],
y=data['y_pre_train'],
)
np.savez_compressed(os.path.join(out_file, 'test.npz'),
x=data['x_test'],
y=data['y_pre_test'],
)
np.savez_compressed(os.path.join(out_file,'val.npz'),
x=data['x_val'],
y=data['y_pre_val'],
)
''' | [((120, 15, 120, 35), 'numpy.asarray', 'np.asarray', ({(120, 26, 120, 34): 'combined'}, {}), '(combined)', True, 'import numpy as np\n'), ((138, 10, 138, 36), 'xarray.merge', 'xr.merge', (), '', True, 'import xarray as xr\n'), ((154, 11, 154, 63), 'numpy.reshape', 'np.reshape', ({(154, 22, 154, 26): 'data', (154, 28, 154, 62): '[n_batch * n_seg, seq_len, n_feat]'}, {}), '(data, [n_batch * n_seg, seq_len, n_feat])', True, 'import numpy as np\n'), ((196, 10, 196, 33), 'numpy.moveaxis', 'np.moveaxis', ({(196, 22, 196, 25): 'arr', (196, 27, 196, 28): '0', (196, 30, 196, 32): '-1'}, {}), '(arr, 0, -1)', True, 'import numpy as np\n'), ((205, 15, 205, 57), 'numpy.moveaxis', 'np.moveaxis', ({(205, 27, 205, 34): 'batched', (205, 36, 205, 45): '[0, 1, 2, 3]', (205, 47, 205, 56): '[0, 2, 1, 3]'}, {}), '(batched, [0, 1, 2, 3], [0, 2, 1, 3])', True, 'import numpy as np\n'), ((298, 13, 298, 40), 'xarray.open_zarr', 'xr.open_zarr', ({(298, 26, 298, 39): 'pretrain_file'}, {}), '(pretrain_file)', True, 'import xarray as xr\n'), ((413, 19, 413, 34), 'numpy.load', 'np.load', ({(413, 27, 413, 33): 'infile'}, {}), '(infile)', True, 'import numpy as np\n'), ((419, 15, 419, 37), 'numpy.mean', 'np.mean', ({(419, 23, 419, 36): 'adj[adj != 0]'}, {}), '(adj[adj != 0])', True, 'import numpy as np\n'), ((420, 14, 420, 35), 'numpy.std', 'np.std', ({(420, 21, 420, 34): 'adj[adj != 0]'}, {}), '(adj[adj != 0])', True, 'import numpy as np\n'), ((425, 8, 425, 28), 'numpy.eye', 'np.eye', ({(425, 15, 425, 27): 'adj.shape[0]'}, {}), '(adj.shape[0])', True, 'import numpy as np\n'), ((427, 8, 427, 29), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((429, 12, 429, 26), 'numpy.diag', 'np.diag', ({(429, 20, 429, 25): 'D_inv'}, {}), '(D_inv)', True, 'import numpy as np\n'), ((430, 12, 430, 35), 'numpy.matmul', 'np.matmul', ({(430, 22, 430, 27): 'D_inv', (430, 29, 430, 34): 'A_hat'}, {}), '(D_inv, A_hat)', True, 'import numpy as np\n'), ((446, 9, 446, 70), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((116, 14, 116, 80), 'numpy.arange', 'np.arange', (), '', True, 'import numpy as np\n'), ((117, 16, 117, 69), 'numpy.split', 'np.split', (), '', True, 'import numpy as np\n'), ((134, 13, 134, 35), 'xarray.open_zarr', 'xr.open_zarr', ({(134, 26, 134, 34): 'filename'}, {}), '(filename)', True, 'import xarray as xr\n'), ((166, 16, 166, 61), 'datetime.datetime.strptime', 'datetime.datetime.strptime', ({(166, 43, 166, 48): 'start', (166, 50, 166, 60): '"""%Y-%m-%d"""'}, {}), "(start, '%Y-%m-%d')", False, 'import datetime\n'), ((170, 14, 170, 57), 'datetime.datetime.strptime', 'datetime.datetime.strptime', ({(170, 41, 170, 44): 'end', (170, 46, 170, 56): '"""%Y-%m-%d"""'}, {}), "(end, '%Y-%m-%d')", False, 'import datetime\n'), ((206, 11, 206, 30), 'numpy.isfinite', 'np.isfinite', ({(206, 23, 206, 29): 'period'}, {}), '(period)', True, 'import numpy as np\n'), ((216, 18, 216, 71), 'xarray.broadcast', 'xr.broadcast', ({(216, 31, 216, 50): 'dataset[coord_name]', (216, 52, 216, 70): 'dataset[first_var]'}, {}), '(dataset[coord_name], dataset[first_var])', True, 'import xarray as xr\n'), ((356, 18, 356, 34), 'numpy.array', 'np.array', ({(356, 27, 356, 33): 'x_vars'}, {}), '(x_vars)', True, 'import numpy as np\n'), ((367, 18, 367, 34), 'numpy.array', 'np.array', ({(367, 27, 367, 33): 'y_vars'}, {}), '(y_vars)', True, 'import numpy as np\n'), ((368, 18, 368, 36), 'numpy.array', 'np.array', ({(368, 27, 368, 35): '[period]'}, {}), '([period])', True, 'import numpy as np\n'), ((417, 19, 417, 32), 
'numpy.isinf', 'np.isinf', ({(417, 28, 417, 31): 'adj'}, {}), '(adj)', True, 'import numpy as np\n'), ((376, 11, 376, 34), 'os.path.isdir', 'os.path.isdir', ({(376, 25, 376, 33): 'out_file'}, {}), '(out_file)', False, 'import os\n'), ((377, 12, 377, 33), 'os.makedirs', 'os.makedirs', ({(377, 24, 377, 32): 'out_file'}, {}), '(out_file)', False, 'import os\n'), ((399, 28, 399, 61), 'os.path.join', 'os.path.join', ({(399, 41, 399, 49): 'out_file', (399, 50, 399, 60): '"""data.npz"""'}, {}), "(out_file, 'data.npz')", False, 'import os\n'), ((423, 29, 423, 51), 'numpy.exp', 'np.exp', ({(423, 36, 423, 50): '(-adj[adj != 0])'}, {}), '(-adj[adj != 0])', True, 'import numpy as np\n'), ((434, 12, 434, 46), 'pickle.dump', 'pickle.dump', (), '', False, 'import pickle\n'), ((62, 19, 62, 51), 'xarray.concat', 'xr.concat', (), '', True, 'import xarray as xr\n')] |
CodedLadiesInnovateTech/python-challenges | Phase-1/Python Basic 1/Day-3.py | 22ce26c68fea6c7c243ada831e47c52e27a62127 | <<<<<<< HEAD
"""
1. Write a Python program to print the documents (syntax, description etc.) of Python built-in function(s).
Sample function : abs()
Expected Result :
abs(number) -> number
Return the absolute value of the argument.
Tools: help function
2. Write a Python program to print the calendar of a given month and year.
Tools: Use 'calendar' module.
3. Write a Python program to print the following here document.
Sample string :
a string that you "don't" have to escape
This
is a ....... multi-line
heredoc string --------> example
Tools: string formating
4. Write a Python program to calculate number of days between two dates.
Sample dates : (2014, 7, 2), (2014, 7, 11)
Expected output : 9 days
Tools: Datetime module, timedelta module
5. Write a Python program to get the volume of a sphere with radius 6.
Tools: input function, math
6. Write a Python program to get the difference between a given number and 17, if the number is greater than 17 return double the absolute difference.
Tools: abs function, input function, math
7. Write a Python program to test whether a number is within 100 of 1000 or 2000.
Tools: maths,input function
8. Write a Python program to calculate the sum of three given numbers, if the values are equal then return three times of their sum.
Tools: math, input function
9. Write a Python program to get a new string from a given string where "Is" has been added to the front. If the given string already begins with "Is" then return the string unchanged.
Tools: input function, string formating
10. Write a Python program to get a string which is n (non-negative integer) copies of a given string.
Tools: input function, slicing
=======
"""
1. Write a Python program to print the documents (syntax, description etc.) of Python built-in function(s).
Sample function : abs()
Expected Result :
abs(number) -> number
Return the absolute value of the argument.
Tools: help function
2. Write a Python program to print the calendar of a given month and year.
Tools: Use 'calendar' module.
3. Write a Python program to print the following here document.
Sample string :
a string that you "don't" have to escape
This
is a ....... multi-line
heredoc string --------> example
Tools: string formating
4. Write a Python program to calculate number of days between two dates.
Sample dates : (2014, 7, 2), (2014, 7, 11)
Expected output : 9 days
Tools: Datetime module, timedelta module
5. Write a Python program to get the volume of a sphere with radius 6.
Tools: input function, math
6. Write a Python program to get the difference between a given number and 17, if the number is greater than 17 return double the absolute difference.
Tools: abs function, input function, math
7. Write a Python program to test whether a number is within 100 of 1000 or 2000.
Tools: maths,input function
8. Write a Python program to calculate the sum of three given numbers, if the values are equal then return three times of their sum.
Tools: math, input function
9. Write a Python program to get a new string from a given string where "Is" has been added to the front. If the given string already begins with "Is" then return the string unchanged.
Tools: input function, string formating
10. Write a Python program to get a string which is n (non-negative integer) copies of a given string.
Tools: input function, slicing
>>>>>>> f4444ec0d72c645d12694e90df7429456db0611c
""" | [] |
gmgunter/pyre | tests/python/metaclass_inheritance.py | e9ff3f8c04661f8b2cd2ba0caded08b6fe8054e2 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2021 all rights reserved
#
#
"""
When a metaclass understands the extra keywords that can be passed during class declaration,
it has to override __prepare__, __new__, and __init__ to accommodate the change in signature
"""
class meta(type):
@classmethod
def __prepare__(metacls, name, bases, **kwds):
assert metacls.__name__ == 'meta'
assert name in ['base', 'derived']
if name == 'base':
assert bases == (object,)
assert kwds == {'arg1': True, 'arg2': False}
if name == 'derived':
assert bases == (base,)
assert kwds == {'arg1': False, 'arg2': True}
return super().__prepare__(name, bases)
def __new__(metacls, name, bases, attributes, **kwds):
assert metacls.__name__ == 'meta'
assert name in ['base', 'derived']
if name == 'base':
assert bases == (object,)
assert kwds == {'arg1': True, 'arg2': False}
if name == 'derived':
assert bases == (base,)
assert kwds == {'arg1': False, 'arg2': True}
return super().__new__(metacls, name, bases, attributes)
def __init__(self, name, bases, attributes, **kwds):
assert self.__name__ in ['base', 'derived']
if self.__name__ == 'base':
assert bases == (object,)
assert kwds == {'arg1': True, 'arg2': False}
if self.__name__ == 'derived':
assert bases == (base,)
assert kwds == {'arg1': False, 'arg2': True}
super().__init__(name, bases, attributes)
return
class base(object, metaclass=meta, arg1=True, arg2=False):
def __init__(self, **kwds):
assert type(self).__name__ == 'base'
assert kwds == {}
return
class derived(base, arg1=False, arg2=True):
def __init__(self, **kwds):
assert type(self).__name__ == 'derived'
assert kwds == {}
return
def test():
b = base()
d = derived()
return
# main
if __name__ == "__main__":
test()
# end of file
| [] |
idsdlab/basicai_sp21 | cs101/module8/8-1/chroma1.py | af9acba34c0417fed830de1b61753c50fd303169 |
from cs1media import *
import math
def dist(c1, c2):
r1, g1, b1 = c1
r2, g2, b2 = c2
return math.sqrt((r1-r2)**2 + (g1-g2)**2 + (b1-b2)**2)
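# Illustrative values for dist(), which is the plain Euclidean distance between
# two (r, g, b) tuples (a sketch, not part of the original file):
#   dist((255, 255, 255), (0, 0, 0))   -> ~441.67
#   dist((41, 75, 146), (41, 75, 146)) -> 0.0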
def chroma(img, key, threshold):
w, h = img.size()
for y in range(h):
for x in range(w):
p = img.get(x, y)
if dist(p, key) < threshold:
img.set(x, y, Color.yellow)
statue = load_picture("photos/statue1.jpg")
chroma(statue, (41, 75, 146), 70)
statue.show()
| [((8, 9, 8, 56), 'math.sqrt', 'math.sqrt', ({(8, 19, 8, 55): '((r1 - r2) ** 2 + (g1 - g2) ** 2 + (b1 - b2) ** 2)'}, {}), '((r1 - r2) ** 2 + (g1 - g2) ** 2 + (b1 - b2) ** 2)', False, 'import math\n')] |
RuthAngus/wfirst_stars | wfirst_stars/mklc.py | 60989fc56488ac915082e76c3088c6133909985b | import numpy as np
import scipy
import scipy.io
import pylab
import numpy
import glob
import pyfits
def mklc(t, nspot=200, incl=(scipy.pi)*5./12., amp=1., tau=30.5, p=10.0):
diffrot = 0.
''' This is a simplified version of the class-based routines in
    spot_model.py. It generates light curves for dark, point-like
spots with no limb-darkening.
Parameters:
nspot = desired number of spots present on star at any
one time
amp = desired light curve amplitude
tau = characteristic spot life-time
diffrot = fractional difference between equatorial and polar
rotation period
(unit of time is equatorial rotation period)'''
# print('Period = ', p)
dur = (max(t) - min(t))
# (crude estimate of) total number of spots needed during entire
# time-series
nspot_tot = int(nspot * dur / 2 / tau)
# uniform distribution of spot longitudes
lon = scipy.rand(nspot_tot) * 2 * scipy.pi
# distribution of spot latitudes uniform in sin(latitude)
lat = scipy.arcsin(scipy.rand(nspot_tot))
# spot rotation rate optionally depends on latitude
period = ((scipy.sin(lat) - 0.5) * diffrot + 1.0 ) * p
period0 = scipy.ones(nspot_tot) * p
# all spots have the same maximum area
# (crude estimate of) filling factor needed per spot
ff = amp / scipy.sqrt(nspot)
scale_fac = 1
amax = scipy.ones(nspot_tot) * ff * scale_fac
# all spots have the evolution timescale
decay = scipy.ones(nspot_tot) * tau
# uniform distribution of spot peak times
# start well before and end well after time-series limits (to
# avoid edge effects)
extra = 3 * decay.max()
pk = scipy.rand(nspot_tot) * (dur + 2 * extra) - extra
# COMPUTE THE LIGHT CURVE
# print("Computing light curve...")
time = numpy.array(t - min(t))
area_tot = scipy.zeros_like(time)
dF_tot = scipy.zeros_like(time)
dF_tot0 = scipy.zeros_like(time)
# add up the contributions of individual spots
for i in range(nspot_tot):
# Spot area
if (pk[i] == 0) + (decay[i] == 0):
area = scipy.ones_like(time) * amax[i]
else:
area = amax[i] * \
scipy.exp(-(time - pk[i])**2 / 2. / decay[i]**2)
area_tot += area
# Fore-shortening
phase = 2 * scipy.pi * time / period[i] + lon[i]
phase0 = 2 * scipy.pi * time / period0[i] + lon[i]
mu = scipy.cos(incl) * scipy.sin(lat[i]) + \
scipy.sin(incl) * scipy.cos(lat[i]) * scipy.cos(phase)
mu0 = scipy.cos(incl) * scipy.sin(lat[i]) + \
scipy.sin(incl) * scipy.cos(lat[i]) * scipy.cos(phase0)
mu[mu < 0] = 0.0
mu0[mu0 < 0] = 0.0
# Flux
dF_tot -= area * mu
dF_tot0 -= area * mu0
amp_eff = dF_tot.max()-dF_tot.min()
nspot_eff = area_tot / scale_fac / ff
res0 = scipy.array([nspot_eff.mean(), ff, amp_eff])
res1 = scipy.zeros((4, len(time)))
res1[0,:] = time
res1[1,:] = area_tot
res1[2,:] = dF_tot
res1[3,:] = dF_tot0
# print('Used %d spots in total over %d rotation periods.' % (nspot_tot, dur))
# print('Mean filling factor of individual spots was %.4f.' % ff)
# print('Desired amplitude was %.4f, actual amplitude was %.4f.' \
# % (amp, amp_eff))
# print('Desired number of spots at any one time was %d.' % nspot)
return res0, res1
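# Illustrative usage sketch (the time grid and amplitude below are assumptions,
# not values taken from the original project):
#   import numpy as np
#   t = np.arange(0., 200., 0.02)                      # 200 days, 0.02-day cadence
#   res0, res1 = mklc(t, nspot=200, amp=0.01, tau=30.5, p=10.0)
#   time, area_tot, dF_tot, dF_tot0 = res1             # unpack the four rows of res1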
| [((62, 15, 62, 37), 'scipy.zeros_like', 'scipy.zeros_like', ({(62, 32, 62, 36): 'time'}, {}), '(time)', False, 'import scipy\n'), ((63, 13, 63, 35), 'scipy.zeros_like', 'scipy.zeros_like', ({(63, 30, 63, 34): 'time'}, {}), '(time)', False, 'import scipy\n'), ((64, 14, 64, 36), 'scipy.zeros_like', 'scipy.zeros_like', ({(64, 31, 64, 35): 'time'}, {}), '(time)', False, 'import scipy\n'), ((37, 23, 37, 44), 'scipy.rand', 'scipy.rand', ({(37, 34, 37, 43): 'nspot_tot'}, {}), '(nspot_tot)', False, 'import scipy\n'), ((41, 14, 41, 35), 'scipy.ones', 'scipy.ones', ({(41, 25, 41, 34): 'nspot_tot'}, {}), '(nspot_tot)', False, 'import scipy\n'), ((45, 15, 45, 32), 'scipy.sqrt', 'scipy.sqrt', ({(45, 26, 45, 31): 'nspot'}, {}), '(nspot)', False, 'import scipy\n'), ((50, 12, 50, 33), 'scipy.ones', 'scipy.ones', ({(50, 23, 50, 32): 'nspot_tot'}, {}), '(nspot_tot)', False, 'import scipy\n'), ((34, 10, 34, 31), 'scipy.rand', 'scipy.rand', ({(34, 21, 34, 30): 'nspot_tot'}, {}), '(nspot_tot)', False, 'import scipy\n'), ((47, 11, 47, 32), 'scipy.ones', 'scipy.ones', ({(47, 22, 47, 31): 'nspot_tot'}, {}), '(nspot_tot)', False, 'import scipy\n'), ((56, 9, 56, 30), 'scipy.rand', 'scipy.rand', ({(56, 20, 56, 29): 'nspot_tot'}, {}), '(nspot_tot)', False, 'import scipy\n'), ((71, 19, 71, 40), 'scipy.ones_like', 'scipy.ones_like', ({(71, 35, 71, 39): 'time'}, {}), '(time)', False, 'import scipy\n'), ((74, 16, 74, 64), 'scipy.exp', 'scipy.exp', ({(74, 26, 74, 63): '(-(time - pk[i]) ** 2 / 2.0 / decay[i] ** 2)'}, {}), '(-(time - pk[i]) ** 2 / 2.0 / decay[i] ** 2)', False, 'import scipy\n'), ((80, 13, 80, 28), 'scipy.cos', 'scipy.cos', ({(80, 23, 80, 27): 'incl'}, {}), '(incl)', False, 'import scipy\n'), ((80, 31, 80, 48), 'scipy.sin', 'scipy.sin', ({(80, 41, 80, 47): 'lat[i]'}, {}), '(lat[i])', False, 'import scipy\n'), ((81, 50, 81, 66), 'scipy.cos', 'scipy.cos', ({(81, 60, 81, 65): 'phase'}, {}), '(phase)', False, 'import scipy\n'), ((82, 14, 82, 29), 'scipy.cos', 'scipy.cos', ({(82, 24, 82, 28): 'incl'}, {}), '(incl)', False, 'import scipy\n'), ((82, 32, 82, 49), 'scipy.sin', 'scipy.sin', ({(82, 42, 82, 48): 'lat[i]'}, {}), '(lat[i])', False, 'import scipy\n'), ((83, 50, 83, 67), 'scipy.cos', 'scipy.cos', ({(83, 60, 83, 66): 'phase0'}, {}), '(phase0)', False, 'import scipy\n'), ((40, 15, 40, 29), 'scipy.sin', 'scipy.sin', ({(40, 25, 40, 28): 'lat'}, {}), '(lat)', False, 'import scipy\n'), ((81, 12, 81, 27), 'scipy.sin', 'scipy.sin', ({(81, 22, 81, 26): 'incl'}, {}), '(incl)', False, 'import scipy\n'), ((81, 30, 81, 47), 'scipy.cos', 'scipy.cos', ({(81, 40, 81, 46): 'lat[i]'}, {}), '(lat[i])', False, 'import scipy\n'), ((83, 12, 83, 27), 'scipy.sin', 'scipy.sin', ({(83, 22, 83, 26): 'incl'}, {}), '(incl)', False, 'import scipy\n'), ((83, 30, 83, 47), 'scipy.cos', 'scipy.cos', ({(83, 40, 83, 46): 'lat[i]'}, {}), '(lat[i])', False, 'import scipy\n')] |
pelavarre/pybashish | bin/sort.py | 03f74356fb0a2a0ef7106f09c059fd9b375ce89a | #!/usr/bin/env python3
"""
usage: sort.py [-h]
sort lines
options:
-h, --help show this help message and exit
quirks:
sorts tabs as different than spaces
sorts some spaces ending a line as different than none ending a line
examples:
Oh no! No examples disclosed!! 💥 💔 💥
"""
# FIXME: doc -k$N,$N and -n and maybe little else is worth learning
# FIXME: add -k-1,-1 for negative field indexing
# FIXME: think into the mess at "sort" vs "LC_ALL=C sort"
import sys
import argdoc
def main():
args = argdoc.parse_args()
sys.stderr.write("{}\n".format(args))
sys.stderr.write("{}\n".format(argdoc.format_usage().rstrip()))
sys.stderr.write("sort.py: error: not implemented\n")
sys.exit(2) # exit 2 from rejecting usage
if __name__ == "__main__":
main()
# copied from: git clone https://github.com/pelavarre/pybashish.git
| [((29, 11, 29, 30), 'argdoc.parse_args', 'argdoc.parse_args', ({}, {}), '()', False, 'import argdoc\n'), ((32, 4, 32, 57), 'sys.stderr.write', 'sys.stderr.write', ({(32, 21, 32, 56): '"""sort.py: error: not implemented\n"""'}, {}), "('sort.py: error: not implemented\\n')", False, 'import sys\n'), ((33, 4, 33, 15), 'sys.exit', 'sys.exit', ({(33, 13, 33, 14): '(2)'}, {}), '(2)', False, 'import sys\n'), ((31, 35, 31, 56), 'argdoc.format_usage', 'argdoc.format_usage', ({}, {}), '()', False, 'import argdoc\n')] |
davandev/davanserver | davan/http/service/telldus/tdtool.py | 0be914268c8e34d4092251508bae213cff3ef621 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, getopt, httplib, urllib, json, os
import oauth.oauth as oauth
import datetime
from configobj import ConfigObj
import logging
global logger
logger = logging.getLogger(os.path.basename(__file__))
import davan.util.application_logger as log_manager
#insert your own public_key and private_key
import davan.config.config_creator as config_creator
configuration = config_creator.create()
PUBLIC_KEY = configuration["TELLDUS_PUBLIC_KEY"]
PRIVATE_KEY = configuration["TELLDUS_PRIVATE_KEY"]
TELLSTICK_TURNON = 1
TELLSTICK_TURNOFF = 2
TELLSTICK_BELL = 4
TELLSTICK_DIM = 16
TELLSTICK_UP = 128
TELLSTICK_DOWN = 256
SUPPORTED_METHODS = TELLSTICK_TURNON | TELLSTICK_TURNOFF | TELLSTICK_BELL | TELLSTICK_DIM | TELLSTICK_UP | TELLSTICK_DOWN;
def printUsage():
print("Usage: %s [ options ]" % sys.argv[0])
print("")
print("Options:")
print(" -[lnfdbvh] [ --list ] [ --help ]")
print(" [ --on device ] [ --off device ] [ --bell device ]")
print(" [ --dimlevel level --dim device ]")
print(" [ --up device --down device ]")
print("")
print(" --list (-l short option)")
print(" List currently configured devices.")
print("")
print(" --help (-h short option)")
print(" Shows this screen.")
print("")
print(" --on device (-n short option)")
print(" Turns on device. 'device' must be an integer of the device-id")
	print(" Device-id and name are output with the --list option")
print("")
print(" --off device (-f short option)")
print(" Turns off device. 'device' must be an integer of the device-id")
	print(" Device-id and name are output with the --list option")
print("")
print(" --dim device (-d short option)")
print(" Dims device. 'device' must be an integer of the device-id")
	print(" Device-id and name are output with the --list option")
print(" Note: The dimlevel parameter must be set before using this option.")
print("")
print(" --dimlevel level (-v short option)")
print(" Set dim level. 'level' should an integer, 0-255.")
print(" Note: This parameter must be set before using dim.")
print("")
print(" --bell device (-b short option)")
print(" Sends bell command to devices supporting this. 'device' must")
print(" be an integer of the device-id")
	print(" Device-id and name are output with the --list option")
print("")
print(" --up device")
print(" Sends up command to devices supporting this. 'device' must")
print(" be an integer of the device-id")
	print(" Device-id and name are output with the --list option")
print("")
print(" --down device")
print(" Sends down command to devices supporting this. 'device' must")
print(" be an integer of the device-id")
	print(" Device-id and name are output with the --list option")
print("")
print(" --list-sensors (-s short option)")
print(" Lists currently configured sensors")
print("")
print(" --sensor-data sensor (-d short option)")
print(" Get sensor data with sensor id number")
print("")
print("Report bugs to <[email protected]>")
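# Illustrative invocations (a sketch; assumes the script is executable and a
# Telldus token has already been obtained via the --authenticate flow below):
#   ./tdtool.py --list
#   ./tdtool.py --on <device-id>
#   ./tdtool.py --dimlevel 128 --dim <device-id>
#   ./tdtool.py --sensor-data <sensor-id>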
def listSensors():
response = doRequest('sensors/list', {'includeIgnored': 1});
logger.debug("Number of sensors: %i" % len(response['sensor']));
for sensor in response['sensor']:
lastupdate = datetime.datetime.fromtimestamp(int(sensor['lastUpdated']));
logger.debug( "%s\t%s\t%s" % (sensor['id'], sensor['name'], lastupdate))
def listSensorsAndValues():
response = doRequest('sensors/list', {'includeValues': 1});
return response
def listDevicesAndValues():
response = doRequest('devices/list', {'supportedMethods': SUPPORTED_METHODS})
return response
def getSensorData(sensorId):
response = doRequest('sensor/info', {'id': sensorId });
lastupdate = datetime.datetime.fromtimestamp(int(response['lastUpdated']));
sensor_name = response['name'];
for data in response['data']:
logger.debug( "%s\t%s\t%s\t%s" % (sensor_name, data['name'], data['value'], lastupdate) )
def listDevices():
response = doRequest('devices/list', {'supportedMethods': SUPPORTED_METHODS})
logger.debug("Number of devices: %i" % len(response['device']));
for device in response['device']:
if (device['state'] == TELLSTICK_TURNON):
state = 'ON'
elif (device['state'] == TELLSTICK_TURNOFF):
state = 'OFF'
elif (device['state'] == TELLSTICK_DIM):
state = "DIMMED"
elif (device['state'] == TELLSTICK_UP):
state = "UP"
elif (device['state'] == TELLSTICK_DOWN):
state = "DOWN"
else:
state = 'Unknown state'
logger.debug("%s\t%s\t%s" % (device['id'], device['name'], state));
def doMethod(deviceId, methodId, methodValue = 0):
response = doRequest('device/info', {'id': deviceId})
if (methodId == TELLSTICK_TURNON):
method = 'on'
elif (methodId == TELLSTICK_TURNOFF):
method = 'off'
elif (methodId == TELLSTICK_BELL):
method = 'bell'
elif (methodId == TELLSTICK_UP):
method = 'up'
elif (methodId == TELLSTICK_DOWN):
method = 'down'
if ('error' in response):
name = ''
retString = response['error']
else:
name = response['name']
response = doRequest('device/command', {'id': deviceId, 'method': methodId, 'value': methodValue})
if ('error' in response):
retString = response['error']
else:
retString = response['status']
if (methodId in (TELLSTICK_TURNON, TELLSTICK_TURNOFF)):
logger.debug("Turning %s device %s, %s - %s" % ( method, deviceId, name, retString));
elif (methodId in (TELLSTICK_BELL, TELLSTICK_UP, TELLSTICK_DOWN)):
logger.debug("Sending %s to: %s %s - %s" % (method, deviceId, name, retString))
elif (methodId == TELLSTICK_DIM):
logger.debug("Dimming device: %s %s to %s - %s" % (deviceId, name, methodValue, retString))
def doRequest(method, params):
global config
config = ConfigObj(os.environ['HOME'] + '/.config/Telldus/tdtool.conf')
consumer = oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY)
token = oauth.OAuthToken(config['token'], config['tokenSecret'])
oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer, token=token, http_method='GET', http_url="http://api.telldus.com/json/" + method, parameters=params)
oauth_request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, token)
headers = oauth_request.to_header()
headers['Content-Type'] = 'application/x-www-form-urlencoded'
conn = httplib.HTTPConnection("api.telldus.com:80")
conn.request('GET', "/json/" + method + "?" + urllib.urlencode(params, True).replace('+', '%20'), headers=headers)
response = conn.getresponse()
try:
return json.load(response)
except:
logger.debug( 'Failed to decode response :%s'%str(response))
return ""
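# Illustrative call (a sketch mirroring what listDevices() above already does):
#   response = doRequest('devices/list', {'supportedMethods': SUPPORTED_METHODS})
#   for device in response.get('device', []):
#       logger.debug("%s\t%s" % (device['id'], device['name']))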
def requestToken():
global config
consumer = oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY)
request = oauth.OAuthRequest.from_consumer_and_token(consumer, http_url='http://api.telldus.com/oauth/requestToken')
request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, None)
conn = httplib.HTTPConnection('api.telldus.com:80')
conn.request(request.http_method, '/oauth/requestToken', headers=request.to_header())
resp = conn.getresponse().read()
token = oauth.OAuthToken.from_string(resp)
logger.debug( 'Open the following url in your webbrowser:\nhttp://api.telldus.com/oauth/authorize?oauth_token=%s\n' % token.key)
logger.debug( 'After logging in and accepting to use this application run:\n%s --authenticate' % (sys.argv[0]))
config['requestToken'] = str(token.key)
config['requestTokenSecret'] = str(token.secret)
saveConfig()
def getAccessToken():
global config
consumer = oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY)
token = oauth.OAuthToken(config['requestToken'], config['requestTokenSecret'])
request = oauth.OAuthRequest.from_consumer_and_token(consumer, token=token, http_method='GET', http_url='http://api.telldus.com/oauth/accessToken')
request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, token)
conn = httplib.HTTPConnection('api.telldus.com:80')
conn.request(request.http_method, request.to_url(), headers=request.to_header())
resp = conn.getresponse()
if resp.status != 200:
logger.debug( 'Error retreiving access token, the server replied:\n%s' % resp.read())
return
token = oauth.OAuthToken.from_string(resp.read())
config['requestToken'] = None
config['requestTokenSecret'] = None
config['token'] = str(token.key)
config['tokenSecret'] = str(token.secret)
logger.debug( 'Authentication successful, you can now use tdtool')
saveConfig()
def authenticate():
try:
opts, args = getopt.getopt(sys.argv[1:], '', ['authenticate'])
for opt, arg in opts:
if opt in ('--authenticate'):
getAccessToken()
return
except getopt.GetoptError:
pass
requestToken()
def saveConfig():
global config
try:
os.makedirs(os.environ['HOME'] + '/.config/Telldus')
except:
pass
config.write()
def main(argv):
global config
if ('token' not in config or config['token'] == ''):
authenticate()
return
try:
opts, args = getopt.getopt(argv, "lsd:n:f:d:b:v:h", ["list", "list-sensors", "sensor-data=", "on=", "off=", "dim=", "bell=", "dimlevel=", "up=", "down=", "help"])
except getopt.GetoptError:
printUsage()
sys.exit(2)
dimlevel = -1
for opt, arg in opts:
if opt in ("-h", "--help"):
printUsage()
elif opt in ("-l", "--list"):
listDevices()
elif opt in ("-s", "--list-sensors"):
listSensors()
elif opt in ("-x", "--list-sensorsvalue"):
listSensorsAndValues()
elif opt in ("-d", "--sensor-data"):
getSensorData(arg)
elif opt in ("-n", "--on"):
doMethod(arg, TELLSTICK_TURNON)
elif opt in ("-f", "--off"):
doMethod(arg, TELLSTICK_TURNOFF)
elif opt in ("-b", "--bell"):
doMethod(arg, TELLSTICK_BELL)
elif opt in ("-d", "--dim"):
if (dimlevel < 0):
logger.debug("Dimlevel must be set with --dimlevel before --dim")
else:
doMethod(arg, TELLSTICK_DIM, dimlevel)
elif opt in ("-v", "--dimlevel"):
dimlevel = arg
elif opt in ("--up"):
doMethod(arg, TELLSTICK_UP)
elif opt in ("--down"):
doMethod(arg, TELLSTICK_DOWN)
if __name__ == "__main__":
config = ConfigObj(os.environ['HOME'] + '/.config/Telldus/tdtool.conf')
configuration = config_creator.create()
log_manager.start_logging(configuration["LOGFILE_PATH"],loglevel=4)
main(sys.argv[1:])
| [((15, 16, 15, 39), 'davan.config.config_creator.create', 'config_creator.create', ({}, {}), '()', True, 'import davan.config.config_creator as config_creator\n'), ((11, 27, 11, 53), 'os.path.basename', 'os.path.basename', ({(11, 44, 11, 52): '__file__'}, {}), '(__file__)', False, 'import sys, getopt, httplib, urllib, json, os\n'), ((160, 10, 160, 72), 'configobj.ConfigObj', 'ConfigObj', ({(160, 20, 160, 71): "os.environ['HOME'] + '/.config/Telldus/tdtool.conf'"}, {}), "(os.environ['HOME'] + '/.config/Telldus/tdtool.conf')", False, 'from configobj import ConfigObj\n'), ((162, 12, 162, 56), 'oauth.oauth.OAuthConsumer', 'oauth.OAuthConsumer', ({(162, 32, 162, 42): 'PUBLIC_KEY', (162, 44, 162, 55): 'PRIVATE_KEY'}, {}), '(PUBLIC_KEY, PRIVATE_KEY)', True, 'import oauth.oauth as oauth\n'), ((163, 9, 163, 65), 'oauth.oauth.OAuthToken', 'oauth.OAuthToken', ({(163, 26, 163, 41): "config['token']", (163, 43, 163, 64): "config['tokenSecret']"}, {}), "(config['token'], config['tokenSecret'])", True, 'import oauth.oauth as oauth\n'), ((165, 17, 165, 170), 'oauth.oauth.OAuthRequest.from_consumer_and_token', 'oauth.OAuthRequest.from_consumer_and_token', (), '', True, 'import oauth.oauth as oauth\n'), ((170, 8, 170, 52), 'httplib.HTTPConnection', 'httplib.HTTPConnection', ({(170, 31, 170, 51): '"""api.telldus.com:80"""'}, {}), "('api.telldus.com:80')", False, 'import sys, getopt, httplib, urllib, json, os\n'), ((182, 12, 182, 56), 'oauth.oauth.OAuthConsumer', 'oauth.OAuthConsumer', ({(182, 32, 182, 42): 'PUBLIC_KEY', (182, 44, 182, 55): 'PRIVATE_KEY'}, {}), '(PUBLIC_KEY, PRIVATE_KEY)', True, 'import oauth.oauth as oauth\n'), ((183, 11, 183, 117), 'oauth.oauth.OAuthRequest.from_consumer_and_token', 'oauth.OAuthRequest.from_consumer_and_token', (), '', True, 'import oauth.oauth as oauth\n'), ((185, 8, 185, 52), 'httplib.HTTPConnection', 'httplib.HTTPConnection', ({(185, 31, 185, 51): '"""api.telldus.com:80"""'}, {}), "('api.telldus.com:80')", False, 'import sys, getopt, httplib, urllib, json, os\n'), ((189, 9, 189, 43), 'oauth.oauth.OAuthToken.from_string', 'oauth.OAuthToken.from_string', ({(189, 38, 189, 42): 'resp'}, {}), '(resp)', True, 'import oauth.oauth as oauth\n'), ((198, 12, 198, 56), 'oauth.oauth.OAuthConsumer', 'oauth.OAuthConsumer', ({(198, 32, 198, 42): 'PUBLIC_KEY', (198, 44, 198, 55): 'PRIVATE_KEY'}, {}), '(PUBLIC_KEY, PRIVATE_KEY)', True, 'import oauth.oauth as oauth\n'), ((199, 9, 199, 79), 'oauth.oauth.OAuthToken', 'oauth.OAuthToken', ({(199, 26, 199, 48): "config['requestToken']", (199, 50, 199, 78): "config['requestTokenSecret']"}, {}), "(config['requestToken'], config['requestTokenSecret'])", True, 'import oauth.oauth as oauth\n'), ((200, 11, 200, 148), 'oauth.oauth.OAuthRequest.from_consumer_and_token', 'oauth.OAuthRequest.from_consumer_and_token', (), '', True, 'import oauth.oauth as oauth\n'), ((202, 8, 202, 52), 'httplib.HTTPConnection', 'httplib.HTTPConnection', ({(202, 31, 202, 51): '"""api.telldus.com:80"""'}, {}), "('api.telldus.com:80')", False, 'import sys, getopt, httplib, urllib, json, os\n'), ((290, 10, 290, 72), 'configobj.ConfigObj', 'ConfigObj', ({(290, 20, 290, 71): "os.environ['HOME'] + '/.config/Telldus/tdtool.conf'"}, {}), "(os.environ['HOME'] + '/.config/Telldus/tdtool.conf')", False, 'from configobj import ConfigObj\n'), ((291, 17, 291, 40), 'davan.config.config_creator.create', 'config_creator.create', ({}, {}), '()', True, 'import davan.config.config_creator as config_creator\n'), ((292, 1, 292, 68), 'davan.util.application_logger.start_logging', 
'log_manager.start_logging', (), '', True, 'import davan.util.application_logger as log_manager\n'), ((166, 28, 166, 66), 'oauth.oauth.OAuthSignatureMethod_HMAC_SHA1', 'oauth.OAuthSignatureMethod_HMAC_SHA1', ({}, {}), '()', True, 'import oauth.oauth as oauth\n'), ((175, 9, 175, 28), 'json.load', 'json.load', ({(175, 19, 175, 27): 'response'}, {}), '(response)', False, 'import sys, getopt, httplib, urllib, json, os\n'), ((184, 22, 184, 60), 'oauth.oauth.OAuthSignatureMethod_HMAC_SHA1', 'oauth.OAuthSignatureMethod_HMAC_SHA1', ({}, {}), '()', True, 'import oauth.oauth as oauth\n'), ((201, 22, 201, 60), 'oauth.oauth.OAuthSignatureMethod_HMAC_SHA1', 'oauth.OAuthSignatureMethod_HMAC_SHA1', ({}, {}), '()', True, 'import oauth.oauth as oauth\n'), ((219, 15, 219, 64), 'getopt.getopt', 'getopt.getopt', ({(219, 29, 219, 41): 'sys.argv[1:]', (219, 43, 219, 45): '""""""', (219, 47, 219, 63): "['authenticate']"}, {}), "(sys.argv[1:], '', ['authenticate'])", False, 'import sys, getopt, httplib, urllib, json, os\n'), ((231, 2, 231, 54), 'os.makedirs', 'os.makedirs', ({(231, 14, 231, 53): "(os.environ['HOME'] + '/.config/Telldus')"}, {}), "(os.environ['HOME'] + '/.config/Telldus')", False, 'import sys, getopt, httplib, urllib, json, os\n'), ((242, 15, 242, 164), 'getopt.getopt', 'getopt.getopt', ({(242, 29, 242, 33): 'argv', (242, 35, 242, 52): '"""lsd:n:f:d:b:v:h"""', (242, 54, 242, 163): "['list', 'list-sensors', 'sensor-data=', 'on=', 'off=', 'dim=', 'bell=',\n 'dimlevel=', 'up=', 'down=', 'help']"}, {}), "(argv, 'lsd:n:f:d:b:v:h', ['list', 'list-sensors',\n 'sensor-data=', 'on=', 'off=', 'dim=', 'bell=', 'dimlevel=', 'up=',\n 'down=', 'help'])", False, 'import sys, getopt, httplib, urllib, json, os\n'), ((245, 2, 245, 13), 'sys.exit', 'sys.exit', ({(245, 11, 245, 12): '(2)'}, {}), '(2)', False, 'import sys, getopt, httplib, urllib, json, os\n'), ((171, 47, 171, 77), 'urllib.urlencode', 'urllib.urlencode', ({(171, 64, 171, 70): 'params', (171, 72, 171, 76): '(True)'}, {}), '(params, True)', False, 'import sys, getopt, httplib, urllib, json, os\n')] |
rajreet/ichnaea | ichnaea/data/export.py | 7bd2eaa9568f9004e566b802623299625c29f5ae | from collections import defaultdict
import json
import re
import time
from urllib.parse import urlparse
import uuid
import boto3
import boto3.exceptions
import botocore.exceptions
import markus
import redis.exceptions
import requests
import requests.exceptions
from sqlalchemy import select
import sqlalchemy.exc
from ichnaea.data import _map_content_enabled
from ichnaea.models import (
ApiKey,
BlueObservation,
BlueReport,
BlueShard,
CellObservation,
CellReport,
CellShard,
DataMap,
ExportConfig,
Report,
WifiObservation,
WifiReport,
WifiShard,
)
from ichnaea.models.content import encode_datamap_grid
from ichnaea import util
WHITESPACE = re.compile(r"\s", flags=re.UNICODE)
METRICS = markus.get_metrics()
class IncomingQueue(object):
"""
The incoming queue contains the data collected in the web application. It
is the single entrypoint from which all other data pipelines get their
data.
It distributes the data into the configured export queues, checks those
queues and if they contain enough or old enough data schedules an async
export task to process the data in each queue.
"""
def __init__(self, task):
self.task = task
def __call__(self, export_task):
redis_client = self.task.redis_client
data_queue = self.task.app.data_queues["update_incoming"]
data = data_queue.dequeue()
grouped = defaultdict(list)
for item in data:
grouped[(item["api_key"], item.get("source", "gnss"))].append(
{"api_key": item["api_key"], "report": item["report"]}
)
with self.task.db_session(commit=False) as session:
export_configs = ExportConfig.all(session)
with self.task.redis_pipeline() as pipe:
for (api_key, source), items in grouped.items():
for config in export_configs:
if config.allowed(api_key, source):
queue_key = config.queue_key(api_key, source)
queue = config.queue(queue_key, redis_client)
queue.enqueue(items, pipe=pipe)
for config in export_configs:
# Check all queues if they now contain enough data or
# old enough data to be ready for processing.
for queue_key in config.partitions(redis_client):
queue = config.queue(queue_key, redis_client)
if queue.ready():
export_task.delay(config.name, queue_key)
if data_queue.ready():
self.task.apply_countdown()
class ReportExporter(object):
_retriable = (IOError,)
_retries = 3
_retry_wait = 1.0
def __init__(self, task, config, queue_key):
self.task = task
self.config = config
self.queue_key = queue_key
self.queue = config.queue(queue_key, task.redis_client)
self.stats_tags = ["key:" + self.config.name]
@staticmethod
def export(task, name, queue_key):
with task.db_session(commit=False) as session:
config = ExportConfig.get(session, name)
exporter_types = {
"dummy": DummyExporter,
"geosubmit": GeosubmitExporter,
"internal": InternalExporter,
"s3": S3Exporter,
}
exporter_type = exporter_types.get(config.schema)
if exporter_type is not None:
exporter_type(task, config, queue_key)()
def __call__(self):
queue_items = self.queue.dequeue()
if not queue_items:
return
success = False
for i in range(self._retries):
try:
with METRICS.timer("data.export.upload.timing", tags=self.stats_tags):
self.send(queue_items)
success = True
except self._retriable:
success = False
time.sleep(self._retry_wait * (i ** 2 + 1))
if success:
METRICS.incr("data.export.batch", tags=self.stats_tags)
break
if success and self.queue.ready():
self.task.apply_countdown(args=[self.config.name, self.queue_key])
def send(self, queue_items):
raise NotImplementedError()
class DummyExporter(ReportExporter):
def send(self, queue_items):
pass
class GeosubmitExporter(ReportExporter):
_retriable = (IOError, requests.exceptions.RequestException)
def send(self, queue_items):
# ignore metadata
reports = [item["report"] for item in queue_items]
headers = {
"Content-Encoding": "gzip",
"Content-Type": "application/json",
"User-Agent": "ichnaea",
}
response = requests.post(
self.config.url,
data=util.encode_gzip(
json.dumps({"items": reports}).encode(), compresslevel=5
),
headers=headers,
timeout=60.0,
)
# log upload_status and trigger exception for bad responses
# this causes the task to be re-tried
METRICS.incr(
"data.export.upload",
tags=self.stats_tags + ["status:%s" % response.status_code],
)
response.raise_for_status()
class S3Exporter(ReportExporter):
_retriable = (
IOError,
boto3.exceptions.Boto3Error,
botocore.exceptions.BotoCoreError,
)
def send(self, queue_items):
# ignore metadata
reports = [item["report"] for item in queue_items]
_, bucketname, path = urlparse(self.config.url)[:3]
# s3 key names start without a leading slash
path = path.lstrip("/")
if not path.endswith("/"):
path += "/"
year, month, day = util.utcnow().timetuple()[:3]
# strip away queue prefix again
parts = self.queue_key.split(":")
source = parts[1]
api_key = parts[2]
obj_name = path.format(
source=source, api_key=api_key, year=year, month=month, day=day
)
obj_name += uuid.uuid1().hex + ".json.gz"
try:
data = util.encode_gzip(
json.dumps({"items": reports}).encode(), compresslevel=7
)
s3 = boto3.resource("s3")
bucket = s3.Bucket(bucketname)
obj = bucket.Object(obj_name)
obj.put(Body=data, ContentEncoding="gzip", ContentType="application/json")
METRICS.incr(
"data.export.upload", tags=self.stats_tags + ["status:success"]
)
except Exception:
METRICS.incr(
"data.export.upload", tags=self.stats_tags + ["status:failure"]
)
raise
class InternalTransform(object):
"""
This maps the geosubmit v2 schema used in view code and external
transfers (backup, forward to partners) to the internal submit v1
schema used in our own database models.
"""
# *_id maps a source section id to a target section id
# *_map maps fields inside the section from source to target id
# if the names are equal, a simple string can be specified instead
# of a two-tuple
position_id = ("position", None)
position_map = [
("latitude", "lat"),
("longitude", "lon"),
"accuracy",
"altitude",
("altitudeAccuracy", "altitude_accuracy"),
"heading",
"pressure",
"speed",
"source",
]
blue_id = ("bluetoothBeacons", "blue")
blue_map = [("macAddress", "mac"), "age", ("signalStrength", "signal")]
cell_id = ("cellTowers", "cell")
cell_map = [
("radioType", "radio"),
("mobileCountryCode", "mcc"),
("mobileNetworkCode", "mnc"),
("locationAreaCode", "lac"),
("cellId", "cid"),
"age",
"asu",
("primaryScramblingCode", "psc"),
"serving",
("signalStrength", "signal"),
("timingAdvance", "ta"),
]
wifi_id = ("wifiAccessPoints", "wifi")
wifi_map = [
("macAddress", "mac"),
"age",
"channel",
"frequency",
("radioType", "radio"),
("signalToNoiseRatio", "snr"),
("signalStrength", "signal"),
]
def _map_dict(self, item_source, field_map):
value = {}
for spec in field_map:
if isinstance(spec, tuple):
source, target = spec
else:
source = spec
target = spec
source_value = item_source.get(source)
if source_value is not None:
value[target] = source_value
return value
def _parse_dict(self, item, report, key_map, field_map):
value = {}
item_source = item.get(key_map[0])
if item_source:
value = self._map_dict(item_source, field_map)
if value:
if key_map[1] is None:
report.update(value)
else:
report[key_map[1]] = value
return value
def _parse_list(self, item, report, key_map, field_map):
values = []
for value_item in item.get(key_map[0], ()):
value = self._map_dict(value_item, field_map)
if value:
values.append(value)
if values:
report[key_map[1]] = values
return values
def __call__(self, item):
report = {}
self._parse_dict(item, report, self.position_id, self.position_map)
blues = self._parse_list(item, report, self.blue_id, self.blue_map)
cells = self._parse_list(item, report, self.cell_id, self.cell_map)
wifis = self._parse_list(item, report, self.wifi_id, self.wifi_map)
position = item.get("position") or {}
gps_age = position.get("age", 0)
timestamp = item.get("timestamp")
if timestamp:
# turn timestamp into GPS timestamp
report["timestamp"] = timestamp - gps_age
if gps_age:
# Normalize age fields to be relative to GPS time
for type_ in ("blue", "cell", "wifi"):
for record in report.get(type_, ()):
record["age"] = record.get("age", 0) - gps_age
if blues or cells or wifis:
return report
return {}
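# Illustrative mapping (a sketch derived from the field maps above): an input item
#   {"position": {"latitude": 51.0, "longitude": 13.0},
#    "wifiAccessPoints": [{"macAddress": "ab:cd:ef:01:02:03", "signalStrength": -60}]}
# is transformed into the internal report
#   {"lat": 51.0, "lon": 13.0,
#    "wifi": [{"mac": "ab:cd:ef:01:02:03", "signal": -60}]}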
class InternalExporter(ReportExporter):
_retriable = (IOError, redis.exceptions.RedisError, sqlalchemy.exc.InternalError)
transform = InternalTransform()
def send(self, queue_items):
api_keys = set()
api_keys_known = set()
metrics = {}
items = []
for item in queue_items:
# preprocess items and extract set of API keys
item["report"] = self.transform(item["report"])
if item["report"]:
items.append(item)
api_keys.add(item["api_key"])
for api_key in api_keys:
metrics[api_key] = {}
for type_ in ("report", "blue", "cell", "wifi"):
for action in ("drop", "upload"):
metrics[api_key]["%s_%s" % (type_, action)] = 0
with self.task.db_session(commit=False) as session:
# limit database session to get API keys
keys = [key for key in api_keys if key]
if keys:
columns = ApiKey.__table__.c
rows = session.execute(
select([columns.valid_key]).where(columns.valid_key.in_(keys))
).fetchall()
for row in rows:
api_keys_known.add(row.valid_key)
positions = []
observations = {"blue": [], "cell": [], "wifi": []}
for item in items:
api_key = item["api_key"]
report = item["report"]
obs, malformed_obs = self.process_report(report)
any_data = False
for name in ("blue", "cell", "wifi"):
if obs.get(name):
observations[name].extend(obs[name])
metrics[api_key][name + "_upload"] += len(obs[name])
any_data = True
metrics[api_key][name + "_drop"] += malformed_obs.get(name, 0)
metrics[api_key]["report_upload"] += 1
if any_data:
positions.append((report["lat"], report["lon"]))
else:
metrics[api_key]["report_drop"] += 1
with self.task.redis_pipeline() as pipe:
self.queue_observations(pipe, observations)
if _map_content_enabled and positions:
self.process_datamap(pipe, positions)
self.emit_metrics(api_keys_known, metrics)
def queue_observations(self, pipe, observations):
for datatype, shard_model, shard_key, queue_prefix in (
("blue", BlueShard, "mac", "update_blue_"),
("cell", CellShard, "cellid", "update_cell_"),
("wifi", WifiShard, "mac", "update_wifi_"),
):
queued_obs = defaultdict(list)
for obs in observations[datatype]:
# group by sharded queue
shard_id = shard_model.shard_id(getattr(obs, shard_key))
queue_id = queue_prefix + shard_id
queued_obs[queue_id].append(obs.to_json())
for queue_id, values in queued_obs.items():
# enqueue values for each queue
queue = self.task.app.data_queues[queue_id]
queue.enqueue(values, pipe=pipe)
def emit_metrics(self, api_keys_known, metrics):
for api_key, key_metrics in metrics.items():
api_tag = []
if api_key and api_key in api_keys_known:
api_tag = ["key:%s" % api_key]
for name, count in key_metrics.items():
if not count:
continue
type_, action = name.split("_")
if type_ == "report":
suffix = "report"
tags = api_tag
else:
suffix = "observation"
tags = ["type:%s" % type_] + api_tag
METRICS.incr("data.%s.%s" % (suffix, action), count, tags=tags)
def process_report(self, data):
report = Report.create(**data)
if report is None:
return ({}, {})
malformed = {}
observations = {}
for name, report_cls, obs_cls in (
("blue", BlueReport, BlueObservation),
("cell", CellReport, CellObservation),
("wifi", WifiReport, WifiObservation),
):
malformed[name] = 0
observations[name] = {}
if data.get(name):
for item in data[name]:
# validate the blue/cell/wifi specific fields
item_report = report_cls.create(**item)
if item_report is None:
malformed[name] += 1
continue
# combine general and specific report data into one
item_obs = obs_cls.combine(report, item_report)
item_key = item_obs.unique_key
# if we have better data for the same key, ignore
existing = observations[name].get(item_key)
if existing is not None and existing.better(item_obs):
continue
observations[name][item_key] = item_obs
obs = {
"blue": observations["blue"].values(),
"cell": observations["cell"].values(),
"wifi": observations["wifi"].values(),
}
return (obs, malformed)
def process_datamap(self, pipe, positions):
grids = set()
for lat, lon in positions:
if lat is not None and lon is not None:
grids.add(DataMap.scale(lat, lon))
shards = defaultdict(set)
for lat, lon in grids:
shards[DataMap.shard_id(lat, lon)].add(encode_datamap_grid(lat, lon))
for shard_id, values in shards.items():
queue = self.task.app.data_queues["update_datamap_" + shard_id]
queue.enqueue(list(values), pipe=pipe)
| [((38, 13, 38, 48), 're.compile', 're.compile', (), '', False, 'import re\n'), ((40, 10, 40, 30), 'markus.get_metrics', 'markus.get_metrics', ({}, {}), '()', False, 'import markus\n'), ((63, 18, 63, 35), 'collections.defaultdict', 'defaultdict', ({(63, 30, 63, 34): 'list'}, {}), '(list)', False, 'from collections import defaultdict\n'), ((456, 17, 456, 38), 'ichnaea.models.Report.create', 'Report.create', ({}, {}), '(**data)', False, 'from ichnaea.models import ApiKey, BlueObservation, BlueReport, BlueShard, CellObservation, CellReport, CellShard, DataMap, ExportConfig, Report, WifiObservation, WifiReport, WifiShard\n'), ((503, 17, 503, 33), 'collections.defaultdict', 'defaultdict', ({(503, 29, 503, 32): 'set'}, {}), '(set)', False, 'from collections import defaultdict\n'), ((70, 29, 70, 54), 'ichnaea.models.ExportConfig.all', 'ExportConfig.all', ({(70, 46, 70, 53): 'session'}, {}), '(session)', False, 'from ichnaea.models import ApiKey, BlueObservation, BlueReport, BlueShard, CellObservation, CellReport, CellShard, DataMap, ExportConfig, Report, WifiObservation, WifiReport, WifiShard\n'), ((108, 21, 108, 52), 'ichnaea.models.ExportConfig.get', 'ExportConfig.get', ({(108, 38, 108, 45): 'session', (108, 47, 108, 51): 'name'}, {}), '(session, name)', False, 'from ichnaea.models import ApiKey, BlueObservation, BlueReport, BlueShard, CellObservation, CellReport, CellShard, DataMap, ExportConfig, Report, WifiObservation, WifiReport, WifiShard\n'), ((196, 30, 196, 55), 'urllib.parse.urlparse', 'urlparse', ({(196, 39, 196, 54): 'self.config.url'}, {}), '(self.config.url)', False, 'from urllib.parse import urlparse\n'), ((219, 17, 219, 37), 'boto3.resource', 'boto3.resource', ({(219, 32, 219, 36): '"""s3"""'}, {}), "('s3')", False, 'import boto3\n'), ((423, 25, 423, 42), 'collections.defaultdict', 'defaultdict', ({(423, 37, 423, 41): 'list'}, {}), '(list)', False, 'from collections import defaultdict\n'), ((212, 20, 212, 32), 'uuid.uuid1', 'uuid.uuid1', ({}, {}), '()', False, 'import uuid\n'), ((505, 51, 505, 80), 'ichnaea.models.content.encode_datamap_grid', 'encode_datamap_grid', ({(505, 71, 505, 74): 'lat', (505, 76, 505, 79): 'lon'}, {}), '(lat, lon)', False, 'from ichnaea.models.content import encode_datamap_grid\n'), ((134, 16, 134, 59), 'time.sleep', 'time.sleep', ({(134, 27, 134, 58): '(self._retry_wait * (i ** 2 + 1))'}, {}), '(self._retry_wait * (i ** 2 + 1))', False, 'import time\n'), ((202, 27, 202, 40), 'ichnaea.util.utcnow', 'util.utcnow', ({}, {}), '()', False, 'from ichnaea import util\n'), ((501, 26, 501, 49), 'ichnaea.models.DataMap.scale', 'DataMap.scale', ({(501, 40, 501, 43): 'lat', (501, 45, 501, 48): 'lon'}, {}), '(lat, lon)', False, 'from ichnaea.models import ApiKey, BlueObservation, BlueReport, BlueShard, CellObservation, CellReport, CellShard, DataMap, ExportConfig, Report, WifiObservation, WifiReport, WifiShard\n'), ((216, 16, 216, 46), 'json.dumps', 'json.dumps', ({(216, 27, 216, 45): "{'items': reports}"}, {}), "({'items': reports})", False, 'import json\n'), ((169, 16, 169, 46), 'json.dumps', 'json.dumps', ({(169, 27, 169, 45): "{'items': reports}"}, {}), "({'items': reports})", False, 'import json\n'), ((505, 19, 505, 45), 'ichnaea.models.DataMap.shard_id', 'DataMap.shard_id', ({(505, 36, 505, 39): 'lat', (505, 41, 505, 44): 'lon'}, {}), '(lat, lon)', False, 'from ichnaea.models import ApiKey, BlueObservation, BlueReport, BlueShard, CellObservation, CellReport, CellShard, DataMap, ExportConfig, Report, WifiObservation, WifiReport, WifiShard\n'), ((380, 20, 380, 
47), 'sqlalchemy.select', 'select', ({(380, 27, 380, 46): '[columns.valid_key]'}, {}), '([columns.valid_key])', False, 'from sqlalchemy import select\n')] |
x-y-z/HugeCTR | test/inference_correctness/dcn_multi_hot.py | 17bf942215df60827ece9dc015af5191ef9219b7 | import hugectr
from mpi4py import MPI
solver = hugectr.CreateSolver(model_name = "dcn",
max_eval_batches = 1,
batchsize_eval = 16384,
batchsize = 16384,
lr = 0.001,
vvgpu = [[0]],
repeat_dataset = True,
use_mixed_precision = False,
scaler = 1.0,
use_cuda_graph = True,
metrics_spec = {hugectr.MetricsType.AUC: 1.0})
reader = hugectr.DataReaderParams(data_reader_type = hugectr.DataReaderType_t.Norm,
source = ["./dcn_data/file_list.txt"],
eval_source = "./dcn_data/file_list_test.txt",
check_type = hugectr.Check_t.Sum,
num_workers = 16)
optimizer = hugectr.CreateOptimizer(optimizer_type = hugectr.Optimizer_t.Adam,
update_type = hugectr.Update_t.Global,
beta1 = 0.9,
beta2 = 0.999,
epsilon = 0.0001)
model = hugectr.Model(solver, reader, optimizer)
model.add(hugectr.Input(label_dim = 1, label_name = "label",
dense_dim = 13, dense_name = "dense",
data_reader_sparse_param_array =
[hugectr.DataReaderSparseParam("data1", 2, False, 26)]))
model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
workspace_size_per_gpu_in_mb = 300,
embedding_vec_size = 16,
combiner = "sum",
sparse_embedding_name = "sparse_embedding1",
bottom_name = "data1",
optimizer = optimizer))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape,
bottom_names = ["sparse_embedding1"],
top_names = ["reshape1"],
leading_dim=416))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat,
bottom_names = ["reshape1", "dense"], top_names = ["concat1"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Slice,
bottom_names = ["concat1"],
top_names = ["slice11", "slice12"],
ranges=[(0,429),(0,429)]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.MultiCross,
bottom_names = ["slice11"],
top_names = ["multicross1"],
num_layers=1))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["slice12"],
top_names = ["fc1"],
num_output=1024))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,
bottom_names = ["fc1"],
top_names = ["relu1"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout,
bottom_names = ["relu1"],
top_names = ["dropout1"],
dropout_rate=0.5))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["dropout1"],
top_names = ["fc2"],
num_output=1024))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,
bottom_names = ["fc2"],
top_names = ["relu2"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout,
bottom_names = ["relu2"],
top_names = ["dropout2"],
dropout_rate=0.5))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat,
bottom_names = ["dropout2", "multicross1"],
top_names = ["concat2"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["concat2"],
top_names = ["fc3"],
num_output=1))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.BinaryCrossEntropyLoss,
bottom_names = ["fc3", "label"],
top_names = ["loss"]))
model.compile()
model.summary()
model.graph_to_json(graph_config_file = "/dump_infer/dcn.json")
model.fit(max_iter = 2300, display = 200, eval_interval = 2000, snapshot = 2000, snapshot_prefix = "/dump_infer/dcn")
model.export_predictions("/dump_infer/dcn_pred_" + str(2000), "/dump_infer/dcn_label_" + str(2000))
from hugectr.inference import InferenceParams, CreateInferenceSession
import numpy as np
import sys
batch_size = 16384
num_batches = 1
data_source = "./dcn_data/file_list_test.txt"
inference_params = InferenceParams(model_name = "dcn",
max_batchsize = batch_size,
hit_rate_threshold = 1.0,
dense_model_file = "/dump_infer/dcn_dense_2000.model",
sparse_model_files = ["/dump_infer/dcn0_sparse_2000.model"],
device_id = 0,
use_gpu_embedding_cache = False,
cache_size_percentage = 1.0,
i64_input_key = False,
use_mixed_precision = False,
use_cuda_graph = True)
inference_session = CreateInferenceSession("/dump_infer/dcn.json", inference_params)
predictions = inference_session.predict(num_batches = num_batches,
source = data_source,
data_reader_type = hugectr.DataReaderType_t.Norm,
check_type = hugectr.Check_t.Sum)
ground_truth = np.loadtxt("/dump_infer/dcn_pred_2000")
diff = predictions-ground_truth
mse = np.mean(diff*diff)
if mse > 1e-3:
raise RuntimeError("Too large mse between DCN multi hot inference and training: {}".format(mse))
sys.exit(1)
else:
print("DCN multi hot inference results are consistent with those during training, mse: {}".format(mse)) | [((3, 9, 13, 76), 'hugectr.CreateSolver', 'hugectr.CreateSolver', (), '', False, 'import hugectr\n'), ((14, 9, 18, 51), 'hugectr.DataReaderParams', 'hugectr.DataReaderParams', (), '', False, 'import hugectr\n'), ((19, 12, 23, 53), 'hugectr.CreateOptimizer', 'hugectr.CreateOptimizer', (), '', False, 'import hugectr\n'), ((24, 8, 24, 48), 'hugectr.Model', 'hugectr.Model', ({(24, 22, 24, 28): 'solver', (24, 30, 24, 36): 'reader', (24, 38, 24, 47): 'optimizer'}, {}), '(solver, reader, optimizer)', False, 'import hugectr\n'), ((94, 19, 104, 54), 'hugectr.inference.InferenceParams', 'InferenceParams', (), '', False, 'from hugectr.inference import InferenceParams, CreateInferenceSession\n'), ((105, 20, 105, 84), 'hugectr.inference.CreateInferenceSession', 'CreateInferenceSession', ({(105, 43, 105, 65): '"""/dump_infer/dcn.json"""', (105, 67, 105, 83): 'inference_params'}, {}), "('/dump_infer/dcn.json', inference_params)", False, 'from hugectr.inference import InferenceParams, CreateInferenceSession\n'), ((110, 15, 110, 54), 'numpy.loadtxt', 'np.loadtxt', ({(110, 26, 110, 53): '"""/dump_infer/dcn_pred_2000"""'}, {}), "('/dump_infer/dcn_pred_2000')", True, 'import numpy as np\n'), ((112, 6, 112, 24), 'numpy.mean', 'np.mean', ({(112, 14, 112, 23): 'diff * diff'}, {}), '(diff * diff)', True, 'import numpy as np\n'), ((29, 10, 35, 50), 'hugectr.SparseEmbedding', 'hugectr.SparseEmbedding', (), '', False, 'import hugectr\n'), ((36, 10, 39, 44), 'hugectr.DenseLayer', 'hugectr.DenseLayer', (), '', False, 'import hugectr\n'), ((40, 10, 41, 90), 'hugectr.DenseLayer', 'hugectr.DenseLayer', (), '', False, 'import hugectr\n'), ((42, 10, 45, 53), 'hugectr.DenseLayer', 'hugectr.DenseLayer', (), '', False, 'import hugectr\n'), ((46, 10, 49, 41), 'hugectr.DenseLayer', 'hugectr.DenseLayer', (), '', False, 'import hugectr\n'), ((50, 10, 53, 44), 'hugectr.DenseLayer', 'hugectr.DenseLayer', (), '', False, 'import hugectr\n'), ((54, 10, 56, 50), 'hugectr.DenseLayer', 'hugectr.DenseLayer', (), '', False, 'import hugectr\n'), ((57, 10, 60, 45), 'hugectr.DenseLayer', 'hugectr.DenseLayer', (), '', False, 'import hugectr\n'), ((61, 10, 64, 44), 'hugectr.DenseLayer', 'hugectr.DenseLayer', (), '', False, 'import hugectr\n'), ((65, 10, 67, 50), 'hugectr.DenseLayer', 'hugectr.DenseLayer', (), '', False, 'import hugectr\n'), ((68, 10, 71, 45), 'hugectr.DenseLayer', 'hugectr.DenseLayer', (), '', False, 'import hugectr\n'), ((72, 10, 74, 52), 'hugectr.DenseLayer', 'hugectr.DenseLayer', (), '', False, 'import hugectr\n'), ((75, 10, 78, 41), 'hugectr.DenseLayer', 'hugectr.DenseLayer', (), '', False, 'import hugectr\n'), ((79, 10, 81, 49), 'hugectr.DenseLayer', 'hugectr.DenseLayer', (), '', False, 'import hugectr\n'), ((28, 25, 28, 77), 'hugectr.DataReaderSparseParam', 'hugectr.DataReaderSparseParam', ({(28, 55, 28, 62): '"""data1"""', (28, 64, 28, 65): '(2)', (28, 67, 28, 72): '(False)', (28, 74, 28, 76): '(26)'}, {}), "('data1', 2, False, 26)", False, 'import hugectr\n')] |
RobotLocomotion/drake-python3.7 | bindings/pydrake/systems/perception.py | ae397a4c6985262d23e9675b9bf3927c08d027f5 | import numpy as np
from pydrake.common.value import AbstractValue
from pydrake.math import RigidTransform
from pydrake.perception import BaseField, Fields, PointCloud
from pydrake.systems.framework import LeafSystem
def _TransformPoints(points_Ci, X_CiSi):
# Make homogeneous copy of points.
points_h_Ci = np.vstack((points_Ci,
np.ones((1, points_Ci.shape[1]))))
return X_CiSi.dot(points_h_Ci)[:3, :]
def _TileColors(color, dim):
# Need manual broadcasting.
return np.tile(np.array([color]).T, (1, dim))
def _ConcatenatePointClouds(points_dict, colors_dict):
scene_points = None
scene_colors = None
for id in points_dict:
if scene_points is None:
scene_points = points_dict[id]
else:
scene_points = np.hstack((points_dict[id], scene_points))
if scene_colors is None:
scene_colors = colors_dict[id]
else:
scene_colors = np.hstack((colors_dict[id], scene_colors))
valid_indices = np.logical_not(np.isnan(scene_points))
scene_points = scene_points[:, valid_indices[0, :]]
scene_colors = scene_colors[:, valid_indices[0, :]]
return scene_points, scene_colors
class PointCloudConcatenation(LeafSystem):
"""
.. pydrake_system::
name: PointCloudConcatenation
input_ports:
- point_cloud_CiSi_id0
- X_FCi_id0
- ...
- point_cloud_CiSi_idN
- X_FCi_idN
output_ports:
- point_cloud_FS
"""
def __init__(self, id_list, default_rgb=[255., 255., 255.]):
"""
A system that takes in N point clouds of points Si in frame Ci, and N
RigidTransforms from frame Ci to F, to put each point cloud in a common
frame F. The system returns one point cloud combining all of the
transformed point clouds. Each point cloud must have XYZs. RGBs are
optional. If absent, those points will be the provided default color.
@param id_list A list containing the string IDs of all of the point
clouds. This is often the serial number of the camera they came
from, such as "1" for a simulated camera or "805212060373" for a
real camera.
@param default_rgb A list of length 3 containing the RGB values to use
in the absence of PointCloud.rgbs. Values should be between 0 and
255. The default is white.
"""
LeafSystem.__init__(self)
self._point_cloud_ports = {}
self._transform_ports = {}
self._id_list = id_list
self._default_rgb = np.array(default_rgb)
output_fields = Fields(BaseField.kXYZs | BaseField.kRGBs)
for id in self._id_list:
self._point_cloud_ports[id] = self.DeclareAbstractInputPort(
"point_cloud_CiSi_{}".format(id),
AbstractValue.Make(PointCloud(fields=output_fields)))
self._transform_ports[id] = self.DeclareAbstractInputPort(
"X_FCi_{}".format(id),
AbstractValue.Make(RigidTransform.Identity()))
self.DeclareAbstractOutputPort("point_cloud_FS",
lambda: AbstractValue.Make(
PointCloud(fields=output_fields)),
self.DoCalcOutput)
def _AlignPointClouds(self, context):
points = {}
colors = {}
for id in self._id_list:
point_cloud = self.EvalAbstractInput(
context, self._point_cloud_ports[id].get_index()).get_value()
X_CiSi = self.EvalAbstractInput(
context, self._transform_ports[id].get_index()).get_value()
points[id] = _TransformPoints(
point_cloud.xyzs(), X_CiSi.GetAsMatrix4())
if point_cloud.has_rgbs():
colors[id] = point_cloud.rgbs()
else:
colors[id] = _TileColors(
self._default_rgb, point_cloud.xyzs().shape[1])
return _ConcatenatePointClouds(points, colors)
def DoCalcOutput(self, context, output):
scene_points, scene_colors = self._AlignPointClouds(context)
output.get_mutable_value().resize(scene_points.shape[1])
output.get_mutable_value().mutable_xyzs()[:] = scene_points
output.get_mutable_value().mutable_rgbs()[:] = scene_colors
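# Illustrative usage sketch (the camera ids and diagram wiring here are
# assumptions, not part of this module):
#   from pydrake.systems.framework import DiagramBuilder
#   builder = DiagramBuilder()
#   concat = builder.AddSystem(PointCloudConcatenation(["0", "1"]))
#   # input ports: point_cloud_CiSi_0, X_FCi_0, point_cloud_CiSi_1, X_FCi_1
#   # output port: point_cloud_FS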
| [((37, 35, 37, 57), 'numpy.isnan', 'np.isnan', ({(37, 44, 37, 56): 'scene_points'}, {}), '(scene_points)', True, 'import numpy as np\n'), ((76, 8, 76, 33), 'pydrake.systems.framework.LeafSystem.__init__', 'LeafSystem.__init__', ({(76, 28, 76, 32): 'self'}, {}), '(self)', False, 'from pydrake.systems.framework import LeafSystem\n'), ((83, 28, 83, 49), 'numpy.array', 'np.array', ({(83, 37, 83, 48): 'default_rgb'}, {}), '(default_rgb)', True, 'import numpy as np\n'), ((85, 24, 85, 65), 'pydrake.perception.Fields', 'Fields', ({(85, 31, 85, 64): 'BaseField.kXYZs | BaseField.kRGBs'}, {}), '(BaseField.kXYZs | BaseField.kRGBs)', False, 'from pydrake.perception import BaseField, Fields, PointCloud\n'), ((12, 29, 12, 61), 'numpy.ones', 'np.ones', ({(12, 37, 12, 60): '(1, points_Ci.shape[1])'}, {}), '((1, points_Ci.shape[1]))', True, 'import numpy as np\n'), ((19, 19, 19, 36), 'numpy.array', 'np.array', ({(19, 28, 19, 35): '[color]'}, {}), '([color])', True, 'import numpy as np\n'), ((30, 27, 30, 69), 'numpy.hstack', 'np.hstack', ({(30, 37, 30, 68): '(points_dict[id], scene_points)'}, {}), '((points_dict[id], scene_points))', True, 'import numpy as np\n'), ((35, 27, 35, 69), 'numpy.hstack', 'np.hstack', ({(35, 37, 35, 68): '(colors_dict[id], scene_colors)'}, {}), '((colors_dict[id], scene_colors))', True, 'import numpy as np\n'), ((90, 35, 90, 67), 'pydrake.perception.PointCloud', 'PointCloud', (), '', False, 'from pydrake.perception import BaseField, Fields, PointCloud\n'), ((94, 35, 94, 60), 'pydrake.math.RigidTransform.Identity', 'RigidTransform.Identity', ({}, {}), '()', False, 'from pydrake.math import RigidTransform\n'), ((98, 43, 98, 75), 'pydrake.perception.PointCloud', 'PointCloud', (), '', False, 'from pydrake.perception import BaseField, Fields, PointCloud\n')] |
mit-ll/CATAN | experiments/db_test.py | 7cc6f7e8af459c0f6bcf325f0754db1ba5b591ac | #!/usr/bin/env python
"""
@author Hongyi Hu
© 2015 Massachusetts Institute of Technology
"""
import argparse
import random
import catan.db
from catan.data import NodeMessage
# test data
STATUS_LIST = ['ok', 'injured', 'deceased']
# nodes
def gen_nodes(n, db, start_lat, stop_lat, start_long, stop_long):
assert n > 0
cmd = "INSERT INTO catan_nodes VALUES "
# generate n random nodes, centered around Cambridge
for i in range(n):
# random lat, long
lat = round(random.uniform(start_lat, stop_lat), 6)
lng = round(random.uniform(start_long, stop_long), 6)
# node_id, gps_lat, gps_long, gps_acc, path, timestamp
sql_cmd = cmd + "(%d, %.6f, %.6f, %.6f, %.6f, %.6f)" % (i, lat, lng, 0, 0, 0)
db._sql(sql_cmd)
# people
def gen_people(n, db, start_lat, stop_lat, start_long, stop_long):
"""
Generates n people, random male/female ratio between 5 and 90 years of age
"""
assert n > 0
# open male first names file
f = open('dist.male.first','r')
male_first_names = [name.strip().split()[0] for name in f.readlines()]
f.close()
# open female first names file
f = open('dist.female.first','r')
female_first_names = [name.strip().split()[0] for name in f.readlines()]
f.close()
# open last names file
f = open('dist.all.last','r')
family_names = [name.strip().split()[0] for name in f.readlines()]
f.close()
# generate people
for i in range(n):
catanDBObj = catan.db.CatanDatabaseObject()
# bio
sex = random.randint(0,1)
if sex == 0: # male
catanDBObj.person_bio.name_given = male_first_names[random.randint(0,len(male_first_names)-1)]
catanDBObj.person_bio.sex = 'male'
else: # female
catanDBObj.person_bio.name_given = female_first_names[random.randint(0,len(female_first_names)-1)]
catanDBObj.person_bio.sex = 'female'
catanDBObj.person_bio.name_family = family_names[random.randint(0,len(family_names)-1)]
catanDBObj.person_bio.age = random.randint(5,90)
# message (message, status, location, etc.)
# location
lat = round(random.uniform(start_lat, stop_lat), 6)
lng = round(random.uniform(start_long, stop_long), 6)
catanDBObj.person_message.person_message = 'Hi Mom'
catanDBObj.person_message.status_gps_latitude = lat
catanDBObj.person_message.status_gps_longitude = lng
catanDBObj.person_message.status_gps_accuracy = 0
# status
catanDBObj.person_message.status = STATUS_LIST[random.randint(0,len(STATUS_LIST)-1)]
catanDBObj.person_message.status_location = 'Test status location'
# generate a NodeMessage for the database
# it only cares about the data and source fields, so we can ignore other fields
nmsg = NodeMessage()
nmsg.source = random.randint(0,31) # random node 0-31
nmsg.data = catanDBObj.pack()
db.update_db(nmsg)
# Create some random updates
for i in range(1,n+1):
update = random.randint(0,1)
if update == 0:
catanDBObj = catan.db.CatanDatabaseObject()
catanDBObj.person_id = i
# location
lat = round(random.uniform(start_lat, stop_lat), 6)
lng = round(random.uniform(start_long, stop_long), 6)
catanDBObj.person_message.person_message = 'Location update 1'
catanDBObj.person_message.status_gps_latitude = lat
catanDBObj.person_message.status_gps_longitude = lng
catanDBObj.person_message.status_gps_accuracy = 0
n = NodeMessage()
n.source = random.randint(0,31)
n.data = catanDBObj.pack()
db.update_db(n)
def populate_db():
db = catan.db.CatanDatabase(0)
# insert some test nodes
# for cambridge
gen_nodes(32, db, 42.354823, 42.368315, -71.114484, -71.084422)
gen_people(100, db, 42.354823, 42.368315, -71.114484, -71.084422)
cmd = ('SELECT '
'db_person_bio.person_id, '
'db_person_bio.origin_node_id, '
'db_person_bio.name_family, '
'db_person_bio.name_given, '
'db_person_bio.age, '
'db_person_bio.sex, '
'db_person_messages.submission_id, '
'db_person_messages.origin_node_id, '
'db_person_messages.status_gps_latitude, '
'db_person_messages.status_gps_longitude, '
'db_person_messages.status_gps_accuracy, '
'db_person_messages.status, '
'db_person_messages.status_location, '
'db_submitter_info.timestamp '
'FROM db_person_bio '
'LEFT JOIN db_person_messages ON db_person_messages.person_id = db_person_bio.person_id '
'LEFT JOIN db_submitter_info ON db_submitter_info.submission_id = db_person_messages.submission_id')
for r in db._sql(cmd).fetchall():
        print(r)
def main(args):
pass
if __name__=='__main__':
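    # Note: gen_people() reads the name list files 'dist.male.first',
    # 'dist.female.first' and 'dist.all.last' (US Census-style name lists)
    # from the current working directory, so they must be present before running.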
populate_db()
| [] |
Hellofafar/Leetcode | Medium/200.py | 7a459e9742958e63be8886874904e5ab2489411a | # ------------------------------
# 200. Number of Islands
#
# Description:
# Given a 2d grid map of '1's (land) and '0's (water), count the number of islands. An island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically. You may assume all four edges of the grid are all surrounded by water.
#
# Example 1:
# 11110
# 11010
# 11000
# 00000
# Answer: 1
#
# Example 2:
# 11000
# 11000
# 00100
# 00011
# Answer: 3
#
# Version: 1.0
# 11/13/17 by Jianfa
# ------------------------------
class Solution(object):
def numIslands(self, grid):
"""
:type grid: List[List[str]]
:rtype: int
"""
def sink(i, j):
if 0 <= i < len(grid) and 0 <= j < len(grid[0]) and grid[i][j] == "1":
grid[i][j] = "0"
                # list() forces the recursive neighbour calls to run eagerly
                # (a bare map() is lazy in Python 3 and would never sink them)
                list(map(sink, (i+1, i-1, i, i), (j, j, j+1, j-1)))
return 1
return 0
return sum(sink(i, j) for i in range(len(grid)) for j in range(len(grid[i])))
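# Quick self-check (illustrative driver): the two example grids from the problem
# statement, written as lists of character lists so sink() can overwrite cells in place.
if __name__ == "__main__":
    grid1 = [list("11110"), list("11010"), list("11000"), list("00000")]
    grid2 = [list("11000"), list("11000"), list("00100"), list("00011")]
    print(Solution().numIslands(grid1))  # expected: 1
    print(Solution().numIslands(grid2))  # expected: 3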
# ------------------------------
# Summary:
# Copied from discussion.
# The following is another easy understanding idea:
#
# class Solution(object):
# def numIslands(self, grid):
# """
# :type grid: List[List[str]]
# :rtype: int
# """
# if len(grid) == 0: return 0
# m = len(grid)
# n = len(grid[0])
# res = 0
# for i in range(m):
# for j in range(n):
# if grid[i][j] == '1':
# res += 1
# grid[i][j] = '2'
# self.island(i, j, grid, m, n)
# return res
# def island(self, x, y, grid, m, n):
# if x + 1 < m and grid[x+1][y] == '1':
# grid[x+1][y] = '2'
# self.island(x+1,y,grid, m, n)
# if y + 1 < n and grid[x][y+1] == '1':
# grid[x][y+1] = '2'
# self.island(x,y+1,grid, m, n)
# if x -1 >=0 and grid[x-1][y] == '1':
# grid[x-1][y] = '2'
# self.island(x-1,y,grid, m, n)
# if y - 1 >= 0 and grid[x][y-1] == '1':
# grid[x][y-1] = '2'
# self.island(x,y-1,grid, m, n) | [] |
SamuelePilleri/plaso | tests/formatters/fseventsd.py | f5687f12a89c7309797ccc285da78e855c120579 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the fseventsd record event formatter."""
from __future__ import unicode_literals
import unittest
from plaso.formatters import fseventsd
from tests.formatters import test_lib
class FseventsdFormatterTest(test_lib.EventFormatterTestCase):
"""Tests for the fseventsd record event formatter."""
def testInitialization(self):
"""Tests the initialization."""
event_formatter = fseventsd.FSEventsdEventFormatter()
self.assertIsNotNone(event_formatter)
def testGetFormatStringAttributeNames(self):
"""Tests the GetFormatStringAttributeNames function."""
event_formatter = fseventsd.FSEventsdEventFormatter()
expected_attribute_names = [
u'event_identifier', u'flag_values', u'hex_flags', u'path']
self._TestGetFormatStringAttributeNames(
event_formatter, expected_attribute_names)
# TODO: add test for GetSources.
if __name__ == '__main__':
unittest.main()
| [((36, 2, 36, 17), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((19, 22, 19, 57), 'plaso.formatters.fseventsd.FSEventsdEventFormatter', 'fseventsd.FSEventsdEventFormatter', ({}, {}), '()', False, 'from plaso.formatters import fseventsd\n'), ((24, 22, 24, 57), 'plaso.formatters.fseventsd.FSEventsdEventFormatter', 'fseventsd.FSEventsdEventFormatter', ({}, {}), '()', False, 'from plaso.formatters import fseventsd\n')] |
Farzin-Negahbani/PathoNet | train.py | b467a255fb356e64129b7942261e972ae15a2d2b | from keras.callbacks import ModelCheckpoint,Callback,LearningRateScheduler,TensorBoard
from keras.models import load_model
import random
import numpy as np
from scipy import misc
import gc
from keras.optimizers import Adam
from imageio import imread
from datetime import datetime
import os
import json
import models
from utils import DataLoader, LrPolicy
from config import Config
import argparse
def get_parser():
parser = argparse.ArgumentParser('train')
parser.add_argument('--configPath', '-c', required=True)
return parser
def train(args=None):
parser = get_parser()
args = parser.parse_args(args)
conf=Config()
conf.load(args.configPath)
time=datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
trainString="%s_%s_%s_%s" % (conf.model,conf.optimizer,str(conf.lr),time)
os.makedirs(conf.logPath+"/"+trainString)
conf.save(conf.logPath+"/"+trainString+'/config.json')
print('Compiling model...')
model_checkpoint = ModelCheckpoint(conf.logPath+"/"+trainString+'/Checkpoint-{epoch:02d}-{val_loss:.2f}.hdf5', monitor='val_loss', save_best_only=False, save_weights_only=True)
change_lr = LearningRateScheduler(LrPolicy(conf.lr).stepDecay)
tbCallBack=TensorBoard(log_dir=conf.logPath+"/"+trainString+'/logs', histogram_freq=0, write_graph=True, write_images=True)
model=models.modelCreator(conf.model,conf.inputShape,conf.classes,conf.pretrainedModel)
model.compile(optimizer = conf.optimizer, loss = conf.loss)
data = [conf.trainDataPath+"/"+f for f in os.listdir(conf.trainDataPath) if '.jpg' in f]
random.shuffle(data)
thr=int(len(data)*conf.validationSplit)
trainData=data[thr:]
valData=data[:thr]
trainDataLoader=DataLoader(conf.batchSize,conf.inputShape,trainData,conf.guaMaxValue)
validationDataLoader=DataLoader(conf.batchSize,conf.inputShape,valData,conf.guaMaxValue)
print('Fitting model...')
model.fit_generator(generator=trainDataLoader.generator(),
validation_data=validationDataLoader.generator(),
steps_per_epoch=len(trainData)//conf.batchSize,
validation_steps=len(valData)//conf.batchSize,
epochs=conf.epoches,
verbose=1,
initial_epoch=0,
callbacks = [model_checkpoint, change_lr,tbCallBack]
)
if __name__ == "__main__":
train()
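    # Example shell invocation (the config path is illustrative; the JSON is read by
    # Config and should define the fields used above, such as model, optimizer, lr,
    # logPath, trainDataPath, batchSize and epoches):
    #   python train.py --configPath configs/train_config.json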
| [((19, 13, 19, 45), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({(19, 37, 19, 44): '"""train"""'}, {}), "('train')", False, 'import argparse\n'), ((26, 9, 26, 17), 'config.Config', 'Config', ({}, {}), '()', False, 'from config import Config\n'), ((30, 4, 30, 45), 'os.makedirs', 'os.makedirs', ({(30, 16, 30, 44): "(conf.logPath + '/' + trainString)"}, {}), "(conf.logPath + '/' + trainString)", False, 'import os\n'), ((33, 23, 33, 180), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (), '', False, 'from keras.callbacks import ModelCheckpoint, Callback, LearningRateScheduler, TensorBoard\n'), ((35, 15, 35, 128), 'keras.callbacks.TensorBoard', 'TensorBoard', (), '', False, 'from keras.callbacks import ModelCheckpoint, Callback, LearningRateScheduler, TensorBoard\n'), ((36, 10, 36, 91), 'models.modelCreator', 'models.modelCreator', ({(36, 30, 36, 40): 'conf.model', (36, 41, 36, 56): 'conf.inputShape', (36, 57, 36, 69): 'conf.classes', (36, 70, 36, 90): 'conf.pretrainedModel'}, {}), '(conf.model, conf.inputShape, conf.classes, conf.\n pretrainedModel)', False, 'import models\n'), ((39, 4, 39, 24), 'random.shuffle', 'random.shuffle', ({(39, 19, 39, 23): 'data'}, {}), '(data)', False, 'import random\n'), ((43, 20, 43, 89), 'utils.DataLoader', 'DataLoader', ({(43, 31, 43, 45): 'conf.batchSize', (43, 46, 43, 61): 'conf.inputShape', (43, 62, 43, 71): 'trainData', (43, 72, 43, 88): 'conf.guaMaxValue'}, {}), '(conf.batchSize, conf.inputShape, trainData, conf.guaMaxValue)', False, 'from utils import DataLoader, LrPolicy\n'), ((44, 25, 44, 92), 'utils.DataLoader', 'DataLoader', ({(44, 36, 44, 50): 'conf.batchSize', (44, 51, 44, 66): 'conf.inputShape', (44, 67, 44, 74): 'valData', (44, 75, 44, 91): 'conf.guaMaxValue'}, {}), '(conf.batchSize, conf.inputShape, valData, conf.guaMaxValue)', False, 'from utils import DataLoader, LrPolicy\n'), ((28, 9, 28, 23), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((34, 38, 34, 55), 'utils.LrPolicy', 'LrPolicy', ({(34, 47, 34, 54): 'conf.lr'}, {}), '(conf.lr)', False, 'from utils import DataLoader, LrPolicy\n'), ((38, 46, 38, 76), 'os.listdir', 'os.listdir', ({(38, 57, 38, 75): 'conf.trainDataPath'}, {}), '(conf.trainDataPath)', False, 'import os\n')] |
mingxiaoh/chainer-v3 | tests/chainer_tests/functions_tests/array_tests/test_flatten.py | 815ff00f5eaf7944d6e8a75662ff64a2fe046a4d | import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'shape': [(3, 4), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestFlatten(unittest.TestCase):
dtype = numpy.float32
def setUp(self):
self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.g_shape = (numpy.prod((1,) + self.shape),)
self.g = numpy.random.uniform(-1, 1, self.g_shape).astype(self.dtype)
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = functions.flatten(x)
self.assertEqual(y.shape, self.g_shape)
self.assertEqual(y.dtype, self.dtype)
testing.assert_allclose(self.x.flatten(), y.data)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, g_data):
gradient_check.check_backward(
functions.Flatten(), x_data, g_data, dtype=numpy.float64)
def test_backward_cpu(self):
self.check_backward(self.x, self.g)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.g))
testing.run_module(__name__, __file__)
| [((53, 0, 53, 38), 'chainer.testing.run_module', 'testing.run_module', ({(53, 19, 53, 27): '__name__', (53, 29, 53, 37): '__file__'}, {}), '(__name__, __file__)', False, 'from chainer import testing\n'), ((27, 12, 27, 36), 'chainer.Variable', 'chainer.Variable', ({(27, 29, 27, 35): 'x_data'}, {}), '(x_data)', False, 'import chainer\n'), ((28, 12, 28, 32), 'chainer.functions.flatten', 'functions.flatten', ({(28, 30, 28, 31): 'x'}, {}), '(x)', False, 'from chainer import functions\n'), ((13, 23, 16, 2), 'chainer.testing.product', 'testing.product', ({(13, 39, 16, 1): "{'shape': [(3, 4), ()], 'dtype': [numpy.float16, numpy.float32, numpy.float64]}"}, {}), "({'shape': [(3, 4), ()], 'dtype': [numpy.float16, numpy.\n float32, numpy.float64]})", False, 'from chainer import testing\n'), ((23, 24, 23, 53), 'numpy.prod', 'numpy.prod', ({(23, 35, 23, 52): '((1,) + self.shape)'}, {}), '((1,) + self.shape)', False, 'import numpy\n'), ((39, 27, 39, 46), 'chainer.cuda.to_gpu', 'cuda.to_gpu', ({(39, 39, 39, 45): 'self.x'}, {}), '(self.x)', False, 'from chainer import cuda\n'), ((43, 12, 43, 31), 'chainer.functions.Flatten', 'functions.Flatten', ({}, {}), '()', False, 'from chainer import functions\n'), ((50, 28, 50, 47), 'chainer.cuda.to_gpu', 'cuda.to_gpu', ({(50, 40, 50, 46): 'self.x'}, {}), '(self.x)', False, 'from chainer import cuda\n'), ((50, 49, 50, 68), 'chainer.cuda.to_gpu', 'cuda.to_gpu', ({(50, 61, 50, 67): 'self.g'}, {}), '(self.g)', False, 'from chainer import cuda\n'), ((22, 17, 22, 56), 'numpy.random.uniform', 'numpy.random.uniform', ({(22, 38, 22, 40): '-1', (22, 42, 22, 43): '1', (22, 45, 22, 55): 'self.shape'}, {}), '(-1, 1, self.shape)', False, 'import numpy\n'), ((24, 17, 24, 58), 'numpy.random.uniform', 'numpy.random.uniform', ({(24, 38, 24, 40): '-1', (24, 42, 24, 43): '1', (24, 45, 24, 57): 'self.g_shape'}, {}), '(-1, 1, self.g_shape)', False, 'import numpy\n')] |
snoop2head/exercise_curation_django | categories/migrations/0001_initial.py | ba35bd32d8bc203d318cb8b6e0a1722f3aa26eda | # Generated by Django 3.0.3 on 2020-03-24 09:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('exercises', '0018_photo_file'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=80)),
('description', models.TextField(blank=True)),
('exercises', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='categories', to='exercises.Exercise')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Photo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('image_url', models.URLField()),
('image_caption', models.CharField(blank=True, max_length=80)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='photos', to='categories.Category')),
],
options={
'abstract': False,
},
),
]
| [((19, 23, 19, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((20, 28, 20, 67), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import migrations, models\n'), ((21, 28, 21, 63), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import migrations, models\n'), ((22, 25, 22, 56), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((23, 32, 23, 60), 'django.db.models.TextField', 'models.TextField', (), '', False, 'from django.db import migrations, models\n'), ((24, 30, 24, 156), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((33, 23, 33, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((34, 28, 34, 67), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import migrations, models\n'), ((35, 28, 35, 63), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import migrations, models\n'), ((36, 30, 36, 47), 'django.db.models.URLField', 'models.URLField', ({}, {}), '()', False, 'from django.db import migrations, models\n'), ((37, 34, 37, 77), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((38, 29, 38, 140), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n')] |
neurips2020submission11699/metarl | src/metarl/envs/dm_control/dm_control_env.py | ae4825d21478fa1fd0aa6b116941ea40caa152a5 | from dm_control import suite
from dm_control.rl.control import flatten_observation
from dm_env import StepType
import gym
import numpy as np
from metarl.envs import Step
from metarl.envs.dm_control.dm_control_viewer import DmControlViewer
class DmControlEnv(gym.Env):
"""
Binding for `dm_control <https://arxiv.org/pdf/1801.00690.pdf>`_
"""
def __init__(self, env, name=None):
self._name = name or type(env.task).__name__
self._env = env
self._viewer = None
@classmethod
def from_suite(cls, domain_name, task_name):
return cls(suite.load(domain_name, task_name),
name='{}.{}'.format(domain_name, task_name))
def step(self, action):
time_step = self._env.step(action)
return Step(
flatten_observation(time_step.observation)['observations'],
time_step.reward, time_step.step_type == StepType.LAST,
**time_step.observation)
def reset(self):
time_step = self._env.reset()
return flatten_observation(time_step.observation)['observations']
def render(self, mode='human'):
# pylint: disable=inconsistent-return-statements
if mode == 'human':
if not self._viewer:
title = 'dm_control {}'.format(self._name)
self._viewer = DmControlViewer(title=title)
self._viewer.launch(self._env)
self._viewer.render()
return None
elif mode == 'rgb_array':
return self._env.physics.render()
else:
raise NotImplementedError
def close(self):
if self._viewer:
self._viewer.close()
self._env.close()
self._viewer = None
self._env = None
def _flat_shape(self, observation):
return np.sum(int(np.prod(v.shape)) for k, v in observation.items())
@property
def action_space(self):
action_spec = self._env.action_spec()
if (len(action_spec.shape) == 1) and (-np.inf in action_spec.minimum or
np.inf in action_spec.maximum):
return gym.spaces.Discrete(np.prod(action_spec.shape))
else:
return gym.spaces.Box(action_spec.minimum,
action_spec.maximum,
dtype=np.float32)
@property
def observation_space(self):
flat_dim = self._flat_shape(self._env.observation_spec())
return gym.spaces.Box(low=-np.inf,
high=np.inf,
shape=[flat_dim],
dtype=np.float32)
def __getstate__(self):
d = self.__dict__.copy()
d['_viewer'] = None
return d
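# Minimal usage sketch (assumes dm_control's standard 'cartpole'/'swingup'
# domain and task are installed; rendering is optional):
#   env = DmControlEnv.from_suite('cartpole', 'swingup')
#   first_obs = env.reset()
#   step = env.step(env.action_space.sample())
#   env.close()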
| [((75, 15, 78, 47), 'gym.spaces.Box', 'gym.spaces.Box', (), '', False, 'import gym\n'), ((23, 19, 23, 53), 'dm_control.suite.load', 'suite.load', ({(23, 30, 23, 41): 'domain_name', (23, 43, 23, 52): 'task_name'}, {}), '(domain_name, task_name)', False, 'from dm_control import suite\n'), ((35, 15, 35, 57), 'dm_control.rl.control.flatten_observation', 'flatten_observation', ({(35, 35, 35, 56): 'time_step.observation'}, {}), '(time_step.observation)', False, 'from dm_control.rl.control import flatten_observation\n'), ((68, 19, 70, 51), 'gym.spaces.Box', 'gym.spaces.Box', (), '', False, 'import gym\n'), ((29, 12, 29, 54), 'dm_control.rl.control.flatten_observation', 'flatten_observation', ({(29, 32, 29, 53): 'time_step.observation'}, {}), '(time_step.observation)', False, 'from dm_control.rl.control import flatten_observation\n'), ((42, 31, 42, 59), 'metarl.envs.dm_control.dm_control_viewer.DmControlViewer', 'DmControlViewer', (), '', False, 'from metarl.envs.dm_control.dm_control_viewer import DmControlViewer\n'), ((66, 39, 66, 65), 'numpy.prod', 'np.prod', ({(66, 47, 66, 64): 'action_spec.shape'}, {}), '(action_spec.shape)', True, 'import numpy as np\n'), ((59, 26, 59, 42), 'numpy.prod', 'np.prod', ({(59, 34, 59, 41): 'v.shape'}, {}), '(v.shape)', True, 'import numpy as np\n')] |
vatervonacht/dagster | python_modules/lakehouse/lakehouse/snowflake_table.py | 595d78c883ef20618052ac1575fe46cde51fd541 | from dagster import check
from .house import Lakehouse
from .table import create_lakehouse_table_def
class SnowflakeLakehouse(Lakehouse):
def __init__(self):
pass
def hydrate(self, _context, _table_type, _table_metadata, table_handle, _dest_metadata):
return None
def materialize(self, context, table_type, table_metadata, value):
return None, None
def snowflake_table(
name=None,
input_tables=None,
other_input_defs=None,
tags=None,
required_resource_keys=None,
description=None,
):
tags = check.opt_dict_param(tags, 'tags')
tags['lakehouse_type'] = 'snowflake_table'
tags['kind'] = 'snowflake'
required_resource_keys = check.opt_set_param(required_resource_keys, 'required_resource_keys')
required_resource_keys.add('snowflake')
if callable(name):
fn = name
return create_lakehouse_table_def(
name=fn.__name__,
lakehouse_fn=fn,
input_tables=[],
required_resource_keys=required_resource_keys,
)
def _wrap(fn):
return create_lakehouse_table_def(
name=name if name is not None else fn.__name__,
lakehouse_fn=fn,
input_tables=input_tables,
other_input_defs=other_input_defs,
tags=tags,
description=description,
required_resource_keys=required_resource_keys,
)
return _wrap
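# Hypothetical usage of the decorator above (bodies omitted). The bare form goes
# through the callable(name) branch; the keyword form goes through _wrap:
#   @snowflake_table
#   def raw_events(_context):
#       ...
#
#   @snowflake_table(name='clean_events', input_tables=[raw_events])
#   def clean_events(_context, raw_events):
#       ...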
| [((26, 11, 26, 45), 'dagster.check.opt_dict_param', 'check.opt_dict_param', ({(26, 32, 26, 36): 'tags', (26, 38, 26, 44): '"""tags"""'}, {}), "(tags, 'tags')", False, 'from dagster import check\n'), ((30, 29, 30, 98), 'dagster.check.opt_set_param', 'check.opt_set_param', ({(30, 49, 30, 71): 'required_resource_keys', (30, 73, 30, 97): '"""required_resource_keys"""'}, {}), "(required_resource_keys, 'required_resource_keys')", False, 'from dagster import check\n')] |
tokejepsen/pype | pype/plugins/maya/publish/validate_look_no_default_shaders.py | 8f2b2b631cc5d3ad93eeb5ad3bc6110d32466ed3 | from maya import cmds
import pyblish.api
import pype.api
import pype.maya.action
class ValidateLookNoDefaultShaders(pyblish.api.InstancePlugin):
"""Validate if any node has a connection to a default shader.
This checks whether the look has any members of:
- lambert1
- initialShadingGroup
- initialParticleSE
- particleCloud1
If any of those is present it will raise an error. A look is not allowed
to have any of the "default" shaders present in a scene as they can
introduce problems when referenced (overriding local scene shaders).
To fix this no shape nodes in the look must have any of default shaders
applied.
"""
order = pype.api.ValidateContentsOrder + 0.01
families = ['look']
hosts = ['maya']
label = 'Look No Default Shaders'
actions = [pype.maya.action.SelectInvalidAction]
DEFAULT_SHADERS = {"lambert1", "initialShadingGroup",
"initialParticleSE", "particleCloud1"}
def process(self, instance):
"""Process all the nodes in the instance"""
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError("Invalid node relationships found: "
"{0}".format(invalid))
@classmethod
def get_invalid(cls, instance):
invalid = set()
for node in instance:
# Get shading engine connections
shaders = cmds.listConnections(node, type="shadingEngine") or []
# Check for any disallowed connections on *all* nodes
if any(s in cls.DEFAULT_SHADERS for s in shaders):
# Explicitly log each individual "wrong" connection.
for s in shaders:
if s in cls.DEFAULT_SHADERS:
                        cls.log.error("Node has a disallowed connection to "
"'{}': {}".format(s, node))
invalid.add(node)
return list(invalid)
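# Illustrative standalone check (requires a running Maya session; the shape name
# below is hypothetical — in production `instance` is a pyblish 'look' instance):
#   invalid = ValidateLookNoDefaultShaders.get_invalid(["pSphereShape1"])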
| [((49, 22, 49, 70), 'maya.cmds.listConnections', 'cmds.listConnections', (), '', False, 'from maya import cmds\n')] |
Johne-DuChene/data_science_learning_app | data_science_app/app.py | 40bafce85a27155766950806b5b32a2d1f6753c4 | from flask import Flask
# initialize the app
app = Flask(__name__)
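# To try it locally (illustrative): `FLASK_APP=app.py flask run`, then request
# /iris to get the fitted model's predictions for the first two samples.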
# execute iris function at /iris route
@app.route("/iris")
def iris():
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
X, y = load_iris(return_X_y=True)
clf = LogisticRegression(
random_state = 42,
solver="lbfgs",
multi_class="multinomial"
).fit(X, y)
return str(clf.predict(X[:2, :])) | [((4, 6, 4, 21), 'flask.Flask', 'Flask', ({(4, 12, 4, 20): '__name__'}, {}), '(__name__)', False, 'from flask import Flask\n'), ((11, 11, 11, 37), 'sklearn.datasets.load_iris', 'load_iris', (), '', False, 'from sklearn.datasets import load_iris\n'), ((12, 10, 16, 5), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', (), '', False, 'from sklearn.linear_model import LogisticRegression\n')] |
VarunSrivastava19/VBDiarization | vbdiar/scoring/normalization.py | 2a460b4fc11b3a5ff73d0534cadb182be1a9d882 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 Brno University of Technology FIT
# Author: Jan Profant <[email protected]>
# All Rights Reserved
import os
import logging
import pickle
import multiprocessing
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from vbdiar.features.segments import get_frames_from_time
from vbdiar.embeddings.embedding import extract_embeddings
from vbdiar.utils import mkdir_p
from vbdiar.utils.utils import Utils
logger = logging.getLogger(__name__)
def process_files(fns, speakers_dict, features_extractor, embedding_extractor,
audio_dir, wav_suffix, in_rttm_dir, rttm_suffix, min_length, n_jobs=1):
"""
Args:
fns:
speakers_dict:
features_extractor:
embedding_extractor:
audio_dir:
wav_suffix:
in_rttm_dir:
rttm_suffix:
min_length:
n_jobs:
Returns:
"""
kwargs = dict(speakers_dict=speakers_dict, features_extractor=features_extractor,
embedding_extractor=embedding_extractor, audio_dir=audio_dir, wav_suffix=wav_suffix,
in_rttm_dir=in_rttm_dir, rttm_suffix=rttm_suffix, min_length=min_length)
if n_jobs == 1:
ret = _process_files((fns, kwargs))
else:
pool = multiprocessing.Pool(n_jobs)
ret = pool.map(_process_files, ((part, kwargs) for part in Utils.partition(fns, n_jobs)))
return ret
def _process_files(dargs):
"""
Args:
dargs:
Returns:
"""
fns, kwargs = dargs
ret = []
for fn in fns:
ret.append(process_file(file_name=fn, **kwargs))
return ret
def process_file(file_name, speakers_dict, features_extractor, embedding_extractor,
audio_dir, wav_suffix, in_rttm_dir, rttm_suffix, min_length):
""" Extract embeddings for all defined speakers.
Args:
file_name (string_types): path to input audio file
speakers_dict (dict): dictionary containing all embedding across speakers
features_extractor (Any):
embedding_extractor (Any):
audio_dir (string_types):
wav_suffix (string_types):
in_rttm_dir (string_types):
rttm_suffix (string_types):
min_length (float):
Returns:
dict: updated dictionary with speakers
"""
logger.info('Processing file `{}`.'.format(file_name.split()[0]))
# extract features from whole audio
features = features_extractor.audio2features(os.path.join(audio_dir, '{}{}'.format(file_name, wav_suffix)))
# process utterances of the speakers
features_dict = {}
with open(f'{os.path.join(in_rttm_dir, file_name)}{rttm_suffix}') as f:
for line in f:
start_time, dur = int(float(line.split()[3]) * 1000), int(float(line.split()[4]) * 1000)
speaker = line.split()[7]
if dur > min_length:
end_time = start_time + dur
start, end = get_frames_from_time(int(start_time)), get_frames_from_time(int(end_time))
if speaker not in features_dict:
features_dict[speaker] = {}
assert 0 <= start < end, \
f'Incorrect timing for extracting features, start: {start}, size: {features.shape[0]}, end: {end}.'
if end >= features.shape[0]:
end = features.shape[0] - 1
features_dict[speaker][(start_time, end_time)] = features[start:end]
for speaker in features_dict:
embedding_set = extract_embeddings(features_dict[speaker], embedding_extractor)
embeddings_long = embedding_set.get_all_embeddings()
if speaker not in speakers_dict.keys():
speakers_dict[speaker] = embeddings_long
else:
speakers_dict[speaker] = np.concatenate((speakers_dict[speaker], embeddings_long), axis=0)
return speakers_dict
class Normalization(object):
""" Speaker normalization S-Norm. """
embeddings = None
in_emb_dir = None
def __init__(self, norm_list, audio_dir=None, in_rttm_dir=None, in_emb_dir=None,
out_emb_dir=None, min_length=None, features_extractor=None, embedding_extractor=None,
plda=None, wav_suffix='.wav', rttm_suffix='.rttm', n_jobs=1):
""" Initialize normalization object.
Args:
norm_list (string_types): path to normalization list
audio_dir (string_types|None): path to audio directory
in_rttm_dir (string_types|None): path to directory with rttm files
in_emb_dir (str|None): path to directory with i-vectors
out_emb_dir (str|None): path to directory for storing embeddings
min_length (int): minimal length for extracting embeddings
features_extractor (Any): object for feature extraction
embedding_extractor (Any): object for extracting embedding
plda (PLDA|None): plda model object
wav_suffix (string_types): suffix of wav files
rttm_suffix (string_types): suffix of rttm files
"""
if audio_dir:
self.audio_dir = os.path.abspath(audio_dir)
self.norm_list = norm_list
if in_rttm_dir:
self.in_rttm_dir = os.path.abspath(in_rttm_dir)
else:
raise ValueError('It is required to have input rttm files for normalization.')
self.features_extractor = features_extractor
self.embedding_extractor = embedding_extractor
self.plda = plda
self.wav_suffix = wav_suffix
self.rttm_suffix = rttm_suffix
if in_emb_dir:
self.in_emb_dir = os.path.abspath(in_emb_dir)
if out_emb_dir:
self.out_emb_dir = os.path.abspath(out_emb_dir)
self.min_length = min_length
self.n_jobs = n_jobs
if self.in_emb_dir is None:
self.embeddings = self.extract_embeddings()
else:
self.embeddings = self.load_embeddings()
self.mean = np.mean(self.embeddings, axis=0)
def __iter__(self):
current = 0
while current < len(self.embeddings):
yield self.embeddings[current]
current += 1
def __getitem__(self, key):
return self.embeddings[key]
def __setitem__(self, key, value):
self.embeddings[key] = value
def __len__(self):
return len(self.embeddings)
def extract_embeddings(self):
""" Extract normalization embeddings using averaging.
Returns:
Tuple[np.array, np.array]: vectors for individual speakers, global mean over all speakers
"""
speakers_dict, fns = {}, []
with open(self.norm_list) as f:
for line in f:
if len(line.split()) > 1: # number of speakers is defined
line = line.split()[0]
else:
line = line.replace(os.linesep, '')
fns.append(line)
speakers_dict = process_files(fns, speakers_dict=speakers_dict, features_extractor=self.features_extractor,
embedding_extractor=self.embedding_extractor, audio_dir=self.audio_dir,
wav_suffix=self.wav_suffix, in_rttm_dir=self.in_rttm_dir,
rttm_suffix=self.rttm_suffix, min_length=self.min_length, n_jobs=self.n_jobs)
assert len(speakers_dict) == len(fns)
# all are the same
merged_speakers_dict = speakers_dict[0]
if self.out_emb_dir:
for speaker in merged_speakers_dict:
out_path = os.path.join(self.out_emb_dir, f'{speaker}.pkl')
mkdir_p(os.path.dirname(out_path))
with open(out_path, 'wb') as f:
pickle.dump(merged_speakers_dict[speaker], f, pickle.HIGHEST_PROTOCOL)
for speaker in merged_speakers_dict:
merged_speakers_dict[speaker] = np.mean(merged_speakers_dict[speaker], axis=0)
return np.array(list(merged_speakers_dict.values()))
def load_embeddings(self):
""" Load normalization embeddings from pickle files.
Returns:
np.array: embeddings per speaker
"""
embeddings, speakers = [], set()
with open(self.norm_list) as f:
for file_name in f:
if len(file_name.split()) > 1: # number of speakers is defined
file_name = file_name.split()[0]
else:
file_name = file_name.replace(os.linesep, '')
with open('{}{}'.format(os.path.join(self.in_rttm_dir, file_name), self.rttm_suffix)) as fp:
for line in fp:
speakers.add(line.split()[7])
logger.info('Loading pickled normalization embeddings from `{}`.'.format(self.in_emb_dir))
for speaker in speakers:
embedding_path = os.path.join(self.in_emb_dir, '{}.pkl'.format(speaker))
if os.path.isfile(embedding_path):
logger.info('Loading normalization pickle file `{}`.'.format(speaker))
with open(embedding_path, 'rb') as f:
# append mean from speaker's embeddings
speaker_embeddings = pickle.load(f)
embeddings.append(np.mean(speaker_embeddings, axis=0))
else:
logger.warning('No pickle file found for `{}` in `{}`.'.format(speaker, self.in_emb_dir))
return np.array(embeddings)
def s_norm(self, test, enroll):
""" Run speaker normalization (S-Norm) on cached embeddings.
Args:
test (np.array): test embedding
enroll (np.array): enroll embedding
Returns:
float: hypothesis
"""
if self.plda:
a = self.plda.score(test, self.embeddings).T
b = self.plda.score(enroll, self.embeddings).T
c = self.plda.score(enroll, test).T
else:
a = cosine_similarity(test, self.embeddings).T
b = cosine_similarity(enroll, self.embeddings).T
c = cosine_similarity(enroll, test).T
scores = []
for ii in range(test.shape[0]):
test_scores = []
for jj in range(enroll.shape[0]):
test_mean, test_std = np.mean(a.T[ii]), np.std(a.T[ii])
enroll_mean, enroll_std = np.mean(b.T[jj]), np.std(b.T[jj])
s = c[ii][jj]
test_scores.append((((s - test_mean) / test_std + (s - enroll_mean) / enroll_std) / 2))
scores.append(test_scores)
return np.array(scores)
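# Illustrative call sequence (paths are hypothetical; the norm list, rttm and
# embedding directories must follow the formats parsed above):
#   norm = Normalization('lists/norm.txt', in_rttm_dir='rttm', in_emb_dir='embeddings')
#   scores = norm.s_norm(test_embeddings, enroll_embeddings)  # (n_test, n_enroll) S-Norm scores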
| [((21, 9, 21, 36), 'logging.getLogger', 'logging.getLogger', ({(21, 27, 21, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((49, 15, 49, 43), 'multiprocessing.Pool', 'multiprocessing.Pool', ({(49, 36, 49, 42): 'n_jobs'}, {}), '(n_jobs)', False, 'import multiprocessing\n'), ((110, 24, 110, 87), 'vbdiar.embeddings.embedding.extract_embeddings', 'extract_embeddings', ({(110, 43, 110, 65): 'features_dict[speaker]', (110, 67, 110, 86): 'embedding_extractor'}, {}), '(features_dict[speaker], embedding_extractor)', False, 'from vbdiar.embeddings.embedding import extract_embeddings\n'), ((164, 20, 164, 52), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((244, 15, 244, 35), 'numpy.array', 'np.array', ({(244, 24, 244, 34): 'embeddings'}, {}), '(embeddings)', True, 'import numpy as np\n'), ((273, 15, 273, 31), 'numpy.array', 'np.array', ({(273, 24, 273, 30): 'scores'}, {}), '(scores)', True, 'import numpy as np\n'), ((115, 37, 115, 102), 'numpy.concatenate', 'np.concatenate', (), '', True, 'import numpy as np\n'), ((143, 29, 143, 55), 'os.path.abspath', 'os.path.abspath', ({(143, 45, 143, 54): 'audio_dir'}, {}), '(audio_dir)', False, 'import os\n'), ((146, 31, 146, 59), 'os.path.abspath', 'os.path.abspath', ({(146, 47, 146, 58): 'in_rttm_dir'}, {}), '(in_rttm_dir)', False, 'import os\n'), ((155, 30, 155, 57), 'os.path.abspath', 'os.path.abspath', ({(155, 46, 155, 56): 'in_emb_dir'}, {}), '(in_emb_dir)', False, 'import os\n'), ((157, 31, 157, 59), 'os.path.abspath', 'os.path.abspath', ({(157, 47, 157, 58): 'out_emb_dir'}, {}), '(out_emb_dir)', False, 'import os\n'), ((212, 44, 212, 90), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((236, 15, 236, 45), 'os.path.isfile', 'os.path.isfile', ({(236, 30, 236, 44): 'embedding_path'}, {}), '(embedding_path)', False, 'import os\n'), ((206, 27, 206, 75), 'os.path.join', 'os.path.join', ({(206, 40, 206, 56): 'self.out_emb_dir', (206, 58, 206, 74): 'f"""{speaker}.pkl"""'}, {}), "(self.out_emb_dir, f'{speaker}.pkl')", False, 'import os\n'), ((261, 16, 261, 56), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', ({(261, 34, 261, 38): 'test', (261, 40, 261, 55): 'self.embeddings'}, {}), '(test, self.embeddings)', False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((262, 16, 262, 58), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', ({(262, 34, 262, 40): 'enroll', (262, 42, 262, 57): 'self.embeddings'}, {}), '(enroll, self.embeddings)', False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((263, 16, 263, 47), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', ({(263, 34, 263, 40): 'enroll', (263, 42, 263, 46): 'test'}, {}), '(enroll, test)', False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((50, 67, 50, 95), 'vbdiar.utils.utils.Utils.partition', 'Utils.partition', ({(50, 83, 50, 86): 'fns', (50, 88, 50, 94): 'n_jobs'}, {}), '(fns, n_jobs)', False, 'from vbdiar.utils.utils import Utils\n'), ((94, 17, 94, 53), 'os.path.join', 'os.path.join', ({(94, 30, 94, 41): 'in_rttm_dir', (94, 43, 94, 52): 'file_name'}, {}), '(in_rttm_dir, file_name)', False, 'import os\n'), ((207, 24, 207, 49), 'os.path.dirname', 'os.path.dirname', ({(207, 40, 207, 48): 'out_path'}, {}), '(out_path)', False, 'import os\n'), ((209, 20, 209, 90), 'pickle.dump', 'pickle.dump', ({(209, 32, 209, 61): 'merged_speakers_dict[speaker]', (209, 63, 209, 64): 'f', (209, 66, 209, 89): 'pickle.HIGHEST_PROTOCOL'}, {}), '(merged_speakers_dict[speaker], f, 
pickle.HIGHEST_PROTOCOL)', False, 'import pickle\n'), ((240, 41, 240, 55), 'pickle.load', 'pickle.load', ({(240, 53, 240, 54): 'f'}, {}), '(f)', False, 'import pickle\n'), ((268, 38, 268, 54), 'numpy.mean', 'np.mean', ({(268, 46, 268, 53): 'a.T[ii]'}, {}), '(a.T[ii])', True, 'import numpy as np\n'), ((268, 56, 268, 71), 'numpy.std', 'np.std', ({(268, 63, 268, 70): 'a.T[ii]'}, {}), '(a.T[ii])', True, 'import numpy as np\n'), ((269, 42, 269, 58), 'numpy.mean', 'np.mean', ({(269, 50, 269, 57): 'b.T[jj]'}, {}), '(b.T[jj])', True, 'import numpy as np\n'), ((269, 60, 269, 75), 'numpy.std', 'np.std', ({(269, 67, 269, 74): 'b.T[jj]'}, {}), '(b.T[jj])', True, 'import numpy as np\n'), ((241, 38, 241, 73), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((229, 40, 229, 81), 'os.path.join', 'os.path.join', ({(229, 53, 229, 69): 'self.in_rttm_dir', (229, 71, 229, 80): 'file_name'}, {}), '(self.in_rttm_dir, file_name)', False, 'import os\n')] |
mattsmart/biomodels | agent_based_models/abm_allelopathy/plot_data.py | 237f87489553fa1ebf5c676fab563166dd0c39e9 | import matplotlib.pyplot as plt
import os
def data_plotter(lattice_dict, datafile_dir, plot_dir):
# total spaces on grid implies grid size
total_cells = lattice_dict['E'][0] + lattice_dict['D_a'][0] + lattice_dict['D_b'][0] + lattice_dict['B'][0]
n = int(total_cells**0.5)
plt.figure(1)
plt.plot(lattice_dict['time'], lattice_dict['E'], label='Empty lattice points')
plt.plot(lattice_dict['time'], lattice_dict['D_a'], label='Donors (Type A)')
plt.plot(lattice_dict['time'], lattice_dict['D_b'], label='Donors (Type B)')
plt.plot(lattice_dict['time'], lattice_dict['B'], label='Debris')
ax = plt.gca()
ax.set_title('Cell Populations over time (n = %d)' % n)
ax.set_ylabel('Number of cells')
ax.set_xlabel('Time (h)')
plt.legend()
f = plt.gcf()
f.set_size_inches(20.0, 8.0) # alternative: 20.0, 8.0
f.tight_layout()
plt.savefig(os.path.join(plot_dir, 'population_vs_time.png'))
plt.clf()
return
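# Illustrative call (keys mirror what data_plotter() reads; values are made up):
#   lattice = {'time': [0, 1], 'E': [90, 80], 'D_a': [5, 10], 'D_b': [5, 8], 'B': [0, 2]}
#   data_plotter(lattice, 'datafiles', 'plots')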
| [((11, 4, 11, 17), 'matplotlib.pyplot.figure', 'plt.figure', ({(11, 15, 11, 16): '(1)'}, {}), '(1)', True, 'import matplotlib.pyplot as plt\n'), ((13, 4, 13, 83), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((14, 4, 14, 80), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((15, 4, 15, 80), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((16, 4, 16, 69), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((18, 9, 18, 18), 'matplotlib.pyplot.gca', 'plt.gca', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((23, 4, 23, 16), 'matplotlib.pyplot.legend', 'plt.legend', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((25, 8, 25, 17), 'matplotlib.pyplot.gcf', 'plt.gcf', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((29, 4, 29, 13), 'matplotlib.pyplot.clf', 'plt.clf', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((28, 16, 28, 64), 'os.path.join', 'os.path.join', ({(28, 29, 28, 37): 'plot_dir', (28, 39, 28, 63): '"""population_vs_time.png"""'}, {}), "(plot_dir, 'population_vs_time.png')", False, 'import os\n')] |
JonathanGailliez/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2018_10_01/models/virtual_wan_security_providers.py | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualWanSecurityProviders(Model):
"""Collection of SecurityProviders.
:param supported_providers:
:type supported_providers:
list[~azure.mgmt.network.v2018_10_01.models.VirtualWanSecurityProvider]
"""
_attribute_map = {
'supported_providers': {'key': 'supportedProviders', 'type': '[VirtualWanSecurityProvider]'},
}
def __init__(self, **kwargs):
super(VirtualWanSecurityProviders, self).__init__(**kwargs)
self.supported_providers = kwargs.get('supported_providers', None)
| [] |
flowgunso/jsonresume-theme-stackoverflow | jsonresume_theme_stackoverflow/filters.py | 5fcadcf41a93478a09e95d79fd62d8ac3402b33b | import datetime
import re
from .exceptions import ObjectIsNotADate
def format_date(value, format="%d %B %Y"):
    # default renders e.g. "07 April 2015"; directives follow datetime.strftime
regex = re.match(r"(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})", value)
if regex is not None:
date = datetime.date(
int(regex.group("year")),
int(regex.group("month")),
int(regex.group("day")))
else:
raise ObjectIsNotADate
return date.strftime(format)
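# Example behaviour (format directives follow datetime.strftime; the value must
# be an ISO 'YYYY-MM-DD' string, otherwise ObjectIsNotADate is raised):
#   format_date('2015-04-07')                 # '07 April 2015' with the default format
#   format_date('2015-04-07', format='%Y')    # '2015'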
| [((8, 12, 8, 79), 're.match', 're.match', ({(8, 21, 8, 71): '"""(?P<year>\\\\d{4})-(?P<month>\\\\d{2})-(?P<day>\\\\d{2})"""', (8, 73, 8, 78): 'value'}, {}), "('(?P<year>\\\\d{4})-(?P<month>\\\\d{2})-(?P<day>\\\\d{2})', value)", False, 'import re\n')] |
wwwbbb8510/ippso | ipec/data/core.py | fa20d23cd8edba5908e65a0ab0ab990d7ce3d5d5 | import numpy as np
import os
import logging
from sklearn.model_selection import train_test_split
DATASET_ROOT_FOLDER = os.path.abspath('datasets')
class DataLoader:
train = None
validation = None
test = None
mode = None
partial_dataset = None
@staticmethod
def load(train_path=None, validation_path=None, test_path=None, height=28, length=28, train_validation_split_point=10000):
if train_path is not None:
DataLoader.train = DataLoader.load_image_data_with_label_at_end(
os.path.join(DATASET_ROOT_FOLDER, train_path), height=height, length=length)
if validation_path is not None:
DataLoader.validation = DataLoader.load_image_data_with_label_at_end(
os.path.join(DATASET_ROOT_FOLDER, validation_path), height=height, length=length)
elif train_validation_split_point is not None and train_validation_split_point > 0:
if DataLoader.mode is None or DataLoader.partial_dataset is not None:
train_validation_split_point = int(DataLoader.train['images'].shape[0] * 0.8)
splited_train = {
'images': DataLoader.train['images'][0:train_validation_split_point, :, :, :],
'labels': DataLoader.train['labels'][0:train_validation_split_point]
}
splited_validation = {
'images': DataLoader.train['images'][train_validation_split_point:, :, :, :],
'labels': DataLoader.train['labels'][train_validation_split_point:]
}
DataLoader.train = splited_train
DataLoader.validation = splited_validation
if test_path is not None:
DataLoader.test = DataLoader.load_image_data_with_label_at_end(os.path.join(DATASET_ROOT_FOLDER, test_path), height=height, length=length)
logging.debug('Training data shape:{}'.format(str(DataLoader.train['images'].shape)))
logging.debug('Validation data shape:{}'.format(str(DataLoader.validation['images'].shape)))
logging.debug('Test data shape:{}'.format(str(DataLoader.test['images'].shape)))
return DataLoader
@staticmethod
def get_training_data():
"""
get training data
:return: dict of (images, labels)
:rtype: dict
"""
        images = DataLoader.train['images']
        labels = DataLoader.train['labels']
return {
'images': images,
'labels': labels
}
@staticmethod
def get_validation_data():
"""
get validation data
:return: dict of (images, labels)
:rtype: dict
"""
        images = DataLoader.validation['images']
        labels = DataLoader.validation['labels']
return {
'images': images,
'labels': labels
}
@staticmethod
def get_test_data():
"""
get test data
:return: dict of (images, labels)
:rtype: dict
"""
        images = DataLoader.test['images']
        labels = DataLoader.test['labels']
return {
'images': images,
'labels': labels
}
@staticmethod
def load_image_data_with_label_at_end(path, height, length):
data = np.loadtxt(path)
if DataLoader.mode is None:
data = data[0:1000, :]
elif DataLoader.partial_dataset is not None and DataLoader.partial_dataset > 0 and DataLoader.partial_dataset <1:
# randomly pick partial dataset
cut_point = int(data.shape[0] * DataLoader.partial_dataset)
indices = np.random.permutation(data.shape[0])
training_idx= indices[:cut_point]
data = data[training_idx, :]
images = data[:, 0:-1]
labels = data[:, -1]
images = np.reshape(images, [images.shape[0], height, length, 1], order='F')
return {
'images': images,
'labels': labels
}
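# Illustrative use (paths are hypothetical; each text file holds one flattened
# height*length image per row with the class label in the last column):
#   DataLoader.mode = 1   # any non-None value skips the 1000-row debug cut
#   DataLoader.load(train_path='mnist/train.txt', test_path='mnist/test.txt',
#                   height=28, length=28)
#   batch = DataLoader.get_training_data()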
| [((6, 22, 6, 49), 'os.path.abspath', 'os.path.abspath', ({(6, 38, 6, 48): '"""datasets"""'}, {}), "('datasets')", False, 'import os\n'), ((92, 15, 92, 31), 'numpy.loadtxt', 'np.loadtxt', ({(92, 26, 92, 30): 'path'}, {}), '(path)', True, 'import numpy as np\n'), ((103, 17, 103, 84), 'numpy.reshape', 'np.reshape', (), '', True, 'import numpy as np\n'), ((20, 16, 20, 61), 'os.path.join', 'os.path.join', ({(20, 29, 20, 48): 'DATASET_ROOT_FOLDER', (20, 50, 20, 60): 'train_path'}, {}), '(DATASET_ROOT_FOLDER, train_path)', False, 'import os\n'), ((23, 16, 23, 66), 'os.path.join', 'os.path.join', ({(23, 29, 23, 48): 'DATASET_ROOT_FOLDER', (23, 50, 23, 65): 'validation_path'}, {}), '(DATASET_ROOT_FOLDER, validation_path)', False, 'import os\n'), ((38, 75, 38, 119), 'os.path.join', 'os.path.join', ({(38, 88, 38, 107): 'DATASET_ROOT_FOLDER', (38, 109, 38, 118): 'test_path'}, {}), '(DATASET_ROOT_FOLDER, test_path)', False, 'import os\n'), ((98, 22, 98, 58), 'numpy.random.permutation', 'np.random.permutation', ({(98, 44, 98, 57): 'data.shape[0]'}, {}), '(data.shape[0])', True, 'import numpy as np\n')] |
lucasf5/Python | FOR/Analisador-completo/main.py | c5649121e2af42922e2d9c19cec98322e132bdab | # Python Exercise 56: Write a program that reads the name, age and sex of 4 people. At the end of the program, show: the average age of the group, the name of the oldest man, and how many women are under 20 years old.
mediaidade = ''
nomelista = []
idadelista = []
sexolista = []
homens = []
mulherescommenosde20 = 0
nomedelas = []
# -------------------------------------------------------------------
for i in range(1,5):
print(f'{i} PESSOA')
nome = (input('Seu nome: '))
idade = int(input('Sua idade: '))
sexo = int(input('Sexo? [0]Masculino [1]Feminino: '))
if sexo == 1 and idade < 20:
nomedelas.append(nome)
mulherescommenosde20 += 1
elif sexo == 0:
homens.append(nome)
    # Add every age to a list
    idadelista.append(idade)
    # Take the average of these ages  // first part
    mediaidade = ((sum(idadelista))/4)
    # Add every name to a list
nomelista.append(nome)
# -------------------------------------------------------------------
# Store in maximo the largest value found in the list
maximo = max(idadelista)
# Store in indexidade the INDEX of that largest value
indexidade = idadelista.index(maximo)
# Store in indexnome the name at the position of whoever has the greatest age
indexnome = nomelista[indexidade]
# -------------------------------------------------------------------
print(f'A media das idades é: {mediaidade}')
print(f'A pessoa que tem a maior idade, com {maximo} é essa: {indexnome}')
print(f'As mulheres que possuem menos de 20 anos: {mulherescommenosde20} e são: {nomedelas}')
| [] |
EnriqueL8/qiskit-terra | test/python/quantum_info/operators/test_operator.py | 08b801f1f8598c4e44680b4a75c232ed92db0262 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""Tests for Operator matrix linear operator class."""
import unittest
import logging
import copy
import numpy as np
from numpy.testing import assert_allclose
import scipy.linalg as la
from qiskit import QiskitError
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.extensions.standard import HGate, CHGate, CXGate
from qiskit.test import QiskitTestCase
from qiskit.quantum_info.operators.operator import Operator
from qiskit.quantum_info.operators.predicates import matrix_equal
logger = logging.getLogger(__name__)
class OperatorTestCase(QiskitTestCase):
"""Test utils for Operator"""
# Pauli-matrix unitaries
UI = np.eye(2)
UX = np.array([[0, 1], [1, 0]])
UY = np.array([[0, -1j], [1j, 0]])
UZ = np.diag([1, -1])
UH = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
@classmethod
def rand_rho(cls, n):
"""Return random density matrix"""
seed = np.random.randint(0, np.iinfo(np.int32).max)
logger.debug("rand_rho RandomState seeded with seed=%s", seed)
rng = np.random.RandomState(seed)
psi = rng.rand(n) + 1j * rng.rand(n)
rho = np.outer(psi, psi.conj())
rho /= np.trace(rho)
return rho
@classmethod
def rand_matrix(cls, rows, cols=None, real=False):
"""Return a random matrix."""
seed = np.random.randint(0, np.iinfo(np.int32).max)
logger.debug("rand_matrix RandomState seeded with seed=%s", seed)
rng = np.random.RandomState(seed)
if cols is None:
cols = rows
if real:
return rng.rand(rows, cols)
return rng.rand(rows, cols) + 1j * rng.rand(rows, cols)
def simple_circuit_no_measure(self):
"""Return a unitary circuit and the corresponding unitary array."""
qr = QuantumRegister(3)
circ = QuantumCircuit(qr)
circ.h(qr[0])
circ.x(qr[1])
circ.ry(np.pi / 2, qr[2])
y90 = (1 / np.sqrt(2)) * np.array([[1, -1], [1, 1]])
target = Operator(np.kron(y90, np.kron(self.UX, self.UH)))
return circ, target
def simple_circuit_with_measure(self):
"""Return a unitary circuit with measurement."""
qr = QuantumRegister(2)
cr = ClassicalRegister(2)
circ = QuantumCircuit(qr, cr)
circ.h(qr[0])
circ.x(qr[1])
circ.measure(qr, cr)
return circ
class TestOperator(OperatorTestCase):
"""Tests for Operator linear operator class."""
def test_init_array_qubit(self):
"""Test subsystem initialization from N-qubit array."""
# Test automatic inference of qubit subsystems
mat = self.rand_matrix(8, 8)
op = Operator(mat)
assert_allclose(op.data, mat)
self.assertEqual(op.dim, (8, 8))
self.assertEqual(op.input_dims(), (2, 2, 2))
self.assertEqual(op.output_dims(), (2, 2, 2))
op = Operator(mat, input_dims=8, output_dims=8)
assert_allclose(op.data, mat)
self.assertEqual(op.dim, (8, 8))
self.assertEqual(op.input_dims(), (2, 2, 2))
self.assertEqual(op.output_dims(), (2, 2, 2))
def test_init_array(self):
"""Test initialization from array."""
mat = np.eye(3)
op = Operator(mat)
assert_allclose(op.data, mat)
self.assertEqual(op.dim, (3, 3))
self.assertEqual(op.input_dims(), (3,))
self.assertEqual(op.output_dims(), (3,))
mat = self.rand_matrix(2 * 3 * 4, 4 * 5)
op = Operator(mat, input_dims=[4, 5], output_dims=[2, 3, 4])
assert_allclose(op.data, mat)
self.assertEqual(op.dim, (4 * 5, 2 * 3 * 4))
self.assertEqual(op.input_dims(), (4, 5))
self.assertEqual(op.output_dims(), (2, 3, 4))
def test_init_array_except(self):
"""Test initialization exception from array."""
mat = self.rand_matrix(4, 4)
self.assertRaises(QiskitError, Operator, mat, input_dims=[4, 2])
self.assertRaises(QiskitError, Operator, mat, input_dims=[2, 4])
self.assertRaises(QiskitError, Operator, mat, input_dims=5)
def test_init_operator(self):
"""Test initialization from Operator."""
op1 = Operator(self.rand_matrix(4, 4))
op2 = Operator(op1)
self.assertEqual(op1, op2)
def test_circuit_init(self):
"""Test initialization from a circuit."""
# Test tensor product of 1-qubit gates
circuit = QuantumCircuit(3)
circuit.h(0)
circuit.x(1)
circuit.ry(np.pi / 2, 2)
op = Operator(circuit)
y90 = (1 / np.sqrt(2)) * np.array([[1, -1], [1, 1]])
target = np.kron(y90, np.kron(self.UX, self.UH))
global_phase_equivalent = matrix_equal(
op.data, target, ignore_phase=True)
self.assertTrue(global_phase_equivalent)
# Test decomposition of Controlled-u1 gate
lam = np.pi / 4
circuit = QuantumCircuit(2)
circuit.cu1(lam, 0, 1)
op = Operator(circuit)
target = np.diag([1, 1, 1, np.exp(1j * lam)])
global_phase_equivalent = matrix_equal(
op.data, target, ignore_phase=True)
self.assertTrue(global_phase_equivalent)
# Test decomposition of controlled-H gate
circuit = QuantumCircuit(2)
circuit.ch(0, 1)
op = Operator(circuit)
target = np.kron(self.UI, np.diag([1, 0])) + np.kron(
self.UH, np.diag([0, 1]))
global_phase_equivalent = matrix_equal(
op.data, target, ignore_phase=True)
self.assertTrue(global_phase_equivalent)
def test_instruction_init(self):
"""Test initialization from a circuit."""
gate = CXGate()
op = Operator(gate).data
target = gate.to_matrix()
global_phase_equivalent = matrix_equal(op, target, ignore_phase=True)
self.assertTrue(global_phase_equivalent)
gate = CHGate()
op = Operator(gate).data
had = HGate().to_matrix()
target = np.kron(had, np.diag([0, 1])) + np.kron(
np.eye(2), np.diag([1, 0]))
global_phase_equivalent = matrix_equal(op, target, ignore_phase=True)
self.assertTrue(global_phase_equivalent)
def test_circuit_init_except(self):
"""Test initialization from circuit with measure raises exception."""
circuit = self.simple_circuit_with_measure()
self.assertRaises(QiskitError, Operator, circuit)
def test_equal(self):
"""Test __eq__ method"""
mat = self.rand_matrix(2, 2, real=True)
self.assertEqual(Operator(np.array(mat, dtype=complex)),
Operator(mat))
mat = self.rand_matrix(4, 4)
self.assertEqual(Operator(mat.tolist()),
Operator(mat))
def test_data(self):
"""Test Operator representation string property."""
mat = self.rand_matrix(2, 2)
op = Operator(mat)
assert_allclose(mat, op.data)
def test_dim(self):
"""Test Operator dim property."""
mat = self.rand_matrix(4, 4)
self.assertEqual(Operator(mat).dim, (4, 4))
self.assertEqual(Operator(mat, input_dims=[4], output_dims=[4]).dim, (4, 4))
self.assertEqual(Operator(mat, input_dims=[2, 2], output_dims=[2, 2]).dim, (4, 4))
def test_input_dims(self):
"""Test Operator input_dims method."""
op = Operator(self.rand_matrix(2 * 3 * 4, 4 * 5),
input_dims=[4, 5], output_dims=[2, 3, 4])
self.assertEqual(op.input_dims(), (4, 5))
self.assertEqual(op.input_dims(qargs=[0, 1]), (4, 5))
self.assertEqual(op.input_dims(qargs=[1, 0]), (5, 4))
self.assertEqual(op.input_dims(qargs=[0]), (4,))
self.assertEqual(op.input_dims(qargs=[1]), (5,))
def test_output_dims(self):
"""Test Operator output_dims method."""
op = Operator(self.rand_matrix(2 * 3 * 4, 4 * 5),
input_dims=[4, 5], output_dims=[2, 3, 4])
self.assertEqual(op.output_dims(), (2, 3, 4))
self.assertEqual(op.output_dims(qargs=[0, 1, 2]), (2, 3, 4))
self.assertEqual(op.output_dims(qargs=[2, 1, 0]), (4, 3, 2))
self.assertEqual(op.output_dims(qargs=[2, 0, 1]), (4, 2, 3))
self.assertEqual(op.output_dims(qargs=[0]), (2,))
self.assertEqual(op.output_dims(qargs=[1]), (3,))
self.assertEqual(op.output_dims(qargs=[2]), (4,))
self.assertEqual(op.output_dims(qargs=[0, 2]), (2, 4))
self.assertEqual(op.output_dims(qargs=[2, 0]), (4, 2))
def test_reshape(self):
"""Test Operator reshape method."""
op = Operator(self.rand_matrix(8, 8))
reshaped1 = op.reshape(input_dims=[8], output_dims=[8])
reshaped2 = op.reshape(input_dims=[4, 2], output_dims=[2, 4])
self.assertEqual(op.output_dims(), (2, 2, 2))
self.assertEqual(op.input_dims(), (2, 2, 2))
self.assertEqual(reshaped1.output_dims(), (8,))
self.assertEqual(reshaped1.input_dims(), (8,))
self.assertEqual(reshaped2.output_dims(), (2, 4))
self.assertEqual(reshaped2.input_dims(), (4, 2))
def test_copy(self):
"""Test Operator copy method"""
mat = np.eye(2)
with self.subTest("Deep copy"):
orig = Operator(mat)
cpy = orig.copy()
cpy._data[0, 0] = 0.0
self.assertFalse(cpy == orig)
with self.subTest("Shallow copy"):
orig = Operator(mat)
clone = copy.copy(orig)
clone._data[0, 0] = 0.0
self.assertTrue(clone == orig)
def test_is_unitary(self):
"""Test is_unitary method."""
# X-90 rotation
X90 = la.expm(-1j * 0.5 * np.pi * np.array([[0, 1], [1, 0]]) / 2)
self.assertTrue(Operator(X90).is_unitary())
# Non-unitary should return false
self.assertFalse(Operator([[1, 0], [0, 0]]).is_unitary())
def test_to_operator(self):
"""Test to_operator method."""
op1 = Operator(self.rand_matrix(4, 4))
op2 = op1.to_operator()
self.assertEqual(op1, op2)
def test_conjugate(self):
"""Test conjugate method."""
matr = self.rand_matrix(2, 4, real=True)
mati = self.rand_matrix(2, 4, real=True)
op = Operator(matr + 1j * mati)
uni_conj = op.conjugate()
self.assertEqual(uni_conj, Operator(matr - 1j * mati))
def test_transpose(self):
"""Test transpose method."""
matr = self.rand_matrix(2, 4, real=True)
mati = self.rand_matrix(2, 4, real=True)
op = Operator(matr + 1j * mati)
uni_t = op.transpose()
self.assertEqual(uni_t, Operator(matr.T + 1j * mati.T))
def test_adjoint(self):
"""Test adjoint method."""
matr = self.rand_matrix(2, 4, real=True)
mati = self.rand_matrix(2, 4, real=True)
op = Operator(matr + 1j * mati)
uni_adj = op.adjoint()
self.assertEqual(uni_adj, Operator(matr.T - 1j * mati.T))
def test_compose_except(self):
"""Test compose different dimension exception"""
self.assertRaises(QiskitError,
Operator(np.eye(2)).compose,
Operator(np.eye(3)))
self.assertRaises(QiskitError, Operator(np.eye(2)).compose, 2)
def test_compose(self):
"""Test compose method."""
op1 = Operator(self.UX)
op2 = Operator(self.UY)
targ = Operator(np.dot(self.UY, self.UX))
self.assertEqual(op1.compose(op2), targ)
self.assertEqual(op1 @ op2, targ)
targ = Operator(np.dot(self.UX, self.UY))
self.assertEqual(op2.compose(op1), targ)
self.assertEqual(op2 @ op1, targ)
def test_dot(self):
"""Test dot method."""
op1 = Operator(self.UY)
op2 = Operator(self.UX)
targ = Operator(np.dot(self.UY, self.UX))
self.assertEqual(op1.dot(op2), targ)
self.assertEqual(op1 * op2, targ)
targ = Operator(np.dot(self.UX, self.UY))
self.assertEqual(op2.dot(op1), targ)
self.assertEqual(op2 * op1, targ)
def test_compose_front(self):
"""Test front compose method."""
opYX = Operator(self.UY).compose(Operator(self.UX), front=True)
matYX = np.dot(self.UY, self.UX)
self.assertEqual(opYX, Operator(matYX))
opXY = Operator(self.UX).compose(Operator(self.UY), front=True)
matXY = np.dot(self.UX, self.UY)
self.assertEqual(opXY, Operator(matXY))
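    # Note for the subsystem tests below: qargs lists the target subsystems, and each
    # expected matrix `targ` is built by kron-ing the small matrices with identities on
    # the untouched subsystems (qubit 0 corresponds to the rightmost kron factor).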
def test_compose_subsystem(self):
"""Test subsystem compose method."""
# 3-qubit operator
mat = self.rand_matrix(8, 8)
mat_a = self.rand_matrix(2, 2)
mat_b = self.rand_matrix(2, 2)
mat_c = self.rand_matrix(2, 2)
op = Operator(mat)
op1 = Operator(mat_a)
op2 = Operator(np.kron(mat_b, mat_a))
op3 = Operator(np.kron(mat_c, np.kron(mat_b, mat_a)))
# op3 qargs=[0, 1, 2]
targ = np.dot(np.kron(mat_c, np.kron(mat_b, mat_a)), mat)
self.assertEqual(op.compose(op3, qargs=[0, 1, 2]), Operator(targ))
self.assertEqual(op.compose(op3([0, 1, 2])), Operator(targ))
self.assertEqual(op @ op3([0, 1, 2]), Operator(targ))
# op3 qargs=[2, 1, 0]
targ = np.dot(np.kron(mat_a, np.kron(mat_b, mat_c)), mat)
self.assertEqual(op.compose(op3, qargs=[2, 1, 0]), Operator(targ))
self.assertEqual(op @ op3([2, 1, 0]), Operator(targ))
# op2 qargs=[0, 1]
targ = np.dot(np.kron(np.eye(2), np.kron(mat_b, mat_a)), mat)
self.assertEqual(op.compose(op2, qargs=[0, 1]), Operator(targ))
self.assertEqual(op @ op2([0, 1]), Operator(targ))
# op2 qargs=[2, 0]
targ = np.dot(np.kron(mat_a, np.kron(np.eye(2), mat_b)), mat)
self.assertEqual(op.compose(op2, qargs=[2, 0]), Operator(targ))
self.assertEqual(op @ op2([2, 0]), Operator(targ))
# op1 qargs=[0]
targ = np.dot(np.kron(np.eye(4), mat_a), mat)
self.assertEqual(op.compose(op1, qargs=[0]), Operator(targ))
self.assertEqual(op @ op1([0]), Operator(targ))
# op1 qargs=[1]
targ = np.dot(np.kron(np.eye(2), np.kron(mat_a, np.eye(2))), mat)
self.assertEqual(op.compose(op1, qargs=[1]), Operator(targ))
self.assertEqual(op @ op1([1]), Operator(targ))
# op1 qargs=[2]
targ = np.dot(np.kron(mat_a, np.eye(4)), mat)
self.assertEqual(op.compose(op1, qargs=[2]), Operator(targ))
self.assertEqual(op @ op1([2]), Operator(targ))
def test_dot_subsystem(self):
"""Test subsystem dot method."""
# 3-qubit operator
mat = self.rand_matrix(8, 8)
mat_a = self.rand_matrix(2, 2)
mat_b = self.rand_matrix(2, 2)
mat_c = self.rand_matrix(2, 2)
op = Operator(mat)
op1 = Operator(mat_a)
op2 = Operator(np.kron(mat_b, mat_a))
op3 = Operator(np.kron(mat_c, np.kron(mat_b, mat_a)))
# op3 qargs=[0, 1, 2]
targ = np.dot(mat, np.kron(mat_c, np.kron(mat_b, mat_a)))
self.assertEqual(op.dot(op3, qargs=[0, 1, 2]), Operator(targ))
self.assertEqual(op * op3([0, 1, 2]), Operator(targ))
# op3 qargs=[2, 1, 0]
targ = np.dot(mat, np.kron(mat_a, np.kron(mat_b, mat_c)))
self.assertEqual(op.dot(op3, qargs=[2, 1, 0]), Operator(targ))
self.assertEqual(op * op3([2, 1, 0]), Operator(targ))
# op2 qargs=[0, 1]
targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_b, mat_a)))
self.assertEqual(op.dot(op2, qargs=[0, 1]), Operator(targ))
self.assertEqual(op * op2([0, 1]), Operator(targ))
# op2 qargs=[2, 0]
targ = np.dot(mat, np.kron(mat_a, np.kron(np.eye(2), mat_b)))
self.assertEqual(op.dot(op2, qargs=[2, 0]), Operator(targ))
self.assertEqual(op * op2([2, 0]), Operator(targ))
# op1 qargs=[0]
targ = np.dot(mat, np.kron(np.eye(4), mat_a))
self.assertEqual(op.dot(op1, qargs=[0]), Operator(targ))
self.assertEqual(op * op1([0]), Operator(targ))
# op1 qargs=[1]
targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_a, np.eye(2))))
self.assertEqual(op.dot(op1, qargs=[1]), Operator(targ))
self.assertEqual(op * op1([1]), Operator(targ))
# op1 qargs=[2]
targ = np.dot(mat, np.kron(mat_a, np.eye(4)))
self.assertEqual(op.dot(op1, qargs=[2]), Operator(targ))
self.assertEqual(op * op1([2]), Operator(targ))
def test_compose_front_subsystem(self):
"""Test subsystem front compose method."""
# 3-qubit operator
mat = self.rand_matrix(8, 8)
mat_a = self.rand_matrix(2, 2)
mat_b = self.rand_matrix(2, 2)
mat_c = self.rand_matrix(2, 2)
op = Operator(mat)
op1 = Operator(mat_a)
op2 = Operator(np.kron(mat_b, mat_a))
op3 = Operator(np.kron(mat_c, np.kron(mat_b, mat_a)))
# op3 qargs=[0, 1, 2]
targ = np.dot(mat, np.kron(mat_c, np.kron(mat_b, mat_a)))
self.assertEqual(op.compose(op3, qargs=[0, 1, 2], front=True), Operator(targ))
# op3 qargs=[2, 1, 0]
targ = np.dot(mat, np.kron(mat_a, np.kron(mat_b, mat_c)))
self.assertEqual(op.compose(op3, qargs=[2, 1, 0], front=True), Operator(targ))
# op2 qargs=[0, 1]
targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_b, mat_a)))
self.assertEqual(op.compose(op2, qargs=[0, 1], front=True), Operator(targ))
# op2 qargs=[2, 0]
targ = np.dot(mat, np.kron(mat_a, np.kron(np.eye(2), mat_b)))
self.assertEqual(op.compose(op2, qargs=[2, 0], front=True), Operator(targ))
# op1 qargs=[0]
targ = np.dot(mat, np.kron(np.eye(4), mat_a))
self.assertEqual(op.compose(op1, qargs=[0], front=True), Operator(targ))
# op1 qargs=[1]
targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_a, np.eye(2))))
self.assertEqual(op.compose(op1, qargs=[1], front=True), Operator(targ))
# op1 qargs=[2]
targ = np.dot(mat, np.kron(mat_a, np.eye(4)))
self.assertEqual(op.compose(op1, qargs=[2], front=True), Operator(targ))
def test_power(self):
"""Test power method."""
X90 = la.expm(-1j * 0.5 * np.pi * np.array([[0, 1], [1, 0]]) / 2)
op = Operator(X90)
self.assertEqual(op.power(2), Operator([[0, -1j], [-1j, 0]]))
self.assertEqual(op.power(4), Operator(-1 * np.eye(2)))
self.assertEqual(op.power(8), Operator(np.eye(2)))
def test_expand(self):
"""Test expand method."""
mat1 = self.UX
mat2 = np.eye(3, dtype=complex)
mat21 = np.kron(mat2, mat1)
op21 = Operator(mat1).expand(Operator(mat2))
self.assertEqual(op21.dim, (6, 6))
assert_allclose(op21.data, Operator(mat21).data)
mat12 = np.kron(mat1, mat2)
op12 = Operator(mat2).expand(Operator(mat1))
self.assertEqual(op12.dim, (6, 6))
assert_allclose(op12.data, Operator(mat12).data)
def test_tensor(self):
"""Test tensor method."""
mat1 = self.UX
mat2 = np.eye(3, dtype=complex)
mat21 = np.kron(mat2, mat1)
op21 = Operator(mat2).tensor(Operator(mat1))
self.assertEqual(op21.dim, (6, 6))
assert_allclose(op21.data, Operator(mat21).data)
mat12 = np.kron(mat1, mat2)
op12 = Operator(mat1).tensor(Operator(mat2))
self.assertEqual(op12.dim, (6, 6))
assert_allclose(op12.data, Operator(mat12).data)
def test_power_except(self):
"""Test power method raises exceptions."""
op = Operator(self.rand_matrix(3, 3))
# Non-integer power raises error
self.assertRaises(QiskitError, op.power, 0.5)
def test_add(self):
"""Test add method."""
mat1 = self.rand_matrix(4, 4)
mat2 = self.rand_matrix(4, 4)
op1 = Operator(mat1)
op2 = Operator(mat2)
self.assertEqual(op1._add(op2), Operator(mat1 + mat2))
self.assertEqual(op1 + op2, Operator(mat1 + mat2))
self.assertEqual(op1 - op2, Operator(mat1 - mat2))
def test_add_except(self):
"""Test add method raises exceptions."""
op1 = Operator(self.rand_matrix(2, 2))
op2 = Operator(self.rand_matrix(3, 3))
self.assertRaises(QiskitError, op1._add, op2)
def test_multiply(self):
"""Test multiply method."""
mat = self.rand_matrix(4, 4)
val = np.exp(5j)
op = Operator(mat)
self.assertEqual(op._multiply(val), Operator(val * mat))
self.assertEqual(val * op, Operator(val * mat))
def test_multiply_except(self):
"""Test multiply method raises exceptions."""
op = Operator(self.rand_matrix(2, 2))
self.assertRaises(QiskitError, op._multiply, 's')
self.assertRaises(QiskitError, op.__rmul__, 's')
self.assertRaises(QiskitError, op._multiply, op)
self.assertRaises(QiskitError, op.__rmul__, op)
def test_negate(self):
"""Test negate method"""
mat = self.rand_matrix(4, 4)
op = Operator(mat)
self.assertEqual(-op, Operator(-1 * mat))
    def test_equiv(self):
        """Test equiv method."""
mat = np.diag([1, np.exp(1j * np.pi / 2)])
phase = np.exp(-1j * np.pi / 4)
op = Operator(mat)
self.assertTrue(op.equiv(phase * mat))
self.assertTrue(op.equiv(Operator(phase * mat)))
self.assertFalse(op.equiv(2 * mat))
if __name__ == '__main__':
unittest.main()
| [((33, 9, 33, 36), 'logging.getLogger', 'logging.getLogger', ({(33, 27, 33, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((40, 9, 40, 18), 'numpy.eye', 'np.eye', ({(40, 16, 40, 17): '2'}, {}), '(2)', True, 'import numpy as np\n'), ((41, 9, 41, 35), 'numpy.array', 'np.array', ({(41, 18, 41, 34): '[[0, 1], [1, 0]]'}, {}), '([[0, 1], [1, 0]])', True, 'import numpy as np\n'), ((42, 9, 42, 38), 'numpy.array', 'np.array', ({(42, 18, 42, 37): '[[0, -1.0j], [1.0j, 0]]'}, {}), '([[0, -1.0j], [1.0j, 0]])', True, 'import numpy as np\n'), ((43, 9, 43, 25), 'numpy.diag', 'np.diag', ({(43, 17, 43, 24): '[1, -1]'}, {}), '([1, -1])', True, 'import numpy as np\n'), ((569, 4, 569, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((44, 9, 44, 36), 'numpy.array', 'np.array', ({(44, 18, 44, 35): '[[1, 1], [1, -1]]'}, {}), '([[1, 1], [1, -1]])', True, 'import numpy as np\n'), ((44, 39, 44, 49), 'numpy.sqrt', 'np.sqrt', ({(44, 47, 44, 48): '(2)'}, {}), '(2)', True, 'import numpy as np\n'), ((51, 14, 51, 41), 'numpy.random.RandomState', 'np.random.RandomState', ({(51, 36, 51, 40): 'seed'}, {}), '(seed)', True, 'import numpy as np\n'), ((55, 15, 55, 28), 'numpy.trace', 'np.trace', ({(55, 24, 55, 27): 'rho'}, {}), '(rho)', True, 'import numpy as np\n'), ((63, 14, 63, 41), 'numpy.random.RandomState', 'np.random.RandomState', ({(63, 36, 63, 40): 'seed'}, {}), '(seed)', True, 'import numpy as np\n'), ((73, 13, 73, 31), 'qiskit.QuantumRegister', 'QuantumRegister', ({(73, 29, 73, 30): '3'}, {}), '(3)', False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((74, 15, 74, 33), 'qiskit.QuantumCircuit', 'QuantumCircuit', ({(74, 30, 74, 32): 'qr'}, {}), '(qr)', False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((84, 13, 84, 31), 'qiskit.QuantumRegister', 'QuantumRegister', ({(84, 29, 84, 30): '2'}, {}), '(2)', False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((85, 13, 85, 33), 'qiskit.ClassicalRegister', 'ClassicalRegister', ({(85, 31, 85, 32): '2'}, {}), '(2)', False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((86, 15, 86, 37), 'qiskit.QuantumCircuit', 'QuantumCircuit', ({(86, 30, 86, 32): 'qr', (86, 34, 86, 36): 'cr'}, {}), '(qr, cr)', False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((100, 13, 100, 26), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(100, 22, 100, 25): 'mat'}, {}), '(mat)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((101, 8, 101, 37), 'numpy.testing.assert_allclose', 'assert_allclose', ({(101, 24, 101, 31): 'op.data', (101, 33, 101, 36): 'mat'}, {}), '(op.data, mat)', False, 'from numpy.testing import assert_allclose\n'), ((106, 13, 106, 55), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', (), '', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((107, 8, 107, 37), 'numpy.testing.assert_allclose', 'assert_allclose', ({(107, 24, 107, 31): 'op.data', (107, 33, 107, 36): 'mat'}, {}), '(op.data, mat)', False, 'from numpy.testing import assert_allclose\n'), ((114, 14, 114, 23), 'numpy.eye', 'np.eye', ({(114, 21, 114, 22): '3'}, {}), '(3)', True, 'import numpy as np\n'), ((115, 13, 115, 26), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(115, 22, 115, 25): 'mat'}, {}), '(mat)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((116, 8, 116, 37), 
'numpy.testing.assert_allclose', 'assert_allclose', ({(116, 24, 116, 31): 'op.data', (116, 33, 116, 36): 'mat'}, {}), '(op.data, mat)', False, 'from numpy.testing import assert_allclose\n'), ((122, 13, 122, 68), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', (), '', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((123, 8, 123, 37), 'numpy.testing.assert_allclose', 'assert_allclose', ({(123, 24, 123, 31): 'op.data', (123, 33, 123, 36): 'mat'}, {}), '(op.data, mat)', False, 'from numpy.testing import assert_allclose\n'), ((138, 14, 138, 27), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(138, 23, 138, 26): 'op1'}, {}), '(op1)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((144, 18, 144, 35), 'qiskit.QuantumCircuit', 'QuantumCircuit', ({(144, 33, 144, 34): '3'}, {}), '(3)', False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((148, 13, 148, 30), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(148, 22, 148, 29): 'circuit'}, {}), '(circuit)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((151, 34, 152, 47), 'qiskit.quantum_info.operators.predicates.matrix_equal', 'matrix_equal', (), '', False, 'from qiskit.quantum_info.operators.predicates import matrix_equal\n'), ((157, 18, 157, 35), 'qiskit.QuantumCircuit', 'QuantumCircuit', ({(157, 33, 157, 34): '2'}, {}), '(2)', False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((159, 13, 159, 30), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(159, 22, 159, 29): 'circuit'}, {}), '(circuit)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((161, 34, 162, 47), 'qiskit.quantum_info.operators.predicates.matrix_equal', 'matrix_equal', (), '', False, 'from qiskit.quantum_info.operators.predicates import matrix_equal\n'), ((166, 18, 166, 35), 'qiskit.QuantumCircuit', 'QuantumCircuit', ({(166, 33, 166, 34): '2'}, {}), '(2)', False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((168, 13, 168, 30), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(168, 22, 168, 29): 'circuit'}, {}), '(circuit)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((171, 34, 172, 47), 'qiskit.quantum_info.operators.predicates.matrix_equal', 'matrix_equal', (), '', False, 'from qiskit.quantum_info.operators.predicates import matrix_equal\n'), ((177, 15, 177, 23), 'qiskit.extensions.standard.CXGate', 'CXGate', ({}, {}), '()', False, 'from qiskit.extensions.standard import HGate, CHGate, CXGate\n'), ((180, 34, 180, 77), 'qiskit.quantum_info.operators.predicates.matrix_equal', 'matrix_equal', (), '', False, 'from qiskit.quantum_info.operators.predicates import matrix_equal\n'), ((183, 15, 183, 23), 'qiskit.extensions.standard.CHGate', 'CHGate', ({}, {}), '()', False, 'from qiskit.extensions.standard import HGate, CHGate, CXGate\n'), ((188, 34, 188, 77), 'qiskit.quantum_info.operators.predicates.matrix_equal', 'matrix_equal', (), '', False, 'from qiskit.quantum_info.operators.predicates import matrix_equal\n'), ((208, 13, 208, 26), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(208, 22, 208, 25): 'mat'}, {}), '(mat)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((209, 8, 209, 37), 'numpy.testing.assert_allclose', 'assert_allclose', ({(209, 24, 209, 27): 'mat', (209, 29, 209, 36): 'op.data'}, {}), '(mat, op.data)', False, 'from 
numpy.testing import assert_allclose\n'), ((256, 14, 256, 23), 'numpy.eye', 'np.eye', ({(256, 21, 256, 22): '2'}, {}), '(2)', True, 'import numpy as np\n'), ((286, 13, 286, 39), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(286, 22, 286, 38): 'matr + 1.0j * mati'}, {}), '(matr + 1.0j * mati)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((294, 13, 294, 39), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(294, 22, 294, 38): 'matr + 1.0j * mati'}, {}), '(matr + 1.0j * mati)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((302, 13, 302, 39), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(302, 22, 302, 38): 'matr + 1.0j * mati'}, {}), '(matr + 1.0j * mati)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((316, 14, 316, 31), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(316, 23, 316, 30): 'self.UX'}, {}), '(self.UX)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((317, 14, 317, 31), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(317, 23, 317, 30): 'self.UY'}, {}), '(self.UY)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((329, 14, 329, 31), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(329, 23, 329, 30): 'self.UY'}, {}), '(self.UY)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((330, 14, 330, 31), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(330, 23, 330, 30): 'self.UX'}, {}), '(self.UX)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((344, 16, 344, 40), 'numpy.dot', 'np.dot', ({(344, 23, 344, 30): 'self.UY', (344, 32, 344, 39): 'self.UX'}, {}), '(self.UY, self.UX)', True, 'import numpy as np\n'), ((348, 16, 348, 40), 'numpy.dot', 'np.dot', ({(348, 23, 348, 30): 'self.UX', (348, 32, 348, 39): 'self.UY'}, {}), '(self.UX, self.UY)', True, 'import numpy as np\n'), ((358, 13, 358, 26), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(358, 22, 358, 25): 'mat'}, {}), '(mat)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((359, 14, 359, 29), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(359, 23, 359, 28): 'mat_a'}, {}), '(mat_a)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((402, 13, 402, 26), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(402, 22, 402, 25): 'mat'}, {}), '(mat)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((403, 14, 403, 29), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(403, 23, 403, 28): 'mat_a'}, {}), '(mat_a)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((445, 13, 445, 26), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(445, 22, 445, 25): 'mat'}, {}), '(mat)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((446, 14, 446, 29), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(446, 23, 446, 28): 'mat_a'}, {}), '(mat_a)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((479, 13, 479, 26), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(479, 22, 479, 25): 'X90'}, {}), '(X90)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((487, 15, 487, 39), 'numpy.eye', 'np.eye', (), '', True, 'import numpy as np\n'), ((489, 16, 489, 35), 
'numpy.kron', 'np.kron', ({(489, 24, 489, 28): 'mat2', (489, 30, 489, 34): 'mat1'}, {}), '(mat2, mat1)', True, 'import numpy as np\n'), ((494, 16, 494, 35), 'numpy.kron', 'np.kron', ({(494, 24, 494, 28): 'mat1', (494, 30, 494, 34): 'mat2'}, {}), '(mat1, mat2)', True, 'import numpy as np\n'), ((502, 15, 502, 39), 'numpy.eye', 'np.eye', (), '', True, 'import numpy as np\n'), ((504, 16, 504, 35), 'numpy.kron', 'np.kron', ({(504, 24, 504, 28): 'mat2', (504, 30, 504, 34): 'mat1'}, {}), '(mat2, mat1)', True, 'import numpy as np\n'), ((509, 16, 509, 35), 'numpy.kron', 'np.kron', ({(509, 24, 509, 28): 'mat1', (509, 30, 509, 34): 'mat2'}, {}), '(mat1, mat2)', True, 'import numpy as np\n'), ((524, 14, 524, 28), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(524, 23, 524, 27): 'mat1'}, {}), '(mat1)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((525, 14, 525, 28), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(525, 23, 525, 27): 'mat2'}, {}), '(mat2)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((539, 14, 539, 24), 'numpy.exp', 'np.exp', ({(539, 21, 539, 23): '5.0j'}, {}), '(5.0j)', True, 'import numpy as np\n'), ((540, 13, 540, 26), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(540, 22, 540, 25): 'mat'}, {}), '(mat)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((555, 13, 555, 26), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(555, 22, 555, 25): 'mat'}, {}), '(mat)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((561, 16, 561, 39), 'numpy.exp', 'np.exp', ({(561, 23, 561, 38): '-1.0j * np.pi / 4'}, {}), '(-1.0j * np.pi / 4)', True, 'import numpy as np\n'), ((562, 13, 562, 26), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(562, 22, 562, 25): 'mat'}, {}), '(mat)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((78, 33, 78, 60), 'numpy.array', 'np.array', ({(78, 42, 78, 59): '[[1, -1], [1, 1]]'}, {}), '([[1, -1], [1, 1]])', True, 'import numpy as np\n'), ((149, 33, 149, 60), 'numpy.array', 'np.array', ({(149, 42, 149, 59): '[[1, -1], [1, 1]]'}, {}), '([[1, -1], [1, 1]])', True, 'import numpy as np\n'), ((150, 30, 150, 55), 'numpy.kron', 'np.kron', ({(150, 38, 150, 45): 'self.UX', (150, 47, 150, 54): 'self.UH'}, {}), '(self.UX, self.UH)', True, 'import numpy as np\n'), ((178, 13, 178, 27), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(178, 22, 178, 26): 'gate'}, {}), '(gate)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((184, 13, 184, 27), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(184, 22, 184, 26): 'gate'}, {}), '(gate)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((200, 25, 200, 38), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(200, 34, 200, 37): 'mat'}, {}), '(mat)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((203, 25, 203, 38), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(203, 34, 203, 37): 'mat'}, {}), '(mat)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((258, 19, 258, 32), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(258, 28, 258, 31): 'mat'}, {}), '(mat)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((263, 19, 263, 32), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(263, 28, 263, 31): 
'mat'}, {}), '(mat)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((264, 20, 264, 35), 'copy.copy', 'copy.copy', ({(264, 30, 264, 34): 'orig'}, {}), '(orig)', False, 'import copy\n'), ((288, 35, 288, 61), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(288, 44, 288, 60): '(matr - 1.0j * mati)'}, {}), '(matr - 1.0j * mati)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((296, 32, 296, 62), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(296, 41, 296, 61): '(matr.T + 1.0j * mati.T)'}, {}), '(matr.T + 1.0j * mati.T)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((304, 34, 304, 64), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(304, 43, 304, 63): '(matr.T - 1.0j * mati.T)'}, {}), '(matr.T - 1.0j * mati.T)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((319, 24, 319, 48), 'numpy.dot', 'np.dot', ({(319, 31, 319, 38): 'self.UY', (319, 40, 319, 47): 'self.UX'}, {}), '(self.UY, self.UX)', True, 'import numpy as np\n'), ((323, 24, 323, 48), 'numpy.dot', 'np.dot', ({(323, 31, 323, 38): 'self.UX', (323, 40, 323, 47): 'self.UY'}, {}), '(self.UX, self.UY)', True, 'import numpy as np\n'), ((332, 24, 332, 48), 'numpy.dot', 'np.dot', ({(332, 31, 332, 38): 'self.UY', (332, 40, 332, 47): 'self.UX'}, {}), '(self.UY, self.UX)', True, 'import numpy as np\n'), ((336, 24, 336, 48), 'numpy.dot', 'np.dot', ({(336, 31, 336, 38): 'self.UX', (336, 40, 336, 47): 'self.UY'}, {}), '(self.UX, self.UY)', True, 'import numpy as np\n'), ((343, 41, 343, 58), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(343, 50, 343, 57): 'self.UX'}, {}), '(self.UX)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((345, 31, 345, 46), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(345, 40, 345, 45): 'matYX'}, {}), '(matYX)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((347, 41, 347, 58), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(347, 50, 347, 57): 'self.UY'}, {}), '(self.UY)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((349, 31, 349, 46), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(349, 40, 349, 45): 'matXY'}, {}), '(matXY)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((360, 23, 360, 44), 'numpy.kron', 'np.kron', ({(360, 31, 360, 36): 'mat_b', (360, 38, 360, 43): 'mat_a'}, {}), '(mat_b, mat_a)', True, 'import numpy as np\n'), ((365, 59, 365, 73), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(365, 68, 365, 72): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((366, 53, 366, 67), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(366, 62, 366, 66): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((367, 46, 367, 60), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(367, 55, 367, 59): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((370, 59, 370, 73), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(370, 68, 370, 72): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((371, 46, 371, 60), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(371, 55, 371, 59): 'targ'}, {}), '(targ)', False, 'from 
qiskit.quantum_info.operators.operator import Operator\n'), ((375, 56, 375, 70), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(375, 65, 375, 69): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((376, 43, 376, 57), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(376, 52, 376, 56): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((379, 56, 379, 70), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(379, 65, 379, 69): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((380, 43, 380, 57), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(380, 52, 380, 56): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((384, 53, 384, 67), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(384, 62, 384, 66): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((385, 40, 385, 54), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(385, 49, 385, 53): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((388, 53, 388, 67), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(388, 62, 388, 66): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((389, 40, 389, 54), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(389, 49, 389, 53): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((392, 53, 392, 67), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(392, 62, 392, 66): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((393, 40, 393, 54), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(393, 49, 393, 53): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((404, 23, 404, 44), 'numpy.kron', 'np.kron', ({(404, 31, 404, 36): 'mat_b', (404, 38, 404, 43): 'mat_a'}, {}), '(mat_b, mat_a)', True, 'import numpy as np\n'), ((409, 55, 409, 69), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(409, 64, 409, 68): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((410, 46, 410, 60), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(410, 55, 410, 59): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((413, 55, 413, 69), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(413, 64, 413, 68): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((414, 46, 414, 60), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(414, 55, 414, 59): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((418, 52, 418, 66), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(418, 61, 418, 65): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((419, 43, 419, 57), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(419, 52, 419, 56): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((422, 52, 422, 66), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', 
({(422, 61, 422, 65): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((423, 43, 423, 57), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(423, 52, 423, 56): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((427, 49, 427, 63), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(427, 58, 427, 62): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((428, 40, 428, 54), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(428, 49, 428, 53): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((431, 49, 431, 63), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(431, 58, 431, 62): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((432, 40, 432, 54), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(432, 49, 432, 53): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((435, 49, 435, 63), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(435, 58, 435, 62): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((436, 40, 436, 54), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(436, 49, 436, 53): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((447, 23, 447, 44), 'numpy.kron', 'np.kron', ({(447, 31, 447, 36): 'mat_b', (447, 38, 447, 43): 'mat_a'}, {}), '(mat_b, mat_a)', True, 'import numpy as np\n'), ((452, 71, 452, 85), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(452, 80, 452, 84): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((455, 71, 455, 85), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(455, 80, 455, 84): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((459, 68, 459, 82), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(459, 77, 459, 81): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((462, 68, 462, 82), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(462, 77, 462, 81): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((466, 65, 466, 79), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(466, 74, 466, 78): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((470, 65, 470, 79), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(470, 74, 470, 78): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((474, 65, 474, 79), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(474, 74, 474, 78): 'targ'}, {}), '(targ)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((480, 38, 480, 68), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(480, 47, 480, 67): '[[0, -1.0j], [-1.0j, 0]]'}, {}), '([[0, -1.0j], [-1.0j, 0]])', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((490, 37, 490, 51), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(490, 46, 490, 50): 'mat2'}, {}), '(mat2)', False, 'from qiskit.quantum_info.operators.operator import 
Operator\n'), ((495, 37, 495, 51), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(495, 46, 495, 50): 'mat1'}, {}), '(mat1)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((505, 37, 505, 51), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(505, 46, 505, 50): 'mat1'}, {}), '(mat1)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((510, 37, 510, 51), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(510, 46, 510, 50): 'mat2'}, {}), '(mat2)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((526, 40, 526, 61), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(526, 49, 526, 60): '(mat1 + mat2)'}, {}), '(mat1 + mat2)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((527, 36, 527, 57), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(527, 45, 527, 56): '(mat1 + mat2)'}, {}), '(mat1 + mat2)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((528, 36, 528, 57), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(528, 45, 528, 56): '(mat1 - mat2)'}, {}), '(mat1 - mat2)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((541, 44, 541, 63), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(541, 53, 541, 62): '(val * mat)'}, {}), '(val * mat)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((542, 35, 542, 54), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(542, 44, 542, 53): '(val * mat)'}, {}), '(val * mat)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((556, 30, 556, 48), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(556, 39, 556, 47): '(-1 * mat)'}, {}), '(-1 * mat)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((49, 36, 49, 54), 'numpy.iinfo', 'np.iinfo', ({(49, 45, 49, 53): 'np.int32'}, {}), '(np.int32)', True, 'import numpy as np\n'), ((61, 36, 61, 54), 'numpy.iinfo', 'np.iinfo', ({(61, 45, 61, 53): 'np.int32'}, {}), '(np.int32)', True, 'import numpy as np\n'), ((78, 19, 78, 29), 'numpy.sqrt', 'np.sqrt', ({(78, 27, 78, 28): '(2)'}, {}), '(2)', True, 'import numpy as np\n'), ((79, 39, 79, 64), 'numpy.kron', 'np.kron', ({(79, 47, 79, 54): 'self.UX', (79, 56, 79, 63): 'self.UH'}, {}), '(self.UX, self.UH)', True, 'import numpy as np\n'), ((149, 19, 149, 29), 'numpy.sqrt', 'np.sqrt', ({(149, 27, 149, 28): '(2)'}, {}), '(2)', True, 'import numpy as np\n'), ((160, 35, 160, 51), 'numpy.exp', 'np.exp', ({(160, 42, 160, 50): '1.0j * lam'}, {}), '(1.0j * lam)', True, 'import numpy as np\n'), ((169, 34, 169, 49), 'numpy.diag', 'np.diag', ({(169, 42, 169, 48): '[1, 0]'}, {}), '([1, 0])', True, 'import numpy as np\n'), ((170, 21, 170, 36), 'numpy.diag', 'np.diag', ({(170, 29, 170, 35): '[0, 1]'}, {}), '([0, 1])', True, 'import numpy as np\n'), ((185, 14, 185, 21), 'qiskit.extensions.standard.HGate', 'HGate', ({}, {}), '()', False, 'from qiskit.extensions.standard import HGate, CHGate, CXGate\n'), ((186, 30, 186, 45), 'numpy.diag', 'np.diag', ({(186, 38, 186, 44): '[0, 1]'}, {}), '([0, 1])', True, 'import numpy as np\n'), ((187, 12, 187, 21), 'numpy.eye', 'np.eye', ({(187, 19, 187, 20): '(2)'}, {}), '(2)', True, 'import numpy as np\n'), ((187, 23, 187, 38), 'numpy.diag', 'np.diag', ({(187, 31, 187, 37): '[1, 0]'}, {}), '([1, 0])', True, 'import numpy as np\n'), ((199, 34, 199, 62), 'numpy.array', 'np.array', (), '', True, 
'import numpy as np\n'), ((214, 25, 214, 38), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(214, 34, 214, 37): 'mat'}, {}), '(mat)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((215, 25, 215, 71), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', (), '', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((216, 25, 216, 77), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', (), '', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((310, 35, 310, 44), 'numpy.eye', 'np.eye', ({(310, 42, 310, 43): '(3)'}, {}), '(3)', True, 'import numpy as np\n'), ((343, 15, 343, 32), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(343, 24, 343, 31): 'self.UY'}, {}), '(self.UY)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((347, 15, 347, 32), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(347, 24, 347, 31): 'self.UX'}, {}), '(self.UX)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((361, 38, 361, 59), 'numpy.kron', 'np.kron', ({(361, 46, 361, 51): 'mat_b', (361, 53, 361, 58): 'mat_a'}, {}), '(mat_b, mat_a)', True, 'import numpy as np\n'), ((364, 37, 364, 58), 'numpy.kron', 'np.kron', ({(364, 45, 364, 50): 'mat_b', (364, 52, 364, 57): 'mat_a'}, {}), '(mat_b, mat_a)', True, 'import numpy as np\n'), ((369, 37, 369, 58), 'numpy.kron', 'np.kron', ({(369, 45, 369, 50): 'mat_b', (369, 52, 369, 57): 'mat_c'}, {}), '(mat_b, mat_c)', True, 'import numpy as np\n'), ((374, 30, 374, 39), 'numpy.eye', 'np.eye', ({(374, 37, 374, 38): '2'}, {}), '(2)', True, 'import numpy as np\n'), ((374, 41, 374, 62), 'numpy.kron', 'np.kron', ({(374, 49, 374, 54): 'mat_b', (374, 56, 374, 61): 'mat_a'}, {}), '(mat_b, mat_a)', True, 'import numpy as np\n'), ((383, 30, 383, 39), 'numpy.eye', 'np.eye', ({(383, 37, 383, 38): '4'}, {}), '(4)', True, 'import numpy as np\n'), ((387, 30, 387, 39), 'numpy.eye', 'np.eye', ({(387, 37, 387, 38): '2'}, {}), '(2)', True, 'import numpy as np\n'), ((391, 37, 391, 46), 'numpy.eye', 'np.eye', ({(391, 44, 391, 45): '4'}, {}), '(4)', True, 'import numpy as np\n'), ((405, 38, 405, 59), 'numpy.kron', 'np.kron', ({(405, 46, 405, 51): 'mat_b', (405, 53, 405, 58): 'mat_a'}, {}), '(mat_b, mat_a)', True, 'import numpy as np\n'), ((408, 42, 408, 63), 'numpy.kron', 'np.kron', ({(408, 50, 408, 55): 'mat_b', (408, 57, 408, 62): 'mat_a'}, {}), '(mat_b, mat_a)', True, 'import numpy as np\n'), ((412, 42, 412, 63), 'numpy.kron', 'np.kron', ({(412, 50, 412, 55): 'mat_b', (412, 57, 412, 62): 'mat_c'}, {}), '(mat_b, mat_c)', True, 'import numpy as np\n'), ((417, 35, 417, 44), 'numpy.eye', 'np.eye', ({(417, 42, 417, 43): '2'}, {}), '(2)', True, 'import numpy as np\n'), ((417, 46, 417, 67), 'numpy.kron', 'np.kron', ({(417, 54, 417, 59): 'mat_b', (417, 61, 417, 66): 'mat_a'}, {}), '(mat_b, mat_a)', True, 'import numpy as np\n'), ((426, 35, 426, 44), 'numpy.eye', 'np.eye', ({(426, 42, 426, 43): '4'}, {}), '(4)', True, 'import numpy as np\n'), ((430, 35, 430, 44), 'numpy.eye', 'np.eye', ({(430, 42, 430, 43): '2'}, {}), '(2)', True, 'import numpy as np\n'), ((434, 42, 434, 51), 'numpy.eye', 'np.eye', ({(434, 49, 434, 50): '4'}, {}), '(4)', True, 'import numpy as np\n'), ((448, 38, 448, 59), 'numpy.kron', 'np.kron', ({(448, 46, 448, 51): 'mat_b', (448, 53, 448, 58): 'mat_a'}, {}), '(mat_b, mat_a)', True, 'import numpy as np\n'), ((451, 42, 451, 63), 'numpy.kron', 'np.kron', ({(451, 50, 451, 55): 'mat_b', (451, 57, 451, 
62): 'mat_a'}, {}), '(mat_b, mat_a)', True, 'import numpy as np\n'), ((454, 42, 454, 63), 'numpy.kron', 'np.kron', ({(454, 50, 454, 55): 'mat_b', (454, 57, 454, 62): 'mat_c'}, {}), '(mat_b, mat_c)', True, 'import numpy as np\n'), ((458, 35, 458, 44), 'numpy.eye', 'np.eye', ({(458, 42, 458, 43): '2'}, {}), '(2)', True, 'import numpy as np\n'), ((458, 46, 458, 67), 'numpy.kron', 'np.kron', ({(458, 54, 458, 59): 'mat_b', (458, 61, 458, 66): 'mat_a'}, {}), '(mat_b, mat_a)', True, 'import numpy as np\n'), ((465, 35, 465, 44), 'numpy.eye', 'np.eye', ({(465, 42, 465, 43): '4'}, {}), '(4)', True, 'import numpy as np\n'), ((469, 35, 469, 44), 'numpy.eye', 'np.eye', ({(469, 42, 469, 43): '2'}, {}), '(2)', True, 'import numpy as np\n'), ((473, 42, 473, 51), 'numpy.eye', 'np.eye', ({(473, 49, 473, 50): '4'}, {}), '(4)', True, 'import numpy as np\n'), ((482, 47, 482, 56), 'numpy.eye', 'np.eye', ({(482, 54, 482, 55): '(2)'}, {}), '(2)', True, 'import numpy as np\n'), ((490, 15, 490, 29), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(490, 24, 490, 28): 'mat1'}, {}), '(mat1)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((492, 35, 492, 50), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(492, 44, 492, 49): 'mat21'}, {}), '(mat21)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((495, 15, 495, 29), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(495, 24, 495, 28): 'mat2'}, {}), '(mat2)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((497, 35, 497, 50), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(497, 44, 497, 49): 'mat12'}, {}), '(mat12)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((505, 15, 505, 29), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(505, 24, 505, 28): 'mat2'}, {}), '(mat2)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((507, 35, 507, 50), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(507, 44, 507, 49): 'mat21'}, {}), '(mat21)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((510, 15, 510, 29), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(510, 24, 510, 28): 'mat1'}, {}), '(mat1)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((512, 35, 512, 50), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(512, 44, 512, 49): 'mat12'}, {}), '(mat12)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((560, 26, 560, 48), 'numpy.exp', 'np.exp', ({(560, 33, 560, 47): '1.0j * np.pi / 2'}, {}), '(1.0j * np.pi / 2)', True, 'import numpy as np\n'), ((564, 33, 564, 54), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(564, 42, 564, 53): '(phase * mat)'}, {}), '(phase * mat)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((271, 42, 271, 68), 'numpy.array', 'np.array', ({(271, 51, 271, 67): '[[0, 1], [1, 0]]'}, {}), '([[0, 1], [1, 0]])', True, 'import numpy as np\n'), ((272, 24, 272, 37), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(272, 33, 272, 36): 'X90'}, {}), '(X90)', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((274, 25, 274, 51), 'qiskit.quantum_info.operators.operator.Operator', 'Operator', ({(274, 34, 274, 50): '[[1, 0], [0, 0]]'}, {}), '([[1, 0], [0, 0]])', False, 'from qiskit.quantum_info.operators.operator import Operator\n'), ((309, 35, 
309, 44), 'numpy.eye', 'np.eye', ({(309, 42, 309, 43): '(2)'}, {}), '(2)', True, 'import numpy as np\n'), ((311, 48, 311, 57), 'numpy.eye', 'np.eye', ({(311, 55, 311, 56): '(2)'}, {}), '(2)', True, 'import numpy as np\n'), ((378, 45, 378, 54), 'numpy.eye', 'np.eye', ({(378, 52, 378, 53): '2'}, {}), '(2)', True, 'import numpy as np\n'), ((387, 56, 387, 65), 'numpy.eye', 'np.eye', ({(387, 63, 387, 64): '2'}, {}), '(2)', True, 'import numpy as np\n'), ((421, 50, 421, 59), 'numpy.eye', 'np.eye', ({(421, 57, 421, 58): '2'}, {}), '(2)', True, 'import numpy as np\n'), ((430, 61, 430, 70), 'numpy.eye', 'np.eye', ({(430, 68, 430, 69): '2'}, {}), '(2)', True, 'import numpy as np\n'), ((461, 50, 461, 59), 'numpy.eye', 'np.eye', ({(461, 57, 461, 58): '2'}, {}), '(2)', True, 'import numpy as np\n'), ((469, 61, 469, 70), 'numpy.eye', 'np.eye', ({(469, 68, 469, 69): '2'}, {}), '(2)', True, 'import numpy as np\n'), ((478, 42, 478, 68), 'numpy.array', 'np.array', ({(478, 51, 478, 67): '[[0, 1], [1, 0]]'}, {}), '([[0, 1], [1, 0]])', True, 'import numpy as np\n'), ((481, 52, 481, 61), 'numpy.eye', 'np.eye', ({(481, 59, 481, 60): '(2)'}, {}), '(2)', True, 'import numpy as np\n')] |
jack-skerrett-bluefruit/Python-ScreenPlay | pages/feature_modal.py | 045486bdf441fa3a7a6cde59e7b7e12a7d53fbed | from selenium.webdriver.common.by import By
class feature_modal:
    """Selenium locator tuples for the feature modal dialog."""

    title_textbox = (By.ID, "feature-name")
    description_textbox = (By.ID, "description")
    save_button = (By.XPATH, "/html/body/app/div[3]/div[2]/div/div/div/button[1]")
| [] |
CrookedY/AirPollutionBot | liststations.py | ce79037d6dddd1f297fce04a694b49f8b9a1bfad | from urllib2 import Request, urlopen, URLError
import json

# Fetch the list of monitoring stations from the UK-AIR SOS API.
request = Request('https://uk-air.defra.gov.uk/sos-ukair/api/v1/stations/')
try:
    response = urlopen(request)
    data = response.read()
except URLError, e:
    print 'error:', e
    raise SystemExit(1)

stations = json.loads(data)

# Pick the station at index 7 and pull out its ID so it can be used in the next URL.
station = stations[7]
properties = station[u'properties']
ID = properties[u'id']

# Fetch the properties of that station; the timeseries ID is nested inside them.
url = 'https://uk-air.defra.gov.uk/sos-ukair/api/v1/stations/' + str(ID)
request2 = Request(url)
try:
    response = urlopen(request2)
    data2 = response.read()
except URLError, e:
    print 'error:', e
    raise SystemExit(1)

station_prop_json = json.loads(data2)

# The timeseries ID is a dictionary key, so take the first key.
timeseries_ids = station_prop_json[u'properties'][u'timeseries'].keys()
i = timeseries_ids[0]

# Fetch the measurement data for that timeseries.
url2 = 'https://uk-air.defra.gov.uk/sos-ukair/api/v1/timeseries/' + str(i) + '/getData'
request3 = Request(url2)
try:
    response = urlopen(request3)
    data3 = response.read()
except URLError, e:
    print 'error:', e
    raise SystemExit(1)

print data3
| [] |
kmiller96/PyFinancials | pyfinancials/engine.py | 73a89b0fd3a3d501b8f8c770f73473e9a2d18fdf | def hello_world():
"""Tests the import."""
return "Hello world!"
| [] |
mertyildiran/echo | core/migrations/0002_auto_20180702_1913.py | 805db64e3fa9d31fd3c24390fac2e9bf7c91ad57 | # Generated by Django 2.0.6 on 2018-07-02 19:13
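# Renames Echo.owner to Echo.user and re-points the Echo.audio and Profile.picture
# FileFields at the upload_to callables defined in core.models.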
import core.models
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('core', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='echo',
            old_name='owner',
            new_name='user',
        ),
        migrations.AlterField(
            model_name='echo',
            name='audio',
            field=models.FileField(upload_to=core.models.echo_directory),
        ),
        migrations.AlterField(
            model_name='profile',
            name='picture',
            field=models.FileField(blank=True, null=True, upload_to=core.models.profile_directory),
        ),
    ]
| [((14, 8, 18, 9), 'django.db.migrations.RenameField', 'migrations.RenameField', (), '', False, 'from django.db import migrations, models\n'), ((22, 18, 22, 72), 'django.db.models.FileField', 'models.FileField', (), '', False, 'from django.db import migrations, models\n'), ((27, 18, 27, 98), 'django.db.models.FileField', 'models.FileField', (), '', False, 'from django.db import migrations, models\n')] |
ajdavis/aiohttp | tests/test_helpers.py | d5138978f3e82aa82a2f003b00d38112c58a40c1 | import pytest
from unittest import mock
from aiohttp import helpers
import datetime
def test_parse_mimetype_1():
assert helpers.parse_mimetype('') == ('', '', '', {})
def test_parse_mimetype_2():
assert helpers.parse_mimetype('*') == ('*', '*', '', {})
def test_parse_mimetype_3():
assert (helpers.parse_mimetype('application/json') ==
('application', 'json', '', {}))
def test_parse_mimetype_4():
assert (
helpers.parse_mimetype('application/json; charset=utf-8') ==
('application', 'json', '', {'charset': 'utf-8'}))
def test_parse_mimetype_5():
assert (
helpers.parse_mimetype('''application/json; charset=utf-8;''') ==
('application', 'json', '', {'charset': 'utf-8'}))
def test_parse_mimetype_6():
assert(
helpers.parse_mimetype('ApPlIcAtIoN/JSON;ChaRseT="UTF-8"') ==
('application', 'json', '', {'charset': 'UTF-8'}))
def test_parse_mimetype_7():
assert (
helpers.parse_mimetype('application/rss+xml') ==
('application', 'rss', 'xml', {}))
def test_parse_mimetype_8():
assert (
helpers.parse_mimetype('text/plain;base64') ==
('text', 'plain', '', {'base64': ''}))
def test_basic_auth1():
# missing password here
with pytest.raises(ValueError):
helpers.BasicAuth(None)
def test_basic_auth2():
with pytest.raises(ValueError):
helpers.BasicAuth('nkim', None)
def test_basic_auth3():
auth = helpers.BasicAuth('nkim')
assert auth.login == 'nkim'
assert auth.password == ''
def test_basic_auth4():
auth = helpers.BasicAuth('nkim', 'pwd')
assert auth.login == 'nkim'
assert auth.password == 'pwd'
assert auth.encode() == 'Basic bmtpbTpwd2Q='
def test_invalid_formdata_params():
with pytest.raises(TypeError):
helpers.FormData('asdasf')
def test_invalid_formdata_params2():
with pytest.raises(TypeError):
helpers.FormData('as') # 2-char str is not allowed
def test_invalid_formdata_content_type():
form = helpers.FormData()
invalid_vals = [0, 0.1, {}, [], b'foo']
for invalid_val in invalid_vals:
with pytest.raises(TypeError):
form.add_field('foo', 'bar', content_type=invalid_val)
def test_invalid_formdata_filename():
form = helpers.FormData()
invalid_vals = [0, 0.1, {}, [], b'foo']
for invalid_val in invalid_vals:
with pytest.raises(TypeError):
form.add_field('foo', 'bar', filename=invalid_val)
def test_invalid_formdata_content_transfer_encoding():
form = helpers.FormData()
invalid_vals = [0, 0.1, {}, [], b'foo']
for invalid_val in invalid_vals:
with pytest.raises(TypeError):
form.add_field('foo',
'bar',
content_transfer_encoding=invalid_val)
def test_access_logger_format():
log_format = '%T {%{SPAM}e} "%{ETag}o" %X {X} %%P'
mock_logger = mock.Mock()
access_logger = helpers.AccessLogger(mock_logger, log_format)
expected = '%s {%s} "%s" %%X {X} %%%s'
assert expected == access_logger._log_format
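# Atoms exercised below: %a=remote address, %t=request start time, %P=pid, %l/%u are
# always '-', %r=request line, %s=status, %b=body length, %O=output length, and
# %T/%Tf/%D report the elapsed time in whole seconds / fractional seconds / microseconds.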
@mock.patch("aiohttp.helpers.datetime")
@mock.patch("os.getpid")
def test_access_logger_atoms(mock_getpid, mock_datetime):
utcnow = datetime.datetime(1843, 1, 1, 0, 0)
mock_datetime.datetime.utcnow.return_value = utcnow
mock_getpid.return_value = 42
log_format = '%a %t %P %l %u %r %s %b %O %T %Tf %D'
mock_logger = mock.Mock()
access_logger = helpers.AccessLogger(mock_logger, log_format)
message = mock.Mock(headers={}, method="GET", path="/path", version=(1, 1))
environ = {}
response = mock.Mock(headers={}, output_length=123,
body_length=42, status=200)
transport = mock.Mock()
transport.get_extra_info.return_value = ("127.0.0.2", 1234)
access_logger.log(message, environ, response, transport, 3.1415926)
assert not mock_logger.exception.called
expected = ('127.0.0.2 [01/Jan/1843:00:00:00 +0000] <42> - - '
'GET /path HTTP/1.1 200 42 123 3 3.141593 3141593')
mock_logger.info.assert_called_with(expected)
def test_access_logger_dicts():
log_format = '%{User-Agent}i %{Content-Length}o %{SPAM}e %{None}i'
mock_logger = mock.Mock()
access_logger = helpers.AccessLogger(mock_logger, log_format)
message = mock.Mock(headers={"USER-AGENT": "Mock/1.0"}, version=(1, 1))
environ = {"SPAM": "EGGS"}
response = mock.Mock(headers={"CONTENT-LENGTH": 123})
transport = mock.Mock()
transport.get_extra_info.return_value = ("127.0.0.2", 1234)
access_logger.log(message, environ, response, transport, 0.0)
assert not mock_logger.error.called
expected = 'Mock/1.0 123 EGGS -'
mock_logger.info.assert_called_with(expected)
def test_logger_no_message_and_environ():
mock_logger = mock.Mock()
mock_transport = mock.Mock()
mock_transport.get_extra_info.return_value = ("127.0.0.3", 0)
access_logger = helpers.AccessLogger(mock_logger, "%r %{FOOBAR}e")
access_logger.log(None, None, None, mock_transport, 0.0)
mock_logger.info.assert_called_with("- -")
def test_reify():
class A:
@helpers.reify
def prop(self):
return 1
a = A()
assert 1 == a.prop
def test_reify_class():
class A:
@helpers.reify
def prop(self):
"""Docstring."""
return 1
assert isinstance(A.prop, helpers.reify)
assert 'Docstring.' == A.prop.__doc__
def test_reify_assignment():
class A:
@helpers.reify
def prop(self):
return 1
a = A()
with pytest.raises(AttributeError):
a.prop = 123
def test_requote_uri_with_unquoted_percents():
# Ensure we handle unquoted percent signs in redirects.
bad_uri = 'http://example.com/fiz?buz=%ppicture'
quoted = 'http://example.com/fiz?buz=%25ppicture'
assert quoted == helpers.requote_uri(bad_uri)
def test_requote_uri_properly_requotes():
# Ensure requoting doesn't break expectations.
quoted = 'http://example.com/fiz?buz=%25ppicture'
assert quoted == helpers.requote_uri(quoted)
| [((118, 1, 118, 39), 'unittest.mock.patch', 'mock.patch', ({(118, 12, 118, 38): '"""aiohttp.helpers.datetime"""'}, {}), "('aiohttp.helpers.datetime')", False, 'from unittest import mock\n'), ((119, 1, 119, 24), 'unittest.mock.patch', 'mock.patch', ({(119, 12, 119, 23): '"""os.getpid"""'}, {}), "('os.getpid')", False, 'from unittest import mock\n'), ((62, 11, 62, 36), 'aiohttp.helpers.BasicAuth', 'helpers.BasicAuth', ({(62, 29, 62, 35): '"""nkim"""'}, {}), "('nkim')", False, 'from aiohttp import helpers\n'), ((68, 11, 68, 43), 'aiohttp.helpers.BasicAuth', 'helpers.BasicAuth', ({(68, 29, 68, 35): '"""nkim"""', (68, 37, 68, 42): '"""pwd"""'}, {}), "('nkim', 'pwd')", False, 'from aiohttp import helpers\n'), ((85, 11, 85, 29), 'aiohttp.helpers.FormData', 'helpers.FormData', ({}, {}), '()', False, 'from aiohttp import helpers\n'), ((93, 11, 93, 29), 'aiohttp.helpers.FormData', 'helpers.FormData', ({}, {}), '()', False, 'from aiohttp import helpers\n'), ((101, 11, 101, 29), 'aiohttp.helpers.FormData', 'helpers.FormData', ({}, {}), '()', False, 'from aiohttp import helpers\n'), ((112, 18, 112, 29), 'unittest.mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'from unittest import mock\n'), ((113, 20, 113, 65), 'aiohttp.helpers.AccessLogger', 'helpers.AccessLogger', ({(113, 41, 113, 52): 'mock_logger', (113, 54, 113, 64): 'log_format'}, {}), '(mock_logger, log_format)', False, 'from aiohttp import helpers\n'), ((121, 13, 121, 48), 'datetime.datetime', 'datetime.datetime', ({(121, 31, 121, 35): '1843', (121, 37, 121, 38): '1', (121, 40, 121, 41): '1', (121, 43, 121, 44): '0', (121, 46, 121, 47): '0'}, {}), '(1843, 1, 1, 0, 0)', False, 'import datetime\n'), ((125, 18, 125, 29), 'unittest.mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'from unittest import mock\n'), ((126, 20, 126, 65), 'aiohttp.helpers.AccessLogger', 'helpers.AccessLogger', ({(126, 41, 126, 52): 'mock_logger', (126, 54, 126, 64): 'log_format'}, {}), '(mock_logger, log_format)', False, 'from aiohttp import helpers\n'), ((127, 14, 127, 79), 'unittest.mock.Mock', 'mock.Mock', (), '', False, 'from unittest import mock\n'), ((129, 15, 130, 52), 'unittest.mock.Mock', 'mock.Mock', (), '', False, 'from unittest import mock\n'), ((131, 16, 131, 27), 'unittest.mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'from unittest import mock\n'), ((142, 18, 142, 29), 'unittest.mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'from unittest import mock\n'), ((143, 20, 143, 65), 'aiohttp.helpers.AccessLogger', 'helpers.AccessLogger', ({(143, 41, 143, 52): 'mock_logger', (143, 54, 143, 64): 'log_format'}, {}), '(mock_logger, log_format)', False, 'from aiohttp import helpers\n'), ((144, 14, 144, 75), 'unittest.mock.Mock', 'mock.Mock', (), '', False, 'from unittest import mock\n'), ((146, 15, 146, 57), 'unittest.mock.Mock', 'mock.Mock', (), '', False, 'from unittest import mock\n'), ((147, 16, 147, 27), 'unittest.mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'from unittest import mock\n'), ((156, 18, 156, 29), 'unittest.mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'from unittest import mock\n'), ((157, 21, 157, 32), 'unittest.mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'from unittest import mock\n'), ((159, 20, 159, 70), 'aiohttp.helpers.AccessLogger', 'helpers.AccessLogger', ({(159, 41, 159, 52): 'mock_logger', (159, 54, 159, 69): '"""%r %{FOOBAR}e"""'}, {}), "(mock_logger, '%r %{FOOBAR}e')", False, 'from aiohttp import helpers\n'), ((8, 11, 8, 37), 'aiohttp.helpers.parse_mimetype', 'helpers.parse_mimetype', ({(8, 34, 8, 36): '""""""'}, {}), 
"('')", False, 'from aiohttp import helpers\n'), ((12, 11, 12, 38), 'aiohttp.helpers.parse_mimetype', 'helpers.parse_mimetype', ({(12, 34, 12, 37): '"""*"""'}, {}), "('*')", False, 'from aiohttp import helpers\n'), ((16, 12, 16, 54), 'aiohttp.helpers.parse_mimetype', 'helpers.parse_mimetype', ({(16, 35, 16, 53): '"""application/json"""'}, {}), "('application/json')", False, 'from aiohttp import helpers\n'), ((22, 8, 22, 66), 'aiohttp.helpers.parse_mimetype', 'helpers.parse_mimetype', ({(22, 31, 22, 65): '"""application/json; charset=utf-8"""'}, {}), "('application/json; charset=utf-8')", False, 'from aiohttp import helpers\n'), ((28, 8, 28, 70), 'aiohttp.helpers.parse_mimetype', 'helpers.parse_mimetype', ({(28, 31, 28, 69): '"""application/json; charset=utf-8;"""'}, {}), "('application/json; charset=utf-8;')", False, 'from aiohttp import helpers\n'), ((34, 8, 34, 66), 'aiohttp.helpers.parse_mimetype', 'helpers.parse_mimetype', ({(34, 31, 34, 65): '"""ApPlIcAtIoN/JSON;ChaRseT="UTF-8\\""""'}, {}), '(\'ApPlIcAtIoN/JSON;ChaRseT="UTF-8"\')', False, 'from aiohttp import helpers\n'), ((40, 8, 40, 53), 'aiohttp.helpers.parse_mimetype', 'helpers.parse_mimetype', ({(40, 31, 40, 52): '"""application/rss+xml"""'}, {}), "('application/rss+xml')", False, 'from aiohttp import helpers\n'), ((46, 8, 46, 51), 'aiohttp.helpers.parse_mimetype', 'helpers.parse_mimetype', ({(46, 31, 46, 50): '"""text/plain;base64"""'}, {}), "('text/plain;base64')", False, 'from aiohttp import helpers\n'), ((52, 9, 52, 34), 'pytest.raises', 'pytest.raises', ({(52, 23, 52, 33): 'ValueError'}, {}), '(ValueError)', False, 'import pytest\n'), ((53, 8, 53, 31), 'aiohttp.helpers.BasicAuth', 'helpers.BasicAuth', ({(53, 26, 53, 30): 'None'}, {}), '(None)', False, 'from aiohttp import helpers\n'), ((57, 9, 57, 34), 'pytest.raises', 'pytest.raises', ({(57, 23, 57, 33): 'ValueError'}, {}), '(ValueError)', False, 'import pytest\n'), ((58, 8, 58, 39), 'aiohttp.helpers.BasicAuth', 'helpers.BasicAuth', ({(58, 26, 58, 32): '"""nkim"""', (58, 34, 58, 38): 'None'}, {}), "('nkim', None)", False, 'from aiohttp import helpers\n'), ((75, 9, 75, 33), 'pytest.raises', 'pytest.raises', ({(75, 23, 75, 32): 'TypeError'}, {}), '(TypeError)', False, 'import pytest\n'), ((76, 8, 76, 34), 'aiohttp.helpers.FormData', 'helpers.FormData', ({(76, 25, 76, 33): '"""asdasf"""'}, {}), "('asdasf')", False, 'from aiohttp import helpers\n'), ((80, 9, 80, 33), 'pytest.raises', 'pytest.raises', ({(80, 23, 80, 32): 'TypeError'}, {}), '(TypeError)', False, 'import pytest\n'), ((81, 8, 81, 30), 'aiohttp.helpers.FormData', 'helpers.FormData', ({(81, 25, 81, 29): '"""as"""'}, {}), "('as')", False, 'from aiohttp import helpers\n'), ((193, 9, 193, 38), 'pytest.raises', 'pytest.raises', ({(193, 23, 193, 37): 'AttributeError'}, {}), '(AttributeError)', False, 'import pytest\n'), ((201, 21, 201, 49), 'aiohttp.helpers.requote_uri', 'helpers.requote_uri', ({(201, 41, 201, 48): 'bad_uri'}, {}), '(bad_uri)', False, 'from aiohttp import helpers\n'), ((207, 21, 207, 48), 'aiohttp.helpers.requote_uri', 'helpers.requote_uri', ({(207, 41, 207, 47): 'quoted'}, {}), '(quoted)', False, 'from aiohttp import helpers\n'), ((88, 13, 88, 37), 'pytest.raises', 'pytest.raises', ({(88, 27, 88, 36): 'TypeError'}, {}), '(TypeError)', False, 'import pytest\n'), ((96, 13, 96, 37), 'pytest.raises', 'pytest.raises', ({(96, 27, 96, 36): 'TypeError'}, {}), '(TypeError)', False, 'import pytest\n'), ((104, 13, 104, 37), 'pytest.raises', 'pytest.raises', ({(104, 27, 104, 36): 'TypeError'}, {}), '(TypeError)', 
False, 'import pytest\n')] |
truls/faas-profiler | GenConfigs.py | d54ca0d9926f38c693f616ba4d08414aea823f51 | from os.path import join
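# Filesystem locations used by the profiler scripts; FAAS_ROOT and OPENWHISK_PATH
# point at local checkouts and typically need adjusting per machine.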
FAAS_ROOT = "/lhome/trulsas/faas-profiler"
WORKLOAD_SPECS = join(FAAS_ROOT, "specs", "workloads")
# FAAS_ROOT = "/home/truls/uni/phd/faas-profiler"
WSK_PATH = "wsk"
OPENWHISK_PATH = "/lhome/trulsas/openwhisk"

#: Location of output data
DATA_DIR = join(FAAS_ROOT, "..", "profiler_results")
SYSTEM_CPU_SET = "0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30"
| [((4, 15, 4, 52), 'os.path.join', 'join', ({(4, 20, 4, 29): 'FAAS_ROOT', (4, 31, 4, 38): '"""specs"""', (4, 40, 4, 51): '"""workloads"""'}, {}), "(FAAS_ROOT, 'specs', 'workloads')", False, 'from os.path import join\n'), ((10, 11, 10, 52), 'os.path.join', 'join', ({(10, 16, 10, 25): 'FAAS_ROOT', (10, 27, 10, 31): '""".."""', (10, 33, 10, 51): '"""profiler_results"""'}, {}), "(FAAS_ROOT, '..', 'profiler_results')", False, 'from os.path import join\n')] |
LuisPereda/Learning_Python | Chapter09/calc.py | e89e69346c5584be10d991010f39b59329793ba5 |
def sum1(a, b):
    try:
        c = a + b
        return c
    except Exception:
        print "Error in sum1 function"

def divide(a, b):
    try:
        c = a / b
        return c
    except Exception:
        print "Error in divide function"

# divide(10, 0) hits the exception handler, prints the error message and returns None.
print divide(10, 0)
print sum1(10,0) | [] |