#!/usr/bin/env python
"""
Copyright (C) 2014 Ivan Gregor
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Note that we could have written some parts of this code in a nicer way,
but didn't have time. Be careful when reusing the source code.
"""
import sys
import os
import re
import subprocess
import glob
from algbioi.com.config import Config
from algbioi.com.csv import forEachLine
from algbioi.com.csv import OutFileBuffer
from algbioi.com.csv import getMapping
from algbioi.com.fasta import fastaFileToDict
class MGCluster():
""" Main class """
def __init__(self, config, mgWorkingDir, s16Prefix, sequences, taxonomy, sampleName):
self._config = config
self._mgWorkingDir = mgWorkingDir
self._taxonomicRanks = config.get('taxonomicRanks').split(',')
self._minBpToModel = int(config.get('minBpToModel'))
self._clustDir = os.path.normpath(os.path.join(mgWorkingDir,'clust'))
self._s16Prefix = s16Prefix
self._sequences = sequences
self._mgToDM = None
self._mgToCluster = None
self._taxonomy = taxonomy
self._sampleName = sampleName
self._mgList = None
self._mgToMaxThreshold = None
self._seqIdToTaxPathDict = None
self._seqIdToWeight = None
self._initDone = False
if not os.path.exists(self._clustDir):
try:
os.mkdir(self._clustDir)
except OSError:
print 'Cannot create directory:', self._clustDir
raise
#---------------------------------------------------------------------------------
def _init(self, align=True, dm=True, cluster=True):
"""
Init data, compute: alignment, distance matrix, clusters.
"""
if self._initDone:
return
self._initDone = True
fastaPathList = [] # fasta files containing regions that correspond to particular marker genes
self._mgList = [] # list of names of marker genes
mgToFastaPath = dict([]) # marker gene name -> fasta file path
#collect regions from Amphora mg
for fastaFile in glob.glob(os.path.join(os.path.normpath(self._mgWorkingDir),'*.gff')):
fastaPathList.append(fastaFile)
for path in fastaPathList:
name = re.sub('([^\.]+)\..*$', r'\1' , os.path.basename(path))
mg = re.sub(r'([^_]+)_dna', r'\1',name)
dir = os.path.dirname(path)
self._mgList.append(mg)
mgToFastaPath[mg] = path
#add 16S
s16List = ['5S_rRNA', '16S_rRNA', '23S_rRNA']
for mg in s16List:
mgToFastaPath[mg] = str(self._s16Prefix + '.' + mg + '.fna')
self._mgList.append(mg)
#For each marker gene, create a filtered fasta file that contains at most one region per sequence.
mgToFilteredFastaPath = dict([])
mgToSeqNameToTaxPathDict = dict([]) #mg -> seqName (~region name) -> pred
for mg in self._mgList:
mgToSeqNameToTaxPathDict[mg] = dict([])
for seq in self._sequences.sequences:
id = str(str(seq.scaffold.id) + '_' + str(seq.id))
for mg,tag,pred in zip(seq.getCandidateTaxPathSourceList(), seq.getCandidateTaxPathTagList(),
seq.getCandidateTaxPathDictList()):
mgToSeqNameToTaxPathDict[mg][str(id + '_' + tag)] = pred
#for each marker gene: choose only one sequence region for each mg and sequence
#all sequences are predicted at least at superkingdom
for mg in self._mgList:
seqNameToPred = mgToSeqNameToTaxPathDict[mg] #sequence region predictions for this mg
seqNameToSeq = fastaFileToDict(mgToFastaPath[mg]) #read the fasta file
outPath = os.path.normpath(os.path.join(self._clustDir, str(mg + '.filter.fna')))
mgToFilteredFastaPath[mg] = outPath
out = OutFileBuffer(outPath)
seqBaseToSeqName = dict([]) # sequence base (scaffId_seqId) -> region name
for seqName in seqNameToSeq:
seqBase = re.sub(r'^([0-9]+_[0-9]+)[^0-9].*',r'\1', seqName)
if seqBase not in seqBaseToSeqName:
seqBaseToSeqName[seqBase] = []
seqBaseToSeqName[seqBase].append(seqName)
for seqBase in seqBaseToSeqName:
seqId = int(re.sub(r'^[0-9]+_([0-9]+)',r'\1', seqBase))
seqBaseTaxPathDict = self._sequences.getSequence(seqId).getTaxonomyPath()
list = seqBaseToSeqName[seqBase]
candidateSeq = [] # sequence region is predicted at least at rank superkingdom
for seqName in list:
if seqName not in seqNameToPred:
taxPathDict = None
else:
taxPathDict = seqNameToPred[seqName]
if taxPathDict != None:
candidateSeq.append(seqName)
if len(candidateSeq) == 0:
continue
candidateSeq2 = [] # sequence regions predicted at least at the same rank as the whole sequence
for seqName in candidateSeq:
taxPathDict = seqNameToPred[seqName]
if ((seqBaseTaxPathDict == None)
or (len(taxPathDict) >= len(seqBaseTaxPathDict))): #predict at least at the same level
candidateSeq2.append(seqName)
if len(candidateSeq2) > 0: #take the longest sequence
sMax = candidateSeq2[0]
for s in candidateSeq2[1:]:
if len(seqNameToSeq[s]) > len(seqNameToSeq[sMax]):
sMax = s
else: #all sequence regions are predicted higher than the sequence
sMax = candidateSeq[0] #sequence region with the most specific prediction
for s in candidateSeq[1:]:
taxPathDictMax = seqNameToPred[sMax]
taxPathDictS = seqNameToPred[s]
if taxPathDictS == None:
continue
if taxPathDictMax == None:
sMax = s
continue
if len(taxPathDictMax) < len(taxPathDictS):
sMax = s
candidateSeq3 = [] #get all sequence regions with the most specific prediction
taxPathDictMax = seqNameToPred[sMax]
for s in candidateSeq:
taxPathDictS = seqNameToPred[s]
if taxPathDictMax == None:
candidateSeq3.append(s)
elif len(taxPathDictS) == len(taxPathDictMax):
candidateSeq3.append(s)
sMax = candidateSeq3[0]
for s in candidateSeq3[1:]: #take the longest sequence
if len(seqNameToSeq[sMax]) < len(seqNameToSeq[s]):
sMax = s
out.writeText(str('>' + str(sMax) + '\n' + str(seqNameToSeq[sMax]) + '\n'))
out.close()
mgToAlignPath = dict([])
for mg in self._mgList:
mgToAlignPath[mg] = os.path.normpath(os.path.join(self._clustDir, str(mg + '.align.fna')))
#build alignment
if align:
for mg in self._mgList:
alignCmd = str(self._config.get('aligner') + ' -in ' + mgToFilteredFastaPath[mg]
+ ' -out ' + mgToAlignPath[mg] + ' -quiet')
assert os.name == 'posix'
predictProc = subprocess.Popen(alignCmd, cwd=self._mgWorkingDir, shell=True, bufsize=-1) #stdout=subprocess.STDOUT, stderr=subprocess.STDOUT)
predictProc.wait()
print 'Muscle return code for', mg, ':', predictProc.returncode
if predictProc.returncode != 0:
sys.stderr.write(str(alignCmd + ' \n'))
#compute DM
if dm:
for mg in self._mgList:
mothur = os.path.join(os.path.normpath(self._config.get('mothurInstallDir')), 'mothur')
mothurCmd = str('time ' + mothur + ' "#dist.seqs(fasta=' + mgToAlignPath[mg]
+ ', processors=2, countends=F, calc=nogaps, cutoff=0.3, output=lt)"')
assert os.name == 'posix'
mothurProc = subprocess.Popen(mothurCmd, shell=True, bufsize=-1, cwd=self._mgWorkingDir)
mothurProc.wait()
print 'Mothur return code dist:', mg, mothurProc.returncode
#distFilePath = os.path.join(os.path.dirname(mgToAlignPath[mg]), str(mg + '.align.phylip.dist'))
#self._mgToDM[mg] = forEachLine(distFilePath, DM())
#self._mgToDM[mg].printDM()
#cluster
if cluster:
for mg in self._mgList:
distFilePath = os.path.join(os.path.dirname(mgToAlignPath[mg]), str(mg + '.align.phylip.dist'))
mothur = os.path.join(os.path.normpath(self._config.get('mothurInstallDir')), 'mothur')
mothurCmd = str('time ' + mothur + ' "#cluster(phylip=' + distFilePath
+ ', method=furthest, hard=t, precision=1000)"')
assert os.name == 'posix'
mothurProc = subprocess.Popen(mothurCmd, shell=True, bufsize=-1, cwd=self._mgWorkingDir)
mothurProc.wait()
print 'Mothur return code cluster:', mg, mothurProc.returncode
#read DM and clusters
#sequence predictions
self._seqIdToTaxPathDict = dict([])
self._seqIdToWeight = dict([])
for seq in self._sequences.sequences:
id = int(seq.id)
self._seqIdToTaxPathDict[id] = seq.getTaxonomyPath()
self._seqIdToWeight[id] = seq.getTaxonomyPathWeight()
#similarity thresholds
self._mgToMaxThreshold = dict([])
tmpDict = getMapping(self._config.get('mgSimilarityThresholds'), 0, 1, sep='\t', comment = '#')
for k in tmpDict:
self._mgToMaxThreshold[k] = float(tmpDict[k][0])
self._mgToDM = dict([])
self._mgToCluster = dict([])
for mg in self._mgList:
file = os.path.join(os.path.dirname(mgToAlignPath[mg]), str(mg + '.align.phylip.dist'))
self._mgToDM[mg] = forEachLine(file, DM())
file = os.path.join(os.path.dirname(mgToAlignPath[mg]), str(mg + '.align.phylip.fn.list'))
self._mgToCluster[mg] = forEachLine(file, MCluster(self._seqIdToTaxPathDict, self._mgToMaxThreshold[mg]))
#---------------------------------------------------------------------------------
def refineSpecificPred(self):
self._init(align=False, dm=False, cluster=False)
seqToCandidatePred = dict([])
for mg in self._mgList:
mCluster = self._mgToCluster[mg]
tCluster = mCluster.getLastNoConflictClustering()
threshold = tCluster.getThreshold()
if threshold > self._mgToMaxThreshold[mg]: #just in case the first clustering was already conflicting
continue
#seqNameToGroupId = tCluster.getSeqNameToGroupId()
groupIdToSeqNameSet = tCluster.getGroupIdToSeqNameSet()
for groupId in groupIdToSeqNameSet:
group = groupIdToSeqNameSet[groupId]
if len(group) < 2:
continue
seqNameMaxTaxPathDict = None #the lowest prediction within the group (all lie on the common path to the root)
seqNameToTaxPathDict = dict([])
#weightList = []
for seqName in group:
seqId = int(re.sub(r'^[0-9]+_([0-9]+)$', r'\1', seqName))
taxPathDict = self._seqIdToTaxPathDict[seqId]
#weightList.append(self._seqIdToWeight[seqId])
seqNameToTaxPathDict[seqName] = taxPathDict
if (seqNameMaxTaxPathDict == None) or (len(seqNameToTaxPathDict[seqNameMaxTaxPathDict]) < len(taxPathDict)):
seqNameMaxTaxPathDict = seqName
maxTaxPathDict = seqNameToTaxPathDict[seqNameMaxTaxPathDict]
weightList = []
for seqName in group:
if len(seqNameToTaxPathDict[seqName]) >= len(maxTaxPathDict):
weightList.append(self._seqIdToWeight[int(re.sub(r'^[0-9]+_([0-9]+)$', r'\1', seqName))])
for seqName in group:
if seqName == seqNameMaxTaxPathDict:
continue
if len(seqNameToTaxPathDict[seqName]) < len(maxTaxPathDict):
if seqName not in seqToCandidatePred:
seqToCandidatePred[seqName] = []
seqToCandidatePred[seqName].append((mg, maxTaxPathDict, min(weightList)))
#resolve candidate predictions
for seqName in seqToCandidatePred:
list = seqToCandidatePred[seqName]
if len(list) == 1:
taxPathDict = self._taxonomy.replicateTaxPathDict(list[0][1])
weight = list[0][2]
else:
#get lowest common ancestor
taxPathDictList = []
weightList = []
for t in list:
taxPathDictList.append(t[1])
weightList.append(t[2])
taxPathDict = self._taxonomy.getLongestCommonPathFromMultipleAssignments(taxPathDictList)
weight = min(weightList)
seqId = int(re.sub(r'^[0-9]+_([0-9]+)$', r'\1', seqName))
scaffoldId = int(re.sub(r'^([0-9]+)_[0-9]+$', r'\1', seqName))
currentTaxPathDict = self._sequences.getSequence(seqId).getTaxonomyPath()
if (taxPathDict != None) and (len(currentTaxPathDict) < len(taxPathDict)):
print 'Spec. pred override:', seqId, currentTaxPathDict, '->', taxPathDict
self._sequences.setTaxonomyPathOverride(seqId, scaffoldId, taxPathDict, weight)
#----------------------------------------------------------------
def reconstructOTU(self, mgToConsider = ['16S_rRNA', '23S_rRNA', 'rpoB']):
self._init(align=False, dm=False, cluster=False)
#collect all ncbids
ncbids = set([])
for seq in self._sequences.sequences:
taxPathDict = seq.getTaxonomyPath()
if taxPathDict != None:
for t in taxPathDict:
ncbids.add(taxPathDict[t].ncbid)
#remove elements that correspond to the superkingdom
ncbids.discard(1) #Root
ncbids.discard(2) #Bacteria
ncbids.discard(2157) #Archaea
ncbids.discard(2759) #Eukaryota
#for each ncbid, collect the list of sequences whose lowest assignment is to this ncbid
ncbidToSeqList = dict([])
innerNcbidSet = set([]) #set of ncbids that are not leaves
speciesNcbidSet = set([])
for seq in self._sequences.sequences:
taxPathDict = seq.getTaxonomyPath()
if taxPathDict == None:
continue
ncbid = taxPathDict[self._taxonomicRanks[len(taxPathDict) - 1]].ncbid
for rankIdx in range(len(taxPathDict)-1):
tmp = taxPathDict[self._taxonomicRanks[rankIdx]].ncbid
innerNcbidSet.add(tmp)
if self._taxonomicRanks[rankIdx] == 'species':
speciesNcbidSet.add(tmp)
if ncbid not in ncbidToSeqList:
ncbidToSeqList[ncbid] = []
ncbidToSeqList[ncbid].append(str(str(seq.scaffold.id) + '_' + str(seq.id)))
#try to resolve subclades of all ncbids
for ncbid in ncbids:
#skip ncbids that are at the rank species !!!
if ncbid in speciesNcbidSet:
continue
if ncbid not in ncbidToSeqList:
continue
seqNameList = ncbidToSeqList[ncbid]
for mg in mgToConsider:
tCluster = self._mgToCluster[mg].getLastNoConflictClustering()
seqNameToGroupId = tCluster.getSeqNameToGroupId()
groupIdToSeqNameSet = tCluster.getGroupIdToSeqNameSet()
groupIdToSeqNameMgList = dict([])
for seqName in seqNameList:
if seqName in seqNameToGroupId:
groupId = seqNameToGroupId[seqName]
if groupId not in groupIdToSeqNameMgList:
groupIdToSeqNameMgList[groupId] = []
groupIdToSeqNameMgList[groupId].append(seqName)
groupIdToBp = dict([]) #for each group store its size
for groupId in groupIdToSeqNameMgList:
seqNameList = groupIdToSeqNameMgList[groupId]
bp = 0
for seqName in seqNameList:
seqId = int(re.sub(r'^[0-9]+_([0-9]+)$', r'\1', seqName))
bp += self._sequences.getSequence(seqId).seqBp
groupIdToBp[groupId] = bp
candidateGroupIdOtuSet = set([])
for groupId in groupIdToBp:
if groupIdToBp[groupId] >= self._minBpToModel:
candidateGroupIdOtuSet.add(groupId)
if ((ncbid in innerNcbidSet) and (len(candidateGroupIdOtuSet) >= 1) or
(ncbid not in innerNcbidSet) and (len(candidateGroupIdOtuSet) >= 2)):
#create new OTUs
newNcbid = self._taxonomy.createNewOtuDBEntry(ncbid, self._sampleName, rank='species')
taxPathDict = self._taxonomy.getPathToRoot(newNcbid)
for groupId in candidateGroupIdOtuSet:
seqNameList = groupIdToSeqNameMgList[groupId]
weightList = []
for seqName in seqNameList:
seqId = int(re.sub(r'^[0-9]+_([0-9]+)$', r'\1', seqName))
weightList.append(self._sequences.getSequence(seqId).getTaxonomyPathWeight())
for seqName in seqNameList:
seqId = int(re.sub(r'^[0-9]+_([0-9]+)$', r'\1', seqName))
scaffoldId = int(re.sub(r'^([0-9]+)_[0-9]+$', r'\1', seqName))
weight = None
print 'NewOtu:', ncbid, seqId, self._sequences.getSequence(seqId).getTaxonomyPath(), '->', taxPathDict
self._sequences.setTaxonomyPathOverride(seqId, scaffoldId,
self._taxonomy.replicateTaxPathDict(taxPathDict), min(weightList))
break # don't try to infer OTUs for this ncbid from the remaining marker genes
#get all clusters at this node
#compute the size of all clusters at this node
#try to extend the clusters using other marker genes
#store suggested OTUs
#get sequences that are predicted to this ncbid but not lower
#---------------------------------------------------------------------------------
def reconstructOTU_OLD(self, lowestRank='genus'):
self._init(align=False, dm=False, cluster=False)
allowedRanks=['root','superkingdom','phylum','class','order','family','genus','species']
#for each clade, write which OTUs could be reconstructed
#collect clades
ncbids = set([])
for seq in self._sequences.sequences:
taxPathDict = seq.getCandidateTaxPathDictList()
for t in taxPathDict:
print taxPathDict, t
ncbids.add(taxPathDict[t].ncbid)
seqNodeListD = dict([])
seqLowerListD = dict([])
seqUpperListD = dict([])
for ncbid in ncbids:
#if ncbid != 171549:
# continue
seqNodeListD[ncbid] = set([])
seqLowerListD[ncbid] = set([])
seqUpperListD[ncbid] = set([])
for seq in self._sequences.sequences:
path = seq.getTaxonomyPath()
if path == None or len(path) <= 1:
continue
found = False
for rank in allowedRanks:
if path[rank].node == ncbid:
found=True
seqNodeListD[ncbid].add(seq.id)
elif not found:
seqUpperListD[ncbid].add(seq.id)
else:
seqLowerListD[ncbid].add(seq.id)
for ncbid in ncbids:
if ncbid != 171549: #!remove then!
continue
s16Clust = self._mgToCluster['16S_rRNA']
lastThreshold = 0.0
for threshold in s16Clust.thresholdsList:
if threshold < 0.3:
lastThreshold = threshold
continue
if threshold > 0.5:
lastThreshold = threshold
cluster = self.thresholdToTCluster[threshold]
#look at each cluster
relClustIdSet = set([])
for i in range(cluster.clusterIdCount):
#is some sequence from the cluster at my node? filter out clusters
for seqId in seqNodeListD[ncbid]:
if seqId in cluster.clusterIdToSeqSet[i]:
relClustIdSet.add(i)
break
#inspect relevant clusters "relClustIdSet"
filteredClustId = set([])
for i in relClustIdSet:
seqs = cluster.clusterIdToSeqSet[i]
relSeq = 0
wrongSeq = 0
for s in seqs:
if s in seqNodeListD[ncbid] or s in seqUpperListD[ncbid] or s in seqLowerListD[ncbid]:
relSeq += 1
else:
wrongSeq += 1
if wrongSeq == 0: # corrected wrongSet
filteredClustId.add(i)
#have clusters to consider:
bp = 0
for i in filteredClustId:
s = cluster.clusterIdToSeqSet[i]
sAtNode = set([])
for seq in s:
if seq in seqNodeListD[ncbid]:
sAtNode.add(seq) # take just sequences that were at the node
bp += self._sequences.getSequence(seq).seqBp
print ncbid, 'clust', sAtNode, bp
if bp > 100000:
#create new OTU and assign all sequences in the cluster to it
newNcbid = self._taxonomy.createNewOtuDBEntry(ncbid, self._sampleName, rank='species')
for seq in sAtNode:
taxPathDictOTU = self._taxonomy.getPathToRoot(newNcbid)
self._sequences.setTaxonomyPathOverride(seq.id, seq.scaffold.id, taxPathDictOTU, 100.0)
break
#---------------------------------------------------------------------------------
class DM():
"""
Phylip Distance matrix for a marker gene (and line parser).
"""
def __init__(self):
self._counter = -1
self._seqNum = None
self._seqNames = []
self._matrix = []
self._nameToIndex = dict([])
def parse(self, line):
if self._counter == -1:
self._seqNum = int(line)
else:
tokens = line.split()
name = re.sub(r'^([0-9]+_[0-9]+)_.*',r'\1', tokens[0])
self._seqNames.append(name)
self._nameToIndex[name] = self._counter
list = []
for e in tokens[1:]:
list.append(float(e))
self._matrix.append(list)
self._counter += 1
def printDM(self):
for name1 in self._seqNames:
for name2 in self._seqNames:
if name1 != name2:
print self.getDist(name1,name2)
def getDist(self, seqName1, seqName2):
if seqName1 not in self._nameToIndex or seqName2 not in self._nameToIndex:
return None
idx1 = self._nameToIndex[seqName1]
idx2 = self._nameToIndex[seqName2]
#print idx1, idx2
return self._matrix[max(idx1,idx2)][min(idx1,idx2)]
def getSeqNameList(self):
return self._seqNames
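# Usage sketch (not part of the original module; the file path is hypothetical): DM is driven
# line-by-line through forEachLine, exactly as done in MGCluster._init above, on a Phylip
# lower-triangular matrix produced by mothur's dist.seqs(..., output=lt).
#   dm = forEachLine('16S_rRNA.align.phylip.dist', DM())
#   dm.getDist('123_1', '123_4')  # -> float distance, or None if a name is not in the matrix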
#---------------------------------------------------------------------------------
class MCluster():
"""
Clusters for different thresholds
"""
def __init__(self, seqIdToTaxPathDict, maxSimilarityThreshold):
self._thresholdsList = []
self._thresholdIdxToTCluster = dict([])
self._lineCounter = -1
self._seqIdToTaxPathDict = seqIdToTaxPathDict
self._lastNoConflictThresholdIdx = 0
self._maxSimilarityThreshold = maxSimilarityThreshold
def parse(self, line):
if self._lineCounter == -1: #skip unique clusters
pass
else:
c = TCluster(line)
self._thresholdsList.append(c.getThreshold())
self._thresholdIdxToTCluster[self._lineCounter] = c
self._lineCounter += 1
def finalize(self):
#finds largest threshold at which all groups within a clustering are consistent
for i in range(len(self._thresholdsList)):
if self._thresholdsList[i] > self._maxSimilarityThreshold:
break
tCluster = self._thresholdIdxToTCluster[i]
if self._isConsistent(tCluster):
self._lastNoConflictThresholdIdx = i
else:
break
def getLastNoConflictClustering(self):
return self._thresholdIdxToTCluster[self._lastNoConflictThresholdIdx]
def _isConsistent(self, tCluster):
groupIdToSeqNameSet = tCluster.getGroupIdToSeqNameSet()
for groupId in groupIdToSeqNameSet:
taxPathDictList = []
maxLenTaxPathDict = None
for seqName in groupIdToSeqNameSet[groupId]:
t = self._seqIdToTaxPathDict[int(re.sub(r'^[0-9]+_([0-9]+)$', r'\1', seqName))]
taxPathDictList.append(t)
if (maxLenTaxPathDict == None) or (len(maxLenTaxPathDict) < len(t)):
maxLenTaxPathDict = t
assert len(taxPathDictList) > 0
if len(taxPathDictList) == 1:
continue
allowedNcbids = set([])
for rank in maxLenTaxPathDict:
allowedNcbids.add(maxLenTaxPathDict[rank].ncbid)
for t in taxPathDictList:
for rank in t:
if t[rank].ncbid not in allowedNcbids:
return False
return True
#for each mg, get the highest threshold at which there is no conflicting cluster with known labels
# for mg in mgList:
# mCluster = self._mgToCluster[mg]
# mCluster.setNoConflictThreshold()
def getThresholdsList(self):
"""
List of thresholds of different clusterings.
"""
return self._thresholdsList
def getClusterAtThreshold(self, thresholdIdx):
"""
Gets the clustering where thresholdIdx corresponds to an index in the list returned by getThresholdsList.
"""
return self._thresholdIdxToTCluster[thresholdIdx]
#---------------------------------------------------------------------------------
class TCluster():
"""
One clustering of marker genes at a specific threshold.
"""
def __init__(self, line):
tokens = line.split(',')
self._threshold = float(re.sub(r'^([^\t]+)\t[^\t]+\t.*', r'\1', tokens[0]))
tokens[0] = re.sub(r'^[^\t]+\t[^\t]+\t(.*)', r'\1', tokens[0])
self.groupIdCount = 0
self.seqNameToGroupId = dict([])
self.groupIdToSeqNameSet = dict([])
for token in tokens:
names = token.split('\t')
self.groupIdToSeqNameSet[self.groupIdCount] = set([])
for name in names:
#print name
if re.match(r'^[0-9]+_.*$', name):
seqName = re.sub(r'^([0-9]+_[0-9]+)_.*$',r'\1', name)
self.seqNameToGroupId[seqName] = self.groupIdCount
self.groupIdToSeqNameSet[self.groupIdCount].add(seqName)
self.groupIdCount += 1
def getThreshold(self):
return self._threshold
def getSeqNameToGroupId(self):
return self.seqNameToGroupId
def getGroupIdToSeqNameSet(self):
return self.groupIdToSeqNameSet
#---------------------------------------------------------------------------------
def test():
config = Config(open('/Users/ivan/Documents/work/binning/tests/CowRumen/03/config.cfg'), 'pPPS')
mgWorkingDir = '/Users/ivan/Documents/work/binning/tests/CowRumen/03/working/mgWorking'
s16Prefix = '/Users/ivan/Documents/work/binning/tests/CowRumen/03/working/cow_rumen_fragmented_velvet_assembly_scaffolds.fas.ids'
# NOTE: the current MGCluster constructor also expects 'sequences', 'taxonomy' and 'sampleName';
# this test stub predates that interface.
clust = MGCluster(config, mgWorkingDir, s16Prefix)
clust._init(align=False, dm=False, cluster=False)
#clust.buildSpecificPred()
clust.reconstructOTU()
if __name__ == "__main__":
test()
# We start out with an integer.
wealth = 1000000000
# We convert it to a string formatted the way we want.
formatted = format(wealth, ',d')
# Print it out!
print(formatted)
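# Added sketch: the same grouping is available via a format spec in an f-string,
# which prints the identical '1,000,000,000'.
print(f"{wealth:,d}")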
from django.urls import path
from . import views
urlpatterns = [
path('',views.get_room,name='chat'),
path('public/',views.get_public_room,name='public-chat'),
path('student/<int:pk>',views.chat_with_Student,name='teacher-chat'),
path('<str:room_name>/', views.room, name='room'),
]
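# Usage sketch (not part of the original file): the route names declared above can be
# resolved elsewhere in the project, e.g. reverse('teacher-chat', kwargs={'pk': 1}) in a view
# or {% url 'room' room_name %} in a template (adjust if the URLconf is namespaced).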
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic models for testing simple tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from six.moves import xrange  # pylint: disable=redefined-builtin
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
import tensorflow as tf
@registry.register_model
class BasicConvGen(t2t_model.T2TModel):
def body(self, features):
hparams = self.hparams
filters = hparams.hidden_size
kernel1, kernel2 = (3, 3), (4, 4)
# Concat frames and down-stride.
cur_frame = tf.to_float(features["inputs"])
prev_frame = tf.to_float(features["inputs_prev"])
x = tf.concat([cur_frame, prev_frame], axis=-1)
for _ in xrange(hparams.num_compress_steps):
x = tf.layers.conv2d(x, filters, kernel2, activation=common_layers.belu,
strides=(2, 2), padding="SAME")
x = common_layers.layer_norm(x)
filters *= 2
# Add embedded action.
action = tf.reshape(features["action"], [-1, 1, 1, hparams.hidden_size])
zeros = tf.zeros(common_layers.shape_list(x)[:-1] + [hparams.hidden_size])
x = tf.concat([x, action + zeros], axis=-1)
# Run a stack of convolutions.
for i in xrange(hparams.num_hidden_layers):
with tf.variable_scope("layer%d" % i):
y = tf.layers.conv2d(x, filters, kernel1, activation=common_layers.belu,
strides=(1, 1), padding="SAME")
if i == 0:
x = y
else:
x = common_layers.layer_norm(x + y)
# Up-convolve.
for _ in xrange(hparams.num_compress_steps):
filters //= 2
x = tf.layers.conv2d_transpose(
x, filters, kernel2, activation=common_layers.belu,
strides=(2, 2), padding="SAME")
x = common_layers.layer_norm(x)
# Reward prediction.
reward_pred_h1 = tf.reduce_mean(x, axis=[1, 2], keep_dims=True)
# Rewards are {-1, 0, 1} so we add 1 to the raw gold ones, predict 3.
reward_pred = tf.layers.dense(reward_pred_h1, 3, name="reward")
reward_gold = tf.expand_dims(tf.to_int32(features["reward_raw"]) + 1, 1)
reward_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=reward_gold, logits=reward_pred, name="reward_loss")
reward_loss = tf.reduce_mean(reward_loss)
return x, {"reward": reward_loss}
@registry.register_hparams
def basic_conv():
"""Basic 2-frame conv model."""
hparams = common_hparams.basic_params1()
hparams.hidden_size = 64
hparams.batch_size = 8
hparams.num_hidden_layers = 3
hparams.optimizer = "Adam"
hparams.learning_rate_constant = 0.0002
hparams.learning_rate_warmup_steps = 500
hparams.learning_rate_schedule = "constant * linear_warmup"
hparams.label_smoothing = 0.05
hparams.initializer = "uniform_unit_scaling"
hparams.initializer_gain = 1.0
hparams.weight_decay = 0.0
hparams.add_hparam("num_compress_steps", 2)
return hparams
@registry.register_hparams
def basic_conv_small():
"""Small conv model."""
hparams = common_hparams.basic_params1()
hparams.hidden_size = 32
return hparams
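# Usage sketch: the registry decorators above expose the model and hparams sets by their
# snake_case names, so they can be selected from the trainer, roughly:
#   t2t-trainer --model=basic_conv_gen --hparams_set=basic_conv ...
# (exact flag names can vary slightly between Tensor2Tensor versions).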
from django.shortcuts import render, redirect
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.decorators import login_required
from django.http import *
from django.urls import reverse
from apps.accounts.models import *
from apps.accounts.forms import *
@login_required
def login_redirect(request):
user = request.user
if user.usertype == 'school':
return redirect(reverse('school-overview'))
elif user.usertype == 'student':
return redirect(reverse(''))
else:
return HttpResponse(" We don't have an idea of your user type, \
contact the admin")
def register_school(request):
if request.method == 'POST':
form = SchoolRegistrationForm(request.POST)
if form.is_valid():
cleaned = form.cleaned_data
username = cleaned['username']
password = cleaned['password']
email = cleaned['email']
school_name = cleaned['school_name']
address = cleaned['address']
user = User.objects.create_user(username=username,
email=email,
password=password)
user.usertype = 'school'
user.save()
school = School(user=user,
name=school_name,
address=address,
)
school.save()
return redirect(reverse('registration-complete', kwargs={'username': username}))
else:
form = SchoolRegistrationForm()
return render(request, 'registration.html', {'form': form})
def registration_successful(request, username):
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
raise Http404('User not found')
try:
school = School.objects.get(user=user)
except School.DoesNotExist:
raise Http404("School not found, contact the Admin!")
context = {
'username': username,
'name': school.name
}
return render(request, 'registration-complete.html', context)
def recover_password(request):
if request.method == 'POST':
# TODO: work on the remaining part of this block
pass
else:
form = PassWordRecoveryForm()
context = {
'form': form
}
return render(request, 'forgot-password.html', context)
@login_required
def change_password(request):
if request.method == 'POST':
form = ChangePassWordForm(request.POST)
if form.is_valid():
cleaned = form.cleaned_data
current_password = cleaned['current_password']
new_password = cleaned['new_password']
reentered_password = cleaned['reentered_password']
user = User.objects.get(pk=request.user.pk)
if user.check_password(current_password):
if new_password == reentered_password:
user.set_password(new_password)
user.save()
update_session_auth_hash(request, user)
return redirect(reverse('password-change-success'))
else:
return render(request, 'change-password.html', {'form': form,
'error': 'new password and re-entered \
password not the same, correct it'})
else:
return render(request, 'change-password.html', {'form': form,
'error': 'You entered a wrong current \
password'})
else:
form = ChangePassWordForm()
context = {
'form': form
}
return render(request, 'change-password.html', context)
@login_required
def password_change_successful(request):
return render(request, 'password-change-success.html', {})
"""Contains pytest fixtures."""
import os
import pytest
from ase_notebook.data import get_example_atoms
@pytest.fixture(scope="function")
def get_test_filepath():
"""Fixture to return a path to a file in the raw files folder."""
dirpath = os.path.abspath(os.path.dirname(__file__))
def _get_test_filepath(*path):
return os.path.join(dirpath, "raw_files", *path)
return _get_test_filepath
@pytest.fixture(scope="function")
def get_test_atoms():
"""Fixture to return an ase.Atoms instance by name."""
return get_example_atoms
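# Example test consuming these fixtures (a sketch; the file name, structure name and
# assertions are hypothetical):
#   def test_raw_file_and_atoms(get_test_filepath, get_test_atoms):
#       path = get_test_filepath("example.cif")      # .../tests/raw_files/example.cif
#       atoms = get_test_atoms("pyrite")             # ase.Atoms built by get_example_atoms
#       assert os.path.exists(path) and len(atoms) > 0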
class Order:
def __init__(self, order_number, customer_id, lp_number, category, pickup_date, return_date, price, insurance, actual_return_date):
self.__order_number = order_number
self.__customer_id = customer_id
self.__lp_number = lp_number
self.__category = category
self.__pickup_date = pickup_date
self.__return_date = return_date
self.__price = price
self.__insurance = insurance
self.__actual_return_date = actual_return_date
def __str__(self):
return "{},{},{},{},{},{},{},{},{}".format(self.__order_number, self.__customer_id, self.__lp_number, self.__category, self.__pickup_date,
self.__return_date, self.__price, self.__insurance, self.__actual_return_date)
def get_order_number(self):
return self.__order_number
def get_customer_id(self):
return self.__customer_id
def get_lp_number(self):
return self.__lp_number
def get_category(self):
return self.__category
def get_pickup_date(self):
return self.__pickup_date
def get_return_date(self):
return self.__return_date
def get_price(self):
return self.__price
def get_insurance(self):
return self.__insurance
def get_actual_return_date(self):
return self.__actual_return_date
from moderate.queue.serialization import pack, unpack
class BaseQueue(object):
def put(self, name, *args, **kw):
pass
def get(self):
pass
def pack(self, name, args, kw):
return pack({'name': name, 'args': args, 'kw': kw})
def unpack(self, msg):
return unpack(msg)
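# Round-trip sketch (assumes pack/unpack from moderate.queue.serialization are symmetric;
# the task name and arguments are made up):
#   q = BaseQueue()
#   msg = q.pack('resize_image', ('photo.png',), {'width': 128})
#   q.unpack(msg)  # -> {'name': 'resize_image', 'args': [...], 'kw': {'width': 128}}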
# Generated by Django 2.2.3 on 2019-07-17 18:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('products', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Bug',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bugname', models.CharField(max_length=64, verbose_name='bug 名称')),
('bugdetail', models.CharField(max_length=200, verbose_name='详情')),
('bugstatus', models.CharField(choices=[('激活', '激活'), ('已解决', '已解决'), ('已关闭', '已关闭')], default='激活', max_length=200, null=True, verbose_name='解决状态')),
('buglevel', models.CharField(choices=[('1', '1'), ('2', '2'), ('3', '3')], default='3', max_length=200, null=True, verbose_name='严重程度')),
('bugcreater', models.CharField(max_length=200, verbose_name='创建人')),
('bugassign', models.CharField(max_length=200, verbose_name='分配给')),
('created_time', models.DateTimeField(auto_now=True, verbose_name='创建时间')),
('Product', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='products.Product')),
],
options={
'verbose_name': 'bug 管理',
'verbose_name_plural': 'bug 管理',
},
),
]
from pathlib import Path
import sys
TEST_MODE = bool(len(sys.argv) > 1 and sys.argv[1] == "test")
def phase1(v):
return next(v[i]*v[j] for i in range(len(v)) for j in range(i,len(v)) if v[i]+v[j] == 2020)
def phase2(v):
return next(v[i]*v[j]*v[k] for i in range(len(v)) for j in range(i,len(v)) for k in range(j,len(v)) if v[i]+v[j]+v[k] == 2020)
if __name__ == "__main__":
with Path(__file__).parent.joinpath("input/day1_sample" if TEST_MODE else "input/day1").open() as f:
values = [int(i) for i in f]
print(f'Phase 1: {phase1(values)}')
print(f'Phase 2: {phase2(values)}')
# -*- coding: utf-8 -*-
#
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights
# Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the
# License.
#
import unittest
import random
from ask_sdk_model import (
IntentRequest, RequestEnvelope, Intent, SessionEndedRequest, Context)
from ask_sdk_model.canfulfill import CanFulfillIntentRequest
from ask_sdk_core.utils import (
is_canfulfill_intent_name, is_intent_name, is_request_type, viewport)
from ask_sdk_core.handler_input import HandlerInput
from ask_sdk_core.exceptions import AskSdkException
from ask_sdk_model.interfaces.viewport import ViewportState, Shape
def test_is_canfulfill_intent_name_match():
test_canfulfill_intent_name = "TestIntent"
test_handler_input = HandlerInput(
request_envelope=RequestEnvelope(request=CanFulfillIntentRequest(
intent=Intent(name=test_canfulfill_intent_name))))
canfulfill_intent_name_wrapper = is_canfulfill_intent_name(test_canfulfill_intent_name)
assert canfulfill_intent_name_wrapper(
test_handler_input), "is_canfulfill_intent_name matcher didn't match with the " \
"correct intent name"
def test_is_canfulfill_intent_name_not_match():
test_canfulfill_intent_name = "TestIntent"
test_handler_input = HandlerInput(
request_envelope=RequestEnvelope(request=CanFulfillIntentRequest(
intent=Intent(name=test_canfulfill_intent_name))))
canfulfill_intent_name_wrapper = is_canfulfill_intent_name("TestIntent1")
assert not canfulfill_intent_name_wrapper(
test_handler_input), "is_canfulfill_intent_name matcher matched with the " \
"incorrect intent name"
def test_is_canfulfill_intent_not_match_intent():
test_canfulfill_intent_name = "TestIntent"
test_canfulfill_handler_input = HandlerInput(
request_envelope=RequestEnvelope(request=CanFulfillIntentRequest(
intent=Intent(name=test_canfulfill_intent_name))))
intent_name_wrapper = is_intent_name(test_canfulfill_intent_name)
assert not intent_name_wrapper(
test_canfulfill_handler_input), "is_intent_name matcher matched with the " \
"incorrect request type"
def test_is_intent_not_match_canfulfill_intent():
test_intent_name = "TestIntent"
test_handler_input = HandlerInput(
request_envelope=RequestEnvelope(request=IntentRequest(
intent=Intent(name=test_intent_name))))
canfulfill_intent_name_wrapper = is_canfulfill_intent_name(test_intent_name)
assert not canfulfill_intent_name_wrapper(
test_handler_input), "is_canfulfill_intent_name matcher matched with the " \
"incorrect request type"
def test_is_intent_name_match():
test_intent_name = "TestIntent"
test_handler_input = HandlerInput(
request_envelope=RequestEnvelope(request=IntentRequest(
intent=Intent(name=test_intent_name))))
intent_name_wrapper = is_intent_name(test_intent_name)
assert intent_name_wrapper(
test_handler_input), "is_intent_name matcher didn't match with the " \
"correct intent name"
def test_is_intent_name_not_match():
test_intent_name = "TestIntent"
test_handler_input = HandlerInput(
request_envelope=RequestEnvelope(request=IntentRequest(
intent=Intent(name=test_intent_name))))
intent_name_wrapper = is_intent_name("TestIntent1")
assert not intent_name_wrapper(
test_handler_input), "is_intent_name matcher matched with the " \
"incorrect intent name"
def test_is_request_type_match():
test_handler_input = HandlerInput(
request_envelope=RequestEnvelope(request=IntentRequest()))
request_type_wrapper = is_request_type("IntentRequest")
assert request_type_wrapper(test_handler_input), (
"is_request_type matcher didn't match with the correct request type")
def test_is_request_type_not_match():
test_handler_input = HandlerInput(
request_envelope=RequestEnvelope(request=SessionEndedRequest()))
intent_name_wrapper = is_request_type("IntentRequest")
assert not intent_name_wrapper(test_handler_input), (
"is_request_type matcher matched with the incorrect request type")
class TestViewportOrientation(unittest.TestCase):
def test_portrait_orientation(self):
width = 0
height = 1
assert (viewport.get_orientation(width=width, height=height)
== viewport.Orientation.PORTRAIT), (
"Invalid viewport orientation resolved when width < height")
def test_landscape_orientation(self):
width = 1
height = 0
assert (viewport.get_orientation(width=width, height=height)
== viewport.Orientation.LANDSCAPE), (
"Invalid viewport orientation resolved when width > height")
def test_equal_orientation(self):
width = 0
height = 0
assert (viewport.get_orientation(width=width, height=height)
== viewport.Orientation.EQUAL), (
"Invalid viewport orientation resolved when width == height")
class TestViewportSize(unittest.TestCase):
def test_xsmall_size(self):
size = random.choice(range(0, 600))
assert (viewport.get_size(size=size)
== viewport.Size.XSMALL), (
"Invalid viewport size resolved when size = {}".format(size))
def test_small_size(self):
size = random.choice(range(600, 960))
assert (viewport.get_size(size=size)
== viewport.Size.SMALL), (
"Invalid viewport size resolved when size = {}".format(size))
def test_medium_size(self):
size = random.choice(range(960, 1280))
assert (viewport.get_size(size=size)
== viewport.Size.MEDIUM), (
"Invalid viewport size resolved when size = {}".format(size))
def test_large_size(self):
size = random.choice(range(1280, 1920))
assert (viewport.get_size(size=size)
== viewport.Size.LARGE), (
"Invalid viewport size resolved when size = {}".format(size))
def test_xlarge_size(self):
size = 1920
assert (viewport.get_size(size=size)
== viewport.Size.XLARGE), (
"Invalid viewport size resolved when size = {}".format(size))
def test_unknown_size(self):
size = -1
with self.assertRaises(AskSdkException) as exc_info:
viewport.get_size(size=size)
assert "Unknown size group value: -1" in str(exc_info.exception), (
"Viewport size resolver didn't raise exception on invalid size"
)
class TestViewportDpiGroup(unittest.TestCase):
def test_xlow_dpi_group(self):
dpi = random.choice(range(0, 121))
assert (viewport.get_dpi_group(dpi=dpi)
== viewport.Density.XLOW), (
"Invalid viewport dpi_group resolved when dpi = {}".format(dpi))
def test_low_dpi_group(self):
dpi = random.choice(range(121, 161))
assert (viewport.get_dpi_group(dpi=dpi)
== viewport.Density.LOW), (
"Invalid viewport dpi_group resolved when dpi = {}".format(dpi))
def test_medium_dpi_group(self):
dpi = random.choice(range(161, 241))
assert (viewport.get_dpi_group(dpi=dpi)
== viewport.Density.MEDIUM), (
"Invalid viewport dpi_group resolved when dpi = {}".format(dpi))
def test_high_dpi_group(self):
dpi = random.choice(range(241, 321))
assert (viewport.get_dpi_group(dpi=dpi)
== viewport.Density.HIGH), (
"Invalid viewport dpi_group resolved when dpi = {}".format(dpi))
def test_xhigh_dpi_group(self):
dpi = random.choice(range(321, 481))
assert (viewport.get_dpi_group(dpi=dpi)
== viewport.Density.XHIGH), (
"Invalid viewport dpi_group resolved when dpi = {}".format(dpi))
def test_xxhigh_dpi_group(self):
dpi = 481
assert (viewport.get_dpi_group(dpi=dpi)
== viewport.Density.XXHIGH), (
"Invalid viewport dpi_group resolved when dpi = {}".format(dpi))
def test_unknown_dpi_group(self):
dpi = -1
with self.assertRaises(AskSdkException) as exc_info:
viewport.get_dpi_group(dpi=dpi)
assert "Unknown dpi group value: -1" in str(exc_info.exception), (
"Viewport dpi group resolver didn't raise exception on invalid dpi"
)
class TestViewportProfile(unittest.TestCase):
def test_viewport_map_to_hub_round_small(self):
viewport_state = ViewportState(
shape=Shape.ROUND,
dpi=float(160),
current_pixel_width=float(300),
current_pixel_height=float(300))
test_request_env = RequestEnvelope(
context=Context(
viewport=viewport_state))
assert (viewport.get_viewport_profile(test_request_env)
is viewport.ViewportProfile.HUB_ROUND_SMALL), (
"Viewport profile couldn't resolve HUB_ROUND_SMALL")
def test_viewport_map_to_hub_landscape_medium(self):
viewport_state = ViewportState(
shape=Shape.RECTANGLE,
dpi=float(160),
current_pixel_width=float(960),
current_pixel_height=float(600))
test_request_env = RequestEnvelope(
context=Context(
viewport=viewport_state))
assert (viewport.get_viewport_profile(test_request_env)
is viewport.ViewportProfile.HUB_LANDSCAPE_MEDIUM), (
"Viewport profile couldn't resolve HUB_LANDSCAPE_MEDIUM")
def test_viewport_map_to_hub_landscape_large(self):
viewport_state = ViewportState(
shape=Shape.RECTANGLE,
dpi=float(160),
current_pixel_width=float(1280),
current_pixel_height=float(960))
test_request_env = RequestEnvelope(
context=Context(
viewport=viewport_state))
assert (viewport.get_viewport_profile(test_request_env)
is viewport.ViewportProfile.HUB_LANDSCAPE_LARGE), (
"Viewport profile couldn't resolve HUB_LANDSCAPE_LARGE")
def test_viewport_map_to_mobile_landscape_small(self):
viewport_state = ViewportState(
shape=Shape.RECTANGLE,
dpi=float(240),
current_pixel_width=float(600),
current_pixel_height=float(300))
test_request_env = RequestEnvelope(
context=Context(
viewport=viewport_state))
assert (viewport.get_viewport_profile(test_request_env)
is viewport.ViewportProfile.MOBILE_LANDSCAPE_SMALL), (
"Viewport profile couldn't resolve MOBILE_LANDSCAPE_SMALL")
def test_viewport_map_to_mobile_portrait_small(self):
viewport_state = ViewportState(
shape=Shape.RECTANGLE,
dpi=float(240),
current_pixel_width=float(300),
current_pixel_height=float(600))
test_request_env = RequestEnvelope(
context=Context(
viewport=viewport_state))
assert (viewport.get_viewport_profile(test_request_env)
is viewport.ViewportProfile.MOBILE_PORTRAIT_SMALL), (
"Viewport profile couldn't resolve MOBILE_PORTRAIT_SMALL")
def test_viewport_map_to_mobile_landscape_medium(self):
viewport_state = ViewportState(
shape=Shape.RECTANGLE,
dpi=float(240),
current_pixel_width=float(960),
current_pixel_height=float(600))
test_request_env = RequestEnvelope(
context=Context(
viewport=viewport_state))
assert (viewport.get_viewport_profile(test_request_env)
is viewport.ViewportProfile.MOBILE_LANDSCAPE_MEDIUM), (
"Viewport profile couldn't resolve MOBILE_LANDSCAPE_MEDIUM")
def test_viewport_map_to_mobile_portrait_medium(self):
viewport_state = ViewportState(
shape=Shape.RECTANGLE,
dpi=float(240),
current_pixel_width=float(600),
current_pixel_height=float(960))
test_request_env = RequestEnvelope(
context=Context(
viewport=viewport_state))
assert (viewport.get_viewport_profile(test_request_env)
is viewport.ViewportProfile.MOBILE_PORTRAIT_MEDIUM), (
"Viewport profile couldn't resolve MOBILE_PORTRAIT_MEDIUM")
def test_viewport_map_to_tv_landscape_xlarge(self):
viewport_state = ViewportState(
shape=Shape.RECTANGLE,
dpi=float(320),
current_pixel_width=float(1920),
current_pixel_height=float(960))
test_request_env = RequestEnvelope(
context=Context(
viewport=viewport_state))
assert (viewport.get_viewport_profile(test_request_env)
is viewport.ViewportProfile.TV_LANDSCAPE_XLARGE), (
"Viewport profile couldn't resolve TV_LANDSCAPE_XLARGE")
def test_viewport_map_to_tv_portrait_medium(self):
viewport_state = ViewportState(
shape=Shape.RECTANGLE,
dpi=float(320),
current_pixel_width=float(300),
current_pixel_height=float(1920))
test_request_env = RequestEnvelope(
context=Context(
viewport=viewport_state))
assert (viewport.get_viewport_profile(test_request_env)
is viewport.ViewportProfile.TV_PORTRAIT_MEDIUM), (
"Viewport profile couldn't resolve TV_PORTRAIT_MEDIUM")
def test_viewport_map_to_tv_landscape_medium(self):
viewport_state = ViewportState(
shape=Shape.RECTANGLE,
dpi=float(320),
current_pixel_width=float(960),
current_pixel_height=float(600))
test_request_env = RequestEnvelope(
context=Context(
viewport=viewport_state))
assert (viewport.get_viewport_profile(test_request_env)
is viewport.ViewportProfile.TV_LANDSCAPE_MEDIUM), (
"Viewport profile couldn't resolve TV_LANDSCAPE_MEDIUM")
def test_viewport_map_to_unknown(self):
viewport_state = ViewportState(
shape=Shape.ROUND,
dpi=float(240),
current_pixel_width=float(600),
current_pixel_height=float(600))
test_request_env = RequestEnvelope(
context=Context(
viewport=viewport_state))
assert (viewport.get_viewport_profile(test_request_env)
is viewport.ViewportProfile.UNKNOWN_VIEWPORT_PROFILE), (
"Viewport profile couldn't resolve UNKNOWN_VIEWPORT_PROFILE")
def test_viewport_map_to_unknown_for_no_viewport(self):
viewport_state = None
test_request_env = RequestEnvelope(
context=Context(
viewport=viewport_state))
assert (viewport.get_viewport_profile(test_request_env)
is viewport.ViewportProfile.UNKNOWN_VIEWPORT_PROFILE), (
"Viewport profile couldn't resolve UNKNOWN_VIEWPORT_PROFILE")
from __future__ import unicode_literals
from django.apps import AppConfig
class WagerConfig(AppConfig):
name = 'Wager'
# -*- coding: utf-8 -*-
"""This code is a part of Hydra Toolkit
.. module:: hydratk.extensions.datagen.translation.en
:platform: Unix
:synopsis: English language translation for Datagen extension
.. moduleauthor:: Petr Czaderna <[email protected]>
"""
language = {
'name': 'English',
'ISO-639-1': 'en'
}
msg = {
'datagen_received_cmd': ["Received command: '{0}'"],
'datagen_asn1_compile': ["Compiling ASN.1 specification: '{0}'"],
'datagen_asn1_compiled': ["ASN.1 specification compiled"],
'datagen_asn1_decode': ["Decoding input file: '{0}' from format: '{1}' according to element: '{2}' in specification: '{3}'"],
'datagen_asn1_decoded': ["Decoded to output file: '{0}'"],
'datagen_asn1_encode': ["Encoding input file: '{0}' to format: '{1}' according to element: '{2}' in specification: '{3}'"],
'datagen_asn1_encoded': ["Encoded to output file: '{0}'"],
'datagen_asn1_transcode': ["Transcoding input file: '{0}' from format: '{1}' to format: '{2}' according to element: '{3}' specification: '{4}'"],
'datagen_asn1_transcoded': ["Transcoded to output file: '{0}'"],
'datagen_jsongen_import_spec': ["Importing JSON specification: '{0}'"],
'datagen_jsongen_spec_imported': ["JSON specification imported"],
'datagen_jsongen_write_sample': ["Creating sample JSON document"],
'datagen_jsongen_sample_written': ["Sample written to file: '{0}'"],
'datagen_xmlgen_import_spec': ["Importing XML specification: '{0}'"],
'datagen_xmlgen_spec_imported': ["XML specification imported"],
'datagen_xmlgen_write_sample': ["Creating sample XML document"],
'datagen_xmlgen_sample_written': ["Sample written to file: '{0}'"],
'datagen_adapter_parsing_suite': ["Parsing suite file: '{0}'"],
'datagen_adapter_suite_parsed': ["Suite successfully parsed"],
'datagen_adapter_parsing_test': ["Parsing test file: '{0}'"],
'datagen_adapter_test_parsed': ["Test successfully parsed"],
'datagen_adapter_cmd_unknown': ["Detected unknown command '{0}'"],
'datagen_adapter_cmd_dummy': ["Detected not supported command '{0}'"]
}
from django.contrib import admin
from .models import City,Question
# Register your models here.
admin.site.register(City)
admin.site.register(Question)
import time
import random
import string
from vectorai import ViClient, ViCollectionClient
class TempClient:
def __init__(self, client, collection_name: str=None):
self.client = client
if isinstance(client, ViClient):
self.collection_name = collection_name
elif isinstance(client, ViCollectionClient):
self.collection_name = self.client.collection_name
def teardown_collection(self):
if self.collection_name in self.client.list_collections():
time.sleep(2)
if isinstance(self.client, ViClient):
self.client.delete_collection(self.collection_name)
elif isinstance(self.client, ViCollectionClient):
self.client.delete_collection()
def __enter__(self):
self.teardown_collection()
return self.client
def __exit__(self, *exc):
self.teardown_collection()
class TempClientWithDocs(TempClient):
"""
Temporary Client With Documents already inserted.
"""
def __init__(self, client, collection_name: str=None, num_of_docs: int=10):
self.client = client
if hasattr(self.client, 'collection_name'):
self.collection_name = collection_name
else:
if collection_name is None:
collection_name = self.generate_random_collection_name()
self.collection_name = collection_name
self.client.collection_name = collection_name
self.num_of_docs = num_of_docs
def generate_random_collection_name(self):
return self.generate_random_string(20)
def generate_random_string(self, num_of_letters):
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(num_of_letters))
def __enter__(self):
self.teardown_collection()
self.client.insert_documents(self.collection_name,
self.client.create_sample_documents(self.num_of_docs))
return self.client
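# Usage sketch (credentials and the query are placeholders; relies only on the context-manager
# flow defined above: wipe the collection, insert sample documents, wipe again on exit):
#   client = ViClient(username, api_key)
#   with TempClientWithDocs(client, 'test_collection', num_of_docs=10) as c:
#       ...  # exercise the temporary collection here, e.g. search/retrieve the sample docs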
#
# voice-skill-sdk
#
# (C) 2020, Deutsche Telekom AG
#
# This file is distributed under the terms of the MIT license.
# For details see the file LICENSE in the top directory.
#
#
# Caching functions to facilitate L1/L2 caching
#
from .decorators import CallCache # noqa: F401
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_pympa-core
------------
Tests for `pympa-core` models module.
"""
import os
import shutil
import unittest
from pympa_core import models
class TestPympa_core(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
pass
def tearDown(self):
pass
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from gBlockChain.lib.database import db, Model, Column, relationship, reference_col
class Host(Model):
__tablename__ = 'host'
id = Column(db.Integer, primary_key=True, autoincrement=True, unique=True)
ip = Column(db.String(64), nullable=False, index=True, unique=True)
cpu_core_num = Column(db.Integer, default=1)
mem_total_size = Column(db.Integer, default=8)
disk_total_size = Column(db.Integer, default=40)
band_width = Column(db.Integer, default=1)
container_limit = Column(db.Integer, default=20)
docker_api_port = Column(db.Integer, default=2375)
cadvisor_api_port = Column(db.Integer, default=4033)
description = Column(db.Text, default="")
host_label = Column(db.String(32), default="")
cloud_ostype = Column(db.String(32), default="CentOS")
cloud_vendor = Column(db.String(32), default="")
cloud_region = Column(db.String(32), default="")
# Select an element
# Open yaml file with entity types
# If parameters are already present, set values according to yaml input
import sys
import clr
import System
import rpw
import yaml
import pprint
from System.Collections.Generic import *
clr.AddReference("RevitAPI")
from Autodesk.Revit.DB import *
from rpw.ui.forms import *
from Autodesk.Revit.UI.Selection import ObjectType
doc = __revit__.ActiveUIDocument.Document
uidoc = __revit__.ActiveUIDocument
app = doc.Application
pp = pprint.PrettyPrinter(indent=1)
shared_param_file = app.OpenSharedParameterFile()
selection = [doc.GetElement(element_Id) for element_Id in uidoc.Selection.GetElementIds()]
def createnewgroup(shared_parameter_file, new_group_name):
try:
newgroup = shared_parameter_file.Groups.Create(new_group_name)
print("Group successfully created with name: {}".format(new_group_name))
except:
all_groups = []
for group in shared_parameter_file.Groups:
all_groups.append(group.Name)
if new_group_name in all_groups:
print("A group already exists with the following name: {}".format(new_group_name))
for group in shared_parameter_file.Groups:
if group.Name == new_group_name:
newgroup = group
else:
print("Something went wrong. The group with name {} was not created. Please check Shared Parameter File is not read-only.".format(new_group_name))
sys.exit("Script has ended")
return newgroup
def builtinGroupFromName(builtin_group_name):
b_i_groups = System.Enum.GetValues(BuiltInParameterGroup)
builtin_group = None
for g in b_i_groups:
if g.ToString() == builtin_group_name:
builtin_group = g
if builtin_group != None:
return builtin_group
else:
print("Built-in Group not valid: {}".format(builtin_group_name))
return None
def parameterName2ExternalDefinition(sharedParamFile, definitionName):
"""
Given the name of a parameter, return the definition from the shared parameter file
"""
externalDefinition = None
for group in sharedParamFile.Groups:
for definition in group.Definitions:
if definition.Name == definitionName:
externalDefinition = definition
return externalDefinition
def NameAndGroup2ExternalDefinition(sharedParamFile, definitionName, groupName):
external_definition = None
group_found = False
group_matches = None
for group in sharedParamFile.Groups:
if group.Name == groupName:
group_found = True
group_matches = group
if group_found == True:
for definition in group_matches.Definitions:
if definition.Name == definitionName:
external_definition = definition
else:
print("Group not found with name: {}".format(groupName))
return external_definition
def create_definition (group_name, shared_parameter_file, param_name, param_type, usermodifiable, description):
new_definition = None
group_matches = False
group = None
definition_matches = False
for existing_group in shared_parameter_file.Groups:
if existing_group.Name == group_name:
group_matches = True
group = existing_group
print("Group_matches: {}".format(group_matches))
if group_matches == True:
for existing_definition in group.Definitions:
if existing_definition.Name == param_name:
definition_matches = True
print("Definition matches: {}".format(definition_matches))
if definition_matches == False:
ext_def_creation_options = ExternalDefinitionCreationOptions(param_name, param_type)
ext_def_creation_options.UserModifiable = usermodifiable
ext_def_creation_options.Description = description
new_definition = group.Definitions.Create(ext_def_creation_options)
print("Created external definition \"{}\" in group \"{}\"".format(new_definition.Name, group.Name))
else:
print("External definition already exists with name \"{}\" in group \"{}\"".format(param_name, group.Name))
else:
print("Group doesn't match")
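# Example call (a sketch only; this script defines create_definition but does not invoke it).
# Assumes a Revit API version where ParameterType.Text is available; the description text is
# made up and group_name is derived from the yaml file name further below:
#   create_definition(group_name, shared_param_file, "Entity_Type", ParameterType.Text,
#                     True, "Canonical entity type from the ontology")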
family_instances = []
not_family_instances = []
print("Selected {} items".format(len(selection)))
for item in selection:
if type(item).__name__ == "FamilyInstance":
family_instances.append(item)
else:
not_family_instances.append(item)
print("The following elements are family instances and will receive the parameter values from the ontology:")
if family_instances == []:
print("None")
else:
print([item.Id.ToString() for item in family_instances])
print("The following elements are not family instances and will be dropped from the selection:")
if not_family_instances == []:
print("None")
else:
print([item.Id.ToString() for item in not_family_instances])
yaml_path = select_file("Yaml File (*.yaml)|*.yaml", "Select the yaml file with the parameters", multiple = False, restore_directory = True)
if yaml_path:
with open(yaml_path, "r") as stream:
ontology_yaml = yaml.safe_load(stream)
file_name_split = yaml_path.split("\\")
file_name_with_ext = file_name_split[-1]
file_name_with_ext_split = file_name_with_ext.split(".")
group_name = file_name_with_ext_split[0]
canonical_types = dict(filter(lambda elem : elem[1].get("is_canonical") == True, ontology_yaml.items()))
parameter_names = []
for canonical_type in canonical_types.items():
implements_params = canonical_type[1]["implements"]
for implement_param in implements_params:
parameter_names.append(implement_param)
parameter_names = list(dict.fromkeys(parameter_names))
param_names_with_prefix = []
for pn in parameter_names:
param_name_with_prefix = "Implements_" + pn
param_names_with_prefix.append(param_name_with_prefix)
param_names_with_prefix.append("Entity_Type")
#print(param_names_with_prefix)
# Check if item has the parameters:
print("Checking if family instances have the required parameters...")
# iterate over a copy of the list, since instances missing parameters are removed from it below
for family_instance in list(family_instances):
all_params = family_instance.Parameters
all_params_names = [param.Definition.Name for param in all_params]
#pp.pprint(all_params_names)
missing_params = []
for param_name in param_names_with_prefix:
if param_name in all_params_names:
pass
else:
missing_params.append(param_name)
if missing_params == []:
print("Family instance {} has all required parameters.".format(family_instance.Id.ToString()))
else:
print("Family instance {} is missing the following parameters".format(family_instance.Id))
pp.pprint(missing_params)
family_instances.remove(family_instance)
print("Family instance {} removed from the list of objects to modify")
# ADD SELECTION OF TYPE THROUGH MENU
print("Please select an entity type from the yaml ontology...")
form_title = "Select an entity type:"
canonical_types = dict(filter(lambda elem : elem[1].get("is_canonical") == True, ontology_yaml.items()))
options = canonical_types.keys()
entity_type_name = rpw.ui.forms.SelectFromList(form_title,options,description=None,sort=True,exit_on_close=True)
entity_type_dict = (dict(filter(lambda elem: elem [0] == entity_type_name, canonical_types.items())))
print("Printing selected entity type:")
pp.pprint(entity_type_dict)
implements = entity_type_dict[entity_type_name]["implements"]
params_to_edit_names = []
for i in implements:
params_to_edit_names.append("Implements_"+i)
print(params_to_edit_names)
print("The following instances will be modified according to Entity Type: {}".format(entity_type_name))
pp.pprint(family_instances)
warnings = []
t = Transaction(doc, "Populate BOS parameters")
t.Start()
for family_instance in family_instances:
print("Editing family instance {}...".format(family_instance.Id.ToString()))
# MODIFY ENTITY TYPE
try:
p_entity_type = family_instance.LookupParameter("Entity_Type")
p_entity_type.Set(entity_type_name)
print("Entity_Type parameter successfully edited for family instance {}.".format(family_instance.Id.ToString()))
except Exception:
message = "Couldn't edit parameter Entity_Type for family instance {}.".format(family_instance.Id.ToString())
warnings.append(message)
# MODIFY YESNO PARAMETERS
all_implements_params = []
for p in family_instance.Parameters:
if "Implements_" in p.Definition.Name:
all_implements_params.append(p)
for p in all_implements_params:
try:
if p.Definition.Name in params_to_edit_names:
p.Set(True)
else:
p.Set(False)
print("{} parameter successfully edited for family instance {}.".format(p.Definition.Name, family_instance.Id.ToString()))
except Exception:
message = "Couldn't edit parameter {} for family instance {}.".format(p.Definition.Name, family_instance.Id.ToString())
warnings.append(message)
t.Commit()
print("Script has ended")
if warnings == []:
print("Warnings: None")
else:
print("Warnings:")
for w in warnings:
print(w)
|
from flask import Blueprint, request, render_template, redirect, url_for, g, session
authorization = Blueprint("authorization", __name__)
@authorization.route("/login", methods=["GET", "POST"])
def login():
# Redirect if already logged in
if g.user:
return redirect("/")
if request.method == "GET":
return render_template("login.html", fail=("fail" in request.args))
else:
if not ("email" in request.form and "password" in request.form):
return redirect(url_for("authorization.login", fail=1))
email = request.form["email"]
password = request.form["password"]
# Verify that the user exists
match = g.users.find_one({
"email": email
})
if match:
# Verify credentials
if password == match["password"]:
session["email"] = email
return redirect("/")
else:
return redirect(url_for("authorization.login", fail=1))
else:
return redirect(url_for("authorization.login", fail=1))
@authorization.route("/logout", methods=["GET"])
def logout():
session.pop("email")
return redirect("/")
@authorization.route("/register", methods=["GET", "POST"])
def register():
# Redirect if already logged in
if g.user:
return redirect("/")
if request.method == "GET":
return render_template("register.html", fail=("fail" in request.args))
else:
if not ("name" in request.form and "email" in request.form and "password" in request.form):
return redirect(url_for("authorization.login", fail=1))
name = request.form["name"]
email = request.form["email"]
password = request.form["password"]
# Verify that the user doesn't exist
match = g.users.find_one({
"email": email
})
if not match:
g.users.insert_one({
"name": name,
"email": email,
"password": password,
"category": "user"
})
session["email"] = email
return redirect("/")
else:
return redirect(url_for("authorization.register", fail=1))
|
from django.apps import apps
from lru import LRU
from .. import settings
from . import RecordMetadata, Record, get_offset_backend
from botocore.exceptions import ClientError
import boto3
import collections
import logging
import time
logger = logging.getLogger(__name__)
class KinesisBase(object):
_client = None
@property
def client(self):
if not self._client:
kwargs = self._get_client_config()
self._client = boto3.client('kinesis', **kwargs)
return self._client
def _get_client_config(self):
return {
'region_name': settings.get_aws_region(),
}
class ModelOffsetStore(object):
def commit(self, consumer, message):
KinesisOffset = apps.get_model(app_label='logpipe', model_name='KinesisOffset')
region = settings.get_aws_region()
logger.debug('Commit offset "%s" for region "%s", stream "%s", shard "%s" to %s' % (
message.offset, region, message.topic, message.partition, self.__class__.__name__))
obj, created = KinesisOffset.objects.get_or_create(
region=region,
stream=message.topic,
shard=message.partition)
obj.sequence_number = message.offset
obj.save()
def seek(self, consumer, stream, shard):
KinesisOffset = apps.get_model(app_label='logpipe', model_name='KinesisOffset')
region = settings.get_aws_region()
try:
obj = KinesisOffset.objects.get(region=settings.get_aws_region(), stream=stream, shard=shard)
logger.debug('Seeking to offset "%s" on region "%s", stream "%s", partition "%s"' % (obj.sequence_number, region, stream, shard))
consumer.seek_to_sequence_number(shard, obj.sequence_number)
except KinesisOffset.DoesNotExist:
logger.debug('Seeking to beginning of region "%s", stream "%s", partition "%s"' % (region, stream, shard))
consumer.seek_to_sequence_number(shard, None)
class Consumer(KinesisBase):
def __init__(self, topic_name, **kwargs):
self.topic_name = topic_name
self.client_kwargs = kwargs
self.shards = collections.deque()
self.records = collections.deque()
self.shard_iters = {}
shards = self._list_shard_ids()
logger.debug('Found {} kinesis shards.'.format(len(shards)))
backend = get_offset_backend()
for shard in shards:
self.shards.append(shard)
backend.seek(self, self.topic_name, shard)
def seek_to_sequence_number(self, shard, sequence_number=None):
if sequence_number is None:
resp = self.client.get_shard_iterator(
StreamName=self.topic_name,
ShardId=shard,
ShardIteratorType='TRIM_HORIZON')
else:
resp = self.client.get_shard_iterator(
StreamName=self.topic_name,
ShardId=shard,
ShardIteratorType='AFTER_SEQUENCE_NUMBER',
StartingSequenceNumber=sequence_number)
self.shard_iters[shard] = resp['ShardIterator']
def __iter__(self):
return self
def __next__(self):
# Try and load records. Keep trying until either (1) we have some records or (2) current_lag drops to 0
while len(self.records) <= 0:
# Load a page from each shard and sum the shard lags
current_lag = 0
for i in range(len(self.shards)):
current_lag += self._load_next_page()
# If all shards report 0 lag, then give up trying to load records
if current_lag <= 0:
break
# If we've tried all the shards and still don't have any records, stop iteration
if len(self.records) == 0:
raise StopIteration()
# Return the left most record in the queue
return self.records.popleft()
def _load_next_page(self):
# Load a page from the left-most shard in the queue
try:
shard = self.shards.popleft()
except IndexError:
return 0
# Get the next shard iterator for the shard
shard_iter = self.shard_iters.pop(shard, None)
if not shard_iter:
return 0
# Fetch the records from Kinesis
logger.debug('Loading page of records from {}.{}'.format(self.topic_name, shard))
fetch_limit = settings.get('KINESIS_FETCH_LIMIT', 25)
response = self._get_records(shard_iter, fetch_limit)
if response is None:
return 0
# This default value is mostly just for testing with Moto. Real Kinesis should always return a value for MillisBehindLatest.
num_records = len(response['Records'])
if 'MillisBehindLatest' in response:
current_stream_lag = response['MillisBehindLatest']
else:
current_stream_lag = 0 if num_records == 0 else 1
logger.debug('Loaded {} records from {}.{}. Currently {}ms behind stream head.'.format(num_records, self.topic_name, shard, current_stream_lag))
# Add the records page into the queue
timestamp = (time.time() * 1000) - current_stream_lag
for r in response['Records']:
record = Record(
topic=self.topic_name,
partition=shard,
offset=r['SequenceNumber'],
timestamp=timestamp,
key=r['PartitionKey'],
value=r['Data'])
self.records.append(record)
# Add the shard back to the right of the queue and save the shard iterator for next time we need
# to get records from this shard. If NextShardIterator is None, the shard has been closed and
# we should remove it from the pool.
if response.get('NextShardIterator', None):
self.shard_iters[shard] = response['NextShardIterator']
self.shards.append(shard)
else:
logger.info('Shard {}.{} has been closed. Removing it from the fetch pool.'.format(self.topic_name, shard))
return current_stream_lag
def _get_records(self, shard_iter, fetch_limit, retries=1):
i = 0
while i <= retries:
try:
response = self.client.get_records(ShardIterator=shard_iter, Limit=fetch_limit)
return response
except ClientError as e:
if e.response['Error']['Code'] == 'ProvisionedThroughputExceededException':
logger.warning("Caught ProvisionedThroughputExceededException. Sleeping for 5 seconds.")
time.sleep(5)
else:
logger.warning("Received {} from AWS API: {}".format(e.response['Error']['Code'], e.response['Error']['Message']))
i += 1
logger.warning("After {} attempts, couldn't get records from Kinesis. Giving up.".format(i))
return None
def _list_shard_ids(self):
resp = self.client.describe_stream(StreamName=self.topic_name)
return [shard['ShardId'] for shard in resp['StreamDescription']['Shards']]
class Producer(KinesisBase):
_last_sequence_numbers = LRU(settings.get('KINESIS_SEQ_NUM_CACHE_SIZE', 1000))
def send(self, topic_name, key, value):
kwargs = {
'StreamName': topic_name,
'Data': value,
'PartitionKey': key,
}
if topic_name not in self._last_sequence_numbers:
self._last_sequence_numbers[topic_name] = {}
last_seq_num = self._last_sequence_numbers[topic_name].get(key)
if last_seq_num:
kwargs['SequenceNumberForOrdering'] = last_seq_num
metadata = self._send_and_retry(kwargs)
shard_id = metadata['ShardId']
seq_num = str(metadata['SequenceNumber'])
self._last_sequence_numbers[topic_name][key] = seq_num
return RecordMetadata(
topic=topic_name,
partition=shard_id,
offset=seq_num)
def _send_and_retry(self, data, retries=1):
i = 0
while i <= retries:
try:
metadata = self.client.put_record(**data)
return metadata
except ClientError as e:
if e.response['Error']['Code'] == 'ProvisionedThroughputExceededException':
logger.warning("Caught ProvisionedThroughputExceededException. Sleeping for 5 seconds.")
time.sleep(5)
else:
logger.warning("Received {} from AWS API: {}".format(e.response['Error']['Code'], e.response['Error']['Message']))
i += 1
logger.warning("After {} attempts, couldn't send message to Kinesis. Giving up.".format(i))
|
import codecs
import datetime
import os
import pathlib
import time
from ctd_processing import cnv_column_info
from ctd_processing import exceptions
from ctd_processing.cnv.cnv_header import CNVheader
from ctd_processing.cnv.cnv_parameter import CNVparameter
class CNVfileInfo:
def __init__(self, file_path):
self._path = pathlib.Path(file_path)
self.date_format_in_file = '%b %d %Y %H:%M:%S'
self._time = None
self._lat = None
self._lon = None
self._station = None
self._get_info_from_file()
def _get_info_from_file(self):
with codecs.open(self._path, encoding='cp1252') as fid:
for line in fid:
if '* System UTC' in line:
self._time = datetime.datetime.strptime(line.split('=')[1].strip(), self.date_format_in_file)
elif '* NMEA Latitude' in line:
self._lat = line.split('=')[1].strip()[:-1].replace(' ', '')
elif '* NMEA Longitude' in line:
self._lon = line.split('=')[1].strip()[:-1].replace(' ', '')
elif line.startswith('** Station'):
self._station = line.split(':')[-1].strip()
@property
def time(self):
return self._time
@property
def lat(self):
return self._lat
@property
def lon(self):
return self._lon
@property
def station(self):
return self._station
class CNVfile:
def __init__(self, ctd_files=None, cnv_column_info_directory=None, use_cnv_info_format=False, **kwargs):
self._ctd_files = ctd_files
key = 'cnv_down'
self.file_path = self._ctd_files(key)
if not self.file_path:
raise FileNotFoundError(key)
if not self.file_path.exists():
raise FileNotFoundError(self.file_path)
self.date_format_in_file = '%b %d %Y %H:%M:%S'
self._time = None
self._lat = None
self._lon = None
self._station = None
self.instrument_number = self._ctd_files.instrument_number
cnv_info_files = cnv_column_info.CnvInfoFiles(cnv_column_info_directory)
self.cnv_info_object = cnv_info_files.get_info(self.instrument_number)
self.use_cnv_info_format = use_cnv_info_format
self.parameters = {}
self.header = CNVheader()
self.data = {}
self.nr_data_lines = None
self.linebreak = kwargs.get('linebreak', '\n')
self.missing_value = -9.990e-29
self.missing_value_str = '-9.990e-29'
self.g = 9.818 # g at 60 degrees north (dblg)
self._load_info()
self._save_columns()
self._set_active_parameters()
@property
def time(self):
return self._time
@property
def lat(self):
return self._lat
@property
def lon(self):
return self._lon
@property
def station(self):
return self._station
def modify(self):
self._check_index()
self._modify_header_information()
self._modify_irradiance()
self._modify_fluorescence()
self._modify_depth()
def save_file(self, file_path, overwrite=False):
file_path = pathlib.Path(file_path)
if file_path.exists() and not overwrite:
raise FileExistsError(file_path)
if not file_path.parent.exists():
os.makedirs(file_path.parent)
all_rows = []
all_rows.extend(self.header.rows)
all_rows.extend(self._get_data_rows())
all_rows.append('')
with open(file_path, 'w') as fid:
fid.write(self.linebreak.join(all_rows))
def _get_data_rows(self):
data_rows = []
for r in range(self.nr_data_lines):
line_list = []
for par, obj in self.parameters.items():
value = obj.get_value_as_string_for_index(r)
line_list.append(value)
line_string = ''.join(line_list)
data_rows.append(line_string)
return data_rows
# def _save_instrument_files_info(self):
# self.year = self._ctd_files.year
# self.ctry = self._ctd_files.ctry
# self.ship = self._ctd_files.ship
# self.serie = self._ctd_files.serial_number
def _load_info(self):
header = True
has_set_value_length = False
self.nr_data_lines = 0
with open(self.file_path) as fid:
for r, line in enumerate(fid):
strip_line = line.strip()
if '* System UTC' in line:
self._time = datetime.datetime.strptime(line.split('=')[1].strip(), self.date_format_in_file)
if '* NMEA Latitude' in line:
self._lat = line.split('=')[1].strip()[:-1].replace(' ', '')
if '* NMEA Longitude' in line:
self._lon = line.split('=')[1].strip()[:-1].replace(' ', '')
if line.startswith('** Station'):
self._station = line.split(':')[-1].strip()
if '*END*' in line:
self.header.add_row(line)
header = False
continue
if strip_line.startswith('# name'):
name, par = [item.strip() for item in strip_line.split('=', 1)]
index = name.split(' ')[-1]
obj = CNVparameter(use_cnv_info_format=self.use_cnv_info_format,
cnv_info_object=self.cnv_info_object[int(index)],
index=index, name=par)
self.parameters[obj.index] = obj
if header:
self.header.add_row(line)
else:
if not line.strip():
continue
self.nr_data_lines += 1
split_line = strip_line.split()
if not has_set_value_length:
tot_len = len(line.rstrip())
value_length = tot_len / len(split_line)
int_value_length = int(value_length)
if int_value_length != value_length:
raise ValueError('Something is wrong in the file!')
for i, value in enumerate(split_line):
self.parameters[i].set_value_length(int_value_length)
has_set_value_length = True
for i, value in enumerate(split_line):
self.parameters[i].add_data(value)
def _save_columns(self):
self.col_pres = None
self.col_dens = None
self.col_dens2 = None
self.col_depth = None
self.col_sv = None
for par in self.parameters.values():
if 'prDM: Pressure, Digiquartz [db]' in par.name:
self.col_pres = par.index
elif 'sigma-t00: Density [sigma-t' in par.name:
self.col_dens = par.index
elif 'sigma-t11: Density, 2 [sigma-t' in par.name:
self.col_dens2 = par.index
elif 'depFM: Depth [fresh water, m]' in par.name:
self.col_depth = par.index
elif 'depFM: Depth [true depth, m]' in par.name:
self.col_depth = par.index
elif 'svCM: Sound Velocity [Chen-Millero, m/s]' in par.name:
self.col_sv = par.index
def _set_active_parameters(self):
for i, info in self.cnv_info_object.items():
self.parameters[i].set_active(info.active)
def _change_parameter_name(self, current_name, new_name):
for par in self.parameters.values():
if par.name == new_name:
return
for par in self.parameters.values():
if current_name == par.name:
par.change_name(new_name)
def _get_parameter_name_matching_string(self, match_string):
for par in self.parameters.values():
if match_string in par.name:
return par.name
def _check_index(self):
if not self.cnv_info_object:
raise exceptions.MissingAttribute('cnv_info_object')
for info, cnv in zip(self.cnv_info_object.values(), self.parameters.values()):
if 'depFM: Depth [true depth, m], lat' in info.name:
continue
if info.name not in cnv.name:
print(info.name)
print(cnv.name)
raise exceptions.InvalidParameterIndex(f'Index mismatch in cnv for parameter: {info.name}')
cnv.active = True
# The sensor_index (i.e. the first column in self.cnv_column_info) could be defined here;
# it follows automatically from how DatCnv.psa is configured.
# For now we only check that it is correct.
def _get_pressure_data(self):
return self.parameters[self.col_pres].data
def _get_depth_data(self):
return self.parameters[self.col_depth].data
def _get_sound_velocity_data(self):
return self.parameters[self.col_sv].data
def _get_density_data(self):
if self.parameters[self.col_dens].active:
return self.parameters[self.col_dens].data
elif self.parameters[self.col_dens2].active:
return self.parameters[self.col_dens2].data
else:
return [self.missing_value]*self.nr_data_lines
def _get_calculated_true_depth(self):
prdM_data = self._get_pressure_data()
sigT_data = self._get_density_data()
# Calculation of true depth. Replace depFM with true depth in the header.
# Start parameters
dens_0 = (sigT_data[0] + 1000.) / 1000. # start density
p_0 = 0
depth = 0
true_depth = []
for q in range(len(prdM_data)):
if sigT_data[q] != self.missing_value:
# decibar to bar (dblRPres)
rpres = prdM_data[q] * 10.
# Calculate density (dblDens)
dens = (sigT_data[q] + 1000.) / 1000.
# Calculate delta depth (dblDDjup)
ddepth = (rpres - p_0) / ((dens + dens_0) / 2. * self.g)
# Sum all the depths and use the computed pressure in the next loop iteration
# If it is the first (perhaps not entirely relevant) or the last value, divide by two according to the trapezoid rule
dens_0 = dens
# if q == 0 or q == (len(prdM)-1):
# Depth = Depth + DDepth / 2.
# else:
# Depth = Depth + DDepth
# Changed by Örjan 2015-02-10: the /2. for the first and last depth was removed.
depth = depth + ddepth
# Save the computed pressure for the next loop iteration
p_0 = rpres
# Store the TrueDepth value
true_depth.append(depth)
else:
true_depth.append(self.missing_value)
return true_depth
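# The loop above integrates the hydrostatic relation dz = dp / (rho * g) sample by sample:
# each pressure increment (rpres - p_0) is divided by the mean density of the two samples
# times g, and the increments are summed into `depth`. Multiplying the pressure (decibar)
# by 10 while keeping the density in g/cm3 (sigma-t/1000 + 1) gives the same ratio as
# working in pascals and kg/m3, so `ddepth` comes out in metres.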
def _get_mean_sound_velocity(self):
svCM_data = self._get_sound_velocity_data()
return sum(svCM_data) / len(svCM_data)
def _modify_header_information(self):
svMean = self._get_mean_sound_velocity()
now = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
after_str = '** Ship'
rows_to_insert = [f'** Average sound velocity: {str("%6.2f" % svMean)} m/s',
f'** True-depth calculation {now}',
# f'** CTD Python Module SMHI /ver 3-12/ feb 2012',
# f'** Python Module: ctd_processing, nov 2020'
# f'** LIMS Job: {self.year}{self.ctry}{self.ship}-{self.serie}'
]
for row in rows_to_insert:
if 'True-depth calculation' in row:
self.header.insert_row_after(row, after_str, ignore_if_string='True-depth calculation')
else:
self.header.insert_row_after(row, after_str)
after_str = row
def _modify_irradiance(self):
self.header.append_to_row('par: PAR/Irradiance', ' [µE/(cm^2*s)]')
def _modify_fluorescence(self):
# Add Chl-a to the fluorometers whose designation starts with FLNTURT
par_name_1 = self._get_parameter_name_matching_string('Fluorescence, WET Labs ECO-AFL/FL [mg/m^3]')
fluo_index_1 = self.header.get_row_index_for_matching_string('Fluorescence, WET Labs ECO-AFL/FL [mg/m^3]')
fluo_xml_index_1 = self.header.get_row_index_for_matching_string('Fluorometer, WET Labs ECO-AFL/FL -->')
serial_index_1 = self.header.get_row_index_for_matching_string('<SerialNumber>FLNTURT', as_list=True)
par_name_2 = self._get_parameter_name_matching_string('Fluorescence, WET Labs ECO-AFL/FL, 2 [mg/m^3]')
fluo_index_2 = self.header.get_row_index_for_matching_string('Fluorescence, WET Labs ECO-AFL/FL, 2 [mg/m^3]')
fluo_xml_index_2 = self.header.get_row_index_for_matching_string('Fluorometer, WET Labs ECO-AFL/FL, 2 -->')
serial_index_2 = self.header.get_row_index_for_matching_string('<SerialNumber>FLPCRTD', as_list=True)
if fluo_xml_index_1 and (fluo_xml_index_1 + 2) in serial_index_1:
self.header.replace_string_at_index(fluo_xml_index_1, 'Fluorometer', 'Chl-a Fluorometer')
self.header.replace_string_at_index(fluo_index_1, 'Fluorescence', 'Chl-a Fluorescence')
new_par_name_1 = par_name_1.replace('Fluorescence', 'Chl-a Fluorescence')
self._change_parameter_name(par_name_1, new_par_name_1)
if fluo_xml_index_2 and (fluo_xml_index_2 + 2) in serial_index_2:
self.header.replace_string_at_index(fluo_xml_index_2, 'Fluorometer', 'Phycocyanin Fluorometer')
self.header.replace_string_at_index(fluo_index_2, 'Fluorescence', 'Phycocyanin Fluorescence')
new_par_name_2 = par_name_2.replace('Fluorescence', 'Phycocyanin Fluorescence')
self._change_parameter_name(par_name_2, new_par_name_2)
def _modify_depth(self):
index = self.header.get_row_index_for_matching_string('depFM: Depth [fresh water, m]')
self.header.replace_string_at_index(index, 'fresh water', 'true depth')
par_name = self._get_parameter_name_matching_string('depFM: Depth [fresh water, m]')
if par_name:
new_par_name = par_name.replace('fresh water', 'true depth')
self._change_parameter_name(par_name, new_par_name)
span_depth_index = self.header.get_row_index_for_matching_string(f'# span {self.col_depth}')
true_depth_values = self._get_calculated_true_depth()
if int(self.col_depth) < 10:
new_line = '# span %s =%11.3f,%11.3f%7s' % (self.col_depth, min(true_depth_values), max(true_depth_values), '')
else:
new_line = '# span %s =%11.3f,%11.3f%6s' % (self.col_depth, min(true_depth_values), max(true_depth_values), '')
self.header.replace_row(span_depth_index, new_line)
# Replace the data in the fresh water column with true depth, rounded to three decimals
new_depth_data = []
for value in true_depth_values:
if value == self.missing_value:
new_depth_data.append(self.missing_value_str)
else:
new_depth_data.append(round(value, 3))
self.parameters[self.col_depth].data = new_depth_data
def _modify_span(self):
# Adjust the span for the parameters that have been flagged as bad
for index, info in self.cnv_info_object.items():
if info.active:
continue
span_index = self.header.get_row_index_for_matching_string(f'# span {info.index}')
if int(info.index) < 10:
new_line = f'# span {info.index} = {self.missing_value_str}, {self.missing_value_str}{" ": >7}'
else:
new_line = f'# span {info.index} = {self.missing_value_str}, {self.missing_value_str}{" ": >6}'
self.header.replace_row(span_index, new_line)
|
import unittest
from pycoin.cmds import ku
from pycoin.coins.bitcoin.networks import BitcoinMainnet
from .ToolTest import ToolTest
# BRAIN DAMAGE
Key = BitcoinMainnet.ui._key_class
class KuTest(ToolTest):
@classmethod
def setUpClass(cls):
cls.parser = ku.create_parser()
cls.tool_name = "ku"
def test_ku_create(self):
output = self.launch_tool("ku create -w").split("\n")
bip32 = BitcoinMainnet.ui.parse(output[0])
bip32_as_text = bip32.hwif(as_private=True)
self.assertEqual(output[0], bip32_as_text)
def main():
unittest.main()
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import unittest
# Add the repo root to the beginning of the Python module path.
# Even if the user has installed family_tree locally, the version
# next to the test file will be used.
sys.path = [os.path.join(os.path.dirname(__file__), '..')] + sys.path
from source import family_tree as ft
class FamilyTreeTest(unittest.TestCase):
def setUp(self):
self.family = ft.Family()
def testMakeEmptyFamily(self):
self.assertEqual(0, self.family.Size())
def testAddOnePerson(self):
self.family.Person('John Smith')
self.assertEqual(1, self.family.Size())
def testAddOnePersonWithGender(self):
p = self.family.Person('John Smith', gender='M')
self.assertEqual('M', p.Gender())
def testAddOnePersonWithAttributes(self):
p = self.family.Person('John Smith', birth='1942', death='?')
self.assertIsNone(p.Gender()) # Gender is unknown yet.
self.assertEqual('1942', p.Birth())
self.assertIsNone(p.Death())
self.assertTrue(p.Deceased()) # We know he's deceased as the death is '?'.
self.assertEqual(1, self.family.Size())
def testAddPersonWithWife(self):
p = self.family.Person('John Smith', wife='Ada Smith')
self.assertEqual('M', p.Gender()) # Inferred.
q = self.family.Person('Ada Smith')
self.assertEqual('F', q.Gender()) # Inferred.
wives = p.Wives()
self.assertEqual(1, len(wives))
self.assertEqual(q, wives[0])
self.assertEqual(0, len(p.Husbands()))
husbands = q.Husbands()
self.assertEqual(1, len(husbands))
self.assertEqual(p, husbands[0])
self.assertEqual(0, len(q.Wives()))
self.assertEqual(2, self.family.Size())
def testAddPersonWithTwoWives(self):
p = self.family.Person('John Smith', wife=('Ada Smith', 'Katty Lam'))
self.assertEqual('M', p.Gender()) # Inferred.
q = self.family.Person('Ada Smith')
self.assertEqual('F', q.Gender()) # Inferred.
r = self.family.Person('Katty Lam')
self.assertEqual('F', r.Gender()) # Inferred.
wives = p.Wives()
self.assertEqual(2, len(wives))
self.assertEqual(q, wives[0])
self.assertEqual(r, wives[1])
self.assertEqual(0, len(p.Husbands()))
husbands = q.Husbands()
self.assertEqual(1, len(husbands))
self.assertEqual(p, husbands[0])
self.assertEqual(0, len(q.Wives()))
husbands = r.Husbands()
self.assertEqual(1, len(husbands))
self.assertEqual(p, husbands[0])
self.assertEqual(0, len(r.Wives()))
self.assertEqual(3, self.family.Size())
def testAddPersonWithHusband(self):
q = self.family.Person('Ada Smith', husband='John Smith')
self.assertEqual('F', q.Gender()) # Inferred.
p = self.family.Person('John Smith')
self.assertEqual('M', p.Gender()) # Inferred.
wives = p.Wives()
self.assertEqual(1, len(wives))
self.assertEqual(q, wives[0])
self.assertEqual(0, len(p.Husbands()))
husbands = q.Husbands()
self.assertEqual(1, len(husbands))
self.assertEqual(p, husbands[0])
self.assertEqual(0, len(q.Wives()))
self.assertEqual(2, self.family.Size())
def testAddPersonWithTwoHusbands(self):
p = self.family.Person('Ada Smith', husband=('John Smith', 'Mike Jin'))
self.assertEqual('F', p.Gender()) # Inferred.
q = self.family.Person('John Smith')
self.assertEqual('M', q.Gender()) # Inferred.
r = self.family.Person('Mike Jin')
self.assertEqual('M', r.Gender()) # Inferred.
husbands = p.Husbands()
self.assertEqual(2, len(husbands))
self.assertEqual(q, husbands[0])
self.assertEqual(r, husbands[1])
self.assertEqual(0, len(p.Wives()))
wives = q.Wives()
self.assertEqual(1, len(wives))
self.assertEqual(p, wives[0])
self.assertEqual(0, len(q.Husbands()))
wives = r.Wives()
self.assertEqual(1, len(wives))
self.assertEqual(p, wives[0])
self.assertEqual(0, len(r.Husbands()))
self.assertEqual(3, self.family.Size())
def testAddPersonWithFather(self):
p = self.family.Person('John Smith', father='Adam Smith')
q = self.family.Person('Adam Smith')
self.assertEqual(q, p.Father())
self.assertIsNone(p.Mother())
self.assertEqual(0, len(p.Children()))
self.assertEqual('M', q.Gender())
self.assertIsNone(q.Father())
self.assertIsNone(q.Mother())
children = q.Children()
self.assertEqual(1, len(children))
self.assertEqual(p, children[0])
self.assertEqual(2, self.family.Size())
def testAddPersonWithMother(self):
p = self.family.Person('John Smith', mother='Alice Smith')
q = self.family.Person('Alice Smith')
self.assertEqual(q, p.Mother())
self.assertIsNone(p.Father())
self.assertEqual(0, len(p.Children()))
self.assertEqual('F', q.Gender())
self.assertIsNone(q.Father())
self.assertIsNone(q.Mother())
children = q.Children()
self.assertEqual(1, len(children))
self.assertEqual(p, children[0])
self.assertEqual(2, self.family.Size())
def testSortEmptyFamily(self):
self.assertEqual([[]], self.family.Sort())
def testSortFamilyOfOne(self):
p = self.family.Person('Adam Smith')
self.assertEqual([[p]], self.family.Sort())
self.assertEqual(0, p.Generation())
def testSortCouple(self):
p = self.family.Person('Adam Smith', wife='Jan Smith')
q = self.family.Person('Jan Smith')
self.assertEqual([[p, q]], self.family.Sort())
self.assertEqual(0, p.Generation())
self.assertEqual(0, q.Generation())
def testSortCoupleReverse(self):
p = self.family.Person('Jan Smith', husband='Mike Smith')
q = self.family.Person('Mike Smith')
self.assertEqual([[q, p]], self.family.Sort())
self.assertEqual(0, p.Generation())
self.assertEqual(0, q.Generation())
def testSortFatherSon(self):
p = self.family.Person('Adam Smith', father='Jim Smith')
q = self.family.Person('Jim Smith')
self.assertEqual([[q], [p]], self.family.Sort())
self.assertEqual(0, q.Generation())
self.assertEqual(1, p.Generation())
def testSortMultipleWives(self):
w2 = self.family.Person('Mary Jones')
w1 = self.family.Person('Jan Smith', husband='Mike Smith')
h = self.family.Person('Mike Smith', wife='Mary Jones')
self.assertEqual([[h, w1, w2]], self.family.Sort())
self.assertEqual(0, h.Generation())
self.assertEqual(0, w1.Generation())
self.assertEqual(0, w2.Generation())
def testDisplayName(self):
h = self.family.Person('王 大明', wife='李幺妹')
w = self.family.Person('李 幺妹', husband='王大明')
self.assertEqual('王大明', h.ID())
self.assertEqual('王 大明', h.Name())
self.assertEqual('李幺妹', w.ID())
self.assertEqual('李 幺妹', w.Name())
self.assertEqual(2, self.family.Size())
if __name__ == '__main__':
unittest.main()
|
"""Macro support for P65.
P65 Macros are cached SequenceNodes with arguments
set via .alias commands and prevented from escaping
with .scope and .scend commands."""
import sys
import Ophis.IR as IR
import Ophis.CmdLine as Cmd
import Ophis.Errors as Err
macros = {}
currentname = None
currentbody = None
def newMacro(name):
"Start creating a new macro with the specified name."
global currentname
global currentbody
global macros
if currentname is not None:
Err.log("Internal error! Nested macro attempt!")
else:
if name in macros:
Err.log("Duplicate macro definition '%s'" % name)
currentname = name
currentbody = []
def registerNode(node):
global currentbody
currentbody.append(IR.Node(node.ppt, node.nodetype, *node.data))
def endMacro():
global currentname
global currentbody
global macros
if currentname is None:
Err.log("Internal error! Ended a non-existent macro!")
else:
macros[currentname] = currentbody
currentname = None
currentbody = None
def expandMacro(ppt, name, arglist):
global macros
if name not in macros:
Err.log("Undefined macro '%s'" % name)
return IR.NullNode
argexprs = [IR.Node(ppt, "Label", "_*%d" % i, arg) for (i, arg) in zip(xrange(1, sys.maxint), arglist)]
bindexprs = [IR.Node(ppt, "Label", "_%d" % i, IR.LabelExpr("_*%d" % i)) for i in range(1, len(arglist)+1)]
body = [IR.Node("%s->%s" % (ppt, node.ppt), node.nodetype, *node.data) for node in macros[name]]
invocation = [IR.Node(ppt, "ScopeBegin")] + argexprs + [IR.Node(ppt, "ScopeBegin")] + bindexprs + body + [IR.Node(ppt, "ScopeEnd"), IR.Node(ppt, "ScopeEnd")]
return IR.SequenceNode(ppt, invocation)
def dump():
global macros
for mac in macros:
body = macros[mac]
print "Macro: "+mac
for node in body: print node
print ""
|
#!/usr/bin/env python
#
# Copyright (c) 2018 [email protected] and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##
import os
import unittest
import yaml
from mock import patch, call, Mock
import dovetail.testcase as tcase
__author__ = 'Stamatis Katsaounis <[email protected]>'
class TestcaseTesting(unittest.TestCase):
def setUp(self):
test_path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(test_path, 'test_testcase.yaml')) as f:
self.testcase_yaml = yaml.safe_load(f)
def tearDown(self):
pass
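# Testcase and Testsuite keep their registries in class-level dicts, so they are cleared
# after every test to keep the individual test methods independent of execution order.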
def teardown_method(self, method):
tcase.Testcase.logger = None
tcase.Testcase.validate_testcase_list = {}
tcase.Testcase.testcase_list = {}
tcase.Testsuite.testsuite_list = {}
@patch('dovetail.testcase.dt_logger')
def test_create_log(self, mock_logger):
getlogger_obj = Mock()
logger_obj = Mock()
logger_obj.getLogger.return_value = getlogger_obj
mock_logger.Logger.return_value = logger_obj
tcase.Testcase.create_log()
self.assertEqual(getlogger_obj, tcase.Testcase.logger)
@patch('dovetail.testcase.Parser')
def test_parse_cmd_no_lines(self, mock_parser):
testcase = tcase.YardstickTestcase(self.testcase_yaml)
cmds_list = ['cmd']
mock_parser.parse_cmd.return_value = None
result = testcase.parse_cmd(cmds_list)
mock_parser.parse_cmd.assert_called_once_with(
'cmd', testcase)
self.assertEqual(False, result)
@patch('dovetail.testcase.Parser')
def test_parse_cmd(self, mock_parser):
testcase = tcase.BottlenecksTestcase(self.testcase_yaml)
logger_obj = Mock()
testcase.logger = logger_obj
cmds_list = ['cmd']
mock_parser.parse_cmd.return_value = 'cmd_lines'
result = testcase.parse_cmd(cmds_list)
mock_parser.parse_cmd.assert_called_once_with(
'cmd', testcase)
logger_obj.debug.assert_called_once_with("cmds: ['cmd_lines']")
self.assertEqual(['cmd_lines'], testcase.cmds)
self.assertEqual(True, result)
@patch('dovetail.testcase.dt_cfg')
def test_prepare_cmd_no_cmds(self, mock_config):
testcase = tcase.ShellTestcase(self.testcase_yaml)
logger_obj = Mock()
testcase.logger = logger_obj
mock_config.dovetail_config = {}
result = testcase.prepare_cmd('type')
logger_obj.error.assert_called_once_with(
'Test case {} has no cmds.'.format(testcase.name()))
self.assertEqual(False, result)
@patch('dovetail.testcase.dt_cfg')
@patch.object(tcase.Testcase, 'parse_cmd')
def test_prepare_cmd_testcase_cmd(self, mock_parse, mock_config):
testcase = tcase.ShellTestcase(self.testcase_yaml)
testcase.testcase['validate']['cmds'] = ['cmd']
mock_config.dovetail_config = {}
mock_parse.return_value = True
result = testcase.prepare_cmd('type')
mock_parse.assert_called_once_with(['cmd'])
self.assertEqual(True, result)
@patch('dovetail.testcase.dt_cfg')
@patch.object(tcase.Testcase, 'parse_cmd')
def test_prepare_cmd_config_cmd(self, mock_parse, mock_config):
testcase = tcase.TestcaseFactory.create('yardstick',
self.testcase_yaml)
mock_config.dovetail_config = {'type': {'cmds': ['cmd']}}
mock_parse.return_value = True
result = testcase.prepare_cmd('type')
mock_parse.assert_called_once_with(['cmd'])
self.assertEqual(True, result)
def test_str(self):
testcase = tcase.OnapVtpTestcase(self.testcase_yaml)
result = testcase.__str__()
self.assertEqual(testcase.testcase, result)
def test_objective(self):
testcase = tcase.OnapVvpTestcase(self.testcase_yaml)
testcase.testcase['objective'] = 'objective'
result = testcase.objective()
self.assertEqual('objective', result)
@patch('dovetail.testcase.dt_utils')
def test_sub_testcase(self, mock_utils):
testcase = tcase.Testcase(self.testcase_yaml)
mock_utils.get_value_from_dict.return_value = 'value'
result = testcase.sub_testcase()
mock_utils.get_value_from_dict.assert_called_once_with(
'report.sub_testcase_list', testcase.testcase)
self.assertEqual('value', result)
def test_sub_testcase_passed(self):
testcase = tcase.Testcase(self.testcase_yaml)
logger_obj = Mock()
testcase.logger = logger_obj
result = testcase.sub_testcase_passed('name', 'passed')
self.assertEqual('passed', result)
def test_validate_type(self):
testcase = tcase.Testcase(self.testcase_yaml)
result = testcase.validate_type()
self.assertEqual('functest', result)
def test_validate_testcase(self):
testcase = tcase.Testcase(self.testcase_yaml)
result = testcase.validate_testcase()
self.assertEqual('tempest_smoke_serial', result)
def test_portal_key_file(self):
testcase = tcase.Testcase(self.testcase_yaml)
result = testcase.portal_key_file()
self.assertEqual('tempest_logs/tempest_smoke_serial.html', result)
def test_vnf_type(self):
testcase = tcase.OnapVtpTestcase(self.testcase_yaml)
result = testcase.vnf_type()
self.assertEqual('tosca', result)
def test_passed(self):
testcase = tcase.Testcase(self.testcase_yaml)
result = testcase.passed('passed')
self.assertEqual('passed', result)
def test_set_get_results(self):
testcase = tcase.Testcase(self.testcase_yaml)
testcase.set_results('results')
self.assertEqual('results', testcase.get_results())
def test_pre_condition_exists(self):
testcase = tcase.Testcase(self.testcase_yaml)
testcase.testcase['validate']['pre_condition'] = 'pre_condition'
result = testcase.pre_condition()
self.assertEqual('pre_condition', result)
@patch.object(tcase.Testcase, 'pre_condition_cls')
def test_pre_condition_not_exists(self, mock_pre_condition):
testcase = tcase.Testcase(self.testcase_yaml)
logger_obj = Mock()
testcase.logger = logger_obj
mock_pre_condition.return_value = False
result = testcase.pre_condition()
mock_pre_condition.assert_called_once_with('functest')
logger_obj.debug.assert_called_once_with(
'Test case: {} pre_condition is empty.'.format(testcase.name()))
self.assertEqual(False, result)
def test_pre_copy_path(self):
testcase = tcase.Testcase(self.testcase_yaml)
testcase.testcase['validate']['pre_copy'] = {'key': 'value'}
result = testcase.pre_copy_path('key')
self.assertEqual('value', result)
def test_pre_copy_path_error(self):
testcase = tcase.Testcase(self.testcase_yaml)
result = testcase.pre_copy_path('key')
self.assertEqual(None, result)
def test_post_condition_exists(self):
testcase = tcase.Testcase(self.testcase_yaml)
testcase.testcase['validate']['post_condition'] = 'post_condition'
result = testcase.post_condition()
self.assertEqual('post_condition', result)
@patch.object(tcase.Testcase, 'post_condition_cls')
def test_post_condition_not_exists(self, mock_post_condition):
testcase = tcase.Testcase(self.testcase_yaml)
logger_obj = Mock()
testcase.logger = logger_obj
mock_post_condition.return_value = False
result = testcase.post_condition()
mock_post_condition.assert_called_once_with('functest')
logger_obj.debug.assert_called_once_with(
'Test case: {} post_condition is empty.'.format(testcase.name()))
self.assertEqual(False, result)
@patch('builtins.open')
@patch('dovetail.testcase.os.path')
@patch('dovetail.testcase.dt_cfg')
@patch.object(tcase.Testcase, 'sub_testcase')
def test_mk_src_file(self, mock_sub_testcase, mock_config,
mock_path, mock_open):
testcase = tcase.Testcase(self.testcase_yaml)
logger_obj = Mock()
testcase.logger = logger_obj
mock_config.dovetail_config = {'result_dir': 'value'}
sub_test = 'sub_test'
file_path = 'file_path'
mock_path.join.return_value = file_path
mock_sub_testcase.return_value = [sub_test]
file_obj = Mock()
mock_open.return_value.__enter__.return_value = file_obj
result = testcase.mk_src_file()
mock_path.join.assert_called_once_with('value', 'tempest_custom.txt')
mock_open.assert_called_once_with(file_path, 'w+')
file_obj.write.assert_called_once_with(sub_test + '\n')
logger_obj.debug.assert_called_once_with(
'Save test cases to {}'.format(file_path))
self.assertEqual(file_path, result)
@patch('builtins.open')
@patch('dovetail.testcase.os.path')
@patch('dovetail.testcase.dt_cfg')
@patch.object(tcase.Testcase, 'sub_testcase')
def test_mk_src_file_exception(self, mock_sub_testcase,
mock_config, mock_path, mock_open):
testcase = tcase.Testcase(self.testcase_yaml)
logger_obj = Mock()
testcase.logger = logger_obj
mock_config.dovetail_config = {'result_dir': 'value'}
sub_test = 'sub_test'
file_path = 'file_path'
mock_path.join.return_value = file_path
mock_sub_testcase.return_value = [sub_test]
mock_open.return_value.__enter__.side_effect = Exception()
result = testcase.mk_src_file()
mock_path.join.assert_called_once_with('value', 'tempest_custom.txt')
mock_open.assert_called_once_with(file_path, 'w+')
logger_obj.exception.assert_called_once_with('Failed to save: {}'.format(file_path))
self.assertEqual(None, result)
@patch('dovetail.testcase.TestRunnerFactory')
def test_run(self, mock_factory):
testcase = tcase.Testcase(self.testcase_yaml)
logger_obj = Mock()
testcase.logger = logger_obj
runner_obj = Mock()
mock_factory.create.return_value = runner_obj
error_msg = 'An error happened!'
runner_obj.archive_logs.side_effect = AttributeError(error_msg)
testcase.run()
runner_obj.run.assert_called_once_with()
runner_obj.archive_logs.assert_called_once_with()
logger_obj.exception.assert_called_once_with(
'Test case: {} Exception: {}'.format(testcase.name, error_msg))
@patch('dovetail.testcase.dt_cfg')
def test_pre_condition_cls(self, mock_config):
mock_config.dovetail_config = {'type': {'pre_condition': 'value'}}
result = tcase.Testcase.pre_condition_cls('type')
self.assertEqual('value', result)
@patch('dovetail.testcase.dt_cfg')
def test_pre_condition_cls_key_error(self, mock_config):
mock_config.dovetail_config = {}
result = tcase.Testcase.pre_condition_cls('type')
self.assertEqual(None, result)
@patch('dovetail.testcase.dt_cfg')
def test_post_condition_cls(self, mock_config):
mock_config.dovetail_config = {'type': {'post_condition': 'value'}}
result = tcase.Testcase.post_condition_cls('type')
self.assertEqual('value', result)
@patch('dovetail.testcase.dt_cfg')
def test_post_condition_cls_key_error(self, mock_config):
mock_config.dovetail_config = {}
result = tcase.Testcase.post_condition_cls('type')
self.assertEqual(None, result)
def test_increase_retry(self):
testcase = tcase.Testcase(self.testcase_yaml)
tcase.Testcase.validate_testcase_list[
'tempest_smoke_serial'] = {'retry': 0}
for _ in range(0, 42):
result = testcase.increase_retry()
self.assertEqual(42, result)
@patch('builtins.open')
@patch('dovetail.testcase.yaml')
@patch('dovetail.testcase.os')
@patch('dovetail.testcase.TestcaseFactory')
@patch('dovetail.testcase.constants')
def test_load(self, mock_constants, mock_factory, mock_os, mock_yaml,
mock_open):
testcase = tcase.Testcase(self.testcase_yaml)
mock_constants.TESTCASE_PATH = 'abs_path'
mock_os.walk.return_value = [('root', ['dir'], ['file'])]
mock_os.path.join.return_value = 'testcase_path'
file_obj = Mock()
mock_open.return_value.__enter__.return_value = file_obj
yaml_dict = {'key': {'validate': {'type': 'value'}}}
mock_yaml.safe_load.return_value = yaml_dict
runner_obj = Mock()
mock_factory.create.return_value = runner_obj
testcase.load()
mock_os.walk.assert_called_once_with('abs_path')
mock_os.path.join.assert_called_with('root', 'file')
mock_open.assert_called_once_with('testcase_path')
mock_yaml.safe_load.assert_called_once_with(file_obj)
mock_factory.create.assert_called_once_with('value', yaml_dict)
self.assertEqual(runner_obj, tcase.Testcase.get('key'))
@patch('builtins.open')
@patch('dovetail.testcase.yaml')
@patch('dovetail.testcase.os')
@patch('dovetail.testcase.TestcaseFactory')
@patch('dovetail.testcase.constants')
def test_load_no_testcase(self, mock_constants, mock_factory, mock_os,
mock_yaml, mock_open):
logger_obj = Mock()
tcase.Testcase.logger = logger_obj
mock_constants.TESTCASE_PATH = 'abs_path'
mock_os.walk.return_value = [('root', ['dir'], ['file'])]
mock_os.path.join.return_value = 'testcase_path'
file_obj = Mock()
mock_open.return_value.__enter__.return_value = file_obj
yaml_dict = {'key': {'validate': {'type': 'value'}}}
mock_yaml.safe_load.return_value = yaml_dict
mock_factory.create.return_value = None
tcase.Testcase.load()
mock_os.walk.assert_called_once_with('abs_path')
mock_os.path.join.assert_called_with('root', 'file')
mock_open.assert_called_once_with('testcase_path')
mock_yaml.safe_load.assert_called_once_with(file_obj)
mock_factory.create.assert_called_once_with('value', yaml_dict)
logger_obj.error.assert_called_once_with(
'Failed to create test case: file')
def test_get_none(self):
self.assertEqual(None, tcase.Testcase.get('unknown'))
def test_check_testarea_none(self):
self.assertEqual((True, ['full']),
tcase.Testcase.check_testarea(None))
@patch('dovetail.testcase.dt_cfg')
def test_check_testarea_full(self, mock_config):
self.assertEqual((True, ['full']),
tcase.Testcase.check_testarea(['full']))
@patch('dovetail.testcase.dt_cfg')
def test_check_testarea(self, mock_config):
self.assertEqual((True, ['area']),
tcase.Testcase.check_testarea(['area']))
def test_check_testcase_area(self):
self.assertEqual(False,
tcase.Testcase.check_testcase_area(None, None))
def test_check_testcase_area_full_or_in_testcase(self):
self.assertEqual(True,
tcase.Testcase.check_testcase_area(['full'], 'full'))
def test_check_testcase_area_not_in_testcase_or_full(self):
self.assertEqual(False,
tcase.Testcase.check_testcase_area(['full'], 'half'))
@patch('dovetail.testcase.dt_utils')
def test_get_testcases_for_testsuite_no_testcases(self, mock_utils):
mock_utils.get_value_from_dict.return_value = None
result = tcase.Testcase.get_testcases_for_testsuite('suite', 'area')
mock_utils.get_value_from_dict.assert_has_calls([
call('testcases_list', 'suite'),
call('mandatory', None),
call('optional', None)])
self.assertEqual([], result)
@patch('dovetail.testcase.dt_cfg')
@patch('dovetail.testcase.dt_utils')
def test_get_testcases_for_testsuite_no_selected_testcases(self,
mock_utils,
mock_config):
logger_obj = Mock()
tcase.Testcase.logger = logger_obj
testcases_obj = Mock()
mock_utils.get_value_from_dict.side_effect = [
testcases_obj, None, None]
mock_config.dovetail_config = {
'mandatory': True,
'optional': True
}
testsuite = {'name': 'test_name'}
result = tcase.Testcase.get_testcases_for_testsuite(testsuite, 'area')
mock_utils.get_value_from_dict.assert_has_calls([
call('testcases_list', testsuite),
call('mandatory', testcases_obj),
call('optional', testcases_obj)])
logger_obj.error.assert_has_calls([
call('There is no mandatory test case in test suite {}'
.format(testsuite['name'])),
call('There is no optional test case in test suite {}'
.format(testsuite['name']))])
self.assertEqual(None, result)
@patch('dovetail.testcase.dt_cfg')
@patch('dovetail.testcase.dt_utils')
@patch.object(tcase.Testcase, 'check_testcase_area')
def test_get_testcases_for_testsuite(self, mock_check, mock_utils,
mock_config):
logger_obj = Mock()
tcase.Testcase.logger = logger_obj
testcases_obj = Mock()
mock_utils.get_value_from_dict.side_effect = [
testcases_obj, ['mandatory'], ['optional']]
mock_config.dovetail_config = {
'mandatory': True,
'optional': True
}
mock_check.return_value = True
testsuite = {'name': 'test_name'}
testarea = ['area']
mandatory_obj = Mock()
tcase.Testcase.testcase_list['mandatory'] = mandatory_obj
optional_obj = Mock()
tcase.Testcase.testcase_list['optional'] = optional_obj
result = tcase.Testcase.get_testcases_for_testsuite(
testsuite, testarea)
mock_utils.get_value_from_dict.assert_has_calls([
call('testcases_list', testsuite),
call('mandatory', testcases_obj),
call('optional', testcases_obj)])
mock_check.assert_has_calls([
call('mandatory', 'area'),
call('optional', 'area')])
self.assertEqual(['mandatory', 'optional'], result)
self.assertEqual(
True, tcase.Testcase.testcase_list['mandatory'].is_mandatory)
self.assertEqual(
False, tcase.Testcase.testcase_list['optional'].is_mandatory)
@patch('dovetail.testcase.dt_cfg')
@patch('dovetail.testcase.dt_utils')
@patch.object(tcase.Testcase, 'check_testcase_area')
def test_get_testcases_for_testsuite_no_conf(self, mock_check, mock_utils,
mock_config):
logger_obj = Mock()
tcase.Testcase.logger = logger_obj
testcases_obj = Mock()
mock_utils.get_value_from_dict.side_effect = [
testcases_obj, ['mandatory'], ['optional']]
mock_config.dovetail_config = {
'mandatory': False,
'optional': False
}
mock_check.return_value = True
testsuite = {'name': 'test_name'}
testarea = ['area']
mandatory_obj = Mock()
tcase.Testcase.testcase_list['mandatory'] = mandatory_obj
optional_obj = Mock()
tcase.Testcase.testcase_list['optional'] = optional_obj
result = tcase.Testcase.get_testcases_for_testsuite(testsuite,
testarea)
mock_utils.get_value_from_dict.assert_has_calls([
call('testcases_list', testsuite),
call('mandatory', testcases_obj),
call('optional', testcases_obj)])
mock_check.assert_has_calls([
call('mandatory', 'area'),
call('optional', 'area')])
self.assertEqual(['mandatory', 'optional'], result)
self.assertEqual(True,
tcase.Testcase.testcase_list['mandatory']
.is_mandatory)
self.assertEqual(False,
tcase.Testcase.testcase_list['optional'].is_mandatory)
@patch.object(tcase.Testcase, 'prepare_cmd')
def test_functest_case_prepare_cmd_false(self, mock_prepare):
testcase = tcase.FunctestTestcase(self.testcase_yaml)
mock_prepare.return_value = False
result = testcase.prepare_cmd('type')
mock_prepare.assert_called_once_with('type')
self.assertEqual(False, result)
@patch('dovetail.testcase.os.path')
@patch('dovetail.testcase.dt_cfg')
@patch.object(tcase.Testcase, 'prepare_cmd')
def test_functest_case_prepare_cmd(self, mock_prepare, mock_config,
mock_path):
testcase = tcase.FunctestTestcase(self.testcase_yaml)
logger_obj = Mock()
testcase.logger = logger_obj
mock_prepare.return_value = True
mock_config.dovetail_config = {
'no_api_validation': True,
'functest': {'patches_dir': 'value'}}
mock_path.join.return_value = 'patch_cmd'
result = testcase.prepare_cmd('type')
mock_path.join.assert_called_once_with(
'value', 'functest', 'disable-api-validation', 'apply.sh')
logger_obj.debug.assert_called_once_with(
'Updated list of commands for test run with '
'disabled API response validation: {}'
.format(testcase.cmds))
self.assertEqual(['patch_cmd'], testcase.cmds)
self.assertEqual(True, result)
def test_testfactory_error(self):
self.assertEqual(None,
tcase.TestcaseFactory.create('unknown',
self.testcase_yaml))
def test_testfactory_k8s(self):
k8s_testcase = tcase.TestcaseFactory.create('functest-k8s',
self.testcase_yaml)
self.assertEqual('functest-k8s', k8s_testcase.type)
@patch('dovetail.testcase.dt_logger')
def test_testsuite_create_log(self, mock_logger):
getlogger_obj = Mock()
logger_obj = Mock()
logger_obj.getLogger.return_value = getlogger_obj
mock_logger.Logger.return_value = logger_obj
tcase.Testsuite.create_log()
self.assertEqual(getlogger_obj, tcase.Testsuite.logger)
def test_testsuite_get_test(self):
suite = tcase.Testsuite('suite')
suite.testcase_list['testcase'] = 'value'
result = suite.get_test('testcase')
self.assertEqual('value', result)
def test_testsuite_get_test_not_exists(self):
suite = tcase.Testsuite('suite')
result = suite.get_test('testcase')
self.assertEqual(None, result)
@patch('builtins.open')
@patch('dovetail.testcase.yaml')
@patch('dovetail.testcase.os')
@patch('dovetail.testcase.constants')
def test_testsuite_load(self, mock_constants, mock_os, mock_yaml,
mock_open):
mock_constants.COMPLIANCE_PATH = 'abs_path'
mock_os.walk.return_value = [('root', ['dir'], ['file'])]
mock_os.path.join.return_value = 'file_path'
mock_yaml.safe_load.return_value = {'testsuite': 'value'}
file_obj = Mock()
mock_open.return_value.__enter__.return_value = file_obj
tcase.Testsuite.load()
mock_os.walk.assert_called_once_with('abs_path')
mock_os.path.join.assert_called_with('root', 'file')
mock_open.assert_called_once_with('file_path')
mock_yaml.safe_load.assert_called_once_with(file_obj)
self.assertEqual({'testsuite': 'value'},
tcase.Testsuite.testsuite_list)
def test_testsuite_get_none(self):
self.assertEqual(None, tcase.Testsuite.get('unknown'))
def test_testsuite_get(self):
tcase.Testsuite.testsuite_list.update({'key': 'value'})
self.assertEqual('value', tcase.Testsuite.get('key'))
def test_testsuite_get_all(self):
tcase.Testsuite.testsuite_list.update({'key': 'value'})
self.assertEqual({'key': 'value'}, tcase.Testsuite.get_all())
|
"""Base class for undirected graphs.
The Graph class allows any hashable object as a node
and can associate key/value attribute pairs with each undirected edge.
Self-loops are allowed but multiple edges are not (see MultiGraph).
For directed graphs see DiGraph and MultiDiGraph.
"""
# Copyright (C) 2004-2011 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
from copy import deepcopy
import networkx as nx
from networkx.exception import NetworkXError
import networkx.convert as convert
__author__ = """\n""".join(['Aric Hagberg ([email protected])',
'Pieter Swart ([email protected])',
'Dan Schult([email protected])'])
class Graph(object):
"""
Base class for undirected graphs.
A Graph stores nodes and edges with optional data, or attributes.
Graphs hold undirected edges. Self loops are allowed but multiple
(parallel) edges are not.
Nodes can be arbitrary (hashable) Python objects with optional
key/value attributes.
Edges are represented as links between nodes with optional
key/value attributes.
Parameters
----------
data : input graph
Data to initialize graph. If data=None (default) an empty
graph is created. The data can be an edge list, or any
NetworkX graph object. If the corresponding optional Python
packages are installed the data can also be a NumPy matrix
or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to graph as key=value pairs.
See Also
--------
DiGraph
MultiGraph
MultiDiGraph
Examples
--------
Create an empty graph structure (a "null graph") with no nodes and
no edges.
>>> G = nx.Graph()
G can be grown in several ways.
**Nodes:**
Add one node at a time:
>>> G.add_node(1)
Add the nodes from any container (a list, dict, set or
even the lines from a file or the nodes from another graph).
>>> G.add_nodes_from([2,3])
>>> G.add_nodes_from(range(100,110))
>>> H=nx.Graph()
>>> H.add_path([0,1,2,3,4,5,6,7,8,9])
>>> G.add_nodes_from(H)
In addition to strings and integers any hashable Python object
(except None) can represent a node, e.g. a customized node object,
or even another Graph.
>>> G.add_node(H)
**Edges:**
G can also be grown by adding edges.
Add one edge,
>>> G.add_edge(1, 2)
a list of edges,
>>> G.add_edges_from([(1,2),(1,3)])
or a collection of edges,
>>> G.add_edges_from(H.edges())
If some edges connect nodes not yet in the graph, the nodes
are added automatically. There are no errors when adding
nodes or edges that already exist.
**Attributes:**
Each graph, node, and edge can hold key/value attribute pairs
in an associated attribute dictionary (the keys must be hashable).
By default these are empty, but can be added or changed using
add_edge, add_node or direct manipulation of the attribute
dictionaries named graph, node and edge respectively.
>>> G = nx.Graph(day="Friday")
>>> G.graph
{'day': 'Friday'}
Add node attributes using add_node(), add_nodes_from() or G.node
>>> G.add_node(1, time='5pm')
>>> G.add_nodes_from([3], time='2pm')
>>> G.node[1]
{'time': '5pm'}
>>> G.node[1]['room'] = 714
>>> del G.node[1]['room'] # remove attribute
>>> G.nodes(data=True)
[(1, {'time': '5pm'}), (3, {'time': '2pm'})]
Warning: adding a node to G.node does not add it to the graph.
Add edge attributes using add_edge(), add_edges_from(), subscript
notation, or G.edge.
>>> G.add_edge(1, 2, weight=4.7 )
>>> G.add_edges_from([(3,4),(4,5)], color='red')
>>> G.add_edges_from([(1,2,{'color':'blue'}), (2,3,{'weight':8})])
>>> G[1][2]['weight'] = 4.7
>>> G.edge[1][2]['weight'] = 4
**Shortcuts:**
Many common graph features allow python syntax to speed reporting.
>>> 1 in G # check if node in graph
True
>>> [n for n in G if n<3] # iterate through nodes
[1, 2]
>>> len(G) # number of nodes in graph
5
The fastest way to traverse all edges of a graph is via
adjacency_iter(), but the edges() method is often more convenient.
>>> for n,nbrsdict in G.adjacency_iter():
... for nbr,eattr in nbrsdict.items():
... if 'weight' in eattr:
... (n,nbr,eattr['weight'])
(1, 2, 4)
(2, 1, 4)
(2, 3, 8)
(3, 2, 8)
>>> [ (u,v,edata['weight']) for u,v,edata in G.edges(data=True) if 'weight' in edata ]
[(1, 2, 4), (2, 3, 8)]
**Reporting:**
Simple graph information is obtained using methods.
Iterator versions of many reporting methods exist for efficiency.
Methods exist for reporting nodes(), edges(), neighbors() and degree()
as well as the number of nodes and edges.
For details on these and other miscellaneous methods, see below.
"""
def __init__(self, data=None, **attr):
"""Initialize a graph with edges, name, graph attributes.
Parameters
----------
data : input graph
Data to initialize graph. If data=None (default) an empty
graph is created. The data can be an edge list, or any
NetworkX graph object. If the corresponding optional Python
packages are installed the data can also be a NumPy matrix
or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph.
name : string, optional (default='')
An optional name for the graph.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to graph as key=value pairs.
See Also
--------
convert
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G = nx.Graph(name='my graph')
>>> e = [(1,2),(2,3),(3,4)] # list of edges
>>> G = nx.Graph(e)
Arbitrary graph attribute pairs (key=value) may be assigned
>>> G=nx.Graph(e, day="Friday")
>>> G.graph
{'day': 'Friday'}
"""
self.graph = {} # dictionary for graph attributes
self.node = {} # empty node dict (created before convert)
self.adj = {} # empty adjacency dict
# attempt to load graph with data
if data is not None:
convert.to_networkx_graph(data,create_using=self)
# load graph attributes (must be after convert)
self.graph.update(attr)
self.edge = self.adj
@property
def name(self):
return self.graph.get('name','')
@name.setter
def name(self, s):
self.graph['name']=s
def __str__(self):
"""Return the graph name.
Returns
-------
name : string
The name of the graph.
Examples
--------
>>> G = nx.Graph(name='foo')
>>> str(G)
'foo'
"""
return self.name
def __iter__(self):
"""Iterate over the nodes. Use the expression 'for n in G'.
Returns
-------
niter : iterator
An iterator over all nodes in the graph.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
"""
return iter(self.node)
def __contains__(self,n):
"""Return True if n is a node, False otherwise. Use the expression
'n in G'.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> 1 in G
True
"""
try:
return n in self.node
except TypeError:
return False
def __len__(self):
"""Return the number of nodes. Use the expression 'len(G)'.
Returns
-------
nnodes : int
The number of nodes in the graph.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> len(G)
4
"""
return len(self.node)
def __getitem__(self, n):
"""Return a dict of neighbors of node n. Use the expression 'G[n]'.
Parameters
----------
n : node
A node in the graph.
Returns
-------
adj_dict : dictionary
The adjacency dictionary for nodes connected to n.
Notes
-----
G[n] is similar to G.neighbors(n) but the internal data dictionary
is returned instead of a list.
Assigning G[n] will corrupt the internal graph data structure.
Use G[n] for reading data only.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G[0]
{1: {}}
"""
return self.adj[n]
def add_node(self, n, attr_dict=None, **attr):
"""Add a single node n and update node attributes.
Parameters
----------
n : node
A node can be any hashable Python object except None.
attr_dict : dictionary, optional (default= no attributes)
Dictionary of node attributes. Key/value pairs will
update existing data associated with the node.
attr : keyword arguments, optional
Set or change attributes using key=value.
See Also
--------
add_nodes_from
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_node(1)
>>> G.add_node('Hello')
>>> K3 = nx.Graph([(0,1),(1,2),(2,0)])
>>> G.add_node(K3)
>>> G.number_of_nodes()
3
Use keywords set/change node attributes:
>>> G.add_node(1,size=10)
>>> G.add_node(3,weight=0.4,UTM=('13S',382871,3972649))
Notes
-----
A hashable object is one that can be used as a key in a Python
dictionary. This includes strings, numbers, tuples of strings
and numbers, etc.
On many platforms hashable items also include mutables such as
NetworkX Graphs, though one should be careful that the hash
doesn't change on mutables.
"""
# set up attribute dict
if attr_dict is None:
attr_dict=attr
else:
try:
attr_dict.update(attr)
except AttributeError:
raise NetworkXError(\
"The attr_dict argument must be a dictionary.")
if n not in self.node:
self.adj[n] = {}
self.node[n] = attr_dict
else: # update attr even if node already exists
self.node[n].update(attr_dict)
def add_nodes_from(self, nodes, **attr):
"""Add multiple nodes.
Parameters
----------
nodes : iterable container
A container of nodes (list, dict, set, etc.).
OR
A container of (node, attribute dict) tuples.
Node attributes are updated using the attribute dict.
attr : keyword arguments, optional (default= no attributes)
Update attributes for all nodes in nodes.
Node attributes specified in nodes as a tuple
take precedence over attributes specified generally.
See Also
--------
add_node
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_nodes_from('Hello')
>>> K3 = nx.Graph([(0,1),(1,2),(2,0)])
>>> G.add_nodes_from(K3)
>>> sorted(G.nodes(),key=str)
[0, 1, 2, 'H', 'e', 'l', 'o']
Use keywords to update specific node attributes for every node.
>>> G.add_nodes_from([1,2], size=10)
>>> G.add_nodes_from([3,4], weight=0.4)
Use (node, attrdict) tuples to update attributes for specific
nodes.
>>> G.add_nodes_from([(1,dict(size=11)), (2,{'color':'blue'})])
>>> G.node[1]['size']
11
>>> H = nx.Graph()
>>> H.add_nodes_from(G.nodes(data=True))
>>> H.node[1]['size']
11
"""
for n in nodes:
try:
if n not in self.node:
self.adj[n] = {}
self.node[n] = attr.copy()
else:
self.node[n].update(attr)
except TypeError:
nn,ndict = n
if nn not in self.node:
self.adj[nn] = {}
newdict = attr.copy()
newdict.update(ndict)
self.node[nn] = newdict
else:
olddict = self.node[nn]
olddict.update(attr)
olddict.update(ndict)
def remove_node(self,n):
"""Remove node n.
Removes the node n and all adjacent edges.
Attempting to remove a non-existent node will raise an exception.
Parameters
----------
n : node
A node in the graph
Raises
-------
NetworkXError
If n is not in the graph.
See Also
--------
remove_nodes_from
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2])
>>> G.edges()
[(0, 1), (1, 2)]
>>> G.remove_node(1)
>>> G.edges()
[]
"""
adj = self.adj
try:
nbrs = list(adj[n].keys()) # keys handles self-loops (allow mutation later)
del self.node[n]
except KeyError: # NetworkXError if n not in self
raise NetworkXError("The node %s is not in the graph."%(n,))
for u in nbrs:
del adj[u][n] # remove all edges n-u in graph
del adj[n] # now remove node
def remove_nodes_from(self, nodes):
"""Remove multiple nodes.
Parameters
----------
nodes : iterable container
A container of nodes (list, dict, set, etc.). If a node
in the container is not in the graph it is silently
ignored.
See Also
--------
remove_node
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2])
>>> e = G.nodes()
>>> e
[0, 1, 2]
>>> G.remove_nodes_from(e)
>>> G.nodes()
[]
"""
adj = self.adj
for n in nodes:
try:
del self.node[n]
for u in list(adj[n].keys()): # keys() handles self-loops
del adj[u][n] #(allows mutation of dict in loop)
del adj[n]
except KeyError:
pass
def nodes_iter(self, data=False):
"""Return an iterator over the nodes.
Parameters
----------
data : boolean, optional (default=False)
If False the iterator returns nodes. If True
return a two-tuple of node and node data dictionary
Returns
-------
niter : iterator
An iterator over nodes. If data=True the iterator gives
two-tuples containing (node, node data dictionary)
Notes
-----
If the node data is not required it is simpler and equivalent
to use the expression 'for n in G'.
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2])
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2])
>>> [d for n,d in G.nodes_iter(data=True)]
[{}, {}, {}]
"""
if data:
return iter(self.node.items())
return iter(self.node)
def nodes(self, data=False):
"""Return a list of the nodes in the graph.
Parameters
----------
data : boolean, optional (default=False)
If False return a list of nodes. If True return a
two-tuple of node and node data dictionary
Returns
-------
nlist : list
A list of nodes. If data=True a list of two-tuples containing
(node, node data dictionary).
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2])
>>> G.nodes()
[0, 1, 2]
>>> G.add_node(1, time='5pm')
>>> G.nodes(data=True)
[(0, {}), (1, {'time': '5pm'}), (2, {})]
"""
return list(self.nodes_iter(data=data))
def number_of_nodes(self):
"""Return the number of nodes in the graph.
Returns
-------
nnodes : int
The number of nodes in the graph.
See Also
--------
order, __len__ which are identical
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2])
>>> len(G)
3
"""
return len(self.node)
def order(self):
"""Return the number of nodes in the graph.
Returns
-------
nnodes : int
The number of nodes in the graph.
See Also
--------
number_of_nodes, __len__ which are identical
"""
return len(self.node)
def has_node(self, n):
"""Return True if the graph contains the node n.
Parameters
----------
n : node
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2])
>>> G.has_node(0)
True
It is more readable and simpler to use
>>> 0 in G
True
"""
try:
return n in self.node
except TypeError:
return False
def add_edge(self, u, v, attr_dict=None, **attr):
"""Add an edge between u and v.
The nodes u and v will be automatically added if they are
not already in the graph.
Edge attributes can be specified with keywords or by providing
a dictionary with key/value pairs. See examples below.
Parameters
----------
u,v : nodes
Nodes can be, for example, strings or numbers.
Nodes must be hashable (and not None) Python objects.
attr_dict : dictionary, optional (default= no attributes)
Dictionary of edge attributes. Key/value pairs will
update existing data associated with the edge.
attr : keyword arguments, optional
Edge data (or labels or objects) can be assigned using
keyword arguments.
See Also
--------
add_edges_from : add a collection of edges
Notes
-----
Adding an edge that already exists updates the edge data.
Many NetworkX algorithms designed for weighted graphs use as
the edge weight a numerical value assigned to a keyword
which by default is 'weight'.
Examples
--------
The following all add the edge e=(1,2) to graph G:
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> e = (1,2)
>>> G.add_edge(1, 2) # explicit two-node form
>>> G.add_edge(*e) # single edge as tuple of two nodes
>>> G.add_edges_from( [(1,2)] ) # add edges from iterable container
Associate data to edges using keywords:
>>> G.add_edge(1, 2, weight=3)
>>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7)
"""
# set up attribute dictionary
if attr_dict is None:
attr_dict=attr
else:
try:
attr_dict.update(attr)
except AttributeError:
raise NetworkXError(\
"The attr_dict argument must be a dictionary.")
# add nodes
if u not in self.node:
self.adj[u] = {}
self.node[u] = {}
if v not in self.node:
self.adj[v] = {}
self.node[v] = {}
# add the edge
datadict=self.adj[u].get(v,{})
datadict.update(attr_dict)
self.adj[u][v] = datadict
self.adj[v][u] = datadict
def add_edges_from(self, ebunch, attr_dict=None, **attr):
"""Add all the edges in ebunch.
Parameters
----------
ebunch : container of edges
Each edge given in the container will be added to the
graph. The edges must be given as 2-tuples (u,v) or
3-tuples (u,v,d) where d is a dictionary containing edge
data.
attr_dict : dictionary, optional (default= no attributes)
Dictionary of edge attributes. Key/value pairs will
update existing data associated with each edge.
attr : keyword arguments, optional
Edge data (or labels or objects) can be assigned using
keyword arguments.
See Also
--------
add_edge : add a single edge
add_weighted_edges_from : convenient way to add weighted edges
Notes
-----
Adding the same edge twice has no effect but any edge data
will be updated when each duplicate edge is added.
Edge attributes specified in edges as a tuple take precedence
over attributes specified generally.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_edges_from([(0,1),(1,2)]) # using a list of edge tuples
>>> e = zip(range(0,3),range(1,4))
>>> G.add_edges_from(e) # Add the path graph 0-1-2-3
Associate data to edges
>>> G.add_edges_from([(1,2),(2,3)], weight=3)
>>> G.add_edges_from([(3,4),(1,4)], label='WN2898')
"""
# set up attribute dict
if attr_dict is None:
attr_dict=attr
else:
try:
attr_dict.update(attr)
except AttributeError:
raise NetworkXError(\
"The attr_dict argument must be a dictionary.")
# process ebunch
for e in ebunch:
ne=len(e)
if ne==3:
u,v,dd = e
elif ne==2:
u,v = e
dd = {}
else:
raise NetworkXError(\
"Edge tuple %s must be a 2-tuple or 3-tuple."%(e,))
if u not in self.node:
self.adj[u] = {}
self.node[u] = {}
if v not in self.node:
self.adj[v] = {}
self.node[v] = {}
datadict=self.adj[u].get(v,{})
datadict.update(attr_dict)
datadict.update(dd)
self.adj[u][v] = datadict
self.adj[v][u] = datadict
def add_weighted_edges_from(self, ebunch, weight='weight', **attr):
"""Add all the edges in ebunch as weighted edges with specified
weights.
Parameters
----------
ebunch : container of edges
Each edge given in the list or container will be added
to the graph. The edges must be given as 3-tuples (u,v,w)
where w is a number.
weight : string, optional (default= 'weight')
The attribute name for the edge weights to be added.
attr : keyword arguments, optional (default= no attributes)
Edge attributes to add/update for all edges.
See Also
--------
add_edge : add a single edge
add_edges_from : add multiple edges
Notes
-----
Adding the same edge twice for Graph/DiGraph simply updates
the edge data. For MultiGraph/MultiDiGraph, duplicate edges
are stored.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_weighted_edges_from([(0,1,3.0),(1,2,7.5)])
"""
self.add_edges_from(((u,v,{weight:d}) for u,v,d in ebunch),**attr)
def remove_edge(self, u, v):
"""Remove the edge between u and v.
Parameters
----------
u,v: nodes
Remove the edge between nodes u and v.
Raises
------
NetworkXError
If there is not an edge between u and v.
See Also
--------
remove_edges_from : remove a collection of edges
Examples
--------
>>> G = nx.Graph() # or DiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G.remove_edge(0,1)
>>> e = (1,2)
>>> G.remove_edge(*e) # unpacks e from an edge tuple
>>> e = (2,3,{'weight':7}) # an edge with attribute data
>>> G.remove_edge(*e[:2]) # select first part of edge tuple
"""
try:
del self.adj[u][v]
if u != v: # self-loop needs only one entry removed
del self.adj[v][u]
except KeyError:
raise NetworkXError("The edge %s-%s is not in the graph"%(u,v))
def remove_edges_from(self, ebunch):
"""Remove all edges specified in ebunch.
Parameters
----------
ebunch: list or container of edge tuples
Each edge given in the list or container will be removed
from the graph. The edges can be:
- 2-tuples (u,v) edge between u and v.
- 3-tuples (u,v,k) where k is ignored.
See Also
--------
remove_edge : remove a single edge
Notes
-----
Will fail silently if an edge in ebunch is not in the graph.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> ebunch=[(1,2),(2,3)]
>>> G.remove_edges_from(ebunch)
"""
adj=self.adj
for e in ebunch:
u,v = e[:2] # ignore edge data if present
if u in adj and v in adj[u]:
del adj[u][v]
if u != v: # self loop needs only one entry removed
del adj[v][u]
def has_edge(self, u, v):
"""Return True if the edge (u,v) is in the graph.
Parameters
----------
u,v : nodes
Nodes can be, for example, strings or numbers.
Nodes must be hashable (and not None) Python objects.
Returns
-------
edge_ind : bool
True if edge is in the graph, False otherwise.
Examples
--------
Can be called either using two nodes u,v or edge tuple (u,v)
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G.has_edge(0,1) # using two nodes
True
>>> e = (0,1)
>>> G.has_edge(*e) # e is a 2-tuple (u,v)
True
>>> e = (0,1,{'weight':7})
>>> G.has_edge(*e[:2]) # e is a 3-tuple (u,v,data_dictionary)
True
The following are all equivalent:
>>> G.has_edge(0,1)
True
>>> 1 in G[0] # though this gives KeyError if 0 not in G
True
"""
try:
return v in self.adj[u]
except KeyError:
return False
def neighbors(self, n):
"""Return a list of the nodes connected to the node n.
Parameters
----------
n : node
A node in the graph
Returns
-------
nlist : list
A list of nodes that are adjacent to n.
Raises
------
NetworkXError
If the node n is not in the graph.
Notes
-----
It is usually more convenient (and faster) to access the
adjacency dictionary as G[n]:
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_edge('a','b',weight=7)
>>> G['a']
{'b': {'weight': 7}}
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G.neighbors(0)
[1]
"""
try:
return list(self.adj[n])
except KeyError:
raise NetworkXError("The node %s is not in the graph."%(n,))
def neighbors_iter(self, n):
"""Return an iterator over all neighbors of node n.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> [n for n in G.neighbors_iter(0)]
[1]
Notes
-----
It is faster to use the idiom "in G[0]", e.g.
>>> G = nx.path_graph(4)
>>> [n for n in G[0]]
[1]
"""
try:
return iter(self.adj[n])
except KeyError:
raise NetworkXError("The node %s is not in the graph."%(n,))
def edges(self, nbunch=None, data=False):
"""Return a list of edges.
Edges are returned as tuples with optional data
in the order (node, neighbor, data).
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
data : bool, optional (default=False)
Return two-tuples (u,v) (False) or three-tuples (u,v,data) (True).
Returns
--------
edge_list: list of edge tuples
Edges that are adjacent to any node in nbunch, or a list
of all edges if nbunch is not specified.
See Also
--------
edges_iter : return an iterator over the edges
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
For directed graphs this returns the out-edges.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G.edges()
[(0, 1), (1, 2), (2, 3)]
>>> G.edges(data=True) # default edge data is {} (empty dictionary)
[(0, 1, {}), (1, 2, {}), (2, 3, {})]
>>> G.edges([0,3])
[(0, 1), (3, 2)]
>>> G.edges(0)
[(0, 1)]
"""
return list(self.edges_iter(nbunch, data))
def edges_iter(self, nbunch=None, data=False):
"""Return an iterator over the edges.
Edges are returned as tuples with optional data
in the order (node, neighbor, data).
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
data : bool, optional (default=False)
If True, return edge attribute dict in 3-tuple (u,v,data).
Returns
-------
edge_iter : iterator
An iterator of (u,v) or (u,v,d) tuples of edges.
See Also
--------
edges : return a list of edges
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
For directed graphs this returns the out-edges.
Examples
--------
>>> G = nx.Graph() # or MultiGraph, etc
>>> G.add_path([0,1,2,3])
>>> [e for e in G.edges_iter()]
[(0, 1), (1, 2), (2, 3)]
>>> list(G.edges_iter(data=True)) # default data is {} (empty dict)
[(0, 1, {}), (1, 2, {}), (2, 3, {})]
>>> list(G.edges_iter([0,3]))
[(0, 1), (3, 2)]
>>> list(G.edges_iter(0))
[(0, 1)]
"""
seen={} # helper dict to keep track of multiply stored edges
if nbunch is None:
nodes_nbrs = self.adj.items()
else:
nodes_nbrs=((n,self.adj[n]) for n in self.nbunch_iter(nbunch))
if data:
for n,nbrs in nodes_nbrs:
for nbr,data in nbrs.items():
if nbr not in seen:
yield (n,nbr,data)
seen[n]=1
else:
for n,nbrs in nodes_nbrs:
for nbr in nbrs:
if nbr not in seen:
yield (n,nbr)
seen[n] = 1
del seen
def get_edge_data(self, u, v, default=None):
"""Return the attribute dictionary associated with edge (u,v).
Parameters
----------
u,v : nodes
default: any Python object (default=None)
Value to return if the edge (u,v) is not found.
Returns
-------
edge_dict : dictionary
The edge attribute dictionary.
Notes
-----
It is faster to use G[u][v].
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G[0][1]
{}
Warning: Assigning G[u][v] corrupts the graph data structure.
But it is safe to assign attributes to that dictionary,
>>> G[0][1]['weight'] = 7
>>> G[0][1]['weight']
7
>>> G[1][0]['weight']
7
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G.get_edge_data(0,1) # default edge data is {}
{}
>>> e = (0,1)
>>> G.get_edge_data(*e) # tuple form
{}
>>> G.get_edge_data('a','b',default=0) # edge not in graph, return 0
0
"""
try:
return self.adj[u][v]
except KeyError:
return default
def adjacency_list(self):
"""Return an adjacency list representation of the graph.
The output adjacency list is in the order of G.nodes().
For directed graphs, only outgoing adjacencies are included.
Returns
-------
adj_list : lists of lists
The adjacency structure of the graph as a list of lists.
See Also
--------
adjacency_iter
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G.adjacency_list() # in order given by G.nodes()
[[1], [0, 2], [1, 3], [2]]
"""
return list(map(list,iter(self.adj.values())))
def adjacency_iter(self):
"""Return an iterator of (node, adjacency dict) tuples for all nodes.
This is the fastest way to look at every edge.
For directed graphs, only outgoing adjacencies are included.
Returns
-------
adj_iter : iterator
An iterator of (node, adjacency dictionary) for all nodes in
the graph.
See Also
--------
adjacency_list
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> [(n,nbrdict) for n,nbrdict in G.adjacency_iter()]
[(0, {1: {}}), (1, {0: {}, 2: {}}), (2, {1: {}, 3: {}}), (3, {2: {}})]
"""
return iter(self.adj.items())
def degree(self, nbunch=None, weight=None):
"""Return the degree of a node or nodes.
The node degree is the number of edges adjacent to that node.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
Returns
-------
nd : dictionary, or number
A dictionary with nodes as keys and degree as values or
a number if a single node is specified.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G.degree(0)
1
>>> G.degree([0,1])
{0: 1, 1: 2}
>>> list(G.degree([0,1]).values())
[1, 2]
"""
if nbunch in self: # return a single node
return next(self.degree_iter(nbunch,weight))[1]
else: # return a dict
return dict(self.degree_iter(nbunch,weight))
def degree_iter(self, nbunch=None, weight=None):
"""Return an iterator for (node, degree).
The node degree is the number of edges adjacent to the node.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
Returns
-------
nd_iter : an iterator
The iterator returns two-tuples of (node, degree).
See Also
--------
degree
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> list(G.degree_iter(0)) # node 0 with degree 1
[(0, 1)]
>>> list(G.degree_iter([0,1]))
[(0, 1), (1, 2)]
"""
if nbunch is None:
nodes_nbrs = self.adj.items()
else:
nodes_nbrs=((n,self.adj[n]) for n in self.nbunch_iter(nbunch))
if weight is None:
for n,nbrs in nodes_nbrs:
yield (n,len(nbrs)+(n in nbrs)) # return tuple (n,degree)
else:
# edge weighted graph - degree is sum of nbr edge weights
for n,nbrs in nodes_nbrs:
yield (n, sum((nbrs[nbr].get(weight,1) for nbr in nbrs)) +
(n in nbrs and nbrs[n].get(weight,1)))
def clear(self):
"""Remove all nodes and edges from the graph.
This also removes the name, and all graph, node, and edge attributes.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G.clear()
>>> G.nodes()
[]
>>> G.edges()
[]
"""
self.name = ''
self.adj.clear()
self.node.clear()
self.graph.clear()
def copy(self):
"""Return a copy of the graph.
Returns
-------
G : Graph
A copy of the graph.
See Also
--------
to_directed: return a directed copy of the graph.
Notes
-----
This makes a complete copy of the graph including all of the
node or edge attributes.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> H = G.copy()
"""
return deepcopy(self)
def is_multigraph(self):
"""Return True if graph is a multigraph, False otherwise."""
return False
def is_directed(self):
"""Return True if graph is directed, False otherwise."""
return False
def to_directed(self):
"""Return a directed representation of the graph.
Returns
-------
G : DiGraph
A directed graph with the same name, same nodes, and with
each edge (u,v,data) replaced by two directed edges
(u,v,data) and (v,u,data).
Notes
-----
This returns a "deepcopy" of the edge, node, and
graph attributes which attempts to completely copy
all of the data and references.
This is in contrast to the similar D=DiGraph(G) which returns a
shallow copy of the data.
See the Python copy module for more information on shallow
and deep copies, http://docs.python.org/library/copy.html.
Examples
--------
>>> G = nx.Graph() # or MultiGraph, etc
>>> G.add_path([0,1])
>>> H = G.to_directed()
>>> H.edges()
[(0, 1), (1, 0)]
If already directed, return a (deep) copy
>>> G = nx.DiGraph() # or MultiDiGraph, etc
>>> G.add_path([0,1])
>>> H = G.to_directed()
>>> H.edges()
[(0, 1)]
"""
from networkx import DiGraph
G=DiGraph()
G.name=self.name
G.add_nodes_from(self)
G.add_edges_from( ((u,v,deepcopy(data))
for u,nbrs in self.adjacency_iter()
for v,data in nbrs.items()) )
G.graph=deepcopy(self.graph)
G.node=deepcopy(self.node)
return G
def to_undirected(self):
"""Return an undirected copy of the graph.
Returns
-------
G : Graph/MultiGraph
A deepcopy of the graph.
See Also
--------
copy, add_edge, add_edges_from
Notes
-----
This returns a "deepcopy" of the edge, node, and
graph attributes which attempts to completely copy
all of the data and references.
This is in contrast to the similar G=DiGraph(D) which returns a
shallow copy of the data.
See the Python copy module for more information on shallow
and deep copies, http://docs.python.org/library/copy.html.
Examples
--------
>>> G = nx.Graph() # or MultiGraph, etc
>>> G.add_path([0,1])
>>> H = G.to_directed()
>>> H.edges()
[(0, 1), (1, 0)]
>>> G2 = H.to_undirected()
>>> G2.edges()
[(0, 1)]
"""
return deepcopy(self)
def subgraph(self, nbunch):
"""Return the subgraph induced on nodes in nbunch.
The induced subgraph of the graph contains the nodes in nbunch
and the edges between those nodes.
Parameters
----------
nbunch : list, iterable
A container of nodes which will be iterated through once.
Returns
-------
G : Graph
A subgraph of the graph with the same edge attributes.
Notes
-----
The graph, edge or node attributes just point to the original graph.
So changes to the node or edge structure will not be reflected in
the original graph while changes to the attributes will.
To create a subgraph with its own copy of the edge/node attributes use:
nx.Graph(G.subgraph(nbunch))
If edge attributes are containers, a deep copy can be obtained using:
G.subgraph(nbunch).copy()
For an inplace reduction of a graph to a subgraph you can remove nodes:
G.remove_nodes_from([n for n in G if n not in set(nbunch)])
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> H = G.subgraph([0,1,2])
>>> H.edges()
[(0, 1), (1, 2)]
"""
bunch =self.nbunch_iter(nbunch)
# create new graph and copy subgraph into it
H = self.__class__()
# copy node and attribute dictionaries
for n in bunch:
H.node[n]=self.node[n]
# namespace shortcuts for speed
H_adj=H.adj
self_adj=self.adj
# add nodes and edges (undirected method)
for n in H.node:
Hnbrs={}
H_adj[n]=Hnbrs
for nbr,d in self_adj[n].items():
if nbr in H_adj:
# add both representations of edge: n-nbr and nbr-n
Hnbrs[nbr]=d
H_adj[nbr][n]=d
H.graph=self.graph
return H
def nodes_with_selfloops(self):
"""Return a list of nodes with self loops.
A node with a self loop has an edge with both ends adjacent
to that node.
Returns
-------
nodelist : list
A list of nodes with self loops.
See Also
--------
selfloop_edges, number_of_selfloops
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_edge(1,1)
>>> G.add_edge(1,2)
>>> G.nodes_with_selfloops()
[1]
"""
return [ n for n,nbrs in self.adj.items() if n in nbrs ]
def selfloop_edges(self, data=False):
"""Return a list of selfloop edges.
A selfloop edge has the same node at both ends.
Parameters
-----------
data : bool, optional (default=False)
Return selfloop edges as two tuples (u,v) (data=False)
or three-tuples (u,v,data) (data=True)
Returns
-------
edgelist : list of edge tuples
A list of all selfloop edges.
See Also
--------
nodes_with_selfloops, number_of_selfloops
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_edge(1,1)
>>> G.add_edge(1,2)
>>> G.selfloop_edges()
[(1, 1)]
>>> G.selfloop_edges(data=True)
[(1, 1, {})]
"""
if data:
return [ (n,n,nbrs[n])
for n,nbrs in self.adj.items() if n in nbrs ]
else:
return [ (n,n)
for n,nbrs in self.adj.items() if n in nbrs ]
def number_of_selfloops(self):
"""Return the number of selfloop edges.
A selfloop edge has the same node at both ends.
Returns
-------
nloops : int
The number of selfloops.
See Also
--------
nodes_with_selfloops, selfloop_edges
Examples
--------
>>> G=nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_edge(1,1)
>>> G.add_edge(1,2)
>>> G.number_of_selfloops()
1
"""
return len(self.selfloop_edges())
def size(self, weight=None):
"""Return the number of edges.
Parameters
----------
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
Returns
-------
nedges : int
The number of edges or sum of edge weights in the graph.
See Also
--------
number_of_edges
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G.size()
3
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_edge('a','b',weight=2)
>>> G.add_edge('b','c',weight=4)
>>> G.size()
2
>>> G.size(weight='weight')
6.0
"""
s=sum(self.degree(weight=weight).values())/2
if weight is None:
return int(s)
else:
return float(s)
def number_of_edges(self, u=None, v=None):
"""Return the number of edges between two nodes.
Parameters
----------
u,v : nodes, optional (default=all edges)
If u and v are specified, return the number of edges between
u and v. Otherwise return the total number of all edges.
Returns
-------
nedges : int
The number of edges in the graph. If nodes u and v are specified
return the number of edges between those nodes.
See Also
--------
size
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G.number_of_edges()
3
>>> G.number_of_edges(0,1)
1
>>> e = (0,1)
>>> G.number_of_edges(*e)
1
"""
if u is None: return int(self.size())
if v in self.adj[u]:
return 1
else:
return 0
def add_star(self, nodes, **attr):
"""Add a star.
The first node in nodes is the middle of the star. It is connected
to all other nodes.
Parameters
----------
nodes : iterable container
A container of nodes.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to every edge in star.
See Also
--------
add_path, add_cycle
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_star([0,1,2,3])
>>> G.add_star([10,11,12],weight=2)
"""
nlist = list(nodes)
v=nlist[0]
edges=((v,n) for n in nlist[1:])
self.add_edges_from(edges, **attr)
def add_path(self, nodes, **attr):
"""Add a path.
Parameters
----------
nodes : iterable container
A container of nodes. A path will be constructed from
the nodes (in order) and added to the graph.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to every edge in path.
See Also
--------
add_star, add_cycle
Examples
--------
>>> G=nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G.add_path([10,11,12],weight=7)
"""
nlist = list(nodes)
edges=zip(nlist[:-1],nlist[1:])
self.add_edges_from(edges, **attr)
def add_cycle(self, nodes, **attr):
"""Add a cycle.
Parameters
----------
nodes: iterable container
A container of nodes. A cycle will be constructed from
the nodes (in order) and added to the graph.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to every edge in cycle.
See Also
--------
add_path, add_star
Examples
--------
>>> G=nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_cycle([0,1,2,3])
>>> G.add_cycle([10,11,12],weight=7)
"""
nlist = list(nodes)
edges=zip(nlist,nlist[1:]+[nlist[0]])
self.add_edges_from(edges, **attr)
def nbunch_iter(self, nbunch=None):
"""Return an iterator of nodes contained in nbunch that are
also in the graph.
The nodes in nbunch are checked for membership in the graph
and if not are silently ignored.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
Returns
-------
niter : iterator
An iterator over nodes in nbunch that are also in the graph.
If nbunch is None, iterate over all nodes in the graph.
Raises
------
NetworkXError
If nbunch is not a node or a sequence of nodes.
If a node in nbunch is not hashable.
See Also
--------
Graph.__iter__
Notes
-----
When nbunch is an iterator, the returned iterator yields values
directly from nbunch, becoming exhausted when nbunch is exhausted.
To test whether nbunch is a single node, one can use
"if nbunch in self:", even after processing with this routine.
If nbunch is not a node or a (possibly empty) sequence/iterator
or None, a NetworkXError is raised. Also, if any object in
nbunch is not hashable, a NetworkXError is raised.
"""
if nbunch is None: # include all nodes via iterator
bunch=iter(self.adj.keys())
elif nbunch in self: # if nbunch is a single node
bunch=iter([nbunch])
else: # if nbunch is a sequence of nodes
def bunch_iter(nlist,adj):
try:
for n in nlist:
if n in adj:
yield n
except TypeError as e:
message=e.args[0]
import sys
sys.stdout.write(message)
# capture error for non-sequence/iterator nbunch.
if 'iter' in message:
raise NetworkXError(\
"nbunch is not a node or a sequence of nodes.")
# capture error for unhashable node.
elif 'hashable' in message:
raise NetworkXError(\
"Node %s in the sequence nbunch is not a valid node."%n)
else:
raise
bunch=bunch_iter(nbunch,self.adj)
return bunch
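# Illustrative behaviour of nbunch_iter (a sketch, not part of the original source):
#   G = nx.Graph(); G.add_path([0, 1, 2, 3])
#   list(G.nbunch_iter([0, 2, 10]))   # -> [0, 2]   (10 is not in G, silently ignored)
#   list(G.nbunch_iter())             # -> [0, 1, 2, 3]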
|
from django.urls import path
from playgroup import views
app_name = 'playgroup'
urlpatterns = [
path('', views.dashboard, name='dashboard'),
path('create/', views.manage, name="create"),
path('<int:group_id>/', views.details, name="details"),
path('<int:group_id>/edit/', views.manage, name="edit"),
path('<int:group_id>/newevent/', views.newevent, name="newevent"),
]
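# Hypothetical reverse() lookups for the named routes above (illustrative only;
# the resulting paths assume this urlconf is included at the site root):
#   from django.urls import reverse
#   reverse('playgroup:dashboard')                 # -> '/'
#   reverse('playgroup:details', args=[group_id])  # -> '/<group_id>/'
#   reverse('playgroup:newevent', args=[group_id]) # -> '/<group_id>/newevent/'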
|
def validate_base_sequence(base_sequence, RNAflag=False):
"""Return True if the string base_sequence contains only upper- or lowercase T (or U, if RNAflag), C, A, G characters, otherwise False"""
seq = base_sequence.upper()
result = len(seq) == (seq.count('A') + seq.count('U' if RNAflag else 'T') +
seq.count('G') + seq.count('C'))
return result
# USAGE
print(validate_base_sequence("TTAAGGCC"))  # expected: True
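# Further usage sketches (values follow from the validation rule above):
# validate_base_sequence("AUGGCU", RNAflag=True)   # True  (U is accepted for RNA)
# validate_base_sequence("ATGX")                   # False (X is not a valid base)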
|
import sys
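# Assumption: this script is intended to run inside the p4 phylogenetics
# environment (e.g. launched via the p4 interpreter, or after `from p4 import *`),
# which provides the var, read, Data and tree objects used below.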
var.verboseRead = 0
var.warnReadNoFile = 0
var.nexus_allowAllDigitNames = True # put it somewhere else
read(sys.argv[2])
d = Data()
d.compoSummary()
read(sys.argv[3])
t = var.trees[0]
t.data = d
c1 = t.newComp(free=1, spec='empirical')
c2 = t.newComp(free=1, spec='empirical')
# Put the c1 comp on all the nodes of the tree. Then put c2 on the
# root, over-riding c1 that is already there.
t.setModelThing(c1, node=0, clade=1)
t.setModelThing(c2, node=0, clade=0)
t.newRMatrix(free=0, spec='lg') #maybe try altering this
t.setNGammaCat(nGammaCat=4)
t.newGdasrv(free=1, val=1.0)
t.setPInvar(free=0, val=0.0)
t.optLogLike()
t.tPickle(sys.argv[3] + '_optTree')
|
from hydroDL.data import gageII
from hydroDL import kPath
from hydroDL.app import waterQuality
from hydroDL.post import axplot
import pandas as pd
import numpy as np
import time
import os
import matplotlib.pyplot as plt
import importlib
# read slope data
dirCQ = os.path.join(kPath.dirWQ, 'C-Q')
mapFolder = os.path.join(dirCQ, 'slopeMap')
boxFolder = os.path.join(dirCQ, 'slopeBox')
dfSa = pd.read_csv(os.path.join(dirCQ, 'slope_a'), dtype={
'siteNo': str}).set_index('siteNo')
dfSb = pd.read_csv(os.path.join(dirCQ, 'slope_b'), dtype={
'siteNo': str}).set_index('siteNo')
dfCeq = pd.read_csv(os.path.join(dirCQ, 'kate_ceq'), dtype={
'siteNo': str}).set_index('siteNo')
dfDw = pd.read_csv(os.path.join(dirCQ, 'kate_dw'), dtype={
'siteNo': str}).set_index('siteNo')
dfN = pd.read_csv(os.path.join(dirCQ, 'nSample'), dtype={
'siteNo': str}).set_index('siteNo')
siteNoLst = dfN.index.tolist()
codeLst = dfN.columns.tolist()
dfPLst = [dfSa, dfSb, dfCeq, dfDw]
strPLst = ['slope-a', 'slope-b', 'ceq', 'dw']
if not os.path.exists(mapFolder):
os.mkdir(mapFolder)
if not os.path.exists(boxFolder):
os.mkdir(boxFolder)
# code='00955'
# v = dfSb[code].values
# v = dfDw[code].values
# fig, ax = plt.subplots(1, 1)
# temp = v[~np.isnan(v)]
# ax.hist(temp, bins=200, range=[
# np.percentile(temp, 5), np.percentile(temp, 95)])
# fig.show()
# # plot map
importlib.reload(axplot)
codePdf = waterQuality.codePdf
dfCrd = gageII.readData(varLst=['LAT_GAGE', 'LNG_GAGE'], siteNoLst=siteNoLst)
for code in codeLst:
for dfP, strP in zip(dfPLst, strPLst):
pAry = dfP[code].values
nAry = dfN[code].values
strTitle = '{} of {} [{}]'.format(
strP, codePdf['srsName'][code], codePdf['unit'][code])
strFile = strP+'_'+codePdf['shortName'][code]
ind = np.where((~np.isnan(pAry)) & (nAry > 10))[0]
lat = dfCrd['LAT_GAGE'][ind]
lon = dfCrd['LNG_GAGE'][ind]
data = pAry[ind]
vr = np.max([np.abs(np.percentile(data, 5)),
np.abs(np.percentile(data, 95))])
fig, ax = plt.subplots(1, 1, figsize=(12, 6))
axplot.mapPoint(ax, lat, lon, data, title=strTitle,
vRange=[-vr, vr], s=15)
# fig.show()
fig.savefig(os.path.join(mapFolder, strFile))
# plot box
codePdf = waterQuality.codePdf
groupLst = codePdf.group.unique().tolist()
for group in groupLst:
print(group)
codeLst = codePdf[codePdf.group == group].index.tolist()
pos = list(range(0, len(codeLst)))
for rmExtreme in [True, False]:
for dfP, strP in zip(dfPLst, strPLst):
dataLst = list()
for code in codeLst:
pAry = dfP[code].values
nAry = dfN[code].values
ind = np.where((~np.isnan(pAry)) & (nAry > 10))[0]
vr = np.max([np.abs(np.percentile(pAry[ind], 10)),
np.abs(np.percentile(pAry[ind], 90))])
if rmExtreme is True:
ind = np.where((~np.isnan(pAry)) & (nAry > 10) & (
pAry <= vr) & (pAry >= -vr))[0]
dataLst.append(pAry[ind])
fig, ax = plt.subplots(1, 1, figsize=(12, 6))
ax.violinplot(dataLst, pos, points=500, widths=1,
showmeans=True, showextrema=True)
ax.set_xticks(pos)
ax.set_xticklabels(codePdf.shortName[codeLst].tolist())
if rmExtreme is True:
ax.set_title('{} of {} variables, 10%-90%'.format(strP, group))
fig.savefig(os.path.join(boxFolder, group+'_'+strP+'_rmE'))
else:
ax.set_title('{} of {} variables'.format(strP,group))
fig.savefig(os.path.join(boxFolder, group+'_'+strP))
# fig.show()
|
#!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import shutil
import tensorflow.contrib.learn as tflearn
import tensorflow.contrib.layers as tflayers
from tensorflow.contrib.learn.python.learn import learn_runner
import tensorflow.contrib.metrics as metrics
import tensorflow.contrib.rnn as rnn
tf.logging.set_verbosity(tf.logging.INFO)
SEQ_LEN = 10
DEFAULTS = [[0.0] for x in range(0, SEQ_LEN)]
BATCH_SIZE = 20
TIMESERIES_COL = 'rawdata'
N_OUTPUTS = 2 # in each sequence, 1-8 are features, and 9-10 is label
N_INPUTS = SEQ_LEN - N_OUTPUTS
# read data and convert to needed format
def read_dataset(filename, mode=tf.contrib.learn.ModeKeys.TRAIN):
def _input_fn():
num_epochs = 100 if mode == tf.contrib.learn.ModeKeys.TRAIN else 1
# could be a path to one file or a file pattern.
input_file_names = tf.train.match_filenames_once(filename)
filename_queue = tf.train.string_input_producer(
input_file_names, num_epochs=num_epochs, shuffle=True)
reader = tf.TextLineReader()
_, value = reader.read_up_to(filename_queue, num_records=BATCH_SIZE)
value_column = tf.expand_dims(value, -1)
print('readcsv={}'.format(value_column))
# all_data is a list of tensors
all_data = tf.decode_csv(value_column, record_defaults=DEFAULTS)
inputs = all_data[:len(all_data)-N_OUTPUTS] # first few values
label = all_data[len(all_data)-N_OUTPUTS : ] # last few values
# from list of tensors to tensor with one more dimension
inputs = tf.concat(inputs, axis=1)
label = tf.concat(label, axis=1)
print('inputs={}'.format(inputs))
return {TIMESERIES_COL: inputs}, label # dict of features, label
return _input_fn
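# Illustrative wiring only (the file name 'train.csv' is an assumption):
# train_input_fn = read_dataset('train.csv', mode=tf.contrib.learn.ModeKeys.TRAIN)
# features, label = train_input_fn()  # graph-mode tensors consumed by an Estimator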
LSTM_SIZE = 3 # number of hidden layers in each of the LSTM cells
# create the inference model
def simple_rnn(features, targets, mode):
# 0. Reformat input shape to become a sequence
x = tf.split(features[TIMESERIES_COL], N_INPUTS, 1)
#print 'x={}'.format(x)
# 1. configure the RNN
lstm_cell = rnn.BasicLSTMCell(LSTM_SIZE, forget_bias=1.0)
outputs, _ = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
# slice to keep only the last cell of the RNN
outputs = outputs[-1]
#print 'last outputs={}'.format(outputs)
# output is result of linear activation of last layer of RNN
weight = tf.Variable(tf.random_normal([LSTM_SIZE, N_OUTPUTS]))
bias = tf.Variable(tf.random_normal([N_OUTPUTS]))
predictions = tf.matmul(outputs, weight) + bias
# 2. loss function, training/eval ops
if mode == tf.contrib.learn.ModeKeys.TRAIN or mode == tf.contrib.learn.ModeKeys.EVAL:
loss = tf.losses.mean_squared_error(targets, predictions)
train_op = tf.contrib.layers.optimize_loss(
loss=loss,
global_step=tf.contrib.framework.get_global_step(),
learning_rate=0.01,
optimizer="SGD")
eval_metric_ops = {
"rmse": tf.metrics.root_mean_squared_error(targets, predictions)
}
else:
loss = None
train_op = None
eval_metric_ops = None
# 3. Create predictions
predictions_dict = {"predicted": predictions}
# 4. return ModelFnOps
return tflearn.ModelFnOps(
mode=mode,
predictions=predictions_dict,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops)
def serving_input_fn():
feature_placeholders = {
TIMESERIES_COL: tf.placeholder(tf.float32, [None, N_INPUTS])
}
features = {
key: tf.expand_dims(tensor, -1)
for key, tensor in feature_placeholders.items()
}
features[TIMESERIES_COL] = tf.squeeze(features[TIMESERIES_COL], axis=[2])
return tflearn.utils.input_fn_utils.InputFnOps(
features,
None,
feature_placeholders
)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
def xpath_soup(element):
"""
Generate xpath from BeautifulSoup4 element
:param element: BeautifulSoup4 element.
:type element: bs4.element.Tag or bs4.element.NavigableString
:return: xpath as string
:rtype: str
Usage:
>>> import bs4
>>> html = (
... '<html><head><title>title</title></head>'
... '<body><p>p <i>1</i></p><p>p <i>2</i></p></body></html>'
... )
>>> soup = bs4.BeautifulSoup(html, 'html.parser')
>>> xpath_soup(soup.html.body.p.i)
'/html/body/p[1]/i'
"""
components = []
child = element if element.name else element.parent
for parent in child.parents:
"""
@type parent: bs4.element.Tag
"""
siblings = parent.find_all(child.name, recursive=False)
components.append(
child.name
if siblings == [child] else
'%s[%d]' % (child.name, 1 + siblings.index(child))
)
child = parent
components.reverse()
return '/%s' % '/'.join(components)
if __name__ == '__main__':
import doctest
doctest.testmod(verbose=True)
|
# NeoPixel Color Picker demo - wire up some NeoPixels and set their color
# using Adafruit Bluefruit Connect App on your phone
import time
import busio
import board
from digitalio import DigitalInOut
from adafruit_bluefruitspi import BluefruitSPI
import neopixel
ADVERT_NAME = b"BlinkaNeoLamp"
# 16 neopixels on a digital pin, adjust as necessary!
pixels = neopixel.NeoPixel(board.D5, 16)
pixels.fill(0)
spi_bus = busio.SPI(board.SCK, MOSI=board.MOSI, MISO=board.MISO)
cs = DigitalInOut(board.D8)
irq = DigitalInOut(board.D7)
rst = DigitalInOut(board.D4)
bluefruit = BluefruitSPI(spi_bus, cs, irq, rst, debug=False)
def init_bluefruit():
# Initialize the device and perform a factory reset
print("Initializing the Bluefruit LE SPI Friend module")
bluefruit.init()
bluefruit.command_check_OK(b"AT+FACTORYRESET", delay=1)
# Print the response to 'ATI' (info request) as a string
print(str(bluefruit.command_check_OK(b"ATI"), "utf-8"))
# Change advertised name
bluefruit.command_check_OK(b"AT+GAPDEVNAME=" + ADVERT_NAME)
def wait_for_connection():
print("Waiting for a connection to Bluefruit LE Connect ...")
# Wait for a connection ...
dotcount = 0
while not bluefruit.connected:
print(".", end="")
dotcount = (dotcount + 1) % 80
if dotcount == 79:
print("")
time.sleep(0.5)
# This code will check the connection but only query the module if it has been
# at least 'n_sec' seconds. Otherwise it 'caches' the response, to keep from
# hogging the Bluefruit connection with constant queries
connection_timestamp = None
is_connected = None
def check_connection(n_sec):
# pylint: disable=global-statement
global connection_timestamp, is_connected
if (not connection_timestamp) or (time.monotonic() - connection_timestamp > n_sec):
connection_timestamp = time.monotonic()
is_connected = bluefruit.connected
return is_connected
# Unlike most circuitpython code, this runs in two loops
# one outer loop manages reconnecting bluetooth if we lose connection
# then one inner loop for doing what we want when connected!
while True:
# Initialize the module
try: # Wireless connections can have corrupt data or other runtime failures
# This try block will reset the module if that happens
init_bluefruit()
wait_for_connection()
print("\n *Connected!*")
# Once connected, check for incoming BLE UART data
while check_connection(3): # Check our connection status every 3 seconds
# OK we're still connected, see if we have any data waiting
resp = bluefruit.read_packet()
if not resp:
continue # nothin'
print("Read packet", resp)
# Look for a 'C'olor packet
if resp[0] != "C":
continue
# Set the neopixels to the three bytes in the packet
pixels.fill(resp[1:4])
print("Connection lost.")
except RuntimeError as e:
print(e) # Print what happened
continue # retry!
|
"""Module that holds success messages template"""
audit_messages = {
'created': 'created a new {} ',
'retrieved': 'retrieved {} successfully',
'updated': 'updated {} with id {} ',
'deleted': 'deleted favorite things with id {}'
}
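# Usage sketch (hypothetical arguments): fill the placeholders with str.format
# audit_messages['created'].format('category')      # -> 'created a new category '
# audit_messages['updated'].format('category', 7)   # -> 'updated category with id 7 '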
|
from allennlp.modules.token_embedders.pretrained_transformer_mismatched_embedder import PretrainedTransformerMismatchedEmbedder
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
from typing import Optional, Dict, Any, List, Union
from .adapters import Adapter, AdapterBertLayer
import torch.nn as nn
from allennlp.common.checks import ConfigurationError
from transformers import BertModel, ElectraModel, RobertaModel
@TokenEmbedder.register('adapter_bert')
class AdapterBertMismatchedEmbedder(PretrainedTransformerMismatchedEmbedder):
def __init__(
self,
model_name: str,
adapters: List[Dict[str, Union[Dict[str, Any], List[int]]]] = [{"layers": [i], "params": dict()} for i in range(12)],
max_length: int = None,
train_parameters: bool = False,
last_layer_only: bool = True,
override_weights_file: Optional[str] = None,
override_weights_strip_prefix: Optional[str] = None,
load_weights: bool = True,
gradient_checkpointing: Optional[bool] = None,
tokenizer_kwargs: Optional[Dict[str, Any]] = None,
transformer_kwargs: Optional[Dict[str, Any]] = None,
sub_token_mode: Optional[str] = "avg",
) -> None:
super().__init__(
model_name,
max_length=max_length,
train_parameters=train_parameters,
last_layer_only=last_layer_only,
override_weights_file=override_weights_file,
override_weights_strip_prefix=override_weights_strip_prefix,
load_weights=load_weights,
gradient_checkpointing=gradient_checkpointing,
tokenizer_kwargs=tokenizer_kwargs,
transformer_kwargs=transformer_kwargs,
sub_token_mode=sub_token_mode
)
self.adapters_groups = self.insert_adapters(adapters)
def insert_adapters(
self,
adapters: List[Dict[str, Union[List[int], Dict[str, Any]]]]
) -> nn.ModuleList:
if not isinstance(self._matched_embedder.transformer_model, (BertModel, ElectraModel, RobertaModel)):
raise ConfigurationError("Only *BERT-style architectures (BertModel, ElectraModel, RobertaModel) are supported")
adapters_groups = nn.ModuleList()
for adapter in adapters:
adapter_a = Adapter(self.get_output_dim(), **adapter['params'])
adapter_f = Adapter(self.get_output_dim(), **adapter['params'])
for i in adapter['layers']:
layer = self._matched_embedder.transformer_model.encoder.layer[i]
layer.output = AdapterBertLayer(layer.output, adapter_a)
layer.attention.output = AdapterBertLayer(layer.attention.output, adapter_f)
adapters_groups.append(nn.ModuleList([adapter_a, adapter_f]))
return adapters_groups
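# A hypothetical AllenNLP config fragment that would select this embedder
# (the model name and adapter layout below are assumptions, not taken from this file):
# "text_field_embedder": {
#     "token_embedders": {
#         "tokens": {
#             "type": "adapter_bert",
#             "model_name": "bert-base-uncased",
#             "adapters": [{"layers": [0, 1, 2], "params": {}}]
#         }
#     }
# }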
|
import os
import xml.etree.ElementTree as ET
import sqlite3
import tempfile
from . import (AError, ACommandLineTool, AOS)
_commandlineTool = ACommandLineTool.CommandLineTool('svn')
_commandlineTool.checkExistence()
def getCommandLineTool():
return _commandlineTool
class SVN_Error(AError.Error):
pass
class SVN_SubCommandError(SVN_Error):
def __init__(self, cmd):
self.cmd = cmd
def __str__(self):
return 'svn: execute sub command(%s) failed' % self.cmd
class SVN_NoMessageError(SVN_SubCommandError):
def __init__(self, cmd):
SVN_SubCommandError.__init__(self, cmd)
def __str__(self):
return SVN_SubCommandError.__str__(self) + ": message(with -m) can't be empty"
class SVN_AlreadyLockedError(SVN_Error):
def __init__(self, path, lockOwner, lockComment, lockDate):
self.path = path
self.lockOwner = lockOwner
self.lockComment = lockComment
self.lockDate = lockDate
def __str__(self):
return "svn: path '%s' already locked by user '%s' at %s%s" % (
self.path, self.lockOwner, self.lockDate, '' if self.lockComment == '' else ': ' + self.lockComment
)
class SVN_BranchDestinationAlreadyExistError(SVN_Error):
def __init__(self, dst):
self.dst = dst
def __str__(self):
return "svn: branch destination '%s' already exist" % self.dst
RESOLVE_ACCEPT_BASE = 'base'
RESOLVE_ACCEPT_WORKING = 'working'
RESOLVE_ACCEPT_MINE_CONFLICT = 'mine-conflict'
RESOLVE_ACCEPT_THEIRS_CONFLICT = 'theirs-conflict'
RESOLVE_ACCEPT_MINE_FULL = 'mine-full'
RESOLVE_ACCEPT_THEIRS_FULL = 'theirs-full'
def makeUserPassOptionStr(userpass):
s = ''
if userpass:
s += '--username ' + userpass[0]
s += ' --password ' + userpass[1]
s += ' --no-auth-cache'
return s
def makeRevisionOptionStr(revision):
"""
:param revision: a revision number, or string('HEAD', 'BASE', 'COMMITTED', 'PREV'), or revision range tuple
"""
if not revision:
return ''
# some command(svn log...) support revision range
if isinstance(revision, tuple) or isinstance(revision, list):
return '-r %s:%s' % (revision[0], revision[1])
return '-r %s' % revision
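# Illustrative outputs, derived from the logic above:
#   makeRevisionOptionStr(None)      -> ''
#   makeRevisionOptionStr('HEAD')    -> '-r HEAD'
#   makeRevisionOptionStr((5, 10))   -> '-r 5:10'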
def makeMessageOptionStr(message):
s = ''
if message:
s += '-m "%s"' % message
return s
def execSubCommand(cmdline):
cmdline += ' --non-interactive'
_commandlineTool.execCommand(cmdline)
def execOutputSubCommand(cmdline):
cmdline += ' --non-interactive'
return _commandlineTool.execOutputCommand(cmdline)
def isURL(url):
for prefix in ('file://', 'svn://', 'http://', 'https://'):
if url.startswith(prefix):
return True
return False
def isSVNPath(path, userpass=None):
cmd = 'info'
cmd += ' ' + path
cmd += makeUserPassOptionStr(userpass)
try:
execOutputSubCommand(cmd)
except AOS.OS_SystemOutputError:
return False
return True
def infoDict(pathOrURL, revision=None, userpass=None):
"""
:param pathOrURL: working copy path or remote url
"""
cmd = 'info'
cmd += ' ' + pathOrURL
cmd += ' --xml'
cmd += ' ' + makeRevisionOptionStr(revision)
cmd += ' ' + makeUserPassOptionStr(userpass)
result = execOutputSubCommand(cmd)
root = ET.fromstring(result)
entryNode = root.find('entry')
ret = {}
ret['#kind'] = entryNode.attrib['kind']
ret['#path'] = entryNode.attrib['path']
ret['#revision'] = int(entryNode.attrib['revision']) # revision of the whole working copy
ret['url'] = entryNode.find('url').text
repoNode = entryNode.find('repo')
if repoNode is None:
repoNode = entryNode.find('repository') # for compatible with old svn version such as v1.7.14
repo = {}
ret['repo'] = repo
repo['root'] = repoNode.find('root').text
repo['uuid'] = repoNode.find('uuid').text
relativeURLNode = entryNode.find('relative-url')
if relativeURLNode is None: # relative-url not supported by svn 1.7.14(installed by yum in CentOS-7)
ret['relative-url'] = '^' + ret['url'][len(repo['root']):]
else:
ret['relative-url'] = relativeURLNode.text
wcInfoNode = entryNode.find('wc-info')
if wcInfoNode is not None: # svn info url has no wc-info node
wcInfo = {}
ret['wc-info'] = wcInfo
wcInfo['wcroot-abspath'] = wcInfoNode.find('wcroot-abspath').text
wcInfo['schedule'] = wcInfoNode.find('schedule').text
wcInfo['depth'] = wcInfoNode.find('depth').text
commitNode = entryNode.find('commit')
commit = {}
ret['commit'] = commit
commit['#revision'] = int(commitNode.attrib['revision']) # revision of the current directory or file
commitAuthorNode = commitNode.find('author') # author can be None if the repo has revision 0
if commitAuthorNode != None:
commit['author'] = commitAuthorNode.text
commit['date'] = commitNode.find('date').text
lockNode = entryNode.find('lock')
if lockNode is not None:
lock = {}
ret['lock'] = lock
lock['token'] = lockNode.find('token').text
lock['owner'] = lockNode.find('owner').text
lockCommentNode = lockNode.find('comment')
lock['comment'] = '' if lockCommentNode is None else lockCommentNode.text
lock['created'] = lockNode.find('created').text
return ret
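# Usage sketch (hypothetical working-copy path): inspect the parsed `svn info` result
# info = infoDict('/path/to/working-copy')
# print(info['#revision'], info['relative-url'], info['commit']['#revision'])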
def checkout(url, path, revision=None, userpass=None):
cmd = 'checkout'
cmd += ' ' + url + ' ' + path
cmd += ' ' + makeRevisionOptionStr(revision)
cmd += ' ' + makeUserPassOptionStr(userpass)
execSubCommand(cmd)
def update(path, revision=None, userpass=None):
cmd = 'update'
cmd += ' ' + path
cmd += ' ' + makeRevisionOptionStr(revision)
cmd += ' ' + makeUserPassOptionStr(userpass)
execSubCommand(cmd)
def checkoutOrUpdate(url, path, revision=None, userpass=None):
if os.path.exists(path):
update(path, revision, userpass)
else:
checkout(url, path, revision, userpass)
def export(pathOrURL, path, revision=None, userpass=None):
cmd = 'export'
cmd += ' ' + pathOrURL + ' ' + path
cmd += ' ' + makeRevisionOptionStr(revision)
cmd += ' ' + makeUserPassOptionStr(userpass)
execSubCommand(cmd)
def resolve(path, acceptOption, recursive=True, quiet=True):
"""
:param acceptOption: RESOLVE_ACCEPT_XXX
"""
cmd = 'resolve'
cmd += ' ' + path
if recursive:
cmd += ' -R'
if quiet:
cmd += ' -q'
cmd += ' --accept ' + acceptOption
execSubCommand(cmd)
def clearWorkQueue(path):
"""
Do this action maybe useful if cleanup failed
:param path: must be a working-copy root dir
"""
conn = sqlite3.connect(os.path.join(path, '.svn', 'wc.db'))
conn.execute('DELETE FROM work_queue')
def cleanup(path):
execSubCommand('cleanup %s' % path)
def revert(path, recursive=True):
cmd = 'revert ' + path
if recursive:
cmd += ' -R'
execSubCommand(cmd)
def easyClearEverything(path):
clearWorkQueue(path)
cleanup(path)
revert(path)
def add(path, force=True):
"""
:param path: can be file or dir
"""
cmd = 'add'
cmd += ' ' + path
if force:
cmd += ' --force'
execSubCommand(cmd)
def commit(path, includeExternals=False, message=None, userpass=None):
cmd = 'commit'
cmd += ' ' + path
if includeExternals:
cmd += ' --include-externals'
cmd += ' ' + makeMessageOptionStr(message)
cmd += ' ' + makeUserPassOptionStr(userpass)
execSubCommand(cmd)
def log(pathOrURL, limit=None, verbose=False, searchPattern=None, revision=None, userpass=None):
"""
:param pathOrURL: working copy path or remote url
:param limit: when the revision is a range, limit the record count
:param verbose:
:param searchPattern:
- search in the limited records(by param limit)
- matches any of the author, date, log message text, if verbose is True also a changed path
- The search pattern use "glob syntax" wildcards
? matches any single character
* matches a sequence of arbitrary characters
[abc] matches any of the characters listed inside the brackets
example:
revision=(5, 10) limit=2 output: 5, 6
revision=(10, 5) limit=2 output: 10, 9
    :param revision: single revision number or revision range tuple/list
- if range specified, format as (5, 10) or (10, 50) are both supported
- for (5, 10): return list ordered by 5 -> 10
- for (10, 5): return list ordered by 10 -> 5
- the bound revision 5 or 10 also included
"""
cmd = 'log'
cmd += ' ' + pathOrURL
cmd += ' --xml'
if limit is not None:
cmd += ' -l %s' % limit
if verbose:
cmd += ' -v'
if searchPattern is not None:
cmd += ' --search %s' % searchPattern
cmd += ' ' + makeRevisionOptionStr(revision)
cmd += ' ' + makeUserPassOptionStr(userpass)
result = execOutputSubCommand(cmd)
root = ET.fromstring(result)
ret = []
for logentryNode in root.iterfind('logentry'):
logentry = {}
ret.append(logentry)
logentry['#revision'] = logentryNode.attrib['revision']
logentry['author'] = logentryNode.find('author').text
logentry['date'] = logentryNode.find('date').text
logentry['msg'] = logentryNode.find('msg').text
pathsNode = logentryNode.find('paths')
if pathsNode is not None:
paths = []
logentry['paths'] = paths
for path_node in pathsNode.iterfind('path'):
path = {}
paths.append(path)
path['#'] = path_node.text
path['#prop-mods'] = True if path_node.attrib['prop-mods']=='true' else False
path['#text-mods'] = True if path_node.attrib['text-mods']=='true' else False
path['#kind'] = path_node.attrib['kind']
path['#action'] = path_node.attrib['action']
return ret
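# A minimal, hedged usage sketch for log() (path, URL and revisions are illustrative):
#   entries = log('/path/to/working-copy', limit=2, revision=(5, 10))
#   # -> [{'#revision': ..., 'author': ..., 'date': ..., 'msg': ...}, ...] for r5 then r6
#   entries = log('http://svn.example.com/repo/trunk', verbose=True, searchPattern='fix*')
#   # verbose=True adds a 'paths' list describing the files changed in each revision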
def removeNotVersioned(path):
for line in execOutputSubCommand('status ' + path).splitlines():
if len(line) > 0 and line[0] == '?':
AOS.removePath(line[8:])
def propset(path, key, value):
execSubCommand('propset svn:%s %s %s' % (key, value, path))
def propsetByTempFile(path, key, value):
with AOS.TempFile(value) as f:
execSubCommand('propset svn:%s -F %s %s' % (key, f.path, path))
def propsetExternals(dir, externalPairs):
"""
    Set svn:externals on dir so that its subdirectories are linked to other locations
    :param dir: the directory the externals are set on
    :param externalPairs: list of (subDir, externalDir[, optional revision number])
        externalDir can be a URL, or a location in the working copy relative to dir
"""
value = ''
for pair in externalPairs:
if len(pair) == 3:
value += '-r%s %s %s\n' % (pair[2], pair[1], pair[0])
elif len(pair) == 2:
value += '%s %s\n' % (pair[1], pair[0])
else:
raise SVN_Error("invalid externalPairs")
propsetByTempFile(dir, 'externals', value)
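# A minimal, hedged usage sketch for propsetExternals() (directory names are illustrative):
#   propsetExternals('project', [
#       ('libs/common', '^/common/trunk'),        # external without a pinned revision
#       ('libs/legacy', '^/legacy/trunk', 1234),  # external pinned to r1234
#   ])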
def lock(filePath, message=None, userpass=None):
"""
:except:
SVN_AlreadyLockedError: if lock failure
"""
cmd = 'lock'
cmd += ' ' + filePath
cmd += ' ' + makeMessageOptionStr(message)
cmd += ' ' + makeUserPassOptionStr(userpass)
result = execOutputSubCommand(cmd)
if result[0:4] == 'svn:':
if isURL(filePath):
raise SVN_AlreadyLockedError(filePath, 'None', 'None', 'None')
else:
info = infoDict(filePath, userpass=userpass)['lock']
raise SVN_AlreadyLockedError(filePath, info['owner'], info['comment'], info['created'])
def unlock(filePath, userpass=None):
cmd = 'unlock'
cmd += ' ' + filePath
cmd += ' --force'
cmd += ' ' + makeUserPassOptionStr(userpass)
execSubCommand(cmd)
def move(src, dst, message=None, userpass=None):
cmd = 'move'
cmd += ' ' + src + ' ' + dst
cmd += ' --force'
cmd += ' --parents'
cmd += ' ' + makeMessageOptionStr(message)
cmd += ' ' + makeUserPassOptionStr(userpass)
execSubCommand(cmd)
def branch(src, dst, message=None, revision=None, userpass=None):
try:
infoDict(dst)
raise SVN_BranchDestinationAlreadyExistError(dst)
except AOS.OS_SystemOutputError:
pass
    cmd = 'copy ' + src + ' ' + dst
cmd += ' --parents'
cmd += ' ' + makeMessageOptionStr(message)
cmd += ' ' + makeRevisionOptionStr(revision)
cmd += ' ' + makeUserPassOptionStr(userpass)
execSubCommand(cmd)
def rollback(path, revision):
"""
rollback path changes made by commits in revision
"""
if isinstance(revision, tuple) or isinstance(revision, list):
startRevision = infoDict(path, revision[0])['#revision']
endRevision = infoDict(path, revision[1])['#revision']
if startRevision < endRevision:
startRevision, endRevision = endRevision, startRevision
revision = (startRevision, endRevision-1) if isinstance(revision, tuple) else [startRevision, endRevision-1]
else:
revision = infoDict(path, revision)['#revision']
revision = '-%d' % revision
cmd = 'merge '
cmd += ' ' + makeRevisionOptionStr(revision)
cmd += ' ' + path
cmd += ' ' + path
execSubCommand(cmd)
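# A minimal, hedged usage sketch for rollback() (path and revisions are illustrative):
#   rollback('/path/to/working-copy', 1234)          # reverse-merge the change made in r1234
#   rollback('/path/to/working-copy', (1230, 1234))  # reverse-merge a whole revision range
#   # the reverse merge only changes the working copy; commit() is still needed to publish it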
|
import json
from lbrynet.testcase import CommandTestCase
class ResolveCommand(CommandTestCase):
async def test_resolve(self):
tx = await self.channel_create('@abc', '0.01')
channel_id = tx['outputs'][0]['claim_id']
# resolving a channel @abc
response = await self.resolve('lbry://@abc')
self.assertSetEqual({'lbry://@abc'}, set(response))
self.assertIn('certificate', response['lbry://@abc'])
self.assertNotIn('claim', response['lbry://@abc'])
self.assertEqual(response['lbry://@abc']['certificate']['name'], '@abc')
self.assertEqual(response['lbry://@abc']['claims_in_channel'], 0)
await self.stream_create('foo', '0.01', channel_id=channel_id)
await self.stream_create('foo2', '0.01', channel_id=channel_id)
# resolving a channel @abc with some claims in it
response = await self.resolve('lbry://@abc')
self.assertSetEqual({'lbry://@abc'}, set(response))
self.assertIn('certificate', response['lbry://@abc'])
self.assertNotIn('claim', response['lbry://@abc'])
self.assertEqual(response['lbry://@abc']['certificate']['name'], '@abc')
self.assertEqual(response['lbry://@abc']['claims_in_channel'], 2)
# resolving claim foo within channel @abc
response = await self.resolve('lbry://@abc/foo')
self.assertSetEqual({'lbry://@abc/foo'}, set(response))
claim = response['lbry://@abc/foo']
self.assertIn('certificate', claim)
self.assertIn('claim', claim)
self.assertEqual(claim['claim']['name'], 'foo')
self.assertEqual(claim['claim']['channel_name'], '@abc')
self.assertEqual(claim['certificate']['name'], '@abc')
self.assertEqual(claim['claims_in_channel'], 0)
# resolving claim foo by itself
response = await self.resolve('lbry://foo')
self.assertSetEqual({'lbry://foo'}, set(response))
claim = response['lbry://foo']
self.assertIn('certificate', claim)
self.assertIn('claim', claim)
self.assertEqual(claim['claim']['name'], 'foo')
self.assertEqual(claim['claim']['channel_name'], '@abc')
self.assertEqual(claim['certificate']['name'], '@abc')
self.assertEqual(claim['claims_in_channel'], 0)
# resolving from the given permanent url
new_response = await self.resolve(claim['claim']['permanent_url'])
self.assertEqual(new_response[claim['claim']['permanent_url']], claim)
# resolving multiple at once
response = await self.resolve(['lbry://foo', 'lbry://foo2'])
self.assertSetEqual({'lbry://foo', 'lbry://foo2'}, set(response))
claim = response['lbry://foo2']
self.assertIn('certificate', claim)
self.assertIn('claim', claim)
self.assertEqual(claim['claim']['name'], 'foo2')
self.assertEqual(claim['claim']['channel_name'], '@abc')
self.assertEqual(claim['certificate']['name'], '@abc')
self.assertEqual(claim['claims_in_channel'], 0)
# resolve has correct depth
tx_details = await self.blockchain.get_raw_transaction(claim['claim']['txid'])
self.assertEqual(claim['claim']['depth'], json.loads(tx_details)['confirmations'])
# resolve handles invalid data
txid = await self.blockchain_claim_name("gibberish", "cafecafe", "0.1")
await self.generate(1)
response = await self.resolve("lbry://gibberish")
self.assertSetEqual({'lbry://gibberish'}, set(response))
claim = response['lbry://gibberish']['claim']
self.assertEqual(claim['name'], 'gibberish')
self.assertEqual(claim['hex'], 'cafecafe')
self.assertFalse(claim['decoded_claim'])
self.assertEqual(claim['txid'], txid)
self.assertEqual(claim['effective_amount'], "0.1")
|
from bot_settings import welcome_channel_id, default_role, server_id
from discord.utils import get
import logging
async def on_ready(client):
logging.getLogger('BotEvents').debug(f"Logged in successfully as {client.user}!")
async def on_message(client,message):
content = message.content if message.content else "(Not printable)"
logging.getLogger('BotEvents').debug(f"Message from {message.author}: {content}.")
async def on_member_join(client,member):
if default_role:
server = client.get_guild(server_id)
role = get(server.roles,name=default_role)
await member.add_roles(role)
await client.get_channel(welcome_channel_id).send(f"{member.name} has joined!") |
# -*- coding: utf-8 -*-
import math
import time
# Drawing settings
# ELAPSED_DAY_CHAR='H' # character used to draw days already elapsed
# ELAPSED_DAY_CHAR="\u2591" # character used to draw days already elapsed ░
# ELAPSED_DAY_CHAR="\u2764" # character used to draw days already elapsed ❤
ELAPSED_DAY_CHAR="\u2665" # character used to draw days already elapsed ♥
# REMAIN_DAY_CHAR='\u002D' # character used to draw the days remaining in the year -
#REMAIN_DAY_CHAR='\u2500' # character used to draw the days remaining in the year ━
REMAIN_DAY_CHAR='\u2661' # character used to draw the days remaining in the year ♡
# Number of characters in the bar
BAR_NUM = 20
BAR_NUM_FACTOR = round(100/BAR_NUM)
Leap_monthDays=[31,29,31,30,31,30,31,31,30,31,30,31]
Ping_monthDays=[31,28,31,30,31,30,31,31,30,31,30,31]
def isLeapYear(year):
retVal=0
if((0 == (year % 4)) and (0 != (year % 100))):
retVal = 1
elif(0 == (year % 400)):
retVal = 1
return retVal
def elapsedDaysInYear(year,mon,day):
LeapYear = isLeapYear(year)
if(1 == LeapYear):
pMonthDaysArray = Leap_monthDays
else:
pMonthDaysArray = Ping_monthDays
sum = 0
i = 0
while(i < (mon -1)):
sum += pMonthDaysArray[i]
i = i + 1
sum += day - 1
return sum
def get_date():
loc_time = time.localtime(time.time())
#print("now datetime:",loc_time.tm_year,"\\",loc_time.tm_mon,"\\,",loc_time.tm_mday, loc_time.tm_hour,":",loc_time.tm_min,":",loc_time.tm_sec,"\n")
return loc_time.tm_year,loc_time.tm_mon,loc_time.tm_mday
def generateText():
year,mon,day = get_date()
# year,mon,day = 2020,12,31
# year,mon,day = 2020,1,2
a = elapsedDaysInYear(year,mon,day)
if(1 == isLeapYear(year)):
b = 366
else:
b = 365
p = a*100/b
print(a,b,p)
    #if(a < 5):
    # round up
    # p=math.ceil(a*100/b)
    #elif(a > 350):
    # round down
    # p=int(a*100/b)
    #else:
    # round to nearest
    # p=round(a*100/b)
    #print(str(year) + ": " + str(a) + " days passed, " + str(b-a) + " days left\n")
    #print('%d: %d days passed, %d days left.\n' % (year, a, b-a))
text ='{0}年已经过{1}天,还剩{2}天,余额{3:.1f}%。\n'.format(year,a,b-a,100-p)
#print(text)
return text,p
def generateBar(text,percentage):
# bar='{0}% ['.format(percentage)
bar=''
tmp = math.ceil(percentage/BAR_NUM_FACTOR)
for i in range(int(100/BAR_NUM_FACTOR)):
if(i<tmp):
bar=bar+ELAPSED_DAY_CHAR
else:
bar=bar+REMAIN_DAY_CHAR
bar=bar+' {0:.1f}%\n'.format(percentage)
bar=text+bar
return bar
def GetText():
text,p = generateText()
#print(text)
bar = generateBar(text,p)
return bar
if __name__ == '__main__':
text,p = generateText()
print(text)
bar = generateBar(text,p)
print(bar)
|
"""
Python Daemonizing helper
Originally based on code Copyright (C) 2005 Chad J. Schroeder but now heavily modified
to allow a function to be daemonized and return for bitbake use by Richard Purdie
"""
import os
import sys
import io
import traceback
def createDaemon(function, logfile):
"""
Detach a process from the controlling terminal and run it in the
background as a daemon, returning control to the caller.
"""
try:
# Fork a child process so the parent can exit. This returns control to
# the command-line or shell. It also guarantees that the child will not
# be a process group leader, since the child receives a new process ID
        # and inherits the parent's process group ID. This step is required
        # to ensure that the next call to os.setsid is successful.
pid = os.fork()
except OSError as e:
raise Exception("%s [%d]" % (e.strerror, e.errno))
if (pid == 0): # The first child.
# To become the session leader of this new session and the process group
# leader of the new process group, we call os.setsid(). The process is
# also guaranteed not to have a controlling terminal.
os.setsid()
try:
# Fork a second child and exit immediately to prevent zombies. This
# causes the second child process to be orphaned, making the init
# process responsible for its cleanup. And, since the first child is
# a session leader without a controlling terminal, it's possible for
# it to acquire one by opening a terminal in the future (System V-
# based systems). This second fork guarantees that the child is no
# longer a session leader, preventing the daemon from ever acquiring
# a controlling terminal.
pid = os.fork() # Fork a second child.
except OSError as e:
raise Exception("%s [%d]" % (e.strerror, e.errno))
if (pid != 0):
# Parent (the first child) of the second child.
# exit() or _exit()?
# _exit is like exit(), but it doesn't call any functions registered
# with atexit (and on_exit) or any registered signal handlers. It also
# closes any open file descriptors. Using exit() may cause all stdio
# streams to be flushed twice and any temporary files may be unexpectedly
# removed. It's therefore recommended that child branches of a fork()
# and the parent branch(es) of a daemon use _exit().
os._exit(0)
else:
os.waitpid(pid, 0)
return
# The second child.
# Replace standard fds with our own
si = open('/dev/null', 'r')
os.dup2(si.fileno(), sys.stdin.fileno())
try:
so = open(logfile, 'a+')
se = so
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
except io.UnsupportedOperation:
sys.stdout = open(logfile, 'a+')
sys.stderr = sys.stdout
try:
function()
except Exception as e:
traceback.print_exc()
finally:
bb.event.print_ui_queue()
os._exit(0)
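# A hedged usage sketch (the worker function and log path are illustrative; note that the
# finally-block above references bb.event, so this helper assumes it runs inside bitbake):
#   def worker():
#       ...  # long-running task
#   createDaemon(worker, "/tmp/worker.log")  # parent returns immediately, worker() runs detached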
|
#! /usr/bin/env python
import unittest
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from src.kronprod import *
import numpy as np
from functools import reduce
class TestKronSparse(unittest.TestCase):
# add global stuff here
def setUp(self):
return
def testOnes(self):
A1 = [ np.array([[1., 1.], [1.,1.]]),
np.array([[1.,1.], [1.,1.]])]
x1 = np.array([1.,1.,1.,1.])
y1 = np.array([4.,4.,4.,4.])
kp = KronProd(A1,sparse_flag=True)
y = kp.dot(x1)
self.assertSequenceEqual(list(y), list(y1))
def testInts(self):
A1 = [ np.array([[1.0, 0.0], [0.0,0.0]]),
np.array([[1.,1.], [0.,0.]])]
x1 = np.array([1.,2.,3.,4.])
big_A = reduce(np.kron, A1)
print(big_A)
print(x1)
big_y = np.matmul(big_A, x1)
print("full calc: ",big_y)
kp = KronProd(A1,sparse_flag=True)
Y = kp.dot(x1)
print("efficient calc: ", Y)
self.assertSequenceEqual(list(Y), list(big_y))
# this dimensionality pushes the limit of what full rank calc can do
def testRandom(self):
n = 5 # number of factors
p = 5 # dimension of factor
r_As = [np.random.rand(p,p) for i in range(n)]
As = [m/m.sum(axis=1)[:,None] for m in r_As] # normalize each row
x = np.random.rand(p**n)
big_A = reduce(np.kron, As)
big_y = np.matmul(big_A, x)
print("full calc: ",big_y)
kp = KronProd(As,sparse_flag=True)
Y = kp.dot(x)
print("efficient calc: ", Y)
np.testing.assert_almost_equal(big_y, Y, decimal=7, verbose=True)
def testBig(self):
n = 2 # number of factors
p = 100 # dimension of factor
r_As = [np.random.rand(p,p) for i in range(n)]
As = [m/m.sum(axis=1)[:,None] for m in r_As] # normalize each row
x = np.random.rand(p**n)
kp = KronProd(As,sparse_flag=True)
Y = kp.dot(x)
print("efficient calc: ", Y)
|
from rest_pandas import (
PandasView, PandasUnstackedSerializer, PandasScatterSerializer,
PandasBoxplotSerializer,
)
from .serializers import EventResultSerializer
from .filters import ChartFilterBackend
import swapper
EventResult = swapper.load_model('results', 'EventResult')
class ChartView(PandasView):
queryset = EventResult.objects.all()
serializer_class = EventResultSerializer
filter_backends = [ChartFilterBackend]
def get_queryset(self):
qs = super(ChartView, self).get_queryset()
qs = qs.select_related('event_site', 'result_type')
return qs
class TimeSeriesView(ChartView):
pandas_serializer_class = PandasUnstackedSerializer
class ScatterView(ChartView):
pandas_serializer_class = PandasScatterSerializer
class BoxPlotView(ChartView):
pandas_serializer_class = PandasBoxplotSerializer
|
from django.contrib.gis.db import models
from django.conf import settings
from django.contrib.gis.measure import A, D
from lingcod.unit_converter.models import length_in_display_units, area_in_display_units
class SpacingPoint(models.Model):
name = models.CharField(max_length=200)
geometry = models.PointField(srid=settings.GEOMETRY_DB_SRID)
objects = models.GeoManager()
def __unicode__(self):
return unicode(self.name)
def all_spacing_points_dict():
"""
Returns a dictionary of the form: { point: 'name' } for all objects in SpacingPoint
"""
return dict( [ (p.geometry,p.name) for p in SpacingPoint.objects.all() ] )
def add_all_spacing_points(in_dict):
"""
Takes a dictionary of the form: { point: 'name' }, and adds all the objects in SpacingPoint
"""
in_dict.update(all_spacing_points_dict())
return in_dict
def distance_row_dict(from_dict, to_dict):
"""
from_dict will be a dict with a point as the key and a label as the value.
to_dict will be of the same format with multiple entries.
will return a dictionary with points as keys and a dictionary as values.
NOTE: This method assumes that the projection units are meters.
"""
from_pnt = from_dict.keys()[0]
for s_pnt in SpacingPoint.objects.all():
to_dict.update({s_pnt.geometry:s_pnt.name})
result = {}
for point, pnt_label in to_dict.iteritems():
result[point] = {
'label': pnt_label,
'distance': length_in_display_units(point.distance(from_pnt)),
'sort': point.y
}
return result
def distance_row_list(from_pnt, to_list):
"""
NOTE: This method assumes that the projection units are meters.
"""
result = []
for point in to_list:
result.append(length_in_display_units(point.distance(from_pnt)))
return result
def distance_matrix(point_list):
result = []
for point in point_list:
result.append(distance_row_list(point,point_list))
return result
def sorted_points_and_labels(in_dict):
"""
in_dict will look like:
{ point: 'name' }
sorted_points, sorted_labels (both lists) will be returned in a dictionary and they'll be
ordered from North to South.
"""
sorted_points = []
sorted_labels = []
y_dict = {}
for point, name in in_dict.iteritems():
y_dict.update( { point.y: point } )
y_list = y_dict.keys()
y_list.sort()
for y in reversed(y_list):
sorted_points.append(y_dict[y])
sorted_labels.append(in_dict[y_dict[y]])
return { 'points': sorted_points, 'labels': sorted_labels }
def distance_matrix_and_labels(in_dict,add_spacing_points=True):
"""
in_dict will look like:
{ point: 'name' }
Will return a dictionary with the keys 'labels' and 'matrix'
"""
if add_spacing_points:
in_dict = add_all_spacing_points(in_dict)
spl_dict = sorted_points_and_labels(in_dict)
dist_mat = distance_matrix(spl_dict['points'])
return { 'labels': spl_dict['labels'], 'matrix': dist_mat } |
import random
from sys import maxsize
from math import sqrt, inf
RANDOM_SEED = 1208
LARGE_NUMBER = inf
EQUAL_THRESHOLD = 0.0001
class KMeans:
def __init__(self, n_clusters):
self.n_clusters = n_clusters
def initialCentroidsRandomPick(self, X, k):
"""
:return centroids: dict - {0:[], tag: centroid_coordinate, ...}
"""
X_length = len(X)
random.seed(RANDOM_SEED)
idx_centroids = random.sample(range(X_length), k)
# centroids = [X[idx] for idx in idx_centroids]
centroids = {i: X[idx] for i, idx in enumerate(idx_centroids)}
return centroids
def computeDistance(self, v1, v2):
# Euclidean Distance
pingfanghe = 0
for i in range(len(v1)):
pingfanghe += (v1[i] - v2[i])**2
return sqrt(pingfanghe)
def clusterPoints(self, X, centroids):
"""
:return clusters: {cluster_idx: [point_idx, ...], ...}
"""
clusters = {}
for i, x in enumerate(X):
min_distance = (0, LARGE_NUMBER)
for k in centroids:
dis = self.computeDistance(x, centroids[k])
if dis < min_distance[1]:
min_distance = (k, dis)
cid = min_distance[0]
            if clusters.get(cid) is None:
clusters[cid] = [i]
else:
clusters[cid].append(i)
return clusters
def computeCentroids(self, X, clusters):
dimension = len(X[0])
new_centroids = {}
for cid in clusters:
cluster = clusters[cid]
n = len(cluster)
centroid = [0] * dimension
for idx in cluster:
point = X[idx]
for i, v in enumerate(point):
centroid[i] += v
for i, v in enumerate(centroid):
centroid[i] = v / n
new_centroids[cid] = centroid
return new_centroids
def checkClustersChanged(self, clusters, new_clusters):
        if clusters is None:
return True
for k in clusters:
set1 = set(clusters[k])
set2 = set(new_clusters[k])
if set1 != set2:
return True
return False
def fit(self, X):
# when all of clusters don't change, stop
"""
:param X: [[]] - matrix
"""
# initial centroids
centroids = self.initialCentroidsRandomPick(X, self.n_clusters)
# cluster points
clusters = None
new_clusters = self.clusterPoints(X, centroids)
num_iteration = 0
while(self.checkClustersChanged(clusters, new_clusters)):
# compute centroids
new_centroids = self.computeCentroids(X, new_clusters)
clusters = new_clusters
# cluster points
new_clusters = self.clusterPoints(X, new_centroids)
num_iteration += 1
# print(num_iteration)
print('number_of_iterations:', num_iteration)
return new_clusters
def checkCentroidsChanged(self, centroids, new_centroids):
        if centroids is None:
return True
for k in centroids:
centroid = centroids[k]
new_centroid = new_centroids[k]
for i in range(len(centroid)):
if abs(centroid[i] - new_centroid[i]) > EQUAL_THRESHOLD:
return True
return False
def fit2(self, X):
# when all of the positions of centroids don't change, stop
"""
:param X: [[]] - matrix
"""
# initial centroids
centroids = None
new_centroids = self.initialCentroidsRandomPick(X, self.n_clusters)
# cluster points
clusters = self.clusterPoints(X, new_centroids)
# num_iteration = 1
while(self.checkCentroidsChanged(centroids, new_centroids)):
# print(num_iteration)
# num_iteration += 1
centroids = new_centroids
# compute centroids
new_centroids = self.computeCentroids(X, clusters)
# cluster points
clusters = self.clusterPoints(X, new_centroids)
return clusters
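# A minimal, hedged usage sketch (toy data; not part of the original module):
if __name__ == '__main__':
    sample_points = [[1.0, 1.0], [1.2, 0.8], [8.0, 8.0], [8.2, 7.9]]
    km = KMeans(n_clusters=2)
    # fit() stops when cluster memberships stop changing,
    # fit2() stops when the centroid positions stop moving.
    print(km.fit(sample_points))   # e.g. {0: [point indices...], 1: [...]}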
|
import logging
import sanic.request
import sanic.response
from sanic_json_logging import setup_json_logging
app = sanic.Sanic("app1")
setup_json_logging(app, context_var="test1")
logger = logging.getLogger("root")
async def log():
logger.info("some informational message")
@app.route("/endpoint1", methods=["GET"])
async def endpoint1(request: sanic.request.Request) -> sanic.response.BaseHTTPResponse:
await log()
return sanic.response.text("")
app.run()
|
from django.apps import AppConfig
class DBConfig(AppConfig):
name = 'db'
|
import numpy as np
import dace as dc
import sympy as sp
# N, R, K, M1, M2 = (dc.symbol(s) for s in ('N', 'R', 'K', 'M1', 'M2'))
R, K, M1, M2 = (dc.symbol(s, dtype=dc.int64, integer=True, positive=True)
for s in ('R', 'K', 'M1', 'M2'))
N = R**K
# TODO: Temporary fix!
@dc.program
def mgrid1(X: dc.uint32[R, R], Y: dc.uint32[R, R]):
for i in range(R):
X[i, :] = i
for j in range(R):
Y[:, j] = j
@dc.program
def mgrid2(X: dc.uint32[R, N], Y: dc.uint32[R, N]):
for i in range(R):
X[i, :] = i
for j in range(R**K):
Y[:, j] = j
@dc.program
def stockham_fft(x: dc.complex128[R**K], y: dc.complex128[R**K]):
# Generate DFT matrix for radix R.
# Define transient variable for matrix.
# i_coord, j_coord = np.mgrid[0:R, 0:R]
i_coord = np.ndarray((R, R), dtype=np.uint32)
j_coord = np.ndarray((R, R), dtype=np.uint32)
mgrid1(i_coord, j_coord)
dft_mat = np.empty((R, R), dtype=np.complex128)
dft_mat[:] = np.exp(-2.0j * np.pi * i_coord * j_coord / R)
# Move input x to output y
# to avoid overwriting the input.
y[:] = x[:]
# ii_coord, jj_coord = np.mgrid[0:R, 0:R**K]
ii_coord = np.ndarray((R, N), dtype=np.uint32)
jj_coord = np.ndarray((R, N), dtype=np.uint32)
mgrid2(ii_coord, jj_coord)
tmp_perm = np.empty_like(y)
D = np.empty_like(y)
tmp = np.empty_like(y)
# Main Stockham loop
for i in range(K):
# Stride permutation
yv = np.reshape(y, (R**i, R, R**(K - i - 1)))
# tmp_perm = np.transpose(yv, axes=(1, 0, 2))
tmp_perm[:] = np.reshape(np.transpose(yv, axes=(1, 0, 2)), (N, ))
# Twiddle Factor multiplication
# D = np.empty((R, R ** i, R ** (K-i-1)), dtype=np.complex128)
Dv = np.reshape(D, (R, R**i, R**(K - i - 1)))
tmpv = np.reshape(tmp, (R**(K - i - 1), R, R**i))
tmpv[0] = np.exp(-2.0j * np.pi * ii_coord[:, :R**i] *
jj_coord[:, :R**i] / R**(i + 1))
for k in range(R**(K - i - 1)):
# D[:, :, k] = tmp
Dv[:, :, k] = np.reshape(tmpv[0], (R, R**i, 1))
# tmp_twid = np.reshape(tmp_perm, (N, )) * np.reshape(D, (N, ))
tmp_twid = tmp_perm * D
# Product with Butterfly
y[:] = np.reshape(dft_mat @ np.reshape(tmp_twid, (R, R**(K - 1))),
(N, ))
|
import os
import sys
import click
@click.command()
@click.argument("state_file", type=str)
@click.argument("exit_1", type=int)
@click.argument("exit_2", type=int)
@click.argument("exit_3", type=int)
def main(
state_file: str,
exit_1: int,
exit_2: int,
exit_3: int,
):
if not os.path.exists(state_file):
state = 0
else:
with open(state_file, "rt") as fp:
state = int(fp.read())
state += 1
with open(state_file, "wt") as fp:
fp.write(str(state))
if state == 1:
print(f"Exiting with status: {exit_1}")
sys.exit(exit_1)
if state == 2:
print(f"Exiting with status: {exit_2}")
sys.exit(exit_2)
if state == 3:
print(f"Exiting with status: {exit_3}")
sys.exit(exit_3)
if __name__ == "__main__":
main()
|
str1 = 'I love Python Programming'
str2 = 'Python'
str3 = 'Java'
print(f'"{str1}" contains "{str2}" = {str2 in str1}')
print(f'"{str1}" contains "{str2.lower()}" = {str2.lower() in str1}')
print(f'"{str1}" contains "{str3}" = {str3 in str1}')
if str2 in str1:
print(f'"{str1}" contains "{str2}"')
else:
print(f'"{str1}" does not contain "{str2}"')
index = str1.find(str2)
if index != -1:
print(f'"{str1}" contains "{str2}"')
else:
print(f'"{str1}" does not contain "{str2}"')
index = str1.find(str3)
if index != -1:
print(f'"{str1}" contains "{str3}"')
else:
print(f'"{str1}" does not contain "{str3}"')
|
from amaranth.back import verilog
from register_bank import RegisterBank
top = RegisterBank()
with open("register_bank.v", "w") as f:
f.write(verilog.convert(top, ports=[top.reset, top.regNum0, top.regNum1, top.wRegNum, top.writeEnable, top.dataOut0, top.dataOut1, top.dataIn])) |
import sys
sys.path.append("yam")
import config
def test_config_man():
config.setConfigFolder("./")
assert config.getConfigFolder() == "./"
config.setProperty("test-property", "test-value")
assert config.getProperty("test-property") == "test-value"
config.deleteConfigFile() |
import numpy as np
from sklearn.metrics.scorer import _BaseScorer
from solnml.components.utils.constants import CLS_TASKS, IMG_CLS
from solnml.datasets.base_dl_dataset import DLDataset
from solnml.components.evaluators.base_dl_evaluator import get_estimator_with_parameters
from solnml.components.ensemble.dl_ensemble.base_ensemble import BaseEnsembleModel
from solnml.components.models.img_classification.nn_utils.nn_aug.aug_hp_space import get_test_transforms
from functools import reduce
class Bagging(BaseEnsembleModel):
def __init__(self, stats,
ensemble_size: int,
task_type: int,
max_epoch: int,
metric: _BaseScorer,
timestamp: float,
output_dir=None,
device='cpu', **kwargs):
super().__init__(stats=stats,
ensemble_method='bagging',
ensemble_size=ensemble_size,
task_type=task_type,
max_epoch=max_epoch,
metric=metric,
timestamp=timestamp,
output_dir=output_dir,
device=device)
if self.task_type == IMG_CLS:
self.image_size = kwargs['image_size']
def fit(self, train_data):
        # Do nothing, the models have already been trained and saved.
return self
def predict(self, test_data: DLDataset, mode='test'):
model_pred_list = list()
final_pred = list()
model_cnt = 0
for algo_id in self.stats["include_algorithms"]:
model_configs = self.stats[algo_id]['model_configs']
for idx, config in enumerate(model_configs):
if self.task_type == IMG_CLS:
test_transforms = get_test_transforms(config, image_size=self.image_size)
test_data.load_test_data(test_transforms)
test_data.load_data(test_transforms, test_transforms)
else:
test_data.load_test_data()
test_data.load_data()
if mode == 'test':
dataset = test_data.test_dataset
else:
if test_data.subset_sampler_used:
dataset = test_data.train_dataset
else:
dataset = test_data.val_dataset
estimator = get_estimator_with_parameters(self.task_type, config, self.max_epoch,
dataset, self.timestamp, device=self.device)
if self.task_type in CLS_TASKS:
if mode == 'test':
model_pred_list.append(estimator.predict_proba(test_data.test_dataset))
else:
if test_data.subset_sampler_used:
model_pred_list.append(
estimator.predict_proba(test_data.train_dataset, sampler=test_data.val_sampler))
else:
model_pred_list.append(estimator.predict_proba(test_data.val_dataset))
else:
if mode == 'test':
model_pred_list.append(estimator.predict(test_data.test_dataset))
else:
if test_data.subset_sampler_used:
model_pred_list.append(
estimator.predict(test_data.train_dataset, sampler=test_data.val_sampler))
else:
model_pred_list.append(estimator.predict(test_data.val_dataset))
model_cnt += 1
# Calculate the average of predictions
for i in range(len(model_pred_list[0])):
sample_pred_list = [model_pred[i] for model_pred in model_pred_list]
pred_average = reduce(lambda x, y: x + y, sample_pred_list) / len(sample_pred_list)
final_pred.append(pred_average)
return np.array(final_pred)
def get_ens_model_info(self):
raise NotImplementedError
|
# -*- coding: utf-8 -*-
__version__ = '$Id$'
import family
# The wikispecies family
class Family(family.WikimediaFamily):
def __init__(self):
super(Family, self).__init__()
self.name = 'species'
self.langs = {
'species': 'species.wikimedia.org',
}
self.namespaces[4] = {
'_default': [u'Wikispecies', self.namespaces[4]['_default']],
}
self.namespaces[5] = {
'_default': [u'Wikispecies talk', self.namespaces[5]['_default']],
}
self.interwiki_forward = 'wikipedia'
|
# Capture video from a camera
# First create a VideoCapture object; its argument can be the device index
# or a video file path.
# cap.isOpened() checks whether the capture was initialized successfully
# cap.get(propId) reads video properties; propId values 0~18 each identify one property
# cap.set(propId, value) modifies a video property
import numpy as np
import cv2
cap = cv2.VideoCapture(0)
# Define the codec and create VideoWriter object
# Use cv2.VideoWriter_fourcc(*'XVID') to define the video codec, then create the VideoWriter object.
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('/path/to/output.avi', fourcc, 20, (640, 480))
while(True):
# Capture frame-by-frame
    ret, frame = cap.read()  # cap.read() returns (success flag, frame)
    if not ret:
        break
    print(cap.get(3), cap.get(4))  # frame width and height
out.write(frame)
# Our operations on the frame come here
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#Display the resulting frame
cv2.imshow('frame', gray)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows() |
#!/usr/bin/python
# coding=UTF-8
from math import log
import os
import re
import sys
from comparator import calculate_errors
from tokenizer import Tokenizer
from trainer import Trainer
from estimator import Estimator
verbose = False
class Model():
def __init__(self, tokenizer, estimator):
self.tokenizer = tokenizer
self.estimator = estimator
self.found_names = {}
def extract_names(self, tkns, second_passing):
self.found_names = {}
tkns = [t for t in tkns if not t.tkn in ['dr', 'drs', 'drd', 'mr', 'mrs', 'ms', 'professor', 'dipl', 'prof', 'miss', 'emeritus', 'ing', 'assoc', 'asst', 'lecturer', 'ast', 'res', 'inf', 'diplom', 'jun', 'junprof', 'inform', 'lect', 'senior', 'ass', 'ajarn', 'honorarprof', 'theol', 'math', 'phil', 'doz', 'dphil']]
i = 0
while i <= len(tkns) - 4:
cur_tkns = tkns[i:i+4]
# el = cur_tkns[0].element
# cur_tkns = [t for t in cur_tkns if t.element == el]
full_probs = self.estimator.get_full_probs(cur_tkns, second_passing)
index = full_probs.index(max(full_probs))
arr = [
'WWWW', 'WWWN', 'WWNW', 'WWNN', 'WNWW', 'WNWN', 'WNNW', 'WNNN',
'NWWW', 'NWWN', 'NWNW', 'NWNN', 'NNWW', 'NNWN', 'NNNW', 'NNNN',
'Nnnn', 'NNnn', 'NNNn'
]
name = " ".join([t.tkn for t in tkns[i:i+4]])
probs = [arr[j] + str(full_probs[j]) for j in range(0,19)]
# print "xxx", name, probs
if index < 12: # Is word.
tkns[i].is_name = False
i += 1
else:
name_length = 2 if (index - 12 == 0) else index - 11
if index == 16:
tkns[i].is_name = False
i += 1
continue
elif index == 17:
name_length = 2
elif index == 18:
name_length = 3
if name_length > len(cur_tkns):
name_length = len(cur_tkns)
name_start = int(i)
name_end = int(i) + int(name_length)
name_tkns = tkns[name_start:name_end]
for tkn in name_tkns:
tkn.is_name = True
name_tkns = [t.tkn for t in name_tkns if re.search('[0-9]', t.tkn) == None]
titles = ['dr', 'mr', 'mrs', 'ms', 'professor', 'dipl', 'prof', 'miss', 'emeritus', 'ing']
name_tkns = [t for t in name_tkns if not t in titles]
i += name_length
if len(name_tkns) <= 1:
continue
if len([t for t in name_tkns if len(t) > 1]) == 0:
continue
name = " ".join(name_tkns).encode('utf-8')
self.found_names[name] = [full_probs[0], full_probs[index]]
return tkns
def compare(self, x, y):
return int((float(y[0]) - float(y[1])) - (float(x[0]) - float(x[1])))
def print_results(self):
# for name in self.found_names:
for name in sorted(self.found_names, cmp=self.compare, key=self.found_names.get):
if verbose: print name, self.found_names[name]
else: print name
def extract_html(self, html):
tkns = self.tokenizer.tokenize(html)
self.estimator.calculate_tkn_incidence(tkns)
tkns = self.extract_names(tkns, False)
for i in range(0, 2):
self.estimator.calculate_secondary_features(tkns)
tkns = self.tokenizer.tokenize(html)
tkns = self.extract_names(tkns, True)
    # This method is a faster version of the name extraction
    # algorithm. It is less accurate, but it is enough to serve
    # as a feature for the classifier.
def extract_html_simple(self, html):
tkns = self.tokenizer.tokenize(html)
tkns = self.extract_names(tkns, False)
def extract(self, filename):
with open(filename) as f:
html = f.read()
self.extract_html(html)
fold = 1
def create_model():
tokenizer = Tokenizer()
estimator= Estimator(tokenizer)
trainer = Trainer(tokenizer)
estimator.load_name_cond_probs("data/probabilities/tokenized_authors_prob.txt")
estimator.load_word_cond_probs("data/probabilities/conditional_not_a_name_prob.txt")
# estimator.load_conditional_probabilities("data/probabilities/conditional_probs_4.txt")
estimator.load_conditional_probabilities("data/probabilities/fold_" + str(fold) + ".txt")
model = Model(tokenizer, estimator)
return model
if __name__ == "__main__":
model = create_model()
trainer = Trainer(Tokenizer())
if len(sys.argv) > 1:
if len(sys.argv) > 2 and sys.argv[2] == '-v':
verbose = True
if sys.argv[1] == 'train':
test_path = "downloaded_pages/faculty"
expected_path = "data/correct_names"
# file_nums = [f[-7:-4] for f in os.listdir(expected_path) if os.path.isfile(os.path.join(expected_path, f))]
file_nums = [
'180', '197', '156', '203', '095', '052', '072', '126', '184', '086',
'102', '034', '007', '011', '105', '199', '182', '092', '128', '031',
'014', '124', '129', '050', '024', '080', '122', '076', '134', '206',
'067', '216', '170', '107', '174', '085', '027', '074', '049', '099',
'177', '127', '131', '026', '192', '123', '019', '017', '178', '036',
'187', '113', '098', '041', '020', '146', '001', '132', '191', '112',
'101', '044', '037', '155', '022', '149', '201', '008', '077', '152',
'161', '091', '189', '087', '114', '082', '157', '100', '120', '006',
'147', '106', '039', '150', '209', '053', '144', '175', '005', '159',
'183', '013', '118', '016', '136', '158', '162', '094', '088', '210',
'151', '133', '029', '166', '069', '066', '061', '045', '195', '115',
'176', '010', '116', '121', '015', '171', '057', '063', '207', '194',
# '214', '179', '135', '021', '093', '208', '004', '047', '215', '211',
# '025', '038', '033', '071', '190', '160', '108', '141', '096', '202',
# '154', '009', '023', '186', '117', '002', '111', '125', '064'
]
count = 1
for file_num in file_nums:
print "File", file_num, count, 'of', len(file_nums)
count += 1
test_file = os.path.join(test_path, file_num + ".html")
expected_file = os.path.join(expected_path, "names_" + file_num + ".txt")
if not os.path.isfile(test_file):
print "Missing file", test_file
quit()
elif not os.path.isfile(expected_file):
print "Missing file", expected_file
quit()
trainer.train(test_file, expected_file)
trainer.compute_probabilities()
else:
model.extract(sys.argv[1])
model.print_results()
else:
test_path = "downloaded_pages/faculty"
expected_path = "data/correct_names"
file_nums = [f[-7:-4] for f in os.listdir(expected_path) if os.path.isfile(os.path.join(expected_path, f))]
for i in range(1, 6):
fold = i
model = create_model()
file_nums_fold = [
['180', '197', '156', '203', '095', '052', '072', '126', '184', '086',
'102', '034', '007', '011', '105', '199', '182', '092', '128', '031',
'014', '124', '129', '050', '024', '080', '122', '076', '134', '206'],
['067', '216', '170', '107', '174', '085', '027', '074', '049', '099',
'177', '127', '131', '026', '192', '123', '019', '017', '178', '036',
'187', '113', '098', '041', '020', '146', '001', '132', '191', '112'],
['101', '044', '037', '155', '022', '149', '201', '008', '077', '152',
'161', '091', '189', '087', '114', '082', '157', '100', '120', '006',
'147', '106', '039', '150', '209', '053', '144', '175', '005', '159'],
['183', '013', '118', '016', '136', '158', '162', '094', '088', '210',
'151', '133', '029', '166', '069', '066', '061', '045', '195', '115',
'176', '010', '116', '121', '015', '171', '057', '063', '207', '194'],
['214', '179', '135', '021', '093', '208', '004', '047', '215', '211',
'025', '038', '033', '071', '190', '160', '108', '141', '096', '202',
'154', '009', '023', '186', '117', '002', '111', '125', '064']
]
file_nums = file_nums_fold[fold - 1]
type_1_errors = 0
type_2_errors = 0
test_names_count = 0
expected_names_count = 0
for file_num in file_nums:
test_file = os.path.join(test_path, file_num + ".html")
expected_file = os.path.join(expected_path, "names_" + file_num + ".txt")
if not os.path.isfile(test_file):
print "Missing file", test_file
quit()
elif not os.path.isfile(expected_file):
print "Missing file", expected_file
quit()
model.extract(test_file)
type_1, type_2, correct_names = calculate_errors(expected_file, model.found_names)
type_1_errors += len(type_1)
type_2_errors += len(type_2)
test_names_count += len(type_1) + len(correct_names)
expected_names_count += len(type_2) + len(correct_names)
# print "File", test_file
precision = 1.0 - float(len(type_1)) / (len(type_1) + len(correct_names))
recall = 1.0 - float(len(type_2)) / (len(type_2) + len(correct_names))
# print precision, ',', recall
# print "False positives:", float(len(type_1)), '/', (len(type_1) + len(correct_names))
# print "False negatives:", float(len(type_2)), '/', (len(type_2) + len(correct_names))
# print "Total false positives:", type_1_errors, "/", test_names_count, float(type_1_errors) / test_names_count
# print "Total false negatives:", type_2_errors, "/", expected_names_count, float(type_2_errors) / expected_names_count
precision = 1.0 - float(type_1_errors) / test_names_count
recall = 1.0 - float(type_2_errors) / expected_names_count
print precision, ',', recall
|
import pygame
import time
import sys
from grid import grid
# Colors used
black = (0, 0, 0)
white = (255, 255, 255)
gray = (192, 192, 192)
MARGIN = 5 # Margin between cells
WIDTH = 20 # Cell width
HEIGHT = 20 # Cell height
ROW = 40 # Random row count
COL = 40 # Random column count
speed_delta = 10
max_speed = 125
min_speed = 5
speed = min_speed
done = False
pause = False
pygame.init()
pygame.display.set_caption("Game of Life")
clock = pygame.time.Clock()
main = grid()
if len(sys.argv) > 1:
main.load_map(sys.argv[1])
else:
main.load_random(ROW, COL)
window_size = [WIDTH * len(main.grid[0]) + MARGIN * len(main.grid[0]) + MARGIN, HEIGHT * len(main.grid)+ MARGIN * len(main.grid) + MARGIN]
screen = pygame.display.set_mode(window_size)
screen.fill(gray)
def draw_grid():
for row in range(len(main.grid)):
for col in range(len(main.grid[row])):
if main.grid[row][col] == 0:
color = white
elif main.grid[row][col] == 1:
color = black
pygame.draw.rect(screen, color, [(MARGIN + WIDTH) * col + MARGIN, (MARGIN + HEIGHT) * row + MARGIN, WIDTH, HEIGHT])
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
# Quit
done = True
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_p:
# Pause
pause = not pause
elif event.key == pygame.K_r:
# Reset
if main.map_type == "random":
main.load_random(ROW, COL)
else:
main.load_map(sys.argv[1])
speed = min_speed
if pause:
pause = not pause
elif event.key == pygame.K_ESCAPE:
# Quit
done = True
elif event.key == pygame.K_RIGHT:
# Speed up
speed += speed_delta
if speed > max_speed:
speed = max_speed
elif event.key == pygame.K_LEFT:
# Speed down
speed -= speed_delta
if speed < min_speed:
speed = min_speed
if not pause:
screen.fill(gray)
draw_grid()
main.apply_rules()
clock.tick(speed)
pygame.display.flip()
pygame.quit()
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import sys
from lxml import etree, html
def run(bk):
count = 0
print('start')
for (file_id, _) in bk.text_iter():
modified = True
html_original = bk.readfile(file_id)
doc = html.fromstring(html_original.encode("utf-8"))
for link in doc.xpath("//*[local-name() = 'link']"):
            if link.attrib.get('href') == "../Styles/t2bv2l.css":
modified = False
break
if modified:
count += 1
head = doc.xpath("//*[local-name() = 'head']")[0]
link = etree.SubElement(head,
"link",
attrib={'href': "../Styles/t2bv2l.css",
'rel': "stylesheet",
'type': "text/css"
})
print("Modified File : ", file_id)
bk.writefile(file_id,
etree.tostring(
doc,
encoding="utf-8",
xml_declaration=True).decode('utf8'))
# css
if count > 0:
cssdata = '''
html{
direction:rtl;
-ms-writing-mode: tb-rl;
-epub-writing-mode: vertical-rl;
-webkit-writing-mode: vertical-rl;
writing-mode: vertical-rl;
}
body {
direction:ltr;
word-break: normal;
text-align: justify;
text-justify: inter-ideograph;
vertical-align: baseline;
word-wrap: break-word;
line-break: normal;
-epub-line-break: normal;
-webkit-line-break: normal;
text-orientation: upright;
-webkit-text-orientation: upright;
-epub-text-orientation: upright;
}
.tcy {
-epub-text-combine: horizontal;
-webkit-text-combine: horizontal;
-ms-text-combine-horizontal: all;
text-combine-horizontal: all;
text-combine-upright: all;
}
.upright {
-epub-text-orientation: rotate-right;
-epub-text-orientation: upright;
-webkit-text-orientation: upright;
-epub-text-combine: horizontal;
-webkit-text-combine: horizontal;
-ms-text-combine-horizontal: all;
text-combine-horizontal: all;
text-combine-upright: all;
}'''
basename = "t2bv2l.css"
uid = "t2bv2lcss"
mime = "text/css"
bk.addfile(uid, basename, cssdata, mime)
bk.setspine_ppd('rtl')
xml = bk.getmetadataxml()
if '<meta name="primary-writing-mode" content="vertical-rl"/>' not in xml:
xml = xml.replace('</metadata>',
'<meta name="primary-writing-mode" content="vertical-rl"/>\n</metadata>')
bk.setmetadataxml(xml)
print('end')
return 0
def main():
print("I reached main when I should not have\n")
return -1
if __name__ == "__main__":
sys.exit(main())
|
from output.models.ms_data.particles.particles_r020_xsd.particles_r020 import (
B,
R,
Doc,
)
from output.models.ms_data.particles.particles_r020_xsd.particles_r020_imp import (
ImpElem1,
ImpElem2,
)
__all__ = [
"B",
"R",
"Doc",
"ImpElem1",
"ImpElem2",
]
|
from django.contrib import admin
from tastie.models import SimpleRest
admin.site.register(SimpleRest)
|
import mysql.connector
import csv
mydb = mysql.connector.connect(
host="localhost",
user="root",
password="admin123",
db="db_TEST"
)
mycursor = mydb.cursor()
# mycursor.execute("CREATE TABLE customers (customerid INT AUTO_INCREMENT PRIMARY KEY,firstname TEXT,lastname TEXT,companyname TEXT,billingaddress1 TEXT,billingaddress2 TEXT,city TEXT,state TEXT,postalcode TEXT,country TEXT,phonenumber TEXT,emailaddress TEXT,createddate TEXT)")
csv_data = csv.reader(open('customer.csv'), delimiter=',')
count = 0
for row in csv_data:
if count == 0:
count +=1
else:
mycursor.execute("INSERT INTO customers(customerid,firstname,lastname,companyname,billingaddress1,billingaddress2,city,state,postalcode,country,phonenumber,emailaddress,createddate) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)",row)
count += 1
print(row)
print "Done"
# mycursor.execute("SELECT * FROM customers")
#
# myresult = mycursor.fetchall()
#
# for x in myresult:
# print(x)
|
__author__ = 'renderle'
def scrambleForecast(fc1, fc2):
    return (fc1 + fc2) / 2
# Generated by Django 3.1.13 on 2022-01-07 16:23
import django.db.models.deletion
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [("api", "0051_reapply_partition_trigger_func")]
operations = [
migrations.AddField(
model_name="sources",
name="provider",
field=models.ForeignKey(
db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to="api.provider"
),
),
migrations.RunSQL("UPDATE public.api_sources SET provider_id = koku_uuid::uuid"),
]
|
import itertools
from computation.split_and_merge.domain_centers.calculate_domain_centers import calculate_domain_centers as calc_dom
from computation.split_and_merge.util.pos_bool_type import PosBoolType
from computation.split_and_merge.util.numpy_extension.find_index_of_first_element_not_equivalent import find_index_of_first_element_not_equivalent
from computation.split_and_merge.util.node_border_iterator import iterate_node_border_with_adjacent_node_cells
it = itertools.count()
class ObjectType:
    DOMAIN = next(it)
    CAVITY = next(it)
def is_homogenous_split(data_part, mask_part):
return PosBoolType(find_index_of_first_element_not_equivalent(data_part, mask_part))
def is_homogenous_merge(image_data_part, image_merge_data):
def sign(x):
return 1 if x > 0 else -1 if x < 0 else 0
return sign(image_data_part[0, 0, 0]) == sign(image_merge_data[0, 0, 0])
def is_relevant_part(hom_image_data_part, object_type):
obj_to_func = {
ObjectType.DOMAIN: is_domain_part,
ObjectType.CAVITY: is_cavity_part
}
return obj_to_func[object_type](hom_image_data_part)
def is_domain_part(hom_image_data_part):
return hom_image_data_part[0, 0, 0] == 0
def is_cavity_part(hom_image_data_part):
return hom_image_data_part[0, 0, 0] < 0
def is_inside_volume(hom_mask_part):
return not bool(hom_mask_part[0, 0, 0])
def is_neighboring(node1, node2):
x_, y_, z_ = node1[0]
w_, h_, d_ = node1[1]
x, y, z = node2[0]
w, h, d = node2[1]
return x-w_ <= x_ <= x+w and y-h_ <= y_ <= y+h and z-d_ <= z_ <= z+d
def split(data, mask, graph, object_type):
node = ((0, 0, 0), data.shape)
graph.set_initial_node(node)
stack = [node]
while len(stack) > 0:
pos, dim = stack.pop()
is_hom = is_homogenous_split(data[pos[0]:pos[0]+dim[0], pos[1]:pos[1]+dim[1], pos[2]:pos[2]+dim[2]],
mask[pos[0]:pos[0]+dim[0], pos[1]:pos[1]+dim[1], pos[2]:pos[2]+dim[2]])
if not is_hom:
nodes = graph.split_node((pos, dim), is_hom)
for node in nodes:
stack.append(node)
elif (is_relevant_part(data[pos[0]:pos[0]+dim[0], pos[1]:pos[1]+dim[1], pos[2]:pos[2]+dim[2]], object_type) or
not is_inside_volume(mask[pos[0]:pos[0]+dim[0], pos[1]:pos[1]+dim[1], pos[2]:pos[2]+dim[2]])):
graph.remove_node((pos, dim))
graph.forbid_splitting()
def merge(data, graph):
for node, neighbors in graph.iteritems():
for neighbor in neighbors:
if not graph.is_merged(node, neighbor):
pos_node, dim_node = node
pos_neighbor, dim_neighbor = neighbor
data_node = data[pos_node[0]:pos_node[0]+dim_node[0],
pos_node[1]:pos_node[1]+dim_node[1],
pos_node[2]:pos_node[2]+dim_node[2]]
data_neighbor = data[pos_neighbor[0]:pos_neighbor[0]+dim_neighbor[0],
pos_neighbor[1]:pos_neighbor[1]+dim_neighbor[1],
pos_neighbor[2]:pos_neighbor[2]+dim_neighbor[2]]
if is_homogenous_merge(data_node, data_neighbor):
graph.merge_nodes(node, neighbor)
def add_periodic_neighbors(graph):
border_node_translation_vectors = graph.get_border_node_translation_vectors()
border_nodes = border_node_translation_vectors.keys()
for i, n in enumerate(border_nodes[:-1]):
for m in border_nodes[i+1:]:
m_x, m_y, m_z = m[0]
for translation_vector in border_node_translation_vectors[m]:
translated_node = ((m_x+translation_vector[0], m_y+translation_vector[1], m_z+translation_vector[2]),
m[1])
if is_neighboring(n, translated_node):
graph.add_neighbors(n, [m], translation_vectors=[translation_vector])
break
def merge_periodic_border(data, graph):
for border_node, border_neighbors in graph.iter_border_items():
for border_neighbor in border_neighbors:
if not graph.is_merged(border_node, border_neighbor, detect_cyclic_merge=True):
pos_node, dim_node = border_node
pos_neighbor, dim_neighbor = border_neighbor
data_node = data[pos_node[0]:pos_node[0]+dim_node[0],
pos_node[1]:pos_node[1]+dim_node[1],
pos_node[2]:pos_node[2]+dim_node[2]]
data_neighbor = data[pos_neighbor[0]:pos_neighbor[0]+dim_neighbor[0],
pos_neighbor[1]:pos_neighbor[1]+dim_neighbor[1],
pos_neighbor[2]:pos_neighbor[2]+dim_neighbor[2]]
if is_homogenous_merge(data_node, data_neighbor):
graph.merge_nodes(border_node, border_neighbor)
def mark_domain_points(data, areas):
for i, area in enumerate(areas):
for node in area:
x, y, z = node[0]
w, h, d = node[1]
data[x:x+w, y:y+h, z:z+d] = -(i+1)
def calculate_domain_centers(atoms, combined_translation_vectors, areas):
return calc_dom(atoms, combined_translation_vectors, areas)
def get_domain_area_cells(areas):
cell_positions = [[(pos[0] + x, pos[1] + y, pos[2] + z)
for pos, dim in area
for x, y, z in itertools.product(*(range(c) for c in dim))]
for area in areas]
return cell_positions
def get_domain_surface_cells(data, mask, areas):
domain = None
def func(border_x, border_y, border_z, adjacent_node_cells):
if bool(data[border_x, border_y, border_z]) or bool(mask[border_x, border_y, border_z]):
for n in adjacent_node_cells:
domain.add(n)
domains = []
for area in areas:
domain = set()
for node in area:
iterate_node_border_with_adjacent_node_cells(node, func)
domains.append(list(domain))
return domains
|
# -*- coding: utf-8 -*-
__all__ = ["Zero", "Constant"]
import theano.tensor as tt
class Zero:
def __call__(self, x):
return tt.zeros_like(x)
class Constant:
def __init__(self, value):
self.value = tt.as_tensor_variable(value)
def __call__(self, x):
return tt.zeros_like(x) + self.value
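# A minimal, hedged usage sketch (the variable names below are illustrative):
#   x = tt.vector("x")
#   mean_fn = Constant(2.0)
#   mu = mean_fn(x)   # symbolic tensor with the same shape as x, every entry equal to 2.0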
|
# http://soundjax.com/beep-1.html
# structure the class
# define what the cla<ss does
# add comments
#
# add to string - complexity
# impr:
# same ingredient twice
# keeping sorted the steps
# ---------------------------------------------------------------
# Imports
# ---------------------------------------------------------------
from datetime import datetime, timedelta
import time
import pygame
# ---------------------------------------------------------------
# Class - RecipeStep
# ---------------------------------------------------------------
class RecipeStep:
"""One step in a recipe.
Is defined by the time-offset (in minutes) from the start of cooking when we want to do it and the instruction
"""
# ---------------------------------------------------------------
# Initialisation
# ---------------------------------------------------------------
def __init__(self, instruction, time_offset_min):
"""Creates a new recipe-step
Includes the instruction what to do and the time-offset from the start of the recipe"""
self.time_offset_min = time_offset_min
self.instruction = instruction
# ---------------------------------------------------------------
# Interface
# ---------------------------------------------------------------
def get_string(self, recipe_start_time):
"""Gets the string representing the step.
The time when it should be carried out (computed by the recipe start time) and the instruction
"""
return str((recipe_start_time + timedelta(0, 60*self.time_offset_min)).time()) + ": " + self.instruction
def __str__(self):
return "After " + str(self.time_offset_min) + " minutes, " + self.instruction
# ---------------------------------------------------------------
# Class - Recipe
# ---------------------------------------------------------------
class Recipe:
"""Cooking recipe.
Is basically a set of ingredients (with their quantities) and a list of steps to carry out during cooking
"""
# ---------------------------------------------------------------
# Constants
# ---------------------------------------------------------------
QUANTITY = 'quantity'
MEASURE = 'measure'
# ---------------------------------------------------------------
# Initialisation - how do we construct one
# ---------------------------------------------------------------
def __init__(self, name):
"""Construct an empty recipe (containing no ingredients of steps to carry out)"""
self.name = name
self.steps = []
self.ingredients = {}
# ---------------------------------------------------------------
# Interface - what functionality is offered
# ---------------------------------------------------------------
def add_ingredient(self, ingredient, quantity=1, measure="pieces"):
"""Adds some quantity of the ingredient to the recipe"""
if ingredient in self.ingredients:
self.ingredients[ingredient][self.QUANTITY] += quantity
else:
self.ingredients[ingredient] = {self.QUANTITY: quantity, self.MEASURE: measure}
def print_ingredients(self):
"""Prints the ingredients of the recipe"""
print(self.get_ingredients_str())
def add_step(self, recipe_step):
"""Adds another cooking step to the recipe"""
self.steps.append(recipe_step)
def print_steps(self):
"""Prints the steps of the recipe"""
print(self.get_steps_str())
def print_recipe(self):
"""Prints the whole recipe"""
print("Recipe for " + self.name)
print(str(self))
print()
def run_recipe(self):
"""Runs the recipe
Prints the ingredients and then the individual steps one by one, when their time comes
"""
self.print_ingredients()
self.__sort_steps_by_time()
recipe_start_time = datetime.now()
print("Making " + self.name + ". Started at " + str(recipe_start_time.time()))
for i in range(len(self.steps)):
pygame.mixer.music.play()
print(str(i + 1) + ": " + self.steps[i].get_string(recipe_start_time))
if i != len(self.steps) - 1:
print("\tNext step at " + self.steps[i + 1].get_string(recipe_start_time))
                time.sleep((self.steps[i + 1].time_offset_min - self.steps[i].time_offset_min) * 60)  # offsets are in minutes
else:
print("\tThis is the last step, don't screw it!")
print("Should be ready now, enjoy :-)")
# ---------------------------------------------------------------
# Implementation
# ---------------------------------------------------------------
def __str__(self):
return self.get_ingredients_str() + "\n" + self.get_steps_str()
def get_steps_str(self):
"""Returns the string with the steps of the recipe"""
self.__sort_steps_by_time()
steps_str = "Steps for " + self.name + ":\n"
for step in self.steps:
steps_str += "\t" + str(step) + "\n"
return steps_str
def get_ingredients_str(self):
"""Returns the string with the ingredients of the recipe"""
ingredients_str = "Ingredients for " + self.name + ":\n"
for ingredient in self.ingredients:
ingredients_str += "\t" + str(ingredient) + "\n"
return ingredients_str
def __sort_steps_by_time(self):
"""Sorts the steps of the recipe by the time when they should be carried out"""
self.steps.sort(key=lambda step: step.time_offset_min)
# ---------------------------------------------------------------
# Main
# ---------------------------------------------------------------
if __name__ == "__main__":
pygame.init()
pygame.mixer.music.load("beep.mp3")
smoothie = Recipe("Smoothie")
smoothie.add_ingredient("mango")
smoothie.add_ingredient("banana")
smoothie.add_ingredient("ground coconut", 2, "soup spoons")
smoothie.add_ingredient("milk", 1, "dl")
smoothie.add_step(RecipeStep("Peel mango and cut to pieces", 0))
smoothie.add_step(RecipeStep("Peel bananas and cut to pieces", 1))
smoothie.add_step(RecipeStep("Add everything to the mixer, with milk and coconut", 2))
smoothie.add_step(RecipeStep("Mix for 120 seconds", 3))
smoothie.add_step(RecipeStep("Drink", 5))
print(smoothie)
smoothie.run_recipe() |
import os
import math
import argparse
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from PIL import Image
import numpy as np
from torch.autograd import Variable
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from data.dataloader import ErasingData, ErasingData_test
from models.sa_gan import STRnet2
parser = argparse.ArgumentParser()
parser.add_argument('--numOfWorkers', type=int, default=0,
help='workers for dataloader')
parser.add_argument('--modelsSavePath', type=str, default='',
help='path for saving models')
parser.add_argument('--logPath', type=str,
default='')
parser.add_argument('--batchSize', type=int, default=16)
parser.add_argument('--loadSize', type=int, default=512,
help='image loading size')
parser.add_argument('--dataRoot', type=str,
default='')
parser.add_argument('--pretrained',type=str, default='', help='pretrained models for finetuning')
parser.add_argument('--savePath', type=str, default='./results/sn_tv/')
args = parser.parse_args()
cuda = torch.cuda.is_available()
if cuda:
print('Cuda is available!')
cudnn.benchmark = True
def visual(image):
    im = image.transpose(1, 2).transpose(2, 3).detach().cpu().numpy()
Image.fromarray(im[0].astype(np.uint8)).show()
batchSize = args.batchSize
loadSize = (args.loadSize, args.loadSize)
dataRoot = args.dataRoot
savePath = args.savePath
result_with_mask = savePath + 'WithMaskOutput/'
result_straight = savePath + 'StrOuput_t/'
#import pdb;pdb.set_trace()
os.makedirs(result_with_mask, exist_ok=True)
os.makedirs(result_straight, exist_ok=True)
Erase_data = ErasingData_test(dataRoot, loadSize, training=False)
Erase_data = DataLoader(Erase_data, batch_size=batchSize, shuffle=True, num_workers=args.numOfWorkers, drop_last=False)
netG = STRnet2(3)
netG.load_state_dict(torch.load(args.pretrained))
#
if cuda:
netG = netG.cuda()
for param in netG.parameters():
param.requires_grad = False
print('OK!')
import time
netG.eval()
for imgs, path, h, w in Erase_data:
start = time.time()
if cuda:
imgs = imgs.cuda()
# gt = gt.cuda()
# masks = masks.cuda()
out1, out2, out3, g_images,mm = netG(imgs)
end = time.time()
time_one = end-start
print(time_one)
g_image = g_images.data.cpu()
# gt = gt.data.cpu()
# mask = masks.data.cpu()
# g_image_with_mask = gt * (mask) + g_image * (1- mask)
g_image = torch.nn.functional.interpolate(g_image,[h,w])
# save_image(g_image_with_mask, result_with_mask+path[0])
save_image(g_image, result_straight+path[0].replace('jpg','png'))
|
import glob
import numpy as np
import pandas as pd
import re
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.colors as mcolors
import random
matplotlib.rc('text', usetex=True)
matplotlib.rc('font', size=15)
matplotlib.rc('legend', fontsize=14)
def read_metrics(filename: str):
opinions = []
df = pd.read_csv(filename)
for col in df.columns:
opinions.append(df[col].values)
p_range = df.columns.astype('float').values
return p_range, opinions
def filter_files_by(files: list, query: str):
return [f for f in files if query in f]
def plot_experiment(files, with_weighted_opinion=False, show_q=False, col=None):
colors = list(dict(mcolors.TABLEAU_COLORS).values())
colors.insert(0, 'k')
colors.append('lime')
for i, f in enumerate(files):
result = pd.read_csv(f)
opinion_type = extract_opinion_type(f)
if not with_weighted_opinion and 'weighted_mean_opinion' in opinion_type:
continue
result.columns = ['p_range', opinion_type]
plot_label = extract_name(f) + ' ' + opinion_type.replace('_', ' ')
plot_label = plot_label.split('/')[-1]
if show_q:
plot_label += ' ' + extract_q(f)
plt.grid(alpha=0.3)
if col:
colors[i] = col
plt.scatter(result['p_range'], result[opinion_type],
color=colors[i])
plt.plot(result['p_range'], result[opinion_type],
color=colors[i], alpha=0.6)
plt.plot([], [], '-o', label=plot_label, color=colors[i])
plt.xlabel('p')
plt.ylabel('opinion')
plt.legend()
def extract_q(file: str):
q = re.search(r'q=\d', file)
return q.group(0)
def extract_opinion_type(file: str):
opinion_type = re.search(r'\D+opinion', file)
return opinion_type.group(0)[1:]
def extract_name(file: str):
return file.split('\\')[-1].split('-q')[0].replace('_', ' ')
def fetch_files_from(experiment: str):
return glob.glob(f"../../../results/{experiment}/*.txt")
def convert_old_format_results(path):
"""
Convert old `.csv` format into simple p_range, mean_opinion dataframe
:param path:
"""
files = glob.glob(path)
for f in files:
p_range, opinion = read_metrics(f)
opinion = np.transpose(opinion)[-1] # last opinion from simulation
df = pd.DataFrame(data={'p_range': p_range, 'opinion': opinion})
save_file = f.split('.csv')[0]
save_file = save_file + '.txt'
df.to_csv(save_file, index=False, header=False)
if __name__ == '__main__':
# convert_old_format_results("../../../new_results/q-experiment/*.csv")
# convert_old_format_results("../../../new_results/preference-sampling-experiment/*.csv")
# convert_old_format_results("../../../new_results/majority-voting-experiment/*.csv")
convert_old_format_results(
"../../../new_results/directed-undirected-experiment/*.csv")
|
import torch
import torch.nn as nn
import numpy as np
import sys
import disc_span_parser
"""
class MarginLoss(nn.Module):
def __init__(self, complexity, ill_nested, reduction="mean"):
super(MarginLoss, self).__init__()
self.complexity = complexity
self.ill_nested = ill_nested
self.mean = (reduction == "mean")
# assume that weights are given in a matrix of size (n words + 2, n words + 2)
# so the first word will be ignored
def forward(self, dep_weights, label_weights, sentences):
# remove the first line/col that corresponds to BOS tag
dep_weights = {k: v[:, 1:, 1:] for k, v in dep_weights.items()}
label_weights = {k: v[:, 1:, 1:] for k, v in label_weights.items()}
n_max_words = dep_weights["cont"].shape[1] # it contains bos but they will always be masked
n_batch = dep_weights["cont"].shape[0]
device = dep_weights["cont"].device
# compute gold indices and loss augmented weight
loss_augmented_weights = dict()
gold_indices = dict()
for k in dep_weights.keys():
n_labels = label_weights[k].shape[3]
weights = dep_weights[k] + label_weights[k]
# the last label will be used as a "fake empty label" value
g = torch.empty((n_batch, n_max_words, n_max_words, n_labels), requires_grad=False, device=device).fill_(0.)
for b in range(n_batch):
x = sentences[b][k + "_spans"][0].to(device)
y = sentences[b][k + "_spans"][1].to(device)
z = sentences[b][k + "_labels"].to(device)
g[b, x, y, z] = 1.
gold_indices[k] = g
loss_augmented_weights[k] = weights + (1. - g)
# compute argmax
pred_indices = {
k: torch.empty((n_batch, n_max_words, n_max_words, label_weights[k].shape[3]), requires_grad=False, device=device).fill_(0.)
for k in dep_weights.keys()
}
for b in range(n_batch):
n_words = len(sentences[b]["words"]) - 2
cont_spans = loss_augmented_weights["cont"][b, :n_words, :n_words]
disc_spans = loss_augmented_weights["disc"][b, :n_words, :n_words]
gap_spans = loss_augmented_weights["gap"][b, :n_words, :n_words]
pred_cst = disc_span_parser.argmax_as_list_parallel(
list([cont_spans]),
list([disc_spans]),
list([gap_spans]),
None,
self.complexity,
self.ill_nested
)[0]
for label, i, k, l, j in pred_cst:
if k < 0:
pred_indices["cont"][b, i, j, label] += 1.
else:
pred_indices["disc"][b, i, j, label] += 1.
pred_indices["gap"][b, k+1, l-1, label] += 1.
# try to normalize with / gold_indices[k].sum()
loss = [loss_augmented_weights[k] * (pred_indices[k] - gold_indices[k]) for k in dep_weights.keys()]
loss = [l.sum() for l in loss]
loss = sum(loss).sum()
if self.mean:
loss = loss / n_words
return loss
class CorrectedBatchUnstructuredProbLoss(nn.Module):
def __init__(self, joint=True, reduction="mean"):
super(CorrectedBatchUnstructuredProbLoss, self).__init__()
self.joint = joint
self.builder = nn.BCEWithLogitsLoss(reduction=reduction)
# assume that weights are given in a matrix of size (n words + 2, n words + 2)
# so the first word will be ignored
def forward(self, dep_weights, label_weights, sentences):
# remove the first line/col that corresponds to BOS tag
dep_weights = {k: v[:, 1:, 1:] for k, v in dep_weights.items()}
label_weights = {k: v[:, 1:, 1:] for k, v in label_weights.items()}
        n_max_words = dep_weights["cont"].shape[1] # it contains bos but they will always be masked
n_batch = dep_weights["cont"].shape[0]
device = dep_weights["cont"].device
# build mask
triangle = torch.ones((n_max_words, n_max_words), dtype=bool, device=device, requires_grad=False).triu_(diagonal=0)
# dim: (n_batch, max len)
sentence_sizes = torch.LongTensor([len(sentence["words"]) for sentence in sentences]).to(device)
size_mask = torch.arange(n_max_words, device=device).unsqueeze(0) < sentence_sizes.unsqueeze(1)
# our triangular mask!
mask = triangle.expand(n_batch, -1, -1)
mask = mask * size_mask.unsqueeze(2)
mask = mask * size_mask.unsqueeze(1)
if self.joint:
loss = list()
for k in dep_weights.keys():
n_labels = label_weights[k].shape[3]
weights = dep_weights[k] + label_weights[k]
# the last label will be used as a "fake empty label" value
gold_indices = torch.empty((n_batch, n_max_words, n_max_words, n_labels), requires_grad=False, device=device).fill_(0.)
for b in range(n_batch):
x = sentences[b][k + "_spans"][0].to(device)
y = sentences[b][k + "_spans"][1].to(device)
z = sentences[b][k + "_labels"].to(device)
gold_indices[b, x, y, z] = 1.
weights = weights[mask]
gold_indices = gold_indices[mask]
loss.append(self.builder(weights, gold_indices))
return sum(loss)
else:
raise NotImplementedError()
"""
# this is the "correct" loss
class BatchUnstructuredApproximateProbLoss(nn.Module):
def __init__(self, joint=True, reduction="mean"):
super(BatchUnstructuredApproximateProbLoss, self).__init__()
self.joint = joint
self.builder = nn.CrossEntropyLoss(reduction=reduction)
if not joint:
self.binary_builder = nn.BCEWithLogitsLoss(reduction=reduction)
# assume that weights are given in a matrix of size (n words + 2, n words + 2)
# so the first word will be ignored
def forward(self, dep_weights, label_weights, sentences):
# remove the first line/col that corresponds to BOS tag
dep_weights = {k: v[:, 1:, 1:] for k, v in dep_weights.items()}
label_weights = {k: v[:, 1:, 1:] for k, v in label_weights.items()}
        n_max_words = dep_weights["cont"].shape[1] # it contains bos but they will always be masked
n_batch = dep_weights["cont"].shape[0]
device = dep_weights["cont"].device
# build mask
triangle = torch.ones((n_max_words, n_max_words), dtype=bool, device=device, requires_grad=False).triu_(diagonal=0)
# dim: (n_batch, max len)
sentence_sizes = torch.LongTensor([len(sentence["words"]) for sentence in sentences]).to(device)
size_mask = torch.arange(n_max_words, device=device).unsqueeze(0) < sentence_sizes.unsqueeze(1)
# our triangular mask!
mask = triangle.expand(n_batch, -1, -1)
mask = mask * size_mask.unsqueeze(2)
mask = mask * size_mask.unsqueeze(1)
if self.joint:
loss = list()
for k in dep_weights.keys():
n_labels = label_weights[k].shape[3]
weights = dep_weights[k] + label_weights[k]
# the last label will be used as a "fake empty label" value
gold_indices = torch.empty((n_batch, n_max_words, n_max_words), dtype=torch.long, requires_grad=False, device=device).fill_(n_labels)
for b in range(n_batch):
x = sentences[b][k + "_spans"][0].to(device)
y = sentences[b][k + "_spans"][1].to(device)
z = sentences[b][k + "_labels"].to(device)
gold_indices[b, x, y] = z
weights = weights[mask]
# "fake" label
weights = torch.cat([weights, torch.zeros((weights.shape[0], 1), device=device)], dim=1)
gold_indices = gold_indices[mask]
loss.append(self.builder(weights, gold_indices))
return sum(loss)
else:
loss = list()
for k in dep_weights.keys():
n_labels = label_weights[k].shape[3]
xs = [sentences[b][k + "_spans"][0].to(device) for b in range(n_batch)]
ys = [sentences[b][k + "_spans"][1].to(device) for b in range(n_batch)]
# span loss
gold_span_indices = torch.zeros((n_batch, n_max_words, n_max_words), dtype=torch.float).to(device)
for b in range(n_batch):
gold_span_indices[b, xs[b], ys[b]] = 1.
gold_span_indices = gold_span_indices[mask]
loss.append(self.binary_builder(dep_weights[k].squeeze(-1)[mask], gold_span_indices))
# label loss
label_mask = torch.zeros((n_batch, n_max_words, n_max_words), device=device, requires_grad=False, dtype=torch.bool)
for b in range(n_batch):
label_mask[b, xs[b], ys[b]] = True
# this is inefficient, but I do this to ensure the same order after unmasking
gold_label_indices = torch.zeros((n_batch, n_max_words, n_max_words), device=device, requires_grad=False, dtype=torch.long)
for b in range(n_batch):
gold_label_indices[b, xs[b], ys[b]] = sentences[b][k + "_labels"].to(device)
loss.append(self.builder(label_weights[k][mask], gold_label_indices[mask]))
# maybe it should be this?
# return sum(loss)
return sum(l.sum() for l in loss)
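# A toy illustration of the span mask built above (the sizes here are
# assumptions for the sketch only): keep upper-triangular (i <= j) cells,
# restricted to each sentence's true length.
if __name__ == "__main__":
    _sizes = torch.LongTensor([2, 3])  # two sentences of lengths 2 and 3
    _n = 3  # max sentence length in the batch
    _tri = torch.ones((_n, _n), dtype=bool).triu_(diagonal=0)
    _size_mask = torch.arange(_n).unsqueeze(0) < _sizes.unsqueeze(1)
    _mask = _tri.expand(2, -1, -1) * _size_mask.unsqueeze(2) * _size_mask.unsqueeze(1)
    print(_mask.int())  # sentence 0 keeps a 2x2 upper triangle, sentence 1 a 3x3 one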
# why is this one wrong?
"""
class BatchUnstructuredCorrectProbLoss(nn.Module):
def __init__(self, joint=True, reduction="mean"):
super(BatchUnstructuredCorrectProbLoss, self).__init__()
if not joint:
raise RuntimeError("Not implemented")
self.builder = nn.CrossEntropyLoss(reduction=reduction)
def build_indices(self, name, n_words, device):
id_x = list()
id_y = list()
if name == "cont":
for i in range(n_words):
for j in range(i, n_words):
id_x.append(i)
id_y.append(j)
elif name =="disc" or name == "gap":
for i in range(n_words):
for j in range(i, n_words):
for k in range(i + 1, j):
for l in range(k, j):
if name == "disc":
id_x.append(i)
id_y.append(j)
else:
id_x.append(k)
id_y.append(l)
else:
raise RuntimeError("Invalid name: %s" % name)
return (
torch.LongTensor(id_x).to(device),
torch.LongTensor(id_y).to(device)
)
# assume that weights are given in a matrix of size (n words + 2, n words + 2)
# so the first word will be ignored
def forward(self, dep_weights, label_weights, sentences):
# remove the first line/col that corresponds to BOS tag
dep_weights = {k: v[:, 1:, 1:] for k, v in dep_weights.items()}
label_weights = {k: v[:, 1:, 1:] for k, v in label_weights.items()}
n_batch = dep_weights["cont"].shape[0]
device = dep_weights["cont"].device
loss = list()
weights = {k: dep_weights[k] + label_weights[k] for k in dep_weights.keys()}
for k in dep_weights.keys():
n_labels = weights[k].shape[3]
for b, sentence in enumerate(sentences):
n_words = len(sentence["words"]) - 2
id_x, id_y = self.build_indices(k, n_words, weights[k].device)
# the last label will be used as a "fake empty label" value
gold_indices = torch.empty((n_words, n_words), dtype=torch.long, requires_grad=False, device=device).fill_(n_labels)
x = sentences[b][k + "_spans"][0].to(device)
y = sentences[b][k + "_spans"][1].to(device)
z = sentences[b][k + "_labels"].to(device)
gold_indices[x, y] = z
sentence_weights = weights[k][b, id_x, id_y]
# "fake" label
sentence_weights = torch.cat([sentence_weights, torch.zeros((sentence_weights.shape[0], 1), device=device)], dim=1)
gold_indices = gold_indices[id_x, id_y]
loss.append(self.builder(sentence_weights, gold_indices))
return sum(loss)
""" |
import collections
# Keep indexes of good candidates in deque d.
# The indexes in d are from the current window, they're increasing,
# and their corresponding nums are decreasing.
# Then the first deque element is the index of the largest window value.
# For each index i:
# 1. Pop (from the end) indexes of smaller elements (they'll be useless).
# 2. Append the current index.
# 3. Pop (from the front) the index i - k, if it's still in the deque
# (it falls out of the window).
# 4. If our window has reached size k,
# append the current window maximum to the output.
def max_sliding_window(nums, k):
d = collections.deque()
out = []
for i, n in enumerate(nums):
while d and nums[d[-1]] < n:
d.pop()
d += i,
if d[0] == i - k:
d.popleft()
if i >= k - 1:
out += nums[d[0]],
return out
array = [1, 3, -1, -3, 5, 3, 6, 7]
print(max_sliding_window(array, 3))  # window size k=3 -> [3, 3, 5, 5, 6, 7]
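# A couple of quick checks of the function above (inputs are made up for the example):
assert max_sliding_window([1, 3, -1, -3, 5, 3, 6, 7], 2) == [3, 3, -1, 5, 5, 6, 7]
assert max_sliding_window([4], 1) == [4]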
|
from distutils.core import setup, Extension
import numpy
module = Extension('FunNorm', sources=['FunNorm.c'], libraries=['gsl', 'gslcblas', 'm'], include_dirs=[numpy.get_include() + '/numpy'])
setup(name='Functions created by Nestor: C/Python extensions', version='1.0', ext_modules=[module])
|
# Copyright 2013-2014, Simon Kennedy, [email protected]
#
# Part of 'hiss' the asynchronous notification library
import hashlib
from os import urandom
from binascii import hexlify, unhexlify
from hiss.exception import ValidationError
DEFAULT_HASH_ALGORITHM = 'SHA256'
class HashInfo():
"""Records the hash information required to be sent to remote hosts.
:param algorithm: The algorithm used to generate the hash
:type algorithm: str
:param key_hash: The hash of the key. Generated by the hash algorithm
:type key_hash: bytes
:param salt: The salt added to the key to generate the key_hash
:type salt: bytes
"""
def __init__(self, algorithm, key_hash, salt):
self._algorithm = None
self._key_hash = None
self._salt = None
self.algorithm = algorithm
self.key_hash = key_hash
self.salt = salt
@property
def algorithm(self):
"""Hash algorithm. One of MD5, SHA1 or SHA256"""
return self._algorithm
@algorithm.setter
def algorithm(self, value):
if isinstance(value, (bytes, bytearray)):
value = value.decode('UTF-8')
value = value.upper()
        if value not in ['MD5', 'SHA1', 'SHA256', 'SHA512']:
raise ValidationError('Unknown hash algorithm %s specified.' % value)
self._algorithm = value
@property
def key_hash(self):
"""algorithm(password + salt)"""
return self._key_hash
@key_hash.setter
def key_hash(self, value):
if isinstance(value, str):
value = unhexlify(value.encode('UTF-8'))
self._key_hash = value
@property
def salt(self):
"""Salt used to compute the key_hash."""
return self._salt
@salt.setter
def salt(self, value):
if isinstance(value, str):
value = unhexlify(value.encode('UTF-8'))
self._salt = value
def __eq__(self, other):
return self.algorithm == other.algorithm and \
self.key_hash == other.key_hash and \
self.salt == other.salt
def __repr__(self):
return '%s:%s.%s' % (self.algorithm,
hexlify(self.key_hash).decode('UTF-8'),
hexlify(self.salt).decode('UTF-8'))
def generate_hash(password, hash_algorithm=DEFAULT_HASH_ALGORITHM):
"""Create a :class:`~hiss.hash.HashInfo` instance for the password using
the specified hash algorithm.
:param password: The password to generate the hash for.
:type password: str or UTF-8 encoded bytes.
:param hash_algorithm: The algorithm to use to generate the hash (one of
`MD5`, `SHA1`, `SHA256` (default) or `SHA512`)
:type hash_algorithm: str
:raises HissError: if the hash_algorithm specified is not known
"""
if hash_algorithm not in ['MD5', 'SHA1', 'SHA256', 'SHA512']:
raise ValidationError("Don't know how to handle hash algorithm %s" % hash_algorithm)
if isinstance(password, str):
password = password.encode('UTF-8')
salt = generate_salt()
key_basis = password + salt
if hash_algorithm == 'MD5':
key = hashlib.md5(key_basis).digest()
key_hash = hashlib.md5(key).digest()
elif hash_algorithm == 'SHA1':
key = hashlib.sha1(key_basis).digest()
key_hash = hashlib.sha1(key).digest()
elif hash_algorithm == 'SHA256':
key = hashlib.sha256(key_basis).digest()
key_hash = hashlib.sha256(key).digest()
elif hash_algorithm == 'SHA512':
key = hashlib.sha512(key_basis).digest()
key_hash = hashlib.sha512(key).digest()
    return HashInfo(hash_algorithm, key_hash, salt)
def validate_hash(password, hash_to_validate):
"""Validate the information stored in the :class:`~hiss.hash.HashInfo`
instance against the password.
:param password: The password used to validate the hash.
:type password: str or UTF-8 encoded bytes.
:param hash_to_validate: The hash to validate
:type hash_to_validate: :class:`~hiss.hash.HashInfo`
:raises HissError: if the hash validation fails.
"""
if hash_to_validate.algorithm not in ['MD5', 'SHA1', 'SHA256', 'SHA512']:
raise ValueError("Don't know how to validate hash algorithm %s" % hash_to_validate.algorithm)
if isinstance(password, str):
password = password.encode('UTF-8')
key_basis = password + hash_to_validate.salt
if hash_to_validate.algorithm == 'MD5':
key = hashlib.md5(key_basis).digest()
key_hash = hashlib.md5(key).digest()
elif hash_to_validate.algorithm == 'SHA1':
key = hashlib.sha1(key_basis).digest()
key_hash = hashlib.sha1(key).digest()
elif hash_to_validate.algorithm == 'SHA256':
key = hashlib.sha256(key_basis).digest()
key_hash = hashlib.sha256(key).digest()
elif hash_to_validate.algorithm == 'SHA512':
key = hashlib.sha512(key_basis).digest()
key_hash = hashlib.sha512(key).digest()
if hash_to_validate.key_hash != key_hash:
raise ValidationError('Invalid hash in request')
def generate_salt(size=16):
return urandom(size)
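# A brief usage sketch (values are assumptions, not part of the library API):
# derive a salted hash for a password, then validate a correct and an
# incorrect password against it.
if __name__ == '__main__':
    info = generate_hash('s3cret', 'SHA256')
    print(repr(info))  # SHA256:<hex key hash>.<hex salt>
    validate_hash('s3cret', info)  # passes silently
    try:
        validate_hash('wrong', info)
    except ValidationError:
        print('mismatch detected')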
|
from .BroadcastInfo import BroadcastInfo
from .ConcatInfo import ConcatInfo
from .PadInfo import PadInfo
from .ReductionInfo import ReductionInfo
from .ReshapeInfo import ReshapeInfo
from .SliceInfo import SliceInfo
from .StackInfo import StackInfo
from .TileInfo import TileInfo
from .TransposeInfo import TransposeInfo
from .Conv2DInfo import Conv2DInfo |
from sqlite_random import Sqlite_random
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
def test_home():
options = Options()
options.add_argument("--no-sandbox")
options.add_argument("--headless")
driver = webdriver.Chrome(chrome_options=options)
driver.get("http://162.246.157.110:8000/")
assert driver.find_element_by_id("name") != None
assert driver.find_element_by_id("about") != None
assert driver.find_element_by_id("education") != None
assert driver.find_element_by_id("skills") != None
assert driver.find_element_by_id("work") != None
assert driver.find_element_by_id("contact") != None
test_home() |
import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
n = int(readline())
s = read().rstrip().decode()
cnt = 0
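# Count how many of the 1000 possible 3-digit PINs (000..999) occur as a
# subsequence of s: greedily locate each digit after the previous match with
# str.find; the for/else increments cnt only when all three digits are found.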
for i in range(1000):
num = str(i).zfill(3)
idx = -1
for m in num:
idx = s.find(m, idx + 1)
if idx == -1:
break
else:
cnt += 1
print(cnt)
|
# Do not import ir in this Module
import typing
if typing.TYPE_CHECKING:
from spydrnet.ir.element import Element
from spydrnet.ir.first_class_element import FirstClassElement
from spydrnet.ir.netlist import Netlist
from spydrnet.ir.library import Library
from spydrnet.ir.definition import Definition
from spydrnet.ir.port import Port
from spydrnet.ir.cable import Cable
from spydrnet.ir.wire import Wire
from spydrnet.ir.instance import Instance
from spydrnet.ir.innerpin import InnerPin
from spydrnet.ir.outerpin import OuterPin
from spydrnet.ir.bundle import Bundle
from spydrnet.ir.pin import Pin
|
import unittest
from parameterized import parameterized_class
from pyfx import PyfxApp
from pyfx.config import parse
from pyfx.model import DataSourceType
from tests.fixtures import FIXTURES_DIR
from tests.fixtures.keys import split
@parameterized_class([
{"config_file": "configs/basic.yml"},
{"config_file": "configs/emacs.yml"},
{"config_file": "configs/vim.yml"}
])
class AutoCompleteIT(unittest.TestCase):
"""
    Integration tests for the query and auto-completion workflow.
"""
def setUp(self):
self.config = parse(FIXTURES_DIR / self.config_file)
self.keymap = self.config.view.keymap.mapping
def test_autocomplete_select(self):
"""
        Test navigating and selecting one of the auto-complete options.
"""
data = {
"alice": "0",
"bob": "1",
"chuck": "2",
"daniel": "3"
}
app = PyfxApp(self.config)
model = app._model
model.load(DataSourceType.VARIABLE, data)
view = app._view
inputs = split([
# 1. enter query bar
self.keymap.json_browser.open_query_bar,
# 2. input '.'
".",
# 3. move down in the autocomplete popup
self.keymap.autocomplete_popup.cursor_down,
# 4. select option
self.keymap.autocomplete_popup.select,
# 5. apply query and switch to json browser
self.keymap.query_bar.query,
# 6. exit
self.keymap.exit
], self.keymap.global_command_key)
result, err = view.process_input(data, inputs)
self.assertEqual(True, result, err)
def test_autocomplete_cancel(self):
"""
        Test navigating and cancelling auto-complete.
"""
data = {
"alice": "0",
"bob": "1",
"chuck": "2",
"daniel": "3"
}
app = PyfxApp(self.config)
model = app._model
model.load(DataSourceType.VARIABLE, data)
view = app._view
inputs = split([
# 1. enter query bar
self.keymap.json_browser.open_query_bar,
# 2. input '.'
".",
# 3. move down in the autocomplete popup twice
self.keymap.autocomplete_popup.cursor_down,
self.keymap.autocomplete_popup.cursor_down,
# 4. move up in the autocomplete popup
self.keymap.autocomplete_popup.cursor_up,
# 5. cancel autocomplete
self.keymap.autocomplete_popup.cancel,
# 6. remove last '.'
"backspace",
# 7. apply query and switch to json browser
self.keymap.query_bar.query,
# 8. exit
self.keymap.exit
], self.keymap.global_command_key)
result, err = view.process_input(data, inputs)
self.assertEqual(True, result, err)
def test_autocomplete_navigation(self):
"""
Test navigation in auto-complete, particularly with pressing
navigation key at the start or end of the list.
"""
data = {
"alice": "0",
"bob": "1"
}
app = PyfxApp(self.config)
model = app._model
model.load(DataSourceType.VARIABLE, data)
view = app._view
inputs = split([
# 1. enter query bar
self.keymap.json_browser.open_query_bar,
# 2. input '.'
".",
            # 3. move down in the autocomplete popup three times,
# extra navigation key should not have any effect
self.keymap.autocomplete_popup.cursor_down,
self.keymap.autocomplete_popup.cursor_down,
self.keymap.autocomplete_popup.cursor_down,
# 4. move up in the autocomplete popup,
# extra navigation key should not have any effect
self.keymap.autocomplete_popup.cursor_up,
self.keymap.autocomplete_popup.cursor_up,
self.keymap.autocomplete_popup.cursor_up,
self.keymap.autocomplete_popup.cursor_up,
# 5. select option
self.keymap.autocomplete_popup.select,
# 6. apply query and switch to json browser
self.keymap.query_bar.query,
# 7. exit
self.keymap.exit
], self.keymap.global_command_key)
result, err = view.process_input(data, inputs)
self.assertEqual(True, result, err)
def test_autocomplete_pass_keypress(self):
"""
Test autocomplete popup pass keypress to query bar and update itself.
"""
data = {
"alice": "0",
"bob": "1"
}
app = PyfxApp(self.config)
model = app._model
model.load(DataSourceType.VARIABLE, data)
view = app._view
inputs = split([
# 1. enter query bar
self.keymap.json_browser.open_query_bar,
# 2. input '.'
".",
# 3. input 'a'
"a",
# 4. select autocomplete
self.keymap.autocomplete_popup.select,
# 5. apply query and switch to json browser
self.keymap.query_bar.query,
# 6. exit
self.keymap.exit
], self.keymap.global_command_key)
result, err = view.process_input(data, inputs)
self.assertEqual(True, result, err)
|
"""Definitions (types) for style sheets."""
from enum import Enum
from typing import Sequence, Tuple, TypedDict, Union
Color = Tuple[float, float, float, float] # RGBA
Padding = Tuple[float, float, float, float] # top/right/bottom/left
Number = Union[int, float]
class TextAlign(Enum):
LEFT = "left"
CENTER = "center"
RIGHT = "right"
class VerticalAlign(Enum):
TOP = "top"
MIDDLE = "middle"
BOTTOM = "bottom"
class FontStyle(Enum):
NORMAL = "normal"
ITALIC = "italic"
class FontWeight(Enum):
NORMAL = "normal"
BOLD = "bold"
class TextDecoration(Enum):
NONE = "none"
UNDERLINE = "underline"
# Style is using SVG properties where possible
# https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute
Style = TypedDict(
"Style",
{
"background-color": Color,
"border-radius": Number,
"color": Color,
"dash-style": Sequence[Number],
"padding": Padding,
"font-family": str,
"font-size": Union[int, float, str],
"font-style": FontStyle,
"font-weight": FontWeight,
"line-style": Number,
"line-width": Number,
"min-width": Number,
"min-height": Number,
"opacity": Number,
"text-decoration": TextDecoration,
"text-align": TextAlign,
"text-color": Color,
"vertical-align": VerticalAlign,
"vertical-spacing": Number,
},
total=False,
)
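# A small illustrative style (the values below are assumptions, not library
# defaults). Since the TypedDict is declared with total=False, a partial
# style such as this one type-checks fine.
example_style: Style = {
    "background-color": (1.0, 1.0, 1.0, 1.0),
    "padding": (4.0, 8.0, 4.0, 8.0),
    "font-weight": FontWeight.BOLD,
    "text-align": TextAlign.CENTER,
}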
|
from shapy.framework.commands import *
from .stats import * |
from unittest import TestCase
import numpy as np
from model_parameters.KaonParametersFixedRhoOmega import KaonParametersFixedRhoOmega, Parameter
class TestModelParametersFixedRhoOmega(TestCase):
def setUp(self):
self.parameters = KaonParametersFixedRhoOmega(
0.5, 0.6, 0.7, 0.8,
0.1, 0.78266, 0.00868,
0.2, 1.410, 0.29,
0.15, 1.67, 0.315,
0.3, 1.019461, 0.004249,
0.35, 1.680, 0.150,
2.159, 0.137,
0.12, 0.77526, 0.1474,
0.13, 1.465, 0.4,
0.14, 1.720, 0.25,
2.15, 0.3,
)
def test___getitem__(self):
with self.subTest():
par1 = self.parameters['a_rho_double_prime']
self.assertEqual(par1.value, 0.14)
with self.subTest():
par2 = self.parameters['mass_phi_prime']
self.assertEqual(par2.value, 1.680)
with self.subTest():
par3 = self.parameters['t_in_isovector']
self.assertEqual(par3.value, 0.8)
with self.subTest():
with self.assertRaises(KeyError):
_ = self.parameters['non-existent-key']
def test___setitem__(self):
with self.subTest(msg='Testing type checking'):
with self.assertRaises(TypeError):
self.parameters['a_omega'] = 2.0 # type: ignore
with self.subTest(msg='Testing parameter name checking'):
with self.assertRaises(ValueError):
self.parameters['a_omega'] = Parameter(name='new name', value=2.0, is_fixed=True)
with self.subTest(msg='Testing a successful call'):
self.parameters['a_omega'] = Parameter(name='a_omega', value=95.412, is_fixed=True)
self.assertEqual(self.parameters['a_omega'].value, 95.412)
self.assertTrue(self.parameters['a_omega'].is_fixed)
def test_set_value(self):
self.parameters.set_value('a_rho_prime', 0.654321)
self.assertEqual(self.parameters['a_rho_prime'].value, 0.654321)
def test_fix_parameters(self):
self.parameters.release_all_parameters()
self.parameters.fix_parameters(['mass_phi_prime', 't_in_isoscalar'])
with self.subTest():
self.assertTrue(self.parameters['mass_phi_prime'].is_fixed)
with self.subTest():
self.assertTrue(self.parameters['t_in_isoscalar'].is_fixed)
def test_release_parameters(self):
self.parameters.fix_parameters(['a_rho_double_prime'])
self.parameters.release_parameters(['a_rho_double_prime'])
self.assertFalse(self.parameters['a_rho_double_prime'].is_fixed)
def test_release_all_parameters(self):
self.parameters.fix_parameters(['a_phi', 'mass_omega_prime', 't_in_isovector'])
self.parameters.release_all_parameters()
self.assertFalse(self.parameters['a_phi'].is_fixed)
self.assertFalse(self.parameters['mass_omega_prime'].is_fixed)
self.assertFalse(self.parameters['t_in_isovector'].is_fixed)
def test_fix_all_parameters(self):
self.parameters.release_parameters(['a_phi', 'mass_omega_prime', 't_in_isovector'])
self.parameters.fix_all_parameters()
self.assertTrue(self.parameters['a_phi'].is_fixed)
self.assertTrue(self.parameters['mass_omega_prime'].is_fixed)
self.assertTrue(self.parameters['t_in_isovector'].is_fixed)
def test_get_fixed_values(self):
self.parameters.release_all_parameters()
self.parameters.fix_parameters(['a_omega_double_prime', 'mass_rho_prime', 't_in_isovector'])
self.assertListEqual(
self.parameters.get_fixed_values(),
[0.5, 0.6, 0.8, 0.78266, 0.00868, 0.15, 0.77526, 0.1474, 1.465],
)
def test_get_free_values(self):
self.parameters.release_all_parameters()
self.parameters.fix_parameters([
'a_phi', 'mass_phi', 'decay_rate_phi',
'a_phi_prime', 'mass_phi_prime', 'decay_rate_phi_prime',
'mass_phi_double_prime', 'decay_rate_phi_double_prime',
'a_omega', 'a_omega_prime', 'mass_omega_prime', 'decay_rate_omega_prime',
'a_omega_double_prime', 'mass_omega_double_prime', 'decay_rate_omega_double_prime',
])
self.assertListEqual(
self.parameters.get_free_values(),
[0.7, 0.8, 0.12, 0.13, 1.465, 0.4,
0.14, 1.720, 0.25, 2.15, 0.3],
)
def test_get_all_values(self):
self.assertListEqual(
self.parameters.get_all_values(),
[0.5, 0.6, 0.7, 0.8, 0.1, 0.78266, 0.00868, 0.2, 1.410, 0.29,
0.15, 1.67, 0.315, 0.3, 1.019461, 0.004249, 0.35, 1.680, 0.150,
2.159, 0.137, 0.12, 0.77526, 0.1474, 0.13, 1.465, 0.4,
0.14, 1.720, 0.25, 2.15, 0.3],
)
def test_update_free_values(self):
self.parameters.release_all_parameters()
self.parameters.fix_parameters([
'a_phi', 'mass_phi', 'decay_rate_phi',
'a_phi_prime', 'mass_phi_prime', 'decay_rate_phi_prime',
'mass_phi_double_prime', 'decay_rate_phi_double_prime',
'a_omega', 'a_omega_prime', 'mass_omega_prime', 'decay_rate_omega_prime',
'a_omega_double_prime', 'mass_omega_double_prime', 'decay_rate_omega_double_prime',
])
self.parameters.update_free_values(
[0.7, 0.82, 0.12, 0.0, 14.65, 0.4, 0.24, 1.724, 0.25, 0.15, 7.4]
)
self.assertListEqual(
self.parameters.get_free_values(),
[0.7, 0.82, 0.12, 0.0, 14.65, 0.4, 0.24, 1.724, 0.25, 0.15, 7.4]
)
def test_update_free_values__length_mismatch(self):
self.parameters.release_all_parameters()
with self.assertRaises(ValueError):
self.parameters.update_free_values([1])
def test___iter__(self):
self.assertListEqual(
[parameter.value for parameter in self.parameters],
self.parameters.get_all_values(),
)
def test_get_bounds_for_free_parameters(self):
self.parameters.release_all_parameters()
self.parameters.fix_parameters([
't_in_isovector',
'a_phi', 'decay_rate_phi',
'a_phi_prime', 'mass_phi_prime', 'decay_rate_phi_prime',
'mass_phi_double_prime', 'decay_rate_phi_double_prime',
'a_omega', 'a_omega_prime', 'mass_omega_prime', 'decay_rate_omega_prime',
'a_omega_double_prime', 'mass_omega_double_prime', 'decay_rate_omega_double_prime',
'a_rho', 'a_rho_prime', 'mass_rho_prime', 'decay_rate_rho_prime',
'mass_rho_triple_prime', 'decay_rate_rho_triple_prime',
])
lower_bounds, upper_bounds = self.parameters.get_bounds_for_free_parameters(handpicked=False)
self.assertListEqual(
lower_bounds,
[0.5, 0.7071067811865476, -np.inf, 0.7745966692414834, 0.0],
)
self.assertListEqual(
upper_bounds,
[np.inf, np.inf, np.inf, np.inf, np.inf],
)
def test_from_list_to_list_consistency(self):
list_of_parameters = [
Parameter(name='t_0_isoscalar', value=0.17531904388276887, is_fixed=True),
Parameter(name='t_0_isovector', value=0.07791957505900839, is_fixed=True),
Parameter(name='t_in_isoscalar', value=1.443968455888137, is_fixed=False),
Parameter(name='t_in_isovector', value=2.2187117617810133, is_fixed=False),
Parameter(name='a_omega', value=-0.6250308032982322, is_fixed=False),
Parameter(name='mass_omega', value=0.9980285527253926, is_fixed=True),
Parameter(name='decay_rate_omega', value=0.33979382250306833, is_fixed=True),
Parameter(name='a_omega_prime', value=2.5941212472427653, is_fixed=False),
Parameter(name='mass_omega_prime', value=1.262175646031432, is_fixed=False),
Parameter(name='decay_rate_omega_prime', value=0.29325211379153737, is_fixed=False),
Parameter(name='a_omega_double_prime', value=-0.7584476425520864, is_fixed=False),
Parameter(name='mass_omega_double_prime', value=1.65208370470636, is_fixed=False),
Parameter(name='decay_rate_omega_double_prime', value=0.88444489573003, is_fixed=False),
Parameter(name='a_phi', value=-0.7613717224426919, is_fixed=False),
Parameter(name='mass_phi', value=1.032465750329927, is_fixed=False),
Parameter(name='decay_rate_phi', value=0.007187927884885847, is_fixed=False),
Parameter(name='a_phi_prime', value=0.049186367861031886, is_fixed=False),
Parameter(name='mass_phi_prime', value=1.798584035817393, is_fixed=False),
Parameter(name='decay_rate_phi_prime', value=0.18684970951346638, is_fixed=True),
Parameter(name='mass_phi_double_prime', value=2.1701948878602404, is_fixed=False),
Parameter(name='decay_rate_phi_double_prime', value=0.018584914300650075, is_fixed=False),
Parameter(name='a_rho', value=0.769926083031994, is_fixed=False),
Parameter(name='mass_rho', value=1.0785537445043378, is_fixed=True),
Parameter(name='decay_rate_rho', value=0.1244368079469832, is_fixed=True),
Parameter(name='a_rho_prime', value=-0.26856897443026406, is_fixed=False),
Parameter(name='mass_rho_prime', value=1.6384823897203615, is_fixed=False),
Parameter(name='decay_rate_rho_prime', value=0.13857062014248653, is_fixed=True),
Parameter(name='a_rho_double_prime', value=-0.004875792606689498, is_fixed=False),
Parameter(name='mass_rho_double_prime', value=2.245836486713978, is_fixed=False),
Parameter(name='decay_rate_rho_double_prime', value=0.1035328301790177, is_fixed=False),
Parameter(name='mass_rho_triple_prime', value=3.463509700968759, is_fixed=True),
Parameter(name='decay_rate_rho_triple_prime', value=1.4560004801176234, is_fixed=False),
]
model_parameters = KaonParametersFixedRhoOmega.from_list(list_of_parameters)
self.assertIsInstance(model_parameters, KaonParametersFixedRhoOmega)
self.assertListEqual(list_of_parameters, model_parameters.to_list())
def test_to_list_from_list_consistency(self):
recreated = KaonParametersFixedRhoOmega.from_list(self.parameters.to_list())
self.assertEqual(
recreated['a_rho_double_prime'].value,
self.parameters['a_rho_double_prime'].value,
)
self.assertEqual(
recreated['mass_omega'].value,
self.parameters['mass_omega'].value,
)
self.assertEqual(
recreated['t_0_isovector'].value,
self.parameters['t_0_isovector'].value,
)
|
from django.test import TestCase
from curious import model_registry
from curious.query import Query
from curious_tests.models import Blog, Entry, Author
from curious_tests import assertQueryResultsEqual
import curious_tests.models
class TestQueryJoins(TestCase):
def setUp(self):
names = ('Databases', 'Relational Databases', 'Graph Databases')
authors = ('John Smith', 'Jane Doe', 'Joe Plummer')
headlines = ('MySQL is a relational DB',
'Postgres is a really good relational DB',
'Neo4J is a graph DB')
self.blogs = [Blog(name=name) for name in names]
for blog in self.blogs:
blog.save()
self.entries = [Entry(headline=headline, blog=blog) for headline, blog in zip(headlines, self.blogs)]
for entry in self.entries:
entry.save()
self.authors = [Author(name=name) for name in authors]
for author in self.authors:
author.save()
for i, entry in enumerate(self.entries):
entry.authors.add(self.authors[i])
entry.authors.add(self.authors[(i+1)%len(self.authors)])
model_registry.register(curious_tests.models)
model_registry.get_manager('Blog').allowed_relationships = ['authors']
def tearDown(self):
model_registry.clear()
def test_first_set_of_results_are_unique_and_not_separated_by_objects_from_first_relation(self):
qs = 'Blog(name__icontains="Databases") Blog.entry_set Entry.authors'
query = Query(qs)
result = query()
self.assertEquals(result[0][0][1], -1)
assertQueryResultsEqual(self, result[0][0][0], [(self.authors[0], None),
(self.authors[1], None),
(self.authors[2], None)])
self.assertEquals(len(result[0]), 1)
self.assertEquals(result[1], Author)
def test_separates_second_set_of_results_by_objects_from_first_set_of_results(self):
qs = 'Blog(name__icontains="Databases"), Blog.entry_set Entry.authors'
query = Query(qs)
result = query()
self.assertEquals(result[0][0][1], -1)
assertQueryResultsEqual(self, result[0][0][0], [(self.blogs[0], None),
(self.blogs[1], None),
(self.blogs[2], None)])
self.assertEquals(result[0][1][1], 0)
assertQueryResultsEqual(self, result[0][1][0], [(self.authors[0], self.blogs[0].pk),
(self.authors[1], self.blogs[0].pk),
(self.authors[1], self.blogs[1].pk),
(self.authors[2], self.blogs[1].pk),
(self.authors[2], self.blogs[2].pk),
(self.authors[0], self.blogs[2].pk)])
self.assertEquals(len(result[0]), 2)
self.assertEquals(result[1], Author)
def test_outputs_of_filter_query_are_separated_by_inputs_to_filter_query(self):
qs = 'Blog(name__icontains="Databases") (Blog.entry_set Entry.authors)'
query = Query(qs)
result = query()
self.assertEquals(result[0][0][1], -1)
assertQueryResultsEqual(self, result[0][0][0], [(self.blogs[0], None),
(self.blogs[1], None),
(self.blogs[2], None)])
self.assertEquals(result[0][1][1], 0)
assertQueryResultsEqual(self, result[0][1][0], [(self.authors[0], self.blogs[0].pk),
(self.authors[1], self.blogs[0].pk),
(self.authors[1], self.blogs[1].pk),
(self.authors[2], self.blogs[1].pk),
(self.authors[2], self.blogs[2].pk),
(self.authors[0], self.blogs[2].pk)])
self.assertEquals(len(result[0]), 2)
self.assertEquals(result[1], Blog)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Christian Heider Nielsen"
__doc__ = r"""
Created on 18/03/2020
"""
__all__ = ["get_host_ip"]
def get_host_ip() -> str:
"""Get host ip.
Returns:
str: The obtained ip. UNKNOWN if failed."""
import socket
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 80))
        ip = s.getsockname()[0]
    except OSError:
        ip = "UNKNOWN"
    finally:
        s.close()
return ip
if __name__ == "__main__":
HOST_IP = get_host_ip()
|
'''
Schema of behavioral information.
'''
import re
import os
from datetime import datetime
import pathlib
import numpy as np
import scipy.io as sio
import datajoint as dj
import h5py as h5
from . import reference, subject, utilities, stimulation, acquisition, analysis
schema = dj.schema(dj.config['custom'].get('database.prefix', '') + 'behavior')
sess_data_dir = pathlib.Path(dj.config['custom'].get('intracellular_directory')).as_posix()
@schema
class LickTrace(dj.Imported):
definition = """
-> acquisition.Session
---
lick_trace_left: longblob
lick_trace_right: longblob
lick_trace_start_time: float # (s) first timepoint of lick trace recording
lick_trace_sampling_rate: float # (Hz) sampling rate of lick trace recording
"""
def make(self, key):
# ============ Dataset ============
# Get the Session definition from the keys of this session
animal_id = key['subject_id']
date_of_experiment = key['session_time']
# Search the files in filenames to find a match for "this" session (based on key)
sess_data_file = utilities.find_session_matched_nwbfile(sess_data_dir, animal_id, date_of_experiment)
if sess_data_file is None:
raise FileNotFoundError(f'BehaviorAcquisition import failed for: {animal_id} - {date_of_experiment}')
nwb = h5.File(os.path.join(sess_data_dir, sess_data_file), 'r')
# ============= Now read the data and start ingesting =============
print('Insert behavioral data for: subject: {0} - date: {1}'.format(key['subject_id'], key['session_time']))
key['lick_trace_left'] = nwb['acquisition']['timeseries']['lick_trace_L']['data'].value
key['lick_trace_right'] = nwb['acquisition']['timeseries']['lick_trace_R']['data'].value
lick_trace_time_stamps = nwb['acquisition']['timeseries']['lick_trace_R']['timestamps'].value
key['lick_trace_start_time'] = lick_trace_time_stamps[0]
key['lick_trace_sampling_rate'] = 1 / np.mean(np.diff(lick_trace_time_stamps))
self.insert1(key)
@schema
class TrialSegmentedLickTrace(dj.Computed):
definition = """
-> LickTrace
-> acquisition.TrialSet.Trial
-> analysis.TrialSegmentationSetting
---
segmented_lt_left: longblob
segmented_lt_right: longblob
"""
def make(self, key):
# get event, pre/post stim duration
event_name, pre_stim_dur, post_stim_dur = (analysis.TrialSegmentationSetting & key).fetch1(
'event', 'pre_stim_duration', 'post_stim_duration')
# get raw
fs, first_time_point, lt_left, lt_right = (LickTrace & key).fetch1(
'lick_trace_sampling_rate', 'lick_trace_start_time', 'lick_trace_left', 'lick_trace_right')
# segmentation
key['segmented_lt_left'] = analysis.perform_trial_segmentation(key, event_name, pre_stim_dur, post_stim_dur,
lt_left, fs, first_time_point)
key['segmented_lt_right'] = analysis.perform_trial_segmentation(key, event_name, pre_stim_dur, post_stim_dur,
lt_right, fs, first_time_point)
self.LickTrace.insert1(key)
print(f'Perform trial-segmentation of lick traces for trial: {key["trial_id"]}')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Base Taxon of SpeciesEvolver."""
from abc import ABC, abstractmethod
class Taxon(ABC):
"""Base Taxon of SpeciesEvolver.
A SpeciesEvolver Taxon represents a group of organisms. Examples of the
kind of group represented by a subclass include an analog group (e.g., a
specific trout species, seed plants), a taxonomic level (e.g., phylum,
species, population). More generally, Taxon subclasses act as approaches to
simulate evolution of biologic groups. Taxon can be made of individual
organisms, although SpeciesEvolver currently has no built in functionality
for individuals.
This class is intended to be subclassed. Subclasses must implement the
properties and methods of this base class that are designated as abstract.
The methods must take the same parameters and both the properties and
methods must return the values described in their docstrings.
"""
def __init__(self):
self._extant = True
self._uid = None
self._parent = None
self._children = []
def __repr__(self):
return '<{}, uid={}>'.format(self.__class__.__name__, self.uid)
@property
def uid(self):
"""The unique identifier of the taxon.
The identifier is a unique integer automatically assigned by
SpeciesEvolver once the component begins tracking the taxon. It is
read-only as it should not be changed once it is assigned.
"""
return self._uid
@property
def extant(self):
"""The living state of the taxon.
The taxon lives at the current model time if ``True``. The taxon is
extinct as of the current model time if ``False``.
"""
return self._extant
@extant.setter
def extant(self, value):
"""Set the living state of the taxon."""
self._extant = value
@property
def parent(self):
"""The parent taxon.
The parent is the taxon object that produced this object. A value of
``None`` indicates no parent taxon.
"""
return self._parent
@parent.setter
def parent(self, value):
"""Set the parent taxon.
This method also appends this taxon to the list of children belonging
to the parent taxon.
"""
self._parent = value
if value is not None:
value.children.append(self)
@property
def children(self):
"""The immediate descendents of the taxon.
The children are the objects produced by this taxon. An empty list
indicates no child taxon.
"""
return self._children
@property
@abstractmethod
def range_mask(self):
"""A mask of the taxon geographic extent.
The range mask is a boolean numpy array where True values indicate
where the taxon is located in the model grid associated with a
SpeciesEvolver instance.
This property must be implemented in a subclass.
"""
# pragma: no cover
@abstractmethod
def _evolve(self, dt, stage, record, id):
"""Run the evolutionary processes of the taxon.
SpeciesEvolver loops through the evolution processes of extant taxa in
stages during the ``run_one_step`` method of the component. Therefore
if a taxon type requires all other taxa to undergo some processing
before an evolution process, then the taxon can evolve at a later
stage using the ``stage`` parameter. This method must return a boolean
indicating if the taxon has completed evolution of all stages. See this
method implemented in ``ZoneTaxon`` for an example.
This method must be implemented in a subclass.
Parameters
----------
dt : float
The model time step duration.
stage : int
The evolution stage of the time step.
record : defaultdict
The SpeciesEvolver record.
Returns
-------
boolean
Indicates if the taxon is still evolving. When `False` is returned,
this method will not be called for the taxon in subsequent stages in
the current model time step.
"""
# pragma: no cover
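# A minimal illustrative subclass (a sketch with an assumed, fixed range mask;
# not part of SpeciesEvolver itself): it fills in the two abstract members
# with a static geographic range and a single-stage evolution step.
class _StaticTaxon(Taxon):
    """Toy taxon with a fixed geographic range, shown only as an example."""
    def __init__(self, range_mask):
        super().__init__()
        self._range_mask = range_mask
    @property
    def range_mask(self):
        return self._range_mask
    def _evolve(self, dt, stage, record, id):
        # Nothing to update; returning False signals evolution is complete.
        return False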
|
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from enum import Enum
import numpy as np
try:
import pycocotools.mask as maskUtils
except ImportError:
maskUtils = None
from .base_representation import BaseRepresentation
from ..data_readers import BaseReader
from ..utils import remove_difficult
class GTMaskLoader(Enum):
PILLOW = 0
OPENCV = 1
SCIPY = 2
NIFTI = 3
NUMPY = 4
NIFTI_CHANNELS_FIRST = 5
LOADERS_MAPPING = {
'opencv': GTMaskLoader.OPENCV,
'pillow': GTMaskLoader.PILLOW,
'scipy': GTMaskLoader.SCIPY,
'nifti': GTMaskLoader.NIFTI,
'nifti_channels_first': GTMaskLoader.NIFTI_CHANNELS_FIRST,
'numpy': GTMaskLoader.NUMPY
}
class SegmentationRepresentation(BaseRepresentation):
pass
class SegmentationAnnotation(SegmentationRepresentation):
LOADERS = {
GTMaskLoader.PILLOW: 'pillow_imread',
GTMaskLoader.OPENCV: 'opencv_imread',
GTMaskLoader.SCIPY: 'scipy_imread',
GTMaskLoader.NIFTI: 'nifti_reader',
GTMaskLoader.NIFTI_CHANNELS_FIRST: {'type': 'nifti_reader', 'channels_first': True},
GTMaskLoader.NUMPY: 'numpy_reader'
}
def __init__(self, identifier, path_to_mask, mask_loader=GTMaskLoader.PILLOW):
"""
Args:
identifier: object identifier (e.g. image name).
path_to_mask: path where segmentation mask should be loaded from. The path is relative to data source.
mask_loader: back-end, used to load segmentation masks.
"""
super().__init__(identifier)
self._mask_path = path_to_mask
self._mask_loader = mask_loader
self._mask = None
@property
def mask(self):
return self._mask if self._mask is not None else self._load_mask()
@mask.setter
def mask(self, value):
self._mask = value
def _load_mask(self):
if self._mask is None:
loader_config = self.LOADERS.get(self._mask_loader)
if isinstance(loader_config, str):
loader = BaseReader.provide(loader_config, self.metadata['data_source'])
else:
loader = BaseReader.provide(loader_config['type'], self.metadata['data_source'], config=loader_config)
if self._mask_loader == GTMaskLoader.PILLOW:
loader.convert_to_rgb = False
mask = loader.read(self._mask_path)
return mask.astype(np.uint8)
return self._mask
class SegmentationPrediction(SegmentationRepresentation):
def __init__(self, identifiers, mask):
"""
Args:
identifiers: object identifier (e.g. image name).
mask: array with shape (n_classes, height, width) of probabilities at each location.
"""
super().__init__(identifiers)
self.mask = mask
class BrainTumorSegmentationAnnotation(SegmentationAnnotation):
def __init__(self, identifier, path_to_mask, loader=GTMaskLoader.NIFTI, box=None):
super().__init__(identifier, path_to_mask, loader)
self.box = box
class BrainTumorSegmentationPrediction(SegmentationPrediction):
def __init__(self, identifiers, mask, label_order=(0, 1, 2, 3)):
super().__init__(identifiers, mask)
self.label_order = label_order
class CoCoInstanceSegmentationRepresentation(SegmentationRepresentation):
def __init__(self, identifier, mask, labels):
if not maskUtils:
            raise ValueError('pycocotools is required to create an instance segmentation representation')
super().__init__(identifier)
self.raw_mask = mask
self.labels = labels
self._mask = None
@property
def mask(self):
return self._mask if self._mask is not None else self._load_mask()
def _load_mask(self):
masks = []
image_size = self.metadata['image_size']
height, width, _ = image_size if len(np.shape(image_size)) == 1 else image_size[0]
for mask in self.raw_mask:
converted_mask = self._convert_mask(mask, height, width)
masks.append(converted_mask)
self._mask = masks
return self._mask
@staticmethod
def _convert_mask(mask, height, width):
if maskUtils and isinstance(mask, list):
rles = maskUtils.frPyObjects(mask, height, width)
rle = maskUtils.merge(rles)
elif maskUtils and isinstance(mask['counts'], list):
# uncompressed RLE
rle = maskUtils.frPyObjects(mask, height, width)
else:
rle = mask
# rle
return rle
@mask.setter
def mask(self, value):
self._mask = value
@property
def size(self):
return len(self.raw_mask)
@property
def areas(self):
precomputed_areas = self.metadata.get('areas')
if precomputed_areas:
return precomputed_areas
masks = self.mask
areas = []
for mask in masks:
areas.append(maskUtils.area(mask))
return areas
class CoCoInstanceSegmentationAnnotation(CoCoInstanceSegmentationRepresentation):
pass
class CoCocInstanceSegmentationPrediction(CoCoInstanceSegmentationRepresentation):
def __init__(self, identifier, mask, labels, scores):
super().__init__(identifier, mask, labels)
self.scores = scores
def remove(self, indexes):
self.labels = np.delete(self.labels, indexes)
self.mask = np.delete(self.mask, indexes)
self.scores = np.delete(self.scores, indexes)
difficult_boxes = self.metadata.get('difficult_boxes')
if not difficult_boxes:
return
new_difficult_boxes = remove_difficult(difficult_boxes, indexes)
self.metadata['difficult_boxes'] = new_difficult_boxes
|
"""
Simple Facebook Echo bot: Respond with exactly what it receives
Standalone version
"""
import sys, json, traceback, requests
from flask import Flask, request
application = Flask(__name__)
app = application
PAT = 'replace_your_own_PAT_here'
@app.route('/', methods=['GET'])
def handle_verification():
print "Handling Verification."
if request.args.get('hub.verify_token', '') == 'your_own_token':
print "Webhook verified!"
return request.args.get('hub.challenge', '')
else:
print "Wrong verification token!"
# ======================= Bot processing ===========================
@app.route('/', methods=['POST'])
def handle_messages():
payload = request.get_data()
# Handle messages
for sender_id, message in messaging_events(payload):
# Start processing valid requests
try:
response = processIncoming(sender_id, message)
if response is not None:
send_message(PAT, sender_id, response)
else:
send_message(PAT, sender_id, "Sorry I don't understand that")
except Exception, e:
print e
traceback.print_exc()
return "ok"
def processIncoming(user_id, message):
if message['type'] == 'text':
message_text = message['data']
return message_text
elif message['type'] == 'location':
response = "I've received location (%s,%s) (y)"%(message['data'][0],message['data'][1])
return response
elif message['type'] == 'audio':
audio_url = message['data']
return "I've received audio %s"%(audio_url)
# Unrecognizable incoming, remove context and reset all data to start afresh
else:
return "*scratch my head*"
def send_message(token, user_id, text):
"""Send the message text to recipient with id recipient.
"""
r = requests.post("https://graph.facebook.com/v2.6/me/messages",
params={"access_token": token},
data=json.dumps({
"recipient": {"id": user_id},
"message": {"text": text.decode('unicode_escape')}
}),
headers={'Content-type': 'application/json'})
if r.status_code != requests.codes.ok:
print r.text
# Generate tuples of (sender_id, message_text) from the provided payload.
# This part technically clean up received data to pass only meaningful data to processIncoming() function
def messaging_events(payload):
data = json.loads(payload)
messaging_events = data["entry"][0]["messaging"]
for event in messaging_events:
sender_id = event["sender"]["id"]
# Not a message
if "message" not in event:
yield sender_id, None
# Pure text message
if "message" in event and "text" in event["message"] and "quick_reply" not in event["message"]:
data = event["message"]["text"].encode('unicode_escape')
yield sender_id, {'type':'text', 'data': data, 'message_id': event['message']['mid']}
# Message with attachment (location, audio, photo, file, etc)
elif "attachments" in event["message"]:
# Location
if "location" == event['message']['attachments'][0]["type"]:
coordinates = event['message']['attachments'][
0]['payload']['coordinates']
latitude = coordinates['lat']
longitude = coordinates['long']
yield sender_id, {'type':'location','data':[latitude, longitude],'message_id': event['message']['mid']}
# Audio
elif "audio" == event['message']['attachments'][0]["type"]:
audio_url = event['message'][
'attachments'][0]['payload']['url']
yield sender_id, {'type':'audio','data': audio_url, 'message_id': event['message']['mid']}
else:
yield sender_id, {'type':'text','data':"I don't understand this", 'message_id': event['message']['mid']}
# Quick reply message type
elif "quick_reply" in event["message"]:
data = event["message"]["quick_reply"]["payload"]
yield sender_id, {'type':'quick_reply','data': data, 'message_id': event['message']['mid']}
else:
yield sender_id, {'type':'text','data':"I don't understand this", 'message_id': event['message']['mid']}
# Allows running with simple `python <filename> <port>`
if __name__ == '__main__':
if len(sys.argv) == 2: # Allow running on customized ports
app.run(port=int(sys.argv[1]))
else:
app.run() # Default port 5000 |
from pwnypack.bytecode import *
|
from peewee import *
from .PWDatabase import PWDatabase
from neocore.Cryptography.Crypto import Crypto
from neocore.UInt256 import UInt256
from neocore.UInt160 import UInt160
import binascii
from neo.Wallets.Coin import CoinReference
from neo.Blockchain import GetBlockchain
class ModelBase(Model):
class Meta:
database = PWDatabase.DBProxy()
class Account(ModelBase):
Id = PrimaryKeyField()
PrivateKeyEncrypted = CharField(unique=True)
PublicKeyHash = CharField()
class Address(ModelBase):
Id = PrimaryKeyField()
ScriptHash = CharField(unique=True)
IsWatchOnly = BooleanField(default=False)
def ToString(self):
return Crypto.ToAddress(UInt160(data=self.ScriptHash))
class NamedAddress(ModelBase):
Id = PrimaryKeyField()
ScriptHash = CharField(unique=True)
Title = CharField()
def UInt160ScriptHash(self):
return UInt160(data=self.ScriptHash)
def ToString(self):
return Crypto.ToAddress(UInt160(data=self.ScriptHash))
class Coin(ModelBase):
Id = PrimaryKeyField()
TxId = CharField()
Index = IntegerField()
AssetId = CharField()
Value = IntegerField()
ScriptHash = CharField()
State = IntegerField()
Address = ForeignKeyField(Address)
class Contract(ModelBase):
Id = PrimaryKeyField()
RawData = CharField()
ScriptHash = CharField()
PublicKeyHash = CharField()
Account = ForeignKeyField(Account, null=True)
Address = ForeignKeyField(Address)
class Key(ModelBase):
Id = PrimaryKeyField()
Name = CharField(unique=True)
Value = CharField()
class NEP5Token(ModelBase):
ContractHash = CharField(unique=True)
Name = CharField()
Symbol = CharField()
Decimals = IntegerField()
class Transaction(ModelBase):
Id = PrimaryKeyField()
Hash = CharField(unique=True)
TransactionType = IntegerField()
RawData = CharField()
Height = IntegerField()
DateTime = DateTimeField()
class TransactionInfo(ModelBase):
Id = PrimaryKeyField()
CoreTransaction = ForeignKeyField(Transaction)
Height = IntegerField()
DateTime = DateTimeField()
class VINHold(ModelBase):
Id = PrimaryKeyField()
Index = IntegerField()
Hash = CharField()
FromAddress = CharField()
ToAddress = CharField()
Amount = IntegerField()
IsComplete = BooleanField(default=False)
@property
def Reference(self):
reference = CoinReference(prev_hash=self.TXHash, prev_index=self.Index)
return reference
@property
def TXHash(self):
data = bytearray(binascii.unhexlify(self.Hash.encode('utf-8')))
data.reverse()
return UInt256(data=data)
@property
def Vin(self):
index = bytearray(self.Index.to_bytes(1, 'little'))
return self.TXHash.Data + index
@property
def OutputHash(self):
data = bytearray(binascii.unhexlify(self.ToAddress.encode('utf-8')))
data.reverse()
return UInt160(data=data)
@property
def OutputAddr(self):
return Crypto.ToAddress(self.OutputHash)
@property
def AssetId(self):
return self.Output.AssetId
@property
def AssetName(self):
if self.AssetId == GetBlockchain().SystemShare().Hash:
return 'NEO'
elif self.AssetId == GetBlockchain().SystemCoin().Hash:
return 'Gas'
return 'Unknown'
@property
def Output(self):
tx, height = GetBlockchain().GetTransaction(self.TXHash)
output = tx.outputs[self.Index]
return output
@property
def InputHash(self):
data = bytearray(binascii.unhexlify(self.FromAddress.encode('utf-8')))
data.reverse()
return UInt160(data=data)
@property
def InputAddr(self):
return Crypto.ToAddress(self.InputHash)
def ToJson(self):
jsn = {
'To': self.OutputAddr,
'From': self.InputHash.ToString(),
'Amount': self.Amount,
'Index': self.Index,
'TxId': self.Hash,
'Complete': self.IsComplete
}
return jsn
|
import numpy as np
import numpy.linalg as la
#This function calculates the SVD and returns A', the rank-r reconstruction built from the truncated U', S', and V' factors
def SVD(matrix, dimension):
A = matrix
r = dimension
AT = A.T
AAT = np.dot(A, AT)
evalues1, evectors1 = la.eig(AAT)
    #calculate the S matrix
    nonzero = evalues1[evalues1 != 0]
shape = np.shape(A)
D = np.diag(nonzero)
D.resize(shape)
#calculate U and V
ATA = np.dot(AT,A)
evalues2, evectors2 = la.eig(ATA)
#U=evectors1, V=evectors2, S=np.sqrt(D)
#resize U, V, & S to the r value, to get U', S', & V'
U1 = evectors1[0:(np.size(evectors1, 0)), 0:r]
V1 = evectors2[0:(np.size(evectors2,0)), 0:r]
S1 = np.sqrt(D)[0:r, 0:r]
A1 = U1.dot(S1).dot(V1.T).real
return A1 #returns just the real part of A'
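# A hedged usage sketch (not part of the original module): build a rank-2
# reconstruction of a small matrix. Note that np.linalg.eig does not guarantee
# eigenvalue ordering or sign, so the result can differ from np.linalg.svd.
if __name__ == '__main__':
    M = np.array([[4., 0., 0.],
                  [0., 3., 0.],
                  [0., 0., 1.]])
    print(SVD(M, 2))  # prints a rank-2 approximation of M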
|
import pytest
from pyupgrade._data import Settings
from pyupgrade._main import _fix_plugins
@pytest.mark.parametrize(
('s', 'version'),
(
pytest.param(
'foo(*[i for i in bar])\n',
(2, 7),
id='Not Python3+',
),
pytest.param(
'2*3',
(3,),
id='Multiplication star',
),
pytest.param(
'2**3',
(3,),
id='Power star',
),
pytest.param(
'foo([i for i in bar])',
(3,),
id='List comp, no star',
),
pytest.param(
'foo(*bar)',
(3,),
id='Starred, no list comp',
),
),
)
def test_fix_unpack_argument_list_comp_noop(s, version):
assert _fix_plugins(s, settings=Settings(min_version=version)) == s
@pytest.mark.parametrize(
('s', 'expected'),
(
pytest.param(
'foo(*[i for i in bar])\n',
'foo(*(i for i in bar))\n',
id='Starred list comprehension',
),
pytest.param(
'foo(\n'
' *\n'
' [i for i in bar]\n'
' )\n',
'foo(\n'
' *\n'
' (i for i in bar)\n'
' )\n',
id='Multiline starred list comprehension',
),
pytest.param(
'foo(*[i for i in bar], qux, quox=None)\n',
'foo(*(i for i in bar), qux, quox=None)\n',
id='Single line, including other args',
),
),
)
def test_fix_unpack_argument_list_comp(s, expected):
ret = _fix_plugins(s, settings=Settings((3,)))
assert ret == expected
|
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
def get_column_names_from_ColumnTransformer(column_transformer, orig_cols, verbose=False):
col_name = []
# the last transformer is ColumnTransformer's 'remainder'
for transformer_in_columns in column_transformer.transformers_[:-1]:
if verbose:
print('transformer name:', transformer_in_columns[0])
raw_col_name = list(transformer_in_columns[2])
transformer = None
if isinstance(transformer_in_columns[1], Pipeline):
# if pipeline, get the last transformer
transformer = transformer_in_columns[1].steps[-1][1]
else:
transformer = transformer_in_columns[1]
try:
if isinstance(transformer, OneHotEncoder):
names = list(transformer.get_feature_names(raw_col_name))
else:
names = list(transformer.get_feature_names())
except AttributeError as error:
names = raw_col_name
if verbose:
print("Output column names", names)
col_name.extend(names)
rem_names = [orig_cols[c] for c in column_transformer.transformers_[-1][2]]
col_name.extend(rem_names)
return col_name |
#!/usr/bin/env python
# ===========================================================================
# Copyright 2017 `Tung Thanh Le`
# Email: ttungl at gmail dot com
#
# Heterogeneous Architecture Configurations Generator for Multi2Sim simulator
# (aka, `HeteroArchGen4M2S`)
# `HeteroArchGen4M2S` is free software, which is freely to be
# redistributed and modified it under the terms of
# the GNU General Public License as published by
# the Free Software Foundation.
# For more details `http://www.gnu.org/licenses`
# `HeteroArchGen4M2S` is written to help you configure M2S
# easily, but without warranty or merchantability.
# ============================================================================
import os ## for chmod
import stat
def create_shell_script(num_CPU, num_GPU, gpu_type, cpu_max_inst, benchmark, net_max_inst, network_only, numThreads):
# assert (benchmark is not empty), "Error benchmark"
	## create a shell script in the run_simulation_files folder.
# File name
f = open('run_simulation_files/run-sim-%0.f-CPU-%0.f-%s-GPU-benchmark-%s.sh' % (num_CPU, num_GPU, gpu_type, benchmark), 'w');
## content
if network_only == 0: ## full system
f.write('m2s --x86-sim detailed ')
f.write('--x86-report HeteroArchGen4M2S/results/%s_pipeline.out ' % benchmark)
f.write('--mem-report HeteroArchGen4M2S/results/%s_mem.out ' % benchmark)
f.write('--x86-config ./HeteroArchGen4M2S/configs/x86_cpuconfig ')
f.write('--si-sim detailed ')
f.write('--si-config ./HeteroArchGen4M2S/configs/si_gpuconfig ')
f.write('--mem-config ./HeteroArchGen4M2S/configs/memconfig ')
f.write('--net-config ./HeteroArchGen4M2S/configs/netconfig ')
f.write('--x86-max-inst %0.f ' % cpu_max_inst)
## network report:
##### Note: This file is generated under m2s directory.
f.write('--net-report %s_net_report.out ' % benchmark)
else: ## network_only = 1
## m2s --net-config net-config --net-sim mynet --net-max-cycles 1000000 --report-net report-net
## --net-injection-rate 0.1
f.write('m2s --net-config ./HeteroArchGen4M2S/configs/netconfig ')
f.write('--net-sim net-l2-mm ')
f.write('--net-max-cycles %0.f '% net_max_inst)
f.write('--net-report %s_net_report.out ' % benchmark)
## benchmarks or synthetic workloads
# if synthetic_workload == 1:
# f.write('--net-report %0.f_net_report.out ' % injection_rate)
# f.write('--net-injection-rate %0.f '% injection_rate)
## splash2-benchmarks
if benchmark == 'fft':
f.write('HeteroArchGen4M2S/benchmarks/m2s-bench-splash2/fft/fft -m18 -p%0.f -n65536 -l4' % numThreads)
if benchmark == 'fmm':
f.write('HeteroArchGen4M2S/benchmarks/m2s-bench-splash2/fmm/fmm.i386 %0.f HeteroArchGen4M2S/benchmarks/m2s-bench-splash2/fmm/input' % numThreads)
if benchmark == 'lu':
f.write('HeteroArchGen4M2S/benchmarks/m2s-bench-splash2/lu/lu.i386 -p%0.f -n512 -b16' % numThreads)
if benchmark == 'cholesky':
f.write('HeteroArchGen4M2S/benchmarks/m2s-bench-splash2/cholesky/cholesky.i386 -p%0.f HeteroArchGen4M2S/benchmarks/m2s-bench-splash2/cholesky/tk14.O' % numThreads)
if benchmark == 'barnes':
f.write('HeteroArchGen4M2S/benchmarks/m2s-bench-splash2/barnes/barnes.i386 %0.f HeteroArchGen4M2S/benchmarks/m2s-bench-splash2/barnes/input' % numThreads)
if benchmark == 'ocean':
f.write('HeteroArchGen4M2S/benchmarks/m2s-bench-splash2/ocean/ocean.i386 -n130 -p%0.f -e1e-07 -r20000 -t28800' % numThreads)
if benchmark == 'raytrace':
f.write('HeteroArchGen4M2S/benchmarks/m2s-bench-splash2/raytrace/raytrace.i386 -p%0.f HeteroArchGen4M2S/benchmarks/m2s-bench-splash2/raytrace/balls4.env' % numThreads)
if benchmark == 'water-nsquared':
f.write('HeteroArchGen4M2S/benchmarks/m2s-bench-splash2/water-nsquared/water-nsquared.i386 %0.f HeteroArchGen4M2S/benchmarks/m2s-bench-splash2/water-nsquared/input' % numThreads)
if benchmark == 'water-spatial':
f.write('HeteroArchGen4M2S/benchmarks/m2s-bench-splash2/water-spatial/water-spatial.i386 %0.f HeteroArchGen4M2S/benchmarks/m2s-bench-splash2/water-spatial/input' % numThreads)
if benchmark == 'radiosity':
f.write('HeteroArchGen4M2S/benchmarks/m2s-bench-splash2/radiosity/radiosity.i386 -batch -p %0.f -en 0.5' % numThreads)
if benchmark == 'radix':
f.write('HeteroArchGen4M2S/benchmarks/m2s-bench-splash2/radix/radix.i386 -p%0.f -r4096 -n262144 -m524288' % numThreads)
## hetero-mark-benchmarks
if benchmark == 'aes':
f.write('HeteroArchGen4M2S/benchmarks/m2s-bench-heteromark/aes/aes_hsa -k HeteroArchGen4M2S/benchmarks/m2s-bench-heteromark/aes/data/key.data -i HeteroArchGen4M2S/benchmarks/m2s-bench-heteromark/aes/data/small.data')
# # f.write('HeteroArchGen4M2S/benchmarks/m2s-bench-heteromark/aes/aes_hsa -k key.data -i small.data data')
# if benchmark == 'fir':
# f.write('HeteroArchGen4M2S/benchmarks/m2s-bench-heteromark/fir/fir_hsa')
# if benchmark == 'histogram':
# f.write('HeteroArchGen4M2S/benchmarks/m2s-bench-heteromark/histogram/hist_hsa')
# if benchmark == 'kmeans':
# f.write('HeteroArchGen4M2S/benchmarks/m2s-bench-heteromark/kmeans/kmeans_hsa -k HeteroArchGen4M2S/benchmarks/m2s-bench-heteromark/kmeans/small.data HeteroArchGen4M2S/benchmarks/m2s-bench-heteromark/kmeans/data')
# if benchmark == 'page_rank':
# f.write('HeteroArchGen4M2S/benchmarks/m2s-bench-heteromark/page_rank/pr_hsa -k HeteroArchGen4M2S/benchmarks/m2s-bench-heteromark/page_rank/small.data HeteroArchGen4M2S/benchmarks/m2s-bench-heteromark/page_rank/data')
## amd-sdk-2.5-benchmark
if benchmark == 'BinarySearch':
f.write('HeteroArchGen4M2S/benchmarks/m2s-bench-amdsdk-2.5/BinarySearch/BinarySearch --load HeteroArchGen4M2S/benchmarks/m2s-bench-amdsdk-2.5/BinarySearch/BinarySearch_Kernels.bin -e')
## Spec2006-benchmarks
## Parsec-3.0-benchmarks
## Parboil-benchmarks
## test benchmark
if benchmark == 'default_mm':
f.write('HeteroArchGen4M2S/mm_multi/mm_multi_serial %0.f' % numThreads)
## close
f.close()
## granted `chmod +x` for this file
# if synthetic_workload ==1:
# st = os.stat('run_simulation_files/run-sim-%0.f-CPU-%0.f-%s-GPU-synthetic-injection-%0.f.sh' % (num_CPU, num_GPU, gpu_type, injection_rate))
# os.chmod('run_simulation_files/run-sim-%0.f-CPU-%0.f-%s-GPU-synthetic-injection-%0.f.sh' % (num_CPU, num_GPU, gpu_type, injection_rate), st.st_mode | stat.S_IEXEC)
# else:
st = os.stat('run_simulation_files/run-sim-%0.f-CPU-%0.f-%s-GPU-benchmark-%s.sh' % (num_CPU, num_GPU, gpu_type, benchmark))
os.chmod('run_simulation_files/run-sim-%0.f-CPU-%0.f-%s-GPU-benchmark-%s.sh' % (num_CPU, num_GPU, gpu_type, benchmark), st.st_mode | stat.S_IEXEC)
## tested
# create_shell_script(16, 16, 1, 10000000, 'fft', 0)
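## A hedged usage sketch (assumes the `run_simulation_files` directory already exists):
## create_shell_script(4, 1, 'si', 1000000, 'fft', 0, 0, 4)
## would write run_simulation_files/run-sim-4-CPU-1-si-GPU-benchmark-fft.sh with a
## full-system m2s command line that runs the splash2 fft benchmark on 4 threads.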
|
from datetime import datetime, timedelta
from django.test import TestCase
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.enterprise.tests.utils import create_enterprise_permissions
from corehq.apps.users.dbaccessors import delete_all_users
from corehq.apps.users.models import WebUser
from corehq.apps.users.tasks import update_domain_date
class TasksTest(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
delete_all_users()
# Set up domains
cls.domain = create_domain('test')
cls.mirror_domain = create_domain('mirror')
create_enterprise_permissions('[email protected]', 'test', ['mirror'])
# Set up user
cls.web_user = WebUser.create(
domain='test',
username='web',
password='secret',
created_by=None,
created_via=None,
)
cls.today = datetime.today().date()
cls.last_week = cls.today - timedelta(days=7)
@classmethod
def tearDownClass(cls):
delete_all_users()
cls.domain.delete()
cls.mirror_domain.delete()
super().tearDownClass()
def _last_accessed(self, user, domain):
domain_membership = user.get_domain_membership(domain, allow_enterprise=False)
if domain_membership:
return domain_membership.last_accessed
return None
def test_update_domain_date_web_user(self):
self.assertIsNone(self._last_accessed(self.web_user, self.domain.name))
update_domain_date(self.web_user.user_id, self.domain.name)
self.web_user = WebUser.get_by_username(self.web_user.username)
self.assertEqual(self._last_accessed(self.web_user, self.domain.name), self.today)
def test_update_domain_date_web_user_mirror(self):
# Mirror domain access shouldn't be updated because user doesn't have a real membership
self.assertIsNone(self._last_accessed(self.web_user, self.mirror_domain.name))
update_domain_date(self.web_user.user_id, self.mirror_domain.name)
self.web_user = WebUser.get_by_username(self.web_user.username)
self.assertIsNone(self._last_accessed(self.web_user, self.mirror_domain.name))
|
from nested_diff import Differ, handlers
def test_diff_handlers():
    class FloatHandler(handlers.TypeHandler):
handled_type = float
def __init__(self, precision=2, *args, **kwargs):
super().__init__(*args, **kwargs)
self.precision = precision
def diff(self, differ, a, b):
if round(a, self.precision) == round(b, self.precision):
return {'U': a} if differ.op_u else {}
return super().diff(differ, a, b)
differ = Differ(U=False)
    differ.set_handler(FloatHandler(precision=1))
a = [0.001, 0.01, 0.1]
b = [0.002, 0.02, 0.2]
assert {'D': [{'I': 2, 'N': 0.2, 'O': 0.1}]} == differ.diff(a, b)
# TODO: rename this file when handlers related deprecation cycle ended
# TODO: drop code below when handlers related deprecation cycle ended
def test_subclassing():
class CustomDiffer(Differ):
"""Differ with custom precision for floats."""
def __init__(self, float_precision=2, *args, **kwargs):
super().__init__(*args, **kwargs)
self.set_differ(float, self.diff_float)
self.float_precision = float_precision
def diff_float(self, a, b):
if round(a, self.float_precision) == round(b, self.float_precision):
return {'U': a} if self.op_u else {}
return super().diff__default(a, b)
differ = CustomDiffer(float_precision=1, U=False)
a = [0.001, 0.01, 0.1]
b = [0.002, 0.02, 0.2]
assert {'D': [{'I': 2, 'N': 0.2, 'O': 0.1}]} == differ.diff(a, b)
|
from datetime import datetime, timezone
from typing import Any, Dict, Iterable, List, Optional, Set
from dateutil.rrule import DAILY, rrule
from sqlalchemy import not_, or_
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.sql import ClauseElement
from athenian.api.controllers.logical_repos import drop_logical_repo
from athenian.api.controllers.miners.filters import LabelFilter
from athenian.api.controllers.miners.types import MinedPullRequest, PullRequestFacts, \
PullRequestFactsMap
from athenian.api.controllers.settings import default_branch_alias, ReleaseMatch, ReleaseSettings
from athenian.api.models.metadata.github import PullRequestComment, \
PullRequestCommit, PullRequestReview, PullRequestReviewRequest
from athenian.api.models.precomputed.models import GitHubBase
def build_days_range(time_from: datetime, time_to: datetime) -> Set[datetime]:
"""Build the daily range between the two provided times."""
    # timezones: date_from and date_to may not be exactly 00:00
date_from_day = datetime.combine(
time_from.date(), datetime.min.time(), tzinfo=timezone.utc)
date_to_day = datetime.combine(
time_to.date(), datetime.min.time(), tzinfo=timezone.utc)
# date_to_day will be included
return rrule(DAILY, dtstart=date_from_day, until=date_to_day)
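# An illustrative call with made-up dates: for time_from=2021-01-01 13:00 UTC and
# time_to=2021-01-03 02:00 UTC the returned rule iterates over the midnight-UTC
# datetimes 2021-01-01, 2021-01-02 and 2021-01-03 (the last day is included).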
def append_activity_days_filter(time_from: datetime, time_to: datetime,
selected: Set[InstrumentedAttribute],
filters: List[ClauseElement],
activity_days_column: InstrumentedAttribute,
postgres: bool) -> Set[datetime]:
"""Append the activity days to provided SQL filters."""
date_range = build_days_range(time_from, time_to)
if postgres:
filters.append(activity_days_column.overlap(list(date_range)))
else:
selected.add(activity_days_column)
date_range = set(date_range)
return date_range
def collect_activity_days(pr: MinedPullRequest, facts: PullRequestFacts, sqlite: bool):
"""Collect activity days from mined PR and facts."""
activity_days = set()
if facts.released is not None:
activity_days.add(facts.released.item().date())
if facts.closed is not None:
activity_days.add(facts.closed.item().date())
activity_days.add(facts.created.item().date())
# if they are empty the column dtype is sometimes an object so .dt raises an exception
if not pr.review_requests.empty:
activity_days.update(
pr.review_requests[PullRequestReviewRequest.created_at.name].dt.date)
if not pr.reviews.empty:
activity_days.update(pr.reviews[PullRequestReview.created_at.name].dt.date)
if not pr.comments.empty:
activity_days.update(pr.comments[PullRequestComment.created_at.name].dt.date)
if not pr.commits.empty:
activity_days.update(pr.commits[PullRequestCommit.committed_date.name].dt.date)
if sqlite:
activity_days = [d.strftime("%Y-%m-%d") for d in sorted(activity_days)]
else:
# Postgres is "clever" enough to localize them otherwise
activity_days = [datetime.combine(d, datetime.min.time(), tzinfo=timezone.utc)
for d in activity_days]
return activity_days
def build_labels_filters(model: GitHubBase,
labels: LabelFilter,
filters: list,
selected: Set[InstrumentedAttribute],
postgres: bool) -> None:
"""Build SQL filter for labels."""
if postgres:
if labels.include:
singles, multiples = LabelFilter.split(labels.include)
or_items = []
if singles:
or_items.append(model.labels.has_any(singles))
or_items.extend(model.labels.contains(m) for m in multiples)
filters.append(or_(*or_items))
if labels.exclude:
filters.append(not_(model.labels.has_any(labels.exclude)))
else:
selected.add(model.labels)
def labels_are_compatible(include_singles: Set[str],
include_multiples: List[Set[str]],
exclude: Set[str],
labels: Iterable[str]) -> bool:
"""Check labels compatiblity."""
labels = set(labels)
return ((include_singles.intersection(labels)
or
any(m.issubset(labels) for m in include_multiples)
or
(not include_singles and not include_multiples))
and
(not exclude or not exclude.intersection(labels)))
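# A hedged illustration with made-up labels: given include_singles={'bug'},
# include_multiples=[{'backend', 'perf'}] and exclude={'wontfix'}, the labels
# ['bug', 'ui'] are compatible (a single include matches, nothing is excluded),
# while ['bug', 'wontfix'] are not because an excluded label is present.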
def extract_release_match(repo: str,
matched_bys: Dict[str, ReleaseMatch],
default_branches: Dict[str, str],
release_settings: ReleaseSettings,
) -> Optional[str]:
"""Extract the release match for the given repo and settings."""
try:
matched_by = matched_bys[repo]
except KeyError:
return None
return release_settings.native[repo] \
.with_match(matched_by) \
.as_db(default_branches[drop_logical_repo(repo)])
def triage_by_release_match(repo: str,
release_match: str,
release_settings: ReleaseSettings,
default_branches: Dict[str, str],
result: Any,
ambiguous: Dict[str, Any],
) -> Optional[Any]:
"""Check the release match of the specified `repo` and return `None` if it is not effective \
according to `release_settings`, or decide between `result` and `ambiguous`."""
# DEV-1451: if we don't have this repository in the release settings, then it is deleted
assert repo in release_settings.native, \
f"You must take care of deleted repositories separately: {repo}"
if release_match in (ReleaseMatch.rejected.name, ReleaseMatch.force_push_drop.name):
return result
required_release_match = release_settings.native[repo]
match_name, match_by = release_match.split("|", 1)
match = ReleaseMatch[match_name]
if required_release_match.match != ReleaseMatch.tag_or_branch:
if match != required_release_match.match:
return None
dump = result
else:
dump = ambiguous[match_name]
if match == ReleaseMatch.tag:
target = required_release_match.tags
elif match == ReleaseMatch.branch:
target = required_release_match.branches.replace(
default_branch_alias, default_branches.get(repo, default_branch_alias))
elif match == ReleaseMatch.event:
target = required_release_match.events
else:
raise AssertionError("Precomputed DB may not contain Match.tag_or_branch")
if target != match_by:
return None
return dump
def remove_ambiguous_prs(prs: PullRequestFactsMap,
ambiguous: Dict[str, List[int]],
matched_bys: Dict[str, ReleaseMatch]) -> int:
"""
Delete PRs from `prs` which are released by branch while the effective match is by tag.
:return: Number of removed PRs.
"""
missed = 0
for repo, pr_node_ids in ambiguous.items():
if matched_bys[repo] == ReleaseMatch.tag:
for node_id in pr_node_ids:
try:
del prs[(node_id, repo)]
missed += 1
except KeyError:
continue
return missed
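# A hedged illustration with made-up identifiers: if `prs` holds facts keyed by
# (101, 'org/repo') and (102, 'org/repo'), `ambiguous` is {'org/repo': [101]} and
# matched_bys['org/repo'] is ReleaseMatch.tag, the call deletes the
# (101, 'org/repo') entry and returns 1.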
|
## Backreference
re.sub(r'\[(\d+)\]', r'\1', '[52] apples and [31] mangoes')
re.sub(r'(_)?_', r'\1', '_foo_ __123__ _baz_')
re.sub(r'(\w+),(\w+)', r'\2,\1', 'good,bad 42,24')
re.sub(r'\[(\d+)\]', r'(\15)', '[52] apples and [31] mangoes')
re.sub(r'\[(\d+)\]', r'(\g<1>5)', '[52] apples and [31] mangoes')
re.sub(r'\[(\d+)\]', r'(\1\065)', '[52] apples and [31] mangoes')
re.sub(r'[a-z]+', r'{\g<0>}', '[52] apples and [31] mangoes')
re.sub(r'.+', r'Hi. \g<0>. Have a nice day', 'Hello world')
re.sub(r'\A([^,]+),.+', r'\g<0>,\1', 'fork,42,nice,3.14')
words = ['effort', 'flee', 'facade', 'oddball', 'rat', 'tool']
[w for w in words if re.search(r'\b\w*(\w)\1\w*\b', w)]
re.sub(r'\b(\w+)( \1)+\b', r'\1', 'aa a a a 42 f_1 f_1 f_13.14')
s = 'abcdefghijklmna1d'
re.sub(r'(.).*\11', 'X', s)
re.sub(r'(.).*\1\x31', 'X', s)
re.sub(r'(.)(.)(.)(.)(.)(.)(.)(.)(.)(.)(.)(.).*\11', 'X', s)
re.sub(r'(.)(.)(.)(.)(.)(.)(.)(.)(.)(.)(.)(.).*\1\x31', 'X', s)
## Non-capturing groups
re.findall(r'\b\w*(?:st|in)\b', 'cost akin more east run against')
re.split(r'hand(?:y|ful)?', '123hand42handy777handful500')
re.sub(r'\A(([^,]+,){3})([^,]+)', r'\1(\3)', '1,2,3,4,5,6,7')
re.sub(r'\A((?:[^,]+,){3})([^,]+)', r'\1(\2)', '1,2,3,4,5,6,7')
s = 'hi 123123123 bye 456123456'
re.findall(r'(123)+', s)
re.findall(r'(?:123)+', s)
re.sub(r'(123)+', 'X', s)
row = 'one,2,3.14,42,five'
re.sub(r'\A([^,]+,){3}([^,]+)', r'\1"\2"', row)
re.sub(r'\A((?:[^,]+,){3})([^,]+)', r'\1"\2"', row)
words = 'effort flee facade oddball rat tool'
repeat_char = re.compile(r'\b\w*(\w)\1\w*\b')
repeat_char.findall(words)
m_iter = repeat_char.finditer(words)
[m[0] for m in m_iter]
## Named capture groups
re.sub(r'(?P<fw>\w+),(?P<sw>\w+)', r'\g<sw>,\g<fw>', 'good,bad 42,24')
s = 'aa a a a 42 f_1 f_1 f_13.14'
re.sub(r'\b(?P<dup>\w+)( (?P=dup))+\b', r'\g<dup>', s)
sentence = 'I bought an apple'
m = re.search(r'(?P<fruit>\w+)\Z', sentence)
m[1]
m['fruit']
m.group('fruit')
details = '2018-10-25,car,2346'
re.search(r'(?P<date>[^,]+),(?P<product>[^,]+)', details).groupdict()
re.search(r'(?P<date>[^,]+),([^,]+)', details).groupdict()
s = 'good,bad 42,24'
[m.groupdict() for m in re.finditer(r'(?P<fw>\w+),(?P<sw>\w+)', s)]
## Conditional groups
words = ['"hi"', 'bye', 'bad"', '"good"', '42', '"3']
pat = re.compile(r'(")?\w+(?(1)")')
[w for w in words if pat.fullmatch(w)]
[w for w in words if re.fullmatch(r'"\w+"|\w+', w)]
[w for w in words if re.fullmatch(r'"?\w+"?', w)]
[w for w in words if pat.search(w)]
words = ['(hi)', 'good-bye', 'bad', '(42)', '-oh', 'i-j', '(-)']
pat = re.compile(r'(\()?\w+(?(1)\)|-\w+)')
[w for w in words if pat.fullmatch(w)]
## Match.expand
re.sub(r'w(.*)m', r'[\1]', 'awesome')
re.search(r'w(.*)m', 'awesome').expand(r'[\1]')
dates = '2020/04/25,1986/03/02,77/12/31'
m_iter = re.finditer(r'([^/]+)/([^/]+)/[^,]+,?', dates)
[m.expand(r'Month:\2, Year:\1') for m in m_iter]
|
# pylint: disable = import-error, invalid-name
"""General model classes"""
from org.kie.kogito.explainability import (
TestUtils as _TestUtils,
Config as _Config,
)
from java.util import ArrayList, List
TestUtils = _TestUtils
Config = _Config
def toJList(pyList):
"""Convert a Python list to a Java ArrayList"""
result = ArrayList()
for item in pyList:
result.add(item)
return result
|
'''
read_bovespa.py - helper functions to interpret records from
BOVESPA's historical data.
Raw data available at http://www.bmfbovespa.com.br
'''
import pandas as pd
from collections import OrderedDict
from datetime import date
from datetime import datetime as dt
from typing import Any, Callable, Iterator, Union, Dict
RecordTypes = Union[int, str, float, date]
FilePath = str
CharStream = Iterator[str]
BovespaDateString = str
def read_bovespa_file(filename: FilePath) -> pd.DataFrame:
'''read a file in the BOVESPA historical records format as a
Pandas' DataFrame.'''
with open(filename, 'r') as bovespa_file:
bovespa_file.readline() # skip file header
records = (read_bovespa_record(line)
for line in skip_last(bovespa_file))
return pd.DataFrame(records)
def read_bovespa_record(line: str) -> Dict[str, RecordTypes]:
'''read a line from a BOVESPA style record as a dictionary.'''
read = read_from(line)
skip = read # just for clarity
record: Dict[str, RecordTypes] = OrderedDict()
skip(2) # register type code (always "01")
record["date"] = read_date(read(8))
skip(2) # BDI code
record["symbol"] = read(12)
skip(3) # market type code
record["company_name"] = read(12)
record["type"] = read(10)
skip(3) # market time in days
record["currency"] = read(4)
record["open"] = read_float(read(13))
record["high"] = read_float(read(13))
record["low"] = read_float(read(13))
record["mean"] = read_float(read(13))
record["close"] = read_float(read(13))
skip(13) # best buy offer price
skip(13) # best sell offer price
skip(5) # number of exchanges for the asset
record["quantity"] = int(read(18))
record["volume"] = read_float(read(18))
record["option_strike_price"] = read_float(read(13))
skip(1) # price correction indicator
record["option_expiry_date"] = read_date(read(8))
return record
def read_from(string: str) -> Callable[[int], str]:
'''return a function to read a string as a stream.'''
it: CharStream = iter(string)
return lambda n: read_n(n, it)
def read_n(n: int, iterator: CharStream) -> str:
'''read n chars from a string iterator, join them and strip whitespace.'''
return ''.join([next(iterator, '') for _ in range(n)]).strip()
def read_date(date_string: BovespaDateString) -> date:
'''read a string as a date in "YYYYMMDD" format.'''
date_format = "%Y%m%d" # from http://strftime.org/
return dt.strptime(date_string, date_format).date()
def read_float(value: str, after_comma: int = 2) -> float:
'''read a float from a string of integers with decimal place metadata.'''
integral_part = value[:-after_comma]
fractional_part = value[-after_comma:]
return float(f'{integral_part}.{fractional_part}')
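# Illustrative values: read_float('0000012345') -> 123.45, because the trailing
# `after_comma` digits are interpreted as the fractional part.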
def skip_last(iterator: Iterator[Any]) -> Iterator[Any]:
'''Yield all but the last value of an iterator.'''
try:
next_value = next(iterator)
for element in iterator:
yield next_value
next_value = element
except StopIteration:
raise ValueError("Iterator is empty")
|
from urllib.request import urlopen
from bs4 import BeautifulSoup
import ssl
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
url = input('Enter URL - ')
count = int(input('Enter iterations:'))
position = int(input('Enter position:'))
for _ in range(count+1):
html = urlopen(url, context=ctx).read()
print('Retrieving: ', url)
soup = BeautifulSoup(html, "html.parser")
url = soup('a')[position-1].get('href', None)
|