code | repo_name | path | language | license | size
---|---|---|---|---|---
from Spiders import BaseSpider
class FlRu(BaseSpider):
#start_urls = 'https://www.fl.ru/users/i3qqq' #'https://www.fl.ru/freelancers/#34234'
start_urls = 'https://www.fl.ru/users/i3qqq/'
routes = [
{
'name': 'project',
're': '[^f]*?fl.ru/users/[^/]*/viewproj.php\?prjid\=.*'
},
{
'name': 'user',
're': '[^f]*?fl.ru/users/[^/]*/',
},
{
'name': 'category',
're': '[^f]*?fl.ru/freelancers/.*'
},
{
'name': 'main_page',
're': '[^f]*?fl.ru/',
},
]
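# Assumption based on the route names above: BaseSpider.add_urls_routed() matches each
# discovered URL against these regexes and dispatches it to the handler method of the same name.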
def main_page(self, request):
p = request.parser()
links = p.FindAll('a').Href()
self.add_urls_routed(links)
def user(self, request):
p = request.parser()
links = p.FindAll('a').Href()
self.add_urls_routed(links)
def category(self, request):
p = request.parser()
links = p.FindAll('a').Href()
self.add_urls_routed(links)
def project(self, request):
p = request.parser()
links = p.FindAll('a').Href()
self.add_urls_routed(links)
bot = FlRu()
bot.run()
| SaltusVita/ReoGrab | fl.ru.py | Python | bsd-3-clause | 1,192 |
# -----------------------------------------------------------
# compares the creation of sorted lists using the python
# bisect module, and the "usual" way
#
# (C) 2015 Frank Hofmann, Berlin, Germany
# Released under GNU Public License (GPL)
# email [email protected]
# -----------------------------------------------------------
# import standard modules
import bisect, random, time
def sortListDefault():
# define empty list, and fill with 200000 randomized integers
sortedNumbers = []
for element in range(200000):
# choose a number between 0 and 1000
newNumber = random.randint(0, 1000)
# add number to list
#print ("adding %i to list ... " %newNumber)
sortedNumbers.append(newNumber)
# sort the list in-place
sortedNumbers.sort()
return
def sortListBisect():
# define empty list, and fill with 200000 randomized integers
sortedNumbers = []
for element in range(200000):
# choose a number between 0 and 1000
newNumber = random.randint(0, 1000)
#print ("adding %i to list ... " %newNumber)
# insert into sorted list
bisect.insort(sortedNumbers, newNumber)
return
# evaluate default sort
startTime1 = time.time()
listPosition = sortListDefault()
endTime1 = time.time()
# calculate and output interval time
seconds = endTime1 - startTime1
print ("default sort took %.8f seconds" % seconds)
# evaluate bisect sort
startTime1 = time.time()
listPosition = sortListBisect()
endTime1 = time.time()
# calculate and output interval time
seconds = endTime1 - startTime1
print ("bisect sort took %.8f seconds" % seconds)
| plasmashadow/training-python | time/sorted-list.py | Python | gpl-2.0 | 1,566 |
#! /usr/bin/env python
#
# Check the option usage.
# Make sure the union member matches the option type.
#
from os.path import dirname, join, abspath
from os import listdir, EX_OK, EX_DATAERR
from fnmatch import filter
# just use the first letter of the member name - should be unique
map_access_type = {
'b': 'AT_BOOL',
'a': 'AT_IARF',
'n': 'AT_NUM',
'u': 'AT_UNUM',
'l': 'AT_LINE',
't': 'AT_POS',
}
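# e.g. an option declared as AT_BOOL must be read through the 'b' union member: cpd.settings[UO_xxx].b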
map_option_type = {}
# checks if while accessing the cpd.settings the right union accessor is used in the file
def check_file(file_path):
problems = 0
line_no = 0
fd = open(file_path, 'r')
for line in fd:
line_no += 1
pos_cpd_s = line.find('cpd.settings[UO_')
pos_cpd_e = line[pos_cpd_s:].find(']')
if pos_cpd_s > 0 and pos_cpd_e > 0:
pos_option_s = pos_cpd_s + 13
pos_option_e = pos_cpd_s + pos_cpd_e
option = line[pos_option_s : pos_option_e]
union_access = line[pos_option_e + 2]
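# option is the 'UO_...' name inside the brackets; union_access is the one-letter
# union member that immediately follows 'cpd.settings[UO_...].'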
if option in map_option_type and union_access in map_access_type:
if map_option_type[option] != map_access_type[union_access]:
print("%s [%d] %s should use %s not %s" % (file_path, line_no, option,
map_option_type[option], map_access_type[union_access]))
problems += 1
fd.close()
return problems
def fill_map_option_type(file_path):
# Read in all the options
fd = open(file_path, 'r')
for line in fd:
if line.find('unc_add_option') > 0 and line.find('UO_') > 0:
splits = line.split(',')
if len(splits) >= 3:
map_option_type[splits[1].strip()] = splits[2].strip()
fd.close()
def main():
src_dir = join(dirname(dirname(abspath(__file__))), 'src')
fill_map_option_type(join(src_dir, 'options.cpp'))
# Get a list of all the source files
ld = listdir(src_dir)
src_files = filter(ld, '*.cpp')
src_files.extend(filter(ld, '*.h'))
# Check each source file
problems = 0
for fn in src_files:
problems += check_file(join(src_dir, fn))
if problems == 0:
print("No problems found")
return EX_OK
else:
return EX_DATAERR
if __name__ == '__main__':
exit(main())
| nivekkagicom/uncrustify | scripts/check_options.py | Python | gpl-2.0 | 2,333 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_network_policy_ingress_rule import V1beta1NetworkPolicyIngressRule
class TestV1beta1NetworkPolicyIngressRule(unittest.TestCase):
""" V1beta1NetworkPolicyIngressRule unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1NetworkPolicyIngressRule(self):
"""
Test V1beta1NetworkPolicyIngressRule
"""
model = kubernetes.client.models.v1beta1_network_policy_ingress_rule.V1beta1NetworkPolicyIngressRule()
if __name__ == '__main__':
unittest.main()
| skuda/client-python | kubernetes/test/test_v1beta1_network_policy_ingress_rule.py | Python | apache-2.0 | 991 |
##
# Copyright (c) 2012-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Load-testing tool.
"""
| macosforge/ccs-calendarserver | contrib/performance/loadtest/__init__.py | Python | apache-2.0 | 634 |
# -*- coding: utf-8 -*-
from Plugins.SystemPlugins.Hotplug.plugin import hotplugNotifier
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Screens.ChoiceBox import ChoiceBox
from Screens.HelpMenu import HelpableScreen
from Screens.TaskView import JobView
from Components.About import about
from Components.ActionMap import ActionMap
from Components.Sources.StaticText import StaticText
from Components.Sources.List import List
from Components.Label import Label
from Components.FileList import FileList
from Components.MenuList import MenuList
from Components.MultiContent import MultiContentEntryText
from Components.ScrollLabel import ScrollLabel
from Components.Harddisk import harddiskmanager
from Components.Task import Task, Job, job_manager, Condition
from Tools.Directories import fileExists, isMount, resolveFilename, SCOPE_HDD, SCOPE_MEDIA
from Tools.HardwareInfo import HardwareInfo
from Tools.Downloader import downloadWithProgress
from enigma import eConsoleAppContainer, gFont, RT_HALIGN_LEFT, RT_HALIGN_CENTER, RT_VALIGN_CENTER, RT_WRAP, eTimer
from os import system, path, access, stat, remove, W_OK, R_OK
from twisted.web import client
from twisted.internet import reactor, defer
from twisted.python import failure
import re
class ImageDownloadJob(Job):
def __init__(self, url, filename, device=None, mountpoint="/"):
Job.__init__(self, _("Download .NFI-Files for USB-Flasher"))
if device:
if isMount(mountpoint):
UmountTask(self, mountpoint)
MountTask(self, device, mountpoint)
ImageDownloadTask(self, url, mountpoint+filename)
ImageDownloadTask(self, url[:-4]+".nfo", mountpoint+filename[:-4]+".nfo")
#if device:
#UmountTask(self, mountpoint)
def retry(self):
self.tasks[0].args += self.tasks[0].retryargs
Job.retry(self)
class MountTask(Task):
def __init__(self, job, device, mountpoint):
Task.__init__(self, job, ("mount"))
self.setTool("mount")
options = "rw,sync"
self.mountpoint = mountpoint
self.args += [ device, mountpoint, "-o"+options ]
self.weighting = 1
def processOutput(self, data):
print "[MountTask] output:", data
class UmountTask(Task):
def __init__(self, job, mountpoint):
Task.__init__(self, job, ("mount"))
self.setTool("umount")
self.args += [mountpoint]
self.weighting = 1
class DownloaderPostcondition(Condition):
def check(self, task):
return task.returncode == 0
def getErrorMessage(self, task):
return self.error_message
class ImageDownloadTask(Task):
def __init__(self, job, url, path):
Task.__init__(self, job, _("Downloading"))
self.postconditions.append(DownloaderPostcondition())
self.job = job
self.url = url
self.path = path
self.error_message = ""
self.last_recvbytes = 0
self.error_message = None
self.download = None
self.aborted = False
def run(self, callback):
self.callback = callback
self.download = downloadWithProgress(self.url,self.path)
self.download.addProgress(self.download_progress)
self.download.start().addCallback(self.download_finished).addErrback(self.download_failed)
print "[ImageDownloadTask] downloading", self.url, "to", self.path
def abort(self):
print "[ImageDownloadTask] aborting", self.url
if self.download:
self.download.stop()
self.aborted = True
def download_progress(self, recvbytes, totalbytes):
#print "[update_progress] recvbytes=%d, totalbytes=%d" % (recvbytes, totalbytes)
if ( recvbytes - self.last_recvbytes ) > 10000: # anti-flicker
self.progress = int(100*(float(recvbytes)/float(totalbytes)))
self.name = _("Downloading") + ' ' + "%d of %d kBytes" % (recvbytes/1024, totalbytes/1024)
self.last_recvbytes = recvbytes
def download_failed(self, failure_instance=None, error_message=""):
self.error_message = error_message
if error_message == "" and failure_instance is not None:
self.error_message = failure_instance.getErrorMessage()
Task.processFinished(self, 1)
def download_finished(self, string=""):
if self.aborted:
self.finish(aborted = True)
else:
Task.processFinished(self, 0)
class StickWizardJob(Job):
def __init__(self, path):
Job.__init__(self, _("USB stick wizard"))
self.path = path
self.device = path
while self.device[-1:] == "/" or self.device[-1:].isdigit():
self.device = self.device[:-1]
box = HardwareInfo().get_device_name()
url = "http://www.dreamboxupdate.com/download/opendreambox/dreambox-nfiflasher-%s.tar.bz2" % box
self.downloadfilename = "/tmp/dreambox-nfiflasher-%s.tar.bz2" % box
self.imagefilename = "/tmp/nfiflash_%s.img" % box
#UmountTask(self, device)
PartitionTask(self)
ImageDownloadTask(self, url, self.downloadfilename)
UnpackTask(self)
CopyTask(self)
class PartitionTaskPostcondition(Condition):
def check(self, task):
return task.returncode == 0
def getErrorMessage(self, task):
return {
task.ERROR_BLKRRPART: ("Device or resource busy"),
task.ERROR_UNKNOWN: (task.errormsg)
}[task.error]
class PartitionTask(Task):
ERROR_UNKNOWN, ERROR_BLKRRPART = range(2)
def __init__(self, job):
Task.__init__(self, job, ("partitioning"))
self.postconditions.append(PartitionTaskPostcondition())
self.job = job
self.setTool("sfdisk")
self.args += [self.job.device]
self.weighting = 10
self.initial_input = "0 - 0x6 *\n;\n;\n;\ny"
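# sfdisk script fed on stdin: one bootable partition of type 0x6 (FAT16) spanning the stick,
# the remaining three entries left empty, and 'y' to confirm writing the partition table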
self.errormsg = ""
def run(self, callback):
Task.run(self, callback)
def processOutput(self, data):
print "[PartitionTask] output:", data
if data.startswith("BLKRRPART:"):
self.error = self.ERROR_BLKRRPART
else:
self.error = self.ERROR_UNKNOWN
self.errormsg = data
class UnpackTask(Task):
def __init__(self, job):
Task.__init__(self, job, ("Unpacking USB flasher image..."))
self.job = job
self.setTool("tar")
self.args += ["-xjvf", self.job.downloadfilename]
self.weighting = 80
self.end = 80
self.delayTimer = eTimer()
self.delayTimer.callback.append(self.progress_increment)
def run(self, callback):
Task.run(self, callback)
self.delayTimer.start(950, False)
def progress_increment(self):
self.progress += 1
def processOutput(self, data):
print "[UnpackTask] output: \'%s\'" % data
self.job.imagefilename = data
def afterRun(self):
self.delayTimer.callback.remove(self.progress_increment)
class CopyTask(Task):
def __init__(self, job):
Task.__init__(self, job, ("Copying USB flasher boot image to stick..."))
self.job = job
self.setTool("dd")
self.args += ["if=%s" % self.job.imagefilename, "of=%s1" % self.job.device]
self.weighting = 20
self.end = 20
self.delayTimer = eTimer()
self.delayTimer.callback.append(self.progress_increment)
def run(self, callback):
Task.run(self, callback)
self.delayTimer.start(100, False)
def progress_increment(self):
self.progress += 1
def processOutput(self, data):
print "[CopyTask] output:", data
def afterRun(self):
self.delayTimer.callback.remove(self.progress_increment)
class NFOViewer(Screen):
skin = """
<screen name="NFOViewer" position="center,center" size="610,410" title="Changelog" >
<widget name="changelog" position="10,10" size="590,380" font="Regular;16" />
</screen>"""
def __init__(self, session, nfo):
Screen.__init__(self, session)
self["changelog"] = ScrollLabel(nfo)
self["ViewerActions"] = ActionMap(["SetupActions", "ColorActions", "DirectionActions"],
{
"green": self.exit,
"red": self.exit,
"ok": self.exit,
"cancel": self.exit,
"down": self.pageDown,
"up": self.pageUp
})
def pageUp(self):
self["changelog"].pageUp()
def pageDown(self):
self["changelog"].pageDown()
def exit(self):
self.close(False)
class feedDownloader:
def __init__(self, feed_base, box, OE_vers):
print "[feedDownloader::init] feed_base=%s, box=%s" % (feed_base, box)
self.feed_base = feed_base
self.OE_vers = OE_vers
self.box = box
def getList(self, callback, errback):
self.urlbase = "%s/%s/%s/images/" % (self.feed_base, self.OE_vers, self.box)
print "[getList]", self.urlbase
self.callback = callback
self.errback = errback
client.getPage(self.urlbase).addCallback(self.feed_finished).addErrback(self.feed_failed)
def feed_failed(self, failure_instance):
print "[feed_failed]", str(failure_instance)
self.errback(failure_instance.getErrorMessage())
def feed_finished(self, feedhtml):
print "[feed_finished]"
fileresultmask = re.compile("<a class=[\'\"]nfi[\'\"] href=[\'\"](?P<url>.*?)[\'\"]>(?P<name>.*?.nfi)</a>", re.DOTALL)
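# scrapes every <a class='nfi' href='...'>xxx.nfi</a> anchor out of the feed page HTML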
searchresults = fileresultmask.finditer(feedhtml)
fileresultlist = []
if searchresults:
for x in searchresults:
url = x.group("url")
if url[0:7] != "http://":
url = self.urlbase + x.group("url")
name = x.group("name")
entry = (name, url)
fileresultlist.append(entry)
self.callback(fileresultlist, self.OE_vers)
class DeviceBrowser(Screen, HelpableScreen):
skin = """
<screen name="DeviceBrowser" position="center,center" size="520,430" title="Please select target medium" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="message" render="Label" position="5,50" size="510,150" font="Regular;16" />
<widget name="filelist" position="5,210" size="510,220" scrollbarMode="showOnDemand" />
</screen>"""
def __init__(self, session, startdir, message="", showDirectories = True, showFiles = True, showMountpoints = True, matchingPattern = "", useServiceRef = False, inhibitDirs = False, inhibitMounts = False, isTop = False, enableWrapAround = False, additionalExtensions = None):
Screen.__init__(self, session)
HelpableScreen.__init__(self)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText()
self["message"] = StaticText(message)
self.filelist = FileList(startdir, showDirectories = showDirectories, showFiles = showFiles, showMountpoints = showMountpoints, matchingPattern = matchingPattern, useServiceRef = useServiceRef, inhibitDirs = inhibitDirs, inhibitMounts = inhibitMounts, isTop = isTop, enableWrapAround = enableWrapAround, additionalExtensions = additionalExtensions)
self["filelist"] = self.filelist
self["FilelistActions"] = ActionMap(["SetupActions", "ColorActions"],
{
"green": self.use,
"red": self.exit,
"ok": self.ok,
"cancel": self.exit
})
hotplugNotifier.append(self.hotplugCB)
self.onShown.append(self.updateButton)
self.onClose.append(self.removeHotplug)
def hotplugCB(self, dev, action):
print "[hotplugCB]", dev, action
self.updateButton()
def updateButton(self):
if self["filelist"].getFilename() or self["filelist"].getCurrentDirectory():
self["key_green"].text = _("Use")
else:
self["key_green"].text = ""
def removeHotplug(self):
print "[removeHotplug]"
hotplugNotifier.remove(self.hotplugCB)
def ok(self):
if self.filelist.canDescent():
if self["filelist"].showMountpoints == True and self["filelist"].showDirectories == False:
self.use()
else:
self.filelist.descent()
def use(self):
print "[use]", self["filelist"].getCurrentDirectory(), self["filelist"].getFilename()
if self["filelist"].getCurrentDirectory() is not None:
if self.filelist.canDescent() and self["filelist"].getFilename() and len(self["filelist"].getFilename()) > len(self["filelist"].getCurrentDirectory()):
self.filelist.descent()
self.close(self["filelist"].getCurrentDirectory())
elif self["filelist"].getFilename():
self.close(self["filelist"].getFilename())
def exit(self):
self.close(False)
(ALLIMAGES, RELEASE, EXPERIMENTAL, STICK_WIZARD, START) = range(5)
class NFIDownload(Screen):
skin = """
<screen name="NFIDownload" position="center,center" size="610,410" title="NFIDownload" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" valign="center" halign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" valign="center" halign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" valign="center" halign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" valign="center" halign="center" backgroundColor="#18188b" transparent="1" />
<ePixmap pixmap="skin_default/border_menu_350.png" position="5,50" zPosition="1" size="350,300" transparent="1" alphatest="on" />
<widget source="menu" render="Listbox" position="15,60" size="330,290" scrollbarMode="showOnDemand">
<convert type="TemplatedMultiContent">
{"templates":
{"default": (25, [
MultiContentEntryText(pos = (2, 2), size = (330, 24), flags = RT_HALIGN_LEFT, text = 1), # index 0 is the MenuText,
], True, "showOnDemand")
},
"fonts": [gFont("Regular", 22)],
"itemHeight": 25
}
</convert>
</widget>
<widget source="menu" render="Listbox" position="360,50" size="240,300" scrollbarMode="showNever" selectionDisabled="1">
<convert type="TemplatedMultiContent">
{"templates":
{"default": (300, [
MultiContentEntryText(pos = (2, 2), size = (240, 300), flags = RT_HALIGN_CENTER|RT_VALIGN_CENTER|RT_WRAP, text = 2), # index 2 is the Description,
], False, "showNever")
},
"fonts": [gFont("Regular", 22)],
"itemHeight": 300
}
</convert>
</widget>
<widget source="status" render="Label" position="5,360" zPosition="10" size="600,50" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
</screen>"""
def __init__(self, session, destdir=None):
Screen.__init__(self, session)
#self.skin_path = plugin_path
#self.menu = args
self.box = HardwareInfo().get_device_name()
self.feed_base = "http://www.dreamboxupdate.com/opendreambox" #/1.5/%s/images/" % self.box
self.usbmountpoint = resolveFilename(SCOPE_MEDIA)+"usb/"
self.menulist = []
self["menu"] = List(self.menulist)
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText()
self["key_yellow"] = StaticText()
self["key_blue"] = StaticText()
self["status"] = StaticText(_("Please wait... Loading list..."))
self["shortcuts"] = ActionMap(["OkCancelActions", "ColorActions", "ShortcutActions", "DirectionActions"],
{
"ok": self.keyOk,
"green": self.keyOk,
"red": self.keyRed,
"blue": self.keyBlue,
"up": self.keyUp,
"upRepeated": self.keyUp,
"downRepeated": self.keyDown,
"down": self.keyDown,
"cancel": self.close,
}, -1)
self.onShown.append(self.go)
self.feedlists = [[],[],[]]
self.branch = START
self.container = eConsoleAppContainer()
self.container.dataAvail.append(self.tool_avail)
self.taskstring = ""
self.image_idx = 0
self.nfofilename = ""
self.nfo = ""
self.target_dir = None
def tool_avail(self, string):
print "[tool_avail]" + string
self.taskstring += string
def go(self):
self.onShown.remove(self.go)
self.umountCallback = self.getMD5
self.umount()
def getMD5(self):
url = "http://www.dreamboxupdate.com/download/opendreambox/dreambox-nfiflasher-%s-md5sums" % self.box
client.getPage(url).addCallback(self.md5sums_finished).addErrback(self.feed_failed)
def md5sums_finished(self, data):
print "[md5sums_finished]", data
self.stickimage_md5 = data
self.checkUSBStick()
def keyRed(self):
if self.branch == START:
self.close()
else:
self.branch = START
self["menu"].setList(self.menulist)
#elif self.branch == ALLIMAGES or self.branch == STICK_WIZARD:
def keyBlue(self):
if self.nfo != "":
self.session.open(NFOViewer, self.nfo)
def keyOk(self):
print "[keyOk]", self["menu"].getCurrent()
current = self["menu"].getCurrent()
if current:
if self.branch == START:
currentEntry = current[0]
if currentEntry == RELEASE:
self.image_idx = 0
self.branch = RELEASE
self.askDestination()
elif currentEntry == EXPERIMENTAL:
self.image_idx = 0
self.branch = EXPERIMENTAL
self.askDestination()
elif currentEntry == ALLIMAGES:
self.branch = ALLIMAGES
self.listImages()
elif currentEntry == STICK_WIZARD:
self.askStartWizard()
elif self.branch == ALLIMAGES:
self.image_idx = self["menu"].getIndex()
self.askDestination()
self.updateButtons()
def keyUp(self):
self["menu"].selectPrevious()
self.updateButtons()
def keyDown(self):
self["menu"].selectNext()
self.updateButtons()
def updateButtons(self):
current = self["menu"].getCurrent()
if current:
if self.branch == START:
self["key_red"].text = _("Close")
currentEntry = current[0]
if currentEntry in (RELEASE, EXPERIMENTAL):
self.nfo_download(currentEntry, 0)
self["key_green"].text = _("Download")
else:
self.nfofilename = ""
self.nfo = ""
self["key_blue"].text = ""
self["key_green"].text = _("continue")
elif self.branch == ALLIMAGES:
self["key_red"].text = _("Back")
self["key_green"].text = _("Download")
self.nfo_download(ALLIMAGES, self["menu"].getIndex())
def listImages(self):
print "[listImages]"
imagelist = []
mask = re.compile("%s/(?P<OE_vers>1\.\d)/%s/images/(?P<branch>.*?)-%s_(?P<version>.*?).nfi" % (self.feed_base, self.box, self.box), re.DOTALL)
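# pulls the OE version, branch and version/date stamp out of each image URL, e.g.
# <feed_base>/1.6/<box>/images/<branch>-<box>_<version>.nfi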
for name, url in self.feedlists[ALLIMAGES]:
result = mask.match(url)
if result:
if result.group("version").startswith("20"):
version = ( result.group("version")[:4]+'-'+result.group("version")[4:6]+'-'+result.group("version")[6:8] )
else:
version = result.group("version")
description = "\nOpendreambox %s\n%s image\n%s\n" % (result.group("OE_vers"), result.group("branch"), version)
imagelist.append((url, name, _("Download %s from Server" ) % description, None))
self["menu"].setList(imagelist)
def getUSBPartitions(self):
allpartitions = [ (r.description, r.mountpoint) for r in harddiskmanager.getMountedPartitions(onlyhotplug = True)]
print "[getUSBPartitions]", allpartitions
usbpartition = []
for x in allpartitions:
print x, x[1] == '/', x[0].find("USB"), access(x[1], R_OK)
if x[1] != '/' and x[0].find("USB") > -1: # and access(x[1], R_OK) is True:
usbpartition.append(x)
return usbpartition
def askDestination(self):
usbpartition = self.getUSBPartitions()
if len(usbpartition) == 1:
self.target_dir = usbpartition[0][1]
self.ackDestinationDevice(device_description=usbpartition[0][0])
else:
self.openDeviceBrowser()
def openDeviceBrowser(self):
self.session.openWithCallback(self.DeviceBrowserClosed, DeviceBrowser, None, showDirectories=True, showMountpoints=True, inhibitMounts=["/autofs/sr0/"])
def DeviceBrowserClosed(self, path):
print "[DeviceBrowserClosed]", str(path)
self.target_dir = path
if path:
self.ackDestinationDevice()
else:
self.keyRed()
def ackDestinationDevice(self, device_description=None):
if device_description == None:
dev = self.target_dir
else:
dev = device_description
message = _("Do you want to download the image to %s ?") % (dev)
choices = [(_("Yes"), self.ackedDestination), (_("List of Storage Devices"),self.openDeviceBrowser), (_("Cancel"),self.keyRed)]
self.session.openWithCallback(self.ackDestination_query, ChoiceBox, title=message, list=choices)
def ackDestination_query(self, choice):
print "[ackDestination_query]", choice
if isinstance(choice, tuple):
choice[1]()
else:
self.keyRed()
def ackedDestination(self):
print "[ackedDestination]", self.branch, self.target_dir
self.container.setCWD(resolveFilename(SCOPE_MEDIA)+"usb/")
if self.target_dir[:8] == "/autofs/":
self.target_dir = "/dev/" + self.target_dir[8:-1]
if self.branch == STICK_WIZARD:
job = StickWizardJob(self.target_dir)
job.afterEvent = "close"
job_manager.AddJob(job)
job_manager.failed_jobs = []
self.session.openWithCallback(self.StickWizardCB, JobView, job, afterEventChangeable = False)
elif self.branch != STICK_WIZARD:
url = self.feedlists[self.branch][self.image_idx][1]
filename = self.feedlists[self.branch][self.image_idx][0]
print "[getImage] start downloading %s to %s" % (url, filename)
if self.target_dir.startswith("/dev/"):
job = ImageDownloadJob(url, filename, self.target_dir, self.usbmountpoint)
else:
job = ImageDownloadJob(url, filename, None, self.target_dir)
job.afterEvent = "close"
job_manager.AddJob(job)
job_manager.failed_jobs = []
self.session.openWithCallback(self.ImageDownloadCB, JobView, job, afterEventChangeable = False)
def StickWizardCB(self, ret=None):
print "[StickWizardCB]", ret
# print job_manager.active_jobs, job_manager.failed_jobs, job_manager.job_classes, job_manager.in_background, job_manager.active_job
if len(job_manager.failed_jobs) == 0:
self.session.open(MessageBox, _("The USB stick was prepared to be bootable.\nNow you can download an NFI image file!"), type = MessageBox.TYPE_INFO)
if len(self.feedlists[ALLIMAGES]) == 0:
self.getFeed()
else:
self.setMenu()
else:
self.umountCallback = self.checkUSBStick
self.umount()
def ImageDownloadCB(self, ret):
print "[ImageDownloadCB]", ret
# print job_manager.active_jobs, job_manager.failed_jobs, job_manager.job_classes, job_manager.in_background, job_manager.active_job
if len(job_manager.failed_jobs) == 0:
self.session.openWithCallback(self.askBackupCB, MessageBox, _("The wizard can backup your current settings. Do you want to do a backup now?"), MessageBox.TYPE_YESNO)
else:
self.umountCallback = self.keyRed
self.umount()
def askBackupCB(self, ret):
if ret:
from Plugins.SystemPlugins.SoftwareManager.BackupRestore import BackupScreen
class USBBackupScreen(BackupScreen):
def __init__(self, session, usbmountpoint):
BackupScreen.__init__(self, session, runBackup = True)
self.backuppath = usbmountpoint
self.fullbackupfilename = self.backuppath + "/" + self.backupfile
self.session.openWithCallback(self.showHint, USBBackupScreen, self.usbmountpoint)
else:
self.showHint()
def showHint(self, ret=None):
self.session.open(MessageBox, _("To update your Settop Box firmware, please follow these steps:\n1) Turn off your box with the rear power switch and make sure the bootable USB stick is plugged in.\n2) Turn mains back on and hold the DOWN button on the front panel pressed for 10 seconds.\n3) Wait for bootup and follow instructions of the wizard."), type = MessageBox.TYPE_INFO)
self.umountCallback = self.keyRed
self.umount()
def getFeed(self):
self.feedDownloader15 = feedDownloader(self.feed_base, self.box, OE_vers="1.5")
self.feedDownloader16 = feedDownloader(self.feed_base, self.box, OE_vers="1.6")
self.feedlists = [[],[],[]]
self.feedDownloader15.getList(self.gotFeed, self.feed_failed)
self.feedDownloader16.getList(self.gotFeed, self.feed_failed)
def feed_failed(self, message=""):
self["status"].text = _("Could not connect to Settop Box .NFI Image Feed Server:") + "\n" + str(message) + "\n" + _("Please check your network settings!")
def gotFeed(self, feedlist, OE_vers):
print "[gotFeed]", OE_vers
releaselist = []
experimentallist = []
for name, url in feedlist:
if name.find("release") > -1:
releaselist.append((name, url))
if name.find("experimental") > -1:
experimentallist.append((name, url))
self.feedlists[ALLIMAGES].append((name, url))
if OE_vers == "1.6":
self.feedlists[RELEASE] = releaselist + self.feedlists[RELEASE]
self.feedlists[EXPERIMENTAL] = experimentallist + self.feedlists[EXPERIMENTAL]
elif OE_vers == "1.5":
self.feedlists[RELEASE] = self.feedlists[RELEASE] + releaselist
self.feedlists[EXPERIMENTAL] = self.feedlists[EXPERIMENTAL] + experimentallist
self.setMenu()
def checkUSBStick(self):
self.target_dir = None
allpartitions = [ (r.description, r.mountpoint) for r in harddiskmanager.getMountedPartitions(onlyhotplug = True)]
print "[checkUSBStick] found partitions:", allpartitions
usbpartition = []
for x in allpartitions:
print x, x[1] == '/', x[0].find("USB"), access(x[1], R_OK)
if x[1] != '/' and x[0].find("USB") > -1: # and access(x[1], R_OK) is True:
usbpartition.append(x)
print usbpartition
if len(usbpartition) == 1:
self.target_dir = usbpartition[0][1]
self.md5_passback = self.getFeed
self.md5_failback = self.askStartWizard
self.md5verify(self.stickimage_md5, self.target_dir)
elif usbpartition == []:
print "[NFIFlash] needs to create usb flasher stick first!"
self.askStartWizard()
else:
self.askStartWizard()
def askStartWizard(self):
self.branch = STICK_WIZARD
message = _("""This plugin creates a USB stick which can be used to update the firmware of your Settop Box without the need for a network or WLAN connection.
First, a USB stick needs to be prepared so that it becomes bootable.
In the next step, an NFI image file can be downloaded from the update server and saved on the USB stick.
If you already have a prepared bootable USB stick, please insert it now. Otherwise plug in a USB stick with a minimum size of 64 MB!""")
self.session.openWithCallback(self.wizardDeviceBrowserClosed, DeviceBrowser, None, message, showDirectories=True, showMountpoints=True, inhibitMounts=["/","/autofs/sr0/","/autofs/sda1/","/media/hdd/","/media/net/",self.usbmountpoint,"/media/dvd/"])
def wizardDeviceBrowserClosed(self, path):
print "[wizardDeviceBrowserClosed]", path
self.target_dir = path
if path:
self.md5_passback = self.getFeed
self.md5_failback = self.wizardQuery
self.md5verify(self.stickimage_md5, self.target_dir)
else:
self.close()
def wizardQuery(self):
print "[wizardQuery]"
description = self.target_dir
for name, dev in self.getUSBPartitions():
if dev == self.target_dir:
description = name
message = _("You have chosen to create a new .NFI flasher bootable USB stick. This will repartition the USB stick and therefore all data on it will be erased.") + "\n"
message += _("The following device was found:\n\n%s\n\nDo you want to write the USB flasher to this stick?") % description
choices = [(_("Yes"), self.ackedDestination), (_("List of Storage Devices"),self.askStartWizard), (_("Cancel"),self.close)]
self.session.openWithCallback(self.ackDestination_query, ChoiceBox, title=message, list=choices)
def setMenu(self):
self.menulist = []
try:
latest_release = "Release %s (Opendreambox 1.5)" % self.feedlists[RELEASE][0][0][-9:-4]
self.menulist.append((RELEASE, _("Get latest release image"), _("Download %s from Server" ) % latest_release, None))
except IndexError:
pass
try:
dat = self.feedlists[EXPERIMENTAL][0][0][-12:-4]
latest_experimental = "Experimental %s-%s-%s (Opendreambox 1.6)" % (dat[:4], dat[4:6], dat[6:])
self.menulist.append((EXPERIMENTAL, _("Get latest experimental image"), _("Download %s from Server") % latest_experimental, None))
except IndexError:
pass
self.menulist.append((ALLIMAGES, _("Choose image to download"), _("Select desired image from feed list" ), None))
self.menulist.append((STICK_WIZARD, _("USB stick wizard"), _("Prepare another USB stick for image flashing" ), None))
self["menu"].setList(self.menulist)
self["status"].text = _("Currently installed image") + ": %s" % (about.getImageVersionString())
self.branch = START
self.updateButtons()
def nfo_download(self, branch, idx):
nfourl = (self.feedlists[branch][idx][1])[:-4]+".nfo"
self.nfofilename = (self.feedlists[branch][idx][0])[:-4]+".nfo"
print "[check_for_NFO]", nfourl
client.getPage(nfourl).addCallback(self.nfo_finished).addErrback(self.nfo_failed)
def nfo_failed(self, failure_instance):
print "[nfo_failed] " + str(failure_instance)
self["key_blue"].text = ""
self.nfofilename = ""
self.nfo = ""
def nfo_finished(self,nfodata=""):
print "[nfo_finished] " + str(nfodata)
self["key_blue"].text = _("Changelog")
self.nfo = nfodata
def md5verify(self, md5, path):
cmd = "md5sum -c -s"
print "[verify_md5]", md5, path, cmd
self.container.setCWD(path)
self.container.appClosed.append(self.md5finished)
self.container.execute(cmd)
self.container.write(md5)
self.container.dataSent.append(self.md5ready)
def md5ready(self, retval):
self.container.sendEOF()
def md5finished(self, retval):
print "[md5finished]", str(retval)
self.container.appClosed.remove(self.md5finished)
self.container.dataSent.remove(self.md5ready)
if retval==0:
print "check passed! calling", repr(self.md5_passback)
self.md5_passback()
else:
print "check failed! calling", repr(self.md5_failback)
self.md5_failback()
def umount(self):
cmd = "umount " + self.usbmountpoint
print "[umount]", cmd
self.container.setCWD('/')
self.container.appClosed.append(self.umountFinished)
self.container.execute(cmd)
def umountFinished(self, retval):
print "[umountFinished]", str(retval)
self.container.appClosed.remove(self.umountFinished)
self.umountCallback()
def main(session, **kwargs):
session.open(NFIDownload,resolveFilename(SCOPE_HDD))
def filescan_open(list, session, **kwargs):
dev = "/dev/" + (list[0].path).rsplit('/',1)[0][7:]
print "mounting device " + dev + " to /media/usb..."
usbmountpoint = resolveFilename(SCOPE_MEDIA)+"usb/"
system("mount %s %s -o rw,sync" % (dev, usbmountpoint))
session.open(NFIDownload,usbmountpoint)
def filescan(**kwargs):
from Components.Scanner import Scanner, ScanPath
return \
Scanner(mimetypes = ["application/x-dream-image"],
paths_to_scan =
[
ScanPath(path = "", with_subdirs = False),
],
name = "NFI",
description = (_("Download .NFI-Files for USB-Flasher")+"..."),
openfnc = filescan_open, )
| digidudeofdw/enigma2 | lib/python/Plugins/SystemPlugins/NFIFlash/downloader.py | Python | gpl-2.0 | 30,759 |
from artemis.Netarea import MAX
import time
import artemis.Utility as utility
# human-readable file sizes, adapted from https://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
def sizeof_fmt(num, suffix='B'):
for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
class AbstractReport:
def __init__(self, host, port, lifetime=4):
self.host = host
self.port = port
self.lifetime = lifetime
self.deathtime = time.time() + lifetime
def id(self):
return hash( (self.host, self.port) )
def is_expired(self):
return time.time() > self.deathtime
def serialize(self):
return utility.serialize(self)
def unserialize(self, data):
self = utility.unserialize( data )
def update(self, n):
self.host = n.host
self.port = n.port
self.deathtime = n.deathtime
def reset(self):
self.deathtime = time.time() + self.lifetime
class Report(AbstractReport):
# host, port: address used to contact the node
def __init__(self, host, port, used_ram, max_ram, lifetime=4):
AbstractReport.__init__(self, host, port, lifetime)
self.used_ram = used_ram
self.max_ram = max_ram
def load(self):
return float(self.used_ram)/self.max_ram
def __ge__(self, that):
return self.load() >= that.load()
def __gt__(self, that):
return self.load() > that.load()
def __le__(self, that):
return self.load() <= that.load()
def __lt__(self, that):
return self.load() < that.load()
def is_overload(self):
return self.load()>0.85
def update(self, n):
AbstractReport.update(self, n)
self.used_ram = n.used_ram
self.max_ram = n.max_ram
def __str__(self):
return ("host=%s, port=%d, used_ram=%s/%s" % (self.host, self.port, sizeof_fmt(self.used_ram), sizeof_fmt(self.max_ram)) )
class NetareaReport(Report):
"""
@param netarea unique id (str)
@param weight = the larger it is, the more important the partition is; used to allocate it to a robust NetareaManager for load balancing
"""
def __init__(self, host, port, netarea, used_ram, max_ram, next_netarea=MAX, lifetime=4):
#netarea is a hash, i.e. a hex-digit str
Report.__init__(self,host, port, used_ram, max_ram, lifetime)
self.netarea = netarea # [netarea,next_netarea[
self.next_netarea = next_netarea
def split(self):
# split the range [netarea, next_netarea[: this report keeps the lower half,
# the returned report covers the upper half
upper = self.next_netarea
mid = self.netarea + (upper - self.netarea) // 2
self.used_ram = 0
self.next_netarea = mid
return NetareaReport(self.host, -1, mid, 0, self.max_ram, upper, self.lifetime) # port will be updated later by the master
class MasterReport(Report):
def __init__(self, host, port, num_core, max_ram, maxNumNetareas,
netarea_reports, lifetime=4):
self.host = host
self.port = port
self.num_core = num_core
self.maxNumNetareas = maxNumNetareas
self.netarea_reports= netarea_reports
self.deathtime = time.time() + lifetime
def load(self):
return float(len(self.netarea_reports))/self.maxNumNetareas
def is_overload(self):
return self.maxNumNetareas <= len( self.netarea_reports)
def allocate(self, net):
self.netarea_reports.append( net )
def __str__(self):
return ("host=%s, port=%d, netareas=%d/%d\n\t%s" %
(self.host, self.port, len(self.netarea_reports),
self.num_core, '\n\t'.join([str(n) for n in self.netarea_reports])))
class SlaveReport(Report):
def __init__(self, host, port, used_ram, max_ram, lifetime=4):
Report.__init__(self, host, port, used_ram, max_ram, lifetime)
class SlaveMetrics(AbstractReport):
def __init__(self, host, port, tasks_processed, delay, lifetime=4):
AbstractReport.__init__(self, host, port, lifetime)
self.tasks_processed = tasks_processed
self.delay = delay
def speed(self):
return float(self.tasks_processed) / float(self.delay)
def __str__(self):
return ("""host=%s, port=%d, %d tasks processed in %d s at
%ft/s""" % (self.host, self.port, self.tasks_processed,
self.delay, self.speed()) )
class MonitorReport(Report):
def __init__(self, host, port, lifetime=60):
Report.__init__(self, host, port, 0, 0, lifetime)
| athena-project/Artemis | src/network/Reports.py | Python | gpl-2.0 | 4,180 |
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'conf.settings.base')
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?")
raise
execute_from_command_line(sys.argv)
| rafasis1986/deaths-dancing | endpoint/manage.py | Python | gpl-3.0 | 802 |
class AbstractComponent:
def get_regularization_term(self):
return 0
def prepare_tensorflow_variables(self, mode="train"):
pass
def handle_variable_assignment(self, batch, mode):
pass | MichSchli/QuestionAnsweringGCN | old_version/candidate_selection/tensorflow_models/components/abstract_component.py | Python | mit | 222 |
from .client_wrapper import CloudShellClient, create_cloudshell_client # noqa: F401
| QualiSystems/shellfoundry | shellfoundry/utilities/cloudshell_api/__init__.py | Python | apache-2.0 | 85 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Checkers context.
Defines the Context class, a basic building block of checkers tests.
"""
import gflags
import logging
import test_result
FLAGS = gflags.FLAGS
class _DataRegistry(object):
def __init__(self):
self.data = {}
def SetValue(self, key, value):
self.__dict__[key] = value
self.data[key] = value
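# each value is stored both as an attribute and in the data dict, so it can be
# read either as registry.key or as registry.data['key']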
class Context(object):
def __init__(self, test_case, test_run=None, logger=None,
args=None, data=None):
self.test_case = test_case
self.test_run = test_run
# pylint: disable=no-value-for-parameter
self.log = logger if logger else logging.Logger()
self.args = args
self.test_result = test_result.TestResult.READY
self.data = _DataRegistry()
for key, flag in FLAGS.FlagDict().iteritems():
self.data.SetValue(key, flag.value)
data = data if data else {}
for key, value in data.iteritems():
self.data.SetValue(key, value)
| google/checkers_classic | python/context.py | Python | apache-2.0 | 1,540 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utilies and constants specific to Chromium C++ code.
"""
from code import Code
from datetime import datetime
from model import PropertyType
import os
import re
CHROMIUM_LICENSE = (
"""// Copyright (c) %d The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.""" % datetime.now().year
)
GENERATED_FILE_MESSAGE = """// GENERATED FROM THE API DEFINITION IN
// %s
// DO NOT EDIT.
"""
GENERATED_BUNDLE_FILE_MESSAGE = """// GENERATED FROM THE API DEFINITIONS IN
// %s
// DO NOT EDIT.
"""
GENERATED_FEATURE_MESSAGE = """// GENERATED FROM THE FEATURE DEFINITIONS IN
// %s
// DO NOT EDIT.
"""
def Classname(s):
"""Translates a namespace name or function name into something more
suited to C++.
eg experimental.downloads -> Experimental_Downloads
updateAll -> UpdateAll.
"""
return '_'.join([x[0].upper() + x[1:] for x in re.split('\W', s)])
def GetAsFundamentalValue(type_, src, dst):
"""Returns the C++ code for retrieving a fundamental type from a
Value into a variable.
src: Value*
dst: Property*
"""
return {
PropertyType.BOOLEAN: '%s->GetAsBoolean(%s)',
PropertyType.DOUBLE: '%s->GetAsDouble(%s)',
PropertyType.INTEGER: '%s->GetAsInteger(%s)',
PropertyType.STRING: '%s->GetAsString(%s)',
}[type_.property_type] % (src, dst)
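# e.g. GetAsFundamentalValue(boolean_type, 'value', '&out') yields 'value->GetAsBoolean(&out)'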
def GetValueType(type_):
"""Returns the Value::Type corresponding to the model.Type.
"""
return {
PropertyType.ARRAY: 'base::Value::TYPE_LIST',
PropertyType.BINARY: 'base::Value::TYPE_BINARY',
PropertyType.BOOLEAN: 'base::Value::TYPE_BOOLEAN',
# PropertyType.CHOICES can be any combination of types.
PropertyType.DOUBLE: 'base::Value::TYPE_DOUBLE',
PropertyType.ENUM: 'base::Value::TYPE_STRING',
PropertyType.FUNCTION: 'base::Value::TYPE_DICTIONARY',
PropertyType.INTEGER: 'base::Value::TYPE_INTEGER',
PropertyType.OBJECT: 'base::Value::TYPE_DICTIONARY',
PropertyType.STRING: 'base::Value::TYPE_STRING',
}[type_.property_type]
def GetParameterDeclaration(param, type_):
"""Gets a parameter declaration of a given model.Property and its C++
type.
"""
if param.type_.property_type in (PropertyType.ANY,
PropertyType.ARRAY,
PropertyType.CHOICES,
PropertyType.OBJECT,
PropertyType.REF,
PropertyType.STRING):
arg = 'const %(type)s& %(name)s'
else:
arg = '%(type)s %(name)s'
return arg % {
'type': type_,
'name': param.unix_name,
}
def GenerateIfndefName(path, filename):
"""Formats a path and filename as a #define name.
e.g chrome/extensions/gen, file.h becomes CHROME_EXTENSIONS_GEN_FILE_H__.
"""
return (('%s_%s_H__' % (path, filename))
.upper().replace(os.sep, '_').replace('/', '_'))
def PadForGenerics(var):
"""Appends a space to |var| if it ends with a >, so that it can be compiled
within generic types.
"""
return ('%s ' % var) if var.endswith('>') else var
def OpenNamespace(namespace):
"""Get opening root namespace declarations.
"""
c = Code()
# In lieu of GYP supporting None for the namespace variable the '' namespace
# implies there is no root namespace.
if namespace == '':
return c
for component in namespace.split('::'):
c.Append('namespace %s {' % component)
return c
def CloseNamespace(namespace):
"""Get closing root namespace declarations.
"""
c = Code()
# In lieu of GYP supporting None for the namespace variable the '' namespace
# implies there is no root namespace.
if namespace == '':
return c
for component in reversed(namespace.split('::')):
c.Append('} // namespace %s' % component)
return c
def ConstantName(feature_name):
"""Returns a kName for a feature's name.
"""
return ('k' + ''.join(word[0].upper() + word[1:]
for word in feature_name.replace('.', ' ').split()))
def CamelCase(unix_name):
return ''.join(word.capitalize() for word in unix_name.split('_'))
def ClassName(filepath):
return CamelCase(os.path.split(filepath)[1])
| TeamEOS/external_chromium_org | tools/json_schema_compiler/cpp_util.py | Python | bsd-3-clause | 4,388 |
import os
import os.path
import osiris
class Page(osiris.IMainPage):
def __init__(self, session):
osiris.IMainPage.__init__(self, session)
def getPageName(self):
return "extensions.4A7F130B4A5C42CC5D928D157641596A89543C65.testsuite"
def onInit(self):
osiris.IMainPage.onInit(self)
self.pathway.add(self.getText("extensions.4A7F130B4A5C42CC5D928D157641596A89543C65.index.title"),"/developer_tools")
def onPreRender(self):
osiris.IMainPage.onPreRender(self)
document = osiris.XMLDocument()
root = document.create("testsuite")
template = osiris.HtmlXSLControl()
template.stylesheet = self.loadStylesheet(os.path.join(os.path.dirname(__file__), "testsuite.xsl"))
template.document = document
self.getArea(osiris.pageAreaContent).controls.add(template)
| OsirisSPS/osiris-sps | client/data/extensions/4A7F130B4A5C42CC5D928D157641596A89543C65/scripts/testsuite.py | Python | gpl-3.0 | 798 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import base64
import logging
from email.utils import formataddr
from urlparse import urljoin
from openerp import api, tools
from openerp import SUPERUSER_ID
from openerp.addons.base.ir.ir_mail_server import MailDeliveryException
from openerp.osv import fields, osv
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
import openerp.tools as tools
_logger = logging.getLogger(__name__)
class mail_mail(osv.Model):
""" Model holding RFC2822 email messages to send. This model also provides
facilities to queue and send new email messages. """
_name = 'mail.mail'
_description = 'Outgoing Mails'
_inherits = {'mail.message': 'mail_message_id'}
_order = 'id desc'
_rec_name = 'subject'
_columns = {
'mail_message_id': fields.many2one('mail.message', 'Message', required=True, ondelete='cascade', auto_join=True),
'state': fields.selection([
('outgoing', 'Outgoing'),
('sent', 'Sent'),
('received', 'Received'),
('exception', 'Delivery Failed'),
('cancel', 'Cancelled'),
], 'Status', readonly=True, copy=False),
'auto_delete': fields.boolean('Auto Delete',
help="Permanently delete this email after sending it, to save space"),
'references': fields.text('References', help='Message references, such as identifiers of previous messages', readonly=1),
'email_to': fields.text('To', help='Message recipients (emails)'),
'recipient_ids': fields.many2many('res.partner', string='To (Partners)'),
'email_cc': fields.char('Cc', help='Carbon copy message recipients'),
'body_html': fields.text('Rich-text Contents', help="Rich-text/HTML message"),
'headers': fields.text('Headers', copy=False),
'failure_reason': fields.text('Failure Reason', help="Failure reason. This is usually the exception thrown by the email server, stored to ease the debugging of mailing issues.", readonly=1),
# Auto-detected based on create() - if 'mail_message_id' was passed then this mail is a notification
# and during unlink() we will not cascade delete the parent and its attachments
'notification': fields.boolean('Is Notification',
help='Mail has been created to notify people of an existing mail.message'),
}
_defaults = {
'state': 'outgoing',
}
def default_get(self, cr, uid, fields, context=None):
# protection for `default_type` values leaking from menu action context (e.g. for invoices)
# To remove when automatic context propagation is removed in web client
if context and context.get('default_type') and context.get('default_type') not in self._all_columns['type'].column.selection:
context = dict(context, default_type=None)
return super(mail_mail, self).default_get(cr, uid, fields, context=context)
def create(self, cr, uid, values, context=None):
# notification field: if not set, set if mail comes from an existing mail.message
if 'notification' not in values and values.get('mail_message_id'):
values['notification'] = True
return super(mail_mail, self).create(cr, uid, values, context=context)
def unlink(self, cr, uid, ids, context=None):
# cascade-delete the parent message for all mails that are not created for a notification
ids_to_cascade = self.search(cr, uid, [('notification', '=', False), ('id', 'in', ids)])
parent_msg_ids = [m.mail_message_id.id for m in self.browse(cr, uid, ids_to_cascade, context=context)]
res = super(mail_mail, self).unlink(cr, uid, ids, context=context)
self.pool.get('mail.message').unlink(cr, uid, parent_msg_ids, context=context)
return res
def mark_outgoing(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'outgoing'}, context=context)
def cancel(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
@api.cr_uid
def process_email_queue(self, cr, uid, ids=None, context=None):
"""Send immediately queued messages, committing after each
message is sent - this is not transactional and should
not be called during another transaction!
:param list ids: optional list of emails ids to send. If passed
no search is performed, and these ids are used
instead.
:param dict context: if a 'filters' key is present in context,
this value will be used as an additional
filter to further restrict the outgoing
messages to send (by default all 'outgoing'
messages are sent).
"""
if context is None:
context = {}
if not ids:
filters = [('state', '=', 'outgoing')]
if 'filters' in context:
filters.extend(context['filters'])
ids = self.search(cr, uid, filters, context=context)
res = None
try:
# Force auto-commit - this is meant to be called by
# the scheduler, and we can't allow rolling back the status
# of previously sent emails!
res = self.send(cr, uid, ids, auto_commit=True, context=context)
except Exception:
_logger.exception("Failed processing mail queue")
return res
def _postprocess_sent_message(self, cr, uid, mail, context=None, mail_sent=True):
"""Perform any post-processing necessary after sending ``mail``
successfully, including deleting it completely along with its
attachment if the ``auto_delete`` flag of the mail was set.
Overridden by subclasses for extra post-processing behaviors.
:param browse_record mail: the mail that was just sent
:return: True
"""
if mail_sent and mail.auto_delete:
# done with SUPERUSER_ID to avoid giving large unlink access rights
self.unlink(cr, SUPERUSER_ID, [mail.id], context=context)
return True
#------------------------------------------------------
# mail_mail formatting, tools and send mechanism
#------------------------------------------------------
def _get_partner_access_link(self, cr, uid, mail, partner=None, context=None):
"""Generate URLs for links in mails: partner has access (is user):
link to action_mail_redirect action that will redirect to doc or Inbox """
if context is None:
context = {}
if partner and partner.user_ids:
base_url = self.pool.get('ir.config_parameter').get_param(cr, SUPERUSER_ID, 'web.base.url')
mail_model = mail.model or 'mail.thread'
url = urljoin(base_url, self.pool[mail_model]._get_access_link(cr, uid, mail, partner, context=context))
return "<span class='oe_mail_footer_access'><small>%(access_msg)s <a style='color:inherit' href='%(portal_link)s'>%(portal_msg)s</a></small></span>" % {
'access_msg': _('about') if mail.record_name else _('access'),
'portal_link': url,
'portal_msg': '%s %s' % (context.get('model_name', ''), mail.record_name) if mail.record_name else _('your messages'),
}
else:
return None
def send_get_mail_subject(self, cr, uid, mail, force=False, partner=None, context=None):
"""If subject is void, set the subject as 'Re: <Resource>' or
'Re: <mail.parent_id.subject>'
:param boolean force: force the subject replacement
"""
if (force or not mail.subject) and mail.record_name:
return 'Re: %s' % (mail.record_name)
elif (force or not mail.subject) and mail.parent_id and mail.parent_id.subject:
return 'Re: %s' % (mail.parent_id.subject)
return mail.subject
def send_get_mail_body(self, cr, uid, mail, partner=None, context=None):
"""Return a specific ir_email body. The main purpose of this method
is to be inherited to add custom content depending on some module."""
body = mail.body_html or ''
# generate access links for notifications or emails linked to a specific document with auto threading
link = None
if mail.notification or (mail.model and mail.res_id and not mail.no_auto_thread):
link = self._get_partner_access_link(cr, uid, mail, partner, context=context)
if link:
body = tools.append_content_to_html(body, link, plaintext=False, container_tag='div')
return body
def send_get_mail_to(self, cr, uid, mail, partner=None, context=None):
"""Forge the email_to with the following heuristic:
- if 'partner', recipient specific (Partner Name <email>)
- else fallback on mail.email_to splitting """
if partner:
email_to = [formataddr((partner.name, partner.email))]
else:
email_to = tools.email_split(mail.email_to)
return email_to
def send_get_email_dict(self, cr, uid, mail, partner=None, context=None):
"""Return a dictionary for specific email values, depending on a
partner, or generic to the whole recipients given by mail.email_to.
:param browse_record mail: mail.mail browse_record
:param browse_record partner: specific recipient partner
"""
body = self.send_get_mail_body(cr, uid, mail, partner=partner, context=context)
body_alternative = tools.html2plaintext(body)
res = {
'body': body,
'body_alternative': body_alternative,
'subject': self.send_get_mail_subject(cr, uid, mail, partner=partner, context=context),
'email_to': self.send_get_mail_to(cr, uid, mail, partner=partner, context=context),
}
return res
def send(self, cr, uid, ids, auto_commit=False, raise_exception=False, context=None):
""" Sends the selected emails immediately, ignoring their current
state (mails that have already been sent should not be passed
unless they should actually be re-sent).
Emails successfully delivered are marked as 'sent', and those
            that fail to be delivered are marked as 'exception', and the
            corresponding error message is output in the server logs.
:param bool auto_commit: whether to force a commit of the mail status
after sending each mail (meant only for scheduler processing);
should never be True during normal transactions (default: False)
:param bool raise_exception: whether to raise an exception if the
email sending process has failed
:return: True
"""
context = dict(context or {})
ir_mail_server = self.pool.get('ir.mail_server')
ir_attachment = self.pool['ir.attachment']
for mail in self.browse(cr, SUPERUSER_ID, ids, context=context):
try:
# TDE note: remove me when model_id field is present on mail.message - done here to avoid doing it multiple times in the sub method
if mail.model:
model_id = self.pool['ir.model'].search(cr, SUPERUSER_ID, [('model', '=', mail.model)], context=context)[0]
model = self.pool['ir.model'].browse(cr, SUPERUSER_ID, model_id, context=context)
else:
model = None
if model:
context['model_name'] = model.name
# load attachment binary data with a separate read(), as prefetching all
                # `datas` (binary field) could bloat the browse cache, triggering
# soft/hard mem limits with temporary data.
attachment_ids = [a.id for a in mail.attachment_ids]
attachments = [(a['datas_fname'], base64.b64decode(a['datas']))
for a in ir_attachment.read(cr, SUPERUSER_ID, attachment_ids,
['datas_fname', 'datas'])]
# specific behavior to customize the send email for notified partners
email_list = []
if mail.email_to:
email_list.append(self.send_get_email_dict(cr, uid, mail, context=context))
for partner in mail.recipient_ids:
email_list.append(self.send_get_email_dict(cr, uid, mail, partner=partner, context=context))
# headers
headers = {}
bounce_alias = self.pool['ir.config_parameter'].get_param(cr, uid, "mail.bounce.alias", context=context)
catchall_domain = self.pool['ir.config_parameter'].get_param(cr, uid, "mail.catchall.domain", context=context)
if bounce_alias and catchall_domain:
if mail.model and mail.res_id:
headers['Return-Path'] = '%s-%d-%s-%d@%s' % (bounce_alias, mail.id, mail.model, mail.res_id, catchall_domain)
else:
headers['Return-Path'] = '%s-%d@%s' % (bounce_alias, mail.id, catchall_domain)
if mail.headers:
try:
headers.update(eval(mail.headers))
except Exception:
pass
# Writing on the mail object may fail (e.g. lock on user) which
# would trigger a rollback *after* actually sending the email.
                # To avoid sending the same email twice, provoke the failure earlier
mail.write({
'state': 'exception',
                    'failure_reason': _('Error without exception. Probably due to sending an email without computed recipients.'),
})
mail_sent = False
# build an RFC2822 email.message.Message object and send it without queuing
res = None
for email in email_list:
msg = ir_mail_server.build_email(
email_from=mail.email_from,
email_to=email.get('email_to'),
subject=email.get('subject'),
body=email.get('body'),
body_alternative=email.get('body_alternative'),
email_cc=tools.email_split(mail.email_cc),
reply_to=mail.reply_to,
attachments=attachments,
message_id=mail.message_id,
references=mail.references,
object_id=mail.res_id and ('%s-%s' % (mail.res_id, mail.model)),
subtype='html',
subtype_alternative='plain',
headers=headers)
try:
res = ir_mail_server.send_email(cr, uid, msg,
mail_server_id=mail.mail_server_id.id,
context=context)
except AssertionError as error:
if error.message == ir_mail_server.NO_VALID_RECIPIENT:
# No valid recipient found for this particular
# mail item -> ignore error to avoid blocking
# delivery to next recipients, if any. If this is
# the only recipient, the mail will show as failed.
_logger.info("Ignoring invalid recipients for mail.mail %s: %s",
mail.message_id, email.get('email_to'))
else:
raise
if res:
mail.write({'state': 'sent', 'message_id': res, 'failure_reason': False})
mail_sent = True
# /!\ can't use mail.state here, as mail.refresh() will cause an error
# see revid:[email protected] in 6.1
if mail_sent:
_logger.info('Mail with ID %r and Message-Id %r successfully sent', mail.id, mail.message_id)
self._postprocess_sent_message(cr, uid, mail, context=context, mail_sent=mail_sent)
except MemoryError:
# prevent catching transient MemoryErrors, bubble up to notify user or abort cron job
# instead of marking the mail as failed
_logger.exception('MemoryError while processing mail with ID %r and Msg-Id %r. '\
'Consider raising the --limit-memory-hard startup option',
mail.id, mail.message_id)
raise
except Exception as e:
failure_reason = tools.ustr(e)
_logger.exception('failed sending mail (id: %s) due to %s', mail.id, failure_reason)
mail.write({'state': 'exception', 'failure_reason': failure_reason})
self._postprocess_sent_message(cr, uid, mail, context=context, mail_sent=False)
if raise_exception:
if isinstance(e, AssertionError):
# get the args of the original error, wrap into a value and throw a MailDeliveryException
# that is an except_orm, with name and value as arguments
value = '. '.join(e.args)
raise MailDeliveryException(_("Mail Delivery Failed"), value)
raise
if auto_commit is True:
cr.commit()
return True
| cdrooom/odoo | addons/mail/mail_mail.py | Python | agpl-3.0 | 18,908 |
# Copyright (C) 2010-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
"""Database migration from Cuckoo 0.6 to Cuckoo 1.1.
Revision ID: 263a45963c72
Revises: None
Create Date: 2014-03-23 23:30:36.756792
"""
# Revision identifiers, used by Alembic.
revision = "263a45963c72"
mongo_revision = "1"
down_revision = None
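# Hedged usage note (added for clarity; the exact invocation depends on your
# setup): with an alembic.ini pointing at the Cuckoo database, this revision
# would typically be applied from the db_migration directory with something
# like `alembic upgrade head`; the mongo_upgrade() step below then runs as
# part of upgrade() itself.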
import os
import sys
import sqlalchemy as sa
from datetime import datetime
try:
from dateutil.parser import parse
except ImportError:
print "Unable to import dateutil.parser",
print "(install with `pip install python-dateutil`)"
sys.exit()
try:
from alembic import op
except ImportError:
print "Unable to import alembic (install with `pip install alembic`)"
sys.exit()
try:
from pymongo.connection import Connection
from pymongo.errors import ConnectionFailure
except ImportError:
print "Unable to import pymongo (install with `pip install pymongo`)"
sys.exit()
sys.path.append(os.path.join("..", ".."))
import lib.cuckoo.core.database as db
from lib.cuckoo.common.config import Config
def upgrade():
    # BEWARE: be prepared for some really spaghetti code. To deal with SQLite limitations in Alembic we coded some workarounds.
    # Migrations are supported starting from Cuckoo 0.6 and Cuckoo 1.0; we need a way to figure out from which release
    # the migration starts because both schemas are missing Alembic release versioning.
    # We check for the tags table to distinguish between Cuckoo 0.6 and 1.0.
conn = op.get_bind()
if conn.engine.dialect.has_table(conn.engine.connect(), "machines_tags"):
# If this table exist we are on Cuckoo 1.0 or above.
# So skip SQL migration.
pass
else:
# We are on Cuckoo < 1.0, hopefully 0.6.
# So run SQL migration.
# Create table used by Tag.
op.create_table(
"tags",
sa.Column("id", sa.Integer(), primary_key=True),
sa.Column("name", sa.String(length=255), nullable=False, unique=True),
)
# Create secondary table used in association Machine - Tag.
op.create_table(
"machines_tags",
sa.Column("machine_id", sa.Integer, sa.ForeignKey("machines.id")),
sa.Column("tag_id", sa.Integer, sa.ForeignKey("tags.id")),
)
# Add columns to Machine.
op.add_column("machines", sa.Column("interface", sa.String(length=255), nullable=True))
op.add_column("machines", sa.Column("snapshot", sa.String(length=255), nullable=True))
# TODO: change default value, be aware sqlite doesn't support that kind of ALTER statement.
op.add_column("machines", sa.Column("resultserver_ip", sa.String(length=255), server_default="192.168.56.1", nullable=False))
# TODO: change default value, be aware sqlite doesn't support that kind of ALTER statement.
op.add_column("machines", sa.Column("resultserver_port", sa.String(length=255), server_default="2042", nullable=False))
# Deal with Alembic shit.
# Alembic is so ORMish that it was impossible to write code which works on different DBMS.
if conn.engine.driver == "psycopg2":
# We don"t provide a default value and leave the column as nullable because o further data migration.
op.add_column("tasks", sa.Column("clock", sa.DateTime(timezone=False),nullable=True))
# NOTE: We added this new column so we force clock time to the added_on for old analyses.
conn.execute("update tasks set clock=added_on")
# Add the not null constraint.
op.alter_column("tasks", "clock", nullable=False, existing_nullable=True)
# Altering status ENUM.
        # This shit of raw SQL is here because alembic doesn't deal well with alter_column of ENUM type.
op.execute('COMMIT') # Commit because SQLAlchemy doesn't support ALTER TYPE in a transaction.
conn.execute("ALTER TYPE status_type ADD VALUE 'completed'")
conn.execute("ALTER TYPE status_type ADD VALUE 'reported'")
conn.execute("ALTER TYPE status_type ADD VALUE 'recovered'")
conn.execute("ALTER TYPE status_type ADD VALUE 'running'")
conn.execute("ALTER TYPE status_type RENAME ATTRIBUTE success TO completed")
conn.execute("ALTER TYPE status_type DROP ATTRIBUTE IF EXISTS failure")
elif conn.engine.driver == "mysqldb":
# We don"t provide a default value and leave the column as nullable because o further data migration.
op.add_column("tasks", sa.Column("clock", sa.DateTime(timezone=False),nullable=True))
# NOTE: We added this new column so we force clock time to the added_on for old analyses.
conn.execute("update tasks set clock=added_on")
# Add the not null constraint.
op.alter_column("tasks", "clock", nullable=False, existing_nullable=True, existing_type=sa.DateTime(timezone=False))
# NOTE: To workaround limitations in Alembic and MySQL ALTER statement (cannot remove item from ENUM).
# Read data.
tasks_data = []
old_tasks = conn.execute("select id, target, category, timeout, priority, custom, machine, package, options, platform, memory, enforce_timeout, added_on, started_on, completed_on, status, sample_id from tasks").fetchall()
for item in old_tasks:
d = {}
d["id"] = item[0]
d["target"] = item[1]
d["category"] = item[2]
d["timeout"] = item[3]
d["priority"] = item[4]
d["custom"] = item[5]
d["machine"] = item[6]
d["package"] = item[7]
d["options"] = item[8]
d["platform"] = item[9]
d["memory"] = item[10]
d["enforce_timeout"] = item[11]
if isinstance(item[12], datetime):
d["added_on"] = item[12]
else:
d["added_on"] = parse(item[12])
if isinstance(item[13], datetime):
d["started_on"] = item[13]
else:
d["started_on"] = parse(item[13])
if isinstance(item[14], datetime):
d["completed_on"] = item[14]
else:
d["completed_on"] = parse(item[14])
d["status"] = item[15]
d["sample_id"] = item[16]
# Force clock.
# NOTE: We added this new column so we force clock time to the added_on for old analyses.
d["clock"] = d["added_on"]
# Enum migration, "success" isn"t a valid state now.
if d["status"] == "success":
d["status"] = "completed"
tasks_data.append(d)
# Rename original table.
op.rename_table("tasks", "old_tasks")
# Drop old table.
op.drop_table("old_tasks")
# Drop old Enum.
sa.Enum(name="status_type").drop(op.get_bind(), checkfirst=False)
# Create new table with 1.0 schema.
op.create_table(
"tasks",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("target", sa.String(length=255), nullable=False),
sa.Column("category", sa.String(length=255), nullable=False),
sa.Column("timeout", sa.Integer(), server_default="0", nullable=False),
sa.Column("priority", sa.Integer(), server_default="1", nullable=False),
sa.Column("custom", sa.String(length=255), nullable=True),
sa.Column("machine", sa.String(length=255), nullable=True),
sa.Column("package", sa.String(length=255), nullable=True),
sa.Column("options", sa.String(length=255), nullable=True),
sa.Column("platform", sa.String(length=255), nullable=True),
sa.Column("memory", sa.Boolean(), nullable=False, default=False),
sa.Column("enforce_timeout", sa.Boolean(), nullable=False, default=False),
sa.Column("clock", sa.DateTime(timezone=False), server_default=sa.func.now(), nullable=False),
sa.Column("added_on", sa.DateTime(timezone=False), nullable=False),
sa.Column("started_on", sa.DateTime(timezone=False), nullable=True),
sa.Column("completed_on", sa.DateTime(timezone=False), nullable=True),
sa.Column("status", sa.Enum("pending", "running", "completed", "reported", "recovered", name="status_type"), server_default="pending", nullable=False),
sa.Column("sample_id", sa.Integer, sa.ForeignKey("samples.id"), nullable=True),
sa.PrimaryKeyConstraint("id")
)
# Insert data.
op.bulk_insert(db.Task.__table__, tasks_data)
elif conn.engine.driver == "pysqlite":
# Edit task status enumeration in Task.
# NOTE: To workaround limitations in SQLite we have to create a temporary table, create the new schema and copy data.
# Read data.
tasks_data = []
old_tasks = conn.execute("select id, target, category, timeout, priority, custom, machine, package, options, platform, memory, enforce_timeout, added_on, started_on, completed_on, status, sample_id from tasks").fetchall()
for item in old_tasks:
d = {}
d["id"] = item[0]
d["target"] = item[1]
d["category"] = item[2]
d["timeout"] = item[3]
d["priority"] = item[4]
d["custom"] = item[5]
d["machine"] = item[6]
d["package"] = item[7]
d["options"] = item[8]
d["platform"] = item[9]
d["memory"] = item[10]
d["enforce_timeout"] = item[11]
if isinstance(item[12], datetime):
d["added_on"] = item[12]
else:
d["added_on"] = parse(item[12])
if isinstance(item[13], datetime):
d["started_on"] = item[13]
else:
d["started_on"] = parse(item[13])
if isinstance(item[14], datetime):
d["completed_on"] = item[14]
else:
d["completed_on"] = parse(item[14])
d["status"] = item[15]
d["sample_id"] = item[16]
# Force clock.
# NOTE: We added this new column so we force clock time to the added_on for old analyses.
d["clock"] = d["added_on"]
# Enum migration, "success" isn"t a valid state now.
if d["status"] == "success":
d["status"] = "completed"
tasks_data.append(d)
# Rename original table.
op.rename_table("tasks", "old_tasks")
# Drop old table.
op.drop_table("old_tasks")
# Drop old Enum.
sa.Enum(name="status_type").drop(op.get_bind(), checkfirst=False)
# Create new table with 1.0 schema.
op.create_table(
"tasks",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("target", sa.String(length=255), nullable=False),
sa.Column("category", sa.String(length=255), nullable=False),
sa.Column("timeout", sa.Integer(), server_default="0", nullable=False),
sa.Column("priority", sa.Integer(), server_default="1", nullable=False),
sa.Column("custom", sa.String(length=255), nullable=True),
sa.Column("machine", sa.String(length=255), nullable=True),
sa.Column("package", sa.String(length=255), nullable=True),
sa.Column("options", sa.String(length=255), nullable=True),
sa.Column("platform", sa.String(length=255), nullable=True),
sa.Column("memory", sa.Boolean(), nullable=False, default=False),
sa.Column("enforce_timeout", sa.Boolean(), nullable=False, default=False),
sa.Column("clock", sa.DateTime(timezone=False), server_default=sa.func.now(), nullable=False),
sa.Column("added_on", sa.DateTime(timezone=False), nullable=False),
sa.Column("started_on", sa.DateTime(timezone=False), nullable=True),
sa.Column("completed_on", sa.DateTime(timezone=False), nullable=True),
sa.Column("status", sa.Enum("pending", "running", "completed", "reported", "recovered", name="status_type"), server_default="pending", nullable=False),
sa.Column("sample_id", sa.Integer, sa.ForeignKey("samples.id"), nullable=True),
sa.PrimaryKeyConstraint("id")
)
# Insert data.
op.bulk_insert(db.Task.__table__, tasks_data)
# Migrate mongo.
mongo_upgrade()
def mongo_upgrade():
"""Migrate mongodb schema and data."""
# Read reporting.conf to fetch mongo configuration.
config = Config(cfg=os.path.join("..", "..", "conf", "reporting.conf"))
# Run migration only if mongo is enabled as reporting module.
if config.mongodb.enabled:
host = config.mongodb.get("host", "127.0.0.1")
port = config.mongodb.get("port", 27017)
print "Mongo reporting is enabled, strarting mongo data migration."
# Connect.
try:
conn = Connection(host, port)
db = conn.cuckoo
except TypeError:
print "Mongo connection port must be integer"
sys.exit()
except ConnectionFailure:
print "Cannot connect to MongoDB"
sys.exit()
# Check for schema version and create it.
if "cuckoo_schema" in db.collection_names():
print "Mongo schema version not expected"
sys.exit()
else:
db.cuckoo_schema.save({"version": mongo_revision})
else:
print "Mongo reporting module not enabled, skipping mongo migration."
def downgrade():
# We don"t support downgrade.
pass
| davidoren/CuckooSploit | utils/db_migration/versions/from_0_6_to_1_1.py | Python | gpl-3.0 | 14,422 |
from __future__ import unicode_literals
import errno
import os
import re
import socket
import sys
from datetime import datetime
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError
from django.core.servers.basehttp import get_internal_wsgi_application, run
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.migrations.executor import MigrationExecutor
from django.utils import autoreload, six
from django.utils.encoding import force_text, get_system_encoding
naiveip_re = re.compile(r"""^(?:
(?P<addr>
(?P<ipv4>\d{1,3}(?:\.\d{1,3}){3}) | # IPv4 address
(?P<ipv6>\[[a-fA-F0-9:]+\]) | # IPv6 address
(?P<fqdn>[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*) # FQDN
):)?(?P<port>\d+)$""", re.X)
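# Illustrative matches for naiveip_re (examples added for clarity, not part
# of the original source): "8000", "0.0.0.0:8000", "[::1]:8000" and
# "localhost:8000" all match; the optional addr group captures the
# IPv4/IPv6/FQDN part and the mandatory port group captures the digits.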
DEFAULT_PORT = "8000"
class Command(BaseCommand):
help = "Starts a lightweight Web server for development."
# Validation is called explicitly each time the server is reloaded.
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument('addrport', nargs='?',
help='Optional port number, or ipaddr:port')
parser.add_argument('--ipv6', '-6', action='store_true', dest='use_ipv6', default=False,
help='Tells Django to use an IPv6 address.')
parser.add_argument('--nothreading', action='store_false', dest='use_threading', default=True,
help='Tells Django to NOT use threading.')
parser.add_argument('--noreload', action='store_false', dest='use_reloader', default=True,
help='Tells Django to NOT use the auto-reloader.')
def execute(self, *args, **options):
if options.get('no_color'):
# We rely on the environment because it's currently the only
# way to reach WSGIRequestHandler. This seems an acceptable
# compromise considering `runserver` runs indefinitely.
os.environ[str("DJANGO_COLORS")] = str("nocolor")
super(Command, self).execute(*args, **options)
def get_handler(self, *args, **options):
"""
Returns the default WSGI handler for the runner.
"""
return get_internal_wsgi_application()
def handle(self, *args, **options):
from django.conf import settings
if not settings.DEBUG and not settings.ALLOWED_HOSTS:
raise CommandError('You must set settings.ALLOWED_HOSTS if DEBUG is False.')
self.use_ipv6 = options.get('use_ipv6')
if self.use_ipv6 and not socket.has_ipv6:
raise CommandError('Your Python does not support IPv6.')
self._raw_ipv6 = False
if not options.get('addrport'):
self.addr = ''
self.port = DEFAULT_PORT
else:
m = re.match(naiveip_re, options['addrport'])
if m is None:
raise CommandError('"%s" is not a valid port number '
'or address:port pair.' % options['addrport'])
self.addr, _ipv4, _ipv6, _fqdn, self.port = m.groups()
if not self.port.isdigit():
raise CommandError("%r is not a valid port number." % self.port)
if self.addr:
if _ipv6:
self.addr = self.addr[1:-1]
self.use_ipv6 = True
self._raw_ipv6 = True
elif self.use_ipv6 and not _fqdn:
raise CommandError('"%s" is not a valid IPv6 address.' % self.addr)
if not self.addr:
self.addr = '::1' if self.use_ipv6 else '127.0.0.1'
self._raw_ipv6 = bool(self.use_ipv6)
self.run(**options)
def run(self, **options):
"""
Runs the server, using the autoreloader if needed
"""
use_reloader = options.get('use_reloader')
if use_reloader:
autoreload.main(self.inner_run, None, options)
else:
self.inner_run(None, **options)
def inner_run(self, *args, **options):
from django.conf import settings
from django.utils import translation
# If an exception was silenced in ManagementUtility.execute in order
# to be raised in the child process, raise it now.
autoreload.raise_last_exception()
threading = options.get('use_threading')
shutdown_message = options.get('shutdown_message', '')
quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C'
self.stdout.write("Performing system checks...\n\n")
self.validate(display_num_errors=True)
try:
self.check_migrations()
except ImproperlyConfigured:
pass
now = datetime.now().strftime('%B %d, %Y - %X')
if six.PY2:
now = now.decode(get_system_encoding())
self.stdout.write((
"%(started_at)s\n"
"Django version %(version)s, using settings %(settings)r\n"
"Starting development server at http://%(addr)s:%(port)s/\n"
"Quit the server with %(quit_command)s.\n"
) % {
"started_at": now,
"version": self.get_version(),
"settings": settings.SETTINGS_MODULE,
"addr": '[%s]' % self.addr if self._raw_ipv6 else self.addr,
"port": self.port,
"quit_command": quit_command,
})
# django.core.management.base forces the locale to en-us. We should
# set it up correctly for the first request (particularly important
# in the "--noreload" case).
translation.activate(settings.LANGUAGE_CODE)
try:
handler = self.get_handler(*args, **options)
run(self.addr, int(self.port), handler,
ipv6=self.use_ipv6, threading=threading)
except socket.error as e:
# Use helpful error messages instead of ugly tracebacks.
ERRORS = {
errno.EACCES: "You don't have permission to access that port.",
errno.EADDRINUSE: "That port is already in use.",
                errno.EADDRNOTAVAIL: "That IP address can't be assigned to.",
}
try:
error_text = ERRORS[e.errno]
except KeyError:
error_text = force_text(e)
self.stderr.write("Error: %s" % error_text)
# Need to use an OS exit because sys.exit doesn't work in a thread
os._exit(1)
except KeyboardInterrupt:
if shutdown_message:
self.stdout.write(shutdown_message)
sys.exit(0)
def check_migrations(self):
"""
Checks to see if the set of migrations on disk matches the
migrations in the database. Prints a warning if they don't match.
"""
executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])
plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
if plan:
self.stdout.write(self.style.NOTICE(
"\nYou have unapplied migrations; your app may not work properly until they are applied."
))
self.stdout.write(self.style.NOTICE("Run 'python manage.py migrate' to apply them.\n"))
# Kept for backward compatibility
BaseRunserverCommand = Command
| devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/django/core/management/commands/runserver.py | Python | agpl-3.0 | 7,307 |
# Definition for binary tree with next pointer.
# class TreeLinkNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# self.next = None
class Solution:
# @param root, a tree link node
# @return nothing
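    # Approach note (added comment, not in the original solution): this is a
    # level-order traversal; within a level, a node's left child is linked to
    # its right child when both exist, otherwise find_next() walks the
    # parent's next chain to locate the first child further to the right.
    # Overall O(n) time, with extra space proportional to the widest level.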
def connect(self, root):
def find_next(parent, child):
parent = parent.next
while parent:
if parent.left:
child.next = parent.left
return
elif parent.right:
child.next = parent.right
return
else:
parent = parent.next
if not root: return
q = [root]
while q:
nxt = []
for node in q:
if node.left:
if node.right:
node.left.next = node.right
else:
find_next(node, node.left)
nxt.append(node.left)
if node.right:
find_next(node, node.right)
nxt.append(node.right)
q = nxt | YiqunPeng/Leetcode-pyq | solutions/117PopulatingNextRightPointersInEachNodeII.py | Python | gpl-3.0 | 1,205 |
from setuptools import setup
setup(
name='wjordpress',
# when bumping versions, also update __init__, sphinx config, and changelog
version='0.2.2',
author='Chris Chang',
author_email='[email protected]',
url='https://github.com/texastribune/wjordpress',
packages=['wjordpress'],
include_package_data=True, # automatically include things from MANIFEST.in
license='Apache License, Version 2.0',
description='Django integration with WordPress through the json-rest-api plugin',
long_description=open('README.rst').read(),
install_requires=[
'six>=1.0.0', # works in tox
],
classifiers=[
"Development Status :: 3 - Alpha",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
| texastribune/wjordpress | setup.py | Python | apache-2.0 | 1,086 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of IVRE.
# Copyright 2011 - 2022 Pierre LALET <[email protected]>
#
# IVRE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# IVRE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IVRE. If not, see <http://www.gnu.org/licenses/>.
"""This module contains functions to interact with *ANY* SQL database.
"""
# Tests like "expr == None" should be used for BinaryExpression instances
# pylint: disable=singleton-comparison
import codecs
from collections import namedtuple
import csv
import datetime
import json
import re
from sqlalchemy import (
Boolean,
Integer,
and_,
cast,
column,
create_engine,
delete,
desc,
func,
exists,
join,
not_,
nullsfirst,
or_,
select,
text,
true,
update,
insert,
)
from sqlalchemy.dialects.postgresql import JSONB
from ivre.active.data import ALIASES_TABLE_ELEMS
from ivre.db import DB, DBActive, DBFlow, DBNmap, DBPassive, DBView
from ivre import config, utils, xmlnmap
from ivre.db.sql.tables import (
N_Association_Scan_Category,
N_Association_Scan_Hostname,
N_Association_Scan_ScanFile,
N_Category,
N_Hop,
N_Hostname,
N_Port,
N_Scan,
N_ScanFile,
N_Script,
N_Tag,
N_Trace,
V_Association_Scan_Category,
V_Association_Scan_Hostname,
V_Category,
V_Hop,
V_Hostname,
V_Port,
V_Scan,
V_Script,
V_Tag,
V_Trace,
Flow,
Passive,
Point,
)
# Data
class CSVFile:
"""A file like object generating CSV lines suitable for use with
PostgresDB.copy_from(). Reads (at most `limit`, when it's not None)
lines from `fname`, skipping `skip` first lines.
When .read() returns the empty string, the attribute `.more_to_read`
is set to True when the `limit` has been reached, and to False when
there is no more data to read from the input.
"""
def __init__(self, fname, skip=0, limit=None):
# pylint: disable=consider-using-with
self.fdesc = codecs.open(fname, encoding="latin-1")
for _ in range(skip):
self.fdesc.readline()
self.limit = limit
if limit is not None:
self.count = 0
self.more_to_read = None
self.inp = csv.reader(self.fdesc)
@staticmethod
def fixline(line):
"""Subclasses can override this method to generate the CSV line from
the original line.
"""
return line
def read(self, size=None):
if self.limit is not None:
if self.count >= self.limit:
self.more_to_read = True
return ""
try:
line = None
while line is None:
line = self.fixline(next(self.inp))
if self.limit is not None:
self.count += 1
return "%s\n" % "\t".join(line)
except StopIteration:
self.more_to_read = False
return ""
def readline(self):
return self.read()
def __exit__(self, *args):
if self.fdesc is not None:
self.fdesc.__exit__(*args)
def __enter__(self):
return self
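# Hedged usage sketch for CSVFile (illustration only; the file name, skip and
# limit values, the psycopg2 cursor and the table name are all made up):
#     with CSVFile("dump.csv", skip=1, limit=10000) as fdesc:
#         cursor.copy_from(fdesc, "mytable")
#         if fdesc.more_to_read:
#             pass  # reopen with skip=10001 to handle the next chunk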
# Nmap
class ScanCSVFile(CSVFile):
def __init__(self, hostgen, ip2internal, table):
self.ip2internal = ip2internal
self.table = table
self.inp = hostgen
self.fdesc = None
def fixline(self, line):
for field in ["cpes", "extraports", "openports", "os", "traces"]:
line.pop(field, None)
line["addr"] = self.ip2internal(line["addr"])
scanfileid = line.pop("scanid")
if isinstance(scanfileid, str):
scanfileid = [scanfileid]
line["scanfileid"] = "{%s}" % ",".join('"\\x%s"' % fid for fid in scanfileid)
line["time_start"] = line.pop("starttime")
line["time_stop"] = line.pop("endtime")
line["info"] = line.pop("infos", None)
for field in ["categories"]:
if field in line:
line[field] = "{%s}" % json.dumps(line[field])[1:-1]
for port in line.get("ports", []):
for script in port.get("scripts", []):
if "masscan" in script and "raw" in script["masscan"]:
script["masscan"]["raw"] = utils.encode_b64(
script["masscan"]["raw"]
)
if "ssl-cert" in script:
for cert in script["ssl-cert"]:
for fld in ["not_before", "not_after"]:
if fld not in cert:
continue
if isinstance(cert[fld], datetime.datetime):
cert[fld] = cert[fld].timestamp()
elif isinstance(cert[fld], str):
cert[fld] = utils.all2datetime(cert[fld]).timestamp()
if "screendata" in port:
port["screendata"] = utils.encode_b64(port["screendata"])
for field in ["hostnames", "ports", "info"]:
if field in line:
line[field] = json.dumps(line[field]).replace("\\", "\\\\")
return [
"\\N" if line.get(col.name) is None else str(line.get(col.name))
for col in self.table.columns
]
# Passive
class PassiveCSVFile(CSVFile):
info_fields = set(["distance", "signature", "version"])
def __init__(
self,
siggen,
ip2internal,
table,
limit=None,
getinfos=None,
separated_timestamps=True,
):
self.ip2internal = ip2internal
self.table = table
self.inp = siggen
self.fdesc = None
self.limit = limit
if limit is not None:
self.count = 0
self.getinfos = getinfos
self.timestamps = separated_timestamps
def fixline(self, line):
if self.timestamps:
timestamp, line = line
line["firstseen"] = line["lastseen"] = utils.all2datetime(timestamp)
else:
line["firstseen"] = utils.all2datetime(line["firstseen"])
line["lastseen"] = utils.all2datetime(line["lastseen"])
if self.getinfos is not None:
line.update(self.getinfos(line))
try:
line.update(line.pop("infos"))
except KeyError:
pass
if "addr" in line:
line["addr"] = self.ip2internal(line["addr"])
else:
line["addr"] = None
line.setdefault("count", 1)
line.setdefault("port", -1)
for key in ["sensor", "value", "source", "targetval"]:
line.setdefault(key, "")
if line["recontype"] in {"SSL_SERVER", "SSL_CLIENT"} and line["source"] in {
"cert",
"cacert",
}:
for fld in ["not_before", "not_after"]:
if fld not in line:
continue
if isinstance(line[fld], datetime.datetime):
line[fld] = line[fld].timestamp()
elif isinstance(line[fld], str):
line[fld] = utils.all2datetime(line[fld]).timestamp()
for key, value in line.items():
if key not in ["info", "moreinfo"] and isinstance(value, str):
try:
value = value.encode("latin-1")
except Exception:
pass
line[key] = "".join(
chr(c) if 32 <= c <= 126 else "\\x%02x" % c for c in value
).replace("\\", "\\\\")
line["info"] = (
"%s"
% json.dumps(
dict(
(key, line.pop(key))
for key in list(line)
if key in self.info_fields
),
).replace("\\", "\\\\")
)
line["moreinfo"] = (
"%s"
% json.dumps(
dict(
(key, line.pop(key))
for key in list(line)
if key not in self.table.columns
),
).replace("\\", "\\\\")
)
return [
"\\N" if line.get(col.name) is None else str(line.get(col.name))
for col in self.table.columns
]
class SQLDB(DB):
table_layout = namedtuple("empty_layout", [])
tables = table_layout()
fields = {}
no_limit = None
def __init__(self, url):
super().__init__()
self.dburl = url.geturl()
@property
def db(self):
"""The DB connection."""
try:
return self._db
except AttributeError:
# echo on debug disabled for tests
self._db = create_engine(self.dburl, echo=config.DEBUG_DB)
return self._db
@property
def flt_empty(self):
return self.base_filter()
def drop(self):
for table in reversed(self.tables):
table.__table__.drop(bind=self.db, checkfirst=True)
def create(self):
for table in self.tables:
table.__table__.create(bind=self.db, checkfirst=True)
def init(self):
self.drop()
self.create()
def explain(self, req, **_):
"""This method calls the SQL EXPLAIN statement to retrieve database
statistics.
"""
raise NotImplementedError()
def _get(self, flt, limit=None, skip=None, sort=None, fields=None):
raise NotImplementedError()
@staticmethod
def ip2internal(addr):
# required for use with ivre.db.sql.tables.DefaultINET() (see
# .bind_processor()). Backends using variants must implement
# their own methods.
if not addr:
return b""
return utils.ip2bin(addr)
@staticmethod
def internal2ip(addr):
# required for use with ivre.db.sql.tables.DefaultINET() (see
# .result_processor()). Backends using variants must implement
# their own methods.
if not addr:
return None
return utils.bin2ip(addr)
@staticmethod
def to_binary(data):
return utils.encode_b64(data).decode()
@staticmethod
def from_binary(data):
return utils.decode_b64(data.encode())
@staticmethod
def flt2str(flt):
result = {}
for queryname, queries in flt.all_queries.items():
outqueries = []
if not isinstance(queries, list):
queries = [queries]
for query in queries:
if query is not None:
outqueries.append(str(query))
if outqueries:
result[queryname] = outqueries
return json.dumps(result)
def create_indexes(self):
raise NotImplementedError()
def ensure_indexes(self):
raise NotImplementedError()
@staticmethod
def query(*args, **kargs):
raise NotImplementedError()
def run(self, query):
raise NotImplementedError()
@classmethod
def from_dbdict(cls, d):
raise NotImplementedError()
@classmethod
def from_dbprop(cls, prop, val):
raise NotImplementedError()
@classmethod
def to_dbdict(cls, d):
raise NotImplementedError()
@classmethod
def to_dbprop(cls, prop, val):
raise NotImplementedError()
# FIXME: move this method
@classmethod
def _date_round(cls, date):
if isinstance(date, datetime.datetime):
ts = date.timestamp()
else:
ts = date
ts = ts - (ts % config.FLOW_TIME_PRECISION)
if isinstance(date, datetime.datetime):
return datetime.datetime.fromtimestamp(ts)
return ts
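    # Example (added for clarity): with config.FLOW_TIME_PRECISION set to
    # 3600, _date_round(7325) returns 7200; datetime inputs are floored the
    # same way and returned as datetime objects.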
@staticmethod
def fmt_results(fields, result):
return dict(
(fld, value) for fld, value in zip(fields, result) if value is not None
)
@classmethod
def searchobjectid(cls, oid, neg=False):
"""Filters records by their ObjectID. `oid` can be a single or many
(as a list or any iterable) object ID(s), specified as strings
        or as `ObjectID`s.
"""
if isinstance(oid, (int, str)):
oid = [int(oid)]
else:
oid = [int(suboid) for suboid in oid]
return cls._searchobjectid(oid, neg=neg)
@staticmethod
def _searchobjectid(oid, neg=False):
raise NotImplementedError()
@staticmethod
def _distinct_req(field, flt):
return flt.query(select([field.distinct()]).select_from(flt.select_from))
def distinct(self, field, flt=None, sort=None, limit=None, skip=None, **kargs):
"""This method produces a generator of distinct values for a given
field.
"""
if isinstance(field, str):
n_dots = field.count(".")
for i in range(n_dots + 1):
subfields = field.rsplit(".", i)
try:
fld = self.fields[subfields[0]]
except KeyError:
continue
for attr in subfields[1:]:
try:
fld = getattr(fld, attr)
except AttributeError:
continue
field = fld
break
else:
raise ValueError("Unknown field %r" % field)
if flt is None:
flt = self.flt_empty
sort = [
(self.fields[key] if isinstance(key, str) else key, way)
for key, way in sort or []
]
req = self._distinct_req(field, flt, **kargs)
for key, way in sort:
req = req.order_by(key if way >= 0 else desc(key))
if skip is not None:
req = req.offset(skip)
if limit is not None:
req = req.limit(limit)
return (next(iter(res.values())) for res in self.db.execute(req))
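    # Hedged usage sketch (the field name is only an example): iterating over
    #     db.distinct("source", flt=db.flt_empty, limit=10)
    # yields up to ten distinct 'source' values; dotted names are resolved
    # against self.fields one attribute at a time, as implemented above.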
@staticmethod
def _flt_and(cond1, cond2):
return cond1 & cond2
@staticmethod
def _flt_or(cond1, cond2):
return cond1 | cond2
@staticmethod
def _searchstring_re_inarray(idfield, field, value, neg=False):
if isinstance(value, utils.REGEXP_T):
if neg:
# FIXME
raise ValueError("Not implemented")
operator = "~*" if (value.flags & re.IGNORECASE) else "~"
value = value.pattern
base1 = select(
[idfield.label("id"), func.unnest(field).label("field")]
).cte("base1")
base2 = (
select([column("id", Integer)])
.select_from(base1)
.where(column("field").op(operator)(value))
)
return idfield.in_(base2)
return not_(field.any(value)) if neg else field.any(value)
@staticmethod
def _searchstring_re(field, value, neg=False):
if isinstance(value, utils.REGEXP_T):
flt = field.op("~*" if (value.flags & re.IGNORECASE) else "~")(
value.pattern
)
if neg:
return not_(flt)
return flt
if neg:
return field != value
return field == value
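    # Example (illustration only, using a hypothetical column `col`):
    # _searchstring_re(col, "apache") builds `col = 'apache'`, while
    # _searchstring_re(col, re.compile("^apa", re.I)) builds `col ~* '^apa'`
    # (PostgreSQL case-insensitive regexp match); neg=True negates either form.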
@staticmethod
def _searchstring_list(field, value, neg=False, map_=None):
if not isinstance(value, str) and hasattr(value, "__iter__"):
if map_ is not None:
value = [map_(elt) for elt in value]
if neg:
return field.notin_(value)
return field.in_(value)
if map_ is not None:
value = map_(value)
if neg:
return field != value
return field == value
@classmethod
def _searchcert(
cls,
base,
keytype=None,
md5=None,
sha1=None,
sha256=None,
subject=None,
issuer=None,
self_signed=None,
pkmd5=None,
pksha1=None,
pksha256=None,
):
req = true()
if keytype is not None:
req &= base.op("->")("pubkey").op("->>")("type") == keytype
for hashtype in ["md5", "sha1", "sha256"]:
hashval = locals()[hashtype]
if hashval is None:
continue
key = base.op("->>")(hashtype)
if isinstance(hashval, utils.REGEXP_T):
req &= key.op("~*")(hashval.pattern)
continue
if isinstance(hashval, list):
req &= key.in_([val.lower() for val in hashval])
continue
req &= key == hashval.lower()
if subject is not None:
req &= cls._searchstring_re(base.op("->>")("subject_text"), subject)
if issuer is not None:
req &= cls._searchstring_re(base.op("->>")("issuer_text"), issuer)
if self_signed is not None:
req &= base.op("->")("self_signed").cast(Boolean) == self_signed
for hashtype in ["md5", "sha1", "sha256"]:
hashval = locals()[f"pk{hashtype}"]
if hashval is None:
continue
key = base.op("->>")("pk{hashtype}")
if isinstance(hashval, utils.REGEXP_T):
req &= key.op("~*")(hashval.pattern)
continue
if isinstance(hashval, list):
req &= key.in_([val.lower() for val in hashval])
continue
req &= key == hashval.lower()
return req
class SQLDBFlow(SQLDB, DBFlow):
table_layout = namedtuple("flow_layout", ["flow"])
tables = table_layout(Flow)
@staticmethod
def query(*args, **kargs):
raise NotImplementedError()
def add_flow(
self,
labels,
keys,
counters=None,
accumulators=None,
srcnode=None,
dstnode=None,
time=True,
):
raise NotImplementedError()
@classmethod
def add_host(cls, labels=None, keys=None, time=True):
raise NotImplementedError()
def add_flow_metadata(
self,
labels,
linktype,
keys,
flow_keys,
counters=None,
accumulators=None,
time=True,
flow_labels=None,
):
raise NotImplementedError()
def add_host_metadata(
self,
labels,
linktype,
keys,
host_keys=None,
counters=None,
accumulators=None,
time=True,
):
raise NotImplementedError()
def host_details(self, node_id):
raise NotImplementedError()
def flow_details(self, flow_id):
raise NotImplementedError()
def from_filters(
self, filters, limit=None, skip=0, orderby="", mode=None, timeline=False
):
raise NotImplementedError()
def to_graph(self, query):
raise NotImplementedError()
def to_iter(self, query):
raise NotImplementedError()
def count(self, flt):
raise NotImplementedError()
def flow_daily(self, query):
raise NotImplementedError()
def top(self, query, fields, collect=None, sumfields=None):
"""Returns an iterator of:
        {fields: <fields>, count: <number of occurrences or sum of sumfields>,
collected: <collected fields>}.
"""
raise NotImplementedError()
def cleanup_flows(self):
raise NotImplementedError()
class Filter:
@staticmethod
def fltand(flt1, flt2):
return flt1 if flt2 is None else flt2 if flt1 is None else and_(flt1, flt2)
@staticmethod
def fltor(flt1, flt2):
return flt1 if flt2 is None else flt2 if flt1 is None else or_(flt1, flt2)
class ActiveFilter(Filter):
def __init__(
self,
main=None,
hostname=None,
category=None,
port=None,
script=None,
tables=None,
tag=None,
trace=None,
):
self.main = main
self.hostname = [] if hostname is None else hostname
self.category = [] if category is None else category
self.port = [] if port is None else port
self.script = [] if script is None else script
self.tables = tables # default value is handled in the subclasses
self.tag = [] if tag is None else tag
self.trace = [] if trace is None else trace
@property
def all_queries(self):
return {
"main": self.main,
"hostname": self.hostname,
"category": self.category,
"port": [elt[1] if elt[0] else not_(elt[1]) for elt in self.port],
"script": self.script,
"tables": self.tables,
"tag": self.tag,
"trace": self.trace,
}
def copy(self):
return self.__class__(
main=self.main,
hostname=self.hostname[:],
category=self.category[:],
port=self.port[:],
script=self.script[:],
tables=self.tables,
tag=self.tag[:],
trace=self.trace[:],
)
def __and__(self, other):
if self.tables != other.tables:
raise ValueError(
"Cannot 'AND' two filters on separate tables (%s / %s)"
% (self.tables, other.tables)
)
return self.__class__(
main=self.fltand(self.main, other.main),
hostname=self.hostname + other.hostname,
category=self.category + other.category,
port=self.port + other.port,
script=self.script + other.script,
tables=self.tables,
tag=self.tag + other.tag,
trace=self.trace + other.trace,
)
def __or__(self, other):
# FIXME: this has to be implemented
if self.hostname and other.hostname:
raise ValueError("Cannot 'OR' two filters on hostname")
if self.category and other.category:
raise ValueError("Cannot 'OR' two filters on category")
if self.port and other.port:
raise ValueError("Cannot 'OR' two filters on port")
if self.script and other.script:
raise ValueError("Cannot 'OR' two filters on script")
if self.tag and other.tag:
raise ValueError("Cannot 'OR' two filters on tag")
if self.trace and other.trace:
raise ValueError("Cannot 'OR' two filters on trace")
if self.tables != other.tables:
raise ValueError("Cannot 'OR' two filters on separate tables")
return self.__class__(
main=self.fltor(self.main, other.main),
hostname=self.hostname + other.hostname,
category=self.category + other.category,
port=self.port + other.port,
script=self.script + other.script,
tables=self.tables,
trace=self.trace + other.trace,
)
def select_from_base(self, base=None):
if base in [None, self.tables.scan, self.tables.scan.__mapper__]:
base = self.tables.scan
else:
base = join(self.tables.scan, base)
return base
@property
def select_from(self):
return self.select_from_base()
def query(self, req):
if self.main is not None:
req = req.where(self.main)
for incl, subflt in self.hostname:
base = select([self.tables.hostname.scan]).where(subflt)
if incl:
req = req.where(self.tables.scan.id.in_(base))
else:
req = req.where(self.tables.scan.id.notin_(base))
# See <http://stackoverflow.com/q/17112345/3223422> - "Using
# INTERSECT with tables from a WITH clause"
for subflt in self.category:
req = req.where(
exists(
select([1])
.select_from(
join(
self.tables.category, self.tables.association_scan_category
)
)
.where(subflt)
.where(
self.tables.association_scan_category.scan
== self.tables.scan.id
)
)
)
for incl, subflt in self.port:
if incl:
req = req.where(
exists(
select([1])
.select_from(self.tables.port)
.where(subflt)
.where(self.tables.port.scan == self.tables.scan.id)
)
)
else:
base = select([self.tables.port.scan]).where(subflt)
req = req.where(self.tables.scan.id.notin_(base))
for incl, subflt in self.script:
subreq = select([1]).select_from(join(self.tables.script, self.tables.port))
if isinstance(subflt, tuple):
for selectfrom in subflt[1]:
subreq = subreq.select_from(selectfrom)
subreq = subreq.where(subflt[0])
else:
subreq = subreq.where(subflt)
subreq = subreq.where(self.tables.port.scan == self.tables.scan.id)
if incl:
req = req.where(exists(subreq))
else:
req = req.where(not_(exists(subreq)))
for incl, subflt in self.tag:
base = select([self.tables.tag.scan]).where(subflt)
if incl:
req = req.where(self.tables.scan.id.in_(base))
else:
req = req.where(self.tables.scan.id.notin_(base))
for subflt in self.trace:
req = req.where(
exists(
select([1])
.select_from(join(self.tables.trace, self.tables.hop))
.where(subflt)
.where(self.tables.trace.scan == self.tables.scan.id)
)
)
return req
class NmapFilter(ActiveFilter):
def __init__(
self,
main=None,
hostname=None,
category=None,
port=None,
script=None,
tables=None,
tag=None,
trace=None,
):
super().__init__(
main=main,
hostname=hostname,
category=category,
port=port,
script=script,
tables=SQLDBNmap.tables if tables is None else tables,
tag=tag,
trace=trace,
)
class ViewFilter(ActiveFilter):
def __init__(
self,
main=None,
hostname=None,
category=None,
port=None,
script=None,
tables=None,
tag=None,
trace=None,
):
super().__init__(
main=main,
hostname=hostname,
category=category,
port=port,
script=script,
tables=SQLDBView.tables if tables is None else tables,
tag=tag,
trace=trace,
)
class SQLDBActive(SQLDB, DBActive):
_needunwind_script = set(
[
"http-headers",
"http-user-agent",
"ssh-hostkey",
"ssl-cert",
"ssl-ja3-client",
"ssl-ja3-server",
]
)
@classmethod
def needunwind_script(cls, key):
key = key.split(".")
for i in range(len(key)):
subkey = ".".join(key[: i + 1])
if subkey in cls._needunwind_script:
yield subkey
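    # Example (added for clarity): with the default _needunwind_script set,
    # needunwind_script("ssl-cert.pubkey.type") yields only "ssl-cert", since
    # the longer prefixes "ssl-cert.pubkey" and "ssl-cert.pubkey.type" are not
    # in the set.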
def __init__(self, url):
super().__init__(url)
self.output_function = None
self.bulk = None
def store_host(self, host):
raise NotImplementedError()
def store_or_merge_host(self, host):
raise NotImplementedError()
def migrate_schema(self, version):
"""Migrates the scan data."""
failed = 0
if (version or 0) < 9:
failed += self._migrate_schema_8_9()
if (version or 0) < 10:
failed += self._migrate_schema_9_10()
if (version or 0) < 11:
failed += self._migrate_schema_10_11()
if (version or 0) < 12:
failed += self._migrate_schema_11_12()
if (version or 0) < 13:
failed += self._migrate_schema_12_13()
if (version or 0) < 14:
failed += self._migrate_schema_13_14()
if (version or 0) < 15:
failed += self._migrate_schema_14_15()
if (version or 0) < 16:
failed += self._migrate_schema_15_16()
if (version or 0) < 18:
failed += self._migrate_schema_17_18()
if (version or 0) < 19:
failed += self._migrate_schema_18_19()
return failed
def _migrate_schema_8_9(self):
"""Converts records from version 8 to version 9. Version 9 creates a
structured output for http-headers script.
"""
cond = self.tables.scan.schema_version == 8
failed = set()
req = (
select(
[
self.tables.scan.id,
self.tables.script.port,
self.tables.script.output,
self.tables.script.data,
]
)
.select_from(
join(join(self.tables.scan, self.tables.port), self.tables.script)
)
.where(and_(cond, self.tables.script.name == "http-headers"))
)
for rec in self.db.execute(req):
if "http-headers" not in rec.data:
try:
data = xmlnmap.add_http_headers_data(
{"id": "http-headers", "output": rec.output}
)
except Exception:
utils.LOGGER.warning(
"Cannot migrate host %r", rec.id, exc_info=True
)
failed.add(rec.id)
else:
if data:
self.db.execute(
update(self.tables.script)
.where(
and_(
self.tables.script.port == rec.port,
self.tables.script.name == "http-headers",
)
)
.values(data={"http-headers": data})
)
if failed:
cond = and_(cond, self.tables.scan.id.notin_(failed))
self.db.execute(update(self.tables.scan).where(cond).values(schema_version=9))
return len(failed)
def _migrate_schema_9_10(self):
"""Converts a record from version 9 to version 10. Version 10 changes
the field names of the structured output for s7-info script.
"""
cond = self.tables.scan.schema_version == 9
failed = set()
req = (
select(
[
self.tables.scan.id,
self.tables.script.port,
self.tables.script.output,
self.tables.script.data,
]
)
.select_from(
join(join(self.tables.scan, self.tables.port), self.tables.script)
)
.where(and_(cond, self.tables.script.name == "s7-info"))
)
for rec in self.db.execute(req):
if "s7-info" in rec.data:
try:
data = xmlnmap.change_s7_info_keys(rec.data["s7-info"])
except Exception:
utils.LOGGER.warning(
"Cannot migrate host %r", rec.id, exc_info=True
)
failed.add(rec.id)
else:
if data:
self.db.execute(
update(self.tables.script)
.where(
and_(
self.tables.script.port == rec.port,
self.tables.script.name == "s7-info",
)
)
.values(data={"s7-info": data})
)
if failed:
cond = and_(cond, self.tables.scan.id.notin_(failed))
self.db.execute(update(self.tables.scan).where(cond).values(schema_version=10))
return len(failed)
def _migrate_schema_10_11(self):
"""Converts a record from version 10 to version 11. Version 11 changes
the way IP addresses are stored.
"""
raise NotImplementedError
def _migrate_schema_11_12(self):
"""Converts a record from version 11 to version 12. Version 12 changes
        the structured output for the fcrdns and rpcinfo scripts.
"""
cond = self.tables.scan.schema_version == 11
failed = set()
req = (
select(
[
self.tables.scan.id,
self.tables.script.name,
self.tables.script.port,
self.tables.script.output,
self.tables.script.data,
]
)
.select_from(
join(join(self.tables.scan, self.tables.port), self.tables.script)
)
.where(and_(cond, self.tables.script.name.in_(["fcrdns", "rpcinfo"])))
)
for rec in self.db.execute(req):
if rec.name in rec.data:
migr_func = {
"fcrdns": xmlnmap.change_fcrdns_migrate,
"rpcinfo": xmlnmap.change_rpcinfo,
}[rec.name]
try:
data = migr_func(rec.data[rec.name])
except Exception:
utils.LOGGER.warning(
"Cannot migrate host %r", rec.id, exc_info=True
)
failed.add(rec.id)
else:
if data:
self.db.execute(
update(self.tables.script)
.where(
and_(
self.tables.script.port == rec.port,
self.tables.script.name == rec.name,
)
)
.values(data={rec.name: data})
)
if failed:
cond = and_(cond, self.tables.scan.id.notin_(failed))
self.db.execute(update(self.tables.scan).where(cond).values(schema_version=12))
return len(failed)
def _migrate_schema_12_13(self):
"""Converts a record from version 12 to version 13. Version 13 changes
        the structured output for the ms-sql-info and smb-enum-shares scripts.
"""
cond = self.tables.scan.schema_version == 12
failed = set()
req = (
select(
[
self.tables.scan.id,
self.tables.script.name,
self.tables.script.port,
self.tables.script.output,
self.tables.script.data,
]
)
.select_from(
join(join(self.tables.scan, self.tables.port), self.tables.script)
)
.where(
and_(
cond,
self.tables.script.name.in_(["ms-sql-info", "smb-enum-shares"]),
)
)
)
for rec in self.db.execute(req):
print(repr(rec))
if rec.name in rec.data:
migr_func = {
"ms-sql-info": xmlnmap.change_ms_sql_info,
"smb-enum-shares": xmlnmap.change_smb_enum_shares,
}[rec.name]
try:
data = migr_func(rec.data[rec.name])
except Exception:
utils.LOGGER.warning(
"Cannot migrate host %r", rec.id, exc_info=True
)
failed.add(rec.id)
else:
if data:
self.db.execute(
update(self.tables.script)
.where(
and_(
self.tables.script.port == rec.port,
self.tables.script.name == rec.name,
)
)
.values(data={rec.name: data})
)
if failed:
cond = and_(cond, self.tables.scan.id.notin_(failed))
self.db.execute(update(self.tables.scan).where(cond).values(schema_version=13))
return len(failed)
def _migrate_schema_13_14(self):
"""Converts a record from version 13 to version 14. Version 14 changes
the structured output for ssh-hostkey and ls scripts to prevent a same
field from having different data types.
"""
cond = self.tables.scan.schema_version == 13
failed = set()
scripts = [
script_name
for script_name, alias in ALIASES_TABLE_ELEMS.items()
if alias == "ls"
]
scripts.append("ssh-hostkey")
req = (
select(
[
self.tables.scan.id,
self.tables.script.name,
self.tables.script.port,
self.tables.script.output,
self.tables.script.data,
]
)
.select_from(
join(join(self.tables.scan, self.tables.port), self.tables.script)
)
.where(and_(cond, self.tables.script.name.in_(scripts)))
)
for rec in self.db.execute(req):
if rec.name in rec.data:
migr_func = (
xmlnmap.change_ssh_hostkey
if rec.name == "ssh-hostkey"
else xmlnmap.change_ls_migrate
)
try:
data = migr_func(rec.data[rec.name])
except Exception:
utils.LOGGER.warning(
"Cannot migrate host %r", rec.id, exc_info=True
)
failed.add(rec.id)
else:
if data:
self.db.execute(
update(self.tables.script)
.where(
and_(
self.tables.script.port == rec.port,
self.tables.script.name == rec.name,
)
)
.values(data={rec.name: data})
)
if failed:
cond = and_(cond, self.tables.scan.id.notin_(failed))
self.db.execute(update(self.tables.scan).where(cond).values(schema_version=14))
return len(failed)
def _migrate_schema_14_15(self):
"""Converts a record from version 14 to version 15. Version 15 changes
        the structured output for the http-git script to move data to values
instead of keys.
"""
cond = self.tables.scan.schema_version == 14
failed = set()
req = (
select(
[
self.tables.scan.id,
self.tables.script.name,
self.tables.script.port,
self.tables.script.output,
self.tables.script.data,
]
)
.select_from(
join(join(self.tables.scan, self.tables.port), self.tables.script)
)
.where(and_(cond, self.tables.script.name == "http-git"))
)
for rec in self.db.execute(req):
if rec.name in rec.data:
try:
data = xmlnmap.change_http_git(rec.data[rec.name])
except Exception:
utils.LOGGER.warning(
"Cannot migrate host %r", rec.id, exc_info=True
)
failed.add(rec.id)
else:
if data:
self.db.execute(
update(self.tables.script)
.where(
and_(
self.tables.script.port == rec.port,
self.tables.script.name == rec.name,
)
)
.values(data={rec.name: data})
)
if failed:
cond = and_(cond, self.tables.scan.id.notin_(failed))
self.db.execute(update(self.tables.scan).where(cond).values(schema_version=15))
return len(failed)
def _migrate_schema_15_16(self):
"""Converts a record from version 15 to version 16. Version 16 uses a
consistent structured output for Nmap http-server-header script (old
versions reported `{"Server": "value"}`, while recent versions report
`["value"]`).
"""
cond = self.tables.scan.schema_version == 15
        failed = set()
req = (
select(
[
self.tables.scan.id,
self.tables.script.port,
self.tables.script.output,
self.tables.script.data,
]
)
.select_from(
join(join(self.tables.scan, self.tables.port), self.tables.script)
)
.where(and_(cond, self.tables.script.name == "http-server-header"))
)
for rec in self.db.execute(req):
updated = False
if "http-server-header" in rec.data:
data = rec.data["http-server-header"]
if isinstance(data, dict):
updated = True
if "Server" in data:
data = [data["Server"]]
else:
data = []
else:
try:
data = [
line.split(":", 1)[1].lstrip()
for line in (line.strip() for line in rec.output.splitlines())
if line.startswith("Server:")
]
except Exception:
utils.LOGGER.warning(
"Cannot migrate host %r", rec.id, exc_info=True
)
failed.add(rec.id)
else:
updated = True
if updated:
self.db.execute(
update(self.tables.script)
.where(
and_(
self.tables.script.port == rec.port,
self.tables.script.name == "http-server-header",
)
)
.values(data={"http-server-header": data})
)
if failed:
cond = and_(cond, self.tables.scan.id.notin_(failed))
self.db.execute(update(self.tables.scan).where(cond).values(schema_version=16))
return len(failed)
def _migrate_schema_17_18(self):
"""Converts a record from version 17 to version 18. Version 18
introduces HASSH (SSH fingerprint) in ssh2-enum-algos.
"""
cond = self.tables.scan.schema_version == 17
failed = set()
req = (
select(
[
self.tables.scan.id,
self.tables.script.name,
self.tables.script.port,
self.tables.script.output,
self.tables.script.data,
]
)
.select_from(
join(join(self.tables.scan, self.tables.port), self.tables.script)
)
.where(and_(cond, self.tables.script.name == "ssh2-enum-algos"))
)
for rec in self.db.execute(req):
if rec.name in rec.data:
try:
output, data = xmlnmap.change_ssh2_enum_algos(
rec.output,
rec.data[rec.name],
)
except Exception:
utils.LOGGER.warning(
"Cannot migrate host %r", rec.id, exc_info=True
)
failed.add(rec.id)
else:
if data:
self.db.execute(
update(self.tables.script)
.where(
and_(
self.tables.script.port == rec.port,
self.tables.script.name == rec.name,
)
)
.values(output=output, data={rec.name: data})
)
if failed:
cond = and_(cond, self.tables.scan.id.notin_(failed))
self.db.execute(update(self.tables.scan).where(cond).values(schema_version=18))
return len(failed)
def _migrate_schema_18_19(self):
"""Converts a record from version 18 to version 19. Version 19
        splits smb-os-discovery scripts into two: an ntlm-info one that contains all
        the information the original smb-os-discovery script got from NTLM, and a
        smb-os-discovery script with only the information regarding SMB.
"""
cond = self.tables.scan.schema_version == 18
failed = set()
req = (
select(
[
self.tables.scan.id,
self.tables.script.name,
self.tables.script.port,
self.tables.script.output,
self.tables.script.data,
]
)
.select_from(
join(join(self.tables.scan, self.tables.port), self.tables.script)
)
.where(and_(cond, self.tables.script.name == "smb-os-discovery"))
)
for rec in self.db.execute(req):
if rec.name == "smb-os-discovery":
if rec.name in rec.data:
try:
smb, ntlm = xmlnmap.split_smb_os_discovery(rec.data)
except Exception:
utils.LOGGER.warning(
"Cannot migrate host %r", rec.id, exc_info=True
)
failed.add(rec.id)
else:
if "masscan" in smb:
data = {
"smb-os-discovery": smb["smb-os-discovery"],
"masscan": smb["masscan"],
}
else:
data = {"smb-os-discovery": smb["smb-os-discovery"]}
self.db.execute(
update(self.tables.script)
.where(
and_(
self.tables.script.port == rec.port,
self.tables.script.name == rec.name,
)
)
.values(output=smb["output"], data=data)
)
if ntlm:
self.db.execute(
insert(self.tables.script).values(
port=rec.port,
name=ntlm["id"],
output=ntlm["output"],
data={"ntlm-info": ntlm["ntlm-info"]},
)
)
elif rec.name.endswith("-ntlm-info"):
script = {"id": rec.name, "output": rec.output, rec.name: rec.data}
xmlnmap.post_ntlm_info(script, {}, {})
self.db.execute(
update(self.tables.script)
.where(
and_(
self.tables.script.port == rec.port,
self.tables.script.name == rec.name,
)
)
.values(
name="ntlm-info",
output=script["output"],
data=script.get("ntlm-info", {}),
)
)
if failed:
cond = and_(cond, self.tables.scan.id.notin_(failed))
self.db.execute(update(self.tables.scan).where(cond).values(schema_version=19))
return len(failed)
def count(self, flt, **_):
return self.db.execute(
flt.query(select([func.count()])).select_from(flt.select_from)
).fetchone()[0]
@staticmethod
def _distinct_req(field, flt):
flt = flt.copy()
return flt.query(
select([field.distinct()]).select_from(flt.select_from_base(field.parent))
)
def _get_open_port_count(self, flt, limit=None, skip=None):
req = flt.query(select([self.tables.scan.id]))
if skip is not None:
req = req.offset(skip)
if limit is not None:
req = req.limit(limit)
base = req.cte("base")
return (
{"addr": rec[2], "starttime": rec[1], "openports": {"count": rec[0]}}
for rec in self.db.execute(
select(
[
func.count(self.tables.port.id),
self.tables.scan.time_start,
self.tables.scan.addr,
]
)
.select_from(join(self.tables.port, self.tables.scan))
.where(self.tables.port.state == "open")
.group_by(self.tables.scan.addr, self.tables.scan.time_start)
.where(self.tables.scan.id.in_(base))
)
)
def get_open_port_count(self, flt, limit=None, skip=None):
result = list(self._get_open_port_count(flt, limit=limit, skip=skip))
return result, len(result)
def getlocations(self, flt, limit=None, skip=None):
req = flt.query(
select(
[
func.count(self.tables.scan.id),
self.tables.scan.info["coordinates"].astext,
]
).where(
self.tables.scan.info.has_key("coordinates") # noqa: W601
),
)
if skip is not None:
req = req.offset(skip)
if limit is not None:
req = req.limit(limit)
return (
{"_id": Point().result_processor(None, None)(rec[1]), "count": rec[0]}
for rec in self.db.execute(
req.group_by(self.tables.scan.info["coordinates"].astext)
)
)
def get_ips(self, flt, limit=None, skip=None):
return tuple(
action(flt, limit=limit, skip=skip) for action in [self.get, self.count]
)
def _get(self, flt, limit=None, skip=None, sort=None, fields=None):
if fields is not None:
utils.LOGGER.warning("Argument 'fields' provided but unused")
req = flt.query(
select(
[
self.tables.scan.id,
self.tables.scan.addr,
self.tables.scan.source,
self.tables.scan.info,
self.tables.scan.time_start,
self.tables.scan.time_stop,
self.tables.scan.state,
self.tables.scan.state_reason,
self.tables.scan.state_reason_ttl,
self.tables.scan.schema_version,
]
).select_from(flt.select_from)
)
for key, way in sort or []:
if isinstance(key, str) and key in self.fields:
key = self.fields[key]
req = req.order_by(key if way >= 0 else desc(key))
if skip is not None:
req = req.offset(skip)
if limit is not None:
req = req.limit(limit)
return req
def get(self, flt, limit=None, skip=None, sort=None, fields=None):
req = self._get(flt, limit=limit, skip=skip, sort=sort, fields=fields)
for scanrec in self.db.execute(req):
rec = {}
(
rec["_id"],
rec["addr"],
rec["source"],
rec["infos"],
rec["starttime"],
rec["endtime"],
rec["state"],
rec["state_reason"],
rec["state_reason_ttl"],
rec["schema_version"],
) = scanrec
try:
rec["addr"] = self.internal2ip(rec["addr"])
except ValueError:
pass
if not rec["infos"]:
del rec["infos"]
categories = (
select([self.tables.association_scan_category.category])
.where(self.tables.association_scan_category.scan == rec["_id"])
.cte("categories")
)
rec["categories"] = [
cat[0]
for cat in self.db.execute(
select([self.tables.category.name]).where(
self.tables.category.id == categories.c.category
)
)
]
tags = {}
for tag in self.db.execute(
select(
[self.tables.tag.value, self.tables.tag.type, self.tables.tag.info]
).where(self.tables.tag.scan == rec["_id"])
):
rect = {}
rect["value"], rect["type"], info = tag
cur_tag = tags.setdefault(rect["value"], rect)
if info:
cur_tag.setdefault("info", set()).add(info)
if tags:
rec["tags"] = [
dict(tag, info=sorted(tag["info"])) if "info" in tag else tag
for tag in (tags[key] for key in sorted(tags))
]
for port in self.db.execute(
select([self.tables.port]).where(self.tables.port.scan == rec["_id"])
):
recp = {}
(
portid,
_,
recp["port"],
recp["protocol"],
recp["state_state"],
recp["state_reason"],
recp["state_reason_ip"],
recp["state_reason_ttl"],
recp["service_name"],
recp["service_tunnel"],
recp["service_product"],
recp["service_version"],
recp["service_conf"],
recp["service_devicetype"],
recp["service_extrainfo"],
recp["service_hostname"],
recp["service_ostype"],
recp["service_servicefp"],
) = port
try:
recp["state_reason_ip"] = self.internal2ip(recp["state_reason_ip"])
except ValueError:
pass
for fld, value in list(recp.items()):
if value is None:
del recp[fld]
for script in self.db.execute(
select(
[
self.tables.script.name,
self.tables.script.output,
self.tables.script.data,
]
).where(self.tables.script.port == portid)
):
data = dict(
id=script.name,
output=script.output,
**(script.data if script.data else {}),
)
if "ssl-cert" in data:
for cert in data["ssl-cert"]:
for fld in ["not_before", "not_after"]:
try:
cert[fld] = utils.all2datetime(cert[fld])
except KeyError:
pass
recp.setdefault("scripts", []).append(data)
rec.setdefault("ports", []).append(recp)
for trace in self.db.execute(
select([self.tables.trace]).where(self.tables.trace.scan == rec["_id"])
):
curtrace = {}
rec.setdefault("traces", []).append(curtrace)
curtrace["port"] = trace["port"]
curtrace["protocol"] = trace["protocol"]
curtrace["hops"] = []
for hop in self.db.execute(
select([self.tables.hop])
.where(self.tables.hop.trace == trace["id"])
.order_by(self.tables.hop.ttl)
):
values = dict(
(key, hop[key])
for key in ["ipaddr", "ttl", "rtt", "host", "domains"]
)
try:
values["ipaddr"] = self.internal2ip(values["ipaddr"])
except ValueError:
pass
curtrace["hops"].append(values)
for hostname in self.db.execute(
select([self.tables.hostname]).where(
self.tables.hostname.scan == rec["_id"]
)
):
rec.setdefault("hostnames", []).append(
dict((key, hostname[key]) for key in ["name", "type", "domains"])
)
yield rec
def remove(self, host):
"""Removes the host scan result. `host` must be a record as yielded by
.get().
"""
self.db.execute(
delete(self.tables.scan).where(self.tables.scan.id == host["_id"])
)
def remove_many(self, flt):
"""Removes the host scan result. `flt` must be a valid NmapFilter()
instance.
"""
base = flt.query(select([self.tables.scan.id])).cte("base")
self.db.execute(delete(self.tables.scan).where(self.tables.scan.id.in_(base)))
_topstructure = namedtuple(
"topstructure", ["base", "fields", "where", "group_by", "extraselectfrom"]
)
_topstructure.__new__.__defaults__ = (None,) * len(_topstructure._fields)
@classmethod
def searchnonexistent(cls):
return cls.base_filter(main=False)
@classmethod
def _searchobjectid(cls, oid, neg=False):
if len(oid) == 1:
return cls.base_filter(
main=(cls.tables.scan.id != oid[0])
if neg
else (cls.tables.scan.id == oid[0])
)
return cls.base_filter(
            main=(cls.tables.scan.id.notin_(oid))
            if neg
            else (cls.tables.scan.id.in_(oid))
)
@classmethod
def searchversion(cls, version):
return cls.base_filter(main=cls.tables.scan.schema_version == version)
@classmethod
def searchcmp(cls, key, val, cmpop):
if isinstance(key, str):
key = cls.fields[key]
return cls.base_filter(main=key.op(cmpop)(val))
@classmethod
def searchhost(cls, addr, neg=False):
"""Filters (if `neg` == True, filters out) one particular host
(IP address).
"""
if neg:
return cls.base_filter(main=cls.tables.scan.addr != cls.ip2internal(addr))
return cls.base_filter(main=cls.tables.scan.addr == cls.ip2internal(addr))
@classmethod
def searchhosts(cls, hosts, neg=False):
hosts = [cls.ip2internal(host) for host in hosts]
if neg:
return cls.base_filter(main=cls.tables.scan.addr.notin_(hosts))
return cls.base_filter(main=cls.tables.scan.addr.in_(hosts))
@classmethod
def searchrange(cls, start, stop, neg=False):
start, stop = cls.ip2internal(start), cls.ip2internal(stop)
if neg:
return cls.base_filter(
main=or_(cls.tables.scan.addr < start, cls.tables.scan.addr > stop)
)
return cls.base_filter(
main=and_(cls.tables.scan.addr >= start, cls.tables.scan.addr <= stop)
)
@classmethod
def searchdomain(cls, name, neg=False):
return cls.base_filter(
hostname=[
(
not neg,
cls._searchstring_re_inarray(
cls.tables.hostname.id,
cls.tables.hostname.domains,
name,
neg=False,
),
),
]
)
@classmethod
def searchhostname(cls, name, neg=False):
return cls.base_filter(
hostname=[
(
not neg,
cls._searchstring_re(cls.tables.hostname.name, name, neg=False),
),
]
)
@classmethod
def searchcategory(cls, cat, neg=False):
return cls.base_filter(
category=[cls._searchstring_re(cls.tables.category.name, cat, neg=neg)]
)
@classmethod
def searchcountry(cls, country, neg=False):
"""Filters (if `neg` == True, filters out) one particular
country, or a list of countries.
"""
country = utils.country_unalias(country)
return cls.base_filter(
main=cls._searchstring_list(
cls.tables.scan.info["country_code"].astext, country, neg=neg
)
)
@classmethod
def searchcity(cls, city, neg=False):
"""Filters (if `neg` == True, filters out) one particular
city
"""
return cls.base_filter(
main=cls._searchstring_re(
cls.tables.scan.info["city"].astext, city, neg=neg
)
)
@classmethod
def searchasnum(cls, asnum, neg=False):
"""Filters (if `neg` == True, filters out) one or more
particular AS number(s).
"""
return cls.base_filter(
main=cls._searchstring_list(
cls.tables.scan.info["as_num"], asnum, neg=neg, map_=str
)
)
@classmethod
def searchasname(cls, asname, neg=False):
"""Filters (if `neg` == True, filters out) one or more
particular AS.
"""
return cls.base_filter(
            main=cls._searchstring_re(
cls.tables.scan.info["as_name"].astext, asname, neg=neg
)
)
@classmethod
def searchport(cls, port, protocol="tcp", state="open", neg=False):
"""Filters (if `neg` == True, filters out) records with
specified protocol/port at required state. Be aware that when
a host has a lot of ports filtered or closed, it will not
report all of them, but only a summary, and thus the filter
might not work as expected. This filter will always work to
find open ports.
"""
if port == "host":
return cls.base_filter(
port=[
(
True,
(cls.tables.port.port >= 0)
if neg
else (cls.tables.port.port == -1),
),
]
)
return cls.base_filter(
port=[
(
not neg,
and_(
cls.tables.port.port == port,
cls.tables.port.protocol == protocol,
cls.tables.port.state == state,
),
),
]
)
@classmethod
def searchportsother(cls, ports, protocol="tcp", state="open"):
"""Filters records with at least one port other than those
listed in `ports` with state `state`.
"""
return cls.base_filter(
port=[
(
True,
and_(
or_(
cls.tables.port.port.notin_(ports),
cls.tables.port.protocol != protocol,
),
cls.tables.port.state == state,
),
)
]
)
@classmethod
def searchports(cls, ports, protocol="tcp", state="open", neg=False, any_=False):
if any_:
if neg:
raise ValueError("searchports: cannot set both neg and any_")
return cls.base_filter(
port=[
(
True,
and_(
cls.tables.port.port.in_(ports),
cls.tables.port.protocol == protocol,
cls.tables.port.state == state,
),
),
]
)
return cls.flt_and(
*(
cls.searchport(port, protocol=protocol, state=state, neg=neg)
for port in ports
)
)
@classmethod
def searchcountopenports(cls, minn=None, maxn=None, neg=False):
"Filters records with open port number between minn and maxn"
assert minn is not None or maxn is not None
req = select([column("scan", Integer)]).select_from(
select([cls.tables.port.scan.label("scan"), func.count().label("count")])
.where(cls.tables.port.state == "open")
.group_by(cls.tables.port.scan)
.alias("pcnt")
)
if minn == maxn:
req = req.where(column("count") == minn)
else:
if minn is not None:
req = req.where(column("count") >= minn)
if maxn is not None:
req = req.where(column("count") <= maxn)
return cls.base_filter(
main=cls.tables.scan.id.notin_(req) if neg else cls.tables.scan.id.in_(req)
)
@classmethod
def searchopenport(cls, neg=False):
"Filters records with at least one open port."
return cls.base_filter(port=[(not neg, cls.tables.port.state == "open")])
@classmethod
def searchservice(cls, srv, port=None, protocol=None):
"""Search an open port with a particular service."""
if srv is False:
req = cls.tables.port.service_name == None # noqa: E711
elif isinstance(srv, list):
req = cls.tables.port.service_name.in_(srv)
else:
req = cls._searchstring_re(cls.tables.port.service_name, srv)
if port is not None:
req = and_(req, cls.tables.port.port == port)
if protocol is not None:
req = and_(req, cls.tables.port.protocol == protocol)
return cls.base_filter(port=[(True, req)])
@classmethod
def searchproduct(
cls, product=None, version=None, service=None, port=None, protocol=None
):
"""Search a port with a particular `product`. It is (much)
better to provide the `service` name and/or `port` number
since those fields are indexed.
"""
req = True
if product is not None:
if product is False:
req = and_(
req,
cls.tables.port.service_product == None, # noqa: E711
)
elif isinstance(product, list):
req = and_(
req,
cls.tables.port.service_product.in_(product),
)
else:
req = and_(
req,
cls._searchstring_re(
cls.tables.port.service_product,
product,
),
)
if version is not None:
if version is False:
req = and_(
req,
cls.tables.port.service_version == None, # noqa: E711
)
elif isinstance(version, list):
req = and_(
req,
cls.tables.port.service_version.in_(version),
)
else:
req = and_(
req, cls._searchstring_re(cls.tables.port.service_version, version)
)
if service is not None:
if service is False:
req = and_(
req,
cls.tables.port.service_name == None, # noqa: E711
)
elif isinstance(service, list):
req = and_(
req,
cls.tables.port.service_name.in_(service),
)
else:
req = and_(
req, cls._searchstring_re(cls.tables.port.service_name, service)
)
if port is not None:
req = and_(req, cls.tables.port.port == port)
if protocol is not None:
req = and_(req, cls.tables.port.protocol == protocol)
return cls.base_filter(port=[(True, req)])
@classmethod
def searchscript(cls, name=None, output=None, values=None, neg=False):
"""Search a particular content in the scripts results.
If neg is True, filter out scan results which have at
        least one script matching the name/output/value.
"""
req = True
if isinstance(name, list):
req = and_(req, cls.tables.script.name.in_(name))
elif name is not None:
req = and_(
req, cls._searchstring_re(cls.tables.script.name, name, neg=False)
)
if output is not None:
req = and_(
req, cls._searchstring_re(cls.tables.script.output, output, neg=False)
)
if values:
if isinstance(name, list):
all_keys = set(ALIASES_TABLE_ELEMS.get(n, n) for n in name)
if len(all_keys) != 1:
raise TypeError(
".searchscript() needs similar `name` values when using a `values` arg"
)
basekey = all_keys.pop()
elif not isinstance(name, str):
raise TypeError(
".searchscript() needs a `name` arg when using a `values` arg"
)
else:
basekey = ALIASES_TABLE_ELEMS.get(name, name)
if isinstance(values, (str, utils.REGEXP_T)):
needunwind = sorted(set(cls.needunwind_script(basekey)))
else:
needunwind = sorted(
set(
unwind
for subkey in values
for unwind in cls.needunwind_script(
"%s.%s" % (basekey, subkey),
)
)
)
def _find_subkey(key):
lastmatch = None
if key is None:
key = []
else:
key = key.split(".")
for subkey in needunwind:
subkey = subkey.split(".")[1:]
if len(key) < len(subkey):
continue
if key == subkey:
return (".".join([basekey] + subkey), None)
if subkey == key[: len(subkey)]:
lastmatch = (
".".join([basekey] + subkey),
".".join(key[len(subkey) :]),
)
return lastmatch
def _to_json(key, value):
key = key.split(".")
result = value
while key:
result = {key.pop(): result}
return result
if isinstance(values, (str, utils.REGEXP_T)):
kv_generator = [(None, values)]
else:
kv_generator = values.items()
for key, value in kv_generator:
subkey = _find_subkey(key)
if subkey is None:
if isinstance(value, utils.REGEXP_T):
base = cls.tables.script.data.op("->")(basekey)
key = key.split(".")
lastkey = key.pop()
for subkey in key:
base = base.op("->")(subkey)
base = base.op("->>")(lastkey)
req = and_(
req,
cls._searchstring_re(base, value, neg=False),
)
elif isinstance(value, bool):
base = cls.tables.script.data.op("->")(basekey)
for subkey in key.split("."):
base = base.op("->")(subkey)
if neg:
req = and_(req, base.cast(Boolean) != value)
else:
req = and_(req, base.cast(Boolean) == value)
else:
req = and_(
req,
cls.tables.script.data.contains(
_to_json("%s.%s" % (basekey, key), value)
),
)
elif subkey[1] is None:
req = and_(
req,
cls._searchstring_re(
column(subkey[0].replace(".", "_").replace("-", "_")).op(
"->>"
)(0),
value,
neg=False,
),
)
elif "." in subkey[1]:
firstpart, tail = subkey[1].split(".", 1)
req = and_(
req,
column(subkey[0].replace(".", "_").replace("-", "_"))
.op("->")(firstpart)
.op("@>")(cast(_to_json(tail, value), JSONB)),
)
elif isinstance(value, bool):
base = (
column(subkey[0].replace(".", "_").replace("-", "_"))
.op("->")(subkey[1])
.cast(Boolean)
)
if neg:
req = and_(req, base != value)
else:
req = and_(req, base == value)
else:
req = and_(
req,
cls._searchstring_re(
column(subkey[0].replace(".", "_").replace("-", "_")).op(
"->>"
)(subkey[1]),
value,
neg=False,
),
)
return cls.base_filter(
script=[
(
not neg,
(
req,
[
func.jsonb_array_elements(
cls.tables.script.data[subkey2]
).alias(subkey2.replace(".", "_").replace("-", "_"))
for subkey2 in needunwind
],
),
)
]
)
return cls.base_filter(script=[(not neg, req)])
@classmethod
def searchsvchostname(cls, hostname):
return cls.base_filter(
port=[
(True, cls._searchstring_re(cls.tables.port.service_hostname, hostname))
]
)
@classmethod
def searchwebmin(cls):
return cls.base_filter(
port=[
(
True,
and_(
cls.tables.port.service_name == "http",
cls.tables.port.service_product == "MiniServ",
cls.tables.port.service_extrainfo != "Webmin httpd",
),
)
]
)
@classmethod
def searchx11(cls):
return cls.base_filter(
port=[
(
True,
and_(
cls.tables.port.service_name == "X11",
cls.tables.port.service_extrainfo != "access denied",
),
)
]
)
def searchtimerange(self, start, stop, neg=False):
start = utils.all2datetime(start)
stop = utils.all2datetime(stop)
if neg:
return self.base_filter(
main=(self.tables.scan.time_start < start)
| (self.tables.scan.time_stop > stop)
)
return self.base_filter(
main=(self.tables.scan.time_start >= start)
& (self.tables.scan.time_stop <= stop)
)
@classmethod
def searchfile(cls, fname=None, scripts=None):
"""Search shared files from a file name (either a string or a
regexp), only from scripts using the "ls" NSE module.
"""
if fname is None:
req = cls.tables.script.data.op("@>")(
'{"ls": {"volumes": [{"files": []}]}}'
)
else:
if isinstance(fname, (utils.REGEXP_T, list)):
base1 = (
select(
[
cls.tables.script.port,
func.jsonb_array_elements(
func.jsonb_array_elements(
cls.tables.script.data["ls"]["volumes"]
).op("->")("files")
)
.op("->>")("filename")
.label("filename"),
]
)
.where(
cls.tables.script.data.op("@>")(
'{"ls": {"volumes": [{"files": []}]}}'
)
)
.cte("base1")
)
if isinstance(fname, list):
where_clause = column("filename").in_(fname)
else:
where_clause = column("filename").op(
"~*" if (fname.flags & re.IGNORECASE) else "~"
)(fname.pattern)
base2 = (
select([column("port", Integer)])
.select_from(base1)
.where(where_clause)
)
return cls.base_filter(port=[(True, cls.tables.port.id.in_(base2))])
req = cls.tables.script.data.op("@>")(
json.dumps({"ls": {"volumes": [{"files": [{"filename": fname}]}]}})
)
if scripts is None:
return cls.base_filter(script=[(True, req)])
if isinstance(scripts, str):
scripts = [scripts]
if len(scripts) == 1:
return cls.base_filter(
script=[(True, and_(cls.tables.script.name == scripts.pop(), req))]
)
return cls.base_filter(
script=[(True, and_(cls.tables.script.name.in_(scripts), req))]
)
@classmethod
def searchhttptitle(cls, title):
return cls.base_filter(
script=[
(True, cls.tables.script.name.in_(["http-title", "html-title"])),
(True, cls._searchstring_re(cls.tables.script.output, title)),
]
)
@classmethod
def searchhop(cls, hop, ttl=None, neg=False):
res = cls.tables.hop.ipaddr == cls.ip2internal(hop)
if ttl is not None:
res &= cls.tables.hop.ttl == ttl
return cls.base_filter(trace=[not_(res) if neg else res])
@classmethod
def searchhopdomain(cls, hop, neg=False):
return cls.base_filter(
trace=[
cls._searchstring_re_inarray(
cls.tables.hop.id, cls.tables.hop.domains, hop, neg=neg
)
]
)
@classmethod
def searchhopname(cls, hop, neg=False):
return cls.base_filter(
trace=[cls._searchstring_re(cls.tables.hop.host, hop, neg=neg)]
)
@classmethod
def searchdevicetype(cls, devtype):
return cls.base_filter(
port=[
(
True,
cls._searchstring_re(cls.tables.port.service_devicetype, devtype),
)
]
)
@classmethod
def searchnetdev(cls):
return cls.base_filter(
port=[
(
True,
cls.tables.port.service_devicetype.in_(
[
"bridge",
"broadband router",
"firewall",
"hub",
"load balancer",
"proxy server",
"router",
"switch",
"WAP",
]
),
)
]
)
@classmethod
def searchphonedev(cls):
return cls.base_filter(
port=[
(
True,
cls.tables.port.service_devicetype.in_(
[
"PBX",
"phone",
"telecom-misc",
"VoIP adapter",
"VoIP phone",
]
),
)
]
)
@classmethod
def searchldapanon(cls):
return cls.base_filter(
port=[
(
True,
cls.tables.port.service_extrainfo == "Anonymous bind OK",
)
]
)
@classmethod
def searchvsftpdbackdoor(cls):
return cls.base_filter(
port=[
(
True,
and_(
cls.tables.port.protocol == "tcp",
cls.tables.port.state == "open",
cls.tables.port.service_product == "vsftpd",
cls.tables.port.service_version == "2.3.4",
),
)
]
)
@classmethod
def searchhassh(cls, value_or_hash=None, server=None):
if server is None:
return cls._searchhassh(value_or_hash=value_or_hash)
if server:
portflt = cls.tables.port.port != -1
else:
portflt = cls.tables.port.port == -1
if value_or_hash is None:
return cls.base_filter(
script=[
(
True,
and_(
portflt,
cls.tables.script.name == "ssh2-enum-algos",
),
)
]
)
key, value = cls._ja3keyvalue(value_or_hash)
return cls.base_filter(
script=[
(
True,
and_(
portflt,
cls.tables.script.name == "ssh2-enum-algos",
cls._searchstring_re(
cls.tables.script.data.op("->")("ssh2-enum-algos")
.op("->")("hassh")
.op("->>")(key),
value,
),
),
)
]
)
@classmethod
def searchtag(cls, tag=None, neg=False):
"""Filters (if `neg` == True, filters out) one particular tag (records
may have zero, one or more tags).
`tag` may be the value (as a str) or the tag (as a Tag, e.g.:
`{"value": value, "info": info}`).
"""
if not tag:
return cls.base_filter(tag=[(not neg, True)])
if not isinstance(tag, dict):
tag = {"value": tag}
req = [
cls._searchstring_re(getattr(cls.tables.tag, key), value)
for key, value in tag.items()
]
return cls.base_filter(tag=[(not neg, and_(*req))])
class SQLDBNmap(SQLDBActive, DBNmap):
table_layout = namedtuple(
"nmap_layout",
[
"scanfile",
"category",
"scan",
"hostname",
"port",
"script",
"tag",
"trace",
"hop",
"association_scan_hostname",
"association_scan_category",
"association_scan_scanfile",
],
)
tables = table_layout(
N_ScanFile,
N_Category,
N_Scan,
N_Hostname,
N_Port,
N_Script,
N_Tag,
N_Trace,
N_Hop,
N_Association_Scan_Hostname,
N_Association_Scan_Category,
N_Association_Scan_ScanFile,
)
fields = {
"_id": N_Scan.id,
"addr": N_Scan.addr,
"source": N_Scan.source,
"scanid": N_Association_Scan_ScanFile.scan_file,
"starttime": N_Scan.time_start,
"endtime": N_Scan.time_stop,
"infos": N_Scan.info,
"ports": N_Port,
"tags": N_Tag,
"state": N_Scan.state_reason_ttl,
"state_reason": N_Scan.state_reason_ttl,
"state_reason_ttl": N_Scan.state_reason_ttl,
"schema_version": N_Scan.schema_version,
"categories": N_Category.name,
"hostnames.name": N_Hostname.name,
"hostnames.domains": N_Hostname.domains,
}
base_filter = NmapFilter
content_handler = xmlnmap.Nmap2DB
def store_or_merge_host(self, host):
self.store_host(host)
def get(self, flt, limit=None, skip=None, sort=None, **kargs):
for rec in super().get(flt, limit=limit, skip=skip, sort=sort, **kargs):
rec["scanid"] = [
scanfile[0]
for scanfile in self.db.execute(
select([self.tables.association_scan_scanfile.scan_file]).where(
self.tables.association_scan_scanfile.scan == rec["_id"]
)
)
]
yield rec
def _remove_unused_scan_files(self):
"""Removes unused scan files, useful when some scan results have been
removed.
"""
base = select([self.tables.association_scan_scanfile.scan_file]).cte("base")
self.db.execute(
delete(self.tables.scanfile).where(self.tables.scanfile.sha256.notin_(base))
)
def remove(self, host):
"""Removes the host scan result. `host` must be a record as yielded by
.get().
The scan files that are no longer linked to a scan are removed at the
end of the call.
"""
super().remove(host)
self._remove_unused_scan_files()
def remove_many(self, flt):
"""Removes the host scan result. `flt` must be a valid NmapFilter()
instance.
The scan files that are no longer linked to a scan are removed at the
end of the call.
"""
super().remove_many(flt)
self._remove_unused_scan_files()
@staticmethod
def getscanids(host):
return host["scanid"]
def getscan(self, scanid):
if isinstance(scanid, (str, bytes)) and len(scanid) == 64:
scanid = utils.decode_hex(scanid)
return self.db.execute(
select([self.tables.scanfile]).where(self.tables.scanfile.sha256 == scanid)
).fetchone()
def is_scan_present(self, scanid):
return bool(
self.db.execute(
select([True])
.where(self.tables.scanfile.sha256 == utils.decode_hex(scanid))
.limit(1)
).fetchone()
)
@classmethod
def searchsource(cls, src, neg=False):
if isinstance(src, list):
if neg:
return cls.base_filter(main=(cls.tables.scan.source.notin_(src)))
return cls.base_filter(main=(cls.tables.scan.source.in_(src)))
return cls.base_filter(
main=cls._searchstring_re(cls.tables.scan.source, src, neg=neg)
)
class SQLDBView(SQLDBActive, DBView):
table_layout = namedtuple(
"view_layout",
[
"category",
"scan",
"hostname",
"port",
"script",
"tag",
"trace",
"hop",
"association_scan_hostname",
"association_scan_category",
],
)
tables = table_layout(
V_Category,
V_Scan,
V_Hostname,
V_Port,
V_Script,
V_Tag,
V_Trace,
V_Hop,
V_Association_Scan_Hostname,
V_Association_Scan_Category,
)
fields = {
"_id": V_Scan.id,
"addr": V_Scan.addr,
"source": V_Scan.source,
"starttime": V_Scan.time_start,
"endtime": V_Scan.time_stop,
"infos": V_Scan.info,
"ports": V_Port,
"tags": V_Tag,
"state": V_Scan.state_reason_ttl,
"state_reason": V_Scan.state_reason_ttl,
"state_reason_ttl": V_Scan.state_reason_ttl,
"schema_version": V_Scan.schema_version,
"categories": V_Category.name,
"hostnames.name": V_Hostname.name,
"hostnames.domains": V_Hostname.domains,
}
base_filter = ViewFilter
def store_or_merge_host(self, host):
# FIXME: may cause performance issues
self.start_store_hosts()
self.store_host(host)
self.stop_store_hosts()
@classmethod
def searchsource(cls, src, neg=False):
return cls.base_filter(
main=cls._searchstring_re_inarray(
cls.tables.scan.id, cls.tables.scan.source, src, neg=neg
)
)
class PassiveFilter(Filter):
def __init__(self, main=None, tables=None):
self.main = main
self.tables = SQLDBPassive.tables if tables is None else tables
@property
def all_queries(self):
return {
"main": self.main,
"tables": self.tables,
}
def __bool__(self):
return self.main is not None
def copy(self):
return self.__class__(
main=self.main,
tables=self.tables,
)
def __and__(self, other):
if self.tables != other.tables:
raise ValueError("Cannot 'AND' two filters on separate tables")
return self.__class__(
main=self.fltand(self.main, other.main),
tables=self.tables,
)
def __or__(self, other):
if self.tables != other.tables:
raise ValueError("Cannot 'OR' two filters on separate tables")
return self.__class__(
main=self.fltor(self.main, other.main),
tables=self.tables,
)
@property
def select_from(self):
return self.tables.passive
def query(self, req):
if self.main is not None:
req = req.where(self.main)
return req
class SQLDBPassive(SQLDB, DBPassive):
table_layout = namedtuple("passive_layout", ["passive"])
tables = table_layout(Passive)
fields = {
"_id": Passive.id,
"addr": Passive.addr,
"sensor": Passive.sensor,
"count": Passive.count,
"firstseen": Passive.firstseen,
"lastseen": Passive.lastseen,
"distance": Passive.info.op("->>")("distance"),
"signature": Passive.info.op("->>")("signature"),
"version": Passive.info.op("->>")("version"),
"infos": Passive.moreinfo,
"infos.domain": Passive.moreinfo.op("->>")("domain"),
"infos.issuer": Passive.moreinfo.op("->>")("issuer"),
"infos.issuer_text": Passive.moreinfo.op("->>")("issuer_text"),
"infos.md5": Passive.moreinfo.op("->>")("md5"),
"infos.pubkey.type": (Passive.moreinfo.op("->")("pubkey").op("->>")("type")),
"infos.self_signed": Passive.moreinfo.op("->")("self_signed"),
"infos.san": Passive.moreinfo.op("->>")("san"),
"infos.sha1": Passive.moreinfo.op("->>")("sha1"),
"infos.sha256": Passive.moreinfo.op("->>")("sha256"),
"infos.subject": Passive.moreinfo.op("->>")("subject"),
"infos.subject_text": Passive.moreinfo.op("->>")("subject_text"),
"infos.raw": Passive.moreinfo.op("->>")("raw"),
"infos.domaintarget": Passive.moreinfo.op("->>")("domaintarget"),
"infos.username": Passive.moreinfo.op("->>")("username"),
"infos.password": Passive.moreinfo.op("->>")("password"),
"infos.service_name": Passive.moreinfo.op("->>")("service_name"),
"infos.service_ostype": Passive.moreinfo.op("->>")("service_ostype"),
"infos.service_product": Passive.moreinfo.op("->>")("service_product"),
"infos.service_version": Passive.moreinfo.op("->>")("service_version"),
"infos.service_extrainfo": Passive.moreinfo.op("->>")("service_extrainfo"),
"port": Passive.port,
"recontype": Passive.recontype,
"source": Passive.source,
"targetval": Passive.targetval,
"value": Passive.value,
"schema_version": Passive.schema_version,
}
base_filter = PassiveFilter
def count(self, flt):
return self.db.execute(
flt.query(select([func.count()]).select_from(flt.select_from))
).fetchone()[0]
def remove(self, spec_or_id):
if not isinstance(spec_or_id, Filter):
spec_or_id = self.searchobjectid(spec_or_id)
base = spec_or_id.query(
select([self.tables.passive.id]).select_from(spec_or_id.select_from)
).cte("base")
self.db.execute(
delete(self.tables.passive).where(self.tables.passive.id.in_(base))
)
def _get(self, flt, limit=None, skip=None, sort=None, fields=None):
if fields is not None:
utils.LOGGER.warning("Argument 'fields' provided but unused")
req = flt.query(
select(
[
self.tables.passive.id.label("_id"),
self.tables.passive.addr,
self.tables.passive.sensor,
self.tables.passive.count,
self.tables.passive.firstseen,
self.tables.passive.lastseen,
self.tables.passive.port,
self.tables.passive.recontype,
self.tables.passive.source,
self.tables.passive.targetval,
self.tables.passive.value,
self.tables.passive.info,
self.tables.passive.moreinfo,
self.tables.passive.schema_version,
]
).select_from(flt.select_from)
)
for key, way in sort or []:
req = req.order_by(key if way >= 0 else desc(key))
if skip is not None:
req = req.offset(skip)
if limit is not None:
req = req.limit(limit)
return req
def get(self, flt, limit=None, skip=None, sort=None, fields=None):
"""Queries the passive database with the provided filter "flt", and
returns a generator.
"""
req = self._get(flt, limit=limit, skip=skip, sort=sort, fields=fields)
for rec in self.db.execute(req):
rec = dict((key, value) for key, value in rec.items() if value is not None)
try:
rec["addr"] = self.internal2ip(rec["addr"])
except (KeyError, ValueError):
pass
rec["infos"] = dict(rec.pop("info"), **rec.pop("moreinfo"))
if rec.get("recontype") in {"SSL_SERVER", "SSL_CLIENT"} and rec.get(
"source"
) in {
"cert",
"cacert",
}:
rec["value"] = self.from_binary(rec["value"])
for fld in ["not_before", "not_after"]:
try:
rec["infos"][fld] = utils.all2datetime(rec["infos"][fld])
except KeyError:
pass
if rec.get("port") == -1:
del rec["port"]
yield rec
def get_one(self, flt, skip=None):
"""Queries the passive database with the provided filter "flt", and
returns the first result, or None if no result exists."""
return next(self.get(flt, limit=1, skip=skip))
def _insert_or_update(self, timestamp, values, lastseen=None, replacecount=False):
raise NotImplementedError()
def insert_or_update(
self, timestamp, spec, getinfos=None, lastseen=None, replacecount=False
):
if spec is None:
return
try:
spec["addr"] = self.ip2internal(spec["addr"])
except (KeyError, ValueError):
pass
if getinfos is not None:
spec.update(getinfos(spec))
try:
spec.update(spec.pop("infos"))
except KeyError:
pass
addr = spec.pop("addr", None)
timestamp = utils.all2datetime(timestamp)
if lastseen is not None:
lastseen = utils.all2datetime(lastseen)
if addr:
addr = self.ip2internal(addr)
if spec["recontype"] in {"SSL_SERVER", "SSL_CLIENT"} and spec["source"] in {
"cert",
"cacert",
}:
for fld in ["not_before", "not_after"]:
if fld not in spec:
continue
if isinstance(spec[fld], datetime.datetime):
spec[fld] = spec[fld].timestamp()
elif isinstance(spec[fld], str):
spec[fld] = utils.all2datetime(spec[fld]).timestamp()
otherfields = dict(
(key, spec.pop(key, ""))
for key in ["sensor", "source", "targetval", "recontype", "value"]
)
info = dict(
(key, spec.pop(key))
for key in ["distance", "signature", "version"]
if key in spec
)
vals = {
"addr": addr,
# sensor: otherfields
"count": spec.pop("count", 1),
"firstseen": timestamp,
"lastseen": lastseen or timestamp,
"port": spec.pop("port", -1),
# source, targetval, recontype, value: otherfields
"info": info,
"moreinfo": spec,
"schema_version": spec.pop("schema_version", None),
}
vals.update(otherfields)
self._insert_or_update(
timestamp, vals, lastseen=lastseen, replacecount=replacecount
)
def topvalues(
self,
field,
flt=None,
topnbr=10,
sort=None,
limit=None,
skip=None,
least=False,
distinct=True,
):
"""This method produces top values for a given field.
If `distinct` is True (default), the top values are computed
by distinct events. If it is False, they are computed based on
the "count" field.
"""
more_filter = None
if flt is None:
flt = PassiveFilter()
if field == "domains":
flt = self.flt_and(flt, self.searchdns())
field = func.jsonb_array_elements(self.tables.passive.moreinfo["domain"])
elif field.startswith("domains:"):
subfield = field[8:]
field = func.jsonb_array_elements_text(
self.tables.passive.moreinfo["domain"]
).label("field")
if subfield.isdigit():
flt = self.flt_and(flt, self.searchdns())
def more_filter(base):
return (
func.length(base.field)
- func.length(func.replace(base.field, ".", ""))
== int(subfield) - 1
)
# another option would be:
# def more_filter(base):
# return base.field.op("~")("^([^\\.]+\\.){%d}[^\\.]+$" %
# (int(subfield) - 1))
elif ":" in subfield:
subfield, level = subfield.split(":", 1)
flt = self.flt_and(flt, self.searchdns(subfield, subdomains=True))
def more_filter(base):
return base.field.op("~")(
"^([^\\.]+\\.){%d}%s$"
% (int(level) - subfield.count(".") - 1, re.escape(subfield))
)
else:
flt = self.flt_and(flt, self.searchdns(subfield, subdomains=True))
def more_filter(base):
return base.field.op("~")("\\.%s$" % re.escape(subfield))
elif field == "net" or field.startswith("net:"):
info = field[4:]
info = int(info) if info else 24
field = func.set_masklen(text("addr::cidr"), info)
elif field == "hassh" or (field.startswith("hassh") and field[5] in "-."):
if "." in field:
field, subfield = field.split(".", 1)
else:
subfield = "md5"
if field == "hassh-server":
flt = self.flt_and(flt, self.searchhassh(server=True))
elif field == "hassh-client":
flt = self.flt_and(flt, self.searchhassh(server=False))
elif field == "hassh":
flt = self.flt_and(flt, self.searchhassh())
else:
raise ValueError("Unknown field %s" % field)
if subfield == "md5":
field = self.tables.passive.value
else:
field = self.tables.passive.moreinfo[subfield]
if isinstance(field, str):
field = self.fields[field]
if field is not None and field == self.fields["addr"]:
outputproc = self.internal2ip
else:
def outputproc(val):
return val
order = "count" if least else desc("count")
if more_filter is None:
req = flt.query(
select(
[
(
func.count()
if distinct
else func.sum(self.tables.passive.count)
).label("count"),
field,
]
)
.select_from(flt.select_from)
.group_by(field)
)
else:
base1 = flt.query(
select(
[
(
func.count()
if distinct
else func.sum(self.tables.passive.count)
).label("count"),
field,
]
)
.select_from(flt.select_from)
.group_by(field)
).cte("base1")
req = select([base1.c.count, base1.c.field]).where(more_filter(base1.c))
return (
{
"count": result[0],
"_id": outputproc(result[1:] if len(result) > 2 else result[1]),
}
for result in self.db.execute(req.order_by(order).limit(topnbr))
)
def _features_port_list(self, flt, yieldall, use_service, use_product, use_version):
# This is in SQLDBPassive because it **should** work with
# SQLite. However, because ACCESS_TXT does not work well with
        # the result processor, it does not. This is a problem similar to
        # the one .topvalues() has with JSON fields.
flt = self.flt_and(flt, self.searchport(-1, neg=True))
if use_version:
fields = [
self.tables.passive.port,
self.tables.passive.moreinfo.op("->>")("service_name"),
self.tables.passive.moreinfo.op("->>")("service_product"),
self.tables.passive.moreinfo.op("->>")("service_version"),
]
elif use_product:
fields = [
self.tables.passive.port,
self.tables.passive.moreinfo.op("->>")("service_name"),
self.tables.passive.moreinfo.op("->>")("service_product"),
]
elif use_service:
fields = [
self.tables.passive.port,
self.tables.passive.moreinfo.op("->>")("service_name"),
]
else:
fields = [self.tables.passive.port]
req = flt.query(select(fields).group_by(*fields))
if not yieldall:
req = req.order_by(*(nullsfirst(fld) for fld in fields))
return self.db.execute(req)
# results will be modified, we cannot keep a RowProxy
# instance, so we convert the results to lists
return (list(rec) for rec in self.db.execute(req))
@classmethod
def searchnonexistent(cls):
return PassiveFilter(main=False)
@classmethod
def _searchobjectid(cls, oid, neg=False):
if len(oid) == 1:
return PassiveFilter(
main=(cls.tables.passive.id != oid[0])
if neg
else (cls.tables.passive.id == oid[0])
)
return PassiveFilter(
            main=(cls.tables.passive.id.notin_(oid))
            if neg
            else (cls.tables.passive.id.in_(oid))
)
@classmethod
def searchcmp(cls, key, val, cmpop):
if isinstance(key, str):
key = cls.fields[key]
return PassiveFilter(main=key.op(cmpop)(val))
@classmethod
def searchval(cls, key, val):
if isinstance(key, str):
key = cls.fields[key]
if isinstance(val, utils.REGEXP_T):
return PassiveFilter(
main=key.op("~*" if (val.flags & re.IGNORECASE) else "~")(val.pattern)
)
return cls.searchcmp(key, val, "=")
@classmethod
def searchhost(cls, addr, neg=False):
"""Filters (if `neg` == True, filters out) one particular host
(IP address).
"""
addr = cls.ip2internal(addr)
return PassiveFilter(
main=(cls.tables.passive.addr != addr)
if neg
else (cls.tables.passive.addr == addr),
)
@classmethod
def searchhosts(cls, hosts, neg=False):
hosts = [cls.ip2internal(host) for host in hosts]
return PassiveFilter(
main=(
cls.tables.passive.addr.notin_(hosts)
if neg
else cls.tables.passive.addr.in_(hosts)
),
)
@classmethod
def searchrange(cls, start, stop, neg=False):
start, stop = cls.ip2internal(start), cls.ip2internal(stop)
if neg:
return PassiveFilter(
main=or_(
cls.tables.passive.addr < start, cls.tables.passive.addr > stop
)
)
return PassiveFilter(
main=and_(cls.tables.passive.addr >= start, cls.tables.passive.addr <= stop)
)
@classmethod
def searchranges(cls, ranges, neg=False):
"""Filters (if `neg` == True, filters out) some IP address ranges.
`ranges` is an instance of ivre.geoiputils.IPRanges().
"""
flt = []
for start, stop in ranges.iter_ranges():
start, stop = cls.ip2internal(start), cls.ip2internal(stop)
flt.append(
(or_ if neg else and_)(
cls.tables.passive.addr >= start, cls.tables.passive.addr <= stop
)
)
if flt:
return PassiveFilter(main=(and_ if neg else or_)(*flt))
return cls.flt_empty if neg else cls.searchnonexistent()
@classmethod
def searchrecontype(cls, rectype, neg=False):
if isinstance(rectype, list):
if neg:
return PassiveFilter(
main=(cls.tables.passive.recontype.notin_(rectype))
)
return PassiveFilter(main=(cls.tables.passive.recontype.in_(rectype)))
return PassiveFilter(
main=cls._searchstring_re(cls.tables.passive.recontype, rectype, neg=neg)
)
@classmethod
def searchdns(cls, name=None, reverse=False, dnstype=None, subdomains=False):
if name is not None:
if isinstance(name, list):
if len(name) == 1:
name = name[0]
else:
return cls.flt_or(
*(
cls._searchdns(
name=domain,
reverse=reverse,
dnstype=dnstype,
subdomains=subdomains,
)
for domain in name
)
)
return cls._searchdns(
name=name, reverse=reverse, dnstype=dnstype, subdomains=subdomains
)
@classmethod
def _searchdns(cls, name=None, reverse=False, dnstype=None, subdomains=False):
cnd = cls.tables.passive.recontype == "DNS_ANSWER"
if name is not None:
cnd &= (
(
cls.tables.passive.moreinfo[
"domaintarget" if reverse else "domain"
].has_key( # noqa: W601
name
)
)
if subdomains
else cls._searchstring_re(
cls.tables.passive.targetval
if reverse
else cls.tables.passive.value,
name,
)
)
if dnstype is not None:
cnd &= cls.tables.passive.source.op("~")("^%s-" % dnstype.upper())
return PassiveFilter(main=cnd)
@classmethod
def searchmac(cls, mac=None, neg=False):
if mac is None:
if neg:
return PassiveFilter(main=cls.tables.passive.recontype != "MAC_ADDRESS")
return PassiveFilter(main=cls.tables.passive.recontype == "MAC_ADDRESS")
value = cls.tables.passive.value
if isinstance(mac, utils.REGEXP_T):
cnd = value.op("~*")(mac.pattern)
if neg:
cnd = not_(cnd)
elif neg:
cnd = value != mac
else:
cnd = value == mac
return PassiveFilter(main=(cls.tables.passive.recontype == "MAC_ADDRESS") & cnd)
@classmethod
def searchuseragent(cls, useragent=None, neg=False):
if neg:
raise ValueError(
"searchuseragent([...], neg=True) is not " "supported in passive DB."
)
if useragent is None:
return PassiveFilter(
main=(
(cls.tables.passive.recontype == "HTTP_CLIENT_HEADER")
& (cls.tables.passive.source == "USER-AGENT")
)
)
return PassiveFilter(
main=(
(cls.tables.passive.recontype == "HTTP_CLIENT_HEADER")
& (cls.tables.passive.source == "USER-AGENT")
& (cls._searchstring_re(cls.tables.passive.value, useragent))
)
)
@classmethod
def searchftpauth(cls):
return PassiveFilter(
main=(
(cls.tables.passive.recontype == "FTP_CLIENT")
| (cls.tables.passive.recontype == "FTP_SERVER")
)
)
@classmethod
def searchpopauth(cls):
return PassiveFilter(
main=(
(cls.tables.passive.recontype == "POP_CLIENT")
| (cls.tables.passive.recontype == "POP_SERVER")
)
)
@classmethod
def searchbasicauth(cls):
return PassiveFilter(
main=(
(
(cls.tables.passive.recontype == "HTTP_CLIENT_HEADER")
| (cls.tables.passive.recontype == "HTTP_CLIENT_HEADER_SERVER")
)
& (
(cls.tables.passive.source == "AUTHORIZATION")
| (cls.tables.passive.source == "PROXY-AUTHORIZATION")
)
& cls.tables.passive.value.op("~*")("^Basic")
)
)
@classmethod
def searchhttpauth(cls):
return PassiveFilter(
main=(
(
(cls.tables.passive.recontype == "HTTP_CLIENT_HEADER")
| (cls.tables.passive.recontype == "HTTP_CLIENT_HEADER_SERVER")
)
& (
(cls.tables.passive.source == "AUTHORIZATION")
| (cls.tables.passive.source == "PROXY-AUTHORIZATION")
)
)
)
@classmethod
def searchcert(
cls,
keytype=None,
md5=None,
sha1=None,
sha256=None,
subject=None,
issuer=None,
self_signed=None,
pkmd5=None,
pksha1=None,
pksha256=None,
cacert=False,
):
return PassiveFilter(
main=(cls.tables.passive.recontype == "SSL_SERVER")
& (cls.tables.passive.source == ("cacert" if cacert else "cert"))
& (
cls._searchcert(
cls.tables.passive.moreinfo,
keytype=keytype,
md5=md5,
sha1=sha1,
sha256=sha256,
subject=subject,
issuer=issuer,
self_signed=self_signed,
pkmd5=pkmd5,
pksha1=pksha1,
pksha256=pksha256,
)
)
)
@classmethod
def _searchja3(cls, value_or_hash=None):
if not value_or_hash:
return True
key, value = cls._ja3keyvalue(value_or_hash)
try:
return {
"md5": cls.tables.passive.value,
"sha1": cls.tables.passive.moreinfo.op("->>")("sha1"),
"sha256": cls.tables.passive.moreinfo.op("->>")("sha256"),
}[key] == value
except KeyError:
return cls._searchstring_re(
cls.tables.passive.moreinfo.op("->>")("raw"),
value,
)
@classmethod
def searchja3client(cls, value_or_hash=None):
return PassiveFilter(
main=(
(cls.tables.passive.recontype == "SSL_CLIENT")
& (cls.tables.passive.source == "ja3")
& cls._searchja3(value_or_hash)
)
)
@classmethod
def searchja3server(cls, value_or_hash=None, client_value_or_hash=None):
base = (cls.tables.passive.recontype == "SSL_SERVER") & cls._searchja3(
value_or_hash
)
if not client_value_or_hash:
return PassiveFilter(
main=(base & cls.tables.passive.source.op("~")("^ja3-"))
)
key, value = cls._ja3keyvalue(client_value_or_hash)
if key == "md5":
return PassiveFilter(
main=(base & (cls.tables.passive.source == "ja3-%s" % value))
)
base &= cls.tables.passive.source.op("~")("^ja3-")
if key in ["sha1", "sha256"]:
return PassiveFilter(
main=(
base
& (
cls.tables.passive.moreinfo.op("->")("client").op("->>")(key)
== value
)
)
)
return PassiveFilter(
main=(
base
& cls._searchstring_re(
cls.tables.passive.moreinfo.op("->")("client").op("->>")("raw"),
value,
)
)
)
@classmethod
def searchsshkey(cls, keytype=None):
if keytype is None:
return PassiveFilter(
main=(
(cls.tables.passive.recontype == "SSH_SERVER_HOSTKEY")
& (cls.tables.passive.source == "SSHv2")
)
)
return PassiveFilter(
main=(
(cls.tables.passive.recontype == "SSH_SERVER_HOSTKEY")
& (cls.tables.passive.source == "SSHv2")
& (cls.tables.passive.moreinfo.op("->>")("algo") == "ssh-" + keytype)
)
)
@classmethod
def searchtcpsrvbanner(cls, banner):
return PassiveFilter(
main=(
(cls.tables.passive.recontype == "TCP_SERVER_BANNER")
& (cls._searchstring_re(cls.tables.passive.value, banner))
)
)
@classmethod
def searchsensor(cls, sensor, neg=False):
return PassiveFilter(
main=(cls._searchstring_re(cls.tables.passive.sensor, sensor, neg=neg)),
)
@classmethod
def searchport(cls, port, protocol="tcp", state="open", neg=False):
"""Filters (if `neg` == True, filters out) records on the specified
protocol/port.
"""
if protocol != "tcp":
raise ValueError("Protocols other than TCP are not supported " "in passive")
if state != "open":
raise ValueError("Only open ports can be found in passive")
return PassiveFilter(
main=(cls.tables.passive.port != port)
if neg
else (cls.tables.passive.port == port)
)
@classmethod
def searchservice(cls, srv, port=None, protocol=None):
"""Search a port with a particular service."""
if srv is False:
flt = [~cls.tables.passive.moreinfo.op("?")("service_name")]
elif isinstance(srv, list):
flt = [cls.tables.passive.moreinfo.op("->>")("service_name").in_(srv)]
else:
flt = [
cls._searchstring_re(
cls.tables.passive.moreinfo.op("->>")("service_name"), srv
)
]
if port is not None:
flt.append(cls.tables.passive.port == port)
if protocol is not None and protocol != "tcp":
raise ValueError("Protocols other than TCP are not supported " "in passive")
return PassiveFilter(main=and_(*flt))
@classmethod
def searchproduct(
cls, product=None, version=None, service=None, port=None, protocol=None
):
"""Search a port with a particular `product`. It is (much)
better to provide the `service` name and/or `port` number
since those fields are indexed.
"""
flt = []
if product is not None:
if product is False:
flt.append(~cls.tables.passive.moreinfo.op("?")("service_product"))
elif isinstance(product, list):
flt.append(
cls.tables.passive.moreinfo.op("->>")("service_product").in_(
product
)
)
else:
flt.append(
cls._searchstring_re(
cls.tables.passive.moreinfo.op("->>")("service_product"),
product,
)
)
if version is not None:
if version is False:
flt.append(~cls.tables.passive.moreinfo.op("?")("service_version"))
elif isinstance(version, list):
flt.append(
cls.tables.passive.moreinfo.op("->>")("service_version").in_(
version
)
)
else:
flt.append(
cls._searchstring_re(
cls.tables.passive.moreinfo.op("->>")("service_version"),
version,
)
)
if service is not None:
if service is False:
flt.append(~cls.tables.passive.moreinfo.op("?")("service_name"))
elif isinstance(service, list):
flt.append(
cls.tables.passive.moreinfo.op("->>")("service_name").in_(service)
)
else:
flt.append(
cls._searchstring_re(
cls.tables.passive.moreinfo.op("->>")("service_name"),
service,
)
)
if port is not None:
flt.append(cls.tables.passive.port == port)
if protocol is not None:
if protocol != "tcp":
raise ValueError(
"Protocols other than TCP are not supported " "in passive"
)
return PassiveFilter(main=and_(*flt))
@classmethod
def searchsvchostname(cls, hostname):
return PassiveFilter(
main=cls._searchstring_re(
cls.tables.passive.moreinfo.op("->>")("service_hostname"),
hostname,
)
)
@classmethod
def searchtimeago(cls, delta, neg=False, new=True):
field = cls.tables.passive.firstseen if new else cls.tables.passive.lastseen
if not isinstance(delta, datetime.timedelta):
delta = datetime.timedelta(seconds=delta)
now = datetime.datetime.now()
timestamp = now - delta
return PassiveFilter(main=(field < timestamp if neg else field >= timestamp))
@classmethod
def searchnewer(cls, timestamp, neg=False, new=True):
field = cls.tables.passive.firstseen if new else cls.tables.passive.lastseen
timestamp = utils.all2datetime(timestamp)
return PassiveFilter(main=(field <= timestamp if neg else field > timestamp))
| p-l-/ivre | ivre/db/sql/__init__.py | Python | gpl-3.0 | 124,119 |
# Copyright 2004-2011 Pexego Sistemas Informáticos. (http://pexego.es)
# Copyright 2012 NaN·Tic (http://www.nan-tic.com)
# Copyright 2013 Acysos (http://www.acysos.com)
# Copyright 2013 Joaquín Pedrosa Gutierrez (http://gutierrezweb.es)
# Copyright 2016 Tecnativa - Antonio Espinosa
# Copyright 2016 Tecnativa - Angel Moya <[email protected]>
# Copyright 2014-2019 Tecnativa - Pedro M. Baeza
# Copyright 2018 PESOL - Angel Moya <[email protected]>
# Copyright 2019 Tecnativa - Carlos Dauden
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import fields, models, api, exceptions, _
from datetime import datetime
from calendar import monthrange
import odoo.addons.decimal_precision as dp
KEY_TAX_MAPPING = {
'A': 'l10n_es_aeat_mod347.aeat_mod347_map_a',
'B': 'l10n_es_aeat_mod347.aeat_mod347_map_b',
}
class L10nEsAeatMod347Report(models.Model):
_inherit = "l10n.es.aeat.report"
_name = "l10n.es.aeat.mod347.report"
_description = "AEAT 347 Report"
_period_yearly = True
_period_quarterly = False
_period_monthly = False
_aeat_number = '347'
@api.depends('partner_record_ids',
'partner_record_ids.amount',
'partner_record_ids.cash_amount',
'partner_record_ids.real_estate_transmissions_amount')
def _compute_totals(self):
"""Calculates the total_* fields from the line values."""
for record in self:
record.total_partner_records = len(record.partner_record_ids)
record.total_amount = sum(
record.mapped('partner_record_ids.amount')
)
record.total_cash_amount = sum(
record.mapped('partner_record_ids.cash_amount')
)
record.total_real_estate_transmissions_amount = sum(
record.mapped(
'partner_record_ids.real_estate_transmissions_amount'
)
)
@api.depends('real_estate_record_ids',
'real_estate_record_ids.amount')
def _compute_totals_real_estate(self):
"""Calculates the total_* fields from the line values."""
for record in self:
record.total_real_estate_records = len(
record.real_estate_record_ids
)
record.total_real_estate_amount = sum(
record.mapped('real_estate_record_ids.amount')
)
number = fields.Char(default='347')
operations_limit = fields.Float(
string='Invoiced Limit (1)', digits=(13, 2), default=3005.06,
help="The declaration will include partners with the total of "
"operations over this limit")
received_cash_limit = fields.Float(
string='Received cash Limit (2)', digits=(13, 2), default=6000.00,
help="The declaration will show the total of cash operations over "
"this limit")
total_partner_records = fields.Integer(
compute="_compute_totals",
string="Partners records",
store=True,
)
total_amount = fields.Float(
compute="_compute_totals",
string="Amount",
store=True,
)
total_cash_amount = fields.Float(
compute="_compute_totals",
string="Cash Amount",
store=True,
)
total_real_estate_transmissions_amount = fields.Float(
compute="_compute_totals",
string="Real Estate Transmissions Amount",
store=True,
)
total_real_estate_records = fields.Integer(
compute="_compute_totals_real_estate",
string="Real estate records",
store=True,
)
total_real_estate_amount = fields.Float(
compute="_compute_totals_real_estate",
string="Real Estate Amount",
store=True,
)
partner_record_ids = fields.One2many(
comodel_name='l10n.es.aeat.mod347.partner_record',
inverse_name='report_id',
string='Partner Records',
)
real_estate_record_ids = fields.One2many(
comodel_name='l10n.es.aeat.mod347.real_estate_record',
inverse_name='report_id',
string='Real Estate Records',
)
def button_confirm(self):
"""Different check out in report"""
for item in self:
# Browse partner record lines to check if all are correct (all
# fields filled)
partner_errors = []
for partner_record in item.partner_record_ids:
if not partner_record.check_ok:
partner_errors.append(
_("- %s (%s)") %
(partner_record.partner_id.name,
partner_record.partner_id.id))
real_state_errors = []
for real_estate_record in item.real_estate_record_ids:
if not real_estate_record.check_ok:
real_state_errors.append(
_("- %s (%s)") %
(real_estate_record.partner_id.name,
real_estate_record.partner_id.id))
error = _("Please review partner and real estate records, "
"some of them are in red color:\n\n")
if partner_errors:
error += _("Partner record errors:\n")
error += '\n'.join(partner_errors)
error += '\n\n'
if real_state_errors:
error += _("Real estate record errors:\n")
error += '\n'.join(real_state_errors)
if partner_errors or real_state_errors:
raise exceptions.ValidationError(error)
return super(L10nEsAeatMod347Report, self).button_confirm()
def button_send_mails(self):
self.partner_record_ids.filtered(
lambda x: x.state == 'pending'
).send_email_direct()
def btn_list_records(self):
return {
'domain': "[('report_id','in'," + str(self.ids) + ")]",
'name': _("Partner records"),
'view_mode': 'tree,form',
'view_type': 'form',
'res_model': 'l10n.es.aeat.mod347.partner_record',
'type': 'ir.actions.act_window',
}
def _account_move_line_domain(self, taxes):
"""Return domain for searching move lines.
        :param taxes: Taxes to look for in move lines.
"""
return [
('partner_id.not_in_mod347', '=', False),
('move_id.not_in_mod347', '=', False),
('date', '>=', self.date_start),
('date', '<=', self.date_end),
'|',
('tax_ids', 'in', taxes.ids),
('tax_line_id', 'in', taxes.ids),
]
@api.model
def _get_taxes(self, map):
tax_obj = self.env['account.tax']
# Obtain all the taxes to be considered
tax_templates = map.mapped('tax_ids').mapped('description')
if not tax_templates:
raise exceptions.Warning(_('No Tax Mapping was found'))
# search the account.tax referred to by the template
taxes = tax_obj.search(
[('description', 'in', tax_templates),
('company_id', 'child_of', self.company_id.id)])
return taxes
@api.model
def _get_partner_347_identification(self, partner):
country_code, _, vat = partner._parse_aeat_vat_info()
if country_code == 'ES':
return {
'partner_vat': vat,
                # Odoo Spanish state codes use the car license plate approach
                # (CR, A, M...) instead of ZIP-like codes (01, 02...), so we
                # convert them, falling back to the existing code if not found.
'partner_state_code': self.SPANISH_STATES.get(
partner.state_id.code, partner.state_id.code),
'partner_country_code': country_code,
}
else:
return {
'community_vat': vat,
'partner_state_code': 99,
'partner_country_code': country_code,
}
def _create_partner_records(self, key, map_ref, partner_record=None):
partner_record_obj = self.env['l10n.es.aeat.mod347.partner_record']
partner_obj = self.env['res.partner']
map_line = self.env.ref(map_ref)
taxes = self._get_taxes(map_line)
domain = self._account_move_line_domain(taxes)
if partner_record:
domain += [('partner_id', '=', partner_record.partner_id.id)]
groups = self.env['account.move.line'].read_group(
domain,
['partner_id', 'balance'],
['partner_id'],
)
filtered_groups = list(filter(
lambda d: abs(d['balance']) > self.operations_limit, groups)
)
for group in filtered_groups:
partner = partner_obj.browse(group['partner_id'][0])
vals = {
'report_id': self.id,
'partner_id': partner.id,
'representative_vat': '',
'operation_key': key,
'amount': (-1 if key == 'B' else 1) * group['balance'],
}
vals.update(self._get_partner_347_identification(partner))
move_groups = self.env['account.move.line'].read_group(
group['__domain'],
['move_id', 'balance'],
['move_id'],
)
vals['move_record_ids'] = [
(0, 0, {
'move_id': move_group['move_id'][0],
'amount': abs(move_group['balance']),
}) for move_group in move_groups
]
if partner_record:
vals['move_record_ids'][0:0] = [
(2, x) for x in partner_record.move_record_ids.ids
]
partner_record.write(vals)
else:
partner_record_obj.create(vals)
def _create_cash_moves(self):
partner_obj = self.env['res.partner']
move_line_obj = self.env['account.move.line']
cash_journals = self.env['account.journal'].search(
[('type', '=', 'cash')],
)
if not cash_journals:
return
domain = [
('account_id.internal_type', '=', 'receivable'),
('journal_id', 'in', cash_journals.ids),
('date', '>=', self.date_start),
('date', '<=', self.date_end),
('partner_id.not_in_mod347', '=', False),
]
cash_groups = move_line_obj.read_group(
domain,
['partner_id', 'balance'],
['partner_id']
)
for cash_group in cash_groups:
partner = partner_obj.browse(cash_group['partner_id'][0])
partner_record_obj = self.env[
'l10n.es.aeat.mod347.partner_record']
amount = abs(cash_group['balance'])
if amount > self.received_cash_limit:
move_lines = move_line_obj.search(cash_group['__domain'])
partner_record = partner_record_obj.search([
('partner_id', '=', partner.id),
('operation_key', '=', 'B'),
('report_id', '=', self.id),
])
if partner_record:
partner_record.write({
'cash_record_ids': [(6, 0, move_lines.ids)],
'cash_amount': amount,
})
else:
vals = {
'report_id': self.id,
'partner_id': partner.id,
'representative_vat': '',
'operation_key': 'B',
'amount': 0,
'cash_amount': amount,
'cash_record_ids': [(6, 0, move_lines.ids)],
}
vals.update(self._get_partner_347_identification(partner))
partner_record_obj.create(vals)
def calculate(self):
for report in self:
# Delete previous partner records
report.partner_record_ids.unlink()
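            # Defer stored-field recomputation while the partner and cash records are
            # bulk created; self.recompute() below triggers it once at the end.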
with self.env.norecompute():
self._create_partner_records('A', KEY_TAX_MAPPING['A'])
self._create_partner_records('B', KEY_TAX_MAPPING['B'])
self._create_cash_moves()
self.recompute()
report.partner_record_ids.calculate_quarter_totals()
return True
class L10nEsAeatMod347PartnerRecord(models.Model):
_name = 'l10n.es.aeat.mod347.partner_record'
_inherit = ['mail.thread', 'mail.activity.mixin', 'portal.mixin']
_description = 'Partner Record'
_rec_name = "partner_vat"
@api.model
def _default_record_id(self):
return self.env.context.get('report_id', False)
report_id = fields.Many2one(
comodel_name='l10n.es.aeat.mod347.report', string='AEAT 347 Report',
ondelete="cascade", default=_default_record_id,
)
user_id = fields.Many2one(
comodel_name='res.users',
string='Salesperson',
track_visibility='onchange',
default=lambda self: self.env.user,
copy=False)
state = fields.Selection(
selection=[
('pending', 'Pending'),
('sent', 'Sent'),
('confirmed', 'Confirmed'),
('exception', 'Exception'),
], default='pending',
string='State',
)
operation_key = fields.Selection(
selection=[
('A', u'A - Adquisiciones de bienes y servicios superiores al '
u'límite (1)'),
('B',
u'B - Entregas de bienes y servicios superiores al límite (1)'),
('C',
u'C - Cobros por cuenta de terceros superiores al límite (3)'),
('D', u'D - Adquisiciones efectuadas por Entidades Públicas '
u'(...) superiores al límite (1)'),
('E', u'E - Subvenciones, auxilios y ayudas satisfechas por Ad. '
u'Públicas superiores al límite (1)'),
('F', u'F - Ventas agencia viaje'),
('G', u'G - Compras agencia viaje'),
],
string='Operation Key',
)
partner_id = fields.Many2one(
comodel_name='res.partner', string='Partner', required=True)
partner_vat = fields.Char(string='VAT number', size=9)
representative_vat = fields.Char(
string='L.R. VAT number', size=9,
help="Legal Representative VAT number")
community_vat = fields.Char(
string='Community vat number', size=17,
help="VAT number for professionals established in other state "
"member without national VAT")
partner_country_code = fields.Char(string='Country Code', size=2)
partner_state_code = fields.Char(string='State Code', size=2)
first_quarter = fields.Float(
string="First quarter operations", digits=dp.get_precision('Account'),
help="Total amount of first quarter in, out and refund invoices "
"for this partner",
track_visibility='onchange',
)
first_quarter_real_estate_transmission = fields.Float(
string="First quarter real estate", digits=dp.get_precision('Account'),
help="Total amount of first quarter real estate transmissions "
"for this partner",
oldname='first_quarter_real_estate_transmission_amount',
)
second_quarter = fields.Float(
string="Second quarter operations", digits=dp.get_precision('Account'),
help="Total amount of second quarter in, out and refund invoices "
"for this partner",
track_visibility='onchange',
)
second_quarter_real_estate_transmission = fields.Float(
string="Second quarter real estate",
digits=dp.get_precision('Account'),
help="Total amount of second quarter real estate transmissions "
"for this partner",
oldname='second_quarter_real_estate_transmission_amount',
)
third_quarter = fields.Float(
string="Third quarter operations", digits=dp.get_precision('Account'),
help="Total amount of third quarter in, out and refund invoices "
"for this partner",
track_visibility='onchange',
)
third_quarter_real_estate_transmission = fields.Float(
string="Third quarter real estate", digits=dp.get_precision('Account'),
help="Total amount of third quarter real estate transmissions "
"for this partner",
oldname='third_quarter_real_estate_transmission_amount',
)
fourth_quarter = fields.Float(
string="Fourth quarter operations", digits=dp.get_precision('Account'),
help="Total amount of fourth quarter in, out and refund invoices "
"for this partner",
track_visibility='onchange',
)
fourth_quarter_real_estate_transmission = fields.Float(
string="Fourth quarter real estate",
digits=dp.get_precision('Account'),
help="Total amount of fourth quarter real estate transmissions "
"for this partner",
oldname='fourth_quarter_real_estate_transmission_amount',
)
amount = fields.Float(
string='Operations amount',
digits=(13, 2),
track_visibility='onchange',
)
cash_amount = fields.Float(string='Received cash amount', digits=(13, 2))
real_estate_transmissions_amount = fields.Float(
        string='Real Estate Transmissions amount', digits=(13, 2),
)
insurance_operation = fields.Boolean(
string='Insurance Operation',
help="Only for insurance companies. Set to identify insurance "
"operations aside from the rest.",
)
cash_basis_operation = fields.Boolean(
string='Cash Basis Operation',
help="Only for cash basis operations. Set to identify cash basis "
"operations aside from the rest.",
)
tax_person_operation = fields.Boolean(
string='Taxable Person Operation',
help="Only for taxable person operations. Set to identify taxable "
"person operations aside from the rest.",
)
related_goods_operation = fields.Boolean(
string='Related Goods Operation',
help="Only for related goods operations. Set to identify related "
"goods operations aside from the rest.",
)
bussiness_real_estate_rent = fields.Boolean(
        string='Business Real Estate Rent',
help="Set to identify real estate rent operations aside from the rest."
" You'll need to fill in the real estate info only when you are "
"the one that receives the money.",
)
origin_year = fields.Integer(
string='Origin year', help="Origin cash operation year",
)
move_record_ids = fields.One2many(
comodel_name='l10n.es.aeat.mod347.move.record',
inverse_name='partner_record_id', string='Move records',
)
cash_record_ids = fields.Many2many(
comodel_name='account.move.line',
string='Cash payments',
readonly=True,
)
check_ok = fields.Boolean(
compute="_compute_check_ok", string='Record is OK',
store=True,
help='Checked if this record is OK',
)
@api.depends('partner_country_code', 'partner_state_code', 'partner_vat',
'community_vat')
def _compute_check_ok(self):
for record in self:
record.check_ok = (
record.partner_country_code and
record.partner_state_code and
record.partner_state_code.isdigit() and
(record.partner_vat or record.partner_country_code != 'ES')
)
@api.onchange('partner_id')
def _onchange_partner_id(self):
"""Loads some partner data when the selected partner changes."""
if self.partner_id:
self.update(
self.report_id._get_partner_347_identification(self.partner_id)
)
@api.depends('move_record_ids.move_id.date', 'report_id.year')
def calculate_quarter_totals(self):
def calc_amount_by_quarter(invoices, refunds, year, month_start):
day_start = 1
month_end = month_start + 2
day_end = monthrange(year, month_end)[1]
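            # monthrange() returns (weekday of the 1st, days in month); index [1] above
            # is therefore the last day of the quarter's closing month.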
date_start = fields.Date.to_string(
datetime(year, month_start, day_start)
)
date_end = fields.Date.to_string(
datetime(year, month_end, day_end)
)
return (
sum(invoices.filtered(
lambda x: date_start <= x.move_id.date <= date_end
).mapped('amount')) - sum(refunds.filtered(
lambda x: date_start <= x.move_id.date <= date_end
).mapped('amount'))
)
for record in self:
year = record.report_id.year
invoices = record.move_record_ids.filtered(
lambda rec: rec.move_id.move_type in ('receivable', 'payable')
)
refunds = record.move_record_ids.filtered(
lambda rec: (
rec.move_id.move_type in (
'receivable_refund', 'payable_refund')
)
)
record.first_quarter = calc_amount_by_quarter(
invoices, refunds, year, 1,
)
record.second_quarter = calc_amount_by_quarter(
invoices, refunds, year, 4,
)
record.third_quarter = calc_amount_by_quarter(
invoices, refunds, year, 7,
)
record.fourth_quarter = calc_amount_by_quarter(
invoices, refunds, year, 10,
)
def action_exception(self):
self.write({'state': 'exception'})
def get_confirm_url(self):
self.ensure_one()
return self._notification_link_helper(
'controller', controller='/mod347/accept'
)
def get_reject_url(self):
self.ensure_one()
return self._notification_link_helper(
'controller', controller='/mod347/reject'
)
def action_confirm(self):
self.write({'state': 'confirmed'})
def action_send(self):
self.write({'state': 'sent'})
self.ensure_one()
template = self.env.ref('l10n_es_aeat_mod347.email_template_347')
compose_form = self.env.ref('mail.email_compose_message_wizard_form')
ctx = dict(
default_model=self._name,
default_res_id=self.id,
default_use_template=bool(template),
default_template_id=template and template.id or False,
default_composition_mode='comment',
mark_invoice_as_sent=True,
)
return {
'name': _('Compose Email'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form.id, 'form')],
'view_id': compose_form.id,
'target': 'new',
'context': ctx,
}
def button_print(self):
return self.env.ref(
'l10n_es_aeat_mod347.347_partner'
).report_action(self)
def button_recompute(self):
self.ensure_one()
if self.operation_key not in ('A', 'B'):
return
self.report_id._create_partner_records(
self.operation_key,
KEY_TAX_MAPPING[self.operation_key],
partner_record=self,
)
self.calculate_quarter_totals()
self.action_pending()
def send_email_direct(self):
template = self.env.ref('l10n_es_aeat_mod347.email_template_347')
for record in self:
template.send_mail(record.id)
self.write({'state': 'sent'})
def action_pending(self):
self.write({'state': 'pending'})
def message_get_suggested_recipients(self):
"""Add the invoicing partner to the suggested recipients sending an
email.
"""
recipients = super().message_get_suggested_recipients()
partner_obj = self.env['res.partner']
for record in self:
partner = partner_obj.browse(
record.partner_id.address_get(['invoice'])['invoice']
)
record._message_add_suggested_recipient(
recipients, partner=partner,
)
return recipients
class L10nEsAeatMod347RealStateRecord(models.Model):
_name = 'l10n.es.aeat.mod347.real_estate_record'
_description = 'Real Estate Record'
_rec_name = "reference"
@api.model
def _default_record_id(self):
return self.env.context.get('report_id', False)
@api.model
def _default_representative_vat(self):
return self.env.context.get('representative_vat', False)
report_id = fields.Many2one(
comodel_name='l10n.es.aeat.mod347.report', string='AEAT 347 Report',
ondelete="cascade", index=1, default=_default_record_id,
)
partner_id = fields.Many2one(
comodel_name='res.partner', string='Partner', required=True,
)
partner_vat = fields.Char(
string='VAT number', size=32,
)
representative_vat = fields.Char(
string='L.R. VAT number', size=32, default=_default_representative_vat,
help="Legal Representative VAT number")
amount = fields.Float(string='Amount', digits=(13, 2))
situation = fields.Selection(
selection=[('1', '1 - Spain but Basque Country and Navarra'),
('2', '2 - Basque Country and Navarra'),
('3', '3 - Spain, without catastral reference'),
('4', '4 - Foreign')],
string='Real estate Situation',
required=True,
)
reference = fields.Char(
string='Catastral Reference', size=25)
address_type = fields.Char(
string='Address type', size=5)
address = fields.Char(string='Address', size=50)
number_type = fields.Selection(
selection=[('NUM', 'Number'),
('KM.', 'Kilometer'),
('S/N', 'Without number')],
string='Number type')
number = fields.Integer(string='Number', size=5)
number_calification = fields.Selection(
selection=[('BIS', 'Bis'),
('MOD', 'Mod'),
('DUP', 'Dup'),
('ANT', 'Ant')],
string='Number calification')
block = fields.Char(string='Block', size=3)
portal = fields.Char(string='Portal', size=3)
stairway = fields.Char(string='Stairway', size=3)
floor = fields.Char(string='Floor', size=3)
door = fields.Char(string='Door', size=3)
complement = fields.Char(
string='Complement', size=40,
help="Complement (urbanization, industrial park...)")
city = fields.Char(string='City', size=30)
township = fields.Char(string='Township', size=30)
township_code = fields.Char(string='Township Code', size=5)
partner_state_code = fields.Char(
string='State Code', oldname='state_code', size=2)
postal_code = fields.Char(string='Postal code', size=5)
check_ok = fields.Boolean(
compute="_compute_check_ok", string='Record is OK',
store=True, help='Checked if this record is OK',
)
@api.depends('partner_state_code')
def _compute_check_ok(self):
for record in self:
record.check_ok = bool(record.partner_state_code)
@api.onchange('partner_id')
def _onchange_partner_id(self):
"""Loads some partner data when the selected partner changes."""
if self.partner_id:
vals = self.report_id._get_partner_347_identification(
self.partner_id,
)
vals.pop('community_vat', None)
del vals['partner_country_code']
self.update(vals)
class L10nEsAeatMod347MoveRecord(models.Model):
_name = 'l10n.es.aeat.mod347.move.record'
_description = 'Move Record'
@api.model
def _default_partner_record(self):
return self.env.context.get('partner_record_id', False)
partner_record_id = fields.Many2one(
comodel_name='l10n.es.aeat.mod347.partner_record',
string='Partner record', required=True, ondelete="cascade", index=True,
default=_default_partner_record,
)
move_id = fields.Many2one(
comodel_name='account.move',
string='Move',
ondelete="restrict",
)
move_type = fields.Selection(
related='move_id.move_type',
store=True,
readonly=True,
)
invoice_id = fields.Many2one(
comodel_name='account.invoice',
string='Invoice',
related="move_id.line_ids.invoice_id",
store=True,
readonly=True,
)
date = fields.Date(
string='Date',
related='move_id.date',
store=True,
readonly=True,
)
amount = fields.Float(
string='Amount',
readonly=True,
)
amount_signed = fields.Float(
string='Amount signed',
compute="_compute_amount_signed",
)
def _compute_amount_signed(self):
for record in self:
if 'refund' in record.move_id.move_type:
record.amount_signed = record.amount * -1
else:
record.amount_signed = record.amount
| factorlibre/l10n-spain | l10n_es_aeat_mod347/models/mod347.py | Python | agpl-3.0 | 29,423 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import apache_beam as beam
from log_elements import LogElements
with beam.Pipeline() as p:
(p | beam.Create(range(1, 11))
| beam.combiners.Count.Globally()
| LogElements())
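# Rough expectation for this kata (not part of the original task): Create emits the
# integers 1..10, Count.Globally() collapses them into a single element equal to the
# element count (10), and LogElements prints that value when the pipeline runs.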
| lukecwik/incubator-beam | learning/katas/python/Common Transforms/Aggregation/Count/task.py | Python | apache-2.0 | 1,000 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Automatic provisioning of AWS KMS.
import boto
from boto import kms
import nixops.util
import nixops.resources
from nixops import kms_utils
class KmsKeyDefinition(nixops.resources.ResourceDefinition):
"""Definition of an AWS KMS encryption key."""
@classmethod
def get_type(cls):
return "AWS-kms-key"
def __init__(self, xml):
nixops.resources.ResourceDefinition.__init__(self, xml)
self.region = xml.find("attrs/attr[@name='region']/string").get("value")
self.access_key_id = xml.find("attrs/attr[@name='accessKeyId']/string").get("value")
self.policy = xml.find("attrs/attr[@name='policy']/string").get("value")
self.alias = xml.find("attrs/attr[@name='alias']/string").get("value")
if self.alias == '': self.alias = None
        self.keyUsage = xml.find("attrs/attr[@name='KeyUsage']/string").get("value")
self.enabled = xml.find("attrs/attr[@name='enabled']/bool").get("value")
self.description = xml.find("attrs/attr[@name='description']/string").get("value")
if self.description == '': self.description = None
self.grants = xml.find("attrs/attr[@name='grants']/list").get("value")
def show_type(self):
return "{0} [{1}]".format(self.get_type(), self.region)
class KmsKeyState(nixops.resources.ResourceState):
"""State of an AWS KMS encryption key."""
state = nixops.util.attr_property("state", nixops.resources.ResourceState.MISSING, int)
KeyId = nixops.util.attr_property("kms.KeyId", None)
region = nixops.util.attr_property("kms.region", None)
access_key_id = nixops.util.attr_property("kms.accessKeyId", None)
policy = nixops.util.attr_property("kms.policy", None)
alias = nixops.util.attr_property("kms.alias", None, str)
keyUsage = nixops.util.attr_property("kms.keyUsage", None)
enabled = nixops.util.attr_property("kms.enabled", None)
description = nixops.util.attr_property("kms.description", None, str)
grants = nixops.util.attr_property("kms.grants", None)
@classmethod
def get_type(cls):
return "AWS-kms-key"
def __init__(self, depl, name, id):
nixops.resources.ResourceState.__init__(self, depl, name, id)
self._conn = None
def _exists(self):
return self.state != self.MISSING
def show_type(self):
s = super(KmsKeyState, self).show_type()
if self._exists(): s = "{0} [{1}]".format(s, self.region)
return s
@property
def resource_id(self):
return self.KeyId
def get_definition_prefix(self):
return "resources.KmsKeys."
def connect(self):
if self._conn:
return self._conn
(access_key_id, secret_access_key) = kms_utils.fetch_aws_secret_key(self.access_key_id)
#return (access_key_id, secret_access_key)
self._conn = kms.layer1.KMSConnection(aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key)
return self._conn
def update_key_alias(self,alias_name,target_key_id):
try:
            self._conn.create_alias(alias_name, target_key_id)
except boto.kms.exceptions.NotFoundException as e:
self.logger.log('Setting Alias Failed : wrong key ID (key not found)')
except boto.exception.JSONResponseError as e :
if e.error_code == 'ValidationException' :
self.logger.log('Wrong Alias format, the alias should be like "alias/my_alias". Aliases like alias/aws/.... are reserved for AWS')
return True
def update_grants(self,key_id,grantee_principal,retiring_principal=None,operations=None,constraints=None,grant_tokens=None):
try:
            self._conn.create_grant(key_id, grantee_principal, retiring_principal=retiring_principal,
                                    operations=operations, constraints=constraints,
                                    grant_tokens=grant_tokens)
except boto.exception.JSONResponseError as e:
if e.error_code == 'ValidationException' :
self.logger.log('Setting Grant Failed : unrecognized grantee or unsupported operation')
return True
def create(self, defn, check, allow_reboot, allow_recreate):
self.access_key_id = defn.access_key_id or kms_utils.get_access_key_id()
if not self.access_key_id:
raise Exception("please set ‘accessKeyId’, $EC2_ACCESS_KEY or $AWS_ACCESS_KEY_ID")
self.connect()
if self._exists():
if self.region != defn.region :
raise Exception("changing the region of an AWS KMS key is not supported")
if self.policy != None and defn.policy != self.policy:
raise Exception("You're trying to change the policies of an existing KMS Key, keep it blank to avoid this")
if defn.alias != None and defn.alias != self.alias:
raise Exception("You're trying to change the alias of an existing KMS Key, keep it blank to avoid this")
if defn.grants != None and defn.grants != self.grants:
raise Exception("You're trying to change the grants of an existing KMS Key, keep it blank to avoid this")
if defn.description != None and defn.description != self.description:
raise Exception("You're trying to change the description of an existing KMS Key, keep it blank to avoid this")
if defn.keyUsage != None and defn.keyUsage != self.keyUsage:
raise Exception("You're trying to change the usage definition of an existing KMS Key, keep it blank to avoid this")
if defn.enabled != None and defn.enabled != self.enabled:
if self.enabled == True :
raise Exception("You're trying to disable an existing enabled KMS Key, keep it blank to avoid this")
if self.enabled == False :
raise Exception("You're trying to enable an existing disabled KMS Key, keep it blank to avoid this")
if self.state == self.MISSING:
if defn.policy != "": policy = defn.policy
else : policy = 'DEFAULT'
if defn.description != "": description = defn.description
else : description = ''
if defn.keyUsage != "": keyUsage = defn.keyUsage
else : keyUsage = 'ENCRYPT/DECRYPT'
if policy and description and keyUsage :
self.log("Creating new KMS Key for '{2}' usage with '{0}' policies and described by '{1}'...".format(policy, description, keyUsage))
new_key = self._conn.create_key(policy=defn.policy, description=defn.description, keyUsage=defn.keyUsage)
with self.depl._db:
self.state = self.STARTING
self.region = defn.region
self.KeyId = new_key['KeyId']
self.alias = defn.alias
self.keyUsage = defn.keyUsage
self.description = defn.description
self.enabled = defn.enabled
self.grants = defn.grants
self.policy = defn.policy
self.log("KMS KEY ID is ‘{0}’".format(new_key['KeyId']))
                self.update_key_alias(alias_name=defn.alias, target_key_id=new_key['KeyId'])
self.update_grants(new_key['KeyId'],defn.grants['GranteePrincipal'],defn.grants['retiring_principal'],
defn.grants['operations'], defn.grants['constraints'], defn.grants['grant_tokens'])
if self.state == self.STARTING or check:
kms_utils.wait_for_key_available(self._conn, self.KeyId, self.logger, states=['Creating', 'Created'])
self.state = self.UP
    def provide_key(self, param):
        # Return a KMS key ID: "new" creates a fresh key, any other non-empty value is
        # looked up as an existing key ID, and an empty string falls back to the
        # default AWS-managed EBS key (alias/aws/ebs).
assert isinstance(param,str)
conn = self.connect()
if param != "" :
if param == "new" :
key = conn.create_key()
return key['KeyId']
else :
key = kms_utils.get_kms_key_by_id(conn, param)
return key['KeyId']
else :
key = kms_utils.get_keyId_by_alias(conn,"alias/aws/ebs")
return key
| Bsami/nixops | nixops/resources/kms_keys.py | Python | lgpl-3.0 | 8,104 |
# -*- coding: utf-8 -*-
# Copyright 2018 Joan Marín <Github@JoanMarin>
# Copyright 2018 Guillermo Montoya <Github@guillermm>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Partner VAT Colombia",
"summary": "Module for Type of Identification Document and Colombian NIT Checking.",
"version": "10.0.1.0.0",
"license": "AGPL-3",
"website": "https://github.com/odooloco/l10n-colombia",
"author": "EXA Auto Parts Github@exaap, "
"Joan Marín Github@JoanMarin, "
"Guillermo Montoya Github@guillermm",
"category": "Localization",
"depends": [
"base_vat"
],
"data": [
"security/ir.model.access.csv",
"data/res_partner_document_type_data.xml",
"views/res_partner_views.xml"
],
"installable": True,
}
| odoo-colombia/l10n-colombia | l10n_co_partner_vat/__manifest__.py | Python | agpl-3.0 | 829 |
from __future__ import absolute_import, print_function, division
import urwid
from mitmproxy.console import common, searchable
from netlib import human
def maybe_timestamp(base, attr):
if base is not None and getattr(base, attr):
return human.format_timestamp_with_milli(getattr(base, attr))
else:
return "active"
def flowdetails(state, flow):
text = []
cc = flow.client_conn
sc = flow.server_conn
req = flow.request
resp = flow.response
if sc is not None:
text.append(urwid.Text([("head", "Server Connection:")]))
parts = [
["Address", repr(sc.address)],
["Resolved Address", repr(sc.ip_address)],
]
text.extend(
common.format_keyvals(parts, key="key", val="text", indent=4)
)
c = sc.cert
if c:
text.append(urwid.Text([("head", "Server Certificate:")]))
parts = [
["Type", "%s, %s bits" % c.keyinfo],
["SHA1 digest", c.digest("sha1")],
["Valid to", str(c.notafter)],
["Valid from", str(c.notbefore)],
["Serial", str(c.serial)],
[
"Subject",
urwid.BoxAdapter(
urwid.ListBox(
common.format_keyvals(
c.subject,
key="highlight",
val="text"
)
),
len(c.subject)
)
],
[
"Issuer",
urwid.BoxAdapter(
urwid.ListBox(
common.format_keyvals(
c.issuer, key="highlight", val="text"
)
),
len(c.issuer)
)
]
]
if c.altnames:
parts.append(
[
"Alt names",
", ".join(str(x) for x in c.altnames)
]
)
text.extend(
common.format_keyvals(parts, key="key", val="text", indent=4)
)
if cc is not None:
text.append(urwid.Text([("head", "Client Connection:")]))
parts = [
["Address", repr(cc.address)],
]
text.extend(
common.format_keyvals(parts, key="key", val="text", indent=4)
)
parts = []
if cc is not None and cc.timestamp_start:
parts.append(
[
"Client conn. established",
maybe_timestamp(cc, "timestamp_start")
]
)
if cc.ssl_established:
parts.append(
[
"Client conn. TLS handshake",
maybe_timestamp(cc, "timestamp_ssl_setup")
]
)
if sc is not None and sc.timestamp_start:
parts.append(
[
"Server conn. initiated",
maybe_timestamp(sc, "timestamp_start")
]
)
parts.append(
[
"Server conn. TCP handshake",
maybe_timestamp(sc, "timestamp_tcp_setup")
]
)
if sc.ssl_established:
parts.append(
[
"Server conn. TLS handshake",
maybe_timestamp(sc, "timestamp_ssl_setup")
]
)
if req is not None and req.timestamp_start:
parts.append(
[
"First request byte",
maybe_timestamp(req, "timestamp_start")
]
)
parts.append(
[
"Request complete",
maybe_timestamp(req, "timestamp_end")
]
)
if resp is not None and resp.timestamp_start:
parts.append(
[
"First response byte",
maybe_timestamp(resp, "timestamp_start")
]
)
parts.append(
[
"Response complete",
maybe_timestamp(resp, "timestamp_end")
]
)
if parts:
# sort operations by timestamp
parts = sorted(parts, key=lambda p: p[1])
text.append(urwid.Text([("head", "Timing:")]))
text.extend(common.format_keyvals(parts, key="key", val="text", indent=4))
return searchable.Searchable(state, text)
| x2Ident/x2Ident_test | mitmproxy/mitmproxy/console/flowdetailview.py | Python | gpl-3.0 | 4,821 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'StandSession.message_slug'
db.add_column(u'standbase_standsession', 'message_slug',
self.gf('django.db.models.fields.CharField')(default='something', max_length=255, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'StandSession.message_slug'
db.delete_column(u'standbase_standsession', 'message_slug')
models = {
u'standbase.standsession': {
'Meta': {'object_name': 'StandSession'},
'datechanged': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'datecreated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'datefinished': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'datelive': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'geocode': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'lon': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'default': "'something'", 'max_length': '255', 'blank': 'True'}),
'message_slug': ('django.db.models.fields.CharField', [], {'default': "'something'", 'max_length': '255', 'blank': 'True'}),
'secret': ('django.db.models.fields.CharField', [], {'default': "'4ujvhpa4on9ombjfgdf4z0x7lwaq62sr3025zuc4me66t8jt2g'", 'max_length': '255'}),
'vendorid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'venueid': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['standbase'] | whatsthehubbub/stand-django | standbase/migrations/0005_auto__add_field_standsession_message_slug.py | Python | mit | 2,249 |
"""
Description:
Task:
This kata requires you to write an object that receives a file path and does operations on it. NOTE FOR PYTHON USERS: You cannot use modules os.path, glob, and re
The purpose of this kata is to use string parsing, so you're not supposed to import external libraries. I could only enforce this in python.
Testing:
Python:
>>> master = FileMaster('/Users/person1/Pictures/house.png')
>>> master.extension()
'png'
>>> master.filename()
'house'
>>> master.dirpath()
'/Users/person1/Pictures/'
"""
class FileMaster():
def __init__(self, filepath):
        # Split on the last '.' for the extension and on the last '/' for the file
        # name, so paths whose names contain extra dots (e.g. 'archive.tar.gz')
        # don't lose part of the file name.
        path, _, self._extension = filepath.rpartition('.')
        dirpath, _, self._filename = path.rpartition('/')
        self._dirpath = dirpath + '/'
def extension(self):
return self._extension
def filename(self):
return self._filename
def dirpath(self):
return self._dirpath
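# Quick check against the docstring example above (the multi-dot path is a
# hypothetical extra case):
#   FileMaster('/Users/person1/Pictures/house.png').filename()  -> 'house'
#   FileMaster('/Users/person1/Pictures/house.png').dirpath()   -> '/Users/person1/Pictures/'
#   FileMaster('/tmp/archive.tar.gz').extension()               -> 'gz'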
| bgarnaat/codewars_katas | src/python/6kyu/file_path_operations/file_path_ops.py | Python | mit | 951 |
from __future__ import print_function, division
from sympy.core.containers import Tuple
from sympy.core.core import C
from sympy.core.expr import Expr
from sympy.core.mul import Mul
from sympy.core.singleton import S
from sympy.core.sympify import sympify
from sympy.concrete.expr_with_intlimits import ExprWithIntLimits
from sympy.functions.elementary.piecewise import piecewise_fold
from sympy.functions.elementary.exponential import exp, log
from sympy.polys import quo, roots
from sympy.simplify import powsimp
from sympy.core.compatibility import xrange
class Product(ExprWithIntLimits):
r"""Represents unevaluated products.
``Product`` represents a finite or infinite product, with the first
argument being the general form of terms in the series, and the second
argument being ``(dummy_variable, start, end)``, with ``dummy_variable``
taking all integer values from ``start`` through ``end``. In accordance
with long-standing mathematical convention, the end term is included in
the product.
Finite products
===============
For finite products (and products with symbolic limits assumed to be finite)
we follow the analogue of the summation convention described by Karr [1],
especially definition 3 of section 1.4. The product:
.. math::
\prod_{m \leq i < n} f(i)
has *the obvious meaning* for `m < n`, namely:
.. math::
\prod_{m \leq i < n} f(i) = f(m) f(m+1) \cdot \ldots \cdot f(n-2) f(n-1)
with the upper limit value `f(n)` excluded. The product over an empty set is
one if and only if `m = n`:
.. math::
\prod_{m \leq i < n} f(i) = 1 \quad \mathrm{for} \quad m = n
Finally, for all other products over empty sets we assume the following
definition:
.. math::
\prod_{m \leq i < n} f(i) = \frac{1}{\prod_{n \leq i < m} f(i)} \quad \mathrm{for} \quad m > n
It is important to note that above we define all products with the upper
limit being exclusive. This is in contrast to the usual mathematical notation,
but does not affect the product convention. Indeed we have:
.. math::
\prod_{m \leq i < n} f(i) = \prod_{i = m}^{n - 1} f(i)
where the difference in notation is intentional to emphasize the meaning,
with limits typeset on the top being inclusive.
Examples
========
>>> from sympy.abc import a, b, i, k, m, n, x
>>> from sympy import Product, factorial, oo
>>> Product(k,(k,1,m))
Product(k, (k, 1, m))
>>> Product(k,(k,1,m)).doit()
factorial(m)
>>> Product(k**2,(k,1,m))
Product(k**2, (k, 1, m))
>>> Product(k**2,(k,1,m)).doit()
(factorial(m))**2
Wallis' product for pi:
>>> W = Product(2*i/(2*i-1) * 2*i/(2*i+1), (i, 1, oo))
>>> W
Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, oo))
Direct computation currently fails:
>>> W.doit()
Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, oo))
But we can approach the infinite product by a limit of finite products:
>>> from sympy import limit
>>> W2 = Product(2*i/(2*i-1)*2*i/(2*i+1), (i, 1, n))
>>> W2
Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, n))
>>> W2e = W2.doit()
>>> W2e
2**(-2*n)*4**n*(factorial(n))**2/(RisingFactorial(1/2, n)*RisingFactorial(3/2, n))
>>> limit(W2e, n, oo)
pi/2
By the same formula we can compute sin(pi/2):
>>> from sympy import pi, gamma, simplify
>>> P = pi * x * Product(1 - x**2/k**2,(k,1,n))
>>> P = P.subs(x, pi/2)
>>> P
pi**2*Product(1 - pi**2/(4*k**2), (k, 1, n))/2
>>> Pe = P.doit()
>>> Pe
pi**2*RisingFactorial(1 + pi/2, n)*RisingFactorial(-pi/2 + 1, n)/(2*(factorial(n))**2)
>>> Pe = Pe.rewrite(gamma)
>>> Pe
pi**2*gamma(n + 1 + pi/2)*gamma(n - pi/2 + 1)/(2*gamma(1 + pi/2)*gamma(-pi/2 + 1)*gamma(n + 1)**2)
>>> Pe = simplify(Pe)
>>> Pe
sin(pi**2/2)*gamma(n + 1 + pi/2)*gamma(n - pi/2 + 1)/gamma(n + 1)**2
>>> limit(Pe, n, oo)
sin(pi**2/2)
Products with the lower limit being larger than the upper one:
>>> Product(1/i, (i, 6, 1)).doit()
120
>>> Product(i, (i, 2, 5)).doit()
120
The empty product:
>>> Product(i, (i, n, n-1)).doit()
1
An example showing that the symbolic result of a product is still
valid for seemingly nonsensical values of the limits. Then the Karr
convention allows us to give a perfectly valid interpretation to
those products by interchanging the limits according to the above rules:
>>> P = Product(2, (i, 10, n)).doit()
>>> P
2**(n - 9)
>>> P.subs(n, 5)
1/16
>>> Product(2, (i, 10, 5)).doit()
1/16
>>> 1/Product(2, (i, 6, 9)).doit()
1/16
An explicit example of the Karr summation convention applied to products:
>>> P1 = Product(x, (i, a, b)).doit()
>>> P1
x**(-a + b + 1)
>>> P2 = Product(x, (i, b+1, a-1)).doit()
>>> P2
x**(a - b - 1)
>>> simplify(P1 * P2)
1
And another one:
>>> P1 = Product(i, (i, b, a)).doit()
>>> P1
RisingFactorial(b, a - b + 1)
>>> P2 = Product(i, (i, a+1, b-1)).doit()
>>> P2
RisingFactorial(a + 1, -a + b - 1)
>>> P1 * P2
RisingFactorial(b, a - b + 1)*RisingFactorial(a + 1, -a + b - 1)
>>> simplify(P1 * P2)
1
See Also
========
Sum, summation
product
References
==========
.. [1] Michael Karr, "Summation in Finite Terms", Journal of the ACM,
Volume 28 Issue 2, April 1981, Pages 305-350
http://dl.acm.org/citation.cfm?doid=322248.322255
.. [2] http://en.wikipedia.org/wiki/Multiplication#Capital_Pi_notation
.. [3] http://en.wikipedia.org/wiki/Empty_product
"""
__slots__ = ['is_commutative']
def __new__(cls, function, *symbols, **assumptions):
obj = ExprWithIntLimits.__new__(cls, function, *symbols, **assumptions)
return obj
def _eval_rewrite_as_Sum(self, *args):
from sympy.concrete.summations import Sum
return exp(Sum(log(self.function), *self.limits))
@property
def term(self):
return self._args[0]
function = term
def _eval_is_zero(self):
# a Product is zero only if its term is zero.
return self.term.is_zero
def doit(self, **hints):
f = self.function
for index, limit in enumerate(self.limits):
i, a, b = limit
dif = b - a
if dif.is_Integer and dif < 0:
a, b = b + 1, a - 1
f = 1 / f
g = self._eval_product(f, (i, a, b))
if g in (None, S.NaN):
return self.func(powsimp(f), *self.limits[index:])
else:
f = g
if hints.get('deep', True):
return f.doit(**hints)
else:
return powsimp(f)
def _eval_adjoint(self):
if self.is_commutative:
return self.func(self.function.adjoint(), *self.limits)
return None
def _eval_conjugate(self):
return self.func(self.function.conjugate(), *self.limits)
def _eval_product(self, term, limits):
from sympy.concrete.delta import deltaproduct, _has_simple_delta
from sympy.concrete.summations import summation
from sympy.functions import KroneckerDelta
(k, a, n) = limits
if k not in term.free_symbols:
if (term - 1).is_zero:
return S.One
return term**(n - a + 1)
if a == n:
return term.subs(k, a)
if term.has(KroneckerDelta) and _has_simple_delta(term, limits[0]):
return deltaproduct(term, limits)
dif = n - a
if dif.is_Integer:
return Mul(*[term.subs(k, a + i) for i in xrange(dif + 1)])
elif term.is_polynomial(k):
poly = term.as_poly(k)
A = B = Q = S.One
all_roots = roots(poly)
M = 0
for r, m in all_roots.items():
M += m
A *= C.RisingFactorial(a - r, n - a + 1)**m
Q *= (n - r)**m
if M < poly.degree():
arg = quo(poly, Q.as_poly(k))
B = self.func(arg, (k, a, n)).doit()
return poly.LC()**(n - a + 1) * A * B
elif term.is_Add:
p, q = term.as_numer_denom()
p = self._eval_product(p, (k, a, n))
q = self._eval_product(q, (k, a, n))
return p / q
elif term.is_Mul:
exclude, include = [], []
for t in term.args:
p = self._eval_product(t, (k, a, n))
if p is not None:
exclude.append(p)
else:
include.append(t)
if not exclude:
return None
else:
arg = term._new_rawargs(*include)
A = Mul(*exclude)
B = self.func(arg, (k, a, n)).doit()
return A * B
elif term.is_Pow:
if not term.base.has(k):
s = summation(term.exp, (k, a, n))
return term.base**s
elif not term.exp.has(k):
p = self._eval_product(term.base, (k, a, n))
if p is not None:
return p**term.exp
elif isinstance(term, Product):
evaluated = term.doit()
f = self._eval_product(evaluated, limits)
if f is None:
return self.func(evaluated, limits)
else:
return f
def _eval_simplify(self, ratio, measure):
from sympy.simplify.simplify import product_simplify
return product_simplify(self)
def _eval_transpose(self):
if self.is_commutative:
return self.func(self.function.transpose(), *self.limits)
return None
def reverse_order(expr, *indices):
"""
Reverse the order of a limit in a Product.
Usage
=====
``reverse_order(expr, *indices)`` reverses some limits in the expression
``expr`` which can be either a ``Sum`` or a ``Product``. The selectors in
the argument ``indices`` specify some indices whose limits get reversed.
These selectors are either variable names or numerical indices counted
starting from the inner-most limit tuple.
Examples
========
>>> from sympy import Product, simplify, RisingFactorial, gamma, Sum
>>> from sympy.abc import x, y, a, b, c, d
>>> P = Product(x, (x, a, b))
>>> Pr = P.reverse_order(x)
>>> Pr
Product(1/x, (x, b + 1, a - 1))
>>> Pr = Pr.doit()
>>> Pr
1/RisingFactorial(b + 1, a - b - 1)
>>> simplify(Pr)
gamma(b + 1)/gamma(a)
>>> P = P.doit()
>>> P
RisingFactorial(a, -a + b + 1)
>>> simplify(P)
gamma(b + 1)/gamma(a)
While one should prefer variable names when specifying which limits
to reverse, the index counting notation comes in handy in case there
are several symbols with the same name.
>>> S = Sum(x*y, (x, a, b), (y, c, d))
>>> S
Sum(x*y, (x, a, b), (y, c, d))
>>> S0 = S.reverse_order( 0)
>>> S0
Sum(-x*y, (x, b + 1, a - 1), (y, c, d))
>>> S1 = S0.reverse_order( 1)
>>> S1
Sum(x*y, (x, b + 1, a - 1), (y, d + 1, c - 1))
Of course we can mix both notations:
>>> Sum(x*y, (x, a, b), (y, 2, 5)).reverse_order( x, 1)
Sum(x*y, (x, b + 1, a - 1), (y, 6, 1))
>>> Sum(x*y, (x, a, b), (y, 2, 5)).reverse_order( y, x)
Sum(x*y, (x, b + 1, a - 1), (y, 6, 1))
See Also
========
index, reorder_limit, reorder
References
==========
.. [1] Michael Karr, "Summation in Finite Terms", Journal of the ACM,
Volume 28 Issue 2, April 1981, Pages 305-350
http://dl.acm.org/citation.cfm?doid=322248.322255
"""
l_indices = list(indices)
for i, indx in enumerate(l_indices):
if not isinstance(indx, int):
l_indices[i] = expr.index(indx)
e = 1
limits = []
for i, limit in enumerate(expr.limits):
l = limit
if i in l_indices:
e = -e
l = (limit[0], limit[2] + 1 , limit[1] - 1)
limits.append(l)
return Product(expr.function ** e, *limits)
def product(*args, **kwargs):
r"""
Compute the product.
    The notation for symbols is similar to the notation used in Sum or
Integral. product(f, (i, a, b)) computes the product of f with
respect to i from a to b, i.e.,
::
b
_____
product(f(n), (i, a, b)) = | | f(n)
| |
i = a
If it cannot compute the product, it returns an unevaluated Product object.
    Repeated products can be computed by introducing additional symbol tuples::
>>> from sympy import product, symbols
>>> i, n, m, k = symbols('i n m k', integer=True)
>>> product(i, (i, 1, k))
factorial(k)
>>> product(m, (i, 1, k))
m**k
>>> product(i, (i, 1, k), (k, 1, n))
Product(factorial(k), (k, 1, n))
"""
prod = Product(*args, **kwargs)
if isinstance(prod, Product):
return prod.doit(deep=False)
else:
return prod
| beni55/sympy | sympy/concrete/products.py | Python | bsd-3-clause | 13,558 |
# -*- coding: utf-8 -*-
"""
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Copyright (C) 2016 Alejandro F. Carrera <[email protected]>
This file is part of Librairy. <https://github.com/librairy>
Licensed under Apache License. See LICENSE for more info.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
"""
| librairy/web | librairy_src/__init__.py | Python | apache-2.0 | 379 |
#!/usr/bin/env python3
"""Maximum values sampled from various controllers.
--Jacques Gagnon <[email protected]>
"""
from collections import namedtuple
Point = namedtuple('Point', 'x y')
Origin = namedtuple('Origin', 'up right down left')
Diagonal = namedtuple('Diagonal', 'up_right down_right down_left up_left')
Maximum = namedtuple('Maximum', 'origin diagonal')
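# Reading an entry (hypothetical usage, not part of the capture scripts):
# CTRL_DATA["GC"][0][0] is the first Maximum sampled for the silver DOL-003 pad;
# its .origin.up is the Point reached with the main stick held straight up
# (here Point(0, 0x5b)) and .diagonal.up_right is the Point for that corner.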
CTRL_DATA = {
# Data sampled directly from N64 controller.
"N64": [
# 1996 NUS-005 Launch Controller (Solid Grey)
# This controller joystick is quite beaten up as you can imagine.
# Ambiguous bit sent to it (2 us low, 2 us high) are randomly interpreted as either 0 or 1.
#[
# Maximum(
# Origin(Point(0, 0x47), Point(0x42, 0), Point(0, -0x50), Point(-0x47, 0)),
# Diagonal(Point(0x38, 0x39), Point(0x3c, -0x3f), Point(-0x38, -0x45), Point(-0x41, 0x40))
# ),
#],
# ~2002 NUS-005 Plastic Packaging Controller (Grape Purple)
# Joystick is in excellent condition.
# Older controllers will report controller slot status 0x00 (Empty) at power up.
# This one report right away 0x02 (Emptied). Cube64 mimic this behavior when disabling
# rumble emulation at boot up.
# Ambiguous bit sent to it (2 us low, 2 us high) are always interpreted as 1.
[
Maximum(
Origin(Point(0, 0x57), Point(0x52, 0), Point(0, -0x55), Point(-0x52, 0)),
Diagonal(Point(0x47, 0x48), Point(0x48, -0x45), Point(-0x43, -0x46), Point(-0x48, 0x49))
),
],
# 2003 iQue Controller
# Brand-new
# Internally a regular N64 controller. No slot externally.
#[
# Maximum(
# Origin(Point(0, 0x53), Point(0x54, 0), Point(0, -0x57), Point(-0x58, 0)),
# Diagonal(Point(0x45, 0x42), Point(0x44, -0x44), Point(-0x4a, -0x4a), Point(-0x4a, 0x41))
# ),
#],
# Horipad Mini 64
# Brand-new
# Joystick do not respect regular controller range.
# The bit period is 6 us!
#[
# Maximum(
# Origin(Point(0, 0x57), Point(0x70, 0), Point(0, -0x66), Point(-0x5e, 0)),
# Diagonal(Point(0x56, 0x46), Point(0x59, -0x4e), Point(-0x3f, -0x51), Point(-0x41, 0x45))
# ),
#],
],
# Data sampled through the Cube64 adaptor without any scaling.
# Deadzone and sign was applied.
"GC": [
# DOL-003 Controller (Silver)
# Joystick is in excellent condition.
[
Maximum(
Origin(Point(0, 0x5b), Point(0x5a, 0), Point(0, -0x5b), Point(-0x5b, 0)),
Diagonal(Point(0x45, 0x43), Point(0x44, -0x41), Point(-0x42, -0x3c), Point(-0x40, 0x42))
),
Maximum(
Origin(Point(0, 0x53), Point(0x54, 0), Point(0, -0x55), Point(-0x53, 0)),
Diagonal(Point(0x40, 0x3a), Point(0x3e, -0x3e), Point(-0x3d, -0x37), Point(-0x39, 0x3b))
),
],
# DOL-003 Controller SSBB Wii (White)
# Brand-new.
[
Maximum(
Origin(Point(0, 0x5a), Point(0x59, 0), Point(0, -0x57), Point(-0x53, 0)),
Diagonal(Point(0x42, 0x41), Point(0x41, -0x46), Point(-0x3a, -0x43), Point(-0x3a, 0x41))
),
Maximum(
Origin(Point(0, 0x4e), Point(0x51, 0), Point(0, -0x5c), Point(-0x50, 0)),
Diagonal(Point(0x3b, 0x35), Point(0x3a, -0x44), Point(-0x38, -0x3e), Point(-0x35, 0x36))
),
],
# DOL-003 Controller SSBB Wii (White)
# Brand-new.
[
Maximum(
Origin(Point(0, 0x59), Point(0x55, 0), Point(0, -0x5e), Point(-0x59, 0)),
Diagonal(Point(0x3c, 0x42), Point(0x3c, -0x46), Point(-0x3f, -0x42), Point(-0x3f, 0x40))
),
Maximum(
Origin(Point(0, 0x51), Point(0x4c, 0), Point(0, -0x5e), Point(-0x55, 0)),
Diagonal(Point(0x35, 0x39), Point(0x35, -0x41), Point(-0x3d, -0x3d), Point(-0x3b, 0x39))
),
],
# DOL-003 Controller SSB WiiU (White)
# Brand-new.
[
Maximum(
Origin(Point(0, 0x5d), Point(0x5a, 0), Point(0, -0x5a), Point(-0x59, 0)),
Diagonal(Point(0x41, 0x43), Point(0x41, -0x3f), Point(-0x41, -0x3c), Point(-0x40, 0x41))
),
Maximum(
Origin(Point(0, 0x50), Point(0x52, 0), Point(0, -0x5b), Point(-0x57, 0)),
Diagonal(Point(0x3c, 0x37), Point(0x3b, -0x43), Point(-0x41, -0x3c), Point(-0x3e, 0x37))
),
],
# WaveBird Controller (Silver)
# Joystick is in excellent condition.
[
Maximum(
Origin(Point(0, 0x5f), Point(0x58, 0), Point(0, -0x5d), Point(-0x59, 0)),
Diagonal(Point(0x3e, 0x43), Point(0x3f, -0x42), Point(-0x3e, -0x40), Point(-0x40, 0x43))
),
Maximum(
Origin(Point(0, 0x57), Point(0x52, 0), Point(0, -0x4e), Point(-0x4d, 0)),
Diagonal(Point(0x38, 0x3d), Point(0x38, -0x35), Point(-0x35, -0x36), Point(-0x35, 0x3a))
),
],
# WaveBird Controller (Silver)
# Joystick is in excellent condition.
[
Maximum(
Origin(Point(0, 0x5e), Point(0x57, 0), Point(0, -0x56), Point(-0x5a, 0)),
Diagonal(Point(0x3c, 0x44), Point(0x3d, -0x3c), Point(-0x4d, -0x3c), Point(-0x43, 0x45))
),
Maximum(
Origin(Point(0, 0x52), Point(0x4a, 0), Point(0, -0x4e), Point(-0x56, 0)),
Diagonal(Point(0x32, 0x38), Point(0x33, -0x36), Point(-0x3f, -0x35), Point(-0x3c, 0x39))
),
],
# WaveBird Controller (Grey)
# Joystick is in excellent condition.
[
Maximum(
Origin(Point(0, 0x64), Point(0x5b, 0), Point(0, -0x59), Point(-0x60, 0)),
Diagonal(Point(0x40, 0x49), Point(0x42, -0x3e), Point(-0x46, -0x3c), Point(-0x4a, 0x4c))
),
Maximum(
Origin(Point(0, 0x4f), Point(0x4f, 0), Point(0, -0x56), Point(-0x57, 0)),
Diagonal(Point(0x35, 0x34), Point(0x37, -0x3a), Point(-0x3f, -0x38), Point(-0x3e, 0x37))
),
],
# Sammy Keyboard Controller
# Brand-new.
#[
# Maximum(
# Origin(Point(0, 0x5d), Point(0x5c, 0), Point(0, -0x5b), Point(-0x5e, 0)),
# Diagonal(Point(0x43, 0x41), Point(0x41, -0x3e), Point(-0x42, -0x3d), Point(-0x47, 0x43))
# ),
# Maximum(
# Origin(Point(0, 0x4e), Point(0x49, 0), Point(0, -0x50), Point(-0x5a, 0)),
# Diagonal(Point(0x33, 0x39), Point(0x34, -0x35), Point(-0x3c, -0x37), Point(-0x40, 0x34))
# ),
#],
# GC DS5 3/4 Speed Prototype Controller
# Joystick is in excellent condition.
# The bit period is 5.3 us (3/4 of regular frequency)
#[
# Maximum(
# Origin(Point(0, 0x55), Point(0x55, 0), Point(0, -0x56), Point(-0x58, 0)),
# Diagonal(Point(0x38, 0x38), Point(0x3a, -0x3a), Point(-0x41, -0x42), Point(-0x3b, 0x3a))
# ),
# Maximum(
# Origin(Point(0, 0x51), Point(0x5a, 0), Point(0, -0x6f), Point(-0x5d, 0)),
# Diagonal(Point(0x3b, 0x35), Point(0x42, -0x50), Point(-0x43, -0x58), Point(-0x3f, 0x32))
# ),
#],
]
}
### The End ###
| darthcloud/cube64-dx | notes/controller_data.py | Python | gpl-2.0 | 7,661 |
"""add biobank version
Revision ID: e44e303ae759
Revises: 0e92151ebd4a
Create Date: 2018-08-20 10:08:20.904521
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "e44e303ae759"
down_revision = "0e92151ebd4a"
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column("biobank_order", sa.Column("version", sa.Integer(), nullable=False))
# ### end Alembic commands ###
def downgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("biobank_order", "version")
# ### end Alembic commands ###
def upgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
| all-of-us/raw-data-repository | rdr_service/alembic/versions/e44e303ae759_add_biobank_version.py | Python | bsd-3-clause | 1,102 |
import mock
from unittest2 import TestCase
from marimo.middleware import MarimoEventContainer, Marimo, context_processor
class TestMiddleware(TestCase):
def setUp(self):
self.middleware = Marimo()
def tearDown(self):
del self.middleware
def test_process_request_marimo_widgets_added(self):
req = mock.Mock()
self.middleware.process_request(req)
self.assertTrue(isinstance(req.marimo_widgets, list))
def test_process_request_marimo_writecapture_delay_added(self):
req = mock.Mock()
self.middleware.process_request(req)
self.assertTrue(isinstance(req.marimo_writecapture_delay,
MarimoEventContainer))
def test_process_response_marimo_widgets_added(self):
req = mock.Mock()
req.marimo_writecapture_delay = MarimoEventContainer()
req.marimo_widgets = ['dummywidget']
resp = mock.Mock()
resp.content = "dummytext ${MARIMO} moredumbtext"
self.middleware.process_response(req, resp)
self.assertTrue("dummywidget" in resp.content)
def test_process_response_marimo_writecapture_delay_added(self):
req = mock.Mock()
req.marimo_widgets = []
req.marimo_writecapture_delay = MarimoEventContainer("documentready")
resp = mock.Mock()
resp.content = "dummytext ${MARIMO} moredumbtext"
self.middleware.process_response(req, resp)
self.assertTrue("documentready" in resp.content)
class TestContextProcessor(TestCase):
def setUp(self):
self.request = mock.Mock()
self.request.marimo_widgets = ['dummywidget']
self.request.marimo_writecapture_delay = MarimoEventContainer()
def tearDown(self):
del self.request
def test_marimo_widgets_added_to_context(self):
extra_context = context_processor(self.request)
self.assertEqual(extra_context["marimo_widgets"],
self.request.marimo_widgets)
def test_marimo_writecapture_delay_added_to_context(self):
extra_context = context_processor(self.request)
self.assertEqual(extra_context["marimo_writecapture_delay"],
self.request.marimo_writecapture_delay)
| brandonivey/django-marimo | marimo/tests/test_middleware.py | Python | mit | 2,252 |
"""Base class for Acmeda Roller Blinds."""
import aiopulse
from homeassistant.core import callback
from homeassistant.helpers import entity
from homeassistant.helpers.device_registry import async_get_registry as get_dev_reg
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_registry import async_get_registry as get_ent_reg
from .const import ACMEDA_ENTITY_REMOVE, DOMAIN, LOGGER
class AcmedaBase(entity.Entity):
"""Base representation of an Acmeda roller."""
def __init__(self, roller: aiopulse.Roller):
"""Initialize the roller."""
self.roller = roller
async def async_remove_and_unregister(self):
"""Unregister from entity and device registry and call entity remove function."""
LOGGER.error("Removing %s %s", self.__class__.__name__, self.unique_id)
ent_registry = await get_ent_reg(self.hass)
if self.entity_id in ent_registry.entities:
ent_registry.async_remove(self.entity_id)
dev_registry = await get_dev_reg(self.hass)
device = dev_registry.async_get_device(
identifiers={(DOMAIN, self.unique_id)}, connections=set()
)
if device is not None:
dev_registry.async_update_device(
device.id, remove_config_entry_id=self.registry_entry.config_entry_id
)
await self.async_remove()
async def async_added_to_hass(self):
"""Entity has been added to hass."""
self.roller.callback_subscribe(self.notify_update)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
ACMEDA_ENTITY_REMOVE.format(self.roller.id),
self.async_remove_and_unregister,
)
)
async def async_will_remove_from_hass(self):
"""Entity being removed from hass."""
self.roller.callback_unsubscribe(self.notify_update)
@callback
def notify_update(self):
"""Write updated device state information."""
LOGGER.debug("Device update notification received: %s", self.name)
self.async_write_ha_state()
@property
def should_poll(self):
"""Report that Acmeda entities do not need polling."""
return False
@property
def unique_id(self):
"""Return the unique ID of this roller."""
return self.roller.id
@property
def device_id(self):
"""Return the ID of this roller."""
return self.roller.id
@property
def name(self):
"""Return the name of roller."""
return self.roller.name
@property
def device_info(self):
"""Return the device info."""
return {
"identifiers": {(DOMAIN, self.unique_id)},
"name": self.roller.name,
"manufacturer": "Rollease Acmeda",
"via_device": (DOMAIN, self.roller.hub.id),
}
| tchellomello/home-assistant | homeassistant/components/acmeda/base.py | Python | apache-2.0 | 2,925 |
from ..broker import Broker
class IprgBroker(Broker):
controller = "iprgs"
def show(self, **kwargs):
"""Shows the details for the specified iprg.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param IprgID: The internal NetMRI identifier for this HSRP/VRRP Group.
:type IprgID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of iprg methods. The listed methods will be called on each iprg returned and included in the output. Available methods are: active_member, data_source.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: active_member, data_source.
:type include: Array of String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return iprg: The iprg identified by the specified IprgID.
:rtype iprg: Iprg
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def index(self, **kwargs):
"""Lists the available iprgs. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgID: The internal NetMRI identifier for this HSRP/VRRP Group.
:type IprgID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgID: The internal NetMRI identifier for this HSRP/VRRP Group.
:type IprgID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgIPNumeric: The numerical value of the HSRP/VRRP virtual IP address.
:type IprgIPNumeric: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgIPNumeric: The numerical value of the HSRP/VRRP virtual IP address.
:type IprgIPNumeric: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgNumber: The HSRP or VRRP group number.
:type IprgNumber: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgNumber: The HSRP or VRRP group number.
:type IprgNumber: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the iprgs as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of iprg methods. The listed methods will be called on each iprg returned and included in the output. Available methods are: active_member, data_source.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: active_member, data_source.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` IprgID
:param sort: The data field(s) to use for sorting the output. Default is IprgID. Valid values are IprgID, DataSourceID, IprgStartTime, IprgEndTime, IprgTimestamp, IprgChangedCols, ActiveIprgMemberID, IprgNumber, IprgIPDotted, IprgIPNumeric, IprgMAC, IprgAuth, IprgType, IprgActiveLastChanged.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each Iprg. Valid values are IprgID, DataSourceID, IprgStartTime, IprgEndTime, IprgTimestamp, IprgChangedCols, ActiveIprgMemberID, IprgNumber, IprgIPDotted, IprgIPNumeric, IprgMAC, IprgAuth, IprgType, IprgActiveLastChanged. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return iprgs: An array of the Iprg objects that match the specified input criteria.
:rtype iprgs: Array of Iprg
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
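    # Sketch of an index() call built from the documented inputs above; the
    # values are made up for illustration:
    #
    #     iprgs = iprg_broker.index(IprgNumber=10, limit=100, sort=["IprgID"])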
def search(self, **kwargs):
"""Lists the available iprgs matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param ActiveIprgMemberID: The internal NetMRI identifier for the HSRP/VRRP group membership details of the active router.
:type ActiveIprgMemberID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ActiveIprgMemberID: The internal NetMRI identifier for the HSRP/VRRP group membership details of the active router.
:type ActiveIprgMemberID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgActiveLastChanged: The date and time of the last change of the active or master router for this group.
:type IprgActiveLastChanged: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgActiveLastChanged: The date and time of the last change of the active or master router for this group.
:type IprgActiveLastChanged: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgAuth: The authentication method for this HSRP or VRRP group.
:type IprgAuth: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgAuth: The authentication method for this HSRP or VRRP group.
:type IprgAuth: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgChangedCols: The fields that changed between this revision of the record and the previous revision.
:type IprgChangedCols: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgChangedCols: The fields that changed between this revision of the record and the previous revision.
:type IprgChangedCols: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgEndTime: The ending effective time of this revision of this record, or empty if still in effect.
:type IprgEndTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgEndTime: The ending effective time of this revision of this record, or empty if still in effect.
:type IprgEndTime: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgID: The internal NetMRI identifier for this HSRP/VRRP Group.
:type IprgID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgID: The internal NetMRI identifier for this HSRP/VRRP Group.
:type IprgID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgIPDotted: The virtual IP address for this HSRP/VRRP group, in dotted (or colon-delimited for IPv6) format.
:type IprgIPDotted: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgIPDotted: The virtual IP address for this HSRP/VRRP group, in dotted (or colon-delimited for IPv6) format.
:type IprgIPDotted: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgIPNumeric: The numerical value of the HSRP/VRRP virtual IP address.
:type IprgIPNumeric: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgIPNumeric: The numerical value of the HSRP/VRRP virtual IP address.
:type IprgIPNumeric: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgMAC: The virtual MAC for this HSRP or VRRP group.
:type IprgMAC: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgMAC: The virtual MAC for this HSRP or VRRP group.
:type IprgMAC: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgNumber: The HSRP or VRRP group number.
:type IprgNumber: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgNumber: The HSRP or VRRP group number.
:type IprgNumber: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgStartTime: The starting effective time of this revision of the record.
:type IprgStartTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgStartTime: The starting effective time of this revision of the record.
:type IprgStartTime: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgTimestamp: The date and time this record was collected or calculated.
:type IprgTimestamp: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgTimestamp: The date and time this record was collected or calculated.
:type IprgTimestamp: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgType: Designates if this is an HSRP group or a VRRP group.
:type IprgType: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgType: Designates if this is an HSRP group or a VRRP group.
:type IprgType: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the iprgs as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of iprg methods. The listed methods will be called on each iprg returned and included in the output. Available methods are: active_member, data_source.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: active_member, data_source.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` IprgID
:param sort: The data field(s) to use for sorting the output. Default is IprgID. Valid values are IprgID, DataSourceID, IprgStartTime, IprgEndTime, IprgTimestamp, IprgChangedCols, ActiveIprgMemberID, IprgNumber, IprgIPDotted, IprgIPNumeric, IprgMAC, IprgAuth, IprgType, IprgActiveLastChanged.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each Iprg. Valid values are IprgID, DataSourceID, IprgStartTime, IprgEndTime, IprgTimestamp, IprgChangedCols, ActiveIprgMemberID, IprgNumber, IprgIPDotted, IprgIPNumeric, IprgMAC, IprgAuth, IprgType, IprgActiveLastChanged. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against iprgs, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: ActiveIprgMemberID, DataSourceID, IprgActiveLastChanged, IprgAuth, IprgChangedCols, IprgEndTime, IprgID, IprgIPDotted, IprgIPNumeric, IprgMAC, IprgNumber, IprgStartTime, IprgTimestamp, IprgType.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
             :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return iprgs: An array of the Iprg objects that match the specified input criteria.
:rtype iprgs: Array of Iprg
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
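    # Sketch of a search() call; the query value is a made-up example of the
    # containment/regex matching described above:
    #
    #     matches = iprg_broker.search(IprgType="hsrp", query="10.0.0.1")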
def find(self, **kwargs):
"""Lists the available iprgs matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: ActiveIprgMemberID, DataSourceID, IprgActiveLastChanged, IprgAuth, IprgChangedCols, IprgEndTime, IprgID, IprgIPDotted, IprgIPNumeric, IprgMAC, IprgNumber, IprgStartTime, IprgTimestamp, IprgType.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ActiveIprgMemberID: The operator to apply to the field ActiveIprgMemberID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ActiveIprgMemberID: The internal NetMRI identifier for the HSRP/VRRP group membership details of the active router. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ActiveIprgMemberID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ActiveIprgMemberID: If op_ActiveIprgMemberID is specified, the field named in this input will be compared to the value in ActiveIprgMemberID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ActiveIprgMemberID must be specified if op_ActiveIprgMemberID is specified.
:type val_f_ActiveIprgMemberID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ActiveIprgMemberID: If op_ActiveIprgMemberID is specified, this value will be compared to the value in ActiveIprgMemberID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ActiveIprgMemberID must be specified if op_ActiveIprgMemberID is specified.
:type val_c_ActiveIprgMemberID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DataSourceID: The operator to apply to the field DataSourceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DataSourceID: If op_DataSourceID is specified, the field named in this input will be compared to the value in DataSourceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DataSourceID must be specified if op_DataSourceID is specified.
:type val_f_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DataSourceID: If op_DataSourceID is specified, this value will be compared to the value in DataSourceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DataSourceID must be specified if op_DataSourceID is specified.
:type val_c_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgActiveLastChanged: The operator to apply to the field IprgActiveLastChanged. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgActiveLastChanged: The date and time of the last change of the active or master router for this group. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgActiveLastChanged: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgActiveLastChanged: If op_IprgActiveLastChanged is specified, the field named in this input will be compared to the value in IprgActiveLastChanged using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgActiveLastChanged must be specified if op_IprgActiveLastChanged is specified.
:type val_f_IprgActiveLastChanged: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgActiveLastChanged: If op_IprgActiveLastChanged is specified, this value will be compared to the value in IprgActiveLastChanged using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgActiveLastChanged must be specified if op_IprgActiveLastChanged is specified.
:type val_c_IprgActiveLastChanged: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgAuth: The operator to apply to the field IprgAuth. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgAuth: The authentication method for this HSRP or VRRP group. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgAuth: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgAuth: If op_IprgAuth is specified, the field named in this input will be compared to the value in IprgAuth using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgAuth must be specified if op_IprgAuth is specified.
:type val_f_IprgAuth: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgAuth: If op_IprgAuth is specified, this value will be compared to the value in IprgAuth using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgAuth must be specified if op_IprgAuth is specified.
:type val_c_IprgAuth: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgChangedCols: The operator to apply to the field IprgChangedCols. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgChangedCols: The fields that changed between this revision of the record and the previous revision. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgChangedCols: If op_IprgChangedCols is specified, the field named in this input will be compared to the value in IprgChangedCols using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgChangedCols must be specified if op_IprgChangedCols is specified.
:type val_f_IprgChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgChangedCols: If op_IprgChangedCols is specified, this value will be compared to the value in IprgChangedCols using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgChangedCols must be specified if op_IprgChangedCols is specified.
:type val_c_IprgChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgEndTime: The operator to apply to the field IprgEndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgEndTime: The ending effective time of this revision of this record, or empty if still in effect. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgEndTime: If op_IprgEndTime is specified, the field named in this input will be compared to the value in IprgEndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgEndTime must be specified if op_IprgEndTime is specified.
:type val_f_IprgEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgEndTime: If op_IprgEndTime is specified, this value will be compared to the value in IprgEndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgEndTime must be specified if op_IprgEndTime is specified.
:type val_c_IprgEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgID: The operator to apply to the field IprgID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgID: The internal NetMRI identifier for this HSRP/VRRP Group. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgID: If op_IprgID is specified, the field named in this input will be compared to the value in IprgID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgID must be specified if op_IprgID is specified.
:type val_f_IprgID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgID: If op_IprgID is specified, this value will be compared to the value in IprgID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgID must be specified if op_IprgID is specified.
:type val_c_IprgID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgIPDotted: The operator to apply to the field IprgIPDotted. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgIPDotted: The virtual IP address for this HSRP/VRRP group, in dotted (or colon-delimited for IPv6) format. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgIPDotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgIPDotted: If op_IprgIPDotted is specified, the field named in this input will be compared to the value in IprgIPDotted using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgIPDotted must be specified if op_IprgIPDotted is specified.
:type val_f_IprgIPDotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgIPDotted: If op_IprgIPDotted is specified, this value will be compared to the value in IprgIPDotted using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgIPDotted must be specified if op_IprgIPDotted is specified.
:type val_c_IprgIPDotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgIPNumeric: The operator to apply to the field IprgIPNumeric. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgIPNumeric: The numerical value of the HSRP/VRRP virtual IP address. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgIPNumeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgIPNumeric: If op_IprgIPNumeric is specified, the field named in this input will be compared to the value in IprgIPNumeric using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgIPNumeric must be specified if op_IprgIPNumeric is specified.
:type val_f_IprgIPNumeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgIPNumeric: If op_IprgIPNumeric is specified, this value will be compared to the value in IprgIPNumeric using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgIPNumeric must be specified if op_IprgIPNumeric is specified.
:type val_c_IprgIPNumeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgMAC: The operator to apply to the field IprgMAC. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgMAC: The virtual MAC for this HSRP or VRRP group. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgMAC: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgMAC: If op_IprgMAC is specified, the field named in this input will be compared to the value in IprgMAC using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgMAC must be specified if op_IprgMAC is specified.
:type val_f_IprgMAC: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgMAC: If op_IprgMAC is specified, this value will be compared to the value in IprgMAC using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgMAC must be specified if op_IprgMAC is specified.
:type val_c_IprgMAC: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgNumber: The operator to apply to the field IprgNumber. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgNumber: The HSRP or VRRP group number. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgNumber: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgNumber: If op_IprgNumber is specified, the field named in this input will be compared to the value in IprgNumber using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgNumber must be specified if op_IprgNumber is specified.
:type val_f_IprgNumber: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgNumber: If op_IprgNumber is specified, this value will be compared to the value in IprgNumber using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgNumber must be specified if op_IprgNumber is specified.
:type val_c_IprgNumber: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgStartTime: The operator to apply to the field IprgStartTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgStartTime: The starting effective time of this revision of the record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgStartTime: If op_IprgStartTime is specified, the field named in this input will be compared to the value in IprgStartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgStartTime must be specified if op_IprgStartTime is specified.
:type val_f_IprgStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgStartTime: If op_IprgStartTime is specified, this value will be compared to the value in IprgStartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgStartTime must be specified if op_IprgStartTime is specified.
:type val_c_IprgStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgTimestamp: The operator to apply to the field IprgTimestamp. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgTimestamp: The date and time this record was collected or calculated. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgTimestamp: If op_IprgTimestamp is specified, the field named in this input will be compared to the value in IprgTimestamp using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgTimestamp must be specified if op_IprgTimestamp is specified.
:type val_f_IprgTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgTimestamp: If op_IprgTimestamp is specified, this value will be compared to the value in IprgTimestamp using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgTimestamp must be specified if op_IprgTimestamp is specified.
:type val_c_IprgTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgType: The operator to apply to the field IprgType. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgType: Designates if this is an HSRP group or a VRRP group. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgType: If op_IprgType is specified, the field named in this input will be compared to the value in IprgType using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgType must be specified if op_IprgType is specified.
:type val_f_IprgType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgType: If op_IprgType is specified, this value will be compared to the value in IprgType using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgType must be specified if op_IprgType is specified.
:type val_c_IprgType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the iprgs as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of iprg methods. The listed methods will be called on each iprg returned and included in the output. Available methods are: active_member, data_source.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: active_member, data_source.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` IprgID
:param sort: The data field(s) to use for sorting the output. Default is IprgID. Valid values are IprgID, DataSourceID, IprgStartTime, IprgEndTime, IprgTimestamp, IprgChangedCols, ActiveIprgMemberID, IprgNumber, IprgIPDotted, IprgIPNumeric, IprgMAC, IprgAuth, IprgType, IprgActiveLastChanged.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each Iprg. Valid values are IprgID, DataSourceID, IprgStartTime, IprgEndTime, IprgTimestamp, IprgChangedCols, ActiveIprgMemberID, IprgNumber, IprgIPDotted, IprgIPNumeric, IprgMAC, IprgAuth, IprgType, IprgActiveLastChanged. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
             :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return iprgs: An array of the Iprg objects that match the specified input criteria.
:rtype iprgs: Array of Iprg
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
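    # Sketch of a find() call using the op_/val_c_ convention documented
    # above (operator and constant are illustrative only):
    #
    #     results = iprg_broker.find(op_IprgNumber=">=", val_c_IprgNumber="10")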
def data_source(self, **kwargs):
"""The collector NetMRI that collected this data record.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param IprgID: The internal NetMRI identifier for this HSRP/VRRP Group.
:type IprgID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The collector NetMRI that collected this data record.
:rtype : DataSource
"""
return self.api_request(self._get_method_fullname("data_source"), kwargs)
def active_member(self, **kwargs):
"""The HSRP/VRRP group membership details of the active router.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param IprgID: The internal NetMRI identifier for this HSRP/VRRP Group.
:type IprgID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The HSRP/VRRP group membership details of the active router.
:rtype : IprgMember
"""
return self.api_request(self._get_method_fullname("active_member"), kwargs)
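    # Both association helpers above key off IprgID, e.g. (illustrative):
    #
    #     member = iprg_broker.active_member(IprgID=1)
    #     source = iprg_broker.data_source(IprgID=1)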
| infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v3_8_0/iprg_broker.py | Python | apache-2.0 | 54,667 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from models import User, Blog
from transwarp import db
db.create_engine(user='www-data', password='www-data', database='awesome')
#===============================================================================
# # Create a user
# user = User(name='Test', email='[email protected]', password='1234567890', image='about:blank')
# user.insert()
# print 'new user id:', user.id
#
# # Create a blog
# blog = Blog(user_id = user.id, user_name = user.name, user_image = user.image, name = 'Test Blog', summary = 'A wolf had been badly wounded by dogs. He lay sick and maimed in his lair.')
# blog.insert()
# print 'new blog id:', blog.id, 'summary:', blog.summary
#===============================================================================
user = User.find_first('where email=?', '[email protected]')
print 'find user\'s name:', user.name
blog2 = Blog(user_id = user.id, user_name = user.name, user_image = user.image, name = 'Learn python', summary = 'A wolf had been badly wounded by dogs. He lay sick and maimed in his lair.He felt very hungry and thirsty. When a sheep passed by')
blog2.insert()
#===============================================================================
# u = User(name='Test', email='[email protected]', password='1234567890', image='about:blank')
#
# u.insert()
#
# print 'new user id:', u.id
#
#
# print 'find user\'s name:', u1.name
#
# u1.delete()
#
# u2 = User.find_first('where email=?', '[email protected]')
# print 'find user:', u2
#===============================================================================
| rushlee2016/awesome-python-webapp | www/test_db.py | Python | gpl-2.0 | 1,605 |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest import test
CONF = config.CONF
class FloatingIPDetailsNegativeTestJSON(base.BaseV2ComputeTest):
@classmethod
def resource_setup(cls):
super(FloatingIPDetailsNegativeTestJSON, cls).resource_setup()
cls.client = cls.floating_ips_client
@test.attr(type=['negative', 'gate'])
@test.services('network')
def test_get_nonexistent_floating_ip_details(self):
        # Negative test: Should not be able to GET the details
# of non-existent floating IP
# Creating a non-existent floatingIP id
if CONF.service_available.neutron:
non_exist_id = str(uuid.uuid4())
else:
non_exist_id = data_utils.rand_int_id(start=999)
self.assertRaises(exceptions.NotFound,
self.client.get_floating_ip_details, non_exist_id)
class FloatingIPDetailsNegativeTestXML(FloatingIPDetailsNegativeTestJSON):
_interface = 'xml'
| queria/my-tempest | tempest/api/compute/floating_ips/test_list_floating_ips_negative.py | Python | apache-2.0 | 1,727 |
from dallinger.nodes import Source
import random
import base64
import os
import json
class DrawingSource(Source):
"""A Source that reads in a random image from a file and transmits it."""
__mapper_args__ = {
"polymorphic_identity": "drawing_source"
}
def _contents(self):
"""Define the contents of new Infos.
transmit() -> _what() -> create_information() -> _contents().
"""
images = [
"owl.png",
]
image = random.choice(images)
image_path = os.path.join("static", "stimuli", image)
uri_encoded_image = (
"data:image/png;base64," +
base64.b64encode(open(image_path, "rb").read())
)
return json.dumps({
"image": uri_encoded_image,
"sketch": ""
})
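    # Note: base64.b64encode() returns bytes on Python 3, so the concatenation
    # in _contents() assumes Python 2; on Python 3 the encoded image would need
    # .decode("ascii") before being prepended with the data-URI header.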
| jcpeterson/Dallinger | demos/dlgr/demos/iterated_drawing/models.py | Python | mit | 826 |
from components.component import Component
from components.messages import QueryType
from managers.echo import EchoService
from stats.enums import StatsEnum
from util.decorators import cached, invalidate_cache
class Equipment(Component):
NAME = "equipment"
"""
This component attaches itself to anything with a bodies.
It represents equipment worn or wielded
"""
def __init__(self):
super().__init__()
self.host_body = None
self.worn_equipment_map = {}
self.wielded_equipment_map = {}
def copy(self):
# TODO Copying an equipment to another type of body would require some sort of validation.
# TODO Removing or dropping invalid mappings.
new_equipment = Equipment()
new_equipment.host_body = self.host_body
new_equipment.worn_equipment_map = self.__copy_all_items(self.worn_equipment_map)
new_equipment.wielded_equipment_map = self.__copy_all_items(self.wielded_equipment_map)
return new_equipment
    @staticmethod
    def __copy_all_items(collection):
        # Deep-copy a mapping of body parts to either a list of items (worn
        # equipment) or a single item (wielded equipment).
        collection_copy = {}
        for key, value in collection.items():
            if isinstance(value, list):
                collection_copy[key] = [item.copy() for item in value]
            else:
                collection_copy[key] = value.copy()
        return collection_copy
def on_register(self, host):
super().on_register(host)
self.host_body = host.body
host.register_query_responder(self, QueryType.RemoveObject, self.remove_item)
@invalidate_cache
def remove_item(self, item):
success = False
for item_list in self.worn_equipment_map.values():
if item in item_list:
item_list.remove(item)
success = True
for key in self.wielded_equipment_map.copy().keys():
if self.wielded_equipment_map[key] == item:
del self.wielded_equipment_map[key]
success = True
return success
@invalidate_cache
def wear(self, item):
# Wearing requires the bodypart to be compatible with the item
if not self.host_body:
self.host_body = self.host.body
if item.armor:
armor = item.armor
if item.size == self.host.stats.size:
for compatible_bodypart_uid in armor.wearable_body_parts_uid:
host_body_parts = self.host_body.get_body_parts(compatible_bodypart_uid)
for host_body_part in host_body_parts:
if host_body_part:
if host_body_part in self.worn_equipment_map:
if armor.worn_layer not in [item.armor.worn_layer for item in
self.worn_equipment_map[host_body_part]]:
self.worn_equipment_map[host_body_part].append(item)
return True
else:
self.worn_equipment_map[host_body_part] = [item]
return True
return False
@invalidate_cache
def wield(self, item):
if not self.host_body:
self.host_body = self.host.body
# Wielding requires bodyparts with GRASP
grasp_able_body_parts = sorted(
[free_body_part for free_body_part in
self.host_body.get_grasp_able_body_parts()
if free_body_part not in self.wielded_equipment_map],
key=lambda x: x.relative_size, reverse=True
)
# Wielding with one hand gets priority
wielding_body_parts = []
total_size_held = 0
while grasp_able_body_parts:
free_body_part = grasp_able_body_parts.pop(0)
wielding_body_parts.append(free_body_part)
item_size = item.size.value
# 10 is the normal relative_size for a hand
relative_size_modifier = free_body_part.relative_size - 10
relative_size_modifier = round(relative_size_modifier / 10) if relative_size_modifier else 0
relative_size = self.host.stats.size.value + relative_size_modifier
total_size_held += relative_size
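            # Worked example with hypothetical numbers: a size-5 host wielding
            # with two relative_size-10 hands contributes 5 per hand, so a
            # size-9 two-handed item needs both hands before the checks below.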
if total_size_held >= item_size:
if item.weapon.two_handed and len(wielding_body_parts) >= 2 or not item.weapon.two_handed:
for body_part in wielding_body_parts:
self.wielded_equipment_map[body_part] = item
return True
return False
@cached
def get_worn_items(self):
return [item for item_list in self.worn_equipment_map.values() for item in item_list]
@cached
def get_load_of_worn_items(self):
worn_items = self.get_worn_items()
total_weight = 0.0
for item in worn_items:
item_weight = item.stats.get_current_value(StatsEnum.Weight)
material_modifier = item.material.weight
total_weight += item_weight * material_modifier
return total_weight
@cached
def get_wielded_items(self):
return [item for item in self.wielded_equipment_map.values()]
| ChrisLR/Python-Roguelike-Template | components/equipment.py | Python | mit | 5,225 |
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
'name': 'XMLRPC Operation Invoice',
'version': '0.1',
'category': 'ETL',
'description': '''
XMLRPC Import invoice
''',
'author': 'Micronaet S.r.l. - Nicola Riolini',
'website': 'http://www.micronaet.it',
'license': 'AGPL-3',
'depends': [
'base',
'xmlrpc_base',
'account',
],
'init_xml': [],
'demo': [],
'data': [
'security/xml_groups.xml',
#'operation_view.xml',
'invoice_view.xml',
'data/operation.xml',
],
'active': False,
'installable': True,
'auto_install': False,
}
| Micronaet/micronaet-xmlrpc | xmlrpc_operation_invoice/__openerp__.py | Python | agpl-3.0 | 1,553 |
from rest_framework import serializers
from customers import choices as customers_choices
from customers.models import Customer
from readings.models import Reading, Condition
class ReadingListSerializer(serializers.ModelSerializer):
class Meta:
model = Reading
fields = (
'reading',
'daterecorded',
'latitude',
'longitude',
)
class ReadingLiveSerializer(serializers.ModelSerializer):
class Meta:
model = Reading
fields = (
'reading',
'latitude',
'longitude',
'daterecorded',
'user_id',
'tzoffset',
'sharing',
'provider',
'client_key',
'location_accuracy',
'reading_accuracy',
'observation_type',
'observation_unit',
)
def get_fields(self):
fields = super(ReadingLiveSerializer, self).get_fields()
api_key = self.context['view'].request.GET.get('api_key', '')
customer = Customer.objects.get(api_key=api_key)
if customer.customer_type == customers_choices.CUSTOMER_PUBLIC:
del fields['user_id']
return fields
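    # Usage sketch (illustrative, not part of the original module): the view is
    # expected in the serializer context so get_fields() can read api_key from
    # the request, e.g.
    #
    #     ReadingLiveSerializer(readings, many=True, context={"view": view})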
class ConditionListSerializer(serializers.ModelSerializer):
class Meta:
model = Condition
fields = (
'latitude',
'longitude',
'altitude',
'daterecorded',
'general_condition',
'windy',
'fog_thickness',
'precipitation_type',
'precipitation_amount',
'precipitation_unit',
'thunderstorm_intensity',
'user_comment',
)
| JacobSheehy/pressureNETAnalysis | readings/serializers.py | Python | gpl-3.0 | 1,723 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Copyright (C) 2012 Fabio Erculiani
Authors:
Fabio Erculiani
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 3.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
import os
import argparse
# entropy.i18n will pick this up
os.environ['ETP_GETTEXT_DOMAIN'] = "rigo"
import sys
from threading import Lock, Timer
sys.path.insert(0, "../lib")
sys.path.insert(1, "../client")
sys.path.insert(2, "./")
sys.path.insert(3, "/usr/lib/entropy/lib")
sys.path.insert(4, "/usr/lib/entropy/client")
sys.path.insert(6, "/usr/lib/rigo")
from gi.repository import Gtk, Gdk, GLib
from rigo.paths import DATA_DIR
from rigo.enums import RigoViewStates, LocalActivityStates
from rigo.entropyapi import EntropyWebService, EntropyClient as Client
from rigo.ui.gtk3.widgets.apptreeview import AppTreeView
from rigo.ui.gtk3.widgets.confupdatetreeview import ConfigUpdatesTreeView
from rigo.ui.gtk3.widgets.noticeboardtreeview import NoticeBoardTreeView
from rigo.ui.gtk3.widgets.preferencestreeview import PreferencesTreeView
from rigo.ui.gtk3.widgets.grouptreeview import GroupTreeView
from rigo.ui.gtk3.widgets.repositorytreeview import RepositoryTreeView
from rigo.ui.gtk3.widgets.notifications import NotificationBox
from rigo.ui.gtk3.controllers.applications import \
ApplicationsViewController
from rigo.ui.gtk3.controllers.application import \
ApplicationViewController
from rigo.ui.gtk3.controllers.confupdate import \
ConfigUpdatesViewController
from rigo.ui.gtk3.controllers.noticeboard import \
NoticeBoardViewController
from rigo.ui.gtk3.controllers.preference import \
PreferenceViewController
from rigo.ui.gtk3.controllers.repository import \
RepositoryViewController
from rigo.ui.gtk3.controllers.group import \
GroupViewController
from rigo.ui.gtk3.controllers.notifications import \
UpperNotificationViewController, BottomNotificationViewController
from rigo.ui.gtk3.controllers.work import \
WorkViewController
from rigo.ui.gtk3.widgets.welcome import WelcomeBox
from rigo.ui.gtk3.models.appliststore import AppListStore
from rigo.ui.gtk3.models.confupdateliststore import ConfigUpdatesListStore
from rigo.ui.gtk3.models.noticeboardliststore import NoticeBoardListStore
from rigo.ui.gtk3.models.preferencesliststore import PreferencesListStore
from rigo.ui.gtk3.models.groupliststore import GroupListStore
from rigo.ui.gtk3.models.repositoryliststore import RepositoryListStore
from rigo.ui.gtk3.utils import init_sc_css_provider, get_sc_icon_theme
from rigo.utils import escape_markup
from rigo.controllers.daemon import RigoServiceController
from RigoDaemon.enums import ActivityStates as DaemonActivityStates
from entropy.const import const_debug_write, dump_signal
from entropy.misc import TimeScheduled, ParallelTask, ReadersWritersSemaphore
from entropy.i18n import _
from entropy.locks import EntropyResourcesLock
# Change the default in-RAM cache policy for repositories in order to
# save a huge amount of RAM.
from entropy.db.cache import EntropyRepositoryCachePolicies
_NONE_POL = EntropyRepositoryCachePolicies.NONE
EntropyRepositoryCachePolicies.DEFAULT_CACHE_POLICY = _NONE_POL
import entropy.tools
class Rigo(Gtk.Application):
class RigoHandler(object):
def __init__(self, rigo_app, rigo_service):
self._app = rigo_app
self._service = rigo_service
def onDeleteWindow(self, window, event):
# if UI is locked, do not allow to close Rigo
if self._app.is_ui_locked() or \
self._service.local_activity() != LocalActivityStates.READY:
rc = self._app._show_yesno_dialog(
None,
escape_markup(_("Hey hey hey!")),
escape_markup(_("Rigo is working, are you sure?")))
if rc == Gtk.ResponseType.NO:
return True
while True:
try:
entropy.tools.kill_threads()
Gtk.main_quit((window, event))
except KeyboardInterrupt:
continue
break
def __init__(self):
self._current_state_lock = False
self._current_state = RigoViewStates.STATIC_VIEW_STATE
self._state_transitions = {
RigoViewStates.BROWSER_VIEW_STATE: (
self._enter_browser_state,
self._exit_browser_state),
RigoViewStates.STATIC_VIEW_STATE: (
self._enter_static_state,
self._exit_static_state),
RigoViewStates.APPLICATION_VIEW_STATE: (
self._enter_application_state,
self._exit_application_state),
RigoViewStates.WORK_VIEW_STATE: (
self._enter_work_state,
self._exit_work_state),
RigoViewStates.CONFUPDATES_VIEW_STATE: (
self._enter_confupdates_state,
self._exit_confupdates_state),
RigoViewStates.NOTICEBOARD_VIEW_STATE: (
self._enter_noticeboard_state,
self._exit_noticeboard_state),
RigoViewStates.PREFERENCES_VIEW_STATE: (
self._enter_preferences_state,
self._exit_preferences_state),
RigoViewStates.REPOSITORY_VIEW_STATE: (
self._enter_repository_state,
self._exit_repository_state),
RigoViewStates.GROUPS_VIEW_STATE: (
self._enter_groups_state,
self._exit_groups_state)
}
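        # Each view state maps to an (enter_handler, exit_handler) pair;
        # these are presumably invoked by _change_view_state() (used by the
        # preferences button handler below) when switching views.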
self._state_metadata = {
RigoViewStates.BROWSER_VIEW_STATE: {
"title": _("Search"),
},
RigoViewStates.STATIC_VIEW_STATE: {
"title": _("Rigo Application Browser"),
},
RigoViewStates.APPLICATION_VIEW_STATE: {
"title": _("Application"),
},
RigoViewStates.WORK_VIEW_STATE: {
"title": _("Working Hard"),
},
RigoViewStates.CONFUPDATES_VIEW_STATE: {
"title": _("Wake Up"),
},
RigoViewStates.NOTICEBOARD_VIEW_STATE: {
"title": _("Important Stuff"),
},
RigoViewStates.PREFERENCES_VIEW_STATE: {
"title": _("Breaking Stuff"),
},
RigoViewStates.REPOSITORY_VIEW_STATE: {
"title": _("Repository Stuff"),
},
RigoViewStates.GROUPS_VIEW_STATE: {
"title": _("Application Groups"),
},
}
self._state_mutex = Lock()
icons = get_sc_icon_theme(DATA_DIR)
self._activity_rwsem = ReadersWritersSemaphore()
# This relies on the fact that the installed packages repository
        # is lazily loaded (and thus so is the schema update code).
self._entropy = Client()
self._entropy_ws = EntropyWebService(self._entropy)
preload_task = ParallelTask(self._entropy_ws.preload)
preload_task.name = "PreloadEntropyWebService"
preload_task.daemon = True
preload_task.start()
self._service = RigoServiceController(
self, self._activity_rwsem,
self._entropy, self._entropy_ws)
app_handler = Rigo.RigoHandler(self, self._service)
self._builder = Gtk.Builder()
self._builder.add_from_file(os.path.join(DATA_DIR, "ui/gtk3/rigo.ui"))
self._builder.connect_signals(app_handler)
self._window = self._builder.get_object("rigoWindow")
self._window.set_name("rigo-view")
self._apps_view = self._builder.get_object("appsViewVbox")
self._scrolled_view = self._builder.get_object("appsViewScrolledWindow")
self._app_view = self._builder.get_object("appViewScrollWin")
self._app_view.set_name("rigo-view")
self._app_view_port = self._builder.get_object("appViewVport")
self._app_view_port.set_name("rigo-view")
self._not_found_box = self._builder.get_object("appsViewNotFoundVbox")
self._config_scrolled_view = self._builder.get_object(
"configViewScrolledWindow")
self._config_view = self._builder.get_object("configViewVbox")
self._config_view.set_name("rigo-view")
self._repo_scrolled_view = self._builder.get_object(
"repoViewScrolledWindow")
self._repo_view = self._builder.get_object("repoViewVbox")
self._repo_view.set_name("rigo-view")
self._notice_scrolled_view = self._builder.get_object(
"noticeViewScrolledWindow")
self._notice_view = self._builder.get_object("noticeViewVbox")
self._notice_view.set_name("rigo-view")
self._pref_scrolled_view = self._builder.get_object(
"preferencesViewScrolledWindow")
self._pref_view = self._builder.get_object("preferencesViewVbox")
self._pref_view.set_name("rigo-view")
self._group_scrolled_view = self._builder.get_object(
"groupViewScrolledWindow")
self._group_view = self._builder.get_object("groupViewVbox")
self._group_view.set_name("rigo-view")
self._search_entry = self._builder.get_object("searchEntry")
self._search_entry_completion = self._builder.get_object(
"searchEntryCompletion")
self._search_entry_store = self._builder.get_object(
"searchEntryStore")
self._static_view = self._builder.get_object("staticViewVbox")
self._notification = self._builder.get_object("notificationBox")
self._bottom_notification = \
self._builder.get_object("bottomNotificationBox")
self._work_view = self._builder.get_object("workViewVbox")
self._work_view.set_name("rigo-view")
self._pref_button = self._builder.get_object(
"prefButton")
def _pref_button_activate(widget):
self._change_view_state(
RigoViewStates.PREFERENCES_VIEW_STATE)
self._pref_button.connect(
"clicked", _pref_button_activate)
# Preferences model, view and controller
self._pref_store = PreferencesListStore()
self._view_pref = PreferencesTreeView(
icons, PreferencesListStore.ICON_SIZE)
self._pref_scrolled_view.add(self._view_pref)
def _pref_queue_draw(*args):
self._view_pref.queue_draw()
self._pref_store.connect("redraw-request", _pref_queue_draw)
self._pref_view_c = PreferenceViewController(
self._pref_store, self._view_pref)
self._app_view_c = ApplicationViewController(
self._entropy, self._entropy_ws, self._pref_view_c,
self._service, self._builder)
self._view = AppTreeView(
self._entropy, self._service, self._app_view_c, icons,
True, AppListStore.ICON_SIZE, store=None)
self._scrolled_view.add(self._view)
self._view.set_scrolled_view(self._scrolled_view)
self._app_store = AppListStore(
self._entropy, self._entropy_ws,
self._service, self._view, icons)
def _queue_draw(*args):
self._view.queue_draw()
self._app_store.connect("redraw-request", _queue_draw)
self._app_view_c.set_store(self._app_store)
self._app_view_c.connect("application-show",
self._on_application_show)
# Configuration file updates model, view and controller
self._config_store = ConfigUpdatesListStore()
self._view_config = ConfigUpdatesTreeView(
icons, ConfigUpdatesListStore.ICON_SIZE)
self._config_scrolled_view.add(self._view_config)
def _config_queue_draw(*args):
self._view_config.queue_draw()
self._config_store.connect("redraw-request", _config_queue_draw)
self._config_view_c = ConfigUpdatesViewController(
self._entropy, self._config_store, self._view_config)
self._config_view_c.connect(
"view-cleared", self._on_view_cleared)
self._service.set_configuration_controller(self._config_view_c)
# Repository model, view and controller
self._repo_store = RepositoryListStore()
self._view_repo = RepositoryTreeView(
icons, RepositoryListStore.ICON_SIZE)
self._repo_scrolled_view.add(self._view_repo)
def _repo_queue_draw(*args):
self._view_repo.queue_draw()
self._repo_store.connect("redraw-request", _repo_queue_draw)
self._repo_view_c = RepositoryViewController(
self._pref_view_c, self._service, self._repo_store,
self._view_repo)
# NoticeBoard model, view and controller
self._notice_store = NoticeBoardListStore()
self._view_notice = NoticeBoardTreeView(
icons, NoticeBoardListStore.ICON_SIZE)
self._notice_scrolled_view.add(self._view_notice)
def _notice_queue_draw(*args):
self._view_notice.queue_draw()
self._notice_store.connect("redraw-request", _notice_queue_draw)
self._notice_view_c = NoticeBoardViewController(
self._notice_store, self._view_notice)
self._service.set_noticeboard_controller(self._notice_view_c)
# Group model, view and controller
self._group_store = GroupListStore()
self._view_group = GroupTreeView(
icons, GroupListStore.ICON_SIZE)
self._group_scrolled_view.add(self._view_group)
def _group_queue_draw(*args):
self._view_group.queue_draw()
self._group_store.connect("redraw-request", _group_queue_draw)
self._group_view_c = GroupViewController(
self._service, self._group_store,
self._view_group, self._pref_view_c)
self._welcome_box = WelcomeBox()
settings = Gtk.Settings.get_default()
settings.set_property("gtk-error-bell", False)
# wire up the css provider to reconfigure on theme-changes
self._window.connect("style-updated",
self._on_style_updated,
init_sc_css_provider,
settings,
Gdk.Screen.get_default(),
DATA_DIR)
# Force the initialization of the css provider asap.
# This fixes a glitch with GTK 3.10
init_sc_css_provider(
self._window,
settings,
Gdk.Screen.get_default(),
DATA_DIR)
self._nc = UpperNotificationViewController(
self._entropy, self._entropy_ws, self._notification)
# Bottom NotificationBox controller.
# Bottom notifications are only used for
        # providing Activity control to the User during
# the Activity itself.
self._bottom_nc = BottomNotificationViewController(
self._window, self._bottom_notification,
self._pref_button)
self._avc = ApplicationsViewController(
self._activity_rwsem,
self._entropy, self._entropy_ws,
self._nc, self._bottom_nc, self._service,
self._pref_view_c, icons, self._not_found_box,
self._search_entry, self._search_entry_completion,
self._search_entry_store, self._app_store, self._view)
self._avc.connect("view-cleared", self._on_view_cleared)
self._avc.connect("view-filled", self._on_view_filled)
self._avc.connect("view-want-change", self._on_view_change)
self._service.set_bottom_notification_controller(
self._bottom_nc)
self._app_view_c.set_notification_controller(self._nc)
self._app_view_c.set_applications_controller(self._avc)
self._config_view_c.set_notification_controller(self._nc)
self._config_view_c.set_applications_controller(self._avc)
self._repo_view_c.set_notification_controller(self._nc)
self._repo_view_c.set_applications_controller(self._avc)
self._notice_view_c.set_notification_controller(self._nc)
self._notice_view_c.set_applications_controller(self._avc)
self._group_view_c.set_applications_controller(self._avc)
self._service.set_applications_controller(self._avc)
self._service.set_application_controller(self._app_view_c)
self._service.set_notification_controller(self._nc)
self._service.connect("start-working", self._on_start_working)
self._service.connect("repositories-updated",
self._on_repo_updated)
self._service.connect("applications-managed",
self._on_applications_managed)
self._work_view_c = WorkViewController(
icons, self._service, self._work_view)
self._service.set_work_controller(self._work_view_c)
self._bottom_nc.connect("show-work-view", self._on_show_work_view)
self._bottom_nc.connect("work-interrupt", self._on_work_interrupt)
def is_ui_locked(self):
"""
Return whether the UI is currently locked.
"""
return self._current_state_lock
def _thread_dumper(self):
"""
If --dumper is in argv, a recurring thread dump
        function will be spawned every 5 seconds.
"""
dumper_enable = self._nsargs.dumper
if dumper_enable:
task = None
def _dumper():
def _dump():
task.kill()
dump_signal(None, None)
timer = Timer(10.0, _dump)
                timer.name = "MainThreadHeartbeatCheck"
timer.daemon = True
timer.start()
GLib.idle_add(timer.cancel)
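                # Heartbeat check: timer.cancel is scheduled on the GLib main
                # loop; if the main thread stalls for ~10 seconds the cancel
                # never runs and _dump() fires, producing a thread dump.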
task = TimeScheduled(5.0, _dumper)
task.name = "ThreadDumper"
task.daemon = True
task.start()
def _on_start_working(self, widget, state, lock):
"""
Emitted by RigoServiceController when we're asked to
switch to the Work View and, if lock = True, lock UI.
"""
if lock:
self._search_entry.set_sensitive(False)
if state is not None:
self._change_view_state(state, lock=lock)
def _on_work_interrupt(self, widget):
"""
We've been explicitly asked to interrupt the currently
ongoing work
"""
rc = self._show_yesno_dialog(
self._window,
escape_markup(_("Activity Interruption")),
escape_markup(
_("Are you sure you want to interrupt"
" the ongoing Activity? The interruption will"
" occur as soon as possible, potentially not"
" immediately.")))
if rc == Gtk.ResponseType.NO:
return
self._service.interrupt_activity()
def _on_show_work_view(self, widget):
"""
We've been explicitly asked to switch to WORK_VIEW_STATE
"""
self._change_view_state(RigoViewStates.WORK_VIEW_STATE,
_ignore_lock=True)
def _on_repo_updated(self, widget, result, message):
"""
Emitted by RigoServiceController telling us that
repositories have been updated.
"""
with self._state_mutex:
self._current_state_lock = False
self._search_entry.set_sensitive(True)
if result != 0:
msg = "<b>%s</b>: %s" % (
_("Repositories update error"),
message,)
message_type = Gtk.MessageType.ERROR
else:
msg = _("Repositories updated <b>successfully</b>!")
message_type = Gtk.MessageType.INFO
box = NotificationBox(
msg, message_type=message_type,
context_id=RigoServiceController.NOTIFICATION_CONTEXT_ID)
box.add_destroy_button(_("Ok, thanks"))
self._nc.append(box)
def _on_applications_managed(self, widget, success, local_activity):
"""
Emitted by RigoServiceController telling us that
enqueue application actions have been completed.
"""
msg = "N/A"
if not success:
if local_activity == LocalActivityStates.MANAGING_APPLICATIONS:
msg = "<b>%s</b>: %s" % (
_("Application Management Error"),
_("please check the management log"),)
elif local_activity == LocalActivityStates.UPGRADING_SYSTEM:
msg = "<b>%s</b>: %s" % (
_("System Upgrade Error"),
_("please check the upgrade log"),)
message_type = Gtk.MessageType.ERROR
else:
if local_activity == LocalActivityStates.MANAGING_APPLICATIONS:
msg = _("Applications managed <b>successfully</b>!")
elif local_activity == LocalActivityStates.UPGRADING_SYSTEM:
msg = _("System Upgraded <b>successfully</b>!")
message_type = Gtk.MessageType.INFO
box = NotificationBox(
msg, message_type=message_type,
context_id=RigoServiceController.NOTIFICATION_CONTEXT_ID)
box.add_destroy_button(_("Ok, thanks"))
box.add_button(_("Show me"), self._on_show_work_view)
self._nc.append(box)
self._work_view_c.deactivate_app_box()
def _on_view_cleared(self, *args):
self._change_view_state(RigoViewStates.STATIC_VIEW_STATE)
def _on_view_filled(self, *args):
self._change_view_state(RigoViewStates.BROWSER_VIEW_STATE)
def _on_view_change(self, widget, state, payload):
self._change_view_state(state, payload=payload)
def _on_application_show(self, *args):
self._change_view_state(RigoViewStates.APPLICATION_VIEW_STATE)
def _exit_browser_state(self):
"""
Action triggered when UI exits the Application Browser
state (or mode).
"""
self._avc.deselect()
self._apps_view.hide()
def _enter_browser_state(self):
"""
        Action triggered when UI enters the Application Browser
state (or mode).
"""
self._apps_view.show()
def _exit_confupdates_state(self):
"""
Action triggered when UI exits the Configuration Updates
state (or mode).
"""
self._config_view.hide()
def _enter_confupdates_state(self):
"""
Action triggered when UI enters the Configuration Updates
state (or mode).
"""
self._config_view.show()
def _exit_noticeboard_state(self):
"""
Action triggered when UI exits the NoticeBoard
state (or mode).
"""
self._notice_view.hide()
def _enter_noticeboard_state(self):
"""
Action triggered when UI enters the NoticeBoard
state (or mode).
"""
self._notice_view.show()
def _exit_repository_state(self):
"""
Action triggered when UI exits the Repository
Management state (or mode).
"""
self._repo_view.hide()
self._repo_view_c.clear()
def _enter_repository_state(self):
"""
Action triggered when UI enters the Repository
Management state (or mode).
"""
self._repo_view_c.load()
self._repo_view.show()
def _exit_preferences_state(self):
"""
Action triggered when UI exits the Preferences
state (or mode).
"""
self._pref_view.hide()
def _enter_preferences_state(self):
"""
Action triggered when UI enters the Preferences
state (or mode).
"""
self._pref_view.show()
def _exit_groups_state(self):
"""
Action triggered when UI exits the Groups
state (or mode).
"""
self._group_view.hide()
def _enter_groups_state(self):
"""
Action triggered when UI enters the Groups
state (or mode).
"""
self._group_view_c.load()
self._group_view.show()
def _exit_static_state(self):
"""
Action triggered when UI exits the Static Browser
state (or mode). AKA the Welcome Box.
"""
self._static_view.hide()
        # release all the children of static_view
for child in self._static_view.get_children():
self._static_view.remove(child)
def _enter_static_state(self):
"""
        Action triggered when UI enters the Static Browser
state (or mode). AKA the Welcome Box.
"""
# keep the current widget if any, or add the
# welcome widget
if not self._static_view.get_children():
self._welcome_box.show()
self._static_view.pack_start(self._welcome_box,
True, True, 10)
self._static_view.show()
def _enter_application_state(self):
"""
Action triggered when UI enters the Package Information
state (or mode). Showing application information.
"""
# change search_entry first icon to emphasize the
# back action
self._search_entry.set_icon_from_stock(
Gtk.EntryIconPosition.PRIMARY,
"gtk-go-back")
self._app_view.show()
def _exit_application_state(self):
"""
Action triggered when UI exits the Package Information
        state (or mode). Hiding application information again.
"""
self._search_entry.set_icon_from_stock(
Gtk.EntryIconPosition.PRIMARY, "gtk-find")
self._app_view.hide()
self._app_view_c.hide()
def _enter_work_state(self):
"""
Action triggered when UI enters the Work View state (or mode).
Either for Updating Repositories or Installing new Apps.
"""
self._work_view.show()
def _exit_work_state(self):
"""
Action triggered when UI exits the Work View state (or mode).
"""
self._work_view.hide()
def _change_view_state(self, state, lock=False, _ignore_lock=False,
payload=None):
"""
Change Rigo Application UI state.
You can pass a custom widget that will be shown in case
of static view state.
"""
with self._state_mutex:
if self._current_state_lock and not _ignore_lock:
const_debug_write(
__name__,
"cannot change view state, UI locked")
return False
txc = self._state_transitions.get(state)
if txc is None:
raise AttributeError("wrong view state")
enter_st, exit_st = txc
current_enter_st, current_exit_st = \
self._state_transitions.get(
self._current_state)
# exit from current state
current_exit_st()
# enter the new state
enter_st()
self._current_state = state
if lock:
self._current_state_lock = True
state_meta = self._state_metadata[state]
self._window.set_title(escape_markup(
state_meta["title"]))
return True
def _change_view_state_safe(self, state):
"""
        Thread-safe version of _change_view_state().
"""
def _do_change():
return self._change_view_state(state)
GLib.idle_add(_do_change)
def _on_style_updated(self, widget, init_css_callback, *args):
"""
Gtk Style callback, nothing to see here.
"""
init_css_callback(widget, *args)
def _show_ok_dialog(self, parent, title, message):
"""
Show ugly OK dialog window.
"""
dlg = Gtk.MessageDialog(parent=parent,
type=Gtk.MessageType.INFO,
buttons=Gtk.ButtonsType.OK)
dlg.set_markup(message)
dlg.set_title(title)
dlg.run()
dlg.destroy()
def _show_yesno_dialog(self, parent, title, message):
"""
Show ugly Yes/No dialog window.
"""
dlg = Gtk.MessageDialog(parent=parent,
type=Gtk.MessageType.INFO,
buttons=Gtk.ButtonsType.YES_NO)
dlg.set_markup(message)
dlg.set_title(title)
rc = dlg.run()
dlg.destroy()
return rc
def _permissions_setup(self):
"""
Check execution privileges and spawn the Rigo UI.
"""
if not entropy.tools.is_user_in_entropy_group():
# otherwise the lock handling would potentially
# fail.
self._show_ok_dialog(
None,
escape_markup(_("Not authorized")),
escape_markup(_("You are not authorized to run Rigo")))
entropy.tools.kill_threads()
Gtk.main_quit()
return
if not self._service.service_available():
self._show_ok_dialog(
None,
escape_markup(_("Rigo")),
escape_markup(_("RigoDaemon service is not available")))
entropy.tools.kill_threads()
Gtk.main_quit()
return
supported_apis = self._service.supported_apis()
daemon_api = self._service.api()
if daemon_api not in supported_apis:
self._show_ok_dialog(
None,
escape_markup(_("Rigo")),
escape_markup(
_("API mismatch, please update Rigo and RigoDaemon")))
entropy.tools.kill_threads()
Gtk.main_quit()
return
lock = EntropyResourcesLock(output=self._entropy)
# always execute this from the MainThread, since the lock uses TLS
acquired = lock.try_acquire_shared()
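        # Shared (reader) acquisition normally succeeds alongside other readers
        # such as RigoDaemon; it fails when another manager holds the resources
        # lock exclusively (e.g. during an install or upgrade).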
is_exclusive = False
if not acquired:
            # check whether RigoDaemon is running in exclusive mode
# and ignore non-atomicity here (failing with error
# is acceptable)
if not self._service.exclusive():
self._show_ok_dialog(
None,
escape_markup(_("Rigo")),
escape_markup(_("Another Application Manager is active")))
entropy.tools.kill_threads()
Gtk.main_quit()
return
is_exclusive = True
# otherwise we can go ahead and handle our state later
# check RigoDaemon, don't worry about races between Rigo Clients
# it is fine to have multiple Rigo Clients connected. Mutual
# exclusion is handled via Entropy Resources Lock (which is a file
# based rwsem).
activity = self._service.activity()
if activity != DaemonActivityStates.AVAILABLE:
msg = ""
show_dialog = True
if activity == DaemonActivityStates.NOT_AVAILABLE:
msg = _("Background Service is currently not available")
elif activity == DaemonActivityStates.UPDATING_REPOSITORIES:
show_dialog = False
task = ParallelTask(
self._service._update_repositories,
[], False, master=False)
task.daemon = True
task.name = "UpdateRepositoriesUnlocked"
task.start()
elif activity == DaemonActivityStates.MANAGING_APPLICATIONS:
show_dialog = False
task = ParallelTask(
self._service._application_request,
None, None, master=False)
task.daemon = True
task.name = "ApplicationRequestUnlocked"
task.start()
elif activity == DaemonActivityStates.UPGRADING_SYSTEM:
show_dialog = False
task = ParallelTask(
self._service._upgrade_system,
False, master=False)
task.daemon = True
task.name = "UpgradeSystemUnlocked"
task.start()
elif activity == DaemonActivityStates.INTERNAL_ROUTINES:
msg = _("Background Service is currently busy")
else:
msg = _("Background Service is incompatible with Rigo")
if show_dialog:
self._show_ok_dialog(
None,
escape_markup(_("Rigo")),
escape_markup(msg))
entropy.tools.kill_threads()
Gtk.main_quit()
return
elif is_exclusive:
msg = _("Background Service is currently unavailable")
# no lock acquired, cannot continue the initialization
self._show_ok_dialog(
None,
escape_markup(_("Rigo")),
escape_markup(msg))
entropy.tools.kill_threads()
Gtk.main_quit()
return
parser = argparse.ArgumentParser(
description=_("Rigo Application Browser"))
parser.add_argument(
"package", nargs='?', type=file,
metavar="<path>", help="package path")
parser.add_argument(
"--install",
metavar="<dep string>", help="install given dependency")
parser.add_argument(
"--remove",
metavar="<dep string>", help="remove given dependency")
parser.add_argument(
"--upgrade", help="upgrade the system",
action="store_true", default=False)
parser.add_argument(
"--dumper", help="enable the main thread dumper (debug)",
action="store_true", default=False)
parser.add_argument(
"--debug", help="enable Entropy Library debug mode",
action="store_true", default=False)
try:
self._nsargs = parser.parse_args(sys.argv[1:])
except IOError as err:
self._show_ok_dialog(
None,
escape_markup(_("Rigo")),
escape_markup("%s" % (err,)))
entropy.tools.kill_threads()
Gtk.main_quit()
return
self._thread_dumper()
self._pref_view_c.setup()
self._group_view_c.setup()
self._config_view_c.setup()
self._repo_view_c.setup()
self._notice_view_c.setup()
self._app_view_c.setup()
self._avc.setup()
self._nc.setup()
self._work_view_c.setup()
self._service.setup(acquired)
self._easter_eggs()
self._window.show()
managing = self._start_managing()
if not managing:
self._change_view_state(RigoViewStates.GROUPS_VIEW_STATE)
self._service.hello()
def _easter_eggs(self):
"""
Moo!
"""
msg = None
if entropy.tools.is_st_valentine():
msg = escape_markup(_("Happy St. Valentine <3 <3 !"))
elif entropy.tools.is_xmas():
msg = escape_markup(_("Merry Xmas \o/ !"))
elif entropy.tools.is_author_bday():
msg = escape_markup(_("Happy birthday to my authoooooor!"))
elif entropy.tools.is_april_first():
msg = escape_markup(_("<=|=< (this is optimistically a fish)"))
if msg is not None:
box = NotificationBox(
msg, message_type=Gtk.MessageType.INFO,
context_id="EasterEggs")
box.add_destroy_button(_("Woot, thanks"))
self._nc.append(box)
def _start_managing(self):
"""
Start managing applications passed via argv.
"""
managing = False
if self._nsargs.install:
dependency = self._nsargs.install
task = ParallelTask(
self._avc.install, dependency)
task.name = "AppInstall-%s" % (dependency,)
task.daemon = True
task.start()
managing = True
if self._nsargs.remove:
dependency = self._nsargs.remove
task = ParallelTask(
self._avc.remove, dependency)
task.name = "AppRemove-%s" % (dependency,)
task.daemon = True
task.start()
managing = True
if self._nsargs.package:
path = self._nsargs.package.name
self._nsargs.package.close() # no need, unfortunately
task = ParallelTask(
self._avc.install_package, path)
task.name = "AppInstallPackage-%s" % (path,)
task.daemon = True
task.start()
managing = True
if self._nsargs.upgrade:
task = ParallelTask(self._avc.upgrade)
task.name = "SystemUpgrade"
task.daemon = True
task.start()
managing = True
return managing
def run(self):
"""
Run Rigo ;-)
"""
self._welcome_box.render()
self._change_view_state(self._current_state)
GLib.idle_add(self._permissions_setup)
GLib.threads_init()
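        # Gdk.threads_enter()/leave() bracket the GTK main loop with the GDK
        # lock (legacy threading API); worker threads update the UI through
        # GLib.idle_add callbacks instead of touching widgets directly.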
Gdk.threads_enter()
Gtk.main()
Gdk.threads_leave()
entropy.tools.kill_threads()
if __name__ == "__main__":
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
app = Rigo()
app.run()
| Enlik/entropy | rigo/rigo_app.py | Python | gpl-2.0 | 38,116 |
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
urlpatterns = patterns(
'dnd.mobile.spells.views',
# spells
url(
r'^$',
'spell_index_mobile',
name='spell_index_mobile',
),
# spells > by rulebooks
url(
r'^by-rulebooks/$',
'spell_list_by_rulebook_mobile',
name='spell_list_by_rulebook_mobile',
),
# spells > rulebook
url(
r'^(?P<rulebook_slug>[^/]+)--(?P<rulebook_id>\d+)/$',
'spells_in_rulebook_mobile',
name='spells_in_rulebook_mobile',
),
# spells > rulebook > spell
url(
r'^(?P<rulebook_slug>[^/]+)--(?P<rulebook_id>\d+)/(?P<spell_slug>[^/]+)--(?P<spell_id>\d+)/$',
'spell_detail_mobile',
name='spell_detail_mobile',
),
# spells > descriptors
url(
r'^descriptors/$',
'spell_descriptor_list_mobile',
name='spell_descriptor_list_mobile',
),
# spells > descriptors > descriptor
url(
r'^descriptors/(?P<spell_descriptor_slug>[^/]+)/$',
'spell_descriptor_detail_mobile',
name='spell_descriptor_detail_mobile',
),
# spells > schools
url(
r'^schools/$',
'spell_school_list_mobile',
name='spell_school_list_mobile',
),
# spells > schools > detail
url(
r'^schools/(?P<spell_school_slug>[^/]+)/$',
'spell_school_detail_mobile',
name='spell_school_detail_mobile',
),
# spells > sub_schools > detail
url(
r'^sub-schools/(?P<spell_sub_school_slug>[^/]+)/$',
'spell_sub_school_detail_mobile',
name='spell_sub_school_detail_mobile',
),
# spells > domains
url(
r'^domains/$',
'spell_domain_list_mobile',
name='spell_domain_list_mobile',
),
# spells > domains > detail
url(
r'^domains/(?P<spell_domain_slug>[^/]+)/$',
'spell_domain_detail_mobile',
name='spell_domain_detail_mobile',
),
# spells > domains > detail (variant)
url(
r'^domains/(?P<rulebook_slug>[^/]+)--(?P<rulebook_id>\d+)/(?P<spell_domain_slug>[^/]+)/$',
'spell_domain_detail_mobile',
name='spell_variant_domain_detail_mobile',
),
)
| FreezyExp/dndtools | dndtools/dnd/mobile/spells/urls.py | Python | mit | 2,243 |
'''
Created on Nov 18, 2013
@author: Scott
Used for resizing images. Simplistic. Only resizes by a constant integer factor.
'''
import numpy
import utils.Utils as utils
'''
Assumes factor is int
'''
def resize_image(filename, factor):
im_arr = utils.get_array(filename)
resized_arr = numpy.ndarray(shape=(len(im_arr)*factor, len(im_arr[0])*factor,3), dtype='uint8', order='F')
for x in range(0,len(im_arr)):
for y in range(0, len(im_arr[0])):
y_off = y*factor
x_off = x*factor
for offx in range(0,factor):
for offy in range(0, factor):
resized_arr[x_off + offx][y_off + offy][0] = im_arr[x][y][0]
resized_arr[x_off + offx][y_off + offy][1] = im_arr[x][y][1]
resized_arr[x_off + offx][y_off + offy][2] = im_arr[x][y][2]
utils.write_image(resized_arr,filename[:-4] + "resized.png")
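# A vectorized alternative (sketch, assuming the same utils.get_array API as above):
#   resized = utils.get_array(filename).repeat(factor, axis=0).repeat(factor, axis=1)
# gives the same nearest-neighbour upscaling without the per-pixel Python loops.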
if __name__ == '__main__':
filename = "../../data/man.png"
resize_image(filename, 10)
| scotchval/ImageTDA | src/utils/Resize.py | Python | mit | 1,042 |
# pylint: disable=missing-docstring
from __future__ import print_function
def func1():
return 1
print('unreachable') # [unreachable]
def func2():
while 1:
break
print('unreachable') # [unreachable]
def func3():
for i in (1, 2, 3):
print(i)
continue
print('unreachable') # [unreachable]
def func4():
raise Exception
return 1 / 0 # [unreachable]
# https://github.com/PyCQA/pylint/issues/4698
def func5():
"""Empty generator functions should be allowed."""
return
yield
def func6():
"""Add 'unreachable' if yield is followed by another node."""
return
yield
print("unreachable") # [unreachable]
| PyCQA/pylint | tests/functional/u/unreachable.py | Python | gpl-2.0 | 693 |
#!/bin/python
import os, subprocess
import logging
from autotest.client import test
from autotest.client.shared import error
class libmng(test.test):
"""
Autotest module for testing basic functionality
of libmng
@author CSDL
"""
version = 1
nfail = 0
path = ''
def initialize(self, test_path=''):
"""
Sets the overall failure counter for the test.
"""
self.nfail = 0
os.system("yum -y install libmng-devel ")
ret_val = subprocess.Popen(['make', 'all'], cwd="%s/libmng" %(test_path))
ret_val.communicate()
if ret_val.returncode != 0:
self.nfail += 1
        logging.info('\n Test initialized successfully')
def run_once(self, test_path=''):
"""
Trigger test run
"""
try:
os.environ["LTPBIN"] = "%s/shared" %(test_path)
ret_val = subprocess.Popen(['./libmng.sh'], cwd="%s/libmng" %(test_path))
ret_val.communicate()
if ret_val.returncode != 0:
self.nfail += 1
except error.CmdError, e:
self.nfail += 1
logging.error("Test Failed: %s", e)
def postprocess(self):
if self.nfail != 0:
logging.info('\n nfails is non-zero')
raise error.TestError('\nTest failed')
else:
logging.info('\n Test completed successfully ')
| PoornimaNayak/autotest-client-tests | linux-tools/libmng/libmng.py | Python | gpl-2.0 | 1,418 |
"""
Benchmark scikit-learn's Ward implementation compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
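# ratio[j, i] > 1 means scikit-learn's Ward was slower than SciPy's for the
# (n_features[j], n_samples[i]) cell; the contour below marks the break-even line.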
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/benchmarks/bench_plot_ward.py | Python | bsd-3-clause | 1,260 |
"""
Django settings for library project.
Generated by 'django-admin startproject' using Django 1.11.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECRET_KEY = '+!6pkjuv2&wlf(9z1iw&%9ax1qk16pxm%_c40_=79^mxb%ua&d'
# SECRET_KEY = 'cg#p$g+j9tax!#a3cup@1$8obt2_+&k3q+pmu)5%asj6yjpkag'
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'cg#p$g+j9tax!#a3cup@1$8obt2_+&k3q+pmu)5%asj6yjpkag')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool( os.environ.get('DJANGO_DEBUG', True) )
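# Note: bool() of any non-empty string is True, so DJANGO_DEBUG="False" still
# enables debug mode; only setting DJANGO_DEBUG to an empty string disables it here.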
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'catalog.apps.CatalogConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'library.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['./templates',],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'library.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LOGIN_REDIRECT_URL = '/'
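# Users land here after a successful login when no ?next= parameter was supplied.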
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Los_Angeles'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
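# Development backend: outgoing mail (e.g. password reset links) is written to
# the console instead of being sent.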
| escottgoodwin/djangolocallibrary | library/settings.py | Python | mit | 3,536 |
import mock
from mock import patch
import unittest
from cfgm_common.vnc_db import DBBase
from svc_monitor import config_db
from svc_monitor import loadbalancer_agent
from vnc_api.vnc_api import *
import argparse
import ConfigParser
class LoadbalancerAgentTest(unittest.TestCase):
def setUp(self):
self.vnc_lib = mock.Mock()
self.cassandra = mock.Mock()
self.logger = mock.Mock()
self.svc = mock.Mock()
self._db = {}
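        # In-memory stand-in for the cassandra pool table: the helpers below
        # emulate pool_list/insert/remove so the agent can be exercised
        # without a real database.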
def read_db(id):
if id in self._db:
return self._db[id]
def put_db_config(id, data):
if id not in self._db:
self._db[id] = {}
self._db[id]['config_info'] = data
def put_db_driver(id, data):
if id not in self._db:
self._db[id] = {}
self._db[id]['driver_info'] = data
def remove_db(id, data=None):
if data is None:
del self._db[id]
return
if self._db[id]['driver_info'][data[0]]:
del self._db[id]['driver_info'][data[0]]
def list_pools():
ret_list = []
for each_entry_id, each_entry_data in self._db.iteritems() or []:
config_info_obj_dict = each_entry_data['config_info']
driver_info_obj_dict = None
if 'driver_info' in each_entry_data:
driver_info_obj_dict = each_entry_data['driver_info']
ret_list.append((each_entry_id, config_info_obj_dict, driver_info_obj_dict))
return ret_list
self.cassandra.pool_list = mock.Mock(side_effect=list_pools)
self.cassandra.pool_remove = mock.Mock(side_effect=remove_db)
self.cassandra.pool_driver_info_get = mock.Mock(side_effect=read_db)
self.cassandra.pool_driver_info_insert = mock.Mock(side_effect=put_db_driver)
self.cassandra.pool_config_insert = mock.Mock(side_effect=put_db_config)
mocked_gsc = mock.MagicMock()
mocked_gsc.uuid = 'fake-gsc-uuid'
self.vnc_lib.service_appliance_set_create.return_value = "opencontrail"
self.vnc_lib.global_system_config_read.return_value = mocked_gsc
def no_id_side_effect(fq_name):
raise NoIdError("xxx")
self.vnc_lib.service_appliance_set_read = mock.Mock(side_effect=no_id_side_effect)
conf_parser = argparse.ArgumentParser(add_help=False)
config = ConfigParser.SafeConfigParser({'admin_token': None})
self._args, remaining_argv = conf_parser.parse_known_args()
self._args.config_sections = config
def sas_read_side_effect(obj_type, uuids):
if obj_type == 'service_appliance_set':
return (True, [{
'fq_name': ['default-global-system-config', 'opencontrail'],
'service_appliance_driver': 'svc_monitor.services.loadbalancer.drivers.ha_proxy.driver.OpencontrailLoadbalancerDriver'
}])
return (False, None)
self.cassandra.object_read = mock.Mock(
side_effect=sas_read_side_effect)
DBBase.init(self.svc, None, self.cassandra)
self.lb_agent = loadbalancer_agent.LoadbalancerAgent(self.svc, self.vnc_lib,
self.cassandra, self._args)
self.svc.loadbalancer_agent = self.lb_agent
sas = config_db.ServiceApplianceSetSM.get('opencontrail')
self.assertEqual(sas.driver, "svc_monitor.services.loadbalancer.drivers.ha_proxy.driver.OpencontrailLoadbalancerDriver")
sas.add()
self.assertIsNotNone(self.lb_agent._loadbalancer_driver['opencontrail'])
# end setUp
def tearDown(self):
config_db.ServiceApplianceSM.delete("test-lb-provider-0")
config_db.ServiceApplianceSetSM.delete("test-lb-provider")
config_db.ServiceApplianceSetSM.delete("opencontrail")
config_db.LoadbalancerPoolSM.reset()
config_db.LoadbalancerMemberSM.reset()
config_db.VirtualIpSM.reset()
# end tearDown
def create_sa_set(self, fq_name_str):
sas_obj = {}
sas_obj['fq_name'] = fq_name_str.split(':')
sas_obj['uuid'] = fq_name_str
sas_obj['display_name'] = fq_name_str
sas = config_db.ServiceApplianceSetSM.locate(sas_obj['uuid'], sas_obj)
sas.kvpairs = [{'key': 'key1', 'value': 'value1'},
{'key': 'key2', 'value': 'value2'},
{'key': 'key3', 'value': 'value3'}]
sas.ha_mode = "standalone"
sas.driver = "svc_monitor.tests.fake_lb_driver.OpencontrailFakeLoadbalancerDriver"
return sas
# end create_sa_set
def create_sa(self, fq_name_str, parent_uuid):
sa_obj = {}
sa_obj['fq_name'] = fq_name_str.split(':')
sa_obj['uuid'] = fq_name_str
sa_obj['display_name'] = fq_name_str
sa_obj['parent_uuid'] = parent_uuid
sa_obj['service_appliance_ip_address'] = "1.1.1.1"
sa_obj['service_appliance_user_credentials'] = {'username': "admin", 'password': "contrail123"}
sa = config_db.ServiceApplianceSM.locate(sa_obj['uuid'], sa_obj)
sa.kvpairs = [{'key': 'key1', 'value': 'value1'},
{'key': 'key2', 'value': 'value2'},
{'key': 'key3', 'value': 'value3'}]
return sa
    # end create_sa
def create_pool(self, fq_name_str, vip=None, hm=None):
pool_obj = {}
pool_obj['fq_name'] = fq_name_str.split(':')
pool_obj['uuid'] = fq_name_str
pool_obj['display_name'] = fq_name_str
pool_obj['parent_uuid'] = 'parent_uuid'
pool_obj['id_perms'] = {'enable': 'true', 'description': 'Test pool'}
pool_obj['loadbalancer_pool_provider'] = 'test-lb-provider'
pool_obj['loadbalancer_pool_properties'] = {'protocol': 'HTTP',
'subnet_id': 'subnet-id',
'loadbalancer_method': 'ROUND_ROBIN',
'admin_state': 'true'}
if vip:
pool_obj['virtual_ip_back_refs']=[{'uuid': vip.uuid}]
if hm:
pool_obj['loadbalancer_healthmonitor_refs']=[{'uuid': hm.uuid}]
pool = config_db.LoadbalancerPoolSM.locate(pool_obj['uuid'], pool_obj)
return pool
# end create_pool
def create_hm_obj(self, fq_name_str):
hm_obj = {}
        hm_obj['fq_name'] = fq_name_str.split(':')
hm_obj['uuid'] = fq_name_str
hm_obj['display_name'] = fq_name_str
hm_obj['parent_uuid'] = 'parent_uuid'
hm_obj['id_perms'] = {'enable': 'true', 'description': 'Test pool'}
hm_obj['loadbalancer_healthmonitor_properties'] = {'delay': '5',
'expected_codes': '200',
'max_retries': '200',
'http_method': 'GET',
'timeout': '2',
'url_path': '/',
'monitor_type': 'HTTP',
'admin_state': 'true'}
return hm_obj
#end create_hm_obj
def create_hm(self, fq_name_str):
hm_obj = self.create_hm_obj(fq_name_str)
hm = config_db.HealthMonitorSM.locate(hm_obj['uuid'], hm_obj)
return hm
# end create_hm
def update_pool(self, pool_obj, vip=None):
pool_obj.params['loadbalancer_method'] = 'LEAST_CONNECTIONS'
pool_obj.params['protocol'] = 'HTTPS'
pool_obj.params['admin_state'] = 'false'
# end update_pool
def update_vip(self, vip_obj, pool=None):
vip_obj.params['connection_limit'] = '100'
vip_obj.params['persistence_type'] = 'always'
vip_obj.params['admin_state'] = 'false'
# end update_vip
def create_pool_members(self, pool_name, num_members):
for i in range(num_members):
self.create_pool_member(pool_name, 'member_'+str(i), '10.1.1.'+str(i))
# end create_pool_members
def create_pool_member(self, pool_name, member_name, member_address):
pool_member_obj = {}
pool_member_obj['fq_name'] = member_name
pool_member_obj['uuid'] = member_name
pool_member_obj['display_name'] = member_name
pool_member_obj['parent_uuid'] = pool_name
pool_member_obj['id_perms'] = {'enable': 'true', 'description': 'Test pool member'}
pool_member_obj['loadbalancer_member_properties'] = {'protocol_port': '80',
'address': member_address,
'weight': '1',
'status': 'up',
'admin_state': 'true'}
member = config_db.LoadbalancerMemberSM.locate(pool_member_obj['uuid'], pool_member_obj)
# end create_pool_member
def create_vip(self, vip):
vip_obj = {}
vip_obj['fq_name'] = vip.split(':')
vip_obj['uuid'] = vip
vip_obj['display_name'] = vip
vip_obj['parent_uuid'] = 'parent_uuid'
vip_obj['id_perms'] = {'enable': 'true', 'description': 'Test pool'}
vip_obj['virtual_ip_properties'] = {'status': 'UP',
'protocol_port': '80',
'subnet_id': 'subnet_id',
'protocol': 'HTTP',
'admin_state': 'true',
'connection_limit': '-1',
'persistence_type': None,
'persistence_cookie_name': None,
'address': '1.1.1.1'}
vip = config_db.VirtualIpSM.locate(vip_obj['uuid'], vip_obj)
return vip
# end create_vip
def validate_pool(self, driver_pool, config_pool):
self.assertEqual(driver_pool['id'], config_pool.uuid)
self.assertEqual(driver_pool['description'], config_pool.id_perms['description'])
self.assertEqual(driver_pool['name'], config_pool.name)
self.assertEqual(driver_pool['vip_id'], config_pool.virtual_ip)
self.assertEqual(driver_pool['protocol'], config_pool.params['protocol'])
self.assertEqual(driver_pool['lb_method'], config_pool.params['loadbalancer_method'])
self.assertEqual(driver_pool['subnet_id'], config_pool.params['subnet_id'])
self.assertEqual(driver_pool['admin_state_up'], config_pool.params['admin_state'])
self.assertEqual(driver_pool['tenant_id'], config_pool.parent_uuid)
self.assertEqual(len(driver_pool['members']), len(config_pool.members))
self.assertEqual(len(driver_pool['health_monitors']), len(config_pool.loadbalancer_healthmonitors))
#end
def validate_hm(self, driver_hm, config_hm):
self.assertEqual(driver_hm['id'], config_hm.uuid)
self.assertEqual(driver_hm['tenant_id'], config_hm.parent_uuid)
self.assertEqual(driver_hm['admin_state_up'], config_hm.params['admin_state'])
self.assertEqual(driver_hm['delay'], config_hm.params['delay'])
self.assertEqual(driver_hm['expected_codes'], config_hm.params['expected_codes'])
self.assertEqual(driver_hm['http_method'], config_hm.params['http_method'])
self.assertEqual(driver_hm['max_retries'], config_hm.params['max_retries'])
self.assertEqual(driver_hm['timeout'], config_hm.params['timeout'])
self.assertEqual(driver_hm['type'], config_hm.params['monitor_type'])
self.assertEqual(driver_hm['url_path'], config_hm.params['url_path'])
self.assertEqual(len(driver_hm['pools']), len(config_hm.loadbalancer_pools))
#end
def validate_pool_member(self, driver_member, config_member):
self.assertEqual(driver_member['address'], config_member.params['address'])
self.assertEqual(driver_member['admin_state_up'], config_member.params['admin_state'])
self.assertEqual(driver_member['protocol_port'], config_member.params['protocol_port'])
self.assertEqual(driver_member['weight'], config_member.params['weight'])
self.assertEqual(driver_member['pool_id'], config_member.loadbalancer_pool)
self.assertEqual(driver_member['id'], config_member.uuid)
#end
def validate_vip(self, driver_vip, config_vip):
self.assertEqual(driver_vip['address'], config_vip.params['address'])
self.assertEqual(driver_vip['admin_state_up'], config_vip.params['admin_state'])
self.assertEqual(driver_vip['connection_limit'], config_vip.params['connection_limit'])
self.assertEqual(driver_vip['protocol_port'], config_vip.params['protocol_port'])
self.assertEqual(driver_vip['subnet_id'], config_vip.params['subnet_id'])
self.assertEqual(driver_vip['protocol'], config_vip.params['protocol'])
self.assertEqual(driver_vip['tenant_id'], config_vip.parent_uuid)
self.assertEqual(driver_vip['admin_state_up'], config_vip.params['admin_state'])
self.assertEqual(driver_vip['pool_id'], config_vip.loadbalancer_pool)
#end
def test_add_delete_sas(self):
sas = self.create_sa_set("test-lb-provider")
sa = self.create_sa("test-lb-provider-0", "test-lb-provider")
sas.add()
sas_tmp = config_db.ServiceApplianceSetSM.get('test-lb-provider')
self.assertEqual(sas_tmp.driver, "svc_monitor.tests.fake_lb_driver.OpencontrailFakeLoadbalancerDriver")
self.assertIsNotNone(self.lb_agent._loadbalancer_driver['test-lb-provider'])
# end test_add_delete_sas
def test_add_delete_pool(self):
sas = self.create_sa_set("test-lb-provider")
sas.add()
pool = self.create_pool("test-lb-pool")
pool.add()
# Validate
self.validate_pool(self.lb_agent._loadbalancer_driver['test-lb-provider']._pools['test-lb-pool'], pool)
# Clean up
config_db.LoadbalancerPoolSM.delete('test-lb-pool')
# end test_add_delete_pool
def test_add_delete_hm(self):
sas = self.create_sa_set("test-lb-provider")
sas.add()
pool = self.create_pool(fq_name_str="test-lb-pool")
hm_obj = self.create_hm_obj("test-hm")
hm_obj['loadbalancer_pool_back_refs']=[{'uuid': pool.uuid}]
hm = config_db.HealthMonitorSM.locate(hm_obj['uuid'], hm_obj)
pool.add()
# Validate
self.validate_pool(self.lb_agent._loadbalancer_driver['test-lb-provider']._pools['test-lb-pool'], pool)
self.validate_hm(self.lb_agent._loadbalancer_driver['test-lb-provider']._hms['test-hm'], hm)
self.assertEqual(len(self.lb_agent._loadbalancer_driver['test-lb-provider']._hms_pools['test-hm']), 1)
self.assertTrue('test-lb-pool' in self.lb_agent._loadbalancer_driver['test-lb-provider']._hms_pools['test-hm'])
# Clean up
config_db.LoadbalancerPoolSM.delete('test-lb-pool')
config_db.HealthMonitorSM.delete('test-hm')
# end test_add_delete_hm
def test_add_delete_pool_with_members(self):
sas = self.create_sa_set("test-lb-provider")
sas.add()
pool = self.create_pool("test-lb-pool")
self.create_pool_members("test-lb-pool", 5)
pool.add()
# Validate
self.validate_pool(self.lb_agent._loadbalancer_driver['test-lb-provider']._pools['test-lb-pool'], pool)
for i in range(5):
id = 'member_'+str(i)
config_member = config_db.LoadbalancerMemberSM.get(id)
self.validate_pool_member(self.lb_agent._loadbalancer_driver['test-lb-provider']._members[id], config_member)
# Clean up
for i in range(5):
config_db.LoadbalancerMemberSM.delete('member_'+str(i))
config_db.LoadbalancerPoolSM.delete('test-lb-pool')
# end test_add_delete_pool_with_members
def test_add_delete_pool_with_members_vip(self):
sas = self.create_sa_set("test-lb-provider")
sas.add()
vip = self.create_vip('vip')
pool = self.create_pool("test-lb-pool", vip)
self.create_pool_members("test-lb-pool", 5)
pool.add()
# Validate
self.validate_pool(self.lb_agent._loadbalancer_driver['test-lb-provider']._pools['test-lb-pool'], pool)
for i in range(5):
id = 'member_'+str(i)
config_member = config_db.LoadbalancerMemberSM.get(id)
self.validate_pool_member(self.lb_agent._loadbalancer_driver['test-lb-provider']._members[id], config_member)
self.validate_vip(self.lb_agent._loadbalancer_driver['test-lb-provider']._vips['vip'], vip)
# Cleanup
for i in range(5):
config_db.LoadbalancerMemberSM.delete('member_'+str(i))
config_db.LoadbalancerPoolSM.delete('test-lb-pool')
config_db.VirtualIpSM.delete('vip')
# end test_add_delete_pool_with_members_vip
def test_update_pool(self):
sas = self.create_sa_set("test-lb-provider")
sas.add()
vip = self.create_vip('vip')
pool = self.create_pool("test-lb-pool", vip)
self.create_pool_members("test-lb-pool", 5)
pool.add()
# Validate
self.validate_pool(self.lb_agent._loadbalancer_driver['test-lb-provider']._pools['test-lb-pool'], pool)
for i in range(5):
id = 'member_'+str(i)
config_member = config_db.LoadbalancerMemberSM.get(id)
self.validate_pool_member(self.lb_agent._loadbalancer_driver['test-lb-provider']._members[id], config_member)
self.validate_vip(self.lb_agent._loadbalancer_driver['test-lb-provider']._vips['vip'], vip)
# update the Pool
self.update_pool(pool)
pool.add()
# validate
self.validate_pool(self.lb_agent._loadbalancer_driver['test-lb-provider']._pools['test-lb-pool'], pool)
for i in range(5):
id = 'member_'+str(i)
config_member = config_db.LoadbalancerMemberSM.get(id)
self.validate_pool_member(self.lb_agent._loadbalancer_driver['test-lb-provider']._members[id], config_member)
self.validate_vip(self.lb_agent._loadbalancer_driver['test-lb-provider']._vips['vip'], vip)
# Cleanup
for i in range(5):
config_db.LoadbalancerMemberSM.delete('member_'+str(i))
config_db.LoadbalancerPoolSM.delete('test-lb-pool')
config_db.VirtualIpSM.delete('vip')
# end test_update_pool
def test_update_members(self):
sas = self.create_sa_set("test-lb-provider")
sas.add()
vip = self.create_vip('vip')
pool = self.create_pool("test-lb-pool", vip)
self.create_pool_members("test-lb-pool", 5)
pool.add()
# update the Pool-- Add delete even members
for i in range(5):
if i%2 == 0:
config_db.LoadbalancerMemberSM.delete('member_'+str(i))
pool.add()
# validate - members
for i in range(5):
id = 'member_'+str(i)
config_member = config_db.LoadbalancerMemberSM.get(id)
if i%2 == 0:
self.assertIsNone(config_member)
self.assertIsNone(self.lb_agent._loadbalancer_driver['test-lb-provider']._members.get(id, None))
else:
self.validate_pool_member(self.lb_agent._loadbalancer_driver['test-lb-provider']._members[id], config_member)
# validate - pool
self.validate_pool(self.lb_agent._loadbalancer_driver['test-lb-provider']._pools['test-lb-pool'], pool)
# Update the pool again.. Add those members back
for i in range(5):
if i%2 == 0:
self.create_pool_member("test-lb-pool", 'member_'+str(i), '22.2.2.'+str(i))
pool.add()
# validate
self.validate_pool(self.lb_agent._loadbalancer_driver['test-lb-provider']._pools['test-lb-pool'], pool)
# validate - members
for i in range(5):
id = 'member_'+str(i)
config_member = config_db.LoadbalancerMemberSM.get(id)
self.validate_pool_member(self.lb_agent._loadbalancer_driver['test-lb-provider']._members[id], config_member)
# Update one member and check
config_member = config_db.LoadbalancerMemberSM.get('member_3')
config_member.weight = 20
pool.add()
# validate
self.validate_pool(self.lb_agent._loadbalancer_driver['test-lb-provider']._pools['test-lb-pool'], pool)
# validate - members
for i in range(5):
id = 'member_'+str(i)
config_member = config_db.LoadbalancerMemberSM.get(id)
self.validate_pool_member(self.lb_agent._loadbalancer_driver['test-lb-provider']._members[id], config_member)
# Cleanup
for i in range(5):
config_db.LoadbalancerMemberSM.delete('member_'+str(i))
config_db.LoadbalancerPoolSM.delete('test-lb-pool')
config_db.VirtualIpSM.delete('vip')
# end test_update_members
def test_update_vip(self):
sas = self.create_sa_set("test-lb-provider")
sas.add()
vip = self.create_vip('vip')
pool = self.create_pool("test-lb-pool", vip)
self.create_pool_members("test-lb-pool", 5)
pool.add()
# Validate
self.validate_pool(self.lb_agent._loadbalancer_driver['test-lb-provider']._pools['test-lb-pool'], pool)
for i in range(5):
id = 'member_'+str(i)
config_member = config_db.LoadbalancerMemberSM.get(id)
self.validate_pool_member(self.lb_agent._loadbalancer_driver['test-lb-provider']._members[id], config_member)
self.validate_vip(self.lb_agent._loadbalancer_driver['test-lb-provider']._vips['vip'], vip)
# update the Pool
self.update_vip(vip)
pool.add()
# validate
self.validate_pool(self.lb_agent._loadbalancer_driver['test-lb-provider']._pools['test-lb-pool'], pool)
for i in range(5):
id = 'member_'+str(i)
config_member = config_db.LoadbalancerMemberSM.get(id)
self.validate_pool_member(self.lb_agent._loadbalancer_driver['test-lb-provider']._members[id], config_member)
self.validate_vip(self.lb_agent._loadbalancer_driver['test-lb-provider']._vips['vip'], vip)
# Cleanup
for i in range(5):
config_db.LoadbalancerMemberSM.delete('member_'+str(i))
config_db.LoadbalancerPoolSM.delete('test-lb-pool')
config_db.VirtualIpSM.delete('vip')
# end test_update_vip
def test_update_hm_props(self):
sas = self.create_sa_set("test-lb-provider")
sas.add()
pool = self.create_pool(fq_name_str="test-lb-pool")
hm_obj = self.create_hm_obj("test-hm")
hm_obj['loadbalancer_pool_back_refs']=[{'uuid': pool.uuid}]
hm = config_db.HealthMonitorSM.locate(hm_obj['uuid'], hm_obj)
pool.add()
# Validate
self.validate_pool(self.lb_agent._loadbalancer_driver['test-lb-provider']._pools['test-lb-pool'], pool)
self.validate_hm(self.lb_agent._loadbalancer_driver['test-lb-provider']._hms['test-hm'], hm)
self.assertEqual(len(self.lb_agent._loadbalancer_driver['test-lb-provider']._hms_pools['test-hm']), 1)
self.assertTrue('test-lb-pool' in self.lb_agent._loadbalancer_driver['test-lb-provider']._hms_pools['test-hm'])
hm_obj['loadbalancer_healthmonitor_properties']['max_retries'] = '100'
config_db.HealthMonitorSM.update(hm, hm_obj)
pool.add()
self.validate_pool(self.lb_agent._loadbalancer_driver['test-lb-provider']._pools['test-lb-pool'], pool)
self.validate_hm(self.lb_agent._loadbalancer_driver['test-lb-provider']._hms['test-hm'], hm)
self.assertEqual(len(self.lb_agent._loadbalancer_driver['test-lb-provider']._hms_pools['test-hm']), 1)
self.assertTrue('test-lb-pool' in self.lb_agent._loadbalancer_driver['test-lb-provider']._hms_pools['test-hm'])
# Clean up
config_db.LoadbalancerPoolSM.delete('test-lb-pool')
config_db.HealthMonitorSM.delete('test-hm')
# end test_update_hm_props
def test_update_hm_pools(self):
sas = self.create_sa_set("test-lb-provider")
sas.add()
pool = self.create_pool(fq_name_str="test-lb-pool")
hm_obj = self.create_hm_obj("test-hm")
hm_obj['loadbalancer_pool_back_refs']=[{'uuid': pool.uuid}]
hm = config_db.HealthMonitorSM.locate(hm_obj['uuid'], hm_obj)
pool.add()
# Validate
self.validate_pool(self.lb_agent._loadbalancer_driver['test-lb-provider']._pools['test-lb-pool'], pool)
self.validate_hm(self.lb_agent._loadbalancer_driver['test-lb-provider']._hms['test-hm'], hm)
self.assertEqual(len(self.lb_agent._loadbalancer_driver['test-lb-provider']._hms_pools['test-hm']), 1)
self.assertTrue('test-lb-pool' in self.lb_agent._loadbalancer_driver['test-lb-provider']._hms_pools['test-hm'])
pool_1 = self.create_pool(fq_name_str="test-lb-pool_1")
hm_obj['loadbalancer_pool_back_refs']=[{'uuid': pool.uuid}, {'uuid': pool_1.uuid}]
config_db.HealthMonitorSM.update(hm, hm_obj)
pool.add()
# Validate after update
self.validate_pool(self.lb_agent._loadbalancer_driver['test-lb-provider']._pools['test-lb-pool'], pool)
self.validate_hm(self.lb_agent._loadbalancer_driver['test-lb-provider']._hms['test-hm'], hm)
self.assertEqual(len(self.lb_agent._loadbalancer_driver['test-lb-provider']._hms_pools['test-hm']), 2)
self.assertTrue('test-lb-pool' in self.lb_agent._loadbalancer_driver['test-lb-provider']._hms_pools['test-hm'])
self.assertTrue('test-lb-pool_1' in self.lb_agent._loadbalancer_driver['test-lb-provider']._hms_pools['test-hm'])
# Clean up
config_db.LoadbalancerPoolSM.delete('test-lb-pool')
config_db.HealthMonitorSM.delete('test-hm')
# end test_update_hm
def test_audit_pool(self):
sas = self.create_sa_set("test-lb-provider")
sas.add()
pool = self.create_pool("test-lb-pool")
pool.add()
# Validate
self.validate_pool(self.lb_agent._loadbalancer_driver['test-lb-provider']._pools['test-lb-pool'], pool)
# Delete the pool without letting the driver know about it..
config_db.LoadbalancerPoolSM.reset()
# Validate that all pool info is valid in driver..still..
self.assertEqual(len(self.lb_agent._loadbalancer_driver['test-lb-provider']._pools), 1)
self.validate_pool(self.lb_agent._loadbalancer_driver['test-lb-provider']._pools['test-lb-pool'], pool)
self.assertEqual(len(self._db), 1)
self.assertTrue('test-lb-pool' in self._db)
# call audit and ensure pool is deleted
self.lb_agent.audit_lb_pools()
# Validate that audit has deleted the pool from driver and from db
self.assertEqual(len(self.lb_agent._loadbalancer_driver['test-lb-provider']._pools), 0)
self.assertEqual(len(self._db), 0)
# end test_audit_pool
#end LoadbalancerAgentTest(unittest.TestCase):
| sajuptpm/contrail-controller | src/config/svc-monitor/svc_monitor/tests/test_loadbalancer_agent.py | Python | apache-2.0 | 27,474 |
# Copyright (C) 2016 Red Hat, Inc., Pep Turro Mauri <[email protected]>
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.report.plugins import Plugin, RedHatPlugin, PluginOpt
import os.path
# This plugin collects static configuration and runtime information
# about OpenShift Origin based environments, like OpenShift Enterprise 3
# Some clarification on naming:
# OpenShift Origin is the upstream project for OpenShift Enterprise,
# OpenShift Container Platform, and Atomic Platform.
#
# However, the name "OpenShift Origin" refers to two different code bases:
# * Origin M5 and later (https://github.com/openshift/origin)
# which is upstream for OpenShift 3.x and later.
# This is what this plugin handles
# * Origin M4 and earlier (https://github.com/openshift/origin-server)
# which is upstream for OpenShift 1.x and 2.x.
# This is handled by the plugin in openshift.py
# Note that this plugin should be used in conjunction with other plugins
# in order to capture relevant data: the Kubernetes plugin for the
# masters, the Docker plugin for the nodes, and also generic
# plugins (e.g. for /etc/sysconfig entries, network setup etc)
class OpenShiftOrigin(Plugin):
short_desc = 'OpenShift Origin'
plugin_name = "origin"
files = None # file lists assigned after path setup below
profiles = ('openshift',)
option_list = [
PluginOpt('diag', default=True,
desc='Collect oc adm diagnostics output'),
PluginOpt('diag-prevent', default=True,
desc='Use --prevent-modification with oc adm diagnostics'),
PluginOpt('all-namespaces', default=False,
desc='collect dc output for all namespaces')
]
master_base_dir = "/etc/origin/master"
node_base_dir = "/etc/origin/node"
master_cfg = os.path.join(master_base_dir, "master-config.yaml")
master_env = os.path.join(master_base_dir, "master.env")
node_cfg_file = "node-config.yaml"
node_cfg = os.path.join(node_base_dir, node_cfg_file)
node_kubeconfig = os.path.join(node_base_dir, "node.kubeconfig")
static_pod_dir = os.path.join(node_base_dir, "pods")
files = (master_cfg, node_cfg)
# Master vs. node
#
# OpenShift Origin/3.x cluster members can be a master, a node, or both at
# the same time: in most deployments masters are also nodes in order to get
# access to the pod network, which some functionality (e.g. the API proxy)
# requires. Therefore the following methods may all evaluate True on a
# single instance (at least one must evaluate True if this is an OpenShift
# installation)
def is_master(self):
"""Determine if we are on a master"""
return self.path_exists(self.master_cfg)
def is_node(self):
"""Determine if we are on a node"""
return self.path_exists(self.node_cfg)
def is_static_etcd(self):
"""Determine if we are on a node running etcd"""
return self.path_exists(os.path.join(self.static_pod_dir, "etcd.yaml"))
def is_static_pod_compatible(self):
"""Determine if a node is running static pods"""
return self.path_exists(self.static_pod_dir)
def setup(self):
bstrap_node_cfg = os.path.join(self.node_base_dir,
"bootstrap-" + self.node_cfg_file)
bstrap_kubeconfig = os.path.join(self.node_base_dir,
"bootstrap.kubeconfig")
node_certs = os.path.join(self.node_base_dir, "certs", "*")
node_client_ca = os.path.join(self.node_base_dir, "client-ca.crt")
admin_cfg = os.path.join(self.master_base_dir, "admin.kubeconfig")
oc_cmd_admin = "%s --config=%s" % ("oc", admin_cfg)
static_pod_logs_cmd = "master-logs"
# Note that a system can run both a master and a node.
# See "Master vs. node" above.
if self.is_master():
self.add_copy_spec([
self.master_cfg,
self.master_env,
os.path.join(self.master_base_dir, "*.crt"),
])
if self.is_static_pod_compatible():
self.add_copy_spec(os.path.join(self.static_pod_dir, "*.yaml"))
self.add_cmd_output([
"%s api api" % static_pod_logs_cmd,
"%s controllers controllers" % static_pod_logs_cmd,
])
# TODO: some thoughts about information that might also be useful
# to collect. However, these are maybe not needed in general
# and/or present some challenges (scale, sensitive, ...) and need
# some more thought. For now just leaving this comment here until
# we decide if it's worth collecting:
#
# General project status:
# oc status --all-namespaces (introduced in OSE 3.2)
# -> deemed as not worthy in BZ#1394527
# Metrics deployment configurations
# oc get -o json dc -n openshift-infra
# Logging stack deployment configurations
# oc get -o json dc -n logging
#
# Note: Information about nodes, events, pods, and services
# is already collected by the Kubernetes plugin
subcmds = [
"describe projects",
"adm top images",
"adm top imagestreams"
]
self.add_cmd_output([
'%s %s' % (oc_cmd_admin, subcmd) for subcmd in subcmds
])
jcmds = [
"hostsubnet",
"clusternetwork",
"netnamespaces"
]
self.add_cmd_output([
'%s get -o json %s' % (oc_cmd_admin, jcmd) for jcmd in jcmds
])
if self.get_option('all-namespaces'):
ocn = self.exec_cmd('%s get namespaces' % oc_cmd_admin)
ns_output = ocn['output'].splitlines()[1:]
nmsps = [n.split()[0] for n in ns_output if n]
else:
nmsps = [
'default',
'openshift-web-console',
'openshift-ansible-service-broker'
]
self.add_cmd_output([
'%s get -o json dc -n %s' % (oc_cmd_admin, n) for n in nmsps
])
if self.get_option('diag'):
diag_cmd = "%s adm diagnostics -l 0" % oc_cmd_admin
if self.get_option('diag-prevent'):
diag_cmd += " --prevent-modification=true"
self.add_cmd_output(diag_cmd)
self.add_journal(units=["atomic-openshift-master",
"atomic-openshift-master-api",
"atomic-openshift-master-controllers"])
            # get logs from the infrastructure pods running in the default ns
pods = self.exec_cmd("%s get pod -o name -n default"
% oc_cmd_admin)
for pod in pods['output'].splitlines():
self.add_cmd_output("%s logs -n default %s"
% (oc_cmd_admin, pod))
# Note that a system can run both a master and a node.
# See "Master vs. node" above.
if self.is_node():
self.add_copy_spec([
self.node_cfg,
self.node_kubeconfig,
node_certs,
node_client_ca,
bstrap_node_cfg,
bstrap_kubeconfig,
os.path.join(self.node_base_dir, "*.crt"),
os.path.join(self.node_base_dir, "resolv.conf"),
os.path.join(self.node_base_dir, "node-dnsmasq.conf"),
])
self.add_journal(units="atomic-openshift-node")
if self.is_static_etcd():
self.add_cmd_output("%s etcd etcd" % static_pod_logs_cmd)
def postproc(self):
# Clear env values from objects that can contain sensitive data
# Sample JSON content:
# {
# "name": "MYSQL_PASSWORD",
# "value": "mypassword"
# },
# This will mask values when the "name" looks susceptible of
# values worth obfuscating, i.e. if the name contains strings
# like "pass", "pwd", "key" or "token".
env_regexp = r'(?P<var>{\s*"name":\s*[^,]*' \
r'(pass|pwd|key|token|cred|secret' \
r'|PASS|PWD|KEY|TOKEN|CRED|SECRET)[^,]*,' \
r'\s*"value":)[^}]*'
self.do_cmd_output_sub('oc*json', env_regexp, r'\g<var> "********"')
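        # Illustrative effect of the substitution above: a collected JSON fragment
        # such as  { "name": "MYSQL_PASSWORD", "value": "mypassword" }
        # is rewritten to  { "name": "MYSQL_PASSWORD", "value": "********" }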
# LDAP identity provider
self.do_file_sub(self.master_cfg,
r"(bindPassword:\s*)(.*)",
r'\1"********"')
# github/google/OpenID identity providers
self.do_file_sub(self.master_cfg,
r"(clientSecret:\s*)(.*)",
r'\1"********"')
class AtomicOpenShift(OpenShiftOrigin, RedHatPlugin):
short_desc = 'OpenShift Enterprise / OpenShift Container Platform'
packages = ('atomic-openshift',)
# vim: set et ts=4 sw=4 :
| TurboTurtle/sos | sos/report/plugins/origin.py | Python | gpl-2.0 | 9,560 |
"""
{{ cookiecutter.project_module_name }}.exceptions
=================================================
Exceptions module used by the ``{{ cookiecutter.project_pkg_name }}`` package.
"""
| YetAnotherIgor/cookiecutter-barebones-py35 | {{ cookiecutter.project_pkg_name }}/{{ cookiecutter.project_module_name }}/exceptions.py | Python | bsd-3-clause | 188 |
# This file is part of Gajim.
#
# Gajim is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; version 3 only.
#
# Gajim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gajim. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import typing
from typing import Any
from typing import Union
from typing import Optional
from typing import Callable
from dataclasses import dataclass
from dataclasses import field
from nbxmpp.protocol import JID
from nbxmpp.structs import HTTPAuthData
from nbxmpp.structs import RosterItem
from nbxmpp.structs import TuneData
from nbxmpp.const import InviteType
from gajim.common.file_props import FileProp
from gajim.common.const import KindConstant
from gajim.common.helpers import AdditionalDataDict
if typing.TYPE_CHECKING:
from gajim.common.client import Client
MainEventT = Union['PresenceReceived',
'MessageSent',
'MamMessageReceived',
'GcMessageReceived',
'MessageUpdated',
'ReceiptReceived',
'DisplayedReceived',
'MessageError',
'MucDiscoUpdate',
'JingleConnectedReceived',
'JingleDisconnectedReceived',
'JingleErrorReceived',
'FileRequestSent']
@dataclass
class ApplicationEvent:
name: str
@dataclass
class StyleChanged(ApplicationEvent):
name: str = field(init=False, default='style-changed')
@dataclass
class ThemeUpdate(ApplicationEvent):
name: str = field(init=False, default='theme-update')
@dataclass
class ShowChanged(ApplicationEvent):
name: str = field(init=False, default='our-show')
account: str
show: str
@dataclass
class AccountConnected(ApplicationEvent):
name: str = field(init=False, default='account-connected')
account: str
@dataclass
class AccountDisonnected(ApplicationEvent):
name: str = field(init=False, default='account-disconnected')
account: str
@dataclass
class PlainConnection(ApplicationEvent):
name: str = field(init=False, default='plain-connection')
account: str
connect: Callable[..., Any]
abort: Callable[..., Any]
@dataclass
class PasswordRequired(ApplicationEvent):
name: str = field(init=False, default='password-required')
client: 'Client'
on_password: Callable[..., Any]
@dataclass
class Notification(ApplicationEvent):
name: str = field(init=False, default='notification')
account: str
type: str
title: str
text: str
jid: Optional[Union[JID, str]] = None
sub_type: Optional[str] = None
sound: Optional[str] = None
icon_name: Optional[str] = None
@dataclass
class StanzaSent(ApplicationEvent):
name: str = field(init=False, default='stanza-sent')
account: str
stanza: Any
@dataclass
class StanzaReceived(ApplicationEvent):
name: str = field(init=False, default='stanza-received')
account: str
stanza: Any
@dataclass
class SignedIn(ApplicationEvent):
name: str = field(init=False, default='signed-in')
account: str
conn: 'Client'
@dataclass
class MusicTrackChanged(ApplicationEvent):
name: str = field(init=False, default='music-track-changed')
info: Optional[TuneData]
@dataclass
class MessageSent(ApplicationEvent):
name: str = field(init=False, default='message-sent')
account: str
jid: JID
message: str
message_id: str
chatstate: Optional[str]
timestamp: float
additional_data: AdditionalDataDict
label: Optional[str]
correct_id: Optional[str]
play_sound: bool
@dataclass
class MessageNotSent(ApplicationEvent):
name: str = field(init=False, default='message-not-sent')
client: 'Client'
jid: str
message: str
error: str
time: float
@dataclass
class AdHocCommandError(ApplicationEvent):
name: str = field(init=False, default='adhoc-command-error')
conn: 'Client'
error: str
@dataclass
class AdHocCommandActionResponse(ApplicationEvent):
name: str = field(init=False, default='adhoc-command-action-response')
conn: 'Client'
command: Any
@dataclass
class FileProgress(ApplicationEvent):
name: str = field(init=False, default='file-progress')
file_props: FileProp
@dataclass
class FileCompleted(ApplicationEvent):
name: str = field(init=False, default='file-completed')
account: str
file_props: FileProp
jid: str
@dataclass
class FileError(ApplicationEvent):
name: str = field(init=False, default='file-error')
account: str
file_props: FileProp
jid: str
@dataclass
class FileHashError(ApplicationEvent):
name: str = field(init=False, default='file-hash-error')
account: str
file_props: FileProp
jid: str
@dataclass
class FileRequestSent(ApplicationEvent):
name: str = field(init=False, default='file-request-sent')
account: str
file_props: FileProp
jid: JID
@dataclass
class FileRequestError(ApplicationEvent):
name: str = field(init=False, default='file-request-error')
conn: 'Client'
file_props: FileProp
jid: str
error_msg: str = ''
@dataclass
class FileSendError(ApplicationEvent):
name: str = field(init=False, default='file-send-error')
account: str
file_props: FileProp
jid: str
error_msg: str = ''
@dataclass
class AccountEnabled(ApplicationEvent):
name: str = field(init=False, default='account-enabled')
account: str
@dataclass
class AccountDisabled(ApplicationEvent):
name: str = field(init=False, default='account-disabled')
account: str
@dataclass
class FeatureDiscovered(ApplicationEvent):
name: str = field(init=False, default='feature-discovered')
account: str
feature: str
@dataclass
class BookmarksReceived(ApplicationEvent):
name: str = field(init=False, default='bookmarks-received')
account: str
@dataclass
class BaseChatMarkerEvent(ApplicationEvent):
name: str
account: str
jid: JID
properties: Any
type: str
is_muc_pm: bool
marker_id: str
@dataclass
class ReadStateSync(BaseChatMarkerEvent):
name: str = field(init=False, default='read-state-sync')
@dataclass
class DisplayedReceived(BaseChatMarkerEvent):
name: str = field(init=False, default='displayed-received')
@dataclass
class IqErrorReceived(ApplicationEvent):
name: str = field(init=False, default='iq-error-received')
account: str
properties: Any
@dataclass
class HttpAuth(ApplicationEvent):
name: str = field(init=False, default='http-auth')
client: 'Client'
data: HTTPAuthData
stanza: Any
@dataclass
class AgentRemoved(ApplicationEvent):
name: str = field(init=False, default='agent-removed')
conn: 'Client'
agent: str
jid_list: list[str]
@dataclass
class GatewayPromptReceived(ApplicationEvent):
name: str = field(init=False, default='gateway-prompt-received')
conn: 'Client'
fjid: str
jid: str
resource: str
desc: str
prompt: str
prompt_jid: str
stanza: Any
@dataclass
class ServerDiscoReceived(ApplicationEvent):
name: str = field(init=False, default='server-disco-received')
@dataclass
class MucDiscoUpdate(ApplicationEvent):
name: str = field(init=False, default='muc-disco-update')
account: str
jid: JID
@dataclass
class RawMessageReceived(ApplicationEvent):
name: str = field(init=False, default='raw-message-received')
account: str
stanza: Any
conn: 'Client'
@dataclass
class RawMamMessageReceived(ApplicationEvent):
name: str = field(init=False, default='raw-mam-message-received')
account: str
stanza: Any
properties: Any
@dataclass
class ArchivingIntervalFinished(ApplicationEvent):
name: str = field(init=False, default='archiving-interval-finished')
account: str
query_id: str
@dataclass
class MessageUpdated(ApplicationEvent):
name: str = field(init=False, default='message-updated')
account: str
jid: JID
msgtxt: str
properties: Any
correct_id: str
@dataclass
class MamMessageReceived(ApplicationEvent):
name: str = field(init=False, default='mam-message-received')
account: str
jid: JID
msgtxt: str
properties: Any
additional_data: AdditionalDataDict
unique_id: str
stanza_id: str
archive_jid: str
kind: KindConstant
@dataclass
class MessageReceived(ApplicationEvent):
name: str = field(init=False, default='message-received')
conn: 'Client'
stanza: Any
account: str
jid: JID
msgtxt: str
properties: Any
additional_data: AdditionalDataDict
unique_id: str
stanza_id: str
fjid: str
resource: str
session: Any
delayed: Optional[float]
msg_log_id: int
displaymarking: str
@dataclass
class GcMessageReceived(MessageReceived):
name: str = field(init=False, default='gc-message-received')
room_jid: str
@dataclass
class MessageError(ApplicationEvent):
name: str = field(init=False, default='message-error')
account: str
jid: JID
room_jid: str
message_id: str
error: Any
@dataclass
class RosterItemExchangeEvent(ApplicationEvent):
name: str = field(init=False, default='roster-item-exchange')
client: 'Client'
jid: JID
exchange_items_list: dict[str, list[str]]
action: str
@dataclass
class RosterReceived(ApplicationEvent):
name: str = field(init=False, default='roster-received')
account: str
@dataclass
class RosterPush(ApplicationEvent):
name: str = field(init=False, default='roster-push')
account: str
item: RosterItem
@dataclass
class PluginAdded(ApplicationEvent):
name: str = field(init=False, default='plugin-added')
plugin: Any
@dataclass
class PluginRemoved(ApplicationEvent):
name: str = field(init=False, default='plugin-removed')
plugin: Any
@dataclass
class SearchFormReceivedEvent(ApplicationEvent):
name: str = field(init=False, default='search-form-received')
conn: 'Client'
is_dataform: bool
data: Any
@dataclass
class SearchResultReceivedEvent(ApplicationEvent):
name: str = field(init=False, default='search-result-received')
conn: 'Client'
is_dataform: bool
data: Any
@dataclass
class ReceiptReceived(ApplicationEvent):
name: str = field(init=False, default='receipt-received')
account: str
jid: JID
receipt_id: str
@dataclass
class JingleEvent(ApplicationEvent):
name: str
conn: 'Client'
account: str
fjid: str
jid: JID
sid: str
resource: str
jingle_session: Any
@dataclass
class JingleConnectedReceived(JingleEvent):
name: str = field(init=False, default='jingle-connected-received')
media: Any
@dataclass
class JingleDisconnectedReceived(JingleEvent):
name: str = field(init=False, default='jingle-disconnected-received')
media: Any
reason: str
@dataclass
class JingleRequestReceived(JingleEvent):
name: str = field(init=False, default='jingle-request-received')
contents: Any
@dataclass
class JingleFtCancelledReceived(JingleEvent):
name: str = field(init=False, default='jingle-ft-cancelled-received')
media: Any
reason: str
@dataclass
class JingleErrorReceived(JingleEvent):
name: str = field(init=False, default='jingle-error-received')
reason: str
@dataclass
class MucAdded(ApplicationEvent):
name: str = field(init=False, default='muc-added')
account: str
jid: JID
@dataclass
class MucDecline(ApplicationEvent):
name: str = field(init=False, default='muc-decline')
account: str
muc: JID
from_: JID
reason: Optional[str]
@dataclass
class MucInvitation(ApplicationEvent):
name: str = field(init=False, default='muc-invitation')
account: str
info: Any
muc: JID
from_: JID
reason: Optional[str]
password: Optional[str]
type: InviteType
continued: bool
thread: Optional[str]
@dataclass
class PingSent(ApplicationEvent):
name: str = field(init=False, default='ping-sent')
account: str
contact: Any
@dataclass
class PingError(ApplicationEvent):
name: str = field(init=False, default='ping-error')
account: str
contact: Any
error: str
@dataclass
class PingReply(ApplicationEvent):
name: str = field(init=False, default='ping-reply')
account: str
contact: Any
seconds: int
@dataclass
class SecCatalogReceived(ApplicationEvent):
name: str = field(init=False, default='sec-catalog-received')
account: str
jid: str
catalog: dict[str, Any]
@dataclass
class PresenceReceived(ApplicationEvent):
name: str = field(init=False, default='presence-received')
account: str
conn: 'Client'
stanza: Any
prio: int
need_add_in_roster: bool
popup: bool
ptype: str
jid: JID
resource: str
id_: str
fjid: str
timestamp: float
avatar_sha: Optional[str]
user_nick: Optional[str]
idle_time: Optional[float]
show: str
new_show: str
old_show: str
status: str
contact_list: list[str]
contact: Any
@dataclass
class SubscribePresenceReceived(ApplicationEvent):
name: str = field(init=False, default='subscribe-presence-received')
conn: 'Client'
account: str
jid: str
fjid: str
status: str
user_nick: str
is_transport: bool
@dataclass
class SubscribedPresenceReceived(ApplicationEvent):
name: str = field(init=False, default='subscribed-presence-received')
account: str
jid: str
@dataclass
class UnsubscribedPresenceReceived(ApplicationEvent):
name: str = field(init=False, default='unsubscribed-presence-received')
conn: 'Client'
account: str
jid: str
@dataclass
class FileRequestReceivedEvent(ApplicationEvent):
name: str = field(init=False, default='file-request-received')
conn: 'Client'
stanza: Any
id_: str
fjid: str
account: str
jid: JID
file_props: FileProp
@dataclass
class AllowGajimUpdateCheck(ApplicationEvent):
name: str = field(init=False, default='allow-gajim-update-check')
@dataclass
class GajimUpdateAvailable(ApplicationEvent):
name: str = field(init=False, default='gajim-update-available')
version: str
| gajim/gajim | gajim/common/events.py | Python | gpl-3.0 | 14,596 |
# -*- coding: utf-8 -*-
'''Twisted logging to Python loggin bridge.'''
'''
Kontalk Pyserver
Copyright (C) 2011 Kontalk Devteam <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from twisted.python import log
LEVEL_DEBUG = 1
LEVEL_INFO = 1 << 1
LEVEL_WARN = 1 << 2
LEVEL_ERROR = 1 << 3
# all levels
LEVEL_ALL = LEVEL_DEBUG | LEVEL_INFO | LEVEL_WARN | LEVEL_ERROR
level = 0
def init(cfg):
'''Initializes logging system.'''
global level
l = cfg['server']['log.levels']
if 'ALL' in l:
level = LEVEL_ALL
else:
if 'DEBUG' in l:
level |= LEVEL_DEBUG
if 'INFO' in l:
level |= LEVEL_INFO
if 'WARN' in l:
level |= LEVEL_WARN
if 'ERROR' in l:
level |= LEVEL_ERROR
def debug(*args, **kwargs):
global level
if level & LEVEL_DEBUG:
log.msg(*args, **kwargs)
def info(*args, **kwargs):
global level
if level & LEVEL_INFO:
log.msg(*args, **kwargs)
def warn(*args, **kwargs):
global level
if level & LEVEL_WARN:
log.msg(*args, **kwargs)
def error(*args, **kwargs):
global level
if level & LEVEL_ERROR:
log.msg(*args, **kwargs)
| cgvarela/pyserverlib | kontalklib/logging.py | Python | gpl-3.0 | 1,793 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Ver 18 - 15 November 2017 -
import time
import serial
import string
import sys
import mysql.connector
from mysql.connector import errorcode, pooling
from db import *
import datetime
#from threading import Thread
import multiprocessing as mp
from multiprocessing import Queue
from multiprocessing.managers import SyncManager
from os import system, devnull
from subprocess import call, STDOUT
from threading import Thread
from time import sleep
#import queue
ctrlStr = "*../"
HOST = ''
PORT0 = 5011
PORT1 = 5012
PORT2 = 5013
PORT3 = 5014
AUTHKEY = str("123456").encode("utf-8")
def output(o, x):
print(str(str(o) + " " + str(datetime.datetime.now().time())[:8]) + " "+ str(x))
sys.stdout.flush()
# -- DB Connection ---------------------------
try:
db = mysql.connector.connect(**config)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
output("DB", "Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
output("DB", "Database does not exists")
else:
output(err)
else:
output("PYSERIAL","Start procedure")
db.commit()
# -- END DB Connection ---------------------------
# -- Open Serial to the Coordinator---------------
serCoord = serial.Serial('/dev/ttymxc3', 115200, timeout=10)
#serCoord = serial.Serial('COM5', 115200, timeout=5)
serCoord.timeout = 10
serCoord.setDTR(False)
time.sleep(1)
# toss any data already received, see
serCoord.flushInput()
# -- End Open Serial to the Coordinator-----------
#-----------------------------
# Global Variable declaration
#-----------------------------
endSerialChars = b"\r\n"
global readSerial
global serialBuffer
pnum = 5 #number of values to send for each sensor
# coordinator commands
INString = "IN" # to send Node data to the coordinator
ISString = "IS" # to send Sensor data to the coordinator
IXString = "IX" # to send Address data to the coordinator
IAString = "IA" # to send Actuators to the coordinator
IMString = "IM" # to send Methods to the coordinator
CommExecutedTrue = b"CX1\r\n"
CommExecutedFalse = b"CX0\r\n"
CommExecutedTrueX = b"CX1"
CommExecutedFalseX = b"CX0"
CommNotExecuted = b"X"
#-----------------------------
# End Global Variable declaration
#-----------------------------
# Gpio pin manager
class Gpio:
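    # Minimal sysfs-based GPIO helper: exports the pins listed in self.gpios via
    # /sys/class/gpio and caches each pin's last known value and direction so
    # redundant writes can be skipped.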
def __init__(self):
self.gpios = ["55", "57"]
self.gpioval = [0, 0]
self.gpiodir = [0, 0]
self.current = 0
self.OUTPUT = 1
self.INPUT = 0
self.HIGH = 1
self.LOW = 0
for num in self.gpios:
try:
with open("/sys/class/gpio/export", "w") as create:
create.write(num)
with open("/sys/class/gpio/gpio" + self.gpios[current] + "/value", "r") as reads:
self.gpioval[self.current] = reads.read()
with open("/sys/class/gpio/gpio" + self.gpios[current] + "/direction", "r") as readdir:
self.gpiodir[self.current] = (1 if "out" in readdir.read() else 0)
self.current += 1
except:
sleep(0.000001)
def pinMode(self, pin=0, direction=0):
try:
gpio = self.gpios[int(pin)]
if int(direction) != self.gpiodir[pin]:
with open("/sys/class/gpio/gpio" + gpio + "/direction", "w") as writer:
writer.write("in" if direction < 1 else "out")
self.gpiodir[pin] = (0 if direction < 1 else 1)
return True
except ValueError:
output("PYSERIAL","ERROR: pinMode, value inserted wasn't an int")
return False
except:
output("PYSERIAL","ERROR: pinMode, error using pinMode")
return False
def digitalWrite(self, pin=2, value=0):
try:
gpio = self.gpios[int(pin)]
if self.gpiodir[pin] != 1:
with open("/sys/class/gpio/gpio" + gpio + "/direction", "w") as re:
re.write("out")
self.gpiodir[pin] = 1
if self.gpioval[pin] != int(value):
with open("/sys/class/gpio/gpio" + gpio + "/value", "w") as writes:
writes.write("0" if value < 1 else "1")
self.gpioval[pin] = (0 if value < 1 else 1)
return True
except ValueError:
output("PYSERIAL","ERROR: digitalWrite, value inserted wasn't an int")
return False
except:
output("PYSERIAL","ERROR: digitalWrite, error running")
return False
def digitalRead(self, pin=2):
try:
gpio = self.gpios[int(pin)]
if self.gpiodir[pin] != 0:
with open("/sys/class/gpio/gpio" + gpio + "/direction", "w") as re:
re.write("in")
self.gpiodir[pin] = 0
with open("/sys/class/gpio/gpio" + gpio + "/value", "r") as reader:
self.gpioval[pin] = int(reader.read().replace('\n', ''))
return self.gpioval[pin]
except ValueError:
output("PYSERIAL","ERROR: digitalRead, value inserted wasn't an int")
return -1
except:
output("PYSERIAL","ERROR: digitalRead, error running")
return -1
#-- function to extract integer from strings
def parseint(string):
return int(''.join([x for x in string if x.isdigit()]))
def log(t, m):
#curLog = db.cursor()
sql = "insert into tblog (type,msg) VALUES (%s, %s)"
#try:
#curLog.execute(sql, (t,m))
#db.commit()
#curLog.close()
#except:
#raise
#curLog.close()
def printTime():
now = datetime.datetime.now()
print(now.strftime("%H %M %S %f"))
def checkInit():
# check Init
sql = "SELECT pvalue,pindex FROM tbparam WHERE ptype = 'I'"
cur.execute(sql)
for (pvalue,pindex) in cur:
i = int("{}".format(pindex))
if i == 1:
output ("PYSERIAL","Initialize Coordinator")
sql = "UPDATE tbparam SET pvalue = 0 WHERE ptype = 'I'"
cur.execute(sql)
db.commit()
cur.close
initCoordinator()
break
sys.stdout.flush()
# end check Init
#-- Send Init data to the Coordinator --#
def initCoordinator():
#printTime()
output ("PYSERIAL","Initializing...")
global pnum
global INString
global IXString
global ISString
global IAString
global IMString
cur = db.cursor()
#--------------------------------------------------------------------------------------------------------#
#----begin building string to send out-------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------------------#
# set numbers of parameters to build the string to send to the coordinator
# count the number of nodes
sql = "select count(*) as CNT from vwnodes WHERE nodetype != 0" #exclude external node
cur.execute(sql)
for (CNT) in cur:
nodeNum=parseint("{}".format(CNT))
INString = INString + str(nodeNum*pnum)
sql = "select count(*) as CNT from vwnodes WHERE nodetype = 2" #xbee nodes
cur.execute(sql)
for (CNT) in cur:
nodeNum=parseint("{}".format(CNT))
IXString = IXString + str(nodeNum)
# retrieve node data and buid initialization strings
sql = "select id, xbee_high_address, xbee_low_address, nodetype from vwnodes WHERE nodetype != 0 AND status = 1 order by id"
cur.execute(sql)
for (id, xbee_high_address, xbee_low_address, nodetype) in cur:
INString = INString + "," + "{}".format(id) + "," + "{}".format(nodetype) + ",0,0,1"
if int("{}".format(nodetype)) == 2: #xbee
IXString = IXString + "," + "{}".format(id) + "," + "{}".format(xbee_high_address) + "," + "{}".format(xbee_low_address)
#db.commit()
# count the number of sensors
sql = "select count(*) as CNT from vwsensors where tbNodeType_id != 0 and pin_number < 30"
cur.execute(sql)
for (CNT) in cur:
sensorNum=parseint("{}".format(CNT))
ISString = ISString + str(sensorNum*pnum)
db.commit()
#//col 0=node 1=sensor 2=value 3=alarm 4=spare
#retrieve sensor data and build initialization strings
sql = "SELECT nodeid,tbnodetype_id,tbsensortype_id,pin_number FROM vwsensors where tbnodetype_id != 0 and pin_number < 30 and tbstatus_id = 1 order by nodeid,pin_number"
cur.execute(sql)
for (nodeid,tbnodetype_id,tbsensortype_id,pin_number) in cur:
ISString = ISString + "," + "{}".format(nodeid) + "," + "{}".format(pin_number) + ",0,0,0"
#db.commit()
# count the number of actuators
sql = "select count(*) as CNT from vwactuator"
cur.execute(sql)
for (CNT) in cur:
actuatorNum=parseint("{}".format(CNT))
IAString = IAString + str(actuatorNum*pnum)
db.commit()
#//col 0=node 1=sensor 2=value 3=alarm 4=spare
#retrieve actuator data and build initialization strings
sql = "select tbnode_id,pinnumber from tbactuator order by tbnode_id,pinnumber"
cur.execute(sql)
for (tbnode_id,pinnumber) in cur:
IAString = IAString + "," + "{}".format(tbnode_id) + "," + "{}".format(pinnumber) + ",0,0,0"
# count the number of methods
sql = "select count(*) as CNT from vwmethods"
cur.execute(sql)
for (CNT) in cur:
methodNum=parseint("{}".format(CNT))
IMString = IMString + str(methodNum*pnum)
db.commit()
#//col 0=node 1=actuator 2=method 3=value 4=spare
#retrieve method data and build initialization strings
sql = "select tbnode_id,pinnumber,method from vwmethods order by tbnode_id,pinnumber,method"
cur.execute(sql)
for (tbnode_id,pinnumber,method) in cur:
IMString = IMString + "," + "{}".format(tbnode_id) + "," + "{}".format(pinnumber) + "," + "{}".format(method) + ",0,0"
db.commit()
cur.close
#--------------------------------------------------------------------------------------------------------#
#----end building string to send out---------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------------------#
#----begin Sending init string to the coordinator -------------------------------------------------------#
#--------------------------------------------------------------------------------------------------------#
output("PYSERIAL","Init sensors")
ret = initSendStringsToCoordinator(ISString)
if ret == 0: #if fails
return 0
output("PYSERIAL","Init actuators")
#output(IAString)
ret = initSendStringsToCoordinator(IAString)
if ret == 0: #if fails
return 0
output("PYSERIAL","Init methods")
ret = initSendStringsToCoordinator(IMString)
if ret == 0: #if fails
return 0
output("PYSERIAL","Init nodes")
ret = initSendStringsToCoordinator(INString)
if ret == 0: #if fails
return 0
output("PYSERIAL","Init node addresses Xbee")
ret = initSendStringsToCoordinator(IXString)
if ret == 0: #if fails
return 0
#--------------------------------------------------------------------------------------------------------#
#----end Sending init string to the coordinator ---------------------------------------------------------#
#--------------------------------------------------------------------------------------------------------#
# if Ok
cur.close
output ("PYSERIAL","End Initializing")
return 1
def isResponse(response):
if "CX0" in str(response, 'utf-8'):
return True
elif "CX1" in str(response, 'utf-8'):
return True
else:
return False
def isResponseOK(response):
    # True only when the coordinator acknowledged the command with CX1
    res = False
    if "CX0" in str(response, 'utf-8'):
        res = False
    elif "CX1" in str(response, 'utf-8'):
        res = True
    else:
        res = False
    return res
#--------------------------------------------------------------------------------------------------------#
#---- get serial incoming data ---------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------------------#
def getSerialData(qIN, qOUT, qResponse):
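    # Serial bridge loop: lines read from the coordinator are pushed onto qIN
    # (command acknowledgements CX0/CX1 are detected but not forwarded yet),
    # and any strings queued on qOUT are written back out to the coordinator.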
output("PYSERIAL","init serial")
serCoord.flushInput()
readSerial = ""
serCoord.timeout = 1
while True:
gpio.digitalWrite(0,gpio.LOW) #write high value to pin
serialBuffer = serCoord.inWaiting()
if serialBuffer > 0: #data available on serial
gpio.digitalWrite(0,gpio.HIGH)
readSerial = serCoord.readline()
            readSerial = readSerial.rstrip(endSerialChars)
if isResponse(readSerial) == True:
# while not qResponse.empty():
# qResponse.get()
#qResponse.put(readSerial)
#output("Response received")
                pass  # acknowledgements are detected but not forwarded yet
else:
qIN.put(readSerial)
# print("Data received:", serialBuffer)
#print("Q size:", qIn.qsize())
while not qOUT.empty():
#print("Q OUT size:", qOUT.qsize())
stg = qOUT.get()
serCoord.write(bytes(stg, 'UTF-8'))
output("PYSERIAL","String sent: " + str(stg))
#--------------------------------------------------------------------------------------------------------#
#---- End AUTOreceiveDataFromCoordinator --------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------------------#
def initSendStringsToCoordinator(stg):
serCoord.flushInput()
# output("PYSERIAL",stg)
# send the node string
attemptsCnt = 0
while serCoord.inWaiting() == 0 and attemptsCnt < 5:
ret = serCoord.write(bytes(stg, 'UTF-8'))
readSerial = serCoord.readline()
if readSerial == CommExecutedTrue:
return 1
elif readSerial == CommExecutedFalse:
# write error in log
log("E", "Error "+stg)
else:
attemptsCnt = attemptsCnt + 1
#output("PYSERIAL",attemptsCnt)
continue
# write error in log
log("E", "no serial available")
return 0
def QueueServerClient(HOST, PORT, AUTHKEY):
class QueueManager(SyncManager):
pass
QueueManager.register('get_queue')
QueueManager.register('get_name')
QueueManager.register('get_description')
manager = QueueManager(address = (HOST, PORT), authkey = AUTHKEY)
manager.connect() # This starts the connected client
return manager
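# Note: QueueServerClient only connects to an already running queue server; a
# separate manager process is assumed to be listening on HOST:PORT with the same
# AUTHKEY, otherwise manager.connect() raises a connection error.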
#------- Main section ----------------------------#
#------- Run once --------------------------------#
log("I", "Initialize coordinator")
gpio = Gpio()
gpio.pinMode(0, gpio.OUTPUT)
ret = 0
curInit = db.cursor()
#truncate output tables
curInit.callproc('init')
curInit.close()
# create three connected managers
qmIn = QueueServerClient(HOST, PORT0, AUTHKEY)
qmOut = QueueServerClient(HOST, PORT1, AUTHKEY)
qmSql = QueueServerClient(HOST, PORT2, AUTHKEY)
qmResp = QueueServerClient(HOST, PORT3, AUTHKEY)
# Get the queue objects from the clients
qIn = qmIn.get_queue()
qOut = qmOut.get_queue()
qSql = qmSql.get_queue()
qResp = qmResp.get_queue()
while ret == 0:
INString = "IN" # to send Node data to the coordinator
ISString = "IS" # to send Sensor data to the coordinator
IXString = "IX" # to send Address data to the coordinator
IAString = "IA" # to send Actuators data to the coordinator
IMString = "IM" # to send Methods data to the coordinator
ret = initCoordinator()
ret = 1
#------- End run once -------------------------#
log("I", "Start main loop")
getSerialData(qIn, qOut, qResp)
| theflorianmaas/dh | Python/dhproc/p_serial.py | Python | mit | 15,394 |
# fmt: off
__all__ = ["ProgressCallback", "PrintCallback", "MonitorCallback", "EarlyStopCallback", "SaveCallback"] # noqa
# fmt: on
import operator
import os
import torch
from tqdm import tqdm
from utils.training.trainer import Callback # isort: skip
class ProgressCallback(Callback):
def __init__(self):
self.training_pbar = None
self.evaluation_pbar = None
def on_train_begin(self, context):
self._ensure_close(train=True)
self.training_pbar = tqdm()
def on_train_end(self, context, metrics):
self._ensure_close(train=True)
def on_evaluate_begin(self, context):
self._ensure_close(eval=True)
self.evaluation_pbar = tqdm(leave=self.training_pbar is None)
def on_evaluate_end(self, context, metrics):
self._ensure_close(eval=True)
def on_loop_begin(self, context):
pbar = self.training_pbar if context.train else self.evaluation_pbar
pbar.reset(context.num_batches)
if context.train:
pbar.set_postfix({"epoch": context.epoch})
def on_step_end(self, context, output):
pbar = self.training_pbar if context.train else self.evaluation_pbar
pbar.update(1)
def _ensure_close(self, train=False, eval=False):
if train:
if self.training_pbar is not None:
self.training_pbar.close()
self.training_pbar = None
if eval:
if self.evaluation_pbar is not None:
self.evaluation_pbar.close()
self.evaluation_pbar = None
def __del__(self):
self._ensure_close(train=True, eval=True)
class PrintCallback(Callback):
def __init__(self, printer=None):
self.printer = printer or tqdm.write
def on_loop_end(self, context, metrics):
label = "train" if context.train else "eval"
loss = metrics[f"{label}/loss"]
message = f"[{label}] epoch {context.epoch} - loss: {loss:.4f}"
prefix = label + "/"
for key, val in metrics.items():
if not isinstance(val, float) or not key.startswith(prefix):
continue
key = key.split("/", 1)[1]
if key == "loss":
continue
message += f", {key}: {val:.4f}"
self.printer(message)
class MonitorCallback(Callback):
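    # Tracks a single metric across evaluations ("min" or "max" mode) and counts
    # how many consecutive evaluations have failed to improve on the best value.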
def __init__(self, monitor="eval/loss", mode="min"):
self.monitor = monitor
self.count = 0
self.mode = mode
if self.mode == "min":
self.monitor_op = operator.lt
self.best = float("inf")
elif self.mode == "max":
self.monitor_op = operator.gt
self.best = float("-inf")
else:
raise ValueError(f"invalid mode: {self.mode}")
def on_evaluate_end(self, context, metrics):
current_val = metrics[self.monitor]
if self.monitor_op(current_val, self.best):
self.best = current_val
self.count = 0
else:
self.count += 1
class EarlyStopCallback(MonitorCallback):
def __init__(self, monitor="eval/loss", patience=3, mode="min"):
super().__init__(monitor, mode)
self.patience = patience
def on_evaluate_end(self, context, metrics):
super().on_evaluate_end(context, metrics)
if self.count >= self.patience:
context.trainer.terminate()
class SaveCallback(Callback):
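    # Writes a checkpoint after each evaluation (only on improvement when a
    # monitor is configured) and removes previously written checkpoint files so
    # that only the latest one is kept.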
def __init__(self, output_dir, prefix="", mode="latest", monitor=None):
if mode not in {"latest", "min", "max"}:
raise ValueError(f"invalid mode: {self.mode}")
self.output_dir = output_dir
self.prefix = prefix
self.monitor = MonitorCallback(monitor, mode) if monitor else None
self._checkpoints = []
def on_evaluate_end(self, context, metrics):
if self.monitor:
self.monitor.on_evaluate_end(context, metrics)
if self.monitor.count > 0:
return
trainer = context.trainer
# TODO: add other configuration
checkpoint = {
"model": trainer.model.state_dict(),
"optimizer": trainer.optimizer.state_dict(),
"scheduler": trainer.scheduler.state_dict() if trainer.scheduler else None,
"trainer_config": trainer.config,
"trainer_state": trainer._state,
}
file = os.path.join(self.output_dir, f"{self.prefix}step-{context.global_step}.ckpt")
torch.save(checkpoint, file)
checkpoints = []
for ckpt_path in self._checkpoints:
if os.path.exists(ckpt_path):
os.remove(ckpt_path)
checkpoints.append(file)
self._checkpoints = checkpoints
| chantera/biaffineparser | src/utils/training/callbacks.py | Python | apache-2.0 | 4,715 |
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numbers
import random
import psutil
import sys
from htmresearch.support.expsuite import PyExperimentSuite
from nupic.frameworks.opf.model_factory import ModelFactory
# from nupic.algorithms.sdr_classifier import SDRClassifier
from htmresearch.algorithms.faulty_temporal_memory_shim import MonitoredFaultyTPShim
from htmresearch.support.sequence_prediction_dataset import ReberDataset
from htmresearch.support.sequence_prediction_dataset import SimpleDataset
from htmresearch.support.sequence_prediction_dataset import HighOrderDataset
from htmresearch.support.sequence_prediction_dataset import LongHighOrderDataset
MODEL_MEMORY = 2e9 # Memory requirements per model: ~2Gb
NUM_SYMBOLS = 16
RANDOM_END = 50000
MODEL_PARAMS = {
"model": "HTMPrediction",
"version": 1,
"predictAheadTime": None,
"modelParams": {
"inferenceType": "TemporalMultiStep",
"sensorParams": {
"verbosity" : 0,
"encoders": {
"element": {
"fieldname": u"element",
"name": u"element",
"type": "SDRCategoryEncoder",
"categoryList": range(max(RANDOM_END, NUM_SYMBOLS)),
"n": 2048,
"w": 41
}
},
"sensorAutoReset" : None,
},
"spEnable": False,
"spParams": {
"spVerbosity" : 0,
"globalInhibition": 1,
"columnCount": 2048,
"inputWidth": 0,
"numActiveColumnsPerInhArea": 40,
"seed": 1956,
"columnDimensions": 0.5,
"synPermConnected": 0.1,
"synPermActiveInc": 0.1,
"synPermInactiveDec": 0.01,
"boostStrength": 0.0
},
"tmEnable" : True,
"tmParams": {
"verbosity": 0,
"columnCount": 2048,
"cellsPerColumn": 32,
"inputWidth": 2048,
"seed": 1960,
"temporalImp": "monitored_tm_py",
"newSynapseCount": 32,
"maxSynapsesPerSegment": 128,
"maxSegmentsPerCell": 128,
"initialPerm": 0.21,
"connectedPerm": 0.50,
"permanenceInc": 0.1,
"permanenceDec": 0.1,
"predictedSegmentDecrement": 0.02,
"globalDecay": 0.0,
"maxAge": 0,
"minThreshold": 15,
"activationThreshold": 15,
"outputType": "normal",
"pamLength": 1,
},
"clParams": {
"implementation": "cpp",
"regionName" : "SDRClassifierRegion",
"verbosity" : 0,
"alpha": 0.01,
"steps": "1",
},
"trainSPNetOnlyIfRequested": False,
},
}
def getEncoderMapping(model, numSymbols):
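  # Map each symbol to the set of SDR bit indices its category encoding activates.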
encoder = model._getEncoder().encoders[0][1]
mapping = dict()
for i in range(numSymbols):
mapping[i] = set(encoder.encode(i).nonzero()[0])
return mapping
def classify(mapping, activeColumns, numPredictions):
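  # Rank symbols by the overlap between their encoding and the given active
  # columns; return the numPredictions symbols with the largest overlap
  # (ties broken randomly).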
scores = [(len(encoding & activeColumns), i) for i, encoding in mapping.iteritems()]
random.shuffle(scores) # break ties randomly
return [i for _, i in sorted(scores, reverse=True)[:numPredictions]]
class Suite(PyExperimentSuite):
def parse_opt(self):
options, args = super(self.__class__, self).parse_opt()
# Make sure the number of cores used is compatible with the available memory
maxcores = min(int(psutil.TOTAL_PHYMEM / MODEL_MEMORY), psutil.NUM_CPUS)
options.ncores = min(self.options.ncores, maxcores)
self.options = options
return options, args
def reset(self, params, repetition):
random.seed(params['seed'])
if params['dataset'] == 'simple':
self.dataset = SimpleDataset()
elif params['dataset'] == 'reber':
self.dataset = ReberDataset(maxLength=params['max_length'])
elif params['dataset'] == 'high-order':
self.dataset = HighOrderDataset(numPredictions=params['num_predictions'],
seed=params['seed'],
smallAlphabet=params['use_small_alphabet'])
print "Sequence dataset: "
print " Symbol Number {}".format(self.dataset.numSymbols)
for seq in self.dataset.sequences:
print seq
elif params['dataset'] == 'high-order-long':
self.dataset = LongHighOrderDataset(params['sequence_length'],
seed=params['seed'])
print "Sequence dataset: "
print " Symbol Number {}".format(self.dataset.numSymbols)
for seq in self.dataset.sequences:
print seq
else:
raise Exception("Dataset not found")
self.randomStart = self.dataset.numSymbols + 1
self.randomEnd = self.randomStart + 5000
MODEL_PARAMS['modelParams']['sensorParams']['encoders']['element']\
['categoryList'] = range(self.randomEnd)
# if not os.path.exists(resultsDir):
# os.makedirs(resultsDir)
# self.resultsFile = open(os.path.join(resultsDir, "0.log"), 'w')
if params['verbosity'] > 0:
print " initializing HTM model..."
# print MODEL_PARAMS
self.model = ModelFactory.create(MODEL_PARAMS)
self.model.enableInference({"predictedField": "element"})
# self.classifier = SDRClassifier(steps=[1], alpha=0.001)
print "finish initializing HTM model "
if params['kill_cell_percent'] > 0:
# a hack to use faulty temporal memory instead
self.model._getTPRegion().getSelf()._tfdr = MonitoredFaultyTPShim(
numberOfCols=2048,
cellsPerColumn=32,
newSynapseCount=32,
maxSynapsesPerSegment=128,
maxSegmentsPerCell=128,
initialPerm=0.21,
connectedPerm=0.50,
permanenceInc=0.10,
permanenceDec=0.10,
predictedSegmentDecrement=0.01,
minThreshold=15,
activationThreshold=15,
seed=1960,
)
self.mapping = getEncoderMapping(self.model, self.dataset.numSymbols)
self.numPredictedActiveCells = []
self.numPredictedInactiveCells = []
self.numUnpredictedActiveColumns = []
self.currentSequence = []
self.targetPrediction = []
self.replenish_sequence(params, iteration=0)
self.resets = []
self.randoms = []
self.verbosity = 1
self.sequenceCounter = 0
def replenish_sequence(self, params, iteration):
if iteration > params['perturb_after']:
print "PERTURBING"
sequence, target = self.dataset.generateSequence(params['seed']+iteration,
perturbed=True)
else:
sequence, target = self.dataset.generateSequence(params['seed']+iteration)
if (iteration > params['inject_noise_after'] and
iteration < params['stop_inject_noise_after']):
injectNoiseAt = random.randint(1, 3)
sequence[injectNoiseAt] = random.randrange(self.randomStart, self.randomEnd)
if params['verbosity'] > 0:
print "injectNoise ", sequence[injectNoiseAt], " at: ", injectNoiseAt
# separate sequences with random elements
if params['separate_sequences_with'] == 'random':
random.seed(params['seed']+iteration)
sequence.append(random.randrange(self.randomStart, self.randomEnd))
target.append(None)
if params['verbosity'] > 0:
print "Add sequence to buffer"
print "sequence: ", sequence
print "target: ", target
self.currentSequence += sequence
self.targetPrediction += target
def check_prediction(self, topPredictions, targets):
if targets is None:
correct = None
else:
if isinstance(targets, numbers.Number):
# single target, multiple predictions
correct = targets in topPredictions
else:
# multiple targets, multiple predictions
correct = True
for prediction in topPredictions:
correct = correct and (prediction in targets)
return correct
def iterate(self, params, repetition, iteration):
currentElement = self.currentSequence.pop(0)
target = self.targetPrediction.pop(0)
# whether there will be a reset signal after the current record
resetFlag = (len(self.currentSequence) == 0 and
params['separate_sequences_with'] == 'reset')
self.resets.append(resetFlag)
# whether there will be a random symbol after the current record
randomFlag = (len(self.currentSequence) == 1 and
params['separate_sequences_with'] == 'random')
self.randoms.append(randomFlag)
killCell = False
if iteration == params['kill_cell_after'] and params['kill_cell_percent'] > 0:
killCell = True
tm = self.model._getTPRegion().getSelf()._tfdr
tm.killCells(percent=params['kill_cell_percent'])
self.model.disableLearning()
result = self.model.run({"element": currentElement})
tm = self.model._getTPRegion().getSelf()._tfdr
tm.mmClearHistory()
# Try use SDR classifier to classify active (not predicted) cells
# The results is similar as classifying the predicted cells
# classLabel = min(currentElement, self.dataset.numSymbols)
# classification = {'bucketIdx': classLabel, 'actValue': classLabel}
# result = self.classifier.compute(iteration, list(tm.activeCells),
# classification,
# learn=True, infer=True)
# topPredictionsSDRClassifier = sorted(zip(result[1], result["actualValues"]),
# reverse=True)[0]
# topPredictionsSDRClassifier = [topPredictionsSDRClassifier[1]]
topPredictionsSDRClassifier = None
# activeColumns = set([tm.columnForCell(cell) for cell in tm.activeCells])
# print "active columns: "
# print activeColumns
# print "sdr mapping current: "
# print self.mapping[element]
# print "sdr mapping next: "
# print self.mapping[target]
# Use custom classifier (uses predicted cells to make predictions)
predictiveColumns = set([tm.columnForCell(cell) for cell in tm.getPredictiveCells()])
topPredictions = classify(
self.mapping, predictiveColumns, params['num_predictions'])
# correct = self.check_prediction(topPredictions, target)
truth = target
if params['separate_sequences_with'] == 'random':
if (self.randoms[-1] or
len(self.randoms) >= 2 and self.randoms[-2]):
truth = None
correct = None if truth is None else (truth in topPredictions)
data = {"iteration": iteration,
"current": currentElement,
"reset": resetFlag,
"random": randomFlag,
"train": True,
"predictions": topPredictions,
"predictionsSDR": topPredictionsSDRClassifier,
"truth": target,
"sequenceCounter": self.sequenceCounter}
if params['verbosity'] > 0:
print ("iteration: {0} \t"
"current: {1} \t"
"predictions: {2} \t"
"predictions SDR: {3} \t"
"truth: {4} \t"
"correct: {5} \t"
"predict column: {6}").format(
iteration, currentElement, topPredictions, topPredictionsSDRClassifier,
target, correct, len(predictiveColumns))
if len(self.currentSequence) == 0:
self.replenish_sequence(params, iteration)
self.sequenceCounter += 1
if self.resets[-1]:
if params['verbosity'] > 0:
print "Reset TM at iteration {}".format(iteration)
tm.reset()
return data
if __name__ == '__main__':
suite = Suite()
suite.start()
| subutai/htmresearch | projects/sequence_prediction/discrete_sequences/tm/suite.py | Python | agpl-3.0 | 12,331 |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import time
import threading
from datetime import datetime, timedelta
import concurrent
import sys
import asyncio
import logging
try:
import psutil
except ImportError:
pass # If psutil isn't installed, simply does not capture process stats.
from azure.servicebus import ServiceBusMessage, ServiceBusMessageBatch
from azure.servicebus.exceptions import MessageAlreadySettled
import logger
from app_insights_metric import AbstractMonitorMetric
from process_monitor import ProcessMonitor
LOGFILE_NAME = "stress-test.log"
PRINT_CONSOLE = True
_logger = logger.get_base_logger(LOGFILE_NAME, "stress_test", logging.WARN)
class ReceiveType:
push = "push"
pull = "pull"
none = None
class StressTestResults(object):
def __init__(self):
self.total_sent = 0
self.total_received = 0
self.time_elapsed = None
self.state_by_sender = {}
self.state_by_receiver = {}
def __repr__(self):
return str(vars(self))
class StressTestRunnerState(object):
"""Per-runner state, e.g. if you spawn 3 senders each will have this as their state object,
which will be coalesced at completion into StressTestResults"""
def __init__(self):
self.total_sent = 0
self.total_received = 0
self.cpu_percent = None
self.memory_bytes = None
self.timestamp = None
self.exceptions = []
def __repr__(self):
return str(vars(self))
def populate_process_stats(self):
self.timestamp = datetime.utcnow()
try:
self.cpu_percent = psutil.cpu_percent()
self.memory_bytes = psutil.virtual_memory().total
except NameError:
return # psutil was not installed, fall back to simply not capturing these stats.
class StressTestRunner:
"""Framework for running a service bus stress test.
    Duration can be overridden via the --stress_test_duration_seconds flag from the command line"""
def __init__(
self,
senders,
receivers,
duration=timedelta(minutes=15),
receive_type=ReceiveType.push,
send_batch_size=None,
message_size=10,
max_wait_time=10,
send_delay=0.01,
receive_delay=0,
should_complete_messages=True,
max_message_count=1,
send_session_id=None,
fail_on_exception=True,
azure_monitor_metric=None,
process_monitor=None,
):
self.senders = senders
self.receivers = receivers
self.duration = duration
self.receive_type = receive_type
self.message_size = message_size
self.send_batch_size = send_batch_size
self.max_wait_time = max_wait_time
self.send_delay = send_delay
self.receive_delay = receive_delay
self.should_complete_messages = should_complete_messages
self.max_message_count = max_message_count
self.fail_on_exception = fail_on_exception
self.send_session_id = send_session_id
self.azure_monitor_metric = azure_monitor_metric or AbstractMonitorMetric(
"fake_test_name"
)
self.process_monitor = process_monitor or ProcessMonitor(
"monitor_{}".format(LOGFILE_NAME),
"test_stress_queues",
print_console=PRINT_CONSOLE,
)
# Because of pickle we need to create a state object and not just pass around ourselves.
# If we ever require multiple runs of this one after another, just make Run() reset this.
self._state = StressTestRunnerState()
self._duration_override = None
for arg in sys.argv:
if arg.startswith("--stress_test_duration_seconds="):
self._duration_override = timedelta(seconds=int(arg.split("=")[1]))
self._should_stop = False
# Plugin functions the caller can override to further tailor the test.
def on_send(self, state, sent_message, sender):
"""Called on every successful send, per message"""
pass
def on_receive(self, state, received_message, receiver):
"""Called on every successful receive, per message"""
pass
def on_receive_batch(self, state, batch, receiver):
"""Called on every successful receive, at the batch or iterator level rather than per-message"""
pass
def post_receive(self, state, receiver):
"""Called after completion of every successful receive"""
pass
def on_complete(self, send_results=[], receive_results=[]):
"""Called on stress test run completion"""
pass
def pre_process_message(self, message):
"""Allows user to transform the message before batching or sending it."""
pass
def pre_process_message_batch(self, message):
"""Allows user to transform the batch before sending it."""
pass
def pre_process_message_body(self, payload):
"""Allows user to transform message payload before sending it."""
return payload
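    # Illustrative sketch (LoggingStressTest is hypothetical, not part of this
    # module): callers can subclass the runner and override any hook above, e.g.
    #
    #     class LoggingStressTest(StressTestRunner):
    #         def on_send(self, state, sent_message, sender):
    #             _logger.debug("sent %d messages so far", state.total_sent)
    #
    # to collect per-send statistics without modifying the send/receive loops.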
def _schedule_interval_logger(self, end_time, description="", interval_seconds=30):
def _do_interval_logging():
if end_time > datetime.utcnow() and not self._should_stop:
self._state.populate_process_stats()
_logger.critical(
"{} RECURRENT STATUS: {}".format(description, self._state)
)
self._schedule_interval_logger(end_time, description, interval_seconds)
t = threading.Timer(interval_seconds, _do_interval_logging)
t.start()
def _construct_message(self):
if self.send_batch_size != None:
batch = ServiceBusMessageBatch()
for _ in range(self.send_batch_size):
message = ServiceBusMessage(
self.pre_process_message_body("a" * self.message_size)
)
self.pre_process_message(message)
batch.add_message(message)
self.pre_process_message_batch(batch)
return batch
else:
message = ServiceBusMessage(
self.pre_process_message_body("a" * self.message_size)
)
self.pre_process_message(message)
return message
def _send(self, sender, end_time):
self._schedule_interval_logger(end_time, "Sender " + str(self))
try:
_logger.info("STARTING SENDER")
with sender:
while end_time > datetime.utcnow() and not self._should_stop:
_logger.info("SENDING")
try:
message = self._construct_message()
if self.send_session_id != None:
message.session_id = self.send_session_id
sender.send_messages(message)
self.azure_monitor_metric.record_messages_cpu_memory(
self.send_batch_size,
self.process_monitor.cpu_usage_percent,
self.process_monitor.memory_usage_percent,
)
if self.send_batch_size:
self._state.total_sent += self.send_batch_size
else:
self._state.total_sent += 1 # send single message
self.on_send(self._state, message, sender)
except Exception as e:
_logger.exception("Exception during send: {}".format(e))
self.azure_monitor_metric.record_error(e)
self._state.exceptions.append(e)
if self.fail_on_exception:
raise
time.sleep(self.send_delay)
self._state.timestamp = datetime.utcnow()
return self._state
except Exception as e:
_logger.exception("Exception in sender: {}".format(e))
self._should_stop = True
raise
def _receive(self, receiver, end_time):
self._schedule_interval_logger(end_time, "Receiver " + str(self))
try:
with receiver:
while end_time > datetime.utcnow() and not self._should_stop:
_logger.info("RECEIVE LOOP")
try:
if self.receive_type == ReceiveType.pull:
batch = receiver.receive_messages(
max_message_count=self.max_message_count,
max_wait_time=self.max_wait_time,
)
elif self.receive_type == ReceiveType.push:
batch = receiver._get_streaming_message_iter(
max_wait_time=self.max_wait_time
)
else:
batch = []
for message in batch:
self.on_receive(self._state, message, receiver)
try:
if self.should_complete_messages:
receiver.complete_message(message)
except MessageAlreadySettled: # It may have been settled in the plugin callback.
pass
self._state.total_received += 1
# TODO: Get EnqueuedTimeUtc out of broker properties and calculate latency. Should properties/app properties be mostly None?
if end_time <= datetime.utcnow():
break
time.sleep(self.receive_delay)
self.azure_monitor_metric.record_messages_cpu_memory(
1,
self.process_monitor.cpu_usage_percent,
self.process_monitor.memory_usage_percent,
)
self.post_receive(self._state, receiver)
except Exception as e:
_logger.exception("Exception during receive: {}".format(e))
self._state.exceptions.append(e)
self.azure_monitor_metric.record_error(e)
if self.fail_on_exception:
raise
self._state.timestamp = datetime.utcnow()
return self._state
except Exception as e:
self.azure_monitor_metric.record_error(e)
_logger.exception("Exception in receiver {}".format(e))
self._should_stop = True
raise
def run(self):
start_time = datetime.utcnow()
end_time = start_time + (self._duration_override or self.duration)
with self.process_monitor:
with concurrent.futures.ThreadPoolExecutor(max_workers=4) as proc_pool:
_logger.info("STARTING PROC POOL")
senders = [
proc_pool.submit(self._send, sender, end_time)
for sender in self.senders
]
receivers = [
proc_pool.submit(self._receive, receiver, end_time)
for receiver in self.receivers
]
result = StressTestResults()
for each in concurrent.futures.as_completed(senders + receivers):
_logger.info("SOMETHING FINISHED")
if each in senders:
result.state_by_sender[each] = each.result()
if each in receivers:
result.state_by_receiver[each] = each.result()
# TODO: do as_completed in one batch to provide a way to short-circuit on failure.
result.state_by_sender = {
s: f.result()
                    for s, f in zip(self.senders, senders)
}
result.state_by_receiver = {
r: f.result()
                    for r, f in zip(self.receivers, receivers)
}
_logger.info("got receiver results")
result.total_sent = sum(
[r.total_sent for r in result.state_by_sender.values()]
)
result.total_received = sum(
[r.total_received for r in result.state_by_receiver.values()]
)
result.time_elapsed = end_time - start_time
_logger.critical("Stress test completed. Results:\n{}".format(result))
return result
class StressTestRunnerAsync(StressTestRunner):
def __init__(
self,
senders,
receivers,
duration=timedelta(minutes=15),
receive_type=ReceiveType.push,
send_batch_size=None,
message_size=10,
max_wait_time=10,
send_delay=0.01,
receive_delay=0,
should_complete_messages=True,
max_message_count=1,
send_session_id=None,
fail_on_exception=True,
azure_monitor_metric=None,
process_monitor=None,
):
super(StressTestRunnerAsync, self).__init__(
senders,
receivers,
duration=duration,
receive_type=receive_type,
send_batch_size=send_batch_size,
message_size=message_size,
max_wait_time=max_wait_time,
send_delay=send_delay,
receive_delay=receive_delay,
should_complete_messages=should_complete_messages,
max_message_count=max_message_count,
send_session_id=send_session_id,
fail_on_exception=fail_on_exception,
azure_monitor_metric=azure_monitor_metric,
process_monitor=process_monitor,
)
async def _send_async(self, sender, end_time):
self._schedule_interval_logger(end_time, "Sender " + str(self))
try:
_logger.info("STARTING SENDER")
async with sender:
while end_time > datetime.utcnow() and not self._should_stop:
_logger.info("SENDING")
try:
message = self._construct_message()
                        if self.send_session_id is not None:
message.session_id = self.send_session_id
await sender.send_messages(message)
self.azure_monitor_metric.record_messages_cpu_memory(
self.send_batch_size,
self.process_monitor.cpu_usage_percent,
self.process_monitor.memory_usage_percent,
)
                        if self.send_batch_size:
                            self._state.total_sent += self.send_batch_size
                        else:
                            self._state.total_sent += 1 # send single message
self.on_send(self._state, message, sender)
except Exception as e:
_logger.exception("Exception during send: {}".format(e))
self.azure_monitor_metric.record_error(e)
self._state.exceptions.append(e)
if self.fail_on_exception:
raise
await asyncio.sleep(self.send_delay)
self._state.timestamp = datetime.utcnow()
return self._state
except Exception as e:
_logger.exception("Exception in sender: {}".format(e))
self._should_stop = True
raise
async def _receive_handle_message(self, message, receiver, end_time):
self.on_receive(self._state, message, receiver)
try:
if self.should_complete_messages:
await receiver.complete_message(message)
except MessageAlreadySettled: # It may have been settled in the plugin callback.
pass
self._state.total_received += 1
# TODO: Get EnqueuedTimeUtc out of broker properties and calculate latency. Should properties/app properties be mostly None?
await asyncio.sleep(self.receive_delay)
self.azure_monitor_metric.record_messages_cpu_memory(
1,
self.process_monitor.cpu_usage_percent,
self.process_monitor.memory_usage_percent,
)
async def _receive_async(self, receiver, end_time):
self._schedule_interval_logger(end_time, "Receiver " + str(self))
try:
async with receiver:
while end_time > datetime.utcnow() and not self._should_stop:
_logger.info("RECEIVE LOOP")
try:
if self.receive_type == ReceiveType.pull:
batch = await receiver.receive_messages(
max_message_count=self.max_message_count,
max_wait_time=self.max_wait_time,
)
for message in batch:
await self._receive_handle_message(
message, receiver, end_time
)
elif self.receive_type == ReceiveType.push:
batch = receiver._get_streaming_message_iter(
max_wait_time=self.max_wait_time
)
async for message in batch:
if end_time <= datetime.utcnow():
break
await self._receive_handle_message(
message, receiver, end_time
)
self.post_receive(self._state, receiver)
except Exception as e:
_logger.exception("Exception during receive: {}".format(e))
self._state.exceptions.append(e)
self.azure_monitor_metric.record_error(e)
if self.fail_on_exception:
raise
self._state.timestamp = datetime.utcnow()
return self._state
except Exception as e:
self.azure_monitor_metric.record_error(e)
_logger.exception("Exception in receiver {}".format(e))
self._should_stop = True
raise
async def run_async(self):
start_time = datetime.utcnow()
end_time = start_time + (self._duration_override or self.duration)
send_tasks = [
asyncio.create_task(self._send_async(sender, end_time))
for sender in self.senders
]
receive_tasks = [
asyncio.create_task(self._receive_async(receiver, end_time))
for receiver in self.receivers
]
with self.process_monitor:
await asyncio.gather(*send_tasks, *receive_tasks)
result = StressTestResults()
result.state_by_sender = {
s: f.result() for s, f in zip(self.senders, send_tasks)
}
result.state_by_receiver = {
r: f.result() for r, f in zip(self.receivers, receive_tasks)
}
_logger.info("got receiver results")
result.total_sent = sum(
[r.total_sent for r in result.state_by_sender.values()]
)
result.total_received = sum(
[r.total_received for r in result.state_by_receiver.values()]
)
result.time_elapsed = end_time - start_time
_logger.critical("Stress test completed. Results:\n{}".format(result))
return result
| Azure/azure-sdk-for-python | sdk/servicebus/azure-servicebus/stress/scripts/stress_test_base.py | Python | mit | 20,244 |
def buildConnectionString(params):
"""Build a connection string from a dictionary of parameters.
Returns string."""
return ";".join(["%s=%s" % (k, v) for k, v in params.items()])
if __name__ == "__main__":
myParams = {"server":"mpilgrim",
"database":"master",
"uid":"sa",
"pwd":"secret"
}
print buildConnectionString(myParams)
| MassD/python | dive/odbchelper.py | Python | mit | 404 |
#
# This file is part of Checkbox.
#
# Copyright 2008 Canonical Ltd.
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
#
from checkbox.plugin import Plugin
class RemoteSuite(Plugin):
def register(self, manager):
super(RemoteSuite, self).register(manager)
for (rt, rh) in [
("prompt-remote", self.prompt_remote),
("report-remote", self.report_remote)]:
self._manager.reactor.call_on(rt, rh)
def prompt_remote(self, interface, suite):
self._manager.reactor.fire("prompt-suite", interface, suite)
# Register temporary handler for report-message events
def report_message(message):
message["suite"] = suite["name"]
self._manager.reactor.fire("report-job", message)
event_id = self._manager.reactor.call_on("report-message", report_message)
self._manager.reactor.fire("message-exec", suite)
self._manager.reactor.cancel_call(event_id)
def report_remote(self, suite):
self._manager.reactor.fire("report-suite", suite)
factory = RemoteSuite
| Alberto-Beralix/Beralix | i386-squashfs-root/usr/share/checkbox/plugins/remote_suite.py | Python | gpl-3.0 | 1,664 |
import SqlReader
def main():
reader=SqlReader.SqlReader("SELECT p.objid,p.ra,p.dec,p.r,s.z as redshift FROM galaxy as p join specobj as s on s.bestobjid=p.objid WHERE p.ra BETWEEN 194.138787 AND 195.548787 AND p.dec BETWEEN 27.259389 AND 28.709389")
reader.dataCollect()
if __name__ == "__main__":
main() | chshibo/CosData_Tools | test_SqlReader.py | Python | gpl-3.0 | 316 |
#!/usr/bin/python2.4
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
import os
import tempfile
import pkg.client.api_errors as api_errors
import pkg.pkgsubprocess as subprocess
from pkg.misc import msg, emsg
# Since pkg(1) may be installed without libbe installed
# check for libbe and import it if it exists.
try:
import libbe as be
except ImportError:
# All recovery actions are disabled when libbe can't be imported.
pass
class BootEnv(object):
"""A BootEnv object is an object containing the logic for
managing the recovery of image-update, install and uninstall
operations.
Recovery is only enabled for ZFS filesystems. Any operation
attempted on UFS will not be handled by BootEnv.
This class makes use of usr/lib/python*/vendor-packages/libbe.so
as the python wrapper for interfacing with usr/lib/libbe. Both
libraries are delivered by the SUNWinstall-libs package. This
package is not required for pkg(1) to operate successfully. It is
soft required, meaning if it exists the bootenv class will attempt
to provide recovery support."""
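        # A minimal usage sketch (hypothetical calling code, not part of this
        # module; 'img' stands for the client's image object):
        #
        #       try:
        #               bootenv = BootEnv("/")
        #       except RuntimeError:
        #               bootenv = BootEnvNull("/")
        #       bootenv.init_image_recovery(img)
        #       try:
        #               ... perform the image-update ...
        #               bootenv.activate_image()
        #       except Exception:
        #               bootenv.restore_image()
        #               raise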
def __init__(self, root):
self.be_name = None
self.dataset = None
self.be_name_clone = None
self.clone_dir = None
self.img = None
self.is_live_BE = False
self.is_valid = False
self.snapshot_name = None
self.root = root
rc = 0
assert root != None
# Check for the old beList() API since pkg(1) can be
# back published and live on a system without the latest libbe.
beVals = be.beList()
if isinstance(beVals[0], int):
rc, self.beList = beVals
else:
self.beList = beVals
# Happens e.g. in zones (at least, for now)
if not self.beList or rc != 0:
raise RuntimeError, "nobootenvironments"
# Need to find the name of the BE we're operating on in order
# to create a snapshot and/or a clone of the BE.
for i, beVals in enumerate(self.beList):
# pkg(1) expects a directory as the target of an
# operation. BootEnv needs to determine if this target
# directory maps to a BE. If a bogus directory is
# provided to pkg(1) via -R, then pkg(1) just updates
# '/' which also causes BootEnv to manage '/' as well.
# This should be fixed before this class is ever
# instantiated.
be_name = beVals.get("orig_be_name")
# If we're not looking at a boot env entry or an
# entry that is not mounted then continue.
if not be_name or not beVals.get("mounted"):
continue
# Check if we're operating on the live BE.
# If so it must also be active. If we are not
# operating on the live BE, then verify
# that the mountpoint of the BE matches
# the -R argument passed in by the user.
if root == '/':
if not beVals.get("active"):
continue
else:
self.is_live_BE = True
else:
if beVals.get("mountpoint") != root:
continue
# Set the needed BE components so snapshots
# and clones can be managed.
self.be_name = be_name
self.dataset = beVals.get("dataset")
# Let libbe provide the snapshot name
err, snapshot_name = be.beCreateSnapshot(self.be_name)
self.clone_dir = tempfile.mkdtemp()
# Check first field for failure.
# 2nd field is the returned snapshot name
if err == 0:
self.snapshot_name = snapshot_name
else:
emsg(_("pkg: unable to create an auto "
"snapshot. pkg recovery is disabled."))
raise RuntimeError, "recoveryDisabled"
self.is_valid = True
break
else:
                        # We will get here if we don't find any BEs, e.g.
                        # if we are on UFS.
raise RuntimeError, "recoveryDisabled"
def __store_image_state(self):
"""Internal function used to preserve current image information
and history state to be restored later with __reset_image_state
if needed."""
# Preserve the current history information and state so that if
# boot environment operations fail, they can be written to the
# original image root, etc.
self.img.history.create_snapshot()
def __reset_image_state(self, failure=False):
"""Internal function intended to be used to reset the image
state, if needed, after the failure or success of boot
environment operations."""
if not self.img:
# Nothing to restore.
return
if self.root != self.img.root:
if failure:
# Since the image root changed and the operation
# was not successful, restore the original
# history and state information so that it can
# be recorded in the original image root. This
# needs to be done before the image root is
# reset since it might fail.
self.img.history.restore_snapshot()
self.img.history.discard_snapshot()
# After the completion of an operation that has changed
# the image root, it needs to be reset back to its
# original value so that the client will read and write
# information using the correct location (this is
# especially important for bootenv operations).
self.img.find_root(self.root)
else:
self.img.history.discard_snapshot()
def exists(self):
"""Return true if this object represents a valid BE."""
return self.is_valid
@staticmethod
def check_be_name(be_name):
try:
if be_name is None:
return
if be.beVerifyBEName(be_name) != 0:
raise api_errors.InvalidBENameException(be_name)
# Check for the old beList() API since pkg(1) can be
# back published and live on a system without the
# latest libbe.
beVals = be.beList()
if isinstance(beVals[0], int):
rc, beList = beVals
else:
beList = beVals
# If there is already a BE with the same name as
# be_name, then raise an exception.
if be_name in (be.get("orig_be_name") for be in beList):
raise api_errors.DuplicateBEName(be_name)
except AttributeError:
raise api_errors.BENamingNotSupported(be_name)
def init_image_recovery(self, img, be_name=None):
"""Initialize for an image-update.
If a be_name is given, validate it.
If we're operating on a live BE then clone the
live BE and operate on the clone.
If we're operating on a non-live BE we use
the already created snapshot"""
self.img = img
if self.is_live_BE:
# Create a clone of the live BE and mount it.
self.destroy_snapshot()
self.check_be_name(be_name)
# Do nothing with the returned snapshot name
# that is taken of the clone during beCopy.
ret, self.be_name_clone, not_used = be.beCopy()
if ret != 0:
raise api_errors.UnableToCopyBE()
if be_name:
ret = be.beRename(self.be_name_clone, be_name)
if ret == 0:
self.be_name_clone = be_name
else:
raise api_errors.UnableToRenameBE(
self.be_name_clone, be_name)
if be.beMount(self.be_name_clone, self.clone_dir) != 0:
raise api_errors.UnableToMountBE(
self.be_name_clone, self.clone_dir)
# Set the image to our new mounted BE.
img.find_root(self.clone_dir)
elif be_name is not None:
raise api_errors.BENameGivenOnDeadBE(be_name)
def activate_image(self):
"""Activate a clone of the BE being operated on.
                If we're operating on a non-live BE then
destroy the snapshot."""
def exec_cmd(cmd):
ret = 0
try:
ret = subprocess.call(cmd,
stdout = file("/dev/null"),
stderr = subprocess.STDOUT)
except OSError, e:
emsg(_("pkg: A system error %(e)s was caught "
"executing %(cmd)s") %
{ "e": e, "cmd": " ".join(cmd) })
if ret != 0:
emsg(_("pkg: '%(cmd)s' failed. \nwith a "
"return code of %(ret)d.") %
{ "cmd": " ".join(cmd), "ret": ret })
return
def activate_live_be(cmd):
cmd += [self.clone_dir]
# Activate the clone.
exec_cmd(cmd)
if be.beActivate(self.be_name_clone) != 0:
emsg(_("pkg: unable to activate %s") \
% self.be_name_clone)
return
# Consider the last operation a success, and log it as
# ending here so that it will be recorded in the new
# image's history.
self.img.history.log_operation_end()
if be.beUnmount(self.be_name_clone) != 0:
emsg(_("pkg: unable to unmount %s") \
% self.clone_dir)
return
os.rmdir(self.clone_dir)
msg(_("""
A clone of %s exists and has been updated and activated.
On the next boot the Boot Environment %s will be mounted on '/'.
Reboot when ready to switch to this updated BE.
""") % \
(self.be_name, self.be_name_clone))
def activate_be(cmd):
# Delete the snapshot that was taken before we
                        # updated the image and update the boot archive.
cmd += [self.root]
exec_cmd(cmd)
msg(_("%s has been updated successfully") % \
(self.be_name))
os.rmdir(self.clone_dir)
self.destroy_snapshot()
self.__store_image_state()
caught_exception = None
cmd = [ "/sbin/bootadm", "update-archive", "-R" ]
try:
if self.is_live_BE:
activate_live_be(cmd)
else:
activate_be(cmd)
except Exception, e:
caught_exception = e
self.__reset_image_state(failure=caught_exception)
if caught_exception:
self.img.history.log_operation_error(error=e)
raise caught_exception
def restore_image(self):
"""Restore a failed image-update attempt."""
self.__reset_image_state(failure=True)
# Leave the clone around for debugging purposes if we're
# operating on the live BE.
if self.is_live_BE:
emsg(_(" The running system has not been modified. "
"Modifications were only made to a clone of the "
"running system. This clone is mounted at %s "
"should you wish to inspect it.") % self.clone_dir)
else:
# Rollback and destroy the snapshot.
try:
if be.beRollback(self.be_name,
self.snapshot_name) != 0:
emsg(_("pkg: unable to rollback BE %s "
"and restore image") % self.be_name)
self.destroy_snapshot()
os.rmdir(self.clone_dir)
except Exception, e:
self.img.history.log_operation_error(error=e)
raise e
msg(_("%s failed to be updated. No changes have been "
"made to %s.") % (self.be_name, self.be_name))
def destroy_snapshot(self):
"""Destroy a snapshot of the BE being operated on.
Note that this will destroy the last created
snapshot and does not support destroying
multiple snapshots. Create another instance of
BootEnv to manage multiple snapshots."""
if be.beDestroySnapshot(self.be_name, self.snapshot_name) != 0:
emsg(_("pkg: unable to destroy snapshot %s") % \
self.snapshot_name)
def restore_install_uninstall(self):
"""Restore a failed install or uninstall attempt.
Clone the snapshot, mount the BE and
notify user of its existence. Rollback
if not operating on a live BE"""
if self.is_live_BE:
# Create a new BE based on the previously taken
# snapshot.
ret, self.be_name_clone, not_used = \
be.beCopy(None, self.be_name, self.snapshot_name)
if ret != 0:
# If the above beCopy() failed we will try it
# without expecting the BE clone name to be
# returned by libbe. We do this in case an old
# version of libbe is on a system with
# a new version of pkg.
self.be_name_clone = self.be_name + "_" + \
self.snapshot_name
ret, not_used, not_used2 = \
be.beCopy(self.be_name_clone, \
self.be_name, self.snapshot_name)
if ret != 0:
emsg(_("pkg: unable to create BE %s") \
% self.be_name_clone)
return
if be.beMount(self.be_name_clone, self.clone_dir) != 0:
emsg(_("pkg: unable to mount BE %(name)s "
"on %(clone_dir)s") %
{ "name": self.be_name_clone,
"clone_dir": self.clone_dir })
return
emsg(_("The Boot Environment %(name)s failed to be "
"updated. A snapshot was taken before the failed "
"attempt and is mounted here %(clone_dir)s. Use "
"'beadm unmount %(clone_name)s' and then 'beadm "
"activate %(clone_name)s' if you wish to boot "
"to this BE.") %
{ "name": self.be_name,
"clone_dir": self.clone_dir,
"clone_name": self.be_name_clone })
else:
if be.beRollback(self.be_name, self.snapshot_name) != 0:
emsg("pkg: unable to rollback BE %s" % \
self.be_name)
self.destroy_snapshot()
emsg(_("The Boot Environment %s failed to be updated. "
"A snapshot was taken before the failed attempt "
"and has been restored so no changes have been "
"made to %s.") % (self.be_name, self.be_name))
def activate_install_uninstall(self):
"""Activate an install/uninstall attempt. Which just means
destroy the snapshot for the live and non-live case."""
self.destroy_snapshot()
class BootEnvNull(object):
"""BootEnvNull is a class that gets used when libbe doesn't exist."""
def __init__(self, root):
pass
def exists(self):
return False
@staticmethod
def check_be_name(be_name):
if be_name:
raise api_errors.BENamingNotSupported(be_name)
def init_image_recovery(self, img, be_name=None):
if be_name is not None:
raise api_errors.BENameGivenOnDeadBE(be_name)
def activate_image(self):
pass
def restore_image(self):
pass
def destroy_snapshot(self):
pass
def restore_install_uninstall(self):
pass
def activate_install_uninstall(self):
pass
if "be" not in locals():
BootEnv = BootEnvNull
| marcellodesales/svnedge-console | ext/windows/pkg-toolkit/pkg/vendor-packages/pkg/client/bootenv.py | Python | agpl-3.0 | 20,740 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import unittest
from unittest.mock import patch
from azure.mgmt.containerinstance.models import (
Container, ContainerGroup, Logs, ResourceRequests, ResourceRequirements,
)
from airflow.models import Connection
from airflow.providers.microsoft.azure.hooks.azure_container_instance import AzureContainerInstanceHook
from airflow.utils import db
class TestAzureContainerInstanceHook(unittest.TestCase):
def setUp(self):
db.merge_conn(
Connection(
conn_id='azure_container_instance_test',
conn_type='azure_container_instances',
login='login',
password='key',
extra=json.dumps({'tenantId': 'tenant_id',
'subscriptionId': 'subscription_id'})
)
)
self.resources = ResourceRequirements(requests=ResourceRequests(
memory_in_gb='4',
cpu='1'))
with patch('azure.common.credentials.ServicePrincipalCredentials.__init__',
autospec=True, return_value=None):
with patch('azure.mgmt.containerinstance.ContainerInstanceManagementClient'):
self.hook = AzureContainerInstanceHook(conn_id='azure_container_instance_test')
@patch('azure.mgmt.containerinstance.models.ContainerGroup')
@patch('azure.mgmt.containerinstance.operations.ContainerGroupsOperations.create_or_update')
def test_create_or_update(self, create_or_update_mock, container_group_mock):
self.hook.create_or_update('resource_group', 'aci-test', container_group_mock)
create_or_update_mock.assert_called_once_with('resource_group', 'aci-test', container_group_mock)
@patch('azure.mgmt.containerinstance.operations.ContainerGroupsOperations.get')
def test_get_state(self, get_state_mock):
self.hook.get_state('resource_group', 'aci-test')
get_state_mock.assert_called_once_with('resource_group', 'aci-test', raw=False)
@patch('azure.mgmt.containerinstance.operations.ContainerOperations.list_logs')
def test_get_logs(self, list_logs_mock):
expected_messages = ['log line 1\n', 'log line 2\n', 'log line 3\n']
logs = Logs(content=''.join(expected_messages))
list_logs_mock.return_value = logs
logs = self.hook.get_logs('resource_group', 'name', 'name')
self.assertSequenceEqual(logs, expected_messages)
@patch('azure.mgmt.containerinstance.operations.ContainerGroupsOperations.delete')
def test_delete(self, delete_mock):
self.hook.delete('resource_group', 'aci-test')
delete_mock.assert_called_once_with('resource_group', 'aci-test')
@patch('azure.mgmt.containerinstance.operations.ContainerGroupsOperations.list_by_resource_group')
def test_exists_with_existing(self, list_mock):
list_mock.return_value = [ContainerGroup(os_type='Linux',
containers=[Container(name='test1',
image='hello-world',
resources=self.resources)])]
self.assertFalse(self.hook.exists('test', 'test1'))
@patch('azure.mgmt.containerinstance.operations.ContainerGroupsOperations.list_by_resource_group')
def test_exists_with_not_existing(self, list_mock):
list_mock.return_value = [ContainerGroup(os_type='Linux',
containers=[Container(name='test1',
image='hello-world',
resources=self.resources)])]
self.assertFalse(self.hook.exists('test', 'not found'))
| spektom/incubator-airflow | tests/providers/microsoft/azure/hooks/test_azure_container_instance.py | Python | apache-2.0 | 4,590 |
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from hordak.models import Account
from swiftwind.settings.models import Settings
class Command(BaseCommand):
help = 'Create the initial chart of accounts'
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument(
'--preserve', dest='preserve', default=False, action='store_true',
help='Exit normally if any accounts already exist.',
)
parser.add_argument(
'--currency', dest='currency',
help='Account currencies, can be specified multiple times. Defaults to the default currency setting.',
nargs='+',
)
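    # Example invocations (hypothetical; the command name comes from this
    # module's filename once the app is installed):
    #
    #   python manage.py swiftwind_create_accounts --currency GBP
    #   python manage.py swiftwind_create_accounts --preserve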
def handle(self, *args, **options):
        if options.get('preserve') and Account.objects.count():
            self.stdout.write('Exiting normally because accounts already exist and --preserve flag was present')
            return
if options.get('currency'):
currency = options['currency']
else:
try:
currency = Settings.objects.get().default_currency
except Settings.DoesNotExist:
raise CommandError('No currency specified by either --currency or by the swiftwind settings.')
kw = dict(currencies=currency)
T = Account.TYPES
assets = Account.objects.create(name='Assets', code='1', type=T.asset, **kw)
liabilities = Account.objects.create(name='Liabilities', code='2', type=T.liability, **kw)
equity = Account.objects.create(name='Equity', code='3', type=T.equity, **kw)
income = Account.objects.create(name='Income', code='4', type=T.income, **kw)
expenses = Account.objects.create(name='Expenses', code='5', type=T.expense, **kw)
bank = Account.objects.create(name='Bank', code='1', is_bank_account=True, type='AS', parent=assets, **kw)
housemate_income = Account.objects.create(name='Housemate Income', code='1', parent=income, **kw)
other_income = Account.objects.create(name='Other Income', code='2', parent=income, **kw)
current_liabilities = Account.objects.create(name='Current Liabilities', code='1', parent=liabilities, **kw)
long_term_liabilities = Account.objects.create(name='Long Term Liabilities', code='2', parent=liabilities, **kw)
gas_payable = Account.objects.create(name='Gas Payable', code='1', parent=current_liabilities, **kw)
electricity_payable = Account.objects.create(name='Electricity Payable', code='2', parent=current_liabilities, **kw)
council_tax_payable = Account.objects.create(name='Council Tax Payable', code='3', parent=current_liabilities, **kw)
internet_payable = Account.objects.create(name='Internet Payable', code='4', parent=current_liabilities, **kw)
retained_earnings = Account.objects.create(name='Retained Earnings', code='1', parent=equity, **kw)
rent = Account.objects.create(name='Rent', code='1', parent=expenses, **kw)
utilities = Account.objects.create(name='Utilities', code='2', parent=expenses, **kw)
food = Account.objects.create(name='Food', code='3', parent=expenses, **kw)
other_expenses = Account.objects.create(name='Other Expenses', code='4', parent=expenses, **kw)
gas_expense = Account.objects.create(name='Gas Expense', code='1', parent=utilities, **kw)
electricity_expense = Account.objects.create(name='Electricity Expense', code='2', parent=utilities, **kw)
council_tax_expense = Account.objects.create(name='Council Tax Expense', code='3', parent=utilities, **kw)
internet_expense = Account.objects.create(name='Internet Expense', code='4', parent=utilities, **kw)
| adamcharnock/swiftwind | swiftwind/core/management/commands/swiftwind_create_accounts.py | Python | mit | 3,772 |
import ctypes
from .common import *
from . import functions
from .functions import p_cfg, p_db, p_pkg, p_elf
from .utils import *
SONAME = 'libpkgdepdb.so.1'
class LogLevel(object):
Debug = 0
Message = 1
Print = 2
Warn = 3
Error = 4
class PkgEntry(object):
Depends = 0
OptDepends = 1
MakeDepends = 2
CheckDepends = 3
Provides = 4
Conflicts = 5
Replaces = 6
class JSON(object):
Query = 1
DB = 2
class ELF(object):
CLASSNONE = 0
CLASS32 = 1
CLASS64 = 2
DATANONE = 0
DATA2LSB = 1
DATA2MSB = 2
OSABI_NONE = 0
OSABI_HPUX = 1
OSABI_NETBSD = 2
OSABI_LINUX = 3
OSABI_HURD = 4
OSABI_86OPEN = 5
OSABI_SOLARIS = 6
OSABI_AIX = 7
OSABI_IRIX = 8
OSABI_FREEBSD = 9
OSABI_TRU64 = 10
OSABI_MODESTO = 11
OSABI_OPENBSD = 12
OSABI_OPENVMS = 13
OSABI_NSK = 14
OSABI_AROS = 15
OSABI_ARM = 97
OSABI_STANDALONE = 255
OSABI_SYSV = 0 # ELFOSABI_NONE
OSABI_MONTEREY = 7 # ELFOSABI_AIX
class lib(object):
pass
rawlib = ctypes.CDLL(SONAME, mode=ctypes.RTLD_GLOBAL)
if rawlib is None:
raise PKGDepDBException('failed to open %s' % SONAME)
functions.load(rawlib, lib, functions.pkgdepdb_functions, 'pkgdepdb_')
class Config(object):
def __init__(self):
self._ptr = lib.cfg_new()
if self._ptr is None:
raise PKGDepDBException('failed to create config instance')
def __del__(self):
lib.cfg_delete(self._ptr)
self._ptr = None
def load(self, filepath):
path = filepath.encode('utf-8')
if lib.cfg_load(self._ptr, path) == 0:
raise PKGDepDBException('failed to load from: %s' % (filepath))
def load_default(self):
if lib.cfg_load_default(self._ptr) == 0:
raise PKGDepDBException('failed to load default config')
def read(self, name, data):
data = cstr(data)
if lib.cfg_read(self._ptr, cstr(name), data, len(data)) != 1:
raise PKGDepDBException('failed to parse config')
database = StringProperty(lib.cfg_database, lib.cfg_set_database)
verbosity = IntProperty(lib.cfg_verbosity, lib.cfg_set_verbosity)
log_level = IntProperty(lib.cfg_log_level, lib.cfg_set_log_level)
max_jobs = IntProperty(lib.cfg_max_jobs, lib.cfg_set_max_jobs)
json = IntProperty(lib.cfg_json, lib.cfg_set_json)
quiet = BoolProperty(lib.cfg_quiet, lib.cfg_set_quiet)
package_depends = BoolProperty(lib.cfg_package_depends,
lib.cfg_set_package_depends)
package_file_lists = BoolProperty(lib.cfg_package_file_lists,
lib.cfg_set_package_file_lists)
package_info = BoolProperty(lib.cfg_package_info,
lib.cfg_set_package_info)
def __eq__(self, other):
return self._ptr[0] == other._ptr[0]
def __ne__(self, other):
return self._ptr[0] != other._ptr[0]
class DB(object):
class PackageList(object):
def __init__(self, owner):
self.owner = owner
def __len__(self):
return lib.db_package_count(self.owner._ptr)
def get(self, off=0, count=None):
if isinstance(off, str):
if count is not None:
raise ValueError('named access cannot have a count')
return self.get_named(off)
if off < 0: raise IndexError
maxcount = len(self)
if off >= maxcount: raise IndexError
count = count or maxcount
if count < 0: raise ValueError('cannot fetch a negative count')
count = min(count, maxcount - off)
out = (p_pkg * count)()
got = lib.db_package_get(self.owner._ptr, out, off, count)
return [Package(x,True) for x in out[0:got]]
def get_named(self, name):
ptr = lib.db_package_find(self.owner._ptr, cstr(name))
if ptr is None:
raise KeyError('no such package: %s' % (name))
return Package(ptr,True)
def __getitem__(self, key):
if isinstance(key, slice):
return self.__getslice__(key.start, key.stop, key.step)
if isinstance(key, str):
return self.get_named(key)
return self.get(key, 1)[0]
def __getslice__(self, start=None, stop=None, step=None):
step = step or 1
start = start or 0
count = stop - start if stop else None
if step == 0: raise ValueError('step cannot be zero')
if count == 0: return []
if step > 0:
if count < 0: return []
return self.get(start, count)[::step]
else:
if count > 0: return []
return self.get(start+count, -count)[-count:0:step]
def __contains__(self, value):
return value in self.get()
class ElfList(object):
def __init__(self, owner):
self.owner = owner
def __len__(self):
return lib.db_object_count(self.owner._ptr)
def get_named(self, name):
return [i for i in self.get() if i.basename == name][0]
def get(self, off=0, count=None):
if isinstance(off, str):
if count is not None:
raise ValueError('named access cannot have a count')
return self.get_named(off)
if off < 0: raise IndexError
maxcount = len(self)
if off >= maxcount: raise IndexError
count = count or maxcount
if count < 0: raise ValueError('cannot fetch a negative count')
count = min(count, maxcount - off)
            out = (p_elf * count)()
got = lib.db_object_get(self.owner._ptr, out, off, count)
return [Elf(x) for x in out[0:got]]
def __getitem__(self, key):
if isinstance(key, slice):
return self.__getslice__(key.start, key.stop, key.step)
if isinstance(key, str):
return self.get_named(key)
return self.get(key, 1)[0]
def __getslice__(self, start=None, stop=None, step=None):
step = step or 1
start = start or 0
count = stop - start if stop else None
if step == 0: raise ValueError('step cannot be zero')
if count == 0: return []
if step > 0:
if count < 0: return []
return self.get(start, count)[::step]
else:
if count > 0: return []
return self.get(start+count, -count)[-count:0:step]
def __contains__(self, value):
return value in self.get()
def __init__(self, cfg):
self._ptr = lib.db_new(cfg._ptr)
if self._ptr is None:
raise PKGDepDBException('failed to create database instance')
self._library_path = StringListAccess(self,
lib.db_library_path_count,
lib.db_library_path_get,
lib.db_library_path_add,
lib.db_library_path_contains,
lib.db_library_path_del_s,
lib.db_library_path_del_i,
lib.db_library_path_del_r,
lib.db_library_path_set_i)
self._ignored_files = StringListAccess(self,
lib.db_ignored_files_count,
lib.db_ignored_files_get,
lib.db_ignored_files_add,
lib.db_ignored_files_contains,
lib.db_ignored_files_del_s,
lib.db_ignored_files_del_i,
lib.db_ignored_files_del_r)
self._base_packages = StringListAccess(self,
lib.db_base_packages_count,
lib.db_base_packages_get,
lib.db_base_packages_add,
lib.db_base_packages_contains,
lib.db_base_packages_del_s,
lib.db_base_packages_del_i,
lib.db_base_packages_del_r)
self._assume_found = StringListAccess(self,
lib.db_assume_found_count,
lib.db_assume_found_get,
lib.db_assume_found_add,
lib.db_assume_found_contains,
lib.db_assume_found_del_s,
lib.db_assume_found_del_i,
lib.db_assume_found_del_r)
self.packages = DB.PackageList(self)
self.elfs = DB.ElfList(self)
loaded_version = IntGetter(lib.db_loaded_version)
strict_linking = BoolProperty(lib.db_strict_linking,
lib.db_set_strict_linking)
name = StringProperty(lib.db_name, lib.db_set_name)
library_path = StringListProperty('_library_path')
ignored_files = StringListProperty('_ignored_files')
base_packages = StringListProperty('_base_packages')
assume_found = StringListProperty('_assume_found')
def __del__(self):
lib.db_delete(self._ptr)
def read(self, path):
if lib.db_load(self._ptr, cstr(path)) != 1:
raise PKGDepDBException('failed to read database from %s' % (path))
def load(self, path):
return self.read(path)
def store(self, path):
if lib.db_store(self._ptr, cstr(path)) != 1:
raise PKGDepDBException('failed to store database to %s' % (path))
def relink_all(self):
lib.db_relink_all(self._ptr)
def fix_paths(self):
lib.db_fix_paths(self._ptr)
def wipe_packages(self):
return lib.db_wipe_packages(self._ptr) == 1
def wipe_filelists(self):
return lib.db_wipe_file_lists(self._ptr) == 1
def install(self, pkg):
if lib.db_package_install(self._ptr, pkg._ptr) != 1:
raise PKGDepDBException('package installation failed')
pkg.linked = True
def uninstall_package(self, pkg):
if isinstance(pkg, int):
if lib.db_package_remove_i(self._ptr, pkg) != 1:
raise PKGDepDBException('failed to remove package')
else:
if lib.db_package_remove_p(self._ptr, pkg._ptr) != 1:
raise PKGDepDBException('failed to remove package')
pkg.linked = False
def delete_package(self, pkg):
if isinstance(pkg, int):
if lib.db_package_delete_i(self._ptr, pkg) != 1:
raise PKGDepDBException('cannot delete package %i' % (pkg))
elif isinstance(pkg, str):
if lib.db_package_delete_s(self._ptr, cstr(pkg)) != 1:
raise PKGDepDBException('cannot delete package %s' % (pkg))
else:
if lib.db_package_delete_p(self._ptr, pkg._ptr) != 1:
raise PKGDepDBException('failed to uninstall package')
pkg.linked = False
del pkg
def is_broken(self, what):
if type(what) == Package:
v = lib.db_package_is_broken(self._ptr, what._ptr)
elif type(what) == Elf:
v = lib.db_object_is_broken(self._ptr, what._ptr)
else:
raise TypeError('object must be a Package or Elf instance')
return v == 1
def __eq__(self, other):
return self._ptr[0] == other._ptr[0]
def __ne__(self, other):
return self._ptr[0] != other._ptr[0]
class Package(object):
class ElfList(object):
def __init__(self, owner):
self.owner = owner
def __len__(self):
return lib.pkg_elf_count(self.owner._ptr)
def get(self, off=0, count=None):
if off < 0: raise IndexError
maxcount = len(self)
if off >= maxcount: raise IndexError
count = count or maxcount
if count < 0: raise ValueError('cannot fetch a negative count')
count = min(count, maxcount - off)
out = (p_elf * count)()
got = lib.pkg_elf_get(self.owner._ptr, out, off, count)
return [Elf(x) for x in out[0:got]]
def add(self, elf):
return lib.pkg_elf_add(self.owner._ptr, elf._ptr) == 1
def delete(self, what):
if type(what) == Elf:
if 1 != lib.pkg_elf_del_e(self.owner._ptr, what._ptr):
raise KeyError('package does not contain this object')
elif isinstance(what, int):
if 1 != lib.pkg_elf_del_i(self.owner._ptr, what):
raise KeyError('no such index: %i' % (what))
else:
raise TypeError('cannot delete objects by name yet')
def delete_range(self, idx, count):
lib.pkg_elf_del_r(self.owner._ptr, idx, count)
def set_i(self, idx, what):
if what is not None:
what = what._ptr
if lib.pkg_elf_set_i(self.owner._ptr, idx, what) == 0:
raise IndexError('no such index: %i' % (idx))
def __getitem__(self, key):
if isinstance(key, slice):
return self.__getslice__(key.start, key.stop, key.step)
if isinstance(key, str):
raise TypeError('cannot index objects by name yet')
return self.get(key, 1)[0]
def __getslice__(self, start=None, stop=None, step=None):
step = step or 1
start = start or 0
count = stop - start if stop else None
if step == 0: raise ValueError('step cannot be zero')
if count == 0: return []
if step > 0:
if count < 0: return []
return self.get(start, count)[::step]
else:
if count > 0: return []
return self.get(start+count, -count)[-count:0:step]
def __contains__(self, value):
return value in self.get()
def __delitem__(self, key):
if isinstance(key, slice):
return self.__delslice__(key.start, key.stop, key.step)
self.delete(key)
def __delslice__(self, start=None, stop=None, step=None):
step = step or 1
start = start or 0
            stop = stop or len(self)
if step == 0:
raise ValueError('step cannot be zero')
if step == 1:
if stop <= start: return
return self.delete_range(start, stop-start)
            if step > 0:
                indices = range(start, stop, step)
            else:
                # walk a negative-step slice in ascending index order so the
                # offset adjustment below stays correct
                indices = list(range(start, stop, step))[::-1]
            minus = 0
            for idx in indices:
                self.delete(idx - minus)
                minus += 1
def __setitem__(self, key, value):
if isinstance(key, slice):
return self.__setslice__(key.start, key.stop, value, key.step)
if not isinstance(key, int): raise TypeError
if key < 0: raise IndexError
count = self.__len__()
if key > count:
raise IndexError
elif key == count:
self.add(value)
else:
self.set_i(key, value)
def __setslice__(self, start, stop, values, step=None):
count = len(self)
start = start or 0
stop = stop or count
step = step or 1
if step == 0:
raise ValueError('step cannot be zero')
if step > 0:
if start < 0:
raise IndexError
for v in values:
if start >= stop:
return
if start == count:
if self.add(v):
count += 1
elif start > count:
raise IndexError
else:
self.set_i(start, v)
start += step
else:
for v in values:
if start <= stop:
return
if start < 0:
raise IndexError
if start == count:
if self.add(v):
count += 1
elif start > count:
raise IndexError
else:
self.set_i(start, v)
start += step
def append(self, value):
return self.add(value)
def extend(self, value):
for i in value:
self.append(i)
def __init__(self, ptr=None, linked=False):
self._ptr = ptr or lib.pkg_new()
if self._ptr is None:
raise PKGDepDBException('failed to create package instance')
self.linked = linked
self._groups = StringListAccess(self,
lib.pkg_groups_count,
lib.pkg_groups_get,
lib.pkg_groups_add,
lib.pkg_groups_contains,
lib.pkg_groups_del_s,
lib.pkg_groups_del_i,
lib.pkg_groups_del_r)
self._filelist = StringListAccess(self,
lib.pkg_filelist_count,
lib.pkg_filelist_get,
lib.pkg_filelist_add,
lib.pkg_filelist_contains,
lib.pkg_filelist_del_s,
lib.pkg_filelist_del_i,
lib.pkg_filelist_del_r,
lib.pkg_filelist_set_i)
self._info = StringMapOfStringList(self,
lib.pkg_info_count_keys,
lib.pkg_info_get_keys,
lib.pkg_info_count_values,
lib.pkg_info_get_values,
lib.pkg_info_add,
lib.pkg_info_del_s,
lib.pkg_info_del_i,
lib.pkg_info_del_r,
lib.pkg_info_set_i,
lib.pkg_info_contains_key)
        def make_deplist(self, what):
            return DepListAccess(self,
                lambda o: lib.pkg_dep_count(o, what),
                lambda o, *arg: lib.pkg_dep_get(o, what, *arg),
                lambda o, *arg: lib.pkg_dep_add(o, what, *arg),
                lambda o, *arg: lib.pkg_dep_contains(o, what, *arg),
                lambda o, *arg: lib.pkg_dep_del_name(o, what, *arg),
                lambda o, *arg: lib.pkg_dep_del_full(o, what, *arg),
                lambda o, *arg: lib.pkg_dep_del_i(o, what, *arg),
                lambda o, *arg: lib.pkg_dep_del_r(o, what, *arg),
                lambda o, *arg: lib.pkg_dep_set_i(o, what, *arg))
self._depends = make_deplist(self, PkgEntry.Depends)
self._optdepends = make_deplist(self, PkgEntry.OptDepends)
self._makedepends = make_deplist(self, PkgEntry.MakeDepends)
self._checkdepends = make_deplist(self, PkgEntry.CheckDepends)
self._provides = make_deplist(self, PkgEntry.Provides)
self._conflicts = make_deplist(self, PkgEntry.Conflicts)
self._replaces = make_deplist(self, PkgEntry.Replaces)
self._elfs = Package.ElfList(self)
groups = StringListProperty('_groups')
filelist = StringListProperty('_filelist')
info = StringListProperty('_info')
depends = StringListProperty('_depends')
optdepends = StringListProperty('_optdepends')
makedepends = StringListProperty('_makedepends')
checkdepends = StringListProperty('_checkdepends')
provides = StringListProperty('_provides')
conflicts = StringListProperty('_conflicts')
replaces = StringListProperty('_replaces')
name = StringProperty(lib.pkg_name, lib.pkg_set_name)
version = StringProperty(lib.pkg_version, lib.pkg_set_version)
pkgbase = StringProperty(lib.pkg_pkgbase, lib.pkg_set_pkgbase)
description = StringProperty(lib.pkg_description, lib.pkg_set_description)
@property
def elfs(self):
return self._elfs
@elfs.setter
def elfs(self, value):
self._elfs.delete_range(0, len(self._elfs))
self._elfs.extend(value)
def __del__(self):
if not self.linked:
lib.pkg_delete(self._ptr)
@staticmethod
def load(path, cfg):
ptr = lib.pkg_load(cstr(path), cfg._ptr)
if ptr is None:
raise PKGDepDBException('failed to load package from: %s' % (path))
return Package(ptr, False)
def read_info(self, pkginfo_text, cfg):
if lib.pkg_read_info(self._ptr, cstr(pkginfo_text), len(pkginfo_text),
cfg._ptr) != 1:
raise PKGDepDBException('error parsing PKGINFO')
def guess(self, filename):
lib.pkg_guess(self._ptr, cstr(filename))
def conflicts_with(self, other):
if type(other) != Package:
raise TypeError('other must be a package')
return lib.pkg_conflict(self._ptr, other._ptr) != 0
def replaces_package(self, other):
if type(other) != Package:
raise TypeError('other must be a package')
return lib.pkg_replaces(self._ptr, other._ptr) != 0
def __eq__(self, other):
return self._ptr[0] == other._ptr[0]
def __ne__(self, other):
return self._ptr[0] != other._ptr[0]
class Elf(object):
class FoundList(object):
def __init__(self, owner):
self.owner = owner
def __len__(self):
return lib.elf_found_count(self.owner._ptr)
def get_named(self, name):
got = lib.elf_found_find(self.owner._ptr, cstr(name))
if got is None:
raise KeyError('no such found dependency: %s' % name)
return Elf(got)
def get(self, off=0, count=None):
if isinstance(off, str):
if count is not None:
raise ValueError('named access cannot have a count')
                return self.get_named(off)
if off < 0: raise IndexError
maxcount = len(self)
if off >= maxcount: raise IndexError
count = count or maxcount
if count < 0: raise ValueError('cannot fetch a negative count')
count = min(count, maxcount - off)
            out = (p_elf * count)()
got = lib.elf_found_get(self.owner._ptr, out, off, count)
return [Elf(x) for x in out[0:got]]
def __getitem__(self, key):
if isinstance(key, slice):
return self.__getslice__(key.start, key.stop, key.step)
if isinstance(key, str):
return self.get_named(key)
return self.get(key, 1)[0]
def __getslice__(self, start=None, stop=None, step=None):
step = step or 1
start = start or 0
count = stop - start if stop else None
if step == 0: raise ValueError('step cannot be zero')
if count == 0: return []
if step > 0:
if count < 0: return []
return self.get(start, count)[::step]
else:
if count > 0: return []
return self.get(start+count, -count)[-count:0:step]
def __contains__(self, value):
if type(value) == Elf:
return value in self.get()
if isinstance(value, str):
try:
self.get_named(value)
return True
except KeyError:
return False
raise TypeError('need a library name or Elf instance')
def __init__(self, ptr=None):
self._ptr = ptr or lib.elf_new()
if self._ptr is None:
raise PKGDepDBException('failed to create Elf instance')
self._needed = StringListAccess(self,
lib.elf_needed_count,
lib.elf_needed_get,
lib.elf_needed_add,
lib.elf_needed_contains,
lib.elf_needed_del_s,
lib.elf_needed_del_i,
lib.elf_needed_del_r)
self._missing = StringListAccess(self,
lib.elf_missing_count,
lib.elf_missing_get,
None,
lib.elf_missing_contains,
None, None, None)
self._found = Elf.FoundList(self)
needed = StringListProperty('_needed')
dirname = StringProperty(lib.elf_dirname, lib.elf_set_dirname)
basename = StringProperty(lib.elf_basename, lib.elf_set_basename)
ei_class = IntProperty (lib.elf_class, lib.elf_set_class)
ei_data = IntProperty (lib.elf_data, lib.elf_set_data)
ei_osabi = IntProperty (lib.elf_osabi, lib.elf_set_osabi)
rpath = StringProperty(lib.elf_rpath, lib.elf_set_rpath)
runpath = StringProperty(lib.elf_runpath, lib.elf_set_runpath)
interpreter = StringProperty(lib.elf_interpreter, lib.elf_set_interpreter)
@property
def found(self):
return self._found
@found.setter
def found(self, unused_value):
raise PKGDepDBException('Elf.found is a read only property')
@property
def missing(self):
return self._missing
@missing.setter
def missing(self, unused_value):
raise PKGDepDBException('Elf.missing is a read only property')
def __del__(self):
lib.elf_unref(self._ptr)
@staticmethod
def load(path, cfg):
err = ctypes.c_int(0)
ptr = lib.elf_load(cstr(path), ctypes.byref(err), cfg._ptr)
if ptr is None:
            if err.value != 0:
raise PKGDepDBException('failed to parse object %s' % (path))
else:
raise PKGDepDBException('failed to load %s: %s' % (path,
from_c_string(lib.error())))
return Elf(ptr)
@staticmethod
def read(byteobj, basename, dirname, cfg):
err = ctypes.c_int(0)
ptr = lib.elf_read(byteobj, len(byteobj), cstr(basename),
                           cstr(dirname), ctypes.byref(err), cfg._ptr)
if ptr is None:
            if err.value != 0:
raise PKGDepDBException('failed to parse object %s'
% (basename))
else:
raise PKGDepDBException('failed to load %s: %s'
% (basename, from_c_string(lib.error())))
return Elf(ptr)
def class_string(self):
return from_c_string(lib.elf_class_string(self._ptr))
def data_string(self):
return from_c_string(lib.elf_data_string(self._ptr))
def osabi_string(self):
return from_c_string(lib.elf_osabi_string(self._ptr))
def can_use(self, other, strict=True):
return lib.elf_can_use(self._ptr, other._ptr, 1 if strict else 0)
def __eq__(self, other):
return self._ptr[0] == other._ptr[0]
def __ne__(self, other):
return self._ptr[0] != other._ptr[0]
__all__ = [
    'PKGDepDBException',
    'p_cfg', 'p_db', 'p_pkg', 'p_elf',
    'LogLevel', 'PkgEntry', 'JSON', 'ELF',
    'rawlib',
    'lib',
    'Config', 'DB', 'Package', 'Elf',
    # for testing
    'StringListAccess',
    'StringListProperty',
    ]
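# A minimal usage sketch (hypothetical package path; assumes libpkgdepdb.so.1
# is installed and loadable):
#
#   cfg = Config()
#   cfg.load_default()
#   db = DB(cfg)
#   pkg = Package.load('/path/to/some-package.pkg.tar.xz', cfg)
#   db.install(pkg)
#   broken = [p.name for p in db.packages.get() if db.is_broken(p)]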
| Blub/pkgdepdb | pypkgdepdb/__init__.py | Python | bsd-3-clause | 29,250 |
#!/usr/bin/env python
from geonode.settings import GEONODE_APPS
import geonode.settings as settings
import traceback
from geonode.layers.models import Layer
from pprint import pprint
from geonode.cephgeo.models import CephDataObject, LidarCoverageBlock
import subprocess
import ogr
import os
import shutil
import time
import math
import argparse
import sys
# global block_name = ''
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "geonode.settings")
def get_cwd():
cur_path = os.path.realpath(__file__)
if "?" in cur_path:
return cur_path.rpartition("?")[0].rpartition(os.path.sep)[0] + os.path.sep
else:
return cur_path.rpartition(os.path.sep)[0] + os.path.sep
def rename_laz(inDir, outDir):
if not os.path.exists(outDir):
os.makedirs(outDir)
for path, dirs, files in os.walk(inDir, topdown=False):
for las in files:
# if las.endswith(".laz") or las.endswith(".las"):
if las.endswith(".laz"):
typeFile = las.split(".")[-1].upper()
ctr = 0
laz_file_path = os.path.join(path, las)
# get LAZ bounding box/extents
p = subprocess.Popen([os.path.join(get_cwd(), 'lasbb'), '-get_bb',
laz_file_path], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, err = p.communicate()
returncode = p.returncode
if returncode is 0:
tokens = out.split(" ")
minX = float(tokens[1])
minY = float(tokens[2])
maxX = float(tokens[3])
maxY = float(tokens[4])
bbox_center_x = (minX + (maxX - minX) / 2)
bbox_center_y = (minY + (maxY - minY) / 2)
_TILE_SIZE = 1000
tile_x = int(math.floor(bbox_center_x / float(_TILE_SIZE)))
tile_y = int(math.floor(
bbox_center_y / float(_TILE_SIZE))) + 1
#outFN = ''.join(['E',tile_x,'N',tile_y,'_',typeFile,'.',typeFile.lower()])
outFN = 'E{0}N{1}'.format(tile_x, tile_y)
# outPath = os.path.join(outDir,outFN)
print 'OUTPUT PATH ', outFN
else:
print "Error reading extents of [{0}]. Trace from lasbb:\n{1}".format(laz_file_path, out)
print 'TRAVERSAL FINISHED'
def block_name(block_path):
# parses blockname from path
# input format: ../../Agno_Blk5C_20130418
block_name = block_path.split(os.sep)[-1]
if block_name == '':
block_name = block_path.split(os.sep)[-2]
# remove date flown
block_name = block_name.rsplit('_', 1)[0]
print 'BLOCK NAME', block_name
return block_name
def find_in_coverage(block_name):
# find block_name in lidar coverage model
try:
block = LidarCoverageBlock.objects.get(block_name=block_name)
print 'Block in Lidar Coverage'
print 'Block UID:',block.uid
return block.uid
except Exception:
print 'Block not in Lidar Coverage', block_name
return 0
def get_ceph_object():
    pass
if __name__ == '__main__':
    blk_name = ''
print 'PATH.BASENAME ', os.path.basename(__file__)
print 'PATH.JOIN GETCWD LASBB ', os.path.join(get_cwd(), 'lasbb')
# LAZ folder path
# blk_path_agno5A = '/home/geonode/DATA/LAS_FILES'
blk_path = '/home/geonode/DATA/Adjusted_LAZ_Tiles/DREAM/Agno/Agno5C_20130418'
# blk_name = block_name(blk_path)
outDir = '/home/geonode/DATA/Output/'
print 'Rename Laz'
# rename_laz(blk_path, outDir)
inDir = os.path.abspath(
'/home/geonode/Work/Data/Renamed/Agno/Agno_Blk5C_20130418')
if not os.path.isdir(inDir):
print 'Input directory error!'
# walk through all .laz
for path, dirs, files in os.walk(inDir, topdown=False):
        blk_name = block_name(path)
        block_uid = find_in_coverage(blk_name)
if block_uid > 0:
# parse laz files
for laz in files:
if laz.endswith('.laz'):
print 'LAZ filename:', laz
# parse gridref from filename
# filename: Gridref_datatype_etc..
gridref = laz.split('.')[0].split('_')[0]
print 'Gridref:', gridref
else:
# write in logfile
            print 'Write ' + blk_name + ' in log file'
| PhilLidar-DAD/geonode | scripts/utils/geometadata/rename_laz.py | Python | gpl-3.0 | 4,534 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-12-03 11:12
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('sigad', '0018_auto_20171010_1424'),
]
operations = [
migrations.AlterField(
model_name='permissionsuserdocumento',
name='permission',
field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='auth.Permission', verbose_name='Permissão'),
),
]
| cmjatai/cmj | cmj/sigad/migrations/0019_auto_20171203_0912.py | Python | gpl-3.0 | 619 |
# coding=utf-8
"""
The SNMPRawCollector is designed for collecting data from SNMP-enabled devices,
using a set of specified OIDs.
#### Configuration
Below is an example configuration for the SNMPRawCollector. The collector
can collect data from any number of devices by adding configuration sections
under the *devices* header. By default the collector will collect every 60
seconds. This might be a bit excessive and put unnecessary load on the
devices being polled. You may wish to change this to every 300 seconds. However
you will need to modify your graphite data retentions to handle this properly.
```
# Options for SNMPRawCollector
enabled = True
interval = 60
[devices]
# Start the device configuration
# Note: this name will be used in the metric path.
[[my-identification-for-this-host]]
host = localhost
port = 161
community = public
# Start the OID list for this device
# Note: the value part will be used in the metric path.
[[[oids]]]
1.3.6.1.4.1.2021.10.1.3.1 = cpu.load.1min
1.3.6.1.4.1.2021.10.1.3.2 = cpu.load.5min
1.3.6.1.4.1.2021.10.1.3.3 = cpu.load.15min
# If you want another host, you can. But you probably won't need it.
[[another-identification]]
host = router1.example.com
port = 161
community = public
[[[oids]]]
oid = metric.path
oid = metric.path
```
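Metric paths are built as <path_prefix>.<device>.<path_suffix>.<metric name>,
so with the default path_prefix ('servers') and path_suffix ('snmp') the first
OID above is published as:
servers.my-identification-for-this-host.snmp.cpu.load.1min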
Note: If you modify the SNMPRawCollector configuration, you will need to
restart diamond.
#### Dependencies
* pysmnp (which depends on pyasn1 0.1.7 and pycrypto)
"""
import os
import sys
import time
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)),
'snmp'))
from snmp import SNMPCollector as parent_SNMPCollector
from diamond.metric import Metric
class SNMPRawCollector(parent_SNMPCollector):
def __init__(self, *args, **kwargs):
super(SNMPRawCollector, self).__init__(*args, **kwargs)
# list to save non-existing oid's per device, to avoid repetition of
# errors in logging. restart diamond/collector to flush this
self.skip_list = []
def get_default_config(self):
"""
Override SNMPCollector.get_default_config method to provide
default_config for the SNMPInterfaceCollector
"""
default_config = super(SNMPRawCollector,
self).get_default_config()
default_config.update({
'oids': {},
'path_prefix': 'servers',
'path_suffix': 'snmp',
})
return default_config
def _precision(self, value):
"""
Return the precision of the number
"""
value = str(value)
decimal = value.rfind('.')
if decimal == -1:
return 0
return len(value) - decimal - 1
def _skip(self, device, oid, reason=None):
self.skip_list.append((device, oid))
if reason is not None:
self.log.warn('Muted \'{0}\' on \'{1}\', because: {2}'.format(
oid, device, reason))
def _get_value_walk(self, device, oid, host, port, community):
data = self.walk(oid, host, port, community)
if data is None:
self._skip(device, oid, 'device down (#2)')
return
self.log.debug('Data received from WALK \'{0}\': [{1}]'.format(
device, data))
if len(data) != 1:
self._skip(
device,
oid,
'unexpected response, data has {0} entries'.format(
len(data)))
return
# because we only allow 1-key dicts, we can pick with absolute index
value = data.items()[0][1]
return value
def _get_value(self, device, oid, host, port, community):
data = self.get(oid, host, port, community)
if data is None:
self._skip(device, oid, 'device down (#1)')
return
self.log.debug('Data received from GET \'{0}\': [{1}]'.format(
device, data))
if len(data) == 0:
self._skip(device, oid, 'empty response, device down?')
return
if oid not in data:
# oid is not even in hierarchy, happens when using 9.9.9.9
# but not when using 1.9.9.9
self._skip(device, oid, 'no object at OID (#1)')
return
value = data[oid]
if value == 'No Such Object currently exists at this OID':
self._skip(device, oid, 'no object at OID (#2)')
return
if value == 'No Such Instance currently exists at this OID':
return self._get_value_walk(device, oid, host, port, community)
return value
def collect_snmp(self, device, host, port, community):
"""
Collect SNMP interface data from device
"""
self.log.debug(
'Collecting raw SNMP statistics from device \'{0}\''.format(device))
dev_config = self.config['devices'][device]
if 'oids' in dev_config:
for oid, metricName in dev_config['oids'].items():
if (device, oid) in self.skip_list:
self.log.debug(
'Skipping OID \'{0}\' ({1}) on device \'{2}\''.format(
oid, metricName, device))
continue
timestamp = time.time()
value = self._get_value(device, oid, host, port, community)
if value is None:
continue
self.log.debug(
'\'{0}\' ({1}) on device \'{2}\' - value=[{3}]'.format(
oid, metricName, device, value))
path = '.'.join([self.config['path_prefix'], device,
self.config['path_suffix'], metricName])
metric = Metric(path=path, value=value, timestamp=timestamp,
precision=self._precision(value),
metric_type='GAUGE')
self.publish_metric(metric)
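# Example (illustrative, based on collect_snmp() and the defaults in
# get_default_config above): with the configuration shown in the module
# docstring, the OID 1.3.6.1.4.1.2021.10.1.3.1 polled from
# 'my-identification-for-this-host' is published as a GAUGE metric at
#   servers.my-identification-for-this-host.snmp.cpu.load.1min
# i.e. <path_prefix>.<device>.<path_suffix>.<metricName>.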
| disqus/Diamond | src/collectors/snmpraw/snmpraw.py | Python | mit | 6,089 |
##
# Copyright 2020 NVIDIA
#
# This file is triple-licensed under GPLv2 (see below), MIT, and
# BSD three-clause licenses.
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for XALT, implemented as an easyblock
@author: Scott McMillan (NVIDIA)
"""
import os
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyconfig import CUSTOM, MANDATORY
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root
from easybuild.tools.systemtools import get_shared_lib_ext
class EB_XALT(ConfigureMake):
"""Support for building and installing XALT."""
@staticmethod
def extra_options():
extra_vars = {
'config_py': [None, "XALT site filter file", MANDATORY],
'executable_tracking': [True, "Enable executable tracking", CUSTOM],
'gpu_tracking': [None, "Enable GPU tracking", CUSTOM],
'logging_url': [None, "Logging URL for transmission", CUSTOM],
'mysql': [False, "Build with MySQL support", CUSTOM],
'scalar_sampling': [True, "Enable scalar sampling", CUSTOM],
'static_cxx': [False, "Statically link libstdc++ and libgcc_s", CUSTOM],
'syshost': [None, "System name", MANDATORY],
            'transmission': [None, "Data transmission method", MANDATORY],
'file_prefix': [None, "XALT record files prefix", CUSTOM],
}
return ConfigureMake.extra_options(extra_vars)
def configure_step(self):
"""Custom configuration step for XALT."""
# By default, XALT automatically appends 'xalt/<version>' to the
# prefix, i.e., --prefix=/opt will actually install in
# /opt/xalt/<version>. To precisely control the install prefix and
# not append anything to the prefix, use the configure option
# '--with-siteControlledPrefix=yes'.
# See https://xalt.readthedocs.io/en/latest/050_install_and_test.html
self.cfg.update('configopts', '--with-siteControlledPrefix=yes')
# XALT site filter config file is mandatory
config_py = self.cfg['config_py']
if config_py:
if os.path.exists(config_py):
self.cfg.update('configopts', '--with-config=%s' % config_py)
else:
raise EasyBuildError("Specified XALT configuration file %s does not exist!", config_py)
else:
error_msg = "Location of XALT configuration file must be specified via 'config_py' easyconfig parameter. "
error_msg += "You can edit the easyconfig file, or use 'eb --try-amend=config_py=<path>'. "
error_msg += "See https://xalt.readthedocs.io/en/latest/030_site_filtering.html for more information."
raise EasyBuildError(error_msg)
# XALT system name is mandatory
if self.cfg['syshost']:
self.cfg.update('configopts', '--with-syshostConfig=%s' % self.cfg['syshost'])
else:
error_msg = "The name of the system must be specified via the 'syshost' easyconfig parameter. "
error_msg += "You can edit the easyconfig file, or use 'eb --try-amend=syshost=<string>'. "
error_msg += "See https://xalt.readthedocs.io/en/latest/020_site_configuration.html for more information."
raise EasyBuildError(error_msg)
# Transmission method is mandatory
if self.cfg['transmission']:
self.cfg.update('configopts', '--with-transmission=%s' % self.cfg['transmission'])
else:
error_msg = "The XALT transmission method must be specified via the 'transmission' easyconfig parameter. "
error_msg = "You can edit the easyconfig file, or use 'eb --try-amend=transmission=<string>'. "
error_msg += "See https://xalt.readthedocs.io/en/latest/020_site_configuration.html for more information."
raise EasyBuildError(error_msg)
# GPU tracking
if self.cfg['gpu_tracking'] is True:
# User enabled
self.cfg.update('configopts', '--with-trackGPU=yes')
elif self.cfg['gpu_tracking'] is None:
# Default value, enable GPU tracking if nvml.h is present
# and the CUDA module is loaded
cuda_root = get_software_root('CUDA')
if cuda_root:
nvml_h = os.path.join(cuda_root, "include", "nvml.h")
if os.path.isfile(nvml_h):
self.cfg.update('configopts', '--with-trackGPU=yes')
self.cfg['gpu_tracking'] = True
else:
# User disabled
self.cfg.update('configopts', '--with-trackGPU=no')
# MySQL
if self.cfg['mysql'] is True:
self.cfg.update('configopts', '--with-MySQL=yes')
else:
self.cfg.update('configopts', '--with-MySQL=no')
# If XALT is built with a more recent compiler than the system
# compiler, then XALT likely will depend on symbol versions not
# available in the system libraries. Link statically as a workaround.
if self.cfg['static_cxx'] is True:
self.cfg.update('configopts', 'LDFLAGS="${LDFLAGS} -static-libstdc++ -static-libgcc"')
# XALT file prefix (optional). The default is $HOME/.xalt.d/ which
# entails that record files are stored separately for each user.
# If this option is specified, XALT will write to the specified
# location for every user. The file prefix can also be modified
# after the install using the XALT_FILE_PREFIX environment variable.
if self.cfg['file_prefix']:
self.cfg.update('configopts', '--with-xaltFilePrefix=%s' % self.cfg['file_prefix'])
# Configure
super(EB_XALT, self).configure_step()
def make_module_extra(self, *args, **kwargs):
txt = super(EB_XALT, self).make_module_extra(*args, **kwargs)
txt += self.module_generator.prepend_paths('LD_PRELOAD', 'lib64/libxalt_init.%s' % get_shared_lib_ext())
txt += self.module_generator.set_environment('XALT_DIR', self.installdir)
txt += self.module_generator.set_environment('XALT_ETC_DIR', '%s' % os.path.join(self.installdir, 'etc'))
txt += self.module_generator.set_environment('XALT_EXECUTABLE_TRACKING',
('no', 'yes')[bool(self.cfg['executable_tracking'])])
txt += self.module_generator.set_environment('XALT_GPU_TRACKING',
('no', 'yes')[bool(self.cfg['gpu_tracking'])])
if self.cfg['transmission'].lower() == 'curl' and self.cfg['logging_url']:
txt += self.module_generator.set_environment('XALT_LOGGING_URL', self.cfg['logging_url'])
txt += self.module_generator.set_environment('XALT_SCALAR_SAMPLING',
('no', 'yes')[bool(self.cfg['scalar_sampling'])])
# In order to track containerized executables, bind mount the XALT
# directory in the Singularity container and preload the XALT library
# https://xalt.readthedocs.io/en/latest/050_install_and_test.html#xalt-modulefile
txt += self.module_generator.prepend_paths('SINGULARITY_BINDPATH', '')
txt += self.module_generator.prepend_paths('SINGULARITYENV_LD_PRELOAD',
'lib64/libxalt_init.%s' % get_shared_lib_ext())
return txt
def make_module_req_guess(self):
"""Custom guesses for environment variables"""
return {'COMPILER_PATH': 'bin',
'PATH': 'bin'}
def sanity_check_step(self):
"""Custom sanity check"""
custom_paths = {
'files': ['bin/ld', 'bin/ld.gold', 'bin/xalt_extract_record',
'lib64/libxalt_init.%s' % get_shared_lib_ext()],
'dirs': ['bin', 'libexec', 'sbin'],
}
custom_commands = ['xalt_configuration_report']
super(EB_XALT, self).sanity_check_step(custom_commands=custom_commands,
custom_paths=custom_paths)
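# Example easyconfig fragment (a hedged sketch; the version number, file paths and
# syshost value are illustrative assumptions, while the parameter names come from
# extra_options() above):
#
#   name = 'XALT'
#   version = '2.10.34'
#   easyblock = 'EB_XALT'
#   config_py = '/apps/site/xalt_site_filter.py'   # mandatory site filter file
#   syshost = 'mycluster'                          # mandatory system name
#   transmission = 'file'                          # mandatory transmission method
#   file_prefix = '/apps/xalt/records'             # optional record-file prefix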
| boegel/easybuild-easyblocks | easybuild/easyblocks/x/xalt.py | Python | gpl-2.0 | 9,134 |
from six import iteritems
from six.moves import StringIO
from collections import defaultdict
#from itertools import count
import numpy as np
#from numpy import array
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.field_writer_16 import print_card_16
from pyNastran.bdf.cards.base_card import expand_thru
from pyNastran.bdf.bdf_interface.assign_type import (integer,
components)
def get_spc1_constraint(card):
constraint_id = integer(card, 1, 'constraint_id')
dofs = components(card, 2, 'constraints') # 246 = y; dx, dz dir
node_ids = card.fields(3)
node_ids = expand_thru(node_ids)
assert isinstance(constraint_id, int), constraint_id
return constraint_id, dofs, node_ids
class SPC1(object):
"""
+------+-----+------+--------+--------+--------+--------+--------+-----+
| SPC1 | SID | C | G1 | G2 | G3 | G4 | G5 | G6 |
+------+-----+------+--------+--------+--------+--------+--------+-----+
| | G7 | G8 | G9 | -etc.- | | | | |
+------+-----+------+--------+--------+--------+--------+--------+-----+
+------+-----+------+--------+--------+--------+--------+--------+-----+
| SPC1 | 3 | 246 | 209075 | 209096 | 209512 | 209513 | 209516 | |
+------+-----+------+--------+--------+--------+--------+--------+-----+
| SPC1 | 3 | 2 | 1 | 3 | 10 | 9 | 6 | 5 |
+------+-----+------+--------+--------+--------+--------+--------+-----+
| | 2 | 8 | | | | | | |
+------+-----+------+--------+--------+--------+--------+--------+-----+
+------+-----+-------+-------+--------+--------+--------+--------+-----+
| SPC1 | SID | C | G1 | THRU | G2 | | | |
+------+-----+-------+-------+--------+--------+--------+--------+-----+
| SPC1 | 313 | 12456 | 6 | THRU | 32 | | | |
+------+-----+-------+-------+--------+--------+--------+--------+-----+
"""
type = 'SPC1'
def __init__(self, model):
self.model = model
self.components = defaultdict(list)
self.n = 0
def add(self, constraint_id, dofs, node_ids, comment):
#if comment:
# self.comment = comment
assert isinstance(constraint_id, int), constraint_id
self.constraint_id = constraint_id
self.components[dofs] += node_ids
def add_card(self, card, comment=''):
#if comment:
# self.comment = comment
constraint_id = integer(card, 1, 'conid')
dofs = components(card, 2, 'constraints') # 246 = y; dx, dz dir
node_ids = card.fields(3)
assert isinstance(constraint_id, int), constraint_id
self.constraint_id = constraint_id
self.components[dofs] += node_ids
self.n += 1
#def allocate(self, card_count):
#pass
def build(self):
for comp, nodes_lists in iteritems(self.components):
nodes2 = []
for nodes in nodes_lists:
nodes2 += nodes
nodes2.sort()
self.components[comp] = np.array(nodes2, dtype='int32')
def update(self, maps):
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
nid_map = maps['node']
components = {}
for dof, nids in iteritems(self.components):
components[dof] = [nid_map[nid] for nid in nids]
self.components = components
# TODO: constraint_map...
def write_card(self, bdf_file, size=8):
for comp, nodes in iteritems(self.components):
nodes = np.array(nodes, dtype='int32')
nodes = np.unique(nodes)
dnid = nodes.max() - nodes.min() + 1
nnodes = len(nodes)
#print('dnid=%s nnodes=%s' % (dnid, nnodes))
if dnid == len(nodes):
card = ['SPC1', self.constraint_id, comp, nodes.min(), 'THRU', nodes.max()]
else:
card = ['SPC1', self.constraint_id, comp] + list(nodes)
if size == 8:
bdf_file.write(print_card_8(card))
else:
bdf_file.write(print_card_16(card))
def __repr__(self):
f = StringIO()
self.write_card(f)
return f.getvalue().rstrip()
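# Example (illustrative, following write_card() above): if constraint_id=313 and
# component '12456' holds the consecutive node ids 6..32, the compact form
#   SPC1, 313, 12456, 6, THRU, 32
# is written; a non-consecutive node set is written out as an explicit id list.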
| saullocastro/pyNastran | pyNastran/bdf/dev_vectorized/cards/constraints/spc1.py | Python | lgpl-3.0 | 4,491 |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2012-6 Met Office.
#
# This file is part of Rose, a framework for meteorological suites.
#
# Rose is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rose is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rose. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
import pygtk
pygtk.require('2.0')
import gtk
import rose
class ChoicesListView(gtk.TreeView):
"""Class to hold and display an ordered list of strings.
set_value is a function, accepting a new value string.
get_data is a function that accepts no arguments and returns an
ordered list of included names to display.
handle_search is a function that accepts a name and triggers a
search for it.
title is a string or gtk.Widget displayed as the column header, if
given.
get_custom_menu_items, if given, should be a function that
accepts no arguments and returns a list of gtk.MenuItem-derived
instances. The listview model and current TreeIter will be
available as attributes "_listview_model" and "_listview_iter" set
on each menu item to optionally use during the menu item callbacks
- this means that they can use them to modify the model
information. Menuitems that do this should connect to
"button-press-event", as the model cleanup will take place as a
connect_after to the same event.
"""
def __init__(self, set_value, get_data, handle_search,
title=rose.config_editor.CHOICE_TITLE_INCLUDED,
get_custom_menu_items=lambda: []):
super(ChoicesListView, self).__init__()
self._set_value = set_value
self._get_data = get_data
self._handle_search = handle_search
self._get_custom_menu_items = get_custom_menu_items
self.enable_model_drag_dest(
[('text/plain', 0, 0)], gtk.gdk.ACTION_MOVE)
self.enable_model_drag_source(
gtk.gdk.BUTTON1_MASK, [('text/plain', 0, 0)], gtk.gdk.ACTION_MOVE)
self.connect("button-press-event", self._handle_button_press)
self.connect("drag-data-get", self._handle_drag_get)
self.connect_after("drag-data-received",
self._handle_drag_received)
self.set_rules_hint(True)
self.connect("row-activated", self._handle_activation)
self.show()
col = gtk.TreeViewColumn()
if isinstance(title, gtk.Widget):
col.set_widget(title)
else:
col.set_title(title)
cell_text = gtk.CellRendererText()
cell_text.set_property('editable', True)
cell_text.connect('edited', self._handle_edited)
col.pack_start(cell_text, expand=True)
col.set_cell_data_func(cell_text, self._set_cell_text)
self.append_column(col)
self._populate()
def _handle_activation(self, treeview, path, col):
"""Handle a click on the main list view - start a search."""
iter_ = treeview.get_model().get_iter(path)
name = treeview.get_model().get_value(iter_, 0)
self._handle_search(name)
return False
def _handle_button_press(self, treeview, event):
"""Handle a right click event on the main list view."""
if not hasattr(event, "button") or event.button != 3:
return False
pathinfo = treeview.get_path_at_pos(int(event.x),
int(event.y))
if pathinfo is None:
return False
path, col, cell_x, cell_y = pathinfo
iter_ = treeview.get_model().get_iter(path)
name = treeview.get_model().get_value(iter_, 0)
self._popup_menu(iter_, event)
return False
def _handle_drag_get(self, treeview, drag, sel, info, time):
"""Handle an outgoing drag request."""
model, iter_ = treeview.get_selection().get_selected()
text = model.get_value(iter_, 0)
sel.set_text(text)
model.remove(iter_) # Triggers the 'row-deleted' signal, sets value
if not model.iter_n_children(None):
model.append([rose.config_editor.CHOICE_LABEL_EMPTY])
def _handle_drag_received(self, treeview, drag, x, y, sel, info, time):
"""Handle an incoming drag request."""
if sel.data is None:
return False
drop_info = treeview.get_dest_row_at_pos(x, y)
model = treeview.get_model()
if drop_info:
path, position = drop_info
if (position == gtk.TREE_VIEW_DROP_BEFORE or
position == gtk.TREE_VIEW_DROP_INTO_OR_BEFORE):
model.insert(path[0], [sel.data])
else:
model.insert(path[0] + 1, [sel.data])
else:
model.append([sel.data])
path = None
self._handle_reordering(model, path)
def _handle_edited(self, cell, path, new_text):
"""Handle cell text so it can be edited. """
liststore = self.get_model()
iter_ = liststore.get_iter(path)
liststore.set_value(iter_, 0, new_text)
self._handle_reordering()
return
def _handle_reordering(self, model=None, path=None):
"""Handle a drag-and-drop rearrangement in the main list view."""
if model is None:
model = self.get_model()
ok_values = []
iter_ = model.get_iter_first()
num_entries = model.iter_n_children(None)
while iter_ is not None:
name = model.get_value(iter_, 0)
next_iter = model.iter_next(iter_)
if name == rose.config_editor.CHOICE_LABEL_EMPTY:
if num_entries > 1:
model.remove(iter_)
else:
ok_values.append(name)
iter_ = next_iter
new_value = " ".join(ok_values)
self._set_value(new_value)
def _populate(self):
"""Populate the main list view."""
values = self._get_data()
model = gtk.ListStore(str)
if not values:
values = [rose.config_editor.CHOICE_LABEL_EMPTY]
for value in values:
model.append([value])
model.connect_after("row-deleted", self._handle_reordering)
self.set_model(model)
def _popup_menu(self, iter_, event):
# Pop up a menu for the main list view.
"""Launch a popup menu for add/clone/remove."""
ui_config_string = """<ui> <popup name='Popup'>
<menuitem action="Remove"/>
</popup></ui>"""
text = rose.config_editor.CHOICE_MENU_REMOVE
actions = [("Remove", gtk.STOCK_DELETE, text)]
uimanager = gtk.UIManager()
actiongroup = gtk.ActionGroup('Popup')
actiongroup.add_actions(actions)
uimanager.insert_action_group(actiongroup, pos=0)
uimanager.add_ui_from_string(ui_config_string)
remove_item = uimanager.get_widget('/Popup/Remove')
remove_item.connect("activate",
lambda b: self._remove_iter(iter_))
menu = uimanager.get_widget('/Popup')
for menuitem in self._get_custom_menu_items():
menuitem._listview_model = self.get_model()
menuitem._listview_iter = iter_
menuitem.connect_after(
"button-press-event",
lambda b, e: self._handle_reordering()
)
menu.append(menuitem)
menu.popup(None, None, None, event.button, event.time)
return False
def _remove_iter(self, iter_):
self.get_model().remove(iter_)
if self.get_model() is None:
# Removing the last iter makes get_model return None...
self._populate()
self._handle_reordering()
self._populate()
def _set_cell_text(self, column, cell, model, r_iter):
name = model.get_value(r_iter, 0)
if name == rose.config_editor.CHOICE_LABEL_EMPTY:
cell.set_property("markup", "<i>" + name + "</i>")
else:
cell.set_property("markup", "<b>" + name + "</b>")
def refresh(self):
"""Update the model values."""
self._populate()
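# Example wiring (a hedged sketch; the widget normally edits a space-separated
# variable value, and the callables below are placeholders for the calling code):
#
#   def set_value(new_value):  # receives e.g. "namelist:foo namelist:bar"
#       variable.value = new_value
#
#   listview = ChoicesListView(set_value,
#                              get_data=lambda: variable.value.split(),
#                              handle_search=my_search_func)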
class ChoicesTreeView(gtk.TreeView):
"""Class to hold and display a tree of content.
set_value is a function, accepting a new value string.
get_data is a function that accepts no arguments and returns a
list of included names.
get_available_data is a function that accepts no arguments and
returns a list of available names.
get_groups is a function that accepts a name and a list of
available names and returns groups that supercede name.
get_is_implicit is an optional function that accepts a name and
returns whether the name is implicitly included in the content.
title is a string displayed as the column header, if given.
get_is_included is an optional function that accepts a name and
an optional list of included names to test whether a
name is already included.
"""
def __init__(self, set_value, get_data, get_available_data,
get_groups, get_is_implicit=None,
title=rose.config_editor.CHOICE_TITLE_AVAILABLE,
get_is_included=None):
super(ChoicesTreeView, self).__init__()
# Generate the 'available' sections view.
self._set_value = set_value
self._get_data = get_data
self._get_available_data = get_available_data
self._get_groups = get_groups
self._get_is_implicit = get_is_implicit
self._get_is_included_func = get_is_included
self.set_headers_visible(True)
self.set_rules_hint(True)
self.enable_model_drag_dest(
[('text/plain', 0, 0)], gtk.gdk.ACTION_MOVE)
self.enable_model_drag_source(
gtk.gdk.BUTTON1_MASK, [('text/plain', 0, 0)], gtk.gdk.ACTION_MOVE)
self.connect_after("button-release-event", self._handle_button)
self.connect("drag-begin", self._handle_drag_begin)
self.connect("drag-data-get", self._handle_drag_get)
self.connect("drag-end", self._handle_drag_end)
self._is_dragging = False
model = gtk.TreeStore(str, bool, bool)
self.set_model(model)
col = gtk.TreeViewColumn()
cell_toggle = gtk.CellRendererToggle()
cell_toggle.connect_after("toggled", self._handle_cell_toggle)
col.pack_start(cell_toggle, expand=False)
col.set_cell_data_func(cell_toggle, self._set_cell_state)
self.append_column(col)
col = gtk.TreeViewColumn()
col.set_title(title)
cell_text = gtk.CellRendererText()
col.pack_start(cell_text, expand=True)
col.set_cell_data_func(cell_text, self._set_cell_text)
self.append_column(col)
self.set_expander_column(col)
self.show()
self._populate()
def _get_is_included(self, name, ok_names=None):
if self._get_is_included_func is not None:
return self._get_is_included_func(name, ok_names)
if ok_names is None:
ok_names = self._get_available_data()
return name in ok_names
def _populate(self):
"""Populate the 'available' sections view."""
ok_content_sections = self._get_available_data()
self._ok_content_sections = set(ok_content_sections)
ok_values = self._get_data()
model = self.get_model()
sections_left = list(ok_content_sections)
self._name_iter_map = {}
while sections_left:
name = sections_left.pop(0)
is_included = self._get_is_included(name, ok_values)
groups = self._get_groups(name, ok_content_sections)
if self._get_is_implicit is None:
is_implicit = any(
[self._get_is_included(g, ok_values) for g in groups])
else:
is_implicit = self._get_is_implicit(name)
if groups:
iter_ = model.append(self._name_iter_map[groups[-1]],
[name, is_included, is_implicit])
else:
iter_ = model.append(None, [name, is_included, is_implicit])
self._name_iter_map[name] = iter_
def _realign(self):
"""Refresh the states in the model."""
ok_values = self._get_data()
model = self.get_model()
ok_content_sections = self._get_available_data()
for name, iter_ in self._name_iter_map.items():
is_in_value = self._get_is_included(name, ok_values)
if self._get_is_implicit is None:
groups = self._get_groups(name, ok_content_sections)
is_implicit = any(
[self._get_is_included(g, ok_values) for g in groups])
else:
is_implicit = self._get_is_implicit(name)
if model.get_value(iter_, 1) != is_in_value:
model.set_value(iter_, 1, is_in_value)
if model.get_value(iter_, 2) != is_implicit:
model.set_value(iter_, 2, is_implicit)
def _set_cell_text(self, column, cell, model, r_iter):
"""Set markup for a section depending on its status."""
section_name = model.get_value(r_iter, 0)
is_in_value = model.get_value(r_iter, 1)
is_implicit = model.get_value(r_iter, 2)
r_iter = model.iter_children(r_iter)
while r_iter is not None:
if model.get_value(r_iter, 1):
is_in_value = True
break
r_iter = model.iter_next(r_iter)
if is_in_value:
cell.set_property("markup", "<b>{0}</b>".format(section_name))
cell.set_property("sensitive", True)
elif is_implicit:
cell.set_property("markup", "{0}".format(section_name))
cell.set_property("sensitive", False)
else:
cell.set_property("markup", section_name)
cell.set_property("sensitive", True)
def _set_cell_state(self, column, cell, model, r_iter):
"""Set the check box for a section depending on its status."""
is_in_value = model.get_value(r_iter, 1)
is_implicit = model.get_value(r_iter, 2)
if is_in_value:
cell.set_property("active", True)
cell.set_property("sensitive", True)
elif is_implicit:
cell.set_property("active", True)
cell.set_property("sensitive", False)
else:
cell.set_property("active", False)
cell.set_property("sensitive", True)
if not self._check_can_add(r_iter):
cell.set_property("sensitive", False)
def _handle_drag_begin(self, widget, drag):
self._is_dragging = True
def _handle_drag_end(self, widget, drag):
self._is_dragging = False
def _handle_drag_get(self, treeview, drag, sel, info, time):
"""Handle a drag data get."""
model, iter_ = treeview.get_selection().get_selected()
if not self._check_can_add(iter_):
return False
name = model.get_value(iter_, 0)
sel.set("text/plain", 8, name)
def _check_can_add(self, iter_):
"""Check whether a name can be added to the data."""
model = self.get_model()
if model.get_value(iter_, 1) or model.get_value(iter_, 2):
return False
child_iter = model.iter_children(iter_)
while child_iter is not None:
if (model.get_value(child_iter, 1) or
model.get_value(child_iter, 2)):
return False
child_iter = model.iter_next(child_iter)
return True
def _handle_button(self, treeview, event):
"""Connect a left click on the available section to a toggle."""
if event.button != 1 or self._is_dragging:
return False
pathinfo = treeview.get_path_at_pos(int(event.x),
int(event.y))
if pathinfo is None:
return False
path, col, cell_x, cell_y = pathinfo
iter_ = treeview.get_model().get_iter(path)
name = treeview.get_model().get_value(iter_, 0)
if treeview.get_columns().index(col) == 1:
self._handle_cell_toggle(None, path)
def _handle_cell_toggle(self, cell, path, should_turn_off=None):
"""Change the content variable value here.
cell is not used.
path is the name to turn off or on.
should_turn_off is as follows:
None - toggle based on the cell value
False - toggle on
True - toggle off
"""
text_index = 0
model = self.get_model()
r_iter = model.get_iter(path)
this_name = model.get_value(r_iter, text_index)
ok_values = self._get_data()
model = self.get_model()
can_add = self._check_can_add(r_iter)
should_add = False
if ((should_turn_off is None or should_turn_off) and
self._get_is_included(this_name, ok_values)):
ok_values.remove(this_name)
elif should_turn_off is None or not should_turn_off:
if not can_add:
return False
should_add = True
ok_values = ok_values + [this_name]
else:
self._realign()
return False
model.set_value(r_iter, 1, should_add)
if model.iter_n_children(r_iter):
self._toggle_internal_base(r_iter, this_name, should_add)
self._set_value(" ".join(ok_values))
self._realign()
return False
def _toggle_internal_base(self, base_iter, base_name, added=False):
"""Connect a toggle of a group to its children.
base_iter is the iter pointing to the group
base_name is the name of the group
added is a boolean denoting toggle state
"""
model = self.get_model()
iter_ = model.iter_children(base_iter)
skip_children = False
while iter_ is not None:
model.set_value(iter_, 2, added)
if not skip_children:
next_iter = model.iter_children(iter_)
if skip_children or next_iter is None:
next_iter = model.iter_next(iter_)
skip_children = False
if next_iter is None:
next_iter = model.iter_parent(iter_)
skip_children = True
iter_ = next_iter
return False
def refresh(self):
"""Refresh the model."""
self._realign()
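# Example wiring for the tree view (a hedged sketch; the callables mirror the
# constructor docstring above and the names are placeholders, not Rose API):
#
#   treeview = ChoicesTreeView(set_value,
#                              get_data=lambda: included_names,
#                              get_available_data=lambda: all_known_names,
#                              get_groups=my_get_groups_func)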
| kaday/rose | lib/python/rose/gtk/choice.py | Python | gpl-3.0 | 19,160 |
# -*- coding: utf-8 -*-
import re
import urlparse
from pyload.plugin.Crypter import Crypter
class LixIn(Crypter):
__name = "LixIn"
__type = "crypter"
__version = "0.22"
__pattern = r'http://(?:www\.)?lix\.in/(?P<ID>.+)'
__config = [("use_subfolder" , "bool", "Save package to subfolder" , True),
("subfolder_per_pack", "bool", "Create a subfolder for each package", True)]
__description = """Lix.in decrypter plugin"""
__license = "GPLv3"
__authors = [("spoob", "[email protected]")]
CAPTCHA_PATTERN = r'<img src="(captcha_img\.php\?.*?)"'
SUBMIT_PATTERN = r'value=\'continue.*?\''
LINK_PATTERN = r'name="ifram" src="(.*?)"'
def decrypt(self, pyfile):
url = pyfile.url
m = re.match(self.__pattern, url)
if m is None:
self.error(_("Unable to identify file ID"))
id = m.group('ID')
self.logDebug("File id is %s" % id)
self.html = self.load(url, decode=True)
m = re.search(self.SUBMIT_PATTERN, self.html)
if m is None:
self.error(_("Link doesn't seem valid"))
m = re.search(self.CAPTCHA_PATTERN, self.html)
if m:
for _i in xrange(5):
m = re.search(self.CAPTCHA_PATTERN, self.html)
if m:
self.logDebug("Trying captcha")
captcharesult = self.decryptCaptcha(urlparse.urljoin("http://lix.in/", m.group(1)))
self.html = self.load(url, decode=True,
post={"capt": captcharesult, "submit": "submit", "tiny": id})
else:
self.logDebug("No captcha/captcha solved")
else:
self.html = self.load(url, decode=True, post={"submit": "submit", "tiny": id})
m = re.search(self.LINK_PATTERN, self.html)
if m is None:
self.error(_("Unable to find destination url"))
else:
self.urls = [m.group(1)]
self.logDebug("Found link %s, adding to package" % self.urls[0])
| ardi69/pyload-0.4.10 | pyload/plugin/crypter/LixIn.py | Python | gpl-3.0 | 2,102 |
# -*- coding: utf-8 -*-
# This file is part of emesene.
#
# emesene is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# emesene is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with emesene; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import e3
import gui
import extension
import PyQt4.QtGui as QtGui
import PyQt4.QtCore as QtCore
class StatusButton(QtGui.QToolButton):
'''a button that when clicked displays a popup that allows the user to
select a status'''
NAME = 'Status Button'
DESCRIPTION = 'A button to select the status'
AUTHOR = 'Jose Rostagno'
WEBSITE = 'www.emesene.org'
def __init__(self, session=None):
QtGui.QToolButton.__init__(self, None)
self.session = session
        # a cache of QIcon objects so we do not reload the status images
        # every time the status changes
self.cache_imgs = {}
self.setAutoRaise(True)
StatusMenu = extension.get_default('menu status')
self.menu = StatusMenu(self.set_status)
self.invertStatus = {}
for stat in e3.status.STATUS:
self.invertStatus[unicode(e3.status.STATUS[stat])] = stat
if self.session:
self.status = self.session.account.status
else:
self.status = e3.status.OFFLINE
self.set_status(self.status)
self.menu.triggered.connect(self.statusactionchange)
self.setMenu(self.menu)
# show status menu on button click
self.clicked.connect(self.showMenu)
def statusactionchange(self, action):
status = self.invertStatus[str(action.text())]
self.set_status(status)
def set_status(self, stat):
'''load an image representing a status and store it on cache'''
current_status = -1
if self.session:
current_status = self.session.account.status
if stat not in self.cache_imgs:
qt_icon = QtGui.QIcon(\
gui.theme.image_theme.status_icons[stat])
self.cache_imgs[stat] = qt_icon
else:
qt_icon = self.cache_imgs[stat]
self.setIcon(qt_icon)
if stat not in e3.status.ALL or stat == current_status:
return
self.status = stat
if self.session:
self.session.set_status(stat)
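# Example usage (a hedged sketch; `session` is the e3 session normally supplied by
# the main window and `toolbar` is a placeholder QToolBar owned by the caller):
#
#   button = StatusButton(session)
#   toolbar.addWidget(button)
#   button.set_status(e3.status.BUSY)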
| tiancj/emesene | emesene/gui/qt4ui/widgets/StatusButton.py | Python | gpl-3.0 | 2,828 |
from django.shortcuts import render
from django.template import loader
from django.http import HttpResponse, HttpResponseRedirect
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.contrib.auth import authenticate, logout
from django.contrib.auth import logout as auth_logout
from django.contrib.auth import login as auth_login
from django.contrib.auth.decorators import login_required, permission_required
from .forms import UploadIsolateForm, LoginForm
from .helpers import handle_uploaded_file
from .models import Isolate
def login(request):
if request.method == 'POST':
form = LoginForm(request.POST)
if form.is_valid():
user = authenticate(username=form.cleaned_data['username'], password=form.cleaned_data['password'])
if user is not None and user.is_active: # success
auth_login(request, user)
# redirect
return HttpResponseRedirect('/kelpie/')
else: # authentication failed
return render(request, 'kelpie/login.html', {'form': form, 'message': 'Authentication failed'})
else: # something wrong with the form submission
return render(request, 'kelpie/login.html', {'form': form})
else: # not a post
return render(request, 'kelpie/login.html', {'form': LoginForm() })
@login_required
def logout(request):
auth_logout(request)
return HttpResponseRedirect('/kelpie/')
@login_required
def index(request):
isolates = Isolate.objects.all() # unfiltered
# filtering
if 'f_species' in request.GET and request.GET.get('f_species') != '':
isolates = isolates.filter(species=request.GET.get('f_species')) # species filter
if 'f_st' in request.GET and request.GET.get('f_st') != '':
isolates = isolates.filter(st=request.GET.get('f_st')) # st filter
# sorting
order_by = request.GET.get('o', 'isolate')
isolates = isolates.order_by(order_by)
# paging
paginator = Paginator(isolates, 100)
page = request.GET.get('p')
try:
isolates = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
isolates = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
isolates = paginator.page(paginator.num_pages)
template = loader.get_template('kelpie/index.html')
species_list = ', '.join( [ '"{0}"'.format(x[0]) for x in Isolate.objects.order_by('species').values_list('species').distinct() ] )
st_list = ', '.join( [ '"{0}"'.format(x[0]) for x in Isolate.objects.order_by('st').values_list('st').distinct() ] )
context = {
'isolates': isolates,
'order_by': order_by,
'page': 'main',
'species_list': species_list,
'st_list': st_list,
'f_species': request.GET.get('f_species', ''),
'f_st': request.GET.get('f_st', ''),
}
return HttpResponse(template.render(context, request))
@permission_required('is_superuser')
def upload_file(request):
if request.method == 'POST':
form = UploadIsolateForm(request.POST, request.FILES)
if form.is_valid():
msg = handle_uploaded_file(request.FILES['isolates'])
return HttpResponseRedirect('/kelpie/')
else:
form = UploadIsolateForm()
return render(request, 'kelpie/upload.html', {
'form': form,
'page': 'upload'})
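# Example request (illustrative; the parameter names match the filtering, ordering
# and paging handled in index() above, the values are made up):
#   GET /kelpie/?f_species=Klebsiella&f_st=258&o=-isolate&p=2
# filters by species and ST, orders by isolate descending, and shows page two.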
| MDU-PHL/kelpie | website/kelpie/views.py | Python | gpl-3.0 | 3,501 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""CDS interface."""
from __future__ import absolute_import, print_function
from flask import Blueprint
blueprint = Blueprint(
'cds_deposit',
__name__,
template_folder='templates',
static_folder='static'
)
| sourabhlal/cds | cds/modules/deposit/views.py | Python | gpl-2.0 | 1,197 |
"""
Grafana Bricks page abstraction
"""
from usmqe.web.grafana.auxiliary.pages import GenericChart, \
GenericDropDownList
import usmqe.web.grafana.bricks.models as m_bricks
location = ':3000/dashboard/db/tendrl-gluster-bricks'
class ClusterList(GenericDropDownList):
"""
DropDown list of clusters
"""
_model = m_bricks.ClusterListModel
_label = 'Cluster select list'
def selected_cluster(self):
""" returns selected cluster """
return self.value
def choose_cluster(self, cluster_id):
"""
Select cluster
Parameters:
cluster_id (string): cluster id
"""
self.value = cluster_id
class BrickList(GenericDropDownList):
"""
DropDown list of bricks
"""
_model = m_bricks.BrickListModel
_label = 'Brick select list'
def selected_brick(self):
""" returns selected brick """
return self.value
def choose_brick(self, brick_name):
"""
Select brick
Parameters:
brick_name (string): brick name
"""
self.value = brick_name
class BricksUtilization(GenericChart):
"""
page object for Bricks Utilization panel
"""
_model = m_bricks.BricksUtilizationModel
_label = 'Bricks Utilization panel'
class InodeUtilization(GenericChart):
"""
page object for Inode Utilization panel
"""
_model = m_bricks.InodeUtilizationModel
_label = 'Inode Utilization panel'
| fbalak/usmqe-tests | usmqe/web/grafana/bricks/pages.py | Python | gpl-3.0 | 1,485 |
import antlr3
import testbase
import unittest
class t040bug80(testbase.ANTLRTest):
def setUp(self):
self.compileGrammar()
def lexerClass(self, base):
class TLexer(base):
def recover(self, input, re):
# no error recovery yet, just crash!
raise
return TLexer
def testValid1(self):
cStream = antlr3.StringStream('defined')
lexer = self.getLexer(cStream)
while True:
t = lexer.nextToken()
if t.type == antlr3.EOF:
break
print(t)
if __name__ == '__main__':
unittest.main()
| ekcs/congress | thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t040bug80.py | Python | apache-2.0 | 659 |
"""This module provides the top level user-facing class ``Session``."""
import json
import importlib
import sys
from pybbix.api import Request
DEFAULT_USER = 'Admin'
DEFAULT_PASSWORD = 'zabbix'
PENDING = object()
WAITING = object()
class NotLoggedInError(Exception):
"""Raised when the user has not authenticated before calling the API."""
class PendingRequestError(Exception):
"""Raised when the user tries to do another ``Session`` API request
before dealing with the previous response."""
class NoRequestError(Exception):
"""Raised when user tries to advance the ``Session`` before doing any
requests first."""
class Authentication(Request):
"""The first request that is required before using the API."""
def __init__(self, user=None, password=None):
if user is None:
user = DEFAULT_USER
if password is None:
password = DEFAULT_PASSWORD
self.method = 'user.login'
self.params = {'user': user, 'password': password}
class Session:
"""Zabbix Session."""
def __init__(self):
self._api_cache = {}
self._auth = None
self._state = None
@staticmethod
def login(user, password):
"""Must first authenticate before doing API calls."""
auth = Authentication(user, password)
return auth()
def __getattr__(self, name):
if not self._auth:
raise NotLoggedInError('Must authenticate first. '
'Try calling ``session.login`` '
'with the appropriate args.')
if self._state is PENDING:
raise PendingRequestError('There is a pending request.')
api = self._api_cache.get(name)
if api is None:
cls = _lazy_import(name)
api = self._api_cache[name] = cls()
self._state = PENDING
return api
def __call__(self, response):
if self._state is not PENDING:
raise NoRequestError('Must do a request first.')
response = json.loads(response)
self._state = WAITING
return response
def version(self):
"""Get the version of the Zabbix server."""
# need a temporary sentinel if not authenticated
sentinel = object()
if self._auth is None:
self._auth = sentinel
req = self.apiinfo.version()
if self._auth is sentinel:
self._auth = None
return req
def _lazy_import(name):
try:
module = importlib.import_module('pybbix.api.' + name)
except ImportError:
raise AttributeError('There is no API called {}'.format(name))
sys.modules[name] = module
return getattr(module, name.capitalize())
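# Example flow (a hedged sketch; how the JSON-RPC payload is actually sent to the
# Zabbix server is outside this module, so `my_transport_send` is a placeholder):
#
#   session = Session()
#   request = session.version()        # builds the apiinfo.version request
#   raw = my_transport_send(request)   # send it with your own HTTP client
#   response = session(raw)            # parse the JSON reply, clear PENDING state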
| modulus-sa/pybbix | pybbix/__init__.py | Python | mit | 2,757 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import json
import base64
import numpy as np
import pytest
from zarr.compat import binary_type, text_type, PY2
from zarr.meta import (decode_array_metadata, encode_dtype, decode_dtype, ZARR_FORMAT,
decode_group_metadata, encode_array_metadata)
from zarr.errors import MetadataError
from zarr.codecs import Delta, Zlib, Blosc
def assert_json_equal(expect, actual):
if isinstance(expect, binary_type): # pragma: py3 no cover
expect = text_type(expect, 'ascii')
if isinstance(actual, binary_type):
actual = text_type(actual, 'ascii')
ej = json.loads(expect)
aj = json.loads(actual)
assert ej == aj
def test_encode_decode_array_1():
meta = dict(
shape=(100,),
chunks=(10,),
dtype=np.dtype('<f8'),
compressor=Zlib(1).get_config(),
fill_value=None,
filters=None,
order='C'
)
meta_json = '''{
"chunks": [10],
"compressor": {"id": "zlib", "level": 1},
"dtype": "<f8",
"fill_value": null,
"filters": null,
"order": "C",
"shape": [100],
"zarr_format": %s
}''' % ZARR_FORMAT
# test encoding
meta_enc = encode_array_metadata(meta)
assert_json_equal(meta_json, meta_enc)
# test decoding
meta_dec = decode_array_metadata(meta_enc)
assert ZARR_FORMAT == meta_dec['zarr_format']
assert meta['shape'] == meta_dec['shape']
assert meta['chunks'] == meta_dec['chunks']
assert meta['dtype'] == meta_dec['dtype']
assert meta['compressor'] == meta_dec['compressor']
assert meta['order'] == meta_dec['order']
assert meta_dec['fill_value'] is None
assert meta_dec['filters'] is None
def test_encode_decode_array_2():
# some variations
df = Delta(astype='<u2', dtype='V14')
compressor = Blosc(cname='lz4', clevel=3, shuffle=2)
dtype = np.dtype([('a', '<i4'), ('b', 'S10')])
fill_value = np.zeros((), dtype=dtype)[()]
meta = dict(
shape=(100, 100),
chunks=(10, 10),
dtype=dtype,
compressor=compressor.get_config(),
fill_value=fill_value,
order='F',
filters=[df.get_config()]
)
meta_json = '''{
"chunks": [10, 10],
"compressor": {
"id": "blosc",
"clevel": 3,
"cname": "lz4",
"shuffle": 2,
"blocksize": 0
},
"dtype": [["a", "<i4"], ["b", "|S10"]],
"fill_value": "AAAAAAAAAAAAAAAAAAA=",
"filters": [
{"id": "delta", "astype": "<u2", "dtype": "|V14"}
],
"order": "F",
"shape": [100, 100],
"zarr_format": %s
}''' % ZARR_FORMAT
# test encoding
meta_enc = encode_array_metadata(meta)
assert_json_equal(meta_json, meta_enc)
# test decoding
meta_dec = decode_array_metadata(meta_enc)
assert ZARR_FORMAT == meta_dec['zarr_format']
assert meta['shape'] == meta_dec['shape']
assert meta['chunks'] == meta_dec['chunks']
assert meta['dtype'] == meta_dec['dtype']
assert meta['compressor'] == meta_dec['compressor']
assert meta['order'] == meta_dec['order']
assert fill_value == meta_dec['fill_value']
assert [df.get_config()] == meta_dec['filters']
def test_encode_decode_array_complex():
# some variations
for k in ['c8', 'c16']:
compressor = Blosc(cname='lz4', clevel=3, shuffle=2)
dtype = np.dtype(k)
fill_value = dtype.type(np.nan-1j)
meta = dict(
shape=(100, 100),
chunks=(10, 10),
dtype=dtype,
compressor=compressor.get_config(),
fill_value=fill_value,
order=dtype.char,
filters=[]
)
meta_json = '''{
"chunks": [10, 10],
"compressor": {
"id": "blosc",
"clevel": 3,
"cname": "lz4",
"shuffle": 2,
"blocksize": 0
},
"dtype": "%s",
"fill_value": ["NaN", -1.0],
"filters": [],
"order": "%s",
"shape": [100, 100],
"zarr_format": %s
}''' % (dtype.str, dtype.char, ZARR_FORMAT)
# test encoding
meta_enc = encode_array_metadata(meta)
assert_json_equal(meta_json, meta_enc)
# test decoding
meta_dec = decode_array_metadata(meta_enc)
assert ZARR_FORMAT == meta_dec['zarr_format']
assert meta['shape'] == meta_dec['shape']
assert meta['chunks'] == meta_dec['chunks']
assert meta['dtype'] == meta_dec['dtype']
assert meta['compressor'] == meta_dec['compressor']
assert meta['order'] == meta_dec['order']
# Based off of this SO answer: https://stackoverflow.com/a/49972198
assert np.all(
fill_value.view((np.uint8, fill_value.itemsize)) ==
meta_dec['fill_value'].view((np.uint8, meta_dec['fill_value'].itemsize))
)
assert [] == meta_dec['filters']
def test_encode_decode_array_datetime_timedelta():
# some variations
for k in ['m8[s]', 'M8[s]']:
compressor = Blosc(cname='lz4', clevel=3, shuffle=2)
dtype = np.dtype(k)
fill_value = dtype.type("NaT")
meta = dict(
shape=(100, 100),
chunks=(10, 10),
dtype=dtype,
compressor=compressor.get_config(),
fill_value=fill_value,
order=dtype.char,
filters=[]
)
meta_json = '''{
"chunks": [10, 10],
"compressor": {
"id": "blosc",
"clevel": 3,
"cname": "lz4",
"shuffle": 2,
"blocksize": 0
},
"dtype": "%s",
"fill_value": -9223372036854775808,
"filters": [],
"order": "%s",
"shape": [100, 100],
"zarr_format": %s
}''' % (dtype.str, dtype.char, ZARR_FORMAT)
# test encoding
meta_enc = encode_array_metadata(meta)
assert_json_equal(meta_json, meta_enc)
# test decoding
meta_dec = decode_array_metadata(meta_enc)
assert ZARR_FORMAT == meta_dec['zarr_format']
assert meta['shape'] == meta_dec['shape']
assert meta['chunks'] == meta_dec['chunks']
assert meta['dtype'] == meta_dec['dtype']
assert meta['compressor'] == meta_dec['compressor']
assert meta['order'] == meta_dec['order']
# Based off of this SO answer: https://stackoverflow.com/a/49972198
assert np.all(
fill_value.view((np.uint8, fill_value.itemsize)) ==
meta_dec['fill_value'].view((np.uint8, meta_dec['fill_value'].itemsize))
)
assert [] == meta_dec['filters']
def test_encode_decode_array_dtype_shape():
meta = dict(
shape=(100,),
chunks=(10,),
dtype=np.dtype('(10, 10)<f8'),
compressor=Zlib(1).get_config(),
fill_value=None,
filters=None,
order='C'
)
meta_json = '''{
"chunks": [10],
"compressor": {"id": "zlib", "level": 1},
"dtype": "<f8",
"fill_value": null,
"filters": null,
"order": "C",
"shape": [100, 10, 10],
"zarr_format": %s
}''' % ZARR_FORMAT
# test encoding
meta_enc = encode_array_metadata(meta)
assert_json_equal(meta_json, meta_enc)
# test decoding
meta_dec = decode_array_metadata(meta_enc)
assert ZARR_FORMAT == meta_dec['zarr_format']
# to maintain consistency with numpy unstructured arrays, unpack dimensions into shape
assert meta['shape'] + meta['dtype'].shape == meta_dec['shape']
assert meta['chunks'] == meta_dec['chunks']
# to maintain consistency with numpy unstructured arrays, unpack dtypes
assert meta['dtype'].base == meta_dec['dtype']
assert meta['compressor'] == meta_dec['compressor']
assert meta['order'] == meta_dec['order']
assert meta_dec['fill_value'] is None
assert meta_dec['filters'] is None
def test_encode_decode_array_structured():
meta = dict(
shape=(100,),
chunks=(10,),
dtype=np.dtype('<i8, (10, 10)<f8, (5, 10, 15)u1'),
compressor=Zlib(1).get_config(),
fill_value=None,
filters=None,
order='C'
)
meta_json = '''{
"chunks": [10],
"compressor": {"id": "zlib", "level": 1},
"dtype": [["f0", "<i8"], ["f1", "<f8", [10, 10]], ["f2", "|u1", [5, 10, 15]]],
"fill_value": null,
"filters": null,
"order": "C",
"shape": [100],
"zarr_format": %s
}''' % ZARR_FORMAT
# test encoding
meta_enc = encode_array_metadata(meta)
assert_json_equal(meta_json, meta_enc)
# test decoding
meta_dec = decode_array_metadata(meta_enc)
assert ZARR_FORMAT == meta_dec['zarr_format']
# to maintain consistency with numpy unstructured arrays, unpack dimensions into shape
assert meta['shape'] + meta['dtype'].shape == meta_dec['shape']
assert meta['chunks'] == meta_dec['chunks']
# to maintain consistency with numpy unstructured arrays, unpack dimensions into shape
assert meta['dtype'].base == meta_dec['dtype']
assert meta['compressor'] == meta_dec['compressor']
assert meta['order'] == meta_dec['order']
assert meta_dec['fill_value'] is None
assert meta_dec['filters'] is None
def test_encode_decode_fill_values_nan():
fills = (
(np.nan, "NaN", np.isnan),
(np.NINF, "-Infinity", np.isneginf),
(np.PINF, "Infinity", np.isposinf),
)
for v, s, f in fills:
meta = dict(
shape=(100,),
chunks=(10,),
dtype=np.dtype('<f8'),
compressor=Zlib(1).get_config(),
fill_value=v,
filters=None,
order='C'
)
meta_json = '''{
"chunks": [10],
"compressor": {"id": "zlib", "level": 1},
"dtype": "<f8",
"fill_value": "%s",
"filters": null,
"order": "C",
"shape": [100],
"zarr_format": %s
}''' % (s, ZARR_FORMAT)
# test encoding
meta_enc = encode_array_metadata(meta)
assert_json_equal(meta_json, meta_enc)
# test decoding
meta_dec = decode_array_metadata(meta_enc)
actual = meta_dec['fill_value']
assert f(actual)
def test_encode_decode_fill_values_bytes():
dtype = np.dtype('S10')
fills = b'foo', bytes(10)
for v in fills:
# setup and encode metadata
meta = dict(
shape=(100,),
chunks=(10,),
dtype=dtype,
compressor=Zlib(1).get_config(),
fill_value=v,
filters=None,
order='C'
)
meta_enc = encode_array_metadata(meta)
# define expected metadata encoded as JSON
s = base64.standard_b64encode(v)
if not PY2:
s = s.decode()
meta_json = '''{
"chunks": [10],
"compressor": {"id": "zlib", "level": 1},
"dtype": "|S10",
"fill_value": "%s",
"filters": null,
"order": "C",
"shape": [100],
"zarr_format": %s
}''' % (s, ZARR_FORMAT)
# test encoding
assert_json_equal(meta_json, meta_enc)
# test decoding
meta_dec = decode_array_metadata(meta_enc)
actual = meta_dec['fill_value']
expect = np.array(v, dtype=dtype)[()]
assert expect == actual
def test_decode_array_unsupported_format():
# unsupported format
meta_json = '''{
"zarr_format": %s,
"shape": [100],
"chunks": [10],
"dtype": "<f8",
"compressor": {"id": "zlib", "level": 1},
"fill_value": null,
"order": "C"
}''' % (ZARR_FORMAT - 1)
with pytest.raises(MetadataError):
decode_array_metadata(meta_json)
def test_decode_array_missing_fields():
# missing fields
meta_json = '''{
"zarr_format": %s
}''' % ZARR_FORMAT
with pytest.raises(MetadataError):
decode_array_metadata(meta_json)
def test_encode_decode_dtype():
for dt in ['f8', [('a', 'f8')], [('a', 'f8'), ('b', 'i1')]]:
e = encode_dtype(np.dtype(dt))
s = json.dumps(e) # check JSON serializable
o = json.loads(s)
d = decode_dtype(o)
assert np.dtype(dt) == d
def test_decode_group():
# typical
b = '''{
"zarr_format": %s
}''' % ZARR_FORMAT
meta = decode_group_metadata(b)
assert ZARR_FORMAT == meta['zarr_format']
# unsupported format
b = '''{
"zarr_format": %s
}''' % (ZARR_FORMAT - 1)
with pytest.raises(MetadataError):
decode_group_metadata(b)
| alimanfoo/zarr | zarr/tests/test_meta.py | Python | mit | 13,003 |
#!/usr/bin/env python
# encoding: utf-8
"""
crawler.py
~~~~~~~~~~~~~
Main module: the actual implementation of the crawler.
"""
import re
import time
import logging
import threading
import traceback
from hashlib import md5
from bs4 import BeautifulSoup
from datetime import datetime
from collections import deque
from locale import getdefaultlocale
from urlparse import urljoin,urlparse
from database import Database
from webPage import WebPage
from threadPool import ThreadPool
log = logging.getLogger('spider')
class Crawler(threading.Thread):
def __init__(self, args, queue):
threading.Thread.__init__(self)
        # maximum page depth to crawl
        self.depth = args['depth']
        # current crawl depth, starting at 1
        self.currentDepth = 1
        # keyword, decoded with the console's default encoding
        self.keyword = args['keyword'].decode(getdefaultlocale()[1])
        # database
        self.database = Database(db="bt_tornado")
        # thread pool with the requested number of threads
        self.threadPool = ThreadPool(args['threadNum'])
        # links that have already been visited
        self.visitedHrefs = set()
        # links waiting to be visited
        self.unvisitedHrefs = deque()
        # seed the queue with the start urls
        for url in args['url']:
            self.unvisitedHrefs.append(url)
        # flag marking whether the crawler has started working
        self.isCrawling = False
        # allow or deny which urls the crawler may enter
        self.entryFilter = args['entryFilter']
        # allow or deny which urls are yielded back as results
        self.yieldFilter = args['yieldFilter']
#
self.callbackFilter = args['callbackFilter']
#
self.db = args['db']
self.collection = args['collection']
# communication queue
self.queue = queue
def run(self):
print '\nStart Crawling\n'
if not self._isDatabaseAvaliable():
print 'Error: Unable to open database file.\n'
else:
self.isCrawling = True
self.threadPool.startThreads()
while self.currentDepth < self.depth+1:
                # assign tasks: the thread pool downloads all pages at the current depth concurrently (non-blocking)
                self._assignCurrentDepthTasks ()
                # wait for the pool to finish every task; once the pool is empty, one full depth level has been crawled
                # self.threadPool.taskJoin() could replace the loop below, but then Ctrl-C could not interrupt
while self.threadPool.getTaskLeft():
time.sleep(8)
                print 'Depth %d finished. Visited %d links in total.\n' % (
                    self.currentDepth, len(self.visitedHrefs))
                log.info('Depth %d finished. Total visited links: %d\n' % (
                    self.currentDepth, len(self.visitedHrefs)))
self.currentDepth += 1
self.stop()
def stop(self):
self.isCrawling = False
self.threadPool.stopThreads()
self.database.close()
#use queue to communicate between threads
self.queue.get()
self.queue.task_done()
def getAlreadyVisitedNum(self):
        # visitedHrefs holds links already handed to the task queue, some of which may still be in progress,
        # so the real number of visited links is len(visitedHrefs) minus the tasks still pending
return len(self.visitedHrefs) - self.threadPool.getTaskLeft()
def _assignCurrentDepthTasks(self):
while self.unvisitedHrefs:
url = self.unvisitedHrefs.popleft()
if not self.__entry_filter(url):
self.visitedHrefs.add(url)
continue
            # hand the task over to the task queue
            self.threadPool.putTask(self._taskHandler, url)
            # mark the link as visited (or about to be visited) to avoid fetching the same link twice
self.visitedHrefs.add(url)
def _callback_filter(self, webPage):
#parse the web page to do sth
url , pageSource = webPage.getDatas()
for tmp in self.callbackFilter['List']:
if re.compile(tmp,re.I|re.U).search(url):
self.callbackFilter['func'](webPage)
def _taskHandler(self, url):
        # fetch the page source first, then save it; both are highly blocking operations, so run them in a worker thread
webPage = WebPage(url)
tmp = webPage.fetch()
if tmp:
self._callback_filter(webPage)
self._saveTaskResults(webPage)
self._addUnvisitedHrefs(webPage)
def _saveTaskResults(self, webPage):
url, pageSource = webPage.getDatas()
_id = md5(url).hexdigest()
try:
if self.__yield_filter(url):
query = {"id": _id}
document = {"id": _id, "url":url, "createTime": datetime.now()}
self.database.saveData(query=query, collection=self.collection, document=document)
except Exception, e:
log.error(' URL: %s ' % url + traceback.format_exc())
def _addUnvisitedHrefs(self, webPage):
        '''Add unvisited links: put every valid url into the unvisitedHrefs list.'''
        # filter the links: 1. only keep http/https pages; 2. make sure every link is visited only once
url, pageSource = webPage.getDatas()
hrefs = self._getAllHrefsFromPage(url, pageSource)
for href in hrefs:
if self._isHttpOrHttpsProtocol(href):
if not self._isHrefRepeated(href):
self.unvisitedHrefs.append(href)
def _getAllHrefsFromPage(self, url, pageSource):
        '''Parse the html source, collect every link on the page and return them as a list.'''
hrefs = []
soup = BeautifulSoup(pageSource)
results = soup.find_all('a',href=True)
for a in results:
            # the link must be encoded as utf8, because links with non-ascii parts such as http://aa.com/文件.pdf
            # are not url-encoded automatically by bs4, which would otherwise lead to an encode exception
href = a.get('href').encode('utf8')
if not href.startswith('http'):
                href = urljoin(url, href)  # resolve relative links
hrefs.append(href)
return hrefs
def _isHttpOrHttpsProtocol(self, href):
protocal = urlparse(href).scheme
if protocal == 'http' or protocal == 'https':
return True
return False
def _isHrefRepeated(self, href):
if href in self.visitedHrefs or href in self.unvisitedHrefs:
return True
return False
def _isDatabaseAvaliable(self):
if self.database.isConn():
return True
return False
def __entry_filter(self, checkURL):
        '''
        Entry filter: decides which urls the crawler is allowed to enter and fetch.
        @param checkURL: the url to be checked by the filter
        @type checkURL: string
        @return: True if the url passes the check, otherwise False
        @rtype: bool
        '''
        # if a filter is defined, apply it
        if self.entryFilter:
            if self.entryFilter['Type'] == 'allow':  # allow mode: accept if any rule matches, otherwise reject
result = False
for rule in self.entryFilter['List']:
pattern = re.compile(rule, re.I | re.U)
if pattern.search(checkURL):
result = True
break
return result
            elif self.entryFilter['Type'] == 'deny':  # deny mode: reject if any rule matches, otherwise accept
result = True
for rule in self.entryFilter['List']:
pattern = re.compile(rule, re.I | re.U)
if pattern.search(checkURL):
result = False
break
return result
        # No filter defined: allow by default
return True
def __yield_filter(self, checkURL):
        '''
        Yield filter
        Decides which urls the crawler may yield back to the caller
        @param checkURL: the url handed to the filter for checking
        @type checkURL: string
        @return: True if the url passes the check, otherwise False
        @rtype: bool
        '''
        # If a filter is defined, apply it
        if self.yieldFilter:
            if self.yieldFilter['Type'] == 'allow': # allow mode: allowed as soon as one rule matches, otherwise denied
result = False
for rule in self.yieldFilter['List']:
pattern = re.compile(rule, re.I | re.U)
if pattern.search(checkURL):
result = True
break
return result
            elif self.yieldFilter['Type'] == 'deny': # deny mode: denied as soon as one rule matches, otherwise allowed
result = True
for rule in self.yieldFilter['List']:
pattern = re.compile(rule, re.I | re.U)
if pattern.search(checkURL):
result = False
break
return result
        # No filter defined: allow by default
return True
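# ---------------------------------------------------------------------------
# Hedged note (added for illustration, not part of the original project): the
# entry/yield filters checked above are plain dicts of the form
# {'Type': 'allow' | 'deny', 'List': [regex, ...]}.  For example, an allow
# filter that keeps the crawler on one host and a deny filter that skips
# static assets could look like this (host and patterns are hypothetical):
#
#   entry_filter = {'Type': 'allow', 'List': [r'^https?://example\.com/']}
#   yield_filter = {'Type': 'deny',  'List': [r'\.(jpg|png|css|js)$']}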
| zhkzyth/a-super-fast-crawler | crawler.py | Python | mit | 9,133 |
import logging
import mailchimp
from django.conf import settings
mailchimp_default_list_id = settings.MAILCHIMP_LIST_ID
def add_user(user, mailing_list_id=None):
try:
mailchimp_api = mailchimp.Mailchimp(apikey=settings.MAILCHIMP_API_KEY)
except mailchimp.Error:
logging.error("Missing or invalid MAILCHIMP_API_KEY")
return
list_id = mailing_list_id or mailchimp_default_list_id
if list_id is None:
logging.error("Missing MAILCHIMP_LIST_ID")
return
try:
response = mailchimp_api.lists.subscribe(list_id,
{'email': user.email},
double_optin=False,
update_existing=False,
replace_interests=False)
logging.debug("{} was successfully subscribed to list {}".format(response['email'], list_id))
except mailchimp.ListDoesNotExistError:
logging.error("List {} does not exist".format(list_id))
except mailchimp.ListAlreadySubscribedError:
logging.info("User already subscribed to list {}".format(list_id))
except mailchimp.Error as e:
logging.error("An error occurred: {} - {}".format(e.__class__, e))
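# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; the User namedtuple and the
# addresses are hypothetical): add_user() only needs an object exposing an
# ``email`` attribute plus the MAILCHIMP_API_KEY / MAILCHIMP_LIST_ID settings
# read above.
#
#   from collections import namedtuple
#   User = namedtuple('User', ['email'])
#   add_user(User(email='[email protected]'))                    # default list
#   add_user(User(email='[email protected]'), mailing_list_id='abc123')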
| math-foo/cloudpebble | ide/utils/mailinglist.py | Python | mit | 1,306 |
# This file is part of project Sverchok. It's copyrighted by the contributors
# recorded in the version control history of the file, available from
# its original location https://github.com/nortikin/sverchok/commit/master
#
# SPDX-License-Identifier: GPL3
# License-Filename: LICENSE
import numpy as np
from mathutils import Matrix, Vector
from sverchok.utils.field.scalar import SvScalarField
from sverchok.utils.field.vector import SvVectorField
from sverchok.dependencies import scipy
if scipy is not None:
from scipy.interpolate import Rbf
##################
# #
# Scalar Fields #
# #
##################
class SvRbfScalarField(SvScalarField):
def __init__(self, rbf):
self.rbf = rbf
def evaluate(self, x, y, z):
return self.rbf(x, y, z)
def evaluate_grid(self, xs, ys, zs):
value = self.rbf(xs, ys, zs)
return value
##################
# #
# Vector Fields #
# #
##################
class SvRbfVectorField(SvVectorField):
def __init__(self, rbf, relative = True):
self.rbf = rbf
self.relative = relative
def evaluate(self, x, y, z):
v = self.rbf(x, y, z)
if self.relative:
v = v - np.array([x, y, z])
return v
def evaluate_grid(self, xs, ys, zs):
value = self.rbf(xs, ys, zs)
vx = value[:,0]
vy = value[:,1]
vz = value[:,2]
if self.relative:
vx = vx - xs
vy = vy - ys
vz = vz - zs
return vx, vy, vz
class SvBvhRbfNormalVectorField(SvVectorField):
def __init__(self, bvh, rbf):
self.bvh = bvh
self.rbf = rbf
def evaluate(self, x, y, z):
vertex = Vector((x,y,z))
nearest, normal, idx, distance = self.bvh.find_nearest(vertex)
x0, y0, z0 = nearest
return self.rbf(x0, y0, z0)
def evaluate_grid(self, xs, ys, zs):
def find(v):
nearest, normal, idx, distance = self.bvh.find_nearest(v)
if nearest is None:
raise Exception("No nearest point on mesh found for vertex %s" % v)
x0, y0, z0 = nearest
return self.rbf(x0, y0, z0)
points = np.stack((xs, ys, zs)).T
vectors = np.vectorize(find, signature='(3)->(3)')(points)
R = vectors.T
return R[0], R[1], R[2]
def mesh_field(bm, function, smooth, epsilon, scale, use_verts=True, use_edges=False, use_faces=False):
src_points = []
dst_values = []
if use_verts:
for bm_vert in bm.verts:
src_points.append(tuple(bm_vert.co))
dst_values.append(0.0)
src_points.append(tuple(bm_vert.co + scale * bm_vert.normal))
dst_values.append(1.0)
if use_edges:
for bm_edge in bm.edges:
pt1 = 0.5*(bm_edge.verts[0].co + bm_edge.verts[1].co)
normal = (bm_edge.verts[0].normal + bm_edge.verts[1].normal).normalized()
pt2 = pt1 + scale * normal
src_points.append(tuple(pt1))
dst_values.append(0.0)
src_points.append(tuple(pt2))
dst_values.append(1.0)
if use_faces:
for bm_face in bm.faces:
pt1 = bm_face.calc_center_median()
pt2 = pt1 + scale * bm_face.normal
src_points.append(tuple(pt1))
dst_values.append(0.0)
src_points.append(tuple(pt2))
dst_values.append(1.0)
src_points = np.array(src_points)
dst_values = np.array(dst_values)
xs_from = src_points[:,0]
ys_from = src_points[:,1]
zs_from = src_points[:,2]
rbf = Rbf(xs_from, ys_from, zs_from, dst_values,
function = function,
smooth = smooth,
epsilon = epsilon,
mode = '1-D')
return SvRbfScalarField(rbf)
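# ---------------------------------------------------------------------------
# Hedged sketch (added for illustration, not part of the add-on): an
# SvRbfScalarField can also be built from arbitrary scattered samples,
# mirroring what mesh_field() does with vertex/normal pairs.  Assumes scipy
# is available; the sample data below is made up.
#
#   import numpy as np
#   pts = np.random.rand(20, 3)                      # sample locations
#   vals = pts[:, 2]                                 # sample values
#   rbf = Rbf(pts[:, 0], pts[:, 1], pts[:, 2], vals,
#             function='multiquadric', smooth=0.0, mode='1-D')
#   field = SvRbfScalarField(rbf)
#   field.evaluate(0.5, 0.5, 0.5)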
def register():
pass
def unregister():
pass
| DolphinDream/sverchok | utils/field/rbf.py | Python | gpl-3.0 | 3,912 |
import datetime
import os
import re
import tempfile
from collections import OrderedDict, defaultdict, namedtuple
from itertools import chain
from django.utils.functional import cached_property
import polib
from memoized import memoized
from corehq.apps.app_manager.dbaccessors import (
get_current_app,
get_version_build_id,
)
from corehq.apps.translations.const import MODULES_AND_FORMS_SHEET_NAME
from corehq.apps.translations.models import TransifexBlacklist
Translation = namedtuple('Translation', 'key translation occurrences msgctxt')
Unique_ID = namedtuple('UniqueID', 'type id')
HQ_MODULE_SHEET_NAME = re.compile(r'^menu(\d+)$')
HQ_FORM_SHEET_NAME = re.compile(r'^menu(\d+)_form(\d+)$')
POFileInfo = namedtuple("POFileInfo", "name path")
SKIP_TRANSFEX_STRING = "SKIP TRANSIFEX"
class EligibleForTransifexChecker(object):
def __init__(self, app):
self.app = app
@staticmethod
def exclude_module(module):
return SKIP_TRANSFEX_STRING in module.comment
@staticmethod
def exclude_form(form):
return SKIP_TRANSFEX_STRING in form.comment
def is_label_to_skip(self, form_id, label):
return label in self.get_labels_to_skip()[form_id]
def is_blacklisted(self, module_id, field_type, field_name, translations):
blacklist = self._get_blacklist()
for display_text in chain([''], translations):
try:
return blacklist[self.app.domain][self.app.id][module_id][field_type][field_name][display_text]
except KeyError:
pass
return False
@memoized
def get_labels_to_skip(self):
"""Returns the labels of questions that have the skip string in the comment,
so that those labels are not sent to Transifex later.
If there are questions that share the same label reference (and thus the
same translation), they will be included if any question does not have the
skip string.
"""
def _labels_from_question(question):
ret = {
question.get('label_ref'),
question.get('constraintMsg_ref'),
}
if question.get('options'):
for option in question['options']:
ret.add(option.get('label_ref'))
return ret
labels_to_skip = defaultdict(set)
necessary_labels = defaultdict(set)
for module in self.app.get_modules():
for form in module.get_forms():
questions = form.get_questions(self.app.langs, include_triggers=True,
include_groups=True, include_translations=True)
for question in questions:
if not question.get('label_ref'):
continue
if question['comment'] and SKIP_TRANSFEX_STRING in question['comment']:
labels_to_skip[form.unique_id] |= _labels_from_question(question)
else:
necessary_labels[form.unique_id] |= _labels_from_question(question)
for form_id in labels_to_skip.keys():
labels_to_skip[form_id] = labels_to_skip[form_id] - necessary_labels[form_id]
return labels_to_skip
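        # Illustrative note (added): the mapping returned above has the shape
        #   {form_unique_id: {label_ref, constraintMsg_ref, ...}, ...}
        # i.e. for every form, the set of itext references whose translations
        # should not be pushed to Transifex.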
@memoized
def _get_blacklist(self):
"""
Returns a nested dictionary of blacklisted translations for a given app.
A nested dictionary is used so that search for a translation fails at the
first missing key.
"""
blacklist = {}
for b in TransifexBlacklist.objects.filter(domain=self.app.domain, app_id=self.app.id).all():
blacklist.setdefault(b.domain, {})
blacklist[b.domain].setdefault(b.app_id, {})
blacklist[b.domain][b.app_id].setdefault(b.module_id, {})
blacklist[b.domain][b.app_id][b.module_id].setdefault(b.field_type, {})
blacklist[b.domain][b.app_id][b.module_id][b.field_type].setdefault(b.field_name, {})
blacklist[b.domain][b.app_id][b.module_id][b.field_type][b.field_name][b.display_text] = True
return blacklist
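# Illustrative note (added): the nested dict built by _get_blacklist() is
# probed exactly as in is_blacklisted(), e.g. (all keys below hypothetical):
#
#   blacklist['my-domain']['app-id']['module-id']['list']['name']['Name']
#
# returns True when that entry is blacklisted, while any missing level raises
# KeyError, which is_blacklisted() treats as "not blacklisted".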
class AppTranslationsGenerator(object):
def __init__(self, domain, app_id, version, key_lang, source_lang, lang_prefix,
exclude_if_default=False, use_version_postfix=True):
"""
Generates translations for source/default lang files and also for translated files
:param domain: domain name
:param app_id: app UUID
:param version: version of the app to use, usually the built version. If none, the
current app state is used.
:param key_lang: the lang used to create msgid in PO files. Usually en.
:param source_lang: the lang to create the msgstr in PO files. Should be same as
key lang for source files and the target lang for translated files
:param lang_prefix: usually default_
:param exclude_if_default: set this to skip adding msgstr in case its same as the
default language. For details: https://github.com/dimagi/commcare-hq/pull/20706
:param use_version_postfix: use version number at the end of resource slugs
"""
if key_lang == source_lang and exclude_if_default:
raise Exception("Looks like you are setting up the file for default language "
"and doing that with exclude_if_default is not expected since "
"that would result in empty msgstr and no display for other lang")
self.domain = domain
self.app_id = app_id
self.app = get_current_app(domain, app_id)
self.key_lang = key_lang
self.source_lang = source_lang
self.lang_prefix = lang_prefix
self.exclude_if_default = exclude_if_default
self.version = self.app.version if version is None else version
self.use_version_postfix = use_version_postfix
self.checker = EligibleForTransifexChecker(self.app)
self.headers = dict() # headers for each sheet name
self.sheet_name_to_module_or_form_type_and_id = dict()
self.slug_to_name = defaultdict(dict)
self.slug_to_name[MODULES_AND_FORMS_SHEET_NAME] = {'en': MODULES_AND_FORMS_SHEET_NAME}
self.translations = self._build_translations()
@cached_property
def build_id(self):
if self.version:
return get_version_build_id(self.domain, self.app_id, self.version)
else:
return self.app_id
def _translation_data(self):
# get the translations data
from corehq.apps.translations.app_translations.download import get_bulk_app_sheets_by_name
# simply the rows of data per sheet name
rows = get_bulk_app_sheets_by_name(self.app, eligible_for_transifex_only=True)
# get the translation data headers
from corehq.apps.translations.app_translations.utils import get_bulk_app_sheet_headers
headers = get_bulk_app_sheet_headers(
self.app,
eligible_for_transifex_only=True,
)
for header_row in headers:
self.headers[header_row[0]] = header_row[1]
self._set_sheet_name_to_module_or_form_mapping(rows[MODULES_AND_FORMS_SHEET_NAME])
return rows
def _set_sheet_name_to_module_or_form_mapping(self, all_module_and_form_details):
# iterate the first sheet to get unique ids for forms/modules
sheet_name_column_index = self._get_header_index(MODULES_AND_FORMS_SHEET_NAME, 'menu_or_form')
unique_id_column_index = self._get_header_index(MODULES_AND_FORMS_SHEET_NAME, 'unique_id')
type_column_index = self._get_header_index(MODULES_AND_FORMS_SHEET_NAME, 'Type')
for row in all_module_and_form_details:
self.sheet_name_to_module_or_form_type_and_id[row[sheet_name_column_index]] = Unique_ID(
row[type_column_index],
row[unique_id_column_index]
)
def _generate_module_sheet_name(self, module_index):
"""
receive index of module and convert into name with module unique id
:param module_index: index of module in the app
:return: name like module_moduleUniqueId
"""
_module = self.app.get_module(module_index)
sheet_name = "_".join(["module", _module.unique_id])
self.slug_to_name[_module.unique_id] = _module.name
return sheet_name
def _generate_form_sheet_name(self, module_index, form_index):
"""
receive index of form and module and convert into name with form unique id
:param module_index: index of form's module in the app
:param form_index: index of form in the module
:return: name like form_formUniqueId
"""
_module = self.app.get_module(module_index)
form = _module.get_form(form_index)
sheet_name = "_".join(["form", form.unique_id])
self.slug_to_name[form.unique_id][self.source_lang] = "%s > %s" % (
_module.name.get(self.source_lang, _module.default_name()),
form.name.get(self.source_lang, form.default_name())
)
return sheet_name
def _update_sheet_name_with_unique_id(self, sheet_name):
"""
update sheet name with HQ format like menu0 or menu1_form1 to
a name with unique id of module or form instead
:param sheet_name: name like menu0 or menu1_form1
:return: name like module_moduleUniqueID or form_formUniqueId
"""
if sheet_name == MODULES_AND_FORMS_SHEET_NAME:
return sheet_name
module_sheet_name_match = HQ_MODULE_SHEET_NAME.match(sheet_name)
if module_sheet_name_match:
module_index = int(module_sheet_name_match.groups()[0]) - 1
return self._generate_module_sheet_name(module_index)
form_sheet_name_match = HQ_FORM_SHEET_NAME.match(sheet_name)
if form_sheet_name_match:
indexes = form_sheet_name_match.groups()
module_index, form_index = int(indexes[0]) - 1, int(indexes[1]) - 1
return self._generate_form_sheet_name(module_index, form_index)
raise Exception("Got unexpected sheet name %s" % sheet_name)
def _get_filename(self, sheet_name):
"""
receive sheet name in HQ format and return the name that should be used
to upload on transifex along with module/form unique ID and version postfix
:param sheet_name: name like menu0 or menu1_form1
:return: name like module_moduleUniqueID or form_formUniqueId
"""
sheet_name = self._update_sheet_name_with_unique_id(sheet_name)
if self.version and self.use_version_postfix:
return sheet_name + '_v' + str(self.version)
else:
return sheet_name
def _get_header_index(self, sheet_name, column_name):
for index, _column_name in enumerate(self.headers[sheet_name]):
if _column_name == column_name:
return index
raise Exception("Column not found with name {}".format(column_name))
def filter_invalid_rows_for_form(self, rows, form_id, label_index):
"""
Remove translations from questions that have SKIP TRANSIFEX in the comment
"""
labels_to_skip = self.checker.get_labels_to_skip()[form_id]
valid_rows = []
for i, row in enumerate(rows):
question_label = row[label_index]
if question_label not in labels_to_skip:
valid_rows.append(row)
return valid_rows
@cached_property
def _blacklisted_translations(self):
return TransifexBlacklist.objects.filter(domain=self.domain, app_id=self.app_id).all()
def filter_invalid_rows_for_module(self, rows, module_id, case_property_index,
list_or_detail_index, default_lang_index):
valid_rows = []
for i, row in enumerate(rows):
list_or_detail = row[list_or_detail_index]
case_property = row[case_property_index]
default_lang = row[default_lang_index]
if not self.checker.is_blacklisted(module_id, list_or_detail, case_property, [default_lang]):
valid_rows.append(row)
return valid_rows
def _get_translation_for_sheet(self, sheet_name, rows):
occurrence = None
# a dict mapping of a context to a Translation object with
# multiple occurrences
translations = OrderedDict()
type_and_id = None
key_lang_index = self._get_header_index(sheet_name, self.lang_prefix + self.key_lang)
source_lang_index = self._get_header_index(sheet_name, self.lang_prefix + self.source_lang)
default_lang_index = self._get_header_index(sheet_name, self.lang_prefix + self.app.default_language)
if sheet_name == MODULES_AND_FORMS_SHEET_NAME:
type_index = self._get_header_index(MODULES_AND_FORMS_SHEET_NAME, 'Type')
sheet_name_index = self._get_header_index(MODULES_AND_FORMS_SHEET_NAME, 'menu_or_form')
unique_id_index = self._get_header_index(MODULES_AND_FORMS_SHEET_NAME, 'unique_id')
def occurrence(_row):
                # keep the legacy "module" wording so that translations already present
                # do not expire due to a change in the context string populated by this method
return ':'.join(
[_row[type_index].replace("Menu", "Module"),
_row[sheet_name_index].replace("menu", "module"),
_row[unique_id_index]])
else:
type_and_id = self.sheet_name_to_module_or_form_type_and_id[sheet_name]
if type_and_id.type == "Menu":
case_property_index = self._get_header_index(sheet_name, 'case_property')
list_or_detail_index = self._get_header_index(sheet_name, 'list_or_detail')
def occurrence(_row):
case_property = _row[case_property_index]
# limit case property length to avoid errors at Transifex where there is a limit of 1000
case_property = case_property[:950]
return ':'.join([case_property, _row[list_or_detail_index]])
elif type_and_id.type == "Form":
label_index = self._get_header_index(sheet_name, 'label')
def occurrence(_row):
return _row[label_index]
is_module = type_and_id and type_and_id.type == "Menu"
for index, row in enumerate(rows, 1):
source = row[key_lang_index]
translation = row[source_lang_index]
if self.exclude_if_default:
if translation == row[default_lang_index]:
translation = ''
occurrence_row = occurrence(row)
occurrence_row_and_source = "%s %s" % (occurrence_row, source)
if is_module:
# if there is already a translation with the same context and source,
# just add this occurrence
if occurrence_row_and_source in translations:
translations[occurrence_row_and_source].occurrences.append(
('', index)
)
continue
translations[occurrence_row_and_source] = Translation(
source,
translation,
[('', index)],
occurrence_row)
return list(translations.values())
def get_translations(self):
return OrderedDict(
(filename, _translations_to_po_entries(translations))
for filename, translations in self.translations.items()
)
def _build_translations(self):
"""
:return:
{
sheet_name_with_build_id: [
Translation(key, translation, occurrences),
Translation(key, translation, occurrences),
]
}
"""
translations = OrderedDict()
rows = self._translation_data()
for sheet_name, sheet in rows.items():
file_name = self._get_filename(sheet_name)
translations[file_name] = self._get_translation_for_sheet(
sheet_name, sheet
)
return translations
@property
def metadata(self):
now = str(datetime.datetime.now())
return {
'App-Id': self.app_id,
'PO-Creation-Date': now,
'MIME-Version': '1.0',
'Content-Type': 'text/plain; charset=utf-8',
'Language': self.key_lang,
'Version': self.version
}
class PoFileGenerator(object):
def __init__(self, translations, metadata):
self._generated_files = list() # list of tuples (filename, filepath)
self.translations = translations
self.metadata = metadata
def __enter__(self):
return self
def __exit__(self, *exc_info):
self._cleanup()
def generate_translation_files(self):
for file_name in self.translations:
sheet_translations = self.translations[file_name]
po = polib.POFile()
po.check_for_duplicates = False
po.metadata = self.metadata
po.extend(_translations_to_po_entries(sheet_translations))
temp_file = tempfile.NamedTemporaryFile(delete=False)
po.save(temp_file.name)
self._generated_files.append(POFileInfo(file_name, temp_file.name))
return self._generated_files
def _cleanup(self):
for resource_name, filepath in self._generated_files:
if os.path.exists(filepath):
os.remove(filepath)
self._generated_files = []
def _translations_to_po_entries(translations):
return [
polib.POEntry(
msgid=translation.key,
msgstr=translation.translation or '',
occurrences=translation.occurrences,
msgctxt=translation.msgctxt,
)
for translation in translations
if translation.key
]
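# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; the argument values are
# hypothetical): PoFileGenerator is a context manager that writes one
# temporary .po file per resource and removes them again on exit.
#
#   generator = AppTranslationsGenerator(
#       domain, app_id, version=None,
#       key_lang='en', source_lang='en', lang_prefix='default_')
#   with PoFileGenerator(generator.translations, generator.metadata) as po_gen:
#       for name, path in po_gen.generate_translation_files():
#           ...  # upload each generated file, e.g. to Transifex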
| dimagi/commcare-hq | corehq/apps/translations/generators.py | Python | bsd-3-clause | 18,165 |
# coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Services for handling bulk email calls in DEV MODE."""
from __future__ import annotations
import logging
def permanently_delete_user_from_list(user_email: str) -> None:
"""Logs that the delete request was sent.
Args:
user_email: str. Email id of the user.
"""
logging.info(
'Email ID %s permanently deleted from bulk email provider\'s db. '
'Cannot access API, since this is a dev environment' % user_email)
def add_or_update_user_status(
user_email: str, can_receive_email_updates: bool
) -> bool:
"""Subscribes/unsubscribes an existing user or creates a new user with
correct status in the mailchimp DB.
Args:
user_email: str. Email id of the user.
can_receive_email_updates: bool. Whether they want to be subscribed to
list or not.
Returns:
bool. True to mock successful user creation.
"""
logging.info(
'Updated status of email ID %s\'s bulk email preference in the service '
'provider\'s db to %s. Cannot access API, since this is a dev '
'environment.' % (user_email, can_receive_email_updates))
return True
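# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; the address is hypothetical):
# in dev mode both calls simply log instead of contacting a bulk email
# provider.
#
#   add_or_update_user_status('[email protected]', can_receive_email_updates=True)
#   permanently_delete_user_from_list('[email protected]')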
| brianrodri/oppia | core/platform/bulk_email/dev_mode_bulk_email_services.py | Python | apache-2.0 | 1,785 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# athena.portfolio.risk_manager.py
'''
@since: 2014-11-20
@author: Javier Garcia
@contact: [email protected]
@summary: Handles the risk exposures for the portfolio, according
         to predefined criteria, e.g. VaR, the Kelly criterion, etc.
'''
# TODO: all
pass
| JavierGarciaD/athena | athena/portfolio/risk_manager.py | Python | gpl-3.0 | 332 |
from __future__ import print_function, absolute_import, division
import re
from collections import defaultdict, deque
from numba.config import MACHINE_BITS
from numba import cgutils
from llvmlite import ir, binding as llvm
_word_type = ir.IntType(MACHINE_BITS)
_pointer_type = ir.PointerType(ir.IntType(8))
_meminfo_struct_type = ir.LiteralStructType([
_word_type, # size_t refct
_pointer_type, # dtor_function dtor
_pointer_type, # void *dtor_info
_pointer_type, # void *data
_word_type, # size_t size
])
incref_decref_ty = ir.FunctionType(ir.VoidType(), [_pointer_type])
meminfo_data_ty = ir.FunctionType(_pointer_type, [_pointer_type])
def _define_nrt_meminfo_data(module):
"""
Implement NRT_MemInfo_data in the module. This allows inlined lookup
of the data pointer.
"""
fn = module.get_or_insert_function(meminfo_data_ty,
name="NRT_MemInfo_data")
builder = ir.IRBuilder(fn.append_basic_block())
[ptr] = fn.args
struct_ptr = builder.bitcast(ptr, _meminfo_struct_type.as_pointer())
data_ptr = builder.load(cgutils.gep(builder, struct_ptr, 0, 3))
builder.ret(data_ptr)
def _define_nrt_incref(module, atomic_incr):
"""
Implement NRT_incref in the module
"""
fn_incref = module.get_or_insert_function(incref_decref_ty,
name="NRT_incref")
builder = ir.IRBuilder(fn_incref.append_basic_block())
[ptr] = fn_incref.args
is_null = builder.icmp_unsigned("==", ptr, cgutils.get_null_value(ptr.type))
with cgutils.if_unlikely(builder, is_null):
builder.ret_void()
builder.call(atomic_incr, [builder.bitcast(ptr, atomic_incr.args[0].type)])
builder.ret_void()
def _define_nrt_decref(module, atomic_decr):
"""
Implement NRT_decref in the module
"""
fn_decref = module.get_or_insert_function(incref_decref_ty,
name="NRT_decref")
calldtor = module.add_function(ir.FunctionType(ir.VoidType(), [_pointer_type]),
name="NRT_MemInfo_call_dtor")
builder = ir.IRBuilder(fn_decref.append_basic_block())
[ptr] = fn_decref.args
is_null = builder.icmp_unsigned("==", ptr, cgutils.get_null_value(ptr.type))
with cgutils.if_unlikely(builder, is_null):
builder.ret_void()
newrefct = builder.call(atomic_decr,
[builder.bitcast(ptr, atomic_decr.args[0].type)])
refct_eq_0 = builder.icmp_unsigned("==", newrefct,
ir.Constant(newrefct.type, 0))
with cgutils.if_unlikely(builder, refct_eq_0):
builder.call(calldtor, [ptr])
builder.ret_void()
# Set this to True to measure the overhead of atomic refcounts compared
# to non-atomic.
_disable_atomicity = 0
def _define_atomic_inc_dec(module, op, ordering):
"""Define a llvm function for atomic increment/decrement to the given module
Argument ``op`` is the operation "add"/"sub". Argument ``ordering`` is
the memory ordering. The generated function returns the new value.
"""
ftype = ir.FunctionType(_word_type, [_word_type.as_pointer()])
fn_atomic = ir.Function(module, ftype, name="nrt_atomic_{0}".format(op))
[ptr] = fn_atomic.args
bb = fn_atomic.append_basic_block()
builder = ir.IRBuilder(bb)
ONE = ir.Constant(_word_type, 1)
if not _disable_atomicity:
oldval = builder.atomic_rmw(op, ptr, ONE, ordering=ordering)
        # Perform the operation on the old value so that we can pretend to
        # return the "new" value.
res = getattr(builder, op)(oldval, ONE)
builder.ret(res)
else:
oldval = builder.load(ptr)
newval = getattr(builder, op)(oldval, ONE)
builder.store(newval, ptr)
builder.ret(oldval)
return fn_atomic
def _define_atomic_cmpxchg(module, ordering):
"""Define a llvm function for atomic compare-and-swap.
The generated function is a direct wrapper of the LLVM cmpxchg with the
    difference that an int indicating success (1) or failure (0) is returned
    and the last argument is an output pointer for storing the old value.
Note
----
On failure, the generated function behaves like an atomic load. The loaded
value is stored to the last argument.
"""
ftype = ir.FunctionType(ir.IntType(32), [_word_type.as_pointer(),
_word_type, _word_type,
_word_type.as_pointer()])
fn_cas = ir.Function(module, ftype, name="nrt_atomic_cas")
[ptr, cmp, repl, oldptr] = fn_cas.args
bb = fn_cas.append_basic_block()
builder = ir.IRBuilder(bb)
outtup = builder.cmpxchg(ptr, cmp, repl, ordering=ordering)
old, ok = cgutils.unpack_tuple(builder, outtup, 2)
builder.store(old, oldptr)
builder.ret(builder.zext(ok, ftype.return_type))
return fn_cas
def compile_nrt_functions(ctx):
"""
Compile all LLVM NRT functions and return a library containing them.
The library is created using the given target context.
"""
codegen = ctx.codegen()
library = codegen.create_library("nrt")
# Implement LLVM module with atomic ops
ir_mod = library.create_ir_module("nrt_module")
atomic_inc = _define_atomic_inc_dec(ir_mod, "add", ordering='monotonic')
atomic_dec = _define_atomic_inc_dec(ir_mod, "sub", ordering='monotonic')
_define_atomic_cmpxchg(ir_mod, ordering='monotonic')
_define_nrt_meminfo_data(ir_mod)
_define_nrt_incref(ir_mod, atomic_inc)
_define_nrt_decref(ir_mod, atomic_dec)
library.add_ir_module(ir_mod)
library.finalize()
return library
_regex_incref = re.compile(r'call void @NRT_incref\((.*)\)')
_regex_decref = re.compile(r'call void @NRT_decref\((.*)\)')
_regex_bb = re.compile(r'[-a-zA-Z$._][-a-zA-Z$._0-9]*:')
def remove_redundant_nrt_refct(ll_module):
"""
Remove redundant reference count operations from the
    `llvmlite.binding.ModuleRef`. This parses the ll_module as text, line by
    line, to remove the unnecessary nrt refct pairs within each block.
Note
-----
Should replace this. Not efficient.
"""
# Early escape if NRT_incref is not used
try:
ll_module.get_function('NRT_incref')
except NameError:
return ll_module
incref_map = defaultdict(deque)
decref_map = defaultdict(deque)
scopes = []
# Parse IR module as text
llasm = str(ll_module)
lines = llasm.splitlines()
# Phase 1:
# Find all refct ops and what they are operating on
for lineno, line in enumerate(lines):
# Match NRT_incref calls
m = _regex_incref.match(line.strip())
if m is not None:
incref_map[m.group(1)].append(lineno)
continue
# Match NRT_decref calls
m = _regex_decref.match(line.strip())
if m is not None:
decref_map[m.group(1)].append(lineno)
continue
# Split at BB boundaries
m = _regex_bb.match(line)
if m is not None:
# Push
scopes.append((incref_map, decref_map))
# Reset
incref_map = defaultdict(deque)
decref_map = defaultdict(deque)
# Phase 2:
# Determine which refct ops are unnecessary
to_remove = set()
for incref_map, decref_map in scopes:
# For each value being refct-ed
for val in incref_map.keys():
increfs = incref_map[val]
decrefs = decref_map[val]
# Mark the incref/decref pairs from the tail for removal
ref_pair_ct = min(len(increfs), len(decrefs))
for _ in range(ref_pair_ct):
to_remove.add(increfs.pop())
to_remove.add(decrefs.popleft())
# Phase 3
# Remove all marked instructions
newll = '\n'.join(ln for lno, ln in enumerate(lines) if lno not in
to_remove)
# Regenerate the LLVM module
return llvm.parse_assembly(newll)
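# ---------------------------------------------------------------------------
# Hedged illustration (added, not from the original sources): the peephole
# pass above only drops matching incref/decref tail pairs that operate on the
# same value within one basic block, e.g. both of the calls below would be
# removed, while unmatched calls or calls on different values are kept.
#
#   call void @NRT_incref(i8* %x)
#   ...
#   call void @NRT_decref(i8* %x)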
| ssarangi/numba | numba/runtime/atomicops.py | Python | bsd-2-clause | 8,104 |
import os
class SSHKey(object):
pvkey = None
pubkey = None
pubkey_path = None
pvkey_path = None
default_path = {
'pvkey': "~/.ssh/id_rsa",
'pubkey': "~/.ssh/id_rsa.pub"
}
def __init__(self, path=None):
self.set_pubkey(path)
def set_pubkey(self, path=None):
try:
path = os.path.expanduser(path or self.default_path['pubkey'])
with open(path, "r") as f:
self.pubkey = f.read()
f.close()
self.pubkey_path = path
return True
except Exception as e:
# debug / log
# print (e)
return False
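# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; only the default key path used
# by the class is assumed).
if __name__ == "__main__":
    key = SSHKey()  # tries to read ~/.ssh/id_rsa.pub
    if key.pubkey is not None:
        print("loaded public key from %s" % key.pubkey_path)
    else:
        print("no public key found at the default path")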
| lee212/simpleazure | simpleazure/sshkey.py | Python | gpl-3.0 | 703 |
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Test cmdline.py for coverage.py."""
import pprint
import re
import shlex
import sys
import textwrap
import mock
import coverage
import coverage.cmdline
from coverage.config import CoverageConfig
from coverage.data import CoverageData, CoverageDataFiles
from coverage.misc import ExceptionDuringRun
from tests.coveragetest import CoverageTest, OK, ERR
class BaseCmdLineTest(CoverageTest):
"""Tests of execution paths through the command line interpreter."""
run_in_temp_dir = False
# Make a dict mapping function names to the default values that cmdline.py
# uses when calling the function.
defaults = mock.Mock()
defaults.coverage(
cover_pylib=None, data_suffix=None, timid=None, branch=None,
config_file=True, source=None, include=None, omit=None, debug=None,
concurrency=None,
)
defaults.annotate(
directory=None, ignore_errors=None, include=None, omit=None, morfs=[],
)
defaults.html_report(
directory=None, ignore_errors=None, include=None, omit=None, morfs=[],
title=None,
)
defaults.report(
ignore_errors=None, include=None, omit=None, morfs=[],
show_missing=None, skip_covered=None
)
defaults.xml_report(
ignore_errors=None, include=None, omit=None, morfs=[], outfile=None,
)
DEFAULT_KWARGS = dict((name, kw) for name, _, kw in defaults.mock_calls)
def model_object(self):
"""Return a Mock suitable for use in CoverageScript."""
mk = mock.Mock()
# We'll invoke .coverage as the constructor, and then keep using the
# same object as the resulting coverage object.
mk.coverage.return_value = mk
# The mock needs to get options, but shouldn't need to set them.
config = CoverageConfig()
mk.get_option = config.get_option
return mk
def mock_command_line(self, args, path_exists=None):
"""Run `args` through the command line, with a Mock.
Returns the Mock it used and the status code returned.
"""
m = self.model_object()
m.path_exists.return_value = path_exists
ret = coverage.cmdline.CoverageScript(
_covpkg=m, _run_python_file=m.run_python_file,
_run_python_module=m.run_python_module, _help_fn=m.help_fn,
_path_exists=m.path_exists,
).command_line(shlex.split(args))
return m, ret
def cmd_executes(self, args, code, ret=OK, path_exists=None):
"""Assert that the `args` end up executing the sequence in `code`."""
m1, r1 = self.mock_command_line(args, path_exists=path_exists)
self.assertEqual(r1, ret, "Wrong status: got %r, wanted %r" % (r1, ret))
# Remove all indentation, and change ".foo()" to "m2.foo()".
code = re.sub(r"(?m)^\s+", "", code)
code = re.sub(r"(?m)^\.", "m2.", code)
m2 = self.model_object()
m2.path_exists.return_value = path_exists
code_obj = compile(code, "<code>", "exec")
eval(code_obj, globals(), { 'm2': m2 }) # pylint: disable=eval-used
# Many of our functions take a lot of arguments, and cmdline.py
# calls them with many. But most of them are just the defaults, which
# we don't want to have to repeat in all tests. For each call, apply
# the defaults. This lets the tests just mention the interesting ones.
for name, args, kwargs in m2.method_calls:
for k, v in self.DEFAULT_KWARGS.get(name, {}).items():
if k not in kwargs:
kwargs[k] = v
self.assert_same_method_calls(m1, m2)
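        # Illustrative note (added): a `code` string such as
        #     .coverage()
        #     .load()
        #     .report()
        # is rewritten to "m2.coverage()\nm2.load()\nm2.report()" and executed
        # against a second mock, whose recorded calls are then compared with
        # the calls made by the real command line run.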
def cmd_executes_same(self, args1, args2):
"""Assert that the `args1` executes the same as `args2`."""
m1, r1 = self.mock_command_line(args1)
m2, r2 = self.mock_command_line(args2)
self.assertEqual(r1, r2)
self.assert_same_method_calls(m1, m2)
def assert_same_method_calls(self, m1, m2):
"""Assert that `m1.method_calls` and `m2.method_calls` are the same."""
# Use a real equality comparison, but if it fails, use a nicer assert
# so we can tell what's going on. We have to use the real == first due
# to CmdOptionParser.__eq__
if m1.method_calls != m2.method_calls:
pp1 = pprint.pformat(m1.method_calls)
pp2 = pprint.pformat(m2.method_calls)
self.assertMultiLineEqual(pp1+'\n', pp2+'\n')
def cmd_help(self, args, help_msg=None, topic=None, ret=ERR):
"""Run a command line, and check that it prints the right help.
Only the last function call in the mock is checked, which should be the
help message that we want to see.
"""
m, r = self.mock_command_line(args)
self.assertEqual(r, ret,
"Wrong status: got %s, wanted %s" % (r, ret)
)
if help_msg:
self.assertEqual(m.method_calls[-1],
('help_fn', (help_msg,), {})
)
else:
self.assertEqual(m.method_calls[-1],
('help_fn', (), {'topic':topic})
)
class BaseCmdLineTestTest(BaseCmdLineTest):
"""Tests that our BaseCmdLineTest helpers work."""
def test_assert_same_method_calls(self):
# All the other tests here use self.cmd_executes_same in successful
# ways, so here we just check that it fails.
with self.assertRaises(AssertionError):
self.cmd_executes_same("run", "debug")
class CmdLineTest(BaseCmdLineTest):
"""Tests of the coverage.py command line."""
def test_annotate(self):
# coverage annotate [-d DIR] [-i] [--omit DIR,...] [FILE1 FILE2 ...]
self.cmd_executes("annotate", """\
.coverage()
.load()
.annotate()
""")
self.cmd_executes("annotate -d dir1", """\
.coverage()
.load()
.annotate(directory="dir1")
""")
self.cmd_executes("annotate -i", """\
.coverage()
.load()
.annotate(ignore_errors=True)
""")
self.cmd_executes("annotate --omit fooey", """\
.coverage(omit=["fooey"])
.load()
.annotate(omit=["fooey"])
""")
self.cmd_executes("annotate --omit fooey,booey", """\
.coverage(omit=["fooey", "booey"])
.load()
.annotate(omit=["fooey", "booey"])
""")
self.cmd_executes("annotate mod1", """\
.coverage()
.load()
.annotate(morfs=["mod1"])
""")
self.cmd_executes("annotate mod1 mod2 mod3", """\
.coverage()
.load()
.annotate(morfs=["mod1", "mod2", "mod3"])
""")
def test_combine(self):
# coverage combine with args
self.cmd_executes("combine datadir1", """\
.coverage()
.load()
.combine(["datadir1"])
.save()
""")
# coverage combine without args
self.cmd_executes("combine", """\
.coverage()
.load()
.combine(None)
.save()
""")
def test_combine_doesnt_confuse_options_with_args(self):
# https://bitbucket.org/ned/coveragepy/issues/385/coverage-combine-doesnt-work-with-rcfile
self.cmd_executes("combine --rcfile cov.ini", """\
.coverage(config_file='cov.ini')
.load()
.combine(None)
.save()
""")
self.cmd_executes("combine --rcfile cov.ini data1 data2/more", """\
.coverage(config_file='cov.ini')
.load()
.combine(["data1", "data2/more"])
.save()
""")
def test_debug(self):
self.cmd_help("debug", "What information would you like: data, sys?")
self.cmd_help("debug foo", "Don't know what you mean by 'foo'")
def test_debug_sys(self):
self.command_line("debug sys")
out = self.stdout()
self.assertIn("version:", out)
self.assertIn("data_path:", out)
def test_erase(self):
# coverage erase
self.cmd_executes("erase", """\
.coverage()
.erase()
""")
def test_version(self):
# coverage --version
self.cmd_help("--version", topic="version", ret=OK)
def test_help_option(self):
# coverage -h
self.cmd_help("-h", topic="help", ret=OK)
self.cmd_help("--help", topic="help", ret=OK)
def test_help_command(self):
self.cmd_executes("help", ".help_fn(topic='help')")
def test_cmd_help(self):
self.cmd_executes("run --help",
".help_fn(parser='<CmdOptionParser:run>')")
self.cmd_executes_same("help run", "run --help")
def test_html(self):
# coverage html -d DIR [-i] [--omit DIR,...] [FILE1 FILE2 ...]
self.cmd_executes("html", """\
.coverage()
.load()
.html_report()
""")
self.cmd_executes("html -d dir1", """\
.coverage()
.load()
.html_report(directory="dir1")
""")
self.cmd_executes("html -i", """\
.coverage()
.load()
.html_report(ignore_errors=True)
""")
self.cmd_executes("html --omit fooey", """\
.coverage(omit=["fooey"])
.load()
.html_report(omit=["fooey"])
""")
self.cmd_executes("html --omit fooey,booey", """\
.coverage(omit=["fooey", "booey"])
.load()
.html_report(omit=["fooey", "booey"])
""")
self.cmd_executes("html mod1", """\
.coverage()
.load()
.html_report(morfs=["mod1"])
""")
self.cmd_executes("html mod1 mod2 mod3", """\
.coverage()
.load()
.html_report(morfs=["mod1", "mod2", "mod3"])
""")
self.cmd_executes("html --title=Hello_there", """\
.coverage()
.load()
.html_report(title='Hello_there')
""")
def test_report(self):
# coverage report [-m] [-i] [-o DIR,...] [FILE1 FILE2 ...]
self.cmd_executes("report", """\
.coverage()
.load()
.report(show_missing=None)
""")
self.cmd_executes("report -i", """\
.coverage()
.load()
.report(ignore_errors=True)
""")
self.cmd_executes("report -m", """\
.coverage()
.load()
.report(show_missing=True)
""")
self.cmd_executes("report --omit fooey", """\
.coverage(omit=["fooey"])
.load()
.report(omit=["fooey"])
""")
self.cmd_executes("report --omit fooey,booey", """\
.coverage(omit=["fooey", "booey"])
.load()
.report(omit=["fooey", "booey"])
""")
self.cmd_executes("report mod1", """\
.coverage()
.load()
.report(morfs=["mod1"])
""")
self.cmd_executes("report mod1 mod2 mod3", """\
.coverage()
.load()
.report(morfs=["mod1", "mod2", "mod3"])
""")
self.cmd_executes("report --skip-covered", """\
.coverage()
.load()
.report(skip_covered=True)
""")
def test_run(self):
# coverage run [-p] [-L] [--timid] MODULE.py [ARG1 ARG2 ...]
# run calls coverage.erase first.
self.cmd_executes("run foo.py", """\
.coverage()
.erase()
.start()
.run_python_file('foo.py', ['foo.py'])
.stop()
.save()
""")
# run -a combines with an existing data file before saving.
self.cmd_executes("run -a foo.py", """\
.coverage()
.start()
.run_python_file('foo.py', ['foo.py'])
.stop()
.path_exists('.coverage')
.combine(data_paths=['.coverage'])
.save()
""", path_exists=True)
# run -a doesn't combine anything if the data file doesn't exist.
self.cmd_executes("run -a foo.py", """\
.coverage()
.start()
.run_python_file('foo.py', ['foo.py'])
.stop()
.path_exists('.coverage')
.save()
""", path_exists=False)
# --timid sets a flag, and program arguments get passed through.
self.cmd_executes("run --timid foo.py abc 123", """\
.coverage(timid=True)
.erase()
.start()
.run_python_file('foo.py', ['foo.py', 'abc', '123'])
.stop()
.save()
""")
# -L sets a flag, and flags for the program don't confuse us.
self.cmd_executes("run -p -L foo.py -a -b", """\
.coverage(cover_pylib=True, data_suffix=True)
.erase()
.start()
.run_python_file('foo.py', ['foo.py', '-a', '-b'])
.stop()
.save()
""")
self.cmd_executes("run --branch foo.py", """\
.coverage(branch=True)
.erase()
.start()
.run_python_file('foo.py', ['foo.py'])
.stop()
.save()
""")
self.cmd_executes("run --rcfile=myrc.rc foo.py", """\
.coverage(config_file="myrc.rc")
.erase()
.start()
.run_python_file('foo.py', ['foo.py'])
.stop()
.save()
""")
self.cmd_executes("run --include=pre1,pre2 foo.py", """\
.coverage(include=["pre1", "pre2"])
.erase()
.start()
.run_python_file('foo.py', ['foo.py'])
.stop()
.save()
""")
self.cmd_executes("run --omit=opre1,opre2 foo.py", """\
.coverage(omit=["opre1", "opre2"])
.erase()
.start()
.run_python_file('foo.py', ['foo.py'])
.stop()
.save()
""")
self.cmd_executes("run --include=pre1,pre2 --omit=opre1,opre2 foo.py",
"""\
.coverage(include=["pre1", "pre2"], omit=["opre1", "opre2"])
.erase()
.start()
.run_python_file('foo.py', ['foo.py'])
.stop()
.save()
""")
self.cmd_executes("run --source=quux,hi.there,/home/bar foo.py", """\
.coverage(source=["quux", "hi.there", "/home/bar"])
.erase()
.start()
.run_python_file('foo.py', ['foo.py'])
.stop()
.save()
""")
self.cmd_executes("run --concurrency=gevent foo.py", """\
.coverage(concurrency='gevent')
.erase()
.start()
.run_python_file('foo.py', ['foo.py'])
.stop()
.save()
""")
def test_bad_concurrency(self):
self.command_line("run --concurrency=nothing", ret=ERR)
out = self.stdout()
self.assertIn("option --concurrency: invalid choice: 'nothing'", out)
def test_run_debug(self):
self.cmd_executes("run --debug=opt1 foo.py", """\
.coverage(debug=["opt1"])
.erase()
.start()
.run_python_file('foo.py', ['foo.py'])
.stop()
.save()
""")
self.cmd_executes("run --debug=opt1,opt2 foo.py", """\
.coverage(debug=["opt1","opt2"])
.erase()
.start()
.run_python_file('foo.py', ['foo.py'])
.stop()
.save()
""")
def test_run_module(self):
self.cmd_executes("run -m mymodule", """\
.coverage()
.erase()
.start()
.run_python_module('mymodule', ['mymodule'])
.stop()
.save()
""")
self.cmd_executes("run -m mymodule -qq arg1 arg2", """\
.coverage()
.erase()
.start()
.run_python_module('mymodule', ['mymodule', '-qq', 'arg1', 'arg2'])
.stop()
.save()
""")
self.cmd_executes("run --branch -m mymodule", """\
.coverage(branch=True)
.erase()
.start()
.run_python_module('mymodule', ['mymodule'])
.stop()
.save()
""")
self.cmd_executes_same("run -m mymodule", "run --module mymodule")
def test_run_nothing(self):
self.command_line("run", ret=ERR)
self.assertIn("Nothing to do", self.stdout())
def test_cant_append_parallel(self):
self.command_line("run --append --parallel-mode foo.py", ret=ERR)
self.assertIn("Can't append to data files in parallel mode.", self.stdout())
def test_xml(self):
# coverage xml [-i] [--omit DIR,...] [FILE1 FILE2 ...]
self.cmd_executes("xml", """\
.coverage()
.load()
.xml_report()
""")
self.cmd_executes("xml -i", """\
.coverage()
.load()
.xml_report(ignore_errors=True)
""")
self.cmd_executes("xml -o myxml.foo", """\
.coverage()
.load()
.xml_report(outfile="myxml.foo")
""")
self.cmd_executes("xml -o -", """\
.coverage()
.load()
.xml_report(outfile="-")
""")
self.cmd_executes("xml --omit fooey", """\
.coverage(omit=["fooey"])
.load()
.xml_report(omit=["fooey"])
""")
self.cmd_executes("xml --omit fooey,booey", """\
.coverage(omit=["fooey", "booey"])
.load()
.xml_report(omit=["fooey", "booey"])
""")
self.cmd_executes("xml mod1", """\
.coverage()
.load()
.xml_report(morfs=["mod1"])
""")
self.cmd_executes("xml mod1 mod2 mod3", """\
.coverage()
.load()
.xml_report(morfs=["mod1", "mod2", "mod3"])
""")
def test_no_arguments_at_all(self):
self.cmd_help("", topic="minimum_help", ret=OK)
def test_bad_command(self):
self.cmd_help("xyzzy", "Unknown command: 'xyzzy'")
class CmdLineWithFilesTest(BaseCmdLineTest):
"""Test the command line in ways that need temp files."""
run_in_temp_dir = True
no_files_in_temp_dir = True
def test_debug_data(self):
data = CoverageData()
data.add_lines({
"file1.py": dict.fromkeys(range(1, 18)),
"file2.py": dict.fromkeys(range(1, 24)),
})
data.add_file_tracers({"file1.py": "a_plugin"})
data_files = CoverageDataFiles()
data_files.write(data)
self.command_line("debug data")
self.assertMultiLineEqual(self.stdout(), textwrap.dedent("""\
-- data ------------------------------------------------------
path: FILENAME
has_arcs: False
2 files:
file1.py: 17 lines [a_plugin]
file2.py: 23 lines
""").replace("FILENAME", data_files.filename))
def test_debug_data_with_no_data(self):
data_files = CoverageDataFiles()
self.command_line("debug data")
self.assertMultiLineEqual(self.stdout(), textwrap.dedent("""\
-- data ------------------------------------------------------
path: FILENAME
No data collected
""").replace("FILENAME", data_files.filename))
class CmdLineStdoutTest(BaseCmdLineTest):
"""Test the command line with real stdout output."""
def test_minimum_help(self):
self.command_line("")
out = self.stdout()
self.assertIn("Code coverage for Python.", out)
self.assertLess(out.count("\n"), 4)
def test_version(self):
self.command_line("--version")
out = self.stdout()
self.assertIn("ersion ", out)
self.assertLess(out.count("\n"), 4)
def test_help(self):
self.command_line("help")
out = self.stdout()
self.assertIn("readthedocs.org", out)
self.assertGreater(out.count("\n"), 10)
def test_cmd_help(self):
self.command_line("help run")
out = self.stdout()
self.assertIn("<pyfile>", out)
self.assertIn("--timid", out)
self.assertGreater(out.count("\n"), 10)
def test_error(self):
self.command_line("fooey kablooey", ret=ERR)
out = self.stdout()
self.assertIn("fooey", out)
self.assertIn("help", out)
class CmdMainTest(CoverageTest):
"""Tests of coverage.cmdline.main(), using mocking for isolation."""
run_in_temp_dir = False
class CoverageScriptStub(object):
"""A stub for coverage.cmdline.CoverageScript, used by CmdMainTest."""
def command_line(self, argv):
"""Stub for command_line, the arg determines what it will do."""
if argv[0] == 'hello':
print("Hello, world!")
elif argv[0] == 'raise':
try:
raise Exception("oh noes!")
except:
raise ExceptionDuringRun(*sys.exc_info())
elif argv[0] == 'internalraise':
raise ValueError("coverage is broken")
elif argv[0] == 'exit':
sys.exit(23)
else:
raise AssertionError("Bad CoverageScriptStub: %r"% (argv,))
return 0
def setUp(self):
super(CmdMainTest, self).setUp()
self.old_CoverageScript = coverage.cmdline.CoverageScript
coverage.cmdline.CoverageScript = self.CoverageScriptStub
self.addCleanup(self.cleanup_coverage_script)
def cleanup_coverage_script(self):
"""Restore CoverageScript when the test is done."""
coverage.cmdline.CoverageScript = self.old_CoverageScript
def test_normal(self):
ret = coverage.cmdline.main(['hello'])
self.assertEqual(ret, 0)
self.assertEqual(self.stdout(), "Hello, world!\n")
def test_raise(self):
ret = coverage.cmdline.main(['raise'])
self.assertEqual(ret, 1)
self.assertEqual(self.stdout(), "")
err = self.stderr().split('\n')
self.assertEqual(err[0], 'Traceback (most recent call last):')
self.assertEqual(err[-3], ' raise Exception("oh noes!")')
self.assertEqual(err[-2], 'Exception: oh noes!')
def test_internalraise(self):
with self.assertRaisesRegex(ValueError, "coverage is broken"):
coverage.cmdline.main(['internalraise'])
def test_exit(self):
ret = coverage.cmdline.main(['exit'])
self.assertEqual(ret, 23)
| 7WebPages/coveragepy | tests/test_cmdline.py | Python | apache-2.0 | 23,343 |
# -*- coding: utf-8 -*-
##########################################################################
# #
# Eddy: a graphical editor for the specification of Graphol ontologies #
# Copyright (C) 2015 Daniele Pantaleone <[email protected]> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##################### ##################### #
# #
# Graphol is developed by members of the DASI-lab group of the #
# Dipartimento di Ingegneria Informatica, Automatica e Gestionale #
# A.Ruberti at Sapienza University of Rome: http://www.dis.uniroma1.it #
# #
# - Domenico Lembo <[email protected]> #
# - Valerio Santarelli <[email protected]> #
# - Domenico Fabio Savo <[email protected]> #
# - Daniele Pantaleone <[email protected]> #
# - Marco Console <[email protected]> #
# #
##########################################################################
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from eddy.core.clipboard import Clipboard
from eddy.core.commands.edges import CommandEdgeAdd
from eddy.core.commands.nodes import CommandNodeAdd
from eddy.core.commands.nodes import CommandNodeMove
from eddy.core.commands.labels import CommandLabelMove
from eddy.core.datatypes.graphol import Item, Identity
from eddy.core.datatypes.misc import DiagramMode
from eddy.core.functions.graph import bfs
from eddy.core.functions.misc import snap, partition, first
from eddy.core.functions.signals import connect
from eddy.core.generators import GUID
from eddy.core.items.factory import ItemFactory
from eddy.core.output import getLogger
LOGGER = getLogger()
class Diagram(QtWidgets.QGraphicsScene):
"""
Extension of QtWidgets.QGraphicsScene which implements a single Graphol diagram.
Additionally to built-in signals, this class emits:
* sgnItemAdded: whenever an element is added to the Diagram.
* sgnItemInsertionCompleted: whenever an item 'MANUAL' insertion process is completed.
* sgnItemRemoved: whenever an element is removed from the Diagram.
* sgnModeChanged: whenever the Diagram operational mode (or its parameter) changes.
* sgnUpdated: whenever the Diagram has been updated in any of its parts.
"""
GridSize = 10
KeyMoveFactor = 10
MinSize = 2000
MaxSize = 1000000
SelectionRadius = 4
sgnItemAdded = QtCore.pyqtSignal('QGraphicsScene', 'QGraphicsItem')
sgnItemInsertionCompleted = QtCore.pyqtSignal('QGraphicsItem', int)
sgnItemRemoved = QtCore.pyqtSignal('QGraphicsScene', 'QGraphicsItem')
sgnModeChanged = QtCore.pyqtSignal(DiagramMode)
sgnNodeIdentification = QtCore.pyqtSignal('QGraphicsItem')
sgnUpdated = QtCore.pyqtSignal()
def __init__(self, name, parent):
"""
Initialize the diagram.
:type name: str
:type parent: Project
"""
super().__init__(parent)
self.factory = ItemFactory(self)
self.guid = GUID(self)
self.mode = DiagramMode.Idle
self.modeParam = Item.Undefined
self.name = name
self.pasteX = Clipboard.PasteOffsetX
self.pasteY = Clipboard.PasteOffsetY
self.mo_Node = None
self.mp_Data = None
self.mp_Edge = None
self.mp_Label = None
self.mp_LabelPos = None
self.mp_Node = None
self.mp_NodePos = None
self.mp_Pos = None
connect(self.sgnItemAdded, self.onItemAdded)
connect(self.sgnItemRemoved, self.onItemRemoved)
connect(self.sgnNodeIdentification, self.doNodeIdentification)
#############################################
# FACTORY
#################################
@classmethod
def create(cls, name, size, project):
"""
Build and returns a new Diagram instance, using the given parameters.
:type name: str
:type size: int
:type project: Project
:rtype: Diagram
"""
diagram = Diagram(name, project)
diagram.setSceneRect(QtCore.QRectF(-size / 2, -size / 2, size, size))
diagram.setItemIndexMethod(Diagram.BspTreeIndex)
return diagram
#############################################
# PROPERTIES
#################################
@property
def project(self):
"""
Returns the project this diagram belongs to (alias for Diagram.parent()).
:rtype: Project
"""
return self.parent()
@property
def session(self):
"""
Returns the session this diagram belongs to (alias for Diagram.project.parent()).
:rtype: Session
"""
return self.project.parent()
#############################################
# EVENTS
#################################
def dragEnterEvent(self, dragEvent):
"""
Executed when a dragged element enters the scene area.
:type dragEvent: QGraphicsSceneDragDropEvent
"""
super().dragEnterEvent(dragEvent)
if dragEvent.mimeData().hasFormat('text/plain'):
dragEvent.setDropAction(QtCore.Qt.CopyAction)
dragEvent.accept()
else:
dragEvent.ignore()
def dragMoveEvent(self, dragEvent):
"""
Executed when an element is dragged over the scene.
:type dragEvent: QGraphicsSceneDragDropEvent
"""
super().dragMoveEvent(dragEvent)
if dragEvent.mimeData().hasFormat('text/plain'):
dragEvent.setDropAction(QtCore.Qt.CopyAction)
dragEvent.accept()
else:
dragEvent.ignore()
# noinspection PyTypeChecker
def dropEvent(self, dropEvent):
"""
Executed when a dragged element is dropped on the diagram.
:type dropEvent: QGraphicsSceneDragDropEvent
"""
super().dropEvent(dropEvent)
if dropEvent.mimeData().hasFormat('text/plain'):
snapToGrid = self.session.action('toggle_grid').isChecked()
node = self.factory.create(Item.valueOf(dropEvent.mimeData().text()))
node.setPos(snap(dropEvent.scenePos(), Diagram.GridSize, snapToGrid))
self.session.undostack.push(CommandNodeAdd(self, node))
self.sgnItemInsertionCompleted.emit(node, dropEvent.modifiers())
dropEvent.setDropAction(QtCore.Qt.CopyAction)
dropEvent.accept()
else:
dropEvent.ignore()
# noinspection PyTypeChecker
def mousePressEvent(self, mouseEvent):
"""
Executed when a mouse button is clicked on the scene.
:type mouseEvent: QGraphicsSceneMouseEvent
"""
mouseModifiers = mouseEvent.modifiers()
mouseButtons = mouseEvent.buttons()
mousePos = mouseEvent.scenePos()
if mouseButtons & QtCore.Qt.LeftButton:
if self.mode is DiagramMode.NodeAdd:
#############################################
# NODE INSERTION
#################################
snapToGrid = self.session.action('toggle_grid').isChecked()
node = self.factory.create(Item.valueOf(self.modeParam))
node.setPos(snap(mousePos, Diagram.GridSize, snapToGrid))
self.session.undostack.push(CommandNodeAdd(self, node))
self.sgnItemInsertionCompleted.emit(node, mouseEvent.modifiers())
elif self.mode is DiagramMode.EdgeAdd:
#############################################
# EDGE INSERTION
#################################
node = first(self.items(mousePos, edges=False))
if node:
edge = self.factory.create(Item.valueOf(self.modeParam), source=node)
edge.updateEdge(target=mousePos)
self.mp_Edge = edge
self.addItem(edge)
else:
                # Execute super first since this may change the diagram
                # mode: some actions are handled directly by graphics items
                # (e.g. edge breakpoint move, edge anchor move, node shape
                # resize) and we need to check whether any of them is being
                # performed before handling the event locally.
super().mousePressEvent(mouseEvent)
if self.mode is DiagramMode.Idle:
if mouseModifiers & QtCore.Qt.ShiftModifier:
#############################################
# LABEL MOVE
#################################
item = first(self.items(mousePos, nodes=False, edges=False, labels=True))
if item and item.isMovable():
self.clearSelection()
self.mp_Label = item
self.mp_LabelPos = item.pos()
self.mp_Pos = mousePos
self.setMode(DiagramMode.LabelMove)
else:
#############################################
# ITEM SELECTION
#################################
item = first(self.items(mousePos, labels=True))
if item:
if item.isLabel():
                                # If we are hitting a label, check whether the label
                                # is overlapping its parent item and that item is
                                # also intersecting the current mouse position: if so,
                                # use the parent item as placeholder for the selection.
parent = item.parentItem()
items = self.items(mousePos)
item = parent if parent in items else None
if item:
if mouseModifiers & QtCore.Qt.ControlModifier:
# CTRL => support item multi selection.
item.setSelected(not item.isSelected())
else:
if self.selectedItems():
# Some elements have been already selected in the
# diagram, during a previous mouse press event.
if not item.isSelected():
# There are some items selected but we clicked
# on a node which is not currently selected, so
# make this node the only selected one.
self.clearSelection()
item.setSelected(True)
else:
# No item (nodes or edges) is selected and we just
# clicked on one so make sure to select this item and
# because selectedItems() filters out item Label's,
# clear out the selection on the diagram.
self.clearSelection()
item.setSelected(True)
# If we have some nodes selected we need to prepare data for a
# possible item move operation: we need to make sure to retrieve
                # the node below the mouse cursor that will act as the mouse grabber
# to compute delta movements for each component in the selection.
selected = self.selectedNodes()
if selected:
self.mp_Node = first(self.items(mousePos, edges=False))
if self.mp_Node:
self.mp_NodePos = self.mp_Node.pos()
self.mp_Pos = mousePos
self.mp_Data = self.setupMove(selected)
def mouseMoveEvent(self, mouseEvent):
"""
Executed when then mouse is moved on the scene.
:type mouseEvent: QGraphicsSceneMouseEvent
"""
mouseButtons = mouseEvent.buttons()
mousePos = mouseEvent.scenePos()
if mouseButtons & QtCore.Qt.LeftButton:
if self.mode is DiagramMode.EdgeAdd:
#############################################
# EDGE INSERTION
#################################
if self.isEdgeAdd():
statusBar = self.session.statusBar()
edge = self.mp_Edge
edge.updateEdge(target=mousePos)
previousNode = self.mo_Node
if previousNode:
previousNode.updateNode(selected=False)
currentNode = first(self.items(mousePos, edges=False, skip={edge.source}))
if currentNode:
self.mo_Node = currentNode
pvr = self.project.profile.checkEdge(edge.source, edge, currentNode)
currentNode.updateNode(selected=False, valid=pvr.isValid())
if not pvr.isValid():
statusBar.showMessage(pvr.message())
else:
statusBar.clearMessage()
else:
statusBar.clearMessage()
self.mo_Node = None
self.project.profile.reset()
elif self.mode is DiagramMode.LabelMove:
#############################################
# LABEL MOVE
#################################
if self.isLabelMove():
snapToGrid = self.session.action('toggle_grid').isChecked()
point = self.mp_LabelPos + mousePos - self.mp_Pos
point = snap(point, Diagram.GridSize / 2, snapToGrid)
delta = point - self.mp_LabelPos
self.mp_Label.setPos(self.mp_LabelPos + delta)
else:
if self.mode is DiagramMode.Idle:
if self.mp_Node:
self.setMode(DiagramMode.NodeMove)
if self.mode is DiagramMode.NodeMove:
#############################################
# ITEM MOVEMENT
#################################
if self.isNodeMove():
snapToGrid = self.session.action('toggle_grid').isChecked()
point = self.mp_NodePos + mousePos - self.mp_Pos
point = snap(point, Diagram.GridSize, snapToGrid)
delta = point - self.mp_NodePos
edges = set()
for edge, breakpoints in self.mp_Data['edges'].items():
for i in range(len(breakpoints)):
edge.breakpoints[i] = breakpoints[i] + delta
for node, data in self.mp_Data['nodes'].items():
edges |= set(node.edges)
node.setPos(data['pos'] + delta)
for edge, pos in data['anchors'].items():
node.setAnchor(edge, pos + delta)
for edge in edges:
edge.updateEdge()
super().mouseMoveEvent(mouseEvent)
def mouseReleaseEvent(self, mouseEvent):
"""
Executed when the mouse is released from the scene.
:type mouseEvent: QGraphicsSceneMouseEvent
"""
mouseModifiers = mouseEvent.modifiers()
mouseButton = mouseEvent.button()
mousePos = mouseEvent.scenePos()
if mouseButton == QtCore.Qt.LeftButton:
if self.mode is DiagramMode.EdgeAdd:
#############################################
# EDGE INSERTION
#################################
if self.isEdgeAdd():
edge = self.mp_Edge
edge.source.updateNode(selected=False)
currentNode = first(self.items(mousePos, edges=False, skip={edge.source}))
insertEdge = False
if currentNode:
currentNode.updateNode(selected=False)
pvr = self.project.profile.checkEdge(edge.source, edge, currentNode)
if pvr.isValid():
edge.target = currentNode
insertEdge = True
# We temporarily remove the item from the diagram and we perform the
# insertion using the undo command that will also emit the sgnItemAdded
# signal hence all the widgets will be notified of the edge insertion.
# We do this because while creating the edge we need to display it so the
# user knows what he is connecting, but we don't want to truly insert
# it till it's necessary (when the mouse is released and the validation
# confirms that the generated expression is a valid graphol expression).
self.removeItem(edge)
if insertEdge:
self.session.undostack.push(CommandEdgeAdd(self, edge))
edge.updateEdge()
self.clearSelection()
self.project.profile.reset()
statusBar = self.session.statusBar()
statusBar.clearMessage()
self.sgnItemInsertionCompleted.emit(edge, mouseModifiers)
elif self.mode is DiagramMode.LabelMove:
#############################################
# LABEL MOVE
#################################
if self.isLabelMove():
pos = self.mp_Label.pos()
if self.mp_LabelPos != pos:
item = self.mp_Label.parentItem()
command = CommandLabelMove(self, item, self.mp_LabelPos, pos)
self.session.undostack.push(command)
self.setMode(DiagramMode.Idle)
elif self.mode is DiagramMode.NodeMove:
#############################################
# ITEM MOVEMENT
#################################
if self.isNodeMove():
pos = self.mp_Node.pos()
if self.mp_NodePos != pos:
moveData = self.completeMove(self.mp_Data)
self.session.undostack.push(CommandNodeMove(self, self.mp_Data, moveData))
self.setMode(DiagramMode.Idle)
elif mouseButton == QtCore.Qt.RightButton:
if self.mode is not DiagramMode.SceneDrag:
#############################################
# CONTEXTUAL MENU
#################################
item = first(self.items(mousePos))
if not item:
self.clearSelection()
items = []
else:
items = self.selectedItems()
if item not in items:
self.clearSelection()
item.setSelected(True)
items = [item]
self.mp_Pos = mousePos
menu = self.session.mf.create(self, items, mousePos)
menu.exec_(mouseEvent.screenPos())
super().mouseReleaseEvent(mouseEvent)
self.mo_Node = None
self.mp_Data = None
self.mp_Edge = None
self.mp_Label = None
self.mp_LabelPos = None
self.mp_Node = None
self.mp_NodePos = None
self.mp_Pos = None
#############################################
# SLOTS
#################################
@QtCore.pyqtSlot('QGraphicsItem')
def doNodeIdentification(self, node):
"""
Perform node identification.
:type node: AbstractNode
"""
if Identity.Neutral in node.identities():
func = lambda x: Identity.Neutral in x.identities()
collection = bfs(source=node, filter_on_visit=func)
generators = partition(func, collection)
excluded = set()
strong = set(generators[1])
weak = set(generators[0])
for node in weak:
identification = node.identify()
if identification:
strong = set.union(strong, identification[0])
strong = set.difference(strong, identification[1])
excluded = set.union(excluded, identification[2])
computed = Identity.Neutral
identities = set(x.identity() for x in strong)
if identities:
computed = first(identities)
if len(identities) > 1:
computed = Identity.Unknown
for node in weak - strong - excluded:
node.setIdentity(computed)
@QtCore.pyqtSlot('QGraphicsScene', 'QGraphicsItem')
def onItemAdded(self, _, item):
"""
Executed whenever a connection is created/removed.
:type _: Diagram
:type item: AbstractItem
"""
if item.isEdge():
# Execute the node identification procedure only if one of the
# endpoints we are connecting is currently identified as NEUTRAL.
if (item.source.identity() is Identity.Neutral) ^ (item.target.identity() is Identity.Neutral):
for node in (item.source, item.target):
self.sgnNodeIdentification.emit(node)
@QtCore.pyqtSlot('QGraphicsScene', 'QGraphicsItem')
def onItemRemoved(self, _, item):
"""
Executed whenever a connection is created/removed.
:type _: Diagram
:type item: AbstractItem
"""
if item.isEdge():
# When an edge is removed we may be in the case where
# the ontology is split into 2 subgraphs, hence we need
# to run the identification procedure on the 2 subgraphs.
for node in (item.source, item.target):
self.sgnNodeIdentification.emit(node)
#############################################
# INTERFACE
#################################
def addItem(self, item):
"""
Add an item to the Diagram (will redraw the item to reflect its status).
:type item: AbstractItem
"""
super().addItem(item)
if item.isNode():
item.updateNode()
@staticmethod
def completeMove(moveData, offset=QtCore.QPointF(0, 0)):
"""
Complete item movement, given initialized data for a collection of selected nodes.
:type moveData: dict
:type offset: QPointF
:rtype: dict
"""
return {
'nodes': {
node: {
'anchors': {k: v + offset for k, v in node.anchors.items()},
'pos': node.pos() + offset,
} for node in moveData['nodes']},
'edges': {x: [p + offset for p in x.breakpoints[:]] for x in moveData['edges']}
}
def edge(self, eid):
"""
Returns the edge matching the given id or None if no edge is found.
:type eid: str
:rtype: AbstractEdge
"""
return self.project.edge(self, eid)
def edges(self):
"""
Returns a collection with all the edges in the diagram.
:rtype: set
"""
return self.project.edges(self)
def isEdgeAdd(self):
"""
Returns True if an edge insertion is currently in progress, False otherwise.
:rtype: bool
"""
return self.mode is DiagramMode.EdgeAdd and self.mp_Edge is not None
def isLabelMove(self):
"""
Returns True if a label is currently being moved, False otherwise.
:rtype: bool
"""
return self.mode is DiagramMode.LabelMove and \
self.mp_Label is not None and \
self.mp_LabelPos is not None and \
self.mp_Pos is not None
def isNodeMove(self):
"""
Returns True if a node(s) is currently being moved, False otherwise.
:rtype: bool
"""
return self.mode is DiagramMode.NodeMove and \
self.mp_Data is not None and \
self.mp_Node is not None and \
self.mp_NodePos is not None and \
self.mp_Pos is not None
def isEmpty(self):
"""
Returns True if this diagram contains no elements, False otherwise.
:rtype: bool
"""
return len(self.project.items(self)) == 0
def items(self, mixed=None, mode=QtCore.Qt.IntersectsItemShape, **kwargs):
"""
Returns a collection of items ordered from TOP to BOTTOM.
If no argument is supplied, an unordered list containing all the elements in the diagram is returned.
:type mixed: T <= QPointF | QRectF | QPolygonF | QPainterPath
:type mode: ItemSelectionMode
:rtype: list
"""
if mixed is None:
items = super().items()
elif isinstance(mixed, QtCore.QPointF):
x = mixed.x() - (Diagram.SelectionRadius / 2)
y = mixed.y() - (Diagram.SelectionRadius / 2)
w = Diagram.SelectionRadius
h = Diagram.SelectionRadius
items = super().items(QtCore.QRectF(x, y, w, h), mode)
else:
items = super().items(mixed, mode)
return sorted([
x for x in items
if (kwargs.get('nodes', True) and x.isNode() or
kwargs.get('edges', True) and x.isEdge() or
kwargs.get('labels', False) and x.isLabel()) and
x not in kwargs.get('skip', set())
], key=lambda i: i.zValue(), reverse=True)
def nodes(self):
"""
Returns a collection with all the nodes in the diagram.
:rtype: set
"""
return self.project.nodes(self)
def node(self, nid):
"""
Returns the node matching the given id or None if no node is found.
:type nid: str
:rtype: AbstractNode
"""
return self.project.node(self, nid)
def selectedEdges(self, filter_on_edges=lambda x: True):
"""
Returns the edges selected in the diagram.
:type filter_on_edges: callable
:rtype: list
"""
return [x for x in super().selectedItems() if x.isEdge() and filter_on_edges(x)]
def selectedItems(self, filter_on_items=lambda x: True):
"""
Returns the items selected in the diagram.
:type filter_on_items: callable
:rtype: list
"""
return [x for x in super().selectedItems() if (x.isNode() or x.isEdge()) and filter_on_items(x)]
def selectedNodes(self, filter_on_nodes=lambda x: True):
"""
Returns the nodes selected in the diagram.
:type filter_on_nodes: callable
:rtype: list
"""
return [x for x in super().selectedItems() if x.isNode() and filter_on_nodes(x)]
def setMode(self, mode, param=None):
"""
Set the operational mode.
:type mode: DiagramMode
:type param: Item
"""
if self.mode != mode or self.modeParam != param:
#LOGGER.debug('Diagram mode changed: mode=%s, param=%s', mode, param)
self.mode = mode
self.modeParam = param
self.sgnModeChanged.emit(mode)
@staticmethod
def setupMove(selected):
"""
Compute necessary data to initialize item movement, given a collection of selected nodes.
:type selected: T <= list | tuple
:rtype: dict
"""
# Initialize movement data considering only
# nodes which are involved in the selection.
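# The resulting structure looks like (illustrative, for two nodes n1, n2
# sharing an edge e):
#   {'nodes': {n1: {'anchors': {e: QPointF(...)}, 'pos': QPointF(...)},
#              n2: {'anchors': {}, 'pos': QPointF(...)}},
#    'edges': {e: [QPointF(...), ...]}}   # edge breakpoints to shift along
# completeMove() rebuilds the same structure with an optional offset applied.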
moveData = {
'nodes': {
node: {
'anchors': {k: v for k, v in node.anchors.items()},
'pos': node.pos(),
} for node in selected},
'edges': {}
}
# Figure out if the nodes we are moving are sharing edges:
# if that's the case, move the edge together with the nodes
# (which actually means moving the edge breakpoints).
for node in moveData['nodes']:
for edge in node.edges:
if edge not in moveData['edges']:
if edge.other(node).isSelected():
moveData['edges'][edge] = edge.breakpoints[:]
return moveData
# noinspection PyTypeChecker
def visibleRect(self, margin=0):
"""
Returns a rectangle matching the area of visible items.
:type margin: float
:rtype: QtCore.QRectF
"""
items = self.items()
if items:
x = set()
y = set()
for item in items:
b = item.mapRectToScene(item.boundingRect())
x.update({b.left(), b.right()})
y.update({b.top(), b.bottom()})
return QtCore.QRectF(QtCore.QPointF(min(x) - margin, min(y) - margin), QtCore.QPointF(max(x) + margin, max(y) + margin))
return QtCore.QRectF()
class DiagramMalformedError(RuntimeError):
"""
Raised whenever a given diagram is detected as malformed.
This is not meant to be used as a Syntax Error, but rather to
detect malformation problems like operator nodes with no input, etc.
"""
def __init__(self, item, *args, **kwargs):
"""
Initialize the exception.
:type item: AbstractItem
:type args: iterable
:type kwargs: dict
"""
super().__init__(*args, **kwargs)
self.item = item
class DiagramNotFoundError(RuntimeError):
"""
Raised whenever we are not able to find a diagram given its path.
"""
pass
class DiagramNotValidError(RuntimeError):
"""
Raised whenever a diagram appear to have an invalid structure.
"""
pass
class DiagramParseError(RuntimeError):
"""
Raised whenever it's not possible to parse a Diagram out of a document.
"""
pass | danielepantaleone/eddy | eddy/core/diagram.py | Python | gpl-3.0 | 32,354 |
import logging
import ssl
import sys
import threading
from typing import List
import websocket
from homematicip.base.enums import *
from homematicip.base.helpers import bytes2str
from homematicip.class_maps import *
from homematicip.connection import Connection
from homematicip.device import *
from homematicip.EventHook import *
from homematicip.group import *
from homematicip.rule import *
from homematicip.securityEvent import *
LOGGER = logging.getLogger(__name__)
class Weather(HomeMaticIPObject):
""" this class represents the weather of the home location"""
def __init__(self, connection):
super().__init__(connection)
#:float: the current temperature
self.temperature = 0.0
#:WeatherCondition: the current weather
self.weatherCondition = WeatherCondition.UNKNOWN
#:datetime: the current datime
self.weatherDayTime = WeatherDayTime.DAY
#:float: the minimum temperature of the day
self.minTemperature = 0.0
#:float: the maximum temperature of the day
self.maxTemperature = 0.0
#:float: the current humidity
self.humidity = 0
#:float: the current windspeed
self.windSpeed = 0.0
#:int: the current wind direction in 360° where 0° is north
self.windDirection = 0
#:float: the current vapor amount
self.vaporAmount = 0.0
def from_json(self, js):
super().from_json(js)
self.temperature = js["temperature"]
self.weatherCondition = WeatherCondition.from_str(js["weatherCondition"])
self.weatherDayTime = WeatherDayTime.from_str(js["weatherDayTime"])
self.minTemperature = js["minTemperature"]
self.maxTemperature = js["maxTemperature"]
self.humidity = js["humidity"]
self.windSpeed = js["windSpeed"]
self.windDirection = js["windDirection"]
self.vaporAmount = js["vaporAmount"]
def __str__(self):
return "temperature({}) weatherCondition({}) weatherDayTime({}) minTemperature({}) maxTemperature({}) humidity({}) vaporAmount({}) windSpeed({}) windDirection({})".format(
self.temperature,
self.weatherCondition,
self.weatherDayTime,
self.minTemperature,
self.maxTemperature,
self.humidity,
self.vaporAmount,
self.windSpeed,
self.windDirection,
)
class Location(HomeMaticIPObject):
"""This class represents the possible location"""
def __init__(self, connection):
super().__init__(connection)
#:str: the name of the city
self.city = "London"
#:float: the latitude of the location
self.latitude = 51.509865
#:float: the longitue of the location
self.longitude = -0.118092
def from_json(self, js):
super().from_json(js)
self.city = js["city"]
self.latitude = js["latitude"]
self.longitude = js["longitude"]
def __str__(self):
return "city({}) latitude({}) longitude({})".format(
self.city, self.latitude, self.longitude
)
class Client(HomeMaticIPObject):
"""A client is an app which has access to the access point.
e.g. smartphone, 3rd party apps, google home, conrad connect
"""
def __init__(self, connection):
super().__init__(connection)
#:str: the unique id of the client
self.id = ""
#:str: a human understandable name of the client
self.label = ""
#:str: the id of the home the client belongs to
self.homeId = ""
#:str: the c2c service name
self.c2cServiceIdentifier = ""
#:ClientType: the type of this client
self.clientType = ClientType.APP
def from_json(self, js):
super().from_json(js)
self.id = js["id"]
self.label = js["label"]
self.homeId = js["homeId"]
self.clientType = ClientType.from_str(js["clientType"])
if "c2cServiceIdentifier" in js:
self.c2cServiceIdentifier = js["c2cServiceIdentifier"]
def __str__(self):
return "label({})".format(self.label)
class OAuthOTK(HomeMaticIPObject):
def __init__(self, connection):
super().__init__(connection)
self.authToken = None
self.expirationTimestamp = None
def from_json(self, js):
super().from_json(js)
self.authToken = js["authToken"]
self.expirationTimestamp = self.fromtimestamp(js["expirationTimestamp"])
class AccessPointUpdateState(HomeMaticIPObject):
def __init__(self, connection):
super().__init__(connection)
self.accessPointUpdateState = DeviceUpdateState.UP_TO_DATE
self.successfulUpdateTimestamp = None
self.updateStateChangedTimestamp = None
def from_json(self, js):
self.accessPointUpdateState = js["accessPointUpdateState"]
self.successfulUpdateTimestamp = self.fromtimestamp(
js["successfulUpdateTimestamp"]
)
self.updateStateChangedTimestamp = self.fromtimestamp(
js["updateStateChangedTimestamp"]
)
class Home(HomeMaticIPObject):
"""this class represents the 'Home' of the homematic ip"""
_typeClassMap = TYPE_CLASS_MAP
_typeGroupMap = TYPE_GROUP_MAP
_typeSecurityEventMap = TYPE_SECURITY_EVENT_MAP
_typeRuleMap = TYPE_RULE_MAP
_typeFunctionalHomeMap = TYPE_FUNCTIONALHOME_MAP
def __init__(self, connection=None):
if connection is None:
connection = Connection()
super().__init__(connection)
# List with create handlers.
self._on_create = []
self.apExchangeClientId = None
self.apExchangeState = ApExchangeState.NONE
self.availableAPVersion = None
self.carrierSense = None
#:bool:displays if the access point is connected to the hmip cloud or
# not
self.connected = None
#:str:the current version of the access point
self.currentAPVersion = None
self.deviceUpdateStrategy = DeviceUpdateStrategy.MANUALLY
self.dutyCycle = None
#:str:the SGTIN of the access point
self.id = None
self.lastReadyForUpdateTimestamp = None
#:Location:the location of the AP
self.location = None
#:bool:determines if a pin is set on this access point
self.pinAssigned = None
self.powerMeterCurrency = None
self.powerMeterUnitPrice = None
self.timeZoneId = None
self.updateState = HomeUpdateState.UP_TO_DATE
#:Weather:the current weather
self.weather = None
self.__webSocket = None
self.__webSocketThread = None
self.onEvent = EventHook()
self.onWsError = EventHook()
#:bool:switch to enable/disable automatic reconnection of the websocket (default=True)
self.websocket_reconnect_on_error = True
#:List[Device]: a collection of all devices in home
self.devices = []
#:List[Client]: a collection of all clients in home
self.clients = []
#:List[Group]: a collection of all groups in the home
self.groups = []
#:List[Rule]: a collection of all rules in the home
self.rules = []
#: a collection of all functionalHomes in the home
self.functionalHomes = []
#:Map: a map of all access points and their updateStates
self.accessPointUpdateStates = {}
def init(self, access_point_id, lookup=True):
self._connection.init(access_point_id, lookup)
def set_auth_token(self, auth_token):
self._connection.set_auth_token(auth_token)
def from_json(self, js_home):
super().from_json(js_home)
self.weather = Weather(self._connection)
self.weather.from_json(js_home["weather"])
if js_home["location"] != None:
self.location = Location(self._connection)
self.location.from_json(js_home["location"])
self.connected = js_home["connected"]
self.currentAPVersion = js_home["currentAPVersion"]
self.availableAPVersion = js_home["availableAPVersion"]
self.timeZoneId = js_home["timeZoneId"]
self.pinAssigned = js_home["pinAssigned"]
self.dutyCycle = js_home["dutyCycle"]
self.updateState = HomeUpdateState.from_str(js_home["updateState"])
self.powerMeterUnitPrice = js_home["powerMeterUnitPrice"]
self.powerMeterCurrency = js_home["powerMeterCurrency"]
self.deviceUpdateStrategy = DeviceUpdateStrategy.from_str(
js_home["deviceUpdateStrategy"]
)
self.lastReadyForUpdateTimestamp = js_home["lastReadyForUpdateTimestamp"]
self.apExchangeClientId = js_home["apExchangeClientId"]
self.apExchangeState = ApExchangeState.from_str(js_home["apExchangeState"])
self.id = js_home["id"]
self.carrierSense = js_home["carrierSense"]
for ap, state in js_home["accessPointUpdateStates"].items():
ap_state = AccessPointUpdateState(self._connection)
ap_state.from_json(state)
self.accessPointUpdateStates[ap] = ap_state
self._get_rules(js_home)
def on_create(self, handler):
"""Adds an event handler to the create method. Fires when a device
is created."""
self._on_create.append(handler)
def fire_create_event(self, *args, **kwargs):
"""Trigger the method tied to _on_create"""
for _handler in self._on_create:
_handler(*args, **kwargs)
def remove_callback(self, handler):
"""Remove event handler."""
super().remove_callback(handler)
if handler in self._on_create:
self._on_create.remove(handler)
def download_configuration(self) -> str:
"""downloads the current configuration from the cloud
Returns
the downloaded configuration or an errorCode
"""
return self._restCall(
"home/getCurrentState", json.dumps(self._connection.clientCharacteristics)
)
def get_current_state(self, clearConfig: bool = False):
"""downloads the current configuration and parses it into self
Args:
clearConfig(bool): if set to true, this function will remove all old objects
from self.devices, self.client, ... to have a fresh config instead of reparsing them
"""
json_state = self.download_configuration()
return self.update_home(json_state, clearConfig)
def update_home(self, json_state, clearConfig: bool = False):
"""parse a given json configuration into self.
This will update the whole home including devices, clients and groups.
Args:
clearConfig(bool): if set to true, this function will remove all old objects
from self.devices, self.client, ... to have a fresh config instead of reparsing them
"""
if "errorCode" in json_state:
LOGGER.error(
"Could not get the current configuration. Error: %s",
json_state["errorCode"],
)
return False
if clearConfig:
self.devices = []
self.clients = []
self.groups = []
self._get_devices(json_state)
self._get_clients(json_state)
self._get_groups(json_state)
self._load_functionalChannels()
js_home = json_state["home"]
return self.update_home_only(js_home, clearConfig)
def update_home_only(self, js_home, clearConfig: bool = False):
"""parse a given home json configuration into self.
This will update only the home without updating devices, clients and groups.
Args:
clearConfig(bool): if set to true, this function will remove all old objects
from self.devices, self.client, ... to have a fresh config instead of reparsing them
"""
if "errorCode" in js_home:
LOGGER.error(
"Could not get the current configuration. Error: %s",
js_home["errorCode"],
)
return False
if clearConfig:
self.rules = []
self.functionalHomes = []
self.from_json(js_home)
self._get_functionalHomes(js_home)
return True
def _get_devices(self, json_state):
self.devices = [x for x in self.devices if x.id in json_state["devices"].keys()]
for id_, raw in json_state["devices"].items():
_device = self.search_device_by_id(id_)
if _device:
_device.from_json(raw)
else:
self.devices.append(self._parse_device(raw))
def _parse_device(self, json_state):
try:
deviceType = DeviceType.from_str(json_state["type"])
d = self._typeClassMap[deviceType](self._connection)
d.from_json(json_state)
return d
except:
d = self._typeClassMap[DeviceType.DEVICE](self._connection)
d.from_json(json_state)
LOGGER.warning("There is no class for device '%s' yet", json_state["type"])
return d
def _get_rules(self, json_state):
self.rules = [
x for x in self.rules if x.id in json_state["ruleMetaDatas"].keys()
]
for id_, raw in json_state["ruleMetaDatas"].items():
_rule = self.search_rule_by_id(id_)
if _rule:
_rule.from_json(raw)
else:
self.rules.append(self._parse_rule(raw))
def _parse_rule(self, json_state):
try:
ruleType = AutomationRuleType.from_str(json_state["type"])
r = self._typeRuleMap[ruleType](self._connection)
r.from_json(json_state)
return r
except:
r = Rule(self._connection)
r.from_json(json_state)
LOGGER.warning("There is no class for rule '%s' yet", json_state["type"])
return r
def _get_clients(self, json_state):
self.clients = [x for x in self.clients if x.id in json_state["clients"].keys()]
for id_, raw in json_state["clients"].items():
_client = self.search_client_by_id(id_)
if _client:
_client.from_json(raw)
else:
c = Client(self._connection)
c.from_json(raw)
self.clients.append(c)
def _parse_group(self, json_state):
g = None
if json_state["type"] == "META":
g = MetaGroup(self._connection)
g.from_json(json_state, self.devices, self.groups)
else:
try:
groupType = GroupType.from_str(json_state["type"])
g = self._typeGroupMap[groupType](self._connection)
g.from_json(json_state, self.devices)
except:
g = self._typeGroupMap[GroupType.GROUP](self._connection)
g.from_json(json_state, self.devices)
LOGGER.warning(
"There is no class for group '%s' yet", json_state["type"]
)
return g
def _get_groups(self, json_state):
self.groups = [x for x in self.groups if x.id in json_state["groups"].keys()]
metaGroups = []
for id_, raw in json_state["groups"].items():
_group = self.search_group_by_id(id_)
if _group:
if isinstance(_group, MetaGroup):
_group.from_json(raw, self.devices, self.groups)
else:
_group.from_json(raw, self.devices)
else:
group_type = raw["type"]
if group_type == "META":
metaGroups.append(raw)
else:
self.groups.append(self._parse_group(raw))
for mg in metaGroups:
self.groups.append(self._parse_group(mg))
def _get_functionalHomes(self, json_state):
for solution, functionalHome in json_state["functionalHomes"].items():
try:
solutionType = FunctionalHomeType.from_str(solution)
h = None
for fh in self.functionalHomes:
if fh.solution == solution:
h = fh
break
if h is None:
h = self._typeFunctionalHomeMap[solutionType](self._connection)
self.functionalHomes.append(h)
h.from_json(functionalHome, self.groups)
except:
h = FunctionalHome(self._connection)
h.from_json(functionalHome, self.groups)
LOGGER.warning(
"There is no class for functionalHome '%s' yet", solution
)
self.functionalHomes.append(h)
def _load_functionalChannels(self):
for d in self.devices:
d.load_functionalChannels(self.groups)
def get_functionalHome(self, functionalHomeType: type) -> FunctionalHome:
""" gets the specified functionalHome
Args:
functionalHomeType(type): the type of the functionalHome which should be returned
Returns:
the FunctionalHome or None if it couldn't be found
"""
for x in self.functionalHomes:
if isinstance(x, functionalHomeType):
return x
return None
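# Illustrative usage (assuming one of the functional home classes from
# homematicip.functionalHomes, e.g. SecurityAndAlarmHome):
#   alarm_home = home.get_functionalHome(SecurityAndAlarmHome)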
def search_device_by_id(self, deviceID) -> Device:
""" searches a device by given id
Args:
deviceID(str): the device to search for
Returns
the Device object or None if it couldn't find a device
"""
for d in self.devices:
if d.id == deviceID:
return d
return None
def search_group_by_id(self, groupID) -> Group:
""" searches a group by given id
Args:
groupID(str): groupID the group to search for
Returns
the group object or None if it couldn't find a group
"""
for g in self.groups:
if g.id == groupID:
return g
return None
def search_client_by_id(self, clientID) -> Client:
""" searches a client by given id
Args:
clientID(str): the client to search for
Returns
the client object or None if it couldn't find a client
"""
for c in self.clients:
if c.id == clientID:
return c
return None
def search_rule_by_id(self, ruleID) -> Rule:
""" searches a rule by given id
Args:
ruleID(str): the rule to search for
Returns
the rule object or None if it couldn't find a rule
"""
for r in self.rules:
if r.id == ruleID:
return r
return None
def get_security_zones_activation(self) -> (bool, bool):
""" returns the value of the security zones if they are armed or not
Returns
internal
True if the internal zone is armed
external
True if the external zone is armed
"""
internal_active = False
external_active = False
for g in self.groups:
if isinstance(g, SecurityZoneGroup):
if g.label == "EXTERNAL":
external_active = g.active
elif g.label == "INTERNAL":
internal_active = g.active
return internal_active, external_active
def set_security_zones_activation(self, internal=True, external=True):
""" this function will set the alarm system to armed or disable it
Args:
internal(bool): activates/deactivates the internal zone
external(bool): activates/deactivates the external zone
Examples:
arming while being at home
>>> home.set_security_zones_activation(False,True)
arming without being at home
>>> home.set_security_zones_activation(True,True)
disarming the alarm system
>>> home.set_security_zones_activation(False,False)
"""
data = {"zonesActivation": {"EXTERNAL": external, "INTERNAL": internal}}
return self._restCall("home/security/setZonesActivation", json.dumps(data))
def set_location(self, city, latitude, longitude):
data = {"city": city, "latitude": latitude, "longitude": longitude}
return self._restCall("home/setLocation", json.dumps(data))
def set_intrusion_alert_through_smoke_detectors(self, activate: bool = True):
""" activate or deactivate if smoke detectors should "ring" during an alarm
Args:
activate(bool): True will let the smoke detectors "ring" during an alarm
"""
data = {"intrusionAlertThroughSmokeDetectors": activate}
return self._restCall(
"home/security/setIntrusionAlertThroughSmokeDetectors", json.dumps(data)
)
def activate_absence_with_period(self, endtime: datetime):
""" activates the absence mode until the given time
Args:
endtime(datetime): the time when the absence should automatically be disabled
"""
data = {"endTime": endtime.strftime("%Y_%m_%d %H:%M")}
return self._restCall(
"home/heating/activateAbsenceWithPeriod", json.dumps(data)
)
def activate_absence_permanent(self):
""" activates the absence forever
"""
return self._restCall("home/heating/activateAbsencePermanent")
def activate_absence_with_duration(self, duration: int):
""" activates the absence mode for a given time
Args:
duration(int): the absence duration in minutes
"""
data = {"duration": duration}
return self._restCall(
"home/heating/activateAbsenceWithDuration", json.dumps(data)
)
def deactivate_absence(self):
""" deactivates the absence mode immediately"""
return self._restCall("home/heating/deactivateAbsence")
def activate_vacation(self, endtime: datetime, temperature: float):
""" activates the vatation mode until the given time
Args:
endtime(datetime): the time when the vatation mode should automatically be disabled
temperature(float): the settemperature during the vacation mode
"""
data = {
"endTime": endtime.strftime("%Y_%m_%d %H:%M"),
"temperature": temperature,
}
return self._restCall("home/heating/activateVacation", json.dumps(data))
def deactivate_vacation(self):
""" deactivates the vacation mode immediately"""
return self._restCall("home/heating/deactivateVacation")
def set_pin(self, newPin: str, oldPin: str = None) -> dict:
""" sets a new pin for the home
Args:
newPin(str): the new pin
oldPin(str): optional, if there is currently a pin active it must be given here.
Otherwise it will not be possible to set the new pin
Returns:
the result of the call
"""
if newPin is None:
newPin = ""
data = {"pin": newPin}
if oldPin:
self._connection.headers["PIN"] = str(oldPin)
result = self._restCall("home/setPin", body=json.dumps(data))
if oldPin:
del self._connection.headers["PIN"]
return result
def set_zone_activation_delay(self, delay):
data = {"zoneActivationDelay": delay}
return self._restCall(
"home/security/setZoneActivationDelay", body=json.dumps(data)
)
def get_security_journal(self):
journal = self._restCall("home/security/getSecurityJournal")
if "errorCode" in journal:
LOGGER.error(
"Could not get the security journal. Error: %s", journal["errorCode"]
)
return None
ret = []
for entry in journal["entries"]:
try:
eventType = SecurityEventType(entry["eventType"])
# unknown or unmapped event types raise here and fall back to the generic SecurityEvent below
j = self._typeSecurityEventMap[eventType](self._connection)
except:
j = SecurityEvent(self._connection)
LOGGER.warning("There is no class for %s yet", entry["eventType"])
j.from_json(entry)
ret.append(j)
return ret
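# Illustrative usage: returns None on error, otherwise a list of
# SecurityEvent (sub)instances:
#   for event in home.get_security_journal() or []:
#       print(event)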
def delete_group(self, group: Group):
"""deletes the given group from the cloud
Args:
group(Group):the group to delete
"""
return group.delete()
def get_OAuth_OTK(self):
token = OAuthOTK(self._connection)
token.from_json(self._restCall("home/getOAuthOTK"))
return token
def set_timezone(self, timezone: str):
""" sets the timezone for the AP. e.g. "Europe/Berlin"
Args:
timezone(str): the new timezone
"""
data = {"timezoneId": timezone}
return self._restCall("home/setTimezone", body=json.dumps(data))
def set_powermeter_unit_price(self, price):
data = {"powerMeterUnitPrice": price}
return self._restCall("home/setPowerMeterUnitPrice", body=json.dumps(data))
def set_zones_device_assignment(self, internal_devices, external_devices) -> dict:
""" sets the devices for the security zones
Args:
internal_devices(List[Device]): the devices which should be used for the internal zone
external_devices(List[Device]): the devices which should be used for the external(hull) zone
Returns:
the result of _restCall
"""
internal = [x.id for x in internal_devices]
external = [x.id for x in external_devices]
data = {"zonesDeviceAssignment": {"INTERNAL": internal, "EXTERNAL": external}}
return self._restCall(
"home/security/setZonesDeviceAssignment", body=json.dumps(data)
)
def start_inclusion(self, deviceId):
""" start inclusion mode for specific device
Args:
deviceId: sgtin of device
"""
data = {"deviceId": deviceId}
return self._restCall("home/startInclusionModeForDevice", body=json.dumps(data))
def enable_events(self):
websocket.enableTrace(True)
self.__webSocket = websocket.WebSocketApp(
self._connection.urlWebSocket,
header=[
"AUTHTOKEN: {}".format(self._connection.auth_token),
"CLIENTAUTH: {}".format(self._connection.clientauth_token),
],
on_message=self._ws_on_message,
on_error=self._ws_on_error,
on_close=self._ws_on_close,
)
websocket_kwargs = {"ping_interval": 3}
if hasattr(sys, "_called_from_test"): # disable ssl during a test run
sslopt = {"cert_reqs": ssl.CERT_NONE}
websocket_kwargs = {"sslopt": sslopt, "ping_interval": 2, "ping_timeout": 1}
self.__webSocketThread = threading.Thread(
name="hmip-websocket",
target=self.__webSocket.run_forever,
kwargs=websocket_kwargs,
)
self.__webSocketThread.setDaemon(True)
self.__webSocketThread.start()
def disable_events(self):
if self.__webSocket:
self.__webSocket.close()
self.__webSocket = None
def _ws_on_close(self):
self.__webSocket = None
def _ws_on_error(self, err):
LOGGER.exception(err)
self.onWsError.fire(err)
if self.websocket_reconnect_on_error:
logger.debug("Trying to reconnect websocket")
self.disable_events()
self.enable_events()
def _ws_on_message(self, message):
# json.loads doesn't support bytes as parameter before python 3.6
js = json.loads(bytes2str(message))
# LOGGER.debug(js)
eventList = []
for event in js["events"].values():
try:
pushEventType = EventType(event["pushEventType"])
LOGGER.debug(pushEventType)
obj = None
if pushEventType == EventType.GROUP_CHANGED:
data = event["group"]
obj = self.search_group_by_id(data["id"])
if obj is None:
obj = self._parse_group(data)
self.groups.append(obj)
pushEventType = EventType.GROUP_ADDED
self.fire_create_event(obj, event_type=pushEventType, obj=obj)
if type(obj) is MetaGroup:
obj.from_json(data, self.devices, self.groups)
else:
obj.from_json(data, self.devices)
obj.fire_update_event(data, event_type=pushEventType, obj=obj)
elif pushEventType == EventType.HOME_CHANGED:
data = event["home"]
obj = self
obj.update_home_only(data)
obj.fire_update_event(data, event_type=pushEventType, obj=obj)
elif pushEventType == EventType.CLIENT_ADDED:
data = event["client"]
obj = Client(self._connection)
obj.from_json(data)
self.clients.append(obj)
elif pushEventType == EventType.CLIENT_CHANGED:
data = event["client"]
obj = self.search_client_by_id(data["id"])
obj.from_json(data)
elif pushEventType == EventType.CLIENT_REMOVED:
obj = self.search_client_by_id(event["id"])
self.clients.remove(obj)
elif pushEventType == EventType.DEVICE_ADDED:
data = event["device"]
obj = self._parse_device(data)
obj.load_functionalChannels(self.groups)
self.devices.append(obj)
self.fire_create_event(data, event_type=pushEventType, obj=obj)
elif pushEventType == EventType.DEVICE_CHANGED:
data = event["device"]
obj = self.search_device_by_id(data["id"])
if obj is None: # no DEVICE_ADDED Event?
obj = self._parse_device(data)
self.devices.append(obj)
pushEventType = EventType.DEVICE_ADDED
self.fire_create_event(data, event_type=pushEventType, obj=obj)
else:
obj.from_json(data)
obj.load_functionalChannels(self.groups)
obj.fire_update_event(data, event_type=pushEventType, obj=obj)
elif pushEventType == EventType.DEVICE_REMOVED:
obj = self.search_device_by_id(event["id"])
obj.fire_remove_event(obj, event_type=pushEventType, obj=obj)
self.devices.remove(obj)
elif pushEventType == EventType.GROUP_REMOVED:
obj = self.search_group_by_id(event["id"])
obj.fire_remove_event(obj, event_type=pushEventType, obj=obj)
self.groups.remove(obj)
elif pushEventType == EventType.GROUP_ADDED:
group = event["group"]
obj = self._parse_group(group)
self.groups.append(obj)
self.fire_create_event(obj, event_type=pushEventType, obj=obj)
elif pushEventType == EventType.SECURITY_JOURNAL_CHANGED:
pass # data is just none so nothing to do here
# TODO: implement INCLUSION_REQUESTED, NONE
eventList.append({"eventType": pushEventType, "data": obj})
except ValueError as valerr: # pragma: no cover
LOGGER.warning(
"Uknown EventType '%s' Data: %s", event["pushEventType"], event
)
except Exception as err: # pragma: no cover
LOGGER.exception(err)
self.onEvent.fire(eventList)
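# Minimal usage sketch. The SGTIN and auth token below are placeholders; a
# real auth token has to be created beforehand (e.g. with the library's auth
# token generation tool) and the access point id is printed on the device.
if __name__ == "__main__":
    home = Home()
    home.init("3014-F711-A000-0000-0000-0000")  # hypothetical access point SGTIN
    home.set_auth_token("HYPOTHETICAL_AUTH_TOKEN")
    if home.get_current_state():
        for device in home.devices:
            print(device)
        for group in home.groups:
            print(group)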
| marcsowen/homematicip-rest-api | homematicip/home.py | Python | gpl-3.0 | 33,437 |
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals
import os
try:
import koji as koji
except ImportError:
import inspect
import sys
# Find our mocked koji module
import tests.koji as koji
mock_koji_path = os.path.dirname(inspect.getfile(koji.ClientSession))
if mock_koji_path not in sys.path:
sys.path.append(os.path.dirname(mock_koji_path))
# Now load it properly, the same way the plugin will
del koji
import koji as koji
from atomic_reactor.plugins.pre_bump_release import BumpReleasePlugin
from atomic_reactor.util import df_parser
from flexmock import flexmock
import pytest
class TestBumpRelease(object):
def prepare(self,
tmpdir,
labels=None,
include_target=True,
certs=False,
append=False):
if labels is None:
labels = {}
workflow = flexmock()
setattr(workflow, 'builder', flexmock())
filename = os.path.join(str(tmpdir), 'Dockerfile')
with open(filename, 'wt') as df:
df.write('FROM base\n')
for key, value in labels.items():
df.write('LABEL {key}={value}\n'.format(key=key, value=value))
setattr(workflow.builder, 'df_path', filename)
kwargs = {
'tasker': None,
'workflow': workflow,
'hub': ''
}
if include_target:
kwargs['target'] = 'foo'
if append:
kwargs['append'] = True
if certs:
with open('{}/ca'.format(tmpdir), 'w') as ca_fd:
ca_fd.write('ca')
with open('{}/cert'.format(tmpdir), 'w') as cert_fd:
cert_fd.write('cert')
with open('{}/serverca'.format(tmpdir), 'w') as serverca_fd:
serverca_fd.write('serverca')
kwargs['koji_ssl_certs_dir'] = str(tmpdir)
plugin = BumpReleasePlugin(**kwargs)
return plugin
def test_component_missing(self, tmpdir):
flexmock(koji, ClientSession=lambda hub, opts=None: None)
plugin = self.prepare(tmpdir)
with pytest.raises(RuntimeError):
plugin.run()
@pytest.mark.parametrize('release_label', [
'release',
'Release',
])
def test_release_label_already_set(self, tmpdir, caplog, release_label):
flexmock(koji, ClientSession=lambda hub, opts=None: None)
plugin = self.prepare(tmpdir, labels={release_label: '1'})
plugin.run()
assert 'not incrementing' in caplog.text()
@pytest.mark.parametrize('labels', [
{'com.redhat.component': 'component'},
{'BZComponent': 'component'},
{'version': 'version'},
{'Version': 'version'},
{},
])
def test_missing_labels(self, tmpdir, caplog, labels):
flexmock(koji, ClientSession=lambda hub, opts=None: None)
plugin = self.prepare(tmpdir, labels=labels)
with pytest.raises(RuntimeError) as exc:
plugin.run()
assert 'missing label' in str(exc)
@pytest.mark.parametrize('component', [
{'com.redhat.component': 'component1'},
{'BZComponent': 'component2'},
])
@pytest.mark.parametrize('version', [
{'version': '7.1'},
{'Version': '7.2'},
])
@pytest.mark.parametrize('include_target', [
True,
False
])
@pytest.mark.parametrize('next_release', [
{'actual': '1', 'builds': [], 'expected': '1'},
{'actual': '1', 'builds': ['1'], 'expected': '2'},
{'actual': '1', 'builds': ['1', '2'], 'expected': '3'},
{'actual': '20', 'builds': ['19.1'], 'expected': '20'},
{'actual': '20', 'builds': ['20', '20.1'], 'expected': '21'},
{'actual': '20.1', 'builds': ['19.1'], 'expected': '20'},
{'actual': '20.1', 'builds': ['19.1', '20'], 'expected': '21'},
{'actual': '20.1', 'builds': ['20'], 'expected': '21'},
{'actual': '20.1', 'builds': ['20', '20.1'], 'expected': '21'},
{'actual': '20.2', 'builds': ['20', '20.1'], 'expected': '21'},
{'actual': '20.2', 'builds': ['20', '20.1', '20.2'], 'expected': '21'},
{'actual': '20.fc25', 'builds': ['20.fc24'], 'expected': '20.fc25'},
{'actual': '20.fc25', 'builds': ['20.fc25'], 'expected': '21.fc25'},
{'actual': '20.foo.fc25',
'builds': ['20.foo.fc25'],
'expected': '21.foo.fc25'},
{'actual': '20.1.fc25',
'builds': ['20.fc25', '20.1.fc25'],
'expected': '21.fc25'},
{'actual': '20.1.fc25',
'builds': ['20.fc25', '20.1.fc25', '21.fc25'],
'expected': '22.fc25'},
])
def test_increment(self, tmpdir, component, version, next_release,
include_target):
class MockedClientSession(object):
def __init__(self, hub, opts=None):
pass
def getNextRelease(self, build_info):
assert build_info['name'] == list(component.values())[0]
assert build_info['version'] == list(version.values())[0]
return next_release['actual']
def getBuild(self, build_info):
assert build_info['name'] == list(component.values())[0]
assert build_info['version'] == list(version.values())[0]
if build_info['release'] in next_release['builds']:
return True
return None
def ssl_login(self, cert, ca, serverca, proxyuser=None):
self.ca_path = ca
self.cert_path = cert
self.serverca_path = serverca
return True
session = MockedClientSession('')
flexmock(koji, ClientSession=session)
labels = {}
labels.update(component)
labels.update(version)
plugin = self.prepare(tmpdir, labels=labels,
include_target=include_target,
certs=True)
plugin.run()
for file_path, expected in [(session.ca_path, 'ca'),
(session.cert_path, 'cert'),
(session.serverca_path, 'serverca')]:
assert os.path.isfile(file_path)
with open(file_path, 'r') as fd:
assert fd.read() == expected
parser = df_parser(plugin.workflow.builder.df_path, workflow=plugin.workflow)
assert parser.labels['release'] == next_release['expected']
# Old-style spellings will be asserted only if other old-style labels are present
if 'BZComponent' not in parser.labels.keys():
assert 'Release' not in parser.labels
else:
assert parser.labels['Release'] == next_release['expected']
@pytest.mark.parametrize('base_release,builds,expected', [
('42', [], '42.1'),
('42', ['42.1', '42.2'], '42.3'),
# No interpretation of the base release when appending - just treated as string
('42.1', ['42.2'], '42.1.1'),
('42.1', ['42.1.1'], '42.1.2'),
(None, [], '1.1'),
(None, ['1.1'], '1.2'),
(None, ['1.1', '1.2'], '1.3'),
])
def test_append(self, tmpdir, base_release, builds, expected):
class MockedClientSession(object):
def __init__(self, hub, opts=None):
pass
def getBuild(self, build_info):
if build_info['release'] in builds:
return True
return None
session = MockedClientSession('')
flexmock(koji, ClientSession=session)
labels = {
'com.redhat.component': 'component1',
'version': 'fc26',
}
if base_release:
labels['release'] = base_release
plugin = self.prepare(tmpdir, labels=labels,
append=True)
plugin.run()
parser = df_parser(plugin.workflow.builder.df_path, workflow=plugin.workflow)
assert parser.labels['release'] == expected
| maxamillion/atomic-reactor | tests/plugins/test_bump_release.py | Python | bsd-3-clause | 8,255 |
#!/usr/bin/env python
#ffmpeg -i vtest.avi -c:a aac -b:a 128k -c:v libx264 -crf 23 output.mp4
import numpy as np
import cv2
cap = cv2.VideoCapture('vtest.mp4')
# cap = cv2.VideoCapture(0)
def MOG(cap):
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
print('kernel', kernel)
# fgbg = cv2.createBackgroundSubtractorMOG() # seems to be missing
fgbg = cv2.createBackgroundSubtractorMOG2()
# fgbg.setDetectShadows(False)
frame_save = 0
while True:
ret, frame = cap.read()
if not ret:
break
# find the change
fgmask = fgbg.apply(frame)
# clean up the image
fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
frame_save += 1
if 100 < frame_save <105:
cv2.imwrite('frame{}.png'.format(frame_save), frame)
cv2.imshow('frame',fgmask)
k = cv2.waitKey(10)
if k == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
def Subtract(cap):
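# Naive alternative to MOG: difference consecutive grayscale frames directly.
# Note the uint8 subtraction wraps around, so the output is noisy compared to
# a proper background subtractor.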
ret = False
while not ret:
ret, last = cap.read()
last = cv2.cvtColor(last, cv2.COLOR_BGR2GRAY)
while True:
ret, frame = cap.read()
if not ret:
break
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# smallest = np.amin(frame)
# biggest = np.amax(frame)
# print('max/min: {} {}'.format(smallest, biggest))
change = frame - last
last = frame
# change = cv2.adaptiveThreshold(
# change,
# 255,
# cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
# cv2.THRESH_BINARY,
# 11,2)
# ret,change = cv2.threshold(change,200,255,cv2.THRESH_BINARY)
cv2.imshow('frame',change)
k = cv2.waitKey(30) & 0xff
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
# Subtract(cap)
MOG(cap)
| MarsUniversity/ece387 | website/block_3_vision/lsn21/bg.py | Python | mit | 1,894 |
#!/usr/bin/env python
# ESP32 efuse get/set utility
# https://github.com/themadinventor/esptool
#
# Copyright (C) 2016 Espressif Systems (Shanghai) PTE LTD
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
# Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import division, print_function
import argparse
import esptool
import io
import json
import os
import struct
import sys
import time
# Table of efuse values - (name, category, block, word in block, mask, write disable bit, read disable bit, type, description)
# Match values in efuse_reg.h & Efuse technical reference chapter
EFUSES = [
('WR_DIS', "efuse", 0, 0, 0x0000FFFF, 1, None, "int", "Efuse write disable mask"),
('RD_DIS', "efuse", 0, 0, 0x000F0000, 0, None, "int", "Efuse read disable mask"),
('FLASH_CRYPT_CNT', "security", 0, 0, 0x07F00000, 2, None, "bitcount", "Flash encryption mode counter"),
('MAC', "identity", 0, 1, 0xFFFFFFFF, 3, None, "mac", "Factory MAC Address"),
('XPD_SDIO_FORCE', "config", 0, 4, 1 << 16, 5, None, "flag", "Ignore MTDI pin (GPIO12) for VDD_SDIO on reset"),
('XPD_SDIO_REG', "config", 0, 4, 1 << 14, 5, None, "flag", "If XPD_SDIO_FORCE, enable VDD_SDIO reg on reset"),
('XPD_SDIO_TIEH', "config", 0, 4, 1 << 15, 5, None, "flag", "If XPD_SDIO_FORCE & XPD_SDIO_REG, 1=3.3V 0=1.8V"),
('CLK8M_FREQ', "config", 0, 4, 0xFF, None, None, "int", "8MHz clock freq override"),
('SPI_PAD_CONFIG_CLK', "config", 0, 5, 0x1F << 0, 6, None, "spipin", "Override SD_CLK pad (GPIO6/SPICLK)"),
('SPI_PAD_CONFIG_Q', "config", 0, 5, 0x1F << 5, 6, None, "spipin", "Override SD_DATA_0 pad (GPIO7/SPIQ)"),
('SPI_PAD_CONFIG_D', "config", 0, 5, 0x1F << 10, 6, None, "spipin", "Override SD_DATA_1 pad (GPIO8/SPID)"),
('SPI_PAD_CONFIG_HD', "config", 0, 3, 0x1F << 4, 6, None, "spipin", "Override SD_DATA_2 pad (GPIO9/SPIHD)"),
('SPI_PAD_CONFIG_CS0', "config", 0, 5, 0x1F << 15, 6, None, "spipin", "Override SD_CMD pad (GPIO11/SPICS0)"),
('FLASH_CRYPT_CONFIG', "security", 0, 5, 0x0F << 28, 10, 3, "int", "Flash encryption config (key tweak bits)"),
('CHIP_VER_REV1', "identity", 0, 3, 1 << 15, 3, None, "flag", "Silicon Revision 1"),
('CHIP_VER_REV2', "identity", 0, 5, 1 << 20, 6, None, "flag", "Silicon Revision 2"),
('BLK3_PART_RESERVE', "calibration", 0, 3, 1 << 14, 10, 3, "flag", "BLOCK3 partially reserved for ADC calibration data"),
('CHIP_VERSION', "identity", 0, 3, 0x03 << 12, 3, None, "int", "Reserved for future chip versions"),
('CHIP_PACKAGE', "identity", 0, 3, 0x07 << 9, 3, None, "int", "Chip package identifier"),
('CODING_SCHEME', "efuse", 0, 6, 0x3, 10, 3, "int", "Efuse variable block length scheme"),
('CONSOLE_DEBUG_DISABLE',"security", 0, 6, 1 << 2, 15, None, "flag", "Disable ROM BASIC interpreter fallback"),
('DISABLE_SDIO_HOST', "config", 0, 6, 1 << 3, None, None, "flag", "Disable SDIO host"),
('ABS_DONE_0', "security", 0, 6, 1 << 4, 12, None, "flag", "secure boot enabled for bootloader"),
('ABS_DONE_1', "security", 0, 6, 1 << 5, 13, None, "flag", "secure boot abstract 1 locked"),
('JTAG_DISABLE', "security", 0, 6, 1 << 6, 14, None, "flag", "Disable JTAG"),
('DISABLE_DL_ENCRYPT', "security", 0, 6, 1 << 7, 15, None, "flag", "Disable flash encryption in UART bootloader"),
('DISABLE_DL_DECRYPT', "security", 0, 6, 1 << 8, 15, None, "flag", "Disable flash decryption in UART bootloader"),
('DISABLE_DL_CACHE', "security", 0, 6, 1 << 9, 15, None, "flag", "Disable flash cache in UART bootloader"),
('KEY_STATUS', "efuse", 0, 6, 1 << 10, 10, 3, "flag", "Usage of efuse block 3 (reserved)"),
('ADC_VREF', "calibration", 0, 4, 0x1F << 8, 0, None, "vref", "Voltage reference calibration"),
('BLK1', "security", 1, 0, 0xFFFFFFFF, 7, 0, "keyblock", "Flash encryption key"),
('BLK2', "security", 2, 0, 0xFFFFFFFF, 8, 1, "keyblock", "Secure boot key"),
('BLK3', "security", 3, 0, 0xFFFFFFFF, 9, 2, "keyblock", "Variable Block 3"),
]
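# Reading any field in the table reduces to: fetch the word at
# EFUSE_BLOCK_OFFS[block] + word, mask it and shift it down. For example,
# XPD_SDIO_FORCE (block 0, word 4, mask 1 << 16) is read as
# (read_efuse(4) >> 16) & 1; EfuseField.get_raw() below implements this.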
# if BLK3_PART_RESERVE is set, these efuse fields are in BLK3:
BLK3_PART_EFUSES = [
('ADC1_TP_LOW', "calibration", 3, 3, 0x7F << 0, 9, 2, "adc_tp", "ADC1 150mV reading"),
('ADC1_TP_HIGH', "calibration", 3, 3, 0x1FF << 7, 9, 2, "adc_tp", "ADC1 850mV reading"),
('ADC2_TP_LOW', "calibration", 3, 3, 0x7F << 16, 9, 2, "adc_tp", "ADC2 150mV reading"),
('ADC2_TP_HIGH', "calibration", 3, 3, 0x1FF << 23, 9, 2, "adc_tp", "ADC2 850mV reading"),
]
# Offsets and lengths of each of the 4 efuse blocks in register space
#
# These offsets/lens are for esptool.read_efuse(X) which takes
# a word offset (into registers) not a byte offset.
EFUSE_BLOCK_OFFS = [0, 14, 22, 30]
EFUSE_BLOCK_LEN = [7, 8, 8, 8]
# EFUSE registers & command/conf values
EFUSE_REG_CONF = 0x3FF5A0FC
EFUSE_CONF_WRITE = 0x5A5A
EFUSE_CONF_READ = 0x5AA5
EFUSE_REG_CMD = 0x3FF5A104
EFUSE_CMD_WRITE = 0x2
EFUSE_CMD_READ = 0x1
# address of first word of write registers for each efuse
EFUSE_REG_WRITE = [0x3FF5A01C, 0x3FF5A098, 0x3FF5A0B8, 0x3FF5A0D8]
# 3/4 Coding scheme warnings registers
EFUSE_REG_DEC_STATUS = 0x3FF5A11C
EFUSE_REG_DEC_STATUS_MASK = 0xFFF
# Efuse clock control
EFUSE_DAC_CONF_REG = 0x3FF5A118
EFUSE_CLK_REG = 0x3FF5A0F8
EFUSE_DAC_CLK_DIV_MASK = 0xFF
EFUSE_CLK_SEL0_MASK = 0x00FF
EFUSE_CLK_SEL1_MASK = 0xFF00
EFUSE_CLK_SETTINGS = {
# APB freq: clk_sel0, clk_sel1, dac_clk_div
# Taken from TRM chapter "eFuse Controller": Timing Configuration
26: (250, 255, 52),
40: (160, 255, 80),
80: (80, 128, 100), # this is here for completeness only as esptool never sets an 80MHz APB clock
}
EFUSE_BURN_TIMEOUT = 0.250 # seconds
# Coding Scheme values
CODING_SCHEME_NONE = 0
CODING_SCHEME_34 = 1
def confirm(action, args):
print("%s%sThis is an irreversible operation." % (action, "" if action.endswith("\n") else ". "))
if not args.do_not_confirm:
print("Type 'BURN' (all capitals) to continue.")
sys.stdout.flush() # required for Pythons which disable line buffering, ie mingw in mintty
try:
yes = raw_input() # raw_input renamed to input in Python 3
except NameError:
yes = input()
if yes != "BURN":
print("Aborting.")
sys.exit(0)
def efuse_write_reg_addr(block, word):
"""
Return the physical address of the efuse write data register
block X word X.
"""
return EFUSE_REG_WRITE[block] + (4 * word)
class EspEfuses(object):
"""
Wrapper object to manage the efuse fields in a connected ESP bootloader
"""
def __init__(self, esp):
self._esp = esp
self._efuses = [EfuseField.from_tuple(self, efuse) for efuse in EFUSES]
if self["BLK3_PART_RESERVE"].get():
# add these BLK3 efuses, if the BLK3_PART_RESERVE flag is set...
self._efuses += [EfuseField.from_tuple(self, efuse) for efuse in BLK3_PART_EFUSES]
self.coding_scheme = self["CODING_SCHEME"].get()
def __getitem__(self, efuse_name):
""" Return the efuse field with the given name """
for e in self._efuses:
if efuse_name == e.register_name:
return e
raise KeyError
def __iter__(self):
return self._efuses.__iter__()
def write_efuses(self):
""" Write the values in the efuse write registers to
the efuse hardware, then refresh the efuse read registers.
"""
# Configure clock
apb_freq = self._esp.get_crystal_freq()
clk_sel0, clk_sel1, dac_clk_div = EFUSE_CLK_SETTINGS[apb_freq]
self.update_reg(EFUSE_DAC_CONF_REG, EFUSE_DAC_CLK_DIV_MASK, dac_clk_div)
self.update_reg(EFUSE_CLK_REG, EFUSE_CLK_SEL0_MASK, clk_sel0)
self.update_reg(EFUSE_CLK_REG, EFUSE_CLK_SEL1_MASK, clk_sel1)
self.write_reg(EFUSE_REG_CONF, EFUSE_CONF_WRITE)
self.write_reg(EFUSE_REG_CMD, EFUSE_CMD_WRITE)
def wait_idle():
deadline = time.time() + EFUSE_BURN_TIMEOUT
while time.time() < deadline:
if self._esp.read_reg(EFUSE_REG_CMD) == 0:
return
raise esptool.FatalError("Timed out waiting for Efuse controller command to complete")
wait_idle()
self.write_reg(EFUSE_REG_CONF, EFUSE_CONF_READ)
self.write_reg(EFUSE_REG_CMD, EFUSE_CMD_READ)
wait_idle()
def read_efuse(self, addr):
return self._esp.read_efuse(addr)
def read_reg(self, addr):
return self._esp.read_reg(addr)
def write_reg(self, addr, value):
return self._esp.write_reg(addr, value)
def update_reg(self, addr, mask, new_val):
return self._esp.update_reg(addr, mask, new_val)
def get_coding_scheme_warnings(self):
""" Check if the coding scheme has detected any errors.
Meaningless for default coding scheme (0)
"""
return self.read_reg(EFUSE_REG_DEC_STATUS) & EFUSE_REG_DEC_STATUS_MASK
def get_block_len(self):
""" Return the length of BLK1, BLK2, BLK3 in bytes """
return 24 if self.coding_scheme == CODING_SCHEME_34 else 32
class EfuseField(object):
@staticmethod
def from_tuple(parent, efuse_tuple):
category = efuse_tuple[7]
return {
"mac": EfuseMacField,
"keyblock": EfuseKeyblockField,
"spipin": EfuseSpiPinField,
"vref": EfuseVRefField,
"adc_tp": EfuseAdcPointCalibration,
}.get(category, EfuseField)(parent, *efuse_tuple)
def __init__(self, parent, register_name, category, block, word, mask, write_disable_bit, read_disable_bit, efuse_type, description):
self.category = category
self.parent = parent
self.block = block
self.word = word
self.data_reg_offs = EFUSE_BLOCK_OFFS[self.block] + self.word
self.mask = mask
self.shift = esptool._mask_to_shift(mask)
self.write_disable_bit = write_disable_bit
self.read_disable_bit = read_disable_bit
self.register_name = register_name
self.efuse_type = efuse_type
self.description = description
def get_raw(self):
""" Return the raw (unformatted) numeric value of the efuse bits
Returns a simple integer or (for some subclasses) a bitstring.
"""
value = self.parent.read_efuse(self.data_reg_offs)
return (value & self.mask) >> self.shift
def get(self):
""" Get a formatted version of the efuse value, suitable for display """
return self.get_raw()
def is_readable(self):
""" Return true if the efuse is readable by software """
if self.read_disable_bit is None:
return True # read cannot be disabled
value = (self.parent.read_efuse(0) >> 16) & 0xF # RD_DIS values
return (value & (1 << self.read_disable_bit)) == 0
def disable_read(self):
if self.read_disable_bit is None:
raise esptool.FatalError("This efuse cannot be read-disabled")
rddis_reg_addr = efuse_write_reg_addr(0, 0)
self.parent.write_reg(rddis_reg_addr, 1 << (16 + self.read_disable_bit))
self.parent.write_efuses()
return self.get()
def is_writeable(self):
if self.write_disable_bit is None:
return True # write cannot be disabled
value = self.parent.read_efuse(0) & 0xFFFF # WR_DIS values
return (value & (1 << self.write_disable_bit)) == 0
def disable_write(self):
wrdis_reg_addr = efuse_write_reg_addr(0, 0)
self.parent.write_reg(wrdis_reg_addr, 1 << self.write_disable_bit)
self.parent.write_efuses()
return self.get()
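    # Note (derived from the reads above, added for clarity): word 0 of BLK0 holds
    # the protection flags - WR_DIS in bits [15:0] and RD_DIS in bits [19:16] -
    # which is why disable_read() shifts its mask by (16 + read_disable_bit).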
def burn(self, new_value):
raw_value = (new_value << self.shift) & self.mask
        # don't bother reading the old value as we can only set bits 0->1
write_reg_addr = efuse_write_reg_addr(self.block, self.word)
self.parent.write_reg(write_reg_addr, raw_value)
self.parent.write_efuses()
return self.get()
class EfuseMacField(EfuseField):
def get_raw(self):
# MAC values are high half of second efuse word, then first efuse word
words = [self.parent.read_efuse(self.data_reg_offs + word) for word in [1,0]]
# endian-swap into a bitstring
bitstring = struct.pack(">II", *words)
return bitstring[2:] # trim 2 byte CRC from the beginning
@staticmethod
def get_and_check(raw_mac, stored_crc):
computed_crc = EfuseMacField.calc_crc(raw_mac)
if computed_crc == stored_crc:
valid_msg = "(CRC 0x%02x OK)" % stored_crc
else:
valid_msg = "(CRC 0x%02x invalid - calculated 0x%02x)" % (stored_crc, computed_crc)
return "%s %s" % (hexify(raw_mac, ":"), valid_msg)
def get(self):
stored_crc = self.get_stored_crc()
return EfuseMacField.get_and_check(self.get_raw(), stored_crc)
def burn(self, new_value):
# Writing the BLK0 default MAC is not sensible, as it's written in the factory.
raise esptool.FatalError("Writing Factory MAC address is not supported")
def get_stored_crc(self):
return (self.parent.read_efuse(self.data_reg_offs + 1) >> 16) & 0xFF
@staticmethod
def calc_crc(raw_mac):
"""
This algorithm is the equivalent of esp_crc8() in ESP32 ROM code
This is CRC-8 w/ inverted polynomial value 0x8C & initial value 0x00.
"""
result = 0x00
for b in struct.unpack("B" * 6, raw_mac):
result ^= b
for _ in range(8):
lsb = result & 1
result >>= 1
if lsb != 0:
result ^= 0x8c
return result
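    # Usage sketch (added for illustration; the MAC bytes below are made up):
    # the byte returned by calc_crc() is meant to be compared against the CRC
    # stored alongside the factory MAC, as done in get()/get_and_check():
    #
    #   crc = EfuseMacField.calc_crc(b'\x24\x0a\xc4\x12\x34\x56')
    #   ok = (crc == stored_crc)  # stored_crc comes from get_stored_crc()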
class EfuseKeyblockField(EfuseField):
def get_raw(self):
words = self.get_words()
return struct.pack("<" + ("I" * len(words)), *words)
def get_key(self):
# Keys are stored in reverse byte order
result = self.get_raw()
result = result[::-1]
return result
def get_words(self):
num_words = self.parent.get_block_len() // 4
return [self.parent.read_efuse(self.data_reg_offs + word) for word in range(num_words)]
def get(self):
return hexify(self.get_raw(), " ")
def apply_34_encoding(self, inbits):
""" Takes 24 byte sequence to be represented in 3/4 encoding,
returns 8 words suitable for writing "encoded" to an efuse block
"""
def popcnt(b):
""" Return number of "1" bits set in 'b' """
return len([x for x in bin(b) if x == "1"])
outbits = b""
while len(inbits) > 0: # process in chunks of 6 bytes
bits = inbits[0:6]
inbits = inbits[6:]
xor_res = 0
mul_res = 0
index = 1
for b in struct.unpack("B" * 6, bits):
xor_res ^= b
mul_res += index * popcnt(b)
index += 1
outbits += bits
outbits += struct.pack("BB", xor_res, mul_res)
return struct.unpack("<" + "I" * (len(outbits) // 4), outbits)
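        # Worked size check (illustrative only): a 24-byte key is processed as
        # 4 chunks of 6 bytes; each chunk gains an XOR byte and a weighted
        # popcount byte, giving 4 * 8 = 32 bytes, i.e. the 8 words returned here.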
def burn_key(self, new_value):
new_value = new_value[::-1] # AES keys are stored in reverse order in efuse
return self.burn(new_value)
def burn(self, new_value):
key_len = self.parent.get_block_len()
if len(new_value) != key_len:
raise RuntimeError("Invalid new value length for key block (%d), %d is required" % len(new_value), key_len)
if self.parent.coding_scheme == CODING_SCHEME_34:
words = self.apply_34_encoding(new_value)
else:
words = struct.unpack("<" + ("I" * 8), new_value)
return self.burn_words(words)
def burn_words(self, words, word_offset=0):
write_reg_addr = efuse_write_reg_addr(self.block, self.word + word_offset)
for word in words:
self.parent.write_reg(write_reg_addr, word)
write_reg_addr += 4
warnings_before = self.parent.get_coding_scheme_warnings()
self.parent.write_efuses()
warnings_after = self.parent.get_coding_scheme_warnings()
if warnings_after & ~warnings_before != 0:
print("WARNING: Burning efuse block added coding scheme warnings 0x%x -> 0x%x. Encoding bug?" % (warnings_before, warnings_after))
return self.get()
class EfuseSpiPinField(EfuseField):
def get(self):
val = self.get_raw()
if val >= 30:
val += 2 # values 30,31 map to 32, 33
return val
def burn(self, new_value):
if new_value in [30, 31]:
raise esptool.FatalError("IO pins 30 & 31 cannot be set for SPI flash. 0-29, 32 & 33 only.")
if new_value > 33:
raise esptool.FatalError("IO pin %d cannot be set for SPI flash. 0-29, 32 & 33 only." % new_value)
if new_value > 30:
new_value -= 2 # values 32,33 map to 30, 31
return super(EfuseSpiPinField, self).burn(new_value)
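    # Round-trip sketch (illustrative): burn(32) stores the raw value 30 in the
    # efuse; a later get() sees raw 30 and maps it back to 32, so callers always
    # work with the real GPIO numbers 0-29, 32 and 33.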
class EfuseVRefField(EfuseField):
VREF_OFFSET = 1100 # ideal efuse value in mV
VREF_STEP_SIZE = 7 # 1 count in efuse == 7mV
VREF_SIGN_BIT = 0x10
VREF_MAG_BITS = 0x0F
def get(self):
val = self.get_raw()
# sign-magnitude format
if (val & self.VREF_SIGN_BIT):
val = -(val & self.VREF_MAG_BITS)
else:
val = (val & self.VREF_MAG_BITS)
val *= self.VREF_STEP_SIZE
return self.VREF_OFFSET + val
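    # Worked example (hypothetical raw readings, illustration only):
    #   raw 0x03 -> sign bit clear, magnitude 3 -> 1100 + 3 * 7 = 1121 mV
    #   raw 0x13 -> sign bit set,   magnitude 3 -> 1100 - 3 * 7 = 1079 mV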
def burn(self, new_value):
raise RuntimeError("Writing to VRef is not supported.")
class EfuseAdcPointCalibration(EfuseField):
TP_OFFSET = { # See TP_xxxx_OFFSET in esp_adc_cal.c in ESP-IDF
"ADC1_TP_LOW": 278,
"ADC2_TP_LOW": 421,
"ADC1_TP_HIGH": 3265,
"ADC2_TP_HIGH": 3406,
}
SIGN_BIT = (0x40, 0x100) # LOW, HIGH (2s complement format)
STEP_SIZE = 4
def get(self):
idx = 0 if self.register_name.endswith("LOW") else 1
sign_bit = self.SIGN_BIT[idx]
offset = self.TP_OFFSET[self.register_name]
raw = self.get_raw()
delta = (raw & (sign_bit - 1)) - (raw & sign_bit)
return offset + (delta * self.STEP_SIZE)
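    # Worked example for a LOW reading (hypothetical raw values, illustration only):
    #   raw 0x05 -> delta = 5            -> 278 + 5 * 4 = 298
    #   raw 0x7F -> delta = 63 - 64 = -1 -> 278 - 1 * 4 = 274
    # (278 is TP_OFFSET["ADC1_TP_LOW"], 0x40 is the LOW sign bit.)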
def dump(esp, _efuses, args):
""" Dump raw efuse data registers """
for block in range(len(EFUSE_BLOCK_OFFS)):
print("EFUSE block %d:" % block)
offsets = [x + EFUSE_BLOCK_OFFS[block] for x in range(EFUSE_BLOCK_LEN[block])]
print(" ".join(["%08x" % esp.read_efuse(offs) for offs in offsets]))
def summary(esp, efuses, args):
""" Print a human-readable summary of efuse contents """
ROW_FORMAT = "%-22s %-50s%s= %s %s %s"
human_output = (args.format == 'summary')
json_efuse = {}
if args.file != sys.stdout:
print("Saving efuse values to " + args.file.name)
if human_output:
print(ROW_FORMAT.replace("-50", "-12") % ("EFUSE_NAME", "Description", "", "[Meaningful Value]", "[Readable/Writeable]", "(Hex Value)"),file=args.file)
print("-" * 88,file=args.file)
for category in set(e.category for e in efuses):
if human_output:
print("%s fuses:" % category.title(),file=args.file)
for e in (e for e in efuses if e.category == category):
raw = e.get_raw()
try:
raw = "(0x%x)" % raw
except TypeError:
raw = ""
(readable, writeable) = (e.is_readable(), e.is_writeable())
if readable and writeable:
perms = "R/W"
elif readable:
perms = "R/-"
elif writeable:
perms = "-/W"
else:
perms = "-/-"
base_value = e.get()
value = str(base_value)
if not readable:
value = value.replace("0", "?")
if human_output:
print(ROW_FORMAT % (e.register_name, e.description, "\n " if len(value) > 20 else "", value, perms, raw),file=args.file)
if args.format == 'json':
json_efuse[e.register_name] = {
'value': base_value if readable else value,
'readable':readable,
'writeable':writeable}
if human_output:
print("",file=args.file)
if human_output:
sdio_force = efuses["XPD_SDIO_FORCE"]
sdio_tieh = efuses["XPD_SDIO_TIEH"]
sdio_reg = efuses["XPD_SDIO_REG"]
if sdio_force.get() == 0:
print("Flash voltage (VDD_SDIO) determined by GPIO12 on reset (High for 1.8V, Low/NC for 3.3V).",file=args.file)
elif sdio_reg.get() == 0:
print("Flash voltage (VDD_SDIO) internal regulator disabled by efuse.",file=args.file)
elif sdio_tieh.get() == 0:
print("Flash voltage (VDD_SDIO) set to 1.8V by efuse.",file=args.file)
else:
print("Flash voltage (VDD_SDIO) set to 3.3V by efuse.",file=args.file)
warnings = efuses.get_coding_scheme_warnings()
if warnings:
print("WARNING: Coding scheme has encoding bit error warnings (0x%x)" % warnings,file=args.file)
if args.file != sys.stdout:
args.file.close()
print("Done")
if args.format == 'json':
json.dump(json_efuse,args.file,sort_keys=True,indent=4)
print("")
def burn_efuse(esp, efuses, args):
efuse = efuses[args.efuse_name]
old_value = efuse.get()
if efuse.efuse_type == "flag":
if args.new_value not in [None, 1]:
raise esptool.FatalError("Efuse %s is type 'flag'. New value is not accepted for this efuse (will always burn 0->1)" % efuse.register_name)
args.new_value = 1
if old_value:
print("Efuse %s is already burned." % efuse.register_name)
return
elif efuse.efuse_type == "int":
if args.new_value is None:
raise esptool.FatalError("New value required for efuse %s" % efuse.register_name)
elif efuse.efuse_type == "spipin":
if args.new_value is None or args.new_value == 0:
raise esptool.FatalError("New value required for efuse %s" % efuse.register_name)
elif efuse.efuse_type == "bitcount":
if args.new_value is None: # find the first unset bit and set it
args.new_value = old_value
bit = 1
while args.new_value == old_value:
args.new_value = bit | old_value
bit <<= 1
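            # Example of the search above (illustrative): old_value 0b0011 keeps
            # ORing in 1, 2, 4, ... and stops at 0b0111, i.e. the lowest unset
            # bit is the one that gets burned.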
if args.new_value & (efuse.mask >> efuse.shift) != args.new_value:
raise esptool.FatalError("Value mask for efuse %s is 0x%x. Value 0x%x is too large." % (efuse.register_name, efuse.mask >> efuse.shift, args.new_value))
if args.new_value | old_value != args.new_value:
print("WARNING: New value contains some bits that cannot be cleared (value will be 0x%x)" % (old_value | args.new_value))
confirm("Burning efuse %s (%s) 0x%x -> 0x%x" % (efuse.register_name, efuse.description, old_value, args.new_value | old_value), args)
burned_value = efuse.burn(args.new_value)
if burned_value == old_value:
raise esptool.FatalError("Efuse %s failed to burn. Protected?" % efuse.register_name)
def read_protect_efuse(esp, efuses, args):
efuse = efuses[args.efuse_name]
if not efuse.is_readable():
print("Efuse %s is already read protected" % efuse.register_name)
else:
# make full list of which efuses will be disabled (ie share a read disable bit)
all_disabling = [e for e in efuses if e.read_disable_bit == efuse.read_disable_bit]
names = ", ".join(e.register_name for e in all_disabling)
confirm("Permanently read-disabling efuse%s %s" % ("s" if len(all_disabling) > 1 else "",names), args)
efuse.disable_read()
def write_protect_efuse(esp, efuses, args):
efuse = efuses[args.efuse_name]
if not efuse.is_writeable():
print("]fuse %s is already write protected" % efuse.register_name)
else:
# make full list of which efuses will be disabled (ie share a write disable bit)
all_disabling = [e for e in efuses if e.write_disable_bit == efuse.write_disable_bit]
names = ", ".join(e.register_name for e in all_disabling)
confirm("Permanently write-disabling efuse%s %s" % ("s" if len(all_disabling) > 1 else "",names), args)
efuse.disable_write()
def burn_key(esp, efuses, args):
# check block choice
if args.block in ["flash_encryption", "BLK1"]:
block_num = 1
elif args.block in ["secure_boot", "BLK2"]:
block_num = 2
elif args.block == "BLK3":
block_num = 3
else:
raise RuntimeError("args.block argument not in list!")
num_bytes = efuses.get_block_len()
# check keyfile
keyfile = args.keyfile
    keyfile.seek(0, 2)  # seek to end
size = keyfile.tell()
keyfile.seek(0)
if size != num_bytes:
raise esptool.FatalError("Incorrect key file size %d. Key file must be %d bytes (%d bits) of raw binary key data." %
(size, num_bytes, num_bytes * 8))
# check existing data
efuse = [e for e in efuses if e.register_name == "BLK%d" % block_num][0]
original = efuse.get_raw()
EMPTY_KEY = b'\x00' * num_bytes
if original != EMPTY_KEY:
if not args.force_write_always:
raise esptool.FatalError("Key block already has value %s." % efuse.get())
else:
print("WARNING: Key appears to have a value already. Trying anyhow, due to --force-write-always (result will be bitwise OR of new and old values.)")
if not efuse.is_writeable():
if not args.force_write_always:
raise esptool.FatalError("The efuse block has already been write protected.")
else:
print("WARNING: Key appears to be write protected. Trying anyhow, due to --force-write-always")
msg = "Write key in efuse block %d. " % block_num
if args.no_protect_key:
msg += "The key block will left readable and writeable (due to --no-protect-key)"
else:
msg += "The key block will be read and write protected (no further changes or readback)"
confirm(msg, args)
new_value = keyfile.read(num_bytes)
new = efuse.burn_key(new_value)
print("Burned key data. New value: %s" % (new,))
if not args.no_protect_key:
print("Disabling read/write to key efuse block...")
efuse.disable_write()
efuse.disable_read()
if efuse.is_readable():
print("WARNING: Key does not appear to have been read protected. Perhaps read disable efuse is write protected?")
if efuse.is_writeable():
print("WARNING: Key does not appear to have been write protected. Perhaps write disable efuse is write protected?")
else:
print("Key is left unprotected as per --no-protect-key argument.")
def burn_block_data(esp, efuses, args):
num_bytes = efuses.get_block_len()
offset = args.offset
data = args.datafile.read()
if offset >= num_bytes:
raise RuntimeError("Invalid offset: Key block only holds %d bytes." % num_bytes)
if len(data) > num_bytes - offset:
raise RuntimeError("Data will not fit: Key block size %d bytes, data file is %d bytes" % (num_bytes, len(data)))
if efuses.coding_scheme == CODING_SCHEME_34:
if offset % 6 != 0:
raise RuntimeError("Device has 3/4 Coding Scheme. Can only write at offsets which are a multiple of 6.")
if len(data) % 6 != 0:
raise RuntimeError("Device has 3/4 Coding Scheme. Can only write data lengths which are a multiple of 6 (data is %d bytes)" % len(data))
efuse = [e for e in efuses if e.register_name == args.block.upper()][0]
if not args.force_write_always and \
efuse.get_raw() != b'\x00' * num_bytes:
raise esptool.FatalError("Efuse block already has values written.")
if efuses.coding_scheme == CODING_SCHEME_NONE:
pad = offset % 4
if pad != 0: # left-pad to a word boundary
data = (b'\x00' * pad) + data
offset -= pad
pad = len(data) % 4
if pad != 0: # right-pad to a word boundary
data += (b'\x00' * (4 - pad))
words = struct.unpack("<" + "I" * (len(data) // 4), data)
word_offset = offset // 4
else: # CODING_SCHEME_34
words = efuse.apply_34_encoding(data)
word_offset = (offset // 6) * 2
confirm("Burning efuse %s (%s) with %d bytes of data at offset %d in the block" % (efuse.register_name, efuse.description, len(data), offset), args)
efuse.burn_words(words, word_offset)
def set_flash_voltage(esp, efuses, args):
sdio_force = efuses["XPD_SDIO_FORCE"]
sdio_tieh = efuses["XPD_SDIO_TIEH"]
sdio_reg = efuses["XPD_SDIO_REG"]
# check efuses aren't burned in a way which makes this impossible
if args.voltage == 'OFF' and sdio_reg.get() != 0:
raise esptool.FatalError("Can't set flash regulator to OFF as XPD_SDIO_REG efuse is already burned")
if args.voltage == '1.8V' and sdio_tieh.get() != 0:
raise esptool.FatalError("Can't set regulator to 1.8V is XPD_SDIO_TIEH efuse is already burned")
if args.voltage == 'OFF':
msg = """
Disable internal flash voltage regulator (VDD_SDIO). SPI flash will need to be powered from an external source.
The following efuse is burned: XPD_SDIO_FORCE.
It is possible to later re-enable the internal regulator (%s) by burning an additional efuse
""" % ("to 3.3V" if sdio_tieh.get() != 0 else "to 1.8V or 3.3V")
elif args.voltage == '1.8V':
msg = """
Set internal flash voltage regulator (VDD_SDIO) to 1.8V.
The following efuses are burned: XPD_SDIO_FORCE, XPD_SDIO_REG.
It is possible to later increase the voltage to 3.3V (permanently) by burning additional efuse XPD_SDIO_TIEH
"""
elif args.voltage == '3.3V':
msg = """
Enable internal flash voltage regulator (VDD_SDIO) to 3.3V.
The following efuses are burned: XPD_SDIO_FORCE, XPD_SDIO_REG, XPD_SDIO_TIEH.
"""
confirm(msg, args)
sdio_force.burn(1) # Disable GPIO12
if args.voltage != 'OFF':
sdio_reg.burn(1) # Enable internal regulator
if args.voltage == '3.3V':
sdio_tieh.burn(1)
print("VDD_SDIO setting complete.")
def adc_info(esp, efuses, args):
adc_vref = efuses["ADC_VREF"]
blk3_reserve = efuses["BLK3_PART_RESERVE"]
vref_raw = adc_vref.get_raw()
if vref_raw == 0:
print("ADC VRef calibration: None (1100mV nominal)")
else:
print("ADC VRef calibration: %dmV" % adc_vref.get())
if blk3_reserve.get():
print("ADC readings stored in efuse BLK3:")
print(" ADC1 Low reading (150mV): %d" % efuses["ADC1_TP_LOW"].get())
print(" ADC1 High reading (850mV): %d" % efuses["ADC1_TP_HIGH"].get())
print(" ADC2 Low reading (150mV): %d" % efuses["ADC2_TP_LOW"].get())
print(" ADC2 High reading (850mV): %d" % efuses["ADC2_TP_HIGH"].get())
class CustomMacAddressField(object):
"""
The custom MAC field uses the formatting according to the specification for version 1
"""
def __init__(self, efuses):
self.efuse = [e for e in efuses if e.register_name == 'BLK3'][0]
self.parent = self.efuse.parent
def get_raw(self):
words = [self.parent.read_efuse(self.efuse.data_reg_offs + word) for word in [0, 1]]
bitstring = struct.pack("<II", *words)
        return bitstring[1:-1]  # trim the CRC byte from the beginning and one unused byte from the end
def get_stored_crc(self):
return self.parent.read_efuse(self.efuse.data_reg_offs) & 0xFF
@staticmethod
def calc_crc(raw_mac):
return EfuseMacField.calc_crc(raw_mac)
def get(self):
return EfuseMacField.get_and_check(self.get_raw(), self.get_stored_crc())
def get_version(self):
"""
Returns the version of the MAC field
        The version is stored in the block at bit positions [191:184]. That is in word 5 (counting from zero), in the
        most significant byte of that word (hence the shift by 3 * 8 = 24 bits).
"""
return (self.parent.read_efuse(self.efuse.data_reg_offs + 5) >> 24) & 0xFF
def get_block(self, new_mac, new_version):
"""
Returns a byte array which can be written directly to BLK3
"""
num_words = self.parent.get_block_len() // 4
words = [self.parent.read_efuse(self.efuse.data_reg_offs + word) for word in range(num_words)]
B = sum([x << (i * 32) for i, x in enumerate(words)]) # integer representation of the whole BLK content
new_mac_b = struct.pack(">Q", new_mac)[2:] # Q has 8-bytes. Removing two MSB bytes to get a 6-byte MAC
new_mac_rev = struct.unpack("<Q", new_mac_b + b'\x00\x00')[0] # bytes in reversed order
crc = self.calc_crc(new_mac_b)
# MAC fields according to esp_efuse_table.c:
# - CRC - offset 0 bits, length 8 bits
# - MAC - offset 8 bits, length 48 bits
# - MAC version - offset 184 bits, length 8 bits
B |= (crc & ((1 << 8) - 1)) << 0
B |= (new_mac_rev & ((1 << 48) - 1)) << 8
B |= (new_version & ((1 << 8) - 1)) << 184
return bytearray([(B >> i * 8) & 0xFF for i in range(self.parent.get_block_len())])
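    # Layout sketch (values are made up for illustration): for new_mac
    # 0xABCDEF010203 and new_version 1, the CRC of the MAC bytes lands in bits
    # [7:0], the byte-reversed MAC in bits [55:8] and the version byte in bits
    # [191:184] of the block image returned above.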
def burn_custom_mac(esp, efuses, args):
write_always = args.force_write_always
c = CustomMacAddressField(efuses)
old_version = c.get_version()
    new_version = old_version | 1  # Only version 1 MAC addresses are supported so far
if (not write_always and old_version != 0) or (write_always and old_version not in [0, new_version]):
raise esptool.FatalError("The version of the custom MAC Address is already burned ({})!".format(old_version))
old_mac_b = c.get_raw()
old_mac = struct.unpack(">Q", b'\x00\x00' + old_mac_b)[0]
new_mac_b = struct.pack(">Q", args.mac)[2:] # Q has 8-bytes. Removing two MSB bytes to get a 6-byte MAC
new_mac = args.mac
if (not write_always and old_mac != 0) or (write_always and new_mac | old_mac != new_mac):
raise esptool.FatalError("Custom MAC Address was previously burned ({})!".format(hexify(old_mac_b, ":")))
old_crc = c.get_stored_crc()
new_crc = c.calc_crc(new_mac_b)
if (not write_always and old_crc != 0) or (write_always and new_crc | old_crc != new_crc):
raise esptool.FatalError("The CRC of the custom MAC Address was previously burned ({})!".format(old_crc))
confirm("Burning efuse for custom MAC address {} (version {}, CRC 0x{:x}) -> {} (version {}, CRC 0x{:x})"
"".format(hexify(old_mac_b, ":"), old_version, old_crc, hexify(new_mac_b, ":"), new_version, new_crc), args)
with io.BytesIO(c.get_block(new_mac, new_version)) as buf:
args.do_not_confirm = True # Custom MAC burning was already confirmed. No need to ask twice.
        # behaviour of burn_block_data() for args.force_write_always is compatible
args.offset = 0
args.datafile = buf
args.block = 'BLK3'
burn_block_data(esp, efuses, args)
def get_custom_mac(esp, efuses, args):
c = CustomMacAddressField(efuses)
version = c.get_version()
if version > 0:
print("Custom MAC Address version {}: {}".format(version, c.get()))
else:
print("Custom MAC Address is not set in the device.")
def hexify(bitstring, separator=""):
try:
as_bytes = tuple(ord(b) for b in bitstring)
except TypeError: # python 3, items in bitstring already ints
as_bytes = tuple(b for b in bitstring)
return separator.join(("%02x" % b) for b in as_bytes)
def arg_auto_int(x):
return int(x, 0)
def mac_int(string):
if string.count(":") != 5:
raise argparse.ArgumentTypeError("MAC Address needs to be a 6-byte hexadecimal format separated by colons (:)!")
hexad = string.replace(":", "")
if len(hexad) != 12:
raise argparse.ArgumentTypeError("MAC Address needs to be a 6-byte hexadecimal number (12 hexadecimal characters)!")
return int(hexad, 16)
def main():
parser = argparse.ArgumentParser(description='espefuse.py v%s - ESP32 efuse get/set tool' % esptool.__version__, prog='espefuse')
parser.add_argument(
'--baud', '-b',
help='Serial port baud rate used when flashing/reading',
type=arg_auto_int,
default=os.environ.get('ESPTOOL_BAUD', esptool.ESPLoader.ESP_ROM_BAUD))
parser.add_argument(
'--port', '-p',
help='Serial port device',
default=os.environ.get('ESPTOOL_PORT', esptool.ESPLoader.DEFAULT_PORT))
parser.add_argument(
'--before',
help='What to do before connecting to the chip',
choices=['default_reset', 'no_reset', 'esp32r1', 'no_reset_no_sync'],
default='default_reset')
parser.add_argument('--do-not-confirm',
help='Do not pause for confirmation before permanently writing efuses. Use with caution.', action='store_true')
def add_force_write_always(p):
p.add_argument('--force-write-always', help="Write the efuse even if it looks like it's already been written, or is write protected. " +
"Note that this option can't disable write protection, or clear any bit which has already been set.", action='store_true')
subparsers = parser.add_subparsers(
dest='operation',
help='Run espefuse.py {command} -h for additional help')
subparsers.add_parser('dump', help='Dump raw hex values of all efuses')
p = subparsers.add_parser('summary',
help='Print human-readable summary of efuse values')
p.add_argument('--format', help='Select the summary format',choices=['summary','json'],default='summary')
p.add_argument('--file', help='File to save the efuse summary',type=argparse.FileType('w'),default=sys.stdout)
p = subparsers.add_parser('burn_efuse',
help='Burn the efuse with the specified name')
p.add_argument('efuse_name', help='Name of efuse register to burn',
choices=[efuse[0] for efuse in EFUSES])
    p.add_argument('new_value', help='New value to burn (not needed for flag-type efuses)', nargs='?', type=esptool.arg_auto_int)
p = subparsers.add_parser('read_protect_efuse',
help='Disable readback for the efuse with the specified name')
p.add_argument('efuse_name', help='Name of efuse register to burn',
choices=[efuse[0] for efuse in EFUSES if efuse[6] is not None]) # only allow if read_disable_bit is not None
p = subparsers.add_parser('write_protect_efuse',
help='Disable writing to the efuse with the specified name')
p.add_argument('efuse_name', help='Name of efuse register to burn',
choices=[efuse[0] for efuse in EFUSES])
p = subparsers.add_parser('burn_key',
help='Burn a 256-bit AES key to EFUSE BLK1,BLK2 or BLK3 (flash_encryption, secure_boot).')
p.add_argument('--no-protect-key', help='Disable default read- and write-protecting of the key. ' +
'If this option is not set, once the key is flashed it cannot be read back or changed.', action='store_true')
add_force_write_always(p)
p.add_argument('block', help='Key block to burn. "flash_encryption" is an alias for BLK1, ' +
'"secure_boot" is an alias for BLK2.', choices=["secure_boot", "flash_encryption","BLK1","BLK2","BLK3"])
p.add_argument('keyfile', help='File containing 256 bits of binary key data', type=argparse.FileType('rb'))
p = subparsers.add_parser('burn_block_data',
help="Burn non-key data to EFUSE BLK1, BLK2 or BLK3. " +
" Don't use this command to burn key data for Flash Encryption or Secure Boot, " +
"as the byte order of keys is swapped (use burn_key).")
p.add_argument('--offset', '-o', help='Byte offset in the efuse block', type=int, default=0)
add_force_write_always(p)
p.add_argument('block', help='Efuse block to burn.', choices=["BLK1","BLK2","BLK3"])
p.add_argument('datafile', help='File containing data to burn into the efuse block', type=argparse.FileType('rb'))
p = subparsers.add_parser('set_flash_voltage',
help='Permanently set the internal flash voltage regulator to either 1.8V, 3.3V or OFF. ' +
'This means GPIO12 can be high or low at reset without changing the flash voltage.')
p.add_argument('voltage', help='Voltage selection',
choices=['1.8V', '3.3V', 'OFF'])
p = subparsers.add_parser('adc_info',
help='Display information about ADC calibration data stored in efuse.')
p = subparsers.add_parser('burn_custom_mac',
help='Burn a 48-bit Custom MAC Address to EFUSE BLK3.')
p.add_argument('mac', help='Custom MAC Address to burn given in hexadecimal format with bytes separated by colons' +
' (e.g. AB:CD:EF:01:02:03).', type=mac_int)
add_force_write_always(p)
p = subparsers.add_parser('get_custom_mac',
help='Prints the Custom MAC Address.')
args = parser.parse_args()
print('espefuse.py v%s' % esptool.__version__)
if args.operation is None:
parser.print_help()
parser.exit(1)
# each 'operation' is a module-level function of the same name
operation_func = globals()[args.operation]
esp = esptool.ESP32ROM(args.port, baud=args.baud)
esp.connect(args.before)
# dict mapping register name to its efuse object
efuses = EspEfuses(esp)
operation_func(esp, efuses, args)
def _main():
try:
main()
except esptool.FatalError as e:
print('\nA fatal error occurred: %s' % e)
sys.exit(2)
if __name__ == '__main__':
_main()
| themadinventor/esptool | espefuse.py | Python | gpl-2.0 | 42,922 |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.requests import RequestException
from flexget.utils.soup import get_soup
from flexget.utils.search import torrent_availability, clean_symbols
from flexget.utils.tools import parse_filesize
log = logging.getLogger('limetorrents')
class Limetorrents(object):
"""
Limetorrents search plugin.
"""
schema = {
'oneOf': [
{'type': 'boolean'},
{
'type': 'object',
'properties': {
'category': {'type': 'string', 'enum': ['all', 'anime', 'applications', 'games', 'movies', 'music',
'tv', 'other'], 'default': 'all'},
'order_by': {'type': 'string', 'enum': ['date', 'seeds'], 'default': 'date'}
},
'additionalProperties': False
}
]
}
base_url = 'https://www.limetorrents.cc/'
errors = False
@plugin.internet(log)
def search(self, task, entry, config):
"""
Search for entries on Limetorrents
"""
if not isinstance(config, dict):
config = {'category': config}
order_by = ''
if isinstance(config.get('order_by'), str):
if config['order_by'] != 'date':
order_by = '{0}/1'.format(config['order_by'])
category = 'all'
if isinstance(config.get('category'), str):
category = '{0}'.format(config['category'])
entries = set()
for search_string in entry.get('search_strings', [entry['title']]):
# No special characters - use dashes instead of %20
cleaned_search_string = clean_symbols(search_string).replace(' ', '-')
query = 'search/{0}/{1}/{2}'.format(category, cleaned_search_string.encode('utf8'), order_by)
log.debug('Using search: %s; category: %s; ordering: %s', cleaned_search_string, category, order_by or 'default')
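            # For reference (hypothetical search term): with category 'tv' and
            # order_by 'seeds' the request below becomes something like
            # https://www.limetorrents.cc/search/tv/Some-Show-S01E01/seeds/1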
try:
page = task.requests.get(self.base_url + query)
log.debug('requesting: %s', page.url)
except RequestException as e:
log.error('Limetorrents request failed: %s', e)
continue
soup = get_soup(page.content)
if soup.find('a', attrs={'class': 'csprite_dl14'}) is not None:
for link in soup.findAll('a', attrs={'class': 'csprite_dl14'}):
row = link.find_parent('tr')
info_url = str(link.get('href'))
# Get the title from the URL as it's complete versus the actual Title text which gets cut off
title = str(link.next_sibling.get('href'))
title = title[:title.rfind('-torrent')].replace('-', ' ')
title = title[1:]
data = row.findAll('td', attrs={'class': 'tdnormal'})
size = str(data[1].text).replace(',', '')
seeds = int(row.find('td', attrs={'class': 'tdseed'}).text.replace(',', ''))
leeches = int(row.find('td', attrs={'class': 'tdleech'}).text.replace(',', ''))
size = parse_filesize(size)
e = Entry()
e['url'] = info_url
e['title'] = title
e['torrent_seeds'] = seeds
e['torrent_leeches'] = leeches
e['search_sort'] = torrent_availability(e['torrent_seeds'], e['torrent_leeches'])
e['content_size'] = size
entries.add(e)
return entries
@event('plugin.register')
def register_plugin():
plugin.register(Limetorrents, 'limetorrents', interfaces=['search'], api_ver=2)
| LynxyssCZ/Flexget | flexget/plugins/sites/limetorrents.py | Python | mit | 4,028 |
import py, pytest
def setup_module(mod):
mod.nose = py.test.importorskip("nose")
def test_nose_setup(testdir):
p = testdir.makepyfile("""
l = []
from nose.tools import with_setup
@with_setup(lambda: l.append(1), lambda: l.append(2))
def test_hello():
assert l == [1]
def test_world():
assert l == [1,2]
test_hello.setup = lambda: l.append(1)
test_hello.teardown = lambda: l.append(2)
""")
result = testdir.runpytest(p, '-p', 'nose')
result.stdout.fnmatch_lines([
"*2 passed*"
])
def test_setup_func_with_setup_decorator():
from _pytest.nose import call_optional
l = []
class A:
@pytest.fixture(autouse=True)
def f(self):
l.append(1)
call_optional(A(), "f")
assert not l
def test_setup_func_not_callable():
from _pytest.nose import call_optional
class A:
f = 1
call_optional(A(), "f")
def test_nose_setup_func(testdir):
p = testdir.makepyfile("""
from nose.tools import with_setup
l = []
def my_setup():
a = 1
l.append(a)
def my_teardown():
b = 2
l.append(b)
@with_setup(my_setup, my_teardown)
def test_hello():
print (l)
assert l == [1]
def test_world():
print (l)
assert l == [1,2]
""")
result = testdir.runpytest(p, '-p', 'nose')
result.stdout.fnmatch_lines([
"*2 passed*"
])
def test_nose_setup_func_failure(testdir):
p = testdir.makepyfile("""
from nose.tools import with_setup
l = []
my_setup = lambda x: 1
my_teardown = lambda x: 2
@with_setup(my_setup, my_teardown)
def test_hello():
print (l)
assert l == [1]
def test_world():
print (l)
assert l == [1,2]
""")
result = testdir.runpytest(p, '-p', 'nose')
result.stdout.fnmatch_lines([
"*TypeError: <lambda>()*"
])
def test_nose_setup_func_failure_2(testdir):
p = testdir.makepyfile("""
l = []
my_setup = 1
my_teardown = 2
def test_hello():
assert l == []
test_hello.setup = my_setup
test_hello.teardown = my_teardown
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_nose_setup_partial(testdir):
py.test.importorskip("functools")
p = testdir.makepyfile("""
from functools import partial
l = []
def my_setup(x):
a = x
l.append(a)
def my_teardown(x):
b = x
l.append(b)
my_setup_partial = partial(my_setup, 1)
my_teardown_partial = partial(my_teardown, 2)
def test_hello():
print (l)
assert l == [1]
def test_world():
print (l)
assert l == [1,2]
test_hello.setup = my_setup_partial
test_hello.teardown = my_teardown_partial
""")
result = testdir.runpytest(p, '-p', 'nose')
result.stdout.fnmatch_lines([
"*2 passed*"
])
def test_nose_test_generator_fixtures(testdir):
p = testdir.makepyfile("""
# taken from nose-0.11.1 unit_tests/test_generator_fixtures.py
from nose.tools import eq_
called = []
def outer_setup():
called.append('outer_setup')
def outer_teardown():
called.append('outer_teardown')
def inner_setup():
called.append('inner_setup')
def inner_teardown():
called.append('inner_teardown')
def test_gen():
called[:] = []
for i in range(0, 5):
yield check, i
def check(i):
expect = ['outer_setup']
for x in range(0, i):
expect.append('inner_setup')
expect.append('inner_teardown')
expect.append('inner_setup')
eq_(called, expect)
test_gen.setup = outer_setup
test_gen.teardown = outer_teardown
check.setup = inner_setup
check.teardown = inner_teardown
class TestClass(object):
def setup(self):
print ("setup called in %s" % self)
self.called = ['setup']
def teardown(self):
print ("teardown called in %s" % self)
eq_(self.called, ['setup'])
self.called.append('teardown')
def test(self):
print ("test called in %s" % self)
for i in range(0, 5):
yield self.check, i
def check(self, i):
print ("check called in %s" % self)
expect = ['setup']
#for x in range(0, i):
# expect.append('setup')
# expect.append('teardown')
#expect.append('setup')
eq_(self.called, expect)
""")
result = testdir.runpytest(p, '-p', 'nose')
result.stdout.fnmatch_lines([
"*10 passed*"
])
def test_module_level_setup(testdir):
testdir.makepyfile("""
from nose.tools import with_setup
items = {}
def setup():
items[1]=1
def teardown():
del items[1]
def setup2():
items[2] = 2
def teardown2():
del items[2]
def test_setup_module_setup():
assert items[1] == 1
@with_setup(setup2, teardown2)
def test_local_setup():
assert items[2] == 2
assert 1 not in items
""")
result = testdir.runpytest('-p', 'nose')
result.stdout.fnmatch_lines([
"*2 passed*",
])
def test_nose_style_setup_teardown(testdir):
testdir.makepyfile("""
l = []
def setup_module():
l.append(1)
def teardown_module():
del l[0]
def test_hello():
assert l == [1]
def test_world():
assert l == [1]
""")
result = testdir.runpytest('-p', 'nose')
result.stdout.fnmatch_lines([
"*2 passed*",
])
def test_nose_setup_ordering(testdir):
testdir.makepyfile("""
def setup_module(mod):
mod.visited = True
class TestClass:
def setup(self):
assert visited
def test_first(self):
pass
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*1 passed*",
])
def test_apiwrapper_problem_issue260(testdir):
    # this would end up trying to call an optional teardown on the class;
    # for plain unittests we don't want nose behaviour
testdir.makepyfile("""
import unittest
class TestCase(unittest.TestCase):
def setup(self):
#should not be called in unittest testcases
assert 0, 'setup'
def teardown(self):
#should not be called in unittest testcases
assert 0, 'teardown'
def setUp(self):
print('setup')
def tearDown(self):
print('teardown')
def test_fun(self):
pass
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines("*1 passed*")
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_setup_teardown_linking_issue265(testdir):
    # we accidentally didn't integrate nose setupstate with normal setupstate
# this test ensures that won't happen again
testdir.makepyfile('''
import pytest
class TestGeneric(object):
def test_nothing(self):
"""Tests the API of the implementation (for generic and specialized)."""
@pytest.mark.skipif("True", reason=
"Skip tests to check if teardown is skipped as well.")
class TestSkipTeardown(TestGeneric):
def setup(self):
"""Sets up my specialized implementation for $COOL_PLATFORM."""
raise Exception("should not call setup for skipped tests")
def teardown(self):
"""Undoes the setup."""
raise Exception("should not call teardown for skipped tests")
''')
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1, skipped=1)
def test_SkipTest_during_collection(testdir):
testdir.makepyfile("""
import nose
raise nose.SkipTest("during collection")
def test_failing():
assert False
""")
reprec = testdir.inline_run()
reprec.assertoutcome(skipped=1)
| geraldoandradee/pytest | testing/test_nose.py | Python | mit | 8,735 |
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A Globally Unique Identifier object. Mostly stolen from ASPN snippet.
"""
from pycopia import socket
from pycopia import ifconfig
from pycopia import timelib
from pycopia import sysrandom
class GUID(object):
'''
A globally unique identifier that combines ip, time, and random bits. Since the
time is listed first, you can sort records by guid. You can also extract the time
and ip if needed.
GUIDs make wonderful database keys. They require no access to the
database (to get the max index number), they are extremely unique, and they sort
automatically by time. GUIDs prevent key clashes when merging
two databases together, combining data, or generating keys in distributed
systems.
'''
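    # Rough layout of the generated string (35 hex characters, added as a note):
    #   [0:16]  milliseconds since the epoch, zero-padded hex
    #   [16:32] the four IP octets, 4 hex digits each
    #   [32:35] 12 random bits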
ip = ''
try:
ip = ifconfig.get_myaddress()
    except (socket.gaierror): # if we don't have an ip, default to something in the 10.x.x.x private range
ip = '10'
for i in range(3):
ip += '.' + str(sysrandom.randrange(1, 254))
# leave space for ip v6 (65K in each sub)
hexip = ''.join(["%04x" % long(i) for i in ip.split('.')])
lastguid = ""
def __init__(self, guid=None):
'''Use no args if you want the guid generated (this is the normal method)
or send a string-typed guid to generate it from the string'''
if guid is None:
self.guid = self.__class__.lastguid
while self.guid == self.__class__.lastguid:
# time part
now = long(timelib.now() * 1000)
self.guid = ("%016x" % now) + self.__class__.hexip
# random part
self.guid += ("%03x" % (sysrandom.randrange(0, 4095)))
self.__class__.lastguid = self.guid
elif type(guid) == type(self): # if a GUID object, copy its value
self.guid = str(guid)
else: # if a string, just save its value
assert self._check(guid), guid + " is not a valid GUID!"
self.guid = guid
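    # The assert in __init__ references _check(), which is not defined anywhere
    # in this module; the implementation below is an assumed minimal validator
    # (35 hex characters, matching the format produced above), not original code.
    def _check(self, guid):
        if len(guid) != 35:
            return 0
        try:
            long(guid, 16)
        except ValueError:
            return 0
        return 1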
def __eq__(self, other):
'''Return true if both GUID strings are equal'''
if isinstance(other, self.__class__):
return str(self) == str(other)
return 0
def __str__(self):
'''Returns the string value of this guid'''
return self.guid
def time(self):
'''Extracts the time portion out of the guid and returns the
number of milliseconds since the epoch'''
return long(self.guid[0:16], 16)
def ip(self):
'''Extracts the ip portion out of the guid and returns it
as a string like 10.10.10.10'''
ip = []
index = 16
        # The original left this method unfinished (marked XXX); the loop below
        # reverses the encoding used for hexip above: 4 hex digits per octet.
        for _ in range(4):
            ip.append(str(int(self.guid[index:index + 4], 16)))
            index += 4
        return '.'.join(ip)
def _test(argv):
guid = GUID()
guid_s = str(guid)
guid2 = GUID(guid_s)
print guid
print guid2
assert guid == guid2
if __name__ == "__main__":
import sys
_test(sys.argv)
| kdart/pycopia | core/pycopia/guid.py | Python | apache-2.0 | 3,500 |
"""Show apps report."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import click
from treadmill import cli
from treadmill.cli.scheduler import fetch_report, print_report
from treadmill import restclient
def init():
"""Return top level command handler."""
@click.command()
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.option('--match', help='Server name pattern match')
@click.option('--partition', help='Partition name pattern match')
def allocs(match, partition):
"""View allocations report."""
report = fetch_report(
'allocations', match, partition
)
report = report.loc[
~report.name.str.startswith('_default/')
].reset_index(drop=True)
print_report(report)
return allocs
| Morgan-Stanley/treadmill | lib/python/treadmill/cli/scheduler/allocs.py | Python | apache-2.0 | 905 |
from __future__ import print_function
import os
import sys
import appdirs
import pkg_resources
import yaml
# The config is lazy-loaded
config = None
def load(key, required=True):
"""Return the value associated with the key"""
config_dir = appdirs.user_config_dir('jiradoc')
config_file = os.path.join(config_dir, 'config.yml')
if not os.path.isfile(config_file):
_create_user_config(config_file)
global config
if config is None:
_load_config(config_file)
if key in config:
return config[key]
elif required:
sys.exit("Configuration is missing required key: " + key)
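# Usage sketch (the key name below is made up; real keys depend on the user's
# config.yml): jira_url = load('jira_url') returns the configured value, or
# exits with an error message if a required key is missing.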
def _create_user_config(config_file):
"""Create the user's configuration file"""
print('Creating configuration file at: ' + config_file)
config_dir = os.path.dirname(config_file)
if not os.path.exists(config_dir):
os.makedirs(config_dir)
src = pkg_resources.resource_stream(__name__, 'data/sample_config.yml')
with open(config_file, 'w') as dst:
dst.writelines(src)
def _load_config(config_file):
"""Load the user's configuration file."""
print('Loading configuration: ' + config_file)
try:
global config
with open(config_file) as f:
config = yaml.load(f)
except IOError as e:
sys.exit('Failed to load config: ' + str(e))
| lucianovdveekens/jiradoc | jiradoc/config.py | Python | mit | 1,356 |
import unittest
import os
import json
from processes.insert_movies2companies import Main
from processes.postgres import Postgres
try:
DB_SERVER = os.environ['DB_SERVER']
DB_PORT = os.environ['DB_PORT']
DB_DATABASE = os.environ['DB_DATABASE']
DB_USER = os.environ['DB_USER']
DB_PASSWORD = os.environ['DB_PASSWORD']
except KeyError:
try:
from processes.GLOBALS import DB_SERVER, DB_PORT, DB_DATABASE, DB_USER, DB_PASSWORD
except ImportError:
print("No parameters provided")
exit()
with open('test_data.json') as data_file:
data = json.load(data_file)
class TestInsertMovies2Companies(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.main = Main()
cls.pg = Postgres(DB_SERVER, DB_PORT, DB_DATABASE, DB_USER, DB_PASSWORD)
# We insert the corresponding film into kino.movies
# due to the foreign key constraint.
sql = """insert into kino.movies (imdb_id, title, runtime, rated, released, orig_language, plot)
values ('tt2562232', 'Birdman or (The Unexpected Virtue of Ignorance)', 119, 'R', '2014-08-27', 'en', 'Some plot')"""
cls.pg.pg_cur.execute(sql)
cls.pg.pg_conn.commit()
def test_insert_movies2companies(self):
destination_data = self.main.run(data)
# Inserted into kino.company_roles
self.pg.pg_cur.execute('select role from kino.company_roles')
result = self.pg.pg_cur.fetchall()
self.assertEqual(result, [('Production',)])
# Inserted into kino.companies
self.pg.pg_cur.execute('select name from kino.companies')
result = self.pg.pg_cur.fetchall()
expected = [(e['name'],) for e in data['tmdb_company']]
self.assertEqual(set(result),set(expected))
# Inserted into kino.movies2companies
sql = """select x.imdb_id, y.name, x.role
from kino.movies2companies x
join kino.companies y
on x.company_id = y.company_id"""
self.pg.pg_cur.execute(sql)
result = self.pg.pg_cur.fetchall()
expected = [(e['imdb_id'], e['name'], 'Production') for e in data['tmdb_company']]
self.assertEqual(set(result), set(expected))
# Check that correctly return the data we need for the destination topic
self.assertEqual(destination_data, None)
@classmethod
def tearDownClass(cls):
cls.pg = Postgres(DB_SERVER, DB_PORT, DB_DATABASE, DB_USER, DB_PASSWORD)
cls.pg.pg_cur.execute('delete from kino.movies2companies')
cls.pg.pg_cur.execute('delete from kino.companies')
cls.pg.pg_cur.execute('delete from kino.company_roles')
cls.pg.pg_cur.execute('delete from kino.movies')
cls.pg.pg_conn.commit()
if __name__ == '__main__':
unittest.main()
| kinoreel/kino-gather | processes/tests/test_insert_movies2companies.py | Python | mit | 2,844 |
# -*- coding: utf-8 -*-
###############################################################################
#
# SetPhotoLocation
# Sets the geo data (including latitude and longitude) for a specified photo.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class SetPhotoLocation(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the SetPhotoLocation Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(SetPhotoLocation, self).__init__(temboo_session, '/Library/Flickr/Geo/SetPhotoLocation')
def new_input_set(self):
return SetPhotoLocationInputSet()
def _make_result_set(self, result, path):
return SetPhotoLocationResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return SetPhotoLocationChoreographyExecution(session, exec_id, path)
class SetPhotoLocationInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the SetPhotoLocation
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Flickr (AKA the OAuth Consumer Key).)
"""
super(SetPhotoLocationInputSet, self)._set_input('APIKey', value)
def set_APISecret(self, value):
"""
Set the value of the APISecret input for this Choreo. ((required, string) The API Secret provided by Flickr (AKA the OAuth Consumer Secret).)
"""
super(SetPhotoLocationInputSet, self)._set_input('APISecret', value)
def set_AccessTokenSecret(self, value):
"""
Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret retrieved during the OAuth process.)
"""
super(SetPhotoLocationInputSet, self)._set_input('AccessTokenSecret', value)
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token retrieved during the OAuth process.)
"""
super(SetPhotoLocationInputSet, self)._set_input('AccessToken', value)
def set_Accuracy(self, value):
"""
Set the value of the Accuracy input for this Choreo. ((optional, integer) Recorded accuracy level of the location information. Current range is 1-16. Defaults to 16 if not specified.)
"""
super(SetPhotoLocationInputSet, self)._set_input('Accuracy', value)
def set_Context(self, value):
"""
Set the value of the Context input for this Choreo. ((optional, string) A numeric value representing the photo's location beyond latitude and longitude. For example, you can indicate that a photo was taken "indoors" or "outdoors". Set to 1 for indoors or 2 for outdoors.)
"""
super(SetPhotoLocationInputSet, self)._set_input('Context', value)
def set_Latitude(self, value):
"""
Set the value of the Latitude input for this Choreo. ((required, decimal) The latitude whose valid range is -90 to 90. Anything more than 6 decimal places will be truncated.)
"""
super(SetPhotoLocationInputSet, self)._set_input('Latitude', value)
def set_Longitude(self, value):
"""
Set the value of the Longitude input for this Choreo. ((required, decimal) The longitude whose valid range is -180 to 180. Anything more than 6 decimal places will be truncated.)
"""
super(SetPhotoLocationInputSet, self)._set_input('Longitude', value)
def set_PhotoID(self, value):
"""
Set the value of the PhotoID input for this Choreo. ((required, integer) The id of the photo to set location data for.)
"""
super(SetPhotoLocationInputSet, self)._set_input('PhotoID', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: xml and json. Defaults to json.)
"""
super(SetPhotoLocationInputSet, self)._set_input('ResponseFormat', value)
class SetPhotoLocationResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the SetPhotoLocation Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Flickr.)
"""
return self._output.get('Response', None)
class SetPhotoLocationChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return SetPhotoLocationResultSet(response, path)
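# Usage sketch (added for illustration; the session setup and the
# execute_with_results() call follow the Temboo SDK's Choreography interface
# and should be treated as assumptions, as should all input values):
#
#   choreo = SetPhotoLocation(session)      # session: a valid TembooSession
#   inputs = choreo.new_input_set()
#   inputs.set_PhotoID("12345")
#   inputs.set_Latitude(52.52)
#   inputs.set_Longitude(13.405)
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())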
| jordanemedlock/psychtruths | temboo/core/Library/Flickr/Geo/SetPhotoLocation.py | Python | apache-2.0 | 5,835 |
"""
Django settings for angulardjango project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3z6lr)pdm2lvg9(xxx4ae53d6w-$3&xf4bvmb#r_puv6li4*%_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# 'django_extensions',
'rest_framework',
'tweeter',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'angulardjango.urls'
WSGI_APPLICATION = 'angulardjango.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
)
}
| johnofkorea/tweeter | angulardjango/settings.py | Python | mit | 2,248 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Invoice.invoice_ttitle'
db.delete_column(u'django_fastbill_invoice', 'invoice_ttitle')
# Adding field 'Invoice.invoice_title'
db.add_column(u'django_fastbill_invoice', 'invoice_title',
self.gf('django.db.models.fields.CharField')(default='', max_length=300),
keep_default=False)
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'Invoice.invoice_ttitle'
raise RuntimeError("Cannot reverse this migration. 'Invoice.invoice_ttitle' and its values cannot be restored.")
        # The following code is provided here to aid in writing a correct migration
        # Adding field 'Invoice.invoice_ttitle'
db.add_column(u'django_fastbill_invoice', 'invoice_ttitle',
self.gf('django.db.models.fields.CharField')(max_length=300),
keep_default=False)
# Deleting field 'Invoice.invoice_title'
db.delete_column(u'django_fastbill_invoice', 'invoice_title')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'django_fastbill.article': {
'Meta': {'object_name': 'Article'},
'allow_multiple': ('django.db.models.fields.BooleanField', [], {}),
'article_number': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'checkout_url': ('django.db.models.fields.URLField', [], {'max_length': '300'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'currency_code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'description': ('django.db.models.fields.TextField', [], {}),
'is_addon': ('django.db.models.fields.BooleanField', [], {}),
'return_url_cancel': ('django.db.models.fields.URLField', [], {'max_length': '300'}),
'return_url_success': ('django.db.models.fields.URLField', [], {'max_length': '300'}),
'setup_fee': ('django.db.models.fields.FloatField', [], {}),
'subscription_cancellation': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'subscription_duration': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'subscription_duration_follow': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'subscription_interval': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'subscription_number_events': ('django.db.models.fields.IntegerField', [], {}),
'subscription_trial': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'unit_price': ('django.db.models.fields.FloatField', [], {}),
'vat_percent': ('django.db.models.fields.FloatField', [], {})
},
u'django_fastbill.customer': {
'Meta': {'object_name': 'Customer'},
'changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changedata_url': ('django.db.models.fields.URLField', [], {'max_length': '500'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'customer_ext_uid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'customer_id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'customer_number': ('django.db.models.fields.IntegerField', [], {}),
'dashboard_url': ('django.db.models.fields.URLField', [], {'max_length': '500'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'fastbill_customer'", 'unique': 'True', 'null': 'True', 'to': u"orm['auth.User']"})
},
u'django_fastbill.invoice': {
'Meta': {'object_name': 'Invoice'},
'affiliate': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'cash_discount_days': ('django.db.models.fields.IntegerField', [], {}),
'cash_discount_percent': ('django.db.models.fields.FloatField', [], {}),
'changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'country_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'currency_code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'customer_id': ('django.db.models.fields.IntegerField', [], {}),
'customer_number': ('django.db.models.fields.IntegerField', [], {}),
'days_for_payment': ('django.db.models.fields.IntegerField', [], {}),
'delivery_date': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'document_url': ('django.db.models.fields.URLField', [], {'max_length': '500'}),
'due_date': ('django.db.models.fields.DateTimeField', [], {}),
'fastbill_customer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'invoices'", 'null': 'True', 'to': u"orm['django_fastbill.Customer']"}),
'fastbill_subscription': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'invoices'", 'null': 'True', 'to': u"orm['django_fastbill.Subscription']"}),
'introtext': ('django.db.models.fields.TextField', [], {}),
'invoice_date': ('django.db.models.fields.DateTimeField', [], {}),
'invoice_id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'invoice_number': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'invoice_title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'is_canceled': ('django.db.models.fields.BooleanField', [], {}),
'paid_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'payment_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'paypal_url': ('django.db.models.fields.URLField', [], {'max_length': '500'}),
'sub_total': ('django.db.models.fields.FloatField', [], {}),
'subscription_id': ('django.db.models.fields.IntegerField', [], {}),
'subscription_invoice_counter': ('django.db.models.fields.IntegerField', [], {}),
'template_id': ('django.db.models.fields.IntegerField', [], {}),
'total': ('django.db.models.fields.FloatField', [], {}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'vat_total': ('django.db.models.fields.FloatField', [], {})
},
u'django_fastbill.subscription': {
'Meta': {'object_name': 'Subscription'},
'article_number': ('django.db.models.fields.IntegerField', [], {}),
'cancellation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'customer_id': ('django.db.models.fields.IntegerField', [], {}),
'expiration_date': ('django.db.models.fields.DateTimeField', [], {}),
'fastbill_article': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subscriptions'", 'null': 'True', 'to': u"orm['django_fastbill.Article']"}),
'fastbill_customer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subscriptions'", 'null': 'True', 'to': u"orm['django_fastbill.Customer']"}),
'invoice_title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'last_event': ('django.db.models.fields.DateTimeField', [], {}),
'next_event': ('django.db.models.fields.DateTimeField', [], {}),
'quantity': ('django.db.models.fields.IntegerField', [], {}),
'start': ('django.db.models.fields.DateTimeField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'subscription_ext_uid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'subscription_id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'x_attributes': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['django_fastbill'] | phelmig/django-fastbill | django_fastbill/migrations/0002_auto__del_field_invoice_invoice_ttitle__add_field_invoice_invoice_titl.py | Python | mit | 12,299 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-11-28 08:15
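# Adds an optional email address and a "last email sent" timestamp to the User model.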
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [("users", "0005_personalnote_rework")]

    operations = [
migrations.AddField(
model_name="user",
name="email",
field=models.EmailField(blank=True, max_length=254),
),
migrations.AddField(
model_name="user",
name="last_email_send",
field=models.DateTimeField(blank=True, null=True),
),
    ] | CatoTH/OpenSlides | server/openslides/users/migrations/0006_user_email.py | Python | mit | 614 |