text (string, 6-947k) | repo_name (string, 5-100) | path (string, 4-231) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) |
---|---|---|---|---|---|---|
import requests
import re,time
import xbmc,xbmcaddon
from ..scraper import Scraper
from ..common import clean_title,clean_search,send_log,error_log
dev_log = xbmcaddon.Addon('script.module.universalscrapers').getSetting("dev_log")
requests.packages.urllib3.disable_warnings()
s = requests.session()
User_Agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
class filmapik(Scraper):
domains = ['https://www.filmapik.io']
name = "Filmapik"
sources = []
def __init__(self):
self.base_link = 'https://www.filmapik.io'
def scrape_movie(self, title, year, imdb, debrid = False):
try:
start_time = time.time()
search_id = clean_search(title.lower())
start_url = '%s/?s=%s' %(self.base_link,search_id.replace(' ','+'))
#print 'start>>>> '+start_url
headers={'User-Agent':User_Agent}
html = requests.get(start_url,headers=headers,timeout=5).content
match = re.compile('data-movie-id=.+?href="(.+?)".+?<h2>(.+?)</h2>',re.DOTALL).findall(html)
for item_url, name in match:
#print 'clean name > '+clean_title(name).lower()
if not clean_title(search_id).lower() == clean_title(name).lower():
continue
if not year in name:
continue
item_url = item_url + 'play'
mode = 'movie'
#print item_url
self.get_source(item_url,mode,title,year,'','',start_time)
return self.sources
except Exception, argument:
if dev_log == 'true':
error_log(self.name,argument)
return self.sources
def scrape_episode(self,title, show_year, year, season, episode, imdb, tvdb, debrid = False):
try:
start_time = time.time()
search_id = clean_search(title.lower())
start_url = '%s/?s=%s' %(self.base_link,search_id.replace(' ','+'))
headers={'User-Agent':User_Agent}
html = requests.get(start_url,headers=headers,timeout=5).content
match = re.compile('data-movie-id=.+?href="(.+?)".+?<h2>(.+?)</h2>',re.DOTALL).findall(html)
for item_url, name in match:
#print item_url
if clean_title(search_id).lower() == clean_title(name).lower():
item_url = self.base_link + '/episodes/%s-%sx%s/play' %(search_id.replace(' ','-'),season,episode)
#print item_url
mode = 'tv'
self.get_source(item_url,mode,title,year,season,episode,start_time)
return self.sources
except Exception, argument:
if dev_log == 'true':
error_log(self.name,argument)
return self.sources
def get_source(self,item_url,mode,title,year,season,episode,start_time):
try:
#print 'cfwd > '+item_url
headers={'User-Agent':User_Agent}
OPEN = requests.get(item_url,headers=headers,timeout=20).content
#print OPEN
if mode == 'movie':
match = re.compile('<div class="player_nav" id="referNav">(.+?)<div class="swiper-wrapper" style="padding-bottom: 10px;">',re.DOTALL).findall(OPEN)
else:
match = re.compile('<div class="player_nav" id="referNav">(.+?)<div class="movies-list-wrap mlw-category">',re.DOTALL).findall(OPEN)
Sources = re.compile('href="(.+?)">(.+?)</a>',re.DOTALL).findall(str(match))
count = 0
for embFile, server in Sources:
if 'G-SHARER' not in server:
if 'FAST' in server:
#print embFile
qual = server.replace(' ','').replace('FAST','').replace('360p','')
#print qual
OPEN1 = requests.get(embFile,headers=headers,timeout=10).content
#print OPEN1
sources1 = re.compile('<iframe.+?src="(.+?)"',re.DOTALL).findall(OPEN1)[1]
#print sources1
OPEN2 = requests.get(sources1,headers=headers,timeout=10).content
match2 = re.compile('"file":"(.+?)"',re.DOTALL).findall(OPEN2)
for link in match2:
#print link
count +=1
self.sources.append({'source': self.name, 'quality': qual, 'scraper': self.name, 'url': link,'direct': False})
else:
#print embFile
qual = 'SD'
#print qual
OPEN1 = requests.get(embFile,headers=headers,timeout=10).content
#print OPEN1
sources1 = re.compile('<iframe.+?src="(.+?)"',re.DOTALL).findall(OPEN1)[1]
host = sources1.split('//')[1].replace('www.','')
host = host.split('/')[0].lower()
host = host.split('.')[0]
count +=1
self.sources.append({'source': host, 'quality': qual, 'scraper': self.name, 'url': sources1,'direct': False})
if dev_log=='true':
end_time = time.time() - start_time
send_log(self.name,end_time,count,title,year, season=season,episode=episode)
except:
pass
#filmapik().scrape_movie('tomb raider', '2018','')
#filmapik().scrape_episode('the resident', '2018', '', '1', '2', '', '')
| repotvsupertuga/tvsupertuga.repository | script.module.universalscrapers/lib/universalscrapers/scraperplugins/unsure/filmapik.py | Python | gpl-2.0 | 5,801 | 0.022755 |
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.internal.app import possible_app
class PossibleBrowser(possible_app.PossibleApp):
"""A browser that can be controlled.
Call Create() to launch the browser and begin manipulating it.
"""
def __init__(self, browser_type, target_os, supports_tab_control):
super(PossibleBrowser, self).__init__(app_type=browser_type,
target_os=target_os)
self._supports_tab_control = supports_tab_control
self._credentials_path = None
def __repr__(self):
return 'PossibleBrowser(app_type=%s)' % self.app_type
@property
def browser_type(self):
return self.app_type
@property
def supports_tab_control(self):
return self._supports_tab_control
def _InitPlatformIfNeeded(self):
raise NotImplementedError()
def Create(self, finder_options):
raise NotImplementedError()
def SupportsOptions(self, browser_options):
"""Tests for extension support."""
raise NotImplementedError()
def IsRemote(self):
return False
def RunRemote(self):
pass
def UpdateExecutableIfNeeded(self):
pass
def last_modification_time(self):
return -1
def SetCredentialsPath(self, credentials_path):
self._credentials_path = credentials_path
| catapult-project/catapult-csm | telemetry/telemetry/internal/browser/possible_browser.py | Python | bsd-3-clause | 1,414 | 0.010608 |
import json
f = open('text-stripped-3.json')
out = open('text-lines.json', 'w')
start_obj = json.load(f)
end_obj = {'data': []}
characters_on_stage = []
currently_speaking = None
last_scene = '1.1'
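# Assumed shape of the input objects (inferred from the branches below, not
# documented in the original file): every entry carries a 'type' of 'entrance',
# 'exeunt', 'exit', 'speaker tag' or 'line', plus 'characters', 'speaker',
# 'identifier' and 'text' fields where that type needs them.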
for i in range(len(start_obj['data'])):
obj = start_obj['data'][i]
if obj['type'] == 'entrance':
if any(char in characters_on_stage for char in obj['characters']):
raise Exception('Character tried to enter stage when already on stage at object ' + str(i))
characters_on_stage = characters_on_stage + obj['characters']
elif obj['type'] == 'exeunt':
characters_on_stage = []
elif obj['type'] == 'exit':
characters_on_stage = [char for char in characters_on_stage if char not in obj['characters']]
elif obj['type'] == 'speaker tag':
if obj['speaker'] not in characters_on_stage:
raise Exception('Character tried to speak when not on stage at object ' + str(i), start_obj['data'][i + 1])
currently_speaking = obj['speaker']
elif obj['type'] == 'line':
if currently_speaking == None:
raise Exception('A line did not have an associated speaker at object ' + str(i))
identifier_info = obj['identifier'].split('.')
scene = identifier_info[0] + '.' + identifier_info[1]
#if scene != last_scene:
# if len(characters_on_stage) != 0:
# print('Warning: scene ' + scene + ' just started with ' + str(characters_on_stage) + ' still on stage')
last_scene = scene
end_obj['data'].append({
'type': 'line',
'identifier': obj['identifier'],
'text': obj['text'].strip(),
'speaker': currently_speaking,
'characters': characters_on_stage
})
if len(characters_on_stage) == 0:
currently_speaking = None
json.dump(end_obj, out)
| SyntaxBlitz/syntaxblitz.github.io | mining-lear/process/step6.py | Python | mit | 1,654 | 0.024788 |
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2011 Gareth McMullin <[email protected]>
## Copyright (C) 2012-2013 Uwe Hermann <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##
import sigrokdecode as srd
'''
OUTPUT_PYTHON format:
Packet:
[<ptype>, <pdata>]
<ptype>, <pdata>:
- 'SOP', None
- 'SYM', <sym>
- 'BIT', <bit>
- 'STUFF BIT', None
- 'EOP', None
<sym>:
- 'J', 'K', 'SE0', or 'SE1'
<bit>:
- 0 or 1
- Note: Symbols like SE0, SE1, and the J that's part of EOP don't yield 'BIT'.
'''
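# Illustrative OUTPUT_PYTHON stream for one short packet (made-up values, not a
# real capture; shown only to make the format above concrete):
#   ['SOP', None], ['SYM', 'K'], ['BIT', 0], ['SYM', 'J'], ['BIT', 0], ...,
#   ['SYM', 'SE0'], ['SYM', 'SE0'], ['SYM', 'J'], ['EOP', None]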
# Low-/full-speed symbols.
# Note: Low-speed J and K are inverted compared to the full-speed J and K!
symbols = {
'low-speed': {
# (<dp>, <dm>): <symbol/state>
(0, 0): 'SE0',
(1, 0): 'K',
(0, 1): 'J',
(1, 1): 'SE1',
},
'full-speed': {
# (<dp>, <dm>): <symbol/state>
(0, 0): 'SE0',
(1, 0): 'J',
(0, 1): 'K',
(1, 1): 'SE1',
},
}
bitrates = {
'low-speed': 1500000, # 1.5Mb/s (+/- 1.5%)
'full-speed': 12000000, # 12Mb/s (+/- 0.25%)
}
sym_idx = {
'J': 0,
'K': 1,
'SE0': 2,
'SE1': 3,
}
class Decoder(srd.Decoder):
api_version = 2
id = 'usb_signalling'
name = 'USB signalling'
longname = 'Universal Serial Bus (LS/FS) signalling'
desc = 'USB (low-speed and full-speed) signalling protocol.'
license = 'gplv2+'
inputs = ['logic']
outputs = ['usb_signalling']
channels = (
{'id': 'dp', 'name': 'D+', 'desc': 'USB D+ signal'},
{'id': 'dm', 'name': 'D-', 'desc': 'USB D- signal'},
)
options = (
{'id': 'signalling', 'desc': 'Signalling',
'default': 'full-speed', 'values': ('full-speed', 'low-speed')},
)
annotations = (
('sym-j', 'J symbol'),
('sym-k', 'K symbol'),
('sym-se0', 'SE0 symbol'),
('sym-se1', 'SE1 symbol'),
('sop', 'Start of packet (SOP)'),
('eop', 'End of packet (EOP)'),
('bit', 'Bit'),
('stuffbit', 'Stuff bit'),
)
annotation_rows = (
('bits', 'Bits', (4, 5, 6, 7)),
('symbols', 'Symbols', (0, 1, 2, 3)),
)
def __init__(self):
self.samplerate = None
self.oldsym = 'J' # The "idle" state is J.
self.ss_sop = None
self.ss_block = None
self.samplenum = 0
self.syms = []
self.bitrate = None
self.bitwidth = None
self.bitnum = 0
self.samplenum_target = None
self.oldpins = None
self.consecutive_ones = 0
self.state = 'IDLE'
def start(self):
self.out_python = self.register(srd.OUTPUT_PYTHON)
self.out_ann = self.register(srd.OUTPUT_ANN)
def metadata(self, key, value):
if key == srd.SRD_CONF_SAMPLERATE:
self.samplerate = value
self.bitrate = bitrates[self.options['signalling']]
self.bitwidth = float(self.samplerate) / float(self.bitrate)
self.halfbit = int(self.bitwidth / 2)
def putpx(self, data):
self.put(self.samplenum, self.samplenum, self.out_python, data)
def putx(self, data):
self.put(self.samplenum, self.samplenum, self.out_ann, data)
def putpm(self, data):
s, h = self.samplenum, self.halfbit
self.put(self.ss_block - h, s + h, self.out_python, data)
def putm(self, data):
s, h = self.samplenum, self.halfbit
self.put(self.ss_block - h, s + h, self.out_ann, data)
def putpb(self, data):
s, h = self.samplenum, self.halfbit
self.put(s - h, s + h, self.out_python, data)
def putb(self, data):
s, h = self.samplenum, self.halfbit
self.put(s - h, s + h, self.out_ann, data)
def set_new_target_samplenum(self):
bitpos = self.ss_sop + (self.bitwidth / 2)
bitpos += self.bitnum * self.bitwidth
self.samplenum_target = int(bitpos)
def wait_for_sop(self, sym):
# Wait for a Start of Packet (SOP), i.e. a J->K symbol change.
if sym != 'K':
self.oldsym = sym
return
self.ss_sop = self.samplenum
self.set_new_target_samplenum()
self.putpx(['SOP', None])
self.putx([4, ['SOP', 'S']])
self.state = 'GET BIT'
def handle_bit(self, sym, b):
if self.consecutive_ones == 6 and b == '0':
# Stuff bit.
self.putpb(['STUFF BIT', None])
self.putb([7, ['Stuff bit: %s' % b, 'SB: %s' % b, '%s' % b]])
self.putb([sym_idx[sym], ['%s' % sym]])
self.consecutive_ones = 0
else:
# Normal bit (not a stuff bit).
self.putpb(['BIT', b])
self.putb([6, ['%s' % b]])
self.putb([sym_idx[sym], ['%s' % sym]])
if b == '1':
self.consecutive_ones += 1
else:
self.consecutive_ones = 0
def get_eop(self, sym):
# EOP: SE0 for >= 1 bittime (usually 2 bittimes), then J.
self.syms.append(sym)
self.putpb(['SYM', sym])
self.putb([sym_idx[sym], ['%s' % sym, '%s' % sym[0]]])
self.bitnum += 1
self.set_new_target_samplenum()
self.oldsym = sym
if self.syms[-2:] == ['SE0', 'J']:
# Got an EOP.
self.putpm(['EOP', None])
self.putm([5, ['EOP', 'E']])
self.bitnum, self.syms, self.state = 0, [], 'IDLE'
self.consecutive_ones = 0
def get_bit(self, sym):
if sym == 'SE0':
# Start of an EOP. Change state, run get_eop() for this bit.
self.state = 'GET EOP'
self.ss_block = self.samplenum
self.get_eop(sym)
return
self.syms.append(sym)
self.putpb(['SYM', sym])
b = '0' if self.oldsym != sym else '1'
self.handle_bit(sym, b)
self.bitnum += 1
self.set_new_target_samplenum()
self.oldsym = sym
def decode(self, ss, es, data):
if self.samplerate is None:
raise Exception("Cannot decode without samplerate.")
for (self.samplenum, pins) in data:
# State machine.
if self.state == 'IDLE':
# Ignore identical samples early on (for performance reasons).
if self.oldpins == pins:
continue
self.oldpins = pins
sym = symbols[self.options['signalling']][tuple(pins)]
self.wait_for_sop(sym)
elif self.state in ('GET BIT', 'GET EOP'):
# Wait until we're in the middle of the desired bit.
if self.samplenum < self.samplenum_target:
continue
sym = symbols[self.options['signalling']][tuple(pins)]
if self.state == 'GET BIT':
self.get_bit(sym)
elif self.state == 'GET EOP':
self.get_eop(sym)
else:
raise Exception('Invalid state: %s' % self.state)
| salberin/libsigrokdecode | decoders/usb_signalling/pd.py | Python | gpl-3.0 | 7,715 | 0.002333 |
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
import re
from openerp import netsvc
from openerp.osv import osv, fields
class partner(osv.osv):
""""""
_name = 'res.partner'
_inherits = { }
_inherit = [ 'res.partner' ]
_columns = {
'authorization_ids': fields.one2many('nautical.authorization', 'partner_id', string='Authorizations'),
'historical_record_ids': fields.one2many('nautical.partner_record', 'partner_id', string='historical_record_ids'),
'owned_craft_ids': fields.one2many('nautical.craft', 'owner_id', string='Owned Crafts'),
}
_defaults = {
}
_constraints = [
]
partner()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| adhoc-dev/odoo-nautical | nautical/partner.py | Python | agpl-3.0 | 957 | 0.014629 |
# Database
DB_NAME = 'censusreporter_ke'
DB_USER = 'censusreporter_ke'
DB_PASSWORD = 'censusreporter_ke'
| callmealien/wazimap_zambia | censusreporter/api/config.py | Python | mit | 106 | 0 |
from flask import Flask, render_template, request
from lib.work import work
from lib.loaders.policedata import PoliceData
from lib.loaders.populationdata import PopulationData
from lib.loaders.childpovertydata import ChildPovertyData
from lib.loaders.cancerdata import CancerData
import json
app = Flask(__name__)
@app.route('/')
def root():
return render_template('index.html', police=request.args.get('police', 'false'),
population=request.args.get('population', 'false'),
child_poverty=request.args.get('childpoverty', 'false'),
cancer=request.args.get('cancer', 'false'),
no_line=request.args.get('noline', 'false'),
heat_map=request.args.get('heatmap', 'false'),
step=float(request.args.get('step', '0.2')),
realistic=request.args.get('realistic', 'false'))
@app.route('/data')
def data():
police = request.args.get('police', 'true')
population = request.args.get('population', 'false')
child_poverty = request.args.get('childpoverty', 'false')
cancer = request.args.get('cancer', 'false')
step = float(request.args.get('step', '0.2'))
data_sets = []
if police == 'true':
data_set = PoliceData()
data_set.load()
data_sets.append(data_set)
if population == 'true':
data_set = PopulationData()
data_set.load()
data_sets.append(data_set)
if child_poverty == 'true':
data_set = ChildPovertyData()
data_set.load()
data_sets.append(data_set)
if cancer == 'true':
data_set = CancerData()
data_set.load()
data_sets.append(data_set)
output = {}
average = 0
for data_set in data_sets:
data = work(step, data_set)
output[data_set.NAME] = data
average += data['average_line'][0]['latitude']
average /= len(data_sets)
output['average'] = [{'latitude': average, 'longitude': -5}, {'latitude': average, 'longitude': 2}]
return json.dumps(output)
if __name__ == '__main__':
app.run(debug=True)
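# Illustrative request against the /data endpoint (parameter names taken from the
# handler above; host and port are assumptions for a local debug run):
#   GET http://127.0.0.1:5000/data?police=true&population=true&step=0.5
# returns a JSON object keyed by data set name plus an overall 'average' line.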
| mattyhall/North-South-Divide | divide.py | Python | bsd-3-clause | 2,269 | 0.009255 |
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, fields, api, exceptions, _
class StockPickingWave(models.Model):
_inherit = 'stock.picking.wave'
@api.one
def _count_confirmed_pickings(self):
self.num_confirmed = len(self.picking_ids.filtered(lambda x: x.state ==
'confirmed'))
@api.one
def _count_assigned_pickings(self):
self.num_assigned = len(self.picking_ids.filtered(lambda x: x.state ==
'assigned'))
pickings_products = fields.One2many(
'stock.move', 'wave', string='Products', readonly=True)
pickings_operations = fields.One2many(
'stock.pack.operation', 'wave', string='Operations', readonly=True)
num_confirmed = fields.Integer(
compute="_count_confirmed_pickings", string="Confirmed pickings")
num_assigned = fields.Integer(
compute="_count_assigned_pickings", string="Assigned pickings")
partner = fields.Many2one('res.partner', 'Partner')
@api.multi
def confirm_picking(self):
picking_obj = self.env['stock.picking']
for wave in self:
pickings = picking_obj.search([('wave_id', '=', wave.id),
('state', '=', 'draft')])
pickings.action_assign()
wave.state = 'in_progress'
return True
@api.one
def button_check_availability(self):
pickings = self.picking_ids.filtered(lambda x: x.state == 'confirmed')
pickings.action_assign()
# The old API is used here because the parent method updates the context
def action_transfer(self, cr, uid, ids, context=None):
picking_obj = self.pool['stock.picking']
wave = self.browse(cr, uid, ids[0], context=context)
pickings = wave.picking_ids.filtered(lambda x: x.state == 'assigned')
c = context.copy()
c.update({'origin_wave': wave.id})
return picking_obj.do_enter_transfer_details(
cr, uid, pickings.ids, context=c)
@api.multi
def _get_pickings_domain(self):
self.ensure_one()
cond = [('wave_id', '=', False),
('state', 'not in', ['done', 'cancel'])]
if self.partner.child_ids:
cond.extend(['|', ('partner_id', '=', self.partner.id),
('partner_id', 'in',
self.partner.child_ids.ids)])
elif self.partner:
cond.extend([('partner_id', '=', self.partner.id)])
return cond
@api.multi
@api.onchange('partner')
def onchange_partner(self):
self.ensure_one()
cond = self._get_pickings_domain()
return {'domain': {'picking_ids': cond}}
@api.multi
def done(self):
for wave in self:
for picking in wave.picking_ids:
if picking.state not in ('cancel', 'done'):
raise exceptions.Warning(_(
'Some pickings are not transferred. '
'Please transfer pickings before set wave to done'))
return super(StockPickingWave, self).done()
| factorlibre/odoomrp-wip | stock_picking_wave_management/models/stock_picking_wave.py | Python | agpl-3.0 | 3,408 | 0.000293 |
# Time: O(n * k^2), n is the number of the words, k is the max length of the words.
# Space: O(n * k)
# Given a list of unique words. Find all pairs of indices (i, j)
# in the given list, so that the concatenation of the two words,
# i.e. words[i] + words[j] is a palindrome.
#
# Example 1:
# Given words = ["bat", "tab", "cat"]
# Return [[0, 1], [1, 0]]
# The palindromes are ["battab", "tabbat"]
# Example 2:
# Given words = ["abcd", "dcba", "lls", "s", "sssll"]
# Return [[0, 1], [1, 0], [3, 2], [2, 4]]
# The palindromes are ["dcbaabcd", "abcddcba", "slls", "llssssll"]
import collections
class Solution(object):
def palindromePairs(self, words):
"""
:type words: List[str]
:rtype: List[List[int]]
"""
res = []
lookup = {}
for i, word in enumerate(words):
lookup[word] = i
for i in xrange(len(words)):
for j in xrange(len(words[i]) + 1):
prefix = words[i][j:]
suffix = words[i][:j]
if prefix == prefix[::-1] and \
suffix[::-1] in lookup and lookup[suffix[::-1]] != i:
res.append([i, lookup[suffix[::-1]]])
if j > 0 and suffix == suffix[::-1] and \
prefix[::-1] in lookup and lookup[prefix[::-1]] != i:
res.append([lookup[prefix[::-1]], i])
return res
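# Illustrative check (a sketch, not part of the original solution):
#   Solution().palindromePairs(["abcd", "dcba", "lls", "s", "sssll"])
# should yield the index pairs from Example 2 above (pair order may differ).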
# Time: O(n * k^2), n is the number of the words, k is the max length of the words.
# Space: O(n * k^2)
# Manacher solution.
class Solution_TLE(object):
def palindromePairs(self, words):
"""
:type words: List[str]
:rtype: List[List[int]]
"""
def manacher(s, P):
def preProcess(s):
if not s:
return ['^', '$']
T = ['^']
for c in s:
T += ["#", c]
T += ['#', '$']
return T
T = preProcess(s)
center, right = 0, 0
for i in xrange(1, len(T) - 1):
i_mirror = 2 * center - i
if right > i:
P[i] = min(right - i, P[i_mirror])
else:
P[i] = 0
while T[i + 1 + P[i]] == T[i - 1 - P[i]]:
P[i] += 1
if i + P[i] > right:
center, right = i, i + P[i]
prefix, suffix = collections.defaultdict(list), collections.defaultdict(list)
for i, word in enumerate(words):
P = [0] * (2 * len(word) + 3)
manacher(word, P)
for j in xrange(len(P)):
if j - P[j] == 1:
prefix[word[(j + P[j]) / 2:]].append(i)
if j + P[j] == len(P) - 2:
suffix[word[:(j - P[j]) / 2]].append(i)
res = []
for i, word in enumerate(words):
for j in prefix[word[::-1]]:
if j != i:
res.append([i, j])
for j in suffix[word[::-1]]:
if len(word) != len(words[j]):
res.append([j, i])
return res
# Time: O(n * k^2), n is the number of the words, k is the max length of the words.
# Space: O(n * k)
# Trie solution.
class TrieNode:
def __init__(self):
self.word_idx = -1
self.leaves = {}
def insert(self, word, i):
cur = self
for c in word:
if not c in cur.leaves:
cur.leaves[c] = TrieNode()
cur = cur.leaves[c]
cur.word_idx = i
def find(self, s, idx, res):
cur = self
for i in reversed(xrange(len(s))):
if s[i] in cur.leaves:
cur = cur.leaves[s[i]]
if cur.word_idx not in (-1, idx) and \
self.is_palindrome(s, i - 1):
res.append([cur.word_idx, idx])
else:
break
def is_palindrome(self, s, j):
i = 0
while i <= j:
if s[i] != s[j]:
return False
i += 1
j -= 1
return True
class Solution_MLE(object):
def palindromePairs(self, words):
"""
:type words: List[str]
:rtype: List[List[int]]
"""
res = []
trie = TrieNode()
for i in xrange(len(words)):
trie.insert(words[i], i)
for i in xrange(len(words)):
trie.find(words[i], i, res)
return res
| kamyu104/LeetCode | Python/palindrome-pairs.py | Python | mit | 4,503 | 0.001777 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.exceptions import ValidationError
class AccountInvoice(models.Model):
_inherit = 'account.invoice'
transaction_ids = fields.Many2many('payment.transaction', 'account_invoice_transaction_rel', 'invoice_id', 'transaction_id',
string='Transactions', copy=False, readonly=True)
authorized_transaction_ids = fields.Many2many('payment.transaction', compute='_compute_authorized_transaction_ids',
string='Authorized Transactions', copy=False, readonly=True)
@api.depends('transaction_ids')
def _compute_authorized_transaction_ids(self):
for trans in self:
trans.authorized_transaction_ids = trans.transaction_ids.filtered(lambda t: t.state == 'authorized')
@api.multi
def get_portal_last_transaction(self):
self.ensure_one()
return self.transaction_ids.get_last_transaction()
@api.multi
def _create_payment_transaction(self, vals):
'''Similar to self.env['payment.transaction'].create(vals) but the values are filled in from the
current invoices' fields (e.g. the partner or the currency).
:param vals: The values to create a new payment.transaction.
:return: The newly created payment.transaction record.
'''
# Ensure the currencies are the same.
currency = self[0].currency_id
if any([inv.currency_id != currency for inv in self]):
raise ValidationError(_('A transaction can\'t be linked to invoices having different currencies.'))
# Ensure the partner are the same.
partner = self[0].partner_id
if any([inv.partner_id != partner for inv in self]):
raise ValidationError(_('A transaction can\'t be linked to invoices having different partners.'))
# Try to retrieve the acquirer. However, fallback to the token's acquirer.
acquirer_id = vals.get('acquirer_id')
acquirer = None
payment_token_id = vals.get('payment_token_id')
if payment_token_id:
payment_token = self.env['payment.token'].sudo().browse(payment_token_id)
# Check payment_token/acquirer matching or take the acquirer from token
if acquirer_id:
acquirer = self.env['payment.acquirer'].browse(acquirer_id)
if payment_token and payment_token.acquirer_id != acquirer:
raise ValidationError(_('Invalid token found! Token acquirer %s != %s') % (
payment_token.acquirer_id.name, acquirer.name))
if payment_token and payment_token.partner_id != partner:
raise ValidationError(_('Invalid token found! Token partner %s != %s') % (
payment_token.partner.name, partner.name))
else:
acquirer = payment_token.acquirer_id
# Check an acquirer is there.
if not acquirer_id and not acquirer:
raise ValidationError(_('A payment acquirer is required to create a transaction.'))
if not acquirer:
acquirer = self.env['payment.acquirer'].browse(acquirer_id)
# Check a journal is set on acquirer.
if not acquirer.journal_id:
raise ValidationError(_('A journal must be specified of the acquirer %s.' % acquirer.name))
if not acquirer_id and acquirer:
vals['acquirer_id'] = acquirer.id
vals.update({
'amount': sum(self.mapped('residual')),
'currency_id': currency.id,
'partner_id': partner.id,
'invoice_ids': [(6, 0, self.ids)],
})
transaction = self.env['payment.transaction'].create(vals)
# Process directly if payment_token
if transaction.payment_token_id:
transaction.s2s_do_transaction()
return transaction
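# Illustrative call (a sketch; the acquirer record is an assumption):
#   invoices._create_payment_transaction({'acquirer_id': acquirer.id})
# creates a single transaction covering the residual amount of all the invoices
# and processes it immediately when a payment token is passed in vals.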
@api.multi
def payment_action_capture(self):
self.authorized_transaction_ids.s2s_capture_transaction()
@api.multi
def payment_action_void(self):
self.authorized_transaction_ids.s2s_void_transaction()
| t3dev/odoo | addons/payment/models/account_invoice.py | Python | gpl-3.0 | 4,255 | 0.00423 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Any, Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._iot_hub_client_enums import *
class ArmIdentity(msrest.serialization.Model):
"""ArmIdentity.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id: Principal Id.
:vartype principal_id: str
:ivar tenant_id: Tenant Id.
:vartype tenant_id: str
:ivar type: The type of identity used for the resource. The type 'SystemAssigned,UserAssigned'
includes both an implicitly created identity and a set of user assigned identities. The type
'None' will remove any identities from the service. Possible values include: "SystemAssigned",
"UserAssigned", "SystemAssigned, UserAssigned", "None".
:vartype type: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.ResourceIdentityType
:ivar user_assigned_identities: Dictionary of :code:`<ArmUserIdentity>`.
:vartype user_assigned_identities: dict[str,
~azure.mgmt.iothub.v2021_03_03_preview.models.ArmUserIdentity]
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{ArmUserIdentity}'},
}
def __init__(
self,
*,
type: Optional[Union[str, "ResourceIdentityType"]] = None,
user_assigned_identities: Optional[Dict[str, "ArmUserIdentity"]] = None,
**kwargs
):
"""
:keyword type: The type of identity used for the resource. The type
'SystemAssigned,UserAssigned' includes both an implicitly created identity and a set of user
assigned identities. The type 'None' will remove any identities from the service. Possible
values include: "SystemAssigned", "UserAssigned", "SystemAssigned, UserAssigned", "None".
:paramtype type: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.ResourceIdentityType
:keyword user_assigned_identities: Dictionary of :code:`<ArmUserIdentity>`.
:paramtype user_assigned_identities: dict[str,
~azure.mgmt.iothub.v2021_03_03_preview.models.ArmUserIdentity]
"""
super(ArmIdentity, self).__init__(**kwargs)
self.principal_id = None
self.tenant_id = None
self.type = type
self.user_assigned_identities = user_assigned_identities
class ArmUserIdentity(msrest.serialization.Model):
"""ArmUserIdentity.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id:
:vartype principal_id: str
:ivar client_id:
:vartype client_id: str
"""
_validation = {
'principal_id': {'readonly': True},
'client_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'client_id': {'key': 'clientId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ArmUserIdentity, self).__init__(**kwargs)
self.principal_id = None
self.client_id = None
class CertificateBodyDescription(msrest.serialization.Model):
"""The JSON-serialized X509 Certificate.
:ivar certificate: base-64 representation of the X509 leaf certificate .cer file or just .pem
file content.
:vartype certificate: str
"""
_attribute_map = {
'certificate': {'key': 'certificate', 'type': 'str'},
}
def __init__(
self,
*,
certificate: Optional[str] = None,
**kwargs
):
"""
:keyword certificate: base-64 representation of the X509 leaf certificate .cer file or just
.pem file content.
:paramtype certificate: str
"""
super(CertificateBodyDescription, self).__init__(**kwargs)
self.certificate = certificate
class CertificateDescription(msrest.serialization.Model):
"""The X509 Certificate.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar properties: The description of an X509 CA Certificate.
:vartype properties: ~azure.mgmt.iothub.v2021_03_03_preview.models.CertificateProperties
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The name of the certificate.
:vartype name: str
:ivar etag: The entity tag.
:vartype etag: str
:ivar type: The resource type.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'etag': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'properties': {'key': 'properties', 'type': 'CertificateProperties'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
properties: Optional["CertificateProperties"] = None,
**kwargs
):
"""
:keyword properties: The description of an X509 CA Certificate.
:paramtype properties: ~azure.mgmt.iothub.v2021_03_03_preview.models.CertificateProperties
"""
super(CertificateDescription, self).__init__(**kwargs)
self.properties = properties
self.id = None
self.name = None
self.etag = None
self.type = None
class CertificateListDescription(msrest.serialization.Model):
"""The JSON-serialized array of Certificate objects.
:ivar value: The array of Certificate objects.
:vartype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.CertificateDescription]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[CertificateDescription]'},
}
def __init__(
self,
*,
value: Optional[List["CertificateDescription"]] = None,
**kwargs
):
"""
:keyword value: The array of Certificate objects.
:paramtype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.CertificateDescription]
"""
super(CertificateListDescription, self).__init__(**kwargs)
self.value = value
class CertificateProperties(msrest.serialization.Model):
"""The description of an X509 CA Certificate.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar subject: The certificate's subject name.
:vartype subject: str
:ivar expiry: The certificate's expiration date and time.
:vartype expiry: ~datetime.datetime
:ivar thumbprint: The certificate's thumbprint.
:vartype thumbprint: str
:ivar is_verified: Determines whether certificate has been verified.
:vartype is_verified: bool
:ivar created: The certificate's create date and time.
:vartype created: ~datetime.datetime
:ivar updated: The certificate's last update date and time.
:vartype updated: ~datetime.datetime
:ivar certificate: The certificate content.
:vartype certificate: str
"""
_validation = {
'subject': {'readonly': True},
'expiry': {'readonly': True},
'thumbprint': {'readonly': True},
'is_verified': {'readonly': True},
'created': {'readonly': True},
'updated': {'readonly': True},
}
_attribute_map = {
'subject': {'key': 'subject', 'type': 'str'},
'expiry': {'key': 'expiry', 'type': 'rfc-1123'},
'thumbprint': {'key': 'thumbprint', 'type': 'str'},
'is_verified': {'key': 'isVerified', 'type': 'bool'},
'created': {'key': 'created', 'type': 'rfc-1123'},
'updated': {'key': 'updated', 'type': 'rfc-1123'},
'certificate': {'key': 'certificate', 'type': 'str'},
}
def __init__(
self,
*,
certificate: Optional[str] = None,
**kwargs
):
"""
:keyword certificate: The certificate content.
:paramtype certificate: str
"""
super(CertificateProperties, self).__init__(**kwargs)
self.subject = None
self.expiry = None
self.thumbprint = None
self.is_verified = None
self.created = None
self.updated = None
self.certificate = certificate
class CertificatePropertiesWithNonce(msrest.serialization.Model):
"""The description of an X509 CA Certificate including the challenge nonce issued for the Proof-Of-Possession flow.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar subject: The certificate's subject name.
:vartype subject: str
:ivar expiry: The certificate's expiration date and time.
:vartype expiry: ~datetime.datetime
:ivar thumbprint: The certificate's thumbprint.
:vartype thumbprint: str
:ivar is_verified: Determines whether certificate has been verified.
:vartype is_verified: bool
:ivar created: The certificate's create date and time.
:vartype created: ~datetime.datetime
:ivar updated: The certificate's last update date and time.
:vartype updated: ~datetime.datetime
:ivar verification_code: The certificate's verification code that will be used for proof of
possession.
:vartype verification_code: str
:ivar certificate: The certificate content.
:vartype certificate: str
"""
_validation = {
'subject': {'readonly': True},
'expiry': {'readonly': True},
'thumbprint': {'readonly': True},
'is_verified': {'readonly': True},
'created': {'readonly': True},
'updated': {'readonly': True},
'verification_code': {'readonly': True},
'certificate': {'readonly': True},
}
_attribute_map = {
'subject': {'key': 'subject', 'type': 'str'},
'expiry': {'key': 'expiry', 'type': 'rfc-1123'},
'thumbprint': {'key': 'thumbprint', 'type': 'str'},
'is_verified': {'key': 'isVerified', 'type': 'bool'},
'created': {'key': 'created', 'type': 'rfc-1123'},
'updated': {'key': 'updated', 'type': 'rfc-1123'},
'verification_code': {'key': 'verificationCode', 'type': 'str'},
'certificate': {'key': 'certificate', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(CertificatePropertiesWithNonce, self).__init__(**kwargs)
self.subject = None
self.expiry = None
self.thumbprint = None
self.is_verified = None
self.created = None
self.updated = None
self.verification_code = None
self.certificate = None
class CertificateVerificationDescription(msrest.serialization.Model):
"""The JSON-serialized leaf certificate.
:ivar certificate: base-64 representation of X509 certificate .cer file or just .pem file
content.
:vartype certificate: str
"""
_attribute_map = {
'certificate': {'key': 'certificate', 'type': 'str'},
}
def __init__(
self,
*,
certificate: Optional[str] = None,
**kwargs
):
"""
:keyword certificate: base-64 representation of X509 certificate .cer file or just .pem file
content.
:paramtype certificate: str
"""
super(CertificateVerificationDescription, self).__init__(**kwargs)
self.certificate = certificate
class CertificateWithNonceDescription(msrest.serialization.Model):
"""The X509 Certificate.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar properties: The description of an X509 CA Certificate including the challenge nonce
issued for the Proof-Of-Possession flow.
:vartype properties:
~azure.mgmt.iothub.v2021_03_03_preview.models.CertificatePropertiesWithNonce
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The name of the certificate.
:vartype name: str
:ivar etag: The entity tag.
:vartype etag: str
:ivar type: The resource type.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'etag': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'properties': {'key': 'properties', 'type': 'CertificatePropertiesWithNonce'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
properties: Optional["CertificatePropertiesWithNonce"] = None,
**kwargs
):
"""
:keyword properties: The description of an X509 CA Certificate including the challenge nonce
issued for the Proof-Of-Possession flow.
:paramtype properties:
~azure.mgmt.iothub.v2021_03_03_preview.models.CertificatePropertiesWithNonce
"""
super(CertificateWithNonceDescription, self).__init__(**kwargs)
self.properties = properties
self.id = None
self.name = None
self.etag = None
self.type = None
class CloudToDeviceProperties(msrest.serialization.Model):
"""The IoT hub cloud-to-device messaging properties.
:ivar max_delivery_count: The max delivery count for cloud-to-device messages in the device
queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:vartype max_delivery_count: int
:ivar default_ttl_as_iso8601: The default time to live for cloud-to-device messages in the
device queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:vartype default_ttl_as_iso8601: ~datetime.timedelta
:ivar feedback: The properties of the feedback queue for cloud-to-device messages.
:vartype feedback: ~azure.mgmt.iothub.v2021_03_03_preview.models.FeedbackProperties
"""
_validation = {
'max_delivery_count': {'maximum': 100, 'minimum': 1},
}
_attribute_map = {
'max_delivery_count': {'key': 'maxDeliveryCount', 'type': 'int'},
'default_ttl_as_iso8601': {'key': 'defaultTtlAsIso8601', 'type': 'duration'},
'feedback': {'key': 'feedback', 'type': 'FeedbackProperties'},
}
def __init__(
self,
*,
max_delivery_count: Optional[int] = None,
default_ttl_as_iso8601: Optional[datetime.timedelta] = None,
feedback: Optional["FeedbackProperties"] = None,
**kwargs
):
"""
:keyword max_delivery_count: The max delivery count for cloud-to-device messages in the device
queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:paramtype max_delivery_count: int
:keyword default_ttl_as_iso8601: The default time to live for cloud-to-device messages in the
device queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:paramtype default_ttl_as_iso8601: ~datetime.timedelta
:keyword feedback: The properties of the feedback queue for cloud-to-device messages.
:paramtype feedback: ~azure.mgmt.iothub.v2021_03_03_preview.models.FeedbackProperties
"""
super(CloudToDeviceProperties, self).__init__(**kwargs)
self.max_delivery_count = max_delivery_count
self.default_ttl_as_iso8601 = default_ttl_as_iso8601
self.feedback = feedback
class EncryptionPropertiesDescription(msrest.serialization.Model):
"""The encryption properties for the IoT hub.
:ivar key_source: The source of the key.
:vartype key_source: str
:ivar key_vault_properties: The properties of the KeyVault key.
:vartype key_vault_properties:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.KeyVaultKeyProperties]
"""
_attribute_map = {
'key_source': {'key': 'keySource', 'type': 'str'},
'key_vault_properties': {'key': 'keyVaultProperties', 'type': '[KeyVaultKeyProperties]'},
}
def __init__(
self,
*,
key_source: Optional[str] = None,
key_vault_properties: Optional[List["KeyVaultKeyProperties"]] = None,
**kwargs
):
"""
:keyword key_source: The source of the key.
:paramtype key_source: str
:keyword key_vault_properties: The properties of the KeyVault key.
:paramtype key_vault_properties:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.KeyVaultKeyProperties]
"""
super(EncryptionPropertiesDescription, self).__init__(**kwargs)
self.key_source = key_source
self.key_vault_properties = key_vault_properties
class EndpointHealthData(msrest.serialization.Model):
"""The health data for an endpoint.
:ivar endpoint_id: Id of the endpoint.
:vartype endpoint_id: str
:ivar health_status: Health statuses have following meanings. The 'healthy' status shows that
the endpoint is accepting messages as expected. The 'unhealthy' status shows that the endpoint
is not accepting messages as expected and IoT Hub is retrying to send data to this endpoint.
The status of an unhealthy endpoint will be updated to healthy when IoT Hub has established an
eventually consistent state of health. The 'dead' status shows that the endpoint is not
accepting messages, after IoT Hub retried sending messages for the retrial period. See IoT Hub
metrics to identify errors and monitor issues with endpoints. The 'unknown' status shows that
the IoT Hub has not established a connection with the endpoint. No messages have been delivered
to or rejected from this endpoint. Possible values include: "unknown", "healthy", "degraded",
"unhealthy", "dead".
:vartype health_status: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.EndpointHealthStatus
:ivar last_known_error: Last error obtained when a message failed to be delivered to iot hub.
:vartype last_known_error: str
:ivar last_known_error_time: Time at which the last known error occurred.
:vartype last_known_error_time: ~datetime.datetime
:ivar last_successful_send_attempt_time: Last time iot hub successfully sent a message to the
endpoint.
:vartype last_successful_send_attempt_time: ~datetime.datetime
:ivar last_send_attempt_time: Last time iot hub tried to send a message to the endpoint.
:vartype last_send_attempt_time: ~datetime.datetime
"""
_attribute_map = {
'endpoint_id': {'key': 'endpointId', 'type': 'str'},
'health_status': {'key': 'healthStatus', 'type': 'str'},
'last_known_error': {'key': 'lastKnownError', 'type': 'str'},
'last_known_error_time': {'key': 'lastKnownErrorTime', 'type': 'rfc-1123'},
'last_successful_send_attempt_time': {'key': 'lastSuccessfulSendAttemptTime', 'type': 'rfc-1123'},
'last_send_attempt_time': {'key': 'lastSendAttemptTime', 'type': 'rfc-1123'},
}
def __init__(
self,
*,
endpoint_id: Optional[str] = None,
health_status: Optional[Union[str, "EndpointHealthStatus"]] = None,
last_known_error: Optional[str] = None,
last_known_error_time: Optional[datetime.datetime] = None,
last_successful_send_attempt_time: Optional[datetime.datetime] = None,
last_send_attempt_time: Optional[datetime.datetime] = None,
**kwargs
):
"""
:keyword endpoint_id: Id of the endpoint.
:paramtype endpoint_id: str
:keyword health_status: Health statuses have following meanings. The 'healthy' status shows
that the endpoint is accepting messages as expected. The 'unhealthy' status shows that the
endpoint is not accepting messages as expected and IoT Hub is retrying to send data to this
endpoint. The status of an unhealthy endpoint will be updated to healthy when IoT Hub has
established an eventually consistent state of health. The 'dead' status shows that the endpoint
is not accepting messages, after IoT Hub retried sending messages for the retrial period. See
IoT Hub metrics to identify errors and monitor issues with endpoints. The 'unknown' status
shows that the IoT Hub has not established a connection with the endpoint. No messages have
been delivered to or rejected from this endpoint. Possible values include: "unknown",
"healthy", "degraded", "unhealthy", "dead".
:paramtype health_status: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.EndpointHealthStatus
:keyword last_known_error: Last error obtained when a message failed to be delivered to iot
hub.
:paramtype last_known_error: str
:keyword last_known_error_time: Time at which the last known error occurred.
:paramtype last_known_error_time: ~datetime.datetime
:keyword last_successful_send_attempt_time: Last time iot hub successfully sent a message to
the endpoint.
:paramtype last_successful_send_attempt_time: ~datetime.datetime
:keyword last_send_attempt_time: Last time iot hub tried to send a message to the endpoint.
:paramtype last_send_attempt_time: ~datetime.datetime
"""
super(EndpointHealthData, self).__init__(**kwargs)
self.endpoint_id = endpoint_id
self.health_status = health_status
self.last_known_error = last_known_error
self.last_known_error_time = last_known_error_time
self.last_successful_send_attempt_time = last_successful_send_attempt_time
self.last_send_attempt_time = last_send_attempt_time
class EndpointHealthDataListResult(msrest.serialization.Model):
"""The JSON-serialized array of EndpointHealthData objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: JSON-serialized array of Endpoint health data.
:vartype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.EndpointHealthData]
:ivar next_link: Link to more results.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[EndpointHealthData]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["EndpointHealthData"]] = None,
**kwargs
):
"""
:keyword value: JSON-serialized array of Endpoint health data.
:paramtype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.EndpointHealthData]
"""
super(EndpointHealthDataListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
class EnrichmentProperties(msrest.serialization.Model):
"""The properties of an enrichment that your IoT hub applies to messages delivered to endpoints.
All required parameters must be populated in order to send to Azure.
:ivar key: Required. The key or name for the enrichment property.
:vartype key: str
:ivar value: Required. The value for the enrichment property.
:vartype value: str
:ivar endpoint_names: Required. The list of endpoints for which the enrichment is applied to
the message.
:vartype endpoint_names: list[str]
"""
_validation = {
'key': {'required': True},
'value': {'required': True},
'endpoint_names': {'required': True, 'min_items': 1},
}
_attribute_map = {
'key': {'key': 'key', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
'endpoint_names': {'key': 'endpointNames', 'type': '[str]'},
}
def __init__(
self,
*,
key: str,
value: str,
endpoint_names: List[str],
**kwargs
):
"""
:keyword key: Required. The key or name for the enrichment property.
:paramtype key: str
:keyword value: Required. The value for the enrichment property.
:paramtype value: str
:keyword endpoint_names: Required. The list of endpoints for which the enrichment is applied to
the message.
:paramtype endpoint_names: list[str]
"""
super(EnrichmentProperties, self).__init__(**kwargs)
self.key = key
self.value = value
self.endpoint_names = endpoint_names
class ErrorDetails(msrest.serialization.Model):
"""Error details.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar http_status_code: The HTTP status code.
:vartype http_status_code: str
:ivar message: The error message.
:vartype message: str
:ivar details: The error details.
:vartype details: str
"""
_validation = {
'code': {'readonly': True},
'http_status_code': {'readonly': True},
'message': {'readonly': True},
'details': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'http_status_code': {'key': 'httpStatusCode', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'details': {'key': 'details', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ErrorDetails, self).__init__(**kwargs)
self.code = None
self.http_status_code = None
self.message = None
self.details = None
class EventHubConsumerGroupBodyDescription(msrest.serialization.Model):
"""The EventHub consumer group.
:ivar properties: The EventHub consumer group name.
:vartype properties: ~azure.mgmt.iothub.v2021_03_03_preview.models.EventHubConsumerGroupName
"""
_attribute_map = {
'properties': {'key': 'properties', 'type': 'EventHubConsumerGroupName'},
}
def __init__(
self,
*,
properties: Optional["EventHubConsumerGroupName"] = None,
**kwargs
):
"""
:keyword properties: The EventHub consumer group name.
:paramtype properties: ~azure.mgmt.iothub.v2021_03_03_preview.models.EventHubConsumerGroupName
"""
super(EventHubConsumerGroupBodyDescription, self).__init__(**kwargs)
self.properties = properties
class EventHubConsumerGroupInfo(msrest.serialization.Model):
"""The properties of the EventHubConsumerGroupInfo object.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar properties: The tags.
:vartype properties: dict[str, str]
:ivar id: The Event Hub-compatible consumer group identifier.
:vartype id: str
:ivar name: The Event Hub-compatible consumer group name.
:vartype name: str
:ivar type: the resource type.
:vartype type: str
:ivar etag: The etag.
:vartype etag: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'properties': {'key': 'properties', 'type': '{str}'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(
self,
*,
properties: Optional[Dict[str, str]] = None,
**kwargs
):
"""
:keyword properties: The tags.
:paramtype properties: dict[str, str]
"""
super(EventHubConsumerGroupInfo, self).__init__(**kwargs)
self.properties = properties
self.id = None
self.name = None
self.type = None
self.etag = None
class EventHubConsumerGroupName(msrest.serialization.Model):
"""The EventHub consumer group name.
:ivar name: EventHub consumer group name.
:vartype name: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
**kwargs
):
"""
:keyword name: EventHub consumer group name.
:paramtype name: str
"""
super(EventHubConsumerGroupName, self).__init__(**kwargs)
self.name = name
class EventHubConsumerGroupsListResult(msrest.serialization.Model):
"""The JSON-serialized array of Event Hub-compatible consumer group names with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of consumer groups objects.
:vartype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.EventHubConsumerGroupInfo]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[EventHubConsumerGroupInfo]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["EventHubConsumerGroupInfo"]] = None,
**kwargs
):
"""
:keyword value: List of consumer groups objects.
:paramtype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.EventHubConsumerGroupInfo]
"""
super(EventHubConsumerGroupsListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
class EventHubProperties(msrest.serialization.Model):
"""The properties of the provisioned Event Hub-compatible endpoint used by the IoT hub.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar retention_time_in_days: The retention time for device-to-cloud messages in days. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages.
:vartype retention_time_in_days: long
:ivar partition_count: The number of partitions for receiving device-to-cloud messages in the
Event Hub-compatible endpoint. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages.
:vartype partition_count: int
:ivar partition_ids: The partition ids in the Event Hub-compatible endpoint.
:vartype partition_ids: list[str]
:ivar path: The Event Hub-compatible name.
:vartype path: str
:ivar endpoint: The Event Hub-compatible endpoint.
:vartype endpoint: str
"""
_validation = {
'partition_ids': {'readonly': True},
'path': {'readonly': True},
'endpoint': {'readonly': True},
}
_attribute_map = {
'retention_time_in_days': {'key': 'retentionTimeInDays', 'type': 'long'},
'partition_count': {'key': 'partitionCount', 'type': 'int'},
'partition_ids': {'key': 'partitionIds', 'type': '[str]'},
'path': {'key': 'path', 'type': 'str'},
'endpoint': {'key': 'endpoint', 'type': 'str'},
}
def __init__(
self,
*,
retention_time_in_days: Optional[int] = None,
partition_count: Optional[int] = None,
**kwargs
):
"""
:keyword retention_time_in_days: The retention time for device-to-cloud messages in days. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages.
:paramtype retention_time_in_days: long
:keyword partition_count: The number of partitions for receiving device-to-cloud messages in
the Event Hub-compatible endpoint. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages.
:paramtype partition_count: int
"""
super(EventHubProperties, self).__init__(**kwargs)
self.retention_time_in_days = retention_time_in_days
self.partition_count = partition_count
self.partition_ids = None
self.path = None
self.endpoint = None
class ExportDevicesRequest(msrest.serialization.Model):
"""Use to provide parameters when requesting an export of all devices in the IoT hub.
All required parameters must be populated in order to send to Azure.
:ivar export_blob_container_uri: Required. The export blob container URI.
:vartype export_blob_container_uri: str
:ivar exclude_keys: Required. The value indicating whether keys should be excluded during
export.
:vartype exclude_keys: bool
:ivar export_blob_name: The name of the blob that will be created in the provided output blob
container. This blob will contain the exported device registry information for the IoT Hub.
:vartype export_blob_name: str
:ivar authentication_type: Specifies authentication type being used for connecting to the
storage account. Possible values include: "keyBased", "identityBased".
:vartype authentication_type: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.AuthenticationType
:ivar identity: Managed identity properties of storage endpoint for export devices.
:vartype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
"""
_validation = {
'export_blob_container_uri': {'required': True},
'exclude_keys': {'required': True},
}
_attribute_map = {
'export_blob_container_uri': {'key': 'exportBlobContainerUri', 'type': 'str'},
'exclude_keys': {'key': 'excludeKeys', 'type': 'bool'},
'export_blob_name': {'key': 'exportBlobName', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ManagedIdentity'},
}
def __init__(
self,
*,
export_blob_container_uri: str,
exclude_keys: bool,
export_blob_name: Optional[str] = None,
authentication_type: Optional[Union[str, "AuthenticationType"]] = None,
identity: Optional["ManagedIdentity"] = None,
**kwargs
):
"""
:keyword export_blob_container_uri: Required. The export blob container URI.
:paramtype export_blob_container_uri: str
:keyword exclude_keys: Required. The value indicating whether keys should be excluded during
export.
:paramtype exclude_keys: bool
:keyword export_blob_name: The name of the blob that will be created in the provided output
blob container. This blob will contain the exported device registry information for the IoT
Hub.
:paramtype export_blob_name: str
:keyword authentication_type: Specifies authentication type being used for connecting to the
storage account. Possible values include: "keyBased", "identityBased".
:paramtype authentication_type: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.AuthenticationType
:keyword identity: Managed identity properties of storage endpoint for export devices.
:paramtype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
"""
super(ExportDevicesRequest, self).__init__(**kwargs)
self.export_blob_container_uri = export_blob_container_uri
self.exclude_keys = exclude_keys
self.export_blob_name = export_blob_name
self.authentication_type = authentication_type
self.identity = identity
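
# Illustrative sketch (hypothetical values, not part of the generated API
# surface): populating an ExportDevicesRequest. The SAS-style container URI and
# blob name are placeholders rather than real endpoints.
def _example_export_devices_request() -> "ExportDevicesRequest":
    """Return a sample export request that excludes keys from the exported registry."""
    return ExportDevicesRequest(
        export_blob_container_uri="https://<account>.blob.core.windows.net/<container>?<sas-token>",
        exclude_keys=True,
        export_blob_name="devices.txt",
    )
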
class FailoverInput(msrest.serialization.Model):
"""Use to provide failover region when requesting manual Failover for a hub.
All required parameters must be populated in order to send to Azure.
:ivar failover_region: Required. Region the hub will be failed over to.
:vartype failover_region: str
"""
_validation = {
'failover_region': {'required': True},
}
_attribute_map = {
'failover_region': {'key': 'failoverRegion', 'type': 'str'},
}
def __init__(
self,
*,
failover_region: str,
**kwargs
):
"""
:keyword failover_region: Required. Region the hub will be failed over to.
:paramtype failover_region: str
"""
super(FailoverInput, self).__init__(**kwargs)
self.failover_region = failover_region
class FallbackRouteProperties(msrest.serialization.Model):
"""The properties of the fallback route. IoT Hub uses these properties when it routes messages to the fallback endpoint.
All required parameters must be populated in order to send to Azure.
    :ivar name: The name of the route. The name can only include alphanumeric characters, periods,
     underscores, and hyphens; it has a maximum length of 64 characters and must be unique.
:vartype name: str
    :ivar source: Required. The source to which the routing rule is to be applied. For example,
DeviceMessages. Possible values include: "Invalid", "DeviceMessages", "TwinChangeEvents",
"DeviceLifecycleEvents", "DeviceJobLifecycleEvents", "DigitalTwinChangeEvents",
"DeviceConnectionStateEvents".
:vartype source: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingSource
:ivar condition: The condition which is evaluated in order to apply the fallback route. If the
condition is not provided it will evaluate to true by default. For grammar, See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
:vartype condition: str
:ivar endpoint_names: Required. The list of endpoints to which the messages that satisfy the
     condition are routed. Currently only 1 endpoint is allowed.
:vartype endpoint_names: list[str]
:ivar is_enabled: Required. Used to specify whether the fallback route is enabled.
:vartype is_enabled: bool
"""
_validation = {
'source': {'required': True},
'endpoint_names': {'required': True, 'max_items': 1, 'min_items': 1},
'is_enabled': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'source': {'key': 'source', 'type': 'str'},
'condition': {'key': 'condition', 'type': 'str'},
'endpoint_names': {'key': 'endpointNames', 'type': '[str]'},
'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
}
def __init__(
self,
*,
source: Union[str, "RoutingSource"],
endpoint_names: List[str],
is_enabled: bool,
name: Optional[str] = None,
condition: Optional[str] = None,
**kwargs
):
"""
        :keyword name: The name of the route. The name can only include alphanumeric characters,
         periods, underscores, and hyphens; it has a maximum length of 64 characters and must be
         unique.
:paramtype name: str
        :keyword source: Required. The source to which the routing rule is to be applied. For
example, DeviceMessages. Possible values include: "Invalid", "DeviceMessages",
"TwinChangeEvents", "DeviceLifecycleEvents", "DeviceJobLifecycleEvents",
"DigitalTwinChangeEvents", "DeviceConnectionStateEvents".
:paramtype source: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingSource
:keyword condition: The condition which is evaluated in order to apply the fallback route. If
the condition is not provided it will evaluate to true by default. For grammar, See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
:paramtype condition: str
:keyword endpoint_names: Required. The list of endpoints to which the messages that satisfy the
         condition are routed. Currently only 1 endpoint is allowed.
:paramtype endpoint_names: list[str]
:keyword is_enabled: Required. Used to specify whether the fallback route is enabled.
:paramtype is_enabled: bool
"""
super(FallbackRouteProperties, self).__init__(**kwargs)
self.name = name
self.source = source
self.condition = condition
self.endpoint_names = endpoint_names
self.is_enabled = is_enabled
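
# Illustrative sketch (hypothetical route name and condition, not part of the
# generated API surface): a fallback route that sends otherwise-unrouted device
# messages to the built-in "events" endpoint. endpoint_names accepts exactly one
# endpoint per the validation above.
def _example_fallback_route() -> "FallbackRouteProperties":
    """Return a sample fallback route targeting the built-in events endpoint."""
    return FallbackRouteProperties(
        name="fallback-route",
        source="DeviceMessages",
        condition="true",           # an omitted condition also evaluates to true
        endpoint_names=["events"],  # exactly one endpoint is allowed
        is_enabled=True,
    )
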
class FeedbackProperties(msrest.serialization.Model):
"""The properties of the feedback queue for cloud-to-device messages.
:ivar lock_duration_as_iso8601: The lock duration for the feedback queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:vartype lock_duration_as_iso8601: ~datetime.timedelta
:ivar ttl_as_iso8601: The period of time for which a message is available to consume before it
is expired by the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:vartype ttl_as_iso8601: ~datetime.timedelta
:ivar max_delivery_count: The number of times the IoT hub attempts to deliver a message on the
feedback queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:vartype max_delivery_count: int
"""
_validation = {
'max_delivery_count': {'maximum': 100, 'minimum': 1},
}
_attribute_map = {
'lock_duration_as_iso8601': {'key': 'lockDurationAsIso8601', 'type': 'duration'},
'ttl_as_iso8601': {'key': 'ttlAsIso8601', 'type': 'duration'},
'max_delivery_count': {'key': 'maxDeliveryCount', 'type': 'int'},
}
def __init__(
self,
*,
lock_duration_as_iso8601: Optional[datetime.timedelta] = None,
ttl_as_iso8601: Optional[datetime.timedelta] = None,
max_delivery_count: Optional[int] = None,
**kwargs
):
"""
:keyword lock_duration_as_iso8601: The lock duration for the feedback queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:paramtype lock_duration_as_iso8601: ~datetime.timedelta
:keyword ttl_as_iso8601: The period of time for which a message is available to consume before
it is expired by the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:paramtype ttl_as_iso8601: ~datetime.timedelta
:keyword max_delivery_count: The number of times the IoT hub attempts to deliver a message on
the feedback queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:paramtype max_delivery_count: int
"""
super(FeedbackProperties, self).__init__(**kwargs)
self.lock_duration_as_iso8601 = lock_duration_as_iso8601
self.ttl_as_iso8601 = ttl_as_iso8601
self.max_delivery_count = max_delivery_count
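
# Illustrative sketch (arbitrary durations, not part of the generated API
# surface): configuring the cloud-to-device feedback queue with timedelta values,
# which msrest serializes as ISO 8601 durations per the attribute map above.
def _example_feedback_properties() -> "FeedbackProperties":
    """Return a sample FeedbackProperties with a 60-second lock and 1-hour TTL."""
    return FeedbackProperties(
        lock_duration_as_iso8601=datetime.timedelta(seconds=60),
        ttl_as_iso8601=datetime.timedelta(hours=1),
        max_delivery_count=10,  # must fall within the 1-100 range enforced above
    )
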
class GroupIdInformation(msrest.serialization.Model):
"""The group information for creating a private endpoint on an IotHub.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:ivar properties: Required. The properties for a group information object.
:vartype properties: ~azure.mgmt.iothub.v2021_03_03_preview.models.GroupIdInformationProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True, 'pattern': r'^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{2,49}[a-zA-Z0-9]$'},
'type': {'readonly': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'GroupIdInformationProperties'},
}
def __init__(
self,
*,
properties: "GroupIdInformationProperties",
**kwargs
):
"""
:keyword properties: Required. The properties for a group information object.
:paramtype properties:
~azure.mgmt.iothub.v2021_03_03_preview.models.GroupIdInformationProperties
"""
super(GroupIdInformation, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.properties = properties
class GroupIdInformationProperties(msrest.serialization.Model):
"""The properties for a group information object.
:ivar group_id: The group id.
:vartype group_id: str
:ivar required_members: The required members for a specific group id.
:vartype required_members: list[str]
:ivar required_zone_names: The required DNS zones for a specific group id.
:vartype required_zone_names: list[str]
"""
_attribute_map = {
'group_id': {'key': 'groupId', 'type': 'str'},
'required_members': {'key': 'requiredMembers', 'type': '[str]'},
'required_zone_names': {'key': 'requiredZoneNames', 'type': '[str]'},
}
def __init__(
self,
*,
group_id: Optional[str] = None,
required_members: Optional[List[str]] = None,
required_zone_names: Optional[List[str]] = None,
**kwargs
):
"""
:keyword group_id: The group id.
:paramtype group_id: str
:keyword required_members: The required members for a specific group id.
:paramtype required_members: list[str]
:keyword required_zone_names: The required DNS zones for a specific group id.
:paramtype required_zone_names: list[str]
"""
super(GroupIdInformationProperties, self).__init__(**kwargs)
self.group_id = group_id
self.required_members = required_members
self.required_zone_names = required_zone_names
class ImportDevicesRequest(msrest.serialization.Model):
"""Use to provide parameters when requesting an import of all devices in the hub.
All required parameters must be populated in order to send to Azure.
:ivar input_blob_container_uri: Required. The input blob container URI.
:vartype input_blob_container_uri: str
:ivar output_blob_container_uri: Required. The output blob container URI.
:vartype output_blob_container_uri: str
:ivar input_blob_name: The blob name to be used when importing from the provided input blob
container.
:vartype input_blob_name: str
:ivar output_blob_name: The blob name to use for storing the status of the import job.
:vartype output_blob_name: str
:ivar authentication_type: Specifies authentication type being used for connecting to the
storage account. Possible values include: "keyBased", "identityBased".
:vartype authentication_type: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.AuthenticationType
:ivar identity: Managed identity properties of storage endpoint for import devices.
:vartype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
"""
_validation = {
'input_blob_container_uri': {'required': True},
'output_blob_container_uri': {'required': True},
}
_attribute_map = {
'input_blob_container_uri': {'key': 'inputBlobContainerUri', 'type': 'str'},
'output_blob_container_uri': {'key': 'outputBlobContainerUri', 'type': 'str'},
'input_blob_name': {'key': 'inputBlobName', 'type': 'str'},
'output_blob_name': {'key': 'outputBlobName', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ManagedIdentity'},
}
def __init__(
self,
*,
input_blob_container_uri: str,
output_blob_container_uri: str,
input_blob_name: Optional[str] = None,
output_blob_name: Optional[str] = None,
authentication_type: Optional[Union[str, "AuthenticationType"]] = None,
identity: Optional["ManagedIdentity"] = None,
**kwargs
):
"""
:keyword input_blob_container_uri: Required. The input blob container URI.
:paramtype input_blob_container_uri: str
:keyword output_blob_container_uri: Required. The output blob container URI.
:paramtype output_blob_container_uri: str
:keyword input_blob_name: The blob name to be used when importing from the provided input blob
container.
:paramtype input_blob_name: str
:keyword output_blob_name: The blob name to use for storing the status of the import job.
:paramtype output_blob_name: str
:keyword authentication_type: Specifies authentication type being used for connecting to the
storage account. Possible values include: "keyBased", "identityBased".
:paramtype authentication_type: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.AuthenticationType
:keyword identity: Managed identity properties of storage endpoint for import devices.
:paramtype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
"""
super(ImportDevicesRequest, self).__init__(**kwargs)
self.input_blob_container_uri = input_blob_container_uri
self.output_blob_container_uri = output_blob_container_uri
self.input_blob_name = input_blob_name
self.output_blob_name = output_blob_name
self.authentication_type = authentication_type
self.identity = identity
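
# Illustrative sketch (placeholder URIs and identity resource ID, not part of the
# generated API surface): an import request that authenticates to storage with a
# user-assigned managed identity instead of account keys.
def _example_import_devices_request() -> "ImportDevicesRequest":
    """Return a sample identity-based import request."""
    return ImportDevicesRequest(
        input_blob_container_uri="https://<account>.blob.core.windows.net/<input-container>",
        output_blob_container_uri="https://<account>.blob.core.windows.net/<output-container>",
        input_blob_name="devices.txt",
        authentication_type="identityBased",
        identity=ManagedIdentity(user_assigned_identity="<managed-identity-resource-id>"),
    )
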
class IotHubCapacity(msrest.serialization.Model):
"""IoT Hub capacity information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar minimum: The minimum number of units.
:vartype minimum: long
:ivar maximum: The maximum number of units.
:vartype maximum: long
:ivar default: The default number of units.
:vartype default: long
:ivar scale_type: The type of the scaling enabled. Possible values include: "Automatic",
"Manual", "None".
:vartype scale_type: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubScaleType
"""
_validation = {
'minimum': {'readonly': True, 'maximum': 1, 'minimum': 1},
'maximum': {'readonly': True},
'default': {'readonly': True},
'scale_type': {'readonly': True},
}
_attribute_map = {
'minimum': {'key': 'minimum', 'type': 'long'},
'maximum': {'key': 'maximum', 'type': 'long'},
'default': {'key': 'default', 'type': 'long'},
'scale_type': {'key': 'scaleType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(IotHubCapacity, self).__init__(**kwargs)
self.minimum = None
self.maximum = None
self.default = None
self.scale_type = None
class Resource(msrest.serialization.Model):
"""The common properties of an Azure resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:ivar location: Required. The resource location.
:vartype location: str
:ivar tags: A set of tags. The resource tags.
:vartype tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True, 'pattern': r'^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{2,49}[a-zA-Z0-9]$'},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
"""
:keyword location: Required. The resource location.
:paramtype location: str
:keyword tags: A set of tags. The resource tags.
:paramtype tags: dict[str, str]
"""
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = location
self.tags = tags
class IotHubDescription(Resource):
"""The description of the IoT hub.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:ivar location: Required. The resource location.
:vartype location: str
:ivar tags: A set of tags. The resource tags.
:vartype tags: dict[str, str]
:ivar etag: The Etag field is *not* required. If it is provided in the response body, it must
also be provided as a header per the normal ETag convention.
:vartype etag: str
:ivar properties: IotHub properties.
:vartype properties: ~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubProperties
:ivar sku: Required. IotHub SKU info.
:vartype sku: ~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubSkuInfo
:ivar identity: The managed identities for the IotHub.
:vartype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ArmIdentity
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True, 'pattern': r'^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{2,49}[a-zA-Z0-9]$'},
'type': {'readonly': True},
'location': {'required': True},
'sku': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'etag': {'key': 'etag', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'IotHubProperties'},
'sku': {'key': 'sku', 'type': 'IotHubSkuInfo'},
'identity': {'key': 'identity', 'type': 'ArmIdentity'},
}
def __init__(
self,
*,
location: str,
sku: "IotHubSkuInfo",
tags: Optional[Dict[str, str]] = None,
etag: Optional[str] = None,
properties: Optional["IotHubProperties"] = None,
identity: Optional["ArmIdentity"] = None,
**kwargs
):
"""
:keyword location: Required. The resource location.
:paramtype location: str
:keyword tags: A set of tags. The resource tags.
:paramtype tags: dict[str, str]
:keyword etag: The Etag field is *not* required. If it is provided in the response body, it
must also be provided as a header per the normal ETag convention.
:paramtype etag: str
:keyword properties: IotHub properties.
:paramtype properties: ~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubProperties
:keyword sku: Required. IotHub SKU info.
:paramtype sku: ~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubSkuInfo
:keyword identity: The managed identities for the IotHub.
:paramtype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ArmIdentity
"""
super(IotHubDescription, self).__init__(location=location, tags=tags, **kwargs)
self.etag = etag
self.properties = properties
self.sku = sku
self.identity = identity
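
# Illustrative sketch (placeholder region and tags, not part of the generated API
# surface): the minimal shape of an IotHubDescription for a create or update
# call, i.e. a location plus the required SKU.
def _example_iot_hub_description() -> "IotHubDescription":
    """Return a sample S1 hub description with a single unit."""
    return IotHubDescription(
        location="westus2",
        sku=IotHubSkuInfo(name="S1", capacity=1),
        tags={"environment": "dev"},
    )
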
class IotHubDescriptionListResult(msrest.serialization.Model):
"""The JSON-serialized array of IotHubDescription objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The array of IotHubDescription objects.
:vartype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubDescription]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[IotHubDescription]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["IotHubDescription"]] = None,
**kwargs
):
"""
:keyword value: The array of IotHubDescription objects.
:paramtype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubDescription]
"""
super(IotHubDescriptionListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
class IotHubLocationDescription(msrest.serialization.Model):
"""Public representation of one of the locations where a resource is provisioned.
:ivar location: The name of the Azure region.
:vartype location: str
    :ivar role: The role of the region, which can be either primary or secondary. The primary
     region is where the IoT hub is currently provisioned. The secondary region is the Azure
     disaster recovery (DR) paired region and also the region to which the IoT hub can fail over.
     Possible values include: "primary", "secondary".
:vartype role: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubReplicaRoleType
"""
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'role': {'key': 'role', 'type': 'str'},
}
def __init__(
self,
*,
location: Optional[str] = None,
role: Optional[Union[str, "IotHubReplicaRoleType"]] = None,
**kwargs
):
"""
:keyword location: The name of the Azure region.
:paramtype location: str
        :keyword role: The role of the region, which can be either primary or secondary. The primary
         region is where the IoT hub is currently provisioned. The secondary region is the Azure
         disaster recovery (DR) paired region and also the region to which the IoT hub can fail over.
         Possible values include: "primary", "secondary".
:paramtype role: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubReplicaRoleType
"""
super(IotHubLocationDescription, self).__init__(**kwargs)
self.location = location
self.role = role
class IotHubNameAvailabilityInfo(msrest.serialization.Model):
"""The properties indicating whether a given IoT hub name is available.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name_available: The value which indicates whether the provided name is available.
:vartype name_available: bool
:ivar reason: The reason for unavailability. Possible values include: "Invalid",
"AlreadyExists".
:vartype reason: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubNameUnavailabilityReason
:ivar message: The detailed reason message.
:vartype message: str
"""
_validation = {
'name_available': {'readonly': True},
'reason': {'readonly': True},
}
_attribute_map = {
'name_available': {'key': 'nameAvailable', 'type': 'bool'},
'reason': {'key': 'reason', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
message: Optional[str] = None,
**kwargs
):
"""
:keyword message: The detailed reason message.
:paramtype message: str
"""
super(IotHubNameAvailabilityInfo, self).__init__(**kwargs)
self.name_available = None
self.reason = None
self.message = message
class IotHubProperties(msrest.serialization.Model):
"""The properties of an IoT hub.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar authorization_policies: The shared access policies you can use to secure a connection to
the IoT hub.
:vartype authorization_policies:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.SharedAccessSignatureAuthorizationRule]
:ivar public_network_access: Whether requests from Public Network are allowed. Possible values
include: "Enabled", "Disabled".
:vartype public_network_access: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.PublicNetworkAccess
:ivar ip_filter_rules: The IP filter rules.
:vartype ip_filter_rules: list[~azure.mgmt.iothub.v2021_03_03_preview.models.IpFilterRule]
:ivar network_rule_sets: Network Rule Set Properties of IotHub.
:vartype network_rule_sets:
~azure.mgmt.iothub.v2021_03_03_preview.models.NetworkRuleSetProperties
:ivar min_tls_version: Specifies the minimum TLS version to support for this hub. Can be set to
"1.2" to have clients that use a TLS version below 1.2 to be rejected.
:vartype min_tls_version: str
:ivar private_endpoint_connections: Private endpoint connections created on this IotHub.
:vartype private_endpoint_connections:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateEndpointConnection]
:ivar provisioning_state: The provisioning state.
:vartype provisioning_state: str
:ivar state: The hub state.
:vartype state: str
:ivar host_name: The name of the host.
:vartype host_name: str
    :ivar event_hub_endpoints: The Event Hub-compatible endpoint properties. The only possible key
     for this dictionary is events. This key has to be present in the dictionary while making create
or update calls for the IoT hub.
:vartype event_hub_endpoints: dict[str,
~azure.mgmt.iothub.v2021_03_03_preview.models.EventHubProperties]
:ivar routing: The routing related properties of the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging.
:vartype routing: ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingProperties
:ivar storage_endpoints: The list of Azure Storage endpoints where you can upload files.
Currently you can configure only one Azure Storage account and that MUST have its key as
$default. Specifying more than one storage account causes an error to be thrown. Not specifying
     a value for this property when the enableFileUploadNotifications property is set to True
causes an error to be thrown.
:vartype storage_endpoints: dict[str,
~azure.mgmt.iothub.v2021_03_03_preview.models.StorageEndpointProperties]
:ivar messaging_endpoints: The messaging endpoint properties for the file upload notification
queue.
:vartype messaging_endpoints: dict[str,
~azure.mgmt.iothub.v2021_03_03_preview.models.MessagingEndpointProperties]
:ivar enable_file_upload_notifications: If True, file upload notifications are enabled.
:vartype enable_file_upload_notifications: bool
:ivar cloud_to_device: The IoT hub cloud-to-device messaging properties.
:vartype cloud_to_device: ~azure.mgmt.iothub.v2021_03_03_preview.models.CloudToDeviceProperties
:ivar comments: IoT hub comments.
:vartype comments: str
:ivar device_streams: The device streams properties of iothub.
:vartype device_streams:
~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubPropertiesDeviceStreams
:ivar features: The capabilities and features enabled for the IoT hub. Possible values include:
"None", "DeviceManagement".
:vartype features: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.Capabilities
:ivar encryption: The encryption properties for the IoT hub.
:vartype encryption:
~azure.mgmt.iothub.v2021_03_03_preview.models.EncryptionPropertiesDescription
:ivar locations: Primary and secondary location for iot hub.
:vartype locations:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubLocationDescription]
"""
_validation = {
'provisioning_state': {'readonly': True},
'state': {'readonly': True},
'host_name': {'readonly': True},
'locations': {'readonly': True},
}
_attribute_map = {
'authorization_policies': {'key': 'authorizationPolicies', 'type': '[SharedAccessSignatureAuthorizationRule]'},
'public_network_access': {'key': 'publicNetworkAccess', 'type': 'str'},
'ip_filter_rules': {'key': 'ipFilterRules', 'type': '[IpFilterRule]'},
'network_rule_sets': {'key': 'networkRuleSets', 'type': 'NetworkRuleSetProperties'},
'min_tls_version': {'key': 'minTlsVersion', 'type': 'str'},
'private_endpoint_connections': {'key': 'privateEndpointConnections', 'type': '[PrivateEndpointConnection]'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
'host_name': {'key': 'hostName', 'type': 'str'},
'event_hub_endpoints': {'key': 'eventHubEndpoints', 'type': '{EventHubProperties}'},
'routing': {'key': 'routing', 'type': 'RoutingProperties'},
'storage_endpoints': {'key': 'storageEndpoints', 'type': '{StorageEndpointProperties}'},
'messaging_endpoints': {'key': 'messagingEndpoints', 'type': '{MessagingEndpointProperties}'},
'enable_file_upload_notifications': {'key': 'enableFileUploadNotifications', 'type': 'bool'},
'cloud_to_device': {'key': 'cloudToDevice', 'type': 'CloudToDeviceProperties'},
'comments': {'key': 'comments', 'type': 'str'},
'device_streams': {'key': 'deviceStreams', 'type': 'IotHubPropertiesDeviceStreams'},
'features': {'key': 'features', 'type': 'str'},
'encryption': {'key': 'encryption', 'type': 'EncryptionPropertiesDescription'},
'locations': {'key': 'locations', 'type': '[IotHubLocationDescription]'},
}
def __init__(
self,
*,
authorization_policies: Optional[List["SharedAccessSignatureAuthorizationRule"]] = None,
public_network_access: Optional[Union[str, "PublicNetworkAccess"]] = None,
ip_filter_rules: Optional[List["IpFilterRule"]] = None,
network_rule_sets: Optional["NetworkRuleSetProperties"] = None,
min_tls_version: Optional[str] = None,
private_endpoint_connections: Optional[List["PrivateEndpointConnection"]] = None,
event_hub_endpoints: Optional[Dict[str, "EventHubProperties"]] = None,
routing: Optional["RoutingProperties"] = None,
storage_endpoints: Optional[Dict[str, "StorageEndpointProperties"]] = None,
messaging_endpoints: Optional[Dict[str, "MessagingEndpointProperties"]] = None,
enable_file_upload_notifications: Optional[bool] = None,
cloud_to_device: Optional["CloudToDeviceProperties"] = None,
comments: Optional[str] = None,
device_streams: Optional["IotHubPropertiesDeviceStreams"] = None,
features: Optional[Union[str, "Capabilities"]] = None,
encryption: Optional["EncryptionPropertiesDescription"] = None,
**kwargs
):
"""
:keyword authorization_policies: The shared access policies you can use to secure a connection
to the IoT hub.
:paramtype authorization_policies:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.SharedAccessSignatureAuthorizationRule]
:keyword public_network_access: Whether requests from Public Network are allowed. Possible
values include: "Enabled", "Disabled".
:paramtype public_network_access: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.PublicNetworkAccess
:keyword ip_filter_rules: The IP filter rules.
:paramtype ip_filter_rules: list[~azure.mgmt.iothub.v2021_03_03_preview.models.IpFilterRule]
:keyword network_rule_sets: Network Rule Set Properties of IotHub.
:paramtype network_rule_sets:
~azure.mgmt.iothub.v2021_03_03_preview.models.NetworkRuleSetProperties
:keyword min_tls_version: Specifies the minimum TLS version to support for this hub. Can be set
to "1.2" to have clients that use a TLS version below 1.2 to be rejected.
:paramtype min_tls_version: str
:keyword private_endpoint_connections: Private endpoint connections created on this IotHub.
:paramtype private_endpoint_connections:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateEndpointConnection]
:keyword event_hub_endpoints: The Event Hub-compatible endpoint properties. The only possible
         key for this dictionary is events. This key has to be present in the dictionary while making
create or update calls for the IoT hub.
:paramtype event_hub_endpoints: dict[str,
~azure.mgmt.iothub.v2021_03_03_preview.models.EventHubProperties]
:keyword routing: The routing related properties of the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging.
:paramtype routing: ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingProperties
:keyword storage_endpoints: The list of Azure Storage endpoints where you can upload files.
Currently you can configure only one Azure Storage account and that MUST have its key as
$default. Specifying more than one storage account causes an error to be thrown. Not specifying
         a value for this property when the enableFileUploadNotifications property is set to True
causes an error to be thrown.
:paramtype storage_endpoints: dict[str,
~azure.mgmt.iothub.v2021_03_03_preview.models.StorageEndpointProperties]
:keyword messaging_endpoints: The messaging endpoint properties for the file upload
notification queue.
:paramtype messaging_endpoints: dict[str,
~azure.mgmt.iothub.v2021_03_03_preview.models.MessagingEndpointProperties]
:keyword enable_file_upload_notifications: If True, file upload notifications are enabled.
:paramtype enable_file_upload_notifications: bool
:keyword cloud_to_device: The IoT hub cloud-to-device messaging properties.
:paramtype cloud_to_device:
~azure.mgmt.iothub.v2021_03_03_preview.models.CloudToDeviceProperties
:keyword comments: IoT hub comments.
:paramtype comments: str
:keyword device_streams: The device streams properties of iothub.
:paramtype device_streams:
~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubPropertiesDeviceStreams
:keyword features: The capabilities and features enabled for the IoT hub. Possible values
include: "None", "DeviceManagement".
:paramtype features: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.Capabilities
:keyword encryption: The encryption properties for the IoT hub.
:paramtype encryption:
~azure.mgmt.iothub.v2021_03_03_preview.models.EncryptionPropertiesDescription
"""
super(IotHubProperties, self).__init__(**kwargs)
self.authorization_policies = authorization_policies
self.public_network_access = public_network_access
self.ip_filter_rules = ip_filter_rules
self.network_rule_sets = network_rule_sets
self.min_tls_version = min_tls_version
self.private_endpoint_connections = private_endpoint_connections
self.provisioning_state = None
self.state = None
self.host_name = None
self.event_hub_endpoints = event_hub_endpoints
self.routing = routing
self.storage_endpoints = storage_endpoints
self.messaging_endpoints = messaging_endpoints
self.enable_file_upload_notifications = enable_file_upload_notifications
self.cloud_to_device = cloud_to_device
self.comments = comments
self.device_streams = device_streams
self.features = features
self.encryption = encryption
self.locations = None
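
# Illustrative sketch (arbitrary values, not part of the generated API surface):
# an IotHubProperties payload that pins the minimum TLS version and configures the
# built-in endpoint under the required "events" key.
def _example_iot_hub_properties() -> "IotHubProperties":
    """Return a sample IotHubProperties for a create or update call."""
    return IotHubProperties(
        min_tls_version="1.2",
        enable_file_upload_notifications=False,
        event_hub_endpoints={
            "events": EventHubProperties(retention_time_in_days=1, partition_count=4),
        },
    )
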
class IotHubPropertiesDeviceStreams(msrest.serialization.Model):
"""The device streams properties of iothub.
:ivar streaming_endpoints: List of Device Streams Endpoints.
:vartype streaming_endpoints: list[str]
"""
_attribute_map = {
'streaming_endpoints': {'key': 'streamingEndpoints', 'type': '[str]'},
}
def __init__(
self,
*,
streaming_endpoints: Optional[List[str]] = None,
**kwargs
):
"""
:keyword streaming_endpoints: List of Device Streams Endpoints.
:paramtype streaming_endpoints: list[str]
"""
super(IotHubPropertiesDeviceStreams, self).__init__(**kwargs)
self.streaming_endpoints = streaming_endpoints
class IotHubQuotaMetricInfo(msrest.serialization.Model):
"""Quota metrics properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the quota metric.
:vartype name: str
:ivar current_value: The current value for the quota metric.
:vartype current_value: long
:ivar max_value: The maximum value of the quota metric.
:vartype max_value: long
"""
_validation = {
'name': {'readonly': True},
'current_value': {'readonly': True},
'max_value': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'current_value': {'key': 'currentValue', 'type': 'long'},
'max_value': {'key': 'maxValue', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(IotHubQuotaMetricInfo, self).__init__(**kwargs)
self.name = None
self.current_value = None
self.max_value = None
class IotHubQuotaMetricInfoListResult(msrest.serialization.Model):
"""The JSON-serialized array of IotHubQuotaMetricInfo objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The array of quota metrics objects.
:vartype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubQuotaMetricInfo]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[IotHubQuotaMetricInfo]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["IotHubQuotaMetricInfo"]] = None,
**kwargs
):
"""
:keyword value: The array of quota metrics objects.
:paramtype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubQuotaMetricInfo]
"""
super(IotHubQuotaMetricInfoListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
class IotHubSkuDescription(msrest.serialization.Model):
"""SKU properties.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar resource_type: The type of the resource.
:vartype resource_type: str
    :ivar sku: Required. The SKU information for the IoT hub.
:vartype sku: ~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubSkuInfo
:ivar capacity: Required. IotHub capacity.
:vartype capacity: ~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubCapacity
"""
_validation = {
'resource_type': {'readonly': True},
'sku': {'required': True},
'capacity': {'required': True},
}
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'IotHubSkuInfo'},
'capacity': {'key': 'capacity', 'type': 'IotHubCapacity'},
}
def __init__(
self,
*,
sku: "IotHubSkuInfo",
capacity: "IotHubCapacity",
**kwargs
):
"""
        :keyword sku: Required. The SKU information for the IoT hub.
:paramtype sku: ~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubSkuInfo
:keyword capacity: Required. IotHub capacity.
:paramtype capacity: ~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubCapacity
"""
super(IotHubSkuDescription, self).__init__(**kwargs)
self.resource_type = None
self.sku = sku
self.capacity = capacity
class IotHubSkuDescriptionListResult(msrest.serialization.Model):
"""The JSON-serialized array of IotHubSkuDescription objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The array of IotHubSkuDescription.
:vartype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubSkuDescription]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[IotHubSkuDescription]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["IotHubSkuDescription"]] = None,
**kwargs
):
"""
:keyword value: The array of IotHubSkuDescription.
:paramtype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubSkuDescription]
"""
super(IotHubSkuDescriptionListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
class IotHubSkuInfo(msrest.serialization.Model):
"""Information about the SKU of the IoT hub.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar name: Required. The name of the SKU. Possible values include: "F1", "S1", "S2", "S3",
"B1", "B2", "B3".
:vartype name: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubSku
:ivar tier: The billing tier for the IoT hub. Possible values include: "Free", "Standard",
"Basic".
:vartype tier: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubSkuTier
:ivar capacity: The number of provisioned IoT Hub units. See:
https://docs.microsoft.com/azure/azure-subscription-service-limits#iot-hub-limits.
:vartype capacity: long
"""
_validation = {
'name': {'required': True},
'tier': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
'capacity': {'key': 'capacity', 'type': 'long'},
}
def __init__(
self,
*,
name: Union[str, "IotHubSku"],
capacity: Optional[int] = None,
**kwargs
):
"""
:keyword name: Required. The name of the SKU. Possible values include: "F1", "S1", "S2", "S3",
"B1", "B2", "B3".
:paramtype name: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubSku
:keyword capacity: The number of provisioned IoT Hub units. See:
https://docs.microsoft.com/azure/azure-subscription-service-limits#iot-hub-limits.
:paramtype capacity: long
"""
super(IotHubSkuInfo, self).__init__(**kwargs)
self.name = name
self.tier = None
self.capacity = capacity
class IpFilterRule(msrest.serialization.Model):
"""The IP filter rules for the IoT hub.
All required parameters must be populated in order to send to Azure.
:ivar filter_name: Required. The name of the IP filter rule.
:vartype filter_name: str
:ivar action: Required. The desired action for requests captured by this rule. Possible values
include: "Accept", "Reject".
:vartype action: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.IpFilterActionType
:ivar ip_mask: Required. A string that contains the IP address range in CIDR notation for the
rule.
:vartype ip_mask: str
"""
_validation = {
'filter_name': {'required': True},
'action': {'required': True},
'ip_mask': {'required': True},
}
_attribute_map = {
'filter_name': {'key': 'filterName', 'type': 'str'},
'action': {'key': 'action', 'type': 'str'},
'ip_mask': {'key': 'ipMask', 'type': 'str'},
}
def __init__(
self,
*,
filter_name: str,
action: Union[str, "IpFilterActionType"],
ip_mask: str,
**kwargs
):
"""
:keyword filter_name: Required. The name of the IP filter rule.
:paramtype filter_name: str
:keyword action: Required. The desired action for requests captured by this rule. Possible
values include: "Accept", "Reject".
:paramtype action: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.IpFilterActionType
:keyword ip_mask: Required. A string that contains the IP address range in CIDR notation for
the rule.
:paramtype ip_mask: str
"""
super(IpFilterRule, self).__init__(**kwargs)
self.filter_name = filter_name
self.action = action
self.ip_mask = ip_mask
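
# Illustrative sketch (hypothetical rule name, not part of the generated API
# surface): an IP filter rule that accepts traffic from a single
# documentation-only CIDR range (203.0.113.0/24).
def _example_ip_filter_rule() -> "IpFilterRule":
    """Return a sample Accept rule for a /24 address range."""
    return IpFilterRule(
        filter_name="allow-office-range",
        action="Accept",
        ip_mask="203.0.113.0/24",
    )
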
class JobResponse(msrest.serialization.Model):
"""The properties of the Job Response object.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar job_id: The job identifier.
:vartype job_id: str
:ivar start_time_utc: The start time of the job.
:vartype start_time_utc: ~datetime.datetime
:ivar end_time_utc: The time the job stopped processing.
:vartype end_time_utc: ~datetime.datetime
:ivar type: The type of the job. Possible values include: "unknown", "export", "import",
"backup", "readDeviceProperties", "writeDeviceProperties", "updateDeviceConfiguration",
"rebootDevice", "factoryResetDevice", "firmwareUpdate".
:vartype type: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.JobType
:ivar status: The status of the job. Possible values include: "unknown", "enqueued", "running",
"completed", "failed", "cancelled".
:vartype status: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.JobStatus
    :ivar failure_reason: If status == failed, this string contains the reason for the failure.
:vartype failure_reason: str
:ivar status_message: The status message for the job.
:vartype status_message: str
:ivar parent_job_id: The job identifier of the parent job, if any.
:vartype parent_job_id: str
"""
_validation = {
'job_id': {'readonly': True},
'start_time_utc': {'readonly': True},
'end_time_utc': {'readonly': True},
'type': {'readonly': True},
'status': {'readonly': True},
'failure_reason': {'readonly': True},
'status_message': {'readonly': True},
'parent_job_id': {'readonly': True},
}
_attribute_map = {
'job_id': {'key': 'jobId', 'type': 'str'},
'start_time_utc': {'key': 'startTimeUtc', 'type': 'rfc-1123'},
'end_time_utc': {'key': 'endTimeUtc', 'type': 'rfc-1123'},
'type': {'key': 'type', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'failure_reason': {'key': 'failureReason', 'type': 'str'},
'status_message': {'key': 'statusMessage', 'type': 'str'},
'parent_job_id': {'key': 'parentJobId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(JobResponse, self).__init__(**kwargs)
self.job_id = None
self.start_time_utc = None
self.end_time_utc = None
self.type = None
self.status = None
self.failure_reason = None
self.status_message = None
self.parent_job_id = None
class JobResponseListResult(msrest.serialization.Model):
"""The JSON-serialized array of JobResponse objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The array of JobResponse objects.
:vartype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.JobResponse]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[JobResponse]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["JobResponse"]] = None,
**kwargs
):
"""
:keyword value: The array of JobResponse objects.
:paramtype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.JobResponse]
"""
super(JobResponseListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
class KeyVaultKeyProperties(msrest.serialization.Model):
"""The properties of the KeyVault key.
:ivar key_identifier: The identifier of the key.
:vartype key_identifier: str
:ivar identity: Managed identity properties of KeyVault Key.
:vartype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
"""
_attribute_map = {
'key_identifier': {'key': 'keyIdentifier', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ManagedIdentity'},
}
def __init__(
self,
*,
key_identifier: Optional[str] = None,
identity: Optional["ManagedIdentity"] = None,
**kwargs
):
"""
:keyword key_identifier: The identifier of the key.
:paramtype key_identifier: str
:keyword identity: Managed identity properties of KeyVault Key.
:paramtype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
"""
super(KeyVaultKeyProperties, self).__init__(**kwargs)
self.key_identifier = key_identifier
self.identity = identity
class ManagedIdentity(msrest.serialization.Model):
"""The properties of the Managed identity.
:ivar user_assigned_identity: The user assigned identity.
:vartype user_assigned_identity: str
"""
_attribute_map = {
'user_assigned_identity': {'key': 'userAssignedIdentity', 'type': 'str'},
}
def __init__(
self,
*,
user_assigned_identity: Optional[str] = None,
**kwargs
):
"""
:keyword user_assigned_identity: The user assigned identity.
:paramtype user_assigned_identity: str
"""
super(ManagedIdentity, self).__init__(**kwargs)
self.user_assigned_identity = user_assigned_identity
class MatchedRoute(msrest.serialization.Model):
"""Routes that matched.
:ivar properties: Properties of routes that matched.
:vartype properties: ~azure.mgmt.iothub.v2021_03_03_preview.models.RouteProperties
"""
_attribute_map = {
'properties': {'key': 'properties', 'type': 'RouteProperties'},
}
def __init__(
self,
*,
properties: Optional["RouteProperties"] = None,
**kwargs
):
"""
:keyword properties: Properties of routes that matched.
:paramtype properties: ~azure.mgmt.iothub.v2021_03_03_preview.models.RouteProperties
"""
super(MatchedRoute, self).__init__(**kwargs)
self.properties = properties
class MessagingEndpointProperties(msrest.serialization.Model):
"""The properties of the messaging endpoints used by this IoT hub.
:ivar lock_duration_as_iso8601: The lock duration. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:vartype lock_duration_as_iso8601: ~datetime.timedelta
:ivar ttl_as_iso8601: The period of time for which a message is available to consume before it
is expired by the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:vartype ttl_as_iso8601: ~datetime.timedelta
:ivar max_delivery_count: The number of times the IoT hub attempts to deliver a message. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:vartype max_delivery_count: int
"""
_validation = {
'max_delivery_count': {'maximum': 100, 'minimum': 1},
}
_attribute_map = {
'lock_duration_as_iso8601': {'key': 'lockDurationAsIso8601', 'type': 'duration'},
'ttl_as_iso8601': {'key': 'ttlAsIso8601', 'type': 'duration'},
'max_delivery_count': {'key': 'maxDeliveryCount', 'type': 'int'},
}
def __init__(
self,
*,
lock_duration_as_iso8601: Optional[datetime.timedelta] = None,
ttl_as_iso8601: Optional[datetime.timedelta] = None,
max_delivery_count: Optional[int] = None,
**kwargs
):
"""
:keyword lock_duration_as_iso8601: The lock duration. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:paramtype lock_duration_as_iso8601: ~datetime.timedelta
:keyword ttl_as_iso8601: The period of time for which a message is available to consume before
it is expired by the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:paramtype ttl_as_iso8601: ~datetime.timedelta
:keyword max_delivery_count: The number of times the IoT hub attempts to deliver a message.
See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:paramtype max_delivery_count: int
"""
super(MessagingEndpointProperties, self).__init__(**kwargs)
self.lock_duration_as_iso8601 = lock_duration_as_iso8601
self.ttl_as_iso8601 = ttl_as_iso8601
self.max_delivery_count = max_delivery_count
class Name(msrest.serialization.Model):
"""Name of Iot Hub type.
:ivar value: IotHub type.
:vartype value: str
:ivar localized_value: Localized value of name.
:vartype localized_value: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'localized_value': {'key': 'localizedValue', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[str] = None,
localized_value: Optional[str] = None,
**kwargs
):
"""
:keyword value: IotHub type.
:paramtype value: str
:keyword localized_value: Localized value of name.
:paramtype localized_value: str
"""
super(Name, self).__init__(**kwargs)
self.value = value
self.localized_value = localized_value
class NetworkRuleSetIpRule(msrest.serialization.Model):
"""IP Rule to be applied as part of Network Rule Set.
All required parameters must be populated in order to send to Azure.
:ivar filter_name: Required. Name of the IP filter rule.
:vartype filter_name: str
:ivar action: IP Filter Action. Possible values include: "Allow". Default value: "Allow".
:vartype action: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.NetworkRuleIPAction
:ivar ip_mask: Required. A string that contains the IP address range in CIDR notation for the
rule.
:vartype ip_mask: str
"""
_validation = {
'filter_name': {'required': True},
'ip_mask': {'required': True},
}
_attribute_map = {
'filter_name': {'key': 'filterName', 'type': 'str'},
'action': {'key': 'action', 'type': 'str'},
'ip_mask': {'key': 'ipMask', 'type': 'str'},
}
def __init__(
self,
*,
filter_name: str,
ip_mask: str,
action: Optional[Union[str, "NetworkRuleIPAction"]] = "Allow",
**kwargs
):
"""
:keyword filter_name: Required. Name of the IP filter rule.
:paramtype filter_name: str
:keyword action: IP Filter Action. Possible values include: "Allow". Default value: "Allow".
:paramtype action: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.NetworkRuleIPAction
:keyword ip_mask: Required. A string that contains the IP address range in CIDR notation for
the rule.
:paramtype ip_mask: str
"""
super(NetworkRuleSetIpRule, self).__init__(**kwargs)
self.filter_name = filter_name
self.action = action
self.ip_mask = ip_mask
class NetworkRuleSetProperties(msrest.serialization.Model):
"""Network Rule Set Properties of IotHub.
All required parameters must be populated in order to send to Azure.
:ivar default_action: Default Action for Network Rule Set. Possible values include: "Deny",
"Allow". Default value: "Deny".
:vartype default_action: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.DefaultAction
    :ivar apply_to_built_in_event_hub_endpoint: Required. If True, the Network Rule Set is also
     applied to the built-in Event Hub endpoint of the IoT hub.
:vartype apply_to_built_in_event_hub_endpoint: bool
:ivar ip_rules: Required. List of IP Rules.
:vartype ip_rules: list[~azure.mgmt.iothub.v2021_03_03_preview.models.NetworkRuleSetIpRule]
"""
_validation = {
'apply_to_built_in_event_hub_endpoint': {'required': True},
'ip_rules': {'required': True},
}
_attribute_map = {
'default_action': {'key': 'defaultAction', 'type': 'str'},
'apply_to_built_in_event_hub_endpoint': {'key': 'applyToBuiltInEventHubEndpoint', 'type': 'bool'},
'ip_rules': {'key': 'ipRules', 'type': '[NetworkRuleSetIpRule]'},
}
def __init__(
self,
*,
apply_to_built_in_event_hub_endpoint: bool,
ip_rules: List["NetworkRuleSetIpRule"],
default_action: Optional[Union[str, "DefaultAction"]] = "Deny",
**kwargs
):
"""
:keyword default_action: Default Action for Network Rule Set. Possible values include: "Deny",
"Allow". Default value: "Deny".
:paramtype default_action: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.DefaultAction
        :keyword apply_to_built_in_event_hub_endpoint: Required. If True, the Network Rule Set is also
         applied to the built-in Event Hub endpoint of the IoT hub.
:paramtype apply_to_built_in_event_hub_endpoint: bool
:keyword ip_rules: Required. List of IP Rules.
:paramtype ip_rules: list[~azure.mgmt.iothub.v2021_03_03_preview.models.NetworkRuleSetIpRule]
"""
super(NetworkRuleSetProperties, self).__init__(**kwargs)
self.default_action = default_action
self.apply_to_built_in_event_hub_endpoint = apply_to_built_in_event_hub_endpoint
self.ip_rules = ip_rules
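
# Illustrative sketch (hypothetical rule name and documentation-only CIDR range,
# not part of the generated API surface): a deny-by-default network rule set with
# one allowed range that is also applied to the built-in Event Hub endpoint.
def _example_network_rule_set() -> "NetworkRuleSetProperties":
    """Return a sample NetworkRuleSetProperties with a single Allow rule."""
    return NetworkRuleSetProperties(
        default_action="Deny",
        apply_to_built_in_event_hub_endpoint=True,
        ip_rules=[
            NetworkRuleSetIpRule(filter_name="allow-vnet-range", ip_mask="198.51.100.0/24"),
        ],
    )
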
class Operation(msrest.serialization.Model):
"""IoT Hub REST API operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Operation name: {provider}/{resource}/{read | write | action | delete}.
:vartype name: str
:ivar display: The object that represents the operation.
:vartype display: ~azure.mgmt.iothub.v2021_03_03_preview.models.OperationDisplay
"""
_validation = {
'name': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
}
def __init__(
self,
*,
display: Optional["OperationDisplay"] = None,
**kwargs
):
"""
:keyword display: The object that represents the operation.
:paramtype display: ~azure.mgmt.iothub.v2021_03_03_preview.models.OperationDisplay
"""
super(Operation, self).__init__(**kwargs)
self.name = None
self.display = display
class OperationDisplay(msrest.serialization.Model):
"""The object that represents the operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provider: Service provider: Microsoft Devices.
:vartype provider: str
:ivar resource: Resource Type: IotHubs.
:vartype resource: str
:ivar operation: Name of the operation.
:vartype operation: str
:ivar description: Description of the operation.
:vartype description: str
"""
_validation = {
'provider': {'readonly': True},
'resource': {'readonly': True},
'operation': {'readonly': True},
'description': {'readonly': True},
}
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(OperationDisplay, self).__init__(**kwargs)
self.provider = None
self.resource = None
self.operation = None
self.description = None
class OperationInputs(msrest.serialization.Model):
"""Input values.
All required parameters must be populated in order to send to Azure.
:ivar name: Required. The name of the IoT hub to check.
:vartype name: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
**kwargs
):
"""
:keyword name: Required. The name of the IoT hub to check.
:paramtype name: str
"""
super(OperationInputs, self).__init__(**kwargs)
self.name = name
class OperationListResult(msrest.serialization.Model):
"""Result of the request to list IoT Hub operations. It contains a list of operations and a URL link to get the next set of results.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of IoT Hub operations supported by the Microsoft.Devices resource provider.
:vartype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.Operation]
:ivar next_link: URL to get the next set of operation list results if there are any.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(OperationListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class PrivateEndpoint(msrest.serialization.Model):
"""The private endpoint property of a private endpoint connection.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource identifier.
:vartype id: str
"""
_validation = {
'id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(PrivateEndpoint, self).__init__(**kwargs)
self.id = None
class PrivateEndpointConnection(msrest.serialization.Model):
"""The private endpoint connection of an IotHub.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:ivar properties: Required. The properties of a private endpoint connection.
:vartype properties:
~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateEndpointConnectionProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True, 'pattern': r'^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{2,49}[a-zA-Z0-9]$'},
'type': {'readonly': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'PrivateEndpointConnectionProperties'},
}
def __init__(
self,
*,
properties: "PrivateEndpointConnectionProperties",
**kwargs
):
"""
:keyword properties: Required. The properties of a private endpoint connection.
:paramtype properties:
~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateEndpointConnectionProperties
"""
super(PrivateEndpointConnection, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.properties = properties
class PrivateEndpointConnectionProperties(msrest.serialization.Model):
"""The properties of a private endpoint connection.
All required parameters must be populated in order to send to Azure.
:ivar private_endpoint: The private endpoint property of a private endpoint connection.
:vartype private_endpoint: ~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateEndpoint
:ivar private_link_service_connection_state: Required. The current state of a private endpoint
connection.
:vartype private_link_service_connection_state:
~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateLinkServiceConnectionState
"""
_validation = {
'private_link_service_connection_state': {'required': True},
}
_attribute_map = {
'private_endpoint': {'key': 'privateEndpoint', 'type': 'PrivateEndpoint'},
'private_link_service_connection_state': {'key': 'privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionState'},
}
def __init__(
self,
*,
private_link_service_connection_state: "PrivateLinkServiceConnectionState",
private_endpoint: Optional["PrivateEndpoint"] = None,
**kwargs
):
"""
:keyword private_endpoint: The private endpoint property of a private endpoint connection.
:paramtype private_endpoint: ~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateEndpoint
:keyword private_link_service_connection_state: Required. The current state of a private
endpoint connection.
:paramtype private_link_service_connection_state:
~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateLinkServiceConnectionState
"""
super(PrivateEndpointConnectionProperties, self).__init__(**kwargs)
self.private_endpoint = private_endpoint
self.private_link_service_connection_state = private_link_service_connection_state
class PrivateLinkResources(msrest.serialization.Model):
"""The available private link resources for an IotHub.
:ivar value: The list of available private link resources for an IotHub.
:vartype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.GroupIdInformation]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[GroupIdInformation]'},
}
def __init__(
self,
*,
value: Optional[List["GroupIdInformation"]] = None,
**kwargs
):
"""
:keyword value: The list of available private link resources for an IotHub.
:paramtype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.GroupIdInformation]
"""
super(PrivateLinkResources, self).__init__(**kwargs)
self.value = value
class PrivateLinkServiceConnectionState(msrest.serialization.Model):
"""The current state of a private endpoint connection.
All required parameters must be populated in order to send to Azure.
:ivar status: Required. The status of a private endpoint connection. Possible values include:
"Pending", "Approved", "Rejected", "Disconnected".
:vartype status: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateLinkServiceConnectionStatus
:ivar description: Required. The description for the current state of a private endpoint
connection.
:vartype description: str
:ivar actions_required: Actions required for a private endpoint connection.
:vartype actions_required: str
"""
_validation = {
'status': {'required': True},
'description': {'required': True},
}
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'actions_required': {'key': 'actionsRequired', 'type': 'str'},
}
def __init__(
self,
*,
status: Union[str, "PrivateLinkServiceConnectionStatus"],
description: str,
actions_required: Optional[str] = None,
**kwargs
):
"""
:keyword status: Required. The status of a private endpoint connection. Possible values
include: "Pending", "Approved", "Rejected", "Disconnected".
:paramtype status: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateLinkServiceConnectionStatus
:keyword description: Required. The description for the current state of a private endpoint
connection.
:paramtype description: str
:keyword actions_required: Actions required for a private endpoint connection.
:paramtype actions_required: str
"""
super(PrivateLinkServiceConnectionState, self).__init__(**kwargs)
self.status = status
self.description = description
self.actions_required = actions_required
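# Usage sketch (illustrative only): an approved connection state. The status is
# one of the documented values; the description text is an arbitrary example.
def _example_connection_state():
    return PrivateLinkServiceConnectionState(
        status="Approved",
        description="Approved by the hub owner",
    )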
class RegistryStatistics(msrest.serialization.Model):
"""Identity registry statistics.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar total_device_count: The total count of devices in the identity registry.
:vartype total_device_count: long
:ivar enabled_device_count: The count of enabled devices in the identity registry.
:vartype enabled_device_count: long
:ivar disabled_device_count: The count of disabled devices in the identity registry.
:vartype disabled_device_count: long
"""
_validation = {
'total_device_count': {'readonly': True},
'enabled_device_count': {'readonly': True},
'disabled_device_count': {'readonly': True},
}
_attribute_map = {
'total_device_count': {'key': 'totalDeviceCount', 'type': 'long'},
'enabled_device_count': {'key': 'enabledDeviceCount', 'type': 'long'},
'disabled_device_count': {'key': 'disabledDeviceCount', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(RegistryStatistics, self).__init__(**kwargs)
self.total_device_count = None
self.enabled_device_count = None
self.disabled_device_count = None
class RouteCompilationError(msrest.serialization.Model):
"""Compilation error when evaluating route.
:ivar message: Route error message.
:vartype message: str
:ivar severity: Severity of the route error. Possible values include: "error", "warning".
:vartype severity: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.RouteErrorSeverity
:ivar location: Location where the route error happened.
:vartype location: ~azure.mgmt.iothub.v2021_03_03_preview.models.RouteErrorRange
"""
_attribute_map = {
'message': {'key': 'message', 'type': 'str'},
'severity': {'key': 'severity', 'type': 'str'},
'location': {'key': 'location', 'type': 'RouteErrorRange'},
}
def __init__(
self,
*,
message: Optional[str] = None,
severity: Optional[Union[str, "RouteErrorSeverity"]] = None,
location: Optional["RouteErrorRange"] = None,
**kwargs
):
"""
:keyword message: Route error message.
:paramtype message: str
:keyword severity: Severity of the route error. Possible values include: "error", "warning".
:paramtype severity: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.RouteErrorSeverity
:keyword location: Location where the route error happened.
:paramtype location: ~azure.mgmt.iothub.v2021_03_03_preview.models.RouteErrorRange
"""
super(RouteCompilationError, self).__init__(**kwargs)
self.message = message
self.severity = severity
self.location = location
class RouteErrorPosition(msrest.serialization.Model):
"""Position where the route error happened.
:ivar line: Line where the route error happened.
:vartype line: int
:ivar column: Column where the route error happened.
:vartype column: int
"""
_attribute_map = {
'line': {'key': 'line', 'type': 'int'},
'column': {'key': 'column', 'type': 'int'},
}
def __init__(
self,
*,
line: Optional[int] = None,
column: Optional[int] = None,
**kwargs
):
"""
:keyword line: Line where the route error happened.
:paramtype line: int
:keyword column: Column where the route error happened.
:paramtype column: int
"""
super(RouteErrorPosition, self).__init__(**kwargs)
self.line = line
self.column = column
class RouteErrorRange(msrest.serialization.Model):
"""Range of route errors.
:ivar start: Start where the route error happened.
:vartype start: ~azure.mgmt.iothub.v2021_03_03_preview.models.RouteErrorPosition
:ivar end: End where the route error happened.
:vartype end: ~azure.mgmt.iothub.v2021_03_03_preview.models.RouteErrorPosition
"""
_attribute_map = {
'start': {'key': 'start', 'type': 'RouteErrorPosition'},
'end': {'key': 'end', 'type': 'RouteErrorPosition'},
}
def __init__(
self,
*,
start: Optional["RouteErrorPosition"] = None,
end: Optional["RouteErrorPosition"] = None,
**kwargs
):
"""
:keyword start: Start where the route error happened.
:paramtype start: ~azure.mgmt.iothub.v2021_03_03_preview.models.RouteErrorPosition
:keyword end: End where the route error happened.
:paramtype end: ~azure.mgmt.iothub.v2021_03_03_preview.models.RouteErrorPosition
"""
super(RouteErrorRange, self).__init__(**kwargs)
self.start = start
self.end = end
class RouteProperties(msrest.serialization.Model):
"""The properties of a routing rule that your IoT hub uses to route messages to endpoints.
All required parameters must be populated in order to send to Azure.
    :ivar name: Required. The name of the route. The name can only include alphanumeric
     characters, periods, underscores, and hyphens; it has a maximum length of 64 characters and
     must be unique.
:vartype name: str
:ivar source: Required. The source that the routing rule is to be applied to, such as
DeviceMessages. Possible values include: "Invalid", "DeviceMessages", "TwinChangeEvents",
"DeviceLifecycleEvents", "DeviceJobLifecycleEvents", "DigitalTwinChangeEvents",
"DeviceConnectionStateEvents".
:vartype source: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingSource
:ivar condition: The condition that is evaluated to apply the routing rule. If no condition is
provided, it evaluates to true by default. For grammar, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
:vartype condition: str
:ivar endpoint_names: Required. The list of endpoints to which messages that satisfy the
condition are routed. Currently only one endpoint is allowed.
:vartype endpoint_names: list[str]
:ivar is_enabled: Required. Used to specify whether a route is enabled.
:vartype is_enabled: bool
"""
_validation = {
'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'},
'source': {'required': True},
'endpoint_names': {'required': True, 'max_items': 1, 'min_items': 1},
'is_enabled': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'source': {'key': 'source', 'type': 'str'},
'condition': {'key': 'condition', 'type': 'str'},
'endpoint_names': {'key': 'endpointNames', 'type': '[str]'},
'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
}
def __init__(
self,
*,
name: str,
source: Union[str, "RoutingSource"],
endpoint_names: List[str],
is_enabled: bool,
condition: Optional[str] = None,
**kwargs
):
"""
        :keyword name: Required. The name of the route. The name can only include alphanumeric
         characters, periods, underscores, and hyphens; it has a maximum length of 64 characters
         and must be unique.
:paramtype name: str
:keyword source: Required. The source that the routing rule is to be applied to, such as
DeviceMessages. Possible values include: "Invalid", "DeviceMessages", "TwinChangeEvents",
"DeviceLifecycleEvents", "DeviceJobLifecycleEvents", "DigitalTwinChangeEvents",
"DeviceConnectionStateEvents".
:paramtype source: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingSource
:keyword condition: The condition that is evaluated to apply the routing rule. If no condition
is provided, it evaluates to true by default. For grammar, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
:paramtype condition: str
:keyword endpoint_names: Required. The list of endpoints to which messages that satisfy the
condition are routed. Currently only one endpoint is allowed.
:paramtype endpoint_names: list[str]
:keyword is_enabled: Required. Used to specify whether a route is enabled.
:paramtype is_enabled: bool
"""
super(RouteProperties, self).__init__(**kwargs)
self.name = name
self.source = source
self.condition = condition
self.endpoint_names = endpoint_names
self.is_enabled = is_enabled
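# Usage sketch (illustrative only): a minimal telemetry route. The route name and
# condition are arbitrary; "events" refers to the built-in endpoint.
def _example_route():
    return RouteProperties(
        name="DeviceTelemetryToBuiltIn",
        source="DeviceMessages",
        condition="true",
        endpoint_names=["events"],
        is_enabled=True,
    )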
class RoutingEndpoints(msrest.serialization.Model):
"""The properties related to the custom endpoints to which your IoT hub routes messages based on the routing rules. A maximum of 10 custom endpoints are allowed across all endpoint types for paid hubs and only 1 custom endpoint is allowed across all endpoint types for free hubs.
:ivar service_bus_queues: The list of Service Bus queue endpoints that IoT hub routes the
messages to, based on the routing rules.
:vartype service_bus_queues:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingServiceBusQueueEndpointProperties]
:ivar service_bus_topics: The list of Service Bus topic endpoints that the IoT hub routes the
messages to, based on the routing rules.
:vartype service_bus_topics:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingServiceBusTopicEndpointProperties]
:ivar event_hubs: The list of Event Hubs endpoints that IoT hub routes messages to, based on
the routing rules. This list does not include the built-in Event Hubs endpoint.
:vartype event_hubs:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingEventHubProperties]
:ivar storage_containers: The list of storage container endpoints that IoT hub routes messages
to, based on the routing rules.
:vartype storage_containers:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingStorageContainerProperties]
"""
_attribute_map = {
'service_bus_queues': {'key': 'serviceBusQueues', 'type': '[RoutingServiceBusQueueEndpointProperties]'},
'service_bus_topics': {'key': 'serviceBusTopics', 'type': '[RoutingServiceBusTopicEndpointProperties]'},
'event_hubs': {'key': 'eventHubs', 'type': '[RoutingEventHubProperties]'},
'storage_containers': {'key': 'storageContainers', 'type': '[RoutingStorageContainerProperties]'},
}
def __init__(
self,
*,
service_bus_queues: Optional[List["RoutingServiceBusQueueEndpointProperties"]] = None,
service_bus_topics: Optional[List["RoutingServiceBusTopicEndpointProperties"]] = None,
event_hubs: Optional[List["RoutingEventHubProperties"]] = None,
storage_containers: Optional[List["RoutingStorageContainerProperties"]] = None,
**kwargs
):
"""
:keyword service_bus_queues: The list of Service Bus queue endpoints that IoT hub routes the
messages to, based on the routing rules.
:paramtype service_bus_queues:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingServiceBusQueueEndpointProperties]
:keyword service_bus_topics: The list of Service Bus topic endpoints that the IoT hub routes
the messages to, based on the routing rules.
:paramtype service_bus_topics:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingServiceBusTopicEndpointProperties]
:keyword event_hubs: The list of Event Hubs endpoints that IoT hub routes messages to, based on
the routing rules. This list does not include the built-in Event Hubs endpoint.
:paramtype event_hubs:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingEventHubProperties]
:keyword storage_containers: The list of storage container endpoints that IoT hub routes
messages to, based on the routing rules.
:paramtype storage_containers:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingStorageContainerProperties]
"""
super(RoutingEndpoints, self).__init__(**kwargs)
self.service_bus_queues = service_bus_queues
self.service_bus_topics = service_bus_topics
self.event_hubs = event_hubs
self.storage_containers = storage_containers
class RoutingEventHubProperties(msrest.serialization.Model):
"""The properties related to an event hub endpoint.
All required parameters must be populated in order to send to Azure.
:ivar id: Id of the event hub endpoint.
:vartype id: str
:ivar connection_string: The connection string of the event hub endpoint.
:vartype connection_string: str
:ivar endpoint_uri: The url of the event hub endpoint. It must include the protocol sb://.
:vartype endpoint_uri: str
:ivar entity_path: Event hub name on the event hub namespace.
:vartype entity_path: str
:ivar authentication_type: Method used to authenticate against the event hub endpoint. Possible
values include: "keyBased", "identityBased".
:vartype authentication_type: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.AuthenticationType
:ivar identity: Managed identity properties of routing event hub endpoint.
:vartype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
:ivar name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types.
:vartype name: str
:ivar subscription_id: The subscription identifier of the event hub endpoint.
:vartype subscription_id: str
:ivar resource_group: The name of the resource group of the event hub endpoint.
:vartype resource_group: str
"""
_validation = {
'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'connection_string': {'key': 'connectionString', 'type': 'str'},
'endpoint_uri': {'key': 'endpointUri', 'type': 'str'},
'entity_path': {'key': 'entityPath', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ManagedIdentity'},
'name': {'key': 'name', 'type': 'str'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'resource_group': {'key': 'resourceGroup', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
id: Optional[str] = None,
connection_string: Optional[str] = None,
endpoint_uri: Optional[str] = None,
entity_path: Optional[str] = None,
authentication_type: Optional[Union[str, "AuthenticationType"]] = None,
identity: Optional["ManagedIdentity"] = None,
subscription_id: Optional[str] = None,
resource_group: Optional[str] = None,
**kwargs
):
"""
:keyword id: Id of the event hub endpoint.
:paramtype id: str
:keyword connection_string: The connection string of the event hub endpoint.
:paramtype connection_string: str
:keyword endpoint_uri: The url of the event hub endpoint. It must include the protocol sb://.
:paramtype endpoint_uri: str
:keyword entity_path: Event hub name on the event hub namespace.
:paramtype entity_path: str
:keyword authentication_type: Method used to authenticate against the event hub endpoint.
Possible values include: "keyBased", "identityBased".
:paramtype authentication_type: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.AuthenticationType
:keyword identity: Managed identity properties of routing event hub endpoint.
:paramtype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
:keyword name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types.
:paramtype name: str
:keyword subscription_id: The subscription identifier of the event hub endpoint.
:paramtype subscription_id: str
:keyword resource_group: The name of the resource group of the event hub endpoint.
:paramtype resource_group: str
"""
super(RoutingEventHubProperties, self).__init__(**kwargs)
self.id = id
self.connection_string = connection_string
self.endpoint_uri = endpoint_uri
self.entity_path = entity_path
self.authentication_type = authentication_type
self.identity = identity
self.name = name
self.subscription_id = subscription_id
self.resource_group = resource_group
class RoutingMessage(msrest.serialization.Model):
"""Routing message.
:ivar body: Body of routing message.
:vartype body: str
:ivar app_properties: App properties.
:vartype app_properties: dict[str, str]
:ivar system_properties: System properties.
:vartype system_properties: dict[str, str]
"""
_attribute_map = {
'body': {'key': 'body', 'type': 'str'},
'app_properties': {'key': 'appProperties', 'type': '{str}'},
'system_properties': {'key': 'systemProperties', 'type': '{str}'},
}
def __init__(
self,
*,
body: Optional[str] = None,
app_properties: Optional[Dict[str, str]] = None,
system_properties: Optional[Dict[str, str]] = None,
**kwargs
):
"""
:keyword body: Body of routing message.
:paramtype body: str
:keyword app_properties: App properties.
:paramtype app_properties: dict[str, str]
:keyword system_properties: System properties.
:paramtype system_properties: dict[str, str]
"""
super(RoutingMessage, self).__init__(**kwargs)
self.body = body
self.app_properties = app_properties
self.system_properties = system_properties
class RoutingProperties(msrest.serialization.Model):
"""The routing related properties of the IoT hub. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging.
:ivar endpoints: The properties related to the custom endpoints to which your IoT hub routes
messages based on the routing rules. A maximum of 10 custom endpoints are allowed across all
endpoint types for paid hubs and only 1 custom endpoint is allowed across all endpoint types
for free hubs.
:vartype endpoints: ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingEndpoints
:ivar routes: The list of user-provided routing rules that the IoT hub uses to route messages
to built-in and custom endpoints. A maximum of 100 routing rules are allowed for paid hubs and
a maximum of 5 routing rules are allowed for free hubs.
:vartype routes: list[~azure.mgmt.iothub.v2021_03_03_preview.models.RouteProperties]
:ivar fallback_route: The properties of the route that is used as a fall-back route when none
of the conditions specified in the 'routes' section are met. This is an optional parameter.
When this property is not set, the messages which do not meet any of the conditions specified
in the 'routes' section get routed to the built-in eventhub endpoint.
:vartype fallback_route: ~azure.mgmt.iothub.v2021_03_03_preview.models.FallbackRouteProperties
:ivar enrichments: The list of user-provided enrichments that the IoT hub applies to messages
to be delivered to built-in and custom endpoints. See: https://aka.ms/telemetryoneventgrid.
:vartype enrichments: list[~azure.mgmt.iothub.v2021_03_03_preview.models.EnrichmentProperties]
"""
_attribute_map = {
'endpoints': {'key': 'endpoints', 'type': 'RoutingEndpoints'},
'routes': {'key': 'routes', 'type': '[RouteProperties]'},
'fallback_route': {'key': 'fallbackRoute', 'type': 'FallbackRouteProperties'},
'enrichments': {'key': 'enrichments', 'type': '[EnrichmentProperties]'},
}
def __init__(
self,
*,
endpoints: Optional["RoutingEndpoints"] = None,
routes: Optional[List["RouteProperties"]] = None,
fallback_route: Optional["FallbackRouteProperties"] = None,
enrichments: Optional[List["EnrichmentProperties"]] = None,
**kwargs
):
"""
:keyword endpoints: The properties related to the custom endpoints to which your IoT hub routes
messages based on the routing rules. A maximum of 10 custom endpoints are allowed across all
endpoint types for paid hubs and only 1 custom endpoint is allowed across all endpoint types
for free hubs.
:paramtype endpoints: ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingEndpoints
:keyword routes: The list of user-provided routing rules that the IoT hub uses to route
messages to built-in and custom endpoints. A maximum of 100 routing rules are allowed for paid
hubs and a maximum of 5 routing rules are allowed for free hubs.
:paramtype routes: list[~azure.mgmt.iothub.v2021_03_03_preview.models.RouteProperties]
:keyword fallback_route: The properties of the route that is used as a fall-back route when
none of the conditions specified in the 'routes' section are met. This is an optional
parameter. When this property is not set, the messages which do not meet any of the conditions
specified in the 'routes' section get routed to the built-in eventhub endpoint.
:paramtype fallback_route:
~azure.mgmt.iothub.v2021_03_03_preview.models.FallbackRouteProperties
:keyword enrichments: The list of user-provided enrichments that the IoT hub applies to
messages to be delivered to built-in and custom endpoints. See:
https://aka.ms/telemetryoneventgrid.
:paramtype enrichments:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.EnrichmentProperties]
"""
super(RoutingProperties, self).__init__(**kwargs)
self.endpoints = endpoints
self.routes = routes
self.fallback_route = fallback_route
self.enrichments = enrichments
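# Usage sketch (illustrative only): routing properties that send device messages to
# a single custom Event Hubs endpoint. Names and the connection string placeholder
# are arbitrary; the fallback route is left unset so the service default applies.
def _example_routing_properties():
    endpoint = RoutingEventHubProperties(
        name="telemetryEventHub",
        connection_string="<event-hub-connection-string>",
        entity_path="telemetry",
    )
    route = RouteProperties(
        name="AllTelemetry",
        source="DeviceMessages",
        endpoint_names=["telemetryEventHub"],
        is_enabled=True,
    )
    return RoutingProperties(
        endpoints=RoutingEndpoints(event_hubs=[endpoint]),
        routes=[route],
    )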
class RoutingServiceBusQueueEndpointProperties(msrest.serialization.Model):
"""The properties related to service bus queue endpoint types.
All required parameters must be populated in order to send to Azure.
:ivar id: Id of the service bus queue endpoint.
:vartype id: str
:ivar connection_string: The connection string of the service bus queue endpoint.
:vartype connection_string: str
:ivar endpoint_uri: The url of the service bus queue endpoint. It must include the protocol
sb://.
:vartype endpoint_uri: str
:ivar entity_path: Queue name on the service bus namespace.
:vartype entity_path: str
:ivar authentication_type: Method used to authenticate against the service bus queue endpoint.
Possible values include: "keyBased", "identityBased".
:vartype authentication_type: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.AuthenticationType
:ivar identity: Managed identity properties of routing service bus queue endpoint.
:vartype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
:ivar name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types. The name need not be the same as the actual queue
name.
:vartype name: str
:ivar subscription_id: The subscription identifier of the service bus queue endpoint.
:vartype subscription_id: str
:ivar resource_group: The name of the resource group of the service bus queue endpoint.
:vartype resource_group: str
"""
_validation = {
'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'connection_string': {'key': 'connectionString', 'type': 'str'},
'endpoint_uri': {'key': 'endpointUri', 'type': 'str'},
'entity_path': {'key': 'entityPath', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ManagedIdentity'},
'name': {'key': 'name', 'type': 'str'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'resource_group': {'key': 'resourceGroup', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
id: Optional[str] = None,
connection_string: Optional[str] = None,
endpoint_uri: Optional[str] = None,
entity_path: Optional[str] = None,
authentication_type: Optional[Union[str, "AuthenticationType"]] = None,
identity: Optional["ManagedIdentity"] = None,
subscription_id: Optional[str] = None,
resource_group: Optional[str] = None,
**kwargs
):
"""
:keyword id: Id of the service bus queue endpoint.
:paramtype id: str
:keyword connection_string: The connection string of the service bus queue endpoint.
:paramtype connection_string: str
:keyword endpoint_uri: The url of the service bus queue endpoint. It must include the protocol
sb://.
:paramtype endpoint_uri: str
:keyword entity_path: Queue name on the service bus namespace.
:paramtype entity_path: str
:keyword authentication_type: Method used to authenticate against the service bus queue
endpoint. Possible values include: "keyBased", "identityBased".
:paramtype authentication_type: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.AuthenticationType
:keyword identity: Managed identity properties of routing service bus queue endpoint.
:paramtype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
:keyword name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types. The name need not be the same as the actual queue
name.
:paramtype name: str
:keyword subscription_id: The subscription identifier of the service bus queue endpoint.
:paramtype subscription_id: str
:keyword resource_group: The name of the resource group of the service bus queue endpoint.
:paramtype resource_group: str
"""
super(RoutingServiceBusQueueEndpointProperties, self).__init__(**kwargs)
self.id = id
self.connection_string = connection_string
self.endpoint_uri = endpoint_uri
self.entity_path = entity_path
self.authentication_type = authentication_type
self.identity = identity
self.name = name
self.subscription_id = subscription_id
self.resource_group = resource_group
class RoutingServiceBusTopicEndpointProperties(msrest.serialization.Model):
"""The properties related to service bus topic endpoint types.
All required parameters must be populated in order to send to Azure.
:ivar id: Id of the service bus topic endpoint.
:vartype id: str
:ivar connection_string: The connection string of the service bus topic endpoint.
:vartype connection_string: str
:ivar endpoint_uri: The url of the service bus topic endpoint. It must include the protocol
sb://.
:vartype endpoint_uri: str
    :ivar entity_path: Topic name on the service bus namespace.
:vartype entity_path: str
:ivar authentication_type: Method used to authenticate against the service bus topic endpoint.
Possible values include: "keyBased", "identityBased".
:vartype authentication_type: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.AuthenticationType
:ivar identity: Managed identity properties of routing service bus topic endpoint.
:vartype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
:ivar name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types. The name need not be the same as the actual topic
name.
:vartype name: str
:ivar subscription_id: The subscription identifier of the service bus topic endpoint.
:vartype subscription_id: str
:ivar resource_group: The name of the resource group of the service bus topic endpoint.
:vartype resource_group: str
"""
_validation = {
'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'connection_string': {'key': 'connectionString', 'type': 'str'},
'endpoint_uri': {'key': 'endpointUri', 'type': 'str'},
'entity_path': {'key': 'entityPath', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ManagedIdentity'},
'name': {'key': 'name', 'type': 'str'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'resource_group': {'key': 'resourceGroup', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
id: Optional[str] = None,
connection_string: Optional[str] = None,
endpoint_uri: Optional[str] = None,
entity_path: Optional[str] = None,
authentication_type: Optional[Union[str, "AuthenticationType"]] = None,
identity: Optional["ManagedIdentity"] = None,
subscription_id: Optional[str] = None,
resource_group: Optional[str] = None,
**kwargs
):
"""
:keyword id: Id of the service bus topic endpoint.
:paramtype id: str
:keyword connection_string: The connection string of the service bus topic endpoint.
:paramtype connection_string: str
:keyword endpoint_uri: The url of the service bus topic endpoint. It must include the protocol
sb://.
:paramtype endpoint_uri: str
        :keyword entity_path: Topic name on the service bus namespace.
:paramtype entity_path: str
:keyword authentication_type: Method used to authenticate against the service bus topic
endpoint. Possible values include: "keyBased", "identityBased".
:paramtype authentication_type: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.AuthenticationType
:keyword identity: Managed identity properties of routing service bus topic endpoint.
:paramtype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
:keyword name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types. The name need not be the same as the actual topic
name.
:paramtype name: str
:keyword subscription_id: The subscription identifier of the service bus topic endpoint.
:paramtype subscription_id: str
:keyword resource_group: The name of the resource group of the service bus topic endpoint.
:paramtype resource_group: str
"""
super(RoutingServiceBusTopicEndpointProperties, self).__init__(**kwargs)
self.id = id
self.connection_string = connection_string
self.endpoint_uri = endpoint_uri
self.entity_path = entity_path
self.authentication_type = authentication_type
self.identity = identity
self.name = name
self.subscription_id = subscription_id
self.resource_group = resource_group
class RoutingStorageContainerProperties(msrest.serialization.Model):
"""The properties related to a storage container endpoint.
All required parameters must be populated in order to send to Azure.
:ivar id: Id of the storage container endpoint.
:vartype id: str
:ivar connection_string: The connection string of the storage account.
:vartype connection_string: str
:ivar endpoint_uri: The url of the storage endpoint. It must include the protocol https://.
:vartype endpoint_uri: str
:ivar authentication_type: Method used to authenticate against the storage endpoint. Possible
values include: "keyBased", "identityBased".
:vartype authentication_type: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.AuthenticationType
:ivar identity: Managed identity properties of routing storage endpoint.
:vartype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
:ivar name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types.
:vartype name: str
:ivar subscription_id: The subscription identifier of the storage account.
:vartype subscription_id: str
:ivar resource_group: The name of the resource group of the storage account.
:vartype resource_group: str
:ivar container_name: Required. The name of storage container in the storage account.
:vartype container_name: str
:ivar file_name_format: File name format for the blob. Default format is
{iothub}/{partition}/{YYYY}/{MM}/{DD}/{HH}/{mm}. All parameters are mandatory but can be
reordered.
:vartype file_name_format: str
:ivar batch_frequency_in_seconds: Time interval at which blobs are written to storage. Value
should be between 60 and 720 seconds. Default value is 300 seconds.
:vartype batch_frequency_in_seconds: int
:ivar max_chunk_size_in_bytes: Maximum number of bytes for each blob written to storage. Value
should be between 10485760(10MB) and 524288000(500MB). Default value is 314572800(300MB).
:vartype max_chunk_size_in_bytes: int
:ivar encoding: Encoding that is used to serialize messages to blobs. Supported values are
'avro', 'avrodeflate', and 'JSON'. Default value is 'avro'. Possible values include: "Avro",
"AvroDeflate", "JSON".
:vartype encoding: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingStorageContainerPropertiesEncoding
"""
_validation = {
'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'},
'container_name': {'required': True},
'batch_frequency_in_seconds': {'maximum': 720, 'minimum': 60},
'max_chunk_size_in_bytes': {'maximum': 524288000, 'minimum': 10485760},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'connection_string': {'key': 'connectionString', 'type': 'str'},
'endpoint_uri': {'key': 'endpointUri', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ManagedIdentity'},
'name': {'key': 'name', 'type': 'str'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'resource_group': {'key': 'resourceGroup', 'type': 'str'},
'container_name': {'key': 'containerName', 'type': 'str'},
'file_name_format': {'key': 'fileNameFormat', 'type': 'str'},
'batch_frequency_in_seconds': {'key': 'batchFrequencyInSeconds', 'type': 'int'},
'max_chunk_size_in_bytes': {'key': 'maxChunkSizeInBytes', 'type': 'int'},
'encoding': {'key': 'encoding', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
container_name: str,
id: Optional[str] = None,
connection_string: Optional[str] = None,
endpoint_uri: Optional[str] = None,
authentication_type: Optional[Union[str, "AuthenticationType"]] = None,
identity: Optional["ManagedIdentity"] = None,
subscription_id: Optional[str] = None,
resource_group: Optional[str] = None,
file_name_format: Optional[str] = None,
batch_frequency_in_seconds: Optional[int] = None,
max_chunk_size_in_bytes: Optional[int] = None,
encoding: Optional[Union[str, "RoutingStorageContainerPropertiesEncoding"]] = None,
**kwargs
):
"""
:keyword id: Id of the storage container endpoint.
:paramtype id: str
:keyword connection_string: The connection string of the storage account.
:paramtype connection_string: str
:keyword endpoint_uri: The url of the storage endpoint. It must include the protocol https://.
:paramtype endpoint_uri: str
:keyword authentication_type: Method used to authenticate against the storage endpoint.
Possible values include: "keyBased", "identityBased".
:paramtype authentication_type: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.AuthenticationType
:keyword identity: Managed identity properties of routing storage endpoint.
:paramtype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
:keyword name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types.
:paramtype name: str
:keyword subscription_id: The subscription identifier of the storage account.
:paramtype subscription_id: str
:keyword resource_group: The name of the resource group of the storage account.
:paramtype resource_group: str
:keyword container_name: Required. The name of storage container in the storage account.
:paramtype container_name: str
:keyword file_name_format: File name format for the blob. Default format is
{iothub}/{partition}/{YYYY}/{MM}/{DD}/{HH}/{mm}. All parameters are mandatory but can be
reordered.
:paramtype file_name_format: str
:keyword batch_frequency_in_seconds: Time interval at which blobs are written to storage. Value
should be between 60 and 720 seconds. Default value is 300 seconds.
:paramtype batch_frequency_in_seconds: int
:keyword max_chunk_size_in_bytes: Maximum number of bytes for each blob written to storage.
Value should be between 10485760(10MB) and 524288000(500MB). Default value is 314572800(300MB).
:paramtype max_chunk_size_in_bytes: int
:keyword encoding: Encoding that is used to serialize messages to blobs. Supported values are
'avro', 'avrodeflate', and 'JSON'. Default value is 'avro'. Possible values include: "Avro",
"AvroDeflate", "JSON".
:paramtype encoding: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingStorageContainerPropertiesEncoding
"""
super(RoutingStorageContainerProperties, self).__init__(**kwargs)
self.id = id
self.connection_string = connection_string
self.endpoint_uri = endpoint_uri
self.authentication_type = authentication_type
self.identity = identity
self.name = name
self.subscription_id = subscription_id
self.resource_group = resource_group
self.container_name = container_name
self.file_name_format = file_name_format
self.batch_frequency_in_seconds = batch_frequency_in_seconds
self.max_chunk_size_in_bytes = max_chunk_size_in_bytes
self.encoding = encoding
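# Usage sketch (illustrative only): a storage-container endpoint using identity-based
# authentication. The URI and container are placeholders; the batching values stay
# within the documented 60-720 second and 10-500 MB ranges.
def _example_storage_container_endpoint():
    return RoutingStorageContainerProperties(
        name="coldStorage",
        container_name="telemetry-archive",
        endpoint_uri="https://examplestorage.blob.core.windows.net",
        authentication_type="identityBased",
        batch_frequency_in_seconds=300,
        max_chunk_size_in_bytes=314572800,
        encoding="Avro",
    )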
class RoutingTwin(msrest.serialization.Model):
"""Twin reference input parameter. This is an optional parameter.
:ivar tags: A set of tags. Twin Tags.
:vartype tags: any
:ivar properties:
:vartype properties: ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingTwinProperties
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': 'object'},
'properties': {'key': 'properties', 'type': 'RoutingTwinProperties'},
}
def __init__(
self,
*,
tags: Optional[Any] = None,
properties: Optional["RoutingTwinProperties"] = None,
**kwargs
):
"""
:keyword tags: A set of tags. Twin Tags.
:paramtype tags: any
:keyword properties:
:paramtype properties: ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingTwinProperties
"""
super(RoutingTwin, self).__init__(**kwargs)
self.tags = tags
self.properties = properties
class RoutingTwinProperties(msrest.serialization.Model):
"""RoutingTwinProperties.
:ivar desired: Twin desired properties.
:vartype desired: any
    :ivar reported: Twin reported properties.
:vartype reported: any
"""
_attribute_map = {
'desired': {'key': 'desired', 'type': 'object'},
'reported': {'key': 'reported', 'type': 'object'},
}
def __init__(
self,
*,
desired: Optional[Any] = None,
reported: Optional[Any] = None,
**kwargs
):
"""
:keyword desired: Twin desired properties.
:paramtype desired: any
        :keyword reported: Twin reported properties.
:paramtype reported: any
"""
super(RoutingTwinProperties, self).__init__(**kwargs)
self.desired = desired
self.reported = reported
class SharedAccessSignatureAuthorizationRule(msrest.serialization.Model):
"""The properties of an IoT hub shared access policy.
All required parameters must be populated in order to send to Azure.
:ivar key_name: Required. The name of the shared access policy.
:vartype key_name: str
:ivar primary_key: The primary key.
:vartype primary_key: str
:ivar secondary_key: The secondary key.
:vartype secondary_key: str
:ivar rights: Required. The permissions assigned to the shared access policy. Possible values
include: "RegistryRead", "RegistryWrite", "ServiceConnect", "DeviceConnect", "RegistryRead,
RegistryWrite", "RegistryRead, ServiceConnect", "RegistryRead, DeviceConnect", "RegistryWrite,
ServiceConnect", "RegistryWrite, DeviceConnect", "ServiceConnect, DeviceConnect",
"RegistryRead, RegistryWrite, ServiceConnect", "RegistryRead, RegistryWrite, DeviceConnect",
"RegistryRead, ServiceConnect, DeviceConnect", "RegistryWrite, ServiceConnect, DeviceConnect",
"RegistryRead, RegistryWrite, ServiceConnect, DeviceConnect".
:vartype rights: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.AccessRights
"""
_validation = {
'key_name': {'required': True},
'rights': {'required': True},
}
_attribute_map = {
'key_name': {'key': 'keyName', 'type': 'str'},
'primary_key': {'key': 'primaryKey', 'type': 'str'},
'secondary_key': {'key': 'secondaryKey', 'type': 'str'},
'rights': {'key': 'rights', 'type': 'str'},
}
def __init__(
self,
*,
key_name: str,
rights: Union[str, "AccessRights"],
primary_key: Optional[str] = None,
secondary_key: Optional[str] = None,
**kwargs
):
"""
:keyword key_name: Required. The name of the shared access policy.
:paramtype key_name: str
:keyword primary_key: The primary key.
:paramtype primary_key: str
:keyword secondary_key: The secondary key.
:paramtype secondary_key: str
:keyword rights: Required. The permissions assigned to the shared access policy. Possible
values include: "RegistryRead", "RegistryWrite", "ServiceConnect", "DeviceConnect",
"RegistryRead, RegistryWrite", "RegistryRead, ServiceConnect", "RegistryRead, DeviceConnect",
"RegistryWrite, ServiceConnect", "RegistryWrite, DeviceConnect", "ServiceConnect,
DeviceConnect", "RegistryRead, RegistryWrite, ServiceConnect", "RegistryRead, RegistryWrite,
DeviceConnect", "RegistryRead, ServiceConnect, DeviceConnect", "RegistryWrite, ServiceConnect,
DeviceConnect", "RegistryRead, RegistryWrite, ServiceConnect, DeviceConnect".
:paramtype rights: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.AccessRights
"""
super(SharedAccessSignatureAuthorizationRule, self).__init__(**kwargs)
self.key_name = key_name
self.primary_key = primary_key
self.secondary_key = secondary_key
self.rights = rights
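# Usage sketch (illustrative only): a policy granting registry read plus service
# connect. Keys are normally generated by the service, so none are supplied here.
def _example_access_policy():
    return SharedAccessSignatureAuthorizationRule(
        key_name="monitoringPolicy",
        rights="RegistryRead, ServiceConnect",
    )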
class SharedAccessSignatureAuthorizationRuleListResult(msrest.serialization.Model):
"""The list of shared access policies with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of shared access policies.
:vartype value:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.SharedAccessSignatureAuthorizationRule]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SharedAccessSignatureAuthorizationRule]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["SharedAccessSignatureAuthorizationRule"]] = None,
**kwargs
):
"""
:keyword value: The list of shared access policies.
:paramtype value:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.SharedAccessSignatureAuthorizationRule]
"""
super(SharedAccessSignatureAuthorizationRuleListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
class StorageEndpointProperties(msrest.serialization.Model):
"""The properties of the Azure Storage endpoint for file upload.
All required parameters must be populated in order to send to Azure.
:ivar sas_ttl_as_iso8601: The period of time for which the SAS URI generated by IoT Hub for
file upload is valid. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload#file-upload-notification-configuration-options.
:vartype sas_ttl_as_iso8601: ~datetime.timedelta
:ivar connection_string: Required. The connection string for the Azure Storage account to which
files are uploaded.
:vartype connection_string: str
:ivar container_name: Required. The name of the root container where you upload files. The
container need not exist but should be creatable using the connectionString specified.
:vartype container_name: str
:ivar authentication_type: Specifies authentication type being used for connecting to the
storage account. Possible values include: "keyBased", "identityBased".
:vartype authentication_type: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.AuthenticationType
:ivar identity: Managed identity properties of storage endpoint for file upload.
:vartype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
"""
_validation = {
'connection_string': {'required': True},
'container_name': {'required': True},
}
_attribute_map = {
'sas_ttl_as_iso8601': {'key': 'sasTtlAsIso8601', 'type': 'duration'},
'connection_string': {'key': 'connectionString', 'type': 'str'},
'container_name': {'key': 'containerName', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ManagedIdentity'},
}
def __init__(
self,
*,
connection_string: str,
container_name: str,
sas_ttl_as_iso8601: Optional[datetime.timedelta] = None,
authentication_type: Optional[Union[str, "AuthenticationType"]] = None,
identity: Optional["ManagedIdentity"] = None,
**kwargs
):
"""
:keyword sas_ttl_as_iso8601: The period of time for which the SAS URI generated by IoT Hub for
file upload is valid. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload#file-upload-notification-configuration-options.
:paramtype sas_ttl_as_iso8601: ~datetime.timedelta
:keyword connection_string: Required. The connection string for the Azure Storage account to
which files are uploaded.
:paramtype connection_string: str
:keyword container_name: Required. The name of the root container where you upload files. The
container need not exist but should be creatable using the connectionString specified.
:paramtype container_name: str
:keyword authentication_type: Specifies authentication type being used for connecting to the
storage account. Possible values include: "keyBased", "identityBased".
:paramtype authentication_type: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.AuthenticationType
:keyword identity: Managed identity properties of storage endpoint for file upload.
:paramtype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
"""
super(StorageEndpointProperties, self).__init__(**kwargs)
self.sas_ttl_as_iso8601 = sas_ttl_as_iso8601
self.connection_string = connection_string
self.container_name = container_name
self.authentication_type = authentication_type
self.identity = identity
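# Usage sketch (illustrative only): a file-upload storage endpoint with a one-hour
# SAS TTL. The connection string and container name are placeholders.
def _example_file_upload_endpoint():
    import datetime
    return StorageEndpointProperties(
        connection_string="<storage-account-connection-string>",
        container_name="fileuploads",
        sas_ttl_as_iso8601=datetime.timedelta(hours=1),
    )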
class TagsResource(msrest.serialization.Model):
"""A container holding only the Tags for a resource, allowing the user to update the tags on an IoT Hub instance.
:ivar tags: A set of tags. Resource tags.
:vartype tags: dict[str, str]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
"""
:keyword tags: A set of tags. Resource tags.
:paramtype tags: dict[str, str]
"""
super(TagsResource, self).__init__(**kwargs)
self.tags = tags
class TestAllRoutesInput(msrest.serialization.Model):
"""Input for testing all routes.
:ivar routing_source: Routing source. Possible values include: "Invalid", "DeviceMessages",
"TwinChangeEvents", "DeviceLifecycleEvents", "DeviceJobLifecycleEvents",
"DigitalTwinChangeEvents", "DeviceConnectionStateEvents".
:vartype routing_source: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingSource
:ivar message: Routing message.
:vartype message: ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingMessage
:ivar twin: Routing Twin Reference.
:vartype twin: ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingTwin
"""
_attribute_map = {
'routing_source': {'key': 'routingSource', 'type': 'str'},
'message': {'key': 'message', 'type': 'RoutingMessage'},
'twin': {'key': 'twin', 'type': 'RoutingTwin'},
}
def __init__(
self,
*,
routing_source: Optional[Union[str, "RoutingSource"]] = None,
message: Optional["RoutingMessage"] = None,
twin: Optional["RoutingTwin"] = None,
**kwargs
):
"""
:keyword routing_source: Routing source. Possible values include: "Invalid", "DeviceMessages",
"TwinChangeEvents", "DeviceLifecycleEvents", "DeviceJobLifecycleEvents",
"DigitalTwinChangeEvents", "DeviceConnectionStateEvents".
:paramtype routing_source: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingSource
:keyword message: Routing message.
:paramtype message: ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingMessage
:keyword twin: Routing Twin Reference.
:paramtype twin: ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingTwin
"""
super(TestAllRoutesInput, self).__init__(**kwargs)
self.routing_source = routing_source
self.message = message
self.twin = twin
class TestAllRoutesResult(msrest.serialization.Model):
"""Result of testing all routes.
:ivar routes: JSON-serialized array of matched routes.
:vartype routes: list[~azure.mgmt.iothub.v2021_03_03_preview.models.MatchedRoute]
"""
_attribute_map = {
'routes': {'key': 'routes', 'type': '[MatchedRoute]'},
}
def __init__(
self,
*,
routes: Optional[List["MatchedRoute"]] = None,
**kwargs
):
"""
:keyword routes: JSON-serialized array of matched routes.
:paramtype routes: list[~azure.mgmt.iothub.v2021_03_03_preview.models.MatchedRoute]
"""
super(TestAllRoutesResult, self).__init__(**kwargs)
self.routes = routes
class TestRouteInput(msrest.serialization.Model):
"""Input for testing route.
All required parameters must be populated in order to send to Azure.
:ivar message: Routing message.
:vartype message: ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingMessage
:ivar route: Required. Route properties.
:vartype route: ~azure.mgmt.iothub.v2021_03_03_preview.models.RouteProperties
:ivar twin: Routing Twin Reference.
:vartype twin: ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingTwin
"""
_validation = {
'route': {'required': True},
}
_attribute_map = {
'message': {'key': 'message', 'type': 'RoutingMessage'},
'route': {'key': 'route', 'type': 'RouteProperties'},
'twin': {'key': 'twin', 'type': 'RoutingTwin'},
}
def __init__(
self,
*,
route: "RouteProperties",
message: Optional["RoutingMessage"] = None,
twin: Optional["RoutingTwin"] = None,
**kwargs
):
"""
:keyword message: Routing message.
:paramtype message: ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingMessage
:keyword route: Required. Route properties.
:paramtype route: ~azure.mgmt.iothub.v2021_03_03_preview.models.RouteProperties
:keyword twin: Routing Twin Reference.
:paramtype twin: ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingTwin
"""
super(TestRouteInput, self).__init__(**kwargs)
self.message = message
self.route = route
self.twin = twin
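# Usage sketch (illustrative only): test input pairing a synthetic message with the
# route it should match. The body, properties, and route definition are arbitrary.
def _example_test_route_input():
    message = RoutingMessage(
        body='{"temperature": 21.5}',
        app_properties={"level": "normal"},
    )
    route = RouteProperties(
        name="WarmTelemetry",
        source="DeviceMessages",
        condition="true",
        endpoint_names=["events"],
        is_enabled=True,
    )
    return TestRouteInput(message=message, route=route)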
class TestRouteResult(msrest.serialization.Model):
"""Result of testing one route.
:ivar result: Result of testing route. Possible values include: "undefined", "false", "true".
:vartype result: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.TestResultStatus
:ivar details: Detailed result of testing route.
:vartype details: ~azure.mgmt.iothub.v2021_03_03_preview.models.TestRouteResultDetails
"""
_attribute_map = {
'result': {'key': 'result', 'type': 'str'},
'details': {'key': 'details', 'type': 'TestRouteResultDetails'},
}
def __init__(
self,
*,
result: Optional[Union[str, "TestResultStatus"]] = None,
details: Optional["TestRouteResultDetails"] = None,
**kwargs
):
"""
:keyword result: Result of testing route. Possible values include: "undefined", "false",
"true".
:paramtype result: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.TestResultStatus
:keyword details: Detailed result of testing route.
:paramtype details: ~azure.mgmt.iothub.v2021_03_03_preview.models.TestRouteResultDetails
"""
super(TestRouteResult, self).__init__(**kwargs)
self.result = result
self.details = details
class TestRouteResultDetails(msrest.serialization.Model):
"""Detailed result of testing a route.
:ivar compilation_errors: JSON-serialized list of route compilation errors.
:vartype compilation_errors:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.RouteCompilationError]
"""
_attribute_map = {
'compilation_errors': {'key': 'compilationErrors', 'type': '[RouteCompilationError]'},
}
def __init__(
self,
*,
compilation_errors: Optional[List["RouteCompilationError"]] = None,
**kwargs
):
"""
:keyword compilation_errors: JSON-serialized list of route compilation errors.
:paramtype compilation_errors:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.RouteCompilationError]
"""
super(TestRouteResultDetails, self).__init__(**kwargs)
self.compilation_errors = compilation_errors
class UserSubscriptionQuota(msrest.serialization.Model):
"""User subscription quota response.
:ivar id: IotHub type id.
:vartype id: str
:ivar type: Response type.
:vartype type: str
:ivar unit: Unit of IotHub type.
:vartype unit: str
:ivar current_value: Current number of IotHub type.
:vartype current_value: int
:ivar limit: Numerical limit on IotHub type.
:vartype limit: int
:ivar name: IotHub type.
:vartype name: ~azure.mgmt.iothub.v2021_03_03_preview.models.Name
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'unit': {'key': 'unit', 'type': 'str'},
'current_value': {'key': 'currentValue', 'type': 'int'},
'limit': {'key': 'limit', 'type': 'int'},
'name': {'key': 'name', 'type': 'Name'},
}
def __init__(
self,
*,
id: Optional[str] = None,
type: Optional[str] = None,
unit: Optional[str] = None,
current_value: Optional[int] = None,
limit: Optional[int] = None,
name: Optional["Name"] = None,
**kwargs
):
"""
:keyword id: IotHub type id.
:paramtype id: str
:keyword type: Response type.
:paramtype type: str
:keyword unit: Unit of IotHub type.
:paramtype unit: str
:keyword current_value: Current number of IotHub type.
:paramtype current_value: int
:keyword limit: Numerical limit on IotHub type.
:paramtype limit: int
:keyword name: IotHub type.
:paramtype name: ~azure.mgmt.iothub.v2021_03_03_preview.models.Name
"""
super(UserSubscriptionQuota, self).__init__(**kwargs)
self.id = id
self.type = type
self.unit = unit
self.current_value = current_value
self.limit = limit
self.name = name
class UserSubscriptionQuotaListResult(msrest.serialization.Model):
"""Json-serialized array of User subscription quota response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value:
:vartype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.UserSubscriptionQuota]
:ivar next_link:
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[UserSubscriptionQuota]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["UserSubscriptionQuota"]] = None,
**kwargs
):
"""
:keyword value:
:paramtype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.UserSubscriptionQuota]
"""
super(UserSubscriptionQuotaListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
| Azure/azure-sdk-for-python | sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2021_03_03_preview/models/_models_py3.py | Python | mit | 167,573 | 0.003473 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Smile. All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import matrix
| emimorad/smile_openerp_matrix_widget | smile_matrix_widget/widgets/__init__.py | Python | gpl-3.0 | 966 | 0 |
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
import re
rootPath = "/Users/jeff/work/debug/20181216_hard_fe2k_15fps/"
finalLogFile = "rosout.log.2"
def appendTimestamps(arr, start, stop, flag):
#flag = True
d = stop - start
if flag or (d > -10 and d < 2000):
arr.append(d)
return True
return False
## camera -> OA(ObjectAnalytics) -> Fusion -> Flink -> V2X
stamps = [[],[],[],[],[]]
log = open(rootPath + finalLogFile)
lines = log.readlines()
log.close()
for i in range(0, len(lines)):
line = lines[i].rstrip('\n').strip()
ret = re.findall(r'\"camera_output_ts\":(\d+),.*\"flink_output_ts\":(\d+),.*\"fusion_output_ts\":(\d+),.*\"oa_output_ts\":\[([\d,]+)\],.*\"v2xnode_input_ts\":(\d+)', line)
if len(ret) > 0 and len(ret[0]) == 5:
if i < 2:
#print("line", line)
print("ret:", ret)
        stamps[0].append(int(ret[0][0]))  # camera
        stamps[2].append(int(ret[0][2]))  # fusion
        stamps[3].append(int(ret[0][1]))  # flink
        stamps[4].append(int(ret[0][4]))  # v2x
# oa
oastamps = ret[0][3].split(',')
        t1 = int(oastamps[0])
        t2 = int(oastamps[1])
        t3 = int(oastamps[2])
mi = t1
ma = t1
if mi > t2:
mi = t2
if mi > t3:
mi = t3
if ma < t2:
ma = t2
if ma < t3:
ma = t3
#stamps[1].append((t1 + t2 + t3) / 3)
#stamps[1].append(mi)
stamps[1].append(ma)
        stamps[1].append(int(oastamps[0]))
        stamps[1].append(int(oastamps[1]))
        stamps[1].append(int(oastamps[2]))
## [ 0 1 2 3 4 ]
## [ Camera OA(3) Fusion Flink V2X ]
## [ 0 1 2 3 4 5 ]
## [ Total(V2X - Camera), OA(OA-Camera), Fusion(Fusion-OA), Flink(Flink - Fusion), V2X(V2X - Flink) Fusion-CAM ]
delays = [[], [], [], [], [], [], [], []]
for i in range(len(stamps[0])):
if appendTimestamps(delays[0], stamps[0][i], stamps[4][i], False): # total
appendTimestamps(delays[1], stamps[0][i], stamps[1][i * 4], True) # OA
appendTimestamps(delays[2], stamps[1][i * 4], stamps[2][i], True) # Fusion
appendTimestamps(delays[3], stamps[2][i], stamps[3][i], True) # Flink
appendTimestamps(delays[4], stamps[3][i], stamps[4][i], True) # V2x
appendTimestamps(delays[5], stamps[0][i], stamps[2][i], True) # Fusion - Cam
print("===length: ", len(delays[0]),len(delays[1]),len(delays[2]),len(delays[3]),len(delays[4]))
delayavg = [0,0,0,0,0,0]
if len(delays[0]) == 0:
print("empty delay array")
quit()
for i in range(len(delays[0])):
delayavg[0] = delayavg[0] + delays[0][i]
delayavg[1] = delayavg[1] + delays[1][i]
delayavg[2] = delayavg[2] + delays[2][i]
delayavg[3] = delayavg[3] + delays[3][i]
delayavg[4] = delayavg[4] + delays[4][i]
delayavg[5] = delayavg[5] + delays[5][i]
for i in range(6):
delayavg[i] = delayavg[i] / len(delays[0])
print("===AVG(Total, OA, Fusion, Flink, V2X): ", delayavg)
frameIntervals = []
for i in range(len(stamps[0]) - 1):
tmp = stamps[0][i + 1] - stamps[0][i]
if tmp < 1000:
frameIntervals.append(stamps[0][i + 1] - stamps[0][i])
## plot
plt.figure()
#plt.plot(delays[0])
#plt.plot(delays[1])
#plt.plot(delays[2])
#plt.plot(delays[3])
plt.plot(delays[4])
#plt.plot(delays[5])
plt.legend(["Total", "OA", "Fusion", "Flink", "V2X", "OA+Fusion"])
plt.show()
'''
## interval
plt.plot(frameIntervals)
plt.show()
'''
print("done!")
| yejingfu/samples | tensorflow/pyplot03.py | Python | mit | 3,650 | 0.011233 |
# -*- coding: UTF-8 -*-
# to run by anaconda
# from bokeh.plotting import figure, output_file, show
import bokeh.plotting as bp
import bokeh_gmapm
import logging
logger = logging.getLogger(__name__)
class Figure(object):
def __init__(self, *args, **kwargs):
self._output_fname = kwargs.get('output_fname',"bokeh.html")
bp.output_file(self._output_fname)
self._use_gmap = kwargs.get('use_gmap',False)
if self._use_gmap and kwargs.get('center_coords',False):
self._p = bokeh_gmapm.create_plot(kwargs['center_coords'],zoom_level = 7)
else:
self._p = bp.figure(plot_width=640, plot_height=480)
def add_line(self, *args,**kwargs):
logger.info("starting line add with points num = %d" % (len(args[0])))
if self._use_gmap:
bokeh_gmapm.add_line(self._p,args[0],**kwargs)
else:
if len(args[0])==0:
lats = [0,1,2,3]
lngs = [2,3,4,5]
else:
c_size=kwargs.get('circle_size',15)
c_color=kwargs.get('circles_color','red')
                self._p.line([d['lat'] for d in args[0]],
                             [d['lng'] for d in args[0]],
                             line_width=c_size/2, color=c_color, alpha=0.5)
                self._p.circle([d['lat'] for d in args[0]],
                               [d['lng'] for d in args[0]],
                               size=c_size, color=c_color, alpha=0.5)
return True
def save2html(self):
bp.save(self._p)
return self._output_fname
def show(self):
bp.show(self._p)
return True
# def plot_route_on_basemap(coord_pairs,annotes,added_points_param_list=None):
# bp.output_file("map_bokeh.html")
# p = bp.figure(plot_width=640, plot_height=480)
# lat_list, lng_list = zip(*coord_pairs)
# MIN_L_WIDTH=7
# POINT_SIZE=2*MIN_L_WIDTH
# x_all=[]
# y_all=[]
# for i,point in enumerate(coord_pairs):
# lon = point[-1]
# lat = point[0]
# x,y = lon,lat
# # x,y = m(*[lon,lat])
# x_all.append(x)
# y_all.append(y)
# if (i!=0 and i!=len(annotes)-1):
# pass
# # plt.annotate(annotes[i], xy=(x,y), xytext=(POINT_SIZE/2,POINT_SIZE/2), textcoords='offset points',bbox=dict(boxstyle="round", fc=(1.0, 0.7, 0.7), ec="none"))
# p.line(x_all,y_all,line_width=5,color='red')
# if added_points_param_list!=None:
# added_points_coords = added_points_param_list[0]
# names = added_points_param_list[1]
# x_added=[]
# y_added=[]
# for i,point in enumerate(added_points_coords):
# lat = point[0]
# lon = point[-1]
# # x,y = m(*[lon,lat])
# x,y = lon,lat
# x_added.append(x)
# y_added.append(y)
# if (i!=0 and i!=len(names)-1):
# p.text(x, y, text=[names[i]], text_color="#449944", text_align="left", text_font_size="10pt")
# p.circle(x,y,size=20,color='red',alpha=0.5)
# bp.save(p)
def test_simple_bokeh_plot():
tver_coords = {u'lat':56.8583600, u'lng':35.9005700}
fig = Figure(output_fname='bokehm_simple_test.html',use_gmap=False, center_coords=tver_coords)
line_to_plot = [{u'lat':tver_coords[u'lat']*(1+i*0.0001),
u'lng':tver_coords[u'lng']*(1+i*0.0001)} \
for i in range(10)]
fig.add_line(line_to_plot,circle_size=20, circles_color='green')
fig.save2html()
fig.show()
def test_gmap_bokeh_plot():
tver_coords = {u'lat':56.8583600, u'lng':35.9005700}
fig = Figure(output_fname='bokehm_test.html',use_gmap=True, center_coords=tver_coords)
line_to_plot = []
for i in range(10):
line_to_plot.append({u'lat':tver_coords[u'lat']*(1+i*0.0001), u'lng':tver_coords[u'lng']*(1+i*0.0001)})
print(type(line_to_plot[0]))
fig.add_line(line_to_plot,circle_size=20, circles_color='green')
fig.save2html()
fig.show()
def main():
pass
if __name__ == "__main__":
main() | sergeimoiseev/othodi | bokehm.py | Python | mit | 4,203 | 0.012848 |
# This is purely the result of trial and error.
import sys
from setuptools import setup, find_packages
import httpie
# Note: keep requirements here to ease distributions packaging
tests_require = [
'pytest',
'pytest-httpbin>=0.0.6',
'responses',
]
dev_require = [
*tests_require,
'flake8',
'flake8-comprehensions',
'flake8-deprecated',
'flake8-mutable',
'flake8-tuple',
'pyopenssl',
'pytest-cov',
'pyyaml',
'twine',
'wheel',
'Jinja2'
]
install_requires = [
'charset_normalizer>=2.0.0',
'defusedxml>=0.6.0',
'requests[socks]>=2.22.0',
'Pygments>=2.5.2',
'requests-toolbelt>=0.9.1',
'multidict>=4.7.0',
'setuptools',
]
install_requires_win_only = [
'colorama>=0.2.4',
]
# Conditional dependencies:
# sdist
if 'bdist_wheel' not in sys.argv:
if 'win32' in str(sys.platform).lower():
# Terminal colors for Windows
install_requires.extend(install_requires_win_only)
# bdist_wheel
extras_require = {
'dev': dev_require,
'test': tests_require,
# https://wheel.readthedocs.io/en/latest/#defining-conditional-dependencies
':sys_platform == "win32"': install_requires_win_only,
}
def long_description():
with open('README.md', encoding='utf-8') as f:
return f.read()
setup(
name='httpie',
version=httpie.__version__,
description=httpie.__doc__.strip(),
long_description=long_description(),
long_description_content_type='text/markdown',
url='https://httpie.io/',
download_url=f'https://github.com/httpie/httpie/archive/{httpie.__version__}.tar.gz',
author=httpie.__author__,
author_email='[email protected]',
license=httpie.__licence__,
packages=find_packages(include=['httpie', 'httpie.*']),
entry_points={
'console_scripts': [
'http = httpie.__main__:main',
'https = httpie.__main__:main',
],
},
python_requires='>=3.6',
extras_require=extras_require,
install_requires=install_requires,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development',
'Topic :: System :: Networking',
'Topic :: Terminals',
'Topic :: Text Processing',
'Topic :: Utilities'
],
project_urls={
'GitHub': 'https://github.com/httpie/httpie',
'Twitter': 'https://twitter.com/httpie',
'Discord': 'https://httpie.io/discord',
'Documentation': 'https://httpie.io/docs',
'Online Demo': 'https://httpie.io/run',
},
)
| jakubroztocil/httpie | setup.py | Python | bsd-3-clause | 2,890 | 0.000346 |
#!/usr/bin/env python3
import logging
from src import util
from src import etherscan
from src import messages
from crypto.prices import *
logger = logging.getLogger("node")
######
# Telegram command handler for adding nodes for the user who fired the command.
#
# Command: /node :address0;name0 ... :addressN;nameN
#
# Command parameter: :address0 - Address of the first node to add
# :name0 - Name of the first node
# :addressN - Address of the last node to add
# :nameN - Name of the last node
#
# Gets only called by the telegram bot api
######
def nodeAdd(bot, update, args):
response = "*Add*\n\n"
chatId = update.message.chat_id
logger.warning("add - args " + " ".join(args))
logger.warning("add - user: {}".format(update.message.from_user.id))
pool = bot.database.getPoolById(chatId)
if pool == None:
bot.create(bot,update,[])
if len(args) == 0:
response += ("Arguments required: address_0;name_0 ... address_n;name_n\n\n"
"Example: /add 0xFf2ED74286a5686Bc4F4896761718DE031680000;Node1 0xFf2ED74286a5686Bc4F4896761718DE031681111;Node2\n")
valid = False
else:
for arg in args:
valid = True
newNode = arg.split(";")
if len(newNode) != 2:
response += messages.invalidParameterError.format(arg)
valid = False
else:
if not util.validateTntAddress( newNode[0] ):
response += messages.invalidTntAddressError.format(newNode[0])
valid = False
if not util.validateName( newNode[1] ):
response += messages.invalidNameError.format(newNode[1])
valid = False
if valid:
address = newNode[0]
name = newNode[1]
if bot.database.addNode(update.message.chat_id, address, name, update.message.from_user.id,update.message.from_user.username):
response += "Added node {}!\n".format(address)
else:
response += messages.nodeExistsError.format(address)
bot.sendMessage(update.message.chat_id, response )
######
# Telegram command handler for updating nodes for the user who fired the command.
#
# Command: /update :address :newname
#
# Command parameter: :address - Address of the node to update
# :newname - New name for the node
#
# Gets only called by the telegram bot api
######
def nodeUpdate(bot, update, args):
response = "*Update*\n\n"
chatId = update.message.chat_id
logger.warning("update - args " + " ".join(args))
logger.warning("update - user: {}".format(update.message.from_user.id))
pool = bot.database.getPoolById(chatId)
user = bot.database.getUser(chatId, update.message.from_user.id)
if pool == None:
response+= messages.noPoolError
elif user == None:
response += messages.notActiveInPoolError
elif len(args) != 2:
response += ("Exactly 2 arguments required: :address :newname\n"
"Where :address is the address of the node to update and :newname the"
"new name of the node.\n\n"
"Example: /update 0xFf2ED74286a5686Bc4F4896761718DE031680000 MyNewNodeName\n")
else:
valid = True
if not util.validateTntAddress( args[0] ):
response += messages.invalidTntAddressError.format(args[0])
valid = False
elif not util.validateName( args[1] ):
response += messages.invalidNameError.format(args[1])
valid = False
if valid:
address = args[0]
name = args[1]
logger.info("update - {} {}".format(address, user['id']))
if bot.database.getNode(address, user['id']) == None:
response += messages.nodeNotExistsError.format(address)
else:
bot.database.updateNode(address,user['id'], name)
response += "Node successfully updated. {}\n".format(address)
bot.sendMessage(chatId, response )
######
# Telegram command handler for removing nodes for the user who fired the command.
#
# Command: /remove :address
#
# Command parameter: :address - Address of the node to remove
#
#
# Gets only called by the telegram bot api
######
def nodeRemove(bot, update, args):
response = "*Remove*\n\n"
chatId = update.message.chat_id
logger.warning("remove - " + " ".join(args))
logger.warning("remove - user: {}".format(update.message.from_user.id))
pool = bot.database.getPoolById(chatId)
user = bot.database.getUser(chatId, update.message.from_user.id)
if pool == None:
response+= messages.noPoolError
elif user == None:
response += messages.notActiveInPoolError
elif len(args) != 1:
response += ("Argument required: :address\n\n"
"Example: /remove 0xFf2ED74286a5686Bc4F4896761718DE031680000\n")
else:
address = args[0]
if not util.validateTntAddress( address ):
response += "ERROR: Invalid TNT-Address: {}\n".format(address)
else:
logger.info("remove - valid {}".format(address, ))
if bot.database.getNode(address, user['id']) == None:
response += "ERROR: Node {} does not exist!\n".format(address)
else:
bot.database.deleteNode(address,user['id'])
response += "Node {} successfully deleted.\n".format(address)
bot.sendMessage(chatId, response )
######
# Telegram command handler for reading the amounts of each node of the users
# in the pool
#
# Command: /nodes
#
# Gets only called by the telegram bot api
######
def nodes(bot, update):
response = ""
chatId = update.message.chat_id
nodesFound = False
pool = bot.database.getPoolById(chatId)
logger.warning("nodes - {}".format(chatId))
if pool == None:
response = "*Nodes*\n\n"
response += ("You need to create a pool with nodes first. "
"Type /help to show the list of commands.")
else:
tntPrice = liqui(Cryptos.TNT)
addresses = []
for user in bot.database.getUsers(pool['id']):
nodes = bot.database.getNodes(user['id'])
if len(nodes) == 0:
continue
for node in nodes:
addresses.append(node["address"])
amounts = etherscan.getTNTAmount(addresses, pool['api_key'])
for user in bot.database.getUsers(pool['id']):
nodes = bot.database.getNodes(user['id'])
if len(nodes) == 0:
continue
nodesFound = True
response += "*" + user['name'] + "*\n"
total = 0
for node in nodes:
tnt = amounts[node["address"]]
if tnt == -1:
response += node['name'] + " -> Sorry, there was an error.\n".format(tnt)
else:
total += int(tnt)
response += node['name'] + " -> {} TNT\n".format(tnt)
if tntPrice != None:
response += '\n*Total:\n TNT: {}\n USD: {}*\n\n'.format(total,int(total*tntPrice.usd))
else:
response += '\n*Total TNT: {}*\n\n'.format(total)
response += "\n\n"
if not nodesFound and pool:
response = "*Nodes*\n\n"
response += ("There are currently no nodes in this pool. You can create "
"nodes with /add.")
bot.sendMessage(chatId, response )
######
# Telegram command handler for reading the total amounts of all nodes of the users
# in the pool
#
# Command: /total
#
# Gets only called by the telegram bot api
######
def total(bot, update):
response = ""
chatId = update.message.chat_id
nodesFound = False
pool = bot.database.getPoolById(chatId)
logger.warning("total - {}".format(chatId))
if pool == None:
response = "*Total*\n\n"
response += ("You need to create a pool with nodes first. "
"Type /help to show the list of commands.")
else:
tntPrice = liqui(Cryptos.TNT)
addresses = []
for user in bot.database.getUsers(pool['id']):
nodes = bot.database.getNodes(user['id'])
if len(nodes) == 0:
continue
for node in nodes:
addresses.append(node["address"])
amounts = etherscan.getTNTAmount(addresses, pool['api_key'])
for user in bot.database.getUsers(chatId):
nodes = bot.database.getNodes(user['id'])
total = 0
if len(nodes) == 0:
continue
nodesFound = True
for node in bot.database.getNodes(user['id']):
total += amounts[node['address']]
if tntPrice != None:
response += '{} -> {} TNT | {} USD\n'.format(user['name'],total,int(total * tntPrice.usd))
else:
                response += '{} -> {} TNT\n'.format(user['name'], total)
if not nodesFound:
response = "*Total*\n\n"
response += ("There are currently no nodes in this pool. You can create "
"nodes with /add.")
bot.sendMessage(chatId, response )
######
# Telegram command handler for reading the addresses of all nodes of the users
# in the pool
#
# Command: /addresses
#
# Gets only called by the telegram bot api
######
def addresses(bot, update):
response = ""
chatId = update.message.chat_id
nodesFound = False
pool = bot.database.getPoolById(chatId)
logger.warning("addresses - {}".format(chatId))
if pool == None:
response = "*Addresses*\n\n"
response += ("You need to create a pool with nodes first. "
"Type /help to show the list of commands.")
else:
for user in bot.database.getUsers(pool['id']):
nodes = bot.database.getNodes(user['id'])
if len(nodes) == 0:
continue
response += "*" + user['name'] + "*\n"
nodesFound = True
for node in nodes:
response += node['name'] + " -> " + node['address'] + "\n"
response += "\n\n"
if not nodesFound:
response = "*Addresses*\n\n"
response += ("There are currently no nodes in this pool. You can create "
"nodes with /add.")
bot.sendMessage(update.message.chat_id, response )
| kevinrombach/TNTNodeMonitorBot | src/commandhandler/node.py | Python | apache-2.0 | 10,737 | 0.011456 |
#!/usr/bin/env python3
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import unittest
import time
import mooseutils
import chigger
class Test_getActiveFilenames(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""
Copy the temporary files to working directory.
"""
cls.basename = cls.__name__
cls.testfiles = chigger.utils.copy_adaptive_exodus_test_files(cls.basename)
@classmethod
def tearDownClass(cls):
"""
Cleanup test files
"""
for fname in cls.testfiles:
if os.path.exists(fname): os.remove(fname)
def testBasic(self):
"""
Test that all files can be read.
"""
active = chigger.utils.get_active_filenames(self.basename + '.e', self.basename + '.e-s*')
self.assertEqual(len(active), 9)
self.assertEqual(active[0][0], self.basename + '.e')
self.assertEqual(active[-1][0], self.basename + '.e-s009')
def testUpdate(self):
"""
Test that updating the files updates the active list.
"""
# Wait and the "update" the first few files
time.sleep(1.5)
for i in range(5):
print(self.testfiles[i])
mooseutils.touch(self.testfiles[i])
active = chigger.utils.get_active_filenames(self.basename + '.e', self.basename + '.e-s*')
self.assertEqual(len(active), 5)
self.assertEqual(active[0][0], self.basename + '.e')
self.assertEqual(active[-1][0], self.basename + '.e-s005')
if __name__ == '__main__':
unittest.main(module=__name__, verbosity=2)
| nuclear-wizard/moose | python/chigger/tests/utils/test_get_active_filenames.py | Python | lgpl-2.1 | 1,925 | 0.007792 |
import pipes
from fabric.api import settings, task, local, hide
from fabric.contrib.console import confirm
def is_working_tree_clean():
with settings(hide('everything'), warn_only=True):
local('git update-index -q --ignore-submodules --refresh')
unstaged = local('git diff-files --quiet --ignore-submodules --',
capture=True)
uncommitted = local('git diff-index --cached --quiet HEAD '
'--ignore-submodules --', capture=True)
return unstaged.succeeded and uncommitted.succeeded
@task
def lint():
"""
Checks the source code using flake8.
"""
local('flake8 --statistics --exit-zero --max-complexity=10 '
'--exclude=\'*/migrations/*,build,dist\' .')
@task
def authors():
"""
Updates the AUTHORS file with a list of committers from GIT.
"""
local('git shortlog -s -e -n | cut -f 2- > AUTHORS')
@task
def compass():
local('compass watch -c ppc/assets/sass/config.rb')
@task
def livereload():
local('bundle exec guard')
@task
def release():
"""
Create a new release and upload it to PyPI.
"""
if not is_working_tree_clean():
print 'Your working tree is not clean. Refusing to create a release.'
return
print 'Rebuilding the AUTHORS file to check for modifications...'
authors()
if not is_working_tree_clean():
print (
'Your working tree is not clean after the AUTHORS file was '
'rebuilt.'
)
print 'Please commit the changes before continuing.'
return
# Get version
version = 'v{}'.format(local('python setup.py --version', capture=True))
name = local('python setup.py --name', capture=True)
# Tag
tag_message = '{} release version {}.'.format(name, version)
print '----------------------'
print 'Proceeding will tag the release, push the repository upstream,'
print 'and release a new version on PyPI.'
print
print 'Version: {}'.format(version)
print 'Tag message: {}'.format(tag_message)
print
if not confirm('Continue?', default=True):
print 'Aborting.'
return
local('git tag -a {} -m {}'.format(pipes.quote(version),
pipes.quote(tag_message)))
# Push
local('git push origin master')
# Package and upload to pypi
local('python setup.py sdist upload')
| GaretJax/ppc | fabfile.py | Python | mit | 2,432 | 0.000411 |
from vt_manager.controller.actions.ActionController import ActionController
from vt_manager.controller.drivers.VTDriver import VTDriver
from vt_manager.models.Action import Action
from vt_manager.models.VirtualMachine import VirtualMachine
import xmlrpclib, threading, logging, copy
from vt_manager.communication.utils.XmlHelper import XmlHelper
from vt_manager.models.resourcesHash import resourcesHash
class InformationDispatcher():
@staticmethod
def listResources(remoteHashValue, projectUUID = 'None', sliceUUID ='None'):
logging.debug("Enter listResources")
infoRspec = XmlHelper.getSimpleInformation()
servers = VTDriver.getAllServers()
baseVM = copy.deepcopy(infoRspec.response.information.resources.server[0].virtual_machine[0])
if not servers:
logging.debug("No VTServers available")
infoRspec.response.information.resources.server.pop()
resourcesString = XmlHelper.craftXmlClass(infoRspec)
localHashValue = str(hash(resourcesString))
else:
for sIndex, server in enumerate(servers):
if(sIndex == 0):
baseServer = copy.deepcopy(infoRspec.response.information.resources.server[0])
if(sIndex != 0):
newServer = copy.deepcopy(baseServer)
infoRspec.response.information.resources.server.append(newServer)
InformationDispatcher.__ServerModelToClass(server, infoRspec.response.information.resources.server[sIndex] )
if (projectUUID is not 'None'):
vms = server.getVMs(projectId = projectUUID)
else:
vms = server.getVMs()
if not vms:
logging.debug("No VMs available")
if infoRspec.response.information.resources.server[sIndex].virtual_machine:
infoRspec.response.information.resources.server[sIndex].virtual_machine.pop()
elif (sliceUUID is not 'None'):
vms = vms.filter(sliceId = sliceUUID)
if not vms:
logging.error("No VMs available")
infoRspec.response.information.resources.server[sIndex].virtual_machine.pop()
for vIndex, vm in enumerate(vms):
if (vIndex != 0):
newVM = copy.deepcopy(baseVM)
infoRspec.response.information.resources.server[sIndex].virtual_machine.append(newVM)
InformationDispatcher.__VMmodelToClass(vm, infoRspec.response.information.resources.server[sIndex].virtual_machine[vIndex])
resourcesString = XmlHelper.craftXmlClass(infoRspec)
localHashValue = str(hash(resourcesString))
try:
rHashObject = resourcesHash.objects.get(projectUUID = projectUUID, sliceUUID = sliceUUID)
rHashObject.hashValue = localHashValue
rHashObject.save()
except:
rHashObject = resourcesHash(hashValue = localHashValue, projectUUID= projectUUID, sliceUUID = sliceUUID)
rHashObject.save()
if remoteHashValue == rHashObject.hashValue:
return localHashValue, ''
else:
return localHashValue, resourcesString
@staticmethod
def listVMTemplatesInfo(serverUUID):
#def listVMTemplatesInfo(serverUUID, callbackURL):
logging.debug("Enter listVMTemplatesInfo")
server = VTDriver.getServerByUUID(serverUUID)
xmlrpc_server = xmlrpclib.Server(server.getAgentURL())
templates_info = xmlrpc_server.list_vm_templates(server.getAgentPassword())
#templates_info = xmlrpc_server.list_vm_templates(callbackURL, server.getAgentPassword())
return str(templates_info)
@staticmethod
def forceListActiveVMs(serverID='None', vmID='None'):
if serverID != 'None':
server = VTDriver.getServerById(serverID)
vtam_vms = server.getVMs()
else:
if vmID != 'None':
servers = VTDriver.getAllServers()
vtam_vms = list()
for server in servers:
vtam_vms = server.getVMs(id=int(vmID))
if vtam_vms:
vmID = vtam_vms[0].getUUID()
break
if not vtam_vms:
raise Exception("VM not found")
xmlrpc_server = xmlrpclib.Server(server.getAgentURL())
# Handle safely the connection against the agent
try:
server_active_vms = xmlrpc_server.force_list_active_vms(server.getAgentPassword(), vmID)
for vm in vtam_vms:
if vm.getUUID() in server_active_vms.keys():
vm.setState("running")
vm.save()
else:
# XXX: avoiding "on queue" and "unknown" states to avoid bad management
#if vm.getState() in ['deleting...', 'failed', 'on queue', 'unknown']:
if vm.getState() in ["deleting...", "failed"]:
child = vm.getChildObject()
server = vm.Server.get()
#Action.objects.all().filter(objectUUID = vm.uuid).delete()
server.deleteVM(vm)
# Keep actions table up-to-date after each deletion
vm_uuids = [ vm.uuid for vm in VirtualMachine.objects.all() ]
Action.objects.all().exclude(objectUUID__in = vm_uuids).delete()
elif vm.getState() in ["running", "starting...", "stopping..."] :
vm.setState("stopped")
vm.save()
else:
continue
except:
server_active_vms = dict()
return server_active_vms
@staticmethod
def __ServerModelToClass(sModel, sClass ):
sClass.name = sModel.getName()
#XXX: CHECK THIS
sClass.id = sModel.id
sClass.uuid = sModel.getUUID()
sClass.operating_system_type = sModel.getOSType()
sClass.operating_system_distribution = sModel.getOSDistribution()
sClass.operating_system_version = sModel.getOSVersion()
sClass.virtualization_type = sModel.getVirtTech()
ifaces = sModel.getNetworkInterfaces()
for ifaceIndex, iface in enumerate(ifaces):
if ifaceIndex != 0:
newInterface = copy.deepcopy(sClass.interfaces.interface[0])
sClass.interfaces.interface.append(newInterface)
if iface.isMgmt:
sClass.interfaces.interface[ifaceIndex].ismgmt = True
else:
sClass.interfaces.interface[ifaceIndex].ismgmt = False
sClass.interfaces.interface[ifaceIndex].name = iface.name
sClass.interfaces.interface[ifaceIndex].switch_id= iface.switchID
sClass.interfaces.interface[ifaceIndex].switch_port = iface.port
@staticmethod
def __VMmodelToClass(VMmodel, VMxmlClass):
VMxmlClass.name = VMmodel.getName()
VMxmlClass.uuid = VMmodel.getUUID()
VMxmlClass.status = VMmodel.getState()
VMxmlClass.project_id = VMmodel.getProjectId()
VMxmlClass.slice_id = VMmodel.getSliceId()
VMxmlClass.project_name = VMmodel.getProjectName()
VMxmlClass.slice_name = VMmodel.getSliceName()
VMxmlClass.operating_system_type = VMmodel.getOSType()
VMxmlClass.operating_system_version = VMmodel.getOSVersion()
VMxmlClass.operating_system_distribution = VMmodel.getOSDistribution()
VMxmlClass.virtualization_type = VMmodel.Server.get().getVirtTech()
VMxmlClass.server_id = VMmodel.Server.get().getUUID()
VMxmlClass.xen_configuration.hd_setup_type = VMmodel.getHdSetupType()
VMxmlClass.xen_configuration.hd_origin_path = VMmodel.getHdOriginPath()
VMxmlClass.xen_configuration.virtualization_setup_type = VMmodel.getVirtualizationSetupType()
VMxmlClass.xen_configuration.memory_mb = VMmodel.getMemory()
ActionController.PopulateNetworkingParams(VMxmlClass.xen_configuration.interfaces.interface, VMmodel)
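# Usage sketch (illustrative only; this helper is an assumption and is not called
# anywhere in this module). listResources() returns (hash, '') when the caller's
# cached hash is still current, and (hash, rspec_xml) when the description changed.
def _example_cached_list_resources(cached_hash, cached_rspec):
    new_hash, rspec = InformationDispatcher.listResources(cached_hash)
    if rspec == '':
        # nothing changed on the server side; keep using the cached rspec
        return cached_hash, cached_rspec
    # the resource description changed; store the fresh rspec together with its hash
    return new_hash, rspec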
| dana-i2cat/felix | vt_manager/src/python/vt_manager/controller/dispatchers/xmlrpc/InformationDispatcher.py | Python | apache-2.0 | 7,769 | 0.022268 |
# This code is licensed under the MIT License (see LICENSE file for details)
from PyQt5 import Qt
class ViewportRectItem(Qt.QGraphicsObject):
size_changed = Qt.pyqtSignal(Qt.QSizeF)
def __init__(self):
super().__init__()
self.setFlags(
Qt.QGraphicsItem.ItemIgnoresTransformations |
Qt.QGraphicsItem.ItemSendsGeometryChanges |
Qt.QGraphicsItem.ItemSendsScenePositionChanges |
Qt.QGraphicsItem.ItemHasNoContents
)
self._size = Qt.QSizeF()
# Children are generally overlay items that should appear over anything else rather than z-fighting
self.setZValue(10)
@property
def size(self):
return self._size
@size.setter
def size(self, v):
if not isinstance(v, Qt.QSizeF):
v = Qt.QSizeF(v)
if self._size != v:
self.prepareGeometryChange()
self._size = v
self.size_changed.emit(v)
def boundingRect(self):
return Qt.QRectF(Qt.QPointF(), self._size)
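# Usage sketch (assumption: standalone demo, not part of the original widget code).
# The item is added to a QGraphicsScene and resized to match the viewport; dependent
# overlay items can follow it through the size_changed signal.
if __name__ == '__main__':
    app = Qt.QApplication([])
    scene = Qt.QGraphicsScene()
    item = ViewportRectItem()
    scene.addItem(item)
    item.size_changed.connect(lambda s: print('viewport rect resized to', s))
    item.size = Qt.QSizeF(640, 480)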
| zpincus/RisWidget | ris_widget/qgraphicsitems/viewport_rect_item.py | Python | mit | 1,046 | 0.002868 |
###############################################################################
###############################################################################
# MODULE COPIER_COLLER_FICHIERS
###############################################################################
###############################################################################
###############################################################################
# IMPORT
###############################################################################
import os
import shutil
###############################################################################
# CONSTANTS / VARIABLES
###############################################################################
###############################################################################
# CLASSES
###############################################################################
###############################################################################
# FUNCTIONS
###############################################################################
def creer_dossier_doublons(source, dossier):
os.mkdir(source + "/" + dossier)
def copier_fichier(fi, source, dest):
shutil.copy2(fi, str(source + "/" + dest))
def supprimer_fichier(fi, source):
os.remove(source+"/"+fi)
def traiter_fichiers(liste, o, p):
    # liste: scan result exposing .traitee, .doublons and per-index file access;
    # o: options object (supprimer_doublons, deplacer_doublons, deplacer_original);
    # p: assumed to be a paths object exposing .source and .destination directories.
    for i in range(len(liste.traitee)):
        if liste.doublons[i] != False:
            if o.supprimer_doublons:
                if liste.doublons[i] == True:
                    supprimer_fichier(liste[i], p.source)
            else:
                if o.deplacer_original and o.deplacer_doublons:
                    copier_fichier(liste[i], p.source, p.destination)
                    supprimer_fichier(liste[i], p.source)
                elif o.deplacer_doublons and not o.deplacer_original:
                    if liste.doublons[i] == True:
                        copier_fichier(liste[i], p.source, p.destination)
                        supprimer_fichier(liste[i], p.source)
                else:
                    input("probleme")
| AlexandreGazagnes/Unduplicator | Copier_coller_fichiers.py | Python | mit | 2,039 | 0.02207 |
# coding: utf-8
if DefLANG in ("RU", "UA"):
AnsBase_temp = tuple([line.decode("utf-8") for line in (
"\nВсего входов - %d\nВремя последнего входа - %s\nПоследняя роль - %s", # 0
"\nВремя последнего выхода - %s\nПричина выхода - %s", # 1
"\nНики: %s", # 2
"Нет статистики.", # 3
"«%s» сидит здесь - %s.", # 4
"Ты провёл здесь - %s.", # 5
"Здесь нет такого юзера." # 6
)])
else:
AnsBase_temp = (
"\nTotal joins - %d\nThe Last join-time - %s\nThe last role - %s", # 0
"\nThe last leave-time - %s\nExit reason - %s", # 1
"\nNicks: %s", # 2
"No statistics.", # 3
"'%s' spent here - %s.", # 4
"You spent here - %s.", # 5
"No such user here." # 6
) | alkorgun/blacksmith-2 | expansions/user_stats/insc.py | Python | apache-2.0 | 813 | 0.048961 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2007 Michael Howitz, gocept gmbh & co. kg
#
# This is free software. You may redistribute it under the terms
# of the Apache license and the GNU General Public License Version
# 2 or at your option any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
import unittest
import os
import os.path
import odf.userfield
import tempfile
import zipfile
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
def get_file_path(file_name):
return os.path.join(os.path.dirname(__file__), "examples", file_name)
def get_user_fields(file_path):
return odf.userfield.UserFields(file_path)
class TestUserFields(unittest.TestCase):
userfields_odt = get_file_path("userfields.odt")
userfields_ooo3_odt = get_file_path("userfields_ooo3.odt")
no_userfields_odt = get_file_path("no_userfields.odt")
def setUp(self):
self.unlink_list = []
def tearDown(self):
# delete created destination files
for filename in self.unlink_list:
os.unlink(filename)
def test_exception(self):
# no zip-file
no_zip = odf.userfield.UserFields(__file__)
self.assertRaises(TypeError, no_zip.list_fields)
self.assertRaises(TypeError, no_zip.update, {})
def test_list_fields(self):
""" Find the expected fields in the file """
self.assertEqual([],
get_user_fields(self.no_userfields_odt).list_fields())
self.assertEqual(['username', 'firstname', 'lastname', 'address'],
get_user_fields(self.userfields_odt).list_fields())
def test_list_fields_and_values(self):
""" Find the expected fields and values in the file """
no_user_fields = get_user_fields(self.no_userfields_odt)
self.assertEqual([],
no_user_fields.list_fields_and_values())
self.assertEqual([],
no_user_fields.list_fields_and_values(['username']))
user_fields = get_user_fields(self.userfields_odt)
self.assertEqual([('username', 'string', ''),
('lastname', 'string', '<none>')],
user_fields.list_fields_and_values(['username',
'lastname']))
self.assertEqual(4, len(user_fields.list_fields_and_values()))
def test_list_values(self):
self.assertEqual(
[],
get_user_fields(self.no_userfields_odt).list_values(['username']))
self.assertEqual(
['', '<none>'],
get_user_fields(self.userfields_odt).list_values(
['username', 'lastname']))
def test_get(self):
user_fields = get_user_fields(self.userfields_odt)
self.assertEqual(
None,
get_user_fields(self.no_userfields_odt).get('username'))
self.assertEqual('', user_fields.get('username'))
self.assertEqual('<none>', user_fields.get('lastname'))
self.assertEqual(None, user_fields.get('street'))
def test_get_type_and_value(self):
self.assertEqual(
None,
get_user_fields(self.no_userfields_odt).get_type_and_value(
'username'))
user_fields = get_user_fields(self.userfields_odt)
self.assertEqual(
('string', ''), user_fields.get_type_and_value('username'))
self.assertEqual(
('string', '<none>'),
user_fields.get_type_and_value('lastname'))
self.assertEqual(None, user_fields.get_type_and_value('street'))
def test_update(self):
# test for file without user fields
no_user_fields = get_user_fields(self.no_userfields_odt)
no_user_fields.dest_file = self._get_dest_file_name()
no_user_fields.update({'username': 'mac'})
dest = odf.userfield.UserFields(no_user_fields.dest_file)
self.assertEqual([], dest.list_fields_and_values())
# test for file with user field, including test of encoding
user_fields = get_user_fields(self.userfields_odt)
user_fields.dest_file = self._get_dest_file_name()
user_fields.update({'username': 'mac',
'firstname': u'André',
'street': 'I do not exist'})
dest = odf.userfield.UserFields(user_fields.dest_file)
self.assertEqual([('username', 'string', 'mac'),
('firstname', 'string', 'André'),
('lastname', 'string', '<none>'),
('address', 'string', '')],
dest.list_fields_and_values())
def test_update_open_office_version_3(self):
"""Update fields in OpenOffice.org 3.x version of file."""
user_fields = get_user_fields(self.userfields_ooo3_odt)
user_fields.dest_file = self._get_dest_file_name()
user_fields.update({'username': 'mari',
'firstname': u'Lukas',
'street': 'I might exist.'})
dest = odf.userfield.UserFields(user_fields.dest_file)
self.assertEqual([('username', 'string', 'mari'),
('firstname', 'string', 'Lukas'),
('lastname', 'string', '<none>'),
('address', 'string', '')],
dest.list_fields_and_values())
def test_stringio(self):
        # test whether it is possible to use a StringIO as src and dest
src = StringIO(file(self.userfields_odt).read())
dest = StringIO()
# update fields
user_fields = odf.userfield.UserFields(src, dest)
user_fields.update({'username': 'mac',
'firstname': u'André',
'street': 'I do not exist'})
# reread dest StringIO to get field values
dest_user_fields = odf.userfield.UserFields(dest)
self.assertEqual([('username', 'string', 'mac'),
('firstname', 'string', 'André'),
('lastname', 'string', '<none>'),
('address', 'string', '')],
dest_user_fields.list_fields_and_values())
def test_newlines_in_values(self):
# test that newlines in values are encoded correctly so that
# they get read back correctly
user_fields = get_user_fields(self.userfields_odt)
user_fields.dest_file = self._get_dest_file_name()
user_fields.update({'username': 'mac',
'firstname': 'mac',
'lastname': 'mac',
'address': 'Hall-Platz 3\n01234 Testheim'})
dest = odf.userfield.UserFields(user_fields.dest_file)
self.assertEqual([('username', 'string', 'mac'),
('firstname', 'string', 'mac'),
('lastname', 'string', 'mac'),
('address', 'string',
'Hall-Platz 3\n01234 Testheim')],
dest.list_fields_and_values())
def _get_dest_file_name(self):
dummy_fh, dest_file_name = tempfile.mkstemp('.odt')
os.close(dummy_fh)
self.unlink_list.append(dest_file_name)
return dest_file_name
if __name__ == '__main__':
unittest.main()
| pacoqueen/odfpy | tests/testuserfields.py | Python | gpl-2.0 | 7,878 | 0.000127 |
# -*- coding: utf-8 -*-
import pandas as pd
import os
import os.path as path
import logging
from oemof import network
from oemof.solph import EnergySystem
from oemof.solph.options import BinaryFlow, Investment
from oemof.solph.plumbing import sequence
from oemof.solph.network import (Bus, Source, Sink, Flow, LinearTransformer,
Storage)
PARAMETER = (
'conversion_factors', 'nominal_value',
'min', 'max', 'summed_max', 'actual_value', 'fixed_costs', 'variable_costs',
'fixed', 'nominal_capacity', 'capacity_loss', 'inflow_conversion_factor',
'outflow_conversion_factor', 'initial_capacity', 'capacity_min',
'capacity_max', 'balanced', 'sort_index')
INDEX = ('class', 'label', 'source', 'target')
class SolphScenario(EnergySystem):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.p = kwargs.get('parameters')
self.s = kwargs.get('sequences')
self.path = kwargs.get('path', path.dirname(path.realpath(__file__)))
self.name = kwargs.get('name')
def create_parameter_table(self, additional_parameter=None):
"""Create an empty parameter table."""
if additional_parameter is None:
additional_parameter = tuple()
my_index = pd.MultiIndex(levels=[[], [], [], []],
labels=[[], [], [], []],
names=INDEX)
self.p = pd.DataFrame(columns=PARAMETER + tuple(additional_parameter),
index=my_index)
def create_sequence_table(self, datetime_index=None):
"""Create an empty sequence table."""
if datetime_index is None:
datetime_index = self.timeindex
my_index = pd.MultiIndex(
levels=[[], [], [], [], []], labels=[[], [], [], [], []],
names=INDEX + ('attributes',))
df = pd.DataFrame(index=datetime_index, columns=my_index)
self.s = df
def create_tables(self, **kwargs):
"""Create empty scenario tables (sequence and parameter)."""
self.create_parameter_table(
additional_parameter=kwargs.get('additional_parameter'))
self.create_sequence_table(datetime_index=kwargs.get('datetime_index'))
def read_parameter_table(self, filename=None):
"""Read existing parameter table from file."""
if filename is None:
filename = path.join(self.path, self.name + '.csv')
self.p = pd.read_csv(filename, index_col=[0, 1, 2, 3])
def read_sequence_table(self, filename=None):
"""Read existing parameter table from file."""
if filename is None:
filename = path.join(self.path, self.name + '_seq.csv')
self.s = pd.read_csv(filename, header=[0, 1, 2, 3, 4], parse_dates=True,
index_col=0)
def read_tables(self, parameterfile=None, sequencefile=None):
"""Read existing scenario tables (parameter and sequence)"""
self.read_parameter_table(parameterfile)
self.read_sequence_table(sequencefile)
def write_parameter_table(self, filename=None):
"""Write parameter table to file."""
if filename is None:
filename = path.join(self.path, self.name + '.csv')
self.p.sort_values('sort_index', inplace=True)
self.p.fillna('').to_csv(filename)
def write_sequence_table(self, filename=None):
"""Write sequence table to file."""
if filename is None:
filename = path.join(self.path, self.name + '_seq.csv')
self.s.to_csv(filename)
def write_tables(self, parameterfile=None, sequencefile=None):
"""Write scenario tables into two separate files."""
self.write_parameter_table(parameterfile)
self.write_sequence_table(sequencefile)
def create_nodes(self):
"""
Create nodes for a solph.energysystem
Notes
-----
        At the moment the nodes_from_csv function does not accept MultiIndex
        DataFrames, therefore the DataFrames need to be reshaped.
"""
tmp1 = pd.DataFrame(
index=self.s.columns).reset_index().transpose().reset_index()
tmp2 = self.s.reset_index()
for n in range(len(tmp2.columns.levels) - 1):
tmp2.columns = tmp2.columns.droplevel(0)
length = len(tmp1.columns)
tmp1.columns = list(range(length))
tmp2.columns = list(range(length))
return nodes_from_csv(
nodes_flows=self.p.reset_index(),
nodes_flows_seq=pd.DataFrame(pd.concat([tmp1, tmp2],
ignore_index=True)))
def add_parameters(self, idx, columns, values):
self.p.loc[idx, columns] = values
self.p = self.p.sortlevel()
def add_sequences(self, idx, seq):
self.s[idx[0], idx[1], idx[2], idx[3], idx[4]] = seq
def add_comment_line(self, comment, sort_entry):
self.p.loc[('### {0}'.format(comment), '', '', ''),
'sort_index'] = sort_entry
self.p = self.p.sortlevel()
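# Usage sketch (assumption: the scenario name, bus/sink labels and values below are
# placeholders, not taken from any real scenario).
def _example_build_scenario():
    """Minimal sketch: set up empty tables, add one demand entry and write the csv files."""
    sc = SolphScenario(name='my_scenario', path='scenarios',
                       timeindex=pd.date_range('2014-01-01', periods=24, freq='H'))
    sc.create_tables()
    sc.add_comment_line('DEMAND', 0)
    sc.add_parameters(('Sink', 'demand_el', 'bus_el', ''),
                      ['nominal_value', 'fixed', 'actual_value', 'sort_index'],
                      [100, 1, 'seq', 1])
    sc.add_sequences(('Sink', 'demand_el', 'bus_el', '', 'actual_value'),
                     [0.5] * 24)
    sc.write_tables()
    return sc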
def function1(row, nodes, classes, flow_attrs, seq_attributes, nodes_flows_seq,
i):
"""
create node if not existent and set attributes
(attributes must be placed either in the first line or in all
lines of multiple node entries (flows) in csv file)
"""
try:
if row['class'] in classes.keys():
node = nodes.get(row['label'])
if node is None:
node = classes[row['class']](label=row['label'])
# for the if check below we use all flow_attrs except
# investment
# because for storages investment needs to be set as a node
# attribute (and a flow attribute)
flow_attrs_ = [i for i in flow_attrs if i != 'investment']
for attr in row.keys():
if (attr not in flow_attrs_ and
attr not in ('class', 'label', 'source', 'target',
'conversion_factors')):
if row[attr] != 'seq':
if attr in seq_attributes:
row[attr] = sequence(float(row[attr]))
# again from investment storage the next lines
# are a little hacky as we need to create an
# solph.options.Investment() object
if (isinstance(node, Storage) and
attr == 'investment'):
setattr(node, attr, Investment())
invest_attrs = vars(Investment()).keys()
for iattr in invest_attrs:
if iattr in row.keys() and row[attr]:
setattr(node.investment,
iattr, row[iattr])
# for all 'normal' attributes
else:
setattr(node, attr, row[attr])
else:
seq = nodes_flows_seq.loc[row['class'],
row['label'],
row['source'],
row['target'],
attr]
if attr in seq_attributes:
seq = [i for i in seq]
seq = sequence(seq)
else:
seq = [i for i in seq.values]
setattr(node, attr, seq)
except:
print('Error with node creation in line', i+2, 'in csv file.')
print('Label:', row['label'])
raise
return node
def function2(row, node, flow_attrs, seq_attributes, nodes_flows_seq, i):
"""create flow and set attributes
"""
try:
flow = Flow()
for attr in flow_attrs:
if attr in row.keys() and row[attr]:
if row[attr] != 'seq':
if attr in seq_attributes:
row[attr] = sequence(float(row[attr]))
setattr(flow, attr, row[attr])
if row[attr] == 'seq':
seq = nodes_flows_seq.loc[row['class'],
row['label'],
row['source'],
row['target'],
attr]
if attr in seq_attributes:
seq = [i for i in seq]
seq = sequence(seq)
else:
seq = [i for i in seq.values]
setattr(flow, attr, seq)
# this block is only for binary flows!
if attr == 'binary' and row[attr] is True:
# create binary object for flow
setattr(flow, attr, BinaryFlow())
binary_attrs = vars(BinaryFlow()).keys()
for battr in binary_attrs:
if battr in row.keys() and row[attr]:
setattr(flow.binary, battr, row[battr])
# this block is only for investment flows!
if attr == 'investment' and row[attr] is True:
if isinstance(node, Storage):
# set the flows of the storage to Investment as
# without attributes, as costs etc are set at
# the node
setattr(flow, attr, Investment())
else:
# create binary object for flow
setattr(flow, attr, Investment())
invest_attrs = vars(Investment()).keys()
for iattr in invest_attrs:
if iattr in row.keys() and row[attr]:
setattr(flow.investment, iattr,
row[iattr])
except:
print('Error with flow creation in line', i + 2, 'in csv file.')
print('Label:', row['label'])
raise
return flow
def function3(row, nodes, flow, bus_attrs, type1, type2, i):
"""create an output entry for the current lin
"""
try:
if row['label'] == row[type1]:
if row[type2] not in nodes.keys():
nodes[row[type2]] = Bus(label=row[type2])
for attr in bus_attrs:
if attr in row.keys() and row[attr] is not None:
setattr(nodes[row[type2]], attr, row[attr])
tmp = {nodes[row[type2]]: flow}
else:
tmp = {}
except:
print('Error with output creation in line', i + 2,
'in csv file.')
print('Label:', row['label'])
raise
return tmp
def function4(row, nodes, nodes_flows_seq, i):
"""create a conversion_factor entry for the current lin
"""
try:
if row['target'] and 'conversion_factors' in row:
if row['conversion_factors'] == 'seq':
seq = nodes_flows_seq.loc[row['class'],
row['label'],
row['source'],
row['target'],
'conversion_factors']
seq = [i for i in seq]
seq = sequence(seq)
conversion_factors = {nodes[row['target']]: seq}
else:
conversion_factors = {
nodes[row['target']]:
sequence(float(row['conversion_factors']))}
else:
conversion_factors = {}
except:
print('Error with conversion factor creation in line', i + 2,
'in csv file.')
print('Label:', row['label'])
raise
return conversion_factors
def NodesFromCSV(file_nodes_flows, file_nodes_flows_sequences, **kwargs):
"""Keep old name to keep the API."""
    return nodes_from_csv(file_nodes_flows, file_nodes_flows_sequences, **kwargs)
def nodes_from_csv(file_nodes_flows=None, file_nodes_flows_sequences=None,
nodes_flows=None, nodes_flows_seq=None, delimiter=',',
additional_classes=None, additional_seq_attributes=None,
additional_flow_attributes=None):
""" Creates nodes with their respective flows and sequences from
a pre-defined CSV structure. An example has been provided in the
development examples
Parameters
----------
nodes_flows_seq : pandas.DataFrame
nodes_flows : pandas.DataFrame
file_nodes_flows : string
Name of CSV file with nodes and flows
file_nodes_flows_sequences : string
Name of of CSV file containing sequences
delimiter : str
Delimiter of CSV file
additional_classes : dict
Dictionary containing additional classes to be recognized inside the
csv reader. Looks like: {'MyClass1': MyClass1, ...}
additional_seq_attributes : iterable
List of string with attributes that have to be of type 'solph sequence'
and that shall be recognized inside the csv file.
additional_flow_attributes : iterable
List of string with attributes that shall be recognized inside the
csv file and set as flow attribute
"""
# Check attributes for None values
if additional_classes is None:
additional_classes = dict()
if additional_seq_attributes is None:
additional_seq_attributes = list()
if additional_flow_attributes is None:
additional_flow_attributes = list()
# DataFrame creation and manipulation
if nodes_flows is None:
nodes_flows = pd.read_csv(file_nodes_flows, sep=delimiter)
if nodes_flows_seq is None:
nodes_flows_seq = pd.read_csv(file_nodes_flows_sequences, sep=delimiter,
header=None)
nodes_flows_seq.dropna(axis=0, how='all', inplace=True)
nodes_flows_seq.drop(0, axis=1, inplace=True)
nodes_flows_seq = nodes_flows_seq.transpose()
nodes_flows_seq.set_index([0, 1, 2, 3, 4], inplace=True)
nodes_flows_seq.columns = range(0, len(nodes_flows_seq.columns))
nodes_flows_seq = nodes_flows_seq.astype(float)
# class dictionary for dynamic instantiation
classes = {'Source': Source, 'Sink': Sink,
'LinearTransformer': LinearTransformer,
'Storage': Storage, 'Bus': Bus}
classes.update(additional_classes)
# attributes that have to be converted into a solph sequence
seq_attributes = ['actual_value', 'min', 'max', 'positive_gradient',
'negative_gradient', 'variable_costs',
'capacity_loss', 'inflow_conversion_factor',
'outflow_conversion_factor', 'capacity_max',
'capacity_min'] + additional_seq_attributes
# attributes of different classes
flow_attrs = list(vars(Flow()).keys()) + additional_flow_attributes
bus_attrs = vars(Bus()).keys()
# iteration over dataframe rows to create objects
nodes = {}
for i, r in nodes_flows.iterrows():
# check if current line holds valid data or is just for visual purposes
# e.g. a blank line or a line that contains data explanations
if isinstance(r['class'], str) and r['class'] in classes.keys():
# drop NaN values from series
r = r.dropna()
# save column labels and row values in dict
row = dict(zip(r.index.values, r.values))
# function1
node = function1(row, nodes, classes, flow_attrs, seq_attributes,
nodes_flows_seq, i)
# create flow and set attributes
flow = function2(row, node, flow_attrs, seq_attributes,
nodes_flows_seq, i)
# inputs, outputs and conversion_factors
inputs = function3(row, nodes, flow, bus_attrs, 'target', 'source',
i)
outputs = function3(row, nodes, flow, bus_attrs, 'source', 'target',
i)
conversion_factors = function4(row, nodes, nodes_flows_seq, i)
# add node to dict and assign attributes depending on
# if there are multiple lines per node or not
try:
for source, f in inputs.items():
network.flow[source, node] = f
for target, f in outputs.items():
network.flow[node, target] = f
if node.label in nodes.keys():
if not isinstance(node, Bus):
node.conversion_factors.update(conversion_factors)
else:
if not isinstance(node, Bus):
node.conversion_factors = conversion_factors
nodes[node.label] = node
except:
print('Error adding node to dict in line', i+2, 'in csv file.')
print('Label:', row['label'])
raise
return nodes
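# Usage sketch (assumption: 'scenario.csv'/'scenario_seq.csv' are placeholder file names
# following the layout documented above).
def _example_nodes_from_csv():
    """Minimal sketch of building solph nodes from a parameter and a sequence csv file."""
    return nodes_from_csv(file_nodes_flows='scenario.csv',
                          file_nodes_flows_sequences='scenario_seq.csv',
                          delimiter=',')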
def merge_csv_files(path=None, output_path=None, write=True):
"""
    Merge csv files from a specified directory. Files whose names contain 'seq'
    are merged into the sequence table; all other csv files are merged into the
    parameter table. Make sure that no csv files other than the ones to be merged
    are inside the specified directory.
Parameters
----------
path: str
Path to the directory where csv files are stored
output_path : str
Path where the merged files are written to (default is `path` above)
write : boolean
Indicating if new, merged dataframes should be written to csv
Returns
-------
Tuple of dataframes (nodes_flows, nodes_flows_seq)
"""
if output_path is None:
output_path = path
files = [f for f in os.listdir(path) if f.endswith('.csv')]
nodes_flows = pd.DataFrame()
nodes_flows_seq = pd.DataFrame()
for f in files:
if 'seq' in f:
tmp_df = pd.read_csv(os.path.join(path, f), index_col=[0],
header=[0, 1, 2, 3, 4])
nodes_flows_seq = pd.concat([nodes_flows_seq, tmp_df], axis=1)
else:
tmp_df = pd.read_csv(os.path.join(path, f))
nodes_flows = pd.concat([nodes_flows, tmp_df])
if write is True:
nodes_flows.to_csv(os.path.join(output_path,
'merged_nodes_flows.csv'), index=False)
if isinstance(nodes_flows_seq.columns, pd.MultiIndex):
nodes_flows_seq.to_csv(os.path.join(output_path,
'merged_nodes_flows_seq.csv'))
else:
            raise ValueError('Columns of merged seq-csvfile are not a MultiIndex. '
'Did you use unique column-headers across all '
'files?')
return nodes_flows, nodes_flows_seq
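# Usage sketch (assumption: 'scenario_csvs' is a placeholder directory and the merged
# files follow the same layout as the original per-component files).
def _example_merge_and_build(csv_dir='scenario_csvs'):
    """Minimal sketch: merge per-component csv files, then build nodes from the result."""
    merge_csv_files(path=csv_dir, output_path=csv_dir, write=True)
    return nodes_from_csv(
        file_nodes_flows=os.path.join(csv_dir, 'merged_nodes_flows.csv'),
        file_nodes_flows_sequences=os.path.join(csv_dir, 'merged_nodes_flows_seq.csv'))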
def resample_sequence(seq_base_file=None, output_path=None,
samples=None, file_prefix=None, file_suffix='_seq',
header=[0, 1, 2, 3, 4]):
"""
This function can be used for resampling the sequence csv-data file.
    The file is read from the specified path `seq_base_file`, resampled, and
    written back to a specified directory. Note that the sequence files
    are expected to have a time index column that can be parsed by
    pandas, with entries like '2014-01-01 00:00:00+00:00'.
Parameters
----------
seq_base_file : string
File that contains data to be resampled.
output_path : string
        Path for resampled seq-files. If no path is specified, the directory
        of :attr:`seq_base_file` will be used.
samples : list
List of strings with the resampling rate e.g. ['4H', '2H']. See
`pandas.DataFrame.resample` method for more information on format.
file_prefix : string
String that is put as prefix of the file name, i.e. filename is created
by: `file_prefix+s+file_suffix+'.csv'`
file_suffix : string
        String that is put as suffix (before .csv), default is '_seq'. See also
file_prefix.
header : list
        List of integers to specify the header lines
"""
if samples is None:
        raise ValueError('Missing sample attribute. Please specify!')
if output_path is None:
logging.info('No output_path specified' +
', setting output_path to seq_path!')
        output_path = os.path.dirname(seq_base_file)
if not os.path.exists(output_path):
os.makedirs(output_path, exist_ok=True)
seq_path, seq_file = os.path.split(seq_base_file)
# read the file and parse the dates from the first column (index 0)
seq = pd.read_csv(os.path.join(seq_path, seq_file),
header=header, parse_dates=[0])
# store the first column name for reuse
first_col = seq.columns[0]
# set the index as datetimeindex from column with parsed dates
seq.index = seq[first_col]
# set timeindex
    # convert columns to numeric values, except the datetime column,
    # which we keep for reuse
for col in seq:
if col == first_col:
seq[col] = -999999
else:
seq[col] = seq[col].astype(float)
for s in samples:
# resample dataframes
seq_sampled = seq.resample(s).mean()
# assign the resampled datetimeindex values to the first columns,
# replacing the -999999
seq_sampled[first_col] = seq_sampled.index
if file_prefix is None:
file_prefix = seq_file.split('seq')[0]
logging.info('Setting filename prefix to: {}'.format(file_prefix))
filename = os.path.join(output_path, file_prefix+s+file_suffix+'.csv')
logging.info('Writing sample file to {0}.'.format(filename))
seq_sampled.to_csv(filename, index=False)
return seq_sampled
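# Usage sketch (assumption: file and directory names are placeholders).
def _example_resample(seq_file='scenario_seq.csv'):
    """Minimal sketch: write 4-hourly and daily variants of a sequence file."""
    return resample_sequence(seq_base_file=seq_file,
                             output_path='resampled',
                             samples=['4H', '1D'])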
# my = SolphScenario(path='my_scenarios', name='reegis_de_3_short')
# my.read_tables()
# my.create_nodes()
| rl-institut/reegis_hp | reegis_hp/de21/scenario_tools.py | Python | gpl-3.0 | 22,415 | 0.000491 |
from __future__ import absolute_import
import posixpath
from rest_framework import serializers
from rest_framework.response import Response
from sentry.api.base import DocSection
from sentry.api.bases.project import ProjectEndpoint, ProjectReleasePermission
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.serializers import serialize
from sentry.models import Release, ReleaseFile
from sentry.utils.apidocs import scenario, attach_scenarios
try:
from django.http import (
CompatibleStreamingHttpResponse as StreamingHttpResponse
)
except ImportError:
from django.http import StreamingHttpResponse
@scenario('RetrieveReleaseFile')
def retrieve_file_scenario(runner):
rf = runner.utils.create_release_file(
project=runner.default_project,
release=runner.default_release,
path='/demo/readme.txt',
contents='Hello World!'
)
runner.request(
method='GET',
path='/projects/%s/%s/releases/%s/files/%s/' % (
runner.org.slug, runner.default_project.slug,
runner.default_release.version, rf.id)
)
@scenario('UpdateReleaseFile')
def update_file_scenario(runner):
rf = runner.utils.create_release_file(
project=runner.default_project,
release=runner.default_release,
path='/demo/hello.txt',
contents='Good bye World!'
)
runner.request(
method='PUT',
path='/projects/%s/%s/releases/%s/files/%s/' % (
runner.org.slug, runner.default_project.slug,
runner.default_release.version, rf.id),
data={
'name': '/demo/goodbye.txt'
}
)
@scenario('DeleteReleaseFile')
def delete_file_scenario(runner):
rf = runner.utils.create_release_file(
project=runner.default_project,
release=runner.default_release,
path='/demo/badfile.txt',
contents='Whatever!'
)
runner.request(
method='DELETE',
path='/projects/%s/%s/releases/%s/files/%s/' % (
runner.org.slug, runner.default_project.slug,
runner.default_release.version, rf.id)
)
class ReleaseFileSerializer(serializers.Serializer):
name = serializers.CharField(max_length=200, required=True)
class ProjectReleaseFileDetailsEndpoint(ProjectEndpoint):
doc_section = DocSection.RELEASES
permission_classes = (ProjectReleasePermission,)
def download(self, releasefile):
file = releasefile.file
fp = file.getfile()
response = StreamingHttpResponse(
iter(lambda: fp.read(4096), b''),
content_type=file.headers.get('content-type', 'application/octet-stream'),
)
response['Content-Length'] = file.size
response['Content-Disposition'] = 'attachment; filename="%s"' % posixpath.basename(" ".join(releasefile.name.split()))
return response
@attach_scenarios([retrieve_file_scenario])
def get(self, request, project, version, file_id):
"""
Retrieve a Project Release's File
`````````````````````````````````
Return details on an individual file within a release. This does
not actually return the contents of the file, just the associated
metadata.
:pparam string organization_slug: the slug of the organization the
release belongs to.
:pparam string project_slug: the slug of the project to retrieve the
file of.
:pparam string version: the version identifier of the release.
:pparam string file_id: the ID of the file to retrieve.
:auth: required
"""
try:
release = Release.objects.get(
organization_id=project.organization_id,
projects=project,
version=version,
)
except Release.DoesNotExist:
raise ResourceDoesNotExist
try:
releasefile = ReleaseFile.objects.get(
release=release,
id=file_id,
)
except ReleaseFile.DoesNotExist:
raise ResourceDoesNotExist
download_requested = request.GET.get('download') is not None
if download_requested and (
request.access.has_scope('project:write')):
return self.download(releasefile)
elif download_requested:
return Response(status=403)
return Response(serialize(releasefile, request.user))
@attach_scenarios([update_file_scenario])
def put(self, request, project, version, file_id):
"""
Update a File
`````````````
Update metadata of an existing file. Currently only the name of
the file can be changed.
:pparam string organization_slug: the slug of the organization the
release belongs to.
:pparam string project_slug: the slug of the project to update the
file of.
:pparam string version: the version identifier of the release.
:pparam string file_id: the ID of the file to update.
:param string name: the new name of the file.
:auth: required
"""
try:
release = Release.objects.get(
organization_id=project.organization_id,
projects=project,
version=version,
)
except Release.DoesNotExist:
raise ResourceDoesNotExist
try:
releasefile = ReleaseFile.objects.get(
release=release,
id=file_id,
)
except ReleaseFile.DoesNotExist:
raise ResourceDoesNotExist
serializer = ReleaseFileSerializer(data=request.DATA)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
result = serializer.object
releasefile.update(
name=result['name'],
)
return Response(serialize(releasefile, request.user))
@attach_scenarios([delete_file_scenario])
def delete(self, request, project, version, file_id):
"""
Delete a File
`````````````
Permanently remove a file from a release.
This will also remove the physical file from storage.
:pparam string organization_slug: the slug of the organization the
release belongs to.
:pparam string project_slug: the slug of the project to delete the
file of.
:pparam string version: the version identifier of the release.
:pparam string file_id: the ID of the file to delete.
:auth: required
"""
try:
release = Release.objects.get(
organization_id=project.organization_id,
projects=project,
version=version,
)
except Release.DoesNotExist:
raise ResourceDoesNotExist
try:
releasefile = ReleaseFile.objects.get(
release=release,
id=file_id,
)
except ReleaseFile.DoesNotExist:
raise ResourceDoesNotExist
file = releasefile.file
# TODO(dcramer): this doesnt handle a failure from file.deletefile() to
# the actual deletion of the db row
releasefile.delete()
file.delete()
return Response(status=204)
| JackDanger/sentry | src/sentry/api/endpoints/project_release_file_details.py | Python | bsd-3-clause | 7,531 | 0.000266 |
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
"""release.py
"""
import os
from mozharness.base.config import parse_config_file
# ReleaseMixin {{{1
class ReleaseMixin():
release_config = {}
def query_release_config(self):
if self.release_config:
return self.release_config
c = self.config
dirs = self.query_abs_dirs()
if c.get("release_config_file"):
self.info("Getting release config from %s..." % c["release_config_file"])
rc = None
try:
rc = parse_config_file(
os.path.join(dirs['abs_work_dir'],
c["release_config_file"]),
config_dict_name="releaseConfig"
)
except IOError:
self.fatal("Release config file %s not found!" % c["release_config_file"])
except RuntimeError:
self.fatal("Invalid release config file %s!" % c["release_config_file"])
self.release_config['version'] = rc['version']
self.release_config['buildnum'] = rc['buildNumber']
self.release_config['ftp_server'] = rc['stagingServer']
self.release_config['ftp_user'] = c.get('ftp_user', rc['hgUsername'])
self.release_config['ftp_ssh_key'] = c.get('ftp_ssh_key', rc['hgSshKey'])
else:
self.info("No release config file; using default config.")
for key in ('version', 'buildnum',
'ftp_server', 'ftp_user', 'ftp_ssh_key'):
self.release_config[key] = c[key]
self.info("Release config:\n%s" % self.release_config)
return self.release_config
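# Example (sketch): the shape of the releaseConfig dict that
# query_release_config() expects to find in the release config file. The keys
# mirror the lookups above; the values shown are assumed placeholders.
# releaseConfig = {
#     'version': '10.0.2',
#     'buildNumber': 1,
#     'stagingServer': 'stage.example.org',
#     'hgUsername': 'ffxbld',
#     'hgSshKey': '~/.ssh/ffxbld_rsa',
# }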
| lissyx/build-mozharness | mozharness/mozilla/release.py | Python | mpl-2.0 | 1,945 | 0.002571 |
#!/usr/bin/env python2.7
from PyQt4 import QtCore, QtGui
class MigrationWizardIntroPage(QtGui.QWizardPage):
def __init__(self):
super(QtGui.QWizardPage, self).__init__()
self.setTitle("Migrating configuration")
label = QtGui.QLabel("This wizard will help you to migrate your configuration. "
"You can still keep using PyBitMessage once you migrate, the changes are backwards compatible.")
label.setWordWrap(True)
layout = QtGui.QVBoxLayout()
layout.addWidget(label)
self.setLayout(layout)
def nextId(self):
return 1
class MigrationWizardAddressesPage(QtGui.QWizardPage):
def __init__(self, addresses):
super(QtGui.QWizardPage, self).__init__()
self.setTitle("Addresses")
label = QtGui.QLabel("Please select addresses that you are already using with mailchuck. ")
label.setWordWrap(True)
layout = QtGui.QVBoxLayout()
layout.addWidget(label)
self.setLayout(layout)
def nextId(self):
return 10
class MigrationWizardGPUPage(QtGui.QWizardPage):
def __init__(self):
super(QtGui.QWizardPage, self).__init__()
self.setTitle("GPU")
label = QtGui.QLabel("Are you using a GPU? ")
label.setWordWrap(True)
layout = QtGui.QVBoxLayout()
layout.addWidget(label)
self.setLayout(layout)
def nextId(self):
return 10
class MigrationWizardConclusionPage(QtGui.QWizardPage):
def __init__(self):
super(QtGui.QWizardPage, self).__init__()
self.setTitle("All done!")
label = QtGui.QLabel("You successfully migrated.")
label.setWordWrap(True)
layout = QtGui.QVBoxLayout()
layout.addWidget(label)
self.setLayout(layout)
class Ui_MigrationWizard(QtGui.QWizard):
def __init__(self, addresses):
super(QtGui.QWizard, self).__init__()
self.pages = {}
page = MigrationWizardIntroPage()
self.setPage(0, page)
self.setStartId(0)
page = MigrationWizardAddressesPage(addresses)
self.setPage(1, page)
page = MigrationWizardGPUPage()
self.setPage(2, page)
page = MigrationWizardConclusionPage()
self.setPage(10, page)
self.setWindowTitle("Migration from PyBitMessage wizard")
self.adjustSize()
        self.show()
 | timothyparez/PyBitmessage | src/bitmessageqt/migrationwizard.py | Python | mit | 2,437 | 0.005334 |
# encoding: utf-8
# module samba.dcerpc.dnsserver
# from /usr/lib/python2.7/dist-packages/samba/dcerpc/dnsserver.so
# by generator 1.135
""" dnsserver DCE/RPC """
# imports
import dcerpc as __dcerpc
import talloc as __talloc
class DNS_RPC_ENUM_ZONES_FILTER(__talloc.Object):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
dwFilter = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
dwReserved0 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
dwRpcStructureVersion = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
pszPartitionFqdn = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
pszQueryString = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
pszReserved = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
| ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/samba/dcerpc/dnsserver/DNS_RPC_ENUM_ZONES_FILTER.py | Python | gpl-2.0 | 1,215 | 0.00823 |
import sys
import numpy as np
import pylab
import matplotlib.pyplot as plt
import scipy.integrate
import scipy.optimize
from collections import namedtuple
import geo
import astro_help as ah
import disk_sub as disk
RADIAN=57.29598
C=2.997925e10
MSOL=1.979e33
G=6.670e-8
YR=3.1556925e7
EPSILON=1e-6
PI=3.1416
STEFAN_BOLTZMANN=5.669e-5
def tdisk (m, mdot, r):
t = 3. * G / (8. * PI * STEFAN_BOLTZMANN) * m * mdot / (r * r * r)
t = pow (t, 0.25)
return (t)
def teff (t, x):
q = (1.e0 - (x ** -0.5e0)) / (x * x * x);
q = t * (q ** 0.25e0);
return (q)
def spec_disk (f1,f2,m,mdot,rmin,rmax):
tref=tdisk(m, mdot, rmin)
nfreq=(f2/f1)*100
freq=np.linspace(f1,f2,nfreq)
	spec=np.zeros(nfreq)
dfreq=freq[1]-freq[0]
rtemp=np.logspace(np.log10(rmin),np.log10(rmax),num=100)
rdisk=[]
for j in range(len(rtemp)-1):
rdisk.append((rtemp[j]+rtemp[j+1])/2.0)
r=rdisk[j]/rmin
area=PI*(rtemp[j+1]*rtemp[j+1]-rtemp[j]*rtemp[j])
t=(disk.teff(tref,r))
for i in range(len(freq)):
spec[i]=spec[i]+(ah.planck_nu(t,freq[i])*area*PI*2)
return (freq,spec)
def spec_disk1 (f1,f2,m,mdot,rmin,rmax):
tref=tdisk(m, mdot, rmin)
nfreq=1000
freq=np.logspace(np.log10(f1),np.log10(f2),nfreq)
	spec=np.zeros(nfreq)
dfreq=freq[1]-freq[0]
rtemp=np.logspace(np.log10(rmin),np.log10(rmax),num=100)
rdisk=[]
for j in range(len(rtemp)-1):
rdisk.append((rtemp[j]+rtemp[j+1])/2.0)
r=rdisk[j]/rmin
area=PI*(rtemp[j+1]*rtemp[j+1]-rtemp[j]*rtemp[j])
t=(disk.teff(tref,r))
for i in range(len(freq)-1):
spec[i]=spec[i]+(ah.planck_nu(t,(freq[i+1]+freq[i])/2.0)*area*PI*2*(freq[i+1]-freq[i]))
return (freq,spec)
def lnu_disk (f,m,mdot,rmin,rmax):
tref=tdisk(m, mdot, rmin)
rtemp=np.logspace(np.log10(rmin),np.log10(rmax),num=100)
rdisk=[]
lnu=0.0
for j in range(len(rtemp)-1):
rdisk.append((rtemp[j]+rtemp[j+1])/2.0)
r=rdisk[j]/rmin
area=PI*(rtemp[j+1]*rtemp[j+1]-rtemp[j]*rtemp[j])
t=(disk.teff(tref,r))
lnu=lnu+(ah.planck_nu(t,f)*area*PI*2.0)
return (lnu)
def llamb_disk (lamb,m,mdot,rmin,rmax):
tref=tdisk(m, mdot, rmin)
rtemp=np.logspace(np.log10(rmin),np.log10(rmax),num=100)
rdisk=[]
llamb=0.0
for j in range(len(rtemp)-1):
rdisk.append((rtemp[j]+rtemp[j+1])/2.0)
r=rdisk[j]/rmin
area=PI*(rtemp[j+1]*rtemp[j+1]-rtemp[j]*rtemp[j])
t=(disk.teff(tref,r))
llamb=llamb+(ah.planck_lamb(t,lamb)*area*PI*2.0)
return (llamb)
def spec_disk2 (f1,f2,m,mdot,rmin,rmax):
tref=tdisk(m, mdot, rmin)
nfreq=10
f1a=10**float(int(np.log10(f1)))
f2a=10**float(int(np.log10(f2))+1)
nrange=int(np.log10((f2a/f1a)))
freq=[]
dfreq=[]
ftemp=f1a
df=f1a/nfreq
for i in range(nrange):
for j in range(nfreq*9):
ftemp=ftemp+df
if ftemp > f2:
break
if ftemp >= f1:
freq.append(ftemp)
df=df*10.0
#print freq[0],freq[len(freq)-1]
spec=np.zeros(len(freq))
rtemp=np.logspace(np.log10(rmin),np.log10(rmax),num=100)
rdisk=[]
for j in range(len(rtemp)-1):
rdisk.append((rtemp[j]+rtemp[j+1])/2.0)
r=rdisk[j]/rmin
area=PI*(rtemp[j+1]*rtemp[j+1]-rtemp[j]*rtemp[j])
t=(disk.teff(tref,r))
for i in range(len(freq)-1):
spec[i]=spec[i]+(ah.planck_nu(t,freq[i])*area*PI*2)
return (freq,spec)
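# Example (sketch): a minimal driver for the routines above. The mass,
# accretion rate, radii and frequency band are assumed placeholder values.
def _example_spectrum():
    m = 1.0e9 * MSOL           # central mass in grams (assumed)
    mdot = 1.0 * MSOL / YR     # accretion rate in g/s (assumed)
    rmin = 8.8e14              # inner disc radius in cm (assumed)
    rmax = 1.0e17              # outer disc radius in cm (assumed)
    freq, spec = spec_disk2(1e14, 1e18, m, mdot, rmin, rmax)
    return freq, spec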
| jhmatthews/cobra | source/disk_sub.py | Python | gpl-2.0 | 3,164 | 0.072377 |
#!/usr/bin/env python
# coding=utf-8
# Contributor:
# Phus Lu <[email protected]>
__version__ = '3.1.1'
__password__ = ''
__hostsdeny__ = () # __hostsdeny__ = ('.youtube.com', '.youku.com')
import gevent.monkey
gevent.monkey.patch_all(subprocess=True)
import sys
import errno
import time
import itertools
import logging
import string
import base64
import urlparse
import httplib
import socket
import ssl
import select
TIMEOUT = 20
def message_html(title, banner, detail=''):
MESSAGE_TEMPLATE = '''
<html><head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<title>$title</title>
<style><!--
body {font-family: arial,sans-serif}
div.nav {margin-top: 1ex}
div.nav A {font-size: 10pt; font-family: arial,sans-serif}
span.nav {font-size: 10pt; font-family: arial,sans-serif; font-weight: bold}
div.nav A,span.big {font-size: 12pt; color: #0000cc}
div.nav A {font-size: 10pt; color: black}
A.l:link {color: #6f6f6f}
A.u:link {color: green}
//--></style>
</head>
<body text=#000000 bgcolor=#ffffff>
<table border=0 cellpadding=2 cellspacing=0 width=100%>
<tr><td bgcolor=#3366cc><font face=arial,sans-serif color=#ffffff><b>Message</b></td></tr>
<tr><td> </td></tr></table>
<blockquote>
<H1>$banner</H1>
$detail
<p>
</blockquote>
<table width=100% cellpadding=0 cellspacing=0><tr><td bgcolor=#3366cc><img alt="" width=1 height=4></td></tr></table>
</body></html>
'''
return string.Template(MESSAGE_TEMPLATE).substitute(title=title, banner=banner, detail=detail)
class XORCipher(object):
"""XOR Cipher Class"""
def __init__(self, key):
self.__key_gen = itertools.cycle(key).next
def encrypt(self, data):
return ''.join(chr(ord(x) ^ ord(self.__key_gen())) for x in data)
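# Example (sketch): XOR round-trip check for XORCipher. Because each instance
# keeps its own keystream position, decryption uses a fresh instance with the
# same (assumed) key.
def _xor_roundtrip_example(data='hello world', key='secret'):
    encrypted = XORCipher(key).encrypt(data)
    decrypted = XORCipher(key).encrypt(encrypted)
    assert decrypted == data
    return encrypted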
class XORFileObject(object):
"""fileobj for xor"""
def __init__(self, stream, key):
self.__stream = stream
self.__cipher = XORCipher(key)
def __getattr__(self, attr):
if attr not in ('__stream', '__key_gen'):
return getattr(self.__stream, attr)
def read(self, size=-1):
return self.__cipher.encrypt(self.__stream.read(size))
def forward_socket(local, remote, timeout=60, tick=2, bufsize=8192, maxping=None, maxpong=None):
try:
timecount = timeout
while 1:
timecount -= tick
if timecount <= 0:
break
(ins, _, errors) = select.select([local, remote], [], [local, remote], tick)
if errors:
break
if ins:
for sock in ins:
data = sock.recv(bufsize)
if data:
if sock is remote:
local.sendall(data)
timecount = maxpong or timeout
else:
remote.sendall(data)
timecount = maxping or timeout
else:
return
except socket.error as e:
if e.args[0] not in ('timed out', errno.ECONNABORTED, errno.ECONNRESET, errno.EBADF, errno.EPIPE, errno.ENOTCONN, errno.ETIMEDOUT):
raise
finally:
if local:
local.close()
if remote:
remote.close()
def application(environ, start_response):
if environ['REQUEST_METHOD'] == 'GET':
start_response('302 Found', [('Location', 'https://www.google.com')])
raise StopIteration
query_string = environ['QUERY_STRING']
kwargs = dict(urlparse.parse_qsl(query_string))
host = kwargs.pop('host')
port = int(kwargs.pop('port'))
timeout = int(kwargs.get('timeout') or TIMEOUT)
logging.info('%s "%s %s %s" - -', environ['REMOTE_ADDR'], host, port, 'HTTP/1.1')
if __password__ and __password__ != kwargs.get('password'):
random_host = 'g%d%s' % (int(time.time()*100), environ['HTTP_HOST'])
conn = httplib.HTTPConnection(random_host, timeout=timeout)
conn.request('GET', '/')
response = conn.getresponse(True)
status_line = '%s %s' % (response.status, httplib.responses.get(response.status, 'OK'))
start_response(status_line, response.getheaders())
yield response.read()
raise StopIteration
if __hostsdeny__ and host.endswith(__hostsdeny__):
start_response('403 Forbidden', [('Content-Type', 'text/html')])
yield message_html('403 Forbidden Host', 'Hosts Deny(%s)' % host, detail='host=%r' % host)
raise StopIteration
wsgi_input = environ['wsgi.input']
remote = socket.create_connection((host, port), timeout=timeout)
if kwargs.get('ssl'):
remote = ssl.wrap_socket(remote)
while True:
data = wsgi_input.read(8192)
if not data:
break
remote.send(data)
start_response('200 OK', [])
forward_socket(wsgi_input.socket, remote)
yield 'out'
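# Example (sketch): building the query string this WSGI app expects. The
# server URL is an assumed placeholder; host/port/ssl/timeout mirror the
# parameters parsed in application() above.
def _example_request_url(server='https://paas.example.com/',
                         host='www.google.com', port=443):
    import urllib
    params = {'host': host, 'port': port, 'ssl': 1, 'timeout': TIMEOUT}
    if __password__:
        params['password'] = __password__
    return server + '?' + urllib.urlencode(params)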
if __name__ == '__main__':
import gevent.wsgi
logging.basicConfig(level=logging.INFO, format='%(levelname)s - - %(asctime)s %(message)s', datefmt='[%b %d %H:%M:%S]')
server = gevent.wsgi.WSGIServer(('', int(sys.argv[1])), application)
logging.info('local paas_application serving at %s:%s', server.address[0], server.address[1])
server.serve_forever()
| JerryXia/fastgoagent | goagent/server/paas/wsgi.py | Python | mit | 5,380 | 0.004833 |
"""Helpers for components that manage entities."""
import asyncio
from datetime import timedelta
from homeassistant import config as conf_util
from homeassistant.setup import async_prepare_setup_platform
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_SCAN_INTERVAL, CONF_ENTITY_NAMESPACE,
DEVICE_DEFAULT_NAME)
from homeassistant.core import callback, valid_entity_id
from homeassistant.exceptions import HomeAssistantError, PlatformNotReady
from homeassistant.loader import get_component
from homeassistant.helpers import config_per_platform, discovery
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.helpers.event import (
async_track_time_interval, async_track_point_in_time)
from homeassistant.helpers.service import extract_entity_ids
from homeassistant.util import slugify
from homeassistant.util.async import (
run_callback_threadsafe, run_coroutine_threadsafe)
import homeassistant.util.dt as dt_util
DEFAULT_SCAN_INTERVAL = timedelta(seconds=15)
SLOW_SETUP_WARNING = 10
SLOW_SETUP_MAX_WAIT = 60
PLATFORM_NOT_READY_RETRIES = 10
class EntityComponent(object):
"""Helper class that will help a component manage its entities."""
def __init__(self, logger, domain, hass,
scan_interval=DEFAULT_SCAN_INTERVAL, group_name=None):
"""Initialize an entity component."""
self.logger = logger
self.hass = hass
self.domain = domain
self.entity_id_format = domain + '.{}'
self.scan_interval = scan_interval
self.group_name = group_name
self.entities = {}
self.config = None
self._platforms = {
'core': EntityPlatform(self, domain, self.scan_interval, 0, None),
}
self.async_add_entities = self._platforms['core'].async_add_entities
self.add_entities = self._platforms['core'].add_entities
def setup(self, config):
"""Set up a full entity component.
This doesn't block the executor to protect from deadlocks.
"""
self.hass.add_job(self.async_setup(config))
@asyncio.coroutine
def async_setup(self, config):
"""Set up a full entity component.
Loads the platforms from the config and will listen for supported
discovered platforms.
This method must be run in the event loop.
"""
self.config = config
# Look in config for Domain, Domain 2, Domain 3 etc and load them
tasks = []
for p_type, p_config in config_per_platform(config, self.domain):
tasks.append(self._async_setup_platform(p_type, p_config))
if tasks:
yield from asyncio.wait(tasks, loop=self.hass.loop)
# Generic discovery listener for loading platform dynamically
# Refer to: homeassistant.components.discovery.load_platform()
@callback
def component_platform_discovered(platform, info):
"""Handle the loading of a platform."""
self.hass.async_add_job(
self._async_setup_platform(platform, {}, info))
discovery.async_listen_platform(
self.hass, self.domain, component_platform_discovered)
def extract_from_service(self, service, expand_group=True):
"""Extract all known entities from a service call.
Will return all entities if no entities specified in call.
Will return an empty list if entities specified but unknown.
"""
return run_callback_threadsafe(
self.hass.loop, self.async_extract_from_service, service,
expand_group
).result()
@callback
def async_extract_from_service(self, service, expand_group=True):
"""Extract all known and available entities from a service call.
Will return all entities if no entities specified in call.
Will return an empty list if entities specified but unknown.
This method must be run in the event loop.
"""
if ATTR_ENTITY_ID not in service.data:
return [entity for entity in self.entities.values()
if entity.available]
return [self.entities[entity_id] for entity_id
in extract_entity_ids(self.hass, service, expand_group)
if entity_id in self.entities and
self.entities[entity_id].available]
@asyncio.coroutine
def _async_setup_platform(self, platform_type, platform_config,
discovery_info=None, tries=0):
"""Set up a platform for this component.
This method must be run in the event loop.
"""
platform = yield from async_prepare_setup_platform(
self.hass, self.config, self.domain, platform_type)
if platform is None:
return
# Config > Platform > Component
scan_interval = (
platform_config.get(CONF_SCAN_INTERVAL) or
getattr(platform, 'SCAN_INTERVAL', None) or self.scan_interval)
parallel_updates = getattr(
platform, 'PARALLEL_UPDATES',
int(not hasattr(platform, 'async_setup_platform')))
entity_namespace = platform_config.get(CONF_ENTITY_NAMESPACE)
key = (platform_type, scan_interval, entity_namespace)
if key not in self._platforms:
entity_platform = self._platforms[key] = EntityPlatform(
self, platform_type, scan_interval, parallel_updates,
entity_namespace)
else:
entity_platform = self._platforms[key]
self.logger.info("Setting up %s.%s", self.domain, platform_type)
warn_task = self.hass.loop.call_later(
SLOW_SETUP_WARNING, self.logger.warning,
"Setup of platform %s is taking over %s seconds.", platform_type,
SLOW_SETUP_WARNING)
try:
if getattr(platform, 'async_setup_platform', None):
task = platform.async_setup_platform(
self.hass, platform_config,
entity_platform.async_schedule_add_entities, discovery_info
)
else:
# This should not be replaced with hass.async_add_job because
# we don't want to track this task in case it blocks startup.
task = self.hass.loop.run_in_executor(
None, platform.setup_platform, self.hass, platform_config,
entity_platform.schedule_add_entities, discovery_info
)
yield from asyncio.wait_for(
asyncio.shield(task, loop=self.hass.loop),
SLOW_SETUP_MAX_WAIT, loop=self.hass.loop)
yield from entity_platform.async_block_entities_done()
self.hass.config.components.add(
'{}.{}'.format(self.domain, platform_type))
except PlatformNotReady:
tries += 1
wait_time = min(tries, 6) * 30
self.logger.warning(
'Platform %s not ready yet. Retrying in %d seconds.',
platform_type, wait_time)
async_track_point_in_time(
self.hass, self._async_setup_platform(
platform_type, platform_config, discovery_info, tries),
dt_util.utcnow() + timedelta(seconds=wait_time))
except asyncio.TimeoutError:
self.logger.error(
"Setup of platform %s is taking longer than %s seconds."
" Startup will proceed without waiting any longer.",
platform_type, SLOW_SETUP_MAX_WAIT)
except Exception: # pylint: disable=broad-except
self.logger.exception(
"Error while setting up platform %s", platform_type)
finally:
warn_task.cancel()
def add_entity(self, entity, platform=None, update_before_add=False):
"""Add entity to component."""
return run_coroutine_threadsafe(
self.async_add_entity(entity, platform, update_before_add),
self.hass.loop
).result()
@asyncio.coroutine
def async_add_entity(self, entity, platform=None, update_before_add=False):
"""Add entity to component.
This method must be run in the event loop.
"""
if entity is None or entity in self.entities.values():
return False
entity.hass = self.hass
# Update properties before we generate the entity_id
if update_before_add:
try:
yield from entity.async_device_update(warning=False)
except Exception: # pylint: disable=broad-except
self.logger.exception("Error on device update!")
return False
# Write entity_id to entity
if getattr(entity, 'entity_id', None) is None:
object_id = entity.name or DEVICE_DEFAULT_NAME
if platform is not None and platform.entity_namespace is not None:
object_id = '{} {}'.format(platform.entity_namespace,
object_id)
entity.entity_id = async_generate_entity_id(
self.entity_id_format, object_id,
self.entities.keys())
# Make sure it is valid in case an entity set the value themselves
if entity.entity_id in self.entities:
raise HomeAssistantError(
'Entity id already exists: {}'.format(entity.entity_id))
elif not valid_entity_id(entity.entity_id):
raise HomeAssistantError(
'Invalid entity id: {}'.format(entity.entity_id))
self.entities[entity.entity_id] = entity
if hasattr(entity, 'async_added_to_hass'):
yield from entity.async_added_to_hass()
yield from entity.async_update_ha_state()
return True
def update_group(self):
"""Set up and/or update component group."""
run_callback_threadsafe(
self.hass.loop, self.async_update_group).result()
@callback
def async_update_group(self):
"""Set up and/or update component group.
This method must be run in the event loop.
"""
if self.group_name is not None:
ids = sorted(self.entities,
key=lambda x: self.entities[x].name or x)
group = get_component('group')
group.async_set_group(
self.hass, slugify(self.group_name), name=self.group_name,
visible=False, entity_ids=ids
)
def reset(self):
"""Remove entities and reset the entity component to initial values."""
run_coroutine_threadsafe(self.async_reset(), self.hass.loop).result()
@asyncio.coroutine
def async_reset(self):
"""Remove entities and reset the entity component to initial values.
This method must be run in the event loop.
"""
tasks = [platform.async_reset() for platform
in self._platforms.values()]
if tasks:
yield from asyncio.wait(tasks, loop=self.hass.loop)
self._platforms = {
'core': self._platforms['core']
}
self.entities = {}
self.config = None
if self.group_name is not None:
group = get_component('group')
group.async_remove(self.hass, slugify(self.group_name))
def prepare_reload(self):
"""Prepare reloading this entity component."""
return run_coroutine_threadsafe(
self.async_prepare_reload(), loop=self.hass.loop).result()
@asyncio.coroutine
def async_prepare_reload(self):
"""Prepare reloading this entity component.
This method must be run in the event loop.
"""
try:
conf = yield from \
conf_util.async_hass_config_yaml(self.hass)
except HomeAssistantError as err:
self.logger.error(err)
return None
conf = conf_util.async_process_component_config(
self.hass, conf, self.domain)
if conf is None:
return None
yield from self.async_reset()
return conf
class EntityPlatform(object):
"""Keep track of entities for a single platform and stay in loop."""
def __init__(self, component, platform, scan_interval, parallel_updates,
entity_namespace):
"""Initialize the entity platform."""
self.component = component
self.platform = platform
self.scan_interval = scan_interval
self.parallel_updates = None
self.entity_namespace = entity_namespace
self.platform_entities = []
self._tasks = []
self._async_unsub_polling = None
self._process_updates = asyncio.Lock(loop=component.hass.loop)
if parallel_updates:
self.parallel_updates = asyncio.Semaphore(
parallel_updates, loop=component.hass.loop)
@asyncio.coroutine
def async_block_entities_done(self):
"""Wait until all entities add to hass."""
if self._tasks:
pending = [task for task in self._tasks if not task.done()]
self._tasks.clear()
if pending:
yield from asyncio.wait(pending, loop=self.component.hass.loop)
def schedule_add_entities(self, new_entities, update_before_add=False):
"""Add entities for a single platform."""
run_callback_threadsafe(
self.component.hass.loop,
self.async_schedule_add_entities, list(new_entities),
update_before_add
).result()
@callback
def async_schedule_add_entities(self, new_entities,
update_before_add=False):
"""Add entities for a single platform async."""
self._tasks.append(self.component.hass.async_add_job(
self.async_add_entities(
new_entities, update_before_add=update_before_add)
))
def add_entities(self, new_entities, update_before_add=False):
"""Add entities for a single platform."""
        # That avoids deadlocks
if update_before_add:
self.component.logger.warning(
"Call 'add_entities' with update_before_add=True "
"only inside tests or you can run into a deadlock!")
run_coroutine_threadsafe(
self.async_add_entities(list(new_entities), update_before_add),
self.component.hass.loop).result()
@asyncio.coroutine
def async_add_entities(self, new_entities, update_before_add=False):
"""Add entities for a single platform async.
This method must be run in the event loop.
"""
# handle empty list from component/platform
if not new_entities:
return
@asyncio.coroutine
def async_process_entity(new_entity):
"""Add entities to StateMachine."""
new_entity.parallel_updates = self.parallel_updates
ret = yield from self.component.async_add_entity(
new_entity, self, update_before_add=update_before_add
)
if ret:
self.platform_entities.append(new_entity)
tasks = [async_process_entity(entity) for entity in new_entities]
yield from asyncio.wait(tasks, loop=self.component.hass.loop)
self.component.async_update_group()
if self._async_unsub_polling is not None or \
not any(entity.should_poll for entity
in self.platform_entities):
return
self._async_unsub_polling = async_track_time_interval(
self.component.hass, self._update_entity_states, self.scan_interval
)
@asyncio.coroutine
def async_reset(self):
"""Remove all entities and reset data.
This method must be run in the event loop.
"""
if not self.platform_entities:
return
tasks = [entity.async_remove() for entity in self.platform_entities]
yield from asyncio.wait(tasks, loop=self.component.hass.loop)
if self._async_unsub_polling is not None:
self._async_unsub_polling()
self._async_unsub_polling = None
@asyncio.coroutine
def _update_entity_states(self, now):
"""Update the states of all the polling entities.
To protect from flooding the executor, we will update async entities
in parallel and other entities sequential.
This method must be run in the event loop.
"""
if self._process_updates.locked():
self.component.logger.warning(
"Updating %s %s took longer than the scheduled update "
"interval %s", self.platform, self.component.domain,
self.scan_interval)
return
with (yield from self._process_updates):
tasks = []
for entity in self.platform_entities:
if not entity.should_poll:
continue
tasks.append(entity.async_update_ha_state(True))
if tasks:
yield from asyncio.wait(tasks, loop=self.component.hass.loop)
| ewandor/home-assistant | homeassistant/helpers/entity_component.py | Python | apache-2.0 | 17,208 | 0.000058 |
# encoding: utf-8
from collections import namedtuple
import inspect
import keyword
import sys
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
import jedi
has_jedi = True
except ImportError:
has_jedi = False
from bpython import autocomplete
from bpython._py3compat import py3
from bpython.test import mock
is_py34 = sys.version_info[:2] >= (3, 4)
if is_py34:
glob_function = 'glob.iglob'
else:
glob_function = 'glob.glob'
class TestSafeEval(unittest.TestCase):
def test_catches_syntax_error(self):
with self.assertRaises(autocomplete.EvaluationError):
autocomplete.safe_eval('1re', {})
class TestFormatters(unittest.TestCase):
def test_filename(self):
completer = autocomplete.FilenameCompletion()
last_part_of_filename = completer.format
self.assertEqual(last_part_of_filename('abc'), 'abc')
self.assertEqual(last_part_of_filename('abc/'), 'abc/')
self.assertEqual(last_part_of_filename('abc/efg'), 'efg')
self.assertEqual(last_part_of_filename('abc/efg/'), 'efg/')
self.assertEqual(last_part_of_filename('/abc'), 'abc')
self.assertEqual(last_part_of_filename('ab.c/e.f.g/'), 'e.f.g/')
def test_attribute(self):
self.assertEqual(autocomplete.after_last_dot('abc.edf'), 'edf')
def completer(matches):
mock_completer = autocomplete.BaseCompletionType()
mock_completer.matches = mock.Mock(return_value=matches)
return mock_completer
class TestGetCompleter(unittest.TestCase):
def test_no_completers(self):
self.assertTupleEqual(autocomplete.get_completer([], 0, ''),
([], None))
def test_one_completer_without_matches_returns_empty_list_and_none(self):
a = completer([])
self.assertTupleEqual(autocomplete.get_completer([a], 0, ''),
([], None))
def test_one_completer_returns_matches_and_completer(self):
a = completer(['a'])
self.assertTupleEqual(autocomplete.get_completer([a], 0, ''),
(['a'], a))
def test_two_completers_with_matches_returns_first_matches(self):
a = completer(['a'])
b = completer(['b'])
self.assertEqual(autocomplete.get_completer([a, b], 0, ''), (['a'], a))
def test_first_non_none_completer_matches_are_returned(self):
a = completer([])
b = completer(['a'])
self.assertEqual(autocomplete.get_completer([a, b], 0, ''), ([], None))
def test_only_completer_returns_None(self):
a = completer(None)
self.assertEqual(autocomplete.get_completer([a], 0, ''), ([], None))
def test_first_completer_returns_None(self):
a = completer(None)
b = completer(['a'])
self.assertEqual(autocomplete.get_completer([a, b], 0, ''), (['a'], b))
class TestCumulativeCompleter(unittest.TestCase):
def completer(self, matches, ):
mock_completer = autocomplete.BaseCompletionType()
mock_completer.matches = mock.Mock(return_value=matches)
return mock_completer
def test_no_completers_fails(self):
with self.assertRaises(ValueError):
autocomplete.CumulativeCompleter([])
def test_one_empty_completer_returns_empty(self):
a = self.completer([])
cumulative = autocomplete.CumulativeCompleter([a])
self.assertEqual(cumulative.matches(3, 'abc'), set())
def test_one_none_completer_returns_none(self):
a = self.completer(None)
cumulative = autocomplete.CumulativeCompleter([a])
self.assertEqual(cumulative.matches(3, 'abc'), None)
def test_two_completers_get_both(self):
a = self.completer(['a'])
b = self.completer(['b'])
cumulative = autocomplete.CumulativeCompleter([a, b])
self.assertEqual(cumulative.matches(3, 'abc'), set(['a', 'b']))
class TestFilenameCompletion(unittest.TestCase):
def setUp(self):
self.completer = autocomplete.FilenameCompletion()
def test_locate_fails_when_not_in_string(self):
self.assertEqual(self.completer.locate(4, "abcd"), None)
def test_locate_succeeds_when_in_string(self):
self.assertEqual(self.completer.locate(4, "a'bc'd"), (2, 4, 'bc'))
def test_issue_491(self):
self.assertNotEqual(self.completer.matches(9, '"a[a.l-1]'), None)
@mock.patch(glob_function, new=lambda text: [])
def test_match_returns_none_if_not_in_string(self):
self.assertEqual(self.completer.matches(2, 'abcd'), None)
@mock.patch(glob_function, new=lambda text: [])
def test_match_returns_empty_list_when_no_files(self):
self.assertEqual(self.completer.matches(2, '"a'), set())
@mock.patch(glob_function, new=lambda text: ['abcde', 'aaaaa'])
@mock.patch('os.path.expanduser', new=lambda text: text)
@mock.patch('os.path.isdir', new=lambda text: False)
@mock.patch('os.path.sep', new='/')
def test_match_returns_files_when_files_exist(self):
self.assertEqual(sorted(self.completer.matches(2, '"x')),
['aaaaa', 'abcde'])
@mock.patch(glob_function, new=lambda text: ['abcde', 'aaaaa'])
@mock.patch('os.path.expanduser', new=lambda text: text)
@mock.patch('os.path.isdir', new=lambda text: True)
@mock.patch('os.path.sep', new='/')
def test_match_returns_dirs_when_dirs_exist(self):
self.assertEqual(sorted(self.completer.matches(2, '"x')),
['aaaaa/', 'abcde/'])
@mock.patch(glob_function,
new=lambda text: ['/expand/ed/abcde', '/expand/ed/aaaaa'])
@mock.patch('os.path.expanduser',
new=lambda text: text.replace('~', '/expand/ed'))
@mock.patch('os.path.isdir', new=lambda text: False)
@mock.patch('os.path.sep', new='/')
def test_tilde_stays_pretty(self):
self.assertEqual(sorted(self.completer.matches(4, '"~/a')),
['~/aaaaa', '~/abcde'])
@mock.patch('os.path.sep', new='/')
def test_formatting_takes_just_last_part(self):
self.assertEqual(self.completer.format('/hello/there/'), 'there/')
self.assertEqual(self.completer.format('/hello/there'), 'there')
class MockNumPy(object):
"""This is a mock numpy object that raises an error when there is an atempt
to convert it to a boolean."""
def __nonzero__(self):
raise ValueError("The truth value of an array with more than one "
"element is ambiguous. Use a.any() or a.all()")
class TestDictKeyCompletion(unittest.TestCase):
def test_set_of_keys_returned_when_matches_found(self):
com = autocomplete.DictKeyCompletion()
local = {'d': {"ab": 1, "cd": 2}}
self.assertSetEqual(com.matches(2, "d[", locals_=local),
set(["'ab']", "'cd']"]))
def test_none_returned_when_eval_error(self):
com = autocomplete.DictKeyCompletion()
local = {'e': {"ab": 1, "cd": 2}}
self.assertEqual(com.matches(2, "d[", locals_=local), None)
def test_none_returned_when_not_dict_type(self):
com = autocomplete.DictKeyCompletion()
local = {'l': ["ab", "cd"]}
self.assertEqual(com.matches(2, "l[", locals_=local), None)
def test_none_returned_when_no_matches_left(self):
com = autocomplete.DictKeyCompletion()
local = {'d': {"ab": 1, "cd": 2}}
self.assertEqual(com.matches(3, "d[r", locals_=local), None)
def test_obj_that_does_not_allow_conversion_to_bool(self):
com = autocomplete.DictKeyCompletion()
local = {'mNumPy': MockNumPy()}
self.assertEqual(com.matches(7, "mNumPy[", locals_=local), None)
class Foo(object):
a = 10
def __init__(self):
self.b = 20
def method(self, x):
pass
class OldStyleFoo:
a = 10
def __init__(self):
self.b = 20
def method(self, x):
pass
skip_old_style = unittest.skipIf(py3,
'In Python 3 there are no old style classes')
class Properties(Foo):
@property
def asserts_when_called(self):
raise AssertionError("getter method called")
class Slots(object):
__slots__ = ['a', 'b']
class TestAttrCompletion(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.com = autocomplete.AttrCompletion()
def test_att_matches_found_on_instance(self):
self.assertSetEqual(self.com.matches(2, 'a.', locals_={'a': Foo()}),
set(['a.method', 'a.a', 'a.b']))
@skip_old_style
def test_att_matches_found_on_old_style_instance(self):
self.assertSetEqual(self.com.matches(2, 'a.',
locals_={'a': OldStyleFoo()}),
set(['a.method', 'a.a', 'a.b']))
self.assertIn(u'a.__dict__',
self.com.matches(4, 'a.__',
locals_={'a': OldStyleFoo()}))
@skip_old_style
def test_att_matches_found_on_old_style_class_object(self):
self.assertIn(u'A.__dict__',
self.com.matches(4, 'A.__', locals_={'A': OldStyleFoo}))
@skip_old_style
def test_issue536(self):
class OldStyleWithBrokenGetAttr:
def __getattr__(self, attr):
raise Exception()
locals_ = {'a': OldStyleWithBrokenGetAttr()}
self.assertIn(u'a.__module__',
self.com.matches(4, 'a.__', locals_=locals_))
def test_descriptor_attributes_not_run(self):
com = autocomplete.AttrCompletion()
self.assertSetEqual(com.matches(2, 'a.', locals_={'a': Properties()}),
set(['a.b', 'a.a', 'a.method',
'a.asserts_when_called']))
def test_slots_not_crash(self):
com = autocomplete.AttrCompletion()
self.assertSetEqual(com.matches(2, 'A.', locals_={'A': Slots}),
set(['A.b', 'A.a', 'A.mro']))
class TestExpressionAttributeCompletion(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.com = autocomplete.ExpressionAttributeCompletion()
def test_att_matches_found_on_instance(self):
self.assertSetEqual(self.com.matches(5, 'a[0].',
locals_={'a': [Foo()]}),
set(['method', 'a', 'b']))
@skip_old_style
def test_att_matches_found_on_old_style_instance(self):
self.assertSetEqual(self.com.matches(5, 'a[0].',
locals_={'a': [OldStyleFoo()]}),
set(['method', 'a', 'b']))
def test_other_getitem_methods_not_called(self):
class FakeList(object):
def __getitem__(inner_self, i):
self.fail("possibly side-effecting __getitem_ method called")
self.com.matches(5, 'a[0].', locals_={'a': FakeList()})
def test_tuples_complete(self):
self.assertSetEqual(self.com.matches(5, 'a[0].',
locals_={'a': (Foo(),)}),
set(['method', 'a', 'b']))
@unittest.skip('TODO, subclasses do not complete yet')
def test_list_subclasses_complete(self):
class ListSubclass(list):
pass
self.assertSetEqual(self.com.matches(5, 'a[0].',
locals_={'a': ListSubclass([Foo()])}),
set(['method', 'a', 'b']))
def test_getitem_not_called_in_list_subclasses_overriding_getitem(self):
class FakeList(list):
def __getitem__(inner_self, i):
self.fail("possibly side-effecting __getitem_ method called")
self.com.matches(5, 'a[0].', locals_={'a': FakeList()})
def test_literals_complete(self):
self.assertSetEqual(self.com.matches(10, '[a][0][0].',
locals_={'a': (Foo(),)}),
set(['method', 'a', 'b']))
def test_dictionaries_complete(self):
self.assertSetEqual(self.com.matches(7, 'a["b"].',
locals_={'a': {'b': Foo()}}),
set(['method', 'a', 'b']))
class TestMagicMethodCompletion(unittest.TestCase):
def test_magic_methods_complete_after_double_underscores(self):
com = autocomplete.MagicMethodCompletion()
block = "class Something(object)\n def __"
self.assertSetEqual(com.matches(10, ' def __', current_block=block),
set(autocomplete.MAGIC_METHODS))
Comp = namedtuple('Completion', ['name', 'complete'])
@unittest.skipUnless(has_jedi, "jedi required")
class TestMultilineJediCompletion(unittest.TestCase):
def test_returns_none_with_single_line(self):
com = autocomplete.MultilineJediCompletion()
self.assertEqual(com.matches(2, 'Va', current_block='Va', history=[]),
None)
    def test_returns_none_with_blank_second_line(self):
com = autocomplete.MultilineJediCompletion()
self.assertEqual(com.matches(0, '', current_block='class Foo():\n',
history=['class Foo():']), None)
def matches_from_completions(self, cursor, line, block, history,
completions):
with mock.patch('bpython.autocomplete.jedi.Script') as Script:
script = Script.return_value
script.completions.return_value = completions
com = autocomplete.MultilineJediCompletion()
return com.matches(cursor, line, current_block=block,
history=history)
def test_completions_starting_with_different_letters(self):
matches = self.matches_from_completions(
2, ' a', 'class Foo:\n a', ['adsf'],
[Comp('Abc', 'bc'), Comp('Cbc', 'bc')])
self.assertEqual(matches, None)
def test_completions_starting_with_different_cases(self):
matches = self.matches_from_completions(
2, ' a', 'class Foo:\n a', ['adsf'],
[Comp('Abc', 'bc'), Comp('ade', 'de')])
self.assertSetEqual(matches, set(['ade']))
@unittest.skipUnless(is_py34, 'asyncio required')
def test_issue_544(self):
com = autocomplete.MultilineJediCompletion()
code = '@asyncio.coroutine\ndef'
history = ('import asyncio', '@asyncio.coroutin')
com.matches(3, 'def', current_block=code, history=history)
class TestGlobalCompletion(unittest.TestCase):
def setUp(self):
self.com = autocomplete.GlobalCompletion()
def test_function(self):
def function():
pass
self.assertEqual(self.com.matches(8, 'function',
locals_={'function': function}),
set(('function(', )))
def test_completions_are_unicode(self):
for m in self.com.matches(1, 'a', locals_={'abc': 10}):
self.assertIsInstance(m, type(u''))
@unittest.skipIf(py3, "in Python 3 invalid identifiers are passed through")
def test_ignores_nonascii_encodable(self):
self.assertEqual(self.com.matches(3, 'abc', locals_={'abcß': 10}),
None)
def test_mock_kwlist(self):
with mock.patch.object(keyword, 'kwlist', new=['abcd']):
self.assertEqual(self.com.matches(3, 'abc', locals_={}), None)
def test_mock_kwlist_non_ascii(self):
with mock.patch.object(keyword, 'kwlist', new=['abcß']):
self.assertEqual(self.com.matches(3, 'abc', locals_={}), None)
class TestParameterNameCompletion(unittest.TestCase):
def test_set_of_params_returns_when_matches_found(self):
def func(apple, apricot, banana, carrot):
pass
if py3:
argspec = list(inspect.getfullargspec(func))
else:
argspec = list(inspect.getargspec(func))
argspec = ["func", argspec, False]
com = autocomplete.ParameterNameCompletion()
self.assertSetEqual(com.matches(1, "a", argspec=argspec),
set(['apple=', 'apricot=']))
self.assertSetEqual(com.matches(2, "ba", argspec=argspec),
set(['banana=']))
self.assertSetEqual(com.matches(3, "car", argspec=argspec),
set(['carrot=']))
| MarkWh1te/xueqiu_predict | python3_env/lib/python3.4/site-packages/bpython/test/test_autocomplete.py | Python | mit | 16,423 | 0 |
# -*- coding: utf-8 -*-
"""Page model for Automation/Ansible/Repositories"""
import attr
from navmazing import NavigateToAttribute, NavigateToSibling
from widgetastic.exceptions import NoSuchElementException
from widgetastic.widget import Checkbox, Fillable, ParametrizedView, Text, View
from widgetastic_manageiq import PaginationPane, ParametrizedSummaryTable, Table
from widgetastic_patternfly import Button, Dropdown, Input
from cfme.base.login import BaseLoggedInPage
from cfme.common import Taggable, TagPageView
from cfme.exceptions import ItemNotFound
from cfme.modeling.base import BaseCollection, BaseEntity
from cfme.utils.appliance.implementations.ui import navigator, navigate_to, CFMENavigateStep
from cfme.utils.wait import wait_for
from .playbooks import PlaybooksCollection
class RepositoryBaseView(BaseLoggedInPage):
title = Text(locator='.//div[@id="main-content"]//h1')
@property
def in_ansible_repositories(self):
return (
self.logged_in_as_current_user and
self.navigation.currently_selected == ["Automation", "Ansible", "Repositories"]
)
class RepositoryAllView(RepositoryBaseView):
@View.nested
class toolbar(View): # noqa
configuration = Dropdown("Configuration")
policy = Dropdown(text='Policy')
entities = Table(".//div[@id='gtl_div']//table")
paginator = PaginationPane()
@property
def is_displayed(self):
return self.in_ansible_repositories and self.title.text == "Repositories"
class RepositoryDetailsView(RepositoryBaseView):
@View.nested
class toolbar(View): # noqa
refresh = Button(title="Refresh this page")
configuration = Dropdown("Configuration")
download = Button(title="Download summary in PDF format")
policy = Dropdown(text='Policy')
@View.nested
class entities(View): # noqa
summary = ParametrizedView.nested(ParametrizedSummaryTable)
@property
def is_displayed(self):
return (
self.in_ansible_repositories and
self.title.text == "{} (Summary)".format(self.context["object"].name)
)
class RepositoryFormView(RepositoryBaseView):
name = Input(name="name")
description = Input(name="description")
url = Input(name="scm_url")
scm_credentials = Dropdown("Select credentials")
scm_branch = Input(name="scm_branch")
# SCM Update Options
clean = Checkbox(name="clean")
delete_on_update = Checkbox(name="scm_delete_on_update")
update_on_launch = Checkbox(name="scm_update_on_launch")
cancel_button = Button("Cancel")
class RepositoryAddView(RepositoryFormView):
add_button = Button("Add")
@property
def is_displayed(self):
return (
self.in_ansible_repositories and
self.title.text == "Add new Repository"
)
class RepositoryEditView(RepositoryFormView):
save_button = Button("Save")
reset_button = Button("Reset")
@property
def is_displayed(self):
return (
self.in_ansible_repositories and
self.title.text == 'Edit Repository "{}"'.format(self.context["object"].name)
)
@attr.s
class Repository(BaseEntity, Fillable, Taggable):
"""A class representing one Embedded Ansible repository in the UI."""
name = attr.ib()
url = attr.ib()
description = attr.ib(default="")
scm_credentials = attr.ib(default=None)
scm_branch = attr.ib(default=False)
clean = attr.ib(default=False)
delete_on_update = attr.ib(default=False)
update_on_launch = attr.ib(default=None)
_collections = {'playbooks': PlaybooksCollection}
@property
def db_object(self):
table = self.appliance.db.client["configuration_script_sources"]
return self.appliance.db.client.sessionmaker(autocommit=True).query(table).filter(
table.name == self.name).first()
@property
def playbooks(self):
return self.collections.playbooks
@property
def as_fill_value(self):
"""For use when selecting this repo in the UI forms"""
return self.name
def update(self, updates):
"""Update the repository in the UI.
Args:
updates (dict): :py:class:`dict` of the updates.
"""
original_updated_at = self.db_object.updated_at
view = navigate_to(self, "Edit")
changed = view.fill(updates)
if changed:
view.save_button.click()
else:
view.cancel_button.click()
view = self.create_view(RepositoryAllView)
assert view.is_displayed
view.flash.assert_no_error()
if changed:
if self.appliance.version < "5.9":
msg = 'Edit of Repository "{}" was successfully initialized.'
else:
msg = 'Edit of Repository "{}" was successfully initiated.'
view.flash.assert_message(msg.format(updates.get("name", self.name)))
def _wait_until_changes_applied():
changed_updated_at = self.db_object.updated_at
return not original_updated_at == changed_updated_at
wait_for(_wait_until_changes_applied, delay=10, timeout="5m")
else:
view.flash.assert_message(
'Edit of Repository "{}" cancelled by the user.'.format(self.name))
@property
def exists(self):
try:
navigate_to(self, "Details")
return True
except ItemNotFound:
return False
def delete(self):
"""Delete the repository in the UI."""
view = navigate_to(self, "Details")
if self.appliance.version < "5.9":
remove_str = "Remove this Repository"
else:
remove_str = "Remove this Repository from Inventory"
view.toolbar.configuration.item_select(remove_str, handle_alert=True)
repo_list_page = self.create_view(RepositoryAllView)
assert repo_list_page.is_displayed
repo_list_page.flash.assert_no_error()
repo_list_page.flash.assert_message(
'Delete of Repository "{}" was successfully initiated.'.format(self.name))
wait_for(
lambda: not self.exists,
delay=10,
timeout=120,
fail_func=repo_list_page.browser.selenium.refresh)
def refresh(self):
"""Perform a refresh to update the repository."""
view = navigate_to(self, "Details")
view.toolbar.configuration.item_select("Refresh this Repository", handle_alert=True)
view.flash.assert_no_error()
view.flash.assert_message("Embedded Ansible refresh has been successfully initiated")
@attr.s
class RepositoryCollection(BaseCollection):
"""Collection object for the :py:class:`cfme.ansible.repositories.Repository`."""
ENTITY = Repository
def create(self, name, url, description=None, scm_credentials=None, scm_branch=None,
clean=None, delete_on_update=None, update_on_launch=None):
"""Add an ansible repository in the UI and return a Repository object.
Args:
name (str): name of the repository
url (str): url of the repository
description (str): description of the repository
scm_credentials (str): credentials of the repository
scm_branch (str): branch name
clean (bool): clean
delete_on_update (bool): delete the repo at each update
update_on_launch (bool): update the repo at each launch
Returns: an instance of :py:class:`cfme.ansible.repositories.Repository`
"""
add_page = navigate_to(self, "Add")
fill_dict = {
"name": name,
"description": description,
"url": url,
"scm_credentials": scm_credentials,
"scm_branch": scm_branch,
"clean": clean,
"delete_on_update": delete_on_update,
"update_on_launch": update_on_launch
}
add_page.fill(fill_dict)
add_page.add_button.click()
repo_list_page = self.create_view(RepositoryAllView)
assert repo_list_page.is_displayed
repo_list_page.flash.assert_no_error()
if self.appliance.version < "5.9.2":
initiated_message = 'Add of Repository "{}" was successfully initialized.'.format(name)
else:
initiated_message = 'Add of Repository "{}" was successfully initiated.'.format(name)
repo_list_page.flash.assert_message(initiated_message)
repository = self.instantiate(
name,
url,
description=description,
scm_credentials=scm_credentials,
scm_branch=scm_branch,
clean=clean,
delete_on_update=delete_on_update,
update_on_launch=update_on_launch)
wait_for(lambda: repository.exists,
fail_func=repo_list_page.browser.selenium.refresh,
delay=5,
timeout=900)
return repository
def all(self):
"""Return all repositories of the appliance.
Returns: a :py:class:`list` of :py:class:`cfme.ansible.repositories.Repository` instances
"""
table = self.appliance.db.client["configuration_script_sources"]
result = []
for row in self.appliance.db.client.session.query(table):
result.append(
self.instantiate(
row.name,
row.scm_url,
description=row.description,
scm_branch=row.scm_branch,
clean=row.scm_clean,
delete_on_update=row.scm_delete_on_update,
update_on_launch=row.scm_update_on_launch)
)
return result
def delete(self, *repositories):
"""Delete one or more ansible repositories in the UI.
Args:
repositories: a list of :py:class:`cfme.ansible.repositories.Repository`
instances to delete
Raises:
ValueError: if some of the repositories were not found in the UI
"""
repositories = list(repositories)
checked_repositories = []
view = navigate_to(self.appliance.server, "AnsibleRepositories")
view.paginator.uncheck_all()
if not view.entities.is_displayed:
raise ValueError("No repository found!")
for row in view.entities:
for repository in repositories:
if repository.name == row.name.text:
checked_repositories.append(repository)
row[0].check()
break
if set(repositories) == set(checked_repositories):
break
if set(repositories) != set(checked_repositories):
raise ValueError("Some of the repositories were not found in the UI.")
view.toolbar.configuration.item_select("Remove selected Repositories", handle_alert=True)
view.flash.assert_no_error()
for repository in checked_repositories:
view.flash.assert_message(
'Delete of Repository "{}" was successfully initiated.'.format(repository.name))
@navigator.register(RepositoryCollection, 'All')
class AnsibleRepositories(CFMENavigateStep):
VIEW = RepositoryAllView
prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')
def step(self):
self.view.navigation.select("Automation", "Ansible", "Repositories")
@navigator.register(Repository, 'Details')
class Details(CFMENavigateStep):
VIEW = RepositoryDetailsView
prerequisite = NavigateToAttribute('parent', 'All')
def step(self):
try:
row = self.prerequisite_view.paginator.find_row_on_pages(
table=self.prerequisite_view.entities,
name=self.obj.name)
row.click()
except NoSuchElementException:
raise ItemNotFound('Could not locate ansible repository table row with name {}'
.format(self.obj.name))
@navigator.register(RepositoryCollection, 'Add')
class Add(CFMENavigateStep):
VIEW = RepositoryAddView
prerequisite = NavigateToSibling("All")
def step(self):
# workaround for disabled Dropdown
dropdown = self.prerequisite_view.toolbar.configuration
wait_for(
dropdown.item_enabled,
func_args=["Add New Repository"],
timeout=60,
fail_func=self.prerequisite_view.browser.refresh
)
dropdown.item_select("Add New Repository")
@navigator.register(Repository, "Edit")
class Edit(CFMENavigateStep):
VIEW = RepositoryEditView
prerequisite = NavigateToSibling("Details")
def step(self):
self.prerequisite_view.toolbar.configuration.item_select("Edit this Repository")
@navigator.register(Repository, 'EditTags')
class EditTagsFromListCollection(CFMENavigateStep):
VIEW = TagPageView
prerequisite = NavigateToAttribute('parent', 'All')
def step(self):
try:
row = self.prerequisite_view.paginator.find_row_on_pages(
table=self.prerequisite_view.entities,
name=self.obj.name)
row[0].click()
except NoSuchElementException:
raise ItemNotFound('Could not locate ansible repository table row with name {}'
.format(self.obj.name))
self.prerequisite_view.toolbar.policy.item_select('Edit Tags')
| anurag03/integration_tests | cfme/ansible/repositories.py | Python | gpl-2.0 | 13,574 | 0.001842 |
from color import Color
from orientation import Orientation
from tile import (
Terrain,
Harbor,
)
CENTER_TILE_TEMPLATE = [
list(' + -- + '),
list(' / \ '),
list('+ +'),
list(' \ / '),
list(' + -- + '),
]
BORDER_TILE_TEMPLATE = [
list(' | -- | '),
list(' - - '),
list(' | | '),
list(' - - '),
list(' | -- | '),
]
NUMBER_SPACES = [
(2, 4), (2, 5)
]
PERIMETER_SPACES = [
(0, 2), (0, 4),
(0, 5), (0, 7),
(1, 1), (1, 8),
(2, 0), (2, 2),
(2, 7), (2, 9),
(3, 1), (3, 8),
(4, 2), (4, 4),
(4, 5), (4, 7),
]
RESOURCE_SPACES = [
(1, 3), (1, 4),
(1, 5), (1, 6),
(2, 2), (2, 7),
(3, 3), (3, 4),
(3, 5), (3, 6),
]
# TODO: upforgrabs
# Fix ports to work with all size boards
# HARBOR_BRIDGE_SPACES = {
# Orientation.NORTH_EAST: [(2, 7), (1, 6)],
# Orientation.NORTH: [(1, 6), (1, 3)],
# Orientation.NORTH_WEST: [(1, 3), (2, 2)],
# Orientation.SOUTH_WEST: [(2, 2), (3, 3)],
# Orientation.SOUTH: [(3, 3), (3, 6)],
# Orientation.SOUTH_EAST: [(3, 6), (2, 7)],
# }
def remove_border_characters(board, coordinate, diff, tile_grid):
# First, calculate some helper values
helper_value_one = board.size.width // 2
helper_value_two = board.size.height - helper_value_one
# Top vertical ticks
if (
coordinate.row == -1 or
coordinate.column == -1
):
tile_grid[0][2] = ' '
tile_grid[0][7] = ' '
# Top horizonal ticks
else:
tile_grid[0][4] = ' '
tile_grid[0][5] = ' '
# Bottom vertical ticks
if (
coordinate.row == board.size.height or
coordinate.column == board.size.height
):
tile_grid[4][2] = ' '
tile_grid[4][7] = ' '
# Bottom horizonal ticks
else:
tile_grid[4][4] = ' '
tile_grid[4][5] = ' '
# Upper left single tick
if not (
coordinate.column == -1 and
coordinate.row < helper_value_one
):
tile_grid[1][1] = ' '
# Upper right single tick
if not (
coordinate.row == -1 and
coordinate.column < helper_value_one
):
tile_grid[1][8] = ' '
# Bottom left single tick
if not (
coordinate.row == board.size.height and
helper_value_two <= coordinate.column
):
tile_grid[3][1] = ' '
# Bottom right single tick
if not (
coordinate.column == board.size.height and
helper_value_two <= coordinate.row
):
tile_grid[3][8] = ' '
# Left vertical ticks
if abs(diff) <= helper_value_one or diff < 0:
tile_grid[0][2] = ' '
tile_grid[2][2] = ' '
tile_grid[4][2] = ' '
# Right vertical ticks
if abs(diff) <= helper_value_one or 0 < diff:
tile_grid[0][7] = ' '
tile_grid[2][7] = ' '
tile_grid[4][7] = ' '
return tile_grid
def copy_grid(grid):
return [[char for char in row] for row in grid]
def grid_to_str(grid):
return '\n'.join(''.join(row) for row in grid)
def str_to_grid(string):
return [[c for c in line] for line in string.split('\n')]
def get_tile_grid(tile, tile_grid):
tile_grid = copy_grid(tile_grid)
tile_grid = replace_numbers(tile, tile_grid)
tile_grid = replace_perimeter(tile, tile_grid)
tile_grid = replace_resources(tile, tile_grid)
return tile_grid
def replace_numbers(tile, tile_grid):
if isinstance(tile, Harbor):
return tile_grid
if not tile.number:
return tile_grid
if isinstance(tile, Terrain):
number_string = str(tile.number).zfill(len(NUMBER_SPACES))
tile_grid = copy_grid(tile_grid)
for row, col in NUMBER_SPACES:
index = col - min(NUMBER_SPACES)[1]
tile_grid[row][col] = number_string[index]
return tile_grid
def replace_perimeter(tile, tile_grid):
tile_grid = copy_grid(tile_grid)
for row, col in PERIMETER_SPACES:
colored = Color.GRAY.apply(tile_grid[row][col])
tile_grid[row][col] = colored
# TODO: upforgrabs
# Fix ports to work with all size boards
# if isinstance(tile, Harbor) and tile.orientation:
# spaces = HARBOR_BRIDGE_SPACES[tile.orientation]
# for row, col in spaces:
# char = '-'
# if row != 2:
# char = '\\' if (row == 1) == (col == 3) else '/'
# tile_grid[row][col] = Color.GRAY.apply(char)
return tile_grid
def replace_resources(tile, tile_grid):
if isinstance(tile, Terrain):
if not tile.resource:
return tile_grid
spaces = RESOURCE_SPACES
if isinstance(tile, Harbor):
# TODO: upforgrabs
# Fix ports to work with all size boards
# if not tile.orientation:
# return tile_grid
return tile_grid
spaces = NUMBER_SPACES
char = '?'
if tile.resource:
char = tile.resource.color.apply(tile.resource.char)
tile_grid = copy_grid(tile_grid)
for row, col in spaces:
tile_grid[row][col] = char
return tile_grid
class View(object):
def __init__(self, board):
self.board = board
def __str__(self):
return grid_to_str(self.get_board_grid())
def get_board_grid(self):
# Add two to the height and width of the
# board to account for the perimeter tiles
num_tiles_tall = self.board.size.height + 2
num_tiles_wide = self.board.size.width + 2
# The number of characters tall and wide for the tile grid
tile_grid_height = len(CENTER_TILE_TEMPLATE)
tile_grid_narrow = len(''.join(CENTER_TILE_TEMPLATE[0]).strip())
tile_grid_wide = len(''.join(CENTER_TILE_TEMPLATE[2]).strip())
# The number of characters tall and wide for the board grid
total_grid_height = (tile_grid_height - 1) * num_tiles_tall + 1
total_grid_width = (
(num_tiles_wide // 2 + 1) * (tile_grid_wide - 1) +
(num_tiles_wide // 2 ) * (tile_grid_narrow - 1) + 1
)
# Create a 2D array of empty spaces, large enough to
# contain all characters for all tiles (but no larger)
board_grid = [
[' ' for i in range(total_grid_width)]
for j in range(total_grid_height)
]
# For all board tiles ...
for coordinate, tile in self.board.tiles.items():
# ... determine some intermediate values ...
# Note: We add +1 here to account for perimeter tiles
sum_ = (coordinate.row + 1) + (coordinate.column + 1)
diff = (coordinate.row + 1) - (coordinate.column + 1)
# ... and use them to figure the location of the upper
# left corner of the tile grid within the board grid ...
spaces_from_top = sum_ * (tile_grid_height // 2)
spaces_from_left = (
((num_tiles_wide // 2) - diff) *
((tile_grid_wide + tile_grid_narrow) // 2 - 1)
)
# ... then retrieve the base tile grid for the tile ...
template = (
CENTER_TILE_TEMPLATE if
isinstance(tile, Terrain) else
remove_border_characters(
board=self.board,
coordinate=coordinate,
diff=diff,
tile_grid=copy_grid(BORDER_TILE_TEMPLATE),
)
)
# ... and then replace the blank characters in the board
# grid with the correct characters from the tile grid
tile_grid = get_tile_grid(tile, template)
for i, tile_line in enumerate(tile_grid):
for j, char in enumerate(tile_line):
if ' ' not in char:
row = board_grid[spaces_from_top + i]
row[spaces_from_left + j] = char
# Trim extra columns off front and back of the grid
board_grid = [row[2:-2] for row in board_grid]
return board_grid
| mackorone/catan | src/view.py | Python | mit | 8,044 | 0.001119 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Scaled')
from data_method_V import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
###### Sanity Check ######
i=0
n=0
while i < 123:
j=0
while j < 140:
if X[i,j] != X[i,j]:
print X[i,j]
print i,j
n=n+1
j = j+1
i=i+1
print n
##########################
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
def my_mvpa(Y,num2):
#Using PYMVPA
PCA_data = np.array(Y)
PCA_label_2 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
clf = kNN(k=num2)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_2)
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
return (1-error)*100
def result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC):
# Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
W = eigvec_total[:,0:num_PC]
m_W, n_W = np.shape(W)
# Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
#Projected Data:
Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
m_Y, n_Y = np.shape(Y.T)
return Y.T
if __name__ == '__main__':
Fmat = Fmat_original
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
#Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
num_PC=1
while num_PC <=20:
Proj = np.zeros((140,num_PC))
Proj = result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC)
# PYMVPA:
num=0
cv_acc = np.zeros(21)
while num <=20:
cv_acc[num] = my_mvpa(Proj,num)
num = num+1
plot(np.arange(21),cv_acc,'-s')
grid('True')
hold('True')
num_PC = num_PC+1
legend(('1-PC', '2-PCs', '3-PCs', '4-PCs', '5-PCs', '6-PCs', '7-PCs', '8-PCs', '9-PCs', '10-PCs', '11-PC', '12-PCs', '13-PCs', '14-PCs', '15-PCs', '16-PCs', '17-PCs', '18-PCs', '19-PCs', '20-PCs'))
ylabel('Cross-Validation Accuracy')
xlabel('k in k-NN Classifier')
show()
| tapomayukh/projects_in_python | classification/Classification_with_kNN/Single_Contact_Classification/Scaled_Features/best_kNN_PCA/objects/test11_cross_validate_objects_1200ms_scaled_method_v.py | Python | mit | 4,915 | 0.020753 |
import logging
import pickle
from time import time
from hashlib import md5
from base64 import urlsafe_b64encode
from os import urandom
import redis
from flask import Flask, request, render_template
import config
logging.basicConfig(level=logging.DEBUG)
app = Flask(__name__,static_folder='public')
r = redis.StrictRedis(
host=config.REDIS_HOST,
port=config.REDIS_PORT,
db=config.REDIS_DB,
password=config.REDIS_PASSWORD
)
@app.route('/set', methods=['post'])
def setPass():
assert request.method == 'POST'
password = request.form['pass']
iv = request.form['iv']
uuid = urlsafe_b64encode(md5(urandom(128)).digest())[:8].decode('utf-8')
with r.pipeline() as pipe:
data = {'status': 'ok', 'iv': iv, 'pass': password}
pipe.set(uuid, pickle.dumps(data))
pipe.expire(uuid, config.TTL)
pipe.execute()
return '/get/{}'.format(uuid)
@app.route('/get/<uuid>', methods=['get'])
def getPass(uuid):
with r.pipeline() as pipe:
raw_data = r.get(uuid)
if not raw_data:
return render_template('expired.html')
data = pickle.loads(raw_data)
if data['status'] == 'ok':
new_data = {'status': 'withdrawn', 'time': int(time()), 'ip': request.remote_addr}
r.set(uuid, pickle.dumps(new_data))
return render_template('get.html', data=data['iv'] + '|' + data['pass'])
if data['status'] == 'withdrawn':
return render_template('withdrawn.html')
@app.route('/', methods=['get'])
def index():
ttl = int(config.TTL/60)
return render_template('index.html', ttl=ttl)
if __name__ == '__main__':
try:
port = config.APP_PORT
except AttributeError:
port = 5000
try:
host = config.APP_HOST
except AttributeError:
host = '127.0.0.1'
app.run(host=host, port=port)
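

# --- Usage sketch (editor addition, not part of the original app) ---
# Rough client for the two endpoints above, assuming the service runs at
# http://127.0.0.1:5000 and that the `requests` package is available. The
# 'pass'/'iv' values are placeholders for whatever the front-end encryption
# normally produces.
def _demo_client(base='http://127.0.0.1:5000'):
    import requests
    # Store a secret; the server replies with a one-time path such as "/get/AbCdEfGh".
    resp = requests.post(base + '/set', data={'pass': 'ciphertext-here', 'iv': 'iv-here'})
    one_time_path = resp.text.strip()
    # The first retrieval renders get.html with the stored data ...
    first = requests.get(base + one_time_path)
    # ... any later retrieval renders withdrawn.html (or expired.html after TTL).
    second = requests.get(base + one_time_path)
    return first.status_code, second.status_code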
| skooda/passIon | index.py | Python | mit | 1,879 | 0.002129 |
# Put libraries such as Divisi in the PYTHONPATH.
import sys, pickle, os
sys.path = ['/stuff/openmind'] + sys.path
from csc.divisi.cnet import *
from csc.divisi.graphics import output_svg
from vendor_db import iter_info
from csamoa.corpus.models import *
from csamoa.conceptnet.models import *
# Load the OMCS language model
en = Language.get('en')
en_nl=get_nl('en')
# Load OMCS stopwords
sw = open('stopwords.txt', 'r')
swords = [x.strip() for x in sw.readlines()]
# Parameters
factor = 1
wsize = 2
def check_concept(concept):
try:
Concept.get(concept, 'en')
return True
except:
return False
def english_window(text):
windows = []
words = [x for x in text.lower().replace('&', 'and').split() if x not in swords]
for x in range(len(words)-wsize+1):
pair = " ".join(words[x:x+wsize])
if check_concept(pair): windows.append(pair)
if check_concept(words[x]): windows.append(words[x])
for c in range(wsize-1):
if check_concept(words[c]): windows.append(words[c])
return windows
if 'vendor_only.pickle' in os.listdir('.'):
print "Loading saved matrix."
matrix = pickle.load(open("vendor_only.pickle"))
else:
print "Creating New Tensor"
matrix = SparseLabeledTensor(ndim=2)
print "Adding Vendors"
for co, englist in iter_info('CFB_Cities'):
print co
for phrase in englist:
parts = english_window(phrase)
print parts
for part in parts:
matrix[co, ('sells', part)] += factor
matrix[part, ('sells_inv', co)] += factor
pickle.dump(matrix, open("vendor_only.pickle", 'w'))
print "Normalizing."
matrix = matrix.normalized()
print "Matrix constructed. Running SVD."
svd = matrix.svd(k=10)
svd.summarize()
output_svg(svd.u, "vendorplain.svg", xscale=3000, yscale=3000, min=0.03)
| commonsense/divisi | doc/demo/vendor_only_svd.py | Python | gpl-3.0 | 1,871 | 0.008017 |
import os
busybox_tar_path = os.path.join(os.path.dirname(__file__), '../../../data/busyboxlight.tar')
# these are in correct ancestry order
busybox_ids = (
'769b9341d937a3dba9e460f664b4f183a6cecdd62b337220a28b3deb50ee0a02',
'48e5f45168b97799ad0aafb7e2fef9fac57b5f16f6db7f67ba2000eb947637eb',
'bf747efa0e2fa9f7c691588ce3938944c75607a7bb5e757f7369f86904d97c78',
'511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158',
)
| rbarlow/pulp_docker | plugins/test/unit/plugins/importers/data.py | Python | gpl-2.0 | 450 | 0.002222 |
import sys
import argparse
from svtools.external_cmd import ExternalCmd
class BedpeSort(ExternalCmd):
def __init__(self):
super(BedpeSort, self).__init__('bedpesort', 'bin/bedpesort')
def description():
return 'sort a BEDPE file'
def epilog():
return 'To read in stdin and output to a file, use /dev/stdin or - as the first positional argument.'
def add_arguments_to_parser(parser):
parser.add_argument('input', metavar='<BEDPE file>', nargs='?', help='BEDPE file to sort')
parser.add_argument('output', metavar='<output file>', nargs='?', help='output file to write to')
parser.set_defaults(entry_point=run_from_args)
def command_parser():
parser = argparse.ArgumentParser(description=description())
add_arguments_to_parser(parser)
return parser
def run_from_args(args):
opts = list()
if args.input:
opts.append(args.input)
if args.output:
opts.append(args.output)
sort_cmd_runner = BedpeSort()
sort_cmd_runner.run_cmd_with_options(opts)
if __name__ == "__main__":
parser = command_parser()
args = parser.parse_args()
sys.exit(args.entry_point(args))
| hall-lab/svtools | svtools/bedpesort.py | Python | mit | 1,152 | 0.008681 |
#!/usr/bin/env python
#coding=utf-8
'''
Created on 2010-4-27
GPL License
@author: [email protected]
'''
import urllib,pickle,StringIO
from micolog_plugin import *
from google.appengine.ext import db
from model import OptionSet,Comment,Blog,Entry,Blog
from google.appengine.api import urlfetch
class akismet(Plugin):
def __init__(self):
Plugin.__init__(self,__file__)
self.author="sypxue"
self.authoruri="http://sypxue.appspot.com"
self.uri="http://sypxue.appspot.com"
		self.description="""A micolog port of the Akismet plugin that ships with WordPress. It can filter spam, submit spam, and un-mark spam; it works as soon as it is enabled, or you can supply your own Akismet API key. Author: [email protected]"""
self.name="Akismet"
self.version="0.3.2"
self.AKISMET_VERSION = "2.2.7"
self.AKISMET_default_Key = "80e9452f5962"
self.register_action("pre_comment",self.pre_comment)
self.register_action("save_comment",self.save_comment)
def comment_handler(self,comment,action,*arg1,**arg2):
		# rm indicates whether detected spam comments should be removed automatically
rm=OptionSet.getValue("Akismet_AutoRemove",False)
if action=='pre' and rm!=True:
return
elif action=='save' and rm==True:
return
url = arg2['blog'].baseurl
user_agent = os.environ.get('HTTP_USER_AGENT','')
referrer = os.environ.get('HTTP_REFERER', 'unknown')
AkismetItem = {
'user_agent':user_agent,
'referrer':referrer,
'user_ip' : comment.ip,
'comment_type' : 'comment',
'comment_author' : comment.author.encode('utf-8'),
'comment_author_email' : comment.email,
'comment_author_url' : comment.weburl,
'comment_content' : comment.content.encode('utf-8')
}
apikey = OptionSet.getValue("Akismet_code",default=self.AKISMET_default_Key)
if len(apikey)<5:
apikey = self.AKISMET_default_Key
m = AkismetManager(apikey,url)
if m.IsSpam(AkismetItem):
if rm==True:
raise Exception
sComments=OptionSet.getValue("Akismet_Comments_v0.3",[])
if type(sComments)!=type([]):
sComments=[]
db.Model.put(comment)
sComments.append({'key':(str(comment.key()),str(comment.entry.key())),
'other':{'user_agent':user_agent,'referrer':referrer,'url':url}})
OptionSet.setValue("Akismet_Comments_v0.3",
sComments)
comment.entry.commentcount-=1
comment.entry.put()
e,comment.entry = comment.entry,None
try:
db.Model.put(comment)
comment.entry = e
except:
pass
def pre_comment(self,comment,*arg1,**arg2):
self.comment_handler(comment,'pre',*arg1,**arg2)
def save_comment(self,comment,*arg1,**arg2):
self.comment_handler(comment,'save',*arg1,**arg2)
def filter(self,content,*arg1,**arg2):
code=OptionSet.getValue("Akismet_code",default="")
return content+str(code)
def SubmitAkismet(self,item,url,f):
apikey = OptionSet.getValue("Akismet_code",default=self.AKISMET_default_Key)
if len(apikey)<5:
apikey = self.AKISMET_default_Key
m = AkismetManager(apikey,url)
try:
if f=="Ham":
m.SubmitHam(item)
elif f=="Spam":
m.SubmitSpam(item)
except:
pass
def get(self,page):
code=OptionSet.getValue("Akismet_code",default="")
up=OptionSet.getValue("Akismet_Comments_v0.3",default=[])
rm=OptionSet.getValue("Akismet_AutoRemove",False)
if type(up)!=type([]):
up=[]
delkey = page.param('delkey')
rekey = page.param('rekey')
if rekey or delkey:
newup = []
for i in up:
cmtkey = i['key'][0];
enykey = i['key'][1];
if delkey and cmtkey==delkey:
cm = Comment.get(cmtkey)
db.Model.delete(cm)
elif rekey and cmtkey==rekey:
cm = Comment.get(cmtkey)
eny = Entry.get(enykey)
eny.commentcount+=1
eny.put()
cm.entry = eny
db.Model.put(cm)
self.SubmitAkismet({
'user_agent':i['other']['user_agent'],
'referrer':i['other']['referrer'],
'user_ip' : cm.ip,
'comment_type' : 'comment',
'comment_author' : cm.author.encode('utf-8'),
'comment_author_email' : cm.email,
'comment_author_url' : cm.weburl,
'comment_content' : cm.content.encode('utf-8')
},i['other'].get('url',''),"Ham")
else:
newup.append(i)
if not len(up)==len(newup):
OptionSet.setValue("Akismet_Comments_v0.3",newup)
up = newup
cmts = [(Comment.get(i['key'][0]),Entry.get(i['key'][1])) for i in up]
		comments = [u'<tr><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td><a target="_blank" href="/%s">%s</a></td><td><a href="?delkey=%s" title="Delete">Delete</a>    <a href="?rekey=%s" title="This is not a spam comment">Restore</a></td></tr>'%(i[0].date,
i[0].author,i[0].content,i[0].email,i[0].ip,i[1].link,i[1].title,str(i[0].key()),str(i[0].key())) for i in cmts if i is not None and i[0] is not None]
comments = ''.join(comments)
apikey = OptionSet.getValue("Akismet_code",default=self.AKISMET_default_Key)
if len(apikey)<5:
apikey = self.AKISMET_default_Key
api = AkismetManager(apikey,Blog.all()[0].baseurl)
if not code:
status = ''
elif api.IsValidKey():
status = 'True'
else:
status = 'False'
if rm==True:
rmchecked='checked="checked"'
else:
rmchecked=''
return u'''<h3>Akismet</h3>
<form action="" method="post">
<p>Akismet Api Key:</p>
<input name="code" style="width:400px;" value="%s"> %s
<br />
	<p>Automatically delete comments detected as spam:
<input type="checkbox" name="autorm" value="1" %s></p>
	<p>Delete a normal comment and submit it as spam (enter the comment ID):</p>
<input name="spam" style="width:400px;" value="">
<br />
<input type="submit" value="submit">
</form>
<div>
<br />
	<h3>Filtered comments</h3> <table class="widefat"><thead><tr><th>Date</th><th>Author</th><th>Content</th><th>Email</th><th>IP address</th><th>Entry/Page</th><th style="width:15%%;">Actions</th></tr></thead><tbody>%s </tbody></table>
</div>'''%(code,status,rmchecked,comments)
def post(self,page):
code=page.param("code")
OptionSet.setValue("Akismet_code",code)
rm=page.param('autorm')
if rm and int(rm)==1:
rm=True
else:
rm=False
oldrm = OptionSet.getValue("Akismet_AutoRemove",False)
if oldrm!=rm:
OptionSet.setValue("Akismet_AutoRemove",rm)
spam=page.param("spam")
spam = len(spam)>0 and int(spam) or 0
sOther = ""
if spam>0:
cm = Comment.get_by_id(spam)
try:
url = Blog.all().fetch(1)[0].baseurl
self.SubmitAkismet({
'user_ip' : cm.ip,
'comment_type' : 'comment',
'comment_author' : cm.author.encode('utf-8'),
'comment_author_email' : cm.email,
'comment_author_url' : cm.weburl,
'comment_content' : cm.content.encode('utf-8')
},url,"Spam")
sOther = u"<div style='padding:8px;margin:8px;border:1px solid #aaa;color:red;'>评论已删除</div>"
cm.delit()
except:
sOther = u"<div style='padding:8px;margin:8px;border:1px solid #aaa;color:red;'>无法找到对应的评论项</div>"
return sOther + self.get(page)
class AkismetManager():
def __init__(self,key,url):
self.ApiKey = key
self.Url = url
def ExecuteRequest(self,url,content,method="GET"):
request = urlfetch.fetch(url,
method='POST',
payload=content
)
return request
def IsValidKey(self):
content = "key=%(key)s&blog=%(url)s&"%{'key':self.ApiKey,'url':self.Url}
response = self.ExecuteRequest("http://rest.akismet.com/1.1/verify-key",
content).content
if response and response == 'valid':
return True
else:
return False
def IsSpam(self,item=None):
if not item:
raise Exception
content = self.AddDefaultFields(urllib.urlencode(item))
response = self.ExecuteRequest(
"http://%(key)s.rest.akismet.com/1.1/comment-check"%{'key':self.ApiKey},
content).content
if response:
result = {'true':True,'false': False}
return result[response]
return False
def SubmitSpam(self,item):
if not item:
raise Exception
content = self.AddDefaultFields(urllib.urlencode(item))
response = self.ExecuteRequest(
"http://%(key)s.rest.akismet.com/1.1/submit-spam"%{'key':self.ApiKey},
content).content
if response == 'invalid':
raise Exception
elif len(response)>0:
raise Exception
def SubmitHam(self,item):
if not item:
raise Exception
content = self.AddDefaultFields(urllib.urlencode(item))
response = self.ExecuteRequest(
"http://%(key)s.rest.akismet.com/1.1/submit-ham"%{'key':self.ApiKey},
content).content
if response == 'invalid':
raise Exception
elif len(response)>0:
raise Exception
def AddDefaultFields(self,content):
return ''.join(["key=%(key)s&blog=%(url)s&"%{'key':self.ApiKey,'url':self.Url},content])
| Alwnikrotikz/micolog2 | plugins/akismet.py | Python | gpl-3.0 | 8,593 | 0.055148 |
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
return render(request, 'todo/index.html')
| deshmukhmayur/django-todo | todo/views.py | Python | mit | 166 | 0.006024 |
# NeoPixel driver for MicroPython on ESP8266
# MIT license; Copyright (c) 2016 Damien P. George
from esp import neopixel_write
class NeoPixel:
ORDER = (1, 0, 2, 3)
def __init__(self, pin, n, bpp=3):
self.pin = pin
self.n = n
self.bpp = bpp
self.buf = bytearray(n * bpp)
self.pin.init(pin.OUT)
def __setitem__(self, index, val):
offset = index * self.bpp
for i in range(self.bpp):
self.buf[offset + self.ORDER[i]] = val[i]
def __getitem__(self, index):
offset = index * self.bpp
return tuple(self.buf[offset + self.ORDER[i]]
for i in range(self.bpp))
def fill(self, color):
for i in range(self.n):
self[i] = color
def write(self):
neopixel_write(self.pin, self.buf, True)
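

# --- Usage sketch (editor addition, not part of the driver) ---
# Assumes MicroPython on an ESP8266 with an 8-pixel WS2812 strip on GPIO4;
# the pin number and pixel count are placeholders.
def _demo():
    from machine import Pin
    np = NeoPixel(Pin(4), 8)
    np.fill((0, 0, 0))       # clear the whole strip
    np[0] = (255, 0, 0)      # first pixel red (buffer stores it in GRB order)
    np[7] = (0, 0, 255)      # last pixel blue
    np.write()               # push the buffer out to the LEDs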
| swegener/micropython | ports/esp8266/modules/neopixel.py | Python | mit | 836 | 0 |
#!/usr/bin/env python
import os
from gi.repository import Gtk
from gi.repository import Vte
from gi.repository import GLib
from gi.repository import Keybinder
from gi.repository import Gdk
class Tida(Gtk.Window):
"""A micro-drop-down terminal like TILDA"""
def __init__(self, config=None):
Gtk.Window.__init__(self)
self.init_config(config)
self.init_icon()
self.init_terminal()
Gtk.main()
def init_config(self, config=None):
"""Initialise the program with config if exists, else set default values"""
if config != None:
self.set_default_size(config['width'], config['heigth'])
self.set_decorated(config['decorated'])
self.set_skip_taskbar_hint(config['skip_taskbar_hint'])
self.set_keep_above(config['keep_above'])
self.set_skip_pager_hint(config['skip_pager_hint'])
self.set_modal(config['modal'])
s = Gdk.Screen.get_default()
c = (s.get_width() - self.get_size()[0]) / 2.
self.move(int(c), 0)
else:
self.set_decorated(False)
self.set_skip_taskbar_hint(True)
self.set_keep_above(True)
self.set_skip_pager_hint(False)
self.set_modal(False)
self.set_default_size(720, 300)
self.move(323, 0)
self.init_keybinder(config)
def init_icon(self):
"""Initialise status icon"""
self.status_icon = Gtk.StatusIcon()
abs_file_name = os.path.join(os.path.dirname(__file__), "terminal.png")
self.status_icon.set_from_file(abs_file_name)
self.status_icon.set_title("StatusIcon TIDA")
self.status_icon.set_tooltip_text("TIDA :>")
def init_terminal(self):
"""Initialise and add new Vte Terminal to Window"""
self.term = Vte.Terminal()
self.term.set_scrollback_lines(-1)
self.term.connect('child-exited', Gtk.main_quit)
self.term.fork_command_full(Vte.PtyFlags.DEFAULT, os.environ['HOME'], ['/usr/bin/bash'], [], GLib.SpawnFlags.DO_NOT_REAP_CHILD, None, None)
self.add(self.term)
self.connect('delete-event', Gtk.main_quit)
def init_keybinder(self, config):
"""Initialise keybinder and bind some keys (toggle, copy, paste)"""
Keybinder.init()
Keybinder.set_use_cooked_accelerators(False)
        if config is None:
            # Assumed fallback shortcuts so a Tida() created without a config
            # (the default branch in init_config) does not crash here.
            config = {'key_toggle_visibility': 'F12',
                      'key_copy_to_clipboard': '<Ctrl><Shift>C',
                      'key_paste_from_clipboard': '<Ctrl><Shift>V'}
        self.bind_all_key(config['key_toggle_visibility'],
                config['key_copy_to_clipboard'],
                config['key_paste_from_clipboard'])
def bind_all_key(self, key_toggle, key_copy, key_paste):
"""Bind all keys used with tida"""
Keybinder.bind(key_toggle, self.callback_toggle_visibility, "asd")
Keybinder.bind(key_copy, self.callback_copy, "asd")
Keybinder.bind(key_paste, self.callback_paste, "asd")
def callback_copy(self, key, asd):
"""Callback function used when press the shortcut for copy to clipboard"""
if self.is_visible():
self.term.copy_clipboard()
return True
return False
def callback_paste(self, key, asd):
"""Callback function used when press the shortcut for paste from clipboard"""
if self.is_visible():
self.term.paste_clipboard()
return True
return False
def callback_toggle_visibility(self, key, asd):
"""Callback function used when press the shortcut for toggle visibility of tida"""
if self.is_visible():
self.hide()
else:
self.show_all()
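

# --- Usage sketch (editor addition) ---
# Example of the config dict Tida expects; every key below is read in
# init_config / init_keybinder above, and the values are just plausible
# placeholders.
EXAMPLE_CONFIG = {
    'width': 720,
    'heigth': 300,                      # the code reads the misspelled key 'heigth'
    'decorated': False,
    'skip_taskbar_hint': True,
    'keep_above': True,
    'skip_pager_hint': False,
    'modal': False,
    'key_toggle_visibility': 'F12',
    'key_copy_to_clipboard': '<Ctrl><Shift>C',
    'key_paste_from_clipboard': '<Ctrl><Shift>V',
}
# Tida(config=EXAMPLE_CONFIG)  # would build the window and enter Gtk.main()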
| headlins/tida | Tida.py | Python | lgpl-3.0 | 3,125 | 0.03296 |
import md5
import os
import sys
path = sys.argv[1]
db_file = open(os.path.join(path,"pics_mysql.txt"),"w")
for file_name in os.listdir(path):
if not file_name.lower().endswith(".gif"): continue
with open(os.path.join(path,file_name),"rb") as fp:
contents = fp.read()
new_file_name = md5.new(contents).hexdigest() + ".gif"
print file_name + " --> " + new_file_name
os.rename(os.path.join(path,file_name),os.path.join(path,new_file_name))
db_file.write('INSERT INTO pics (name) VALUES ("' + new_file_name + '");\n')
db_file.close() | 0x1001/funornot | utils/image_convert.py | Python | gpl-2.0 | 593 | 0.025295 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from nova import context
from nova import db
from nova import exception
from nova.objects import instance
from nova.objects import pci_device
from nova.tests.objects import test_objects
dev_dict = {
'compute_node_id': 1,
'address': 'a',
'product_id': 'p',
'vendor_id': 'v',
'status': 'available'}
fake_db_dev = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': None,
'id': 1,
'compute_node_id': 1,
'address': 'a',
'vendor_id': 'v',
'product_id': 'p',
'dev_type': 't',
'status': 'available',
'dev_id': 'i',
'label': 'l',
'instance_uuid': None,
'extra_info': '{}',
}
fake_db_dev_1 = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': None,
'id': 2,
'compute_node_id': 1,
'address': 'a1',
'vendor_id': 'v1',
'product_id': 'p1',
'dev_type': 't',
'status': 'available',
'dev_id': 'i',
'label': 'l',
'instance_uuid': None,
'extra_info': '{}',
}
class _TestPciDeviceObject(object):
def _create_fake_instance(self):
self.inst = instance.Instance()
self.inst.uuid = 'fake-inst-uuid'
self.inst.pci_devices = pci_device.PciDeviceList()
def _create_fake_pci_device(self):
ctxt = context.get_admin_context()
self.mox.StubOutWithMock(db, 'pci_device_get_by_addr')
db.pci_device_get_by_addr(ctxt, 1, 'a').AndReturn(fake_db_dev)
self.mox.ReplayAll()
self.pci_device = pci_device.PciDevice.get_by_dev_addr(ctxt, 1, 'a')
def test_create_pci_device(self):
self.pci_device = pci_device.PciDevice.create(dev_dict)
self.assertEqual(self.pci_device.product_id, 'p')
self.assertEqual(self.pci_device.obj_what_changed(),
set(['compute_node_id', 'product_id', 'vendor_id',
'status', 'address', 'extra_info']))
def test_pci_device_extra_info(self):
self.dev_dict = copy.copy(dev_dict)
self.dev_dict['k1'] = 'v1'
self.dev_dict['k2'] = 'v2'
self.pci_device = pci_device.PciDevice.create(self.dev_dict)
extra_value = self.pci_device.extra_info
self.assertEqual(extra_value.get('k1'), 'v1')
self.assertEqual(set(extra_value.keys()), set(('k1', 'k2')))
self.assertEqual(self.pci_device.obj_what_changed(),
set(['compute_node_id', 'address', 'product_id',
'vendor_id', 'status', 'extra_info']))
def test_update_device(self):
self.pci_device = pci_device.PciDevice.create(dev_dict)
self.pci_device.obj_reset_changes()
changes = {'product_id': 'p2', 'vendor_id': 'v2'}
self.pci_device.update_device(changes)
self.assertEqual(self.pci_device.vendor_id, 'v2')
self.assertEqual(self.pci_device.obj_what_changed(),
set(['vendor_id', 'product_id']))
def test_update_device_same_value(self):
self.pci_device = pci_device.PciDevice.create(dev_dict)
self.pci_device.obj_reset_changes()
changes = {'product_id': 'p', 'vendor_id': 'v2'}
self.pci_device.update_device(changes)
self.assertEqual(self.pci_device.product_id, 'p')
self.assertEqual(self.pci_device.vendor_id, 'v2')
self.assertEqual(self.pci_device.obj_what_changed(),
set(['vendor_id', 'product_id']))
def test_get_by_dev_addr(self):
ctxt = context.get_admin_context()
self.mox.StubOutWithMock(db, 'pci_device_get_by_addr')
db.pci_device_get_by_addr(ctxt, 1, 'a').AndReturn(fake_db_dev)
self.mox.ReplayAll()
self.pci_device = pci_device.PciDevice.get_by_dev_addr(ctxt, 1, 'a')
self.assertEqual(self.pci_device.product_id, 'p')
self.assertEqual(self.pci_device.obj_what_changed(), set())
self.assertRemotes()
def test_get_by_dev_id(self):
ctxt = context.get_admin_context()
self.mox.StubOutWithMock(db, 'pci_device_get_by_id')
db.pci_device_get_by_id(ctxt, 1).AndReturn(fake_db_dev)
self.mox.ReplayAll()
self.pci_device = pci_device.PciDevice.get_by_dev_id(ctxt, 1)
self.assertEqual(self.pci_device.product_id, 'p')
self.assertEqual(self.pci_device.obj_what_changed(), set())
self.assertRemotes()
def test_claim_device(self):
self._create_fake_instance()
self.pci_device = pci_device.PciDevice.create(dev_dict)
self.pci_device.claim(self.inst)
self.assertEqual(self.pci_device.status, 'claimed')
self.assertEqual(self.pci_device.instance_uuid,
'fake-inst-uuid')
self.assertEqual(len(self.inst.pci_devices), 0)
def test_claim_device_fail(self):
self._create_fake_instance()
self._create_fake_pci_device()
self.pci_device.status = 'allocated'
self.assertRaises(exception.PciDeviceInvalidStatus,
self.pci_device.claim, self.inst)
def test_allocate_device(self):
self._create_fake_instance()
self._create_fake_pci_device()
self.pci_device.claim(self.inst)
self.pci_device.allocate(self.inst)
self.assertEqual(self.pci_device.status, 'allocated')
self.assertEqual(self.pci_device.instance_uuid, 'fake-inst-uuid')
self.assertEqual(len(self.inst.pci_devices), 1)
self.assertEqual(self.inst.pci_devices[0]['vendor_id'], 'v')
self.assertEqual(self.inst.pci_devices[0]['status'], 'allocated')
    def test_allocate_device_fail_status(self):
self._create_fake_instance()
self._create_fake_pci_device()
self.pci_device.status = 'removed'
self.assertRaises(exception.PciDeviceInvalidStatus,
self.pci_device.allocate,
self.inst)
    def test_allocate_device_fail_owner(self):
self._create_fake_instance()
self._create_fake_pci_device()
inst_2 = instance.Instance()
inst_2.uuid = 'fake-inst-uuid-2'
self.pci_device.claim(self.inst)
self.assertRaises(exception.PciDeviceInvalidOwner,
self.pci_device.allocate, inst_2)
def test_free_claimed_device(self):
self._create_fake_instance()
self._create_fake_pci_device()
self.pci_device.claim(self.inst)
self.pci_device.free(self.inst)
self.assertEqual(self.pci_device.status, 'available')
self.assertEqual(self.pci_device.instance_uuid, None)
def test_free_allocated_device(self):
self._create_fake_instance()
self._create_fake_pci_device()
self.pci_device.claim(self.inst)
self.pci_device.allocate(self.inst)
self.assertEqual(len(self.inst.pci_devices), 1)
self.pci_device.free(self.inst)
self.assertEqual(len(self.inst.pci_devices), 0)
self.assertEqual(self.pci_device.status, 'available')
self.assertEqual(self.pci_device.instance_uuid, None)
def test_free_device_fail(self):
self._create_fake_pci_device()
self.pci_device.status = 'removed'
self.assertRaises(exception.PciDeviceInvalidStatus,
self.pci_device.free)
def test_remove_device(self):
self._create_fake_pci_device()
self.pci_device.remove()
self.assertEqual(self.pci_device.status, 'removed')
self.assertEqual(self.pci_device.instance_uuid, None)
def test_remove_device_fail(self):
self._create_fake_instance()
self._create_fake_pci_device()
self.pci_device.claim(self.inst)
self.assertRaises(exception.PciDeviceInvalidStatus,
self.pci_device.remove)
def test_save(self):
ctxt = context.get_admin_context()
self._create_fake_pci_device()
return_dev = dict(fake_db_dev, status='available',
instance_uuid='fake-uuid-3')
self.pci_device.status = 'allocated'
self.pci_device.instance_uuid = 'fake-uuid-2'
expected_updates = dict(status='allocated',
instance_uuid='fake-uuid-2')
self.mox.StubOutWithMock(db, 'pci_device_update')
db.pci_device_update(ctxt, 1, 'a',
expected_updates).AndReturn(return_dev)
self.mox.ReplayAll()
self.pci_device.save(ctxt)
self.assertEqual(self.pci_device.status, 'available')
self.assertEqual(self.pci_device.instance_uuid,
'fake-uuid-3')
self.assertRemotes()
def test_save_no_extra_info(self):
return_dev = dict(fake_db_dev, status='available',
instance_uuid='fake-uuid-3')
def _fake_update(ctxt, node_id, addr, updates):
self.extra_info = updates.get('extra_info')
return return_dev
ctxt = context.get_admin_context()
self.stubs.Set(db, 'pci_device_update', _fake_update)
self.pci_device = pci_device.PciDevice.create(dev_dict)
self.pci_device.save(ctxt)
self.assertEqual(self.extra_info, '{}')
def test_save_removed(self):
ctxt = context.get_admin_context()
self._create_fake_pci_device()
self.pci_device.status = 'removed'
self.mox.StubOutWithMock(db, 'pci_device_destroy')
db.pci_device_destroy(ctxt, 1, 'a')
self.mox.ReplayAll()
self.pci_device.save(ctxt)
self.assertEqual(self.pci_device.status, 'deleted')
self.assertRemotes()
def test_save_deleted(self):
def _fake_destroy(ctxt, node_id, addr):
self.called = True
def _fake_update(ctxt, node_id, addr, updates):
self.called = True
ctxt = context.get_admin_context()
self.stubs.Set(db, 'pci_device_destroy', _fake_destroy)
self.stubs.Set(db, 'pci_device_update', _fake_update)
self._create_fake_pci_device()
self.pci_device.status = 'deleted'
self.called = False
self.pci_device.save(ctxt)
self.assertEqual(self.called, False)
class TestPciDeviceObject(test_objects._LocalTest,
_TestPciDeviceObject):
pass
class TestPciDeviceObjectRemote(test_objects._RemoteTest,
_TestPciDeviceObject):
pass
fake_pci_devs = [fake_db_dev, fake_db_dev_1]
class _TestPciDeviceListObject(object):
def test_get_by_compute_node(self):
ctxt = context.get_admin_context()
self.mox.StubOutWithMock(db, 'pci_device_get_all_by_node')
db.pci_device_get_all_by_node(ctxt, 1).AndReturn(fake_pci_devs)
self.mox.ReplayAll()
devs = pci_device.PciDeviceList.get_by_compute_node(ctxt, 1)
for i in range(len(fake_pci_devs)):
self.assertTrue(isinstance(devs[i], pci_device.PciDevice))
self.assertEqual(fake_pci_devs[i]['vendor_id'], devs[i].vendor_id)
self.assertRemotes()
def test_get_by_instance_uuid(self):
ctxt = context.get_admin_context()
fake_db_1 = dict(fake_db_dev, address='a1',
status='allocated', instance_uuid='1')
fake_db_2 = dict(fake_db_dev, address='a2',
status='allocated', instance_uuid='1')
self.mox.StubOutWithMock(db, 'pci_device_get_all_by_instance_uuid')
db.pci_device_get_all_by_instance_uuid(ctxt, '1').AndReturn(
[fake_db_1, fake_db_2])
self.mox.ReplayAll()
devs = pci_device.PciDeviceList.get_by_instance_uuid(ctxt, '1')
self.assertEqual(len(devs), 2)
for i in range(len(fake_pci_devs)):
self.assertTrue(isinstance(devs[i], pci_device.PciDevice))
self.assertEqual(devs[0].vendor_id, 'v')
self.assertEqual(devs[1].vendor_id, 'v')
self.assertRemotes()
class TestPciDeviceListObject(test_objects._LocalTest,
_TestPciDeviceListObject):
pass
class TestPciDeviceListObjectRemote(test_objects._RemoteTest,
_TestPciDeviceListObject):
pass
| TieWei/nova | nova/tests/objects/test_pci_device.py | Python | apache-2.0 | 12,888 | 0.000155 |
import os
from pathlib import Path
import shutil
import joblib
import hvc
from config import rewrite_config
HERE = Path(__file__).parent
DATA_FOR_TESTS = HERE / ".." / "data_for_tests"
TEST_CONFIGS = DATA_FOR_TESTS.joinpath("config.yml").resolve()
FEATURE_FILES_DST = DATA_FOR_TESTS.joinpath("feature_files").resolve()
MODEL_FILES_DST = DATA_FOR_TESTS.joinpath("model_files").resolve()
config_feature_file_pairs = {
"knn": ("test_select_knn_ftr_grp.config.yml", "knn.features"),
"svm": ("test_select_svm.config.yml", "svm.features"),
"flatwindow": ("test_select_flatwindow.config.yml", "flatwindow.features"),
}
def main():
for model_name, (
select_config,
feature_filename,
) in config_feature_file_pairs.items():
print("running {} to create model files".format(select_config))
# have to put tmp_output_dir into yaml file
select_config = TEST_CONFIGS / select_config
feature_file = sorted(FEATURE_FILES_DST.glob(feature_filename))
if len(feature_file) != 1:
raise ValueError(
"found more than one feature file with search {}:\n{}".format(
feature_filename, feature_file
)
)
else:
# call `resolve` to get full path to model file, so pytest fixtures find it from inside tmp directories
feature_file = feature_file[0].resolve()
replace_dict = {
"feature_file": ("replace with feature_file", str(feature_file)),
"output_dir": ("replace with tmp_output_dir", str(MODEL_FILES_DST)),
}
select_config_rewritten = rewrite_config(
select_config, str(MODEL_FILES_DST), replace_dict
)
select_output_before = [
select_output_dir
for select_output_dir in sorted(MODEL_FILES_DST.glob("*select*output*"))
if select_output_dir.is_dir()
]
hvc.select(select_config_rewritten)
select_output_after = [
select_output_dir
for select_output_dir in sorted(MODEL_FILES_DST.glob("*select*output*"))
if select_output_dir.is_dir()
]
select_output_dir = [
after for after in select_output_after if after not in select_output_before
]
if len(select_output_dir) != 1:
raise ValueError(
"incorrect number of outputs when looking for select "
"ouput dirs:\n{}".format(select_output_dir)
)
else:
select_output_dir = select_output_dir[0]
# arbitrarily grab the last .model and associated .meta file
model_file = sorted(select_output_dir.glob("*/*.model"))[-1]
# call `resolve` to get full path to model file, so pytest fixtures find it from inside tmp directories
model_file_dst = MODEL_FILES_DST.joinpath(model_name + ".model").resolve()
shutil.move(src=model_file, dst=model_file_dst)
meta_file = sorted(select_output_dir.glob("*/*.meta"))[-1]
meta_file_dst = MODEL_FILES_DST.joinpath(model_name + ".meta")
shutil.move(src=str(meta_file), dst=str(meta_file_dst))
# need to change 'model_filename' in .meta file
meta_file = joblib.load(meta_file_dst)
meta_file["model_filename"] = os.path.abspath(model_file_dst)
joblib.dump(meta_file, meta_file_dst)
# clean up -- delete all the other model files, directory, and config
shutil.rmtree(select_output_dir)
os.remove(select_config_rewritten)
if __name__ == "__main__":
main()
| NickleDave/hybrid-vocal-classifier | tests/scripts/remake_model_files.py | Python | bsd-3-clause | 3,608 | 0.00194 |
# Copyright 2015 FUJITSU LIMITED
# (C) Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
class BaseRepo(object):
def __init__(self, config):
self._find_alarm_action_sql = \
"""SELECT id, type, name, address, period
FROM alarm_action as aa
JOIN notification_method as nm ON aa.action_id = nm.id
WHERE aa.alarm_definition_id = %s and aa.alarm_state = %s"""
self._find_alarm_state_sql = \
"""SELECT state
FROM alarm
WHERE alarm.id = %s"""
self._insert_notification_types_sql = \
"""INSERT INTO notification_method_type (name) VALUES ( %s)"""
self._find_all_notification_types_sql = """SELECT name from notification_method_type """
self._get_notification_sql = """SELECT name, type, address, period
FROM notification_method
WHERE id = %s"""
| openstack/monasca-notification | monasca_notification/common/repositories/base/base_repo.py | Python | apache-2.0 | 1,519 | 0.003292 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""mixup: Beyond Empirical Risk Minimization.
Adaptation to SSL of MixUp: https://arxiv.org/abs/1710.09412
"""
import functools
import os
import tensorflow as tf
from absl import app
from absl import flags
from libml import data, utils, models
from libml.utils import EasyDict
FLAGS = flags.FLAGS
class Mixup(models.MultiModel):
def augment(self, x, l, beta, **kwargs):
del kwargs
mix = tf.distributions.Beta(beta, beta).sample([tf.shape(x)[0], 1, 1, 1])
mix = tf.maximum(mix, 1 - mix)
xmix = x * mix + x[::-1] * (1 - mix)
lmix = l * mix[:, :, 0, 0] + l[::-1] * (1 - mix[:, :, 0, 0])
return xmix, lmix
def model(self, batch, lr, wd, ema, **kwargs):
hwc = [self.dataset.height, self.dataset.width, self.dataset.colors]
xt_in = tf.placeholder(tf.float32, [batch] + hwc, 'xt') # For training
x_in = tf.placeholder(tf.float32, [None] + hwc, 'x')
y_in = tf.placeholder(tf.float32, [batch] + hwc, 'y')
l_in = tf.placeholder(tf.int32, [batch], 'labels')
wd *= lr
classifier = lambda x, **kw: self.classifier(x, **kw, **kwargs).logits
def get_logits(x):
logits = classifier(x, training=True)
return logits
x, labels_x = self.augment(xt_in, tf.one_hot(l_in, self.nclass), **kwargs)
logits_x = get_logits(x)
post_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
y, labels_y = self.augment(y_in, tf.nn.softmax(get_logits(y_in)), **kwargs)
labels_y = tf.stop_gradient(labels_y)
logits_y = get_logits(y)
loss_xe = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_x, logits=logits_x)
loss_xe = tf.reduce_mean(loss_xe)
loss_xeu = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_y, logits=logits_y)
loss_xeu = tf.reduce_mean(loss_xeu)
tf.summary.scalar('losses/xe', loss_xe)
tf.summary.scalar('losses/xeu', loss_xeu)
ema = tf.train.ExponentialMovingAverage(decay=ema)
ema_op = ema.apply(utils.model_vars())
ema_getter = functools.partial(utils.getter_ema, ema)
post_ops.append(ema_op)
post_ops.extend([tf.assign(v, v * (1 - wd)) for v in utils.model_vars('classify') if 'kernel' in v.name])
train_op = tf.train.AdamOptimizer(lr).minimize(loss_xe + loss_xeu, colocate_gradients_with_ops=True)
with tf.control_dependencies([train_op]):
train_op = tf.group(*post_ops)
return EasyDict(
xt=xt_in, x=x_in, y=y_in, label=l_in, train_op=train_op,
classify_raw=tf.nn.softmax(classifier(x_in, training=False)), # No EMA, for debugging.
classify_op=tf.nn.softmax(classifier(x_in, getter=ema_getter, training=False)))
def main(argv):
utils.setup_main()
del argv # Unused.
dataset = data.DATASETS()[FLAGS.dataset]()
log_width = utils.ilog2(dataset.width)
model = Mixup(
os.path.join(FLAGS.train_dir, dataset.name),
dataset,
lr=FLAGS.lr,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
nclass=dataset.nclass,
ema=FLAGS.ema,
beta=FLAGS.beta,
scales=FLAGS.scales or (log_width - 2),
filters=FLAGS.filters,
repeat=FLAGS.repeat)
model.train(FLAGS.train_kimg << 10, FLAGS.report_kimg << 10)
if __name__ == '__main__':
utils.setup_tf()
flags.DEFINE_float('wd', 0.02, 'Weight decay.')
flags.DEFINE_float('ema', 0.999, 'Exponential moving average of params.')
flags.DEFINE_float('beta', 0.5, 'Mixup beta distribution.')
flags.DEFINE_integer('scales', 0, 'Number of 2x2 downscalings in the classifier.')
flags.DEFINE_integer('filters', 32, 'Filter size of convolutions.')
flags.DEFINE_integer('repeat', 4, 'Number of residual layers per stage.')
FLAGS.set_default('dataset', 'cifar10.3@250-5000')
FLAGS.set_default('batch', 64)
FLAGS.set_default('lr', 0.002)
FLAGS.set_default('train_kimg', 1 << 16)
app.run(main)
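

# --- Illustrative sketch (editor addition, not used by the training code) ---
# NumPy rendering of the interpolation performed in Mixup.augment above:
# each example is mixed with the batch in reverse order using a
# Beta(beta, beta) coefficient clamped to be >= 0.5.
def _mixup_numpy_example(beta=0.5, batch=4):
    import numpy as np
    x = np.random.rand(batch, 32, 32, 3).astype('float32')    # stand-in images
    l = np.eye(10)[np.random.randint(10, size=batch)]          # stand-in one-hot labels
    mix = np.random.beta(beta, beta, size=(batch, 1, 1, 1))
    mix = np.maximum(mix, 1 - mix)
    xmix = x * mix + x[::-1] * (1 - mix)
    lmix = l * mix[:, :, 0, 0] + l[::-1] * (1 - mix[:, :, 0, 0])
    return xmix, lmix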
| google-research/remixmatch | mixup.py | Python | apache-2.0 | 4,608 | 0.002604 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python functions which run only within a Jupyter notebook."""
# internal imports
import IPython
from magenta.music import midi_synth
_DEFAULT_SAMPLE_RATE = 44100
def play_sequence(sequence,
synth=midi_synth.synthesize,
sample_rate=_DEFAULT_SAMPLE_RATE,
**synth_args):
"""Creates an interactive player for a synthesized note sequence.
This function should only be called from a Jupyter notebook.
Args:
sequence: A music_pb2.NoteSequence to synthesize and play.
synth: A synthesis function that takes a sequence and sample rate as input.
sample_rate: The sample rate at which to synthesize.
**synth_args: Additional keyword arguments to pass to the synth function.
"""
array_of_floats = synth(sequence, sample_rate=sample_rate, **synth_args)
IPython.display.display(IPython.display.Audio(array_of_floats,
rate=sample_rate))
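

# --- Usage sketch (editor addition) ---
# Meant to run inside a Jupyter notebook. Builds a tiny two-note sequence and
# plays it through the default synthesizer; the music_pb2 import path below is
# an assumption about the surrounding Magenta layout.
def _demo_play():
  from magenta.protobuf import music_pb2
  seq = music_pb2.NoteSequence()
  seq.notes.add(pitch=60, velocity=80, start_time=0.0, end_time=0.5)  # middle C
  seq.notes.add(pitch=64, velocity=80, start_time=0.5, end_time=1.0)  # E above it
  seq.total_time = 1.0
  play_sequence(seq)  # displays an audio widget in the notebook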
| YoshikawaMasashi/magenta | magenta/music/notebook_utils.py | Python | apache-2.0 | 1,557 | 0.001927 |
from collections import defaultdict
import mock
from searx.engines import bing_news
from searx.testing import SearxTestCase
import lxml
class TestBingNewsEngine(SearxTestCase):
def test_request(self):
bing_news.supported_languages = ['en', 'fr']
query = 'test_query'
dicto = defaultdict(dict)
dicto['pageno'] = 1
dicto['language'] = 'fr-FR'
dicto['time_range'] = ''
params = bing_news.request(query, dicto)
self.assertIn('url', params)
self.assertIn(query, params['url'])
self.assertIn('bing.com', params['url'])
self.assertIn('fr', params['url'])
dicto['language'] = 'all'
params = bing_news.request(query, dicto)
self.assertIn('en', params['url'])
def test_no_url_in_request_year_time_range(self):
dicto = defaultdict(dict)
query = 'test_query'
dicto['time_range'] = 'year'
params = bing_news.request(query, dicto)
self.assertEqual({}, params['url'])
def test_response(self):
self.assertRaises(AttributeError, bing_news.response, None)
self.assertRaises(AttributeError, bing_news.response, [])
self.assertRaises(AttributeError, bing_news.response, '')
self.assertRaises(AttributeError, bing_news.response, '[]')
response = mock.Mock(content='<html></html>')
self.assertEqual(bing_news.response(response), [])
response = mock.Mock(content='<html></html>')
self.assertEqual(bing_news.response(response), [])
html = """<?xml version="1.0" encoding="utf-8" ?>
<rss version="2.0" xmlns:News="https://www.bing.com:443/news/search?q=python&setmkt=en-US&first=1&format=RSS">
<channel>
<title>python - Bing News</title>
<link>https://www.bing.com:443/news/search?q=python&setmkt=en-US&first=1&format=RSS</link>
<description>Search results</description>
<image>
<url>http://10.53.64.9/rsslogo.gif</url>
<title>test</title>
<link>https://www.bing.com:443/news/search?q=test&setmkt=en-US&first=1&format=RSS</link>
</image>
<copyright>Copyright</copyright>
<item>
<title>Title</title>
<link>https://www.bing.com/news/apiclick.aspx?ref=FexRss&aid=&tid=c237eccc50bd4758b106a5e3c94fce09&url=http%3a%2f%2furl.of.article%2f&c=xxxxxxxxx&mkt=en-us</link>
<description>Article Content</description>
<pubDate>Tue, 02 Jun 2015 13:37:00 GMT</pubDate>
<News:Source>Infoworld</News:Source>
<News:Image>http://a1.bing4.com/th?id=ON.13371337133713371337133713371337&pid=News</News:Image>
<News:ImageSize>w={0}&h={1}&c=7</News:ImageSize>
<News:ImageKeepOriginalRatio></News:ImageKeepOriginalRatio>
<News:ImageMaxWidth>620</News:ImageMaxWidth>
<News:ImageMaxHeight>413</News:ImageMaxHeight>
</item>
<item>
<title>Another Title</title>
<link>https://www.bing.com/news/apiclick.aspx?ref=FexRss&aid=&tid=c237eccc50bd4758b106a5e3c94fce09&url=http%3a%2f%2fanother.url.of.article%2f&c=xxxxxxxxx&mkt=en-us</link>
<description>Another Article Content</description>
<pubDate>Tue, 02 Jun 2015 13:37:00 GMT</pubDate>
</item>
</channel>
</rss>""" # noqa
response = mock.Mock(content=html.encode('utf-8'))
results = bing_news.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 2)
self.assertEqual(results[0]['title'], 'Title')
self.assertEqual(results[0]['url'], 'http://url.of.article/')
self.assertEqual(results[0]['content'], 'Article Content')
self.assertEqual(results[0]['img_src'], 'https://www.bing.com/th?id=ON.13371337133713371337133713371337')
self.assertEqual(results[1]['title'], 'Another Title')
self.assertEqual(results[1]['url'], 'http://another.url.of.article/')
self.assertEqual(results[1]['content'], 'Another Article Content')
self.assertNotIn('img_src', results[1])
html = """<?xml version="1.0" encoding="utf-8" ?>
<rss version="2.0" xmlns:News="https://www.bing.com:443/news/search?q=python&setmkt=en-US&first=1&format=RSS">
<channel>
<title>python - Bing News</title>
<link>https://www.bing.com:443/news/search?q=python&setmkt=en-US&first=1&format=RSS</link>
<description>Search results</description>
<image>
<url>http://10.53.64.9/rsslogo.gif</url>
<title>test</title>
<link>https://www.bing.com:443/news/search?q=test&setmkt=en-US&first=1&format=RSS</link>
</image>
<copyright>Copyright</copyright>
<item>
<title>Title</title>
<link>http://another.url.of.article/</link>
<description>Article Content</description>
<pubDate>garbage</pubDate>
<News:Source>Infoworld</News:Source>
<News:Image>http://another.bing.com/image</News:Image>
<News:ImageSize>w={0}&h={1}&c=7</News:ImageSize>
<News:ImageKeepOriginalRatio></News:ImageKeepOriginalRatio>
<News:ImageMaxWidth>620</News:ImageMaxWidth>
<News:ImageMaxHeight>413</News:ImageMaxHeight>
</item>
</channel>
</rss>""" # noqa
response = mock.Mock(content=html.encode('utf-8'))
results = bing_news.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 1)
self.assertEqual(results[0]['title'], 'Title')
self.assertEqual(results[0]['url'], 'http://another.url.of.article/')
self.assertEqual(results[0]['content'], 'Article Content')
self.assertEqual(results[0]['img_src'], 'http://another.bing.com/image')
html = """<?xml version="1.0" encoding="utf-8" ?>
<rss version="2.0" xmlns:News="https://www.bing.com:443/news/search?q=python&setmkt=en-US&first=1&format=RSS">
<channel>
<title>python - Bing News</title>
<link>https://www.bing.com:443/news/search?q=python&setmkt=en-US&first=1&format=RSS</link>
<description>Search results</description>
<image>
<url>http://10.53.64.9/rsslogo.gif</url>
<title>test</title>
<link>https://www.bing.com:443/news/search?q=test&setmkt=en-US&first=1&format=RSS</link>
</image>
</channel>
</rss>""" # noqa
response = mock.Mock(content=html.encode('utf-8'))
results = bing_news.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 0)
html = """<?xml version="1.0" encoding="utf-8" ?>gabarge"""
response = mock.Mock(content=html.encode('utf-8'))
self.assertRaises(lxml.etree.XMLSyntaxError, bing_news.response, response)
| jcherqui/searx | tests/unit/engines/test_bing_news.py | Python | agpl-3.0 | 7,039 | 0.000426 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
"jsonschema"
# TODO: put package requirements here
]
test_requirements = [
"jsonschema"
# TODO: put package test requirements here
]
setup(
name='pycorm',
version='0.2.13',
description="a pico orm that uses jsonschema",
long_description=readme + '\n\n' + history,
author="Johannes Valbjørn",
author_email='[email protected]',
url='https://github.com/sloev/pycorm',
packages=[
'pycorm',
],
package_dir={'pycorm':
'pycorm'},
include_package_data=True,
install_requires=requirements,
license="MIT",
zip_safe=False,
keywords='pycorm',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: ISC License (ISCL)',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7'
],
test_suite='tests',
tests_require=test_requirements
)
| sloev/pycorm | setup.py | Python | mit | 1,378 | 0.000726 |
from sklearn2sql_heroku.tests.classification import generic as class_gen
class_gen.test_model("DummyClassifier" , "BreastCancer" , "mysql")
| antoinecarme/sklearn2sql_heroku | tests/classification/BreastCancer/ws_BreastCancer_DummyClassifier_mysql_code_gen.py | Python | bsd-3-clause | 142 | 0.014085 |
import io
import logging
from collections import defaultdict
from numpy import linspace
from scipy import interp
from sklearn.metrics import (auc, average_precision_score,
precision_recall_curve)
from tabulate import tabulate
from .test_statistic import ClassifierStatistic, TestStatistic
logger = logging.getLogger(__name__)
class precision_recall(ClassifierStatistic):
"""
Constructs a precision/recall statistics generator.
See https://en.wikipedia.org/wiki/Precision_and_recall
When applied to a test set, the `score()` method will return a dictionary
with four fields:
* auc: the area under the precision-recall curve
* precisions: a list of precisions
* recalls: a list of recalls
* thresholds: a list of probability thresholds
"""
@classmethod
def _single_class_stat(cls, scores, labels, comparison_label):
y_proba = [s['probability'][comparison_label] for s in scores]
y_true = [l == comparison_label for l in labels]
precisions, recalls, thresholds = \
precision_recall_curve(y_true, y_proba)
return {
'auc': average_precision_score(y_true, y_proba),
'precisions': list(precisions),
'recalls': list(recalls)
}
def merge(self, stats):
individual_auc = defaultdict(list)
label_sum_recalls = defaultdict(float)
for stat in stats:
for label, label_stat in stat.items():
individual_auc[label].append(label_stat['auc'])
precisions, recalls = \
label_stat['precisions'], label_stat['recalls']
label_sum_recalls[label] += \
interp(linspace(0, 1, 100), precisions, recalls)
merged_stat = {}
for label, sum_recalls in label_sum_recalls.items():
mean_recalls = sum_recalls / len(stats)
interp_auc = auc(linspace(0, 1, 100), mean_recalls)
logger.debug("interp_auc={0}, individual_auc={1}"
.format(interp_auc, individual_auc[label]))
merged_stat[label] = {
'auc': interp_auc,
'precisions': list(linspace(0, 1, 100)),
'recalls': list(mean_recalls)
}
return merged_stat
@classmethod
def format(cls, stat, format="str"):
if format == "str":
return cls.format_str(stat)
elif format == "json":
return {label: {'auc': round(ss['auc'], 3)}
for label, ss in stat.items()}
else:
raise TypeError("Format '{0}' not available for {1}."
.format(format, cls.__name__))
@classmethod
def format_str(cls, stats):
formatted = io.StringIO()
if 'auc' in stats and 'thresholds' in stats:
# Single class
formatted.write("PR-AUC: {0}".format(round(stats['auc'], 3)))
else:
# multiple classes
formatted.write("PR-AUC:\n")
table_data = [(repr(label), round(stats[label]['auc'], 3))
for label in sorted(stats.keys())]
formatted.write("".join(["\t" + line + "\n" for line in
tabulate(table_data).split("\n")]))
return formatted.getvalue()
TestStatistic.register("precision_recall", precision_recall)
TestStatistic.register("pr", precision_recall) # Backwards compatible
| yafeunteun/wikipedia-spam-classifier | revscoring/revscoring/scorer_models/test_statistics/precision_recall.py | Python | mit | 3,506 | 0.001141 |
from datetime import datetime
from casexml.apps.case.xml.generator import date_to_xml_string
DUMMY_ID = "foo"
DUMMY_USERNAME = "mclovin"
DUMMY_PASSWORD = "changeme"
DUMMY_PROJECT = "domain"
def dummy_user_xml(user=None):
username = user.username if user else DUMMY_USERNAME
password = user.password if user else DUMMY_PASSWORD
user_id = user.user_id if user else DUMMY_ID
date_joined = user.date_joined if user else datetime.utcnow()
project = user.domain if user else DUMMY_PROJECT
return """
<Registration xmlns="http://openrosa.org/user/registration">
<username>{}</username>
<password>{}</password>
<uuid>{}</uuid>
<date>{}</date>
<user_data>
<data key="commcare_first_name"/>
<data key="commcare_last_name"/>
<data key="commcare_phone_number"/>
<data key="commcare_project">{}</data>
<data key="something">arbitrary</data>
</user_data>
</Registration>""".format(
username,
password,
user_id,
date_to_xml_string(date_joined),
project
)
DUMMY_RESTORE_XML_TEMPLATE = ("""
<OpenRosaResponse xmlns="http://openrosa.org/http/response"%(items_xml)s>
<message nature="ota_restore_success">%(message)s</message>
<Sync xmlns="http://commcarehq.org/sync">
<restore_id>%(restore_id)s</restore_id>
</Sync>
%(user_xml)s
%(case_xml)s
</OpenRosaResponse>
""")
def dummy_restore_xml(restore_id, case_xml="", items=None, user=None):
return DUMMY_RESTORE_XML_TEMPLATE % {
"restore_id": restore_id,
"items_xml": '' if items is None else (' items="%s"' % items),
"user_xml": dummy_user_xml(user),
"case_xml": case_xml,
"message": "Successfully restored account mclovin!"
}
| dimagi/commcare-hq | corehq/ex-submodules/casexml/apps/phone/tests/dummy.py | Python | bsd-3-clause | 1,824 | 0.000548 |
#!/usr/bin/env python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from google.api_core.client_options import ClientOptions
import os
import logging
import googleapiclient.discovery
logging.basicConfig()
# In this sample, we will rely on 6 features only:
# trip_miles trip_seconds fare
# trip_start_month trip_start_hour trip_start_day
instances = [
[1.1, 420, 625, 8, 16, 3],
[0.3, 960, 1485, 3, 22, 2],
[1.0, 300, 505, 1, 1, 1],
]
PROJECT_ID = os.getenv('PROJECT_ID')
MODEL_NAME = os.getenv('MODEL_NAME')
MODEL_VERSION = os.getenv('MODEL_VERSION')
REGION = os.getenv('REGION')
logging.info('PROJECT_ID: %s', PROJECT_ID)
logging.info('MODEL_NAME: %s', MODEL_NAME)
logging.info('MODEL_VERSION: %s', MODEL_VERSION)
logging.info('REGION: %s', REGION)
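# Illustrative invocation (the values below are placeholders):
#
#   PROJECT_ID=my-project MODEL_NAME=my_model MODEL_VERSION=v1 \
#   REGION=us-central1 python predict.py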
prefix = "{}-ml".format(REGION) if REGION else "ml"
api_endpoint = "https://{}.googleapis.com".format(prefix)
client_options = ClientOptions(api_endpoint=api_endpoint)
# Use Regional support
service = googleapiclient.discovery.build('ml', 'v1',
cache_discovery=False,
client_options=client_options)
name = 'projects/{}/models/{}/versions/{}'.format(PROJECT_ID, MODEL_NAME,
MODEL_VERSION)
response = service.projects().predict(
name=name,
body={'instances': instances}
).execute()
if 'error' in response:
logging.error(response['error'])
else:
print(response['predictions'])
| GoogleCloudPlatform/ai-platform-samples | prediction/xgboost/structured/base/prediction/predict.py | Python | apache-2.0 | 2,134 | 0 |
# -*- coding: utf-8 -*-
"""File containing a Windows Registry plugin to parse the AMCache.hve file."""
from __future__ import unicode_literals
import pyregf
from dfdatetime import filetime
from dfdatetime import posix_time
from dfwinreg import definitions as dfwinreg_definitions
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import interface
from plaso.parsers import manager
class AMCacheFileEventData(events.EventData):
"""AMCache file event data.
Attributes:
    company_name (str): name of the company that created the product the file belongs to.
file_description (str): description of file.
file_reference (str): file system file reference, for example 9-1 (MFT
entry - sequence number).
file_size (int): size of file in bytes.
file_version (str): version of file.
full_path (str): full path of file.
language_code (int): language code of file.
product_name (str): product name file belongs to.
program_identifier (str): GUID of entry under Root/Program key file belongs
to.
sha1 (str): SHA-1 of file.
"""
DATA_TYPE = 'windows:registry:amcache'
def __init__(self):
"""Initializes event data."""
super(AMCacheFileEventData, self).__init__(data_type=self.DATA_TYPE)
self.company_name = None
self.file_description = None
self.file_reference = None
self.file_size = None
self.file_version = None
self.full_path = None
self.language_code = None
self.product_name = None
self.program_identifier = None
self.sha1 = None
class AMCacheProgramEventData(events.EventData):
"""AMCache programs event data.
Attributes:
entry_type (str): type of entry (usually AddRemoveProgram).
file_paths (str): file paths of installed program.
files (str): list of files belonging to program.
    language_code (int): language code of the program.
msi_package_code (str): MSI package code of program.
msi_product_code (str): MSI product code of program.
name (str): name of installed program.
package_code (str): package code of program.
product_code (str): product code of program.
publisher (str): publisher of program.
uninstall_key (str): unicode string of uninstall registry key for program.
version (str): version of program.
"""
DATA_TYPE = 'windows:registry:amcache:programs'
def __init__(self):
"""Initializes event data."""
super(AMCacheProgramEventData, self).__init__(data_type=self.DATA_TYPE)
self.entry_type = None
self.file_paths = None
self.files = None
self.language_code = None
self.msi_package_code = None
self.msi_product_code = None
self.name = None
self.package_code = None
self.product_code = None
self.publisher = None
self.uninstall_key = None
self.version = None
class AMCacheParser(interface.FileObjectParser):
"""AMCache Registry plugin for recently run programs."""
NAME = 'amcache'
DATA_FORMAT = 'AMCache Windows NT Registry (AMCache.hve) file'
# Contains: {value name: attribute name}
_FILE_REFERENCE_KEY_VALUES = {
'0': 'product_name',
'1': 'company_name',
'3': 'language_code',
'5': 'file_version',
'6': 'file_size',
'c': 'file_description',
'15': 'full_path',
'100': 'program_identifier',
'101': 'sha1'}
_AMCACHE_COMPILATION_TIME = 'f'
_AMCACHE_FILE_MODIFICATION_TIME = '11'
_AMCACHE_FILE_CREATION_TIME = '12'
_AMCACHE_ENTRY_WRITE_TIME = '17'
_AMCACHE_P_INSTALLATION_TIME = 'a'
_AMCACHE_P_FILES = 'Files'
_PRODUCT_KEY_VALUES = {
'0': 'name',
'1': 'version',
'2': 'publisher',
'3': 'language_code',
'6': 'entry_type',
'7': 'uninstall_key',
'd': 'file_paths',
'f': 'product_code',
'10': 'package_code',
'11': 'msi_product_code',
'12': 'msi_package_code',
}
#TODO Add GetFormatSpecification when issues are fixed with adding
# multiple parsers for the same file format (in this case regf files)
# AddNewSignature ->
# b'\x41\x00\x6d\x00\x63\x00\x61\x00\x63\x00\x68\x00\x65', offset=88
def _GetValueDataAsObject(self, parser_mediator, value):
"""Retrieves the value data as an object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
value (pyregf_value): value.
Returns:
object: data as a Python type or None if the value cannot be read.
"""
try:
if value.type in (
dfwinreg_definitions.REG_SZ,
dfwinreg_definitions.REG_EXPAND_SZ,
dfwinreg_definitions.REG_LINK):
value_data = value.get_data_as_string()
elif value.type in (
dfwinreg_definitions.REG_DWORD,
dfwinreg_definitions.REG_DWORD_BIG_ENDIAN,
dfwinreg_definitions.REG_QWORD):
value_data = value.get_data_as_integer()
elif value.type == dfwinreg_definitions.REG_MULTI_SZ:
value_data = list(value.get_data_as_multi_string())
else:
value_data = value.data
except (IOError, OverflowError) as exception:
parser_mediator.ProduceExtractionWarning(
'Unable to read data from value: {0:s} with error: {1!s}'.format(
value.name, exception))
return None
return value_data
def _ParseFileKey(self, parser_mediator, file_key):
"""Parses a Root\\File key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_key (pyregf.key): the File key.
"""
for volume_key in file_key.sub_keys:
for file_reference_key in volume_key.sub_keys:
self._ParseFileReferenceKey(parser_mediator, file_reference_key)
def _ParseFileReferenceKey(self, parser_mediator, file_reference_key):
"""Parses a file reference key (sub key of Root\\File\\%VOLUME%) for events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_reference_key (pyregf.key): file reference key.
"""
event_data = AMCacheFileEventData()
try:
if '0000' in file_reference_key.name:
# A NTFS file is a combination of MFT entry and sequence number.
sequence_number, mft_entry = file_reference_key.name.split('0000')
mft_entry = int(mft_entry, 16)
sequence_number = int(sequence_number, 16)
event_data.file_reference = '{0:d}-{1:d}'.format(
mft_entry, sequence_number)
else:
# A FAT file is a single number.
file_reference = int(file_reference_key.name, 16)
event_data.file_reference = '{0:d}'.format(file_reference)
except (ValueError, TypeError):
pass
for value_name, attribute_name in self._FILE_REFERENCE_KEY_VALUES.items():
value = file_reference_key.get_value_by_name(value_name)
if not value:
continue
value_data = self._GetValueDataAsObject(parser_mediator, value)
if attribute_name == 'sha1' and value_data.startswith('0000'):
        # Strip off the 4 leading zeros from the sha1 hash.
value_data = value_data[4:]
setattr(event_data, attribute_name, value_data)
amcache_time_value = file_reference_key.get_value_by_name(
self._AMCACHE_ENTRY_WRITE_TIME)
if amcache_time_value:
amcache_time = filetime.Filetime(amcache_time_value.get_data_as_integer())
event = time_events.DateTimeValuesEvent(
amcache_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
creation_time_value = file_reference_key.get_value_by_name(
self._AMCACHE_FILE_CREATION_TIME)
if creation_time_value:
creation_time = filetime.Filetime(
creation_time_value.get_data_as_integer())
event = time_events.DateTimeValuesEvent(
creation_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
modification_time_value = file_reference_key.get_value_by_name(
self._AMCACHE_FILE_MODIFICATION_TIME)
if modification_time_value:
modification_time = filetime.Filetime(
modification_time_value.get_data_as_integer())
event = time_events.DateTimeValuesEvent(
modification_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
compilation_time_value = file_reference_key.get_value_by_name(
self._AMCACHE_COMPILATION_TIME)
if compilation_time_value:
link_time = posix_time.PosixTime(
compilation_time_value.get_data_as_integer())
event = time_events.DateTimeValuesEvent(
link_time, definitions.TIME_DESCRIPTION_CHANGE)
parser_mediator.ProduceEventWithEventData(event, event_data)
def _ParseProgramKey(self, parser_mediator, program_key):
"""Parses a program key (a sub key of Root\\Programs) for events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
program_key (pyregf_key): program key.
"""
event_data = AMCacheProgramEventData()
for value_name, attribute_name in self._PRODUCT_KEY_VALUES.items():
value = program_key.get_value_by_name(value_name)
if not value:
continue
value_data = self._GetValueDataAsObject(parser_mediator, value)
setattr(event_data, attribute_name, value_data)
installation_time_value = program_key.get_value_by_name(
self._AMCACHE_P_INSTALLATION_TIME)
if installation_time_value:
installation_time = posix_time.PosixTime(
installation_time_value.get_data_as_integer())
event = time_events.DateTimeValuesEvent(
installation_time, definitions.TIME_DESCRIPTION_INSTALLATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
def _ParseProgramsKey(self, parser_mediator, programs_key):
"""Parses a Root\\Programs key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
programs_key (pyregf.key): the Programs key.
"""
for program_key in programs_key.sub_keys:
self._ParseProgramKey(parser_mediator, program_key)
def ParseFileObject(self, parser_mediator, file_object):
"""Parses an AMCache.hve file-like object for events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
"""
regf_file = pyregf.file()
try:
regf_file.open_file_object(file_object)
except IOError:
# The error is currently ignored -> see TODO above related to the
# fixing of handling multiple parsers for the same file format.
return
root_key = regf_file.get_key_by_path('Root')
if root_key:
file_key = root_key.get_sub_key_by_path('File')
if file_key:
self._ParseFileKey(parser_mediator, file_key)
programs_key = root_key.get_sub_key_by_path('Programs')
if programs_key:
self._ParseProgramsKey(parser_mediator, programs_key)
regf_file.close()
manager.ParsersManager.RegisterParser(AMCacheParser)
| rgayon/plaso | plaso/parsers/amcache.py | Python | apache-2.0 | 11,498 | 0.006784 |
from tornado.options import options, logging
from itertools import product
import json
import tornado.web
import pymongo
import csv
class MongoDbLookupHandler(tornado.web.RequestHandler):
def get(self, identity):
logging.info("uri=%s [%s] [%s]" % (self.request.uri, identity, self.request.arguments))
ids = identity.split("/")
db_name = ids[1]
collection = self.open_collection(db_name, ids[2])
# TODO : Improve this logic to correctly parse arguments and convert to a proper mongo DB query
args = self.request.arguments
query = {}
case_sensitive_lookups = frozenset(options.case_sensitive_lookups)
normalize_fn = None
if db_name in case_sensitive_lookups:
normalize_fn = lambda x: x
else:
normalize_fn = lambda x: x.lower()
for key in args.keys():
if key != "output":
iargs = args[key]
if len(iargs) == 1:
query[key] = normalize_fn(args[key][0])
else:
query[key] = {"$in": map(normalize_fn, args[key])}
query_limit = options.mongo_lookup_query_limit
json_items = []
for idx, item in enumerate(collection.find(query)):
if idx > query_limit:
break
json_item = self.jsonable_item(item)
#json_item["uri"] = self.request.uri + "/" + json_item["id"]
json_items.append(json_item)
if self.get_argument("output", "json") == "tsv":
WriteTsv(self, json_items)
self.set_status(200)
return
self.write({"items": json_items})
self.set_status(200)
return
def jsonable_item(self, item):
json_item = {}
for k in item.iterkeys():
if k == "_id":
json_item["id"] = str(item["_id"])
elif "[]" in k:
json_item[k.replace("[]", "")] = item[k]
else:
json_item[k] = item[k]
return json_item
def open_collection(self, db_name, collection_name):
#if options.verbose:
logging.info("open_collection(%s)" % collection_name)
connection = pymongo.Connection(options.mongo_lookup_uri)
database = connection[db_name]
return database[collection_name]
class MongoDbPairwiseLookupHandler(tornado.web.RequestHandler):
def get(self, identity):
logging.info("uri=%s [%s] [%s]" % (self.request.uri, identity, self.request.arguments))
args = self.request.arguments
ids = identity.split("/")
feature_matrix_name = ids[1]
gene_label_1 = args['gene1'][0]
gene_label_2 = args['gene2'][0]
cancer_label = args['cancer'][0].lower()
# Get feature IDs
fmx_collection = self.open_feature_matrix_collection("qed_lookups", "fmx_" + feature_matrix_name)
pairwise_collection = self.open_pairwise_collection("qed_lookups", "pw_" + feature_matrix_name + "_" + cancer_label)
features_1 = filter(self.feature_filter_fn, fmx_collection.find({"cancer": cancer_label, "gene": gene_label_1}))
features_2 = filter(self.feature_filter_fn, fmx_collection.find({"cancer": cancer_label, "gene": gene_label_2}))
feature_ids_1 = map(lambda f: f['id'], features_1)
feature_ids_2 = map(lambda f: f['id'], features_2)
# Get pairwise values
pairwise_results = []
for id1, id2 in product(feature_ids_1, feature_ids_2):
pw = self.get_pairwise_result(pairwise_collection, id1, id2)
if pw is not None:
pairwise_results.append(pw)
result = {
"features": {
gene_label_1: map(self.jsonable_item, features_1),
gene_label_2: map(self.jsonable_item, features_2)
},
"pairwise_results": map(self.jsonable_item, pairwise_results)
}
log_msg = "Features found: "
log_msg += gene_label_1 + ": " + str(len(feature_ids_1))
log_msg += "\t" + gene_label_2 + ": " + str(len(feature_ids_2))
log_msg += "\tPairwise results: " + str(len(pairwise_results))
logging.info(log_msg)
self.write(json.dumps(result))
self.set_status(200)
def feature_filter_fn(self, feature):
fields = feature['id'].split(':')
source = fields[1]
if source == 'METH' or source == 'CNVR' or source == 'GEXP':
return True
elif source == 'GNAB' and fields[-1] == 'y_n_somatic':
return True
else:
return False
def jsonable_item(self, item):
json_item = {}
for k in item.iterkeys():
if k == "_id":
json_item["id"] = str(item["_id"])
elif "[]" in k:
json_item[k.replace("[]", "")] = item[k]
else:
json_item[k] = item[k]
return json_item
def get_pairwise_result(self, collection, id1, id2):
res1 = collection.find_one({"target": id1, "predictor": id2})
res2 = collection.find_one({"target": id2, "predictor": id1})
if res1 is not None:
return res1
elif res2 is not None:
return res2
else:
return None
def open_feature_matrix_collection(self, db_name, collection_name):
logging.info("open_collection(%s)" % collection_name)
return self.open_collection(options.mongo_lookup_uri, db_name, collection_name)
def open_pairwise_collection(self, db_name, collection_name):
logging.info("open_collection(%s)" % collection_name)
return self.open_collection(options.mongo_pairwise_lookup_uri, db_name, collection_name)
def open_collection(self, mongo_uri, db_name, collection_name):
logging.info("open_collection(%s)" % collection_name)
connection = pymongo.Connection(mongo_uri)
database = connection[db_name]
return database[collection_name]
class MongoDbMutSigHandler(tornado.web.RequestHandler):
def get(self, identity):
logging.info("uri=%s [%s] [%s]" % (self.request.uri, identity, self.request.arguments))
args = self.request.arguments
query = {}
for key in args.keys():
if key != "cancer":
continue
iargs = args[key]
if len(iargs) == 1:
query[key] = args[key][0].lower()
else:
query[key] = {"$in": map(lambda x: x.lower(), args[key])}
if "max_rank" not in args:
query["rank"] = {"$lt": 21}
else:
query["rank"] = {"$lt": int(args["max_rank"][0]) + 1}
collection = self.open_collection("qed_lookups", "mutsig_rankings")
items = []
if "cancer" in query:
items = collection.find(query)
json_items = map(self.jsonable_item, items)
if self.get_argument("output", "json") == "tsv":
WriteTsv(self, json_items)
self.set_status(200)
return
self.write(json.dumps({ "items": json_items }))
self.set_status(200)
def jsonable_item(self, item):
json_item = {}
for k in item.iterkeys():
if k == "_id":
json_item["id"] = str(item["_id"])
elif "[]" in k:
json_item[k.replace("[]", "")] = item[k]
else:
json_item[k] = item[k]
return json_item
def open_collection(self, db_name, collection_name):
logging.info("open_collection(%s)" % collection_name)
connection = pymongo.Connection(options.mongo_lookup_uri)
database = connection[db_name]
return database[collection_name]
class MongoDbFeaturesByLocationHandler(tornado.web.RequestHandler):
def get(self, identity):
logging.info("uri=%s [%s] [%s]" % (self.request.uri, identity, self.request.arguments))
args = self.request.arguments
ids = identity.split("/")
query = {
"chr": str(args["chr"][0]),
"start": {"$gt": int(args["start"][0])},
"end": {"$lt": int(args["end"][0])},
"cancer": {"$in": map(lambda x: x.lower(), args["cancer"])},
"source": {"$in": map(lambda x: x.lower(), args["source"])}
}
logging.info("query=%s" % str(query))
query_limit = options.mongo_lookup_query_limit
collection = self.open_collection(ids[1], ids[2])
items = []
for idx, item in enumerate(collection.find(query, {'values':0})):
if idx > query_limit: break
items.append(item)
self.write(json.dumps({ "items": map(self.jsonable_item, items) }))
self.set_status(200)
def jsonable_item(self, item):
json_item = {}
for k in item.iterkeys():
if k == "_id":
json_item["id"] = str(item["_id"])
elif "[]" in k:
json_item[k.replace("[]", "")] = item[k]
else:
json_item[k] = item[k]
return json_item
def open_collection(self, db_name, collection_name):
logging.info("open_collection(%s)" % collection_name)
connection = pymongo.Connection(options.mongo_lookup_uri)
database = connection[db_name]
return database[collection_name]
def WriteTsv(handler, items):
handler.set_header("Content-Type", "text/tab-separated-values")
handler.set_header("Content-Disposition", "attachment; filename='data_export.tsv'")
tsvwriter = csv.writer(handler, delimiter='\t')
excludedheaders = ["uri","id","p_ns_s"]
if len(items) > 0:
colheaders = [a for a in items[0].keys() if a not in excludedheaders]
tsvwriter.writerow(colheaders)
for item in items:
vals = []
for colheader in colheaders:
val = item[colheader]
if isinstance(val, (list, tuple)):
vals.append(len(val))
else:
vals.append(val)
tsvwriter.writerow(vals)
| cancerregulome/GeneSpot_1.0 | websvcs/endpoints/storage/mongodb_lookups.py | Python | mit | 10,153 | 0.002659 |
from ..core import mi, nmi
from .base import (AlphaAngleBaseMetric, ContactBaseMetric, DihedralBaseMetric,
BaseMetric)
import numpy as np
from itertools import combinations_with_replacement as combinations
from multiprocessing import Pool
from contextlib import closing
__all__ = ['AlphaAngleMutualInformation', 'ContactMutualInformation',
'DihedralMutualInformation']
class MutualInformationBase(BaseMetric):
"""Base mutual information object"""
def _partial_mutinf(self, p):
i, j = p
return self._est(self.n_bins,
self.data[i].values,
self.shuffled_data[j].values,
rng=self.rng,
method=self.method)
def _exec(self):
M = np.zeros((self.labels.size, self.labels.size))
with closing(Pool(processes=self.n_threads)) as pool:
values = pool.map(self._partial_mutinf,
combinations(self.labels, 2))
pool.terminate()
idx = np.triu_indices_from(M)
M[idx] = values
return M + M.T - np.diag(M.diagonal())
def __init__(self, normed=True, **kwargs):
self._est = nmi if normed else mi
self.partial_transform.__func__.__doc__ = """
Partial transform a mdtraj.Trajectory into an n_residue by n_residue
matrix of mutual information scores.
Parameters
----------
traj : mdtraj.Trajectory
Trajectory to transform
shuffle : int
Number of shuffle iterations (default: 0)
verbose : bool
Whether to display performance
Returns
-------
result : np.ndarray, shape = (n_residue, n_residue)
Mutual information matrix
"""
super(MutualInformationBase, self).__init__(**kwargs)
class AlphaAngleMutualInformation(AlphaAngleBaseMetric, MutualInformationBase):
"""Mutual information calculations for alpha angles"""
class ContactMutualInformation(ContactBaseMetric, MutualInformationBase):
"""Mutual information calculations for contacts"""
class DihedralMutualInformation(DihedralBaseMetric, MutualInformationBase):
"""Mutual information calculations for dihedral angles"""
| cxhernandez/mdentropy | mdentropy/metrics/mutinf.py | Python | gpl-3.0 | 2,347 | 0 |
import model
EmployeeColumns = ["name", "role_id", "is_active", "street_address", "city", "state", "zip", "phone"]
class StaffMember(object):
"""
Represents a staff member
"""
def __init__(self, name, roleId, isActive, street=None, city=None, state=None, zipCode=None, phone=None):
"""
Creates a new staff member
"""
self.name = name
self.street = street
self.city = city
self.state = state
self.zipCode = zipCode
self.phone = phone
self.roleId = roleId
self.isActive = isActive
def __repr__(self):
return "<Staff> %s, %i, %s, %s, %s, %s, %s, %s" % (self.name, self.roleId, self.isActive, self.street, self.city, self.state, self.zipCode, self.phone)
def __eq__(self, other):
return self.name == other.name \
and self.street == other.street \
and self.city == other.city \
and self.state == other.state \
and self.zipCode == other.zipCode \
and self.phone == other.phone \
and self.roleId == other.roleId \
and self.isActive == other.isActive
def fields(self):
"""
Returns a dictionary of all the classes fields
"""
return model.getFieldMap(self)
def flush(self, connection, oldName=None):
"""
        Updates or creates the staff member in the database
"""
cursor = connection.cursor()
#map the database fields to this objects attributes
sqlMap = {"name":"name", "role_id":"roleId", "is_active":"isActive",
"street_address":"street", "city":"city",
"zip":"zipCode", "phone":"phone", "state":"state"}
#map the data
params = model.createSqlParams(EmployeeColumns, sqlMap, self)
#if a old name was given then do an update statement
if oldName:
query = model.updateString("employee", EmployeeColumns, "name = %(oldName)s")
params["oldName"] = oldName
#else do a create statement
else:
query = model.insertString("employee", EmployeeColumns)
cursor.execute(query, params)
connection.commit()
cursor.close()
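# Illustrative usage (``connection`` is assumed to be a DB-API connection
# whose cursors accept pyformat parameters, e.g. MySQLdb):
#
#     member = StaffMember("Jane Doe", 2, True, phone="555-0100")
#     member.flush(connection)                      # insert a new employee row
#     member.phone = "555-0199"
#     member.flush(connection, oldName="Jane Doe")  # update the existing row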
| jworr/scheduler | model/staff.py | Python | gpl-2.0 | 1,931 | 0.052822 |
import random
import time
import logging
import sys
from os.path import dirname
sys.path.append(dirname(dirname(dirname(__file__))))
import hazelcast
def do_benchmark():
THREAD_COUNT = 1
ENTRY_COUNT = 10 * 1000
VALUE_SIZE = 10000
GET_PERCENTAGE = 40
PUT_PERCENTAGE = 40
    logging.basicConfig(format='%(asctime)s%(msecs)03d [%(name)s] %(levelname)s: %(message)s', datefmt="%H:%M:%S,")
logging.getLogger().setLevel(logging.INFO)
logger = logging.getLogger("main")
config = hazelcast.ClientConfig()
config.group_config.name = "dev"
config.group_config.password = "dev-pass"
try:
from tests.hzrc.client import HzRemoteController
rc = HzRemoteController('127.0.0.1', '9701')
if not rc.ping():
logger.info("Remote Controller Server not running... exiting.")
exit()
logger.info("Remote Controller Server OK...")
rc_cluster = rc.createCluster(None, None)
rc_member = rc.startMember(rc_cluster.id)
config.network.addresses.append('{}:{}'.format(rc_member.host, rc_member.port))
except (ImportError, NameError):
config.network.addresses.append('127.0.0.1')
client = hazelcast.HazelcastClient(config)
my_map = client.get_map("default")
for i in range(0, 1000):
key = int(random.random() * ENTRY_COUNT)
operation = int(random.random() * 100)
if operation < GET_PERCENTAGE:
my_map.get(key)
elif operation < GET_PERCENTAGE + PUT_PERCENTAGE:
my_map.put(key, "x" * VALUE_SIZE)
else:
my_map.remove(key)
if __name__ == '__main__':
start = time.time()
do_benchmark()
time_taken = time.time() - start
print("Took %s seconds" % (time_taken))
| hazelcast/hazelcast-python-client | benchmarks/map_bench.py | Python | apache-2.0 | 1,776 | 0.001689 |
#
# helpers.py
#
# Copyright (C) 2011, 2013, 2015 Uli Fouquet
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""
Helpers for trivial jobs.
"""
import base64
import cssutils
import logging
import os
import re
import shutil
import tempfile
import zipfile
from bs4 import BeautifulSoup, UnicodeDammit
try:
from cStringIO import StringIO # Python 2.x
except ImportError: # pragma: no cover
from io import StringIO # Python 3.x
from pkg_resources import iter_entry_points
try:
from urlparse import urlparse # Python 2.x
except ImportError: # pragma: no cover
from urllib.parse import urlparse # Python 3.x
from six import string_types
try:
basestring = basestring # Python 2.x
except NameError: # pragma: no cover
basestring = (str, bytes) # Python 3.x
def copytree(src, dst, symlinks=False, ignore=None):
"""Recursively copy an entire directory tree rooted at `src`. The
destination directory, named by `dst`, might exist already; if
    not, then it will be created as well as missing parent
directories. Permissions and times of directories are copied with
:func:`shutil.copystat`, individual files are copied using
:func:`shutil.copy2`.
If `symlinks` is true, symbolic links in the source tree are
represented as symbolic links in the new tree; if false or
omitted, the contents of the linked files are copied to the new
tree.
If ignore is given, it must be a callable that will receive as its
arguments the directory being visited by :func:`shutil.copytree`,
and a list of its contents, as returned by
:func:`os.listdir`. Since :func:`copytree` is called recursively,
the ignore callable will be called once for each directory that is
copied. The callable must return a sequence of directory and file
names relative to the current directory (i.e. a subset of the
items in its second argument); these names will then be ignored in
the copy process. :func:`shutil.ignore_patterns` can be used to
create such a callable that ignores names based on glob-style
patterns.
If exception(s) occur, a :exc:`shutil.Error` is raised with a list
of reasons.
.. note:: This is a plain copy of the :func:`shutil.copytree`
implementation as provided with Python >= 2.6. There is,
however, one difference: this version will try to go on
if the destination directory already exists.
        It is the caller's responsibility to make sure that the
`dst` directory is in a proper state for
:func:`copytree`.
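    For example (illustrative; the paths are placeholders)::
        copytree('/tmp/source-tree', '/tmp/target-tree',
                 ignore=shutil.ignore_patterns('*.pyc', 'tmp*'))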
"""
if src in dst:
raise ValueError("Cannot copy %s to %s: trees are nested" % (
src, dst))
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
try:
os.makedirs(dst)
except os.error:
pass
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if symlinks and os.path.islink(srcname):
linkto = os.readlink(srcname)
os.symlink(linkto, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore)
else:
shutil.copy2(srcname, dstname)
# XXX What about devices, sockets etc.?
except (IOError, os.error) as why:
errors.append((srcname, dstname, str(why)))
# catch the Error from the recursive copytree so that we can
# continue with other files
except (shutil.Error) as why: # pragma: no cover
errors.append((srcname, dstname, str(why)))
try:
shutil.copystat(src, dst)
except (OSError) as why: # pragma: no cover
errors.extend((src, dst, str(why)))
if errors:
raise shutil.Error(errors)
def copy_to_secure_location(src):
"""Copy `src` to a temporay location.
If `src` is a file, the complete directory containing this file
will be copied. If `src` is a directory this directory will be
copied.
Returns the path of the newly created directory.
To copy the filetree we use :func:`shutil.copytree` with no
additional parameters. That means that symlinks won't be copied
and other restrictions apply. See :func:`shutil.copytree` docs to
check.
"""
if os.path.isfile(src):
src = os.path.dirname(src)
dst = tempfile.mkdtemp()
copytree(src, dst)
return dst
def get_entry_points(group):
"""Get all entry point plugins registered for group `group`.
The found entry points are returned as a dict with ``<NAME>`` as
key and ``<PLUGIN>`` as value where ``<NAME>`` is the name under
which the respective plugin was registered with setuptools and
``<PLUGIN>`` is the registered component itself.
"""
return dict(
[(x.name, x.load())
for x in iter_entry_points(group=group)])
def unzip(path, dst_dir):
"""Unzip the files stored in zipfile `path` in `dst_dir`.
`dst_dir` is the directory where all contents of the ZIP file is
stored into.
"""
zf = zipfile.ZipFile(path)
# Create all dirs
dirs = sorted([name for name in zf.namelist() if name.endswith('/')])
for dir in dirs:
new_dir = os.path.join(dst_dir, dir)
if not os.path.exists(new_dir):
os.mkdir(new_dir)
# Create all files
for name in zf.namelist():
if name.endswith('/'):
continue
outfile = open(os.path.join(dst_dir, name), 'wb')
outfile.write(zf.read(name))
outfile.flush()
outfile.close()
zf.close()
return
def zip(path):
"""Create a ZIP file out of `path`.
If `path` points to a file then a ZIP archive is created with this
file in compressed form in a newly created directory. The name of
the created zipfile is the basename of the input file with a
``.zip`` extension appended.
If `path` points to a directory then files and directories
_inside_ this directory are added to the archive.
Also empty directories are added although it cannot be guaranteed
that these entries are recovered correctly later on with all tools
and utilities on all platforms.
    .. note:: It is the caller's responsibility to remove the directory
the zipfile is created in after usage.
"""
if not os.path.isdir(path) and not os.path.isfile(path):
raise ValueError('Must be an existing path or directory: %s' % path)
new_dir = tempfile.mkdtemp()
basename = os.path.basename(path)
new_path = os.path.join(new_dir, basename) + '.zip'
zout = zipfile.ZipFile(new_path, 'w', zipfile.ZIP_DEFLATED)
if os.path.isfile(path):
zout.write(path, basename)
zout.close()
return new_path
for root, dirs, files in os.walk(path):
for dir in dirs:
# XXX: Maybe the wrong way to store directories?
dir_path = os.path.join(root, dir)
arc_name = dir_path[len(path) + 1:] + '/'
info = zipfile.ZipInfo(arc_name)
zout.writestr(info, '')
for file in files:
file_path = os.path.join(root, file)
arc_name = file_path[len(path) + 1:]
zout.write(file_path, arc_name)
zout.close()
return new_path
def remove_file_dir(path):
"""Remove a directory.
If `path` points to a file, the directory containing the file is
removed. If `path` is a directory, this directory is removed.
"""
if not isinstance(path, string_types):
return
if not os.path.exists(path):
return
if os.path.isfile(path):
path = os.path.dirname(path)
assert path not in ['/', '/tmp'] # Safety belt
shutil.rmtree(path)
return
RE_CSS_TAG = re.compile('(.+?)(\.?\s*){')
RE_CSS_STMT_START = re.compile('\s*(.*?{.*?)')
RE_CURLY_OPEN = re.compile('{([^ ])')
RE_CURLY_CLOSE = re.compile('([^ ])}')
RE_EMPTY_COMMENTS = re.compile('/\*\s*\*/')
RE_CDATA_MASSAGE = '(((/\*)?<!\[CDATA\[(\*/)?)((.*?)<!--)?'
RE_CDATA_MASSAGE += '(.*?)(-->(.*?))?((/\*)?]]>(\*/)?))'
MARKUP_MASSAGE = [
(re.compile('(<[^<>]*)/>'), lambda x: x.group(1) + ' />'),
(re.compile('<!\s+([^<>]*)>'),
lambda x: '<!' + x.group(1) + '>')
]
CDATA_MASSAGE = MARKUP_MASSAGE
CDATA_MASSAGE.extend([
(re.compile(RE_CDATA_MASSAGE, re.M + re.S),
lambda match: match.group(7))])
def extract_css(html_input, basename='sample.html', prettify_html=False):
"""Scan `html_input` and replace all styles with single link to a CSS
file.
Returns tuple ``<MODIFIED_HTML>, <CSS-CODE>``.
If the `html_input` contains any ``<style>`` tags, their content
is aggregated and returned in ``<CSS-CODE``.
The tags are all stripped from `html` input and replaced by a link
to a stylesheet file named ``<basename>.css``. Any extension in
`basename` is stripped. So ``sample.html`` as `basename` will
result in a link to ``sample.css``. The same applies for a
`basename` ``sample.css`` or ``sample``. The modified HTML code is
returned as first item of the result tuple.
    If `prettify_html` is True, the generated HTML code is prettified
by BeautifulSoup. This might result in unexpected, visible gaps in
rendered output.
"""
# create HTML massage that removes CDATA and HTML comments in styles
for fix, m in CDATA_MASSAGE:
html_input = fix.sub(m, html_input)
soup = BeautifulSoup(html_input, 'html.parser')
css = '\n'.join([style.text for style in soup.findAll('style')])
if '<style>' in css:
css = css.replace('<style>', '\n')
# lowercase leading tag names
css = re.sub(
RE_CSS_TAG,
lambda match:
match.group(1).lower() + match.group(2) + '{', css)
# set indent of all CSS statement lines to nil.
css = re.sub(RE_CSS_STMT_START,
lambda match: '\n' + match.group(1), css)
# insert spaces after and before curly brackets.
css = re.sub(RE_CURLY_OPEN, lambda match: '{ ' + match.group(1), css)
css = re.sub(RE_CURLY_CLOSE, lambda match: match.group(1) + ' }', css)
css_name = os.path.splitext(basename)[0] + '.css'
# Remove empty style comments
css = re.sub(RE_EMPTY_COMMENTS, lambda match: '', css)
if css.startswith('\n'):
css = css[1:]
for num, style in enumerate(soup.findAll('style')):
if num == 0 and css != '':
# replace first style with link to stylesheet
# if there are any styles contained
new_tag = soup.new_tag(
'link', rel='stylesheet', type='text/css', href=css_name)
style.replace_with(new_tag)
else:
style.extract()
if css == '':
css = None
if prettify_html:
return soup.prettify(), css
return UnicodeDammit(str(soup)).markup, css
RE_HEAD_NUM = re.compile('(<h[1-6][^>]*>\s*)(([\d\.]+)+)([^\d])',
re.M + re.S)
def cleanup_html(html_input, basename,
fix_head_nums=True, fix_img_links=True, fix_sdfields=True):
"""Clean up HTML code.
If `fix_head_nums` is ``True``, we look for heading contents of
style ``1.1Heading`` where the number is not separated from the
real heading text. In that case we wrap the heading number in a
``<span class="u-o-headnum"> tag.
If `fix_img_links` is ``True`` we run
:func:`rename_html_img_links` over the result.
If `fix_sdfields` is ``True`` we rename all ``<sdfield>`` tags to
``<span>``. See :func:`rename_sdfield_tags` for details.
Returns a tuple ``(<HTML_OUTPUT>, <IMG_NAME_MAP>)`` where
``<HTML_OUTPUT>`` is the modified HTML code and ``<IMG_NAME_MAP>``
a mapping from old filenames to new ones (see
:func:`rename_html_img_links`) for details.
"""
img_name_map = {}
if fix_img_links is True:
html_input, img_name_map = rename_html_img_links(html_input, basename)
if fix_sdfields is True:
html_input = rename_sdfield_tags(html_input)
if fix_head_nums is not True:
return html_input, img_name_map
# Wrap leading num-dots in headings in own span-tag.
html_input = re.sub(
RE_HEAD_NUM,
lambda match: ''.join([
match.group(1),
'<span class="u-o-headnum">',
match.group(3),
'</span>',
match.group(4)]),
html_input)
return html_input, img_name_map
def cleanup_css(css_input, minified=True):
"""Cleanup CSS code delivered in `css_input`, a string.
Returns 2-item tuple ``(<CSS>, <ERRORS>)`` where ``<CSS>`` is the
cleaned and minimized CSS code and ``<ERRORS>`` is a multiline
    string containing warnings and errors that occurred during processing
the CSS.
By default the ``<CSS>`` returned is minified to reduce network
load, etc. If you want pretty non-minified output, set `minified`
to ``False``.
We expect and return texts, not bytestreams.
"""
# Set up a local logger for warnings and errors
local_log = StringIO()
handler = logging.StreamHandler(local_log)
handler.setFormatter(logging.Formatter('%(levelname)s %(message)s'))
handler.propagate = False
handler.setLevel(logging.WARNING)
logger = logging.getLogger()
logger.addHandler(handler)
cssutils.log.setLog(logger)
cssutils.ser.prefs.useDefaults()
if minified is True:
cssutils.ser.prefs.useMinified()
sheet = cssutils.parseString(css_input)
local_log.flush()
encoding = sheet.encoding or 'utf-8'
css_text = sheet.cssText.decode(encoding)
return css_text, local_log.getvalue()
def rename_html_img_links(html_input, basename):
"""Rename all ``<img>`` tag ``src`` attributes based on `basename`.
Each `src` of each ``<img>`` tag in `html_input` is renamed to a
new location of form ``<BASENAME>_<NUM>.<EXT>`` where
``<BASENAME>`` is the basename of `basename`, ``<NUM>`` a unique
number starting with 1 (one) and ``<EXT>`` the filename extension
of the original ``src`` file.
For example:
``<img src="foo_m1234.jpeg">``
with a `basename` ``sample.html`` will be replaced by
``<img src="sample_1.jpeg">``
if this is the first ``<img>`` tag in the document.
Returns a tuple ``<HTML_OUTPUT>, <NAME_MAP>`` where
``<HTML_OUTPUT>`` is the modified HTML and ``<NAME_MAP>`` is a
dictionary with a mapping from old filenames to new ones. The
latter can be used to rename any real files (which is not done by
this function).
Links to 'external' sources (http and similar) are ignored.
    This function expects text as input and returns text, not bytes.
I.e. you will get unicode snippets under Python 2.x and text
(or `str`) under Python 3.x.
"""
soup = BeautifulSoup(html_input, 'html.parser')
img_tags = soup.findAll('img')
img_map = {}
num = 1
basename = os.path.splitext(basename)[0]
basename = basename.replace('.', '_')
for tag in img_tags:
src = tag.get('src', None)
if src is None:
continue
if src in img_map.keys():
# We found a link to the same image already
tag['src'] = img_map[src]
continue
scheme = urlparse(src)[0]
if scheme not in ['file', '']:
# only handle local files
continue
ext = ''
if '.' in src:
ext = os.path.splitext(src)[1]
new_src = '%s_%s%s' % (basename, num, ext)
num += 1
tag['src'] = new_src
img_map[src] = new_src
return soup.decode(), img_map
RE_SDFIELD_OPEN = re.compile('<sdfield([^>]*)>', re.M + re.S + re.I)
RE_SDFIELD_CLOSE = re.compile('</sdfield>', re.M + re.S + re.I)
def rename_sdfield_tags(html_input):
"""Rename all ``<sdfield>`` tags to ``<span class="sdfield">``
Any attributes are preserved. `html_input` must be a text, not a
bytes stream.
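    For example (illustrative):
    >>> rename_sdfield_tags('<sdfield type="PAGE">1</sdfield>')
    '<span class="sdfield" type="PAGE">1</span>'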
"""
html_input = re.sub(
RE_SDFIELD_OPEN, lambda match: '<span %s%s>' % (
'class="sdfield"', match.group(1)), html_input)
return re.sub(
RE_SDFIELD_CLOSE, lambda match: '</span>', html_input)
def base64url_encode(string):
"""Get a base64url encoding of string.
base64url is regular base64 encoding with ``/`` and ``+`` in the
result substituted by ``_`` and ``-`` respectively.
This encoding is better suited for generating file system paths
out of binary data.
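    For example (illustrative):
    >>> base64url_encode('foo')
    'Zm9v'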
"""
if isinstance(string, str):
try:
string = string.encode("latin-1")
except UnicodeDecodeError: # pragma: no cover
# Python 2.x
pass
result = base64.urlsafe_b64encode(string)
if not isinstance(result, str): # pragma: no cover
# Python 3.x only
result = result.decode("ascii")
return result
def base64url_decode(string):
"""Decode the base64url encoded `string`.
.. seealso:: base64url_encode
"""
result = base64.urlsafe_b64decode(string)
if not isinstance(result, str): # pragma: no cover
# Python 3.x only.
result = result.decode("latin-1")
return result
def string_to_bool(string):
"""Turn string into a boolean value.
``yes``, ``1``, and ``true`` are considered as ``True``. ``no``,
``0``, and ``false`` are considered ``False``. If none of that
applies, ``None`` is returned. The case does not matter, so you
can use upper, lower or mixed case.
If, by accident, you pass in a boolean value this will be returned
unchanged.
Other values result in ``None``.
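    For example (illustrative):
    >>> string_to_bool('Yes')
    True
    >>> string_to_bool('0')
    False
    >>> string_to_bool('perhaps') is None
    True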
"""
if not isinstance(string, string_types):
if string is True or string is False:
return string
return None
if string.lower() in ['yes', '1', 'true']:
return True
if string.lower() in ['no', '0', 'false']:
return False
return None
def strict_string_to_bool(string):
"""A variant of `string_to_bool` which raises a `ValueError` if no
valid boolean value can be parsed from `string`.
"""
result = string_to_bool(string)
if result is None:
raise ValueError(
'%s is not a valid boolean. Use "yes" or "no".' % string)
return result
def string_to_stringtuple(string, strict=False):
"""Convert a single string into a tuple of strings.
The input string is expected to contain comma-separated string
values. The single values are stripped (whitespaces removed at
beginning and ending).
>>> string_to_stringtuple('foo, bar,baz')
('foo', 'bar', 'baz')
By default empty strings (``',,,,'`` and similar) are filtered
out.
This function is _not_ 'strict' by default. If `strict` is set to
``True`` it does not accept empty strings or ``None`` as input.
"""
if not string:
if strict:
raise ValueError('`string` must contain at least some string')
else:
return ()
result = [x.strip() for x in string.split(',') if x]
return tuple(result)
def filelike_cmp(file1, file2, chunksize=512):
"""Compare `file1` and `file2`.
Returns ``True`` if both are equal, ``False`` else.
Both, `file1` and `file2` can be paths to files or file-like
objects already open for reading.
    If both arguments are paths, consider using `filecmp.cmp` from
the standard library instead.
`chunksize` gives chunk size in bytes used during comparison.
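    For example (illustrative), comparing two in-memory streams:
    >>> filelike_cmp(StringIO('foo'), StringIO('foo'))
    True
    >>> filelike_cmp(StringIO('foo'), StringIO('bar'))
    False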
"""
f1 = file1
f2 = file2
result = True
if isinstance(file1, string_types) or isinstance(file1, bytes):
f1 = open(file1, 'rb')
if isinstance(file2, string_types) or isinstance(file2, bytes):
f2 = open(file2, 'rb')
f1.seek(0) # make sure we read from beginning, especially whe used
f2.seek(0) # in loops.
try:
while True:
chunk1 = f1.read(chunksize)
chunk2 = f2.read(chunksize)
try:
chunk1 = chunk1.encode('utf-8')
except AttributeError: # pragma: no cover
# already a bytes object, or py2.x
pass
try:
chunk2 = chunk2.encode('utf-8')
except AttributeError: # pragma: no cover
# already a bytes object, or py2.x
pass
if chunk1 != chunk2:
result = False
break
if not chunk1:
break
finally:
if isinstance(file1, string_types) or isinstance(file1, bytes):
f1.close()
if isinstance(file2, string_types) or isinstance(file2, bytes):
f2.close()
return result
def write_filelike(file_obj, path, chunksize=512):
"""Write contents of `file_obj` to `path`.
`file_obj` can be a string or some file-like object. If it is a
file-like object, it must be opened for reading.
Content is written in chunks of `chunksize`.
"""
f1 = file_obj
if isinstance(file_obj, string_types):
f1 = StringIO(file_obj)
elif isinstance(file_obj, bytes): # pragma: no cover
f1 = StringIO(file_obj.decode('utf-8'))
f2 = open(path, 'w')
try:
while True:
            chunk = f1.read(chunksize)
if chunk:
f2.write(chunk)
else:
break
finally:
f2.close()
return
| ulif/ulif.openoffice | src/ulif/openoffice/helpers.py | Python | gpl-2.0 | 22,408 | 0.000759 |
############################################################
# $HeadURL$
############################################################
"""
DIRAC.WorkloadManagementSystem.PilotAgent package
"""
__RCSID__ = "$Id$" | sposs/DIRAC | WorkloadManagementSystem/PilotAgent/__init__.py | Python | gpl-3.0 | 215 | 0.004651 |
print("-------------- assigning numbers -----------")
fred = 100
print(fred)
print(fred)
fred = 200
print(fred)
print(fred)
john = fred
fred = john
print("-------------- assigning letters -----------")
adam = "jj"
print(adam)
print("-------------- assigning coins -----------")
number_of_coins = 200
| wibeasley/mayan-playground-1 | teagen/chapter_02/calculations_and_variables.py | Python | apache-2.0 | 306 | 0.009804 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 OpenStack, LLC.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Simple Scheduler
"""
from cinder import db
from cinder import exception
from cinder import flags
from cinder.openstack.common import cfg
from cinder.scheduler import chance
from cinder.scheduler import driver
from cinder import utils
simple_scheduler_opts = [
cfg.IntOpt("max_gigabytes",
default=10000,
help="maximum number of volume gigabytes to allow per host"), ]
FLAGS = flags.FLAGS
FLAGS.register_opts(simple_scheduler_opts)
class SimpleScheduler(chance.ChanceScheduler):
"""Implements Naive Scheduler that tries to find least loaded host."""
def schedule_create_volume(self, context, request_spec, filter_properties):
"""Picks a host that is up and has the fewest volumes."""
elevated = context.elevated()
volume_id = request_spec.get('volume_id')
snapshot_id = request_spec.get('snapshot_id')
image_id = request_spec.get('image_id')
volume_properties = request_spec.get('volume_properties')
volume_size = volume_properties.get('size')
availability_zone = volume_properties.get('availability_zone')
zone, host = None, None
if availability_zone:
zone, _x, host = availability_zone.partition(':')
if host and context.is_admin:
topic = FLAGS.volume_topic
service = db.service_get_by_args(elevated, host, topic)
if not utils.service_is_up(service):
raise exception.WillNotSchedule(host=host)
updated_volume = driver.volume_update_db(context, volume_id, host)
self.volume_rpcapi.create_volume(context,
updated_volume,
host,
snapshot_id,
image_id)
return None
results = db.service_get_all_volume_sorted(elevated)
if zone:
results = [(service, gigs) for (service, gigs) in results
if service['availability_zone'] == zone]
for result in results:
(service, volume_gigabytes) = result
if volume_gigabytes + volume_size > FLAGS.max_gigabytes:
msg = _("Not enough allocatable volume gigabytes remaining")
raise exception.NoValidHost(reason=msg)
if utils.service_is_up(service) and not service['disabled']:
updated_volume = driver.volume_update_db(context, volume_id,
service['host'])
self.volume_rpcapi.create_volume(context,
updated_volume,
service['host'],
snapshot_id,
image_id)
return None
msg = _("Is the appropriate service running?")
raise exception.NoValidHost(reason=msg)
| citrix-openstack-build/cinder | cinder/scheduler/simple.py | Python | apache-2.0 | 3,857 | 0 |
'''
New Integration Test for Ceph Pool Capacity.
@author: Legion
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import time
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
pool_cap = test_stub.PoolCapacity()
def test():
pool_cap.get_bs()
pool_cap.create_vm()
test_obj_dict.add_vm(pool_cap.vm)
pool_cap.crt_vm_image(pool_cap.bs)
time.sleep(300)
pool_cap.get_bs()
used1 = pool_cap.bs.poolUsedCapacity
avail1 = pool_cap.bs.poolAvailableCapacity
pool_cap.check_pool_cap([used1, avail1], bs=True)
pool_cap.vm.destroy()
test_obj_dict.rm_vm(pool_cap.vm)
test_lib.lib_robot_cleanup(test_obj_dict)
test_util.test_pass('Ceph Image Pool Capacity Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
global test_obj_dict
test_lib.lib_error_cleanup(test_obj_dict)
| zstackio/zstack-woodpecker | integrationtest/vm/virtualrouter/ceph_pool_capacity/test_ceph_pool_cap_crt_vm_image.py | Python | apache-2.0 | 989 | 0.001011 |
"""
Support for BME280 temperature, humidity and pressure sensor.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.bme280/
"""
import asyncio
from datetime import timedelta
from functools import partial
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
TEMP_FAHRENHEIT, CONF_NAME, CONF_MONITORED_CONDITIONS)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
from homeassistant.util.temperature import celsius_to_fahrenheit
REQUIREMENTS = ['i2csense==0.0.4',
'smbus-cffi==0.5.1']
_LOGGER = logging.getLogger(__name__)
CONF_I2C_ADDRESS = 'i2c_address'
CONF_I2C_BUS = 'i2c_bus'
CONF_OVERSAMPLING_TEMP = 'oversampling_temperature'
CONF_OVERSAMPLING_PRES = 'oversampling_pressure'
CONF_OVERSAMPLING_HUM = 'oversampling_humidity'
CONF_OPERATION_MODE = 'operation_mode'
CONF_T_STANDBY = 'time_standby'
CONF_FILTER_MODE = 'filter_mode'
CONF_DELTA_TEMP = 'delta_temperature'
DEFAULT_NAME = 'BME280 Sensor'
DEFAULT_I2C_ADDRESS = '0x76'
DEFAULT_I2C_BUS = 1
DEFAULT_OVERSAMPLING_TEMP = 1 # Temperature oversampling x 1
DEFAULT_OVERSAMPLING_PRES = 1 # Pressure oversampling x 1
DEFAULT_OVERSAMPLING_HUM = 1 # Humidity oversampling x 1
DEFAULT_OPERATION_MODE = 3 # Normal mode (forced mode: 2)
DEFAULT_T_STANDBY = 5 # Tstandby 5ms
DEFAULT_FILTER_MODE = 0 # Filter off
DEFAULT_DELTA_TEMP = 0.
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=3)
SENSOR_TEMP = 'temperature'
SENSOR_HUMID = 'humidity'
SENSOR_PRESS = 'pressure'
SENSOR_TYPES = {
SENSOR_TEMP: ['Temperature', None],
SENSOR_HUMID: ['Humidity', '%'],
SENSOR_PRESS: ['Pressure', 'mb']
}
DEFAULT_MONITORED = [SENSOR_TEMP, SENSOR_HUMID, SENSOR_PRESS]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_I2C_ADDRESS, default=DEFAULT_I2C_ADDRESS): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS, default=DEFAULT_MONITORED):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
vol.Optional(CONF_I2C_BUS, default=DEFAULT_I2C_BUS): vol.Coerce(int),
vol.Optional(CONF_OVERSAMPLING_TEMP,
default=DEFAULT_OVERSAMPLING_TEMP): vol.Coerce(int),
vol.Optional(CONF_OVERSAMPLING_PRES,
default=DEFAULT_OVERSAMPLING_PRES): vol.Coerce(int),
vol.Optional(CONF_OVERSAMPLING_HUM,
default=DEFAULT_OVERSAMPLING_HUM): vol.Coerce(int),
vol.Optional(CONF_OPERATION_MODE,
default=DEFAULT_OPERATION_MODE): vol.Coerce(int),
vol.Optional(CONF_T_STANDBY,
default=DEFAULT_T_STANDBY): vol.Coerce(int),
vol.Optional(CONF_FILTER_MODE,
default=DEFAULT_FILTER_MODE): vol.Coerce(int),
vol.Optional(CONF_DELTA_TEMP,
default=DEFAULT_DELTA_TEMP): vol.Coerce(float),
})
# pylint: disable=import-error
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Set up the BME280 sensor."""
import smbus
from i2csense.bme280 import BME280
SENSOR_TYPES[SENSOR_TEMP][1] = hass.config.units.temperature_unit
name = config.get(CONF_NAME)
i2c_address = config.get(CONF_I2C_ADDRESS)
bus = smbus.SMBus(config.get(CONF_I2C_BUS))
sensor = yield from hass.async_add_job(
partial(BME280, bus, i2c_address,
osrs_t=config.get(CONF_OVERSAMPLING_TEMP),
osrs_p=config.get(CONF_OVERSAMPLING_PRES),
osrs_h=config.get(CONF_OVERSAMPLING_HUM),
mode=config.get(CONF_OPERATION_MODE),
t_sb=config.get(CONF_T_STANDBY),
filter_mode=config.get(CONF_FILTER_MODE),
delta_temp=config.get(CONF_DELTA_TEMP),
logger=_LOGGER)
)
if not sensor.sample_ok:
_LOGGER.error("BME280 sensor not detected at %s", i2c_address)
return False
sensor_handler = yield from hass.async_add_job(BME280Handler, sensor)
dev = []
try:
for variable in config[CONF_MONITORED_CONDITIONS]:
dev.append(BME280Sensor(
sensor_handler, variable, SENSOR_TYPES[variable][1], name))
except KeyError:
pass
async_add_devices(dev)
class BME280Handler:
"""BME280 sensor working in i2C bus."""
def __init__(self, sensor):
"""Initialize the sensor handler."""
self.sensor = sensor
self.update(True)
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self, first_reading=False):
"""Read sensor data."""
self.sensor.update(first_reading)
class BME280Sensor(Entity):
"""Implementation of the BME280 sensor."""
def __init__(self, bme280_client, sensor_type, temp_unit, name):
"""Initialize the sensor."""
self.client_name = name
self._name = SENSOR_TYPES[sensor_type][0]
self.bme280_client = bme280_client
self.temp_unit = temp_unit
self.type = sensor_type
self._state = None
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
@property
def name(self):
"""Return the name of the sensor."""
return '{} {}'.format(self.client_name, self._name)
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of the sensor."""
return self._unit_of_measurement
@asyncio.coroutine
def async_update(self):
"""Get the latest data from the BME280 and update the states."""
yield from self.hass.async_add_job(self.bme280_client.update)
if self.bme280_client.sensor.sample_ok:
if self.type == SENSOR_TEMP:
temperature = round(self.bme280_client.sensor.temperature, 1)
if self.temp_unit == TEMP_FAHRENHEIT:
temperature = round(celsius_to_fahrenheit(temperature), 1)
self._state = temperature
elif self.type == SENSOR_HUMID:
self._state = round(self.bme280_client.sensor.humidity, 1)
elif self.type == SENSOR_PRESS:
self._state = round(self.bme280_client.sensor.pressure, 1)
else:
_LOGGER.warning("Bad update of sensor.%s", self.name)
| MungoRae/home-assistant | homeassistant/components/sensor/bme280.py | Python | apache-2.0 | 6,473 | 0 |
import cProfile
import StringIO
import pstats
import contextlib
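# Minimal usage sketch (assumed caller code, not part of the original module):
#   with profiled():
#       run_expensive_task()
# prints a cumulative-time profile of the wrapped block once it exits.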
@contextlib.contextmanager
def profiled():
pr = cProfile.Profile()
pr.enable()
yield
pr.disable()
s = StringIO.StringIO()
ps = pstats.Stats(pr, stream=s).sort_stats("cumulative")
ps.print_stats()
# uncomment this to see who's calling what
# ps.print_callers()
print(s.getvalue())
| DBeath/flask-feedrsub | feedrsub/utils/profiler.py | Python | mit | 386 | 0 |
import asposecellscloud
from asposecellscloud.CellsApi import CellsApi
from asposecellscloud.CellsApi import ApiException
import asposestoragecloud
from asposestoragecloud.StorageApi import StorageApi
apiKey = "XXXXX" #sepcify App Key
appSid = "XXXXX" #sepcify App SID
apiServer = "http://api.aspose.com/v1.1"
data_folder = "../../data/"
#Instantiate Aspose Storage API SDK
storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True)
storageApi = StorageApi(storage_apiClient)
#Instantiate Aspose Cells API SDK
api_client = asposecellscloud.ApiClient.ApiClient(apiKey, appSid, True)
cellsApi = CellsApi(api_client);
#set input file name
filename = "Sample_Test_Book.xls"
sheetName = "Sheet1"
mergedCellIndex = 0
#upload file to aspose cloud storage
#storageApi.PutCreate(Path=filename, file=data_folder + filename)
try:
#invoke Aspose.Cells Cloud SDK API to get merged cells from a worksheet
response = cellsApi.GetWorkSheetMergedCell(name=filename, sheetName=sheetName, mergedCellIndex=mergedCellIndex)
if response.Status == "OK":
mergedCell = response.MergedCell
print "Merge Start Column :: " + str(mergedCell.StartColumn)
print "Merge End Column :: " + str(mergedCell.EndColumn)
except ApiException as ex:
print "ApiException:"
print "Code:" + str(ex.code)
print "Message:" + ex.message
| aspose-cells/Aspose.Cells-for-Cloud | Examples/Python/Examples/GetMergedCellFromWorksheet.py | Python | mit | 1,392 | 0.010057 |
import sys # pragma: no cover
from remoteappmanager.command_line_config import (
CommandLineConfig) # pragma: no cover
from remoteappmanager.environment_config import (
EnvironmentConfig) # pragma: no cover
from remoteappmanager.file_config import FileConfig # pragma: no cover
from tornado.options import print_help # pragma: no cover
from remoteappmanager.admin_application import (
AdminApplication) # pragma: no cover
def main(): # pragma: no cover
try:
command_line_config = CommandLineConfig()
command_line_config.parse_config()
file_config = FileConfig()
if command_line_config.config_file:
file_config.parse_config(command_line_config.config_file)
environment_config = EnvironmentConfig()
environment_config.parse_config()
except Exception as e:
print_help()
print("Error: {}".format(e))
sys.exit(1)
app = AdminApplication(
command_line_config,
file_config,
environment_config)
app.start()
| simphony/simphony-remote | remoteappmanager/cli/remoteappadmin/__main__.py | Python | bsd-3-clause | 1,056 | 0 |
"""
Linked List: Merge K Sorted Lists (hard)
Description:
You are given an array of k linked-lists lists, each linked-list is
sorted in ascending order.
Merge all the linked-lists into one sorted linked-list and return it.
Example:
Input: lists = [[1,4,5],[1,3,4],[2,6]]
Output: [1,1,2,3,4,4,5,6]
Explanation: The linked-lists are:
[
1->4->5,
1->3->4,
2->6
]
merging them into one sorted list:
1->1->2->3->4->4->5->6
Solutions:
1. Brute force - Add all nodes in all lists into one list and then sort.
2. Brute force - Merge list one by one.
3. Divide and conquer - Merge half and half's half, ...
4. Priority queue - Get minimum element each time.
Notes:
    For the priority queue based method, the ListNode objects themselves are
    pushed onto the heap, so ListNode must be comparable (e.g. implement
    __lt__) for heapq to order the nodes.
LeetCode Link: https://leetcode-cn.com/problems/merge-k-sorted-lists/
"""
from linked_list import ListNode, create_linked_list, traverse
import heapq
# import sys
# sys.setrecursionlimit(2000)
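# A minimal sketch of approach 1 from the docstring (brute force: collect all
# values, sort once, rebuild a single list). Not part of the original
# solutions; assumes ListNode(val) constructs a node, as used elsewhere here.
# O(N*logN) time and O(N) extra space for N total nodes.
def merge_k_lists_brute_force(input_lists: list) -> ListNode:
    values = []
    for node in input_lists:
        while node:
            values.append(node.val)
            node = node.next
    dummy = ListNode(-1)
    current = dummy
    for val in sorted(values):
        current.next = ListNode(val)
        current = current.next
    return dummy.next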
def merge_two_sorted_lists(l1: ListNode, l2: ListNode) -> ListNode:
""" Merge two sorted lists.
"""
if not l1: return l2
if not l2: return l1
if l1.val <= l2.val:
l1.next = merge_two_sorted_lists(l1.next, l2)
return l1
else:
l2.next = merge_two_sorted_lists(l1, l2.next)
return l2
def merge(input_lists: list, left: int, right: int) -> ListNode:
""" Divide and Conquer - divide the input lists into half and
process them and then merge them together.
"""
if left == right: return input_lists[left]
mid = left + (right - left) // 2
l1 = merge(input_lists, left, mid)
l2 = merge(input_lists, mid+1, right)
return merge_two_sorted_lists(l1, l2)
def merge_k_lists_divide_and_conquer(input_lists: list) -> ListNode:
""" Solution - Divide and Conquer
We can merge lists in pairs, suppose we have k lists at the beginning,
then we can merge list pairs for the first round, so we will have k/2
merged lists, repeat the process, until we have the final one sorted list.
Time Complexity - O(kn*logk) - the first round merging k/2 pair of lists,
the time complexity is O(2n) for each pair, the second round merging k/4
pair of lists, and the time complexty for each pair is O(4n), ... in total
the time complexity is O(kn*logk).
Space Complexity - O(logk) - for recursion stack.
"""
if not input_lists: return
n = len(input_lists)
return merge(input_lists, 0, n-1)
def merge_k_sorted_lists_heapq(input_lists: list) -> ListNode:
""" Solution - Min Heap
We first insert the first element (also smallest as the lists are sorted) of each
linked list in a min heap. After this, we can take out the smallest element from
the heap and add it to the merged list. After removing the smallest element from the
heap, we can insert the next element of the same list into the heap. Repeat previous
steps to populate the merged list in sorted order.
Time Complexity - O(kn*logk) - the number of elements in the priority queue will be
less than k, so the time complexity for insertion and deletion will be O(logk), there
are at most k*n elements (every node is inserted and deleted once), so the total time
complexity will be O(kn*logk)
Space Complexity - O(k) - for the priority queue (min-heap).
"""
dummy = ListNode(-1)
current = dummy
min_heap = []
# Put the root of each list in the min heap
for root in input_lists:
if root:
heapq.heappush(min_heap, root)
# Pop the smallest element from the min heap and add it to the result sorted list.
while min_heap:
node = heapq.heappop(min_heap)
current.next = node
current = current.next
# If the element poped still have next node, then add it into the heap.
if node.next:
heapq.heappush(min_heap, node.next)
return dummy.next
if __name__ == "__main__":
l1 = create_linked_list([2,6,8])
l2 = create_linked_list([3,6,7])
l3 = create_linked_list([1,3,4])
new_list = merge_k_lists_divide_and_conquer([l1, l2, l3])
print(traverse(new_list))
l1 = create_linked_list([1,4,5])
l2 = create_linked_list([1,3,4])
l3 = create_linked_list([2,6])
result = merge_k_sorted_lists_heapq([l1, l2, l3])
print(traverse(result))
| dreamibor/Algorithms-and-Data-Structures-Using-Python | practice/implementation/linked_list/merge_k_sorted_list.py | Python | gpl-3.0 | 4,308 | 0.009749 |
# dr14_t.meter: compute the DR14 value of the given audiofiles
# Copyright (C) 2011 Simone Riva
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
def float_formatter( el ):
if abs( el ) >= 1.0 :
return "%.2f" % el
else :
return "%.2E" % el
def default_formatter( el ):
if sys.version_info[0] == 2:
return unicode( el )
else:
return str( el )
def string_formatter( el ):
if sys.version_info[0] == 2:
return unicode( el )
else:
return str( el )
class Table:
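    # Base renderer: concrete subclasses (TextTable, BBcodeTable, HtmlTable,
    # MediaWikiTable, ExtendedTextTable) override the new_*/end_* hooks to emit
    # the same table data as plain text, BBCode, HTML or MediaWiki markup.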
def __init__(self):
self.__float_format = "%.2f"
self.__col_cnt = 5
self.__ini_txt = ""
self.__txt = ""
self.__formatter = {}
self.add_formatter( float , float_formatter )
self.add_formatter( str , string_formatter )
if sys.version_info[0] == 2:
self.add_formatter( unicode , string_formatter )
def _get_txt(self):
return self.__txt
def _set_txt( self , txt ):
self.__txt = txt
def _append_txt( self , txt ):
self.__txt += txt
def init_txt(self, txt = "" ):
self.__ini_txt = txt
def get_init_txt(self):
return self.__ini_txt
    def new_table( self ):
        raise NotImplementedError(" %s : is virtual and must be overridden." % sys._getframe().f_code.co_name )
    def end_table( self ):
        raise NotImplementedError(" %s : is virtual and must be overridden." % sys._getframe().f_code.co_name )
def write_table(self):
return self.__ini_txt + self._get_txt()
    def nl(self):
        if sys.platform.startswith('linux') or sys.platform.startswith('darwin'):
            return '\n'
        elif sys.platform.startswith('win'):
            return '\r\n'
        return '\n'
def add_formatter( self , _type , formatter ):
self.__formatter[_type] = formatter
def format_element( self , el ):
return self.__formatter.get( type(el) , default_formatter )( el )
def append_row( self , row_el , cell_type='d'):
if cell_type == 'd':
n_cell = self.new_cell
e_cell = self.end_cell
elif cell_type == 'h':
n_cell = self.new_hcell
e_cell = self.end_hcell
self.new_row()
for i in row_el:
n_cell()
self.add_value( i )
e_cell()
self.end_row()
def get_col_cnt( self ):
return self.__col_cnt
def set_col_cnt( self , col_cnt ):
self.__col_cnt = col_cnt
col_cnt = property( get_col_cnt , set_col_cnt )
def append_separator_line( self ):
self._append_txt( self.format_element( "" ) )
def append_closing_line( self ):
self._append_txt( self.format_element( "" ) )
def append_empty_line( self ):
self.append_row( [ "" ]*self.col_cnt )
    def add_title( self , title ):
        raise NotImplementedError(" %s : is virtual and must be overridden." % sys._getframe().f_code.co_name )
def add_value( self , val ):
self._append_txt( self.format_element(val) )
def new_head( self ):
self._append_txt( self.format_element( "" ) )
def end_head( self ):
self._append_txt( self.format_element( "" ) )
def new_tbody( self ):
self._append_txt( self.format_element( "" ) )
def end_tbody( self ):
self._append_txt( self.format_element( "" ) )
def new_foot( self ):
self._append_txt( self.format_element( "" ) )
def end_foot( self ):
self._append_txt( self.format_element( "" ) )
    def new_row( self ):
        raise NotImplementedError(" %s : is virtual and must be overridden." % sys._getframe().f_code.co_name )
    def end_row( self ):
        raise NotImplementedError(" %s : is virtual and must be overridden." % sys._getframe().f_code.co_name )
    def new_cell( self ):
        raise NotImplementedError(" %s : is virtual and must be overridden." % sys._getframe().f_code.co_name )
    def end_cell( self ):
        raise NotImplementedError(" %s : is virtual and must be overridden." % sys._getframe().f_code.co_name )
def new_hcell( self ):
return self.new_cell()
def end_hcell( self):
return self.end_cell()
    def new_bold( self ):
        raise NotImplementedError(" %s : is virtual and must be overridden." % sys._getframe().f_code.co_name )
    def end_bold( self ):
        raise NotImplementedError(" %s : is virtual and must be overridden." % sys._getframe().f_code.co_name )
class TextTable ( Table ):
def append_separator_line( self ):
self.append_row( [ "----------------------------------------------------------------------------------------------" ] )
def append_closing_line( self ):
self.append_row( [ "==============================================================================================" ] )
def append_empty_line( self ):
self.append_row( [ "" ] )
def add_title( self , title ):
self._append_txt( title + self.nl() )
def new_table( self ):
self._set_txt("")
def end_table( self ):
self._append_txt( self.nl() )
def new_row( self ):
self._append_txt("")
def end_row( self ):
self._append_txt( self.nl() )
def new_cell( self ):
self._append_txt("")
def end_cell( self ):
self._append_txt( "\t" )
def new_bold( self ):
self._append_txt("")
def end_bold( self ):
self._append_txt("")
class BBcodeTable ( Table ):
def append_separator_line( self ):
self.append_row( [ "------------" ] * self.col_cnt )
def append_closing_line( self ):
self.append_row( [ "============" ] * self.col_cnt )
def add_title( self , title ):
self._append_txt( self.nl() + "[tr]" + self.nl() + " [td colspan=%d] " % self.col_cnt + title + " [/td] " + self.nl() + "[/tr]" + self.nl() )
def new_table( self ):
self._set_txt("")
self._append_txt( '[table]' + self.nl() )
def end_table( self ):
self._append_txt( self.nl() + '[/table]' + self.nl() )
def new_row( self ):
self._append_txt( self.nl() + '[tr]' + self.nl() )
def end_row( self ):
self._append_txt( self.nl() + '[/tr]' + self.nl() )
def new_cell( self ):
self._append_txt( ' [td]' )
def end_cell( self ):
self._append_txt( '[/td]' )
def new_bold( self ):
self._append_txt( '[b]' )
def end_bold( self ):
self._append_txt( '[/b]' )
class HtmlTable ( Table ):
def add_title( self , title ):
self._append_txt( self.nl() + "<tr>" + self.nl() + " <th colspan=\"%d\" > " % self.col_cnt + title + "</th>" + self.nl() + "</tr>" + self.nl() )
def new_table( self ):
self._set_txt("")
self._append_txt( "<table>" + self.nl() )
def end_table( self ):
self._append_txt( self.nl() + "</table>" + self.nl() )
def new_head( self ):
self._append_txt( self.nl() + "<thead>" + self.nl() )
def end_head( self ):
self._append_txt( self.nl() + "</thead>" + self.nl() )
def new_tbody( self ):
self._append_txt( self.nl() + "<tbody>" + self.nl() )
def end_tbody( self ):
self._append_txt( self.nl() + "</tbody>" + self.nl() )
def new_foot( self ):
self._append_txt( self.nl() + "<tfoot>" + self.nl() )
def end_foot( self ):
self._append_txt( self.nl() + "</tfoot>" + self.nl() )
def new_row( self ):
self._append_txt( self.nl() + "<tr>" + self.nl() )
def end_row( self ):
self._append_txt( self.nl() + "</tr>" + self.nl() )
def new_cell( self ):
self._append_txt( ' <td>' )
def end_cell( self ):
self._append_txt( '</td>' )
def new_hcell( self ):
self._append_txt( ' <th>' )
def end_hcell( self ):
self._append_txt( '</th>' )
def new_bold( self ):
self._append_txt( '<b>' )
def end_bold( self ):
self._append_txt( '</b>' )
class MediaWikiTable ( Table ):
def add_title( self , title ):
self._append_txt( "|-" + self.nl() + "!align=\"left\" colspan=\"%d\" | " % self.col_cnt + title + self.nl() )
def new_table( self ):
self._set_txt("")
self._append_txt( "{| " + self.nl() )
def end_table( self ):
self._append_txt( "|}" + self.nl() )
def new_row( self ):
self._append_txt( "|-" + self.nl() )
def end_row( self ):
self._append_txt( self.nl() )
def new_cell( self ):
self._append_txt( '||' )
def end_cell( self ):
self._append_txt( "" )
def new_bold( self ):
self._append_txt( "\'\'\'" )
def end_bold( self ):
self._append_txt( "\'\'\'" )
class row:
def __init__(self):
self.row = []
self.cursor = 0
self.inds = []
self.rclass = "b"
self.type = ""
self.cell_type = []
def set_type(self, t):
self.type = t
def set_rclass(self, c):
self.rclass = c
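    # The set_* members below are properties used purely for their side
    # effects: callers write e.g. "r.set_title" without parentheses and the
    # accessor mutates the row's type/class before returning the new value.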
@property
def set_row(self):
self.set_type("r")
return self.type
@property
def set_head(self):
self.set_rclass("h")
return self.rclass
@property
def set_body(self):
self.set_rclass("b")
return self.rclass
@property
def set_foot(self):
self.set_rclass("f")
return self.rclass
@property
def set_title(self):
self.set_type("t")
return self.type
@property
def set_separator_line(self):
self.set_type("sl")
return self.type
@property
def set_closing_line(self):
self.set_type("cl")
return self.type
@property
def is_row(self):
if self.type == "r" :
return True
else :
return False
@property
def is_head(self):
if self.rclass == "h" :
return True
else :
return False
@property
def is_body(self):
if self.rclass == "b" :
return True
else :
return False
@property
def is_foot(self):
if self.rclass == "f" :
return True
else :
return False
@property
def is_title(self):
if self.type == "t" :
return True
else :
return False
@property
def is_separator_line(self):
if self.type == "sl" :
return True
else :
return False
@property
def is_closing_line(self):
if self.type == "cl" :
return True
else :
return False
class ExtendedTextTable ( Table ):
def __init__(self):
Table.__init__( self )
self._cols_sz = [0] * Table.get_col_cnt( self )
self._rows = []
self._bold_state = False
self._rclass_state = "b"
def get_col_cnt( self ):
return Table.get_col_cnt(self)
    def set_col_cnt( self , col_cnt ):
        Table.set_col_cnt( self , col_cnt )
        if len( self._cols_sz ) > col_cnt :
            self._cols_sz = self._cols_sz[:col_cnt]
        elif len( self._cols_sz ) < col_cnt :
            for n in range(len( self._cols_sz ), col_cnt):
                self._cols_sz.append(0)
col_cnt = property( get_col_cnt , set_col_cnt )
def _eval_row_len(self):
l = sum(self._cols_sz)
l = l + len(self._cols_sz)*3
return l
def _update_col_sz(self):
r = self._rows[-1]
if r.is_row and len(r.row) == self.col_cnt :
for c , i in zip( r.row , range(self.col_cnt) ) :
if len(c) > self._cols_sz[i] :
self._cols_sz[i] = len(c)
elif r.is_title :
d = self._eval_row_len() - len( r.row[0] )
if d > 0 :
c = 0
while d > 0 :
self._cols_sz[c] += 1
d-=1
c = ( c + 1 ) % len(self._cols_sz)
elif r.is_separator_line :
pass
elif r.is_closing_line :
pass
else :
raise Exception( "%s : Row model: Not Allowed " % sys._getframe().f_code.co_name )
def _write_title( self , r ):
txt = " "
txt += r.row[0]
txt += self.nl()
self._append_txt(txt)
def _write_row( self , r ):
txt = " "
for cell , i in zip( r.row , range( len(r.row) ) ) :
t_txt = " "
t_txt += cell
a = self._cols_sz[i] - len( cell )
if a < 0 : a = 0
t_txt += " "*(a+1)
txt += t_txt
txt += self.nl()
self._append_txt(txt)
def _write_separator_line( self , r ):
l = self._eval_row_len()
txt = " "
txt += "="*(l-2)
txt += self.nl()
self._append_txt(txt)
def _write_closing_line( self , r ):
self._write_separator_line(r)
def write_table(self):
for r in self._rows :
if r.is_title :
self._write_title(r)
elif r.is_row :
self._write_row(r)
elif r.is_separator_line :
self._write_separator_line(r)
elif r.is_closing_line :
self._write_closing_line(r)
else :
raise Exception( "%s : Row model: Not Allowed " % sys._getframe().f_code.co_name )
return self.get_init_txt() + self._get_txt()
def new_table( self ):
self._cols_sz = [0] * self.col_cnt
self._rows = []
def end_table( self ):
pass
def append_separator_line( self ):
r = row()
r.set_separator_line
r.set_rclass( self._rclass_state )
self._rows.append( r )
def append_closing_line( self ):
r = row()
r.set_closing_line
r.set_rclass( self._rclass_state )
self._rows.append( r )
def append_empty_line( self ):
self.append_row( [ "" ]*self.col_cnt )
self._update_col_sz()
def add_title( self , title ):
r = row()
r.set_title
r.set_rclass( self._rclass_state )
r.row.append( title )
self._rows.append( r )
self._update_col_sz()
def new_row( self ):
r = row()
r.set_row
r.set_rclass( self._rclass_state )
r.cursor = 0
self._rows.append( r )
def end_row( self ):
self._update_col_sz()
def new_cell( self ):
self._rows[-1].inds.append( self._rows[-1].cursor )
self._rows[-1].row.append("")
self._rows[-1].cell_type.append("c")
def end_cell( self ):
self._rows[-1].cursor += 1
def add_value( self , val ):
c = self._rows[-1].cursor
self._rows[-1].row[c] = self.format_element( val )
def new_head( self ):
self._rclass_state = "h"
def end_head( self ):
self._rclass_state = "b"
def new_tbody( self ):
self._rclass_state = "b"
def end_tbody( self ):
self._rclass_state = "b"
def new_foot( self ):
self._rclass_state = "f"
def end_foot( self ):
self._rclass_state = "b"
def new_hcell( self ):
self._rows[-1].inds.append( self._rows[-1].cursor )
self._rows[-1].row.append("")
self._rows[-1].cell_type.append("h")
def end_hcell( self):
self.end_cell()
def new_bold( self ):
self._bold_state = True
def end_bold( self ):
self._bold_state = False
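# Minimal usage sketch (assumed, not part of the original module):
#   t = HtmlTable()
#   t.new_table()
#   t.append_row(["Track", "DR", "Peak"], cell_type='h')
#   t.append_row(["01 - Intro", 12, -0.5])
#   t.end_table()
#   print(t.write_table())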
| magicgoose/dr14_t.meter | dr14tmeter/table.py | Python | gpl-3.0 | 16,975 | 0.043122 |
'''
Created on April 26, 2013
@package: livedesk
@copyright: 2013 Sourcefabric o.p.s.
@license: http://www.gnu.org/licenses/gpl-3.0.txt
@author: Mugur Rus
API specifications for liveblog sync.
'''
from ally.support.api.entity import Entity, IEntityService, QEntity
from livedesk.api.blog import Blog
from datetime import datetime
from livedesk.api.domain_livedesk import modelLiveDesk
from ally.api.config import query, service, LIMIT_DEFAULT, call, UPDATE
from ally.api.criteria import AsRangeOrdered, AsDateTimeOrdered, AsBoolean
from superdesk.source.api.source import Source
from ally.api.type import Iter
from superdesk.source.api.type import SourceType
# --------------------------------------------------------------------
@modelLiveDesk(name='Sync')
class BlogSync(Entity):
'''
    Provides the blog sync model. It is used for all kinds of blog sync, currently chained blogs and SMS.
'''
Blog = Blog
Source = Source
CId = int
LastActivity = datetime
Auto = bool
# --------------------------------------------------------------------
@query(BlogSync)
class QBlogSync(QEntity):
'''
Provides the query for BlogSync.
'''
cId = AsRangeOrdered
lastActivity = AsDateTimeOrdered
auto = AsBoolean
# --------------------------------------------------------------------
@service((Entity, BlogSync), (QEntity, QBlogSync))
class IBlogSyncService(IEntityService):
'''
Provides the service methods for the blog sync.
'''
@call(webName="checkTimeout", method=UPDATE)
def checkTimeout(self, blogSyncId:BlogSync.Id, timeout:int) -> bool:
'''
        Return True if the last activity is older than the timeout; when it is, also update the last activity value.
'''
@call
def getBySourceType(self, sourceType:SourceType.Key, offset:int=None, limit:int=LIMIT_DEFAULT, detailed:bool=True, q:QBlogSync=None) -> Iter(BlogSync):
'''
Returns the list of blog sync models for source type.
@param sourceType: SourceType.Key
The source(provider) identifier
@param offset: integer
The offset to retrieve the entities from.
@param limit: integer
The limit of entities to retrieve.
@param detailed: boolean
If true will present the total count, limit and offset for the partially returned collection.
@param q: QBlogSync
The query to search by.
'''
@call
def getByBlog(self, blogId:Blog.Id, offset:int=None, limit:int=LIMIT_DEFAULT, detailed:bool=True, q:QBlogSync=None) -> Iter(BlogSync):
'''
Returns the list of blog sync models for blog.
@param blogId: Blog.Id
The blog id
@param offset: integer
The offset to retrieve the entities from.
@param limit: integer
The limit of entities to retrieve.
@param detailed: boolean
If true will present the total count, limit and offset for the partially returned collection.
@param q: QBlogSync
The query to search by.
''' | superdesk/Live-Blog | plugins/livedesk-sync/livedesk/api/blog_sync.py | Python | agpl-3.0 | 3,125 | 0.0144 |
#!/usr/bin/python
import numpy as np
mdir = "mesh3d/"
fname = "out_p6-p4-p8"
####################
print "input mesh data file"
f1 = open(mdir+fname+".mesh", 'r')
for line in f1:
if line.startswith("Vertices"): break
pcount = int(f1.next())
xyz = np.empty((pcount, 3), dtype=np.float)
for t in range(pcount):
xyz[t] = map(float,f1.next().split()[0:3])
for line in f1:
if line.startswith("Triangles"): break
trisc = int(f1.next())
tris = np.empty((trisc,4), dtype=int)
for t in range(trisc):
tris[t] = map(int,f1.next().split())
for line in f1:
if line.startswith("Tetrahedra"): break
tetsc = int(f1.next())
tets = np.empty((tetsc,5), dtype=int)
for t in range(tetsc):
tets[t] = map(int,f1.next().split())
f1.close()
####################
print "identify geometry"
ftype = [('v0', np.int),('v1', np.int),('v2', np.int),('label', 'S2')]
faces = np.empty(trisc/2, dtype=ftype)
for i in range(len(faces)):
faces[i] = (tris[2*i][0],tris[2*i][1],tris[2*i][2],str(tris[2*i][3])+str(tris[2*i+1][3]))
face_list,face_count = np.unique(faces['label'], return_counts=True)
vtype = [('v0', np.int),('v1', np.int),('v2', np.int),('v3', np.int),('label', 'S1')]
vols = np.empty(tetsc, dtype=vtype)
for i in range(tetsc):
vols[i] = (tets[i][0],tets[i][1],tets[i][2],tets[i][3],str(tets[i][4]))
vol_list,vol_count = np.unique(vols['label'], return_counts=True)
####################
print "output vtk data files for faces"
for i, f in enumerate(face_list):
f2 = open(mdir+fname+"_"+face_list[i]+".vtk", 'w')
f2.write("# vtk DataFile Version 2.0\n")
f2.write("mesh data\n")
f2.write("ASCII\n")
f2.write("DATASET UNSTRUCTURED_GRID\n")
f2.write("POINTS "+str(pcount)+" float\n") # overkill, all points!
for v in xyz:
f2.write(str(v[0]-35.33)+' '+str(35.33-v[1])+' '+str(12.36-v[2])+'\n')
f2.write("CELLS "+str(face_count[i])+" "+str(face_count[i]*4)+"\n")
for v in faces:
if v[3] == f:
f2.write("3 "+str(v[0]-1)+' '+str(v[1]-1)+' '+str(v[2]-1)+'\n')
f2.write("CELL_TYPES "+str(face_count[i])+"\n")
for t in range(face_count[i]): f2.write("5 ")
f2.write("\n")
f2.close()
####################
print "output vtk data files for volumes"
for i, f in enumerate(vol_list):
f2 = open(mdir+fname+"_"+vol_list[i]+".vtk", 'w')
f2.write("# vtk DataFile Version 2.0\n")
f2.write("mesh data\n")
f2.write("ASCII\n")
f2.write("DATASET UNSTRUCTURED_GRID\n")
f2.write("POINTS "+str(pcount)+" float\n") # overkill, all points!
for v in xyz:
f2.write(str(v[0]-35.33)+' '+str(35.33-v[1])+' '+str(12.36-v[2])+'\n')
f2.write("CELLS "+str(vol_count[i])+" "+str(vol_count[i]*5)+"\n")
for v in vols:
if v[4] == f:
f2.write("4 "+str(v[0]-1)+' '+str(v[1]-1)+' '+str(v[2]-1)+' '+str(v[3]-1)+'\n')
f2.write("CELL_TYPES "+str(vol_count[i])+"\n")
for t in range(vol_count[i]): f2.write("10 ")
f2.write("\n")
f2.close()
####################
| jrugis/cell_mesh | mesh2vtk.py | Python | gpl-3.0 | 2,909 | 0.024751 |
import sys
import os
import warnings
import ruamel.yaml as yaml
from fnmatch import fnmatch
__author__ = "Pymatgen Development Team"
__email__ = "[email protected]"
__maintainer__ = "Shyue Ping Ong"
__maintainer_email__ = "[email protected]"
__version__ = "2019.7.2"
SETTINGS_FILE = os.path.join(os.path.expanduser("~"), ".pmgrc.yaml")
def _load_pmg_settings():
try:
with open(SETTINGS_FILE, "rt") as f:
d = yaml.safe_load(f)
except IOError:
# If there are any errors, default to using environment variables
# if present.
d = {}
for k, v in os.environ.items():
if k.startswith("PMG_"):
d[k] = v
elif k in ["VASP_PSP_DIR", "MAPI_KEY", "DEFAULT_FUNCTIONAL"]:
d["PMG_" + k] = v
return dict(d)
SETTINGS = _load_pmg_settings()
# Order of imports is important on some systems to avoid
# failures when loading shared libraries.
# import spglib
# from . import optimization, util
# del(spglib, optimization, util)
# Useful aliases for commonly used objects and modules.
# Allows from pymatgen import <class> for quick usage.
from pymatgen.core import *
from .electronic_structure.core import Spin, Orbital
from .ext.matproj import MPRester
from monty.json import MontyEncoder, MontyDecoder, MSONable
def get_structure_from_mp(formula):
"""
Convenience method to get a crystal from the Materials Project database via
the API. Requires PMG_MAPI_KEY to be set.
Args:
formula (str): A formula
Returns:
(Structure) The lowest energy structure in Materials Project with that
formula.
"""
m = MPRester()
entries = m.get_entries(formula, inc_structure="final")
if len(entries) == 0:
raise ValueError("No structure with formula %s in Materials Project!" %
formula)
elif len(entries) > 1:
warnings.warn("%d structures with formula %s found in Materials "
"Project. The lowest energy structure will be returned." %
(len(entries), formula))
return min(entries, key=lambda e: e.energy_per_atom).structure
if sys.version_info < (3, 5):
warnings.warn("""
Pymatgen will drop Py2k support from v2019.1.1. Please consult the documentation
at https://www.pymatgen.org for more details.""")
def loadfn(fname):
"""
Convenience method to perform quick loading of data from a filename. The
type of object returned depends the file type.
Args:
fname (string): A filename.
Returns:
Note that fname is matched using unix-style, i.e., fnmatch.
(Structure) if *POSCAR*/*CONTCAR*/*.cif
(Vasprun) *vasprun*
(obj) if *json* (passthrough to monty.serialization.loadfn)
"""
if (fnmatch(fname, "*POSCAR*") or fnmatch(fname, "*CONTCAR*") or
".cif" in fname.lower()) or fnmatch(fname, "*.vasp"):
return Structure.from_file(fname)
elif fnmatch(fname, "*vasprun*"):
from pymatgen.io.vasp import Vasprun
return Vasprun(fname)
elif fnmatch(fname, "*.json*"):
from monty.serialization import loadfn
return loadfn(fname) | blondegeek/pymatgen | pymatgen/__init__.py | Python | mit | 3,203 | 0.002498 |
from flask import Blueprint, render_template, request, url_for, jsonify
from config import mongo
import pandas as pd
import json
from bson import json_util
import retrieve_model as rmodel
from collections import Counter
main = Blueprint('main', __name__, template_folder='templates')
@main.route('/')
def index():
#mongo.db.visits.insert_one({"no":"way"})
#visits = mongo.db.visits.find_one()
#return str(visits)
return render_template('index.html')
@main.route('/predict/')
def get_started():
down_list = [{'value':1,'name':'1st'},{'value':2,'name':'2nd'},{'value':3,'name':'3rd'},{'value':4,'name':'4th'}]
quarter_list = [{'value':1,'name':'1st'},{'value':2,'name':'2nd'},{'value':3,'name':'3rd'},{'value':4,'name':'4th'}]
clock_list = [{'value':15,'name':'<15'}, {'value':14,'name':'<14'}, {'value':13,'name':'<13'},
{'value':12,'name':'<12'}, {'value':11,'name':'<11'}, {'value':10,'name':'<10'},
{'value':9,'name':'<9'}, {'value':8,'name':'<8'}, {'value':7,'name':'<7'},
{'value':6,'name':'<6'}, {'value':5,'name':'<5'}, {'value':4,'name':'<4'},
{'value':3,'name':'<3'}, {'value':2,'name':'<2'}, {'value':1,'name':'<1'}]
yards_list = [{'value':0,'name':'inches'}, {'value':1,'name':'1'},
{'value':2,'name':'2'}, {'value':3,'name':'3'}, {'value':4,'name':'4'},
{'value':5,'name':'5'}, {'value':6,'name':'6'}, {'value':7,'name':'7'},
{'value':8,'name':'8'}, {'value':9,'name':'9'}, {'value':10,'name':'10'},
{'value':11,'name':'11'}, {'value':12,'name':'12'}, {'value':13,'name':'13'},
{'value':14,'name':'14'}, {'value':15,'name':'15'}, {'value':16,'name':'16'},
{'value':17,'name':'17'}, {'value':18,'name':'18'}, {'value':19,'name':'19'},
{'value':20,'name':'20'}, {'value':21,'name':'21'}, {'value':22,'name':'22'},
{'value':23,'name':'23'}, {'value':24,'name':'24'}, {'value':25,'name':'25'}]
field_list = range(0,101,1)
score_list = range(0,61,1)
down_dict = [{'value':1,'name':'1st'},{'value':2,'name':'2nd'},{'value':3,'name':'3rd'},{'value':4,'name':'4th'}]
return render_template('predict.html',
down_list=down_list,
quarter_list=quarter_list,
clock_list=clock_list,
yards_list=yards_list,
field_list=field_list,
score_list=score_list,
down_dict=down_dict
)
@main.route('/results/', methods=['POST'])
def results():
down = request.form['down']
quarter = request.form['quarter']
clock = request.form['clock']
yards = request.form['yards']
field = request.form['field']
score = request.form['score']
sign = request.form['sign']
guess = request.form['guess']
score = str(int(score) * int(sign))
# Store scenario in mongodb
scenario = {
'down': int(down),
'quarter': int(quarter),
'clock': int(clock),
'yards': int(yards),
'field': int(field),
'score': int(score),
'guess': guess
}
# Insert the current user's guess into the DB
    print('Putting this into db:', scenario)
mongo.db.scenarios.insert_one(scenario)
# Pull User guesses from MongoDB
#scenarios = mongo.db.scenarios.find()
# Pull NFL Stats from MongoDB
#nflstats = mongo.db.nfldata.find()
guesses = {'pass':'Pass', 'run':'Run', 'punt':'Punt', 'fg':'Field Goal', 'kneel': 'QB Kneel'}
try:
return render_template('results.html',
guess_title = guesses[guess],
down=down,
quarter=quarter,
clock=clock,
yards=yards,
field=field,
score=score,
guess=guess,
scenarios=[None],#scenarios,
nflstats=[None]#nflstats
)
except Exception as e:
return "Something went wrong..." + str(e)
@main.route('/stats/')
def tables():
title = 'Test Table'
title = rmodel.predict_proba(4,4,1,20,-1)
table = title
return render_template('stats.html', table=table, title=title)
@main.route('/data/guesses/')
def guessData():
guess = request.args.get('guess')
down = request.args.get('down')
quarter = request.args.get('quarter')
clock = request.args.get('clock')
yards = request.args.get('yards')
field = request.args.get('field')
score = request.args.get('score')
search_dict = request.args.to_dict()
for key in search_dict:
#if key != 'guess':
try:
search_dict[key] = int(search_dict[key])
except:
pass
print(search_dict)
s=[data['guess'] for data in mongo.db.scenarios.find(search_dict)]
options = ['pass', 'run', 'punt', 'fg', 'kneel']
count = {option:s.count(option) for option in options}
print(count)
return json.dumps(count, default=json_util.default)
@main.route('/data/nfl/')
def nflData():
playtype = request.args.get('PlayType')
down = request.args.get('down')
quarter = request.args.get('quarter')
clock = request.args.get('clock')
yards = request.args.get('yards')
field = request.args.get('field')
score = request.args.get('score')
search_dict = request.args.to_dict()
for key in search_dict:
if key != 'playtype':
try:
search_dict[key] = int(search_dict[key])
except:
pass
s=[data["PlayType"] for data in mongo.db.nfldata.find(search_dict)]
print(s)
options = ['pass', 'run', 'punt', 'fg', 'kneel']
count = {option:s.count(option) for option in options}
print(count)
return json.dumps(count, default=json_util.default)
@main.route('/api/predict/')
def apiPredict():
arg_dict = request.args.to_dict()
for key in arg_dict:
try:
arg_dict[key] = int(arg_dict[key])
except:
pass
calculations = [
{name:rmodel.predict_group_proba(
arg_dict['quarter'],
arg_dict['down'],
arg_dict['yards'],
arg_dict['clock'],
arg_dict['field'],
arg_dict['score'],
name)
} for name in ['quarter', 'down', 'yards', 'timeunder', 'yrdline100', 'scorediff']
]
calculations.append({'request':rmodel.predict_proba(
arg_dict['quarter'],
arg_dict['down'],
arg_dict['yards'],
arg_dict['clock'],
arg_dict['field'],
arg_dict['score'],
False)
})
return jsonify(calculations)
| skrzym/monday-morning-quarterback | Application/Site/mmq/main/controllers.py | Python | mit | 7,841 | 0.054202 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for naming module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.py2tf.impl import naming
from tensorflow.python.platform import test
class NamerTest(test.TestCase):
def test_compiled_function_name_tracks_names(self):
def bar():
pass
namer = naming.Namer({}, True, None, ())
self.assertEqual(('tf__foo', True), namer.compiled_function_name('foo'))
self.assertEqual(('tf__bar', True), namer.compiled_function_name(
'bar', bar))
self.assertEqual({bar: 'tf__bar'}, namer.renamed_calls)
self.assertItemsEqual(('tf__bar', 'tf__foo'), namer.generated_names)
def test_compiled_function_name_consistent(self):
def foo():
pass
namer = naming.Namer({}, True, None, ())
self.assertEqual(('tf__foo', True), namer.compiled_function_name(
'foo', foo))
self.assertEqual(('tf__foo', True), namer.compiled_function_name(
'foo', foo))
def test_compiled_function_name_avoids_global_conflicts(self):
def foo():
pass
namer = naming.Namer({'tf__foo': 1}, True, None, ())
self.assertEqual(('tf__foo_1', True),
namer.compiled_function_name('foo', foo))
def test_new_symbol_tracks_names(self):
namer = naming.Namer({}, True, None, ())
self.assertEqual('temp', namer.new_symbol('temp', set()))
self.assertItemsEqual(('temp',), namer.generated_names)
def test_new_symbol_avoids_duplicates(self):
namer = naming.Namer({}, True, None, ())
self.assertEqual('temp', namer.new_symbol('temp', set()))
self.assertEqual('temp_1', namer.new_symbol('temp', set()))
self.assertItemsEqual(('temp', 'temp_1'), namer.generated_names)
def test_new_symbol_avoids_conflicts(self):
namer = naming.Namer({'temp': 1}, True, None, ())
# temp is reserved in the global namespace
self.assertEqual('temp_1', namer.new_symbol('temp', set()))
# temp_2 is reserved in the local namespace
self.assertEqual('temp_3', namer.new_symbol('temp', set(('temp_2',))))
self.assertItemsEqual(('temp_1', 'temp_3'), namer.generated_names)
if __name__ == '__main__':
test.main()
| rabipanda/tensorflow | tensorflow/contrib/py2tf/impl/naming_test.py | Python | apache-2.0 | 2,895 | 0.003454 |
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from rally.common import costilius
from rally.task.processing import charts
from tests.unit import test
CHARTS = "rally.task.processing.charts."
class ChartTestCase(test.TestCase):
class Chart(charts.Chart):
def _map_iteration_values(self, iteration):
return [("foo_" + k, iteration[k]) for k in ["a", "b"]]
@property
def bench_info(self):
return {"iterations_count": 42, "atomic": {"a": {}, "b": {}, "c": {}}}
def test___init__(self):
self.assertRaises(TypeError, charts.Chart, self.bench_info)
chart = self.Chart(self.bench_info)
self.assertEqual({}, chart._data)
self.assertEqual(42, chart.base_size)
self.assertEqual(1000, chart.zipped_size)
chart = self.Chart(self.bench_info, zipped_size=24)
self.assertEqual({}, chart._data)
self.assertEqual(42, chart.base_size)
self.assertEqual(24, chart.zipped_size)
@mock.patch(CHARTS + "utils.GraphZipper")
def test_add_iteration_and_render(self, mock_graph_zipper):
gzipper_a = mock.Mock(get_zipped_graph=lambda: "a_points")
gzipper_b = mock.Mock(get_zipped_graph=lambda: "b_points")
mock_graph_zipper.side_effect = [gzipper_a, gzipper_b]
chart = self.Chart(self.bench_info, 24)
self.assertEqual([], chart.render())
[chart.add_iteration(itr) for itr in [{"a": 1, "b": 2},
{"a": 3, "b": 4}]]
self.assertEqual([mock.call(42, 24), mock.call(42, 24)],
mock_graph_zipper.mock_calls)
self.assertEqual(2, len(chart._data))
self.assertEqual([mock.call(1), mock.call(3)],
chart._data["foo_a"].add_point.mock_calls)
self.assertEqual([mock.call(2), mock.call(4)],
chart._data["foo_b"].add_point.mock_calls)
self.assertEqual([("foo_a", "a_points"), ("foo_b", "b_points")],
chart.render())
def test__fix_atomic_actions(self):
chart = self.Chart(self.bench_info)
self.assertEqual(
{"atomic_actions": {"a": 5, "b": 6, "c": 0}},
chart._fix_atomic_actions({"atomic_actions": {"a": 5, "b": 6}}))
class MainStackedAreaChartTestCase(test.TestCase):
def test_add_iteration_and_render(self):
chart = charts.MainStackedAreaChart({"iterations_count": 3,
"iterations_failed": 0}, 10)
self.assertIsInstance(chart, charts.Chart)
[chart.add_iteration(itr) for itr in (
{"duration": 1.1, "idle_duration": 2.2, "error": []},
{"error": [], "duration": 1.1, "idle_duration": 0.5},
{"duration": 1.3, "idle_duration": 3.4, "error": []})]
expected = [("duration", [[1, 1.1], [2, 1.1], [3, 1.3]]),
("idle_duration", [[1, 2.2], [2, 0.5], [3, 3.4]])]
self.assertEqual(expected, chart.render())
def test_add_iteration_and_render_with_failed_iterations(self):
chart = charts.MainStackedAreaChart({"iterations_count": 3,
"iterations_failed": 2}, 10)
self.assertIsInstance(chart, charts.Chart)
[chart.add_iteration(itr) for itr in (
{"duration": 1.1, "idle_duration": 2.2, "error": []},
{"error": ["foo_err"], "duration": 1.1, "idle_duration": 0.5},
{"duration": 1.3, "idle_duration": 3.4, "error": ["foo_err"]})]
expected = [("duration", [[1, 1.1], [2, 0], [3, 0]]),
("idle_duration", [[1, 2.2], [2, 0], [3, 0]]),
("failed_duration", [[1, 0], [2, 1.6], [3, 4.7]])]
self.assertEqual(expected, chart.render())
class AtomicStackedAreaChartTestCase(test.TestCase):
def test_add_iteration_and_render(self):
iterations = (
{"atomic_actions": {"foo": 1.1}, "error": []},
{"atomic_actions": {"foo": 1.1, "bar": 1.2},
"error": [], "duration": 40, "idle_duration": 2},
{"atomic_actions": {"bar": 1.2},
"error": [], "duration": 5.5, "idle_duration": 2.5})
expected = [("bar", [[1, 0], [2, 1.2], [3, 1.2]]),
("foo", [[1, 1.1], [2, 1.1], [3, 0]])]
chart = charts.AtomicStackedAreaChart(
{"iterations_count": 3, "iterations_failed": 0,
"atomic": {"foo": {}, "bar": {}}}, 10)
self.assertIsInstance(chart, charts.Chart)
[chart.add_iteration(iteration) for iteration in iterations]
self.assertEqual(expected, sorted(chart.render()))
def test_add_iteration_and_render_with_failed_iterations(self):
iterations = (
{"atomic_actions": {"foo": 1.1}, "error": []},
{"atomic_actions": {"foo": 1.1, "bar": 1.2},
"error": ["foo_err"], "duration": 40, "idle_duration": 2},
{"atomic_actions": {"bar": 1.2},
"error": ["foo_err"], "duration": 5.5, "idle_duration": 2.5})
expected = [("bar", [[1, 0], [2, 1.2], [3, 1.2]]),
("failed_duration", [[1, 0], [2, 39.7], [3, 6.8]]),
("foo", [[1, 1.1], [2, 1.1], [3, 0]])]
chart = charts.AtomicStackedAreaChart(
{"iterations_count": 3, "iterations_failed": 2,
"atomic": {"foo": {}, "bar": {}}}, 10)
self.assertIsInstance(chart, charts.Chart)
[chart.add_iteration(iteration) for iteration in iterations]
self.assertEqual(expected, sorted(chart.render()))
class OutputStackedAreaChartTestCase(test.TestCase):
def test_add_iteration_and_render(self):
chart = charts.OutputStackedAreaChart(
{"iterations_count": 3, "output_names": ["foo", "bar"]}, 10)
self.assertIsInstance(chart, charts.Chart)
[chart.add_iteration({"scenario_output": {"data": x}})
for x in ({"foo": 1.1, "bar": 1.2}, {"foo": 1.3}, {"bar": 1.4})]
expected = [("bar", [[1, 1.2], [2, 0], [3, 1.4]]),
("foo", [[1, 1.1], [2, 1.3], [3, 0]])]
self.assertEqual(expected, sorted(chart.render()))
class AvgChartTestCase(test.TestCase):
class AvgChart(charts.AvgChart):
def _map_iteration_values(self, iteration):
return iteration["foo"].items()
def test_add_iteration_and_render(self):
self.assertRaises(TypeError, charts.AvgChart, {"iterations_count": 3})
chart = self.AvgChart({"iterations_count": 3})
self.assertIsInstance(chart, charts.AvgChart)
[chart.add_iteration({"foo": x}) for x in ({"a": 1.3, "b": 4.3},
{"a": 2.4, "b": 5.4},
{"a": 3.5, "b": 7.7})]
self.assertEqual([("a", 2.4), ("b", 5.8)], sorted(chart.render()))
class AtomicAvgChartTestCase(test.TestCase):
def test_add_iteration_and_render(self):
chart = charts.AtomicAvgChart({"iterations_count": 3,
"atomic": {"foo": {}, "bar": {}}})
self.assertIsInstance(chart, charts.AvgChart)
[chart.add_iteration({"atomic_actions": costilius.OrderedDict(a)})
for a in ([("foo", 2), ("bar", 5)], [("foo", 4)], [("bar", 7)])]
self.assertEqual([("bar", 4.0), ("foo", 2.0)], sorted(chart.render()))
@ddt.ddt
class LoadProfileChartTestCase(test.TestCase):
@ddt.data({"count": 5, "load_duration": 63, "tstamp_start": 12345,
"kwargs": {"scale": 10}, "data": [
(12345, 4.2, False), (12347, 42, False), (12349, 10, True),
(12351, 5.5, False), (12353, 0.42, False)],
"expected": [("parallel iterations", [
[6.0, 3], [12.0, 3], [18.0, 1], [24.0, 1], [30.0, 1],
[36.0, 1], [42.0, 1], [48.0, 1], [54.0, 0], [63, 0]])]},
{"count": 5, "load_duration": 63, "tstamp_start": 12345,
"kwargs": {"scale": 8, "name": "Custom text"}, "data": [
(12345, 4.2, False), (12347, 42, False), (12349, 10, True),
(12351, 5.5, False), (12353, 0.42, False)],
"expected": [("Custom text", [
[8.0, 4], [16.0, 3], [24.0, 1], [32.0, 1], [40.0, 1],
[48.0, 1], [56.0, 0], [63, 0]])]},
{"count": 0, "load_duration": 0, "tstamp_start": 12345,
"kwargs": {"scale": 8}, "data": [],
"expected": [("parallel iterations", [[0, 0]])]})
@ddt.unpack
def test_add_iteration_and_render(self, count, load_duration,
tstamp_start, kwargs, data, expected):
chart = charts.LoadProfileChart(
{"iterations_count": count,
"load_duration": load_duration, "tstamp_start": tstamp_start},
**kwargs)
self.assertIsInstance(chart, charts.Chart)
[chart.add_iteration({"timestamp": t, "duration": d, "error": e})
for t, d, e in data]
self.assertEqual(expected, chart.render())
@ddt.ddt
class HistogramChartTestCase(test.TestCase):
class HistogramChart(charts.HistogramChart):
def __init__(self, benchmark_info):
super(HistogramChartTestCase.HistogramChart,
self).__init__(benchmark_info)
self._data["bar"] = {"views": self._init_views(1.2, 4.2),
"disabled": None}
def _map_iteration_values(self, iteration):
return iteration["foo"].items()
def test_add_iteration_and_render(self):
self.assertRaises(TypeError, charts.HistogramChart,
{"iterations_count": 3})
chart = self.HistogramChart({"iterations_count": 3})
self.assertIsInstance(chart, charts.HistogramChart)
[chart.add_iteration({"foo": x}) for x in ({"bar": 1.2}, {"bar": 2.4},
{"bar": 4.2})]
expected = [[{"disabled": None, "key": "bar",
"values": [{"x": 2.7, "y": 2}, {"x": 4.2, "y": 1}],
"view": "Square Root Choice"},
{"disabled": None, "key": "bar",
"values": [{"x": 2.2, "y": 1}, {"x": 3.2, "y": 1},
{"x": 4.2, "y": 1}],
"view": "Sturges Formula"},
{"disabled": None,
"key": "bar",
"values": [{"x": 2.2, "y": 1}, {"x": 3.2, "y": 1},
{"x": 4.2, "y": 1}],
"view": "Rice Rule"},
{"disabled": None, "key": "bar",
"values": [{"x": 2.7, "y": 2}, {"x": 4.2, "y": 1}],
"view": "One Half"}]]
self.assertEqual(expected, chart.render())
@ddt.data(
{"base_size": 2, "min_value": 1, "max_value": 4,
"expected": [{"bins": 2, "view": "Square Root Choice",
"x": [2.5, 4.0], "y": [0, 0]},
{"bins": 2, "view": "Sturges Formula",
"x": [2.5, 4.0], "y": [0, 0]},
{"bins": 3, "view": "Rice Rule",
"x": [2.0, 3.0, 4.0], "y": [0, 0, 0]},
{"bins": 1, "view": "One Half", "x": [4.0], "y": [0]}]},
{"base_size": 100, "min_value": 27, "max_value": 42,
"expected": [
{"bins": 10, "view": "Square Root Choice",
"x": [28.5, 30.0, 31.5, 33.0, 34.5, 36.0, 37.5, 39.0, 40.5,
42.0], "y": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]},
{"bins": 8, "view": "Sturges Formula",
"x": [28.875, 30.75, 32.625, 34.5, 36.375, 38.25, 40.125,
42.0], "y": [0, 0, 0, 0, 0, 0, 0, 0]},
{"bins": 10, "view": "Rice Rule",
"x": [28.5, 30.0, 31.5, 33.0, 34.5, 36.0, 37.5, 39.0, 40.5,
42.0], "y": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]},
{"bins": 50, "view": "One Half",
"x": [27.3, 27.6, 27.9, 28.2, 28.5, 28.8, 29.1, 29.4, 29.7,
30.0, 30.3, 30.6, 30.9, 31.2, 31.5, 31.8, 32.1, 32.4,
32.7, 33.0, 33.3, 33.6, 33.9, 34.2, 34.5, 34.8, 35.1,
35.4, 35.7, 36.0, 36.3, 36.6, 36.9, 37.2, 37.5, 37.8,
38.1, 38.4, 38.7, 39.0, 39.3, 39.6, 39.9, 40.2, 40.5,
40.8, 41.1, 41.4, 41.7, 42.0], "y": [0] * 50}]})
@ddt.unpack
def test_views(self, base_size=None, min_value=None, max_value=None,
expected=None):
chart = self.HistogramChart({"iterations_count": base_size})
self.assertEqual(expected, chart._init_views(min_value, max_value))
class MainHistogramChartTestCase(test.TestCase):
def test_add_iteration_and_render(self):
chart = charts.MainHistogramChart(
{"iterations_count": 3, "min_duration": 2, "max_duration": 7})
self.assertIsInstance(chart, charts.HistogramChart)
[chart.add_iteration(itr) for itr in (
{"duration": 1.1, "idle_duration": 2.2, "error": None},
{"error": True},
{"duration": 1.3, "idle_duration": 3.4, "error": None})]
expected = [
{"disabled": None, "key": "task", "view": "Square Root Choice",
"values": [{"x": 4.5, "y": 3}, {"x": 7.0, "y": 0}]},
{"disabled": None, "key": "task", "view": "Sturges Formula",
"values": [{"x": 3.666666666666667, "y": 3},
{"x": 5.333333333333334, "y": 0},
{"x": 7.0, "y": 0}]},
{"disabled": None, "key": "task", "view": "Rice Rule",
"values": [{"x": 3.666666666666667, "y": 3},
{"x": 5.333333333333334, "y": 0},
{"x": 7.0, "y": 0}]},
{"disabled": None, "key": "task", "view": "One Half",
"values": [{"x": 4.5, "y": 3}, {"x": 7.0, "y": 0}]}]
self.assertEqual([expected], chart.render())
class AtomicHistogramChartTestCase(test.TestCase):
def test_add_iteration_and_render(self):
chart = charts.AtomicHistogramChart(
{"iterations_count": 3,
"atomic": costilius.OrderedDict(
[("foo", {"min_duration": 1.6, "max_duration": 2.8}),
("bar", {"min_duration": 3.1, "max_duration": 5.5})])})
self.assertIsInstance(chart, charts.HistogramChart)
[chart.add_iteration({"atomic_actions": a})
for a in ({"foo": 1.6, "bar": 3.1}, {"foo": 2.8}, {"bar": 5.5})]
expected = [
[{"disabled": 0, "key": "foo", "view": "Square Root Choice",
"values": [{"x": 2.2, "y": 2}, {"x": 2.8, "y": 1}]},
{"disabled": 0, "key": "foo", "view": "Sturges Formula",
"values": [{"x": 2.0, "y": 2}, {"x": 2.4, "y": 0},
{"x": 2.8, "y": 1}]},
{"disabled": 0, "key": "foo", "view": "Rice Rule",
"values": [{"x": 2.0, "y": 2}, {"x": 2.4, "y": 0},
{"x": 2.8, "y": 1}]},
{"disabled": 0, "key": "foo", "view": "One Half",
"values": [{"x": 2.2, "y": 2}, {"x": 2.8, "y": 1}]}],
[{"disabled": 1, "key": "bar", "view": "Square Root Choice",
"values": [{"x": 4.3, "y": 2}, {"x": 5.5, "y": 1}]},
{"disabled": 1, "key": "bar", "view": "Sturges Formula",
"values": [{"x": 3.9, "y": 2}, {"x": 4.7, "y": 0},
{"x": 5.5, "y": 1}]},
{"disabled": 1, "key": "bar", "view": "Rice Rule",
"values": [{"x": 3.9, "y": 2}, {"x": 4.7, "y": 0},
{"x": 5.5, "y": 1}]},
{"disabled": 1, "key": "bar", "view": "One Half",
"values": [{"x": 4.3, "y": 2}, {"x": 5.5, "y": 1}]}]]
self.assertEqual(expected, chart.render())
MAIN_STATS_TABLE_COLUMNS = ["Action", "Min (sec)", "Median (sec)",
"90%ile (sec)", "95%ile (sec)", "Max (sec)",
"Avg (sec)", "Success", "Count"]
def generate_iteration(duration, error, *args):
return {
"atomic_actions": costilius.OrderedDict(args),
"duration": duration,
"error": error
}
@ddt.ddt
class MainStatsTableTestCase(test.TestCase):
@ddt.data(
{
"info": {
"iterations_count": 1,
"atomic": costilius.OrderedDict([("foo", {}), ("bar", {})])
},
"data": [
generate_iteration(10.0, False, ("foo", 1.0), ("bar", 2.0))
],
"expected": {
"cols": MAIN_STATS_TABLE_COLUMNS,
"rows": [
["foo", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, "100.0%", 1],
["bar", 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, "100.0%", 1],
["total", 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, "100.0%", 1],
]
}
},
{
"info": {"iterations_count": 2, "atomic": {"foo": {}}},
"data": [
generate_iteration(10.0, True, ("foo", 1.0)),
generate_iteration(10.0, True, ("foo", 2.0))
],
"expected": {
"cols": MAIN_STATS_TABLE_COLUMNS,
"rows": [
["foo", "n/a", "n/a", "n/a", "n/a", "n/a", "n/a", "0.0%",
2],
["total", "n/a", "n/a", "n/a", "n/a", "n/a", "n/a", "0.0%",
2],
]
}
},
{
"info": {"iterations_count": 2, "atomic": {"foo": {}}},
"data": [
generate_iteration(10.0, False, ("foo", 1.0)),
generate_iteration(20.0, True, ("foo", 2.0))
],
"expected": {
"cols": MAIN_STATS_TABLE_COLUMNS,
"rows": [
["foo", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, "50.0%", 2],
["total", 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, "50.0%", 2]
]
}
},
{
"info": {
"iterations_count": 4,
"atomic": costilius.OrderedDict([("foo", {}), ("bar", {})])
},
"data": [
generate_iteration(10.0, False, ("foo", 1.0), ("bar", 4.0)),
generate_iteration(20.0, False, ("foo", 2.0), ("bar", 4.0)),
generate_iteration(30.0, False, ("foo", 3.0), ("bar", 4.0)),
generate_iteration(40.0, True, ("foo", 4.0), ("bar", 4.0))
],
"expected": {
"cols": MAIN_STATS_TABLE_COLUMNS,
"rows": [
["foo", 1.0, 2.0, 2.8, 2.9, 3.0, 2.0, "75.0%", 4],
["bar", 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, "75.0%", 4],
["total", 10.0, 20.0, 28.0, 29.0, 30.0, 20.0, "75.0%", 4]
]
}
}
)
@ddt.unpack
def test_add_iteration_and_render(self, info, data, expected):
table = charts.MainStatsTable(info)
for el in data:
table.add_iteration(el)
self.assertEqual(expected, table.render())
| aforalee/rally | tests/unit/task/processing/test_charts.py | Python | apache-2.0 | 19,909 | 0 |
from __future__ import unicode_literals
import copy
import pickle
import sys
import warnings
from unittest import TestCase
from django.utils import six
from django.utils.functional import LazyObject, SimpleLazyObject, empty
from .models import Category, CategoryInfo
class Foo(object):
"""
A simple class with just one attribute.
"""
foo = 'bar'
def __eq__(self, other):
return self.foo == other.foo
class LazyObjectTestCase(TestCase):
def lazy_wrap(self, wrapped_object):
"""
Wrap the given object into a LazyObject
"""
class AdHocLazyObject(LazyObject):
def _setup(self):
self._wrapped = wrapped_object
return AdHocLazyObject()
def test_getattr(self):
obj = self.lazy_wrap(Foo())
self.assertEqual(obj.foo, 'bar')
def test_setattr(self):
obj = self.lazy_wrap(Foo())
obj.foo = 'BAR'
obj.bar = 'baz'
self.assertEqual(obj.foo, 'BAR')
self.assertEqual(obj.bar, 'baz')
def test_setattr2(self):
# Same as test_setattr but in reversed order
obj = self.lazy_wrap(Foo())
obj.bar = 'baz'
obj.foo = 'BAR'
self.assertEqual(obj.foo, 'BAR')
self.assertEqual(obj.bar, 'baz')
def test_delattr(self):
obj = self.lazy_wrap(Foo())
obj.bar = 'baz'
self.assertEqual(obj.bar, 'baz')
del obj.bar
with self.assertRaises(AttributeError):
obj.bar
def test_cmp(self):
obj1 = self.lazy_wrap('foo')
obj2 = self.lazy_wrap('bar')
obj3 = self.lazy_wrap('foo')
self.assertEqual(obj1, 'foo')
self.assertEqual(obj1, obj3)
self.assertNotEqual(obj1, obj2)
self.assertNotEqual(obj1, 'bar')
def test_bytes(self):
obj = self.lazy_wrap(b'foo')
self.assertEqual(bytes(obj), b'foo')
def test_text(self):
obj = self.lazy_wrap('foo')
self.assertEqual(six.text_type(obj), 'foo')
def test_bool(self):
# Refs #21840
for f in [False, 0, (), {}, [], None, set()]:
self.assertFalse(self.lazy_wrap(f))
for t in [True, 1, (1,), {1: 2}, [1], object(), {1}]:
            self.assertTrue(self.lazy_wrap(t))
def test_dir(self):
obj = self.lazy_wrap('foo')
self.assertEqual(dir(obj), dir('foo'))
def test_len(self):
for seq in ['asd', [1, 2, 3], {'a': 1, 'b': 2, 'c': 3}]:
obj = self.lazy_wrap(seq)
self.assertEqual(len(obj), 3)
def test_class(self):
self.assertIsInstance(self.lazy_wrap(42), int)
class Bar(Foo):
pass
self.assertIsInstance(self.lazy_wrap(Bar()), Foo)
def test_hash(self):
obj = self.lazy_wrap('foo')
d = {}
d[obj] = 'bar'
self.assertIn('foo', d)
self.assertEqual(d['foo'], 'bar')
def test_contains(self):
test_data = [
('c', 'abcde'),
(2, [1, 2, 3]),
('a', {'a': 1, 'b': 2, 'c': 3}),
(2, {1, 2, 3}),
]
for needle, haystack in test_data:
self.assertIn(needle, self.lazy_wrap(haystack))
# __contains__ doesn't work when the haystack is a string and the needle a LazyObject
        for needle, haystack in test_data[1:]:
self.assertIn(self.lazy_wrap(needle), haystack)
self.assertIn(self.lazy_wrap(needle), self.lazy_wrap(haystack))
def test_getitem(self):
obj_list = self.lazy_wrap([1, 2, 3])
obj_dict = self.lazy_wrap({'a': 1, 'b': 2, 'c': 3})
self.assertEqual(obj_list[0], 1)
self.assertEqual(obj_list[-1], 3)
self.assertEqual(obj_list[1:2], [2])
self.assertEqual(obj_dict['b'], 2)
with self.assertRaises(IndexError):
obj_list[3]
with self.assertRaises(KeyError):
obj_dict['f']
def test_setitem(self):
obj_list = self.lazy_wrap([1, 2, 3])
obj_dict = self.lazy_wrap({'a': 1, 'b': 2, 'c': 3})
obj_list[0] = 100
self.assertEqual(obj_list, [100, 2, 3])
obj_list[1:2] = [200, 300, 400]
self.assertEqual(obj_list, [100, 200, 300, 400, 3])
obj_dict['a'] = 100
obj_dict['d'] = 400
self.assertEqual(obj_dict, {'a': 100, 'b': 2, 'c': 3, 'd': 400})
def test_delitem(self):
obj_list = self.lazy_wrap([1, 2, 3])
obj_dict = self.lazy_wrap({'a': 1, 'b': 2, 'c': 3})
del obj_list[-1]
del obj_dict['c']
self.assertEqual(obj_list, [1, 2])
self.assertEqual(obj_dict, {'a': 1, 'b': 2})
with self.assertRaises(IndexError):
del obj_list[3]
with self.assertRaises(KeyError):
del obj_dict['f']
def test_iter(self):
# Tests whether an object's custom `__iter__` method is being
# used when iterating over it.
class IterObject(object):
def __init__(self, values):
self.values = values
def __iter__(self):
return iter(self.values)
original_list = ['test', '123']
self.assertEqual(
list(self.lazy_wrap(IterObject(original_list))),
original_list
)
def test_pickle(self):
# See ticket #16563
obj = self.lazy_wrap(Foo())
pickled = pickle.dumps(obj)
unpickled = pickle.loads(pickled)
self.assertIsInstance(unpickled, Foo)
self.assertEqual(unpickled, obj)
self.assertEqual(unpickled.foo, obj.foo)
def test_deepcopy(self):
# Check that we *can* do deep copy, and that it returns the right
# objects.
l = [1, 2, 3]
obj = self.lazy_wrap(l)
        len(obj)  # forces evaluation
obj2 = copy.deepcopy(obj)
self.assertIsInstance(obj2, list)
self.assertEqual(obj2, [1, 2, 3])
def test_deepcopy_no_evaluation(self):
# copying doesn't force evaluation
l = [1, 2, 3]
obj = self.lazy_wrap(l)
obj2 = copy.deepcopy(obj)
# Copying shouldn't force evaluation
self.assertIs(obj._wrapped, empty)
self.assertIs(obj2._wrapped, empty)
class SimpleLazyObjectTestCase(LazyObjectTestCase):
# By inheriting from LazyObjectTestCase and redefining the lazy_wrap()
# method which all testcases use, we get to make sure all behaviors
# tested in the parent testcase also apply to SimpleLazyObject.
def lazy_wrap(self, wrapped_object):
return SimpleLazyObject(lambda: wrapped_object)
def test_repr(self):
# First, for an unevaluated SimpleLazyObject
obj = self.lazy_wrap(42)
# __repr__ contains __repr__ of setup function and does not evaluate
# the SimpleLazyObject
six.assertRegex(self, repr(obj), '^<SimpleLazyObject:')
self.assertIs(obj._wrapped, empty) # make sure evaluation hasn't been triggered
self.assertEqual(obj, 42) # evaluate the lazy object
self.assertIsInstance(obj._wrapped, int)
self.assertEqual(repr(obj), '<SimpleLazyObject: 42>')
def test_trace(self):
# See ticket #19456
old_trace_func = sys.gettrace()
try:
def trace_func(frame, event, arg):
frame.f_locals['self'].__class__
if old_trace_func is not None:
old_trace_func(frame, event, arg)
sys.settrace(trace_func)
self.lazy_wrap(None)
finally:
sys.settrace(old_trace_func)
def test_none(self):
i = [0]
def f():
i[0] += 1
return None
x = SimpleLazyObject(f)
self.assertEqual(str(x), "None")
self.assertEqual(i, [1])
self.assertEqual(str(x), "None")
self.assertEqual(i, [1])
def test_dict(self):
# See ticket #18447
lazydict = SimpleLazyObject(lambda: {'one': 1})
self.assertEqual(lazydict['one'], 1)
lazydict['one'] = -1
self.assertEqual(lazydict['one'], -1)
self.assertIn('one', lazydict)
self.assertNotIn('two', lazydict)
self.assertEqual(len(lazydict), 1)
del lazydict['one']
with self.assertRaises(KeyError):
lazydict['one']
def test_list_set(self):
lazy_list = SimpleLazyObject(lambda: [1, 2, 3, 4, 5])
lazy_set = SimpleLazyObject(lambda: {1, 2, 3, 4})
self.assertIn(1, lazy_list)
self.assertIn(1, lazy_set)
self.assertNotIn(6, lazy_list)
self.assertNotIn(6, lazy_set)
self.assertEqual(len(lazy_list), 5)
self.assertEqual(len(lazy_set), 4)
class BaseBaz(object):
"""
A base class with a funky __reduce__ method, meant to simulate the
__reduce__ method of Model, which sets self._django_version.
"""
def __init__(self):
self.baz = 'wrong'
def __reduce__(self):
self.baz = 'right'
return super(BaseBaz, self).__reduce__()
def __eq__(self, other):
if self.__class__ != other.__class__:
return False
for attr in ['bar', 'baz', 'quux']:
if hasattr(self, attr) != hasattr(other, attr):
return False
elif getattr(self, attr, None) != getattr(other, attr, None):
return False
return True
class Baz(BaseBaz):
"""
A class that inherits from BaseBaz and has its own __reduce_ex__ method.
"""
def __init__(self, bar):
self.bar = bar
super(Baz, self).__init__()
def __reduce_ex__(self, proto):
self.quux = 'quux'
return super(Baz, self).__reduce_ex__(proto)
class BazProxy(Baz):
"""
A class that acts as a proxy for Baz. It does some scary mucking about with
dicts, which simulates some crazy things that people might do with
e.g. proxy models.
"""
def __init__(self, baz):
self.__dict__ = baz.__dict__
self._baz = baz
super(BaseBaz, self).__init__()
class SimpleLazyObjectPickleTestCase(TestCase):
"""
Regression test for pickling a SimpleLazyObject wrapping a model (#25389).
Also covers other classes with a custom __reduce__ method.
"""
def test_pickle_with_reduce(self):
"""
Test in a fairly synthetic setting.
"""
# Test every pickle protocol available
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
lazy_objs = [
SimpleLazyObject(lambda: BaseBaz()),
SimpleLazyObject(lambda: Baz(1)),
SimpleLazyObject(lambda: BazProxy(Baz(2))),
]
for obj in lazy_objs:
pickled = pickle.dumps(obj, protocol)
unpickled = pickle.loads(pickled)
self.assertEqual(unpickled, obj)
self.assertEqual(unpickled.baz, 'right')
def test_pickle_model(self):
"""
Test on an actual model, based on the report in #25426.
"""
category = Category.objects.create(name="thing1")
CategoryInfo.objects.create(category=category)
# Test every pickle protocol available
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
lazy_category = SimpleLazyObject(lambda: category)
# Test both if we accessed a field on the model and if we didn't.
lazy_category.categoryinfo
lazy_category_2 = SimpleLazyObject(lambda: category)
with warnings.catch_warnings(record=True) as recorded:
self.assertEqual(pickle.loads(pickle.dumps(lazy_category, protocol)), category)
self.assertEqual(pickle.loads(pickle.dumps(lazy_category_2, protocol)), category)
# Assert that there were no warnings.
self.assertEqual(len(recorded), 0)
| MounirMesselmeni/django | tests/utils_tests/test_lazyobject.py | Python | bsd-3-clause | 11,862 | 0.000506 |
# -*- coding: utf-8 -*-
class horizontal_rule:
def __init__(self):
pass
@staticmethod
def process(data, args):
data['note_viewer'].call_function('insert_horizontal_rule') | marcoconstancio/yanta | actions/horizontal_rule/horizontal_rule.py | Python | gpl-2.0 | 200 | 0.005 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.sshClient import SshClient
from marvin.integration.lib.utils import *
from marvin.integration.lib.base import *
from marvin.integration.lib.common import *
from nose.plugins.attrib import attr
#Import System modules
import time
_multiprocess_shared_ = True
class Services:
"""Test Network Services
"""
def __init__(self):
self.services = {
"ostype": "CentOS 5.3 (64-bit)",
# Cent OS 5.3 (64 bit)
"lb_switch_wait": 10,
# Time interval after which LB switches the requests
"sleep": 60,
"timeout":10,
"network_offering": {
"name": 'Test Network offering',
"displaytext": 'Test Network offering',
"guestiptype": 'Isolated',
"supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding',
"traffictype": 'GUEST',
"availability": 'Optional',
"serviceProviderList" : {
"Dhcp": 'VirtualRouter',
"Dns": 'VirtualRouter',
"SourceNat": 'VirtualRouter',
"PortForwarding": 'VirtualRouter',
},
},
"network": {
"name": "Test Network",
"displaytext": "Test Network",
},
"service_offering": {
"name": "Tiny Instance",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 100,
# in MHz
"memory": 256,
# In MBs
},
"account": {
"email": "[email protected]",
"firstname": "Test",
"lastname": "User",
"username": "test",
"password": "password",
},
"server":
{
"displayname": "Small Instance",
"username": "root",
"password": "password",
"hypervisor": 'XenServer',
"privateport": 22,
"publicport": 22,
"ssh_port": 22,
"protocol": 'TCP',
},
"natrule":
{
"privateport": 22,
"publicport": 2222,
"protocol": "TCP"
},
"lbrule":
{
"name": "SSH",
"alg": "roundrobin",
# Algorithm used for load balancing
"privateport": 22,
"publicport": 2222,
"protocol": 'TCP'
}
}
class TestLoadBalance(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(TestLoadBalance, cls).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["server"]["zoneid"] = cls.zone.id
#Create an account, network, VM and IP addresses
cls.account = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.vm_1 = VirtualMachine.create(
cls.api_client,
cls.services["server"],
templateid=template.id,
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id
)
cls.vm_2 = VirtualMachine.create(
cls.api_client,
cls.services["server"],
templateid=template.id,
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id
)
cls.vm_3 = VirtualMachine.create(
cls.api_client,
cls.services["server"],
templateid=template.id,
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id
)
cls.non_src_nat_ip = PublicIPAddress.create(
cls.api_client,
cls.account.name,
cls.zone.id,
cls.account.domainid,
cls.services["server"]
)
# Open up firewall port for SSH
cls.fw_rule = FireWallRule.create(
cls.api_client,
ipaddressid=cls.non_src_nat_ip.ipaddress.id,
protocol=cls.services["lbrule"]["protocol"],
cidrlist=['0.0.0.0/0'],
startport=cls.services["lbrule"]["publicport"],
endport=cls.services["lbrule"]["publicport"]
)
cls._cleanup = [
cls.account,
cls.service_offering
]
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.cleanup = []
return
def tearDown(self):
cleanup_resources(self.apiclient, self.cleanup)
return
@classmethod
def tearDownClass(cls):
cleanup_resources(cls.api_client, cls._cleanup)
return
def try_ssh(self, ip_addr, hostnames):
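        # Added descriptive comment: one SSH to the load-balanced address records
        # which backend VM answered (by hostname); repeated calls reveal the
        # round-robin rotation that the assertions below rely on.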
try:
self.debug(
"SSH into VM (IPaddress: %s) & NAT Rule (Public IP: %s)" %
(self.vm_1.ipaddress, ip_addr)
)
# If Round Robin Algorithm is chosen,
# each ssh command should alternate between VMs
ssh_1 = SshClient(
ip_addr,
self.services['lbrule']["publicport"],
self.vm_1.username,
self.vm_1.password
)
hostnames.append(ssh_1.execute("hostname")[0])
self.debug(hostnames)
except Exception as e:
self.fail("%s: SSH failed for VM with IP Address: %s" %
(e, ip_addr))
time.sleep(self.services["lb_switch_wait"])
return
@attr(tags = ["advanced", "advancedns", "smoke"])
def test_01_create_lb_rule_src_nat(self):
"""Test to create Load balancing rule with source NAT"""
# Validate the Following:
#1. listLoadBalancerRules should return the added rule
#2. attempt to ssh twice on the load balanced IP
#3. verify using the hostname of the VM
# that round robin is indeed happening as expected
src_nat_ip_addrs = PublicIPAddress.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(src_nat_ip_addrs, list),
True,
"Check list response returns a valid list"
)
src_nat_ip_addr = src_nat_ip_addrs[0]
# Check if VM is in Running state before creating LB rule
vm_response = VirtualMachine.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(vm_response, list),
True,
"Check list VM returns a valid list"
)
self.assertNotEqual(
len(vm_response),
0,
"Check Port Forwarding Rule is created"
)
for vm in vm_response:
self.assertEqual(
vm.state,
'Running',
"VM state should be Running before creating a NAT rule."
)
#Create Load Balancer rule and assign VMs to rule
lb_rule = LoadBalancerRule.create(
self.apiclient,
self.services["lbrule"],
src_nat_ip_addr.id,
accountid=self.account.name
)
self.cleanup.append(lb_rule)
lb_rule.assign(self.apiclient, [self.vm_1, self.vm_2])
lb_rules = list_lb_rules(
self.apiclient,
id=lb_rule.id
)
self.assertEqual(
isinstance(lb_rules, list),
True,
"Check list response returns a valid list"
)
#verify listLoadBalancerRules lists the added load balancing rule
self.assertNotEqual(
len(lb_rules),
0,
"Check Load Balancer Rule in its List"
)
self.assertEqual(
lb_rules[0].id,
lb_rule.id,
"Check List Load Balancer Rules returns valid Rule"
)
# listLoadBalancerRuleInstances should list all
# instances associated with that LB rule
lb_instance_rules = list_lb_instances(
self.apiclient,
id=lb_rule.id
)
self.assertEqual(
isinstance(lb_instance_rules, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(lb_instance_rules),
0,
"Check Load Balancer instances Rule in its List"
)
self.debug("lb_instance_rules Ids: %s, %s" % (
lb_instance_rules[0].id,
lb_instance_rules[1].id
))
self.debug("VM ids: %s, %s" % (self.vm_1.id, self.vm_2.id))
self.assertIn(
lb_instance_rules[0].id,
[self.vm_1.id, self.vm_2.id],
"Check List Load Balancer instances Rules returns valid VM ID"
)
self.assertIn(
lb_instance_rules[1].id,
[self.vm_1.id, self.vm_2.id],
"Check List Load Balancer instances Rules returns valid VM ID"
)
hostnames = []
self.try_ssh(src_nat_ip_addr.ipaddress, hostnames)
self.try_ssh(src_nat_ip_addr.ipaddress, hostnames)
self.try_ssh(src_nat_ip_addr.ipaddress, hostnames)
self.try_ssh(src_nat_ip_addr.ipaddress, hostnames)
self.try_ssh(src_nat_ip_addr.ipaddress, hostnames)
self.debug("Hostnames: %s" % str(hostnames))
self.assertIn(
self.vm_1.name,
hostnames,
"Check if ssh succeeded for server1"
)
self.assertIn(
self.vm_2.name,
hostnames,
"Check if ssh succeeded for server2"
)
#SSH should pass till there is a last VM associated with LB rule
lb_rule.remove(self.apiclient, [self.vm_2])
# making hostnames list empty
hostnames[:] = []
try:
self.debug("SSHing into IP address: %s after removing VM (ID: %s)" %
(
src_nat_ip_addr.ipaddress,
self.vm_2.id
))
self.try_ssh(src_nat_ip_addr.ipaddress, hostnames)
self.assertIn(
self.vm_1.name,
hostnames,
"Check if ssh succeeded for server1"
)
except Exception as e:
self.fail("%s: SSH failed for VM with IP Address: %s" %
(e, src_nat_ip_addr.ipaddress))
lb_rule.remove(self.apiclient, [self.vm_1])
with self.assertRaises(Exception):
self.debug("Removed all VMs, trying to SSH")
self.try_ssh(src_nat_ip_addr.ipaddress, hostnames)
return
@attr(tags = ["advanced", "advancedns", "smoke"])
def test_02_create_lb_rule_non_nat(self):
"""Test to create Load balancing rule with non source NAT"""
# Validate the Following:
#1. listLoadBalancerRules should return the added rule
#2. attempt to ssh twice on the load balanced IP
#3. verify using the hostname of the VM that
# round robin is indeed happening as expected
#Create Load Balancer rule and assign VMs to rule
lb_rule = LoadBalancerRule.create(
self.apiclient,
self.services["lbrule"],
self.non_src_nat_ip.ipaddress.id,
accountid=self.account.name
)
self.cleanup.append(lb_rule)
lb_rule.assign(self.apiclient, [self.vm_1, self.vm_2])
lb_rules = list_lb_rules(
self.apiclient,
id=lb_rule.id
)
self.assertEqual(
isinstance(lb_rules, list),
True,
"Check list response returns a valid list"
)
#verify listLoadBalancerRules lists the added load balancing rule
self.assertNotEqual(
len(lb_rules),
0,
"Check Load Balancer Rule in its List"
)
self.assertEqual(
lb_rules[0].id,
lb_rule.id,
"Check List Load Balancer Rules returns valid Rule"
)
# listLoadBalancerRuleInstances should list
# all instances associated with that LB rule
lb_instance_rules = list_lb_instances(
self.apiclient,
id=lb_rule.id
)
self.assertEqual(
isinstance(lb_instance_rules, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(lb_instance_rules),
0,
"Check Load Balancer instances Rule in its List"
)
self.assertIn(
lb_instance_rules[0].id,
[self.vm_1.id, self.vm_2.id],
"Check List Load Balancer instances Rules returns valid VM ID"
)
self.assertIn(
lb_instance_rules[1].id,
[self.vm_1.id, self.vm_2.id],
"Check List Load Balancer instances Rules returns valid VM ID"
)
try:
hostnames = []
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.debug("Hostnames: %s" % str(hostnames))
self.assertIn(
self.vm_1.name,
hostnames,
"Check if ssh succeeded for server1"
)
self.assertIn(
self.vm_2.name,
hostnames,
"Check if ssh succeeded for server2"
)
#SSH should pass till there is a last VM associated with LB rule
lb_rule.remove(self.apiclient, [self.vm_2])
self.debug("SSHing into IP address: %s after removing VM (ID: %s) from LB rule" %
(
self.non_src_nat_ip.ipaddress.ipaddress,
self.vm_2.id
))
# Making host list empty
hostnames[:] = []
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.assertIn(
self.vm_1.name,
hostnames,
"Check if ssh succeeded for server1"
)
self.debug("Hostnames after removing VM2: %s" % str(hostnames))
except Exception as e:
self.fail("%s: SSH failed for VM with IP Address: %s" %
(e, self.non_src_nat_ip.ipaddress.ipaddress))
lb_rule.remove(self.apiclient, [self.vm_1])
with self.assertRaises(Exception):
self.debug("SSHing into IP address: %s after removing VM (ID: %s) from LB rule" %
(
self.non_src_nat_ip.ipaddress.ipaddress,
self.vm_1.id
))
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
return
@attr(tags = ["advanced", "advancedns", "smoke"])
def test_assign_and_removal_lb(self):
"""Test for assign & removing load balancing rule"""
# Validate:
#1. Verify list API - listLoadBalancerRules lists
# all the rules with the relevant ports
#2. listLoadBalancerInstances will list
# the instances associated with the corresponding rule.
#3. verify ssh attempts should pass as long as there
# is at least one instance associated with the rule
# Check if VM is in Running state before creating LB rule
vm_response = VirtualMachine.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(vm_response, list),
True,
"Check list VM returns a valid list"
)
self.assertNotEqual(
len(vm_response),
0,
"Check Port Forwarding Rule is created"
)
for vm in vm_response:
self.assertEqual(
vm.state,
'Running',
"VM state should be Running before creating a NAT rule."
)
lb_rule = LoadBalancerRule.create(
self.apiclient,
self.services["lbrule"],
self.non_src_nat_ip.ipaddress.id,
self.account.name
)
lb_rule.assign(self.apiclient, [self.vm_1, self.vm_2])
hostnames = []
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.debug("Hostnames: %s" % str(hostnames))
self.assertIn(
self.vm_1.name,
hostnames,
"Check if ssh succeeded for server1"
)
self.assertIn(
self.vm_2.name,
hostnames,
"Check if ssh succeeded for server2"
)
#Removing VM and assigning another VM to LB rule
lb_rule.remove(self.apiclient, [self.vm_2])
# making hostnames list empty
hostnames[:] = []
try:
self.debug("SSHing again into IP address: %s with VM (ID: %s) added to LB rule" %
(
self.non_src_nat_ip.ipaddress.ipaddress,
self.vm_1.id,
))
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.assertIn(
self.vm_1.name,
hostnames,
"Check if ssh succeeded for server1"
)
except Exception as e:
self.fail("SSH failed for VM with IP: %s" %
self.non_src_nat_ip.ipaddress.ipaddress)
lb_rule.assign(self.apiclient, [self.vm_3])
        # Making hostnames list empty
hostnames[:] = []
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.debug("Hostnames: %s" % str(hostnames))
self.assertIn(
self.vm_1.name,
hostnames,
"Check if ssh succeeded for server1"
)
self.assertIn(
self.vm_3.name,
hostnames,
"Check if ssh succeeded for server3"
)
return
| mufaddalq/cloudstack-datera-driver | test/integration/smoke/test_loadbalance.py | Python | apache-2.0 | 25,268 | 0.001979 |
from __future__ import absolute_import
from __future__ import print_function
import os
import numpy
import matplotlib.pyplot as plt
import datetime
import clawpack.visclaw.colormaps as colormap
import clawpack.visclaw.gaugetools as gaugetools
import clawpack.clawutil.data as clawutil
import clawpack.amrclaw.data as amrclaw
import clawpack.geoclaw.data as geodata
from clawpack.geoclaw.util import fetch_noaa_tide_data
import clawpack.geoclaw.surge.plot as surgeplot
try:
from setplotfg import setplotfg
except:
setplotfg = None
def setplot(plotdata=None):
""""""
if plotdata is None:
from clawpack.visclaw.data import ClawPlotData
plotdata = ClawPlotData()
# clear any old figures,axes,items data
plotdata.clearfigures()
plotdata.format = 'ascii'
# Load data from output
clawdata = clawutil.ClawInputData(2)
clawdata.read(os.path.join(plotdata.outdir, 'claw.data'))
physics = geodata.GeoClawData()
physics.read(os.path.join(plotdata.outdir, 'geoclaw.data'))
surge_data = geodata.SurgeData()
surge_data.read(os.path.join(plotdata.outdir, 'surge.data'))
friction_data = geodata.FrictionData()
friction_data.read(os.path.join(plotdata.outdir, 'friction.data'))
# Load storm track
track = surgeplot.track_data(os.path.join(plotdata.outdir, 'fort.track'))
# Set afteraxes function
def surge_afteraxes(cd):
surgeplot.surge_afteraxes(cd, track, plot_direction=False,
kwargs={"markersize": 4})
# Color limits
surface_limits = [-5.0, 5.0]
speed_limits = [0.0, 3.0]
wind_limits = [0, 64]
pressure_limits = [935, 1013]
friction_bounds = [0.01, 0.04]
def friction_after_axes(cd):
plt.title(r"Manning's $n$ Coefficient")
# ==========================================================================
# Plot specifications
# ==========================================================================
regions = {"Gulf": {"xlimits": (clawdata.lower[0], clawdata.upper[0]),
"ylimits": (clawdata.lower[1], clawdata.upper[1]),
"figsize": (6.4, 4.8)},
"Texas Gulf Coast": {"xlimits": (-99.2, -94.2),
"ylimits": (26.4, 30.4),
"figsize": (6, 6)}}
for (name, region_dict) in regions.items():
# Surface Figure
plotfigure = plotdata.new_plotfigure(name="Surface - %s" % name)
plotfigure.kwargs = {"figsize": region_dict['figsize']}
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = "Surface"
plotaxes.xlimits = region_dict["xlimits"]
plotaxes.ylimits = region_dict["ylimits"]
plotaxes.afteraxes = surge_afteraxes
surgeplot.add_surface_elevation(plotaxes, bounds=surface_limits)
surgeplot.add_land(plotaxes)
plotaxes.plotitem_dict['surface'].amr_patchedges_show = [0] * 10
plotaxes.plotitem_dict['land'].amr_patchedges_show = [0] * 10
# Speed Figure
plotfigure = plotdata.new_plotfigure(name="Currents - %s" % name)
plotfigure.kwargs = {"figsize": region_dict['figsize']}
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = "Currents"
plotaxes.xlimits = region_dict["xlimits"]
plotaxes.ylimits = region_dict["ylimits"]
plotaxes.afteraxes = surge_afteraxes
surgeplot.add_speed(plotaxes, bounds=speed_limits)
surgeplot.add_land(plotaxes)
plotaxes.plotitem_dict['speed'].amr_patchedges_show = [0] * 10
plotaxes.plotitem_dict['land'].amr_patchedges_show = [0] * 10
#
# Friction field
#
plotfigure = plotdata.new_plotfigure(name='Friction')
plotfigure.show = friction_data.variable_friction and True
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = regions['Gulf']['xlimits']
plotaxes.ylimits = regions['Gulf']['ylimits']
# plotaxes.title = "Manning's N Coefficient"
plotaxes.afteraxes = friction_after_axes
plotaxes.scaled = True
surgeplot.add_friction(plotaxes, bounds=friction_bounds, shrink=0.9)
plotaxes.plotitem_dict['friction'].amr_patchedges_show = [0] * 10
plotaxes.plotitem_dict['friction'].colorbar_label = "$n$"
#
# Hurricane Forcing fields
#
# Pressure field
plotfigure = plotdata.new_plotfigure(name='Pressure')
plotfigure.show = surge_data.pressure_forcing and True
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = regions['Gulf']['xlimits']
plotaxes.ylimits = regions['Gulf']['ylimits']
plotaxes.title = "Pressure Field"
plotaxes.afteraxes = surge_afteraxes
plotaxes.scaled = True
surgeplot.add_pressure(plotaxes, bounds=pressure_limits)
surgeplot.add_land(plotaxes)
# Wind field
plotfigure = plotdata.new_plotfigure(name='Wind Speed')
plotfigure.show = surge_data.wind_forcing and True
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = regions['Gulf']['xlimits']
plotaxes.ylimits = regions['Gulf']['ylimits']
plotaxes.title = "Wind Field"
plotaxes.afteraxes = surge_afteraxes
plotaxes.scaled = True
surgeplot.add_wind(plotaxes, bounds=wind_limits)
surgeplot.add_land(plotaxes)
# ========================================================================
# Figures for gauges
# ========================================================================
plotfigure = plotdata.new_plotfigure(name='Gauge Surfaces', figno=300,
type='each_gauge')
plotfigure.show = True
plotfigure.clf_each_gauge = True
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
#Time Conversions
def days2seconds(days):
return days * 60.0**2 * 24.0
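    # Explanatory note: days2seconds(1) == 86400.0; used below for the gauge
    # x-limits and x-ticks expressed in seconds relative to landfall.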
stations = [('8773037', 'Seadrift'),
('8773701', 'Port OConnor'),
('8774230', 'Aransas Wildlife Refuge'),
('8775237', 'Port Aransas'),
('8775296', 'USS Lexington')]
landfall_time = numpy.datetime64('2017-08-25T10:00')
begin_date = datetime.datetime(2017, 8, 24)
end_date = datetime.datetime(2017, 8, 28)
def get_actual_water_levels(station_id):
# Fetch water levels and tide predictions for given station
date_time, water_level, tide = fetch_noaa_tide_data(station_id,
begin_date, end_date)
# Calculate times relative to landfall
seconds_rel_landfall = (date_time - landfall_time) / numpy.timedelta64(1, 's')
# Subtract tide predictions from measured water levels
water_level -= tide
return seconds_rel_landfall, water_level
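    # Added note: dividing by numpy.timedelta64(1, 's') converts the datetime
    # offsets to float seconds (e.g. 2017-08-26T10:00 -> +86400.0 relative to
    # landfall); the returned levels are residuals after subtracting the
    # predicted tide.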
def gauge_afteraxes(cd):
station_id, station_name = stations[cd.gaugeno - 1]
seconds_rel_landfall, actual_level = get_actual_water_levels(station_id)
axes = plt.gca()
surgeplot.plot_landfall_gauge(cd.gaugesoln, axes)
axes.plot(seconds_rel_landfall, actual_level, 'g')
# Fix up plot - in particular fix time labels
axes.set_title(station_name)
axes.set_xlabel('Seconds relative to landfall')
axes.set_ylabel('Surface (m)')
axes.set_xlim([days2seconds(-1), days2seconds(3)])
axes.set_ylim([-1, 5])
        axes.set_xticks([days2seconds(-1), 0, days2seconds(1), days2seconds(2), days2seconds(3)])
#axes.set_xticklabels([r"$-1$", r"$0$", r"$1$", r"$2$", r"$3$"])
#axes.grid(True)
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.afteraxes = gauge_afteraxes
# Plot surface as blue curve:
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.plot_var = 3
plotitem.plotstyle = 'b-'
#
# Gauge Location Plot
#
def gauge_location_afteraxes(cd):
plt.subplots_adjust(left=0.12, bottom=0.06, right=0.97, top=0.97)
surge_afteraxes(cd)
gaugetools.plot_gauge_locations(cd.plotdata, gaugenos='all',
format_string='ko', add_labels=False)
#Plot for gauge location 1
plotfigure = plotdata.new_plotfigure(name="Gauge Location 1")
plotfigure.show = True
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Gauge Location 1'
plotaxes.scaled = True
plotaxes.xlimits = [-96.83, -96.63]
plotaxes.ylimits = [28.33, 28.43]
plotaxes.afteraxes = gauge_location_afteraxes
surgeplot.add_surface_elevation(plotaxes, bounds=surface_limits)
surgeplot.add_land(plotaxes)
plotaxes.plotitem_dict['surface'].amr_patchedges_show = [0] * 10
plotaxes.plotitem_dict['land'].amr_patchedges_show = [0] * 10
#Plot for gauge location 2
plotfigure = plotdata.new_plotfigure(name="Gauge Location 2")
plotfigure.show = True
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Gauge Location 2'
plotaxes.scaled = True
plotaxes.xlimits = [-96.48, -96.28]
plotaxes.ylimits = [28.40, 28.50]
plotaxes.afteraxes = gauge_location_afteraxes
surgeplot.add_surface_elevation(plotaxes, bounds=surface_limits)
surgeplot.add_land(plotaxes)
plotaxes.plotitem_dict['surface'].amr_patchedges_show = [0] * 10
plotaxes.plotitem_dict['land'].amr_patchedges_show = [0] * 10
#Plot for gauge location 3
plotfigure = plotdata.new_plotfigure(name="Gauge Location 3")
plotfigure.show = True
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Gauge Location 3'
plotaxes.scaled = True
plotaxes.xlimits = [-96.85, -96.65]
plotaxes.ylimits = [28.17, 28.27]
plotaxes.afteraxes = gauge_location_afteraxes
surgeplot.add_surface_elevation(plotaxes, bounds=surface_limits)
surgeplot.add_land(plotaxes)
plotaxes.plotitem_dict['surface'].amr_patchedges_show = [0] * 10
plotaxes.plotitem_dict['land'].amr_patchedges_show = [0] * 10
#Plot for gauge location 4
plotfigure = plotdata.new_plotfigure(name="Gauge Location 4")
plotfigure.show = True
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Gauge Location 4'
plotaxes.scaled = True
plotaxes.xlimits = [-97.15, -96.95]
plotaxes.ylimits = [27.79, 27.89]
plotaxes.afteraxes = gauge_location_afteraxes
surgeplot.add_surface_elevation(plotaxes, bounds=surface_limits)
surgeplot.add_land(plotaxes)
plotaxes.plotitem_dict['surface'].amr_patchedges_show = [0] * 10
plotaxes.plotitem_dict['land'].amr_patchedges_show = [0] * 10
#Plot for gauge location 5
plotfigure = plotdata.new_plotfigure(name="Gauge Location 5")
plotfigure.show = True
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Gauge Location 5'
plotaxes.scaled = True
plotaxes.xlimits = [-97.48, -97.28]
plotaxes.ylimits = [27.75, 27.85]
plotaxes.afteraxes = gauge_location_afteraxes
surgeplot.add_surface_elevation(plotaxes, bounds=surface_limits)
surgeplot.add_land(plotaxes)
plotaxes.plotitem_dict['surface'].amr_patchedges_show = [0] * 10
plotaxes.plotitem_dict['land'].amr_patchedges_show = [0] * 10
# -----------------------------------------
# Parameters used only when creating html and/or latex hardcopy
# e.g., via pyclaw.plotters.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_gaugenos = [1, 2, 3, 4, 5] # list of gauges to print
plotdata.print_fignos = 'all' # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.latex = True # create latex file of plots?
plotdata.latex_figsperline = 2 # layout of plots
plotdata.latex_framesperline = 1 # layout of plots
plotdata.latex_makepdf = False # also run pdflatex?
plotdata.parallel = True # parallel plotting
return plotdata
| mandli/surge-examples | harvey/setplot.py | Python | mit | 12,304 | 0.002682 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Contains expectations."""
import inquisition
FISHY = inquisition.SPANISH
FISHY = FISHY.replace('surprise', 'haddock')
print FISHY
| aedoler/is210-week-03-synthesizing | task_01.py | Python | mpl-2.0 | 183 | 0 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import division  # float division by default - integer division can still be done explicitly with '//'
from __future__ import absolute_import
import pygame
from pyecs import *
# from pyecs.components import *
# from components import *
class ProcessOutput(Component):
"""docstring for ProcessOutput"""
def __init__(self, process, *args,**kwargs):
super(ProcessOutput, self).__init__(*args,**kwargs)
self.process = process
| xaedes/PyCompuPipe | pycompupipe/components/processing/process_output.py | Python | mit | 525 | 0.009579 |
from colossus.game import Route
def test_route_step():
route = Route(100.0, 1)
route.add_packet()
dmg = route.update(10.0)
assert dmg <= 0.0
dmg = 0.0
dmg += route.update(100000.0)
dmg += route.update(100000.0)
assert dmg > 0.0
def test_route_removal_of_packet_after_action():
route = Route(1.0, 1)
route.add_packet()
assert route.packet_count() == 1
route.update(100.0)
assert route.packet_count() == 0
| spectralflux/colossus | tests/test_route.py | Python | mit | 458 | 0.004367 |
# -*- encoding: utf-8 -*-
from abjad import *
from abjad.tools.quantizationtools import *
def test_quantizationtools_QGrid___call___01():
q_grid = QGrid()
a = QEventProxy(SilentQEvent(0, ['A']), 0)
b = QEventProxy(SilentQEvent((1, 20), ['B']), (1, 20))
c = QEventProxy(SilentQEvent((9, 20), ['C']), (9, 20))
d = QEventProxy(SilentQEvent((1, 2), ['D']), (1, 2))
e = QEventProxy(SilentQEvent((11, 20), ['E']), (11, 20))
f = QEventProxy(SilentQEvent((19, 20), ['F']), (19, 20))
g = QEventProxy(SilentQEvent(1, ['G']), 1)
q_grid.fit_q_events([a, b, c, d, e, f, g])
result = q_grid((1, 4))
assert len(result) == 1
assert format(result[0]) == "c'4"
annotation = inspect_(result[0]).get_indicator(indicatortools.Annotation)
assert isinstance(annotation.value, tuple) and len(annotation.value) == 4
assert annotation.value[0].attachments == ('A',)
assert annotation.value[1].attachments == ('B',)
assert annotation.value[2].attachments == ('C',)
assert annotation.value[3].attachments == ('D',)
def test_quantizationtools_QGrid___call___02():
q_grid = QGrid()
q_grid.subdivide_leaves([(0, (1, 1, 1))])
q_grid.subdivide_leaves([(1, (1, 1))])
q_grid.subdivide_leaves([(-2, (1, 1, 1))])
a = QEventProxy(SilentQEvent(0, ['A']), 0)
b = QEventProxy(SilentQEvent((1, 20), ['B']), (1, 20))
c = QEventProxy(SilentQEvent((9, 20), ['C']), (9, 20))
d = QEventProxy(SilentQEvent((1, 2), ['D']), (1, 2))
e = QEventProxy(SilentQEvent((11, 20), ['E']), (11, 20))
f = QEventProxy(SilentQEvent((19, 20), ['F']), (19, 20))
g = QEventProxy(SilentQEvent(1, ['G']), 1)
q_grid.fit_q_events([a, b, c, d, e, f, g])
result = q_grid((1, 4))
assert isinstance(result, list) and len(result) == 1
assert systemtools.TestManager.compare(
result[0],
r'''
\times 2/3 {
c'8
c'16
c'16
\times 2/3 {
c'16
c'16
c'16
}
}
'''
)
leaf = result[0].select_leaves()[0]
annotation = inspect_(leaf).get_indicators(indicatortools.Annotation)[0]
assert isinstance(annotation.value, tuple) and len(annotation.value) == 2
assert annotation.value[0].attachments == ('A',)
assert annotation.value[1].attachments == ('B',)
leaf = result[0].select_leaves()[1]
assert not inspect_(leaf).get_indicators(indicatortools.Annotation)
leaf = result[0].select_leaves()[2]
annotation = inspect_(leaf).get_indicator(indicatortools.Annotation)
assert isinstance(annotation.value, tuple) and len(annotation.value) == 3
assert annotation.value[0].attachments == ('C',)
assert annotation.value[1].attachments == ('D',)
assert annotation.value[2].attachments == ('E',)
for leaf in result[0].select_leaves()[3:6]:
assert not inspect_(leaf).get_indicators(indicatortools.Annotation)
def test_quantizationtools_QGrid___call___03():
r'''Non-binary works too.
'''
q_grid = QGrid()
q_grid.subdivide_leaves([(0, (1, 1))])
a = QEventProxy(SilentQEvent(0, ['A']), 0)
b = QEventProxy(SilentQEvent((1, 20), ['B']), (1, 20))
c = QEventProxy(SilentQEvent((9, 20), ['C']), (9, 20))
d = QEventProxy(SilentQEvent((1, 2), ['D']), (1, 2))
e = QEventProxy(SilentQEvent((11, 20), ['E']), (11, 20))
f = QEventProxy(SilentQEvent((19, 20), ['F']), (19, 20))
g = QEventProxy(SilentQEvent(1, ['G']), 1)
q_grid.fit_q_events([a, b, c, d, e, f, g])
result = q_grid((1, 3))
assert isinstance(result, list) and len(result) == 1
assert systemtools.TestManager.compare(
result[0],
r'''
\tweak #'edge-height #'(0.7 . 0)
\times 2/3 {
c'4
c'4
}
'''
) | mscuthbert/abjad | abjad/tools/quantizationtools/test/test_quantizationtools_QGrid___call__.py | Python | gpl-3.0 | 3,905 | 0.000256 |
"""
Contains basic interface (abstract base class) for word embeddings.
"""
import os
from abc import ABCMeta, abstractmethod
class IWordEmbedding(object):
"""
Abstract base class for word embeddings
"""
__metaclass__ = ABCMeta
def __init__(self, path, vector_length):
self.model = None
self.path = path
self.vector_length = vector_length
self.already_built = False
@abstractmethod
def _build(self):
raise NotImplementedError
@abstractmethod
def __getitem__(self, word):
raise NotImplementedError
def build(self):
""" Loads word embedding from its file """
if not self.already_built:
print("Loading pre-trained word embedding from {0}...".format(self.path))
self._build()
self.already_built = True
print("Pre-trained word embedding from {0} loaded!".format(self.path))
def get_embedding_model_path(self):
""" :return: absolute path to folder containing saved word embedding model """
return os.path.join(os.path.dirname(__file__), '../../../models/word_embeddings', self.path)
@staticmethod
def data_file_to_sentences(data_file_path):
"""
Converts a processed data file to generator of lists of words
:param data_file_path: path to data file
:return: iterator yielding sentences as lists of words
"""
with open(data_file_path, 'r') as f:
for line in f:
sentence = line.split(' ')[1]
yield map(lambda word: word.rstrip(), sentence.split(','))
def __str__(self):
return type(self).__name__
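# Illustrative sketch (added comment, not part of the original module): a
# concrete embedding only has to provide _build() and __getitem__(), e.g.
#
#     class DictEmbedding(IWordEmbedding):
#         def _build(self):
#             self.model = {}  # word -> vector mapping loaded from self.path
#         def __getitem__(self, word):
#             return self.model.get(word, [0.0] * self.vector_length)
#
# The zero-vector fallback is an assumption made for this sketch, not a
# behaviour of the project.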
| mikolajsacha/tweetsclassification | src/features/word_embeddings/iword_embedding.py | Python | mit | 1,681 | 0.003569 |
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: graph
import flatbuffers
class FlatProperties(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsFlatProperties(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = FlatProperties()
x.Init(buf, n + offset)
return x
# FlatProperties
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# FlatProperties
def Name(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# FlatProperties
def I(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return 0
# FlatProperties
def IAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
return 0
# FlatProperties
def ILength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.VectorLen(o)
return 0
# FlatProperties
def L(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
return 0
# FlatProperties
def LAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
return 0
# FlatProperties
def LLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.VectorLen(o)
return 0
# FlatProperties
def D(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Float64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
return 0
# FlatProperties
def DAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float64Flags, o)
return 0
# FlatProperties
def DLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.VectorLen(o)
return 0
# FlatProperties
def A(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from .FlatArray import FlatArray
obj = FlatArray()
obj.Init(self._tab.Bytes, x)
return obj
return None
# FlatProperties
def ALength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.VectorLen(o)
return 0
# FlatProperties
def B(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.BoolFlags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
return 0
# FlatProperties
def BAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.BoolFlags, o)
return 0
# FlatProperties
def BLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.VectorLen(o)
return 0
# FlatProperties
def S(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
a = self._tab.Vector(o)
return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return ""
# FlatProperties
def SLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
return self._tab.VectorLen(o)
return 0
# FlatProperties
def Shape(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return 0
# FlatProperties
def ShapeAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
return 0
# FlatProperties
def ShapeLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
return self._tab.VectorLen(o)
return 0
def FlatPropertiesStart(builder): builder.StartObject(8)
def FlatPropertiesAddName(builder, name): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
def FlatPropertiesAddI(builder, i): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(i), 0)
def FlatPropertiesStartIVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def FlatPropertiesAddL(builder, l): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(l), 0)
def FlatPropertiesStartLVector(builder, numElems): return builder.StartVector(8, numElems, 8)
def FlatPropertiesAddD(builder, d): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(d), 0)
def FlatPropertiesStartDVector(builder, numElems): return builder.StartVector(8, numElems, 8)
def FlatPropertiesAddA(builder, a): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(a), 0)
def FlatPropertiesStartAVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def FlatPropertiesAddB(builder, b): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(b), 0)
def FlatPropertiesStartBVector(builder, numElems): return builder.StartVector(1, numElems, 1)
def FlatPropertiesAddS(builder, s): builder.PrependUOffsetTRelativeSlot(6, flatbuffers.number_types.UOffsetTFlags.py_type(s), 0)
def FlatPropertiesStartSVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def FlatPropertiesAddShape(builder, shape): builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(shape), 0)
def FlatPropertiesStartShapeVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def FlatPropertiesEnd(builder): return builder.EndObject()
| deeplearning4j/deeplearning4j | libnd4j/include/graph/generated/nd4j/graph/FlatProperties.py | Python | apache-2.0 | 7,580 | 0.005937 |
#!/usr/bin/env python3
import os
import sys
import math
from colorsys import hsv_to_rgb
from random import random
from hippietrap.hippietrap import HippieTrap, ALL, NUM_NODES
from hippietrap.color import Color, ColorGenerator, random_color, hue_to_color
from hippietrap.geometry import HippieTrapGeometry
from hippietrap.pattern import PatternBase, run_pattern
from time import sleep, time
class SweepTwoColorShiftPattern(PatternBase):
geo = HippieTrapGeometry()
cg = ColorGenerator()
name = "sweep two colors"
color_shift_between_rings = .045
def pattern(self):
self.trap.send_decay(ALL, 90)
self.trap.start_pattern(ALL)
index = 0
hue_offset = 0.0
stop = False
color_rings = [ random_color(), random_color(), random_color() , random_color() ]
while not stop:
for bottle, angle in self.geo.enumerate_all_bottles(index % 2 == 0):
self.trap.set_color(bottle, color_rings[self.geo.get_ring_from_bottle(bottle)])
sleep(.01)
if self.stop_thread:
stop = True
break
index += 1
hue_offset = math.fmod(hue_offset + .02, 1.0)
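            # Added comment: the sine term below is rescaled from [-1, 1] to
            # [0, 1] so it can be wrapped with fmod() and fed to hue_to_color()
            # as the hue for the colour pushed onto ring 0.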
shift = math.sin(index / self.color_shift_between_rings) / 2.0 + .50
new_offset = math.fmod(shift, 1.0)
color_rings.pop()
color_rings.insert(0, hue_to_color(new_offset))
self.trap.stop_pattern(ALL)
if __name__ == "__main__":
with HippieTrap() as trap:
trap.begin()
trap.set_brightness(ALL, 100)
p = SweepTwoColorShiftPattern(trap)
p.pattern()
| mayhem/led-chandelier | software/patterns/sweep_two_color_shift.py | Python | mit | 1,663 | 0.004811 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (C) 2009-2014:
# xkilian, [email protected]
# Hartmut Goebel, [email protected]
# Nicolas Dupeux, [email protected]
# Grégory Starck, [email protected]
# Sebastien Coavoux, [email protected]
# Thibault Cohen, [email protected]
# Jean Gabes, [email protected]
# Zoran Zaric, [email protected]
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# Import failures will be caught by __init__.py
from cx_Oracle import connect as connect_function
from cx_Oracle import IntegrityError as IntegrityError_exp
from cx_Oracle import ProgrammingError as ProgrammingError_exp
from cx_Oracle import DatabaseError as DatabaseError_exp
from cx_Oracle import InternalError as InternalError_exp
from cx_Oracle import DataError as DataError_exp
from cx_Oracle import OperationalError as OperationalError_exp
from alignak.db import DB
from alignak.log import logger
class DBOracle(DB):
"""Manage connection and query execution against Oracle databases."""
def __init__(self, user, password, database, table_prefix=''):
self.user = user
self.password = password
self.database = database
self.table_prefix = table_prefix
def connect_database(self):
"""Create the database connection
TODO: finish (begin :) ) error catch and conf parameters...
"""
connstr = '%s/%s@%s' % (self.user, self.password, self.database)
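        # e.g. "scott/tiger@orcl" -- the classic cx_Oracle "user/password@dsn" form.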
self.db = connect_function(connstr)
self.db_cursor = self.db.cursor()
self.db_cursor.arraysize = 50
def execute_query(self, query):
""" Execute a query against an Oracle database.
"""
logger.debug("[DBOracle] Execute Oracle query %s\n", query)
try:
self.db_cursor.execute(query)
self.db.commit()
except IntegrityError_exp, exp:
logger.warning("[DBOracle] Warning: a query raise an integrity error: %s, %s",
query, exp)
except ProgrammingError_exp, exp:
logger.warning("[DBOracle] Warning: a query raise a programming error: %s, %s",
query, exp)
except DatabaseError_exp, exp:
logger.warning("[DBOracle] Warning: a query raise a database error: %s, %s",
query, exp)
except InternalError_exp, exp:
logger.warning("[DBOracle] Warning: a query raise an internal error: %s, %s",
query, exp)
except DataError_exp, exp:
logger.warning("[DBOracle] Warning: a query raise a data error: %s, %s",
query, exp)
except OperationalError_exp, exp:
logger.warning("[DBOracle] Warning: a query raise an operational error: %s, %s",
query, exp)
except Exception, exp:
logger.warning("[DBOracle] Warning: a query raise an unknown error: %s, %s",
query, exp)
logger.warning(exp.__dict__)
| ddurieux/alignak | alignak/db_oracle.py | Python | agpl-3.0 | 4,741 | 0.001477 |
# -*- coding: utf-8 -*-
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../../tools'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.pngmath',
'sphinx.ext.intersphinx',
# Create links to Python source code for the module.
# 'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'sphinx.ext.inheritance_diagram',
]
# Add any locations and names of other projects that should be linked to in this documentation.
intersphinx_mapping = {
'python': ('http://docs.python.org', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'MPipe'
copyright = u'2014, Velimir Mlaker'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = ['wm5.', 'wm5', ]
# Set this to 'both' to append the __init__(self) docstring to the class docstring.
autoclass_content = 'both'
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'mpipe'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{0} Documentation'.format(project)
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**' : [],
# '**' : ['localtoc.html'],
# '**' : ['globaltoc.html'],
# '**' : ['searchbox.html', 'search.html'],
# '**' : ['searchbox.html'],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = { 'search' : 'search.html' }
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MPipedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'MPipe.tex', u'MPipe Documentation',
u'Velimir Mlaker', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'mpipe', u'MPipe Documentation',
[u'Velimir Mlaker'], 1)
]
rst_prolog = '''
.. |NAME| replace:: MPipe
'''
# End of file.
| vmlaker/mpipe | doc/source/conf.py | Python | mit | 7,581 | 0.006068 |
#!/usr/bin/env python2
'''
genPOI.py
Scans regionsets for TileEntities and Entities, filters them, and writes out
POI/marker info.
A markerSet is list of POIs to display on a tileset. It has a display name,
and a group name.
markersDB.js holds a list of POIs in each group
markers.js holds a list of which markerSets are attached to each tileSet
'''
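# Rough sketch of the two output files, pieced together from the structures
# built in main() below (field values here are illustrative, not real data):
#
#   markersDB.js:
#     var markersDB={"<filterName>": {"created": false, "name": "Signs",
#       "raw": [{"x": 10, "y": 64, "z": -3, "text": "...", "hovertext": "..."}]}};
#
#   markers.js:
#     var markers={"<renderName>": [{"groupName": "<filterName>",
#       "displayName": "Signs", "icon": "signpost_icon.png",
#       "createInfoWindow": true, "checked": false}]};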
import gzip
import itertools
import json
import logging
import multiprocessing
import os
import re
import sys
import time
import urllib2
import datetime
from collections import defaultdict
from contextlib import closing
from multiprocessing import Pool
from optparse import OptionParser
from overviewer_core import logger
from overviewer_core import nbt
from overviewer_core import configParser, world
from overviewer_core.files import FileReplacer, get_fs_caps
UUID_LOOKUP_URL = 'https://sessionserver.mojang.com/session/minecraft/profile/'
def replaceBads(s):
"Replaces bad characters with good characters!"
bads = [" ", "(", ")"]
x=s
for bad in bads:
x = x.replace(bad,"_")
return x
# If you want to keep your stomach contents do not, under any circumstance,
# read the body of the following function. You have been warned.
# All of this could be replaced by a simple json.loads if Mojang had
# introduced a TAG_JSON, but they didn't.
#
# So here are a few curiosities how 1.7 signs get seen in 1.8 in Minecraft:
# - null ->
# - "null" -> null
# - ["Hello"] -> Hello
# - [Hello] -> Hello
# - [1,2,3] -> 123
# Mojang just broke signs for everyone who ever used [, { and ". GG.
def jsonText(s):
if s is None or s == "null":
return ""
if (s.startswith('"') and s.endswith('"')) or \
(s.startswith('{') and s.endswith('}')):
try:
js = json.loads(s)
except ValueError:
return s
def parseLevel(foo):
bar = ""
if isinstance(foo, list):
for extra in foo:
bar += parseLevel(extra)
elif isinstance(foo, dict):
if "text" in foo:
bar += foo["text"]
if "extra" in foo:
bar += parseLevel(foo["extra"])
elif isinstance(foo, basestring):
bar = foo
return bar
return parseLevel(js)
else:
return s
# Since functions are not pickleable, we send their names instead.
# Here, set up worker processes to have a name -> function map
bucketChunkFuncs = {}
def initBucketChunks(config_path):
global bucketChunkFuncs
mw_parser = configParser.MultiWorldParser()
mw_parser.parse(config_path)
# ought not to fail since we already did it once
config = mw_parser.get_validated_config()
for name, render in config['renders'].iteritems():
for f in render['markers']:
ff = f['filterFunction']
bucketChunkFuncs[ff.__name__] = ff
# yes there's a double parenthesis here
# see below for when this is called, and why we do this
# a smarter way would be functools.partial, but that's broken on python 2.6
# when used with multiprocessing
def parseBucketChunks((bucket, rset, filters)):
global bucketChunkFuncs
pid = multiprocessing.current_process().pid
markers = defaultdict(list)
i = 0
cnt = 0
mcnt_prev = 0
for b in bucket:
try:
data = rset.get_chunk(b[0],b[1])
for poi in itertools.chain(data['TileEntities'], data['Entities']):
if poi['id'] == 'Sign' or poi['id'] == 'minecraft:sign':
poi = signWrangler(poi)
for name, filter_function in filters:
ff = bucketChunkFuncs[filter_function]
result = ff(poi)
if result:
d = create_marker_from_filter_result(poi, result)
markers[name].append(d)
except nbt.CorruptChunkError:
logging.warning("Ignoring POIs in corrupt chunk %d,%d", b[0], b[1])
            # TODO: perhaps log this only in verbose mode?
i = i + 1
if i == 250:
i = 0
cnt = 250 + cnt
mcnt = sum(len(v) for v in markers.itervalues())
if mcnt > mcnt_prev:
logging.info("Found %d markers in thread %d so far at %d chunks", mcnt, pid, cnt);
mcnt_prev = mcnt
return markers
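# For reference, a sketch of the functools.partial approach alluded to above
# (not used here because, as noted, it misbehaved with multiprocessing on
# Python 2.6; parseBucketChunks would then take three separate arguments
# rather than a single tuple):
#   from functools import partial
#   pool.map(partial(parseBucketChunks, rset=rset, filters=filters), buckets)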
def signWrangler(poi):
"""
Just does the JSON things for signs
"""
for field in ["Text1", "Text2", "Text3", "Text4"]:
poi[field] = jsonText(poi[field])
return poi
def handleEntities(rset, config, config_path, filters, markers):
"""
Add markers for Entities or TileEntities.
For this every chunk of the regionset is parsed and filtered using multiple
processes, if so configured.
This function will not return anything, but it will update the parameter
`markers`.
"""
logging.info("Looking for entities in %r", rset)
    numbuckets = config['processes']
if numbuckets < 0:
numbuckets = multiprocessing.cpu_count()
if numbuckets == 1:
for (x, z, mtime) in rset.iterate_chunks():
try:
data = rset.get_chunk(x, z, entities_only=True)
for poi in itertools.chain(data['TileEntities'], data['Entities']):
if poi['id'] == 'Sign' or poi['id'] == 'minecraft:sign': # kill me
poi = signWrangler(poi)
for name, __, filter_function, __, __, __ in filters:
result = filter_function(poi)
if result:
d = create_marker_from_filter_result(poi, result)
markers[name]['raw'].append(d)
except nbt.CorruptChunkError:
logging.warning("Ignoring POIs in corrupt chunk %d,%d", x,z)
else:
        buckets = [[] for i in range(numbuckets)]
for (x, z, mtime) in rset.iterate_chunks():
i = x / 32 + z / 32
i = i % numbuckets
buckets[i].append([x, z])
for b in buckets:
logging.info("Buckets has %d entries", len(b));
# Create a pool of processes and run all the functions
pool = Pool(processes=numbuckets, initializer=initBucketChunks, initargs=(config_path,))
# simplify the filters dict, so pickle doesn't have to do so much
filters = [(name, filter_function.__name__) for name, __, filter_function, __, __, __ in filters]
results = pool.map(parseBucketChunks, ((buck, rset, filters) for buck in buckets))
logging.info("All the threads completed")
for marker_dict in results:
for name, marker_list in marker_dict.iteritems():
markers[name]['raw'].extend(marker_list)
logging.info("Done.")
class PlayerDict(dict):
use_uuid = False
_name = ''
uuid_cache = None # A cache for the UUID->profile lookups
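    # A cache entry mirrors the Mojang profile JSON plus a local 'retrievedAt'
    # timestamp, roughly (illustrative values):
    #   {"<uuid hex>": {"name": "SomePlayer", "retrievedAt": 1400000000.0}}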
@classmethod
def load_cache(cls, outputdir):
cache_file = os.path.join(outputdir, "uuidcache.dat")
if os.path.exists(cache_file):
try:
with closing(gzip.GzipFile(cache_file)) as gz:
cls.uuid_cache = json.load(gz)
logging.info("Loaded UUID cache from %r with %d entries",
cache_file, len(cls.uuid_cache.keys()))
except (ValueError, IOError):
logging.warning("Failed to load UUID cache -- it might be corrupt")
cls.uuid_cache = {}
corrupted_cache = cache_file + ".corrupted." + datetime.datetime.now().isoformat()
try:
os.rename(cache_file, corrupted_cache)
logging.warning("If %s does not appear to contain meaningful data, you may safely delete it", corrupted_cache)
except OSError:
logging.warning("Failed to backup corrupted UUID cache")
logging.info("Initialized an empty UUID cache")
else:
cls.uuid_cache = {}
logging.info("Initialized an empty UUID cache")
@classmethod
def save_cache(cls, outputdir):
cache_file = os.path.join(outputdir, "uuidcache.dat")
caps = get_fs_caps(outputdir)
with FileReplacer(cache_file, caps) as cache_file_name:
with closing(gzip.GzipFile(cache_file_name, "wb")) as gz:
json.dump(cls.uuid_cache, gz)
logging.info("Wrote UUID cache with %d entries",
len(cls.uuid_cache.keys()))
def __getitem__(self, item):
if item == "EntityId":
if not super(PlayerDict, self).has_key("EntityId"):
if self.use_uuid:
super(PlayerDict, self).__setitem__("EntityId", self.get_name_from_uuid())
else:
super(PlayerDict, self).__setitem__("EntityId", self._name)
return super(PlayerDict, self).__getitem__(item)
def get_name_from_uuid(self):
sname = self._name.replace('-','')
try:
profile = PlayerDict.uuid_cache[sname]
if profile['retrievedAt'] > time.mktime(self['time']):
return profile['name']
except (KeyError,):
pass
try:
profile = json.loads(urllib2.urlopen(UUID_LOOKUP_URL + sname).read())
if 'name' in profile:
profile['retrievedAt'] = time.mktime(time.localtime())
PlayerDict.uuid_cache[sname] = profile
return profile['name']
except (ValueError, urllib2.URLError):
logging.warning("Unable to get player name for UUID %s", self._name)
def handlePlayers(worldpath, filters, markers):
"""
Add markers for players to the list of markers.
For this the player files under the given `worldpath` are parsed and
filtered.
This function will not return anything, but it will update the parameter
`markers`.
"""
playerdir = os.path.join(worldpath, "playerdata")
useUUIDs = True
if not os.path.isdir(playerdir):
playerdir = os.path.join(worldpath, "players")
useUUIDs = False
if os.path.isdir(playerdir):
playerfiles = os.listdir(playerdir)
playerfiles = [x for x in playerfiles if x.endswith(".dat")]
isSinglePlayer = False
else:
playerfiles = [os.path.join(worldpath, "level.dat")]
isSinglePlayer = True
for playerfile in playerfiles:
try:
data = PlayerDict(nbt.load(os.path.join(playerdir, playerfile))[1])
data.use_uuid = useUUIDs
if isSinglePlayer:
data = data['Data']['Player']
except (IOError, TypeError):
logging.warning("Skipping bad player dat file %r", playerfile)
continue
playername = playerfile.split(".")[0]
if isSinglePlayer:
playername = 'Player'
data._name = playername
if useUUIDs:
data['uuid'] = playername
# Position at last logout
data['id'] = "Player"
data['x'] = int(data['Pos'][0])
data['y'] = int(data['Pos'][1])
data['z'] = int(data['Pos'][2])
# Time at last logout, calculated from last time the player's file was modified
data['time'] = time.localtime(os.path.getmtime(os.path.join(playerdir, playerfile)))
# Spawn position (bed or main spawn)
if "SpawnX" in data:
spawn = PlayerDict()
spawn.use_uuid = useUUIDs
spawn._name = playername
spawn["id"] = "PlayerSpawn"
spawn["x"] = data['SpawnX']
spawn["y"] = data['SpawnY']
spawn["z"] = data['SpawnZ']
for name, __, filter_function, rset, __, __ in filters:
# get the dimension for the filter
# This has do be done every time, because we have filters for
# different regionsets.
if rset.get_type():
dimension = int(re.match(r"^DIM(_MYST)?(-?\d+)$", rset.get_type()).group(2))
else:
dimension = 0
if data['Dimension'] == dimension:
result = filter_function(data)
if result:
d = create_marker_from_filter_result(data, result)
markers[name]['raw'].append(d)
if dimension == 0 and "SpawnX" in data:
result = filter_function(spawn)
if result:
d = create_marker_from_filter_result(spawn, result)
markers[name]['raw'].append(d)
def handleManual(manualpois, filters, markers):
"""
Add markers for manually defined POIs to the list of markers.
This function will not return anything, but it will update the parameter
`markers`.
"""
for poi in manualpois:
for name, __, filter_function, __, __, __ in filters:
result = filter_function(poi)
if result:
d = create_marker_from_filter_result(poi, result)
markers[name]['raw'].append(d)
def create_marker_from_filter_result(poi, result):
"""
Takes a POI and the return value of a filter function for it and creates a
marker dict depending on the type of the returned value.
"""
# every marker has a position either directly via attributes x, y, z or
# via tuple attribute Pos
if 'Pos' in poi:
d = dict((v, poi['Pos'][i]) for i, v in enumerate('xyz'))
else:
d = dict((v, poi[v]) for v in 'xyz')
# read some Defaults from POI
if "icon" in poi:
d["icon"] = poi['icon']
if "image" in poi:
d["image"] = poi['image']
if "createInfoWindow" in poi:
d["createInfoWindow"] = poi['createInfoWindow']
# Fill in the rest from result
if isinstance(result, basestring):
d.update(dict(text=result, hovertext=result))
elif isinstance(result, tuple):
d.update(dict(text=result[1], hovertext=result[0]))
# Dict support to allow more flexible things in the future as well as polylines on the map.
elif isinstance(result, dict):
d['text'] = result['text']
# Use custom hovertext if provided...
if 'hovertext' in result:
d['hovertext'] = unicode(result['hovertext'])
else: # ...otherwise default to display text.
d['hovertext'] = result['text']
if 'polyline' in result and hasattr(result['polyline'], '__iter__'):
d['polyline'] = []
for point in result['polyline']:
d['polyline'].append(dict(x=point['x'], y=point['y'], z=point['z'])) # point.copy() would work, but this validates better
if isinstance(result['color'], basestring):
d['strokeColor'] = result['color']
if "icon" in result:
d["icon"] = result['icon']
if "image" in result:
d["image"] = result['image']
if "createInfoWindow" in result:
d["createInfoWindow"] = result['createInfoWindow']
else:
raise ValueError("got an %s as result for POI with id %s" % (type(result).__name__, poi['id']))
return d
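# Illustrative examples of the three accepted filter return types and the
# marker dicts they produce (positions come from the POI, values are made up):
#   "Hello"              -> {..., 'text': 'Hello', 'hovertext': 'Hello'}
#   ("hover", "shown")   -> {..., 'text': 'shown', 'hovertext': 'hover'}
#   {'text': 'd', 'hovertext': 'h', 'icon': 'x.png'}
#                        -> {..., 'text': 'd', 'hovertext': 'h', 'icon': 'x.png'}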
def main():
if os.path.basename(sys.argv[0]) == """genPOI.py""":
helptext = """genPOI.py
%prog --config=<config file> [options]"""
else:
helptext = """genPOI
%prog --genpoi --config=<config file> [options]"""
logger.configure()
parser = OptionParser(usage=helptext)
parser.add_option("-c", "--config", dest="config", action="store",
help="Specify the config file to use.")
parser.add_option("-q", "--quiet", dest="quiet", action="count",
help="Reduce logging output")
parser.add_option("--skip-scan", dest="skipscan", action="store_true",
help="Skip scanning for entities when using GenPOI")
parser.add_option("--skip-players", dest="skipplayers", action="store_true",
help="Skip getting player data when using GenPOI")
options, args = parser.parse_args()
if not options.config:
parser.print_help()
return
if options.quiet > 0:
logger.configure(logging.WARN, False)
# Parse the config file
mw_parser = configParser.MultiWorldParser()
mw_parser.parse(options.config)
try:
config = mw_parser.get_validated_config()
except Exception:
logging.exception("An error was encountered with your configuration. See the info below.")
return 1
destdir = config['outputdir']
# saves us from creating the same World object over and over again
worldcache = {}
filters = set()
marker_groups = defaultdict(list)
# collect all filters and get regionsets
for rname, render in config['renders'].iteritems():
# Convert render['world'] to the world path, and store the original
# in render['worldname_orig']
try:
worldpath = config['worlds'][render['world']]
except KeyError:
logging.error("Render %s's world is '%s', but I could not find a corresponding entry in the worlds dictionary.",
rname, render['world'])
return 1
render['worldname_orig'] = render['world']
render['world'] = worldpath
# find or create the world object
if (render['world'] not in worldcache):
w = world.World(render['world'])
worldcache[render['world']] = w
else:
w = worldcache[render['world']]
# get the regionset for this dimension
rset = w.get_regionset(render['dimension'][1])
        if rset is None:  # indicates no such dimension was found
logging.warn("Sorry, you requested dimension '%s' for the render '%s', but I couldn't find it", render['dimension'][0], rname)
continue
# find filters for this render
for f in render['markers']:
# internal identifier for this filter
name = replaceBads(f['name']) + hex(hash(f['filterFunction']))[-4:] + "_" + hex(hash(rname))[-4:]
# add it to the list of filters
filters.add((name, f['name'], f['filterFunction'], rset, worldpath, rname))
# add an entry in the menu to show markers found by this filter
group = dict(groupName=name,
displayName = f['name'],
icon=f.get('icon', 'signpost_icon.png'),
createInfoWindow=f.get('createInfoWindow', True),
checked = f.get('checked', False))
marker_groups[rname].append(group)
# initialize the structure for the markers
markers = dict((name, dict(created=False, raw=[], name=filter_name))
for name, filter_name, __, __, __, __ in filters)
# apply filters to regionsets
if not options.skipscan:
# group filters by rset
keyfunc = lambda x: x[3]
sfilters = sorted(filters, key=keyfunc)
for rset, rset_filters in itertools.groupby(sfilters, keyfunc):
handleEntities(rset, config, options.config, list(rset_filters), markers)
# apply filters to players
if not options.skipplayers:
PlayerDict.load_cache(destdir)
# group filters by worldpath, so we only search for players once per
# world
keyfunc = lambda x: x[4]
sfilters = sorted(filters, key=keyfunc)
for worldpath, worldpath_filters in itertools.groupby(sfilters, keyfunc):
handlePlayers(worldpath, list(worldpath_filters), markers)
# add manual POIs
# group filters by name of the render, because only filter functions for
# the current render should be used on the current render's manualpois
keyfunc = lambda x: x[5]
sfilters = sorted(filters, key=keyfunc)
for rname, rname_filters in itertools.groupby(sfilters, keyfunc):
manualpois = config['renders'][rname]['manualpois']
handleManual(manualpois, list(rname_filters), markers)
logging.info("Done handling POIs")
logging.info("Writing out javascript files")
if not options.skipplayers:
PlayerDict.save_cache(destdir)
with open(os.path.join(destdir, "markersDB.js"), "w") as output:
output.write("var markersDB=")
json.dump(markers, output, indent=2)
output.write(";\n");
with open(os.path.join(destdir, "markers.js"), "w") as output:
output.write("var markers=")
json.dump(marker_groups, output, indent=2)
output.write(";\n");
with open(os.path.join(destdir, "baseMarkers.js"), "w") as output:
output.write("overviewer.util.injectMarkerScript('markersDB.js');\n")
output.write("overviewer.util.injectMarkerScript('markers.js');\n")
output.write("overviewer.util.injectMarkerScript('regions.js');\n")
output.write("overviewer.collections.haveSigns=true;\n")
logging.info("Done")
if __name__ == "__main__":
main()
| kevinwchang/Minecraft-Overviewer | overviewer_core/aux_files/genPOI.py | Python | gpl-3.0 | 21,247 | 0.003624 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests models.parameters
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import itertools
import numpy as np
from numpy.testing import utils
from . import irafutil
from .. import models, fitting
from ..core import Model, FittableModel, ModelDefinitionError
from ..parameters import Parameter, InputParameterError
from ...utils.data import get_pkg_data_filename
from ...tests.helper import pytest
class TParModel(Model):
"""
A toy model to test parameters machinery
"""
coeff = Parameter()
e = Parameter()
def __init__(self, coeff, e, **kwargs):
super(TParModel, self).__init__(coeff=coeff, e=e, **kwargs)
@staticmethod
def evaluate(coeff, e):
pass
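# Illustrative instantiations (see TestParameterInitialization below for the
# full matrix of parameter shapes that is exercised):
#   TParModel(10, 1)                          # one model, scalar parameters
#   TParModel(10, [1, 2])                     # one model, scalar coeff, length-2 e
#   TParModel([10, 20], [1, 2], n_models=2)   # two models, scalar parameters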
class MockModel(FittableModel):
alpha = Parameter(name='alpha', default=42)
@staticmethod
def evaluate(*args):
pass
def test_parameter_properties():
"""Test if getting / setting of Parameter properties works."""
m = MockModel()
p = m.alpha
assert p.name == 'alpha'
# Parameter names are immutable
with pytest.raises(AttributeError):
p.name = 'beta'
assert p.fixed == False
p.fixed = True
assert p.fixed == True
assert p.tied == False
p.tied = lambda _: 0
p.tied = False
assert p.tied == False
assert p.min == None
p.min = 42
assert p.min == 42
p.min = None
assert p.min == None
assert p.max == None
# TODO: shouldn't setting a max < min give an error?
p.max = 41
assert p.max == 41
def test_parameter_operators():
"""Test if the parameter arithmetic operators work."""
m = MockModel()
par = m.alpha
num = 42.
val = 3
assert par - val == num - val
assert val - par == val - num
assert par / val == num / val
assert val / par == val / num
assert par ** val == num ** val
assert val ** par == val ** num
assert par < 45
assert par > 41
assert par <= par
assert par >= par
assert par == par
assert -par == -num
assert abs(par) == abs(num)
def test_parameter_name_attribute_mismatch():
"""
It should not be possible to define Parameters on a model with different
names from the attributes they are assigned to.
"""
def make_bad_class():
class BadModel(Model):
foo = Parameter('bar')
def __call__(self): pass
def make_good_class():
class GoodModel(Model):
# This is redundant but okay
foo = Parameter('foo')
def __call__(self): pass
make_good_class()
pytest.raises(ModelDefinitionError, make_bad_class)
class TestParameters(object):
def setup_class(self):
"""
Unit tests for parameters
Read an iraf database file created by onedspec.identify. Use the
information to create a 1D Chebyshev model and perform the same fit.
        Also create a Gaussian model.
"""
test_file = get_pkg_data_filename('data/idcompspec.fits')
f = open(test_file)
lines = f.read()
reclist = lines.split("begin")
f.close()
record = irafutil.IdentifyRecord(reclist[1])
self.icoeff = record.coeff
order = int(record.fields['order'])
self.model = models.Chebyshev1D(order - 1)
self.gmodel = models.Gaussian1D(2, mean=3, stddev=4)
self.linear_fitter = fitting.LinearLSQFitter()
self.x = record.x
self.y = record.z
self.yy = np.array([record.z, record.z])
def test_set_slice(self):
"""
Tests updating the parameters attribute with a slice.
This is what fitters internally do.
"""
self.model.parameters[:] = np.array([3, 4, 5, 6, 7])
assert (self.model.parameters == [3., 4., 5., 6., 7.]).all()
def test_set_parameters_as_list(self):
"""Tests updating parameters using a list."""
self.model.parameters = [30, 40, 50, 60, 70]
assert (self.model.parameters == [30., 40., 50., 60, 70]).all()
def test_set_parameters_as_array(self):
"""Tests updating parameters using an array."""
self.model.parameters = np.array([3, 4, 5, 6, 7])
assert (self.model.parameters == [3., 4., 5., 6., 7.]).all()
def test_set_as_tuple(self):
"""Tests updating parameters using a tuple."""
self.model.parameters = (1, 2, 3, 4, 5)
assert (self.model.parameters == [1, 2, 3, 4, 5]).all()
def test_set_model_attr_seq(self):
"""
Tests updating the parameters attribute when a model's
parameter (in this case coeff) is updated.
"""
self.model.parameters = [0, 0., 0., 0, 0]
self.model.c0 = 7
assert (self.model.parameters == [7, 0., 0., 0, 0]).all()
def test_set_model_attr_num(self):
"""Update the parameter list when a model's parameter is updated."""
self.gmodel.amplitude = 7
assert (self.gmodel.parameters == [7, 3, 4]).all()
def test_set_item(self):
"""Update the parameters using indexing."""
self.model.parameters = [1, 2, 3, 4, 5]
self.model.parameters[0] = 10.
assert (self.model.parameters == [10, 2, 3, 4, 5]).all()
assert self.model.c0 == 10
def test_wrong_size1(self):
"""
Tests raising an error when attempting to reset the parameters
using a list of a different size.
"""
with pytest.raises(InputParameterError):
self.model.parameters = [1, 2, 3]
def test_wrong_size2(self):
"""
Tests raising an exception when attempting to update a model's
parameter (in this case coeff) with a sequence of the wrong size.
"""
with pytest.raises(InputParameterError):
self.model.c0 = [1, 2, 3]
def test_wrong_shape(self):
"""
Tests raising an exception when attempting to update a model's
parameter and the new value has the wrong shape.
"""
with pytest.raises(InputParameterError):
self.gmodel.amplitude = [1, 2]
def test_par_against_iraf(self):
"""
Test the fitter modifies model.parameters.
Uses an iraf example.
"""
new_model = self.linear_fitter(self.model, self.x, self.y)
print(self.y, self.x)
utils.assert_allclose(new_model.parameters,
np.array(
[4826.1066602783685, 952.8943813407858,
12.641236013982386,
-1.7910672553339604,
0.90252884366711317]),
rtol=10 ** (-2))
def testPolynomial1D(self):
d = {'c0': 11, 'c1': 12, 'c2': 13, 'c3': 14}
p1 = models.Polynomial1D(3, **d)
utils.assert_equal(p1.parameters, [11, 12, 13, 14])
def test_poly1d_multiple_sets(self):
p1 = models.Polynomial1D(3, n_models=3)
utils.assert_equal(p1.parameters, [0.0, 0.0, 0.0, 0, 0, 0,
0, 0, 0, 0, 0, 0])
utils.assert_equal(p1.c0, [0, 0, 0])
p1.c0 = [10, 10, 10]
utils.assert_equal(p1.parameters, [10.0, 10.0, 10.0, 0, 0,
0, 0, 0, 0, 0, 0, 0])
def test_par_slicing(self):
"""
Test assigning to a parameter slice
"""
p1 = models.Polynomial1D(3, n_models=3)
p1.c0[:2] = [10, 10]
utils.assert_equal(p1.parameters, [10.0, 10.0, 0.0, 0, 0,
0, 0, 0, 0, 0, 0, 0])
def test_poly2d(self):
p2 = models.Polynomial2D(degree=3)
p2.c0_0 = 5
utils.assert_equal(p2.parameters, [5, 0, 0, 0, 0, 0, 0, 0, 0, 0])
def test_poly2d_multiple_sets(self):
kw = {'c0_0': [2, 3], 'c1_0': [1, 2], 'c2_0': [4, 5],
'c0_1': [1, 1], 'c0_2': [2, 2], 'c1_1': [5, 5]}
p2 = models.Polynomial2D(2, **kw)
utils.assert_equal(p2.parameters, [2, 3, 1, 2, 4, 5,
1, 1, 2, 2, 5, 5])
def test_shift_model_parameters1d(self):
sh1 = models.Shift(2)
sh1.offset = 3
assert sh1.offset == 3
assert sh1.offset.value == 3
def test_scale_model_parametersnd(self):
sc1 = models.Scale([2, 2])
sc1.factor = [3, 3]
assert sc1.factor == [3, 3]
utils.assert_array_equal(sc1.factor.value, [3, 3])
def test_parameters_wrong_shape(self):
sh1 = models.Shift(2)
with pytest.raises(InputParameterError):
sh1.offset = [3, 3]
class TestMultipleParameterSets(object):
def setup_class(self):
self.x1 = np.arange(1, 10, .1)
self.y, self.x = np.mgrid[:10, :7]
self.x11 = np.array([self.x1, self.x1]).T
self.gmodel = models.Gaussian1D([12, 10], [3.5, 5.2], stddev=[.4, .7],
n_models=2)
def test_change_par(self):
"""
Test that a change to one parameter as a set propagates to param_sets.
"""
self.gmodel.amplitude = [1, 10]
utils.assert_almost_equal(
self.gmodel.param_sets,
np.array([[1.,
10],
[3.5,
5.2],
[0.4,
0.7]]))
        assert np.all(self.gmodel.parameters == [1.0, 10.0, 3.5, 5.2, 0.4, 0.7])
def test_change_par2(self):
"""
Test that a change to one single parameter in a set propagates to
param_sets.
"""
self.gmodel.amplitude[0] = 11
utils.assert_almost_equal(
self.gmodel.param_sets,
np.array([[11.,
10],
[3.5,
5.2],
[0.4,
0.7]]))
        assert np.all(self.gmodel.parameters == [11.0, 10.0, 3.5, 5.2, 0.4, 0.7])
def test_change_parameters(self):
self.gmodel.parameters = [13, 10, 9, 5.2, 0.4, 0.7]
utils.assert_almost_equal(self.gmodel.amplitude.value, [13., 10.])
utils.assert_almost_equal(self.gmodel.mean.value, [9., 5.2])
class TestParameterInitialization(object):
"""
This suite of tests checks most if not all cases if instantiating a model
with parameters of different shapes/sizes and with different numbers of
parameter sets.
"""
def test_single_model_scalar_parameters(self):
t = TParModel(10, 1)
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[10], [1]])
assert np.all(t.parameters == [10, 1])
assert t.coeff.shape == ()
assert t.e.shape == ()
def test_single_model_scalar_and_array_parameters(self):
t = TParModel(10, [1, 2])
assert len(t) == 1
assert t.model_set_axis is False
assert np.issubdtype(t.param_sets.dtype, object)
assert len(t.param_sets) == 2
assert np.all(t.param_sets[0] == [10])
assert np.all(t.param_sets[1] == [[1, 2]])
assert np.all(t.parameters == [10, 1, 2])
assert t.coeff.shape == ()
assert t.e.shape == (2,)
def test_single_model_1d_array_parameters(self):
t = TParModel([10, 20], [1, 2])
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[[10, 20]], [[1, 2]]])
assert np.all(t.parameters == [10, 20, 1, 2])
assert t.coeff.shape == (2,)
assert t.e.shape == (2,)
def test_single_model_1d_array_different_length_parameters(self):
with pytest.raises(InputParameterError):
# Not broadcastable
t = TParModel([1, 2], [3, 4, 5])
def test_single_model_2d_array_parameters(self):
t = TParModel([[10, 20], [30, 40]], [[1, 2], [3, 4]])
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[[[10, 20], [30, 40]]],
[[[1, 2], [3, 4]]]])
assert np.all(t.parameters == [10, 20, 30, 40, 1, 2, 3, 4])
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2, 2)
def test_single_model_2d_non_square_parameters(self):
coeff = np.array([[10, 20], [30, 40], [50, 60]])
e = np.array([[1, 2], [3, 4], [5, 6]])
t = TParModel(coeff, e)
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[[[10, 20], [30, 40], [50, 60]]],
[[[1, 2], [3, 4], [5, 6]]]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60,
1, 2, 3, 4, 5, 6])
assert t.coeff.shape == (3, 2)
assert t.e.shape == (3, 2)
t2 = TParModel(coeff.T, e.T)
assert len(t2) == 1
assert t2.model_set_axis is False
assert np.all(t2.param_sets == [[[[10, 30, 50], [20, 40, 60]]],
[[[1, 3, 5], [2, 4, 6]]]])
assert np.all(t2.parameters == [10, 30, 50, 20, 40, 60,
1, 3, 5, 2, 4, 6])
assert t2.coeff.shape == (2, 3)
assert t2.e.shape == (2, 3)
# Not broadcastable
with pytest.raises(InputParameterError):
TParModel(coeff, e.T)
with pytest.raises(InputParameterError):
TParModel(coeff.T, e)
def test_single_model_2d_broadcastable_parameters(self):
t = TParModel([[10, 20, 30], [40, 50, 60]], [1, 2, 3])
assert len(t) == 1
assert t.model_set_axis is False
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, object)
assert np.all(t.param_sets[0] == [[[10, 20, 30], [40, 50, 60]]])
assert np.all(t.param_sets[1] == [[1, 2, 3]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 1, 2, 3])
@pytest.mark.parametrize(('p1', 'p2'), [
(1, 2), (1, [2, 3]), ([1, 2], 3), ([1, 2, 3], [4, 5]),
([1, 2], [3, 4, 5])])
def test_two_model_incorrect_scalar_parameters(self, p1, p2):
with pytest.raises(InputParameterError):
TParModel(p1, p2, n_models=2)
@pytest.mark.parametrize('kwargs', [
{'n_models': 2}, {'model_set_axis': 0},
{'n_models': 2, 'model_set_axis': 0}])
def test_two_model_scalar_parameters(self, kwargs):
t = TParModel([10, 20], [1, 2], **kwargs)
assert len(t) == 2
assert t.model_set_axis == 0
assert np.all(t.param_sets == [[10, 20], [1, 2]])
assert np.all(t.parameters == [10, 20, 1, 2])
assert t.coeff.shape == ()
assert t.e.shape == ()
@pytest.mark.parametrize('kwargs', [
{'n_models': 2}, {'model_set_axis': 0},
{'n_models': 2, 'model_set_axis': 0}])
def test_two_model_scalar_and_array_parameters(self, kwargs):
t = TParModel([10, 20], [[1, 2], [3, 4]], **kwargs)
assert len(t) == 2
assert t.model_set_axis == 0
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, object)
assert np.all(t.param_sets[0] == [[10], [20]])
assert np.all(t.param_sets[1] == [[1, 2], [3, 4]])
assert np.all(t.parameters == [10, 20, 1, 2, 3, 4])
assert t.coeff.shape == ()
assert t.e.shape == (2,)
def test_two_model_1d_array_parameters(self):
t = TParModel([[10, 20], [30, 40]], [[1, 2], [3, 4]], n_models=2)
assert len(t) == 2
assert t.model_set_axis == 0
assert np.all(t.param_sets == [[[10, 20], [30, 40]],
[[1, 2], [3, 4]]])
assert np.all(t.parameters == [10, 20, 30, 40, 1, 2, 3, 4])
assert t.coeff.shape == (2,)
assert t.e.shape == (2,)
t2 = TParModel([[10, 20, 30], [40, 50, 60]],
[[1, 2, 3], [4, 5, 6]], n_models=2)
assert len(t2) == 2
assert t2.model_set_axis == 0
assert np.all(t2.param_sets == [[[10, 20, 30], [40, 50, 60]],
[[1, 2, 3], [4, 5, 6]]])
assert np.all(t2.parameters == [10, 20, 30, 40, 50, 60,
1, 2, 3, 4, 5, 6])
assert t2.coeff.shape == (3,)
assert t2.e.shape == (3,)
def test_two_model_mixed_dimension_array_parameters(self):
with pytest.raises(InputParameterError):
# Can't broadcast different array shapes
TParModel([[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[9, 10, 11], [12, 13, 14]], n_models=2)
t = TParModel([[[10, 20], [30, 40]], [[50, 60], [70, 80]]],
[[1, 2], [3, 4]], n_models=2)
assert len(t) == 2
assert t.model_set_axis == 0
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, object)
assert np.all(t.param_sets[0] == [[[10, 20], [30, 40]],
[[50, 60], [70, 80]]])
assert np.all(t.param_sets[1] == [[[1, 2]], [[3, 4]]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 70, 80,
1, 2, 3, 4])
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2,)
def test_two_model_2d_array_parameters(self):
t = TParModel([[[10, 20], [30, 40]], [[50, 60], [70, 80]]],
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], n_models=2)
assert len(t) == 2
assert t.model_set_axis == 0
assert np.all(t.param_sets == [[[[10, 20], [30, 40]],
[[50, 60], [70, 80]]],
[[[1, 2], [3, 4]],
[[5, 6], [7, 8]]]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 70, 80,
1, 2, 3, 4, 5, 6, 7, 8])
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2, 2)
def test_two_model_nonzero_model_set_axis(self):
# An example where the model set axis is the *last* axis of the
# parameter arrays
coeff = np.array([[[10, 20], [30, 40]], [[50, 60], [70, 80]]])
coeff = np.rollaxis(coeff, 0, 3)
e = np.array([[1, 2], [3, 4]])
e = np.rollaxis(e, 0, 2)
t = TParModel(coeff, e, model_set_axis=-1)
assert len(t) == 2
assert t.model_set_axis == -1
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, object)
assert np.all(t.param_sets[0] == [[[10, 50], [20, 60]],
[[30, 70], [40, 80]]])
assert np.all(t.param_sets[1] == [[[1, 3], [2, 4]]])
assert np.all(t.parameters == [10, 50, 20, 60, 30, 70, 40, 80,
1, 3, 2, 4])
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2,)
def test_wrong_number_of_params(self):
with pytest.raises(InputParameterError):
TParModel(coeff=[[1, 2], [3, 4]], e=(2, 3, 4), n_models=2)
with pytest.raises(InputParameterError):
TParModel(coeff=[[1, 2], [3, 4]], e=(2, 3, 4), model_set_axis=0)
def test_wrong_number_of_params2(self):
with pytest.raises(InputParameterError):
m = TParModel(coeff=[[1, 2], [3, 4]], e=4, n_models=2)
with pytest.raises(InputParameterError):
m = TParModel(coeff=[[1, 2], [3, 4]], e=4, model_set_axis=0)
def test_array_parameter1(self):
with pytest.raises(InputParameterError):
t = TParModel(np.array([[1, 2], [3, 4]]), 1, model_set_axis=0)
def test_array_parameter2(self):
with pytest.raises(InputParameterError):
m = TParModel(np.array([[1, 2], [3, 4]]), (1, 1, 11),
model_set_axis=0)
def test_array_parameter4(self):
"""
Test multiple parameter model with array-valued parameters of the same
size as the number of parameter sets.
"""
t4 = TParModel([[1, 2], [3, 4]], [5, 6], model_set_axis=False)
assert len(t4) == 1
assert t4.coeff.shape == (2, 2)
assert t4.e.shape == (2,)
assert np.issubdtype(t4.param_sets.dtype, object)
assert np.all(t4.param_sets[0] == [[1, 2], [3, 4]])
assert np.all(t4.param_sets[1] == [5, 6])
def test_non_broadcasting_parameters():
"""
Tests that in a model with 3 parameters that do not all mutually broadcast,
this is determined correctly regardless of what order the parameters are
in.
"""
a = 3
b = np.array([[1, 2, 3], [4, 5, 6]])
c = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
class TestModel(Model):
p1 = Parameter()
p2 = Parameter()
p3 = Parameter()
def evaluate(self, *args):
return
# a broadcasts with both b and c, but b does not broadcast with c
for args in itertools.permutations((a, b, c)):
with pytest.raises(InputParameterError):
TestModel(*args)
| piotroxp/scibibscan | scib/lib/python3.5/site-packages/astropy/modeling/tests/test_parameters.py | Python | mit | 21,265 | 0.000611 |
# -*- coding: utf-8 -*-
from itertools import product
from itertools import permutations
from numba import njit
from numba.core import types, utils
import unittest
from numba.tests.support import (TestCase, no_pyobj_flags, MemoryLeakMixin)
from numba.core.errors import TypingError, UnsupportedError
from numba.cpython.unicode import _MAX_UNICODE
from numba.core.types.functions import _header_lead
from numba.extending import overload
_py37_or_later = utils.PYVERSION >= (3, 7)
def isascii(s):
return all(ord(c) < 128 for c in s)
def literal_usecase():
return '大处着眼,小处着手。'
def passthrough_usecase(x):
return x
def eq_usecase(x, y):
return x == y
def len_usecase(x):
return len(x)
def bool_usecase(x):
return bool(x)
def getitem_usecase(x, i):
return x[i]
def getitem_check_kind_usecase(x, i):
return hash(x[i])
def zfill_usecase(x, y):
return x.zfill(y)
def concat_usecase(x, y):
return x + y
def repeat_usecase(x, y):
return x * y
def inplace_concat_usecase(x, y):
x += y
return x
def in_usecase(x, y):
return x in y
def lt_usecase(x, y):
return x < y
def le_usecase(x, y):
return x <= y
def gt_usecase(x, y):
return x > y
def ge_usecase(x, y):
return x >= y
def partition_usecase(s, sep):
return s.partition(sep)
def find_usecase(x, y):
return x.find(y)
def find_with_start_only_usecase(x, y, start):
return x.find(y, start)
def find_with_start_end_usecase(x, y, start, end):
return x.find(y, start, end)
def rpartition_usecase(s, sep):
return s.rpartition(sep)
def count_usecase(x, y):
return x.count(y)
def count_with_start_usecase(x, y, start):
return x.count(y, start)
def count_with_start_end_usecase(x, y, start, end):
return x.count(y, start, end)
def rfind_usecase(x, y):
return x.rfind(y)
def rfind_with_start_only_usecase(x, y, start):
return x.rfind(y, start)
def rfind_with_start_end_usecase(x, y, start, end):
return x.rfind(y, start, end)
def replace_usecase(s, x, y):
return s.replace(x, y)
def replace_with_count_usecase(s, x, y, count):
return s.replace(x, y, count)
def rindex_usecase(x, y):
return x.rindex(y)
def rindex_with_start_only_usecase(x, y, start):
return x.rindex(y, start)
def rindex_with_start_end_usecase(x, y, start, end):
return x.rindex(y, start, end)
def index_usecase(x, y):
return x.index(y)
def index_with_start_only_usecase(x, y, start):
return x.index(y, start)
def index_with_start_end_usecase(x, y, start, end):
return x.index(y, start, end)
def startswith_usecase(x, y):
return x.startswith(y)
def endswith_usecase(x, y):
return x.endswith(y)
def expandtabs_usecase(s):
return s.expandtabs()
def expandtabs_with_tabsize_usecase(s, tabsize):
return s.expandtabs(tabsize)
def expandtabs_with_tabsize_kwarg_usecase(s, tabsize):
return s.expandtabs(tabsize=tabsize)
def endswith_with_start_only_usecase(x, y, start):
return x.endswith(y, start)
def endswith_with_start_end_usecase(x, y, start, end):
return x.endswith(y, start, end)
def split_usecase(x, y):
return x.split(y)
def split_with_maxsplit_usecase(x, y, maxsplit):
return x.split(y, maxsplit)
def split_with_maxsplit_kwarg_usecase(x, y, maxsplit):
return x.split(y, maxsplit=maxsplit)
def split_whitespace_usecase(x):
return x.split()
def splitlines_usecase(s):
return s.splitlines()
def splitlines_with_keepends_usecase(s, keepends):
return s.splitlines(keepends)
def splitlines_with_keepends_kwarg_usecase(s, keepends):
return s.splitlines(keepends=keepends)
def rsplit_usecase(s, sep):
return s.rsplit(sep)
def rsplit_with_maxsplit_usecase(s, sep, maxsplit):
return s.rsplit(sep, maxsplit)
def rsplit_with_maxsplit_kwarg_usecase(s, sep, maxsplit):
return s.rsplit(sep, maxsplit=maxsplit)
def rsplit_whitespace_usecase(s):
return s.rsplit()
def lstrip_usecase(x):
return x.lstrip()
def lstrip_usecase_chars(x, chars):
return x.lstrip(chars)
def rstrip_usecase(x):
return x.rstrip()
def rstrip_usecase_chars(x, chars):
return x.rstrip(chars)
def strip_usecase(x):
return x.strip()
def strip_usecase_chars(x, chars):
return x.strip(chars)
def join_usecase(x, y):
return x.join(y)
def join_empty_usecase(x):
# hack to make empty typed list
l = ['']
l.pop()
return x.join(l)
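# An alternative to the pop() trick above would be to build the empty typed
# list explicitly (sketch only, assuming numba.typed is importable here):
#   from numba.typed import List
#   l = List.empty_list(types.unicode_type)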
def center_usecase(x, y):
return x.center(y)
def center_usecase_fillchar(x, y, fillchar):
return x.center(y, fillchar)
def ljust_usecase(x, y):
return x.ljust(y)
def ljust_usecase_fillchar(x, y, fillchar):
return x.ljust(y, fillchar)
def rjust_usecase(x, y):
return x.rjust(y)
def rjust_usecase_fillchar(x, y, fillchar):
return x.rjust(y, fillchar)
def istitle_usecase(x):
return x.istitle()
def iter_usecase(x):
l = []
for i in x:
l.append(i)
return l
def title(x):
return x.title()
def literal_iter_usecase():
l = []
for i in '大处着眼,小处着手。':
l.append(i)
return l
def enumerated_iter_usecase(x):
buf = ""
scan = 0
for i, s in enumerate(x):
buf += s
scan += 1
return buf, scan
def iter_stopiteration_usecase(x):
n = len(x)
i = iter(x)
for _ in range(n + 1):
next(i)
def literal_iter_stopiteration_usecase():
s = '大处着眼,小处着手。'
i = iter(s)
n = len(s)
for _ in range(n + 1):
next(i)
def islower_usecase(x):
return x.islower()
def lower_usecase(x):
return x.lower()
def ord_usecase(x):
return ord(x)
def chr_usecase(x):
return chr(x)
class BaseTest(MemoryLeakMixin, TestCase):
def setUp(self):
super(BaseTest, self).setUp()
UNICODE_EXAMPLES = [
'',
'ascii',
'12345',
'1234567890',
'¡Y tú quién te crees?',
'🐍⚡',
'大处着眼,小处着手。',
]
UNICODE_ORDERING_EXAMPLES = [
'',
    'a',
    'aa',
'aaa',
'b',
'aab',
'ab',
'asc',
'ascih',
'ascii',
'ascij',
'大处着眼,小处着手',
'大处着眼,小处着手。',
'大处着眼,小处着手。🐍⚡',
]
UNICODE_COUNT_EXAMPLES = [
('', ''),
('', 'ascii'),
('ascii', ''),
('asc ii', ' '),
('ascii', 'ci'),
('ascii', 'ascii'),
('ascii', 'Ă'),
('ascii', '大处'),
('ascii', 'étú?'),
('', '大处 着眼,小处着手。大大大处'),
('大处 着眼,小处着手。大大大处', ''),
('大处 着眼,小处着手。大大大处', ' '),
('大处 着眼,小处着手。大大大处', 'ci'),
('大处 着眼,小处着手。大大大处', '大处大处'),
('大处 着眼,小处着手。大大大处', '大处 着眼,小处着手。大大大处'),
('大处 着眼,小处着手。大大大处', 'Ă'),
('大处 着眼,小处着手。大大大处', '大处'),
('大处 着眼,小处着手。大大大处', 'étú?'),
('', 'tú quién te crees?'),
('tú quién te crees?', ''),
('tú quién te crees?', ' '),
('tú quién te crees?', 'ci'),
('tú quién te crees?', 'tú quién te crees?'),
('tú quién te crees?', 'Ă'),
('tú quién te crees?', '大处'),
('tú quién te crees?', 'étú?'),
('abababab', 'a'),
('abababab', 'ab'),
('abababab', 'aba'),
('aaaaaaaaaa', 'aaa'),
('aaaaaaaaaa', 'aĂ'),
('aabbaaaabbaa', 'aa')
]
class TestUnicode(BaseTest):
def test_literal(self, flags=no_pyobj_flags):
pyfunc = literal_usecase
self.run_nullary_func(pyfunc, flags=flags)
def test_passthrough(self, flags=no_pyobj_flags):
pyfunc = passthrough_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
self.assertEqual(pyfunc(s), cfunc(s))
def test_eq(self, flags=no_pyobj_flags):
pyfunc = eq_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
for b in reversed(UNICODE_EXAMPLES):
self.assertEqual(pyfunc(a, b),
cfunc(a, b), '%s, %s' % (a, b))
# comparing against something that's not unicode
self.assertEqual(pyfunc(a, 1),
cfunc(a, 1), '%s, %s' % (a, 1))
self.assertEqual(pyfunc(1, b),
cfunc(1, b), '%s, %s' % (1, b))
def _check_ordering_op(self, usecase):
pyfunc = usecase
cfunc = njit(pyfunc)
# Check comparison to self
for a in UNICODE_ORDERING_EXAMPLES:
self.assertEqual(
pyfunc(a, a),
cfunc(a, a),
'%s: "%s", "%s"' % (usecase.__name__, a, a),
)
# Check comparison to adjacent
for a, b in permutations(UNICODE_ORDERING_EXAMPLES, r=2):
self.assertEqual(
pyfunc(a, b),
cfunc(a, b),
'%s: "%s", "%s"' % (usecase.__name__, a, b),
)
# and reversed
self.assertEqual(
pyfunc(b, a),
cfunc(b, a),
'%s: "%s", "%s"' % (usecase.__name__, b, a),
)
def test_lt(self, flags=no_pyobj_flags):
self._check_ordering_op(lt_usecase)
def test_le(self, flags=no_pyobj_flags):
self._check_ordering_op(le_usecase)
def test_gt(self, flags=no_pyobj_flags):
self._check_ordering_op(gt_usecase)
def test_ge(self, flags=no_pyobj_flags):
self._check_ordering_op(ge_usecase)
def test_len(self, flags=no_pyobj_flags):
pyfunc = len_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
self.assertEqual(pyfunc(s), cfunc(s))
def test_bool(self, flags=no_pyobj_flags):
pyfunc = bool_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
self.assertEqual(pyfunc(s), cfunc(s))
def test_expandtabs(self):
pyfunc = expandtabs_usecase
cfunc = njit(pyfunc)
cases = ['', '\t', 't\tt\t', 'a\t', '\t⚡', 'a\tbc\nab\tc',
'🐍\t⚡', '🐍⚡\n\t\t🐍\t', 'ab\rab\t\t\tab\r\n\ta']
msg = 'Results of "{}".expandtabs() must be equal'
for s in cases:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_expandtabs_with_tabsize(self):
fns = [njit(expandtabs_with_tabsize_usecase),
njit(expandtabs_with_tabsize_kwarg_usecase)]
messages = ['Results of "{}".expandtabs({}) must be equal',
'Results of "{}".expandtabs(tabsize={}) must be equal']
cases = ['', '\t', 't\tt\t', 'a\t', '\t⚡', 'a\tbc\nab\tc',
'🐍\t⚡', '🐍⚡\n\t\t🐍\t', 'ab\rab\t\t\tab\r\n\ta']
for s in cases:
for tabsize in range(-1, 10):
for fn, msg in zip(fns, messages):
self.assertEqual(fn.py_func(s, tabsize), fn(s, tabsize),
msg=msg.format(s, tabsize))
def test_expandtabs_exception_noninteger_tabsize(self):
pyfunc = expandtabs_with_tabsize_usecase
cfunc = njit(pyfunc)
accepted_types = (types.Integer, int)
with self.assertRaises(TypingError) as raises:
cfunc('\t', 2.4)
msg = '"tabsize" must be {}, not float'.format(accepted_types)
self.assertIn(msg, str(raises.exception))
def test_startswith(self, flags=no_pyobj_flags):
pyfunc = startswith_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
for b in ['', 'x', a[:-2], a[3:], a, a + a]:
self.assertEqual(pyfunc(a, b),
cfunc(a, b),
'%s, %s' % (a, b))
def test_endswith(self, flags=no_pyobj_flags):
pyfunc = endswith_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
for b in ['', 'x', a[:-2], a[3:], a, a + a]:
self.assertEqual(pyfunc(a, b),
cfunc(a, b),
'%s, %s' % (a, b))
def test_endswith_default(self):
pyfunc = endswith_usecase
cfunc = njit(pyfunc)
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/865c3b257fe38154a4320c7ee6afb416f665b9c2/Lib/test/string_tests.py#L1049-L1099 # noqa: E501
cpython_str = ['hello', 'helloworld', '']
cpython_subs = [
'he', 'hello', 'helloworld', 'ello',
'', 'lowo', 'lo', 'he', 'lo', 'o',
]
extra_subs = ['hellohellohello', ' ']
for s in cpython_str + UNICODE_EXAMPLES:
default_subs = ['', 'x', s[:-2], s[3:], s, s + s]
for sub_str in cpython_subs + default_subs + extra_subs:
msg = 'Results "{}".endswith("{}") must be equal'
self.assertEqual(pyfunc(s, sub_str), cfunc(s, sub_str),
msg=msg.format(s, sub_str))
def test_endswith_with_start(self):
pyfunc = endswith_with_start_only_usecase
cfunc = njit(pyfunc)
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/865c3b257fe38154a4320c7ee6afb416f665b9c2/Lib/test/string_tests.py#L1049-L1099 # noqa: E501
cpython_str = ['hello', 'helloworld', '']
cpython_subs = [
'he', 'hello', 'helloworld', 'ello',
'', 'lowo', 'lo', 'he', 'lo', 'o',
]
extra_subs = ['hellohellohello', ' ']
for s in cpython_str + UNICODE_EXAMPLES:
default_subs = ['', 'x', s[:-2], s[3:], s, s + s]
for sub_str in cpython_subs + default_subs + extra_subs:
for start in list(range(-20, 20)) + [None]:
msg = 'Results "{}".endswith("{}", {}) must be equal'
self.assertEqual(pyfunc(s, sub_str, start),
cfunc(s, sub_str, start),
msg=msg.format(s, sub_str, start))
def test_endswith_with_start_end(self):
pyfunc = endswith_with_start_end_usecase
cfunc = njit(pyfunc)
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/865c3b257fe38154a4320c7ee6afb416f665b9c2/Lib/test/string_tests.py#LL1049-L1099 # noqa: E501
cpython_str = ['hello', 'helloworld', '']
cpython_subs = [
'he', 'hello', 'helloworld', 'ello',
'', 'lowo', 'lo', 'he', 'lo', 'o',
]
extra_subs = ['hellohellohello', ' ']
for s in cpython_str + UNICODE_EXAMPLES:
default_subs = ['', 'x', s[:-2], s[3:], s, s + s]
for sub_str in cpython_subs + default_subs + extra_subs:
for start in list(range(-20, 20)) + [None]:
for end in list(range(-20, 20)) + [None]:
msg = 'Results "{}".endswith("{}", {}, {})\
must be equal'
self.assertEqual(pyfunc(s, sub_str, start, end),
cfunc(s, sub_str, start, end),
msg=msg.format(s, sub_str, start, end))
def test_endswith_tuple(self):
pyfunc = endswith_usecase
cfunc = njit(pyfunc)
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/865c3b257fe38154a4320c7ee6afb416f665b9c2/Lib/test/string_tests.py#L1049-L1099 # noqa: E501
cpython_str = ['hello', 'helloworld', '']
cpython_subs = [
'he', 'hello', 'helloworld', 'ello',
'', 'lowo', 'lo', 'he', 'lo', 'o',
]
extra_subs = ['hellohellohello', ' ']
for s in cpython_str + UNICODE_EXAMPLES:
default_subs = ['', 'x', s[:-2], s[3:], s, s + s]
for sub_str in cpython_subs + default_subs + extra_subs:
msg = 'Results "{}".endswith({}) must be equal'
tuple_subs = (sub_str, 'lo')
self.assertEqual(pyfunc(s, tuple_subs),
cfunc(s, tuple_subs),
msg=msg.format(s, tuple_subs))
def test_endswith_tuple_args(self):
pyfunc = endswith_with_start_end_usecase
cfunc = njit(pyfunc)
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/865c3b257fe38154a4320c7ee6afb416f665b9c2/Lib/test/string_tests.py#L1049-L1099 # noqa: E501
cpython_str = ['hello', 'helloworld', '']
cpython_subs = [
'he', 'hello', 'helloworld', 'ello',
'', 'lowo', 'lo', 'he', 'lo', 'o',
]
extra_subs = ['hellohellohello', ' ']
for s in cpython_str + UNICODE_EXAMPLES:
default_subs = ['', 'x', s[:-2], s[3:], s, s + s]
for sub_str in cpython_subs + default_subs + extra_subs:
for start in list(range(-20, 20)) + [None]:
for end in list(range(-20, 20)) + [None]:
msg = 'Results "{}".endswith("{}", {}, {})\
must be equal'
tuple_subs = (sub_str, 'lo')
self.assertEqual(pyfunc(s, tuple_subs, start, end),
cfunc(s, tuple_subs, start, end),
msg=msg.format(s, tuple_subs,
start, end))
def test_in(self, flags=no_pyobj_flags):
pyfunc = in_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
extras = ['', 'xx', a[::-1], a[:-2], a[3:], a, a + a]
for substr in extras:
self.assertEqual(pyfunc(substr, a),
cfunc(substr, a),
"'%s' in '%s'?" % (substr, a))
def test_partition_exception_invalid_sep(self):
self.disable_leak_check()
pyfunc = partition_usecase
cfunc = njit(pyfunc)
# Handle empty separator exception
for func in [pyfunc, cfunc]:
with self.assertRaises(ValueError) as raises:
func('a', '')
self.assertIn('empty separator', str(raises.exception))
accepted_types = (types.UnicodeType, types.UnicodeCharSeq)
with self.assertRaises(TypingError) as raises:
cfunc('a', None)
msg = '"sep" must be {}, not none'.format(accepted_types)
self.assertIn(msg, str(raises.exception))
def test_partition(self):
pyfunc = partition_usecase
cfunc = njit(pyfunc)
CASES = [
('', '⚡'),
('abcabc', '⚡'),
('🐍⚡', '⚡'),
('🐍⚡🐍', '⚡'),
('abababa', 'a'),
('abababa', 'b'),
('abababa', 'c'),
('abababa', 'ab'),
('abababa', 'aba'),
]
msg = 'Results of "{}".partition("{}") must be equal'
for s, sep in CASES:
self.assertEqual(pyfunc(s, sep), cfunc(s, sep),
msg=msg.format(s, sep))
def test_find(self, flags=no_pyobj_flags):
pyfunc = find_usecase
cfunc = njit(pyfunc)
default_subs = [
(s, ['', 'xx', s[:-2], s[3:], s]) for s in UNICODE_EXAMPLES
]
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L202-L231 # noqa: E501
cpython_subs = [
('a' * 100 + '\u0102', ['\u0102', '\u0201', '\u0120', '\u0220']),
('a' * 100 + '\U00100304', ['\U00100304', '\U00100204',
'\U00102004']),
('\u0102' * 100 + 'a', ['a']),
('\U00100304' * 100 + 'a', ['a']),
('\U00100304' * 100 + '\u0102', ['\u0102']),
('a' * 100, ['\u0102', '\U00100304', 'a\u0102', 'a\U00100304']),
('\u0102' * 100, ['\U00100304', '\u0102\U00100304']),
('\u0102' * 100 + 'a_', ['a_']),
('\U00100304' * 100 + 'a_', ['a_']),
('\U00100304' * 100 + '\u0102_', ['\u0102_']),
]
for s, subs in default_subs + cpython_subs:
for sub_str in subs:
msg = 'Results "{}".find("{}") must be equal'
self.assertEqual(pyfunc(s, sub_str), cfunc(s, sub_str),
msg=msg.format(s, sub_str))
def test_find_with_start_only(self):
pyfunc = find_with_start_only_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
for sub_str in ['', 'xx', s[:-2], s[3:], s]:
for start in list(range(-20, 20)) + [None]:
msg = 'Results "{}".find("{}", {}) must be equal'
self.assertEqual(pyfunc(s, sub_str, start),
cfunc(s, sub_str, start),
msg=msg.format(s, sub_str, start))
def test_find_with_start_end(self):
pyfunc = find_with_start_end_usecase
cfunc = njit(pyfunc)
starts = ends = list(range(-20, 20)) + [None]
for s in UNICODE_EXAMPLES:
for sub_str in ['', 'xx', s[:-2], s[3:], s]:
for start, end in product(starts, ends):
msg = 'Results of "{}".find("{}", {}, {}) must be equal'
self.assertEqual(pyfunc(s, sub_str, start, end),
cfunc(s, sub_str, start, end),
msg=msg.format(s, sub_str, start, end))
def test_find_exception_noninteger_start_end(self):
pyfunc = find_with_start_end_usecase
cfunc = njit(pyfunc)
accepted = (types.Integer, types.NoneType)
for start, end, name in [(0.1, 5, 'start'), (0, 0.5, 'end')]:
with self.assertRaises(TypingError) as raises:
cfunc('ascii', 'sci', start, end)
msg = '"{}" must be {}, not float'.format(name, accepted)
self.assertIn(msg, str(raises.exception))
def test_rpartition_exception_invalid_sep(self):
self.disable_leak_check()
pyfunc = rpartition_usecase
cfunc = njit(pyfunc)
# Handle empty separator exception
for func in [pyfunc, cfunc]:
with self.assertRaises(ValueError) as raises:
func('a', '')
self.assertIn('empty separator', str(raises.exception))
accepted_types = (types.UnicodeType, types.UnicodeCharSeq)
with self.assertRaises(TypingError) as raises:
cfunc('a', None)
msg = '"sep" must be {}, not none'.format(accepted_types)
self.assertIn(msg, str(raises.exception))
def test_rpartition(self):
pyfunc = rpartition_usecase
cfunc = njit(pyfunc)
CASES = [
('', '⚡'),
('abcabc', '⚡'),
('🐍⚡', '⚡'),
('🐍⚡🐍', '⚡'),
('abababa', 'a'),
('abababa', 'b'),
('abababa', 'c'),
('abababa', 'ab'),
('abababa', 'aba'),
]
msg = 'Results of "{}".rpartition("{}") must be equal'
for s, sep in CASES:
self.assertEqual(pyfunc(s, sep), cfunc(s, sep),
msg=msg.format(s, sep))
def test_count(self):
pyfunc = count_usecase
cfunc = njit(pyfunc)
error_msg = "'{0}'.py_count('{1}') = {2}\n'{0}'.c_count('{1}') = {3}"
for s, sub in UNICODE_COUNT_EXAMPLES:
py_result = pyfunc(s, sub)
c_result = cfunc(s, sub)
self.assertEqual(py_result, c_result,
error_msg.format(s, sub, py_result, c_result))
def test_count_with_start(self):
pyfunc = count_with_start_usecase
cfunc = njit(pyfunc)
error_msg = "%s\n%s" % ("'{0}'.py_count('{1}', {2}) = {3}",
"'{0}'.c_count('{1}', {2}) = {4}")
for s, sub in UNICODE_COUNT_EXAMPLES:
for i in range(-18, 18):
py_result = pyfunc(s, sub, i)
c_result = cfunc(s, sub, i)
self.assertEqual(py_result, c_result,
error_msg.format(s, sub, i, py_result,
c_result))
py_result = pyfunc(s, sub, None)
c_result = cfunc(s, sub, None)
self.assertEqual(py_result, c_result,
error_msg.format(s, sub, None, py_result,
c_result))
def test_count_with_start_end(self):
pyfunc = count_with_start_end_usecase
cfunc = njit(pyfunc)
error_msg = "%s\n%s" % ("'{0}'.py_count('{1}', {2}, {3}) = {4}",
"'{0}'.c_count('{1}', {2}, {3}) = {5}")
for s, sub in UNICODE_COUNT_EXAMPLES:
for i, j in product(range(-18, 18), (-18, 18)):
py_result = pyfunc(s, sub, i, j)
c_result = cfunc(s, sub, i, j)
self.assertEqual(py_result, c_result,
error_msg.format(s, sub, i, j, py_result,
c_result))
for j in range(-18, 18):
py_result = pyfunc(s, sub, None, j)
c_result = cfunc(s, sub, None, j)
self.assertEqual(py_result, c_result,
error_msg.format(s, sub, None, j, py_result,
c_result))
py_result = pyfunc(s, sub, None, None)
c_result = cfunc(s, sub, None, None)
self.assertEqual(py_result, c_result,
error_msg.format(s, sub, None, None, py_result,
c_result))
def test_count_arg_type_check(self):
cfunc = njit(count_with_start_end_usecase)
with self.assertRaises(TypingError) as raises:
cfunc('ascii', 'c', 1, 0.5)
self.assertIn('The slice indices must be an Integer or None',
str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc('ascii', 'c', 1.2, 7)
self.assertIn('The slice indices must be an Integer or None',
str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc('ascii', 12, 1, 7)
self.assertIn('The substring must be a UnicodeType, not',
str(raises.exception))
def test_count_optional_arg_type_check(self):
pyfunc = count_with_start_end_usecase
def try_compile_bad_optional(*args):
bad_sig = types.int64(types.unicode_type,
types.unicode_type,
types.Optional(types.float64),
types.Optional(types.float64))
njit([bad_sig])(pyfunc)
with self.assertRaises(TypingError) as raises:
try_compile_bad_optional('tú quis?', 'tú', 1.1, 1.1)
self.assertIn('The slice indices must be an Integer or None',
str(raises.exception))
error_msg = "%s\n%s" % ("'{0}'.py_count('{1}', {2}, {3}) = {4}",
"'{0}'.c_count_op('{1}', {2}, {3}) = {5}")
sig_optional = types.int64(types.unicode_type,
types.unicode_type,
types.Optional(types.int64),
types.Optional(types.int64))
cfunc_optional = njit([sig_optional])(pyfunc)
py_result = pyfunc('tú quis?', 'tú', 0, 8)
c_result = cfunc_optional('tú quis?', 'tú', 0, 8)
self.assertEqual(py_result, c_result,
error_msg.format('tú quis?', 'tú', 0, 8, py_result,
c_result))
def test_rfind(self):
pyfunc = rfind_usecase
cfunc = njit(pyfunc)
default_subs = [
(s, ['', 'xx', s[:-2], s[3:], s]) for s in UNICODE_EXAMPLES
]
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L233-L259 # noqa: E501
cpython_subs = [
('\u0102' + 'a' * 100, ['\u0102', '\u0201', '\u0120', '\u0220']),
('\U00100304' + 'a' * 100, ['\U00100304', '\U00100204',
'\U00102004']),
('abcdefghiabc', ['abc', '']),
('a' + '\u0102' * 100, ['a']),
('a' + '\U00100304' * 100, ['a']),
('\u0102' + '\U00100304' * 100, ['\u0102']),
('a' * 100, ['\u0102', '\U00100304', '\u0102a', '\U00100304a']),
('\u0102' * 100, ['\U00100304', '\U00100304\u0102']),
('_a' + '\u0102' * 100, ['_a']),
('_a' + '\U00100304' * 100, ['_a']),
('_\u0102' + '\U00100304' * 100, ['_\u0102']),
]
for s, subs in default_subs + cpython_subs:
for sub_str in subs:
msg = 'Results "{}".rfind("{}") must be equal'
self.assertEqual(pyfunc(s, sub_str), cfunc(s, sub_str),
msg=msg.format(s, sub_str))
def test_rfind_with_start_only(self):
pyfunc = rfind_with_start_only_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
for sub_str in ['', 'xx', s[:-2], s[3:], s]:
for start in list(range(-20, 20)) + [None]:
msg = 'Results "{}".rfind("{}", {}) must be equal'
self.assertEqual(pyfunc(s, sub_str, start),
cfunc(s, sub_str, start),
msg=msg.format(s, sub_str, start))
def test_rfind_with_start_end(self):
pyfunc = rfind_with_start_end_usecase
cfunc = njit(pyfunc)
starts = list(range(-20, 20)) + [None]
ends = list(range(-20, 20)) + [None]
for s in UNICODE_EXAMPLES:
for sub_str in ['', 'xx', s[:-2], s[3:], s]:
for start, end in product(starts, ends):
msg = 'Results of "{}".rfind("{}", {}, {}) must be equal'
self.assertEqual(pyfunc(s, sub_str, start, end),
cfunc(s, sub_str, start, end),
msg=msg.format(s, sub_str, start, end))
def test_rfind_wrong_substr(self):
cfunc = njit(rfind_usecase)
for s in UNICODE_EXAMPLES:
for sub_str in [None, 1, False]:
with self.assertRaises(TypingError) as raises:
cfunc(s, sub_str)
msg = 'must be {}'.format(types.UnicodeType)
self.assertIn(msg, str(raises.exception))
def test_rfind_wrong_start_end(self):
cfunc = njit(rfind_with_start_end_usecase)
accepted_types = (types.Integer, types.NoneType)
for s in UNICODE_EXAMPLES:
for sub_str in ['', 'xx', s[:-2], s[3:], s]:
# test wrong start
for start, end in product([0.1, False], [-1, 1]):
with self.assertRaises(TypingError) as raises:
cfunc(s, sub_str, start, end)
msg = '"start" must be {}'.format(accepted_types)
self.assertIn(msg, str(raises.exception))
# test wrong end
for start, end in product([-1, 1], [-0.1, True]):
with self.assertRaises(TypingError) as raises:
cfunc(s, sub_str, start, end)
msg = '"end" must be {}'.format(accepted_types)
self.assertIn(msg, str(raises.exception))
def test_rfind_wrong_start_end_optional(self):
s = UNICODE_EXAMPLES[0]
sub_str = s[1:-1]
accepted_types = (types.Integer, types.NoneType)
msg = 'must be {}'.format(accepted_types)
def try_compile_wrong_start_optional(*args):
wrong_sig_optional = types.int64(types.unicode_type,
types.unicode_type,
types.Optional(types.float64),
types.Optional(types.intp))
njit([wrong_sig_optional])(rfind_with_start_end_usecase)
with self.assertRaises(TypingError) as raises:
try_compile_wrong_start_optional(s, sub_str, 0.1, 1)
self.assertIn(msg, str(raises.exception))
def try_compile_wrong_end_optional(*args):
wrong_sig_optional = types.int64(types.unicode_type,
types.unicode_type,
types.Optional(types.intp),
types.Optional(types.float64))
njit([wrong_sig_optional])(rfind_with_start_end_usecase)
with self.assertRaises(TypingError) as raises:
try_compile_wrong_end_optional(s, sub_str, 1, 0.1)
self.assertIn(msg, str(raises.exception))
def test_rindex(self):
pyfunc = rindex_usecase
cfunc = njit(pyfunc)
default_subs = [
(s, ['', s[:-2], s[3:], s]) for s in UNICODE_EXAMPLES
]
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L284-L308 # noqa: E501
cpython_subs = [
('abcdefghiabc', ['', 'def', 'abc']),
('a' + '\u0102' * 100, ['a']),
('a' + '\U00100304' * 100, ['a']),
('\u0102' + '\U00100304' * 100, ['\u0102']),
('_a' + '\u0102' * 100, ['_a']),
('_a' + '\U00100304' * 100, ['_a']),
('_\u0102' + '\U00100304' * 100, ['_\u0102'])
]
for s, subs in default_subs + cpython_subs:
for sub_str in subs:
msg = 'Results "{}".rindex("{}") must be equal'
self.assertEqual(pyfunc(s, sub_str), cfunc(s, sub_str),
msg=msg.format(s, sub_str))
def test_index(self):
pyfunc = index_usecase
cfunc = njit(pyfunc)
default_subs = [
(s, ['', s[:-2], s[3:], s]) for s in UNICODE_EXAMPLES
]
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L260-L282 # noqa: E501
cpython_subs = [
('abcdefghiabc', ['', 'def', 'abc']),
('\u0102' * 100 + 'a', ['a']),
('\U00100304' * 100 + 'a', ['a']),
('\U00100304' * 100 + '\u0102', ['\u0102']),
('\u0102' * 100 + 'a_', ['a_']),
('\U00100304' * 100 + 'a_', ['a_']),
('\U00100304' * 100 + '\u0102_', ['\u0102_'])
]
for s, subs in default_subs + cpython_subs:
for sub_str in subs:
msg = 'Results "{}".index("{}") must be equal'
self.assertEqual(pyfunc(s, sub_str), cfunc(s, sub_str),
msg=msg.format(s, sub_str))
def test_index_rindex_with_start_only(self):
pyfuncs = [index_with_start_only_usecase,
rindex_with_start_only_usecase]
messages = ['Results "{}".index("{}", {}) must be equal',
'Results "{}".rindex("{}", {}) must be equal']
unicode_examples = [
'ascii',
'12345',
'1234567890',
'¡Y tú quién te crees?',
'大处着眼,小处着手。',
]
for pyfunc, msg in zip(pyfuncs, messages):
cfunc = njit(pyfunc)
for s in unicode_examples:
l = len(s)
cases = [
('', list(range(-10, l + 1))),
(s[:-2], [0] + list(range(-10, 1 - l))),
(s[3:], list(range(4)) + list(range(-10, 4 - l))),
(s, [0] + list(range(-10, 1 - l))),
]
for sub_str, starts in cases:
for start in starts + [None]:
self.assertEqual(pyfunc(s, sub_str, start),
cfunc(s, sub_str, start),
msg=msg.format(s, sub_str, start))
def test_index_rindex_with_start_end(self):
pyfuncs = [index_with_start_end_usecase, rindex_with_start_end_usecase]
messages = ['Results of "{}".index("{}", {}, {}) must be equal',
'Results of "{}".rindex("{}", {}, {}) must be equal']
unicode_examples = [
'ascii',
'12345',
'1234567890',
'¡Y tú quién te crees?',
'大处着眼,小处着手。',
]
for pyfunc, msg in zip(pyfuncs, messages):
cfunc = njit(pyfunc)
for s in unicode_examples:
l = len(s)
cases = [
('', list(range(-10, l + 1)), list(range(l, 10))),
(s[:-2], [0] + list(range(-10, 1 - l)),
[-2, -1] + list(range(l - 2, 10))),
(s[3:], list(range(4)) + list(range(-10, -1)),
list(range(l, 10))),
(s, [0] + list(range(-10, 1 - l)), list(range(l, 10))),
]
for sub_str, starts, ends in cases:
for start, end in product(starts + [None], ends):
self.assertEqual(pyfunc(s, sub_str, start, end),
cfunc(s, sub_str, start, end),
msg=msg.format(s, sub_str, start, end))
def test_index_rindex_exception_substring_not_found(self):
self.disable_leak_check()
unicode_examples = [
'ascii',
'12345',
'1234567890',
'¡Y tú quién te crees?',
'大处着眼,小处着手。',
]
pyfuncs = [index_with_start_end_usecase, rindex_with_start_end_usecase]
for pyfunc in pyfuncs:
cfunc = njit(pyfunc)
for s in unicode_examples:
l = len(s)
cases = [
('', list(range(l + 1, 10)), [l]),
(s[:-2], [0], list(range(l - 2))),
(s[3:], list(range(4, 10)), [l]),
(s, [None], list(range(l))),
]
for sub_str, starts, ends in cases:
for start, end in product(starts, ends):
for func in [pyfunc, cfunc]:
with self.assertRaises(ValueError) as raises:
func(s, sub_str, start, end)
msg = 'substring not found'
self.assertIn(msg, str(raises.exception))
def test_index_rindex_exception_noninteger_start_end(self):
accepted = (types.Integer, types.NoneType)
pyfuncs = [index_with_start_end_usecase, rindex_with_start_end_usecase]
for pyfunc in pyfuncs:
cfunc = njit(pyfunc)
for start, end, name in [(0.1, 5, 'start'), (0, 0.5, 'end')]:
with self.assertRaises(TypingError) as raises:
cfunc('ascii', 'sci', start, end)
msg = '"{}" must be {}, not float'.format(name, accepted)
self.assertIn(msg, str(raises.exception))
def test_getitem(self):
pyfunc = getitem_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
for i in range(-len(s), len(s)):
self.assertEqual(pyfunc(s, i),
cfunc(s, i),
"'%s'[%d]?" % (s, i))
def test_getitem_scalar_kind(self):
        # See issue #6135: make sure that getitem returns a char of the minimal
        # kind required to represent the "got" item. This is checked via the use
        # of `hash` in the test function, as it is sensitive to kind.
pyfunc = getitem_check_kind_usecase
cfunc = njit(pyfunc)
samples = ['a\u1234', '¡着']
for s in samples:
for i in range(-len(s), len(s)):
self.assertEqual(pyfunc(s, i),
cfunc(s, i),
"'%s'[%d]?" % (s, i))
def test_getitem_error(self):
self.disable_leak_check()
pyfunc = getitem_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
with self.assertRaises(IndexError) as raises:
pyfunc(s, len(s))
self.assertIn('string index out of range', str(raises.exception))
with self.assertRaises(IndexError) as raises:
cfunc(s, len(s))
self.assertIn('string index out of range', str(raises.exception))
def test_slice2(self):
pyfunc = getitem_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
for i in list(range(-len(s), len(s))):
for j in list(range(-len(s), len(s))):
sl = slice(i, j)
self.assertEqual(pyfunc(s, sl),
cfunc(s, sl),
"'%s'[%d:%d]?" % (s, i, j))
def test_slice2_error(self):
pyfunc = getitem_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
for i in [-2, -1, len(s), len(s) + 1]:
for j in [-2, -1, len(s), len(s) + 1]:
sl = slice(i, j)
self.assertEqual(pyfunc(s, sl),
cfunc(s, sl),
"'%s'[%d:%d]?" % (s, i, j))
def test_getitem_slice2_kind(self):
# See issue #6135. Also see note in test_getitem_scalar_kind regarding
# testing.
pyfunc = getitem_check_kind_usecase
cfunc = njit(pyfunc)
samples = ['abc\u1234\u1234', '¡¡¡着着着']
for s in samples:
for i in [-2, -1, 0, 1, 2, len(s), len(s) + 1]:
for j in [-2, -1, 0, 1, 2, len(s), len(s) + 1]:
sl = slice(i, j)
self.assertEqual(pyfunc(s, sl),
cfunc(s, sl),
"'%s'[%d:%d]?" % (s, i, j))
def test_slice3(self):
pyfunc = getitem_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
for i in range(-len(s), len(s)):
for j in range(-len(s), len(s)):
for k in [-2, -1, 1, 2]:
sl = slice(i, j, k)
self.assertEqual(pyfunc(s, sl),
cfunc(s, sl),
"'%s'[%d:%d:%d]?" % (s, i, j, k))
def test_getitem_slice3_kind(self):
# See issue #6135. Also see note in test_getitem_scalar_kind regarding
# testing.
pyfunc = getitem_check_kind_usecase
cfunc = njit(pyfunc)
samples = ['abc\u1234\u1234',
                   'a\u1234b\u1234c',
'¡¡¡着着着',
'¡着¡着¡着',
'着a着b着c',
'¡着a¡着b¡着c',
'¡着a着¡c',]
for s in samples:
for i in range(-len(s), len(s)):
for j in range(-len(s), len(s)):
for k in [-2, -1, 1, 2]:
sl = slice(i, j, k)
self.assertEqual(pyfunc(s, sl),
cfunc(s, sl),
"'%s'[%d:%d:%d]?" % (s, i, j, k))
def test_slice3_error(self):
pyfunc = getitem_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
for i in [-2, -1, len(s), len(s) + 1]:
for j in [-2, -1, len(s), len(s) + 1]:
for k in [-2, -1, 1, 2]:
sl = slice(i, j, k)
self.assertEqual(pyfunc(s, sl),
cfunc(s, sl),
"'%s'[%d:%d:%d]?" % (s, i, j, k))
def test_slice_ascii_flag(self):
"""
Make sure ascii flag is False when ascii and non-ascii characters are
mixed in output of Unicode slicing.
"""
@njit
def f(s):
return s[::2]._is_ascii, s[1::2]._is_ascii
s = "¿abc¡Y tú, quién te cre\t\tes?"
self.assertEqual(f(s), (0, 1))
def test_zfill(self):
pyfunc = zfill_usecase
cfunc = njit(pyfunc)
ZFILL_INPUTS = [
'ascii',
'+ascii',
'-ascii',
'-asc ii-',
'12345',
'-12345',
'+12345',
'',
'¡Y tú crs?',
'🐍⚡',
'+🐍⚡',
'-🐍⚡',
'大眼,小手。',
'+大眼,小手。',
'-大眼,小手。',
]
with self.assertRaises(TypingError) as raises:
cfunc(ZFILL_INPUTS[0], 1.1)
self.assertIn('<width> must be an Integer', str(raises.exception))
for s in ZFILL_INPUTS:
for width in range(-3, 20):
self.assertEqual(pyfunc(s, width),
cfunc(s, width))
def test_concat(self, flags=no_pyobj_flags):
pyfunc = concat_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
for b in UNICODE_EXAMPLES[::-1]:
self.assertEqual(pyfunc(a, b),
cfunc(a, b),
"'%s' + '%s'?" % (a, b))
def test_repeat(self, flags=no_pyobj_flags):
pyfunc = repeat_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
for b in (-1, 0, 1, 2, 3, 4, 5, 7, 8, 15, 70):
self.assertEqual(pyfunc(a, b),
cfunc(a, b))
self.assertEqual(pyfunc(b, a),
cfunc(b, a))
def test_repeat_exception_float(self):
self.disable_leak_check()
cfunc = njit(repeat_usecase)
with self.assertRaises(TypingError) as raises:
cfunc('hi', 2.5)
self.assertIn(_header_lead + ' Function(<built-in function mul>)',
str(raises.exception))
def test_split_exception_empty_sep(self):
self.disable_leak_check()
pyfunc = split_usecase
cfunc = njit(pyfunc)
# Handle empty separator exception
for func in [pyfunc, cfunc]:
with self.assertRaises(ValueError) as raises:
func('a', '')
self.assertIn('empty separator', str(raises.exception))
def test_split_exception_noninteger_maxsplit(self):
pyfunc = split_with_maxsplit_usecase
cfunc = njit(pyfunc)
# Handle non-integer maxsplit exception
for sep in [' ', None]:
with self.assertRaises(TypingError) as raises:
cfunc('a', sep, 2.4)
self.assertIn('float64', str(raises.exception),
'non-integer maxsplit with sep = %s' % sep)
def test_split(self):
pyfunc = split_usecase
cfunc = njit(pyfunc)
CASES = [
(' a ', None),
('', '⚡'),
('abcabc', '⚡'),
('🐍⚡', '⚡'),
('🐍⚡🐍', '⚡'),
('abababa', 'a'),
('abababa', 'b'),
('abababa', 'c'),
('abababa', 'ab'),
('abababa', 'aba'),
]
for test_str, splitter in CASES:
self.assertEqual(pyfunc(test_str, splitter),
cfunc(test_str, splitter),
"'%s'.split('%s')?" % (test_str, splitter))
def test_split_with_maxsplit(self):
CASES = [
(' a ', None, 1),
('', '⚡', 1),
('abcabc', '⚡', 1),
('🐍⚡', '⚡', 1),
('🐍⚡🐍', '⚡', 1),
('abababa', 'a', 2),
('abababa', 'b', 1),
('abababa', 'c', 2),
('abababa', 'ab', 1),
('abababa', 'aba', 5),
]
for pyfunc, fmt_str in [(split_with_maxsplit_usecase,
"'%s'.split('%s', %d)?"),
(split_with_maxsplit_kwarg_usecase,
"'%s'.split('%s', maxsplit=%d)?")]:
cfunc = njit(pyfunc)
for test_str, splitter, maxsplit in CASES:
self.assertEqual(pyfunc(test_str, splitter, maxsplit),
cfunc(test_str, splitter, maxsplit),
fmt_str % (test_str, splitter, maxsplit))
def test_split_whitespace(self):
# explicit sep=None cases covered in test_split and
# test_split_with_maxsplit
pyfunc = split_whitespace_usecase
cfunc = njit(pyfunc)
# list copied from
# https://github.com/python/cpython/blob/master/Objects/unicodetype_db.h
all_whitespace = ''.join(map(chr, [
0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x001C, 0x001D, 0x001E,
0x001F, 0x0020, 0x0085, 0x00A0, 0x1680, 0x2000, 0x2001, 0x2002,
0x2003, 0x2004, 0x2005, 0x2006, 0x2007, 0x2008, 0x2009, 0x200A,
0x2028, 0x2029, 0x202F, 0x205F, 0x3000
]))
CASES = [
'',
'abcabc',
'🐍 ⚡',
'🐍 ⚡ 🐍',
'🐍 ⚡ 🐍 ',
' 🐍 ⚡ 🐍',
' 🐍' + all_whitespace + '⚡ 🐍 ',
]
for test_str in CASES:
self.assertEqual(pyfunc(test_str),
cfunc(test_str),
"'%s'.split()?" % (test_str,))
def test_split_exception_invalid_keepends(self):
pyfunc = splitlines_with_keepends_usecase
cfunc = njit(pyfunc)
accepted_types = (types.Integer, int, types.Boolean, bool)
for ty, keepends in (('none', None), ('unicode_type', 'None')):
with self.assertRaises(TypingError) as raises:
cfunc('\n', keepends)
msg = '"keepends" must be {}, not {}'.format(accepted_types, ty)
self.assertIn(msg, str(raises.exception))
def test_splitlines(self):
pyfunc = splitlines_usecase
cfunc = njit(pyfunc)
cases = ['', '\n', 'abc\r\rabc\r\n', '🐍⚡\v', '\f🐍⚡\f\v\v🐍\x85',
'\u2028aba\u2029baba', '\n\r\na\v\fb\x0b\x0cc\x1c\x1d\x1e']
msg = 'Results of "{}".splitlines() must be equal'
for s in cases:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_splitlines_with_keepends(self):
pyfuncs = [
splitlines_with_keepends_usecase,
splitlines_with_keepends_kwarg_usecase
]
messages = [
'Results of "{}".splitlines({}) must be equal',
'Results of "{}".splitlines(keepends={}) must be equal'
]
cases = ['', '\n', 'abc\r\rabc\r\n', '🐍⚡\v', '\f🐍⚡\f\v\v🐍\x85',
'\u2028aba\u2029baba', '\n\r\na\v\fb\x0b\x0cc\x1c\x1d\x1e']
all_keepends = [True, False, 0, 1, -1, 100]
for pyfunc, msg in zip(pyfuncs, messages):
cfunc = njit(pyfunc)
for s, keepends in product(cases, all_keepends):
self.assertEqual(pyfunc(s, keepends), cfunc(s, keepends),
msg=msg.format(s, keepends))
def test_rsplit_exception_empty_sep(self):
self.disable_leak_check()
pyfunc = rsplit_usecase
cfunc = njit(pyfunc)
# Handle empty separator exception
for func in [pyfunc, cfunc]:
with self.assertRaises(ValueError) as raises:
func('a', '')
self.assertIn('empty separator', str(raises.exception))
def test_rsplit_exception_noninteger_maxsplit(self):
pyfunc = rsplit_with_maxsplit_usecase
cfunc = njit(pyfunc)
accepted_types = (types.Integer, int)
for sep in [' ', None]:
with self.assertRaises(TypingError) as raises:
cfunc('a', sep, 2.4)
msg = '"maxsplit" must be {}, not float'.format(accepted_types)
self.assertIn(msg, str(raises.exception))
def test_rsplit(self):
pyfunc = rsplit_usecase
cfunc = njit(pyfunc)
CASES = [
(' a ', None),
('', '⚡'),
('abcabc', '⚡'),
('🐍⚡', '⚡'),
('🐍⚡🐍', '⚡'),
('abababa', 'a'),
('abababa', 'b'),
('abababa', 'c'),
('abababa', 'ab'),
('abababa', 'aba'),
]
msg = 'Results of "{}".rsplit("{}") must be equal'
for s, sep in CASES:
self.assertEqual(pyfunc(s, sep), cfunc(s, sep),
msg=msg.format(s, sep))
def test_rsplit_with_maxsplit(self):
pyfuncs = [rsplit_with_maxsplit_usecase,
rsplit_with_maxsplit_kwarg_usecase]
CASES = [
(' a ', None, 1),
('', '⚡', 1),
('abcabc', '⚡', 1),
('🐍⚡', '⚡', 1),
('🐍⚡🐍', '⚡', 1),
('abababa', 'a', 2),
('abababa', 'b', 1),
('abababa', 'c', 2),
('abababa', 'ab', 1),
('abababa', 'aba', 5),
]
messages = [
'Results of "{}".rsplit("{}", {}) must be equal',
'Results of "{}".rsplit("{}", maxsplit={}) must be equal'
]
for pyfunc, msg in zip(pyfuncs, messages):
cfunc = njit(pyfunc)
for test_str, sep, maxsplit in CASES:
self.assertEqual(pyfunc(test_str, sep, maxsplit),
cfunc(test_str, sep, maxsplit),
msg=msg.format(test_str, sep, maxsplit))
def test_rsplit_whitespace(self):
pyfunc = rsplit_whitespace_usecase
cfunc = njit(pyfunc)
# list copied from
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodetype_db.h#L5996-L6031 # noqa: E501
all_whitespace = ''.join(map(chr, [
0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x001C, 0x001D, 0x001E,
0x001F, 0x0020, 0x0085, 0x00A0, 0x1680, 0x2000, 0x2001, 0x2002,
0x2003, 0x2004, 0x2005, 0x2006, 0x2007, 0x2008, 0x2009, 0x200A,
0x2028, 0x2029, 0x202F, 0x205F, 0x3000
]))
CASES = [
'',
'abcabc',
'🐍 ⚡',
'🐍 ⚡ 🐍',
'🐍 ⚡ 🐍 ',
' 🐍 ⚡ 🐍',
' 🐍' + all_whitespace + '⚡ 🐍 ',
]
msg = 'Results of "{}".rsplit() must be equal'
for s in CASES:
self.assertEqual(pyfunc(s), cfunc(s), msg.format(s))
def test_join_empty(self):
# Can't pass empty list to nopython mode, so we have to make a
# separate test case
pyfunc = join_empty_usecase
cfunc = njit(pyfunc)
CASES = [
'',
'🐍🐍🐍',
]
for sep in CASES:
self.assertEqual(pyfunc(sep),
cfunc(sep),
"'%s'.join([])?" % (sep,))
def test_join_non_string_exception(self):
# Verify that join of list of integers raises typing exception
pyfunc = join_usecase
cfunc = njit(pyfunc)
# Handle empty separator exception
with self.assertRaises(TypingError) as raises:
cfunc('', [1, 2, 3])
# This error message is obscure, but indicates the error was trapped
# in the typing of str.join()
# Feel free to change this as we update error messages.
exc_message = str(raises.exception)
self.assertIn(
"During: resolving callee type: BoundFunction",
exc_message,
)
# could be int32 or int64
self.assertIn("reflected list(int", exc_message)
def test_join(self):
pyfunc = join_usecase
cfunc = njit(pyfunc)
CASES = [
('', ['', '', '']),
('a', ['', '', '']),
('', ['a', 'bbbb', 'c']),
('🐍🐍🐍', ['⚡⚡'] * 5),
]
for sep, parts in CASES:
self.assertEqual(pyfunc(sep, parts),
cfunc(sep, parts),
"'%s'.join('%s')?" % (sep, parts))
def test_join_interleave_str(self):
# can pass a string as the parts iterable
pyfunc = join_usecase
cfunc = njit(pyfunc)
CASES = [
('abc', '123'),
('🐍🐍🐍', '⚡⚡'),
]
for sep, parts in CASES:
self.assertEqual(pyfunc(sep, parts),
cfunc(sep, parts),
"'%s'.join('%s')?" % (sep, parts))
def test_justification(self):
for pyfunc, case_name in [(center_usecase, 'center'),
(ljust_usecase, 'ljust'),
(rjust_usecase, 'rjust')]:
cfunc = njit(pyfunc)
with self.assertRaises(TypingError) as raises:
cfunc(UNICODE_EXAMPLES[0], 1.1)
self.assertIn('The width must be an Integer', str(raises.exception))
for s in UNICODE_EXAMPLES:
for width in range(-3, 20):
self.assertEqual(pyfunc(s, width),
cfunc(s, width),
"'%s'.%s(%d)?" % (s, case_name, width))
def test_justification_fillchar(self):
for pyfunc, case_name in [(center_usecase_fillchar, 'center'),
(ljust_usecase_fillchar, 'ljust'),
(rjust_usecase_fillchar, 'rjust')]:
cfunc = njit(pyfunc)
# allowed fillchar cases
for fillchar in [' ', '+', 'ú', '处']:
with self.assertRaises(TypingError) as raises:
cfunc(UNICODE_EXAMPLES[0], 1.1, fillchar)
self.assertIn('The width must be an Integer',
str(raises.exception))
for s in UNICODE_EXAMPLES:
for width in range(-3, 20):
self.assertEqual(pyfunc(s, width, fillchar),
cfunc(s, width, fillchar),
"'%s'.%s(%d, '%s')?" % (s, case_name,
width,
fillchar))
def test_justification_fillchar_exception(self):
self.disable_leak_check()
for pyfunc in [center_usecase_fillchar,
ljust_usecase_fillchar,
rjust_usecase_fillchar]:
cfunc = njit(pyfunc)
# disallowed fillchar cases
for fillchar in ['', '+0', 'quién', '处着']:
with self.assertRaises(ValueError) as raises:
cfunc(UNICODE_EXAMPLES[0], 20, fillchar)
self.assertIn('The fill character must be exactly one',
str(raises.exception))
# forbid fillchar cases with different types
for fillchar in [1, 1.1]:
with self.assertRaises(TypingError) as raises:
cfunc(UNICODE_EXAMPLES[0], 20, fillchar)
self.assertIn('The fillchar must be a UnicodeType',
str(raises.exception))
def test_inplace_concat(self, flags=no_pyobj_flags):
pyfunc = inplace_concat_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
for b in UNICODE_EXAMPLES[::-1]:
self.assertEqual(pyfunc(a, b),
cfunc(a, b),
"'%s' + '%s'?" % (a, b))
def test_isidentifier(self):
def pyfunc(s):
return s.isidentifier()
cfunc = njit(pyfunc)
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L695-L708 # noqa: E501
cpython = ['a', 'Z', '_', 'b0', 'bc', 'b_', 'µ',
'𝔘𝔫𝔦𝔠𝔬𝔡𝔢', ' ', '[', '©', '0']
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L742-L749 # noqa: E501
cpython_extras = ['\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF',
'a\uD800b\uDFFF', 'a\uDFFFb\uD800',
'a\uD800b\uDFFFa', 'a\uDFFFb\uD800a']
msg = 'Results of "{}".isidentifier() must be equal'
for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_strip(self):
STRIP_CASES = [
('ass cii', 'ai'),
('ass cii', None),
('asscii', 'ai '),
('asscii ', 'ai '),
(' asscii ', 'ai '),
(' asscii ', 'asci '),
(' asscii ', 's'),
(' ', ' '),
('', ' '),
('', ''),
('', None),
(' ', None),
(' asscii ', 'ai '),
(' asscii ', ''),
(' asscii ', None),
('tú quién te crees?', 'étú? '),
(' tú quién te crees? ', 'étú? '),
(' tú qrees? ', ''),
(' tú quién te crees? ', None),
('大处 着眼,小处着手。大大大处', '大处'),
(' 大处大处 ', ''),
('\t\nabcd\t', '\ta'),
(' 大处大处 ', None),
('\t abcd \t', None),
('\n abcd \n', None),
('\r abcd \r', None),
('\x0b abcd \x0b', None),
('\x0c abcd \x0c', None),
('\u2029abcd\u205F', None),
('\u0085abcd\u2009', None)
]
# form with no parameter
for pyfunc, case_name in [(strip_usecase, 'strip'),
(lstrip_usecase, 'lstrip'),
(rstrip_usecase, 'rstrip')]:
cfunc = njit(pyfunc)
for string, chars in STRIP_CASES:
self.assertEqual(pyfunc(string),
cfunc(string),
"'%s'.%s()?" % (string, case_name))
# parametrized form
for pyfunc, case_name in [(strip_usecase_chars, 'strip'),
(lstrip_usecase_chars, 'lstrip'),
(rstrip_usecase_chars, 'rstrip')]:
cfunc = njit(pyfunc)
sig1 = types.unicode_type(types.unicode_type,
types.Optional(types.unicode_type))
cfunc_optional = njit([sig1])(pyfunc)
def try_compile_bad_optional(*args):
bad = types.unicode_type(types.unicode_type,
types.Optional(types.float64))
njit([bad])(pyfunc)
for fn in cfunc, try_compile_bad_optional:
with self.assertRaises(TypingError) as raises:
fn('tú quis?', 1.1)
self.assertIn('The arg must be a UnicodeType or None',
str(raises.exception))
for fn in cfunc, cfunc_optional:
for string, chars in STRIP_CASES:
self.assertEqual(pyfunc(string, chars),
fn(string, chars),
"'%s'.%s('%s')?" % (string, case_name,
chars))
def test_isspace(self):
def pyfunc(s):
return s.isspace()
cfunc = njit(pyfunc)
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L613-L621 # noqa: E501
cpython = ['\u2000', '\u200a', '\u2014', '\U00010401', '\U00010427',
'\U00010429', '\U0001044E', '\U0001F40D', '\U0001F46F']
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L742-L749 # noqa: E501
cpython_extras = ['\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF',
'a\uD800b\uDFFF', 'a\uDFFFb\uD800',
'a\uD800b\uDFFFa', 'a\uDFFFb\uD800a']
msg = 'Results of "{}".isspace() must be equal'
for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_istitle(self):
pyfunc = istitle_usecase
cfunc = njit(pyfunc)
error_msg = "'{0}'.py_istitle() = {1}\n'{0}'.c_istitle() = {2}"
unicode_title = [x.title() for x in UNICODE_EXAMPLES]
special = [
'',
' ',
' AA ',
' Ab ',
'1',
'A123',
'A12Bcd',
'+abA',
'12Abc',
'A12abc',
            '%^Abc 5 $% Def',
'𐐁𐐩',
'𐐧𐑎',
'𐐩',
'𐑎',
'🐍 Is',
'🐍 NOT',
'👯Is',
'ῼ',
'Greek ῼitlecases ...'
]
ISTITLE_EXAMPLES = UNICODE_EXAMPLES + unicode_title + special
for s in ISTITLE_EXAMPLES:
py_result = pyfunc(s)
c_result = cfunc(s)
self.assertEqual(py_result, c_result,
error_msg.format(s, py_result, c_result))
def test_isprintable(self):
def pyfunc(s):
return s.isprintable()
cfunc = njit(pyfunc)
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L710-L723 # noqa: E501
cpython = ['', ' ', 'abcdefg', 'abcdefg\n', '\u0374', '\u0378',
'\ud800', '\U0001F46F', '\U000E0020']
msg = 'Results of "{}".isprintable() must be equal'
for s in UNICODE_EXAMPLES + cpython:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_pointless_slice(self, flags=no_pyobj_flags):
def pyfunc(a):
return a[:]
cfunc = njit(pyfunc)
args = ['a']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_walk_backwards(self, flags=no_pyobj_flags):
def pyfunc(a):
return a[::-1]
cfunc = njit(pyfunc)
args = ['a']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_stride_slice(self, flags=no_pyobj_flags):
def pyfunc(a):
return a[::2]
cfunc = njit(pyfunc)
args = ['a']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_basic_lt(self, flags=no_pyobj_flags):
def pyfunc(a, b):
return a < b
cfunc = njit(pyfunc)
args = ['ab', 'b']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_basic_gt(self, flags=no_pyobj_flags):
def pyfunc(a, b):
return a > b
cfunc = njit(pyfunc)
args = ['ab', 'b']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_comparison(self):
def pyfunc(option, x, y):
if option == '==':
return x == y
elif option == '!=':
return x != y
elif option == '<':
return x < y
elif option == '>':
return x > y
elif option == '<=':
return x <= y
elif option == '>=':
return x >= y
else:
return None
cfunc = njit(pyfunc)
for x, y in permutations(UNICODE_ORDERING_EXAMPLES, r=2):
for cmpop in ['==', '!=', '<', '>', '<=', '>=', '']:
args = [cmpop, x, y]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_literal_concat(self):
def pyfunc(x):
abc = 'abc'
if len(x):
return abc + 'b123' + x + 'IO'
else:
return x + abc + '123' + x
cfunc = njit(pyfunc)
args = ['x']
self.assertEqual(pyfunc(*args), cfunc(*args))
args = ['']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_literal_comparison(self):
def pyfunc(option):
x = 'a123'
y = 'aa12'
if option == '==':
return x == y
elif option == '!=':
return x != y
elif option == '<':
return x < y
elif option == '>':
return x > y
elif option == '<=':
return x <= y
elif option == '>=':
return x >= y
else:
return None
cfunc = njit(pyfunc)
for cmpop in ['==', '!=', '<', '>', '<=', '>=', '']:
args = [cmpop]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_literal_len(self):
def pyfunc():
return len('abc')
cfunc = njit(pyfunc)
self.assertEqual(pyfunc(), cfunc())
def test_literal_getitem(self):
def pyfunc(which):
return 'abc'[which]
cfunc = njit(pyfunc)
for a in [-1, 0, 1, slice(1, None), slice(None, -1)]:
args = [a]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_literal_in(self):
def pyfunc(x):
return x in '9876zabiuh'
cfunc = njit(pyfunc)
for a in ['a', '9', '1', '', '8uha', '987']:
args = [a]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_literal_xyzwith(self):
def pyfunc(x, y):
return 'abc'.startswith(x), 'cde'.endswith(y)
cfunc = njit(pyfunc)
for args in permutations('abcdefg', r=2):
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_literal_find(self):
def pyfunc(x):
return 'abc'.find(x), x.find('a')
cfunc = njit(pyfunc)
for a in ['ab']:
args = [a]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_not(self):
def pyfunc(x):
return not x
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
args = [a]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_capitalize(self):
def pyfunc(x):
return x.capitalize()
cfunc = njit(pyfunc)
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L800-L815 # noqa: E501
cpython = ['\U0001044F', '\U0001044F\U0001044F', '\U00010427\U0001044F',
'\U0001044F\U00010427', 'X\U00010427x\U0001044F', 'h\u0130',
'\u1fd2\u0130', 'finnish', 'A\u0345\u03a3']
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L926 # noqa: E501
cpython_extras = ['\U00010000\U00100000']
msg = 'Results of "{}".capitalize() must be equal'
for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_isupper(self):
def pyfunc(x):
return x.isupper()
cfunc = njit(pyfunc)
uppers = [x.upper() for x in UNICODE_EXAMPLES]
extras = ["AA12A", "aa12a", "大AA12A", "大aa12a", "AAADŽA", "A 1 1 大"]
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L585-L599 # noqa: E501
cpython = ['\u2167', '\u2177', '\U00010401', '\U00010427', '\U00010429',
'\U0001044E', '\U0001F40D', '\U0001F46F']
fourxcpy = [x * 4 for x in cpython]
for a in UNICODE_EXAMPLES + uppers + extras + cpython + fourxcpy:
args = [a]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_upper(self):
def pyfunc(x):
return x.upper()
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
args = [a]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_casefold(self):
def pyfunc(x):
return x.casefold()
cfunc = njit(pyfunc)
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L774-L781 # noqa: E501
cpython = ['hello', 'hELlo', 'ß', 'fi', '\u03a3',
'A\u0345\u03a3', '\u00b5']
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L924 # noqa: E501
cpython_extras = ['\U00010000\U00100000']
msg = 'Results of "{}".casefold() must be equal'
for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_isalpha(self):
def pyfunc(x):
return x.isalpha()
cfunc = njit(pyfunc)
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L630-L640 # noqa: E501
cpython = ['\u1FFc', '\U00010401', '\U00010427', '\U00010429',
'\U0001044E', '\U0001F40D', '\U0001F46F']
# https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L738-L745 # noqa: E501
extras = ['\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF',
'a\uD800b\uDFFF', 'a\uDFFFb\uD800',
'a\uD800b\uDFFFa', 'a\uDFFFb\uD800a']
msg = 'Results of "{}".isalpha() must be equal'
for s in UNICODE_EXAMPLES + [''] + extras + cpython:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
@unittest.skipUnless(_py37_or_later,
'isascii method requires Python 3.7 or later')
def test_isascii(self):
def pyfunc(x):
return x.isascii()
cfunc = njit(pyfunc)
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/865c3b257fe38154a4320c7ee6afb416f665b9c2/Lib/test/string_tests.py#L913-L926 # noqa: E501
cpython = ['', '\x00', '\x7f', '\x00\x7f', '\x80', '\xe9', ' ']
msg = 'Results of "{}".isascii() must be equal'
for s in UNICODE_EXAMPLES + cpython:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_title(self):
pyfunc = title
cfunc = njit(pyfunc)
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L813-L828 # noqa: E501
cpython = ['\U0001044F', '\U0001044F\U0001044F',
'\U0001044F\U0001044F \U0001044F\U0001044F',
'\U00010427\U0001044F \U00010427\U0001044F',
'\U0001044F\U00010427 \U0001044F\U00010427',
'X\U00010427x\U0001044F X\U00010427x\U0001044F',
'fiNNISH', 'A\u03a3 \u1fa1xy', 'A\u03a3A']
msg = 'Results of "{}".title() must be equal'
for s in UNICODE_EXAMPLES + [''] + cpython:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_swapcase(self):
def pyfunc(x):
return x.swapcase()
cfunc = njit(pyfunc)
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L834-L858 # noqa: E501
cpython = ['\U0001044F', '\U00010427', '\U0001044F\U0001044F',
'\U00010427\U0001044F', '\U0001044F\U00010427',
'X\U00010427x\U0001044F', 'fi', '\u0130', '\u03a3',
'\u0345\u03a3', 'A\u0345\u03a3', 'A\u0345\u03a3a',
'A\u0345\u03a3', 'A\u03a3\u0345', '\u03a3\u0345 ',
'\u03a3', 'ß', '\u1fd2']
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L928 # noqa: E501
cpython_extras = ['\U00010000\U00100000']
msg = 'Results of "{}".swapcase() must be equal'
for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_islower(self):
pyfunc = islower_usecase
cfunc = njit(pyfunc)
lowers = [x.lower() for x in UNICODE_EXAMPLES]
extras = ['AA12A', 'aa12a', '大AA12A', '大aa12a', 'AAADŽA', 'A 1 1 大']
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L586-L600 # noqa: E501
cpython = ['\u2167', '\u2177', '\U00010401', '\U00010427',
'\U00010429', '\U0001044E', '\U0001F40D', '\U0001F46F']
cpython += [x * 4 for x in cpython]
msg = 'Results of "{}".islower() must be equal'
for s in UNICODE_EXAMPLES + lowers + [''] + extras + cpython:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_isalnum(self):
def pyfunc(x):
return x.isalnum()
cfunc = njit(pyfunc)
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L624-L628 # noqa: E501
cpython = ['\U00010401', '\U00010427', '\U00010429', '\U0001044E',
'\U0001D7F6', '\U00011066', '\U000104A0', '\U0001F107']
# https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L738-L745 # noqa: E501
extras = ['\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF',
'a\uD800b\uDFFF', 'a\uDFFFb\uD800',
'a\uD800b\uDFFFa', 'a\uDFFFb\uD800a']
msg = 'Results of "{}".isalnum() must be equal'
for s in UNICODE_EXAMPLES + [''] + extras + cpython:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_lower(self):
pyfunc = lower_usecase
cfunc = njit(pyfunc)
extras = ['AA12A', 'aa12a', '大AA12A', '大aa12a', 'AAADŽA', 'A 1 1 大']
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L748-L758 # noqa: E501
cpython = ['\U00010401', '\U00010427', '\U0001044E', '\U0001F46F',
'\U00010427\U00010427', '\U00010427\U0001044F',
'X\U00010427x\U0001044F', '\u0130']
# special cases for sigma from CPython testing:
# https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L759-L768 # noqa: E501
sigma = ['\u03a3', '\u0345\u03a3', 'A\u0345\u03a3', 'A\u0345\u03a3a',
'\u03a3\u0345 ', '\U0008fffe', '\u2177']
extra_sigma = 'A\u03a3\u03a2'
sigma.append(extra_sigma)
msg = 'Results of "{}".lower() must be equal'
for s in UNICODE_EXAMPLES + [''] + extras + cpython + sigma:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_isnumeric(self):
def pyfunc(x):
return x.isnumeric()
cfunc = njit(pyfunc)
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L676-L693 # noqa: E501
cpython = ['', 'a', '0', '\u2460', '\xbc', '\u0660', '0123456789',
'0123456789a', '\U00010401', '\U00010427', '\U00010429',
'\U0001044E', '\U0001F40D', '\U0001F46F', '\U00011065',
'\U0001D7F6', '\U00011066', '\U000104A0', '\U0001F107']
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L742-L749 # noqa: E501
cpython_extras = ['\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF',
'a\uD800b\uDFFF', 'a\uDFFFb\uD800', 'a\uD800b\uDFFFa',
'a\uDFFFb\uD800a']
msg = 'Results of "{}".isnumeric() must be equal'
for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_isdigit(self):
def pyfunc(x):
return x.isdigit()
cfunc = njit(pyfunc)
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L664-L674 # noqa: E501
cpython = ['\u2460', '\xbc', '\u0660', '\U00010401', '\U00010427',
'\U00010429', '\U0001044E', '\U0001F40D', '\U0001F46F',
'\U00011065', '\U0001D7F6', '\U00011066', '\U000104A0',
'\U0001F107']
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L742-L749 # noqa: E501
cpython_extras = ['\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF',
'a\uD800b\uDFFF', 'a\uDFFFb\uD800',
'a\uD800b\uDFFFa', 'a\uDFFFb\uD800a']
msg = 'Results of "{}".isdigit() must be equal'
for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_isdecimal(self):
def pyfunc(x):
return x.isdecimal()
cfunc = njit(pyfunc)
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L646-L662 # noqa: E501
cpython = ['', 'a', '0', '\u2460', '\xbc', '\u0660', '0123456789',
'0123456789a', '\U00010401', '\U00010427', '\U00010429',
'\U0001044E', '\U0001F40D', '\U0001F46F', '\U00011065',
'\U0001F107', '\U0001D7F6', '\U00011066', '\U000104A0']
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L742-L749 # noqa: E501
cpython_extras = ['\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF',
'a\uD800b\uDFFF', 'a\uDFFFb\uD800', 'a\uD800b\uDFFFa',
'a\uDFFFb\uD800a']
msg = 'Results of "{}".isdecimal() must be equal'
for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_replace(self):
pyfunc = replace_usecase
cfunc = njit(pyfunc)
CASES = [
('abc', '', 'A'),
('', '⚡', 'A'),
('abcabc', '⚡', 'A'),
('🐍⚡', '⚡', 'A'),
('🐍⚡🐍', '⚡', 'A'),
('abababa', 'a', 'A'),
('abababa', 'b', 'A'),
('abababa', 'c', 'A'),
('abababa', 'ab', 'A'),
('abababa', 'aba', 'A'),
]
for test_str, old_str, new_str in CASES:
self.assertEqual(pyfunc(test_str, old_str, new_str),
cfunc(test_str, old_str, new_str),
"'%s'.replace('%s', '%s')?" %
(test_str, old_str, new_str))
def test_replace_with_count(self):
pyfunc = replace_with_count_usecase
cfunc = njit(pyfunc)
CASES = [
('abc', '', 'A'),
('', '⚡', 'A'),
('abcabc', '⚡', 'A'),
('🐍⚡', '⚡', 'A'),
('🐍⚡🐍', '⚡', 'A'),
('abababa', 'a', 'A'),
('abababa', 'b', 'A'),
('abababa', 'c', 'A'),
('abababa', 'ab', 'A'),
('abababa', 'aba', 'A'),
]
count_test = [-1, 1, 0, 5]
for test_str, old_str, new_str in CASES:
for count in count_test:
self.assertEqual(pyfunc(test_str, old_str, new_str, count),
cfunc(test_str, old_str, new_str, count),
"'%s'.replace('%s', '%s', '%s')?" %
(test_str, old_str, new_str, count))
def test_replace_unsupported(self):
def pyfunc(s, x, y, count):
return s.replace(x, y, count)
cfunc = njit(pyfunc)
with self.assertRaises(TypingError) as raises:
cfunc('ababababab', 'ba', 'qqq', 3.5)
msg = 'Unsupported parameters. The parametrs must be Integer.'
self.assertIn(msg, str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc('ababababab', 0, 'qqq', 3)
msg = 'The object must be a UnicodeType.'
self.assertIn(msg, str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc('ababababab', 'ba', 0, 3)
msg = 'The object must be a UnicodeType.'
self.assertIn(msg, str(raises.exception))
class TestUnicodeInTuple(BaseTest):
def test_const_unicode_in_tuple(self):
# Issue 3673
@njit
def f():
return ('aa',) < ('bb',)
self.assertEqual(f.py_func(), f())
@njit
def f():
return ('cc',) < ('bb',)
self.assertEqual(f.py_func(), f())
def test_const_unicode_in_hetero_tuple(self):
@njit
def f():
return ('aa', 1) < ('bb', 1)
self.assertEqual(f.py_func(), f())
@njit
def f():
return ('aa', 1) < ('aa', 2)
self.assertEqual(f.py_func(), f())
def test_ascii_flag_unbox(self):
@njit
def f(s):
return s._is_ascii
for s in UNICODE_EXAMPLES:
self.assertEqual(f(s), isascii(s))
def test_ascii_flag_join(self):
@njit
def f():
s1 = 'abc'
s2 = '123'
s3 = '🐍⚡'
s4 = '大处着眼,小处着手。'
return (",".join([s1, s2])._is_ascii,
"🐍⚡".join([s1, s2])._is_ascii,
",".join([s1, s3])._is_ascii,
",".join([s3, s4])._is_ascii)
self.assertEqual(f(), (1, 0, 0, 0))
def test_ascii_flag_getitem(self):
@njit
def f():
s1 = 'abc123'
s2 = '🐍⚡🐍⚡🐍⚡'
return (s1[0]._is_ascii, s1[2:]._is_ascii, s2[0]._is_ascii,
s2[2:]._is_ascii)
self.assertEqual(f(), (1, 1, 0, 0))
def test_ascii_flag_add_mul(self):
@njit
def f():
s1 = 'abc'
s2 = '123'
s3 = '🐍⚡'
s4 = '大处着眼,小处着手。'
return ((s1 + s2)._is_ascii,
(s1 + s3)._is_ascii,
(s3 + s4)._is_ascii,
(s1 * 2)._is_ascii,
(s3 * 2)._is_ascii)
self.assertEqual(f(), (1, 0, 0, 1, 0))
class TestUnicodeIteration(BaseTest):
def test_unicode_iter(self):
pyfunc = iter_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
self.assertPreciseEqual(pyfunc(a), cfunc(a))
def test_unicode_literal_iter(self):
pyfunc = literal_iter_usecase
cfunc = njit(pyfunc)
self.assertPreciseEqual(pyfunc(), cfunc())
def test_unicode_enumerate_iter(self):
pyfunc = enumerated_iter_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
self.assertPreciseEqual(pyfunc(a), cfunc(a))
def test_unicode_stopiteration_iter(self):
self.disable_leak_check()
pyfunc = iter_stopiteration_usecase
cfunc = njit(pyfunc)
for f in (pyfunc, cfunc):
for a in UNICODE_EXAMPLES:
with self.assertRaises(StopIteration):
f(a)
def test_unicode_literal_stopiteration_iter(self):
pyfunc = literal_iter_stopiteration_usecase
cfunc = njit(pyfunc)
for f in (pyfunc, cfunc):
with self.assertRaises(StopIteration):
f()
class TestUnicodeAuxillary(BaseTest):
def test_ord(self):
pyfunc = ord_usecase
cfunc = njit(pyfunc)
for ex in UNICODE_EXAMPLES:
for a in ex:
self.assertPreciseEqual(pyfunc(a), cfunc(a))
def test_ord_invalid(self):
self.disable_leak_check()
pyfunc = ord_usecase
cfunc = njit(pyfunc)
# wrong number of chars
for func in (pyfunc, cfunc):
for ch in ('', 'abc'):
with self.assertRaises(TypeError) as raises:
func(ch)
self.assertIn('ord() expected a character',
str(raises.exception))
# wrong type
with self.assertRaises(TypingError) as raises:
cfunc(1.23)
self.assertIn(_header_lead, str(raises.exception))
def test_chr(self):
pyfunc = chr_usecase
cfunc = njit(pyfunc)
for ex in UNICODE_EXAMPLES:
for x in ex:
a = ord(x)
self.assertPreciseEqual(pyfunc(a), cfunc(a))
# test upper/lower bounds
for a in (0x0, _MAX_UNICODE):
self.assertPreciseEqual(pyfunc(a), cfunc(a))
def test_chr_invalid(self):
pyfunc = chr_usecase
cfunc = njit(pyfunc)
# value negative/>_MAX_UNICODE
for func in (pyfunc, cfunc):
for v in (-2, _MAX_UNICODE + 1):
with self.assertRaises(ValueError) as raises:
func(v)
self.assertIn("chr() arg not in range", str(raises.exception))
# wrong type
with self.assertRaises(TypingError) as raises:
cfunc('abc')
self.assertIn(_header_lead, str(raises.exception))
def test_unicode_type_mro(self):
# see issue #5635
def bar(x):
return True
@overload(bar)
def ol_bar(x):
ok = False
if isinstance(x, types.UnicodeType):
if isinstance(x, types.Hashable):
ok = True
return lambda x: ok
@njit
def foo(strinst):
return bar(strinst)
inst = "abc"
self.assertEqual(foo.py_func(inst), foo(inst))
self.assertIn(types.Hashable, types.unicode_type.__class__.__mro__)
def test_f_strings(self):
"""test f-string support, which requires bytecode handling
"""
        # requires formatting (FORMAT_VALUE) and concatenation (BUILD_STRING)
def impl1(a):
return f"AA_{a+3}_B"
# does not require concatenation
def impl2(a):
return f"{a+2}"
# no expression
def impl3(a):
return f"ABC_{a}"
# format spec not allowed
def impl4(a):
return f"ABC_{a:0}"
# corner case: empty string
def impl5():
return f"" # noqa: F541
self.assertEqual(impl1(3), njit(impl1)(3))
self.assertEqual(impl2(2), njit(impl2)(2))
# string input
self.assertEqual(impl3("DE"), njit(impl3)("DE"))
# check error when input type doesn't have str() implementation
with self.assertRaises(TypingError) as raises:
njit(impl3)(["A", "B"])
msg = "No implementation of function Function(<class 'str'>)"
self.assertIn(msg, str(raises.exception))
# check error when format spec provided
with self.assertRaises(UnsupportedError) as raises:
njit(impl4)(["A", "B"])
msg = "format spec in f-strings not supported yet"
self.assertIn(msg, str(raises.exception))
self.assertEqual(impl5(), njit(impl5)())
if __name__ == '__main__':
unittest.main()
| cpcloud/numba | numba/tests/test_unicode.py | Python | bsd-2-clause | 93,714 | 0.000151 |
# Copyright (c) 2015 - present Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import logging
import os
from . import util
from inferlib import jwlib
MODULE_NAME = __name__
MODULE_DESCRIPTION = '''Run analysis of code built with a command like:
ant [options] [target]
Analysis examples:
infer -- ant compile'''
LANG = ['java']
def gen_instance(*args):
return AntCapture(*args)
# This creates an empty argparser for the module, which provides only
# description/usage information and no arguments.
create_argparser = util.base_argparser(MODULE_DESCRIPTION, MODULE_NAME)
class AntCapture:
def __init__(self, args, cmd):
self.args = args
util.log_java_version()
logging.info(util.run_cmd_ignore_fail(['ant', '-version']))
# TODO: make the extraction of targets smarter
self.build_cmd = ['ant', '-verbose'] + cmd[1:]
def is_interesting(self, content):
return self.is_quoted(content) or content.endswith('.java')
def is_quoted(self, argument):
quote = '\''
return len(argument) > 2 and argument[0] == quote\
and argument[-1] == quote
def remove_quotes(self, argument):
if self.is_quoted(argument):
return argument[1:-1]
else:
return argument
def get_infer_commands(self, verbose_output):
javac_pattern = '[javac]'
argument_start_pattern = 'Compilation arguments'
calls = []
javac_arguments = []
collect = False
for line in verbose_output:
if javac_pattern in line:
if argument_start_pattern in line:
collect = True
if javac_arguments != []:
capture = jwlib.create_infer_command(javac_arguments)
calls.append(capture)
javac_arguments = []
if collect:
pos = line.index(javac_pattern) + len(javac_pattern)
content = line[pos:].strip()
if self.is_interesting(content):
arg = self.remove_quotes(content)
javac_arguments.append(arg)
if javac_arguments != []:
capture = jwlib.create_infer_command(javac_arguments)
calls.append(capture)
javac_arguments = []
return calls
def capture(self):
(code, verbose_out) = util.get_build_output(self.build_cmd)
if code != os.EX_OK:
return code
clean_cmd = '\'{}\' clean'.format(self.build_cmd[0])
cmds = self.get_infer_commands(verbose_out)
return util.run_compilation_commands(cmds, clean_cmd)
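# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal look at how
# the capture logic above interprets `ant -verbose` output.  The sample lines
# are hypothetical -- real ant output varies -- but they exercise the two
# markers the parser keys on: the '[javac]' prefix and the
# 'Compilation arguments' sentinel that starts a new group of javac arguments.
if __name__ == '__main__':
    sample_verbose_output = [
        "    [javac] Compilation arguments:",
        "    [javac] '-d'",
        "    [javac] '/tmp/build/classes'",
        "    [javac] /src/Hello.java",
        "unrelated ant chatter",
    ]
    # The helper methods used below do not touch instance state, so a bare
    # instance is enough for the demo; constructing AntCapture normally would
    # shell out to `ant -version`.
    demo = AntCapture.__new__(AntCapture)
    for line in sample_verbose_output:
        if '[javac]' not in line:
            continue
        content = line[line.index('[javac]') + len('[javac]'):].strip()
        if demo.is_interesting(content):
            # quoted flags are unquoted, *.java paths are kept as-is
            print(demo.remove_quotes(content))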
| blzq/infer | infer/lib/python/inferlib/capture/ant.py | Python | bsd-3-clause | 2,914 | 0 |
""" TokenManager service
.. literalinclude:: ../ConfigTemplate.cfg
:start-after: ##BEGIN TokenManager:
:end-before: ##END
:dedent: 2
:caption: TokenManager options
"""
import pprint
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Security import Properties
from DIRAC.Core.Tornado.Server.TornadoService import TornadoService
from DIRAC.FrameworkSystem.DB.TokenDB import TokenDB
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.Resources.IdProvider.IdProviderFactory import IdProviderFactory
class TokenManagerHandler(TornadoService):
__maxExtraLifeFactor = 1.5
__tokenDB = None
@classmethod
def initializeHandler(cls, serviceInfoDict):
try:
cls.__tokenDB = TokenDB()
except Exception as e:
gLogger.exception(e)
return S_ERROR("Could not connect to the database %s" % repr(e))
cls.idps = IdProviderFactory()
return S_OK()
def __generateUsersTokensInfo(self, users):
"""Generate information dict about user tokens
:return: dict
"""
tokensInfo = []
credDict = self.getRemoteCredentials()
result = Registry.getDNForUsername(credDict["username"])
if not result["OK"]:
return result
for dn in result["Value"]:
result = Registry.getIDFromDN(dn)
if result["OK"]:
result = self.__tokenDB.getTokensByUserID(result["Value"])
if not result["OK"]:
gLogger.error(result["Message"])
tokensInfo += result["Value"]
return tokensInfo
def __generateUserTokensInfo(self):
"""Generate information dict about user tokens
:return: dict
"""
tokensInfo = []
credDict = self.getRemoteCredentials()
result = Registry.getDNForUsername(credDict["username"])
if not result["OK"]:
return result
for dn in result["Value"]:
result = Registry.getIDFromDN(dn)
if result["OK"]:
result = self.__tokenDB.getTokensByUserID(result["Value"])
if not result["OK"]:
gLogger.error(result["Message"])
tokensInfo += result["Value"]
return tokensInfo
def __addKnownUserTokensInfo(self, retDict):
"""Given a S_OK/S_ERR add a tokens entry with info of all the tokens a user has uploaded
:return: S_OK(dict)/S_ERROR()
"""
retDict["tokens"] = self.__generateUserTokensInfo()
return retDict
auth_getUserTokensInfo = ["authenticated"]
def export_getUserTokensInfo(self):
"""Get the info about the user tokens in the system
:return: S_OK(dict)
"""
return S_OK(self.__generateUserTokensInfo())
auth_getUsersTokensInfo = [Properties.PROXY_MANAGEMENT]
def export_getUsersTokensInfo(self, users):
"""Get the info about the user tokens in the system
:param list users: user names
:return: S_OK(dict)
"""
tokensInfo = []
for user in users:
result = Registry.getDNForUsername(user)
if not result["OK"]:
return result
for dn in result["Value"]:
uid = Registry.getIDFromDN(dn).get("Value")
if uid:
result = self.__tokenDB.getTokensByUserID(uid)
if not result["OK"]:
gLogger.error(result["Message"])
else:
for tokenDict in result["Value"]:
if tokenDict not in tokensInfo:
tokenDict["username"] = user
tokensInfo.append(tokenDict)
return S_OK(tokensInfo)
auth_uploadToken = ["authenticated"]
def export_updateToken(self, token, userID, provider, rt_expired_in=24 * 3600):
"""Request to delegate tokens to DIRAC
:param dict token: token
:param str userID: user ID
:param str provider: provider name
        :param int rt_expired_in: refresh token expiration time, in seconds
        :return: S_OK(list)/S_ERROR() -- list containing the uploaded tokens info as dictionaries
"""
self.log.verbose("Update %s user token for %s:\n" % (userID, provider), pprint.pformat(token))
result = self.idps.getIdProvider(provider)
if not result["OK"]:
return result
idPObj = result["Value"]
result = self.__tokenDB.updateToken(token, userID, provider, rt_expired_in)
if not result["OK"]:
return result
for oldToken in result["Value"]:
if "refresh_token" in oldToken and oldToken["refresh_token"] != token["refresh_token"]:
self.log.verbose("Revoke old refresh token:\n", pprint.pformat(oldToken))
idPObj.revokeToken(oldToken["refresh_token"])
return self.__tokenDB.getTokensByUserID(userID)
def __checkProperties(self, requestedUserDN, requestedUserGroup):
"""Check the properties and return if they can only download limited tokens if authorized
:param str requestedUserDN: user DN
:param str requestedUserGroup: DIRAC group
:return: S_OK(bool)/S_ERROR()
"""
credDict = self.getRemoteCredentials()
if Properties.FULL_DELEGATION in credDict["properties"]:
return S_OK(False)
if Properties.LIMITED_DELEGATION in credDict["properties"]:
return S_OK(True)
if Properties.PRIVATE_LIMITED_DELEGATION in credDict["properties"]:
if credDict["DN"] != requestedUserDN:
return S_ERROR("You are not allowed to download any token")
if Properties.PRIVATE_LIMITED_DELEGATION not in Registry.getPropertiesForGroup(requestedUserGroup):
return S_ERROR("You can't download tokens for that group")
return S_OK(True)
# Not authorized!
return S_ERROR("You can't get tokens!")
def export_getToken(self, username, userGroup):
"""Get a access token for a user/group
* Properties:
* FullDelegation <- permits full delegation of tokens
* LimitedDelegation <- permits downloading only limited tokens
* PrivateLimitedDelegation <- permits downloading only limited tokens for one self
"""
userID = []
provider = Registry.getIdPForGroup(userGroup)
if not provider:
return S_ERROR("The %s group belongs to the VO that is not tied to any Identity Provider." % userGroup)
result = self.idps.getIdProvider(provider)
if not result["OK"]:
return result
idpObj = result["Value"]
result = Registry.getDNForUsername(username)
if not result["OK"]:
return result
err = []
for dn in result["Value"]:
result = Registry.getIDFromDN(dn)
if result["OK"]:
result = self.__tokenDB.getTokenForUserProvider(result["Value"], provider)
if result["OK"] and result["Value"]:
idpObj.token = result["Value"]
result = self.__checkProperties(dn, userGroup)
if result["OK"]:
result = idpObj.exchangeGroup(userGroup)
if result["OK"]:
return result
err.append(result.get("Message", "No token found for %s." % dn))
return S_ERROR("; ".join(err or ["No user ID found for %s" % username]))
def export_deleteToken(self, userDN):
"""Delete a token from the DB
:param str userDN: user DN
:return: S_OK()/S_ERROR()
"""
credDict = self.getRemoteCredentials()
if Properties.PROXY_MANAGEMENT not in credDict["properties"]:
if userDN != credDict["DN"]:
return S_ERROR("You aren't allowed!")
result = Registry.getIDFromDN(userDN)
return self.__tokenDB.removeToken(user_id=result["Value"]) if result["OK"] else result
| ic-hep/DIRAC | src/DIRAC/FrameworkSystem/Service/TokenManagerHandler.py | Python | gpl-3.0 | 8,162 | 0.001715 |
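A minimal client-side sketch of how the handler's exported methods are typically reached over DIRAC's RPC layer; the service name "Framework/TokenManager" and the username/group values below are assumptions for illustration, not taken from the handler above.
# Hedged usage sketch: assumes an already-initialized DIRAC client environment and
# that the handler above is deployed as the "Framework/TokenManager" service.
from DIRAC.Core.Base.Client import Client
tokenManager = Client(url="Framework/TokenManager")
# export_getToken is exposed over RPC without the "export_" prefix.
result = tokenManager.getToken("jdoe", "dirac_user")  # hypothetical username/group
if result["OK"]:
    print("Access token:", result["Value"])
else:
    print("Could not get a token:", result["Message"])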
p = dict(
subject = 'EG009',
#Fixation size (in degrees):
fixation_size = 0.4,
monitor='testMonitor',
scanner=True,
screen_number = 1,
full_screen = True,
radial_cyc = 10,
angular_cyc = 15,
angular_width=30,
size = 60, #This just needs to be larger than the screen
temporal_freq = 2,
sf = 10,
n_blocks = 20, #20 blocks = 200 sec = 3:20 minutes
block_duration=10,
color_dur = 1/3. # 2 Hz
)
| arokem/bowties | params.py | Python | apache-2.0 | 464 | 0.064655 |
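A short consumer-side sketch of how a presentation script might use this parameter dict; the run-length arithmetic follows from n_blocks and block_duration above, while the JSON archive file is only an illustration.
# Hedged usage sketch (not part of params.py): derive the run length and archive the
# parameters next to the acquired data so each session is reproducible.
import json
from params import p
run_duration = p['n_blocks'] * p['block_duration']  # 20 blocks * 10 s = 200 s
print("Run duration: %d s for subject %s" % (run_duration, p['subject']))
with open('%s_params.json' % p['subject'], 'w') as f:
    json.dump(p, f, indent=2, sort_keys=True)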
import os
import sys
from numpy import *
from scipy.integrate import ode
from scipy.interpolate import griddata
from mesh import *
from navierstokes import NavierStokes
nE = 5000
dt = 0.0005
nsteps = 2000
Mach = 0.3
Re = 10000
HiRes = 1.
z = load('data/navierstokesInit.npz')
geom, v, t, b, soln = z['geom'], z['v'], z['t'], z['b'], z['soln']
solver = NavierStokes(v, t, b, Mach, Re, HiRes)
solver.integrate(1E-8, soln[-1])
for istep, T in enumerate(arange(1,nsteps+1) * dt):
solver.integrate(T)
sys.stdout.write('t = {0}\n'.format(solver.time)); sys.stdout.flush()
fname = 'data/navierstokesStep{0:06d}.npz'.format(istep)
savez(fname, geom=array(geom), v=v, t=t, b=b, soln=solver.soln)
| gomezstevena/x-wind | src/navierstokesRun.py | Python | gpl-3.0 | 709 | 0.004231 |
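A brief post-processing sketch for the snapshots written by the loop above; the archive field names mirror the savez call, and everything else is illustrative.
# Hedged post-processing sketch (separate from the run script): reload the saved
# snapshots with plain numpy and print a short summary of each one.
import glob
from numpy import load
for fname in sorted(glob.glob('data/navierstokesStep*.npz')):
    snapshot = load(fname)
    print(fname, 'solution shape:', snapshot['soln'].shape)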
# Copyright (c) 2015 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
from UM.Mesh.MeshWriter import MeshWriter
from UM.Math.Vector import Vector
from UM.Logger import Logger
from UM.Math.Matrix import Matrix
from UM.Application import Application
import UM.Scene.SceneNode
import Savitar
import numpy
MYPY = False
try:
if not MYPY:
import xml.etree.cElementTree as ET
except ImportError:
Logger.log("w", "Unable to load cElementTree, switching to slower version")
import xml.etree.ElementTree as ET
import zipfile
import UM.Application
class ThreeMFWriter(MeshWriter):
def __init__(self):
super().__init__()
self._namespaces = {
"3mf": "http://schemas.microsoft.com/3dmanufacturing/core/2015/02",
"content-types": "http://schemas.openxmlformats.org/package/2006/content-types",
"relationships": "http://schemas.openxmlformats.org/package/2006/relationships",
"cura": "http://software.ultimaker.com/xml/cura/3mf/2015/10"
}
self._unit_matrix_string = self._convertMatrixToString(Matrix())
self._archive = None
self._store_archive = False
def _convertMatrixToString(self, matrix):
result = ""
result += str(matrix._data[0, 0]) + " "
result += str(matrix._data[1, 0]) + " "
result += str(matrix._data[2, 0]) + " "
result += str(matrix._data[0, 1]) + " "
result += str(matrix._data[1, 1]) + " "
result += str(matrix._data[2, 1]) + " "
result += str(matrix._data[0, 2]) + " "
result += str(matrix._data[1, 2]) + " "
result += str(matrix._data[2, 2]) + " "
result += str(matrix._data[0, 3]) + " "
result += str(matrix._data[1, 3]) + " "
result += str(matrix._data[2, 3])
return result
## Should we store the archive
# Note that if this is true, the archive will not be closed.
# The object that set this parameter is then responsible for closing it correctly!
def setStoreArchive(self, store_archive):
self._store_archive = store_archive
    ## Convenience function that converts a Uranium SceneNode object to a Savitar SceneNode
    # \returns Savitar SceneNode, or None if the input is not a SceneNode
def _convertUMNodeToSavitarNode(self, um_node, transformation = Matrix()):
if type(um_node) is not UM.Scene.SceneNode.SceneNode:
return None
savitar_node = Savitar.SceneNode()
node_matrix = um_node.getLocalTransformation()
matrix_string = self._convertMatrixToString(node_matrix.preMultiply(transformation))
savitar_node.setTransformation(matrix_string)
mesh_data = um_node.getMeshData()
if mesh_data is not None:
savitar_node.getMeshData().setVerticesFromBytes(mesh_data.getVerticesAsByteArray())
indices_array = mesh_data.getIndicesAsByteArray()
if indices_array is not None:
savitar_node.getMeshData().setFacesFromBytes(indices_array)
else:
savitar_node.getMeshData().setFacesFromBytes(numpy.arange(mesh_data.getVertices().size / 3, dtype=numpy.int32).tostring())
# Handle per object settings (if any)
stack = um_node.callDecoration("getStack")
if stack is not None:
changed_setting_keys = set(stack.getTop().getAllKeys())
# Ensure that we save the extruder used for this object.
if stack.getProperty("machine_extruder_count", "value") > 1:
changed_setting_keys.add("extruder_nr")
# Get values for all changed settings & save them.
for key in changed_setting_keys:
savitar_node.setSetting(key, str(stack.getProperty(key, "value")))
for child_node in um_node.getChildren():
savitar_child_node = self._convertUMNodeToSavitarNode(child_node)
if savitar_child_node is not None:
savitar_node.addChild(savitar_child_node)
return savitar_node
def getArchive(self):
return self._archive
def write(self, stream, nodes, mode = MeshWriter.OutputMode.BinaryMode):
self._archive = None # Reset archive
archive = zipfile.ZipFile(stream, "w", compression = zipfile.ZIP_DEFLATED)
try:
model_file = zipfile.ZipInfo("3D/3dmodel.model")
# Because zipfile is stupid and ignores archive-level compression settings when writing with ZipInfo.
model_file.compress_type = zipfile.ZIP_DEFLATED
# Create content types file
content_types_file = zipfile.ZipInfo("[Content_Types].xml")
content_types_file.compress_type = zipfile.ZIP_DEFLATED
content_types = ET.Element("Types", xmlns = self._namespaces["content-types"])
rels_type = ET.SubElement(content_types, "Default", Extension = "rels", ContentType = "application/vnd.openxmlformats-package.relationships+xml")
model_type = ET.SubElement(content_types, "Default", Extension = "model", ContentType = "application/vnd.ms-package.3dmanufacturing-3dmodel+xml")
# Create _rels/.rels file
relations_file = zipfile.ZipInfo("_rels/.rels")
relations_file.compress_type = zipfile.ZIP_DEFLATED
relations_element = ET.Element("Relationships", xmlns = self._namespaces["relationships"])
model_relation_element = ET.SubElement(relations_element, "Relationship", Target = "/3D/3dmodel.model", Id = "rel0", Type = "http://schemas.microsoft.com/3dmanufacturing/2013/01/3dmodel")
savitar_scene = Savitar.Scene()
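            # First step: swap the Y and Z axes, converting Uranium's Y-up scene coordinates
            # to the Z-up convention used by 3MF.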
transformation_matrix = Matrix()
transformation_matrix._data[1, 1] = 0
transformation_matrix._data[1, 2] = -1
transformation_matrix._data[2, 1] = 1
transformation_matrix._data[2, 2] = 0
global_container_stack = Application.getInstance().getGlobalContainerStack()
# Second step: 3MF defines the left corner of the machine as center, whereas cura uses the center of the
# build volume.
if global_container_stack:
translation_vector = Vector(x=global_container_stack.getProperty("machine_width", "value") / 2,
y=global_container_stack.getProperty("machine_depth", "value") / 2,
z=0)
translation_matrix = Matrix()
translation_matrix.setByTranslation(translation_vector)
transformation_matrix.preMultiply(translation_matrix)
root_node = UM.Application.Application.getInstance().getController().getScene().getRoot()
for node in nodes:
if node == root_node:
for root_child in node.getChildren():
savitar_node = self._convertUMNodeToSavitarNode(root_child, transformation_matrix)
if savitar_node:
savitar_scene.addSceneNode(savitar_node)
else:
savitar_node = self._convertUMNodeToSavitarNode(node, transformation_matrix)
if savitar_node:
savitar_scene.addSceneNode(savitar_node)
parser = Savitar.ThreeMFParser()
scene_string = parser.sceneToString(savitar_scene)
archive.writestr(model_file, scene_string)
archive.writestr(content_types_file, b'<?xml version="1.0" encoding="UTF-8"?> \n' + ET.tostring(content_types))
archive.writestr(relations_file, b'<?xml version="1.0" encoding="UTF-8"?> \n' + ET.tostring(relations_element))
except Exception as e:
Logger.logException("e", "Error writing zip file")
return False
finally:
if not self._store_archive:
archive.close()
else:
self._archive = archive
return True
| Curahelper/Cura | plugins/3MFWriter/ThreeMFWriter.py | Python | agpl-3.0 | 7,998 | 0.006627 |
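A minimal usage sketch for the writer; it assumes a running Cura application with content already in the scene, and the output filename is illustrative.
# Hedged usage sketch (not part of the plugin): export the current scene to a 3MF file.
from UM.Application import Application
writer = ThreeMFWriter()
scene_root = Application.getInstance().getController().getScene().getRoot()
with open("exported_scene.3mf", "wb") as stream:
    if not writer.write(stream, [scene_root]):
        print("3MF export failed; check the application log.")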
import json
class SettingsEncoder(json.JSONEncoder):
def default(self, obj):
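        # Classes and other callables are not JSON-serializable by default; store their name instead.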
if isinstance(obj, type):
return obj.__name__
if callable(obj):
return obj.__name__
return json.JSONEncoder.default(self, obj)
| dhermyt/WONS | configuration/Encoder.py | Python | bsd-2-clause | 258 | 0 |
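A small usage sketch of the encoder; the settings dict below is illustrative and not taken from the project.
# Hedged usage sketch (not part of the module): values that are classes or functions
# are serialized by name, everything else follows the default JSON rules.
import json
settings = {"model": dict, "loader": len, "threshold": 0.75}
print(json.dumps(settings, cls=SettingsEncoder, sort_keys=True))
# -> {"loader": "len", "model": "dict", "threshold": 0.75}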
########
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
import sh
import yaml
from clue import tests
git = sh.git
class TestGit(tests.BaseTest):
def test_clone_location(self):
repo_base = 'custom'
repo_dir = self._install(
repo_base=repo_base,
properties={'location': str(self.repos_dir / 'custom')})
with repo_dir:
self.assertIn('master', git.status())
def test_clone_branch(self):
repo_dir = self._install(properties={'branch': '3.3.1-build'})
with repo_dir:
self.assertIn('3.3.1-build', git.status())
def test_clone_organization(self):
repo_dir = self._install(
repo='claw-scripts',
properties={'organization': 'dankilman'})
with repo_dir:
origin = git.config('remote.origin.url').stdout.strip()
if self.default_clone_method == 'ssh':
prefix = '[email protected]:'
else:
prefix = 'https://github.com/'
self.assertEqual(origin, '{}dankilman/claw-scripts.git'.format(prefix))
def test_clone_method_https(self):
self._test_clone_method(clone_method='https')
# sort of problematic testing ssh clone method
# def test_clone_method_ssh(self):
# self._test_clone_method(clone_method='ssh')
def _test_clone_method(self, clone_method):
repo_dir = self._install(clone_method=clone_method)
with repo_dir:
origin = git.config('remote.origin.url').stdout.strip()
if clone_method == 'ssh':
prefix = '[email protected]:'
elif clone_method == 'https':
prefix = 'https://github.com/'
else:
self.fail(clone_method)
self.assertEqual(origin, '{}cloudify-cosmo/cloudify-rest-client.git'
.format(prefix))
def test_configure(self):
name = 'John Doe'
email = '[email protected]'
repo_dir = self._install(git_config={
'user.name': name,
'user.email': email
})
with repo_dir:
# Test commit message hook
jira = 'CFY-10000'
branch = '{}-hello-world'.format(jira)
commit_message = 'my commit message'
git.checkout('-b', branch)
(repo_dir / 'tox.ini').write_text('testing 123')
git.commit('-am', commit_message)
self.assertEqual(self._log_message(),
'{} {}'.format(jira, commit_message))
# Test git config
self.assertEqual(name, git.config('user.name').stdout.strip())
self.assertEqual(email, git.config('user.email').stdout.strip())
def test_pull(self):
repo_dir = self._install()
with repo_dir:
initial_status = git.status().stdout.strip()
git.reset('HEAD~')
self.assertNotEqual(initial_status, git.status().stdout.strip())
git.reset(hard=True)
self.clue.git.pull()
with repo_dir:
self.assertEqual(initial_status, git.status().stdout.strip())
def test_status(self):
core_repo_dir, _, _ = self._install_repo_types_with_branches()
output = self.clue.git.status().stdout.strip()
self.assertRegexpMatches(output,
r'.*cloudify-rest-client.*\| .*master')
self.assertIn('cloudify-script-plugin', output)
self.assertIn('flask-securest', output)
self.clue.git.checkout('.3.1-build')
with core_repo_dir:
git.reset('HEAD~')
output = self.clue.git.status().stdout.strip()
self.assertRegexpMatches(output,
r'.*cloudify-rest-client.*\| .*3.3.1-build')
self.assertRegexpMatches(output,
r'.*cloudify-rest-client.*\| .*'
r'M.*cloudify_rest_client/client.py')
# test active
with core_repo_dir:
git.reset('--hard', 'HEAD')
self.clue.feature.checkout('test')
output = self.clue.git.status(active=True).stdout.strip()
self.assertIn('cloudify-rest-client', output)
self.assertNotIn('flask-securest', output)
self.assertNotIn('cloudify-script-plugin', output)
def test_checkout(self):
(core_repo_dir,
plugin_repo_dir,
misc_repo_dir) = self._install_repo_types()
test_branches = {
'repos': {
'cloudify-rest-client': '3.3.1-build'
}
}
test_branches2 = {
'branch': '3.3.1-build',
'repos': ['cloudify-rest-client']
}
features_file = self.workdir / 'features.yaml'
features_file.write_text(yaml.safe_dump({
'test': test_branches,
'test2': test_branches2
}))
def assert_master():
for repo in [core_repo_dir, plugin_repo_dir, misc_repo_dir]:
with repo:
self.assertIn('master', git.status())
def assert_custom():
for repo, expected in [(core_repo_dir, '3.3.1-build'),
(plugin_repo_dir, '1.3.1-build'),
(misc_repo_dir, 'master')]:
with repo:
self.assertIn(expected, git.status())
def assert_features_file():
for repo, expected in [(core_repo_dir, '3.3.1-build'),
(plugin_repo_dir, 'master'),
(misc_repo_dir, 'master')]:
with repo:
self.assertIn(expected, git.status())
assert_master()
self.clue.git.checkout('.3.1-build')
assert_custom()
self.clue.git.checkout('master')
assert_master()
self.clue.feature.checkout('test')
assert_features_file()
self.clue.git.checkout('.3.1-build')
assert_custom()
self.clue.git.checkout('master')
assert_master()
self.clue.feature.checkout('test2')
assert_features_file()
self.clue.git.checkout('master')
assert_master()
with misc_repo_dir:
git.checkout('0.6')
self.assertIn('0.6', git.status())
self.clue.git.checkout('default')
assert_master()
def test_rebase(self):
branch = '3.2.1-build'
base = branch
core_repo_dir, _, _ = self._install_repo_types_with_branches(
branch=branch,
base=base)
# rebase with no "active" feature should not do anything
output = self.clue.git.rebase().stdout.strip()
self.assertEqual(len(output), 0)
# only "active" feature repos should be affected
self.clue.feature.checkout('test')
output = self.clue.git.rebase().stdout.strip()
self.assertEqual(len(output.split('\n')), 1)
self.assertIn('cloudify-rest-client', output)
self.assertIn('Current branch 3.2.1-build is up to date.', output)
# test repo type consideration (.2.1-build for core type should
# transform to 3.2.1-build)
output = self.clue.git.rebase().stdout.strip()
self.assertEqual(len(output.split('\n')), 1)
self.assertIn('cloudify-rest-client', output)
self.assertIn('Current branch 3.2.1-build is up to date.', output)
        # being on a different branch than the one from the active state
        # should result in a warning, and no state change
with core_repo_dir:
git.checkout('3.3')
output = self.clue.git.rebase().stdout.strip()
self.assertEqual(len(output.split('\n')), 1)
self.assertIn('cloudify-rest-client', output)
self.assertIn('does not match the active feature branch', output)
# Unclean re-bases should be aborted
self._update_features_yaml(branch=branch, base='master')
self.clue.feature.checkout('test')
output = self.clue.git.rebase().stdout.strip()
self.assertIn('Failed rebase, aborting', output)
self.assertFalse((core_repo_dir / '.git' / 'rebase-apply').isdir())
# Test default master base
self._update_features_yaml(branch='master')
self.clue.feature.checkout('test')
output = self.clue.git.rebase().stdout.strip()
self.assertEqual(len(output.split('\n')), 1)
self.assertIn('cloudify-rest-client', output)
self.assertIn('Current branch master is up to date.', output)
def test_squash(self):
branch = 'test_branch'
core_repo_dir, _, _ = self._install_repo_types_with_branches(
branch=branch)
# squash with no "active" feature should not do anything
output = self.clue.git.squash().stdout.strip()
self.assertEqual(len(output), 0)
with core_repo_dir:
git.checkout.bake(b=True)(branch)
temp_file = core_repo_dir / 'temp'
temp_file.write_text('temp')
commit_message = 'real_commit'
git.add(temp_file)
git.commit('-m', commit_message)
initial_sha = self._current_sha()
self._make_modifications(core_repo_dir)
after_commits_sha = self._current_sha()
self.assertNotEqual(initial_sha, after_commits_sha)
# squash command requires active feature
self.clue.feature.checkout('test')
# Test squash when there is more than 1 commit
self.clue.git.squash()
after_squash_sha = self._current_sha()
with core_repo_dir:
self.assertNotEqual(after_commits_sha, self._current_sha())
current_commit_message = self._log_message(after_squash_sha)
self.assertEqual(commit_message, current_commit_message)
# Test squash with no expected change (previous changes were squashed
# into 1 commit)
self.clue.git.squash()
self.assertEqual(after_squash_sha, self._current_sha())
        # test the .3.1-build => 3.3.1-build transformation by examining
        # the error message of an illegal squash
self._update_features_yaml(branch=branch, base='.3.1-build')
self.clue.feature.checkout('test')
with self.assertRaises(sh.ErrorReturnCode) as c:
self.clue.git.squash()
self.assertIn('3.3.1-build', c.exception.stdout)
def test_reset(self):
core_repo_dir, _, _ = self._install_repo_types_with_branches()
# reset with no "active" feature should not do anything
output = self.clue.git.reset().stdout.strip()
self.assertEqual(len(output), 0)
# reset command requires active feature
self.clue.feature.checkout('test')
def test_reset(hard=False, origin=None):
with core_repo_dir:
initial_sha = self._current_sha()
self._make_modifications(repo_dir=core_repo_dir,
skip_last_commit=hard)
self.assertNotEqual(initial_sha, self._current_sha())
command = self.clue.git.reset.bake(hard=hard)
if origin:
command.bake(origin=origin)
output = command().stdout.strip()
if hard:
self.assertNotIn('Unstaged changes', output)
with core_repo_dir:
self.assertEqual(initial_sha, self._current_sha())
test_reset(hard=False)
test_reset(hard=True)
test_reset(origin='origin')
def test_diff(self):
core, _, _ = self._install_repo_types_with_branches()
self.clue.git.checkout('.3.1-build')
self.clue.git.checkout('.3-build')
revision_range = '.3-build...3.1-build'
# test revision range
output = self.clue.git.diff(r=revision_range).stdout.strip()
self.assertIn('cloudify-rest-client', output)
self.assertIn('cloudify-script-plugin', output)
self.assertNotIn('flask-securest', output)
self.assertNotIn('ERROR', output)
self.clue.feature.checkout('test')
# test active
output = self.clue.git.diff(r=revision_range, a=True).stdout.strip()
self.assertIn('cloudify-rest-client', output)
self.assertNotIn('cloudify-script-plugin', output)
self.assertNotIn('flask-securest', output)
self.assertNotIn('ERROR', output)
with core:
git.reset('HEAD~')
# test plain
output = self.clue.git.diff(a=True).stdout.strip()
self.assertIn('cloudify-rest-client', output)
self.assertNotIn('ERROR', output)
output = self.clue.git.diff(a=True, c=True).stdout.strip()
self.assertEqual(0, len(output))
# test cached
with core:
git.add('.')
output = self.clue.git.diff(a=True, c=True).stdout.strip()
self.assertIn('cloudify-rest-client', output)
self.assertNotIn('ERROR', output)
output = self.clue.git.diff(a=True).stdout.strip()
self.assertEqual(0, len(output))
def _install(self, repo=None, repo_base=None, properties=None,
git_config=None, clone_method=None):
properties = properties or {}
repo = repo or 'cloudify-rest-client'
if repo_base:
repo_dir = self.repos_dir / repo_base / repo
else:
repo_dir = self.repos_dir / repo
repos = {
repo: {'python': False, 'properties': properties, 'type': 'core'}
}
self.clue_install(repos=repos, git_config=git_config,
clone_method=clone_method)
return repo_dir
def _install_repo_types_with_branches(self, branch='3.2.1-build',
base=None):
git_config = {
'user.name': 'John Doe',
'user.email': '[email protected]'
}
core, plugin, misc = self._install_repo_types(git_config=git_config)
self._update_features_yaml(branch, base)
return core, plugin, misc
def _update_features_yaml(self, branch, base=None):
test_branches = {
'repos': {
'cloudify-rest-client': branch
}
}
if base:
test_branches['base'] = base
features_file = self.workdir / 'features.yaml'
features_file.write_text(yaml.safe_dump({
'test': test_branches,
}))
def _install_repo_types(self, git_config=None):
core_repo = 'cloudify-rest-client'
core_repo_dir = self.repos_dir / core_repo
plugin_repo = 'cloudify-script-plugin'
plugin_repo_dir = self.repos_dir / plugin_repo
misc_repo = 'flask-securest'
misc_repo_dir = self.repos_dir / misc_repo
repos = {
core_repo: {'type': 'core', 'python': False},
plugin_repo: {'type': 'plugin', 'python': False},
misc_repo: {'python': False}
}
self.clue_install(repos=repos, git_config=git_config)
return core_repo_dir, plugin_repo_dir, misc_repo_dir
def _current_sha(self):
return git('rev-parse', 'HEAD').stdout.strip()
def _make_modifications(self, repo_dir, count=3, skip_last_commit=False):
with repo_dir:
tox_ini = repo_dir / 'tox.ini'
for i in range(count):
tox_ini_text = tox_ini.text()
tox_ini_text = '{}\n{}'.format(tox_ini_text, i)
tox_ini.write_text(tox_ini_text)
if i == count - 1 and skip_last_commit:
continue
git.commit('-am', str(i))
def _log_message(self, sha=None):
if not sha:
sha = self._current_sha()
return git.bake(no_pager=True).log(sha, n=1,
format='%B').stdout.strip()
| dankilman/clue | clue/tests/test_git.py | Python | apache-2.0 | 16,458 | 0 |