repo_name (stringlengths 7-94) | repo_path (stringlengths 4-237) | repo_head_hexsha (stringlengths 40-40) | content (stringlengths 10-680k) | apis (stringlengths 2-840k) |
---|---|---|---|---|
zsolt-beringer/osm-gimmisn | tests/test_webframe.py | b0cbf2e88c1846ef49e33fd32aeb6b4ecabea4c0 | #!/usr/bin/env python3
#
# Copyright (c) 2019 Miklos Vajna and contributors.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The test_webframe module covers the webframe module."""
from typing import List
from typing import TYPE_CHECKING
from typing import Tuple
from typing import cast
import configparser
import datetime
import os
import unittest
import unittest.mock
import time
# pylint: disable=unused-import
import yattag
import webframe
if TYPE_CHECKING:
# pylint: disable=no-name-in-module,import-error,unused-import
from wsgiref.types import StartResponse # noqa: F401
class TestHandleStatic(unittest.TestCase):
"""Tests handle_static()."""
def test_happy(self) -> None:
"""Tests the happy path: css case."""
content, content_type = webframe.handle_static("/osm/static/osm.css")
self.assertTrue(len(content))
self.assertEqual(content_type, "text/css")
def test_javascript(self) -> None:
"""Tests the javascript case."""
content, content_type = webframe.handle_static("/osm/static/sorttable.js")
self.assertTrue(len(content))
self.assertEqual(content_type, "application/x-javascript")
def test_else(self) -> None:
"""Tests the case when the content type is not recognized."""
content, content_type = webframe.handle_static("/osm/static/test.xyz")
self.assertFalse(len(content))
self.assertFalse(len(content_type))
class TestHandleException(unittest.TestCase):
"""Tests handle_exception()."""
def test_happy(self) -> None:
"""Tests the happy path."""
environ = {
"PATH_INFO": "/"
}
def start_response(status: str, response_headers: List[Tuple[str, str]]) -> None:
self.assertTrue(status.startswith("500"))
header_dict = dict(response_headers)
self.assertEqual(header_dict["Content-type"], "text/html; charset=utf-8")
try:
int("a")
# pylint: disable=broad-except
except Exception:
callback = cast('StartResponse', start_response)
output_iterable = webframe.handle_exception(environ, callback)
output_list = cast(List[bytes], output_iterable)
self.assertTrue(output_list)
output = output_list[0].decode('utf-8')
self.assertIn("ValueError", output)
return
self.fail()
class TestLocalToUiTz(unittest.TestCase):
"""Tests local_to_ui_tz()."""
def test_happy(self) -> None:
"""Tests the happy path."""
def get_abspath(path: str) -> str:
if os.path.isabs(path):
return path
return os.path.join(os.path.dirname(__file__), path)
def get_config() -> configparser.ConfigParser:
config = configparser.ConfigParser()
config.read_dict({"wsgi": {"timezone": "Europe/Budapest"}})
return config
with unittest.mock.patch('util.get_abspath', get_abspath):
with unittest.mock.patch('webframe.get_config', get_config):
local_dt = datetime.datetime.fromtimestamp(0)
ui_dt = webframe.local_to_ui_tz(local_dt)
if time.strftime('%Z%z') == "CET+0100":
self.assertEqual(ui_dt.timestamp(), 0)
class TestFillMissingHeaderItems(unittest.TestCase):
"""Tests fill_missing_header_items()."""
def test_happy(self) -> None:
"""Tests the happy path."""
streets = "no"
relation_name = "gazdagret"
items: List[yattag.doc.Doc] = []
webframe.fill_missing_header_items(streets, relation_name, items)
html = items[0].getvalue()
self.assertIn("Missing house numbers", html)
self.assertNotIn("Missing streets", html)
if __name__ == '__main__':
unittest.main()
| [((114, 4, 114, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((34, 32, 34, 77), 'webframe.handle_static', 'webframe.handle_static', ({(34, 55, 34, 76): '"""/osm/static/osm.css"""'}, {}), "('/osm/static/osm.css')", False, 'import webframe\n'), ((40, 32, 40, 82), 'webframe.handle_static', 'webframe.handle_static', ({(40, 55, 40, 81): '"""/osm/static/sorttable.js"""'}, {}), "('/osm/static/sorttable.js')", False, 'import webframe\n'), ((46, 32, 46, 78), 'webframe.handle_static', 'webframe.handle_static', ({(46, 55, 46, 77): '"""/osm/static/test.xyz"""'}, {}), "('/osm/static/test.xyz')", False, 'import webframe\n'), ((107, 8, 107, 73), 'webframe.fill_missing_header_items', 'webframe.fill_missing_header_items', ({(107, 43, 107, 50): 'streets', (107, 52, 107, 65): 'relation_name', (107, 67, 107, 72): 'items'}, {}), '(streets, relation_name, items)', False, 'import webframe\n'), ((83, 15, 83, 34), 'os.path.isabs', 'os.path.isabs', ({(83, 29, 83, 33): 'path'}, {}), '(path)', False, 'import os\n'), ((88, 21, 88, 48), 'configparser.ConfigParser', 'configparser.ConfigParser', ({}, {}), '()', False, 'import configparser\n'), ((92, 13, 92, 65), 'unittest.mock.patch', 'unittest.mock.patch', ({(92, 33, 92, 51): '"""util.get_abspath"""', (92, 53, 92, 64): 'get_abspath'}, {}), "('util.get_abspath', get_abspath)", False, 'import unittest\n'), ((68, 23, 68, 60), 'typing.cast', 'cast', ({(68, 28, 68, 43): '"""StartResponse"""', (68, 45, 68, 59): 'start_response'}, {}), "('StartResponse', start_response)", False, 'from typing import cast\n'), ((69, 30, 69, 74), 'webframe.handle_exception', 'webframe.handle_exception', ({(69, 56, 69, 63): 'environ', (69, 65, 69, 73): 'callback'}, {}), '(environ, callback)', False, 'import webframe\n'), ((70, 26, 70, 60), 'typing.cast', 'cast', ({(70, 31, 70, 42): 'List[bytes]', (70, 44, 70, 59): 'output_iterable'}, {}), '(List[bytes], output_iterable)', False, 'from typing import cast\n'), ((85, 32, 85, 57), 'os.path.dirname', 'os.path.dirname', ({(85, 48, 85, 56): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((93, 17, 93, 71), 'unittest.mock.patch', 'unittest.mock.patch', ({(93, 37, 93, 58): '"""webframe.get_config"""', (93, 60, 93, 70): 'get_config'}, {}), "('webframe.get_config', get_config)", False, 'import unittest\n'), ((94, 27, 94, 61), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', ({(94, 59, 94, 60): '0'}, {}), '(0)', False, 'import datetime\n'), ((95, 24, 95, 57), 'webframe.local_to_ui_tz', 'webframe.local_to_ui_tz', ({(95, 48, 95, 56): 'local_dt'}, {}), '(local_dt)', False, 'import webframe\n'), ((96, 19, 96, 40), 'time.strftime', 'time.strftime', ({(96, 33, 96, 39): '"""%Z%z"""'}, {}), "('%Z%z')", False, 'import time\n')] |
nimatest1234/telegram_spotify_downloader_bot | spotify.py | 7e0a9ba32ee219752582b917867600653337f3d1 | from __future__ import unicode_literals
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
import requests
from youtube_search import YoutubeSearch
import youtube_dl
import eyed3.id3
import eyed3
import lyricsgenius
import telepot
spotifyy = spotipy.Spotify(
client_credentials_manager=SpotifyClientCredentials(client_id='a145db3dcd564b9592dacf10649e4ed5',
client_secret='389614e1ec874f17b8c99511c7baa2f6'))
genius = lyricsgenius.Genius('biZZReO7F98mji5oz3cE0FiIG73Hh07qoXSIzYSGNN3GBsnY-eUrPAVSdJk_0_de')
token = 'token bot'
bot = telepot.Bot(token)
def DOWNLOADMP3(link, chat_id):
#Get MetaData
results = spotifyy.track(link)
song = results['name']
print('[Spotify]MetaData Found!')
artist = results['artists'][0]['name']
YTSEARCH = str(song + " " + artist)
artistfinder = results['artists']
tracknum = results['track_number']
album = results['album']['name']
realese_date = int(results['album']['release_date'][:4])
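# Build a "( Ft. ...)" suffix from any additional artists credited on the track.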
if len(artistfinder) > 1:
fetures = "( Ft."
for lomi in range(0, len(artistfinder)):
try:
if lomi < len(artistfinder) - 2:
artistft = artistfinder[lomi + 1]['name'] + ", "
fetures += artistft
else:
artistft = artistfinder[lomi + 1]['name'] + ")"
fetures += artistft
except:
pass
else:
fetures = ""
time_duration = ""
time_duration1 = ""
time_duration2 = ""
time_duration3 = ""
millis = results['duration_ms']
millis = int(millis)
seconds = (millis / 1000) % 60
minutes = (millis / (1000 * 60)) % 60
seconds = int(seconds)
minutes = int(minutes)
if seconds >= 10:
if seconds < 59:
time_duration = "{0}:{1}".format(minutes, seconds)
time_duration1 = "{0}:{1}".format(minutes, seconds + 1)
time_duration2 = "{0}:{1}".format(minutes, seconds - 1)
if seconds == 10:
time_duration2 = "{0}:0{1}".format(minutes, seconds - 1)
time_duration3 = "{0}:{1}".format(minutes, seconds + 2)
elif seconds < 58:
time_duration3 = "{0}:{1}".format(minutes, seconds + 2)
time_duration2 = "{0}:{1}".format(minutes, seconds - 1)
elif seconds == 58:
time_duration3 = "{0}:0{1}".format(minutes + 1, seconds - 58)
time_duration2 = "{0}:{1}".format(minutes, seconds - 1)
else:
time_duration2 = "{0}:{1}".format(minutes, seconds - 1)
else:
time_duration1 = "{0}:0{1}".format(minutes + 1, seconds - 59)
if seconds == 59:
time_duration3 = "{0}:0{1}".format(minutes + 1, seconds - 58)
else:
time_duration = "{0}:0{1}".format(minutes, seconds)
time_duration1 = "{0}:0{1}".format(minutes, seconds + 1)
if seconds < 8:
time_duration3 = "{0}:0{1}".format(minutes, seconds + 2)
time_duration2 = "{0}:0{1}".format(minutes, seconds - 1)
elif seconds == 9 or seconds == 8:
time_duration3 = "{0}:{1}".format(minutes, seconds + 2)
elif seconds == 0:
time_duration2 = "{0}:{1}".format(minutes - 1, seconds + 59)
time_duration3 = "{0}:0{1}".format(minutes, seconds + 2)
else:
time_duration2 = "{0}:0{1}".format(minutes, seconds - 1)
time_duration3 = "{0}:0{1}".format(minutes, seconds + 2)
trackname = song + fetures
#Download Cover
response = requests.get(results['album']['images'][0]['url'])
DIRCOVER = "songpicts//" + trackname + ".png"
file = open(DIRCOVER, "wb")
file.write(response.content)
file.close()
#search for music on youtube
results = list(YoutubeSearch(str(YTSEARCH)).to_dict())
LINKASLI = ''
for URLSSS in results:
timeyt = URLSSS["duration"]
print(URLSSS['title'])
if timeyt == time_duration or timeyt == time_duration1:
LINKASLI = URLSSS['url_suffix']
break
elif timeyt == time_duration2 or timeyt == time_duration3:
LINKASLI = URLSSS['url_suffix']
break
YTLINK = str("https://www.youtube.com/" + LINKASLI)
print('[Youtube]song found!')
print(f'[Youtube]Link song on youtube : {YTLINK}')
#Download Music from youtube
options = {
# PERMANENT options
'format': 'bestaudio/best',
'keepvideo': False,
'outtmpl': f'song//{trackname}.*',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '320'
}]
}
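# youtube_dl grabs the best available audio stream and the FFmpeg
# post-processor converts it to a 320 kbps MP3 file on disk.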
with youtube_dl.YoutubeDL(options) as mp3:
mp3.download([YTLINK])
aud = eyed3.load(f"song//{trackname}.mp3")
print('[Youtube]Song Downloaded!')
aud.tag.artist = artist
aud.tag.album = album
aud.tag.album_artist = artist
aud.tag.title = trackname
aud.tag.track_num = tracknum
aud.tag.year = realese_date
try:
songok = genius.search_song(song, artist)
aud.tag.lyrics.set(songok.lyrics)
print('[Genius]Song lyric Found!')
except:
print('[Genius]Song lyric NOT Found!')
aud.tag.images.set(3, open("songpicts//" + trackname + ".png", 'rb').read(), 'image/png')
aud.tag.save()
bot.sendAudio(chat_id, open(f'song//{trackname}.mp3', 'rb'), title=trackname)
print('[Telegram]Song sent!')
def album(link):
results = spotifyy.album_tracks(link)
albums = results['items']
while results['next']:
results = spotifyy.next(results)
albums.extend(results['items'])
print('[Spotify]Album Found!')
return albums
def artist(link):
results = spotifyy.artist_top_tracks(link)
albums = results['tracks']
print('[Spotify]Artist Found!')
return albums
def searchalbum(track):
results = spotifyy.search(track)
return results['tracks']['items'][0]['album']['external_urls']['spotify']
def playlist(link):
results = spotifyy.playlist_tracks(link)
print('[Spotify]Playlist Found!')
return results['items']
def searchsingle(track):
results = spotifyy.search(track)
return results['tracks']['items'][0]['href']
def searchartist(searchstr):
results = spotifyy.search(searchstr)
return results['tracks']['items'][0]['artists'][0]["external_urls"]['spotify']
| [((15, 9, 15, 96), 'lyricsgenius.Genius', 'lyricsgenius.Genius', ({(15, 29, 15, 95): '"""biZZReO7F98mji5oz3cE0FiIG73Hh07qoXSIzYSGNN3GBsnY-eUrPAVSdJk_0_de"""'}, {}), "(\n 'biZZReO7F98mji5oz3cE0FiIG73Hh07qoXSIzYSGNN3GBsnY-eUrPAVSdJk_0_de')", False, 'import lyricsgenius\n'), ((19, 6, 19, 24), 'telepot.Bot', 'telepot.Bot', ({(19, 18, 19, 23): 'token'}, {}), '(token)', False, 'import telepot\n'), ((97, 15, 97, 65), 'requests.get', 'requests.get', ({(97, 28, 97, 64): "results['album']['images'][0]['url']"}, {}), "(results['album']['images'][0]['url'])", False, 'import requests\n'), ((134, 10, 134, 46), 'eyed3.load', 'eyed3.load', ({(134, 21, 134, 45): 'f"""song//{trackname}.mp3"""'}, {}), "(f'song//{trackname}.mp3')", False, 'import eyed3\n'), ((13, 31, 14, 105), 'spotipy.oauth2.SpotifyClientCredentials', 'SpotifyClientCredentials', (), '', False, 'from spotipy.oauth2 import SpotifyClientCredentials\n'), ((131, 9, 131, 38), 'youtube_dl.YoutubeDL', 'youtube_dl.YoutubeDL', ({(131, 30, 131, 37): 'options'}, {}), '(options)', False, 'import youtube_dl\n')] |
Tillsten/atom | tests/test_atomdict.py | 19b6291f7d3c9b3828dcd73e900b8dcbc2ddf92d | #------------------------------------------------------------------------------
# Copyright (c) 2018-2019, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
#------------------------------------------------------------------------------
"""Test the typed dictionary.
"""
import sys
import pytest
from atom.api import Atom, Dict, Int, atomdict
@pytest.fixture
def atom_dict():
"""Atom with different Dict members.
"""
class DictAtom(Atom):
untyped = Dict()
keytyped = Dict(Int())
valuetyped = Dict(value=Int())
fullytyped = Dict(Int(), Int())
untyped_default = Dict(default={1: 1})
keytyped_default = Dict(Int(), default={1: 1})
valuetyped_default = Dict(value=Int(), default={1: 1})
fullytyped_default = Dict(Int(), Int(), default={1: 1})
return DictAtom()
MEMBERS = ['untyped', 'keytyped', 'valuetyped', 'fullytyped',
'untyped_default', 'keytyped_default', 'valuetyped_default',
'fullytyped_default']
@pytest.mark.parametrize('member', MEMBERS)
def test_instance(atom_dict, member):
"""Test the repr.
"""
assert isinstance(getattr(atom_dict, member), atomdict)
@pytest.mark.parametrize('member', MEMBERS)
def test_repr(atom_dict, member):
"""Test the repr.
"""
d = getattr(atom_dict.__class__, member).default_value_mode[1]
if not d:
d = {i: i**2 for i in range(10)}
setattr(atom_dict, member, d)
assert repr(getattr(atom_dict, member)) == repr(d)
@pytest.mark.parametrize('member', MEMBERS)
def test_len(atom_dict, member):
"""Test the len.
"""
d = getattr(atom_dict.__class__, member).default_value_mode[1]
if not d:
d = {i: i**2 for i in range(10)}
setattr(atom_dict, member, d)
assert len(getattr(atom_dict, member)) == len(d)
@pytest.mark.parametrize('member', MEMBERS)
def test_contains(atom_dict, member):
"""Test __contains__.
"""
d = {i: i**2 for i in range(10)}
setattr(atom_dict, member, d)
assert 5 in getattr(atom_dict, member)
del getattr(atom_dict, member)[5]
assert 5 not in getattr(atom_dict, member)
@pytest.mark.parametrize('member', MEMBERS)
def test_keys(atom_dict, member):
"""Test the keys.
"""
d = getattr(atom_dict.__class__, member).default_value_mode[1]
if not d:
d = {i: i**2 for i in range(10)}
setattr(atom_dict, member, d)
assert getattr(atom_dict, member).keys() == d.keys()
@pytest.mark.parametrize('member', MEMBERS)
def test_copy(atom_dict, member):
"""Test copy.
"""
d = getattr(atom_dict.__class__, member).default_value_mode[1]
if not d:
d = {i: i**2 for i in range(10)}
setattr(atom_dict, member, d)
assert getattr(atom_dict, member).copy() == d
def test_setitem(atom_dict):
"""Test setting items.
"""
atom_dict.untyped[''] = 1
assert atom_dict.untyped[''] == 1
atom_dict.keytyped[1] = ''
assert atom_dict.keytyped[1] == ''
with pytest.raises(TypeError):
atom_dict.keytyped[''] = 1
atom_dict.valuetyped[1] = 1
assert atom_dict.valuetyped[1] == 1
with pytest.raises(TypeError):
atom_dict.valuetyped[''] = ''
atom_dict.fullytyped[1] = 1
assert atom_dict.fullytyped[1] == 1
with pytest.raises(TypeError):
atom_dict.fullytyped[''] = 1
with pytest.raises(TypeError):
atom_dict.fullytyped[1] = ''
def test_setdefault(atom_dict):
"""Test using setdefault.
"""
assert atom_dict.untyped.setdefault('', 1) == 1
assert atom_dict.untyped.setdefault('', 2) == 1
assert atom_dict.untyped[''] == 1
assert atom_dict.keytyped.setdefault(1, '') == ''
assert atom_dict.keytyped[1] == ''
with pytest.raises(TypeError):
atom_dict.keytyped.setdefault('', 1)
assert atom_dict.valuetyped.setdefault(1, 1) == 1
assert atom_dict.valuetyped.setdefault(1, '') == 1
assert atom_dict.valuetyped[1] == 1
with pytest.raises(TypeError):
atom_dict.valuetyped.setdefault(2, '')
assert atom_dict.fullytyped.setdefault(1, 1) == 1
assert atom_dict.fullytyped.setdefault(1, '') == 1
assert atom_dict.fullytyped[1] == 1
with pytest.raises(TypeError):
atom_dict.fullytyped.setdefault('', 1)
with pytest.raises(TypeError):
atom_dict.fullytyped.setdefault(2, '')
def test_update(atom_dict):
"""Test update a dict.
"""
atom_dict.untyped.update({'': 1})
assert atom_dict.untyped[''] == 1
atom_dict.untyped.update([('1', 1)])
assert atom_dict.untyped['1'] == 1
atom_dict.keytyped.update({1: 1})
assert atom_dict.keytyped[1] == 1
atom_dict.keytyped.update([(2, 1)])
assert atom_dict.keytyped[1] == 1
with pytest.raises(TypeError):
atom_dict.keytyped.update({'': 1})
atom_dict.valuetyped.update({1: 1})
assert atom_dict.valuetyped[1] == 1
atom_dict.valuetyped.update([(2, 1)])
assert atom_dict.valuetyped[1] == 1
with pytest.raises(TypeError):
atom_dict.valuetyped.update({'': ''})
atom_dict.fullytyped.update({1: 1})
assert atom_dict.fullytyped[1] == 1
atom_dict.fullytyped.update([(2, 1)])
assert atom_dict.fullytyped[1] == 1
with pytest.raises(TypeError):
atom_dict.fullytyped.update({'': 1})
with pytest.raises(TypeError):
atom_dict.fullytyped.update({'': ''})
| [((41, 1, 41, 43), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(41, 25, 41, 33): '"""member"""', (41, 35, 41, 42): 'MEMBERS'}, {}), "('member', MEMBERS)", False, 'import pytest\n'), ((49, 1, 49, 43), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(49, 25, 49, 33): '"""member"""', (49, 35, 49, 42): 'MEMBERS'}, {}), "('member', MEMBERS)", False, 'import pytest\n'), ((61, 1, 61, 43), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(61, 25, 61, 33): '"""member"""', (61, 35, 61, 42): 'MEMBERS'}, {}), "('member', MEMBERS)", False, 'import pytest\n'), ((73, 1, 73, 43), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(73, 25, 73, 33): '"""member"""', (73, 35, 73, 42): 'MEMBERS'}, {}), "('member', MEMBERS)", False, 'import pytest\n'), ((84, 1, 84, 43), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(84, 25, 84, 33): '"""member"""', (84, 35, 84, 42): 'MEMBERS'}, {}), "('member', MEMBERS)", False, 'import pytest\n'), ((96, 1, 96, 43), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(96, 25, 96, 33): '"""member"""', (96, 35, 96, 42): 'MEMBERS'}, {}), "('member', MEMBERS)", False, 'import pytest\n'), ((24, 18, 24, 24), 'atom.api.Dict', 'Dict', ({}, {}), '()', False, 'from atom.api import Atom, Dict, Int, atomdict\n'), ((28, 26, 28, 46), 'atom.api.Dict', 'Dict', (), '', False, 'from atom.api import Atom, Dict, Int, atomdict\n'), ((117, 9, 117, 33), 'pytest.raises', 'pytest.raises', ({(117, 23, 117, 32): 'TypeError'}, {}), '(TypeError)', False, 'import pytest\n'), ((122, 9, 122, 33), 'pytest.raises', 'pytest.raises', ({(122, 23, 122, 32): 'TypeError'}, {}), '(TypeError)', False, 'import pytest\n'), ((127, 9, 127, 33), 'pytest.raises', 'pytest.raises', ({(127, 23, 127, 32): 'TypeError'}, {}), '(TypeError)', False, 'import pytest\n'), ((129, 9, 129, 33), 'pytest.raises', 'pytest.raises', ({(129, 23, 129, 32): 'TypeError'}, {}), '(TypeError)', False, 'import pytest\n'), ((144, 9, 144, 33), 'pytest.raises', 'pytest.raises', ({(144, 23, 144, 32): 'TypeError'}, {}), '(TypeError)', False, 'import pytest\n'), ((150, 9, 150, 33), 'pytest.raises', 'pytest.raises', ({(150, 23, 150, 32): 'TypeError'}, {}), '(TypeError)', False, 'import pytest\n'), ((156, 9, 156, 33), 'pytest.raises', 'pytest.raises', ({(156, 23, 156, 32): 'TypeError'}, {}), '(TypeError)', False, 'import pytest\n'), ((158, 9, 158, 33), 'pytest.raises', 'pytest.raises', ({(158, 23, 158, 32): 'TypeError'}, {}), '(TypeError)', False, 'import pytest\n'), ((175, 9, 175, 33), 'pytest.raises', 'pytest.raises', ({(175, 23, 175, 32): 'TypeError'}, {}), '(TypeError)', False, 'import pytest\n'), ((182, 9, 182, 33), 'pytest.raises', 'pytest.raises', ({(182, 23, 182, 32): 'TypeError'}, {}), '(TypeError)', False, 'import pytest\n'), ((189, 9, 189, 33), 'pytest.raises', 'pytest.raises', ({(189, 23, 189, 32): 'TypeError'}, {}), '(TypeError)', False, 'import pytest\n'), ((191, 9, 191, 33), 'pytest.raises', 'pytest.raises', ({(191, 23, 191, 32): 'TypeError'}, {}), '(TypeError)', False, 'import pytest\n'), ((25, 24, 25, 29), 'atom.api.Int', 'Int', ({}, {}), '()', False, 'from atom.api import Atom, Dict, Int, atomdict\n'), ((27, 26, 27, 31), 'atom.api.Int', 'Int', ({}, {}), '()', False, 'from atom.api import Atom, Dict, Int, atomdict\n'), ((27, 33, 27, 38), 'atom.api.Int', 'Int', ({}, {}), '()', False, 'from atom.api import Atom, Dict, Int, atomdict\n'), ((29, 32, 29, 37), 'atom.api.Int', 'Int', ({}, {}), '()', False, 'from atom.api import Atom, Dict, Int, atomdict\n'), ((31, 34, 31, 39), 'atom.api.Int', 'Int', 
({}, {}), '()', False, 'from atom.api import Atom, Dict, Int, atomdict\n'), ((31, 41, 31, 46), 'atom.api.Int', 'Int', ({}, {}), '()', False, 'from atom.api import Atom, Dict, Int, atomdict\n'), ((26, 32, 26, 37), 'atom.api.Int', 'Int', ({}, {}), '()', False, 'from atom.api import Atom, Dict, Int, atomdict\n'), ((30, 40, 30, 45), 'atom.api.Int', 'Int', ({}, {}), '()', False, 'from atom.api import Atom, Dict, Int, atomdict\n')] |
eggveloper/dippy.core | dippy/core/timestamp.py | 8ad613a50bcbf52132de1ece889e22fa4aba3a44 | from datetime import datetime
class Timestamp(float):
def __new__(cls, value=None):
return super().__new__(
cls, datetime.utcnow().timestamp() if value is None else value
)
def to_date(self) -> datetime:
return datetime.utcfromtimestamp(self)
def __repr__(self):
return f"<{type(self).__name__} {self}>"
def __str__(self):
return self.to_date().isoformat(" ")
| [((11, 15, 11, 46), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', ({(11, 41, 11, 45): 'self'}, {}), '(self)', False, 'from datetime import datetime\n'), ((7, 17, 7, 34), 'datetime.datetime.utcnow', 'datetime.utcnow', ({}, {}), '()', False, 'from datetime import datetime\n')] |
tushortz/biblelover | bible/admin.py | 8ef4980d7f68e4037874373fb0ecde12d2d63d76 | from django.contrib import admin
from bible.models import Bible, VerseOfTheDay
@admin.register(Bible)
class BibleAdmin(admin.ModelAdmin):
list_display = ['__str__', 'text']
readonly_fields = ['book', 'chapter', 'verse', 'text', 'category']
search_fields = ['text', 'book', 'chapter']
list_filter = ['category', 'book']
def has_add_permission(self, request):
return False
def has_delete_permission(self, request, obj=None):
return False
def has_change_permission(self, request, obj=None):
return False
@admin.register(VerseOfTheDay)
class VerseOfTheDayAdmin(admin.ModelAdmin):
autocomplete_fields = ['verse']
raw_id_fields = ['verse']
| [((5, 1, 5, 22), 'django.contrib.admin.register', 'admin.register', ({(5, 16, 5, 21): 'Bible'}, {}), '(Bible)', False, 'from django.contrib import admin\n'), ((22, 1, 22, 30), 'django.contrib.admin.register', 'admin.register', ({(22, 16, 22, 29): 'VerseOfTheDay'}, {}), '(VerseOfTheDay)', False, 'from django.contrib import admin\n')] |
Procrat/typy | typy/nodes.py | 668cedb7f929256a09f565af9ee43c02889bec3f | """
Our own implementation of an abstract syntax tree (AST).
The convert function recursively converts a Python AST (from the module `ast`)
to our own AST (of the class `Node`).
"""
import ast
from logging import debug
from typy.builtin import data_types
from typy.exceptions import NotYetSupported, NoSuchAttribute, NotIterable
from typy import types
class Node:
def __init__(self, type_map, ast_node):
self.type_map = type_map
self._ast_fields = ast_node._fields
def check(self):
"""Must be overriden in subtype."""
raise NotYetSupported('check call to', self)
def iter_fields(self):
for field in self._ast_fields:
try:
yield field, getattr(self, field)
except AttributeError:
pass
def iter_child_nodes(self):
for _name, field in self.iter_fields():
if isinstance(field, Node):
yield field
elif isinstance(field, list):
for item in field:
if isinstance(item, Node):
yield item
class FunctionDef(Node):
def __init__(self, type_map, ast_node):
if (ast_node.args.vararg is not None or
len(ast_node.args.kwonlyargs) > 0 or
len(ast_node.args.kw_defaults) > 0 or
ast_node.args.kwarg is not None or
len(ast_node.args.defaults) > 0):
raise NotYetSupported('default arguments and keyword arguments')
super().__init__(type_map, ast_node)
self.name = ast_node.name
self.params = [arg.arg for arg in ast_node.args.args]
self.body = [convert(type_map, stmt) for stmt in ast_node.body]
self._ast_fields = ('name', 'params', 'body')
def check(self):
debug('checking func def %s', self.name)
function = types.Function(self, self.type_map)
self.type_map.add_variable(self.name, function)
return data_types.None_()
def __repr__(self):
return 'def ' + self.name + '()'
class ClassDef(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.name = ast_node.name
self.body = [convert(type_map, stmt) for stmt in ast_node.body]
def check(self):
debug('checking class def %s', self.name)
class_namespace = self.type_map.enter_namespace(self.name)
for stmt in self.body:
stmt.check()
self.type_map.exit_namespace()
class_ = types.Class(self, self.type_map, class_namespace)
self.type_map.add_variable(self.name, class_)
return data_types.None_()
def __repr__(self):
return 'def ' + self.name
class Attribute(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.value = convert(type_map, ast_node.value)
self.attr = ast_node.attr
self.ctx = ast_node.ctx
def check(self):
debug('checking attr %s', self)
value_type = self.value.check()
debug('attr %r = %r', self, value_type)
if isinstance(self.ctx, ast.Load):
return value_type.get_attribute(self.attr)
elif isinstance(self.ctx, ast.Store):
return (value_type, self.attr)
else:
# TODO implement for Del, AugLoad, AugStore, Param
raise NotYetSupported('name context', self.ctx)
def __repr__(self):
return repr(self.value) + '.' + self.attr
class Name(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.id = ast_node.id
self.ctx = ast_node.ctx
def check(self):
debug('checking name %s', self.id)
if isinstance(self.ctx, ast.Load):
return self.type_map.find(self.id)
elif isinstance(self.ctx, ast.Store):
return self
else:
# TODO implement for Del, AugLoad, AugStore, Param
raise NotYetSupported('name context', self.ctx)
def __repr__(self):
return self.id
class Call(Node):
def __init__(self, type_map, ast_node):
if (len(ast_node.keywords) > 0 or
ast_node.starargs is not None or
ast_node.kwargs is not None):
raise NotYetSupported('keyword arguments and star arguments')
super().__init__(type_map, ast_node)
self.func = convert(type_map, ast_node.func)
self.args = [convert(type_map, expr) for expr in ast_node.args]
def check(self):
debug('checking call')
func = self.func.check()
args = [arg.check() for arg in self.args]
return func.check_call(args)
def __repr__(self):
return repr(self.func) + \
'(' + ', '.join(repr(x) for x in self.args) + ')'
class Expr(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.value = convert(type_map, ast_node.value)
def check(self):
debug('checking expr')
self.value.check()
return data_types.None_()
def __repr__(self):
return repr(self.value)
class Return(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.value = convert(type_map, ast_node.value)
def check(self):
debug('checking return')
return self.value.check()
def __repr__(self):
return 'return ' + repr(self.value)
class Module(Node, types.Type):
def __init__(self, type_map, ast_node):
Node.__init__(self, type_map, ast_node)
types.Type.__init__(self, type_map)
self.body = [convert(type_map, stmt) for stmt in ast_node.body]
def check(self):
debug('checking module')
self.module_namespace = self.type_map.enter_namespace('__main__')
debug('entering %r', self.type_map.current_namespace)
for stmt in self.body:
debug('still in %r', self.type_map.current_namespace)
stmt.check()
debug('leaving %r', self.type_map.current_namespace)
self.type_map.exit_namespace()
def get_attribute(self, name):
try:
return self.module_namespace[name]
except KeyError:
types.Type.get_attribute(self, name)
class Assign(Node):
def __init__(self, type_map, ast_node):
# TODO handle multiple targets
if len(ast_node.targets) > 1:
raise NotYetSupported('assignment with multiple targets')
super().__init__(type_map, ast_node)
self.target = convert(type_map, ast_node.targets[0])
self.value = convert(type_map, ast_node.value)
self._ast_fields = ('target', 'value')
def check(self):
debug('checking assign %r', self.target)
_assign(self.target, self.value, self.type_map)
return data_types.None_()
def __repr__(self):
return repr(self.target) + ' = ' + repr(self.value)
class Pass(Node):
def check(self):
debug('checking pass')
return data_types.None_()
def __repr__(self):
return 'pass'
class Not(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.value = convert(type_map, ast_node.value)
def check(self):
debug('checking not')
self.value.check()
return data_types.Bool()
def __repr__(self):
return 'not ' + repr(self.value)
class BoolOp(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.op = ast_node.op
self.values = [convert(type_map, value) for value in ast_node.values]
def check(self):
debug('checking boolop')
for value in self.values:
value.check()
# TODO return intersection of types?
return data_types.Bool()
def __repr__(self):
op_name = ' {} '.format(self.op)
return '(' + op_name.join(repr(val) for val in self.values) + ')'
class In(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.element = convert(type_map, ast_node.element)
self.container = convert(type_map, ast_node.container)
def check(self):
debug('checking in')
element = self.element.check()
container = self.container.check()
try:
container.call_magic_method('__contains__', element)
except NoSuchAttribute:
if not container.is_iterable():
raise NotIterable(container)
return data_types.Bool()
def __repr__(self):
return '{!r} in {!r}'.format(self.element, self.container)
class For(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.target = convert(type_map, ast_node.target)
self.iter = convert(type_map, ast_node.iter)
self.body = [convert(type_map, stmt) for stmt in ast_node.body]
self.orelse = [convert(type_map, clause) for clause in ast_node.orelse]
def check(self):
debug('checking for')
iterator = self.iter.check()
enclosed_type = iterator.get_enclosed_type()
_assign(self.target, enclosed_type, self.type_map)
for stmt in self.body:
stmt.check()
for stmt in self.orelse:
stmt.check()
# TODO return intersection of values of both branches
return data_types.None_()
def __repr__(self):
s = 'for {!r} in {!r}:\n '.format(self.target, self.iter)
s += '\n '.join(repr(stmt) for stmt in self.body)
if self.orelse:
s += 'else:\n '
s += '\n '.join(repr(stmt) for stmt in self.orelse)
return s
class If(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.test = convert(type_map, ast_node.test)
self.body = [convert(type_map, stmt) for stmt in ast_node.body]
self.orelse = [convert(type_map, stmt) for stmt in ast_node.orelse]
def check(self):
debug('checking if')
# TODO take isinstance into account (?)
# TODO real branching?
self.test.check()
for stmt in self.body:
stmt.check()
for stmt in self.orelse:
stmt.check()
# TODO return intersection of values of both branches
return data_types.None_()
def __repr__(self):
s = 'if {!r}:\n '.format(self.test)
s += '\n '.join(repr(stmt) for stmt in self.body)
if self.orelse:
s += 'else:\n '
s += '\n '.join(repr(stmt) for stmt in self.orelse)
return s
class IfExp(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.test = convert(type_map, ast_node.test)
self.body = convert(type_map, ast_node.body)
self.orelse = convert(type_map, ast_node.orelse)
def check(self):
debug('checking ifexp')
# TODO take isinstance into account (?)
self.test.check()
value1 = self.body.check()
value2 = self.orelse.check()
return types.Intersection(value1, value2)
def __repr__(self):
template = '{!r} if {!r} else {!r}'
return template.format(self.test, self.body, self.orelse)
class NameConstant(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.value = ast_node.value
def check(self):
debug('checking name constant %r', self.value)
if self.value is None:
return data_types.None_()
elif self.value is True or self.value is False:
return data_types.Bool()
else:
raise NotYetSupported('name constant', self.value)
def __repr__(self):
return repr(self.value)
class While(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.test = convert(type_map, ast_node.test)
self.body = [convert(type_map, stmt) for stmt in ast_node.body]
self.orelse = [convert(type_map, stmt) for stmt in ast_node.orelse]
def check(self):
debug('checking while')
# TODO take isinstance into account (?)
# TODO real branching?
self.test.check()
for stmt in self.body:
stmt.check()
for stmt in self.orelse:
stmt.check()
# TODO return intersection of values of both branches
return data_types.None_()
def __repr__(self):
s = 'while {!r}:\n '.format(self.test)
s += '\n '.join(repr(stmt) for stmt in self.body)
if self.orelse:
s += 'else:\n '
s += '\n '.join(repr(stmt) for stmt in self.orelse)
return s
class Break(Node):
def check(self):
debug('checking break')
return data_types.None_()
def __repr__(self):
return 'break'
class Continue(Node):
def check(self):
debug('checking continue')
return data_types.None_()
def __repr__(self):
return 'continue'
class Num(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.number_type = {
int: data_types.Int,
# float: data_types.Float,
# complex: data_types.Complex,
}[type(ast_node.n)]
def check(self):
debug('checking num')
return self.number_type()
class Tuple(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.elts = [convert(type_map, el) for el in ast_node.elts]
self.ctx = ast_node.ctx
def check(self):
debug('checking tuple %r', self)
if isinstance(self.ctx, ast.Load):
el_types = (el.check() for el in self.elts)
return types.Tuple(self.type_map, *el_types)
elif isinstance(self.ctx, ast.Store):
return self
else:
# TODO implement for Del, AugLoad, AugStore, Param
raise NotYetSupported('name context', self.ctx)
def __repr__(self):
return '(' + ', '.join(repr(el) for el in self.elts) + ')'
def _assign(target, value, type_map):
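# Record the value's type for the assignment target: a plain name is added to
# the type map, while an attribute target is set on its owner's type.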
value_type = value.check()
if isinstance(target, Name):
target_type = target.check()
type_map.add_variable(target_type.id, value_type)
elif isinstance(target, Attribute):
target_type, attr = target.check()
target_type.set_attribute(attr, value_type)
else:
raise NotYetSupported('assignment to', target)
def convert(type_map, node):
class_name = node.__class__.__name__
try:
# Try to convert to a node
class_ = globals()[class_name]
return class_(type_map, node)
except KeyError:
try:
# Try to convert to a builtin type
class_ = getattr(data_types, class_name)
return class_()
except AttributeError:
raise NotYetSupported('node', node)
| [((23, 14, 23, 52), 'typy.exceptions.NotYetSupported', 'NotYetSupported', ({(23, 30, 23, 45): '"""check call to"""', (23, 47, 23, 51): 'self'}, {}), "('check call to', self)", False, 'from typy.exceptions import NotYetSupported, NoSuchAttribute, NotIterable\n'), ((58, 8, 58, 48), 'logging.debug', 'debug', ({(58, 14, 58, 36): '"""checking func def %s"""', (58, 38, 58, 47): 'self.name'}, {}), "('checking func def %s', self.name)", False, 'from logging import debug\n'), ((59, 19, 59, 54), 'typy.types.Function', 'types.Function', ({(59, 34, 59, 38): 'self', (59, 40, 59, 53): 'self.type_map'}, {}), '(self, self.type_map)', False, 'from typy import types\n'), ((61, 15, 61, 33), 'typy.builtin.data_types.None_', 'data_types.None_', ({}, {}), '()', False, 'from typy.builtin import data_types\n'), ((74, 8, 74, 49), 'logging.debug', 'debug', ({(74, 14, 74, 37): '"""checking class def %s"""', (74, 39, 74, 48): 'self.name'}, {}), "('checking class def %s', self.name)", False, 'from logging import debug\n'), ((83, 17, 83, 66), 'typy.types.Class', 'types.Class', ({(83, 29, 83, 33): 'self', (83, 35, 83, 48): 'self.type_map', (83, 50, 83, 65): 'class_namespace'}, {}), '(self, self.type_map, class_namespace)', False, 'from typy import types\n'), ((86, 15, 86, 33), 'typy.builtin.data_types.None_', 'data_types.None_', ({}, {}), '()', False, 'from typy.builtin import data_types\n'), ((100, 8, 100, 39), 'logging.debug', 'debug', ({(100, 14, 100, 32): '"""checking attr %s"""', (100, 34, 100, 38): 'self'}, {}), "('checking attr %s', self)", False, 'from logging import debug\n'), ((103, 8, 103, 47), 'logging.debug', 'debug', ({(103, 14, 103, 28): '"""attr %r = %r"""', (103, 30, 103, 34): 'self', (103, 36, 103, 46): 'value_type'}, {}), "('attr %r = %r', self, value_type)", False, 'from logging import debug\n'), ((124, 8, 124, 42), 'logging.debug', 'debug', ({(124, 14, 124, 32): '"""checking name %s"""', (124, 34, 124, 41): 'self.id'}, {}), "('checking name %s', self.id)", False, 'from logging import debug\n'), ((150, 8, 150, 30), 'logging.debug', 'debug', ({(150, 14, 150, 29): '"""checking call"""'}, {}), "('checking call')", False, 'from logging import debug\n'), ((166, 8, 166, 30), 'logging.debug', 'debug', ({(166, 14, 166, 29): '"""checking expr"""'}, {}), "('checking expr')", False, 'from logging import debug\n'), ((168, 15, 168, 33), 'typy.builtin.data_types.None_', 'data_types.None_', ({}, {}), '()', False, 'from typy.builtin import data_types\n'), ((180, 8, 180, 32), 'logging.debug', 'debug', ({(180, 14, 180, 31): '"""checking return"""'}, {}), "('checking return')", False, 'from logging import debug\n'), ((190, 8, 190, 43), 'typy.types.Type.__init__', 'types.Type.__init__', ({(190, 28, 190, 32): 'self', (190, 34, 190, 42): 'type_map'}, {}), '(self, type_map)', False, 'from typy import types\n'), ((194, 8, 194, 32), 'logging.debug', 'debug', ({(194, 14, 194, 31): '"""checking module"""'}, {}), "('checking module')", False, 'from logging import debug\n'), ((198, 8, 198, 61), 'logging.debug', 'debug', ({(198, 14, 198, 27): '"""entering %r"""', (198, 29, 198, 60): 'self.type_map.current_namespace'}, {}), "('entering %r', self.type_map.current_namespace)", False, 'from logging import debug\n'), ((204, 8, 204, 60), 'logging.debug', 'debug', ({(204, 14, 204, 26): '"""leaving %r"""', (204, 28, 204, 59): 'self.type_map.current_namespace'}, {}), "('leaving %r', self.type_map.current_namespace)", False, 'from logging import debug\n'), ((227, 8, 227, 48), 'logging.debug', 'debug', ({(227, 14, 227, 34): '"""checking 
assign %r"""', (227, 36, 227, 47): 'self.target'}, {}), "('checking assign %r', self.target)", False, 'from logging import debug\n'), ((229, 15, 229, 33), 'typy.builtin.data_types.None_', 'data_types.None_', ({}, {}), '()', False, 'from typy.builtin import data_types\n'), ((237, 8, 237, 30), 'logging.debug', 'debug', ({(237, 14, 237, 29): '"""checking pass"""'}, {}), "('checking pass')", False, 'from logging import debug\n'), ((238, 15, 238, 33), 'typy.builtin.data_types.None_', 'data_types.None_', ({}, {}), '()', False, 'from typy.builtin import data_types\n'), ((250, 8, 250, 29), 'logging.debug', 'debug', ({(250, 14, 250, 28): '"""checking not"""'}, {}), "('checking not')", False, 'from logging import debug\n'), ((252, 15, 252, 32), 'typy.builtin.data_types.Bool', 'data_types.Bool', ({}, {}), '()', False, 'from typy.builtin import data_types\n'), ((265, 8, 265, 32), 'logging.debug', 'debug', ({(265, 14, 265, 31): '"""checking boolop"""'}, {}), "('checking boolop')", False, 'from logging import debug\n'), ((271, 15, 271, 32), 'typy.builtin.data_types.Bool', 'data_types.Bool', ({}, {}), '()', False, 'from typy.builtin import data_types\n'), ((285, 8, 285, 28), 'logging.debug', 'debug', ({(285, 14, 285, 27): '"""checking in"""'}, {}), "('checking in')", False, 'from logging import debug\n'), ((296, 15, 296, 32), 'typy.builtin.data_types.Bool', 'data_types.Bool', ({}, {}), '()', False, 'from typy.builtin import data_types\n'), ((311, 8, 311, 29), 'logging.debug', 'debug', ({(311, 14, 311, 28): '"""checking for"""'}, {}), "('checking for')", False, 'from logging import debug\n'), ((323, 15, 323, 33), 'typy.builtin.data_types.None_', 'data_types.None_', ({}, {}), '()', False, 'from typy.builtin import data_types\n'), ((342, 8, 342, 28), 'logging.debug', 'debug', ({(342, 14, 342, 27): '"""checking if"""'}, {}), "('checking if')", False, 'from logging import debug\n'), ((353, 15, 353, 33), 'typy.builtin.data_types.None_', 'data_types.None_', ({}, {}), '()', False, 'from typy.builtin import data_types\n'), ((372, 8, 372, 31), 'logging.debug', 'debug', ({(372, 14, 372, 30): '"""checking ifexp"""'}, {}), "('checking ifexp')", False, 'from logging import debug\n'), ((378, 15, 378, 49), 'typy.types.Intersection', 'types.Intersection', ({(378, 34, 378, 40): 'value1', (378, 42, 378, 48): 'value2'}, {}), '(value1, value2)', False, 'from typy import types\n'), ((391, 8, 391, 54), 'logging.debug', 'debug', ({(391, 14, 391, 41): '"""checking name constant %r"""', (391, 43, 391, 53): 'self.value'}, {}), "('checking name constant %r', self.value)", False, 'from logging import debug\n'), ((411, 8, 411, 31), 'logging.debug', 'debug', ({(411, 14, 411, 30): '"""checking while"""'}, {}), "('checking while')", False, 'from logging import debug\n'), ((422, 15, 422, 33), 'typy.builtin.data_types.None_', 'data_types.None_', ({}, {}), '()', False, 'from typy.builtin import data_types\n'), ((435, 8, 435, 31), 'logging.debug', 'debug', ({(435, 14, 435, 30): '"""checking break"""'}, {}), "('checking break')", False, 'from logging import debug\n'), ((436, 15, 436, 33), 'typy.builtin.data_types.None_', 'data_types.None_', ({}, {}), '()', False, 'from typy.builtin import data_types\n'), ((444, 8, 444, 34), 'logging.debug', 'debug', ({(444, 14, 444, 33): '"""checking continue"""'}, {}), "('checking continue')", False, 'from logging import debug\n'), ((445, 15, 445, 33), 'typy.builtin.data_types.None_', 'data_types.None_', ({}, {}), '()', False, 'from typy.builtin import data_types\n'), ((461, 8, 461, 29), 'logging.debug', 
'debug', ({(461, 14, 461, 28): '"""checking num"""'}, {}), "('checking num')", False, 'from logging import debug\n'), ((472, 8, 472, 40), 'logging.debug', 'debug', ({(472, 14, 472, 33): '"""checking tuple %r"""', (472, 35, 472, 39): 'self'}, {}), "('checking tuple %r', self)", False, 'from logging import debug\n'), ((49, 18, 49, 76), 'typy.exceptions.NotYetSupported', 'NotYetSupported', ({(49, 34, 49, 75): '"""default arguments and keyword arguments"""'}, {}), "('default arguments and keyword arguments')", False, 'from typy.exceptions import NotYetSupported, NoSuchAttribute, NotIterable\n'), ((143, 18, 143, 73), 'typy.exceptions.NotYetSupported', 'NotYetSupported', ({(143, 34, 143, 72): '"""keyword arguments and star arguments"""'}, {}), "('keyword arguments and star arguments')", False, 'from typy.exceptions import NotYetSupported, NoSuchAttribute, NotIterable\n'), ((201, 12, 201, 65), 'logging.debug', 'debug', ({(201, 18, 201, 31): '"""still in %r"""', (201, 33, 201, 64): 'self.type_map.current_namespace'}, {}), "('still in %r', self.type_map.current_namespace)", False, 'from logging import debug\n'), ((219, 18, 219, 69), 'typy.exceptions.NotYetSupported', 'NotYetSupported', ({(219, 34, 219, 68): '"""assignment with multiple targets"""'}, {}), "('assignment with multiple targets')", False, 'from typy.exceptions import NotYetSupported, NoSuchAttribute, NotIterable\n'), ((393, 19, 393, 37), 'typy.builtin.data_types.None_', 'data_types.None_', ({}, {}), '()', False, 'from typy.builtin import data_types\n'), ((476, 19, 476, 56), 'typy.types.Tuple', 'types.Tuple', ({(476, 31, 476, 44): 'self.type_map', (476, 46, 476, 55): '*el_types'}, {}), '(self.type_map, *el_types)', False, 'from typy import types\n'), ((497, 14, 497, 54), 'typy.exceptions.NotYetSupported', 'NotYetSupported', ({(497, 30, 497, 45): '"""assignment to"""', (497, 47, 497, 53): 'target'}, {}), "('assignment to', target)", False, 'from typy.exceptions import NotYetSupported, NoSuchAttribute, NotIterable\n'), ((111, 18, 111, 59), 'typy.exceptions.NotYetSupported', 'NotYetSupported', ({(111, 34, 111, 48): '"""name context"""', (111, 50, 111, 58): 'self.ctx'}, {}), "('name context', self.ctx)", False, 'from typy.exceptions import NotYetSupported, NoSuchAttribute, NotIterable\n'), ((132, 18, 132, 59), 'typy.exceptions.NotYetSupported', 'NotYetSupported', ({(132, 34, 132, 48): '"""name context"""', (132, 50, 132, 58): 'self.ctx'}, {}), "('name context', self.ctx)", False, 'from typy.exceptions import NotYetSupported, NoSuchAttribute, NotIterable\n'), ((212, 12, 212, 48), 'typy.types.Type.get_attribute', 'types.Type.get_attribute', ({(212, 37, 212, 41): 'self', (212, 43, 212, 47): 'name'}, {}), '(self, name)', False, 'from typy import types\n'), ((395, 19, 395, 36), 'typy.builtin.data_types.Bool', 'data_types.Bool', ({}, {}), '()', False, 'from typy.builtin import data_types\n'), ((397, 18, 397, 62), 'typy.exceptions.NotYetSupported', 'NotYetSupported', ({(397, 34, 397, 49): '"""name constant"""', (397, 51, 397, 61): 'self.value'}, {}), "('name constant', self.value)", False, 'from typy.exceptions import NotYetSupported, NoSuchAttribute, NotIterable\n'), ((481, 18, 481, 59), 'typy.exceptions.NotYetSupported', 'NotYetSupported', ({(481, 34, 481, 48): '"""name context"""', (481, 50, 481, 58): 'self.ctx'}, {}), "('name context', self.ctx)", False, 'from typy.exceptions import NotYetSupported, NoSuchAttribute, NotIterable\n'), ((294, 22, 294, 44), 'typy.exceptions.NotIterable', 'NotIterable', ({(294, 34, 294, 43): 'container'}, {}), 
'(container)', False, 'from typy.exceptions import NotYetSupported, NoSuchAttribute, NotIterable\n'), ((512, 18, 512, 47), 'typy.exceptions.NotYetSupported', 'NotYetSupported', ({(512, 34, 512, 40): '"""node"""', (512, 42, 512, 46): 'node'}, {}), "('node', node)", False, 'from typy.exceptions import NotYetSupported, NoSuchAttribute, NotIterable\n')] |
Sam-Gresh/linkage-agent-tools | anonlink-entity-service/backend/entityservice/integrationtests/objectstoretests/test_objectstore.py | f405c7efe3fa82d99bc047f130c0fac6f3f5bf82 | """
Testing:
- uploading over existing files
- using deleted credentials
- using expired credentials
"""
import io
import minio
from minio import Minio
import pytest
from minio.credentials import AssumeRoleProvider, Credentials
from entityservice.object_store import connect_to_object_store, connect_to_upload_object_store
from entityservice.settings import Config
restricted_upload_policy = """{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"s3:PutObject"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::uploads/2020/*"
],
"Sid": "Upload-access-to-specific-bucket-only"
}
]
}
"""
class TestAssumeRole:
def test_temp_credentials_minio(self):
upload_endpoint = Config.UPLOAD_OBJECT_STORE_SERVER
bucket_name = "uploads"
root_mc_client = connect_to_object_store()
upload_restricted_minio_client = connect_to_upload_object_store()
if not root_mc_client.bucket_exists(bucket_name):
root_mc_client.make_bucket(bucket_name)
with pytest.raises(minio.error.AccessDenied):
upload_restricted_minio_client.list_buckets()
# Should be able to put an object though
upload_restricted_minio_client.put_object(bucket_name, 'testobject', io.BytesIO(b'data'), length=4)
credentials_provider = AssumeRoleProvider(upload_restricted_minio_client,
Policy=restricted_upload_policy
)
temp_creds = Credentials(provider=credentials_provider)
newly_restricted_mc_client = Minio(upload_endpoint, credentials=temp_creds, region='us-east-1', secure=False)
with pytest.raises(minio.error.AccessDenied):
newly_restricted_mc_client.list_buckets()
# Note this put object worked with the earlier credentials
# But should fail if we have applied the more restrictive policy
with pytest.raises(minio.error.AccessDenied):
newly_restricted_mc_client.put_object(bucket_name, 'testobject2', io.BytesIO(b'data'), length=4)
# this path is allowed in the policy however
newly_restricted_mc_client.put_object(bucket_name, '2020/testobject', io.BytesIO(b'data'), length=4)
| [((43, 25, 43, 50), 'entityservice.object_store.connect_to_object_store', 'connect_to_object_store', ({}, {}), '()', False, 'from entityservice.object_store import connect_to_object_store, connect_to_upload_object_store\n'), ((44, 41, 44, 73), 'entityservice.object_store.connect_to_upload_object_store', 'connect_to_upload_object_store', ({}, {}), '()', False, 'from entityservice.object_store import connect_to_object_store, connect_to_upload_object_store\n'), ((54, 31, 56, 51), 'minio.credentials.AssumeRoleProvider', 'AssumeRoleProvider', (), '', False, 'from minio.credentials import AssumeRoleProvider, Credentials\n'), ((57, 21, 57, 63), 'minio.credentials.Credentials', 'Credentials', (), '', False, 'from minio.credentials import AssumeRoleProvider, Credentials\n'), ((59, 37, 59, 117), 'minio.Minio', 'Minio', (), '', False, 'from minio import Minio\n'), ((48, 13, 48, 52), 'pytest.raises', 'pytest.raises', ({(48, 27, 48, 51): 'minio.error.AccessDenied'}, {}), '(minio.error.AccessDenied)', False, 'import pytest\n'), ((52, 77, 52, 96), 'io.BytesIO', 'io.BytesIO', ({(52, 88, 52, 95): "b'data'"}, {}), "(b'data')", False, 'import io\n'), ((61, 13, 61, 52), 'pytest.raises', 'pytest.raises', ({(61, 27, 61, 51): 'minio.error.AccessDenied'}, {}), '(minio.error.AccessDenied)', False, 'import pytest\n'), ((66, 13, 66, 52), 'pytest.raises', 'pytest.raises', ({(66, 27, 66, 51): 'minio.error.AccessDenied'}, {}), '(minio.error.AccessDenied)', False, 'import pytest\n'), ((70, 78, 70, 97), 'io.BytesIO', 'io.BytesIO', ({(70, 89, 70, 96): "b'data'"}, {}), "(b'data')", False, 'import io\n'), ((67, 78, 67, 97), 'io.BytesIO', 'io.BytesIO', ({(67, 89, 67, 96): "b'data'"}, {}), "(b'data')", False, 'import io\n')] |
JackDan9/soil | soil/build/lib/soil/db/sqlalchemy/api.py | ae612a4634634aace834491fbdefbc69e6167674 | # Copyright 2020 Soil, Inc.
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of SQLAlchemy backend."""
import collections
import copy
import datetime
import functools
import inspect
import sys
import threading
from oslo_db.sqlalchemy import session as db_session
from oslo_log import log as logging
import soil.conf
from soil.i18n import _
CONF = soil.conf.CONF
LOG = logging.getLogger(__name__)
_LOCK = threading.Lock()
_FACADE = None
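# The EngineFacade is created lazily on first use and cached in the
# module-level _FACADE; _LOCK guards against concurrent first-time creation.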
def _create_facade_lazily():
global _LOCK
with _LOCK:
global _FACADE
if _FACADE is None:
_FACADE = db_session.EngineFacade(
CONF.database.connection,
**dict(CONF.database)
)
return _FACADE
def get_engine():
facade = _create_facade_lazily()
return facade.get_engine()
def get_session(**kwargs):
facade = _create_facade_lazily()
return facade.get_session(**kwargs)
def dispose_engine():
get_engine().dispose()
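# Usage sketch: get_session() returns a regular SQLAlchemy session from the
# shared facade, e.g. session = get_session(); session.execute(...).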
| [((38, 6, 38, 33), 'oslo_log.log.getLogger', 'logging.getLogger', ({(38, 24, 38, 32): '__name__'}, {}), '(__name__)', True, 'from oslo_log import log as logging\n'), ((41, 8, 41, 24), 'threading.Lock', 'threading.Lock', ({}, {}), '()', False, 'import threading\n')] |
kykrueger/redash | tests/test_models.py | 5fd78fdb2324a7c194e8a99c13deb5a57268866c | import calendar
import datetime
from unittest import TestCase
import pytz
from dateutil.parser import parse as date_parse
from tests import BaseTestCase
from redash import models, redis_connection
from redash.models import db, types
from redash.utils import gen_query_hash, utcnow
class DashboardTest(BaseTestCase):
def test_appends_suffix_to_slug_when_duplicate(self):
d1 = self.factory.create_dashboard()
db.session.flush()
self.assertEqual(d1.slug, 'test')
d2 = self.factory.create_dashboard(user=d1.user)
db.session.flush()
self.assertNotEqual(d1.slug, d2.slug)
d3 = self.factory.create_dashboard(user=d1.user)
db.session.flush()
self.assertNotEqual(d1.slug, d3.slug)
self.assertNotEqual(d2.slug, d3.slug)
class ShouldScheduleNextTest(TestCase):
def test_interval_schedule_that_needs_reschedule(self):
now = utcnow()
two_hours_ago = now - datetime.timedelta(hours=2)
self.assertTrue(models.should_schedule_next(two_hours_ago, now, "3600"))
def test_interval_schedule_that_doesnt_need_reschedule(self):
now = utcnow()
half_an_hour_ago = now - datetime.timedelta(minutes=30)
self.assertFalse(models.should_schedule_next(half_an_hour_ago, now, "3600"))
def test_exact_time_that_needs_reschedule(self):
now = utcnow()
yesterday = now - datetime.timedelta(days=1)
scheduled_datetime = now - datetime.timedelta(hours=3)
scheduled_time = "{:02d}:00".format(scheduled_datetime.hour)
self.assertTrue(models.should_schedule_next(yesterday, now, "86400",
scheduled_time))
def test_exact_time_that_doesnt_need_reschedule(self):
now = date_parse("2015-10-16 20:10")
yesterday = date_parse("2015-10-15 23:07")
schedule = "23:00"
self.assertFalse(models.should_schedule_next(yesterday, now, "86400", schedule))
def test_exact_time_with_day_change(self):
now = utcnow().replace(hour=0, minute=1)
previous = (now - datetime.timedelta(days=2)).replace(hour=23,
minute=59)
schedule = "23:59".format(now.hour + 3)
self.assertTrue(models.should_schedule_next(previous, now, "86400", schedule))
def test_exact_time_every_x_days_that_needs_reschedule(self):
now = utcnow()
four_days_ago = now - datetime.timedelta(days=4)
three_day_interval = "259200"
scheduled_datetime = now - datetime.timedelta(hours=3)
scheduled_time = "{:02d}:00".format(scheduled_datetime.hour)
self.assertTrue(models.should_schedule_next(four_days_ago, now, three_day_interval,
scheduled_time))
def test_exact_time_every_x_days_that_doesnt_need_reschedule(self):
now = utcnow()
four_days_ago = now - datetime.timedelta(days=2)
three_day_interval = "259200"
scheduled_datetime = now - datetime.timedelta(hours=3)
scheduled_time = "{:02d}:00".format(scheduled_datetime.hour)
self.assertFalse(models.should_schedule_next(four_days_ago, now, three_day_interval,
scheduled_time))
def test_exact_time_every_x_days_with_day_change(self):
now = utcnow().replace(hour=23, minute=59)
previous = (now - datetime.timedelta(days=2)).replace(hour=0, minute=1)
schedule = "23:58"
three_day_interval = "259200"
self.assertTrue(models.should_schedule_next(previous, now, three_day_interval, schedule))
def test_exact_time_every_x_weeks_that_needs_reschedule(self):
# Setup:
#
# 1) The query should run every 3 weeks on Tuesday
# 2) The last time it ran was 3 weeks ago from this week's Thursday
# 3) It is now Wednesday of this week
#
# Expectation: Even though less than 3 weeks have passed since the
# last run 3 weeks ago on Thursday, it's overdue since
# it should be running on Tuesdays.
this_thursday = utcnow() + datetime.timedelta(days=list(calendar.day_name).index("Thursday") - utcnow().weekday())
three_weeks_ago = this_thursday - datetime.timedelta(weeks=3)
now = this_thursday - datetime.timedelta(days=1)
three_week_interval = "1814400"
scheduled_datetime = now - datetime.timedelta(hours=3)
scheduled_time = "{:02d}:00".format(scheduled_datetime.hour)
self.assertTrue(models.should_schedule_next(three_weeks_ago, now, three_week_interval,
scheduled_time, "Tuesday"))
def test_exact_time_every_x_weeks_that_doesnt_need_reschedule(self):
# Setup:
#
# 1) The query should run every 3 weeks on Thursday
# 2) The last time it ran was 3 weeks ago from this week's Tuesday
# 3) It is now Wednesday of this week
#
# Expectation: Even though more than 3 weeks have passed since the
# last run 3 weeks ago on Tuesday, it's not overdue since
# it should be running on Thursdays.
this_tuesday = utcnow() + datetime.timedelta(days=list(calendar.day_name).index("Tuesday") - utcnow().weekday())
three_weeks_ago = this_tuesday - datetime.timedelta(weeks=3)
now = this_tuesday + datetime.timedelta(days=1)
three_week_interval = "1814400"
scheduled_datetime = now - datetime.timedelta(hours=3)
scheduled_time = "{:02d}:00".format(scheduled_datetime.hour)
self.assertFalse(models.should_schedule_next(three_weeks_ago, now, three_week_interval,
scheduled_time, "Thursday"))
def test_backoff(self):
now = utcnow()
two_hours_ago = now - datetime.timedelta(hours=2)
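        # failures trigger exponential backoff: with 5 recorded failures the
        # two-hour-old run is still overdue, while 10 failures push the next
        # run past "now"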
self.assertTrue(models.should_schedule_next(two_hours_ago, now, "3600",
failures=5))
self.assertFalse(models.should_schedule_next(two_hours_ago, now,
"3600", failures=10))
def test_next_iteration_overflow(self):
now = utcnow()
two_hours_ago = now - datetime.timedelta(hours=2)
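        # with 32 recorded failures the backoff is so large that computing the
        # next iteration would overflow, so the query is never reported as due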
self.assertFalse(models.should_schedule_next(two_hours_ago, now, "3600", failures=32))
class QueryOutdatedQueriesTest(BaseTestCase):
# TODO: this test can be refactored to use mock version of should_schedule_next to simplify it.
def test_outdated_queries_skips_unscheduled_queries(self):
query = self.factory.create_query(schedule={'interval':None, 'time': None, 'until':None, 'day_of_week':None})
query_with_none = self.factory.create_query(schedule=None)
queries = models.Query.outdated_queries()
self.assertNotIn(query, queries)
self.assertNotIn(query_with_none, queries)
def test_outdated_queries_works_with_ttl_based_schedule(self):
two_hours_ago = utcnow() - datetime.timedelta(hours=2)
query = self.factory.create_query(schedule={'interval':'3600', 'time': None, 'until':None, 'day_of_week':None})
query_result = self.factory.create_query_result(query=query.query_text, retrieved_at=two_hours_ago)
query.latest_query_data = query_result
queries = models.Query.outdated_queries()
self.assertIn(query, queries)
def test_outdated_queries_works_scheduled_queries_tracker(self):
two_hours_ago = utcnow() - datetime.timedelta(hours=2)
query = self.factory.create_query(schedule={'interval':'3600', 'time': None, 'until':None, 'day_of_week':None})
        query_result = self.factory.create_query_result(query=query.query_text, retrieved_at=two_hours_ago)
query.latest_query_data = query_result
models.scheduled_queries_executions.update(query.id)
queries = models.Query.outdated_queries()
self.assertNotIn(query, queries)
def test_skips_fresh_queries(self):
half_an_hour_ago = utcnow() - datetime.timedelta(minutes=30)
query = self.factory.create_query(schedule={'interval':'3600', 'time': None, 'until':None, 'day_of_week':None})
query_result = self.factory.create_query_result(query=query.query_text, retrieved_at=half_an_hour_ago)
query.latest_query_data = query_result
queries = models.Query.outdated_queries()
self.assertNotIn(query, queries)
def test_outdated_queries_works_with_specific_time_schedule(self):
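        # daily schedule whose run time passed half an hour ago; the last result
        # is more than a day old, so the query should be reported as outdated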
half_an_hour_ago = utcnow() - datetime.timedelta(minutes=30)
query = self.factory.create_query(schedule={'interval':'86400', 'time':half_an_hour_ago.strftime('%H:%M'), 'until':None, 'day_of_week':None})
query_result = self.factory.create_query_result(query=query.query_text, retrieved_at=half_an_hour_ago - datetime.timedelta(days=1))
query.latest_query_data = query_result
queries = models.Query.outdated_queries()
self.assertIn(query, queries)
def test_enqueues_query_only_once(self):
"""
Only one query per data source with the same text will be reported by
Query.outdated_queries().
"""
query = self.factory.create_query(schedule={'interval':'60', 'until':None, 'time': None, 'day_of_week':None})
query2 = self.factory.create_query(
schedule={'interval':'60', 'until':None, 'time': None, 'day_of_week':None}, query_text=query.query_text,
query_hash=query.query_hash)
retrieved_at = utcnow() - datetime.timedelta(minutes=10)
query_result = self.factory.create_query_result(
retrieved_at=retrieved_at, query_text=query.query_text,
query_hash=query.query_hash)
query.latest_query_data = query_result
query2.latest_query_data = query_result
self.assertEqual(list(models.Query.outdated_queries()), [query2])
def test_enqueues_query_with_correct_data_source(self):
"""
Queries from different data sources will be reported by
Query.outdated_queries() even if they have the same query text.
"""
query = self.factory.create_query(
schedule={'interval':'60', 'until':None, 'time': None, 'day_of_week':None}, data_source=self.factory.create_data_source())
query2 = self.factory.create_query(
schedule={'interval':'60', 'until':None, 'time': None, 'day_of_week':None}, query_text=query.query_text,
query_hash=query.query_hash)
retrieved_at = utcnow() - datetime.timedelta(minutes=10)
query_result = self.factory.create_query_result(
retrieved_at=retrieved_at, query_text=query.query_text,
query_hash=query.query_hash)
query.latest_query_data = query_result
query2.latest_query_data = query_result
outdated_queries = models.Query.outdated_queries()
self.assertEqual(len(outdated_queries), 2)
self.assertIn(query, outdated_queries)
self.assertIn(query2, outdated_queries)
def test_enqueues_only_for_relevant_data_source(self):
"""
If multiple queries with the same text exist, only ones that are
scheduled to be refreshed are reported by Query.outdated_queries().
"""
query = self.factory.create_query(schedule={'interval':'60', 'until':None, 'time': None, 'day_of_week':None})
query2 = self.factory.create_query(
schedule={'interval':'3600', 'until':None, 'time': None, 'day_of_week':None}, query_text=query.query_text,
query_hash=query.query_hash)
retrieved_at = utcnow() - datetime.timedelta(minutes=10)
query_result = self.factory.create_query_result(
retrieved_at=retrieved_at, query_text=query.query_text,
query_hash=query.query_hash)
query.latest_query_data = query_result
query2.latest_query_data = query_result
self.assertEqual(list(models.Query.outdated_queries()), [query])
def test_failure_extends_schedule(self):
"""
        Execution failures recorded for a query cause exponential backoff when
        scheduling its future executions.
"""
query = self.factory.create_query(schedule={'interval':'60', 'until':None, 'time': None, 'day_of_week':None}, schedule_failures=4)
retrieved_at = utcnow() - datetime.timedelta(minutes=16)
query_result = self.factory.create_query_result(
retrieved_at=retrieved_at, query_text=query.query_text,
query_hash=query.query_hash)
query.latest_query_data = query_result
self.assertEqual(list(models.Query.outdated_queries()), [])
query_result.retrieved_at = utcnow() - datetime.timedelta(minutes=17)
self.assertEqual(list(models.Query.outdated_queries()), [query])
def test_schedule_until_after(self):
"""
Queries with non-null ``schedule['until']`` are not reported by
Query.outdated_queries() after the given time is past.
"""
one_day_ago = (utcnow() - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
two_hours_ago = utcnow() - datetime.timedelta(hours=2)
query = self.factory.create_query(schedule={'interval':'3600', 'until':one_day_ago, 'time':None, 'day_of_week':None})
query_result = self.factory.create_query_result(query=query.query_text, retrieved_at=two_hours_ago)
query.latest_query_data = query_result
queries = models.Query.outdated_queries()
self.assertNotIn(query, queries)
def test_schedule_until_before(self):
"""
Queries with non-null ``schedule['until']`` are reported by
Query.outdated_queries() before the given time is past.
"""
one_day_from_now = (utcnow() + datetime.timedelta(days=1)).strftime("%Y-%m-%d")
two_hours_ago = utcnow() - datetime.timedelta(hours=2)
query = self.factory.create_query(schedule={'interval':'3600', 'until':one_day_from_now, 'time': None, 'day_of_week':None})
query_result = self.factory.create_query_result(query=query.query_text, retrieved_at=two_hours_ago)
query.latest_query_data = query_result
queries = models.Query.outdated_queries()
self.assertIn(query, queries)
class QueryArchiveTest(BaseTestCase):
def test_archive_query_sets_flag(self):
query = self.factory.create_query()
db.session.flush()
query.archive()
self.assertEqual(query.is_archived, True)
def test_archived_query_doesnt_return_in_all(self):
query = self.factory.create_query(schedule={'interval':'1', 'until':None, 'time': None, 'day_of_week':None})
yesterday = utcnow() - datetime.timedelta(days=1)
query_result = models.QueryResult.store_result(
query.org_id, query.data_source, query.query_hash, query.query_text,
"1", 123, yesterday)
query.latest_query_data = query_result
groups = list(models.Group.query.filter(models.Group.id.in_(query.groups)))
self.assertIn(query, list(models.Query.all_queries([g.id for g in groups])))
self.assertIn(query, models.Query.outdated_queries())
db.session.flush()
query.archive()
self.assertNotIn(query, list(models.Query.all_queries([g.id for g in groups])))
self.assertNotIn(query, models.Query.outdated_queries())
def test_removes_associated_widgets_from_dashboards(self):
widget = self.factory.create_widget()
query = widget.visualization.query_rel
db.session.commit()
query.archive()
db.session.flush()
self.assertEqual(models.Widget.query.get(widget.id), None)
def test_removes_scheduling(self):
query = self.factory.create_query(schedule={'interval':'1', 'until':None, 'time': None, 'day_of_week':None})
query.archive()
self.assertIsNone(query.schedule)
def test_deletes_alerts(self):
subscription = self.factory.create_alert_subscription()
query = subscription.alert.query_rel
db.session.commit()
query.archive()
db.session.flush()
self.assertEqual(models.Alert.query.get(subscription.alert.id), None)
self.assertEqual(models.AlertSubscription.query.get(subscription.id), None)
class TestUnusedQueryResults(BaseTestCase):
def test_returns_only_unused_query_results(self):
two_weeks_ago = utcnow() - datetime.timedelta(days=14)
qr = self.factory.create_query_result()
self.factory.create_query(latest_query_data=qr)
db.session.flush()
unused_qr = self.factory.create_query_result(retrieved_at=two_weeks_ago)
self.assertIn(unused_qr, list(models.QueryResult.unused()))
self.assertNotIn(qr, list(models.QueryResult.unused()))
def test_returns_only_over_a_week_old_results(self):
two_weeks_ago = utcnow() - datetime.timedelta(days=14)
unused_qr = self.factory.create_query_result(retrieved_at=two_weeks_ago)
db.session.flush()
new_unused_qr = self.factory.create_query_result()
self.assertIn(unused_qr, list(models.QueryResult.unused()))
self.assertNotIn(new_unused_qr, list(models.QueryResult.unused()))
class TestQueryAll(BaseTestCase):
def test_returns_only_queries_in_given_groups(self):
ds1 = self.factory.create_data_source()
ds2 = self.factory.create_data_source()
group1 = models.Group(name="g1", org=ds1.org, permissions=['create', 'view'])
group2 = models.Group(name="g2", org=ds1.org, permissions=['create', 'view'])
q1 = self.factory.create_query(data_source=ds1)
q2 = self.factory.create_query(data_source=ds2)
db.session.add_all([
ds1, ds2,
group1, group2,
q1, q2,
models.DataSourceGroup(
group=group1, data_source=ds1),
models.DataSourceGroup(group=group2, data_source=ds2)
])
db.session.flush()
self.assertIn(q1, list(models.Query.all_queries([group1.id])))
self.assertNotIn(q2, list(models.Query.all_queries([group1.id])))
self.assertIn(q1, list(models.Query.all_queries([group1.id, group2.id])))
self.assertIn(q2, list(models.Query.all_queries([group1.id, group2.id])))
def test_skips_drafts(self):
q = self.factory.create_query(is_draft=True)
self.assertNotIn(q, models.Query.all_queries([self.factory.default_group.id]))
def test_includes_drafts_of_given_user(self):
q = self.factory.create_query(is_draft=True)
self.assertIn(q, models.Query.all_queries([self.factory.default_group.id], user_id=q.user_id))
def test_order_by_relationship(self):
u1 = self.factory.create_user(name='alice')
u2 = self.factory.create_user(name='bob')
self.factory.create_query(user=u1)
self.factory.create_query(user=u2)
db.session.commit()
# have to reset the order here with None since all_queries orders by
# created_at by default
base = models.Query.all_queries([self.factory.default_group.id]).order_by(None)
qs1 = base.order_by(models.User.name)
self.assertEqual(['alice', 'bob'], [q.user.name for q in qs1])
qs2 = base.order_by(models.User.name.desc())
self.assertEqual(['bob', 'alice'], [q.user.name for q in qs2])
class TestGroup(BaseTestCase):
def test_returns_groups_with_specified_names(self):
org1 = self.factory.create_org()
org2 = self.factory.create_org()
matching_group1 = models.Group(id=999, name="g1", org=org1)
matching_group2 = models.Group(id=888, name="g2", org=org1)
non_matching_group = models.Group(id=777, name="g1", org=org2)
groups = models.Group.find_by_name(org1, ["g1", "g2"])
self.assertIn(matching_group1, groups)
self.assertIn(matching_group2, groups)
self.assertNotIn(non_matching_group, groups)
def test_returns_no_groups(self):
org1 = self.factory.create_org()
models.Group(id=999, name="g1", org=org1)
self.assertEqual([], models.Group.find_by_name(org1, ["non-existing"]))
class TestQueryResultStoreResult(BaseTestCase):
def setUp(self):
super(TestQueryResultStoreResult, self).setUp()
self.data_source = self.factory.data_source
self.query = "SELECT 1"
self.query_hash = gen_query_hash(self.query)
self.runtime = 123
self.utcnow = utcnow()
self.data = '{"a": 1}'
def test_stores_the_result(self):
query_result = models.QueryResult.store_result(
self.data_source.org_id, self.data_source, self.query_hash,
self.query, self.data, self.runtime, self.utcnow)
self.assertEqual(query_result._data, self.data)
self.assertEqual(query_result.runtime, self.runtime)
self.assertEqual(query_result.retrieved_at, self.utcnow)
self.assertEqual(query_result.query_text, self.query)
self.assertEqual(query_result.query_hash, self.query_hash)
self.assertEqual(query_result.data_source, self.data_source)
class TestEvents(BaseTestCase):
def raw_event(self):
timestamp = 1411778709.791
user = self.factory.user
created_at = datetime.datetime.utcfromtimestamp(timestamp)
db.session.flush()
raw_event = {"action": "view",
"timestamp": timestamp,
"object_type": "dashboard",
"user_id": user.id,
"object_id": 1,
"org_id": 1}
return raw_event, user, created_at
def test_records_event(self):
raw_event, user, created_at = self.raw_event()
event = models.Event.record(raw_event)
db.session.flush()
self.assertEqual(event.user, user)
self.assertEqual(event.action, "view")
self.assertEqual(event.object_type, "dashboard")
self.assertEqual(event.object_id, 1)
self.assertEqual(event.created_at, created_at)
def test_records_additional_properties(self):
raw_event, _, _ = self.raw_event()
additional_properties = {'test': 1, 'test2': 2, 'whatever': "abc"}
raw_event.update(additional_properties)
event = models.Event.record(raw_event)
self.assertDictEqual(event.additional_properties, additional_properties)
def _set_up_dashboard_test(d):
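    # Two isolated permission scopes: user u1 (group g1) can see data source ds1
    # and query q1, user u2 (group g2) can see ds2 and q2. Widgets w1..w5 place
    # the two visualizations on three published dashboards, including one mixed
    # dashboard (w4 + w5) that u1 can only partially access.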
d.g1 = d.factory.create_group(name='First', permissions=['create', 'view'])
d.g2 = d.factory.create_group(name='Second', permissions=['create', 'view'])
d.ds1 = d.factory.create_data_source()
d.ds2 = d.factory.create_data_source()
db.session.flush()
d.u1 = d.factory.create_user(group_ids=[d.g1.id])
d.u2 = d.factory.create_user(group_ids=[d.g2.id])
db.session.add_all([
models.DataSourceGroup(group=d.g1, data_source=d.ds1),
models.DataSourceGroup(group=d.g2, data_source=d.ds2)
])
d.q1 = d.factory.create_query(data_source=d.ds1)
d.q2 = d.factory.create_query(data_source=d.ds2)
d.v1 = d.factory.create_visualization(query_rel=d.q1)
d.v2 = d.factory.create_visualization(query_rel=d.q2)
d.w1 = d.factory.create_widget(visualization=d.v1)
d.w2 = d.factory.create_widget(visualization=d.v2)
d.w3 = d.factory.create_widget(visualization=d.v2, dashboard=d.w2.dashboard)
d.w4 = d.factory.create_widget(visualization=d.v2)
d.w5 = d.factory.create_widget(visualization=d.v1, dashboard=d.w4.dashboard)
d.w1.dashboard.is_draft = False
d.w2.dashboard.is_draft = False
d.w4.dashboard.is_draft = False
class TestDashboardAll(BaseTestCase):
def setUp(self):
super(TestDashboardAll, self).setUp()
_set_up_dashboard_test(self)
def test_requires_group_or_user_id(self):
d1 = self.factory.create_dashboard()
self.assertNotIn(d1, list(models.Dashboard.all(
d1.user.org, d1.user.group_ids, None)))
l2 = list(models.Dashboard.all(
d1.user.org, [0], d1.user.id))
self.assertIn(d1, l2)
def test_returns_dashboards_based_on_groups(self):
self.assertIn(self.w1.dashboard, list(models.Dashboard.all(
self.u1.org, self.u1.group_ids, None)))
self.assertIn(self.w2.dashboard, list(models.Dashboard.all(
self.u2.org, self.u2.group_ids, None)))
self.assertNotIn(self.w1.dashboard, list(models.Dashboard.all(
self.u2.org, self.u2.group_ids, None)))
self.assertNotIn(self.w2.dashboard, list(models.Dashboard.all(
self.u1.org, self.u1.group_ids, None)))
def test_returns_each_dashboard_once(self):
dashboards = list(models.Dashboard.all(self.u2.org, self.u2.group_ids, None))
self.assertEqual(len(dashboards), 2)
def test_returns_dashboard_you_have_partial_access_to(self):
self.assertIn(self.w5.dashboard, models.Dashboard.all(self.u1.org, self.u1.group_ids, None))
def test_returns_dashboards_created_by_user(self):
d1 = self.factory.create_dashboard(user=self.u1)
db.session.flush()
self.assertIn(d1, list(models.Dashboard.all(self.u1.org, self.u1.group_ids, self.u1.id)))
self.assertIn(d1, list(models.Dashboard.all(self.u1.org, [0], self.u1.id)))
self.assertNotIn(d1, list(models.Dashboard.all(self.u2.org, self.u2.group_ids, self.u2.id)))
def test_returns_dashboards_with_text_widgets(self):
w1 = self.factory.create_widget(visualization=None)
self.assertIn(w1.dashboard, models.Dashboard.all(self.u1.org, self.u1.group_ids, None))
self.assertIn(w1.dashboard, models.Dashboard.all(self.u2.org, self.u2.group_ids, None))
def test_returns_dashboards_from_current_org_only(self):
w1 = self.factory.create_widget(visualization=None)
user = self.factory.create_user(org=self.factory.create_org())
self.assertIn(w1.dashboard, models.Dashboard.all(self.u1.org, self.u1.group_ids, None))
self.assertNotIn(w1.dashboard, models.Dashboard.all(user.org, user.group_ids, None))
| [((495, 4, 495, 22), 'redash.models.db.session.flush', 'db.session.flush', ({}, {}), '()', False, 'from redash.models import db, types\n'), ((17, 8, 17, 26), 'redash.models.db.session.flush', 'db.session.flush', ({}, {}), '()', False, 'from redash.models import db, types\n'), ((21, 8, 21, 26), 'redash.models.db.session.flush', 'db.session.flush', ({}, {}), '()', False, 'from redash.models import db, types\n'), ((25, 8, 25, 26), 'redash.models.db.session.flush', 'db.session.flush', ({}, {}), '()', False, 'from redash.models import db, types\n'), ((32, 14, 32, 22), 'redash.utils.utcnow', 'utcnow', ({}, {}), '()', False, 'from redash.utils import gen_query_hash, utcnow\n'), ((37, 14, 37, 22), 'redash.utils.utcnow', 'utcnow', ({}, {}), '()', False, 'from redash.utils import gen_query_hash, utcnow\n'), ((42, 14, 42, 22), 'redash.utils.utcnow', 'utcnow', ({}, {}), '()', False, 'from redash.utils import gen_query_hash, utcnow\n'), ((50, 14, 50, 44), 'dateutil.parser.parse', 'date_parse', ({(50, 25, 50, 43): '"""2015-10-16 20:10"""'}, {}), "('2015-10-16 20:10')", True, 'from dateutil.parser import parse as date_parse\n'), ((51, 20, 51, 50), 'dateutil.parser.parse', 'date_parse', ({(51, 31, 51, 49): '"""2015-10-15 23:07"""'}, {}), "('2015-10-15 23:07')", True, 'from dateutil.parser import parse as date_parse\n'), ((63, 14, 63, 22), 'redash.utils.utcnow', 'utcnow', ({}, {}), '()', False, 'from redash.utils import gen_query_hash, utcnow\n'), ((72, 14, 72, 22), 'redash.utils.utcnow', 'utcnow', ({}, {}), '()', False, 'from redash.utils import gen_query_hash, utcnow\n'), ((126, 14, 126, 22), 'redash.utils.utcnow', 'utcnow', ({}, {}), '()', False, 'from redash.utils import gen_query_hash, utcnow\n'), ((134, 14, 134, 22), 'redash.utils.utcnow', 'utcnow', ({}, {}), '()', False, 'from redash.utils import gen_query_hash, utcnow\n'), ((145, 18, 145, 49), 'redash.models.Query.outdated_queries', 'models.Query.outdated_queries', ({}, {}), '()', False, 'from redash import models, redis_connection\n'), ((156, 18, 156, 49), 'redash.models.Query.outdated_queries', 'models.Query.outdated_queries', ({}, {}), '()', False, 'from redash import models, redis_connection\n'), ((165, 8, 165, 60), 'redash.models.scheduled_queries_executions.update', 'models.scheduled_queries_executions.update', ({(165, 51, 165, 59): 'query.id'}, {}), '(query.id)', False, 'from redash import models, redis_connection\n'), ((167, 18, 167, 49), 'redash.models.Query.outdated_queries', 'models.Query.outdated_queries', ({}, {}), '()', False, 'from redash import models, redis_connection\n'), ((176, 18, 176, 49), 'redash.models.Query.outdated_queries', 'models.Query.outdated_queries', ({}, {}), '()', False, 'from redash import models, redis_connection\n'), ((185, 18, 185, 49), 'redash.models.Query.outdated_queries', 'models.Query.outdated_queries', ({}, {}), '()', False, 'from redash import models, redis_connection\n'), ((223, 27, 223, 58), 'redash.models.Query.outdated_queries', 'models.Query.outdated_queries', ({}, {}), '()', False, 'from redash import models, redis_connection\n'), ((274, 18, 274, 49), 'redash.models.Query.outdated_queries', 'models.Query.outdated_queries', ({}, {}), '()', False, 'from redash import models, redis_connection\n'), ((288, 18, 288, 49), 'redash.models.Query.outdated_queries', 'models.Query.outdated_queries', ({}, {}), '()', False, 'from redash import models, redis_connection\n'), ((295, 8, 295, 26), 'redash.models.db.session.flush', 'db.session.flush', ({}, {}), '()', False, 'from redash.models import db, types\n'), 
((303, 23, 305, 32), 'redash.models.QueryResult.store_result', 'models.QueryResult.store_result', ({(304, 12, 304, 24): 'query.org_id', (304, 26, 304, 43): 'query.data_source', (304, 45, 304, 61): 'query.query_hash', (304, 63, 304, 79): 'query.query_text', (305, 12, 305, 15): '"""1"""', (305, 17, 305, 20): '123', (305, 22, 305, 31): 'yesterday'}, {}), "(query.org_id, query.data_source, query.\n query_hash, query.query_text, '1', 123, yesterday)", False, 'from redash import models, redis_connection\n'), ((311, 8, 311, 26), 'redash.models.db.session.flush', 'db.session.flush', ({}, {}), '()', False, 'from redash.models import db, types\n'), ((320, 8, 320, 27), 'redash.models.db.session.commit', 'db.session.commit', ({}, {}), '()', False, 'from redash.models import db, types\n'), ((322, 8, 322, 26), 'redash.models.db.session.flush', 'db.session.flush', ({}, {}), '()', False, 'from redash.models import db, types\n'), ((335, 8, 335, 27), 'redash.models.db.session.commit', 'db.session.commit', ({}, {}), '()', False, 'from redash.models import db, types\n'), ((337, 8, 337, 26), 'redash.models.db.session.flush', 'db.session.flush', ({}, {}), '()', False, 'from redash.models import db, types\n'), ((347, 8, 347, 26), 'redash.models.db.session.flush', 'db.session.flush', ({}, {}), '()', False, 'from redash.models import db, types\n'), ((355, 8, 355, 26), 'redash.models.db.session.flush', 'db.session.flush', ({}, {}), '()', False, 'from redash.models import db, types\n'), ((367, 17, 367, 85), 'redash.models.Group', 'models.Group', (), '', False, 'from redash import models, redis_connection\n'), ((368, 17, 368, 85), 'redash.models.Group', 'models.Group', (), '', False, 'from redash import models, redis_connection\n'), ((381, 8, 381, 26), 'redash.models.db.session.flush', 'db.session.flush', ({}, {}), '()', False, 'from redash.models import db, types\n'), ((400, 8, 400, 27), 'redash.models.db.session.commit', 'db.session.commit', ({}, {}), '()', False, 'from redash.models import db, types\n'), ((415, 26, 415, 67), 'redash.models.Group', 'models.Group', (), '', False, 'from redash import models, redis_connection\n'), ((416, 26, 416, 67), 'redash.models.Group', 'models.Group', (), '', False, 'from redash import models, redis_connection\n'), ((417, 29, 417, 70), 'redash.models.Group', 'models.Group', (), '', False, 'from redash import models, redis_connection\n'), ((419, 17, 419, 62), 'redash.models.Group.find_by_name', 'models.Group.find_by_name', ({(419, 43, 419, 47): 'org1', (419, 49, 419, 61): "['g1', 'g2']"}, {}), "(org1, ['g1', 'g2'])", False, 'from redash import models, redis_connection\n'), ((427, 8, 427, 49), 'redash.models.Group', 'models.Group', (), '', False, 'from redash import models, redis_connection\n'), ((436, 26, 436, 52), 'redash.utils.gen_query_hash', 'gen_query_hash', ({(436, 41, 436, 51): 'self.query'}, {}), '(self.query)', False, 'from redash.utils import gen_query_hash, utcnow\n'), ((438, 22, 438, 30), 'redash.utils.utcnow', 'utcnow', ({}, {}), '()', False, 'from redash.utils import gen_query_hash, utcnow\n'), ((442, 23, 444, 61), 'redash.models.QueryResult.store_result', 'models.QueryResult.store_result', ({(443, 12, 443, 35): 'self.data_source.org_id', (443, 37, 443, 53): 'self.data_source', (443, 55, 443, 70): 'self.query_hash', (444, 12, 444, 22): 'self.query', (444, 24, 444, 33): 'self.data', (444, 35, 444, 47): 'self.runtime', (444, 49, 444, 60): 'self.utcnow'}, {}), '(self.data_source.org_id, self.data_source,\n self.query_hash, self.query, self.data, self.runtime, 
self.utcnow)', False, 'from redash import models, redis_connection\n'), ((458, 21, 458, 66), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', ({(458, 56, 458, 65): 'timestamp'}, {}), '(timestamp)', False, 'import datetime\n'), ((459, 8, 459, 26), 'redash.models.db.session.flush', 'db.session.flush', ({}, {}), '()', False, 'from redash.models import db, types\n'), ((472, 16, 472, 46), 'redash.models.Event.record', 'models.Event.record', ({(472, 36, 472, 45): 'raw_event'}, {}), '(raw_event)', False, 'from redash import models, redis_connection\n'), ((473, 8, 473, 26), 'redash.models.db.session.flush', 'db.session.flush', ({}, {}), '()', False, 'from redash.models import db, types\n'), ((485, 16, 485, 46), 'redash.models.Event.record', 'models.Event.record', ({(485, 36, 485, 45): 'raw_event'}, {}), '(raw_event)', False, 'from redash import models, redis_connection\n'), ((548, 8, 548, 26), 'redash.models.db.session.flush', 'db.session.flush', ({}, {}), '()', False, 'from redash.models import db, types\n'), ((33, 30, 33, 57), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((34, 24, 34, 79), 'redash.models.should_schedule_next', 'models.should_schedule_next', ({(34, 52, 34, 65): 'two_hours_ago', (34, 67, 34, 70): 'now', (34, 72, 34, 78): '"""3600"""'}, {}), "(two_hours_ago, now, '3600')", False, 'from redash import models, redis_connection\n'), ((38, 33, 38, 63), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((39, 25, 39, 83), 'redash.models.should_schedule_next', 'models.should_schedule_next', ({(39, 53, 39, 69): 'half_an_hour_ago', (39, 71, 39, 74): 'now', (39, 76, 39, 82): '"""3600"""'}, {}), "(half_an_hour_ago, now, '3600')", False, 'from redash import models, redis_connection\n'), ((43, 26, 43, 52), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((44, 35, 44, 62), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((46, 24, 47, 67), 'redash.models.should_schedule_next', 'models.should_schedule_next', ({(46, 52, 46, 61): 'yesterday', (46, 63, 46, 66): 'now', (46, 68, 46, 75): '"""86400"""', (47, 52, 47, 66): 'scheduled_time'}, {}), "(yesterday, now, '86400', scheduled_time)", False, 'from redash import models, redis_connection\n'), ((53, 25, 53, 87), 'redash.models.should_schedule_next', 'models.should_schedule_next', ({(53, 53, 53, 62): 'yesterday', (53, 64, 53, 67): 'now', (53, 69, 53, 76): '"""86400"""', (53, 78, 53, 86): 'schedule'}, {}), "(yesterday, now, '86400', schedule)", False, 'from redash import models, redis_connection\n'), ((60, 24, 60, 85), 'redash.models.should_schedule_next', 'models.should_schedule_next', ({(60, 52, 60, 60): 'previous', (60, 62, 60, 65): 'now', (60, 67, 60, 74): '"""86400"""', (60, 76, 60, 84): 'schedule'}, {}), "(previous, now, '86400', schedule)", False, 'from redash import models, redis_connection\n'), ((64, 30, 64, 56), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((66, 35, 66, 62), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((68, 24, 69, 67), 'redash.models.should_schedule_next', 'models.should_schedule_next', ({(68, 52, 68, 65): 'four_days_ago', (68, 67, 68, 70): 'now', (68, 72, 68, 90): 'three_day_interval', (69, 52, 69, 66): 'scheduled_time'}, {}), '(four_days_ago, now, three_day_interval,\n scheduled_time)', False, 'from redash import models, redis_connection\n'), ((73, 30, 73, 56), 'datetime.timedelta', 
'datetime.timedelta', (), '', False, 'import datetime\n'), ((75, 35, 75, 62), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((77, 25, 78, 67), 'redash.models.should_schedule_next', 'models.should_schedule_next', ({(77, 53, 77, 66): 'four_days_ago', (77, 68, 77, 71): 'now', (77, 73, 77, 91): 'three_day_interval', (78, 52, 78, 66): 'scheduled_time'}, {}), '(four_days_ago, now, three_day_interval,\n scheduled_time)', False, 'from redash import models, redis_connection\n'), ((85, 24, 85, 96), 'redash.models.should_schedule_next', 'models.should_schedule_next', ({(85, 52, 85, 60): 'previous', (85, 62, 85, 65): 'now', (85, 67, 85, 85): 'three_day_interval', (85, 87, 85, 95): 'schedule'}, {}), '(previous, now, three_day_interval, schedule)', False, 'from redash import models, redis_connection\n'), ((97, 24, 97, 32), 'redash.utils.utcnow', 'utcnow', ({}, {}), '()', False, 'from redash.utils import gen_query_hash, utcnow\n'), ((98, 42, 98, 69), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((99, 30, 99, 56), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((101, 35, 101, 62), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((103, 24, 104, 78), 'redash.models.should_schedule_next', 'models.should_schedule_next', ({(103, 52, 103, 67): 'three_weeks_ago', (103, 69, 103, 72): 'now', (103, 74, 103, 93): 'three_week_interval', (104, 52, 104, 66): 'scheduled_time', (104, 68, 104, 77): '"""Tuesday"""'}, {}), "(three_weeks_ago, now, three_week_interval,\n scheduled_time, 'Tuesday')", False, 'from redash import models, redis_connection\n'), ((116, 23, 116, 31), 'redash.utils.utcnow', 'utcnow', ({}, {}), '()', False, 'from redash.utils import gen_query_hash, utcnow\n'), ((117, 41, 117, 68), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((118, 29, 118, 55), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((120, 35, 120, 62), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((122, 25, 123, 79), 'redash.models.should_schedule_next', 'models.should_schedule_next', ({(122, 53, 122, 68): 'three_weeks_ago', (122, 70, 122, 73): 'now', (122, 75, 122, 94): 'three_week_interval', (123, 52, 123, 66): 'scheduled_time', (123, 68, 123, 78): '"""Thursday"""'}, {}), "(three_weeks_ago, now, three_week_interval,\n scheduled_time, 'Thursday')", False, 'from redash import models, redis_connection\n'), ((127, 30, 127, 57), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((128, 24, 129, 63), 'redash.models.should_schedule_next', 'models.should_schedule_next', (), '', False, 'from redash import models, redis_connection\n'), ((130, 25, 131, 73), 'redash.models.should_schedule_next', 'models.should_schedule_next', (), '', False, 'from redash import models, redis_connection\n'), ((135, 30, 135, 57), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((136, 25, 136, 93), 'redash.models.should_schedule_next', 'models.should_schedule_next', (), '', False, 'from redash import models, redis_connection\n'), ((151, 24, 151, 32), 'redash.utils.utcnow', 'utcnow', ({}, {}), '()', False, 'from redash.utils import gen_query_hash, utcnow\n'), ((151, 35, 151, 62), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((160, 24, 160, 32), 'redash.utils.utcnow', 'utcnow', ({}, {}), '()', False, 'from redash.utils import 
gen_query_hash, utcnow\n'), ((160, 35, 160, 62), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((171, 27, 171, 35), 'redash.utils.utcnow', 'utcnow', ({}, {}), '()', False, 'from redash.utils import gen_query_hash, utcnow\n'), ((171, 38, 171, 68), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((180, 27, 180, 35), 'redash.utils.utcnow', 'utcnow', ({}, {}), '()', False, 'from redash.utils import gen_query_hash, utcnow\n'), ((180, 38, 180, 68), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((197, 23, 197, 31), 'redash.utils.utcnow', 'utcnow', ({}, {}), '()', False, 'from redash.utils import gen_query_hash, utcnow\n'), ((197, 34, 197, 64), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((216, 23, 216, 31), 'redash.utils.utcnow', 'utcnow', ({}, {}), '()', False, 'from redash.utils import gen_query_hash, utcnow\n'), ((216, 34, 216, 64), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((237, 23, 237, 31), 'redash.utils.utcnow', 'utcnow', ({}, {}), '()', False, 'from redash.utils import gen_query_hash, utcnow\n'), ((237, 34, 237, 64), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((252, 23, 252, 31), 'redash.utils.utcnow', 'utcnow', ({}, {}), '()', False, 'from redash.utils import gen_query_hash, utcnow\n'), ((252, 34, 252, 64), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((260, 36, 260, 44), 'redash.utils.utcnow', 'utcnow', ({}, {}), '()', False, 'from redash.utils import gen_query_hash, utcnow\n'), ((260, 47, 260, 77), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((269, 24, 269, 32), 'redash.utils.utcnow', 'utcnow', ({}, {}), '()', False, 'from redash.utils import gen_query_hash, utcnow\n'), ((269, 35, 269, 62), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((283, 24, 283, 32), 'redash.utils.utcnow', 'utcnow', ({}, {}), '()', False, 'from redash.utils import gen_query_hash, utcnow\n'), ((283, 35, 283, 62), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((302, 20, 302, 28), 'redash.utils.utcnow', 'utcnow', ({}, {}), '()', False, 'from redash.utils import gen_query_hash, utcnow\n'), ((302, 31, 302, 57), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((310, 29, 310, 60), 'redash.models.Query.outdated_queries', 'models.Query.outdated_queries', ({}, {}), '()', False, 'from redash import models, redis_connection\n'), ((315, 32, 315, 63), 'redash.models.Query.outdated_queries', 'models.Query.outdated_queries', ({}, {}), '()', False, 'from redash import models, redis_connection\n'), ((323, 25, 323, 59), 'redash.models.Widget.query.get', 'models.Widget.query.get', ({(323, 49, 323, 58): 'widget.id'}, {}), '(widget.id)', False, 'from redash import models, redis_connection\n'), ((338, 25, 338, 70), 'redash.models.Alert.query.get', 'models.Alert.query.get', ({(338, 48, 338, 69): 'subscription.alert.id'}, {}), '(subscription.alert.id)', False, 'from redash import models, redis_connection\n'), ((339, 25, 339, 76), 'redash.models.AlertSubscription.query.get', 'models.AlertSubscription.query.get', ({(339, 60, 339, 75): 'subscription.id'}, {}), '(subscription.id)', False, 'from redash import models, redis_connection\n'), ((344, 24, 344, 32), 'redash.utils.utcnow', 'utcnow', ({}, {}), '()', False, 'from redash.utils import gen_query_hash, 
utcnow\n'), ((344, 35, 344, 62), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((353, 24, 353, 32), 'redash.utils.utcnow', 'utcnow', ({}, {}), '()', False, 'from redash.utils import gen_query_hash, utcnow\n'), ((353, 35, 353, 62), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((389, 28, 389, 85), 'redash.models.Query.all_queries', 'models.Query.all_queries', ({(389, 53, 389, 84): '[self.factory.default_group.id]'}, {}), '([self.factory.default_group.id])', False, 'from redash import models, redis_connection\n'), ((393, 25, 393, 101), 'redash.models.Query.all_queries', 'models.Query.all_queries', (), '', False, 'from redash import models, redis_connection\n'), ((406, 28, 406, 51), 'redash.models.User.name.desc', 'models.User.name.desc', ({}, {}), '()', False, 'from redash import models, redis_connection\n'), ((428, 29, 428, 78), 'redash.models.Group.find_by_name', 'models.Group.find_by_name', ({(428, 55, 428, 59): 'org1', (428, 61, 428, 77): "['non-existing']"}, {}), "(org1, ['non-existing'])", False, 'from redash import models, redis_connection\n'), ((499, 8, 499, 61), 'redash.models.DataSourceGroup', 'models.DataSourceGroup', (), '', False, 'from redash import models, redis_connection\n'), ((500, 8, 500, 61), 'redash.models.DataSourceGroup', 'models.DataSourceGroup', (), '', False, 'from redash import models, redis_connection\n'), ((525, 18, 526, 41), 'redash.models.Dashboard.all', 'models.Dashboard.all', ({(526, 12, 526, 23): 'd1.user.org', (526, 25, 526, 28): '[0]', (526, 30, 526, 40): 'd1.user.id'}, {}), '(d1.user.org, [0], d1.user.id)', False, 'from redash import models, redis_connection\n'), ((540, 26, 540, 84), 'redash.models.Dashboard.all', 'models.Dashboard.all', ({(540, 47, 540, 58): 'self.u2.org', (540, 60, 540, 77): 'self.u2.group_ids', (540, 79, 540, 83): 'None'}, {}), '(self.u2.org, self.u2.group_ids, None)', False, 'from redash import models, redis_connection\n'), ((544, 41, 544, 99), 'redash.models.Dashboard.all', 'models.Dashboard.all', ({(544, 62, 544, 73): 'self.u1.org', (544, 75, 544, 92): 'self.u1.group_ids', (544, 94, 544, 98): 'None'}, {}), '(self.u1.org, self.u1.group_ids, None)', False, 'from redash import models, redis_connection\n'), ((556, 36, 556, 94), 'redash.models.Dashboard.all', 'models.Dashboard.all', ({(556, 57, 556, 68): 'self.u1.org', (556, 70, 556, 87): 'self.u1.group_ids', (556, 89, 556, 93): 'None'}, {}), '(self.u1.org, self.u1.group_ids, None)', False, 'from redash import models, redis_connection\n'), ((557, 36, 557, 94), 'redash.models.Dashboard.all', 'models.Dashboard.all', ({(557, 57, 557, 68): 'self.u2.org', (557, 70, 557, 87): 'self.u2.group_ids', (557, 89, 557, 93): 'None'}, {}), '(self.u2.org, self.u2.group_ids, None)', False, 'from redash import models, redis_connection\n'), ((564, 36, 564, 94), 'redash.models.Dashboard.all', 'models.Dashboard.all', ({(564, 57, 564, 68): 'self.u1.org', (564, 70, 564, 87): 'self.u1.group_ids', (564, 89, 564, 93): 'None'}, {}), '(self.u1.org, self.u1.group_ids, None)', False, 'from redash import models, redis_connection\n'), ((565, 39, 565, 91), 'redash.models.Dashboard.all', 'models.Dashboard.all', ({(565, 60, 565, 68): 'user.org', (565, 70, 565, 84): 'user.group_ids', (565, 86, 565, 90): 'None'}, {}), '(user.org, user.group_ids, None)', False, 'from redash import models, redis_connection\n'), ((56, 14, 56, 22), 'redash.utils.utcnow', 'utcnow', ({}, {}), '()', False, 'from redash.utils import gen_query_hash, utcnow\n'), ((81, 14, 81, 22), 
'redash.utils.utcnow', 'utcnow', ({}, {}), '()', False, 'from redash.utils import gen_query_hash, utcnow\n'), ((204, 30, 204, 61), 'redash.models.Query.outdated_queries', 'models.Query.outdated_queries', ({}, {}), '()', False, 'from redash import models, redis_connection\n'), ((244, 30, 244, 61), 'redash.models.Query.outdated_queries', 'models.Query.outdated_queries', ({}, {}), '()', False, 'from redash import models, redis_connection\n'), ((258, 30, 258, 61), 'redash.models.Query.outdated_queries', 'models.Query.outdated_queries', ({}, {}), '()', False, 'from redash import models, redis_connection\n'), ((261, 30, 261, 61), 'redash.models.Query.outdated_queries', 'models.Query.outdated_queries', ({}, {}), '()', False, 'from redash import models, redis_connection\n'), ((308, 48, 308, 81), 'redash.models.Group.id.in_', 'models.Group.id.in_', ({(308, 68, 308, 80): 'query.groups'}, {}), '(query.groups)', False, 'from redash import models, redis_connection\n'), ((309, 34, 309, 82), 'redash.models.Query.all_queries', 'models.Query.all_queries', ({(309, 59, 309, 81): '[g.id for g in groups]'}, {}), '([g.id for g in groups])', False, 'from redash import models, redis_connection\n'), ((314, 37, 314, 85), 'redash.models.Query.all_queries', 'models.Query.all_queries', ({(314, 62, 314, 84): '[g.id for g in groups]'}, {}), '([g.id for g in groups])', False, 'from redash import models, redis_connection\n'), ((349, 38, 349, 65), 'redash.models.QueryResult.unused', 'models.QueryResult.unused', ({}, {}), '()', False, 'from redash import models, redis_connection\n'), ((350, 34, 350, 61), 'redash.models.QueryResult.unused', 'models.QueryResult.unused', ({}, {}), '()', False, 'from redash import models, redis_connection\n'), ((358, 38, 358, 65), 'redash.models.QueryResult.unused', 'models.QueryResult.unused', ({}, {}), '()', False, 'from redash import models, redis_connection\n'), ((359, 45, 359, 72), 'redash.models.QueryResult.unused', 'models.QueryResult.unused', ({}, {}), '()', False, 'from redash import models, redis_connection\n'), ((377, 12, 378, 46), 'redash.models.DataSourceGroup', 'models.DataSourceGroup', (), '', False, 'from redash import models, redis_connection\n'), ((379, 12, 379, 65), 'redash.models.DataSourceGroup', 'models.DataSourceGroup', (), '', False, 'from redash import models, redis_connection\n'), ((382, 31, 382, 68), 'redash.models.Query.all_queries', 'models.Query.all_queries', ({(382, 56, 382, 67): '[group1.id]'}, {}), '([group1.id])', False, 'from redash import models, redis_connection\n'), ((383, 34, 383, 71), 'redash.models.Query.all_queries', 'models.Query.all_queries', ({(383, 59, 383, 70): '[group1.id]'}, {}), '([group1.id])', False, 'from redash import models, redis_connection\n'), ((384, 31, 384, 79), 'redash.models.Query.all_queries', 'models.Query.all_queries', ({(384, 56, 384, 78): '[group1.id, group2.id]'}, {}), '([group1.id, group2.id])', False, 'from redash import models, redis_connection\n'), ((385, 31, 385, 79), 'redash.models.Query.all_queries', 'models.Query.all_queries', ({(385, 56, 385, 78): '[group1.id, group2.id]'}, {}), '([group1.id, group2.id])', False, 'from redash import models, redis_connection\n'), ((403, 15, 403, 72), 'redash.models.Query.all_queries', 'models.Query.all_queries', ({(403, 40, 403, 71): '[self.factory.default_group.id]'}, {}), '([self.factory.default_group.id])', False, 'from redash import models, redis_connection\n'), ((523, 34, 524, 48), 'redash.models.Dashboard.all', 'models.Dashboard.all', ({(524, 11, 524, 22): 'd1.user.org', (524, 24, 
524, 41): 'd1.user.group_ids', (524, 43, 524, 47): 'None'}, {}), '(d1.user.org, d1.user.group_ids, None)', False, 'from redash import models, redis_connection\n'), ((530, 46, 531, 49), 'redash.models.Dashboard.all', 'models.Dashboard.all', ({(531, 12, 531, 23): 'self.u1.org', (531, 25, 531, 42): 'self.u1.group_ids', (531, 44, 531, 48): 'None'}, {}), '(self.u1.org, self.u1.group_ids, None)', False, 'from redash import models, redis_connection\n'), ((532, 46, 533, 49), 'redash.models.Dashboard.all', 'models.Dashboard.all', ({(533, 12, 533, 23): 'self.u2.org', (533, 25, 533, 42): 'self.u2.group_ids', (533, 44, 533, 48): 'None'}, {}), '(self.u2.org, self.u2.group_ids, None)', False, 'from redash import models, redis_connection\n'), ((534, 49, 535, 49), 'redash.models.Dashboard.all', 'models.Dashboard.all', ({(535, 12, 535, 23): 'self.u2.org', (535, 25, 535, 42): 'self.u2.group_ids', (535, 44, 535, 48): 'None'}, {}), '(self.u2.org, self.u2.group_ids, None)', False, 'from redash import models, redis_connection\n'), ((536, 49, 537, 49), 'redash.models.Dashboard.all', 'models.Dashboard.all', ({(537, 12, 537, 23): 'self.u1.org', (537, 25, 537, 42): 'self.u1.group_ids', (537, 44, 537, 48): 'None'}, {}), '(self.u1.org, self.u1.group_ids, None)', False, 'from redash import models, redis_connection\n'), ((549, 31, 549, 95), 'redash.models.Dashboard.all', 'models.Dashboard.all', ({(549, 52, 549, 63): 'self.u1.org', (549, 65, 549, 82): 'self.u1.group_ids', (549, 84, 549, 94): 'self.u1.id'}, {}), '(self.u1.org, self.u1.group_ids, self.u1.id)', False, 'from redash import models, redis_connection\n'), ((550, 31, 550, 81), 'redash.models.Dashboard.all', 'models.Dashboard.all', ({(550, 52, 550, 63): 'self.u1.org', (550, 65, 550, 68): '[0]', (550, 70, 550, 80): 'self.u1.id'}, {}), '(self.u1.org, [0], self.u1.id)', False, 'from redash import models, redis_connection\n'), ((551, 34, 551, 98), 'redash.models.Dashboard.all', 'models.Dashboard.all', ({(551, 55, 551, 66): 'self.u2.org', (551, 68, 551, 85): 'self.u2.group_ids', (551, 87, 551, 97): 'self.u2.id'}, {}), '(self.u2.org, self.u2.group_ids, self.u2.id)', False, 'from redash import models, redis_connection\n'), ((57, 26, 57, 52), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((82, 26, 82, 52), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((182, 112, 182, 138), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((268, 23, 268, 31), 'redash.utils.utcnow', 'utcnow', ({}, {}), '()', False, 'from redash.utils import gen_query_hash, utcnow\n'), ((268, 34, 268, 60), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((282, 28, 282, 36), 'redash.utils.utcnow', 'utcnow', ({}, {}), '()', False, 'from redash.utils import gen_query_hash, utcnow\n'), ((282, 39, 282, 65), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((97, 103, 97, 111), 'redash.utils.utcnow', 'utcnow', ({}, {}), '()', False, 'from redash.utils import gen_query_hash, utcnow\n'), ((116, 101, 116, 109), 'redash.utils.utcnow', 'utcnow', ({}, {}), '()', False, 'from redash.utils import gen_query_hash, utcnow\n')] |
EvanthiosPapadopoulos/Python3 | .history/List of Capstone Projects/FibonacciSequence_20200516134123.py | ab773fd458e365c1510f98ecac65965234c881e8 | '''
Fibonacci Sequence
'''
import HeaderOfFiles
def fibonacciSeq(number):
'''
    Generate the first `number` terms of the Fibonacci sequence.
'''
a = 1
b = 1
for i in range(number):
yield a
a,b = b,a+b
while True:
try:
f = int(input("Enter a number for Fibonacci: "))
break
except:
print("Give me a number please!")
# consume the generator so the requested sequence is actually printed
for value in fibonacciSeq(f):
    print(value)
| [] |
jacobfulano/composer | composer/algorithms/mixup/__init__.py | 4ad81df2d2ca6e5f0b4922bb2db750cd76ba34e8 | # Copyright 2021 MosaicML. All Rights Reserved.
from composer.algorithms.mixup.mixup import MixUp as MixUp
from composer.algorithms.mixup.mixup import MixUpHparams as MixUpHparams
from composer.algorithms.mixup.mixup import mixup_batch as mixup_batch
_name = 'MixUp'
_class_name = 'MixUp'
_functional = 'mixup_batch'
_tldr = 'Blends pairs of examples and labels'
_attribution = '(Zhang et al, 2017)'
_link = 'https://arxiv.org/abs/1710.09412'
_method_card = ''
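# A rough sketch of the idea summarized in ``_tldr`` above (Zhang et al., 2017),
# written as a comment only -- refer to the imported ``mixup_batch`` for the
# library's actual API and argument names:
#
#     lam = numpy.random.beta(alpha, alpha)      # interpolation weight
#     x_mixed = lam * x_a + (1 - lam) * x_b      # blend a pair of examples
#     y_mixed = lam * y_a + (1 - lam) * y_b      # blend their (one-hot) labels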
| [] |
alanpeixinho/NiftyNet | tests/simple_gan_test.py | 9a17022a71985974f9e5ca992c765d55860fdd7d | from __future__ import absolute_import, print_function
import unittest
import os
import tensorflow as tf
from tensorflow.keras import regularizers
from niftynet.network.simple_gan import SimpleGAN
from tests.niftynet_testcase import NiftyNetTestCase
class SimpleGANTest(NiftyNetTestCase):
def test_3d_reg_shape(self):
input_shape = (2, 32, 32, 32, 1)
noise_shape = (2, 512)
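        # a batch of 2 single-channel 32x32x32 volumes plus a 512-dimensional
        # noise vector per sample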
x = tf.ones(input_shape)
r = tf.ones(noise_shape)
simple_gan_instance = SimpleGAN()
out = simple_gan_instance(r, x, is_training=True)
with self.cached_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
out = sess.run(out)
self.assertAllClose(input_shape, out[0].shape)
self.assertAllClose((2, 1), out[1].shape)
self.assertAllClose((2, 1), out[2].shape)
def test_2d_reg_shape(self):
input_shape = (2, 64, 64, 1)
noise_shape = (2, 512)
x = tf.ones(input_shape)
r = tf.ones(noise_shape)
simple_gan_instance = SimpleGAN()
out = simple_gan_instance(r, x, is_training=True)
with self.cached_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
out = sess.run(out)
self.assertAllClose(input_shape, out[0].shape)
self.assertAllClose((2, 1), out[1].shape)
self.assertAllClose((2, 1), out[2].shape)
if __name__ == "__main__":
tf.test.main()
| [((48, 4, 48, 18), 'tensorflow.test.main', 'tf.test.main', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((16, 12, 16, 32), 'tensorflow.ones', 'tf.ones', ({(16, 20, 16, 31): 'input_shape'}, {}), '(input_shape)', True, 'import tensorflow as tf\n'), ((17, 12, 17, 32), 'tensorflow.ones', 'tf.ones', ({(17, 20, 17, 31): 'noise_shape'}, {}), '(noise_shape)', True, 'import tensorflow as tf\n'), ((19, 30, 19, 41), 'niftynet.network.simple_gan.SimpleGAN', 'SimpleGAN', ({}, {}), '()', False, 'from niftynet.network.simple_gan import SimpleGAN\n'), ((32, 12, 32, 32), 'tensorflow.ones', 'tf.ones', ({(32, 20, 32, 31): 'input_shape'}, {}), '(input_shape)', True, 'import tensorflow as tf\n'), ((33, 12, 33, 32), 'tensorflow.ones', 'tf.ones', ({(33, 20, 33, 31): 'noise_shape'}, {}), '(noise_shape)', True, 'import tensorflow as tf\n'), ((35, 30, 35, 41), 'niftynet.network.simple_gan.SimpleGAN', 'SimpleGAN', ({}, {}), '()', False, 'from niftynet.network.simple_gan import SimpleGAN\n'), ((23, 21, 23, 64), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((39, 21, 39, 64), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ({}, {}), '()', True, 'import tensorflow as tf\n')] |
N4S4/thingspeak_wrapper | tests/test.py | f5c26e52c09124b85cc6056782d766d145e65a31 | import time
import thingspeak_wrapper as tsw
# Instantiate the class ThingWrapper with (CHANNEL_ID, WRITE_API_KEY, READ_API_KEY)
# if it is a public channel, just pass the CHANNEL_ID argument; the api_key arguments default to None
my_channel = tsw.wrapper.ThingWrapper(501309, '6TQDNWJQ44FA0GAQ', '10EVD2N6YIHI5O7Z')
# all set of functions are:
# my_channel.sender()
# my_channel.multiple_sender()
# my_channel.get_json_feeds()
# my_channel.get_json_feeds_from()
# my_channel.get_xml_feeds()
# my_channel.get_xml_feeds_from()
# my_channel.get_csv_feeds()
# my_channel.get_csv_feeds_from()
# ---------------------------
# Now you can use all the possible functions
# Send a value to a single field
my_channel.sender(1, 4)
# this delay is due to a limitation of the ThingSpeak free account, which allows posting data at most once every 15 seconds
time.sleep(15)
# ---------------------------
# Send data to multiple fields
# It takes 2 inputs as lists ([..], [..])
# Create lists of fields and values
fields = [1, 2, 3]
values = [22.0, 1029, 700]
# pass them to the function
my_channel.multiple_sender(fields, values)
# ---------------------------
# The get-data functions return data as JSON, XML or CSV
# optionally, CSV can be returned as a pandas DataFrame
# pass arguments to the function (field, data_quantity)
# default values are (fields='feeds', results_quantity=None)
# with the defaults you will get all fields and all values (max 8000)
json_field1 = my_channel.get_json_feeds(1, 300)
print(json_field1)
# get xml data pass same values as previous function
xml_field1 = my_channel.get_xml_feeds(1, 300)
print(xml_field1)
# get csv data
# this function requires to specify (field, pandas_format=True, result_quantity=None)
# defaults are (fields='feeds', pandas_format=True, result_quantity=None)
csv_field1 = my_channel.get_csv_feeds(1, pandas_format=True,
results_quantity=300)
print(csv_field1)
# data without pandas_format
csv_no_pandas = my_channel.get_csv_feeds(1, pandas_format=False,
results_quantity=300)
print(csv_no_pandas)
# it is also possible to request data between specific dates
# set date and time as strings: YYYY-MM-DD HH:MM:SS
start_date, start_time = '2018-05-21', '12:00:00'
stop_date, stop_time = '2018-05-21', '23:59:59'
# pass values to the function
# defaults are (start_date, start_time, stop_date=None, stop_time=None, fields='feeds')
values_from_date = my_channel.get_json_feeds_from(start_date, start_time, stop_date, stop_time, 1)
print(values_from_date)
| [((7, 13, 7, 85), 'thingspeak_wrapper.wrapper.ThingWrapper', 'tsw.wrapper.ThingWrapper', ({(7, 38, 7, 44): '501309', (7, 46, 7, 64): '"""6TQDNWJQ44FA0GAQ"""', (7, 66, 7, 84): '"""10EVD2N6YIHI5O7Z"""'}, {}), "(501309, '6TQDNWJQ44FA0GAQ', '10EVD2N6YIHI5O7Z')", True, 'import thingspeak_wrapper as tsw\n'), ((26, 0, 26, 14), 'time.sleep', 'time.sleep', ({(26, 11, 26, 13): '(15)'}, {}), '(15)', False, 'import time\n')] |
neptune-ai/neptune-contrib | neptunecontrib/monitoring/skopt.py | fe5c6853128020aaaa59b440cc5203b940dcd39a | #
# Copyright (c) 2019, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
import matplotlib.pyplot as plt
import neptune
import numpy as np
import skopt.plots as sk_plots
from skopt.utils import dump
from neptunecontrib.monitoring.utils import axes2fig, expect_not_a_run
class NeptuneCallback:
"""Logs hyperparameter optimization process to Neptune.
Specifically using NeptuneCallback will log: run metrics and run parameters, best run metrics so far, and
the current results checkpoint.
Examples:
Initialize NeptuneCallback::
import neptune
import neptunecontrib.monitoring.skopt as sk_utils
neptune.init(api_token='ANONYMOUS',
project_qualified_name='shared/showroom')
neptune.create_experiment(name='optuna sweep')
neptune_callback = sk_utils.NeptuneCallback()
Run skopt training passing neptune_callback as a callback::
...
results = skopt.forest_minimize(objective, space, callback=[neptune_callback],
base_estimator='ET', n_calls=100, n_random_starts=10)
You can explore an example experiment in Neptune:
https://ui.neptune.ai/o/shared/org/showroom/e/SHOW-1065/logs
"""
def __init__(self, experiment=None, log_checkpoint=True):
self._exp = experiment if experiment else neptune
expect_not_a_run(self._exp)
self.log_checkpoint = log_checkpoint
self._iteration = 0
def __call__(self, res):
self._exp.log_metric('run_score', x=self._iteration, y=res.func_vals[-1])
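        # skopt minimizes the objective, so the best score so far is the minimum
        # of all objective values observed up to this iteration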
self._exp.log_metric('best_so_far_run_score', x=self._iteration, y=np.min(res.func_vals))
self._exp.log_text('run_parameters', x=self._iteration, y=NeptuneCallback._get_last_params(res))
if self.log_checkpoint:
self._exp.log_artifact(_export_results_object(res), 'results.pkl')
self._iteration += 1
@staticmethod
def _get_last_params(res):
param_vals = res.x_iters[-1]
named_params = _format_to_named_params(param_vals, res)
return str(named_params)
def log_results(results, experiment=None, log_plots=True, log_pickle=True):
"""Logs runs results and parameters to neptune.
Logs all hyperparameter optimization results to Neptune. Those include best score ('best_score' metric),
best parameters ('best_parameters' property), convergence plot ('diagnostics' log),
evaluations plot ('diagnostics' log), and objective plot ('diagnostics' log).
Args:
        results('scipy.optimize.OptimizeResult'): Results object that is typically an output
            of a function like `skopt.forest_minimize(...)`
experiment(`neptune.experiments.Experiment`): Neptune experiment. Default is None.
log_plots: ('bool'): If True skopt plots will be logged to Neptune.
log_pickle: ('bool'): if True pickled skopt results object will be logged to Neptune.
Examples:
Run skopt training::
...
results = skopt.forest_minimize(objective, space,
base_estimator='ET', n_calls=100, n_random_starts=10)
Initialize Neptune::
import neptune
neptune.init(api_token='ANONYMOUS',
project_qualified_name='shared/showroom')
neptune.create_experiment(name='optuna sweep')
Send best parameters to Neptune::
import neptunecontrib.monitoring.skopt as sk_utils
sk_utils.log_results(results)
You can explore an example experiment in Neptune:
https://ui.neptune.ai/o/shared/org/showroom/e/SHOW-1065/logs
"""
_exp = experiment if experiment else neptune
expect_not_a_run(_exp)
_log_best_score(results, _exp)
_log_best_parameters(results, _exp)
if log_plots:
_log_plot_convergence(results, _exp)
_log_plot_evaluations(results, _exp)
_log_plot_regret(results, _exp)
_log_plot_objective(results, _exp)
if log_pickle:
_log_results_object(results, _exp)
def NeptuneMonitor(*args, **kwargs):
message = """NeptuneMonitor was renamed to NeptuneCallback and will be removed in future releases.
"""
warnings.warn(message)
return NeptuneCallback(*args, **kwargs)
def _log_best_parameters(results, experiment):
expect_not_a_run(experiment)
named_params = ([(dimension.name, param) for dimension, param in zip(results.space, results.x)])
experiment.set_property('best_parameters', str(named_params))
def _log_best_score(results, experiment):
experiment.log_metric('best_score', results.fun)
def _log_plot_convergence(results, experiment, name='diagnostics'):
expect_not_a_run(experiment)
fig, ax = plt.subplots()
sk_plots.plot_convergence(results, ax=ax)
experiment.log_image(name, fig)
def _log_plot_regret(results, experiment, name='diagnostics'):
expect_not_a_run(experiment)
fig, ax = plt.subplots()
sk_plots.plot_regret(results, ax=ax)
experiment.log_image(name, fig)
def _log_plot_evaluations(results, experiment, name='diagnostics'):
expect_not_a_run(experiment)
fig = plt.figure(figsize=(16, 12))
fig = axes2fig(sk_plots.plot_evaluations(results, bins=10), fig=fig)
experiment.log_image(name, fig)
def _log_plot_objective(results, experiment, name='diagnostics'):
try:
expect_not_a_run(experiment)
fig = plt.figure(figsize=(16, 12))
fig = axes2fig(sk_plots.plot_objective(results), fig=fig)
experiment.log_image(name, fig)
except Exception as e:
print('Could not create the objective chart due to error: {}'.format(e))
def _log_results_object(results, experiment=None):
expect_not_a_run(experiment)
experiment.log_artifact(_export_results_object(results), 'results.pkl')
def _export_results_object(results):
from io import BytesIO
results.specs['args'].pop('callback', None)
buffer = BytesIO()
dump(results, buffer, store_objective=False)
buffer.seek(0)
return buffer
def _format_to_named_params(params, result):
return [(dimension.name, param) for dimension, param in zip(result.space, params)]
aiddenkeli/Snoopy | snoopy/server/transforms/Maltego.py | dd76180145981b3574b419edce39dbb060bd8c8c
#!/usr/bin/python
#
# This might be horrible code...
# ...but it works
# Feel free to re-write in a better way
# And if you want to - send it to us, we'll update ;)
# [email protected] (2010/10/18)
#
import sys
from xml.dom import minidom
class MaltegoEntity(object):
value = "";
weight = 100;
displayInformation = "";
additionalFields = [];
iconURL = "";
entityType = "Phrase"
def __init__(self,eT=None,v=None):
if (eT is not None):
self.entityType = eT;
if (v is not None):
self.value = v;
self.additionalFields = None;
self.additionalFields = [];
self.weight = 100;
self.displayInformation = "";
self.iconURL = "";
def setType(self,eT=None):
if (eT is not None):
self.entityType = eT;
def setValue(self,eV=None):
if (eV is not None):
self.value = eV;
def setWeight(self,w=None):
if (w is not None):
self.weight = w;
def setDisplayInformation(self,di=None):
if (di is not None):
self.displayInformation = di;
def addAdditionalFields(self,fieldName=None,displayName=None,matchingRule=False,value=None):
self.additionalFields.append([fieldName,displayName,matchingRule,value]);
def setIconURL(self,iU=None):
if (iU is not None):
self.iconURL = iU;
def returnEntity(self):
print "<Entity Type=\"" + str(self.entityType) + "\">";
print "<Value>" + str(self.value) + "</Value>";
print "<Weight>" + str(self.weight) + "</Weight>";
if (self.displayInformation is not None):
print "<DisplayInformation><Label Name=\"\" Type=\"text/html\"><![CDATA[" + str(self.displayInformation) + "]]></Label></DisplayInformation>";
if (len(self.additionalFields) > 0):
print "<AdditionalFields>";
for i in range(len(self.additionalFields)):
                if (str(self.additionalFields[i][2]) != "strict"):
print "<Field Name=\"" + str(self.additionalFields[i][0]) + "\" DisplayName=\"" + str(self.additionalFields[i][1]) + "\">" + str(self.additionalFields[i][3]) + "</Field>";
else:
print "<Field MatchingRule=\"" + str(self.additionalFields[i][2]) + "\" Name=\"" + str(self.additionalFields[i][0]) + "\" DisplayName=\"" + str(self.additionalFields[i][1]) + "\">" + str(self.additionalFields[i][3]) + "</Field>";
print "</AdditionalFields>";
if (len(self.iconURL) > 0):
print "<IconURL>" + self.iconURL + "</IconURL>";
print "</Entity>";
class MaltegoTransform(object):
entities = []
exceptions = []
UIMessages = []
#def __init__(self):
#empty.
def addEntity(self,enType,enValue):
me = MaltegoEntity(enType,enValue);
self.addEntityToMessage(me);
return self.entities[len(self.entities)-1];
def addEntityToMessage(self,maltegoEntity):
self.entities.append(maltegoEntity);
def addUIMessage(self,message,messageType="Inform"):
self.UIMessages.append([messageType,message]);
def addException(self,exceptionString):
self.exceptions.append(exceptionString);
def throwExceptions(self):
print "<MaltegoMessage>";
print "<MaltegoTransformExceptionMessage>";
print "<Exceptions>"
for i in range(len(self.exceptions)):
print "<Exception>" + self.exceptions[i] + "</Exceptions>";
print "</Exceptions>"
print "</MaltegoTransformExceptionMessage>";
print "</MaltegoMessage>";
def returnOutput(self):
print "<MaltegoMessage>";
print "<MaltegoTransformResponseMessage>";
print "<Entities>"
for i in range(len(self.entities)):
self.entities[i].returnEntity();
print "</Entities>"
print "<UIMessages>"
for i in range(len(self.UIMessages)):
print "<UIMessage MessageType=\"" + self.UIMessages[i][0] + "\">" + self.UIMessages[i][1] + "</UIMessage>";
print "</UIMessages>"
print "</MaltegoTransformResponseMessage>";
print "</MaltegoMessage>";
def writeSTDERR(self,msg):
sys.stderr.write(str(msg));
def heartbeat(self):
self.writeSTDERR("+");
def progress(self,percent):
self.writeSTDERR("%" + str(percent));
def debug(self,msg):
self.writeSTDERR("D:" + str(msg));
class MaltegoMsg:
def __init__(self,MaltegoXML=""):
xmldoc = minidom.parseString(MaltegoXML)
#read the easy stuff like value, limits etc
self.Value = self.i_getNodeValue(xmldoc,"Value")
self.Weight = self.i_getNodeValue(xmldoc,"Weight")
self.Slider = self.i_getNodeAttributeValue(xmldoc,"Limits","SoftLimit")
self.Type = self.i_getNodeAttributeValue(xmldoc,"Entity","Type")
#read additional fields
AdditionalFields = {}
try:
AFNodes= xmldoc.getElementsByTagName("AdditionalFields")[0]
Settings = AFNodes.getElementsByTagName("Field")
for node in Settings:
AFName = node.attributes["Name"].value;
AFValue = self.i_getText(node.childNodes);
AdditionalFields[AFName] = AFValue
except:
#sure this is not the right way...;)
dontcare=1
#parse transform settings
TransformSettings = {}
try:
TSNodes= xmldoc.getElementsByTagName("TransformFields")[0]
Settings = TSNodes.getElementsByTagName("Field")
for node in Settings:
TSName = node.attributes["Name"].value;
TSValue = self.i_getText(node.childNodes);
TransformSettings[TSName] = TSValue
except:
dontcare=1
#load back into object
self.AdditionalFields = AdditionalFields
self.TransformSettings = TransformSettings
def i_getText(self,nodelist):
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
def i_getNodeValue(self,node,Tag):
return self.i_getText(node.getElementsByTagName(Tag)[0].childNodes)
def i_getNodeAttributeValue(self,node,Tag,Attribute):
return node.getElementsByTagName(Tag)[0].attributes[Attribute].value;
sfdc-qbranch/MetaDeploy | metadeploy/api/migrations/0050_add_clickthrough_agreement.py | d22547b3814dbec6aefa4d86b9f81c6f175c1b67
# Generated by Django 2.1.5 on 2019-02-12 21:18
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("api", "0049_add_all_other_translations")]
operations = [
migrations.CreateModel(
name="ClickThroughAgreement",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("text", models.TextField()),
],
),
migrations.AddField(
model_name="job",
name="click_through_agreement",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.PROTECT,
to="api.ClickThroughAgreement",
),
),
]
dfdan/invenio-iiif | invenio_iiif/config.py | 2ea2747fd29ab03b1d38e0ca6d2a9c1506aa8cbc
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""IIIF API for Invenio."""
IIIF_API_PREFIX = '/iiif/'
"""URL prefix to IIIF API."""
IIIF_UI_URL = '/api{}'.format(IIIF_API_PREFIX)
"""URL to IIIF API endpoint (allow hostname)."""
IIIF_PREVIEWER_PARAMS = {
'size': '750,'
}
"""Parameters for IIIF image previewer extension."""
IIIF_PREVIEW_TEMPLATE = 'invenio_iiif/preview.html'
"""Template for IIIF image preview."""
IIIF_API_DECORATOR_HANDLER = 'invenio_iiif.handlers:protect_api'
"""Image opener handler decorator."""
IIIF_IMAGE_OPENER_HANDLER = 'invenio_iiif.handlers:image_opener'
"""Image opener handler function."""
mconlon17/vivo-pub-ingest | pub_ingest.py | 7c03ecdd6dc5418121a6b92de1572d1cc63f5cb5
#!/usr/bin/env python
"""
pub_ingest.py -- Read a bibtex file and make VIVO RDF
The following objects will be made as needed:
-- publisher
-- journal
-- information resource
-- timestamp for the information resource
-- people
-- authorships
-- concepts
The resulting ADD and SUB RDF file can then be read into VIVO
To Do
    -- Complete refactor as an update process. Create reusable parts so that
a publication can be created from bibtex, doi or pmid
-- Improve DateTimeValue accuracy. Currently all publications are entered
as yearMonth precision. Sometimes we have more information, sometimes
we have less. We should use the information as presented by the
publisher, not overstate (yearMonth when there is only year) and not
understate (yearMonth when we know the day).
-- Reuse date objects -- only create dates when the appropriate date entity
is not already in VIVO
-- Update for VIVO-ISF
-- Update or vivofoundation and vivopubs
"""
__author__ = "Michael Conlon"
__copyright__ = "Copyright 2014, University of Florida"
__license__ = "BSD 3-Clause license"
__version__ = "1.3"
import sys
from datetime import datetime, date
from pybtex.database.input import bibtex
import tempita
import vivotools
MAX_AUTHORS = 50
publisher_report = {}
journal_report = {}
title_report = {}
author_report = {}
disambiguation_report = {}
dictionaries = []
journal_dictionary = {}
publisher_dictionary = {}
title_dictionary = {}
def open_files(bibtex_file_name):
"""
    Given the name of the bibtex file to be used as input, generate the file
    names for rdf, rpt and lst. Return the open file handles
"""
base = bibtex_file_name[:bibtex_file_name.find('.')]
rpt_file = open(base+'.rpt', 'w')
lst_file = open(base+'.lst', 'w')
rdf_file = open(base+'.rdf', 'w')
return [rdf_file, rpt_file, lst_file]
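# Illustrative example (not part of the original script; file names are made
# up): running "python pub_ingest.py pubs.bib" makes open_files() return
# handles for "pubs.rdf" (the RDF to load into VIVO), "pubs.rpt" (the reports
# written at the end) and "pubs.lst" (a listing of the publications processed).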
def update_disambiguation_report(authors, publication_uri):
"""
    Given the authors structure and the publication_uri, add to the report
if any of the authors need to be disambiguated
"""
for value in authors.values():
if value[8] == "Disambig":
if publication_uri in disambiguation_report:
result = disambiguation_report[publication_uri]
result[len(result.keys())+1] = value
disambiguation_report[publication_uri] = result
else:
disambiguation_report[publication_uri] = {1:value}
return
# start here. Create a parser for bibtex and use it to read the file of
# bibtex entries. open the output files
print datetime.now(), "Read the BibTex"
bibtex_file_name = sys.argv[1]
[rdf_file, rpt_file, lst_file] = open_files(bibtex_file_name)
parser = bibtex.Parser()
bib_data = parser.parse_file(bibtex_file_name)
bib_sorted = sorted(bib_data.entries.items(),
key=lambda x: x[1].fields['title'])
print >>rdf_file, "<!--", len(bib_data.entries.keys()),\
"publications to be processed -->"
print datetime.now(), len(bib_data.entries.keys()),\
"publications to be processed."
# make dictionaries for people, papers, publishers, journals, concepts
print datetime.now(), "Creating the dictionaries"
print datetime.now(), "Publishers"
publisher_dictionary = vivotools.make_publisher_dictionary()
print datetime.now(), "Journals"
journal_dictionary = vivotools.make_journal_dictionary()
print datetime.now(), "People"
dictionaries = make_people_dictionaries()
print datetime.now(), "Titles"
title_dictionary = vivotools.make_title_dictionary()
print datetime.now(), "Concepts"
vivotools.make_concept_dictionary()
# process the papers
print >>rdf_file, vivotools.rdf_header()
for key, value in bib_sorted:
try:
title = value.fields['title'].title() + " "
except:
title_report["No title"] = ["No Title", None, 1]
print >>rdf_file, "<!-- No title found. No RDF necessary -->"
continue
title = abbrev_to_words(title)
title = title[0:-1]
if title in title_report:
print >>rdf_file, "<!-- Title", title,\
"handled previously. No RDF necessary -->"
title_report[title][2] = title_report[title][2] + 1
continue
else:
print >>rdf_file, "<!-- Begin RDF for " + title + " -->"
print datetime.now(), "<!-- Begin RDF for " + title + " -->"
document = {}
document['title'] = title
title_report[title] = ["Start", None, 1]
[found, uri] = vivotools.find_title(title, title_dictionary)
if not found:
title_report[title][0] = "Create" # Create
# Authors
[author_rdf, authors] = make_author_rdf(value)
document['authors'] = make_document_authors(authors)
if count_uf_authors(authors) == 0:
print >>rdf_file, "<!-- End RDF. No UF authors for " +\
title + " No RDF necessary -->"
title_report[title][0] = "No UF Auth"
continue
update_author_report(authors)
# Datetime
[datetime_rdf, datetime_uri] = make_datetime_rdf(value, title)
            # Journal name/URI and publisher
[journal_create, journal_name, journal_uri] =\
make_journal_uri(value)
[publisher_create, publisher, publisher_uri, publisher_rdf] =\
make_publisher_rdf(value)
# Journal
[journal_rdf, journal_uri] = make_journal_rdf(value,\
journal_create, journal_name, journal_uri)
# Publisher/Journal bi-directional links
publisher_journal_rdf = ""
if journal_uri != "" and publisher_uri != "" and\
(journal_create or publisher_create):
publisher_journal_rdf = \
make_publisher_journal_rdf(publisher_uri, journal_uri)
# Authorships
publication_uri = vivotools.get_vivo_uri()
title_report[title][1] = publication_uri
[authorship_rdf, authorship_uris] = make_authorship_rdf(authors,\
publication_uri)
# AuthorInAuthorships
author_in_authorship_rdf = make_author_in_authorship_rdf(authors,\
authorship_uris)
            # Journal/Publication bi-directional links
            journal_publication_rdf = ""
            if journal_uri != "" and publication_uri != "":
                journal_publication_rdf = \
                    make_journal_publication_rdf(journal_uri, publication_uri)
# PubMed values
pubmed_rdf = ""
if 'doi' in value.fields:
[pubmed_rdf, sub] = vivotools.update_pubmed(publication_uri,\
value.fields['doi'])
if sub != "":
raise Exception("Non empty subtraction RDF"+\
"for Update PubMed")
# Publication
publication_rdf = make_publication_rdf(value,\
title,publication_uri,datetime_uri,authorship_uris)
print >>rdf_file, datetime_rdf, publisher_rdf, journal_rdf,\
publisher_journal_rdf, author_rdf, authorship_rdf,\
author_in_authorship_rdf, journal_publication_rdf,\
publication_rdf, pubmed_rdf
print >>rdf_file, "<!-- End RDF for " + title + " -->"
print >>lst_file, vivotools.string_from_document(document),\
'VIVO uri', publication_uri, '\n'
update_disambiguation_report(authors, publication_uri)
else:
title_report[title][0] = "Found"
title_report[title][1] = uri
print >>rdf_file, "<!-- Found: " + title + " No RDF necessary -->"
print >>rdf_file, vivotools.rdf_footer()
#
# Reports
#
print >>rpt_file,"""
Publisher Report
Lists the publishers that appear in the bibtex file in alphabetical order. For
each publisher, show the improved name, the number of papers in journals of this publisher,
the action to be taken for the publisher and the VIVO URI -- the URI is the new
URI to be created if Action is Create, otherwise it is the URI of the found publisher
in VIVO.
Publisher Papers Action VIVO URI
---------------------------------------------------------------------------------"""
publisher_count = 0
actions = {}
for publisher in sorted(publisher_report.keys()):
publisher_count = publisher_count + 1
[create,uri,count] = publisher_report[publisher]
if create:
result = "Create"
else:
result = "Found "
actions[result] = actions.get(result,0) + 1
print >>rpt_file, "{0:40}".format(publisher[0:40]),"{0:>3}".format(count),result,uri
print >>rpt_file,""
print >>rpt_file, "Publisher count by action"
print >>rpt_file, ""
for action in sorted(actions):
print >>rpt_file, action,actions[action]
print >>rpt_file, publisher_count,"publisher(s)"
print >>rpt_file, """
Journal Report
Lists the journals that appear in the bibtex file in alphabetical order. For
each journal, show the improved name, the number of papers to be linked to the journal,
the action to be taken for the journal and the VIVO URI -- the URI is the new
URI to be created if Action is Create, otherwise it is the URI of the found journal
in VIVO.
Journal Papers Action VIVO URI
---------------------------------------------------------------------------------"""
journal_count = 0
actions = {}
for journal in sorted(journal_report.keys()):
journal_count = journal_count + 1
[create,uri,count] = journal_report[journal]
if create:
result = "Create"
else:
result = "Found "
actions[result] = actions.get(result,0) + 1
print >>rpt_file, "{0:40}".format(journal[0:40]),"{0:>3}".format(count),result,uri
print >>rpt_file, ""
print >>rpt_file, "Journal count by action"
print >>rpt_file, ""
for action in sorted(actions):
print >>rpt_file, action,actions[action]
print >>rpt_file, journal_count,"journal(s)"
print >>rpt_file, """
Title Report
Lists the titles that appear in the bibtex file in alphabetical order. For
each title, show the action to be taken, the number of times the title appears in
the bibtex, the improved title and the VIVO URI of the publication -- the URI is the new
URI to be created if action is Create, otherwise it is the URI of the found publication
in VIVO.
Action # Title and VIVO URI
---------------------------------------------------------------------------------"""
title_count = 0
actions = {}
for title in sorted(title_report.keys()):
title_count = title_count +1
[action,uri,count] = title_report[title]
actions[action] = actions.get(action,0) + 1
print >>rpt_file, "{0:>10}".format(action),title,uri
print >>rpt_file, ""
print >>rpt_file, "Title count by action"
print >>rpt_file, ""
for action in sorted(actions):
print >>rpt_file, action,actions[action]
print >>rpt_file, title_count,"title(s)"
print >>rpt_file, """
Author Report
For each author found in the bibtex file, show the author's name followed by the number of papers
for the author in the bibtex to be entered, followed by
a pair of results for each time the author appears on a paper in the bibtex. The result
pair contains an action and a URI. The action is "non UF" if a non-UF author stub will be
be created, the URI is the URI of the new author stub. Action "Make UF" if a new UF author
stub will be created with the URI of the new author stub. "Found UF" indicates the author was
found at the URI. "Disambig" if multiple UF people were found with the given name. The URI
is the URI of one of the found people. Follow-up is needed to determine if correct and
reassign author if not correct.
Author Action URI Action URI
----------------------------------------------------------------------------------------------"""
author_count = 0
actions = {}
for author in sorted(author_report.keys()):
author_count = author_count + 1
results = ""
papers = len(author_report[author])
action = author_report[author][1][8] # 1st report, 8th value is action
actions[action] = actions.get(action,0) + 1
for key in author_report[author].keys():
value = author_report[author][key]
results = results + value[8] + " " + "{0:45}".format(value[9])
print >>rpt_file, "{0:25}".format(author),"{0:>3}".format(papers),results
print >>rpt_file, ""
print >>rpt_file, "Author count by action"
print >>rpt_file, ""
for action in sorted(actions):
print >>rpt_file, action,actions[action]
print >>rpt_file, author_count,"authors(s)"
print >>rpt_file, """
Disambiguation Report
For each publication with one or more authors to disambiguate, list the paper, and
then the authors in question with each of the possible URIs to be disambiguated, show the URI
of the paper, and then for each author that needs to be disambiguated on the paper, show
the last name, first name and middle initial and the all the URIs in VIVO for UF persons
with the same names.
"""
for uri in disambiguation_report.keys():
print >>rpt_file,"The publication at",uri,"has one or more authors in question"
for key,value in disambiguation_report[uri].items():
uris = value[9].split(";")
print >>rpt_file," ",value[4],value[5],value[6],":"
for u in uris:
person = vivotools.get_person(u)
if 'last_name' not in person:
person['last_name'] = "No last name"
if 'middle_name' not in person:
person['middle_name'] = "No middle name"
if 'first_name' not in person:
person['first_name'] = "No first name"
if 'home_department_name' not in person:
person['home_department_name'] = "No home department"
npubs = len(person['authorship_uris'])
print >>rpt_file," ",u,person['last_name'], \
person['first_name'],person['middle_name'], \
person['home_department_name'],"Number of pubs = ",npubs
print >>rpt_file
print >>rpt_file
#
# Close the files, we're done
#
rpt_file.close()
rdf_file.close()
lst_file.close()
u-blox/ubxlib | port/platform/common/automation/u_utils.py | 4dc1b16e6f12354b601cb1c9d799c10f4e2afb54
#!/usr/bin/env python
'''Generally useful bits and bobs.'''
import queue # For PrintThread and exe_run
from time import sleep, time, gmtime, strftime # For lock timeout, exe_run timeout and logging
from multiprocessing import RLock
from copy import copy
import threading # For PrintThread
import sys
import os # For ChangeDir, has_admin
import stat # To help deltree out
from collections import deque # For storing a window of debug
from telnetlib import Telnet # For talking to JLink server
import socket
import shutil # To delete a directory tree
import signal # For CTRL_C_EVENT
import subprocess
import platform # Figure out current OS
import re # Regular Expression
import serial # Pyserial (make sure to do pip install pyserial)
import psutil # For killing things (make sure to do pip install psutil)
import requests # For HTTP comms with a KMTronic box (do pip install requests)
import u_settings
# Since this function is used by the global variables below it needs
# to be placed here.
def is_linux():
'''Returns True when system is Linux'''
return platform.system() == 'Linux'
# Since this function is used by the global variables below it needs
# to be placed here.
def pick_by_os(linux=None, other=None):
'''
This is a convenience function for selecting a value based on platform.
As an example the line below will print out "Linux" when running on a
Linux platform and "Not Linux" when running on some other platform:
print( u_utils.pick_by_os(linux="Linux", other="Not Linux") )
'''
if is_linux():
return linux
return other
# The port that this agent service runs on
# Deliberately NOT a setting, we need to be sure
# everyone uses the same value
AGENT_SERVICE_PORT = 17003
# The maximum number of characters that an agent will
# use from controller_name when constructing a directory
# name for a ubxlib branch to be checked out into
AGENT_WORKING_SUBDIR_CONTROLLER_NAME_MAX_LENGTH = 4
# How long to wait for an install lock in seconds
INSTALL_LOCK_WAIT_SECONDS = u_settings.INSTALL_LOCK_WAIT_SECONDS #(60 * 60)
# The URL for Unity, the unit test framework
UNITY_URL = u_settings.UNITY_URL #"https://github.com/ThrowTheSwitch/Unity"
# The sub-directory that Unity is usually put in
# (off the working directory)
UNITY_SUBDIR = u_settings.UNITY_SUBDIR #"Unity"
# The path to DevCon, a Windows tool that allows
# USB devices to be reset, amongst other things
DEVCON_PATH = u_settings.DEVCON_PATH #"devcon.exe"
# The path to jlink.exe (or just the name 'cos it's on the path)
JLINK_PATH = u_settings.JLINK_PATH #"jlink.exe"
# The port number for SWO trace capture out of JLink
JLINK_SWO_PORT = u_settings.JLINK_SWO_PORT #19021
# The port number for GDB control of ST-LINK GDB server
STLINK_GDB_PORT = u_settings.STLINK_GDB_PORT #61200
# The port number for SWO trace capture out of ST-LINK GDB server
STLINK_SWO_PORT = u_settings.STLINK_SWO_PORT #61300
# The format string passed to strftime()
# for logging prints
TIME_FORMAT = u_settings.TIME_FORMAT #"%Y-%m-%d_%H:%M:%S"
# The default guard time waiting for a platform lock in seconds
PLATFORM_LOCK_GUARD_TIME_SECONDS = u_settings.PLATFORM_LOCK_GUARD_TIME_SECONDS #60 * 60
# The default guard time for downloading to a target in seconds
DOWNLOAD_GUARD_TIME_SECONDS = u_settings.DOWNLOAD_GUARD_TIME_SECONDS #60
# The default guard time for running tests in seconds
RUN_GUARD_TIME_SECONDS = u_settings.RUN_GUARD_TIME_SECONDS #60 * 60
# The default inactivity timer for running tests in seconds
RUN_INACTIVITY_TIME_SECONDS = u_settings.RUN_INACTIVITY_TIME_SECONDS #60 * 5
# The name of the #define that forms the filter string
# for which tests to run
FILTER_MACRO_NAME = u_settings.FILTER_MACRO_NAME #"U_CFG_APP_FILTER"
# The name of the environment variable that indicates we're running under automation
ENV_UBXLIB_AUTO = "U_UBXLIB_AUTO"
# The time for which to wait for something from the
# queue in exe_run(). If this is too short, in a
# multiprocessing world or on a slow machine, it is
# possible to miss things as the task putting things
# on the queue may be blocked from doing so until
# we've decided the queue has been completely emptied
# and moved on
EXE_RUN_QUEUE_WAIT_SECONDS = u_settings.EXE_RUN_QUEUE_WAIT_SECONDS #1
# The number of seconds a USB cutter and the bit positions of
# a KMTronic box are switched off for
HW_RESET_DURATION_SECONDS = u_settings.HW_RESET_DURATION_SECONDS # e.g. 5
# Executable file extension. This will be "" for Linux
# and ".exe" for Windows
EXE_EXT = pick_by_os(linux="", other=".exe")
def keep_going(flag, printer=None, prompt=None):
'''Check a keep_going flag'''
do_not_stop = True
if flag is not None and not flag.is_set():
do_not_stop = False
if printer and prompt:
printer.string("{}aborting as requested.".format(prompt))
return do_not_stop
# subprocess arguments behaves a little differently on Linux and Windows
# depending if a shell is used or not, which can be read here:
# https://stackoverflow.com/a/15109975
# This function will compensate for these deviations
def subprocess_osify(cmd, shell=True):
''' expects an array of strings being [command, param, ...] '''
if is_linux() and shell:
line = ''
for item in cmd:
# Put everything in a single string and quote args containing spaces
if ' ' in item:
line += '\"{}\" '.format(item)
else:
line += '{} '.format(item)
cmd = line
return cmd
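# Illustrative example (not part of the original module): on Linux with
# shell=True, subprocess_osify() flattens the list into a single string,
# quoting any argument that contains a space, e.g.:
#
# subprocess_osify(["git", "commit", "-m", "fix the build"])
# # -> 'git commit -m "fix the build" '
#
# On Windows, or when shell=False, the list is returned unchanged.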
def split_command_line_args(cmd_line):
''' Will split a command line string into a list of arguments.
Quoted arguments will be preserved as one argument '''
return [p for p in re.split("( |\\\".*?\\\"|'.*?')", cmd_line) if p.strip()]
def get_actual_path(path):
'''Given a drive number return real path if it is a subst'''
actual_path = path
if is_linux():
return actual_path
if os.name == 'nt':
# Get a list of substs
text = subprocess.check_output("subst",
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
# Lines should look like this:
# Z:\: => C:\projects\ubxlib_priv
# So, in this example, if we were given z:\blah
# then the actual path should be C:\projects\ubxlib_priv\blah
text = line.decode()
bits = text.rsplit(": => ")
if (len(bits) > 1) and (len(path) > 1) and \
(bits[0].lower()[0:2] == path[0:2].lower()):
actual_path = bits[1] + path[2:]
break
return actual_path
def get_instance_text(instance):
'''Return the instance as a text string'''
instance_text = ""
for idx, item in enumerate(instance):
if idx == 0:
instance_text += str(item)
else:
instance_text += "." + str(item)
return instance_text
# Get a list of instances as a text string separated
# by spaces.
def get_instances_text(instances):
'''Return the instances as a text string'''
instances_text = ""
for instance in instances:
if instance:
instances_text += " {}".format(get_instance_text(instance))
return instances_text
def remove_readonly(func, path, exec_info):
'''Help deltree out'''
del exec_info
os.chmod(path, stat.S_IWRITE)
func(path)
def deltree(directory, printer, prompt):
'''Remove an entire directory tree'''
tries = 3
success = False
if os.path.isdir(directory):
# Retry this as sometimes Windows complains
# that the directory is not empty when it
# it really should be, some sort of internal
# Windows race condition
while not success and (tries > 0):
try:
# Need the onerror bit on Winders, see
# this Stack Overflow post:
# https://stackoverflow.com/questions/1889597/deleting-directory-in-python
shutil.rmtree(directory, onerror=remove_readonly)
success = True
except OSError as ex:
if printer and prompt:
printer.string("{}ERROR unable to delete \"{}\" {}: \"{}\"".
format(prompt, directory,
ex.errno, ex.strerror))
sleep(1)
tries -= 1
else:
success = True
return success
# Some list types aren't quite list types: for instance,
# the lists returned by RPyC look like lists but they
# aren't of type list and so "in", for instance, will fail.
# This converts an instance list (i.e. a list-like object
# containing items that are each another list-like object)
# into a plain-old two-level list.
def copy_two_level_list(instances_in):
'''Convert instances_in into a true list'''
instances_out = []
if instances_in:
for item1 in instances_in:
instances_out1 = []
for item2 in item1:
instances_out1.append(item2)
instances_out.append(copy(instances_out1))
return instances_out
# Check if admin privileges are available, from:
# https://stackoverflow.com/questions/2946746/python-checking-if-a-user-has-administrator-privileges
def has_admin():
'''Check for administrator privileges'''
admin = False
if os.name == 'nt':
try:
# only Windows users with admin privileges can read the C:\windows\temp
if os.listdir(os.sep.join([os.environ.get("SystemRoot", "C:\\windows"), "temp"])):
admin = True
except PermissionError:
pass
else:
# Pylint will complain about the following line but
# that's OK, it is only executed if we're NOT on Windows
# and there the geteuid() method will exist
if "SUDO_USER" in os.environ and os.geteuid() == 0:
admin = True
return admin
# Reset a USB port with the given Device Description
def usb_reset(device_description, printer, prompt):
''' Reset a device'''
instance_id = None
found = False
success = False
try:
# Run devcon and parse the output to find the given device
printer.string("{}running {} to look for \"{}\"...". \
format(prompt, DEVCON_PATH, device_description))
cmd = [DEVCON_PATH, "hwids", "=ports"]
text = subprocess.check_output(subprocess_osify(cmd),
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
# The format of a devcon entry is this:
#
# USB\VID_1366&PID_1015&MI_00\6&38E81674&0&0000
# Name: JLink CDC UART Port (COM45)
# Hardware IDs:
# USB\VID_1366&PID_1015&REV_0100&MI_00
# USB\VID_1366&PID_1015&MI_00
# Compatible IDs:
# USB\Class_02&SubClass_02&Prot_00
# USB\Class_02&SubClass_02
# USB\Class_02
#
# Grab what we hope is the instance ID
line = line.decode()
if line.startswith("USB"):
instance_id = line
else:
# If the next line is the Name we want then we're done
if instance_id and ("Name: " + device_description in line):
found = True
printer.string("{}\"{}\" found with instance ID \"{}\"". \
format(prompt, device_description,
instance_id))
break
instance_id = None
if found:
# Now run devcon to reset the device
printer.string("{}running {} to reset device \"{}\"...". \
format(prompt, DEVCON_PATH, instance_id))
cmd = [DEVCON_PATH, "restart", "@" + instance_id]
text = subprocess.check_output(subprocess_osify(cmd),
stderr=subprocess.STDOUT,
shell=False) # Has to be False or devcon won't work
for line in text.splitlines():
printer.string("{}{}".format(prompt, line.decode()))
success = True
else:
printer.string("{}device with description \"{}\" not found.". \
format(prompt, device_description))
except subprocess.CalledProcessError:
printer.string("{} unable to find and reset device.".format(prompt))
return success
# Open the required serial port.
def open_serial(serial_name, speed, printer, prompt):
'''Open serial port'''
serial_handle = None
text = "{}: trying to open \"{}\" as a serial port...". \
format(prompt, serial_name)
try:
        serial_handle = serial.Serial(serial_name, speed, timeout=0.05)
        printer.string("{} opened.".format(text))
    except (ValueError, serial.SerialException) as ex:
        printer.string("{}{} while accessing port {}: {}.".
                       format(prompt, type(ex).__name__,
                              serial_name, str(ex)))
return serial_handle
def open_telnet(port_number, printer, prompt):
'''Open telnet port on localhost'''
telnet_handle = None
text = "{}trying to open \"{}\" as a telnet port on localhost...". \
format(prompt, port_number)
try:
telnet_handle = Telnet("localhost", int(port_number), timeout=5)
if telnet_handle is not None:
printer.string("{} opened.".format(text))
else:
printer.string("{} failed.".format(text))
except (socket.error, socket.timeout, ValueError) as ex:
printer.string("{}{} failed to open telnet {}: {}.".
format(prompt, type(ex).__name__,
port_number, str(ex)))
return telnet_handle
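# Illustrative sketch (not part of the original module): open_telnet() is
# typically pointed at the local JLink SWO port defined near the top of this
# file; "printer" is assumed to be a PrintToQueue instance (see further down).
def _example_open_swo_telnet(printer):
    '''Hypothetical helper showing open_telnet() against the JLink SWO port'''
    telnet_handle = open_telnet(JLINK_SWO_PORT, printer, "example: ")
    if telnet_handle is not None:
        # Read whatever SWO data the JLink server has buffered so far
        swo_data = telnet_handle.read_very_eager()
        telnet_handle.close()
        return swo_data
    return None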
def install_lock_acquire(install_lock, printer, prompt, keep_going_flag=None):
'''Attempt to acquire install lock'''
timeout_seconds = INSTALL_LOCK_WAIT_SECONDS
success = False
if install_lock:
printer.string("{}waiting for install lock...".format(prompt))
while not install_lock.acquire(False) and (timeout_seconds > 0) and \
keep_going(keep_going_flag, printer, prompt):
sleep(1)
timeout_seconds -= 1
if timeout_seconds > 0:
printer.string("{}got install lock.".format(prompt))
success = True
else:
printer.string("{}failed to aquire install lock.".format(prompt))
else:
printer.string("{}warning, there is no install lock.".format(prompt))
return success
def install_lock_release(install_lock, printer, prompt):
'''Release install lock'''
if install_lock:
install_lock.release()
printer.string("{}install lock released.".format(prompt))
def fetch_repo(url, directory, branch, printer, prompt, submodule_init=True, force=False):
'''Fetch a repo: directory can be relative or absolute, branch can be a hash'''
got_code = False
success = False
dir_text = directory
if dir_text == ".":
dir_text = "this directory"
if printer and prompt:
printer.string("{}in directory {}, fetching"
" {} to {}.".format(prompt, os.getcwd(),
url, dir_text))
if not branch:
branch = "master"
if os.path.isdir(directory):
# Update existing code
with ChangeDir(directory):
if printer and prompt:
printer.string("{}updating code in {}...".
format(prompt, dir_text))
target = branch
if branch.startswith("#"):
# Actually been given a branch, lose the
# preceding #
target = branch[1:len(branch)]
# Try this once and, if it fails and force is set,
# do a git reset --hard and try again
tries = 1
if force:
tries += 1
while tries > 0:
try:
call_list = []
call_list.append("git")
call_list.append("fetch")
call_list.append("origin")
call_list.append(target)
if printer and prompt:
text = ""
for item in call_list:
if text:
text += " "
text += item
printer.string("{}in {} calling {}...".
format(prompt, os.getcwd(), text))
# Try to pull the code
text = subprocess.check_output(subprocess_osify(call_list),
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
if printer and prompt:
printer.string("{}{}".format(prompt, line.decode()))
got_code = True
except subprocess.CalledProcessError as error:
if printer and prompt:
printer.string("{}git returned error {}: \"{}\"".
format(prompt, error.returncode,
error.output))
if got_code:
tries = 0
else:
if force:
# git reset --hard
printer.string("{}in directory {} calling git reset --hard...". \
format(prompt, os.getcwd()))
try:
text = subprocess.check_output(subprocess_osify(["git", "reset",
"--hard"]),
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
if printer and prompt:
printer.string("{}{}".format(prompt, line.decode()))
except subprocess.CalledProcessError as error:
if printer and prompt:
printer.string("{}git returned error {}: \"{}\"".
format(prompt, error.returncode,
error.output))
force = False
tries -= 1
if not got_code:
# If we still haven't got the code, delete the
# directory for a true clean start
deltree(directory, printer, prompt)
if not os.path.isdir(directory):
# Clone the repo
if printer and prompt:
printer.string("{}cloning from {} into {}...".
format(prompt, url, dir_text))
try:
text = subprocess.check_output(subprocess_osify(["git", "clone", "-q",
url, directory]),
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
if printer and prompt:
printer.string("{}{}".format(prompt, line.decode()))
got_code = True
except subprocess.CalledProcessError as error:
if printer and prompt:
printer.string("{}git returned error {}: \"{}\"".
format(prompt, error.returncode,
error.output))
if got_code and os.path.isdir(directory):
# Check out the correct branch and recurse submodules
with ChangeDir(directory):
target = "origin/" + branch
if branch.startswith("#"):
# Actually been given a branch, so lose the
# "origin/" and the preceding #
target = branch[1:len(branch)]
if printer and prompt:
printer.string("{}checking out {}...".
format(prompt, target))
try:
call_list = ["git", "-c", "advice.detachedHead=false",
"checkout", "--no-progress"]
if submodule_init:
call_list.append("--recurse-submodules")
printer.string("{}also recursing sub-modules (can take some time" \
" and gives no feedback).".format(prompt))
call_list.append(target)
if printer and prompt:
text = ""
for item in call_list:
if text:
text += " "
text += item
printer.string("{}in {} calling {}...".
format(prompt, os.getcwd(), text))
text = subprocess.check_output(subprocess_osify(call_list),
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
if printer and prompt:
printer.string("{}{}".format(prompt, line.decode()))
success = True
except subprocess.CalledProcessError as error:
if printer and prompt:
printer.string("{}git returned error {}: \"{}\"".
format(prompt, error.returncode,
error.output))
return success
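# Illustrative sketch (not part of the original module): a typical call to
# fetch_repo(), where "printer" is assumed to be a PrintToQueue instance
# (see further down in this file) and the leading "#" marks a branch name
# rather than a commit hash.
def _example_fetch_repo_usage(printer):
    '''Hypothetical helper showing how fetch_repo() is usually called'''
    return fetch_repo("https://github.com/u-blox/ubxlib", "ubxlib", "#master",
                      printer, "example: ", submodule_init=True, force=False)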
def exe_where(exe_name, help_text, printer, prompt):
'''Find an executable using where.exe or which on linux'''
success = False
try:
printer.string("{}looking for \"{}\"...". \
format(prompt, exe_name))
# See here:
# https://stackoverflow.com/questions/14928860/passing-double-quote-shell-commands-in-python-to-subprocess-popen
# ...for why the construction "".join() is necessary when
# passing things which might have spaces in them.
# It is the only thing that works.
if is_linux():
cmd = ["which {}".format(exe_name.replace(":", "/"))]
printer.string("{}detected linux, calling \"{}\"...".format(prompt, cmd))
else:
cmd = ["where", "".join(exe_name)]
printer.string("{}detected nonlinux, calling \"{}\"...".format(prompt, cmd))
text = subprocess.check_output(cmd,
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
printer.string("{}{} found in {}".format(prompt, exe_name,
line.decode()))
success = True
except subprocess.CalledProcessError:
if help_text:
printer.string("{}ERROR {} not found: {}". \
format(prompt, exe_name, help_text))
else:
printer.string("{}ERROR {} not found". \
format(prompt, exe_name))
return success
def exe_version(exe_name, version_switch, printer, prompt):
'''Print the version of a given executable'''
success = False
if not version_switch:
version_switch = "--version"
try:
text = subprocess.check_output(subprocess_osify(["".join(exe_name), version_switch]),
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
printer.string("{}{}".format(prompt, line.decode()))
success = True
except subprocess.CalledProcessError:
printer.string("{}ERROR {} either not found or didn't like {}". \
format(prompt, exe_name, version_switch))
return success
def exe_terminate(process_pid):
'''Jonathan's killer'''
process = psutil.Process(process_pid)
for proc in process.children(recursive=True):
proc.terminate()
process.terminate()
def read_from_process_and_queue(process, read_queue):
'''Read from a process, non-blocking'''
while process.poll() is None:
string = process.stdout.readline().decode()
if string and string != "":
read_queue.put(string)
else:
sleep(0.1)
def queue_get_no_exception(the_queue, block=True, timeout=None):
'''A version of queue.get() that doesn't throw an Empty exception'''
thing = None
try:
thing = the_queue.get(block=block, timeout=timeout)
except queue.Empty:
pass
return thing
def capture_env_var(line, env, printer, prompt):
'''A bit of exe_run that needs to be called from two places'''
# Find a KEY=VALUE bit in the line,
# parse it out and put it in the dictionary
# we were given
pair = line.split('=', 1)
if len(pair) == 2:
env[pair[0]] = pair[1].rstrip()
else:
printer.string("{}WARNING: not an environment variable: \"{}\"".
format(prompt, line))
# Note: if returned_env is given then "set"
# will be executed after the exe and the environment
# variables will be returned in it. The down-side
# of this is that the return value of the exe is,
# of course, lost.
def exe_run(call_list, guard_time_seconds=None, printer=None, prompt=None,
shell_cmd=False, set_env=None, returned_env=None,
bash_cmd=False, keep_going_flag=None):
'''Call an executable, printing out what it does'''
success = False
start_time = time()
flibbling = False
kill_time = None
read_time = start_time
if returned_env is not None:
# The caller wants the environment after the
# command has run, so, from this post:
# https://stackoverflow.com/questions/1214496/how-to-get-environment-from-a-subprocess
# append a tag that we can detect
# to the command and then call set,
# from which we can parse the environment
call_list.append("&&")
call_list.append("echo")
call_list.append("flibble")
call_list.append("&&")
if is_linux():
call_list.append("env")
bash_cmd = True
else:
call_list.append("set")
# I've seen output from set get lost,
# possibly because the process ending
# is asynchronous with stdout,
# so add a delay here as well
call_list.append("&&")
call_list.append("sleep")
call_list.append("2")
try:
popen_keywords = {
'stdout': subprocess.PIPE,
'stderr': subprocess.STDOUT,
'shell': shell_cmd,
'env': set_env,
'executable': "bin/bash" if bash_cmd else None
}
# Call the thang
# Note: used to have bufsize=1 here but it turns out
# that is ignored 'cos the output is considered
# binary. Seems to work in any case, I guess
# Winders, at least, is in any case line-buffered.
process = subprocess.Popen(subprocess_osify(call_list, shell=shell_cmd),
**popen_keywords)
if printer:
printer.string("{}{}, pid {} started with guard time {} second(s)". \
format(prompt, call_list[0], process.pid,
guard_time_seconds))
# This is over complex but, unfortunately, necessary.
# At least one thing that we try to run, nrfjprog, can
# crash silently: just hangs and sends no output. However
# it also doesn't flush and close stdout and so read(1)
# will hang, meaning we can't read its output as a means
# to check that it has hung.
# So, here we poll for the return value, which is normally
# how things will end, and we start another thread which
# reads from the process's stdout. If the thread sees
# nothing for guard_time_seconds then we terminate the
# process.
read_queue = queue.Queue()
read_thread = threading.Thread(target=read_from_process_and_queue,
args=(process, read_queue))
read_thread.start()
while process.poll() is None:
if keep_going_flag is None or keep_going(keep_going_flag, printer, prompt):
if guard_time_seconds and (kill_time is None) and \
((time() - start_time > guard_time_seconds) or
(time() - read_time > guard_time_seconds)):
kill_time = time()
if printer:
printer.string("{}guard time of {} second(s)." \
" expired, stopping {}...".
format(prompt, guard_time_seconds,
call_list[0]))
exe_terminate(process.pid)
else:
exe_terminate(process.pid)
line = queue_get_no_exception(read_queue, True, EXE_RUN_QUEUE_WAIT_SECONDS)
read_time = time()
while line is not None:
line = line.rstrip()
if flibbling:
capture_env_var(line, returned_env, printer, prompt)
else:
if returned_env is not None and "flibble" in line:
flibbling = True
else:
printer.string("{}{}".format(prompt, line))
line = queue_get_no_exception(read_queue, True, EXE_RUN_QUEUE_WAIT_SECONDS)
read_time = time()
sleep(0.1)
# Can't join() read_thread here as it might have
# blocked on a read() (if nrfjprog has anything to
# do with it). It will be tidied up when this process
# exits.
# There may still be stuff on the queue, read it out here
line = queue_get_no_exception(read_queue, True, EXE_RUN_QUEUE_WAIT_SECONDS)
while line is not None:
line = line.rstrip()
if flibbling:
capture_env_var(line, returned_env, printer, prompt)
else:
if returned_env is not None and "flibble" in line:
flibbling = True
else:
printer.string("{}{}".format(prompt, line))
line = queue_get_no_exception(read_queue, True, EXE_RUN_QUEUE_WAIT_SECONDS)
# There may still be stuff in the buffer after
# the application has finished running so flush that
# out here
line = process.stdout.readline().decode()
while line:
line = line.rstrip()
if flibbling:
capture_env_var(line, returned_env, printer, prompt)
else:
if returned_env is not None and "flibble" in line:
flibbling = True
else:
printer.string("{}{}".format(prompt, line))
line = process.stdout.readline().decode()
if (process.poll() == 0) and kill_time is None:
success = True
if printer:
printer.string("{}{}, pid {} ended with return value {}.". \
format(prompt, call_list[0],
process.pid, process.poll()))
except ValueError as ex:
if printer:
printer.string("{}failed: {} while trying to execute {}.". \
format(prompt, type(ex).__name__, str(ex)))
except KeyboardInterrupt as ex:
process.kill()
raise KeyboardInterrupt from ex
return success
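# Illustrative sketch (not part of the original module): a typical exe_run()
# call with a guard time and environment capture; "printer" is assumed to be
# a PrintToQueue instance (see further down in this file).
def _example_exe_run_usage(printer):
    '''Hypothetical helper showing a typical exe_run() call'''
    returned_env = {}
    # Run "git --version" with a 30 second guard time; because returned_env
    # is supplied, exe_run() appends "set"/"env" and so needs shell_cmd=True
    success = exe_run(["git", "--version"], 30, printer, "example: ",
                      shell_cmd=True, returned_env=returned_env)
    # returned_env now contains the KEY=VALUE pairs the command left behind
    return success, returned_env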
def set_process_prio_high():
'''Set the priority of the current process to high'''
if is_linux():
print("Setting process priority currently not supported for Linux")
# It should be possible to set prio with:
# psutil.Process().nice(-10)
# However we get "[Errno 13] Permission denied" even when run as root
else:
psutil.Process().nice(psutil.HIGH_PRIORITY_CLASS)
def set_process_prio_normal():
'''Set the priority of the current process to normal'''
if is_linux():
print("Setting process priority currently not supported for Linux")
# It should be possible to set prio with:
# psutil.Process().nice(0)
# However we get "[Errno 13] Permission denied" even when run as root
else:
psutil.Process().nice(psutil.NORMAL_PRIORITY_CLASS)
class ExeRun():
'''Run an executable as a "with:"'''
def __init__(self, call_list, printer=None, prompt=None, shell_cmd=False, with_stdin=False):
self._call_list = call_list
self._printer = printer
self._prompt = prompt
self._shell_cmd = shell_cmd
self._with_stdin=with_stdin
self._process = None
def __enter__(self):
if self._printer:
text = ""
for idx, item in enumerate(self._call_list):
if idx == 0:
text = item
else:
text += " {}".format(item)
self._printer.string("{}starting {}...".format(self._prompt,
text))
try:
# Start exe
popen_keywords = {
'stdout': subprocess.PIPE,
'stderr': subprocess.STDOUT,
'shell': self._shell_cmd
}
if not is_linux():
popen_keywords['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
if self._with_stdin:
popen_keywords['stdin'] = subprocess.PIPE
self._process = subprocess.Popen(self._call_list, **popen_keywords)
if self._printer:
self._printer.string("{}{} pid {} started".format(self._prompt,
self._call_list[0],
self._process.pid))
except (OSError, subprocess.CalledProcessError, ValueError) as ex:
if self._printer:
self._printer.string("{}failed: {} to start {}.". \
format(self._prompt,
type(ex).__name__, str(ex)))
except KeyboardInterrupt as ex:
self._process.kill()
raise KeyboardInterrupt from ex
return self._process
def __exit__(self, _type, value, traceback):
del _type
del value
del traceback
# Stop exe
if self._printer:
self._printer.string("{}stopping {}...". \
format(self._prompt,
self._call_list[0]))
return_value = self._process.poll()
if not return_value:
retry = 5
while (self._process.poll() is None) and (retry > 0):
# Try to stop with CTRL-C
if is_linux():
sig = signal.SIGINT
else:
sig = signal.CTRL_BREAK_EVENT
self._process.send_signal(sig)
sleep(1)
retry -= 1
return_value = self._process.poll()
if not return_value:
# Terminate with a vengeance
self._process.terminate()
while self._process.poll() is None:
sleep(0.1)
if self._printer:
self._printer.string("{}{} pid {} terminated".format(self._prompt,
self._call_list[0],
self._process.pid))
else:
if self._printer:
self._printer.string("{}{} pid {} CTRL-C'd".format(self._prompt,
self._call_list[0],
self._process.pid))
else:
if self._printer:
self._printer.string("{}{} pid {} already ended".format(self._prompt,
self._call_list[0],
self._process.pid))
return return_value
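# Illustrative sketch (not part of the original module): ExeRun used as a
# "with:"; "my_server" is a placeholder for any long-running executable and
# "printer" is assumed to be a PrintToQueue instance.
def _example_exe_run_context(printer):
    '''Hypothetical helper showing ExeRun as a context manager'''
    with ExeRun(["my_server" + EXE_EXT], printer, "example: ") as process:
        # The executable is running while inside this block
        printer.string("example: server pid is {}".format(process.pid))
    # On leaving the block the process is sent CTRL-C/SIGINT and, if that
    # does not stop it, terminated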
# Simple SWO decoder: only handles single bytes of application
# data at a time, i.e. what ITM_SendChar() sends.
class SwoDecoder():
'''Take the contents of a byte_array and decode it as SWO'''
def __init__(self, address, replaceLfWithCrLf=False):
self._address = address
self._replace_lf_with_crlf = replaceLfWithCrLf
self._expecting_swit = True
def decode(self, swo_byte_array):
'''Do the decode'''
decoded_byte_array = bytearray()
if swo_byte_array:
for data_byte in swo_byte_array:
# We're looking only for "address" and we also know
# that CMSIS only offers ITM_SendChar(), so packet length
# is always 1, and we only send ASCII characters,
# so the top bit of the data byte must be 0.
#
# For the SWO protocol, see:
#
# https://developer.arm.com/documentation/ddi0314/h/
# instrumentation-trace-macrocell/
# about-the-instrumentation-trace-macrocell/trace-packet-format
#
# When we see SWIT (SoftWare Instrumentation Trace
# I think, anyway, the bit that carries our prints
# off the target) which is 0bBBBBB0SS, where BBBBB is
# address and SS is the size of payload to follow,
# in our case 0x01, we know that the next
# byte is probably data and if it is ASCII then
# it is data. Anything else is ignored.
# The reason for doing it this way is that the
# ARM ITM only sends out sync packets under
# special circumstances so it is not a recovery
# mechanism for simply losing a byte in the
# transfer, which does happen occasionally.
if self._expecting_swit:
if ((data_byte & 0x03) == 0x01) and ((data_byte & 0xf8) >> 3 == self._address):
# Trace packet type is SWIT, i.e. our
# application logging
self._expecting_swit = False
else:
if data_byte & 0x80 == 0:
if (data_byte == 10) and self._replace_lf_with_crlf:
decoded_byte_array.append(13)
decoded_byte_array.append(data_byte)
self._expecting_swit = True
return decoded_byte_array
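# Illustrative sketch (not part of the original module): decoding a few raw
# SWO bytes by hand. 0x01 is a SWIT header for address 0 with a one-byte
# payload, which is what ITM_SendChar() produces.
def _example_swo_decode():
    '''Hypothetical helper showing SwoDecoder in action'''
    decoder = SwoDecoder(0, replaceLfWithCrLf=True)
    # "Hi\n" sent one character at a time on stimulus address 0
    raw = bytearray([0x01, ord("H"), 0x01, ord("i"), 0x01, 10])
    return decoder.decode(raw).decode("ascii") # returns "Hi\r\n"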
class PrintThread(threading.Thread):
'''Print thread to organise prints nicely'''
def __init__(self, print_queue, file_handle=None,
window_file_handle=None, window_size=10000,
window_update_period_seconds=1):
self._queue = print_queue
self._lock = RLock()
self._queue_forwards = []
self._running = False
self._file_handle = file_handle
self._window = None
self._window_file_handle = window_file_handle
if self._window_file_handle:
self._window = deque(self._window_file_handle, maxlen=window_size)
self._window_update_pending = False
self._window_update_period_seconds = window_update_period_seconds
self._window_next_update_time = time()
threading.Thread.__init__(self)
def _send_forward(self, flush=False):
# Send from any forwarding buffers
# self._lock should be acquired before this is called
queue_idxes_to_remove = []
for idx, queue_forward in enumerate(self._queue_forwards):
if flush or time() > queue_forward["last_send"] + queue_forward["buffer_time"]:
string_forward = ""
len_queue_forward = len(queue_forward["buffer"])
count = 0
for item in queue_forward["buffer"]:
count += 1
if count < len_queue_forward:
item += "\n"
if queue_forward["prefix_string"]:
item = queue_forward["prefix_string"] + item
string_forward += item
queue_forward["buffer"] = []
if string_forward:
try:
queue_forward["queue"].put(string_forward)
except TimeoutError:
pass
except (OSError, EOFError, BrokenPipeError):
queue_idxes_to_remove.append(idx)
queue_forward["last_send"] = time()
for idx in queue_idxes_to_remove:
self._queue_forwards.pop(idx)
def add_forward_queue(self, queue_forward, prefix_string=None, buffer_time=0):
'''Forward things received on the print queue to another queue'''
self._lock.acquire()
already_done = False
for item in self._queue_forwards:
if item["queue"] == queue_forward:
already_done = True
break
if not already_done:
item = {}
item["queue"] = queue_forward
item["prefix_string"] = prefix_string
item["buffer"] = []
item["buffer_time"] = buffer_time
item["last_send"] = time()
self._queue_forwards.append(item)
self._lock.release()
def remove_forward_queue(self, queue_forward):
'''Stop forwarding things received on the print queue to another queue'''
self._lock.acquire()
queues = []
self._send_forward(flush=True)
for item in self._queue_forwards:
if item["queue"] != queue_forward:
queues.append(item)
self._queue_forwards = queues
self._lock.release()
def stop_thread(self):
'''Helper function to stop the thread'''
self._lock.acquire()
self._running = False
# Write anything remaining to the window file
        if self._window_update_pending:
            self._window_file_handle.seek(0)
            for item in self._window:
                self._window_file_handle.write(item)
            # Truncate to drop any stale bytes left over from a previous,
            # longer write
            self._window_file_handle.truncate()
            self._window_file_handle.flush()
            self._window_update_pending = False
self._window_next_update_time = time() + self._window_update_period_seconds
self._lock.release()
def run(self):
'''Worker thread'''
self._running = True
while self._running:
# Print locally and store in any forwarding buffers
try:
                my_string = self._queue.get(block=False)
print(my_string)
if self._file_handle:
self._file_handle.write(my_string + "\n")
self._lock.acquire()
if self._window is not None:
# Note that my_string can contain multiple lines,
# hence the need to split it here to maintain the
# window
for line in my_string.splitlines():
self._window.append(line + "\n")
self._window_update_pending = True
for queue_forward in self._queue_forwards:
queue_forward["buffer"].append(my_string)
self._lock.release()
except queue.Empty:
sleep(0.1)
except (OSError, EOFError, BrokenPipeError):
# Try to restore stdout
sleep(0.1)
sys.stdout = sys.__stdout__
self._lock.acquire()
# Send from any forwarding buffers
self._send_forward()
# Write the window to file if required
            if self._window_update_pending and time() > self._window_next_update_time:
                self._window_file_handle.seek(0)
                for item in self._window:
                    self._window_file_handle.write(item)
                # Truncate after writing, otherwise you can end up with
                # garbage at the end of the file if the new window content
                # is shorter than the old
                self._window_file_handle.truncate()
                self._window_update_pending = False
                self._window_next_update_time = time() + self._window_update_period_seconds
self._lock.release()
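# Illustrative usage sketch (not part of the original module): how a
# PrintThread is typically wired up; the variable names and file name here
# are hypothetical.
#
#   print_queue = queue.Queue()
#   print_thread = PrintThread(print_queue, file_handle=open("debug.log", "w"))
#   print_thread.start()
#   print_queue.put("hello from a worker")
#   ...
#   print_thread.stop_thread()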
class PrintToQueue():
'''Print to a queue, if there is one'''
def __init__(self, print_queue, file_handle, include_timestamp=False):
self._queues = []
self._lock = RLock()
if print_queue:
self._queues.append(print_queue)
self._file_handle = file_handle
self._include_timestamp = include_timestamp
def add_queue(self, print_queue):
'''Add a queue to the list of places to print to'''
self._lock.acquire()
already_done = False
for item in self._queues:
if item == print_queue:
already_done = True
break
if not already_done:
self._queues.append(print_queue)
self._lock.release()
def remove_queue(self, print_queue):
'''Remove a queue from the list of places to print to'''
self._lock.acquire()
queues = []
for item in self._queues:
if item != print_queue:
queues.append(item)
self._queues = queues
self._lock.release()
def string(self, string, file_only=False):
'''Print a string to the queue(s)'''
if self._include_timestamp:
string = strftime(TIME_FORMAT, gmtime()) + " " + string
if not file_only:
self._lock.acquire()
queue_idxes_to_remove = []
if self._queues:
for idx, print_queue in enumerate(self._queues):
try:
print_queue.put(string)
except (EOFError, BrokenPipeError):
queue_idxes_to_remove.append(idx)
                # Pop in reverse order so that earlier pops don't invalidate
                # the later indices
                for idx in sorted(queue_idxes_to_remove, reverse=True):
                    self._queues.pop(idx)
else:
print(string)
self._lock.release()
if self._file_handle:
self._file_handle.write(string + "\n")
self._file_handle.flush()
# Adapted from:
# https://stackoverflow.com/questions/431684/how-do-i-change-the-working-directory-in-python
class ChangeDir():
'''Context manager for changing the current working directory'''
def __init__(self, new_path):
self._new_path = os.path.expanduser(new_path)
self._saved_path = None
def __enter__(self):
'''CD to new_path'''
self._saved_path = os.getcwd()
os.chdir(self._new_path)
def __exit__(self, etype, value, traceback):
'''CD back to saved_path'''
os.chdir(self._saved_path)
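# Illustrative usage sketch (not part of the original module); the path and
# do_stuff() are hypothetical.
#
#   with ChangeDir("~/some/build/dir"):
#       do_stuff()        # runs with the working directory changed
#   # ...and the previous working directory is restored on exit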
class Lock():
'''Hold a lock as a "with:"'''
def __init__(self, lock, guard_time_seconds,
lock_type, printer, prompt, keep_going_flag=None):
self._lock = lock
self._guard_time_seconds = guard_time_seconds
self._lock_type = lock_type
self._printer = printer
self._prompt = prompt
self._keep_going_flag = keep_going_flag
self._locked = False
def __enter__(self):
if not self._lock:
return True
# Wait on the lock
if not self._locked:
timeout_seconds = self._guard_time_seconds
self._printer.string("{}waiting up to {} second(s)" \
" for a {} lock...". \
format(self._prompt,
self._guard_time_seconds,
self._lock_type))
count = 0
while not self._lock.acquire(False) and \
((self._guard_time_seconds == 0) or (timeout_seconds > 0)) and \
keep_going(self._keep_going_flag, self._printer, self._prompt):
sleep(1)
timeout_seconds -= 1
count += 1
if count == 30:
self._printer.string("{}still waiting {} second(s)" \
" for a {} lock (locker is" \
" currently {}).". \
format(self._prompt, timeout_seconds,
self._lock_type, self._lock))
count = 0
if (self._guard_time_seconds == 0) or (timeout_seconds > 0):
self._locked = True
self._printer.string("{}{} lock acquired ({}).". \
format(self._prompt, self._lock_type,
self._lock))
return self._locked
def __exit__(self, _type, value, traceback):
del _type
del value
del traceback
if self._lock and self._locked:
try:
self._lock.release()
self._locked = False
self._printer.string("{}released a {} lock.".format(self._prompt,
self._lock_type))
except RuntimeError:
self._locked = False
self._printer.string("{}{} lock was already released.". \
format(self._prompt, self._lock_type))
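# Illustrative usage sketch (not part of the original module): __enter__()
# returns whether the lock was obtained within guard_time_seconds, so the
# body should check it; lock, printer and prompt are hypothetical names.
#
#   with Lock(lock, 60, "platform", printer, prompt) as locked:
#       if locked:
#           # safe to use the locked resource here
#           ...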
def wait_for_completion(_list, purpose, guard_time_seconds,
printer, prompt, keep_going_flag):
'''Wait for a completion list to empty'''
completed = False
if len(_list) > 0:
timeout_seconds = guard_time_seconds
printer.string("{}waiting up to {} second(s)" \
" for {} completion...". \
format(prompt, guard_time_seconds, purpose))
count = 0
while (len(_list) > 0) and \
((guard_time_seconds == 0) or (timeout_seconds > 0)) and \
keep_going(keep_going_flag, printer, prompt):
sleep(1)
timeout_seconds -= 1
count += 1
if count == 30:
list_text = ""
for item in _list:
if list_text:
list_text += ", "
list_text += str(item)
printer.string("{}still waiting {} second(s)" \
" for {} to complete (waiting" \
" for {}).". \
format(prompt, timeout_seconds,
purpose, list_text))
count = 0
if len(_list) == 0:
completed = True
printer.string("{}{} completed.".format(prompt, purpose))
return completed
def reset_nrf_target(connection, printer, prompt):
'''Reset a Nordic NRFxxx target'''
call_list = []
printer.string("{}resetting target...".format(prompt))
# Assemble the call list
call_list.append("nrfjprog")
call_list.append("--reset")
if connection and "debugger" in connection and connection["debugger"]:
call_list.append("-s")
call_list.append(connection["debugger"])
# Print what we're gonna do
tmp = ""
for item in call_list:
tmp += " " + item
printer.string("{}in directory {} calling{}". \
format(prompt, os.getcwd(), tmp))
# Call it
return exe_run(call_list, 60, printer, prompt)
def usb_cutter_reset(usb_cutter_id_strs, printer, prompt):
'''Cut and then un-cut USB cables using Cleware USB cutters'''
# First switch the USB cutters off
action = "1"
count = 0
call_list_root = ["usbswitchcmd"]
call_list_root.append("-s")
call_list_root.append("-n")
while count < 2:
for usb_cutter_id_str in usb_cutter_id_strs:
call_list = call_list_root.copy()
call_list.append(usb_cutter_id_str)
call_list.append(action)
# Print what we're gonna do
tmp = ""
for item in call_list:
tmp += " " + item
if printer:
printer.string("{}in directory {} calling{}". \
format(prompt, os.getcwd(), tmp))
# Set shell to keep Jenkins happy
exe_run(call_list, 0, printer, prompt, shell_cmd=True)
        # Wait for HW_RESET_DURATION_SECONDS
if printer:
printer.string("{}waiting {} second(s)...". \
format(prompt, HW_RESET_DURATION_SECONDS))
sleep(HW_RESET_DURATION_SECONDS)
# "0" to switch the USB cutters on again
action = "0"
count += 1
def kmtronic_reset(ip_address, hex_bitmap, printer, prompt):
'''Cut and then un-cut power using a KMTronic box'''
    # KMTronic is a web relay box which will be controlling
    # power to, for instance, EVKs.  The last byte of the URL
    # is a hex bitmap of the outputs, where 0 switches an
    # output off and 1 switches it on
# Take only the last two digits of the hex bitmap
hex_bitmap_len = len(hex_bitmap)
hex_bitmap = hex_bitmap[hex_bitmap_len - 2:hex_bitmap_len]
kmtronic_off = "http://" + ip_address + "FFE0" + hex_bitmap
kmtronic_on = "http://" + ip_address + "FFE0" + "{0:x}".format(int(hex_bitmap, 16) ^ 0xFF)
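    # Worked example (illustrative): if hex_bitmap ends in "03" then
    # kmtronic_off ends in "FFE003" and kmtronic_on ends in "FFE0fc"
    # (0x03 ^ 0xFF == 0xfc)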
try:
# First switch the given bit positions off
if printer:
printer.string("{}sending {}". \
format(prompt, kmtronic_off))
response = requests.get(kmtronic_off)
        # Wait for HW_RESET_DURATION_SECONDS
if printer:
printer.string("{}...received response {}, waiting {} second(s)...". \
format(prompt, response.status_code, HW_RESET_DURATION_SECONDS))
sleep(HW_RESET_DURATION_SECONDS)
# Switch the given bit positions on
if printer:
printer.string("{}sending {}".format(prompt, kmtronic_on))
response = requests.get(kmtronic_on)
if printer:
printer.string("{}...received response {}.". \
format(prompt, response.status_code))
except requests.ConnectionError:
if printer:
printer.string("{}unable to connect to KMTronic box at {}.". \
format(prompt, ip_address))
# Look for a single line anywhere in message
# beginning with "test: ". This must be followed by
# "x.y.z a.b.c m.n.o" (i.e. instance IDs space separated)
# and then an optional "blah" filter string, or just "*"
# and an optional "blah" filter string or "None".
# Valid examples are:
#
# test: 1
# test: 1 3 7
# test: 1.0.3 3 7.0
# test: 1 2 example
# test: 1.1 8 portInit
# test: *
# test: * port
# test: none
#
# Filter strings must NOT begin with a digit.
# There cannot be more than one * or a * with any other instance.
# There can only be one filter string.
# Only whitespace is expected after this on the line.
# Anything else is ignored.
# Populates instances with the "0 4.5 13.5.1" bit as instance
# entries [[0], [4, 5], [13, 5, 1]] and returns the filter
# string, if any.
def commit_message_parse(message, instances, printer=None, prompt=None):
    '''Look for a "test:" directive in a commit message'''
instances_all = False
instances_local = []
filter_string_local = None
found = False
if message:
# Search through message for a line beginning
# with "test:"
if printer:
printer.string("{}### parsing message to see if it contains a test directive...". \
format(prompt))
lines = message.split("\\n")
for idx1, line in enumerate(lines):
if printer:
printer.string("{}text line {}: \"{}\"".format(prompt, idx1 + 1, line))
if line.lower().startswith("test:"):
found = True
instances_all = False
# Pick through what follows
parts = line[5:].split()
for part in parts:
if instances_all and (part[0].isdigit() or part == "*" or part.lower() == "none"):
# If we've had a "*" and this is another one
# or it begins with a digit then this is
# obviously not a "test:" line,
# leave the loop and try again.
instances_local = []
filter_string_local = None
if printer:
printer.string("{}...badly formed test directive, ignoring.". \
format(prompt))
found = False
break
if filter_string_local:
# If we've had a filter string then nothing
# must follow so this is not a "test:" line,
# leave the loop and try again.
instances_local = []
filter_string_local = None
if printer:
printer.string("{}...extraneous characters after test directive," \
" ignoring.".format(prompt))
found = False
break
if part[0].isdigit():
# If this part begins with a digit it could
# be an instance containing numbers
instance = []
bad = False
for item in part.split("."):
try:
instance.append(int(item))
except ValueError:
# Some rubbish, not a test line so
# leave the loop and try the next
# line
bad = True
break
if bad:
instances_local = []
filter_string_local = None
if printer:
printer.string("{}...badly formed test directive, ignoring.". \
format(prompt))
found = False
break
if instance:
instances_local.append(instance[:])
elif part == "*":
if instances_local:
# If we've already had any instances
# this is obviously not a test line,
# leave the loop and try again
instances_local = []
filter_string_local = None
if printer:
printer.string("{}...badly formed test directive, ignoring.". \
format(prompt))
found = False
break
# If we haven't had any instances and
# this is a * then it means "all"
instances_local.append(part)
instances_all = True
elif part.lower() == "none":
if instances_local:
# If we've already had any instances
# this is obviously not a test line,
# leave the loop and try again
if printer:
printer.string("{}...badly formed test directive, ignoring.". \
format(prompt))
found = False
instances_local = []
filter_string_local = None
break
elif instances_local and not part == "*":
# If we've had an instance and this
# is not a "*" then this must be a
# filter string
filter_string_local = part
else:
# Found some rubbish, not a "test:"
# line after all, leave the loop
# and try the next line
instances_local = []
filter_string_local = None
if printer:
printer.string("{}...badly formed test directive, ignoring.". \
format(prompt))
found = False
break
if found:
text = "found test directive with"
if instances_local:
text += " instance(s)" + get_instances_text(instances_local)
if filter_string_local:
text += " and filter \"" + filter_string_local + "\""
else:
text += " instances \"None\""
if printer:
printer.string("{}{}.".format(prompt, text))
break
        if not found and printer:
            printer.string("{}no test directive found".format(prompt))
if found and instances_local:
instances.extend(instances_local[:])
return found, filter_string_local
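# Illustrative usage sketch (not part of the original module); the commit
# message text is made up.  Note that the function splits lines on a literal
# backslash-n two-character sequence.
#
#   instances = []
#   found, filter_str = commit_message_parse("fix stuff\\ntest: 1.0.3 3 7.0",
#                                            instances)
#   # found == True, instances == [[1, 0, 3], [3], [7, 0]], filter_str is None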
| [((203, 4, 203, 33), 'os.chmod', 'os.chmod', ({(203, 13, 203, 17): 'path', (203, 19, 203, 32): 'stat.S_IWRITE'}, {}), '(path, stat.S_IWRITE)', False, 'import os\n'), ((211, 7, 211, 31), 'os.path.isdir', 'os.path.isdir', ({(211, 21, 211, 30): 'directory'}, {}), '(directory)', False, 'import os\n'), ((410, 7, 410, 31), 'os.path.isdir', 'os.path.isdir', ({(410, 21, 410, 30): 'directory'}, {}), '(directory)', False, 'import os\n'), ((597, 14, 597, 41), 'psutil.Process', 'psutil.Process', ({(597, 29, 597, 40): 'process_pid'}, {}), '(process_pid)', False, 'import psutil\n'), ((645, 17, 645, 23), 'time.time', 'time', ({}, {}), '()', False, 'from time import sleep, time, gmtime, strftime\n'), ((30, 11, 30, 28), 'platform.system', 'platform.system', ({}, {}), '()', False, 'import platform\n'), ((161, 15, 163, 50), 'subprocess.check_output', 'subprocess.check_output', (), '', False, 'import subprocess\n'), ((341, 23, 341, 70), 'serial.Serial', 'serial.Serial', (), '', False, 'import serial\n'), ((480, 11, 480, 35), 'os.path.isdir', 'os.path.isdir', ({(480, 25, 480, 34): 'directory'}, {}), '(directory)', False, 'import os\n'), ((500, 20, 500, 44), 'os.path.isdir', 'os.path.isdir', ({(500, 34, 500, 43): 'directory'}, {}), '(directory)', False, 'import os\n'), ((559, 15, 561, 50), 'subprocess.check_output', 'subprocess.check_output', (), '', False, 'import subprocess\n'), ((706, 21, 706, 34), 'queue.Queue', 'queue.Queue', ({}, {}), '()', False, 'import queue\n'), ((707, 22, 708, 66), 'threading.Thread', 'threading.Thread', (), '', False, 'import threading\n'), ((954, 21, 954, 28), 'multiprocessing.RLock', 'RLock', ({}, {}), '()', False, 'from multiprocessing import RLock\n'), ((964, 40, 964, 46), 'time.time', 'time', ({}, {}), '()', False, 'from time import sleep, time, gmtime, strftime\n'), ((965, 8, 965, 39), 'threading.Thread.__init__', 'threading.Thread.__init__', ({(965, 34, 965, 38): 'self'}, {}), '(self)', False, 'import threading\n'), ((1079, 21, 1079, 28), 'multiprocessing.RLock', 'RLock', ({}, {}), '()', False, 'from multiprocessing import RLock\n'), ((1131, 25, 1131, 53), 'os.path.expanduser', 'os.path.expanduser', ({(1131, 44, 1131, 52): 'new_path'}, {}), '(new_path)', False, 'import os\n'), ((1135, 27, 1135, 38), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((1136, 8, 1136, 32), 'os.chdir', 'os.chdir', ({(1136, 17, 1136, 31): 'self._new_path'}, {}), '(self._new_path)', False, 'import os\n'), ((1139, 8, 1139, 34), 'os.chdir', 'os.chdir', ({(1139, 17, 1139, 33): 'self._saved_path'}, {}), '(self._saved_path)', False, 'import os\n'), ((1283, 8, 1283, 40), 'time.sleep', 'sleep', ({(1283, 14, 1283, 39): 'HW_RESET_DURATION_SECONDS'}, {}), '(HW_RESET_DURATION_SECONDS)', False, 'from time import sleep, time, gmtime, strftime\n'), ((1308, 19, 1308, 45), 'requests.get', 'requests.get', ({(1308, 32, 1308, 44): 'kmtronic_off'}, {}), '(kmtronic_off)', False, 'import requests\n'), ((1313, 8, 1313, 40), 'time.sleep', 'sleep', ({(1313, 14, 1313, 39): 'HW_RESET_DURATION_SECONDS'}, {}), '(HW_RESET_DURATION_SECONDS)', False, 'from time import sleep, time, gmtime, strftime\n'), ((1317, 19, 1317, 44), 'requests.get', 'requests.get', ({(1317, 32, 1317, 43): 'kmtronic_on'}, {}), '(kmtronic_on)', False, 'import requests\n'), ((151, 23, 151, 66), 're.split', 're.split', ({(151, 32, 151, 55): '"""( |\\\\".*?\\\\"|\'.*?\')"""', (151, 57, 151, 65): 'cmd_line'}, {}), '(\'( |\\\\".*?\\\\"|\\\'.*?\\\')\', cmd_line)', False, 'import re\n'), ((376, 12, 376, 20), 'time.sleep', 'sleep', ({(376, 18, 376, 
19): '(1)'}, {}), '(1)', False, 'from time import sleep, time, gmtime, strftime\n'), ((609, 12, 609, 22), 'time.sleep', 'sleep', ({(609, 18, 609, 21): '(0.1)'}, {}), '(0.1)', False, 'from time import sleep, time, gmtime, strftime\n'), ((725, 24, 725, 30), 'time.time', 'time', ({}, {}), '()', False, 'from time import sleep, time, gmtime, strftime\n'), ((737, 12, 737, 22), 'time.sleep', 'sleep', ({(737, 18, 737, 21): '(0.1)'}, {}), '(0.1)', False, 'from time import sleep, time, gmtime, strftime\n'), ((839, 28, 839, 79), 'subprocess.Popen', 'subprocess.Popen', ({(839, 45, 839, 60): 'self._call_list'}, {}), '(self._call_list, **popen_keywords)', False, 'import subprocess\n'), ((961, 27, 961, 78), 'collections.deque', 'deque', (), '', False, 'from collections import deque\n'), ((1007, 32, 1007, 38), 'time.time', 'time', ({}, {}), '()', False, 'from time import sleep, time, gmtime, strftime\n'), ((1211, 12, 1211, 20), 'time.sleep', 'sleep', ({(1211, 18, 1211, 19): '(1)'}, {}), '(1)', False, 'from time import sleep, time, gmtime, strftime\n'), ((1248, 34, 1248, 45), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((221, 16, 221, 65), 'shutil.rmtree', 'shutil.rmtree', (), '', False, 'import shutil\n'), ((249, 33, 249, 53), 'copy.copy', 'copy', ({(249, 38, 249, 52): 'instances_out1'}, {}), '(instances_out1)', False, 'from copy import copy\n'), ((269, 41, 269, 53), 'os.geteuid', 'os.geteuid', ({}, {}), '()', False, 'import os\n'), ((406, 51, 406, 62), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((736, 28, 736, 34), 'time.time', 'time', ({}, {}), '()', False, 'from time import sleep, time, gmtime, strftime\n'), ((796, 8, 796, 24), 'psutil.Process', 'psutil.Process', ({}, {}), '()', False, 'import psutil\n'), ((806, 8, 806, 24), 'psutil.Process', 'psutil.Process', ({}, {}), '()', False, 'import psutil\n'), ((873, 16, 873, 24), 'time.sleep', 'sleep', ({(873, 22, 873, 23): '(1)'}, {}), '(1)', False, 'from time import sleep, time, gmtime, strftime\n'), ((990, 45, 990, 51), 'time.time', 'time', ({}, {}), '()', False, 'from time import sleep, time, gmtime, strftime\n'), ((1031, 44, 1031, 50), 'time.time', 'time', ({}, {}), '()', False, 'from time import sleep, time, gmtime, strftime\n'), ((1167, 16, 1167, 24), 'time.sleep', 'sleep', ({(1167, 22, 1167, 23): '(1)'}, {}), '(1)', False, 'from time import sleep, time, gmtime, strftime\n'), ((228, 16, 228, 24), 'time.sleep', 'sleep', ({(228, 22, 228, 23): '(1)'}, {}), '(1)', False, 'from time import sleep, time, gmtime, strftime\n'), ((715, 32, 715, 38), 'time.time', 'time', ({}, {}), '()', False, 'from time import sleep, time, gmtime, strftime\n'), ((880, 20, 880, 30), 'time.sleep', 'sleep', ({(880, 26, 880, 29): '(0.1)'}, {}), '(0.1)', False, 'from time import sleep, time, gmtime, strftime\n'), ((971, 24, 971, 30), 'time.time', 'time', ({}, {}), '()', False, 'from time import sleep, time, gmtime, strftime\n'), ((1055, 16, 1055, 26), 'time.sleep', 'sleep', ({(1055, 22, 1055, 25): '(0.1)'}, {}), '(0.1)', False, 'from time import sleep, time, gmtime, strftime\n'), ((1058, 16, 1058, 26), 'time.sleep', 'sleep', ({(1058, 22, 1058, 25): '(0.1)'}, {}), '(0.1)', False, 'from time import sleep, time, gmtime, strftime\n'), ((1064, 47, 1064, 53), 'time.time', 'time', ({}, {}), '()', False, 'from time import sleep, time, gmtime, strftime\n'), ((1072, 48, 1072, 54), 'time.time', 'time', ({}, {}), '()', False, 'from time import sleep, time, gmtime, strftime\n'), ((261, 39, 261, 82), 'os.environ.get', 'os.environ.get', ({(261, 54, 261, 
66): '"""SystemRoot"""', (261, 68, 261, 81): '"""C:\\\\windows"""'}, {}), "('SystemRoot', 'C:\\\\windows')", False, 'import os\n'), ((1107, 43, 1107, 51), 'time.gmtime', 'gmtime', ({}, {}), '()', False, 'from time import sleep, time, gmtime, strftime\n'), ((1274, 46, 1274, 57), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((526, 50, 526, 61), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((440, 54, 440, 65), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((460, 54, 460, 65), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((713, 21, 713, 27), 'time.time', 'time', ({}, {}), '()', False, 'from time import sleep, time, gmtime, strftime\n'), ((714, 21, 714, 27), 'time.time', 'time', ({}, {}), '()', False, 'from time import sleep, time, gmtime, strftime\n')] |
tcjansen/beer | faigler_mazeh.py | c6421371b6506cef1adf88cefa9a55db2f04e2dc | import numpy as np
import astropy.modeling.blackbody as bb
import astropy.constants as const
from astropy.io import fits
from scipy.interpolate import interp2d
class FaiglerMazehFit():
def __init__(self, P_orb, inc, R_star, M_star, T_star, A_ellip=False, A_beam=False,
R_p=False, a=False, u=False, g=0.65, logg=None, tele='TESS', M_p=False,
K=False):
self.P_orb = P_orb # orbital period in days
self.inc = inc * np.pi / 180 # inclination converted to radians
self.R_star = R_star # radius of the star in solar units
self.M_star = M_star # mass of the star in solar units
self.T_star = T_star # temperature of the star [K]
self.A_ellip = A_ellip # ellipsoidal amplitude in ppm
self.A_beam = A_beam # beaming amplitude in ppm
self.g = g # gravity-darkening coefficient, expected range is 0.3-1.0
self.logg = logg # log surface gravity of the star [cm s^-2]
self.tele = tele.lower() # observation instrument used, default is TESS. Only other
# other option (for now) is Kepler.
self.R_p = R_p # radius of the planet in jupiter radii
self.a = a
self.u = u # the limb-darkening coefficient, range is 0-1
self.g = g
self.M_p = M_p
self.K = K
# get the mass from the ellipsoidal amplitude, if given.
# u is the limb-darkening coefficient, range is 0-1
if not M_p and not not A_ellip and not not logg:
self.u = self.LDC()
self.M_p = self.m_from_ellip()
# star-planet separation [au] assuming a circular orbit
if not a and not not M_p:
self.a = get_a(self.P_orb * 86400, self.M_star * const.M_sun.value, \
self.M_p * const.M_jup.value) / const.au.value
def alpha_ellip(self):
if not self.u:
self.u = self.LDC()
if not self.g:
self.g = self.GDC()
a = 15 + self.u
b = 1 + self.g
c = 3 - self.u
return 0.15 * a * b / c
def RV_amp(self):
"""
Returns the radial velocity amplitude [m/s] of the star given a companion mass.
"""
return 27 / 40 * const.c.value \
* self.M_star ** (-2/3) \
* self.P_orb ** (-1/3) \
* self.M_p * np.sin(self.inc)
def doppler_shift(self, K):
"""
Returns the shift in wavelength for a given radial velocity amplitude.
"""
return K / const.c.value
def response_convolution(self, lambdas, response):
return response * bb.blackbody_lambda(lambdas, self.T_star).value
def alpha_beam(self, K):
"""
Returns the factor that accounts for the flux lost when a star gets Doppler shifted
in and out of the observer's bandpass.
"""
print(K)
rest_lambdas, response = response_func(self.tele)
flux_rest = np.trapz(self.response_convolution(rest_lambdas, response), \
x=rest_lambdas)
blueshifted_lambdas = rest_lambdas - self.doppler_shift(K=K)
flux_blueshift = np.trapz(self.response_convolution(blueshifted_lambdas, response), \
x=rest_lambdas)
redshifted_lambdas = rest_lambdas + self.doppler_shift(K=K)
flux_redshift = np.trapz(self.response_convolution(redshifted_lambdas, response), \
x=rest_lambdas)
alpha_blue = abs( (flux_rest - flux_blueshift) / flux_rest )
alpha_red = abs( (flux_rest - flux_redshift) / flux_rest )
return 1 - np.mean([alpha_red, alpha_blue])
def m_from_ellip(self):
return self.A_ellip \
* self.R_star ** (-3) \
* self.M_star ** 2 \
* self.P_orb ** 2 \
/ (12.8 * self.alpha_ellip() * np.sin(self.inc) ** 2)
def ellip_from_m(self):
return self.M_p * 12.8 * self.alpha_ellip() * np.sin(self.inc) ** 2 \
* self.R_star ** 3 \
* self.M_star ** (-2) \
* self.P_orb ** (-2)
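	# For reference (illustrative note, not in the original source): the
	# relation implemented above, in the units used by this class
	# (A_ellip in ppm, M_p in M_Jup, M_star in M_Sun, R_star in R_Sun,
	# P_orb in days), is
	#
	#   A_ellip ~= 12.8 * alpha_ellip * sin(i)**2 * M_p * R_star**3
	#              / (M_star**2 * P_orb**2)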
def m_from_beam(self, K=False, alpha_beam=False):
if not alpha_beam and not K and not not self.M_p:
alpha_beam = self.alpha_beam(K=self.RV_amp())
elif not alpha_beam and not not K:
alpha_beam = self.alpha_beam(K=K)
elif not not K and not not alpha_beam:
raise ValueError("Please only specify either K or alpha_beam, not both.")
elif not K and not alpha_beam:
raise ValueError("Please specify a radial velocity (K) or alpha_beam parameter")
return self.A_beam \
* self.M_star ** (2/3) \
* self.P_orb ** (1/3) \
/ (alpha_beam * np.sin(self.inc) * 2.7)
def beam_from_m(self):
"""
Returns the expected Doppler beaming amplitude [ppm] for a given mass.
"""
if not self.M_p:
raise ValueError("Argument 'M_p' must be specified if you're trying to " +
"derive a beaming amplitude from a mass.")
if not self.K:
			self.K = self.RV_amp()
return 2.7 * self.alpha_beam(K=self.K) \
* self.M_star ** (-2/3) \
* self.P_orb ** (-1/3) \
* self.M_p * np.sin(self.inc)
def Ag_from_thermref(self, A_thermref):
"""
Return the geometric albedo derived from the thermal + ref amplitude.
"""
return A_thermref * (self.R_p / self.a) ** -2 * (const.au / const.R_jup) ** 2
def mass(self, derived_from=None, K=False, alpha_beam=False):
if derived_from == "ellip":
return self.m_from_ellip()
elif derived_from == "beam":
return self.m_from_beam(K=K, alpha_beam=alpha_beam)
else:
raise ValueError("derived_from must equal either 'ellip' or 'beam'")
def nearest_neighbors(self, value, array, max_difference):
"""
Returns a set of nearest neighbor indices of the given array.
"""
return set(list((np.where(abs(array - value) < max_difference))[0]))
def correct_maxdiff(self, value, array, guess):
while len(self.nearest_neighbors(value, array, guess)) > 0:
guess -= 0.01 * guess
return guess
def shared_neighbor(self, value1, array1, max_diff1, value2, array2, max_diff2):
set1 = self.nearest_neighbors(value1, array1, max_diff1)
set2 = self.nearest_neighbors(value2, array2, max_diff2)
nearest = list(set1.intersection(set2))
# if len(nearest) > 1:
# newmax_diff1 = self.correct_maxdiff(value1, array1, max_diff1)
# newmax_diff2 = self.correct_maxdiff(value2, array2, max_diff2)
# print(newmax_diff1, newmax_diff2)
# if newmax_diff2 > newmax_diff1:
# max_diff2 = newmax_diff2
# else:
# max_diff1 = newmax_diff1
# set1 = self.nearest_neighbors(value1, array1, max_diff1)
# set2 = self.nearest_neighbors(value2, array2, max_diff2)
# nearest = list(set1.intersection(set2))
# print(nearest)
# # if len(nearest) > 1:
# # raise ValueError("Multiple shared nearest neighbors, indices = ", nearest)
# # else:
# # return nearest[0]
return nearest[0]
def tess_warning(self):
if self.tele != 'tess':
raise ValueError("This function is only appropriate for observations done with " +
"the TESS satellite")
def claret_LDC(self):
"""
Returns the mu coefficient and the four-parameters used in the Claret four-parameter
limb-darkening law (Claret 2000). These are obtained by finding the nearest neighbor
in the model limb-darkening of TESS from Claret 2018.
"""
# print("claret_LDC is still garbage, sorry. Quitting now...")
# exit()
self.tess_warning()
logg, Teff, a1, a2, a3, a4, mu, mod = np.genfromtxt('../claret_ldc.dat',
usecols=(0,1,4,5,6,7,8,10),
unpack=True)
mod = np.genfromtxt('../claret_ldc.dat', usecols=(10,), dtype='str')
if self.T_star <= 3000:
# the PC model is meant for cool stars, and if we break it up this way we can do an
# easier 2D interpolation.
mask = mod == 'PD'
else:
mask = mod == 'PC'
logg = logg[mask]
Teff = Teff[mask]
a1 = a1[mask]
a2 = a2[mask]
a3 = a3[mask]
a4 = a4[mask]
mu = mu[mask]
nearest = self.shared_neighbor(self.T_star, Teff, 100, self.logg, logg, 0.25)
mu = mu[nearest]
a_coeffs = [a1[nearest], a2[nearest], a3[nearest], a4[nearest]]
return mu, a_coeffs
def GDC(self):
"""
Returns the gravity-darkening coefficient from the Claret 2017 model
"""
self.tess_warning()
logg, log_Teff, g = np.genfromtxt('../claret_gdc.dat', usecols=(2,3,4), unpack=True)
nearest = self.shared_neighbor(np.log10(self.T_star), log_Teff, .01, self.logg,
logg, 0.25)
return g[nearest]
def LDC(self):
"""
Returns the limb-darkening coefficient of the host star.
"""
mu, a_coeffs = self.claret_LDC()
return 1 - sum([a_coeffs[k] * (1 - mu ** ((k+1) / 2)) for k in range(4)])
def get_response_specs(tele):
if tele=="tess":
return "../tess-response-function-v1.0.csv", ',', 1e1
elif tele=="kepler":
return "../kepler_hires.dat", '\t', 1e4
def response_func(tele):
file, delimiter, to_AA = get_response_specs(tele)
lambdas, response = np.genfromtxt(file, delimiter=delimiter, usecols=(0,1), unpack=True)
return lambdas * to_AA, response
def get_a(P, M_star, M_p):
"""
Use Kepler's third law to derive the star-planet separation.
"""
return (P ** 2 * const.G.value * (M_star + M_p) / (4 * np.pi ** 2)) ** (1/3)
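# Illustrative usage sketch (not part of the original module); the numbers
# are made up and the Claret data files read by LDC()/GDC() are assumed to
# be present.
#
#   fit = FaiglerMazehFit(P_orb=3.5, inc=85.0, R_star=1.1, M_star=1.05,
#                         T_star=5800.0, A_ellip=55.0, logg=4.4)
#   print(fit.M_p)       # companion mass [M_Jup] implied by the ellipsoidal amplitude
#   print(fit.RV_amp())  # corresponding radial velocity amplitude [m/s]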
| [((266, 21, 266, 89), 'numpy.genfromtxt', 'np.genfromtxt', (), '', True, 'import numpy as np\n'), ((213, 40, 215, 26), 'numpy.genfromtxt', 'np.genfromtxt', (), '', True, 'import numpy as np\n'), ((216, 8, 216, 70), 'numpy.genfromtxt', 'np.genfromtxt', (), '', True, 'import numpy as np\n'), ((245, 22, 245, 86), 'numpy.genfromtxt', 'np.genfromtxt', (), '', True, 'import numpy as np\n'), ((61, 19, 61, 35), 'numpy.sin', 'np.sin', ({(61, 26, 61, 34): 'self.inc'}, {}), '(self.inc)', True, 'import numpy as np\n'), ((94, 13, 94, 45), 'numpy.mean', 'np.mean', ({(94, 21, 94, 44): '[alpha_red, alpha_blue]'}, {}), '([alpha_red, alpha_blue])', True, 'import numpy as np\n'), ((140, 19, 140, 35), 'numpy.sin', 'np.sin', ({(140, 26, 140, 34): 'self.inc'}, {}), '(self.inc)', True, 'import numpy as np\n'), ((247, 33, 247, 54), 'numpy.log10', 'np.log10', ({(247, 42, 247, 53): 'self.T_star'}, {}), '(self.T_star)', True, 'import numpy as np\n'), ((70, 20, 70, 61), 'astropy.modeling.blackbody.blackbody_lambda', 'bb.blackbody_lambda', ({(70, 40, 70, 47): 'lambdas', (70, 49, 70, 60): 'self.T_star'}, {}), '(lambdas, self.T_star)', True, 'import astropy.modeling.blackbody as bb\n'), ((102, 37, 102, 53), 'numpy.sin', 'np.sin', ({(102, 44, 102, 52): 'self.inc'}, {}), '(self.inc)', True, 'import numpy as np\n'), ((123, 22, 123, 38), 'numpy.sin', 'np.sin', ({(123, 29, 123, 37): 'self.inc'}, {}), '(self.inc)', True, 'import numpy as np\n'), ((105, 48, 105, 64), 'numpy.sin', 'np.sin', ({(105, 55, 105, 63): 'self.inc'}, {}), '(self.inc)', True, 'import numpy as np\n')] |
f2010126/LTH_Master | src/vanilla_pytorch/prune_model.py | 709472e7e7962fbf3a56a620c536fb03d359734f | import torch.nn.utils.prune as prune
import torch
from src.vanilla_pytorch.utils import count_rem_weights
from src.vanilla_pytorch.models.linearnets import LeNet, init_weights
from src.vanilla_pytorch.models.resnets import Resnets
def remove_pruning(model):
for i, (name, module) in enumerate(model.named_modules()):
# name and val
if any([isinstance(module, cl) for cl in [torch.nn.Conv2d, torch.nn.Linear]]):
prune.remove(module, 'weight')
def get_masks(model, prune_amts=None):
"""
prune the lowest p% weights by magnitude per layer
:param model: model to prune
:param p_rate: prune rate = 0.2 as per paper
:param prune_amts: dictionary
:return: the created mask. model has served it's purpose.
"""
# TODO: Adjust pruning with output layer
if prune_amts is None: # ie dict is empty, use the default prune rate = 0.2
prune_amts = {"linear": 0.2, "conv": 0.2, "last": 0.2}
for i, (name, module) in enumerate(model.named_modules()):
# prune 20% of connections in all 2D-conv layers
if isinstance(module, torch.nn.Conv2d):
module = prune.l1_unstructured(module, name='weight', amount=prune_amts['conv'])
# prune 20% of connections in all linear layers
elif isinstance(module, torch.nn.Linear):
module = prune.l1_unstructured(module, name='weight', amount=prune_amts['linear'])
masks = list(model.named_buffers())
remove_pruning(model)
return masks
def update_apply_masks(model, masks):
# doesn't seem to be needed.
# for key, val in masks.items():
# print(f"key {key}")
# layer = getattr(model, key.split('.')[0])
# layer.weight_mask = val
for name, module in model.named_modules():
if any([isinstance(module, cl) for cl in [torch.nn.Conv2d, torch.nn.Linear]]):
module = prune.custom_from_mask(module, name='weight', mask=masks[name + ".weight_mask"])
# remove_pruning(model)
return model
def prune_random(model, prune_amts=None):
if prune_amts is None: # ie dict is empty, use the default prune rate =0.2
prune_amts = {"linear": 0.2, "conv": 0.2, "last": 0.2}
for name, module in model.named_modules():
# prune 20% of connections in all 2D-conv layers
if isinstance(module, torch.nn.Conv2d):
module = prune.random_unstructured(module, name='weight', amount=prune_amts['conv'])
# prune 20% of connections in all linear layers
elif isinstance(module, torch.nn.Linear):
module = prune.random_unstructured(module, name='weight', amount=prune_amts['linear'])
remove_pruning(model)
if __name__ == '__main__':
net = Resnets(in_channels=3)
net.apply(init_weights)
prune_rate = 0.8
prune_custom = {"linear": 0.2, "conv": 0.2, "last": 0.1}
for i in range(3):
masks = get_masks(net, prune_amts=prune_custom)
print(f"Count zero : {count_rem_weights(net)}")
| [((70, 10, 70, 32), 'src.vanilla_pytorch.models.resnets.Resnets', 'Resnets', (), '', False, 'from src.vanilla_pytorch.models.resnets import Resnets\n'), ((12, 12, 12, 42), 'torch.nn.utils.prune.remove', 'prune.remove', ({(12, 25, 12, 31): 'module', (12, 33, 12, 41): '"""weight"""'}, {}), "(module, 'weight')", True, 'import torch.nn.utils.prune as prune\n'), ((31, 21, 31, 92), 'torch.nn.utils.prune.l1_unstructured', 'prune.l1_unstructured', (), '', True, 'import torch.nn.utils.prune as prune\n'), ((49, 21, 49, 101), 'torch.nn.utils.prune.custom_from_mask', 'prune.custom_from_mask', (), '', True, 'import torch.nn.utils.prune as prune\n'), ((61, 21, 61, 96), 'torch.nn.utils.prune.random_unstructured', 'prune.random_unstructured', (), '', True, 'import torch.nn.utils.prune as prune\n'), ((34, 21, 34, 94), 'torch.nn.utils.prune.l1_unstructured', 'prune.l1_unstructured', (), '', True, 'import torch.nn.utils.prune as prune\n'), ((64, 21, 64, 98), 'torch.nn.utils.prune.random_unstructured', 'prune.random_unstructured', (), '', True, 'import torch.nn.utils.prune as prune\n'), ((76, 30, 76, 52), 'src.vanilla_pytorch.utils.count_rem_weights', 'count_rem_weights', ({(76, 48, 76, 51): 'net'}, {}), '(net)', False, 'from src.vanilla_pytorch.utils import count_rem_weights\n')] |
CitrusAqua/mol-infer | Grid-neighbor-search/GNS/read_instance_2layer_2LMM_L.py | 6d5411a2cdc7feda418f9413153b1b66b45a2e96 | """
read_instance_BH-cyclic.py
"""
'''
[seed graph]
V_C : "V_C"
E_C : "E_C"
[core specification]
ell_LB : "\ell_{\rm LB}"
ell_UB : "\ell_{\rm UB}"
cs_LB : "\textsc{cs}_{\rm LB}"
cs_UB : "\textsc{cs}_{\rm UB}"
'''
import sys
def read_pmax_file(filename):
with open(filename,'r') as f:
F = [line.rstrip('\n') for line in f if line[0]!='#']
p_max = int(F.pop(0))
s = F.pop(0)
delta = list(map(float, s.split(' ')))
s = F.pop(0)
r = list(map(int, s.split(' ')))
return p_max, delta, r
def read_seed_graph(filename):
with open(filename,'r') as f:
F = [line.rstrip('\n') for line in f if line[0]!='#']
### read V_C ###
num_V_C = int(F.pop(0))
V_C = tuple(range(1,num_V_C+1))
### read E_C ###
num_E_C = int(F.pop(0))
E_C = {}
for e in range(num_E_C):
s = F.pop(0)
arr = list(map(int, s.split(' ')))
E_C[arr[0]] = (arr[0], arr[1], arr[2]) # Add arr[0] to distinguish two edges with same starting and ending vertices, by Zhu
### read ell_LB and ell_UB ###
ell_LB = {}
ell_UB = {}
for e in range(num_E_C):
s = F.pop(0)
arr = list(map(int, s.split(' ')))
ell_LB[arr[0]] = arr[1]
ell_UB[arr[0]] = arr[2]
### compute E_ge_two, E_ge_one, E_zero_one, E_equal_one ###
E_ge_two = []
E_ge_one = []
E_zero_one = []
E_equal_one = []
I_ge_two = []
I_ge_one = []
I_zero_one = []
I_equal_one = []
for e in E_C:
if ell_LB[e] >= 2:
E_ge_two.append(E_C[e])
I_ge_two.append(e)
elif ell_LB[e] == 1 and ell_UB[e] >= 2:
E_ge_one.append(E_C[e])
I_ge_one.append(e)
elif ell_LB[e] == 0 and ell_UB[e] == 1:
E_zero_one.append(E_C[e])
I_zero_one.append(e)
elif ell_LB[e] == 1 and ell_UB[e] == 1:
E_equal_one.append(E_C[e])
I_equal_one.append(e)
else:
sys.stderr.write('error: a strange edge is found.\n')
sys.exit(1)
### read n_LB_int and n_UB_int ###
n_LB_int = int(F.pop(0))
n_UB_int = int(F.pop(0))
# read n_LB and n_star
n_LB = int(F.pop(0))
n_star = int(F.pop(0))
# read rho
rho = int(F.pop(0))
### read ch_LB and ch_UB ###
ch_LB = {}
ch_UB = {}
for v in range(num_V_C):
s = F.pop(0)
arr = list(map(int, s.split(' ')))
ch_LB[arr[0]] = arr[1]
ch_UB[arr[0]] = arr[2]
for e in range(len(E_ge_two + E_ge_one)):
s = F.pop(0)
arr = list(map(int, s.split(' ')))
ch_LB[E_C[arr[0]]] = arr[1]
ch_UB[E_C[arr[0]]] = arr[2]
### read bl_LB and bl_UB ###
bl_LB = {}
bl_UB = {}
for v in range(num_V_C):
s = F.pop(0)
arr = list(map(int, s.split(' ')))
bl_LB[arr[0]] = arr[1]
bl_UB[arr[0]] = arr[2]
for e in range(len(E_ge_two + E_ge_one)):
s = F.pop(0)
arr = list(map(int, s.split(' ')))
bl_LB[E_C[arr[0]]] = arr[1]
bl_UB[E_C[arr[0]]] = arr[2]
# read Lambda
s = F.pop(0)
Lambda = list(s.split(' '))
# read Lambda_dg_int
s = F.pop(0)
num = int(s)
Lambda_dg_int = list()
for i in range(num):
s = F.pop(0)
arr = list(s.split(' '))
Lambda_dg_int.append((arr[0], int(arr[1])))
# read Gamma_int_ac
s = F.pop(0)
num = int(s)
Gamma_int_ac = list()
nu_int = list()
for i in range(num):
s = F.pop(0)
arr = list(s.split(' '))
tmp_1 = (arr[0], arr[1], int(arr[2]))
tmp_2 = (arr[1], arr[0], int(arr[2]))
nu_int.append(tmp_1)
if tmp_1 not in Gamma_int_ac:
Gamma_int_ac.append(tmp_1)
if tmp_2 not in Gamma_int_ac:
Gamma_int_ac.append(tmp_2)
# read Gamma_int
s = F.pop(0)
num = int(s)
Gamma_int = list()
gam_int = list()
for i in range(num):
s = F.pop(0)
arr = list(s.split(' '))
tmp_1 = ((arr[0], int(arr[1])), (arr[2], int(arr[3])), int(arr[4]))
tmp_2 = ((arr[2], int(arr[3])), (arr[0], int(arr[1])), int(arr[4]))
gam_int.append(tmp_1)
if tmp_1 not in Gamma_int:
Gamma_int.append(tmp_1)
if tmp_2 not in Gamma_int:
Gamma_int.append(tmp_2)
# read Lambda_star
Lambda_star = {i: set() for i in range(1, num_V_C + 1)}
for i in range(1, num_V_C + 1):
s = F.pop(0)
arr = list(s.split(' '))
ind = int(arr[0])
arr.pop(0)
for a in arr:
Lambda_star[ind].add(a)
Lambda_int = list()
# read na_LB and na_UB
s = F.pop(0)
num = int(s)
na_LB = {}
na_UB = {}
for i in range(num):
s = F.pop(0)
arr = list(s.split(' '))
na_LB[arr[0]] = int(arr[1])
na_UB[arr[0]] = int(arr[2])
# read na_LB_int and na_UB_int
s = F.pop(0)
num = int(s)
na_LB_int = {}
na_UB_int = {}
for i in range(num):
s = F.pop(0)
arr = list(s.split(' '))
na_LB_int[arr[0]] = int(arr[1])
na_UB_int[arr[0]] = int(arr[2])
Lambda_int.append(arr[0])
# read ns_LB_int and ns_UB_int
s = F.pop(0)
num = int(s)
ns_LB_int = {}
ns_UB_int = {}
for i in range(num):
s = F.pop(0)
arr = list(s.split(' '))
ns_LB_int[(arr[0], int(arr[1]))] = int(arr[2])
ns_UB_int[(arr[0], int(arr[1]))] = int(arr[3])
# read ac_LB_int and ac_UB_int
s = F.pop(0)
num = int(s)
ac_LB_int = {}
ac_UB_int = {}
for i in range(num):
s = F.pop(0)
arr = list(s.split(' '))
a1, a2, m = nu_int[int(arr[0]) - 1]
ac_LB_int[(a1, a2, m)] = int(arr[1])
ac_LB_int[(a2, a1, m)] = int(arr[1])
ac_UB_int[(a1, a2, m)] = int(arr[2])
ac_UB_int[(a2, a1, m)] = int(arr[2])
# read ec_LB_int and ec_UB_int
s = F.pop(0)
num = int(s)
ec_LB_int = {}
ec_UB_int = {}
for i in range(num):
s = F.pop(0)
arr = list(s.split(' '))
a1, a2, m = gam_int[int(arr[0]) - 1]
ec_LB_int[(a1, a2, m)] = int(arr[1])
ec_LB_int[(a2, a1, m)] = int(arr[1])
ec_UB_int[(a1, a2, m)] = int(arr[2])
ec_UB_int[(a2, a1, m)] = int(arr[2])
# read bd2_LB and bd2_UB
bd2_LB = {}
bd2_UB = {}
for e in range(len(E_C)):
s = F.pop(0)
arr = list(map(int, s.split(' ')))
bd2_LB[E_C[arr[0]]] = arr[1]
bd2_UB[E_C[arr[0]]] = arr[2]
# read bd3_LB and bd3_UB
bd3_LB = {}
bd3_UB = {}
for e in range(len(E_C)):
s = F.pop(0)
arr = list(map(int, s.split(' ')))
bd3_LB[E_C[arr[0]]] = arr[1]
bd3_UB[E_C[arr[0]]] = arr[2]
# read ac_LB_lf and ac_UB_lf
s = F.pop(0)
num = int(s)
ac_LB_lf = dict()
ac_UB_lf = dict()
for e in range(num):
s = F.pop(0)
arr = list(s.split(' '))
ac_LB_lf[(arr[0], arr[1], int(arr[2]))] = int(arr[3])
ac_UB_lf[(arr[0], arr[1], int(arr[2]))] = int(arr[4])
s = F.pop(0)
arr = list(map(int, s.split(' ')))
ac_LB_lf_common = arr[0]
ac_UB_lf_common = arr[1]
####################################
# # Undefined constants for instances but used in MILP
r_GC = num_E_C - (num_V_C - 1)
dg_LB = [0,0,0,0,0]
dg_UB = [n_star,n_star,n_star,n_star,n_star]
return V_C, E_C, \
E_ge_two, E_ge_one, E_zero_one, E_equal_one, \
I_ge_two, I_ge_one, I_zero_one, I_equal_one, \
ell_LB, ell_UB, n_LB_int, n_UB_int, \
n_LB, n_star, rho, \
ch_LB, ch_UB, bl_LB, bl_UB, \
Lambda, Lambda_dg_int, Gamma_int_ac, Gamma_int, \
Lambda_star, na_LB, na_UB, Lambda_int, \
na_LB_int, na_UB_int, ns_LB_int, ns_UB_int, \
ac_LB_int, ac_UB_int, ec_LB_int, ec_UB_int, \
bd2_LB, bd2_UB, bd3_LB, bd3_UB, \
dg_LB, dg_UB, ac_LB_lf, ac_UB_lf, ac_LB_lf_common, ac_UB_lf_common, r_GC
def get_value(filename):
y_min = 0
y_max = 0
ind = 0
with open(filename, 'r') as f:
lines = f.readlines()
for line in lines:
if len(line.split(",")) < 2:
continue
if line.split(",")[0] == "CID":
continue
if ind == 0:
y_min = float(line.split(",")[1])
y_max = float(line.split(",")[1])
ind = 1
else:
y_tmp = float(line.split(",")[1])
if y_tmp > y_max:
y_max = y_tmp
if y_tmp < y_min:
y_min = y_tmp
return y_min, y_max
# prepare a set of chemical rooted tree
class chemicalRootedTree():
def __init__(self):
self.root = ("e", 0)
self.index = 0
self.vertex = []
self.adj = []
self.alpha = []
self.beta = []
self.height = 0
self.chg = []
def prepare_fringe_trees(fringe_filename, Lambda):
# modified for 2LMM, 0527
set_F = list()
strF = dict()
fc_LB = dict()
fc_UB = dict()
with open(fringe_filename,'r') as f:
lines = f.readlines()
for line in lines:
if len(line.split(",")) < 4:
continue
ind = int(line.split(",")[0])
str1 = line.split(",")[1]
str2 = line.split(",")[2]
str3 = line.split(",")[3].replace('\n', '')
if len(line.split(",")) > 4:
LB_tmp = line.split(",")[4].replace('\n', '')
LB_tmp = LB_tmp.replace(' ', '')
fc_LB[ind] = int(LB_tmp)
UB_tmp = line.split(",")[5].replace('\n', '')
UB_tmp = UB_tmp.replace(' ', '')
fc_UB[ind] = int(UB_tmp)
else:
fc_LB[ind] = 0
fc_UB[ind] = 10
psi = chemicalRootedTree()
seq1 = str1.split()
seq2 = [int(mul) for mul in line.split(",")[2].split()]
seq3 = [int(chg) for chg in line.split(",")[3].split()]
psi.index = ind
psi.vertex = [(seq1[j], int(seq1[j + 1])) for j in range(0, len(seq1), 2)]
psi.root = psi.vertex[0]
psi.height = max(psi.vertex[v][1] for v in range(len(psi.vertex)) if psi.vertex[v][0] != "H1")
psi.adj = [set() for _ in range(len(psi.vertex))]
psi.beta = [[0 for _ in range(len(psi.vertex))] for _ in range(len(psi.vertex))]
psi.chg = [chg for chg in seq3]
for j in range(len(seq2)):
cld = j + 1
prt = max(v for v in range(j + 1) if psi.vertex[v][1] == psi.vertex[cld][1] - 1)
psi.adj[prt].add(cld)
psi.adj[cld].add(prt)
psi.beta[prt][cld] = seq2[j]
psi.beta[cld][prt] = seq2[j]
# print(str(prt) + " " + str(cld) + " " + str(j) + " " + str(seq2[j]))
flag = True
for (a, d) in psi.vertex:
if a not in Lambda:
flag = False
break
if flag:
strF[ind] = (str1, str2, str3)
set_F.append(psi)
Lambda_ex = list()
for psi in set_F:
for (a, d) in psi.vertex[1:]:
if a not in Lambda_ex and a in Lambda:
Lambda_ex.append(a)
return set_F, Lambda_ex, strF, fc_LB, fc_UB
if __name__=="__main__":
V_C, E_C, \
E_ge_two, E_ge_one, E_zero_one, E_equal_one, \
I_ge_two, I_ge_one, I_zero_one, I_equal_one, \
ell_LB, ell_UB, n_LB_int, n_UB_int, \
n_LB, n_star, rho, \
ch_LB, ch_UB, bl_LB, bl_UB, \
Lambda, Lambda_dg_int, Gamma_int_ac, Gamma_int, \
Lambda_star, na_LB, na_UB, Lambda_int, \
na_LB_int, na_UB_int, ns_LB_int, ns_UB_int, \
ac_LB_int, ac_UB_int, ec_LB_int, ec_UB_int, \
bd2_LB, bd2_UB, bd3_LB, bd3_UB, dg_LB, dg_UB = read_seed_graph(sys.argv[1])
set_F, psi_epsilon, Code_F, n_psi, deg_r, \
beta_r, atom_r, ht, Lambda_ex = prepare_fringe_trees(sys.argv[2])
# print(V_C)
# print(E_C)
# print(E_ge_two)
# print(E_ge_one)
# print(E_zero_one)
# print(E_equal_one)
# print(ell_LB)
# print(ell_UB)
# print(bl_UB)
for psi in (set_F + [psi_epsilon]):
print(str(Code_F[psi]) + " " + str(n_psi[Code_F[psi]]) + " " + \
str(ht[Code_F[psi]]) + " " + str(atom_r[Code_F[psi]]) + " " + \
str(deg_r[Code_F[psi]]) + " " + str(beta_r[Code_F[psi]]))
# print(Lambda_ex)
# set_F_v = {v : set_F for v in V_C}
# set_F_E = set_F
# n_C = max(psi.numVertex - 1 for v in V_C for psi in set_F_v[v])
# n_T = max(psi.numVertex - 1 for psi in set_F_E)
# n_F = max(psi.numVertex - 1 for psi in set_F_E)
# print(str(n_C) + " " + str(n_T) + " " + str(n_F))
MAX_VAL = 4
val = {"C": 4, "O": 2, "N": 3}
n_H = dict()
na_alpha_ex = {ele : {i + 1 : 0} for i in range(len(set_F)) for ele in Lambda_ex}
for i, psi in enumerate(set_F):
n_H_tmp = {d : 0 for d in range(MAX_VAL)}
na_ex_tmp = {ele : 0 for ele in Lambda_ex}
for u, (ele, dep) in enumerate(psi.vertex[1:]):
beta_tmp = 0
na_ex_tmp[ele] += 1
for v in psi.adj[u + 1]:
beta_tmp += psi.beta[u + 1][v]
d_tmp = val[ele] - beta_tmp
n_H_tmp[d_tmp] += 1
for ele, d in na_alpha_ex.items():
d[i + 1] = na_ex_tmp[ele]
n_H[i + 1] = n_H_tmp
print(n_H)
print(na_alpha_ex)
| [((82, 12, 82, 65), 'sys.stderr.write', 'sys.stderr.write', ({(82, 29, 82, 64): '"""error: a strange edge is found.\n"""'}, {}), "('error: a strange edge is found.\\n')", False, 'import sys\n'), ((83, 12, 83, 23), 'sys.exit', 'sys.exit', ({(83, 21, 83, 22): '(1)'}, {}), '(1)', False, 'import sys\n')] |
ArtBIT/gamma | gamma/system_input.py | 4ec03251fcd46cd7ae7b5123ad101064b0f9bdd1 | from .system import *
from .colours import *
class InputSystem(System):
def init(self):
self.key = 'input'
def setRequirements(self):
self.requiredComponents = ['input']
def updateEntity(self, entity, scene):
# don't allow input during a cutscene
if scene.cutscene is not None:
return
# run the stored input context
if entity.getComponent('input').inputContext is not None:
entity.getComponent('input').inputContext(entity)
| [] |
mcognetta/federated | tensorflow_federated/python/research/utils/checkpoint_utils_test.py | fa0c1a00b5d77768bc2f38f503f3ef1a65693945 | # Lint as: python3
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ServerState save."""
import functools
import os
import attr
import tensorflow as tf
import tensorflow_federated as tff
from tensorflow_federated.python.examples.mnist import models
from tensorflow_federated.python.research.utils import checkpoint_utils
@attr.s(cmp=False, frozen=False)
class Obj(object):
"""Container for all state that need to be stored in the checkpoint.
Attributes:
model: A ModelWeights structure, containing Tensors or Variables.
optimizer_state: A list of Tensors or Variables, in the order returned by
optimizer.variables().
round_num: Training round_num.
"""
model = attr.ib()
optimizer_state = attr.ib()
round_num = attr.ib()
@classmethod
def from_anon_tuple(cls, anon_tuple, round_num):
# TODO(b/130724878): These conversions should not be needed.
return cls(
model=anon_tuple.model._asdict(recursive=True),
optimizer_state=list(anon_tuple.optimizer_state),
round_num=round_num)
class SavedStateTest(tf.test.TestCase):
def test_save_and_load(self):
server_optimizer_fn = functools.partial(
tf.keras.optimizers.SGD, learning_rate=0.1, momentum=0.9)
iterative_process = tff.learning.build_federated_averaging_process(
models.model_fn, server_optimizer_fn=server_optimizer_fn)
server_state = iterative_process.initialize()
# TODO(b/130724878): These conversions should not be needed.
obj = Obj.from_anon_tuple(server_state, 1)
export_dir = os.path.join(self.get_temp_dir(), 'ckpt_1')
checkpoint_utils.save(obj, export_dir)
loaded_obj = checkpoint_utils.load(export_dir, obj)
self.assertAllClose(tf.nest.flatten(obj), tf.nest.flatten(loaded_obj))
def test_load_latest_state(self):
server_optimizer_fn = functools.partial(
tf.keras.optimizers.SGD, learning_rate=0.1, momentum=0.9)
iterative_process = tff.learning.build_federated_averaging_process(
models.model_fn, server_optimizer_fn=server_optimizer_fn)
server_state = iterative_process.initialize()
# TODO(b/130724878): These conversions should not be needed.
obj_1 = Obj.from_anon_tuple(server_state, 1)
export_dir = os.path.join(self.get_temp_dir(), 'ckpt_1')
checkpoint_utils.save(obj_1, export_dir)
# TODO(b/130724878): These conversions should not be needed.
obj_2 = Obj.from_anon_tuple(server_state, 2)
export_dir = os.path.join(self.get_temp_dir(), 'ckpt_2')
checkpoint_utils.save(obj_2, export_dir)
export_dir = checkpoint_utils.latest_checkpoint(self.get_temp_dir())
loaded_obj = checkpoint_utils.load(export_dir, obj_1)
self.assertEqual(os.path.join(self.get_temp_dir(), 'ckpt_2'), export_dir)
self.assertAllClose(tf.nest.flatten(obj_2), tf.nest.flatten(loaded_obj))
if __name__ == '__main__':
tf.compat.v1.enable_v2_behavior()
tf.test.main()
| [((28, 1, 28, 32), 'attr.s', 'attr.s', (), '', False, 'import attr\n'), ((38, 10, 38, 19), 'attr.ib', 'attr.ib', ({}, {}), '()', False, 'import attr\n'), ((39, 20, 39, 29), 'attr.ib', 'attr.ib', ({}, {}), '()', False, 'import attr\n'), ((40, 14, 40, 23), 'attr.ib', 'attr.ib', ({}, {}), '()', False, 'import attr\n'), ((96, 2, 96, 35), 'tensorflow.compat.v1.enable_v2_behavior', 'tf.compat.v1.enable_v2_behavior', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((97, 2, 97, 16), 'tensorflow.test.main', 'tf.test.main', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((54, 26, 55, 65), 'functools.partial', 'functools.partial', (), '', False, 'import functools\n'), ((57, 24, 58, 65), 'tensorflow_federated.learning.build_federated_averaging_process', 'tff.learning.build_federated_averaging_process', (), '', True, 'import tensorflow_federated as tff\n'), ((64, 4, 64, 42), 'tensorflow_federated.python.research.utils.checkpoint_utils.save', 'checkpoint_utils.save', ({(64, 26, 64, 29): 'obj', (64, 31, 64, 41): 'export_dir'}, {}), '(obj, export_dir)', False, 'from tensorflow_federated.python.research.utils import checkpoint_utils\n'), ((66, 17, 66, 55), 'tensorflow_federated.python.research.utils.checkpoint_utils.load', 'checkpoint_utils.load', ({(66, 39, 66, 49): 'export_dir', (66, 51, 66, 54): 'obj'}, {}), '(export_dir, obj)', False, 'from tensorflow_federated.python.research.utils import checkpoint_utils\n'), ((71, 26, 72, 65), 'functools.partial', 'functools.partial', (), '', False, 'import functools\n'), ((74, 24, 75, 65), 'tensorflow_federated.learning.build_federated_averaging_process', 'tff.learning.build_federated_averaging_process', (), '', True, 'import tensorflow_federated as tff\n'), ((80, 4, 80, 44), 'tensorflow_federated.python.research.utils.checkpoint_utils.save', 'checkpoint_utils.save', ({(80, 26, 80, 31): 'obj_1', (80, 33, 80, 43): 'export_dir'}, {}), '(obj_1, export_dir)', False, 'from tensorflow_federated.python.research.utils import checkpoint_utils\n'), ((85, 4, 85, 44), 'tensorflow_federated.python.research.utils.checkpoint_utils.save', 'checkpoint_utils.save', ({(85, 26, 85, 31): 'obj_2', (85, 33, 85, 43): 'export_dir'}, {}), '(obj_2, export_dir)', False, 'from tensorflow_federated.python.research.utils import checkpoint_utils\n'), ((89, 17, 89, 57), 'tensorflow_federated.python.research.utils.checkpoint_utils.load', 'checkpoint_utils.load', ({(89, 39, 89, 49): 'export_dir', (89, 51, 89, 56): 'obj_1'}, {}), '(export_dir, obj_1)', False, 'from tensorflow_federated.python.research.utils import checkpoint_utils\n'), ((68, 24, 68, 44), 'tensorflow.nest.flatten', 'tf.nest.flatten', ({(68, 40, 68, 43): 'obj'}, {}), '(obj)', True, 'import tensorflow as tf\n'), ((68, 46, 68, 73), 'tensorflow.nest.flatten', 'tf.nest.flatten', ({(68, 62, 68, 72): 'loaded_obj'}, {}), '(loaded_obj)', True, 'import tensorflow as tf\n'), ((92, 24, 92, 46), 'tensorflow.nest.flatten', 'tf.nest.flatten', ({(92, 40, 92, 45): 'obj_2'}, {}), '(obj_2)', True, 'import tensorflow as tf\n'), ((92, 48, 92, 75), 'tensorflow.nest.flatten', 'tf.nest.flatten', ({(92, 64, 92, 74): 'loaded_obj'}, {}), '(loaded_obj)', True, 'import tensorflow as tf\n')] |
alexli0707/pyforum | website/models/user.py | 4f5ea4a0b07e094e24410ae699016590b9c20d59 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import peewee
from flask import current_app,abort
from flask.ext.login import AnonymousUserMixin, UserMixin
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from peewee import Model, IntegerField, CharField,PrimaryKeyField
from website.app import db_wrapper, login_manager
from website.http.main_exception import MainException
from werkzeug.security import check_password_hash,generate_password_hash
class User(UserMixin, db_wrapper.Model):
id = PrimaryKeyField()
email = CharField(index=True)
username = CharField(index=True)
password_hash = CharField()
role_id = IntegerField()
confirmed = IntegerField()
class Meta:
db_table = 'users'
def register(self,email,password,username):
user = User(email=email, username=username, password_hash=generate_password_hash(password))
try:
user.save()
except peewee.IntegrityError as err:
print(err.args)
if err.args[0] == 1062:
if 'ix_users_email' in err.args[1]:
raise MainException.DUPLICATE_EMAIL
if 'ix_users_username' in err.args[1]:
raise MainException.DUPLICATE_USERNAME
return user
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def generate_confirmation_token(self, expiration=3600):
"""生成验证邮箱的token"""
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'confirm': self.id})
def confirm(self, token):
"""验证邮箱"""
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
print(data)
except:
return False
if data.get('confirm') != self.id:
return False
        # Verification succeeded, write to the database
self.confirmed = True
self.save()
return True
def generate_reset_token(self, expiration=3600):
"""生成重置密码的token"""
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'reset': self.id})
def reset_password(self, token, new_password):
"""重置密码"""
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return False
if data.get('reset') != self.id:
return False
        # Verification succeeded, write to the database
        self.password_hash = generate_password_hash(new_password)
self.save()
return True
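# Illustrative sketch (not part of the original module): the intended email
# confirmation round trip; `user` is a hypothetical User instance.
#
#   token = user.generate_confirmation_token()
#   # ...the token is emailed to the user and comes back via a route...
#   user.confirm(token)   # True on success, also sets user.confirmed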
"""
Anonymous user
"""
class AnonymousUser(AnonymousUserMixin):
def can(self, permissions):
return False
def is_administrator(self):
return False
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
user = User.get(User.id == int(user_id))
if not user:
abort(404)
else:
return user
| [((15, 9, 15, 26), 'peewee.PrimaryKeyField', 'PrimaryKeyField', ({}, {}), '()', False, 'from peewee import Model, IntegerField, CharField, PrimaryKeyField\n'), ((16, 12, 16, 33), 'peewee.CharField', 'CharField', (), '', False, 'from peewee import Model, IntegerField, CharField, PrimaryKeyField\n'), ((17, 15, 17, 36), 'peewee.CharField', 'CharField', (), '', False, 'from peewee import Model, IntegerField, CharField, PrimaryKeyField\n'), ((18, 20, 18, 31), 'peewee.CharField', 'CharField', ({}, {}), '()', False, 'from peewee import Model, IntegerField, CharField, PrimaryKeyField\n'), ((19, 14, 19, 28), 'peewee.IntegerField', 'IntegerField', ({}, {}), '()', False, 'from peewee import Model, IntegerField, CharField, PrimaryKeyField\n'), ((20, 16, 20, 30), 'peewee.IntegerField', 'IntegerField', ({}, {}), '()', False, 'from peewee import Model, IntegerField, CharField, PrimaryKeyField\n'), ((41, 15, 41, 64), 'werkzeug.security.check_password_hash', 'check_password_hash', ({(41, 35, 41, 53): 'self.password_hash', (41, 55, 41, 63): 'password'}, {}), '(self.password_hash, password)', False, 'from werkzeug.security import check_password_hash, generate_password_hash\n'), ((46, 12, 46, 68), 'itsdangerous.TimedJSONWebSignatureSerializer', 'Serializer', ({(46, 23, 46, 55): "current_app.config['SECRET_KEY']", (46, 57, 46, 67): 'expiration'}, {}), "(current_app.config['SECRET_KEY'], expiration)", True, 'from itsdangerous import TimedJSONWebSignatureSerializer as Serializer\n'), ((51, 12, 51, 56), 'itsdangerous.TimedJSONWebSignatureSerializer', 'Serializer', ({(51, 23, 51, 55): "current_app.config['SECRET_KEY']"}, {}), "(current_app.config['SECRET_KEY'])", True, 'from itsdangerous import TimedJSONWebSignatureSerializer as Serializer\n'), ((66, 12, 66, 68), 'itsdangerous.TimedJSONWebSignatureSerializer', 'Serializer', ({(66, 23, 66, 55): "current_app.config['SECRET_KEY']", (66, 57, 66, 67): 'expiration'}, {}), "(current_app.config['SECRET_KEY'], expiration)", True, 'from itsdangerous import TimedJSONWebSignatureSerializer as Serializer\n'), ((71, 12, 71, 56), 'itsdangerous.TimedJSONWebSignatureSerializer', 'Serializer', ({(71, 23, 71, 55): "current_app.config['SECRET_KEY']"}, {}), "(current_app.config['SECRET_KEY'])", True, 'from itsdangerous import TimedJSONWebSignatureSerializer as Serializer\n'), ((101, 8, 101, 18), 'flask.abort', 'abort', ({(101, 14, 101, 17): '(404)'}, {}), '(404)', False, 'from flask import current_app, abort\n'), ((27, 66, 27, 98), 'werkzeug.security.generate_password_hash', 'generate_password_hash', ({(27, 89, 27, 97): 'password'}, {}), '(password)', False, 'from werkzeug.security import check_password_hash, generate_password_hash\n')] |
robertavram/project5 | FlaskApp/__init__.py | 12a2816b84be994b561f2f693cf34c0fa4f0ca19 | # application
import application | [] |
harikuts/dsr_optimization | sim2net/speed/constant.py | 796e58da578f7841a060233a8981eb69d92b798b | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2012 Michal Kalewski <mkalewski at cs.put.poznan.pl>
#
# This file is a part of the Simple Network Simulator (sim2net) project.
# USE, MODIFICATION, COPYING AND DISTRIBUTION OF THIS SOFTWARE IS SUBJECT TO
# THE TERMS AND CONDITIONS OF THE MIT LICENSE. YOU SHOULD HAVE RECEIVED A COPY
# OF THE MIT LICENSE ALONG WITH THIS SOFTWARE; IF NOT, YOU CAN DOWNLOAD A COPY
# FROM HTTP://WWW.OPENSOURCE.ORG/.
#
# For bug reports, feature and support requests please visit
# <https://github.com/mkalewski/sim2net/issues>.
"""
Provides an implementation of a constant node speed. In this case the speed of a
node is fixed at a given value.
"""
from math import fabs
from sim2net.speed._speed import Speed
from sim2net.utility.validation import check_argument_type
__docformat__ = 'reStructuredText'
class Constant(Speed):
"""
This class implements a constant node speed fixed at a given value.
"""
def __init__(self, speed):
"""
*Parameters*:
- **speed** (`float`): a value of the node speed.
*Example*:
.. testsetup::
from sim2net.speed.constant import Constant
.. doctest::
>>> speed = Constant(5.0)
>>> speed.current
5.0
>>> speed.get_new()
5.0
>>> speed = Constant(-5.0)
>>> speed.current
5.0
>>> speed.get_new()
5.0
"""
super(Constant, self).__init__(Constant.__name__)
check_argument_type(Constant.__name__, 'speed', float, speed,
self.logger)
self.__current_speed = fabs(float(speed))
@property
def current(self):
"""
(*Property*) The absolute value of the current speed of type `float`.
"""
return self.__current_speed
def get_new(self):
"""
Returns the absolute value of the given node speed of type `float`.
"""
return self.current
| [((60, 8, 61, 40), 'sim2net.utility.validation.check_argument_type', 'check_argument_type', ({(60, 28, 60, 45): 'Constant.__name__', (60, 47, 60, 54): '"""speed"""', (60, 56, 60, 61): 'float', (60, 63, 60, 68): 'speed', (61, 28, 61, 39): 'self.logger'}, {}), "(Constant.__name__, 'speed', float, speed, self.logger)", False, 'from sim2net.utility.validation import check_argument_type\n')] |
haryoa/nexula | nexula/nexula_utility/utility_extract_func.py | cc3b5a9b8dd8294bdc47150a1971cb49c4dde225 | from nexula.nexula_utility.utility_import_var import import_class
class NexusFunctionModuleExtractor():
"""
    Used for constructing the data preprocessing and feature-representation pipeline.
"""
def __init__(self, module_class_list, args_dict, **kwargs):
"""
        Instantiate the object of each class in the pipeline.
Parameters
----------
module_class_list
args_dict
kwargs
"""
# self.list_of_cls = self._search_module_function(module_class_list)
self.list_of_cls = module_class_list
if 'logger' in kwargs:
self.logger = kwargs['logger']
self.logger.debug(args_dict) if 'logger' in self.__dict__ else None
self.args_init = [arg['init'] for arg in args_dict]
self.args_call = [arg['call'] for arg in args_dict]
self._construct_object()
# Extract call
def _construct_object(self):
"""
        Instantiate the objects of every pipeline step.
"""
import logging
logger = logging.getLogger('nexula')
logger.debug(self.list_of_cls)
new_list_of_cls = []
for i, cls in enumerate(self.list_of_cls): # REFACTOR
logger.debug(cls)
new_list_of_cls.append(cls(**self.args_init[i]))
self.list_of_cls = new_list_of_cls
def _search_module_function(self, module_function_list):
"""
Search the module in the library
Parameters
----------
module_function_list
Returns
-------
"""
list_of_cls = []
for module, function in module_function_list:
# TODO Raise exception if empty
list_of_cls.append(import_class(function, module))
return list_of_cls
def __call__(self, x, y, *args, **kwargs):
"""
        Run the pipeline on (x, y) by invoking each step's __call__ method.
Returns
-------
"""
for i,cls in enumerate(self.list_of_cls):
current_args = self.args_call[i]
x, y = cls(x, y, **kwargs, **current_args)
return x, y
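# Illustrative usage sketch (added for clarity; not part of the original module).
# It shows the expected shape of `module_class_list` and `args_dict`: one class per
# pipeline step and one {'init': ..., 'call': ...} dict per step. `LowercaseStep`
# is a hypothetical step class defined here only for this example.
if __name__ == '__main__':
    class LowercaseStep:
        def __init__(self, verbose=False):
            self.verbose = verbose
        def __call__(self, x, y, **kwargs):
            # Lowercase every text in x; leave the labels y untouched.
            return [text.lower() for text in x], y
    pipeline = NexusFunctionModuleExtractor(
        module_class_list=[LowercaseStep],
        args_dict=[{'init': {'verbose': True}, 'call': {}}],
    )
    x, y = pipeline(["Hello World"], [1])
    print(x, y)  # ['hello world'] [1]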
| [((34, 17, 34, 44), 'logging.getLogger', 'logging.getLogger', ({(34, 35, 34, 43): '"""nexula"""'}, {}), "('nexula')", False, 'import logging\n'), ((56, 31, 56, 61), 'nexula.nexula_utility.utility_import_var.import_class', 'import_class', ({(56, 44, 56, 52): 'function', (56, 54, 56, 60): 'module'}, {}), '(function, module)', False, 'from nexula.nexula_utility.utility_import_var import import_class\n')] |
MJ-Jang/Marbas | marbas/preprocessing.py | 0a144e4f2ae868604ed4d3b7ae892a53fdebf388 | import os
from configparser import ConfigParser
cfg = ConfigParser()
#PATH_CUR = os.getcwd() + '/pynori'
PATH_CUR = os.path.dirname(__file__)
cfg.read(PATH_CUR+'/config.ini')
# PREPROCESSING
ENG_LOWER = cfg.getboolean('PREPROCESSING', 'ENG_LOWER')
class Preprocessing(object):
"""Preprocessing modules before tokenizing
It doesn't need to be initialized.
"""
def __init__(self):
pass
def pipeline(self, input_str):
if ENG_LOWER:
input_str = self.lower(input_str)
return input_str
def lower(self, input_str):
return input_str.lower()
def typo(self, input_str):
"""To correct typing errors"""
pass
def spacing(self, input_str):
"""To correct spacing errors"""
pass
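# Illustrative usage sketch (added for clarity; not part of the original module).
# It assumes config.ini ships next to this module, as the cfg.read(...) call above
# expects; with ENG_LOWER enabled the pipeline simply lowercases English characters.
if __name__ == '__main__':
    preprocessor = Preprocessing()
    print(preprocessor.pipeline("Pynori Tokenizer"))  # -> "pynori tokenizer" when ENG_LOWER is true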
| [((4, 6, 4, 20), 'configparser.ConfigParser', 'ConfigParser', ({}, {}), '()', False, 'from configparser import ConfigParser\n'), ((6, 11, 6, 36), 'os.path.dirname', 'os.path.dirname', ({(6, 27, 6, 35): '__file__'}, {}), '(__file__)', False, 'import os\n')] |
amvasudeva/rapidata | pravash/servicenowplugin/xlr-servicenow-plugin-master/src/main/resources/servicenow/ServiceNowQueryTile.py | 7b6e984d24866f5cf474847cf462ac628427cf48 | #
# THIS CODE AND INFORMATION ARE PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS
# FOR A PARTICULAR PURPOSE. THIS CODE AND INFORMATION ARE NOT SUPPORTED BY XEBIALABS.
#
import sys
import com.xhaus.jyson.JysonCodec as json
if not servicenowServer:
raise Exception("ServiceNow server ID must be provided")
if not username:
username = servicenowServer["username"]
if not password:
password = servicenowServer["password"]
servicenowUrl = servicenowServer['url']
credentials = CredentialsFallback(servicenowServer, username, password).getCredentials()
content = None
RESPONSE_OK_STATUS = 200
print "Sending content %s" % content
def get_row_data(item):
row_map = {}
for column in detailsViewColumns:
if detailsViewColumns[column] and "." in detailsViewColumns[column]:
json_col = detailsViewColumns[column].split('.')
if item[json_col[0]]:
row_map[column] = item[json_col[0]][json_col[1]]
else:
row_map[column] = item[column]
row_map['link'] = servicenowUrl + "nav_to.do?uri=%s.do?sys_id=%s" % (tableName, item['sys_id'])
return row_map
servicenowAPIUrl = servicenowUrl + '/api/now/v1/table/%s?sysparm_display_value=true&sysparm_limit=1000&sysparm_query=%s' % (tableName, query)
servicenowResponse = XLRequest(servicenowAPIUrl, 'GET', content, credentials['username'], credentials['password'], 'application/json').send()
if servicenowResponse.status == RESPONSE_OK_STATUS:
json_data = json.loads(servicenowResponse.read())
rows = {}
for item in json_data['result']:
row = item['number']
rows[row] = get_row_data(item)
data = rows
else:
error = json.loads(servicenowResponse.read())
if 'Invalid table' in error['error']['message']:
print "Invalid Table Name"
data = {"Invalid table name"}
servicenowResponse.errorDump()
else:
print "Failed to run query in Service Now"
servicenowResponse.errorDump()
sys.exit(1) | [] |
Buckinghamshire-Digital-Service/buckinghamshire-council | bc/recruitment/migrations/0022_merge_20200331_1633.py | bbbdb52b515bcdfc79a2bd9198dfa4828405370e | # Generated by Django 2.2.10 on 2020-03-31 15:33
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("recruitment", "0021_merge_20200331_1503"),
("recruitment", "0013_button_block"),
]
operations = []
| [] |
Pyabecedarian/Algorithms-and-Data-Structures-using-Python | Stage_3/Task11_Graph/depth_first_search.py | 08642357df60d48cb185b5487150204b42764260 | """
The Depth First Search (DFS)
The goal of a DFS is to search as deeply as possible, connecting as many nodes in the graph as possible and
branching where necessary. Whereas a BFS builds its search tree one level at a time, a DFS creates its
search tree by exploring one branch of the tree as deeply as possible before backtracking.
As with BFS, the DFS makes use of `predecessor` links to construct the tree. In addition, the DFS uses two
extra instance variables in the Vertex class, `discovery` and `finish_time`.
    predecessor : same as in BFS
    discovery : the number of steps in the algorithm before a vertex is first encountered
    finish_time : the number of steps before a vertex is colored black
"""
from datastruct.graph import Vertex, Graph
class DFSGraph(Graph):
def __init__(self):
super(DFSGraph, self).__init__()
self.time = 0
def reset(self):
self.time = 0
for v in self:
v.color = 'white'
v.predecessor = None
def dfs(self):
self.reset()
for v in self:
if v.color == 'white':
self._dfs_visit(v)
def _dfs_visit(self, vert: Vertex):
vert.color = 'gray'
self.time += 1
vert.discovery = self.time
for nextv in vert.get_connections():
if nextv.color == 'white':
nextv.predecessor = vert
self._dfs_visit(nextv)
vert.color = 'black'
self.time += 1
vert.finish_time = self.time
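# Illustrative usage sketch (added for clarity; not part of the original module).
# It assumes the `Graph` base class from datastruct.graph exposes an
# `add_edge(from_key, to_key)` helper that also creates missing vertices, in the
# same snake_case style as `get_connections()` used above; treat that method name
# as an assumption about the datastruct.graph API rather than a guarantee.
if __name__ == '__main__':
    g = DFSGraph()
    for frm, to in [('A', 'B'), ('B', 'C'), ('A', 'D'), ('D', 'E')]:
        g.add_edge(frm, to)  # assumed datastruct.graph helper
    g.dfs()
    for v in g:
        # After dfs(), every vertex carries its discovery and finish_time counters.
        print(v, v.discovery, v.finish_time)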
| [] |
rbtcollins/rusty_rail | salt/_modules/freebsd_common.py | 6ab8a95247b42a81add03500a75ce6678ede5d58 | def sysrc(value):
"""Call sysrc.
CLI Example:
.. code-block:: bash
salt '*' freebsd_common.sysrc sshd_enable=YES
salt '*' freebsd_common.sysrc static_routes
"""
return __salt__['cmd.run_all']("sysrc %s" % value)
| [] |
jhunken/auth0-python | auth0/v3/management/blacklists.py | af5d863ffe75a4a7cd729c9d084cad6b37bd632e | from .rest import RestClient
class Blacklists(object):
"""Auth0 blacklists endpoints
Args:
domain (str): Your Auth0 domain, e.g: 'username.auth0.com'
token (str): Management API v2 Token
telemetry (bool, optional): Enable or disable Telemetry
(defaults to True)
"""
def __init__(self, domain, token, telemetry=True):
self.url = 'https://{}/api/v2/blacklists/tokens'.format(domain)
self.client = RestClient(jwt=token, telemetry=telemetry)
def get(self, aud=None):
"""Retrieves the jti and aud of all tokens in the blacklist.
Args:
aud (str, optional): The JWT's aud claim. The client_id of the
application for which it was issued.
See: https://auth0.com/docs/api/management/v2#!/Blacklists/get_tokens
"""
params = {
'aud': aud
}
return self.client.get(self.url, params=params)
def create(self, jti, aud=''):
"""Adds a token to the blacklist.
Args:
jti (str): the jti of the JWT to blacklist.
aud (str, optional): The JWT's aud claim. The client_id of the
application for which it was issued.
See: https://auth0.com/docs/api/management/v2#!/Blacklists/post_tokens
"""
return self.client.post(self.url, data={'jti': jti, 'aud': aud})
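# Illustrative usage sketch (added for clarity; not part of the original module).
# The domain, token, jti and client_id values below are placeholders; a real
# Management API v2 token with the appropriate scopes is required for these
# calls to succeed.
if __name__ == '__main__':
    blacklists = Blacklists('myaccount.auth0.com', 'A_MANAGEMENT_API_V2_TOKEN')
    # Blacklist a token by its jti, optionally scoped to the application (aud).
    blacklists.create(jti='some-jti-value', aud='a-client-id')
    # List every blacklisted jti/aud pair for that application.
    print(blacklists.get(aud='a-client-id'))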
| [] |
qzm/QUANTAXIS | test_backtest/simplebacktest.py | 055fdc16d67670fb4770e7097865336199e55f3e | # coding=utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import QUANTAXIS as QA
import random
"""
This code demonstrates an extremely easy-to-implement mini backtest: efficient and without any event-driven machinery.
"""
B = QA.QA_BacktestBroker()
AC = QA.QA_Account()
"""
# Set the account's initial capital
AC.reset_assets(assets)
# Send an order
Order=AC.send_order(code='000001',amount=1000,time='2018-03-21',towards=QA.ORDER_DIRECTION.BUY,price=0,order_model=QA.ORDER_MODEL.MARKET,amount_model=QA.AMOUNT_MODEL.BY_AMOUNT)
# Let the broker match (fill) the order
dealmes=B.receive_order(QA.QA_Event(order=Order,market_data=data))
# Update the account with the resulting deal
AC.receive_deal(dealmes)
# Analyze the results
risk=QA.QA_Risk(AC)
"""
AC.reset_assets(20000000)  # set the initial capital
def simple_backtest(AC, code, start, end):
DATA = QA.QA_fetch_stock_day_adv(code, start, end).to_qfq()
    for items in DATA.panel_gen:  # iterate one trading day at a time
for item in items.security_gen:
            if random.random() > 0.5:  # random choice to simulate buy/sell decisions
if AC.sell_available.get(item.code[0], 0) == 0:
order=AC.send_order(
code=item.data.code[0], time=item.data.date[0], amount=1000, towards=QA.ORDER_DIRECTION.BUY, price=0, order_model=QA.ORDER_MODEL.MARKET, amount_model=QA.AMOUNT_MODEL.BY_AMOUNT
)
AC.receive_deal(B.receive_order(QA.QA_Event(order=order,market_data=item)))
else:
AC.receive_deal(B.receive_order(QA.QA_Event(order=AC.send_order(
code=item.data.code[0], time=item.data.date[0], amount=1000, towards=QA.ORDER_DIRECTION.SELL, price=0, order_model=QA.ORDER_MODEL.MARKET, amount_model=QA.AMOUNT_MODEL.BY_AMOUNT
),market_data=item)))
AC.settle()
simple_backtest(AC, QA.QA_fetch_stock_block_adv(
).code[0:10], '2017-01-01', '2018-01-31')
print(AC.message)
AC.save()
risk = QA.QA_Risk(AC)
print(risk.message)
risk.save() | [((31, 4, 31, 26), 'QUANTAXIS.QA_BacktestBroker', 'QA.QA_BacktestBroker', ({}, {}), '()', True, 'import QUANTAXIS as QA\n'), ((32, 5, 32, 20), 'QUANTAXIS.QA_Account', 'QA.QA_Account', ({}, {}), '()', True, 'import QUANTAXIS as QA\n'), ((75, 7, 75, 21), 'QUANTAXIS.QA_Risk', 'QA.QA_Risk', ({(75, 18, 75, 20): 'AC'}, {}), '(AC)', True, 'import QUANTAXIS as QA\n'), ((53, 11, 53, 54), 'QUANTAXIS.QA_fetch_stock_day_adv', 'QA.QA_fetch_stock_day_adv', ({(53, 37, 53, 41): 'code', (53, 43, 53, 48): 'start', (53, 50, 53, 53): 'end'}, {}), '(code, start, end)', True, 'import QUANTAXIS as QA\n'), ((71, 20, 72, 1), 'QUANTAXIS.QA_fetch_stock_block_adv', 'QA.QA_fetch_stock_block_adv', ({}, {}), '()', True, 'import QUANTAXIS as QA\n'), ((57, 15, 57, 30), 'random.random', 'random.random', ({}, {}), '()', False, 'import random\n'), ((62, 52, 62, 93), 'QUANTAXIS.QA_Event', 'QA.QA_Event', (), '', True, 'import QUANTAXIS as QA\n')] |
jjhelmus/artview | artview/components/field.py | 2af5ccad8d509d11ef6da7c97bee0f7b255b6879 | """
field.py
Class instance used for modifying field via Display window.
"""
# Load the needed packages
from functools import partial
from ..core import Variable, Component, QtGui, QtCore
class FieldButtonWindow(Component):
'''Class to display a Window with Field name radio buttons.'''
Vradar = None #: see :ref:`shared_variable`
Vfield = None #: see :ref:`shared_variable`
def __init__(self, Vradar=None, Vfield=None, name="FieldButtons",
parent=None):
'''
Initialize the class to create the interface.
Parameters
----------
[Optional]
Vradar : :py:class:`~artview.core.core.Variable` instance
Radar signal variable. If None start new one with None
Vfield : :py:class:`~artview.core.core.Variable` instance
Field signal variable. If None start new one empty string
name : string
Field Radiobutton window name.
parent : PyQt instance
Parent instance to associate to FieldButtonWindow.
If None, then Qt owns, otherwise associated with parent PyQt
instance.
Notes
-----
This class records the selected button and passes the
change value back to variable.
'''
super(FieldButtonWindow, self).__init__(name=name, parent=parent)
# Set up signal, so that DISPLAY can react to external
# (or internal) changes in field (Core.Variable instances expected)
# The change is sent through Vfield
if Vradar is None:
self.Vradar = Variable(None)
else:
self.Vradar = Vradar
if Vfield is None:
self.Vfield = Variable('')
else:
self.Vfield = Vfield
self.sharedVariables = {"Vradar": self.NewRadar,
"Vfield": self.NewField}
self.connectAllVariables()
self.CreateFieldWidget()
self.SetFieldRadioButtons()
self.show()
########################
# Button methods #
########################
def FieldSelectCmd(self, field):
'''Captures a selection and updates field variable.'''
self.Vfield.change(field)
def CreateFieldWidget(self):
'''Create a widget to store radio buttons to control field adjust.'''
self.radioBox = QtGui.QGroupBox("Field Selection", parent=self)
self.rBox_layout = QtGui.QVBoxLayout(self.radioBox)
self.radioBox.setLayout(self.rBox_layout)
self.setCentralWidget(self.radioBox)
def SetFieldRadioButtons(self):
'''Set a field selection using radio buttons.'''
# Instantiate the buttons into a list for future use
self.fieldbutton = {}
if self.Vradar.value is None:
return
# Loop through and create each field button and
# connect a value when selected
for field in self.Vradar.value.fields.keys():
button = QtGui.QRadioButton(field, self.radioBox)
self.fieldbutton[field] = button
QtCore.QObject.connect(button, QtCore.SIGNAL("clicked()"),
partial(self.FieldSelectCmd, field))
self.rBox_layout.addWidget(button)
# set Checked the current field
self.NewField(self.Vfield, self.Vfield.value, True)
def NewField(self, variable, value, strong):
'''Slot for 'ValueChanged' signal of
:py:class:`Vfield <artview.core.core.Variable>`.
This will:
* Update radio check
'''
if (self.Vradar.value is not None and
value in self.Vradar.value.fields):
self.fieldbutton[value].setChecked(True)
def NewRadar(self, variable, value, strong):
'''Slot for 'ValueChanged' signal of
:py:class:`Vradar <artview.core.core.Variable>`.
This will:
* Recreate radio items
'''
self.CreateFieldWidget()
self.SetFieldRadioButtons()
| [((93, 35, 93, 70), 'functools.partial', 'partial', ({(93, 43, 93, 62): 'self.FieldSelectCmd', (93, 64, 93, 69): 'field'}, {}), '(self.FieldSelectCmd, field)', False, 'from functools import partial\n')] |
Careerleaf/django-rest-framework-mongoengine | rest_framework_mongoengine/fields.py | fc28dbf7af760528f6f7247e567328df46458799 | from bson.errors import InvalidId
from django.core.exceptions import ValidationError
from django.utils.encoding import smart_str
from mongoengine import dereference
from mongoengine.base.document import BaseDocument
from mongoengine.document import Document
from rest_framework import serializers
from mongoengine.fields import ObjectId
import sys
if sys.version_info[0] >= 3:
def unicode(val):
return str(val)
class MongoDocumentField(serializers.WritableField):
MAX_RECURSION_DEPTH = 5 # default value of depth
def __init__(self, *args, **kwargs):
try:
self.model_field = kwargs.pop('model_field')
self.depth = kwargs.pop('depth', self.MAX_RECURSION_DEPTH)
except KeyError:
raise ValueError("%s requires 'model_field' kwarg" % self.type_label)
super(MongoDocumentField, self).__init__(*args, **kwargs)
def transform_document(self, document, depth):
data = {}
# serialize each required field
for field in document._fields:
if hasattr(document, smart_str(field)):
# finally check for an attribute 'field' on the instance
obj = getattr(document, field)
else:
continue
val = self.transform_object(obj, depth-1)
if val is not None:
data[field] = val
return data
def transform_dict(self, obj, depth):
return dict([(key, self.transform_object(val, depth-1))
for key, val in obj.items()])
def transform_object(self, obj, depth):
"""
        Convert models to native types, recursing into (embedded) objects.
"""
if depth == 0:
# Return primary key if exists, else return default text
return str(getattr(obj, 'pk', "Max recursion depth exceeded"))
elif isinstance(obj, BaseDocument):
# Document, EmbeddedDocument
return self.transform_document(obj, depth-1)
elif isinstance(obj, dict):
# Dictionaries
return self.transform_dict(obj, depth-1)
elif isinstance(obj, list):
# List
return [self.transform_object(value, depth-1) for value in obj]
elif obj is None:
return None
else:
return unicode(obj) if isinstance(obj, ObjectId) else obj
class ReferenceField(MongoDocumentField):
type_label = 'ReferenceField'
def from_native(self, value):
try:
dbref = self.model_field.to_python(value)
except InvalidId:
raise ValidationError(self.error_messages['invalid'])
instance = dereference.DeReference().__call__([dbref])[0]
# Check if dereference was successful
if not isinstance(instance, Document):
msg = self.error_messages['invalid']
raise ValidationError(msg)
return instance
def to_native(self, obj):
return self.transform_object(obj, self.depth)
class ListField(MongoDocumentField):
type_label = 'ListField'
def from_native(self, value):
return self.model_field.to_python(value)
def to_native(self, obj):
return self.transform_object(obj, self.depth)
class EmbeddedDocumentField(MongoDocumentField):
type_label = 'EmbeddedDocumentField'
def __init__(self, *args, **kwargs):
try:
self.document_type = kwargs.pop('document_type')
except KeyError:
raise ValueError("EmbeddedDocumentField requires 'document_type' kwarg")
super(EmbeddedDocumentField, self).__init__(*args, **kwargs)
def get_default_value(self):
return self.to_native(self.default())
def to_native(self, obj):
if obj is None:
return None
else:
return self.model_field.to_mongo(obj)
def from_native(self, value):
return self.model_field.to_python(value)
class DynamicField(MongoDocumentField):
type_label = 'DynamicField'
def to_native(self, obj):
return self.model_field.to_python(obj) | [((89, 18, 89, 38), 'django.core.exceptions.ValidationError', 'ValidationError', ({(89, 34, 89, 37): 'msg'}, {}), '(msg)', False, 'from django.core.exceptions import ValidationError\n'), ((34, 33, 34, 49), 'django.utils.encoding.smart_str', 'smart_str', ({(34, 43, 34, 48): 'field'}, {}), '(field)', False, 'from django.utils.encoding import smart_str\n'), ((82, 18, 82, 65), 'django.core.exceptions.ValidationError', 'ValidationError', ({(82, 34, 82, 64): "self.error_messages['invalid']"}, {}), "(self.error_messages['invalid'])", False, 'from django.core.exceptions import ValidationError\n'), ((84, 19, 84, 44), 'mongoengine.dereference.DeReference', 'dereference.DeReference', ({}, {}), '()', False, 'from mongoengine import dereference\n')] |
bbhunter/fuzz-lightyear | tests/conftest.py | 75c1318d2f747a4fac6b55a46649c944528769ba | import pytest
from fuzz_lightyear.datastore import _ALL_POST_FUZZ_HOOKS_BY_OPERATION
from fuzz_lightyear.datastore import _ALL_POST_FUZZ_HOOKS_BY_TAG
from fuzz_lightyear.datastore import _RERUN_POST_FUZZ_HOOKS_BY_OPERATION
from fuzz_lightyear.datastore import _RERUN_POST_FUZZ_HOOKS_BY_TAG
from fuzz_lightyear.datastore import get_excluded_operations
from fuzz_lightyear.datastore import get_included_tags
from fuzz_lightyear.datastore import get_non_vulnerable_operations
from fuzz_lightyear.datastore import get_user_defined_mapping
from fuzz_lightyear.plugins import get_enabled_plugins
from fuzz_lightyear.request import get_victim_session_factory
from fuzz_lightyear.supplements.abstraction import get_abstraction
@pytest.fixture(autouse=True)
def clear_caches():
get_abstraction.cache_clear()
get_user_defined_mapping.cache_clear()
get_enabled_plugins.cache_clear()
get_victim_session_factory.cache_clear()
get_excluded_operations.cache_clear()
get_non_vulnerable_operations.cache_clear()
get_included_tags.cache_clear()
_ALL_POST_FUZZ_HOOKS_BY_OPERATION.clear()
_ALL_POST_FUZZ_HOOKS_BY_TAG.clear()
_RERUN_POST_FUZZ_HOOKS_BY_OPERATION.clear()
_RERUN_POST_FUZZ_HOOKS_BY_TAG.clear()
@pytest.fixture(autouse=True)
def ignore_hypothesis_non_interactive_example_warning():
"""In theory we're not supposed to use hypothesis'
strategy.example(), but fuzz-lightyear isn't using
hypothesis in a normal way.
"""
import warnings
from hypothesis.errors import NonInteractiveExampleWarning
warnings.filterwarnings(
'ignore',
category=NonInteractiveExampleWarning,
)
| [((16, 1, 16, 29), 'pytest.fixture', 'pytest.fixture', (), '', False, 'import pytest\n'), ((32, 1, 32, 29), 'pytest.fixture', 'pytest.fixture', (), '', False, 'import pytest\n'), ((18, 4, 18, 33), 'fuzz_lightyear.supplements.abstraction.get_abstraction.cache_clear', 'get_abstraction.cache_clear', ({}, {}), '()', False, 'from fuzz_lightyear.supplements.abstraction import get_abstraction\n'), ((19, 4, 19, 42), 'fuzz_lightyear.datastore.get_user_defined_mapping.cache_clear', 'get_user_defined_mapping.cache_clear', ({}, {}), '()', False, 'from fuzz_lightyear.datastore import get_user_defined_mapping\n'), ((20, 4, 20, 37), 'fuzz_lightyear.plugins.get_enabled_plugins.cache_clear', 'get_enabled_plugins.cache_clear', ({}, {}), '()', False, 'from fuzz_lightyear.plugins import get_enabled_plugins\n'), ((21, 4, 21, 44), 'fuzz_lightyear.request.get_victim_session_factory.cache_clear', 'get_victim_session_factory.cache_clear', ({}, {}), '()', False, 'from fuzz_lightyear.request import get_victim_session_factory\n'), ((22, 4, 22, 41), 'fuzz_lightyear.datastore.get_excluded_operations.cache_clear', 'get_excluded_operations.cache_clear', ({}, {}), '()', False, 'from fuzz_lightyear.datastore import get_excluded_operations\n'), ((23, 4, 23, 47), 'fuzz_lightyear.datastore.get_non_vulnerable_operations.cache_clear', 'get_non_vulnerable_operations.cache_clear', ({}, {}), '()', False, 'from fuzz_lightyear.datastore import get_non_vulnerable_operations\n'), ((24, 4, 24, 35), 'fuzz_lightyear.datastore.get_included_tags.cache_clear', 'get_included_tags.cache_clear', ({}, {}), '()', False, 'from fuzz_lightyear.datastore import get_included_tags\n'), ((26, 4, 26, 45), 'fuzz_lightyear.datastore._ALL_POST_FUZZ_HOOKS_BY_OPERATION.clear', '_ALL_POST_FUZZ_HOOKS_BY_OPERATION.clear', ({}, {}), '()', False, 'from fuzz_lightyear.datastore import _ALL_POST_FUZZ_HOOKS_BY_OPERATION\n'), ((27, 4, 27, 39), 'fuzz_lightyear.datastore._ALL_POST_FUZZ_HOOKS_BY_TAG.clear', '_ALL_POST_FUZZ_HOOKS_BY_TAG.clear', ({}, {}), '()', False, 'from fuzz_lightyear.datastore import _ALL_POST_FUZZ_HOOKS_BY_TAG\n'), ((28, 4, 28, 47), 'fuzz_lightyear.datastore._RERUN_POST_FUZZ_HOOKS_BY_OPERATION.clear', '_RERUN_POST_FUZZ_HOOKS_BY_OPERATION.clear', ({}, {}), '()', False, 'from fuzz_lightyear.datastore import _RERUN_POST_FUZZ_HOOKS_BY_OPERATION\n'), ((29, 4, 29, 41), 'fuzz_lightyear.datastore._RERUN_POST_FUZZ_HOOKS_BY_TAG.clear', '_RERUN_POST_FUZZ_HOOKS_BY_TAG.clear', ({}, {}), '()', False, 'from fuzz_lightyear.datastore import _RERUN_POST_FUZZ_HOOKS_BY_TAG\n'), ((40, 4, 43, 5), 'warnings.filterwarnings', 'warnings.filterwarnings', (), '', False, 'import warnings\n')] |
michael-the1/diepvries | src/diepvries/field.py | ddba9c91ee5fb2014dc576ffb74faa40c3d0d04f | """Module for a Data Vault field."""
from typing import Optional
from . import (
FIELD_PREFIX,
FIELD_SUFFIX,
METADATA_FIELDS,
TABLE_PREFIXES,
UNKNOWN,
FieldDataType,
FieldRole,
TableType,
)
class Field:
"""A field in a Data Vault model."""
def __init__(
self,
parent_table_name: str,
name: str,
data_type: FieldDataType,
position: int,
is_mandatory: bool,
precision: int = None,
scale: int = None,
length: int = None,
):
"""Instantiate a Field.
Convert both name and parent_table_name to lower case.
Args:
parent_table_name: Name of parent table in the database.
name: Column name in the database.
data_type: Column data type in the database.
position: Column position in the database.
is_mandatory: Column is mandatory in the database.
precision: Numeric precision (maximum number of digits before the decimal
separator). Only applicable when `self.data_type==FieldDataType.NUMBER`.
scale: Numeric scale (maximum number of digits after the decimal
separator). Only applicable when `self.data_type==FieldDataType.NUMBER`.
length: Character length (maximum number of characters allowed). Only
applicable when `self.data_type==FieldDataType.TEXT`.
"""
self.parent_table_name = parent_table_name.lower()
self.name = name.lower()
self.data_type = data_type
self.position = position
self.is_mandatory = is_mandatory
self.precision = precision
self.scale = scale
self.length = length
def __hash__(self):
"""Hash of a Data Vault field."""
return hash(self.name_in_staging)
def __eq__(self, other):
"""Equality of a Data Vault field."""
return self.name_in_staging == other.name_in_staging
def __str__(self) -> str:
"""Representation of a Field object as a string.
This helps the tracking of logging events per entity.
Returns:
String representation for the `Field` object.
"""
return f"{type(self).__name__}: {self.name}"
@property
def data_type_sql(self) -> str:
"""Build SQL expression to represent the field data type."""
if self.data_type == FieldDataType.NUMBER:
return f"{self.data_type.value} ({self.precision}, {self.scale})"
if self.data_type == FieldDataType.TEXT and self.length:
return f"{self.data_type.value} ({self.length})"
return f"{self.data_type.name}"
@property
def hash_concatenation_sql(self) -> str:
"""Build SQL expression to deterministically represent the field as a string.
This expression is needed to produce hashes (hashkey/hashdiff) that are
        consistent, independently of the data type used to store the field in the
extraction table.
The SQL expression does the following steps:
1. Cast field to its data type in the DV model.
2. Produce a consistent string representation of the result of step 1, depending
on the field data type.
3. Ensure the result of step 2 never returns NULL.
Returns:
SQL expression to deterministically represent the field as a string.
"""
hash_concatenation_sql = ""
date_format = "yyyy-mm-dd"
time_format = "hh24:mi:ss.ff9"
timezone_format = "tzhtzm"
cast_expression = (
f"CAST({self.name} AS {self.data_type_sql})"
if self.data_type != FieldDataType.GEOGRAPHY
else f"TO_GEOGRAPHY({self.name})"
)
if self.data_type in (FieldDataType.TIMESTAMP_LTZ, FieldDataType.TIMESTAMP_TZ):
hash_concatenation_sql = (
f"TO_CHAR({cast_expression}, "
f"'{date_format} {time_format} {timezone_format}')"
)
elif self.data_type == FieldDataType.TIMESTAMP_NTZ:
hash_concatenation_sql = (
f"TO_CHAR({cast_expression}, '{date_format} {time_format}')"
)
elif self.data_type == FieldDataType.DATE:
hash_concatenation_sql = f"TO_CHAR({cast_expression}, '{date_format}')"
elif self.data_type == FieldDataType.TIME:
hash_concatenation_sql = f"TO_CHAR({cast_expression}, '{time_format}')"
elif self.data_type == FieldDataType.TEXT:
hash_concatenation_sql = cast_expression
elif self.data_type == FieldDataType.GEOGRAPHY:
hash_concatenation_sql = f"ST_ASTEXT({cast_expression})"
else:
hash_concatenation_sql = f"CAST({cast_expression} AS TEXT)"
default_value = UNKNOWN if self.role == FieldRole.BUSINESS_KEY else ""
return f"COALESCE({hash_concatenation_sql}, '{default_value}')"
@property
def suffix(self) -> str:
"""Get field suffix.
Returns:
Field suffix.
"""
return self.name.split("_").pop()
@property
def prefix(self) -> str:
"""Get field prefix.
Returns:
Field prefix.
"""
return next(split_part for split_part in self.name.split("_"))
@property
def parent_table_type(self) -> TableType:
"""Get parent table type, based on table prefix.
Returns:
Table type (HUB, LINK or SATELLITE).
"""
table_prefix = next(
split_part for split_part in self.parent_table_name.split("_")
)
if table_prefix in TABLE_PREFIXES[TableType.LINK]:
return TableType.LINK
if table_prefix in TABLE_PREFIXES[TableType.SATELLITE]:
return TableType.SATELLITE
return TableType.HUB
@property
def name_in_staging(self) -> str:
"""Get the name that this field should have, when created in a staging table.
In most cases this function will return `self.name`, but for hashdiffs the name
is <parent_table_name>_hashdiff (every Satellite has one hashdiff field, named
s_hashdiff).
Returns:
Name of the field in staging.
"""
if self.role == FieldRole.HASHDIFF:
return f"{self.parent_table_name}_{FIELD_SUFFIX[FieldRole.HASHDIFF]}"
return self.name
@property
def ddl_in_staging(self) -> str:
"""Get DDL expression to create this field in the staging table.
Returns:
The DDL expression for this field.
"""
return (
f"{self.name_in_staging} {self.data_type_sql}"
f"{' NOT NULL' if self.is_mandatory else ''}"
)
@property
def role(self) -> FieldRole:
"""Get the role of the field in a Data Vault model.
See `FieldRole` enum for more information.
Returns:
Field role in a Data Vault model.
Raises:
RuntimeError: When no field role can be attributed.
"""
found_role: Optional[FieldRole] = None
if self.name in METADATA_FIELDS.values():
found_role = FieldRole.METADATA
elif (
self.name == f"{self.parent_table_name}_{self.suffix}"
and self.suffix == FIELD_SUFFIX[FieldRole.HASHKEY]
):
found_role = FieldRole.HASHKEY
elif self.suffix == FIELD_SUFFIX[FieldRole.HASHKEY]:
found_role = FieldRole.HASHKEY_PARENT
elif self.prefix == FIELD_PREFIX[FieldRole.CHILD_KEY]:
found_role = FieldRole.CHILD_KEY
elif (
self.parent_table_type != TableType.SATELLITE
and self.prefix not in FIELD_PREFIX.values()
and self.position != 1
):
found_role = FieldRole.BUSINESS_KEY
elif self.suffix == FIELD_SUFFIX[FieldRole.HASHDIFF]:
found_role = FieldRole.HASHDIFF
elif self.parent_table_type == TableType.SATELLITE:
found_role = FieldRole.DESCRIPTIVE
if found_role is not None:
return found_role
raise RuntimeError(
(
f"{self.name}: It was not possible to assign a valid field role "
f" (validate FieldRole and FIELD_PREFIXES configuration)"
)
)
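# Illustrative usage sketch (added for clarity; not part of the original module).
# The "h_customer" / "customer_id" names follow a common hub-prefix convention and
# are assumptions: the real classification performed by `role` depends on the
# TABLE_PREFIXES / FIELD_PREFIX / FIELD_SUFFIX constants imported above.
if __name__ == "__main__":
    field = Field(
        parent_table_name="h_customer",
        name="customer_id",
        data_type=FieldDataType.TEXT,
        position=2,
        is_mandatory=True,
        length=32,
    )
    print(field)                         # Field: customer_id
    print(field.data_type_sql)           # e.g. TEXT (32)
    print(field.ddl_in_staging)          # e.g. customer_id TEXT (32) NOT NULL
    print(field.hash_concatenation_sql)  # the COALESCE(CAST(...)) expression used for hashing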
| [] |
tuggeluk/mmdetection | mmdet/datasets/deepscoresV2.py | 669a535c944628a3ab43330cae5c77b643e13a4b | """DEEPSCORESV2
Provides access to the DEEPSCORESV2 database with a COCO-like interface. The
only changes made compared to the coco.py file are the class labels.
Author:
Lukas Tuggener <[email protected]>
Yvan Satyawan <[email protected]>
Created on:
November 23, 2019
"""
from .coco import *
import os
import json
from obb_anns import OBBAnns
@DATASETS.register_module
class DeepScoresV2Dataset(CocoDataset):
def load_annotations(self, ann_file):
self.obb = OBBAnns(ann_file)
self.obb.load_annotations()
self.obb.set_annotation_set_filter(['deepscores'])
self.obb.set_class_blacklist(["staff"])
self.cat_ids = list(self.obb.get_cats().keys())
self.cat2label = {
cat_id: i
for i, cat_id in enumerate(self.cat_ids)
}
self.label2cat = {v: k for k, v in self.cat2label.items()}
self.CLASSES = tuple([v["name"] for (k, v) in self.obb.get_cats().items()])
self.img_ids = [id['id'] for id in self.obb.img_info]
return self.obb.img_info
def get_ann_info(self, idx):
return self._parse_ann_info(*self.obb.get_img_ann_pair(idxs=[idx]))
def _filter_imgs(self, min_size=32):
valid_inds = []
for i, img_info in enumerate(self.obb.img_info):
if self.filter_empty_gt and len(img_info['ann_ids']) == 0:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
return valid_inds
def _parse_ann_info(self, img_info, ann_info):
img_info, ann_info = img_info[0], ann_info[0]
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
for i, ann in ann_info.iterrows():
# we have no ignore feature
if ann['area'] <= 0:
continue
bbox = ann['a_bbox']
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann['cat_id'][0]])
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
ann = dict(
bboxes=gt_bboxes,
labels=gt_labels,
bboxes_ignore=gt_bboxes_ignore,
masks=None,
seg_map=None)
return ann
def prepare_json_dict(self, results):
json_results = {"annotation_set": "deepscores", "proposals": []}
for idx in range(len(self)):
img_id = self.img_ids[idx]
result = results[idx]
for label in range(len(result)):
bboxes = result[label]
for i in range(bboxes.shape[0]):
data = dict()
data['img_id'] = img_id
data['bbox'] = [str(nr) for nr in bboxes[i][0:-1]]
data['score'] = str(bboxes[i][-1])
data['cat_id'] = self.label2cat[label]
json_results["proposals"].append(data)
return json_results
def write_results_json(self, results, filename=None):
if filename is None:
filename = "deepscores_results.json"
json_results = self.prepare_json_dict(results)
with open(filename, "w") as fo:
json.dump(json_results, fo)
return filename
def evaluate(self,
results,
metric='bbox',
logger=None,
jsonfile_prefix=None,
classwise=True,
proposal_nums=(100, 300, 1000),
iou_thrs=np.arange(0.5, 0.96, 0.05),
average_thrs=False):
"""Evaluation in COCO protocol.
Args:
results (list): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
classwise (bool): Whether to evaluating the AP for each class.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thrs (Sequence[float]): IoU threshold used for evaluating
recalls. If set to a list, the average recall of all IoUs will
also be computed. Default: 0.5.
Returns:
dict[str: float]
"""
metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['bbox']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
filename = self.write_results_json(results)
self.obb.load_proposals(filename)
metric_results = self.obb.calculate_metrics(iou_thrs=iou_thrs, classwise=classwise, average_thrs=average_thrs)
metric_results = {self.CLASSES[self.cat2label[key]]: value for (key, value) in metric_results.items()}
        # add the number of occurrences per class
occurences_by_class = self.obb.get_class_occurences()
for (key, value) in metric_results.items():
value.update(no_occurences=occurences_by_class[key])
if True:
import pickle
pickle.dump(metric_results, open('evaluation_renamed_rcnn.pickle', 'wb'))
print(metric_results)
return metric_results
| [((23, 19, 23, 36), 'obb_anns.OBBAnns', 'OBBAnns', ({(23, 27, 23, 35): 'ann_file'}, {}), '(ann_file)', False, 'from obb_anns import OBBAnns\n'), ((98, 12, 98, 39), 'json.dump', 'json.dump', ({(98, 22, 98, 34): 'json_results', (98, 36, 98, 38): 'fo'}, {}), '(json_results, fo)', False, 'import json\n')] |
agsmorodin/gomatic | tests/go_cd_configurator_test.py | e6ae871ffc2d027823f6b7a5755e0ac65c724538 | #!/usr/bin/env python
import unittest
from xml.dom.minidom import parseString
import xml.etree.ElementTree as ET
from decimal import Decimal
from gomatic import GoCdConfigurator, FetchArtifactDir, RakeTask, ExecTask, ScriptExecutorTask, FetchArtifactTask, \
FetchArtifactFile, Tab, GitMaterial, PipelineMaterial, Pipeline
from gomatic.fake import FakeHostRestClient, empty_config_xml, config, empty_config
from gomatic.gocd.pipelines import DEFAULT_LABEL_TEMPLATE
from gomatic.gocd.artifacts import Artifact
from gomatic.xml_operations import prettify
def find_with_matching_name(things, name):
return [thing for thing in things if thing.name == name]
def standard_pipeline_group():
return GoCdConfigurator(config('config-with-typical-pipeline')).ensure_pipeline_group('P.Group')
def typical_pipeline():
return standard_pipeline_group().find_pipeline('typical')
def more_options_pipeline():
return GoCdConfigurator(config('config-with-more-options-pipeline')).ensure_pipeline_group('P.Group').find_pipeline('more-options')
def empty_pipeline():
return GoCdConfigurator(empty_config()).ensure_pipeline_group("pg").ensure_pipeline("pl").set_git_url("gurl")
def empty_stage():
return empty_pipeline().ensure_stage("deploy-to-dev")
class TestAgents(unittest.TestCase):
def _agents_from_config(self):
return GoCdConfigurator(config('config-with-just-agents')).agents
def test_could_have_no_agents(self):
agents = GoCdConfigurator(empty_config()).agents
self.assertEquals(0, len(agents))
def test_agents_have_resources(self):
agents = self._agents_from_config()
self.assertEquals(2, len(agents))
self.assertEquals({'a-resource', 'b-resource'}, agents[0].resources)
def test_agents_have_names(self):
agents = self._agents_from_config()
self.assertEquals('go-agent-1', agents[0].hostname)
self.assertEquals('go-agent-2', agents[1].hostname)
def test_agent_could_have_no_resources(self):
agents = self._agents_from_config()
self.assertEquals(0, len(agents[1].resources))
def test_can_add_resource_to_agent_with_no_resources(self):
agent = self._agents_from_config()[1]
agent.ensure_resource('a-resource-that-it-does-not-already-have')
self.assertEquals(1, len(agent.resources))
def test_can_add_resource_to_agent(self):
agent = self._agents_from_config()[0]
self.assertEquals(2, len(agent.resources))
agent.ensure_resource('a-resource-that-it-does-not-already-have')
self.assertEquals(3, len(agent.resources))
class TestJobs(unittest.TestCase):
def test_jobs_have_resources(self):
stages = typical_pipeline().stages
job = stages[0].jobs[0]
resources = job.resources
self.assertEquals(1, len(resources))
self.assertEquals({'a-resource'}, resources)
def test_job_has_nice_tostring(self):
job = typical_pipeline().stages[0].jobs[0]
self.assertEquals("Job('compile', [ExecTask(['make', 'options', 'source code'])])", str(job))
def test_jobs_can_have_timeout(self):
job = typical_pipeline().ensure_stage("deploy").ensure_job("upload")
self.assertEquals(True, job.has_timeout)
self.assertEquals('20', job.timeout)
def test_can_set_timeout(self):
job = empty_stage().ensure_job("j")
j = job.set_timeout("42")
self.assertEquals(j, job)
self.assertEquals(True, job.has_timeout)
self.assertEquals('42', job.timeout)
def test_jobs_do_not_have_to_have_timeout(self):
stages = typical_pipeline().stages
job = stages[0].jobs[0]
self.assertEquals(False, job.has_timeout)
try:
timeout = job.timeout
self.fail("should have thrown exception")
except RuntimeError:
pass
def test_jobs_can_run_on_all_agents(self):
job = more_options_pipeline().ensure_stage("earlyStage").ensure_job("earlyWorm")
self.assertEquals(True, job.runs_on_all_agents)
def test_jobs_do_not_have_to_run_on_all_agents(self):
job = typical_pipeline().ensure_stage("build").ensure_job("compile")
self.assertEquals(False, job.runs_on_all_agents)
def test_jobs_can_be_made_to_run_on_all_agents(self):
job = typical_pipeline().ensure_stage("build").ensure_job("compile")
j = job.set_runs_on_all_agents()
self.assertEquals(j, job)
self.assertEquals(True, job.runs_on_all_agents)
def test_jobs_can_be_made_to_not_run_on_all_agents(self):
job = typical_pipeline().ensure_stage("build").ensure_job("compile")
j = job.set_runs_on_all_agents(False)
self.assertEquals(j, job)
self.assertEquals(False, job.runs_on_all_agents)
def test_can_ensure_job_has_resource(self):
stages = typical_pipeline().stages
job = stages[0].jobs[0]
j = job.ensure_resource('moo')
self.assertEquals(j, job)
self.assertEquals(2, len(job.resources))
self.assertEquals({'a-resource', 'moo'}, job.resources)
def test_jobs_have_artifacts(self):
job = more_options_pipeline().ensure_stage("earlyStage").ensure_job("earlyWorm")
artifacts = job.artifacts
self.assertEquals({
Artifact.get_build_artifact("target/universal/myapp*.zip", "artifacts"),
Artifact.get_build_artifact("scripts/*", "files"),
Artifact.get_test_artifact("from", "to")},
artifacts)
def test_job_that_has_no_artifacts_has_no_artifacts_element_to_reduce_thrash(self):
go_cd_configurator = GoCdConfigurator(empty_config())
job = go_cd_configurator.ensure_pipeline_group("g").ensure_pipeline("p").ensure_stage("s").ensure_job("j")
job.ensure_artifacts(set())
self.assertEquals(set(), job.artifacts)
xml = parseString(go_cd_configurator.config)
self.assertEquals(0, len(xml.getElementsByTagName('artifacts')))
def test_artifacts_might_have_no_dest(self):
job = more_options_pipeline().ensure_stage("s1").ensure_job("rake-job")
artifacts = job.artifacts
self.assertEquals(1, len(artifacts))
self.assertEquals({Artifact.get_build_artifact("things/*")}, artifacts)
def test_can_add_build_artifacts_to_job(self):
job = more_options_pipeline().ensure_stage("earlyStage").ensure_job("earlyWorm")
job_with_artifacts = job.ensure_artifacts({
Artifact.get_build_artifact("a1", "artifacts"),
Artifact.get_build_artifact("a2", "others")})
self.assertEquals(job, job_with_artifacts)
artifacts = job.artifacts
self.assertEquals(5, len(artifacts))
self.assertTrue({Artifact.get_build_artifact("a1", "artifacts"), Artifact.get_build_artifact("a2", "others")}.issubset(artifacts))
def test_can_add_test_artifacts_to_job(self):
job = more_options_pipeline().ensure_stage("earlyStage").ensure_job("earlyWorm")
job_with_artifacts = job.ensure_artifacts({
Artifact.get_test_artifact("a1"),
Artifact.get_test_artifact("a2")})
self.assertEquals(job, job_with_artifacts)
artifacts = job.artifacts
self.assertEquals(5, len(artifacts))
self.assertTrue({Artifact.get_test_artifact("a1"), Artifact.get_test_artifact("a2")}.issubset(artifacts))
def test_can_ensure_artifacts(self):
job = more_options_pipeline().ensure_stage("earlyStage").ensure_job("earlyWorm")
job.ensure_artifacts({
Artifact.get_test_artifact("from", "to"),
Artifact.get_build_artifact("target/universal/myapp*.zip", "somewhereElse"),
Artifact.get_test_artifact("another", "with dest"),
Artifact.get_build_artifact("target/universal/myapp*.zip", "artifacts")})
self.assertEquals({
Artifact.get_build_artifact("target/universal/myapp*.zip", "artifacts"),
Artifact.get_build_artifact("scripts/*", "files"),
Artifact.get_test_artifact("from", "to"),
Artifact.get_build_artifact("target/universal/myapp*.zip", "somewhereElse"),
Artifact.get_test_artifact("another", "with dest")
},
job.artifacts)
def test_jobs_have_tasks(self):
job = more_options_pipeline().ensure_stage("s1").jobs[2]
tasks = job.tasks
self.assertEquals(4, len(tasks))
self.assertEquals('rake', tasks[0].type)
self.assertEquals('sometarget', tasks[0].target)
self.assertEquals('passed', tasks[0].runif)
self.assertEquals('fetchartifact', tasks[1].type)
self.assertEquals('more-options', tasks[1].pipeline)
self.assertEquals('earlyStage', tasks[1].stage)
self.assertEquals('earlyWorm', tasks[1].job)
self.assertEquals(FetchArtifactDir('sourceDir'), tasks[1].src)
self.assertEquals('destDir', tasks[1].dest)
self.assertEquals('passed', tasks[1].runif)
def test_runif_defaults_to_passed(self):
pipeline = typical_pipeline()
tasks = pipeline.ensure_stage("build").ensure_job("compile").tasks
self.assertEquals("passed", tasks[0].runif)
def test_jobs_can_have_rake_tasks(self):
job = more_options_pipeline().ensure_stage("s1").jobs[0]
tasks = job.tasks
self.assertEquals(1, len(tasks))
self.assertEquals('rake', tasks[0].type)
self.assertEquals("boo", tasks[0].target)
def test_can_ensure_rake_task(self):
job = more_options_pipeline().ensure_stage("s1").jobs[0]
job.ensure_task(RakeTask("boo"))
self.assertEquals(1, len(job.tasks))
def test_can_add_rake_task(self):
job = more_options_pipeline().ensure_stage("s1").jobs[0]
job.ensure_task(RakeTask("another"))
self.assertEquals(2, len(job.tasks))
self.assertEquals("another", job.tasks[1].target)
def test_script_executor_task(self):
script = '''
echo This is script
echo 'This is a string in single quotes'
echo "This is a string in double quotes"
'''
job = more_options_pipeline().ensure_stage("script-executor").\
ensure_job('test-script-executor')
job.ensure_task(ScriptExecutorTask(script, runif='any'))
self.assertEquals(1, len(job.tasks))
self.assertEquals('script', job.tasks[0].type)
self.assertEquals(script, job.tasks[0].script)
self.assertEquals('any', job.tasks[0].runif)
job.ensure_task(ScriptExecutorTask(script, runif='failed'))
self.assertEquals(2, len(job.tasks))
self.assertEquals('script', job.tasks[1].type)
self.assertEquals(script, job.tasks[1].script)
self.assertEquals('failed', job.tasks[1].runif)
job.ensure_task(ScriptExecutorTask(script))
self.assertEquals(3, len(job.tasks))
self.assertEquals('script', job.tasks[2].type)
self.assertEquals(script, job.tasks[2].script)
self.assertEquals('passed', job.tasks[2].runif)
def test_can_add_exec_task_with_runif(self):
stages = typical_pipeline().stages
job = stages[0].jobs[0]
added_task = job.add_task(ExecTask(['ls', '-la'], 'some/dir', "failed"))
self.assertEquals(2, len(job.tasks))
task = job.tasks[1]
self.assertEquals(task, added_task)
self.assertEquals(['ls', '-la'], task.command_and_args)
self.assertEquals('some/dir', task.working_dir)
self.assertEquals('failed', task.runif)
def test_can_add_exec_task(self):
stages = typical_pipeline().stages
job = stages[0].jobs[0]
added_task = job.add_task(ExecTask(['ls', '-la'], 'some/dir'))
self.assertEquals(2, len(job.tasks))
task = job.tasks[1]
self.assertEquals(task, added_task)
self.assertEquals(['ls', '-la'], task.command_and_args)
self.assertEquals('some/dir', task.working_dir)
def test_can_ensure_exec_task(self):
stages = typical_pipeline().stages
job = stages[0].jobs[0]
t1 = job.ensure_task(ExecTask(['ls', '-la'], 'some/dir'))
t2 = job.ensure_task(ExecTask(['make', 'options', 'source code']))
job.ensure_task(ExecTask(['ls', '-la'], 'some/otherdir'))
job.ensure_task(ExecTask(['ls', '-la'], 'some/dir'))
self.assertEquals(3, len(job.tasks))
self.assertEquals(t2, job.tasks[0])
self.assertEquals(['make', 'options', 'source code'], (job.tasks[0]).command_and_args)
self.assertEquals(t1, job.tasks[1])
self.assertEquals(['ls', '-la'], (job.tasks[1]).command_and_args)
self.assertEquals('some/dir', (job.tasks[1]).working_dir)
self.assertEquals(['ls', '-la'], (job.tasks[2]).command_and_args)
self.assertEquals('some/otherdir', (job.tasks[2]).working_dir)
def test_exec_task_args_are_unescaped_as_appropriate(self):
job = more_options_pipeline().ensure_stage("earlyStage").ensure_job("earlyWorm")
task = job.tasks[1]
self.assertEquals(["bash", "-c",
'curl "http://domain.com/service/check?target=one+two+three&key=2714_beta%40domain.com"'],
task.command_and_args)
def test_exec_task_args_are_escaped_as_appropriate(self):
job = empty_stage().ensure_job("j")
task = job.add_task(ExecTask(["bash", "-c",
'curl "http://domain.com/service/check?target=one+two+three&key=2714_beta%40domain.com"']))
self.assertEquals(["bash", "-c",
'curl "http://domain.com/service/check?target=one+two+three&key=2714_beta%40domain.com"'],
task.command_and_args)
def test_can_have_no_tasks(self):
self.assertEquals(0, len(empty_stage().ensure_job("empty_job").tasks))
def test_can_add_fetch_artifact_task_to_job(self):
stages = typical_pipeline().stages
job = stages[0].jobs[0]
added_task = job.add_task(FetchArtifactTask('p', 's', 'j', FetchArtifactDir('d'), runif="any"))
self.assertEquals(2, len(job.tasks))
task = job.tasks[1]
self.assertEquals(added_task, task)
self.assertEquals('p', task.pipeline)
self.assertEquals('s', task.stage)
self.assertEquals('j', task.job)
self.assertEquals(FetchArtifactDir('d'), task.src)
self.assertEquals('any', task.runif)
def test_fetch_artifact_task_can_have_src_file_rather_than_src_dir(self):
job = more_options_pipeline().ensure_stage("s1").ensure_job("variety-of-tasks")
tasks = job.tasks
self.assertEquals(4, len(tasks))
self.assertEquals('more-options', tasks[1].pipeline)
self.assertEquals('earlyStage', tasks[1].stage)
self.assertEquals('earlyWorm', tasks[1].job)
self.assertEquals(FetchArtifactFile('someFile'), tasks[2].src)
self.assertEquals('passed', tasks[1].runif)
self.assertEquals(['true'], tasks[3].command_and_args)
def test_fetch_artifact_task_can_have_dest(self):
pipeline = more_options_pipeline()
job = pipeline.ensure_stage("s1").ensure_job("variety-of-tasks")
tasks = job.tasks
self.assertEquals(FetchArtifactTask("more-options",
"earlyStage",
"earlyWorm",
FetchArtifactDir("sourceDir"),
dest="destDir"),
tasks[1])
def test_can_ensure_fetch_artifact_tasks(self):
job = more_options_pipeline().ensure_stage("s1").ensure_job("variety-of-tasks")
job.ensure_task(FetchArtifactTask("more-options", "middleStage", "middleJob", FetchArtifactFile("someFile")))
first_added_task = job.ensure_task(FetchArtifactTask('p', 's', 'j', FetchArtifactDir('dir')))
self.assertEquals(5, len(job.tasks))
self.assertEquals(first_added_task, job.tasks[4])
self.assertEquals('p', (job.tasks[4]).pipeline)
self.assertEquals('s', (job.tasks[4]).stage)
self.assertEquals('j', (job.tasks[4]).job)
self.assertEquals(FetchArtifactDir('dir'), (job.tasks[4]).src)
self.assertEquals('passed', (job.tasks[4]).runif)
job.ensure_task(FetchArtifactTask('p', 's', 'j', FetchArtifactFile('f')))
self.assertEquals(FetchArtifactFile('f'), (job.tasks[5]).src)
job.ensure_task(FetchArtifactTask('p', 's', 'j', FetchArtifactDir('dir'), dest="somedest"))
self.assertEquals("somedest", (job.tasks[6]).dest)
job.ensure_task(FetchArtifactTask('p', 's', 'j', FetchArtifactDir('dir'), runif="failed"))
self.assertEquals('failed', (job.tasks[7]).runif)
def test_tasks_run_if_defaults_to_passed(self):
job = empty_stage().ensure_job("j")
job.add_task(ExecTask(['ls', '-la'], 'some/dir'))
job.add_task(FetchArtifactTask('p', 's', 'j', FetchArtifactDir('dir')))
job.add_task(RakeTask('x'))
self.assertEquals('passed', (job.tasks[0]).runif)
self.assertEquals('passed', (job.tasks[1]).runif)
self.assertEquals('passed', (job.tasks[2]).runif)
def test_tasks_run_if_variants(self):
job = more_options_pipeline().ensure_stage("s1").ensure_job("run-if-variants")
tasks = job.tasks
self.assertEquals('t-passed', tasks[0].command_and_args[0])
self.assertEquals('passed', tasks[0].runif)
self.assertEquals('t-none', tasks[1].command_and_args[0])
self.assertEquals('passed', tasks[1].runif)
self.assertEquals('t-failed', tasks[2].command_and_args[0])
self.assertEquals('failed', tasks[2].runif)
self.assertEquals('t-any', tasks[3].command_and_args[0])
self.assertEquals('any', tasks[3].runif)
self.assertEquals('t-both', tasks[4].command_and_args[0])
self.assertEquals('any', tasks[4].runif)
def test_cannot_set_runif_to_random_things(self):
try:
ExecTask(['x'], runif='whatever')
self.fail("should have thrown exception")
except RuntimeError as e:
self.assertTrue(e.message.count("whatever") > 0)
def test_can_set_runif_to_particular_values(self):
self.assertEquals('passed', ExecTask(['x'], runif='passed').runif)
self.assertEquals('failed', ExecTask(['x'], runif='failed').runif)
self.assertEquals('any', ExecTask(['x'], runif='any').runif)
def test_tasks_dest_defaults_to_none(self): # TODO: maybe None could be avoided
job = empty_stage().ensure_job("j")
job.add_task(FetchArtifactTask('p', 's', 'j', FetchArtifactDir('dir')))
self.assertEquals(None, (job.tasks[0]).dest)
def test_can_add_exec_task_to_empty_job(self):
job = empty_stage().ensure_job("j")
added_task = job.add_task(ExecTask(['ls', '-la'], 'some/dir', "any"))
self.assertEquals(1, len(job.tasks))
task = job.tasks[0]
self.assertEquals(task, added_task)
self.assertEquals(['ls', '-la'], task.command_and_args)
self.assertEquals('some/dir', task.working_dir)
self.assertEquals('any', task.runif)
def test_can_remove_all_tasks(self):
stages = typical_pipeline().stages
job = stages[0].jobs[0]
self.assertEquals(1, len(job.tasks))
j = job.without_any_tasks()
self.assertEquals(j, job)
self.assertEquals(0, len(job.tasks))
def test_can_have_encrypted_environment_variables(self):
pipeline = GoCdConfigurator(config('config-with-encrypted-variable')).ensure_pipeline_group("defaultGroup").find_pipeline("example")
job = pipeline.ensure_stage('defaultStage').ensure_job('defaultJob')
self.assertEquals({"MY_JOB_PASSWORD": "yq5qqPrrD9/j=="}, job.encrypted_environment_variables)
def test_can_set_encrypted_environment_variables(self):
job = empty_stage().ensure_job("j")
job.ensure_encrypted_environment_variables({'one': 'blah=='})
self.assertEquals({"one": "blah=="}, job.encrypted_environment_variables)
def test_can_add_environment_variables(self):
job = typical_pipeline() \
.ensure_stage("build") \
.ensure_job("compile")
j = job.ensure_environment_variables({"new": "one"})
self.assertEquals(j, job)
self.assertEquals({"CF_COLOR": "false", "new": "one"}, job.environment_variables)
def test_environment_variables_get_added_in_sorted_order_to_reduce_config_thrash(self):
go_cd_configurator = GoCdConfigurator(empty_config())
job = go_cd_configurator\
.ensure_pipeline_group('P.Group')\
.ensure_pipeline('P.Name') \
.ensure_stage("build") \
.ensure_job("compile")
job.ensure_environment_variables({"ant": "a", "badger": "a", "zebra": "a"})
xml = parseString(go_cd_configurator.config)
names = [e.getAttribute('name') for e in xml.getElementsByTagName('variable')]
self.assertEquals([u'ant', u'badger', u'zebra'], names)
def test_can_remove_all_environment_variables(self):
job = typical_pipeline() \
.ensure_stage("build") \
.ensure_job("compile")
j = job.without_any_environment_variables()
self.assertEquals(j, job)
self.assertEquals({}, job.environment_variables)
def test_job_can_haveTabs(self):
job = typical_pipeline() \
.ensure_stage("build") \
.ensure_job("compile")
self.assertEquals([Tab("Time_Taken", "artifacts/test-run-times.html")], job.tabs)
def test_can_addTab(self):
job = typical_pipeline() \
.ensure_stage("build") \
.ensure_job("compile")
j = job.ensure_tab(Tab("n", "p"))
self.assertEquals(j, job)
self.assertEquals([Tab("Time_Taken", "artifacts/test-run-times.html"), Tab("n", "p")], job.tabs)
def test_can_ensure_tab(self):
job = typical_pipeline() \
.ensure_stage("build") \
.ensure_job("compile")
job.ensure_tab(Tab("Time_Taken", "artifacts/test-run-times.html"))
self.assertEquals([Tab("Time_Taken", "artifacts/test-run-times.html")], job.tabs)
class TestStages(unittest.TestCase):
def test_pipelines_have_stages(self):
self.assertEquals(2, len(typical_pipeline().stages))
def test_stages_have_names(self):
stages = typical_pipeline().stages
self.assertEquals('build', stages[0].name)
self.assertEquals('deploy', stages[1].name)
def test_stages_can_have_manual_approval(self):
self.assertEquals(False, typical_pipeline().stages[0].has_manual_approval)
self.assertEquals(True, typical_pipeline().stages[1].has_manual_approval)
def test_can_set_manual_approval(self):
stage = typical_pipeline().stages[0]
s = stage.set_has_manual_approval()
self.assertEquals(s, stage)
self.assertEquals(True, stage.has_manual_approval)
def test_stages_have_fetch_materials_flag(self):
stage = typical_pipeline().ensure_stage("build")
self.assertEquals(True, stage.fetch_materials)
stage = more_options_pipeline().ensure_stage("s1")
self.assertEquals(False, stage.fetch_materials)
def test_can_set_fetch_materials_flag(self):
stage = typical_pipeline().ensure_stage("build")
s = stage.set_fetch_materials(False)
self.assertEquals(s, stage)
self.assertEquals(False, stage.fetch_materials)
stage = more_options_pipeline().ensure_stage("s1")
stage.set_fetch_materials(True)
self.assertEquals(True, stage.fetch_materials)
def test_stages_have_jobs(self):
stages = typical_pipeline().stages
jobs = stages[0].jobs
self.assertEquals(1, len(jobs))
self.assertEquals('compile', jobs[0].name)
def test_can_add_job(self):
stage = typical_pipeline().ensure_stage("deploy")
self.assertEquals(1, len(stage.jobs))
ensured_job = stage.ensure_job("new-job")
self.assertEquals(2, len(stage.jobs))
self.assertEquals(ensured_job, stage.jobs[1])
self.assertEquals("new-job", stage.jobs[1].name)
def test_can_add_job_to_empty_stage(self):
stage = empty_stage()
self.assertEquals(0, len(stage.jobs))
ensured_job = stage.ensure_job("new-job")
self.assertEquals(1, len(stage.jobs))
self.assertEquals(ensured_job, stage.jobs[0])
self.assertEquals("new-job", stage.jobs[0].name)
def test_can_ensure_job_exists(self):
stage = typical_pipeline().ensure_stage("deploy")
self.assertEquals(1, len(stage.jobs))
ensured_job = stage.ensure_job("upload")
self.assertEquals(1, len(stage.jobs))
self.assertEquals("upload", ensured_job.name)
def test_can_have_encrypted_environment_variables(self):
pipeline = GoCdConfigurator(config('config-with-encrypted-variable')).ensure_pipeline_group("defaultGroup").find_pipeline("example")
stage = pipeline.ensure_stage('defaultStage')
self.assertEquals({"MY_STAGE_PASSWORD": "yq5qqPrrD9/s=="}, stage.encrypted_environment_variables)
def test_can_set_encrypted_environment_variables(self):
stage = typical_pipeline().ensure_stage("deploy")
stage.ensure_encrypted_environment_variables({'one': 'blah=='})
self.assertEquals({"one": "blah=="}, stage.encrypted_environment_variables)
def test_can_set_environment_variables(self):
stage = typical_pipeline().ensure_stage("deploy")
s = stage.ensure_environment_variables({"new": "one"})
self.assertEquals(s, stage)
self.assertEquals({"BASE_URL": "http://myurl", "new": "one"}, stage.environment_variables)
def test_can_remove_all_environment_variables(self):
stage = typical_pipeline().ensure_stage("deploy")
s = stage.without_any_environment_variables()
self.assertEquals(s, stage)
self.assertEquals({}, stage.environment_variables)
class TestPipeline(unittest.TestCase):
def test_pipelines_have_names(self):
pipeline = typical_pipeline()
self.assertEquals('typical', pipeline.name)
def test_can_add_stage(self):
pipeline = empty_pipeline()
self.assertEquals(0, len(pipeline.stages))
new_stage = pipeline.ensure_stage("some_stage")
self.assertEquals(1, len(pipeline.stages))
self.assertEquals(new_stage, pipeline.stages[0])
self.assertEquals("some_stage", new_stage.name)
def test_can_ensure_stage(self):
pipeline = typical_pipeline()
self.assertEquals(2, len(pipeline.stages))
ensured_stage = pipeline.ensure_stage("deploy")
self.assertEquals(2, len(pipeline.stages))
self.assertEquals("deploy", ensured_stage.name)
def test_can_remove_stage(self):
pipeline = typical_pipeline()
self.assertEquals(2, len(pipeline.stages))
p = pipeline.ensure_removal_of_stage("deploy")
self.assertEquals(p, pipeline)
self.assertEquals(1, len(pipeline.stages))
self.assertEquals(0, len([s for s in pipeline.stages if s.name == "deploy"]))
def test_can_ensure_removal_of_stage(self):
pipeline = typical_pipeline()
self.assertEquals(2, len(pipeline.stages))
pipeline.ensure_removal_of_stage("stage-that-has-already-been-deleted")
self.assertEquals(2, len(pipeline.stages))
def test_can_ensure_initial_stage(self):
pipeline = typical_pipeline()
stage = pipeline.ensure_initial_stage("first")
self.assertEquals(stage, pipeline.stages[0])
self.assertEquals(3, len(pipeline.stages))
def test_can_ensure_initial_stage_if_already_exists_as_initial(self):
pipeline = typical_pipeline()
stage = pipeline.ensure_initial_stage("build")
self.assertEquals(stage, pipeline.stages[0])
self.assertEquals(2, len(pipeline.stages))
def test_can_ensure_initial_stage_if_already_exists(self):
pipeline = typical_pipeline()
stage = pipeline.ensure_initial_stage("deploy")
self.assertEquals(stage, pipeline.stages[0])
self.assertEquals("build", pipeline.stages[1].name)
self.assertEquals(2, len(pipeline.stages))
def test_can_set_stage_clean_policy(self):
pipeline = empty_pipeline()
stage1 = pipeline.ensure_stage("some_stage1").set_clean_working_dir()
stage2 = pipeline.ensure_stage("some_stage2")
self.assertEquals(True, pipeline.stages[0].clean_working_dir)
self.assertEquals(True, stage1.clean_working_dir)
self.assertEquals(False, pipeline.stages[1].clean_working_dir)
self.assertEquals(False, stage2.clean_working_dir)
def test_pipelines_can_have_git_urls(self):
pipeline = typical_pipeline()
self.assertEquals("[email protected]:springersbm/gomatic.git", pipeline.git_url)
def test_git_is_polled_by_default(self):
pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group("g").ensure_pipeline("p")
pipeline.set_git_url("some git url")
self.assertEquals(True, pipeline.git_material.polling)
def test_pipelines_can_have_git_material_with_material_name(self):
pipeline = more_options_pipeline()
self.assertEquals("[email protected]:springersbm/gomatic.git", pipeline.git_url)
self.assertEquals("some-material-name", pipeline.git_material.material_name)
def test_git_material_can_ignore_sources(self):
pipeline = GoCdConfigurator(config('config-with-source-exclusions')).ensure_pipeline_group("P.Group").find_pipeline("with-exclusions")
self.assertEquals({"excluded-folder", "another-excluded-folder"}, pipeline.git_material.ignore_patterns)
def test_can_set_pipeline_git_url(self):
pipeline = typical_pipeline()
p = pipeline.set_git_url("[email protected]:springersbm/changed.git")
self.assertEquals(p, pipeline)
self.assertEquals("[email protected]:springersbm/changed.git", pipeline.git_url)
self.assertEquals('master', pipeline.git_branch)
def test_can_set_pipeline_git_url_with_options(self):
pipeline = typical_pipeline()
p = pipeline.set_git_material(GitMaterial(
"[email protected]:springersbm/changed.git",
branch="branch",
destination_directory="foo",
material_name="material-name",
ignore_patterns={"ignoreMe", "ignoreThisToo"},
polling=False))
self.assertEquals(p, pipeline)
self.assertEquals("branch", pipeline.git_branch)
self.assertEquals("foo", pipeline.git_material.destination_directory)
self.assertEquals("material-name", pipeline.git_material.material_name)
self.assertEquals({"ignoreMe", "ignoreThisToo"}, pipeline.git_material.ignore_patterns)
self.assertFalse(pipeline.git_material.polling, "git polling")
def test_throws_exception_if_no_git_url(self):
pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group("g").ensure_pipeline("p")
self.assertEquals(False, pipeline.has_single_git_material)
try:
url = pipeline.git_url
self.fail("should have thrown exception")
except RuntimeError:
pass
def test_git_url_throws_exception_if_multiple_git_materials(self):
pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group("g").ensure_pipeline("p")
pipeline.ensure_material(GitMaterial("[email protected]:springersbm/one.git"))
pipeline.ensure_material(GitMaterial("[email protected]:springersbm/two.git"))
self.assertEquals(False, pipeline.has_single_git_material)
try:
url = pipeline.git_url
self.fail("should have thrown exception")
except RuntimeError:
pass
def test_set_git_url_throws_exception_if_multiple_git_materials(self):
pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group("g").ensure_pipeline("p")
pipeline.ensure_material(GitMaterial("[email protected]:springersbm/one.git"))
pipeline.ensure_material(GitMaterial("[email protected]:springersbm/two.git"))
try:
pipeline.set_git_url("[email protected]:springersbm/three.git")
self.fail("should have thrown exception")
except RuntimeError:
pass
def test_can_add_git_material(self):
pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group("g").ensure_pipeline("p")
p = pipeline.ensure_material(GitMaterial("[email protected]:springersbm/changed.git"))
self.assertEquals(p, pipeline)
self.assertEquals("[email protected]:springersbm/changed.git", pipeline.git_url)
def test_can_ensure_git_material(self):
pipeline = typical_pipeline()
pipeline.ensure_material(GitMaterial("[email protected]:springersbm/gomatic.git"))
self.assertEquals("[email protected]:springersbm/gomatic.git", pipeline.git_url)
self.assertEquals([GitMaterial("[email protected]:springersbm/gomatic.git")], pipeline.materials)
def test_can_have_multiple_git_materials(self):
pipeline = typical_pipeline()
pipeline.ensure_material(GitMaterial("[email protected]:springersbm/changed.git"))
self.assertEquals([GitMaterial("[email protected]:springersbm/gomatic.git"), GitMaterial("[email protected]:springersbm/changed.git")],
pipeline.materials)
def test_pipelines_can_have_pipeline_materials(self):
pipeline = more_options_pipeline()
self.assertEquals(2, len(pipeline.materials))
self.assertEquals(GitMaterial('[email protected]:springersbm/gomatic.git', branch="a-branch", material_name="some-material-name", polling=False),
pipeline.materials[0])
def test_pipelines_can_have_more_complicated_pipeline_materials(self):
pipeline = more_options_pipeline()
self.assertEquals(2, len(pipeline.materials))
self.assertEquals(True, pipeline.materials[0].is_git)
self.assertEquals(PipelineMaterial('pipeline2', 'build'), pipeline.materials[1])
def test_pipelines_can_have_no_materials(self):
pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group("g").ensure_pipeline("p")
self.assertEquals(0, len(pipeline.materials))
def test_can_add_pipeline_material(self):
pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group("g").ensure_pipeline("p")
p = pipeline.ensure_material(PipelineMaterial('deploy-qa', 'baseline-user-data'))
self.assertEquals(p, pipeline)
self.assertEquals(PipelineMaterial('deploy-qa', 'baseline-user-data'), pipeline.materials[0])
def test_can_add_more_complicated_pipeline_material(self):
pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group("g").ensure_pipeline("p")
p = pipeline.ensure_material(PipelineMaterial('p', 's', 'm'))
self.assertEquals(p, pipeline)
self.assertEquals(PipelineMaterial('p', 's', 'm'), pipeline.materials[0])
def test_can_ensure_pipeline_material(self):
pipeline = more_options_pipeline()
self.assertEquals(2, len(pipeline.materials))
pipeline.ensure_material(PipelineMaterial('pipeline2', 'build'))
self.assertEquals(2, len(pipeline.materials))
def test_can_remove_all_pipeline_materials(self):
pipeline = more_options_pipeline()
pipeline.remove_materials()
self.assertEquals(0, len(pipeline.materials))
def test_materials_are_sorted(self):
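# Materials are serialized git materials first, then pipeline materials, each group sorted alphabetically, to keep the XML deterministic.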
go_cd_configurator = GoCdConfigurator(empty_config())
pipeline = go_cd_configurator.ensure_pipeline_group("g").ensure_pipeline("p")
pipeline.ensure_material(PipelineMaterial('zeta', 'build'))
pipeline.ensure_material(GitMaterial('[email protected]:springersbm/zebra.git'))
pipeline.ensure_material(PipelineMaterial('alpha', 'build'))
pipeline.ensure_material(GitMaterial('[email protected]:springersbm/art.git'))
pipeline.ensure_material(PipelineMaterial('theta', 'build'))
pipeline.ensure_material(GitMaterial('[email protected]:springersbm/this.git'))
xml = parseString(go_cd_configurator.config)
materials = xml.getElementsByTagName('materials')[0].childNodes
self.assertEquals('git', materials[0].tagName)
self.assertEquals('git', materials[1].tagName)
self.assertEquals('git', materials[2].tagName)
self.assertEquals('pipeline', materials[3].tagName)
self.assertEquals('pipeline', materials[4].tagName)
self.assertEquals('pipeline', materials[5].tagName)
self.assertEquals('[email protected]:springersbm/art.git', materials[0].attributes['url'].value)
self.assertEquals('[email protected]:springersbm/this.git', materials[1].attributes['url'].value)
self.assertEquals('[email protected]:springersbm/zebra.git', materials[2].attributes['url'].value)
self.assertEquals('alpha', materials[3].attributes['pipelineName'].value)
self.assertEquals('theta', materials[4].attributes['pipelineName'].value)
self.assertEquals('zeta', materials[5].attributes['pipelineName'].value)
def test_can_set_pipeline_git_url_for_new_pipeline(self):
pipeline_group = standard_pipeline_group()
new_pipeline = pipeline_group.ensure_pipeline("some_name")
new_pipeline.set_git_url("[email protected]:springersbm/changed.git")
self.assertEquals("[email protected]:springersbm/changed.git", new_pipeline.git_url)
def test_pipelines_do_not_have_to_be_based_on_template(self):
pipeline = more_options_pipeline()
self.assertFalse(pipeline.is_based_on_template)
def test_pipelines_can_be_based_on_template(self):
pipeline = GoCdConfigurator(config('pipeline-based-on-template')).ensure_pipeline_group('defaultGroup').find_pipeline('siberian')
assert isinstance(pipeline, Pipeline)
self.assertTrue(pipeline.is_based_on_template)
template = GoCdConfigurator(config('pipeline-based-on-template')).templates[0]
self.assertEquals(template, pipeline.template)
def test_pipelines_can_be_created_based_on_template(self):
configurator = GoCdConfigurator(empty_config())
configurator.ensure_template('temple').ensure_stage('s').ensure_job('j')
pipeline = configurator.ensure_pipeline_group("g").ensure_pipeline('p').set_template_name('temple')
self.assertEquals('temple', pipeline.template.name)
def test_pipelines_have_environment_variables(self):
pipeline = typical_pipeline()
self.assertEquals({"JAVA_HOME": "/opt/java/jdk-1.8"}, pipeline.environment_variables)
def test_pipelines_have_encrypted_environment_variables(self):
pipeline = GoCdConfigurator(config('config-with-encrypted-variable')).ensure_pipeline_group("defaultGroup").find_pipeline("example")
self.assertEquals({"MY_SECURE_PASSWORD": "yq5qqPrrD9/htfwTWMYqGQ=="}, pipeline.encrypted_environment_variables)
def test_pipelines_have_unencrypted_secure_environment_variables(self):
pipeline = GoCdConfigurator(config('config-with-unencrypted-secure-variable')).ensure_pipeline_group("defaultGroup").find_pipeline("example")
self.assertEquals({"MY_SECURE_PASSWORD": "hunter2"}, pipeline.unencrypted_secure_environment_variables)
def test_can_add_environment_variables_to_pipeline(self):
pipeline = empty_pipeline()
p = pipeline.ensure_environment_variables({"new": "one", "again": "two"})
self.assertEquals(p, pipeline)
self.assertEquals({"new": "one", "again": "two"}, pipeline.environment_variables)
def test_can_add_encrypted_secure_environment_variables_to_pipeline(self):
pipeline = empty_pipeline()
pipeline.ensure_encrypted_environment_variables({"new": "one", "again": "two"})
self.assertEquals({"new": "one", "again": "two"}, pipeline.encrypted_environment_variables)
def test_can_add_unencrypted_secure_environment_variables_to_pipeline(self):
pipeline = empty_pipeline()
pipeline.ensure_unencrypted_secure_environment_variables({"new": "one", "again": "two"})
self.assertEquals({"new": "one", "again": "two"}, pipeline.unencrypted_secure_environment_variables)
def test_can_add_environment_variables_to_new_pipeline(self):
pipeline = typical_pipeline()
pipeline.ensure_environment_variables({"new": "one"})
self.assertEquals({"JAVA_HOME": "/opt/java/jdk-1.8", "new": "one"}, pipeline.environment_variables)
def test_can_modify_environment_variables_of_pipeline(self):
pipeline = typical_pipeline()
pipeline.ensure_environment_variables({"new": "one", "JAVA_HOME": "/opt/java/jdk-1.1"})
self.assertEquals({"JAVA_HOME": "/opt/java/jdk-1.1", "new": "one"}, pipeline.environment_variables)
def test_can_remove_all_environment_variables(self):
pipeline = typical_pipeline()
p = pipeline.without_any_environment_variables()
self.assertEquals(p, pipeline)
self.assertEquals({}, pipeline.environment_variables)
def test_can_remove_specific_environment_variable(self):
pipeline = empty_pipeline()
pipeline.ensure_encrypted_environment_variables({'a': 's'})
pipeline.ensure_environment_variables({'c': 'v', 'd': 'f'})
pipeline.remove_environment_variable('d')
p = pipeline.remove_environment_variable('unknown')
self.assertEquals(p, pipeline)
self.assertEquals({'a': 's'}, pipeline.encrypted_environment_variables)
self.assertEquals({'c': 'v'}, pipeline.environment_variables)
def test_environment_variables_get_added_in_sorted_order_to_reduce_config_thrash(self):
go_cd_configurator = GoCdConfigurator(empty_config())
pipeline = go_cd_configurator \
.ensure_pipeline_group('P.Group') \
.ensure_pipeline('P.Name')
pipeline.ensure_environment_variables({"badger": "a", "xray": "a"})
pipeline.ensure_environment_variables({"ant": "a2", "zebra": "a"})
xml = parseString(go_cd_configurator.config)
names = [e.getAttribute('name') for e in xml.getElementsByTagName('variable')]
self.assertEquals([u'ant', u'badger', u'xray', u'zebra'], names)
def test_encrypted_environment_variables_get_added_in_sorted_order_to_reduce_config_thrash(self):
go_cd_configurator = GoCdConfigurator(empty_config())
pipeline = go_cd_configurator \
.ensure_pipeline_group('P.Group') \
.ensure_pipeline('P.Name')
pipeline.ensure_encrypted_environment_variables({"badger": "a", "xray": "a"})
pipeline.ensure_encrypted_environment_variables({"ant": "a2", "zebra": "a"})
xml = parseString(go_cd_configurator.config)
names = [e.getAttribute('name') for e in xml.getElementsByTagName('variable')]
self.assertEquals([u'ant', u'badger', u'xray', u'zebra'], names)
def test_unencrypted_environment_variables_do_not_have_secure_attribute_in_order_to_reduce_config_thrash(self):
go_cd_configurator = GoCdConfigurator(empty_config())
pipeline = go_cd_configurator \
.ensure_pipeline_group('P.Group') \
.ensure_pipeline('P.Name')
pipeline.ensure_environment_variables({"ant": "a"})
xml = parseString(go_cd_configurator.config)
secure_attributes = [e.getAttribute('secure') for e in xml.getElementsByTagName('variable')]
# attributes that are missing are returned as empty
self.assertEquals([''], secure_attributes, "should not have any 'secure' attributes")
def test_cannot_have_environment_variable_which_is_both_secure_and_insecure(self):
go_cd_configurator = GoCdConfigurator(empty_config())
pipeline = go_cd_configurator \
.ensure_pipeline_group('P.Group') \
.ensure_pipeline('P.Name')
pipeline.ensure_unencrypted_secure_environment_variables({"ant": "a"})
pipeline.ensure_environment_variables({"ant": "b"}) # not secure
self.assertEquals({"ant": "b"}, pipeline.environment_variables)
self.assertEquals({}, pipeline.unencrypted_secure_environment_variables)
def test_can_change_environment_variable_from_secure_to_insecure(self):
go_cd_configurator = GoCdConfigurator(empty_config())
pipeline = go_cd_configurator \
.ensure_pipeline_group('P.Group') \
.ensure_pipeline('P.Name')
pipeline.ensure_unencrypted_secure_environment_variables({"ant": "a", "badger": "b"})
pipeline.ensure_environment_variables({"ant": "b"})
self.assertEquals({"ant": "b"}, pipeline.environment_variables)
self.assertEquals({"badger": "b"}, pipeline.unencrypted_secure_environment_variables)
def test_pipelines_have_parameters(self):
pipeline = more_options_pipeline()
self.assertEquals({"environment": "qa"}, pipeline.parameters)
def test_pipelines_have_no_parameters(self):
pipeline = typical_pipeline()
self.assertEquals({}, pipeline.parameters)
def test_can_add_params_to_pipeline(self):
pipeline = typical_pipeline()
p = pipeline.ensure_parameters({"new": "one", "again": "two"})
self.assertEquals(p, pipeline)
self.assertEquals({"new": "one", "again": "two"}, pipeline.parameters)
def test_can_modify_parameters_of_pipeline(self):
pipeline = more_options_pipeline()
pipeline.ensure_parameters({"new": "one", "environment": "qa55"})
self.assertEquals({"environment": "qa55", "new": "one"}, pipeline.parameters)
def test_can_remove_all_parameters(self):
pipeline = more_options_pipeline()
p = pipeline.without_any_parameters()
self.assertEquals(p, pipeline)
self.assertEquals({}, pipeline.parameters)
def test_can_have_timer(self):
pipeline = more_options_pipeline()
self.assertEquals(True, pipeline.has_timer)
self.assertEquals("0 15 22 * * ?", pipeline.timer)
self.assertEquals(False, pipeline.timer_triggers_only_on_changes)
def test_can_have_timer_with_onlyOnChanges_option(self):
pipeline = GoCdConfigurator(config('config-with-more-options-pipeline')).ensure_pipeline_group('P.Group').find_pipeline('pipeline2')
self.assertEquals(True, pipeline.has_timer)
self.assertEquals("0 0 22 ? * MON-FRI", pipeline.timer)
self.assertEquals(True, pipeline.timer_triggers_only_on_changes)
def test_need_not_have_timer(self):
pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group('Group').ensure_pipeline('Pipeline')
self.assertEquals(False, pipeline.has_timer)
try:
timer = pipeline.timer
self.fail('should have thrown an exception')
except RuntimeError:
pass
def test_can_set_timer(self):
pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group('Group').ensure_pipeline('Pipeline')
p = pipeline.set_timer("one two three")
self.assertEquals(p, pipeline)
self.assertEquals("one two three", pipeline.timer)
def test_can_set_timer_with_only_on_changes_flag_off(self):
pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group('Group').ensure_pipeline('Pipeline')
p = pipeline.set_timer("one two three", only_on_changes=False)
self.assertEquals(p, pipeline)
self.assertEquals("one two three", pipeline.timer)
self.assertEquals(False, pipeline.timer_triggers_only_on_changes)
def test_can_set_timer_with_only_on_changes_flag(self):
pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group('Group').ensure_pipeline('Pipeline')
p = pipeline.set_timer("one two three", only_on_changes=True)
self.assertEquals(p, pipeline)
self.assertEquals("one two three", pipeline.timer)
self.assertEquals(True, pipeline.timer_triggers_only_on_changes)
def test_can_remove_timer(self):
pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group('Group').ensure_pipeline('Pipeline')
pipeline.set_timer("one two three")
p = pipeline.remove_timer()
self.assertEquals(p, pipeline)
self.assertFalse(pipeline.has_timer)
def test_can_have_label_template(self):
pipeline = typical_pipeline()
self.assertEquals("something-${COUNT}", pipeline.label_template)
self.assertEquals(True, pipeline.has_label_template)
def test_might_not_have_label_template(self):
pipeline = more_options_pipeline() # TODO swap label with typical
self.assertEquals(False, pipeline.has_label_template)
try:
label_template = pipeline.label_template
self.fail('should have thrown an exception')
except RuntimeError:
pass
def test_can_set_label_template(self):
pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group('Group').ensure_pipeline('Pipeline')
p = pipeline.set_label_template("some label")
self.assertEquals(p, pipeline)
self.assertEquals("some label", pipeline.label_template)
def test_can_set_default_label_template(self):
pipeline = GoCdConfigurator(empty_config()).ensure_pipeline_group('Group').ensure_pipeline('Pipeline')
p = pipeline.set_default_label_template()
self.assertEquals(p, pipeline)
self.assertEquals(DEFAULT_LABEL_TEMPLATE, pipeline.label_template)
def test_can_set_automatic_pipeline_locking(self):
configurator = GoCdConfigurator(empty_config())
pipeline = configurator.ensure_pipeline_group("new_group").ensure_pipeline("some_name")
p = pipeline.set_automatic_pipeline_locking()
self.assertEquals(p, pipeline)
self.assertEquals(True, pipeline.has_automatic_pipeline_locking)
def test_pipelines_to_dict(self):
pipeline = typical_pipeline()
pp_dict = pipeline.to_dict("P.Group")
self.assertEquals('typical', pp_dict['name'])
self.assertEquals({'JAVA_HOME': '/opt/java/jdk-1.8'},
pp_dict['environment_variables'])
self.assertEquals({}, pp_dict['encrypted_environment_variables'])
self.assertEquals({}, pp_dict['parameters'])
self.assertEquals(2, len(pp_dict['stages']))
self.assertEquals(1, len(pp_dict['materials']))
self.assertNotIn('template', pp_dict)
self.assertTrue(pp_dict['cron_timer_spec'] is None)
self.assertFalse(pp_dict['automatic_pipeline_locking'])
class TestPipelineGroup(unittest.TestCase):
def _pipeline_group_from_config(self):
return GoCdConfigurator(config('config-with-two-pipelines')).ensure_pipeline_group('P.Group')
def test_pipeline_groups_have_names(self):
pipeline_group = standard_pipeline_group()
self.assertEquals("P.Group", pipeline_group.name)
def test_pipeline_groups_have_pipelines(self):
pipeline_group = self._pipeline_group_from_config()
self.assertEquals(2, len(pipeline_group.pipelines))
def test_can_add_pipeline(self):
configurator = GoCdConfigurator(empty_config())
pipeline_group = configurator.ensure_pipeline_group("new_group")
new_pipeline = pipeline_group.ensure_pipeline("some_name")
self.assertEquals(1, len(pipeline_group.pipelines))
self.assertEquals(new_pipeline, pipeline_group.pipelines[0])
self.assertEquals("some_name", new_pipeline.name)
self.assertEquals(False, new_pipeline.has_single_git_material)
self.assertEquals(False, new_pipeline.has_label_template)
self.assertEquals(False, new_pipeline.has_automatic_pipeline_locking)
def test_can_find_pipeline(self):
found_pipeline = self._pipeline_group_from_config().find_pipeline("pipeline2")
self.assertEquals("pipeline2", found_pipeline.name)
self.assertTrue(self._pipeline_group_from_config().has_pipeline("pipeline2"))
def test_does_not_find_missing_pipeline(self):
self.assertFalse(self._pipeline_group_from_config().has_pipeline("unknown-pipeline"))
try:
self._pipeline_group_from_config().find_pipeline("unknown-pipeline")
self.fail("should have thrown exception")
except RuntimeError as e:
self.assertIn("unknown-pipeline", str(e))
def test_can_remove_pipeline(self):
pipeline_group = self._pipeline_group_from_config()
pipeline_group.ensure_removal_of_pipeline("pipeline1")
self.assertEquals(1, len(pipeline_group.pipelines))
try:
pipeline_group.find_pipeline("pipeline1")
self.fail("should have thrown exception")
except RuntimeError:
pass
def test_ensuring_replacement_of_pipeline_leaves_it_empty_but_in_same_place(self):
pipeline_group = self._pipeline_group_from_config()
self.assertEquals("pipeline1", pipeline_group.pipelines[0].name)
pipeline = pipeline_group.find_pipeline("pipeline1")
pipeline.set_label_template("something")
self.assertEquals(True, pipeline.has_label_template)
p = pipeline_group.ensure_replacement_of_pipeline("pipeline1")
self.assertEquals(p, pipeline_group.pipelines[0])
self.assertEquals("pipeline1", p.name)
self.assertEquals(False, p.has_label_template)
def test_can_ensure_pipeline_removal(self):
pipeline_group = self._pipeline_group_from_config()
pg = pipeline_group.ensure_removal_of_pipeline("already-removed-pipeline")
self.assertEquals(pg, pipeline_group)
self.assertEquals(2, len(pipeline_group.pipelines))
try:
pipeline_group.find_pipeline("already-removed-pipeline")
self.fail("should have thrown exception")
except RuntimeError:
pass
class TestGoCdConfigurator(unittest.TestCase):
def test_can_tell_if_there_is_no_change_to_save(self):
configurator = GoCdConfigurator(config('config-with-two-pipeline-groups'))
p = configurator.ensure_pipeline_group('Second.Group').ensure_replacement_of_pipeline('smoke-tests')
p.set_git_url('[email protected]:springersbm/gomatic.git')
p.ensure_stage('build').ensure_job('compile').ensure_task(ExecTask(['make', 'source code']))
self.assertFalse(configurator.has_changes)
def test_can_tell_if_there_is_a_change_to_save(self):
configurator = GoCdConfigurator(config('config-with-two-pipeline-groups'))
p = configurator.ensure_pipeline_group('Second.Group').ensure_replacement_of_pipeline('smoke-tests')
p.set_git_url('[email protected]:springersbm/gomatic.git')
p.ensure_stage('moo').ensure_job('bar')
self.assertTrue(configurator.has_changes)
def test_keeps_schema_version(self):
empty_config = FakeHostRestClient(empty_config_xml.replace('schemaVersion="72"', 'schemaVersion="73"'), "empty_config()")
configurator = GoCdConfigurator(empty_config)
self.assertEquals(1, configurator.config.count('schemaVersion="73"'))
def test_can_find_out_server_settings(self):
configurator = GoCdConfigurator(config('config-with-server-settings'))
self.assertEquals("/some/dir", configurator.artifacts_dir)
self.assertEquals("http://10.20.30.40/", configurator.site_url)
self.assertEquals("my_ci_server", configurator.agent_auto_register_key)
self.assertEquals(Decimal("55.0"), configurator.purge_start)
self.assertEquals(Decimal("75.0"), configurator.purge_upto)
def test_can_find_out_server_settings_when_not_set(self):
configurator = GoCdConfigurator(config('config-with-no-server-settings'))
self.assertEquals(None, configurator.artifacts_dir)
self.assertEquals(None, configurator.site_url)
self.assertEquals(None, configurator.agent_auto_register_key)
self.assertEquals(None, configurator.purge_start)
self.assertEquals(None, configurator.purge_upto)
def test_can_set_server_settings(self):
configurator = GoCdConfigurator(config('config-with-no-server-settings'))
configurator.artifacts_dir = "/a/dir"
configurator.site_url = "http://1.2.3.4/"
configurator.agent_auto_register_key = "a_ci_server"
configurator.purge_start = Decimal("44.0")
configurator.purge_upto = Decimal("88.0")
self.assertEquals("/a/dir", configurator.artifacts_dir)
self.assertEquals("http://1.2.3.4/", configurator.site_url)
self.assertEquals("a_ci_server", configurator.agent_auto_register_key)
self.assertEquals(Decimal("44.0"), configurator.purge_start)
self.assertEquals(Decimal("88.0"), configurator.purge_upto)
def test_can_have_no_pipeline_groups(self):
self.assertEquals(0, len(GoCdConfigurator(empty_config()).pipeline_groups))
def test_gets_all_pipeline_groups(self):
self.assertEquals(2, len(GoCdConfigurator(config('config-with-two-pipeline-groups')).pipeline_groups))
def test_can_get_initial_config_md5(self):
configurator = GoCdConfigurator(empty_config())
self.assertEquals("42", configurator._initial_md5)
def test_config_is_updated_as_result_of_updating_part_of_it(self):
configurator = GoCdConfigurator(config('config-with-just-agents'))
agent = configurator.agents[0]
self.assertEquals(2, len(agent.resources))
agent.ensure_resource('a-resource-that-it-does-not-already-have')
configurator_based_on_new_config = GoCdConfigurator(FakeHostRestClient(configurator.config))
self.assertEquals(3, len(configurator_based_on_new_config.agents[0].resources))
def test_can_remove_agent(self):
configurator = GoCdConfigurator(config('config-with-just-agents'))
self.assertEquals(2, len(configurator.agents))
configurator.ensure_removal_of_agent('go-agent-1')
self.assertEquals(1, len(configurator.agents))
self.assertEquals('go-agent-2', configurator.agents[0].hostname)
def test_can_add_pipeline_group(self):
configurator = GoCdConfigurator(empty_config())
self.assertEquals(0, len(configurator.pipeline_groups))
new_pipeline_group = configurator.ensure_pipeline_group("a_new_group")
self.assertEquals(1, len(configurator.pipeline_groups))
self.assertEquals(new_pipeline_group, configurator.pipeline_groups[-1])
self.assertEquals("a_new_group", new_pipeline_group.name)
def test_can_ensure_pipeline_group_exists(self):
configurator = GoCdConfigurator(config('config-with-two-pipeline-groups'))
self.assertEquals(2, len(configurator.pipeline_groups))
pre_existing_pipeline_group = configurator.ensure_pipeline_group('Second.Group')
self.assertEquals(2, len(configurator.pipeline_groups))
self.assertEquals('Second.Group', pre_existing_pipeline_group.name)
def test_can_remove_all_pipeline_groups(self):
configurator = GoCdConfigurator(config('config-with-two-pipeline-groups'))
s = configurator.remove_all_pipeline_groups()
self.assertEquals(s, configurator)
self.assertEquals(0, len(configurator.pipeline_groups))
def test_can_remove_pipeline_group(self):
configurator = GoCdConfigurator(config('config-with-two-pipeline-groups'))
s = configurator.ensure_removal_of_pipeline_group('P.Group')
self.assertEquals(s, configurator)
self.assertEquals(1, len(configurator.pipeline_groups))
def test_can_ensure_removal_of_pipeline_group(self):
configurator = GoCdConfigurator(config('config-with-two-pipeline-groups'))
configurator.ensure_removal_of_pipeline_group('pipeline-group-that-has-already-been-removed')
self.assertEquals(2, len(configurator.pipeline_groups))
def test_can_have_templates(self):
templates = GoCdConfigurator(config('config-with-just-templates')).templates
self.assertEquals(2, len(templates))
self.assertEquals('api-component', templates[0].name)
self.assertEquals('deploy-stack', templates[1].name)
self.assertEquals('deploy-components', templates[1].stages[0].name)
def test_can_have_no_templates(self):
self.assertEquals(0, len(GoCdConfigurator(empty_config()).templates))
def test_can_add_template(self):
configurator = GoCdConfigurator(empty_config())
template = configurator.ensure_template('foo')
self.assertEquals(1, len(configurator.templates))
self.assertEquals(template, configurator.templates[0])
self.assertTrue(isinstance(configurator.templates[0], Pipeline), "so all methods used to configure a pipeline don't need to be tested separately for templates")
def test_can_ensure_template(self):
configurator = GoCdConfigurator(config('config-with-just-templates'))
template = configurator.ensure_template('deploy-stack')
self.assertEquals('deploy-components', template.stages[0].name)
def test_can_ensure_replacement_of_template(self):
configurator = GoCdConfigurator(config('config-with-just-templates'))
template = configurator.ensure_replacement_of_template('deploy-stack')
self.assertEquals(0, len(template.stages))
def test_can_remove_template(self):
configurator = GoCdConfigurator(config('config-with-just-templates'))
self.assertEquals(2, len(configurator.templates))
configurator.ensure_removal_of_template('deploy-stack')
self.assertEquals(1, len(configurator.templates))
def test_if_remove_all_templates_also_remove_templates_element(self):
configurator = GoCdConfigurator(config('config-with-just-templates'))
self.assertEquals(2, len(configurator.templates))
configurator.ensure_removal_of_template('api-component')
configurator.ensure_removal_of_template('deploy-stack')
self.assertEquals(0, len(configurator.templates))
xml = configurator.config
root = ET.fromstring(xml)
self.assertEqual(['server'], [element.tag for element in root])
def test_top_level_elements_get_reordered_to_please_go(self):
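# GoCD expects top-level config elements in a fixed order, so the configurator reorders them before the XML is emitted.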
configurator = GoCdConfigurator(config('config-with-agents-and-templates-but-without-pipelines'))
configurator.ensure_pipeline_group("some_group").ensure_pipeline("some_pipeline")
xml = configurator.config
root = ET.fromstring(xml)
self.assertEquals("pipelines", root[0].tag)
self.assertEquals("templates", root[1].tag)
self.assertEquals("agents", root[2].tag)
def test_top_level_elements_with_environment_get_reordered_to_please_go(self):
configurator = GoCdConfigurator(config('config-with-pipelines-environments-and-agents'))
configurator.ensure_pipeline_group("P.Group").ensure_pipeline("some_pipeline")
xml = configurator.config
root = ET.fromstring(xml)
self.assertEqual(['server', 'pipelines', 'environments', 'agents'], [element.tag for element in root])
def test_top_level_elements_that_cannot_be_created_get_reordered_to_please_go(self):
configurator = GoCdConfigurator(config('config-with-many-of-the-top-level-elements-that-cannot-be-added'))
configurator.ensure_pipeline_group("P.Group").ensure_pipeline("some_pipeline")
xml = configurator.config
root = ET.fromstring(xml)
self.assertEqual(['server', 'repositories', 'scms', 'pipelines', 'environments', 'agents'],
[element.tag for element in root])
def test_elements_can_be_created_in_order_to_please_go(self):
configurator = GoCdConfigurator(empty_config())
pipeline = configurator.ensure_pipeline_group("some_group").ensure_pipeline("some_pipeline")
pipeline.ensure_parameters({'p': 'p'})
pipeline.set_timer("some timer")
pipeline.ensure_environment_variables({'pe': 'pe'})
pipeline.set_git_url("gurl")
stage = pipeline.ensure_stage("s")
stage.ensure_environment_variables({'s': 's'})
job = stage.ensure_job("j")
job.ensure_environment_variables({'j': 'j'})
job.ensure_task(ExecTask(['ls']))
job.ensure_tab(Tab("n", "p"))
job.ensure_resource("r")
job.ensure_artifacts({Artifact.get_build_artifact('s', 'd')})
xml = configurator.config
pipeline_root = ET.fromstring(xml).find('pipelines').find('pipeline')
self.assertEquals("params", pipeline_root[0].tag)
self.assertEquals("timer", pipeline_root[1].tag)
self.assertEquals("environmentvariables", pipeline_root[2].tag)
self.assertEquals("materials", pipeline_root[3].tag)
self.assertEquals("stage", pipeline_root[4].tag)
self.__check_stage(pipeline_root)
def test_elements_are_reordered_in_order_to_please_go(self):
configurator = GoCdConfigurator(empty_config())
pipeline = configurator.ensure_pipeline_group("some_group").ensure_pipeline("some_pipeline")
pipeline.set_git_url("gurl")
pipeline.ensure_environment_variables({'pe': 'pe'})
pipeline.set_timer("some timer")
pipeline.ensure_parameters({'p': 'p'})
self.__configure_stage(pipeline)
self.__configure_stage(configurator.ensure_template('templ'))
xml = configurator.config
pipeline_root = ET.fromstring(xml).find('pipelines').find('pipeline')
self.assertEquals("params", pipeline_root[0].tag)
self.assertEquals("timer", pipeline_root[1].tag)
self.assertEquals("environmentvariables", pipeline_root[2].tag)
self.assertEquals("materials", pipeline_root[3].tag)
self.assertEquals("stage", pipeline_root[4].tag)
self.__check_stage(pipeline_root)
template_root = ET.fromstring(xml).find('templates').find('pipeline')
self.assertEquals("stage", template_root[0].tag)
self.__check_stage(template_root)
def __check_stage(self, pipeline_root):
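# GoCD also fixes the order of stage and job child elements; this helper asserts that order.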
stage_root = pipeline_root.find('stage')
self.assertEquals("environmentvariables", stage_root[0].tag)
self.assertEquals("jobs", stage_root[1].tag)
job_root = stage_root.find('jobs').find('job')
self.assertEquals("environmentvariables", job_root[0].tag)
self.assertEquals("tasks", job_root[1].tag)
self.assertEquals("tabs", job_root[2].tag)
self.assertEquals("resources", job_root[3].tag)
self.assertEquals("artifacts", job_root[4].tag)
def __configure_stage(self, pipeline):
stage = pipeline.ensure_stage("s")
job = stage.ensure_job("j")
stage.ensure_environment_variables({'s': 's'})
job.ensure_tab(Tab("n", "p"))
job.ensure_artifacts({Artifact.get_build_artifact('s', 'd')})
job.ensure_task(ExecTask(['ls']))
job.ensure_resource("r")
job.ensure_environment_variables({'j': 'j'})
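# Helper: collapse whitespace and escape characters so generated Python can be compared independent of formatting.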
def simplified(s):
return s.strip().replace("\t", "").replace("\n", "").replace("\\", "").replace(" ", "")
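# Helper: serialize a template's own element, or a pipeline's parent element, to XML for comparison.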
def sneakily_converted_to_xml(pipeline):
if pipeline.is_template:
return ET.tostring(pipeline.element)
else:
return ET.tostring(pipeline.parent.element)
class TestReverseEngineering(unittest.TestCase):
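# Round-trip check: reverse engineer the configuration into Python source, exec that source to rebuild the pipeline, then compare the before/after XML.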
def check_round_trip_pipeline(self, configurator, before, show=False):
reverse_engineered_python = configurator.as_python(before, with_save=False)
if show:
print('r' * 88)
print(reverse_engineered_python)
pipeline = "evaluation failed"
template = "evaluation failed"
exec(reverse_engineered_python)
# exec reverse_engineered_python.replace("from gomatic import *", "from gomatic.go_cd_configurator import *")
xml_before = sneakily_converted_to_xml(before)
# noinspection PyTypeChecker
xml_after = sneakily_converted_to_xml(pipeline)
if show:
print('b' * 88)
print(prettify(xml_before))
print('a' * 88)
print(prettify(xml_after))
self.assertEquals(xml_before, xml_after)
if before.is_based_on_template:
# noinspection PyTypeChecker
self.assertEquals(sneakily_converted_to_xml(before.template), sneakily_converted_to_xml(template))
def test_can_round_trip_simplest_pipeline(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line")
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_standard_label(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_default_label_template()
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_non_standard_label(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_label_template("non standard")
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_automatic_pipeline_locking(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_automatic_pipeline_locking()
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_pipeline_material(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").ensure_material(PipelineMaterial("p", "s", "m"))
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_multiple_git_materials(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line")
before.ensure_material(GitMaterial("giturl1", "b", "m1"))
before.ensure_material(GitMaterial("giturl2"))
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_git_url(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_git_url("some git url")
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_git_extras(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_git_material(
GitMaterial("some git url",
branch="some branch",
material_name="some material name",
polling=False,
ignore_patterns={"excluded", "things"},
destination_directory='foo/bar'))
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_git_branch_only(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_git_material(GitMaterial("some git url", branch="some branch"))
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_git_material_only(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_git_material(GitMaterial("some git url", material_name="m name"))
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_git_polling_only(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_git_material(GitMaterial("some git url", polling=False))
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_git_ignore_patterns_only_ISSUE_4(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_git_material(GitMaterial("git url", ignore_patterns={"ex", "cluded"}))
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_git_destination_directory_only(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_git_material(GitMaterial("git url", destination_directory='foo/bar'))
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_pipeline_parameters(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").ensure_parameters({"p": "v"})
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_pipeline_environment_variables(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").ensure_environment_variables({"p": "v"})
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_pipeline_encrypted_environment_variables(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").ensure_encrypted_environment_variables({"p": "v"})
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_pipeline_unencrypted_secure_environment_variables(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").ensure_unencrypted_secure_environment_variables({"p": "v"})
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_timer(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_timer("some timer")
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_timer_only_on_changes(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_timer("some timer", only_on_changes=True)
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_stage_bits(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line")
before.ensure_stage("stage1").ensure_environment_variables({"k": "v"}).set_clean_working_dir().set_has_manual_approval().set_fetch_materials(False)
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_stages(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line")
before.ensure_stage("stage1")
before.ensure_stage("stage2")
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_job(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line")
before.ensure_stage("stage").ensure_job("job")
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_job_bits(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line")
before.ensure_stage("stage").ensure_job("job") \
.ensure_artifacts({Artifact.get_build_artifact("s", "d"), Artifact.get_test_artifact("sauce")}) \
.ensure_environment_variables({"k": "v"}) \
.ensure_resource("r") \
.ensure_tab(Tab("n", "p")) \
.set_timeout("23") \
.set_runs_on_all_agents()
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_jobs(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line")
stage = before.ensure_stage("stage")
stage.ensure_job("job1")
stage.ensure_job("job2")
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_tasks(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line")
job = before.ensure_stage("stage").ensure_job("job")
job.add_task(ExecTask(["one", "two"], working_dir="somewhere", runif="failed"))
job.add_task(ExecTask(["one", "two"], working_dir="somewhere", runif="failed"))
job.ensure_task(ExecTask(["one"], working_dir="somewhere else"))
job.ensure_task(ExecTask(["two"], runif="any"))
job.ensure_task(FetchArtifactTask('p', 's', 'j', FetchArtifactFile('f'), runif="any"))
job.ensure_task(FetchArtifactTask('p', 's', 'j', FetchArtifactDir('d')))
job.ensure_task(FetchArtifactTask('p', 's', 'j', FetchArtifactDir('d'), dest="somewhere-else"))
job.ensure_task(FetchArtifactTask('p', 's', 'j', FetchArtifactDir('d'), dest="somewhere-else", runif="any"))
job.ensure_task(RakeTask('t1', runif="any"))
job.ensure_task(RakeTask('t2'))
self.check_round_trip_pipeline(configurator, before)
def test_can_round_trip_pipeline_base_on_template(self):
configurator = GoCdConfigurator(empty_config())
before = configurator.ensure_pipeline_group("group").ensure_pipeline("line").set_template_name("temple")
configurator.ensure_template("temple").ensure_stage("stage").ensure_job("job")
self.check_round_trip_pipeline(configurator, before)
def test_can_reverse_engineer_pipeline(self):
configurator = GoCdConfigurator(config('config-with-more-options-pipeline'))
actual = configurator.as_python(more_options_pipeline(), with_save=False)
expected = """#!/usr/bin/env python
from gomatic import *
configurator = GoCdConfigurator(FakeConfig(whatever))
pipeline = configurator\
.ensure_pipeline_group("P.Group")\
.ensure_replacement_of_pipeline("more-options")\
.set_timer("0 15 22 * * ?")\
.set_git_material(GitMaterial("[email protected]:springersbm/gomatic.git", branch="a-branch", material_name="some-material-name", polling=False))\
.ensure_material(PipelineMaterial("pipeline2", "build")).ensure_environment_variables({'JAVA_HOME': '/opt/java/jdk-1.7'})\
.ensure_parameters({'environment': 'qa'})
stage = pipeline.ensure_stage("earlyStage")
job = stage.ensure_job("earlyWorm").ensure_artifacts(set([BuildArtifact("scripts/*", "files"), BuildArtifact("target/universal/myapp*.zip", "artifacts"), TestArtifact("from", "to")])).set_runs_on_all_agents()
job.add_task(ExecTask(['ls']))
job.add_task(ExecTask(['bash', '-c', 'curl "http://domain.com/service/check?target=one+two+three&key=2714_beta%40domain.com"']))
stage = pipeline.ensure_stage("middleStage")
job = stage.ensure_job("middleJob")
stage = pipeline.ensure_stage("s1").set_fetch_materials(False)
job = stage.ensure_job("rake-job").ensure_artifacts({BuildArtifact("things/*")})
job.add_task(RakeTask("boo", "passed"))
job = stage.ensure_job("run-if-variants")
job.add_task(ExecTask(['t-passed']))
job.add_task(ExecTask(['t-none']))
job.add_task(ExecTask(['t-failed'], runif="failed"))
job.add_task(ExecTask(['t-any'], runif="any"))
job.add_task(ExecTask(['t-both'], runif="any"))
job = stage.ensure_job("variety-of-tasks")
job.add_task(RakeTask("sometarget", "passed"))
job.add_task(FetchArtifactTask("more-options", "earlyStage", "earlyWorm", FetchArtifactDir("sourceDir"), dest="destDir"))
job.add_task(FetchArtifactTask("more-options", "middleStage", "middleJob", FetchArtifactFile("someFile")))
job.add_task(ExecTask(['true']))
"""
self.assertEquals(simplified(expected), simplified(actual))
class TestXmlFormatting(unittest.TestCase):
def test_can_format_simple_xml(self):
expected = '<?xml version="1.0" ?>\n<top>\n\t<middle>stuff</middle>\n</top>'
non_formatted = "<top><middle>stuff</middle></top>"
formatted = prettify(non_formatted)
self.assertEquals(expected, formatted)
def test_can_format_more_complicated_xml(self):
expected = '<?xml version="1.0" ?>\n<top>\n\t<middle>\n\t\t<innermost>stuff</innermost>\n\t</middle>\n</top>'
non_formatted = "<top><middle><innermost>stuff</innermost></middle></top>"
formatted = prettify(non_formatted)
self.assertEquals(expected, formatted)
def test_can_format_actual_config(self):
formatted = prettify(open("test-data/config-unformatted.xml").read())
expected = open("test-data/config-formatted.xml").read()
def head(s):
return "\n".join(s.split('\n')[:10])
self.assertEquals(expected, formatted, "expected=\n%s\n%s\nactual=\n%s" % (head(expected), "=" * 88, head(formatted)))
| [] |
adriangrepo/qreservoir | gui/wellplot/settings/style/wellplotstylehandler.py | 20fba1b1fd1a42add223d9e8af2d267665bec493 | import logging
from qrutilities.imageutils import ImageUtils
from PyQt4.QtGui import QColor
logger = logging.getLogger('console')
class WellPlotStyleHandler(object):
'''
Persists well plot style settings chosen in a WellPlotStyleWidget onto a WellPlotData instance.
'''
def saveDataState(self, wellPlotData, wellPlotStyleWidget):
if wellPlotStyleWidget.plotTitleOnCheckBox.isChecked():
wellPlotData.title_on = True
else:
wellPlotData.title_on = False
wellPlotData.title = wellPlotStyleWidget.plotTitleLineEdit.text()
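# Colours come from QColor picker buttons and are persisted as "r,g,b" strings, with opacity stored separately as an alpha value.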
r,g,b,a = QColor(wellPlotStyleWidget.trackBackgroundColorPushButton.color()).getRgb()
rgbString = ImageUtils.rgbToString(r,g,b)
wellPlotData.plot_background_rgb = rgbString
wellPlotData.plot_background_alpha = wellPlotStyleWidget.trackBackgroundOpacitySpinBox.value()
r,g,b,a = QColor(wellPlotStyleWidget.labelBackgroundColorPushButton.color()).getRgb()
rgbString = ImageUtils.rgbToString(r,g,b)
wellPlotData.label_background_rgb = rgbString
wellPlotData.label_background_alpha = wellPlotStyleWidget.labelBackgroundOpacitySpinBox.value()
r,g,b,a = QColor(wellPlotStyleWidget.labelForegroundColorPushButton.color()).getRgb()
rgbString = ImageUtils.rgbToString(r,g,b)
wellPlotData.label_foreground_rgb = rgbString
wellPlotData.label_foreground_alpha = wellPlotStyleWidget.labelForegroundOpacitySpinBox.value()
if wellPlotStyleWidget.singleRowLabelsCheckBox.isChecked():
wellPlotData.single_row_header_labels = True
else:
wellPlotData.single_row_header_labels = False | [((4, 9, 4, 37), 'logging.getLogger', 'logging.getLogger', ({(4, 27, 4, 36): '"""console"""'}, {}), "('console')", False, 'import logging\n'), ((19, 20, 19, 49), 'qrutilities.imageutils.ImageUtils.rgbToString', 'ImageUtils.rgbToString', ({(19, 43, 19, 44): 'r', (19, 45, 19, 46): 'g', (19, 47, 19, 48): 'b'}, {}), '(r, g, b)', False, 'from qrutilities.imageutils import ImageUtils\n'), ((24, 20, 24, 49), 'qrutilities.imageutils.ImageUtils.rgbToString', 'ImageUtils.rgbToString', ({(24, 43, 24, 44): 'r', (24, 45, 24, 46): 'g', (24, 47, 24, 48): 'b'}, {}), '(r, g, b)', False, 'from qrutilities.imageutils import ImageUtils\n'), ((29, 20, 29, 49), 'qrutilities.imageutils.ImageUtils.rgbToString', 'ImageUtils.rgbToString', ({(29, 43, 29, 44): 'r', (29, 45, 29, 46): 'g', (29, 47, 29, 48): 'b'}, {}), '(r, g, b)', False, 'from qrutilities.imageutils import ImageUtils\n')] |
geoffreynyaga/ANGA-UTM | utm_messages/urls.py | 8371a51ad27c85d2479bb34d8c4e02ea28465941 | from django.conf.urls import url
from . import views
app_name = "messages"
urlpatterns = [
url(r'^$', views.InboxListView.as_view(), name='inbox'),
url(r'^sent/$', views.SentMessagesListView.as_view(), name='sent'),
url(r'^compose/$', views.MessagesCreateView.as_view(), name='compose'),
# url(r'^compose-all/$', views.SendToAll.as_view(), name='compose_to_all'),
url(r'^(?P<pk>\d+)/$', views.MessageDetailView.as_view(), name='message_detail'),
url(r'^calendar/$', views.CalendarView.as_view(), name='calendar'),
]
| [] |
iotile/iotile_cloud | server/apps/datablock/tests/test_create_worker.py | 9dc65ac86d3a730bba42108ed7d9bbb963d22ba6 | import datetime
import json
import dateutil.parser
from django.contrib.auth import get_user_model
from django.test import Client, TestCase
from django.utils import timezone
from apps.devicelocation.models import DeviceLocation
from apps.physicaldevice.models import Device
from apps.property.models import GenericProperty
from apps.report.models import GeneratedUserReport
from apps.sqsworker.exceptions import WorkerActionHardError
from apps.stream.models import StreamId, StreamVariable
from apps.streamdata.models import StreamData
from apps.streamevent.models import StreamEventData
from apps.streamfilter.models import *
from apps.streamnote.models import StreamNote
from apps.utils.data_mask.mask_utils import get_data_mask_event, set_data_mask
from apps.utils.gid.convert import *
from apps.utils.test_util import TestMixin
from ..models import *
from ..worker.archive_device_data import ArchiveDeviceDataAction
user_model = get_user_model()
class DataBlockCreateWorkerTests(TestMixin, TestCase):
def setUp(self):
self.usersTestSetup()
self.orgTestSetup()
self.deviceTemplateTestSetup()
self.v1 = StreamVariable.objects.create_variable(
name='Var A', project=self.p1, created_by=self.u2, lid=1,
)
self.v2 = StreamVariable.objects.create_variable(
name='Var B', project=self.p1, created_by=self.u3, lid=2,
)
self.pd1 = Device.objects.create_device(project=self.p1, label='d1', template=self.dt1, created_by=self.u2)
self.pd2 = Device.objects.create_device(project=self.p1, label='d2', template=self.dt1, created_by=self.u2)
StreamId.objects.create_after_new_device(self.pd1)
StreamId.objects.create_after_new_device(self.pd2)
self.s1 = StreamId.objects.filter(variable=self.v1).first()
self.s2 = StreamId.objects.filter(variable=self.v2).first()
def tearDown(self):
StreamFilterAction.objects.all().delete()
StreamFilterTrigger.objects.all().delete()
StreamFilter.objects.all().delete()
StreamId.objects.all().delete()
StreamVariable.objects.all().delete()
GenericProperty.objects.all().delete()
Device.objects.all().delete()
StreamData.objects.all().delete()
StreamEventData.objects.all().delete()
self.deviceTemplateTestTearDown()
self.orgTestTearDown()
self.userTestTearDown()
def testDataBlockActionBadArguments(self):
with self.assertRaises(WorkerActionHardError):
ArchiveDeviceDataAction.schedule(args={})
with self.assertRaises(WorkerActionHardError):
ArchiveDeviceDataAction.schedule(args={'foobar': 5})
with self.assertRaises(WorkerActionHardError):
ArchiveDeviceDataAction.schedule(args={'data_block_slug': 'b--0000-0000-0000-0001', 'extra-bad-arg': 'foo'})
self.assertTrue(ArchiveDeviceDataAction._arguments_ok({'data_block_slug': 'b--0000-0000-0000-0001'}))
action = ArchiveDeviceDataAction()
self.assertIsNotNone(action)
with self.assertRaises(WorkerActionHardError):
action.execute(arguments={'foobar': 5})
def testDataBlockActionNoDataBlock(self):
action = ArchiveDeviceDataAction()
self.assertIsNotNone(action)
with self.assertRaises(WorkerActionHardError):
action.execute({'data_block_slug': 'b--0000-0000-0000-0001'})
def testDataBlockActionMigrateProperties(self):
db1 = DataBlock.objects.create(org=self.o1, title='test', device=self.pd1, block=1, created_by=self.u1)
GenericProperty.objects.create_int_property(slug=self.pd1.slug,
created_by=self.u1,
name='prop1', value=4)
GenericProperty.objects.create_str_property(slug=self.pd1.slug,
created_by=self.u1,
name='prop2', value='4')
GenericProperty.objects.create_bool_property(slug=self.pd1.slug,
created_by=self.u1, is_system=True,
name='@prop3', value=True)
self.assertEqual(GenericProperty.objects.object_properties_qs(self.pd1).count(), 3)
self.assertEqual(GenericProperty.objects.object_properties_qs(db1).count(), 0)
action = ArchiveDeviceDataAction()
action._block = db1
action._device = self.pd1
action._migrate_properties()
self.assertEqual(GenericProperty.objects.object_properties_qs(self.pd1).count(), 1)
self.assertEqual(GenericProperty.objects.object_properties_qs(db1).count(), 3)
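    # Editor's sketch (an assumption, not the real ArchiveDeviceDataAction code):
    # the assertions above are consistent with the worker cloning every property
    # onto the block slug and then deleting the non-system ones from the device.
    # The helper below is illustrative only and is not used by any test; it also
    # assumes object_properties_qs() returns a regular queryset with an is_system
    # field available for filtering.
    def _sketch_migrate_properties(self, device, block):
        for prop in GenericProperty.objects.object_properties_qs(device):
            prop.pk = None            # standard Django idiom: saving with pk=None clones the row
            prop.slug = block.slug    # re-target the clone at the archived data block
            prop.save()
        GenericProperty.objects.object_properties_qs(device).filter(is_system=False).delete()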
def testDataBlockActionMigrateStreams(self):
device = Device.objects.create_device(project=self.p1, label='d3', template=self.dt1, created_by=self.u2)
block = DataBlock.objects.create(org=self.o1, title='test', device=device, block=1, created_by=self.u1)
stream1 = StreamId.objects.create_stream(
project=self.p1, variable=self.v1, device=device, created_by=self.u2
)
stream2 = StreamId.objects.create_stream(
project=self.p1, variable=self.v2, device=device, created_by=self.u2
)
var3 = StreamVariable.objects.create_variable(
name='Var C', project=self.p1, created_by=self.u2, lid=3,
)
stream3 = StreamId.objects.create_stream(
project=self.p1, variable=var3, device=device, created_by=self.u2
)
self.assertEqual(self.p1.variables.count(), 3)
count0 = StreamId.objects.count()
self.assertEqual(device.streamids.count(), 3)
action = ArchiveDeviceDataAction()
action._block = block
action._device = device
action._clone_streams()
self.assertEqual(StreamId.objects.count(), count0 + 3)
def testDataBlockActionMigrateStreamData(self):
device = Device.objects.create_device(project=self.p1, label='d3', template=self.dt1, created_by=self.u2)
block = DataBlock.objects.create(org=self.o1, title='test', device=device, block=1, created_by=self.u1)
stream1 = StreamId.objects.create_stream(
project=self.p1, variable=self.v1, device=device, created_by=self.u2
)
stream2 = StreamId.objects.create_stream(
project=self.p1, variable=self.v2, device=device, created_by=self.u2
)
StreamData.objects.create(
stream_slug=stream1.slug,
type='ITR',
timestamp=timezone.now(),
streamer_local_id=5,
int_value=5
)
StreamData.objects.create(
stream_slug=stream1.slug,
type='ITR',
timestamp=timezone.now(),
streamer_local_id=6,
int_value=6
)
StreamData.objects.create(
stream_slug=stream2.slug,
type='ITR',
timestamp=timezone.now(),
streamer_local_id=7,
int_value=7
)
StreamData.objects.create(
stream_slug=stream1.slug,
type='ITR',
timestamp=timezone.now(),
streamer_local_id=8,
int_value=8
)
StreamData.objects.create(
stream_slug=stream2.slug,
type='ITR',
timestamp=timezone.now(),
streamer_local_id=9,
int_value=9
)
action = ArchiveDeviceDataAction()
action._block = block
action._device = device
action._clone_streams()
self.assertEqual(StreamData.objects.filter(stream_slug=stream1.slug).count(), 3)
self.assertEqual(StreamData.objects.filter(stream_slug=stream2.slug).count(), 2)
action._migrate_stream_data()
self.assertEqual(StreamData.objects.filter(stream_slug=stream1.slug).count(), 0)
self.assertEqual(StreamData.objects.filter(stream_slug=stream2.slug).count(), 0)
new_stream1 = block.get_stream_slug_for(self.v1.formatted_lid)
self.assertEqual(StreamData.objects.filter(stream_slug=new_stream1).count(), 3)
new_stream2 = block.get_stream_slug_for(self.v2.formatted_lid)
self.assertEqual(StreamData.objects.filter(stream_slug=new_stream2).count(), 2)
self.assertEqual(StreamData.objects.filter(stream_slug=new_stream1).first().project_slug, '')
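    # Editor's sketch (an assumption, not the real worker code): the data
    # migration checked above behaves like a bulk re-slugging of the rows onto
    # the block-scoped stream, with project_slug cleared because archived data
    # no longer belongs to a live project. Illustrative only; not used by tests.
    def _sketch_migrate_stream_data(self, device, block):
        for stream in device.streamids.all():
            new_slug = block.get_stream_slug_for(stream.variable.formatted_lid)
            StreamData.objects.filter(stream_slug=stream.slug).update(
                stream_slug=new_slug, project_slug=''
            )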
def testDataBlockActionMigrateStreamEvents(self):
device = Device.objects.create_device(project=self.p1, label='d3', template=self.dt1, created_by=self.u2)
block = DataBlock.objects.create(org=self.o1, title='test', device=device, block=1, created_by=self.u1)
stream1 = StreamId.objects.create_stream(
project=self.p1, variable=self.v1, device=device, created_by=self.u2
)
stream2 = StreamId.objects.create_stream(
project=self.p1, variable=self.v2, device=device, created_by=self.u2
)
StreamEventData.objects.create(
timestamp=timezone.now(),
device_timestamp=10,
stream_slug=stream1.slug,
streamer_local_id=2
)
StreamEventData.objects.create(
timestamp=timezone.now(),
device_timestamp=10,
stream_slug=stream1.slug,
streamer_local_id=3
)
StreamEventData.objects.create(
timestamp=timezone.now(),
device_timestamp=10,
stream_slug=stream2.slug,
streamer_local_id=4
)
action = ArchiveDeviceDataAction()
action._block = block
action._device = device
action._clone_streams()
self.assertEqual(StreamEventData.objects.filter(stream_slug=stream1.slug).count(), 2)
self.assertEqual(StreamEventData.objects.filter(stream_slug=stream2.slug).count(), 1)
action._migrate_stream_events()
self.assertEqual(StreamEventData.objects.filter(stream_slug=stream1.slug).count(), 0)
self.assertEqual(StreamEventData.objects.filter(stream_slug=stream2.slug).count(), 0)
new_stream1 = block.get_stream_slug_for(self.v1.formatted_lid)
self.assertEqual(StreamEventData.objects.filter(stream_slug=new_stream1).count(), 2)
new_stream2 = block.get_stream_slug_for(self.v2.formatted_lid)
self.assertEqual(StreamEventData.objects.filter(stream_slug=new_stream2).count(), 1)
def testDataBlockActionMigrateStreamNote(self):
device = Device.objects.create_device(project=self.p1, label='d3', template=self.dt1, created_by=self.u2)
block = DataBlock.objects.create(org=self.o1, title='test', device=device, block=1, created_by=self.u1)
stream1 = StreamId.objects.create_stream(
project=self.p1, variable=self.v1, device=device, created_by=self.u2
)
StreamId.objects.create_stream(
project=self.p1, variable=self.v2, device=device, created_by=self.u2
)
StreamNote.objects.create(
target_slug=device.slug,
timestamp=timezone.now(),
created_by=self.u2,
note='System 1',
type='sc'
)
StreamNote.objects.create(
target_slug=stream1.slug,
timestamp=timezone.now(),
created_by=self.u2,
note='Note 2'
)
StreamNote.objects.create(
target_slug=stream1.slug,
timestamp=timezone.now(),
created_by=self.u1,
note='Note 3'
)
StreamNote.objects.create(
target_slug=device.slug,
timestamp=timezone.now(),
created_by=self.u2,
note='Note 4'
)
self.assertEqual(StreamNote.objects.count(), 4)
action = ArchiveDeviceDataAction()
action._block = block
action._device = device
action._clone_streams()
self.assertEqual(StreamNote.objects.filter(target_slug=stream1.slug).count(), 2)
self.assertEqual(StreamNote.objects.filter(target_slug=device.slug).count(), 2)
action._migrate_stream_notes()
self.assertEqual(StreamNote.objects.filter(target_slug=stream1.slug).count(), 0)
self.assertEqual(StreamNote.objects.filter(target_slug=device.slug).count(), 1)
new_stream1 = block.get_stream_slug_for(self.v1.formatted_lid)
self.assertEqual(StreamNote.objects.count(), 4)
self.assertEqual(StreamNote.objects.filter(target_slug=new_stream1).count(), 2)
self.assertEqual(StreamNote.objects.filter(target_slug=block.slug).count(), 1)
def testDataBlockActionMigrateDeviceLocations(self):
device = Device.objects.create_device(project=self.p1, label='d3', template=self.dt1, created_by=self.u2)
block = DataBlock.objects.create(org=self.o1, title='test', device=device, block=1, created_by=self.u1)
DeviceLocation.objects.create(
timestamp=timezone.now(),
target_slug=device.slug,
lat=12.1234, lon=10.000,
user=self.u2
)
DeviceLocation.objects.create(
timestamp=timezone.now(),
target_slug=device.slug,
lat=12.1234, lon=11.000,
user=self.u2
)
DeviceLocation.objects.create(
timestamp=timezone.now(),
target_slug=device.slug,
lat=12.1234, lon=12.000,
user=self.u2
)
self.assertEqual(DeviceLocation.objects.count(), 3)
action = ArchiveDeviceDataAction()
action._block = block
action._device = device
self.assertEqual(DeviceLocation.objects.filter(target_slug=device.slug).count(), 3)
action._migrate_device_locations()
self.assertEqual(DeviceLocation.objects.filter(target_slug=device.slug).count(), 0)
self.assertEqual(DeviceLocation.objects.filter(target_slug=block.slug).count(), 3)
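    # Editor's sketch (an assumption, not the real worker code): the location
    # counts above suggest a straight queryset re-target from the device slug to
    # the block slug, roughly as below. Illustrative only; not used by tests.
    def _sketch_migrate_device_locations(self, device, block):
        DeviceLocation.objects.filter(target_slug=device.slug).update(target_slug=block.slug)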
def testDataBlockActionMigrateReports(self):
db1 = DataBlock.objects.create(org=self.pd1.org, title='test', device=self.pd1, block=1, created_by=self.u2)
GeneratedUserReport.objects.create(
org=self.pd1.org,
label='My report 1',
source_ref=self.pd1.slug,
created_by=self.u2
)
GeneratedUserReport.objects.create(
org=self.pd1.org,
label='My report 2',
source_ref=self.pd1.slug,
created_by=self.u2
)
self.assertEqual(GeneratedUserReport.objects.filter(source_ref=self.pd1.slug).count(), 2)
self.assertEqual(GeneratedUserReport.objects.filter(source_ref=db1.slug).count(), 0)
action = ArchiveDeviceDataAction()
action._block = db1
action._device = self.pd1
action._migrate_reports()
self.assertEqual(GeneratedUserReport.objects.filter(source_ref=self.pd1.slug).count(), 0)
self.assertEqual(GeneratedUserReport.objects.filter(source_ref=db1.slug).count(), 2)
def testDataBlockActionTestAll(self):
sg = SensorGraph.objects.create(name='SG 1',
major_version=1,
created_by=self.u1, org=self.o1)
device = Device.objects.create_device(project=self.p1, label='d3', template=self.dt1, sg=sg, created_by=self.u2)
block = DataBlock.objects.create(org=self.o1, title='test', device=device, block=1, created_by=self.u1)
stream1 = StreamId.objects.create_stream(
project=self.p1, variable=self.v1, device=device, created_by=self.u2
)
stream2 = StreamId.objects.create_stream(
project=self.p1, variable=self.v2, device=device, created_by=self.u2
)
GenericProperty.objects.create_int_property(slug=device.slug,
created_by=self.u1,
name='prop1', value=4)
GenericProperty.objects.create_str_property(slug=device.slug,
created_by=self.u1,
name='prop2', value='4')
GenericProperty.objects.create_bool_property(slug=device.slug,
created_by=self.u1,
name='prop3', value=True)
StreamEventData.objects.create(
timestamp=timezone.now(),
device_timestamp=10,
stream_slug=stream1.slug,
streamer_local_id=2
)
StreamEventData.objects.create(
timestamp=timezone.now(),
device_timestamp=10,
stream_slug=stream1.slug,
streamer_local_id=3
)
StreamEventData.objects.create(
timestamp=timezone.now(),
device_timestamp=10,
stream_slug=stream2.slug,
streamer_local_id=4
)
StreamData.objects.create(
stream_slug=stream1.slug,
type='ITR',
timestamp=timezone.now(),
streamer_local_id=5,
int_value=5
)
StreamData.objects.create(
stream_slug=stream1.slug,
type='ITR',
timestamp=timezone.now(),
streamer_local_id=6,
int_value=6
)
StreamData.objects.create(
stream_slug=stream2.slug,
type='ITR',
timestamp=timezone.now(),
streamer_local_id=7,
int_value=7
)
StreamData.objects.create(
stream_slug=stream1.slug,
type='ITR',
timestamp=timezone.now(),
streamer_local_id=8,
int_value=8
)
StreamData.objects.create(
stream_slug=stream2.slug,
type='ITR',
timestamp=timezone.now(),
streamer_local_id=9,
int_value=9
)
StreamNote.objects.create(
target_slug=stream1.slug,
timestamp=timezone.now(),
created_by=self.u2,
note='Note 1'
)
StreamNote.objects.create(
target_slug=stream1.slug,
timestamp=timezone.now(),
created_by=self.u2,
note='Note 2'
)
StreamNote.objects.create(
target_slug=stream1.slug,
timestamp=timezone.now(),
created_by=self.u2,
note='Note 3'
)
StreamNote.objects.create(
target_slug=device.slug,
timestamp=timezone.now(),
created_by=self.u1,
note='Note 4'
)
self.assertEqual(GenericProperty.objects.object_properties_qs(device).count(), 3)
self.assertEqual(GenericProperty.objects.object_properties_qs(block).count(), 0)
self.assertEqual(device.streamids.count(), 2)
self.assertEqual(StreamData.objects.filter(stream_slug=stream1.slug).count(), 3)
self.assertEqual(StreamData.objects.filter(stream_slug=stream2.slug).count(), 2)
self.assertEqual(StreamEventData.objects.filter(stream_slug=stream1.slug).count(), 2)
self.assertEqual(StreamEventData.objects.filter(stream_slug=stream2.slug).count(), 1)
self.assertEqual(StreamNote.objects.filter(target_slug=stream1.slug).count(), 3)
self.assertEqual(StreamNote.objects.filter(target_slug=device.slug).count(), 1)
action = ArchiveDeviceDataAction()
action._block = block
action._device = device
action.execute(arguments={'data_block_slug': block.slug})
self.assertEqual(GenericProperty.objects.object_properties_qs(device).count(), 0)
self.assertEqual(GenericProperty.objects.object_properties_qs(block).count(), 3)
self.assertEqual(device.streamids.count(), 4)
self.assertEqual(StreamData.objects.filter(stream_slug=stream1.slug).count(), 0)
self.assertEqual(StreamData.objects.filter(stream_slug=stream2.slug).count(), 0)
self.assertEqual(StreamEventData.objects.filter(stream_slug=stream1.slug).count(), 0)
self.assertEqual(StreamEventData.objects.filter(stream_slug=stream2.slug).count(), 0)
self.assertEqual(StreamNote.objects.filter(target_slug=stream1.slug).count(), 0)
self.assertEqual(StreamNote.objects.filter(target_slug=device.slug).count(), 1)
new_stream1 = block.get_stream_slug_for(self.v1.formatted_lid)
self.assertEqual(StreamId.objects.filter(slug=new_stream1).count(), 1)
new_stream2 = block.get_stream_slug_for(self.v2.formatted_lid)
self.assertEqual(StreamId.objects.filter(slug=new_stream2).count(), 1)
self.assertEqual(StreamData.objects.filter(stream_slug=new_stream1).count(), 3)
self.assertEqual(StreamEventData.objects.filter(stream_slug=new_stream1).count(), 2)
self.assertEqual(StreamNote.objects.filter(target_slug=new_stream1).count(), 3)
self.assertEqual(StreamData.objects.filter(stream_slug=new_stream2).count(), 2)
self.assertEqual(StreamEventData.objects.filter(stream_slug=new_stream2).count(), 1)
block = DataBlock.objects.first()
self.assertIsNotNone(block.completed_on)
self.assertIsNotNone(block.sg)
self.assertEqual(block.sg, sg)
def testDataBlockActionTestDataMask(self):
sg = SensorGraph.objects.create(name='SG 1',
major_version=1,
created_by=self.u1, org=self.o1)
device = Device.objects.create_device(project=self.p1, label='d3', template=self.dt1, sg=sg, created_by=self.u2)
block = DataBlock.objects.create(org=self.o1, title='test', device=device, block=1, created_by=self.u1)
stream1 = StreamId.objects.create_stream(
project=self.p1, variable=self.v1, device=device, created_by=self.u2
)
dt1 = dateutil.parser.parse('2017-09-28T10:00:00Z')
dt2 = dateutil.parser.parse('2017-09-28T11:00:00Z')
dt3 = dateutil.parser.parse('2017-09-30T10:00:00Z')
dt4 = dateutil.parser.parse('2017-09-30T10:10:00Z')
dt5 = dateutil.parser.parse('2017-09-30T10:20:00Z')
set_data_mask(device, '2017-09-28T10:30:00Z', '2017-09-30T10:15:00Z', [], [], self.u1)
StreamData.objects.create(
stream_slug=stream1.slug,
type='Num',
timestamp=dt1,
int_value=5
)
StreamData.objects.create(
stream_slug=stream1.slug,
type='Num',
timestamp=dt2,
int_value=6
)
StreamData.objects.create(
stream_slug=stream1.slug,
type='Num',
timestamp=dt3,
int_value=7
)
StreamData.objects.create(
stream_slug=stream1.slug,
type='Num',
timestamp=dt4,
int_value=8
)
StreamData.objects.create(
stream_slug=stream1.slug,
type='Num',
timestamp=dt5,
int_value=9
)
self.assertEqual(device.streamids.count(), 1)
data_mask_event = get_data_mask_event(device)
mask_slug = data_mask_event.stream_slug
self.assertEqual(StreamData.objects.filter(stream_slug=stream1.slug).count(), 5)
self.assertEqual(StreamEventData.objects.filter(stream_slug=mask_slug).count(), 1)
action = ArchiveDeviceDataAction()
action._block = block
action._device = device
action.execute(arguments={'data_block_slug': block.slug})
self.assertEqual(device.streamids.count(), 2)
self.assertEqual(StreamData.objects.filter(stream_slug=stream1.slug).count(), 0)
self.assertEqual(StreamEventData.objects.filter(stream_slug=mask_slug).count(), 0)
data_mask_event = get_data_mask_event(block)
self.assertEqual(StreamEventData.objects.filter(stream_slug=data_mask_event.stream_slug).count(), 1)
| [((27, 13, 27, 29), 'django.contrib.auth.get_user_model', 'get_user_model', ({}, {}), '()', False, 'from django.contrib.auth import get_user_model\n'), ((36, 18, 38, 9), 'apps.stream.models.StreamVariable.objects.create_variable', 'StreamVariable.objects.create_variable', (), '', False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((39, 18, 41, 9), 'apps.stream.models.StreamVariable.objects.create_variable', 'StreamVariable.objects.create_variable', (), '', False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((42, 19, 42, 115), 'apps.physicaldevice.models.Device.objects.create_device', 'Device.objects.create_device', (), '', False, 'from apps.physicaldevice.models import Device\n'), ((43, 19, 43, 115), 'apps.physicaldevice.models.Device.objects.create_device', 'Device.objects.create_device', (), '', False, 'from apps.physicaldevice.models import Device\n'), ((44, 8, 44, 58), 'apps.stream.models.StreamId.objects.create_after_new_device', 'StreamId.objects.create_after_new_device', ({(44, 49, 44, 57): 'self.pd1'}, {}), '(self.pd1)', False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((45, 8, 45, 58), 'apps.stream.models.StreamId.objects.create_after_new_device', 'StreamId.objects.create_after_new_device', ({(45, 49, 45, 57): 'self.pd2'}, {}), '(self.pd2)', False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((89, 8, 91, 74), 'apps.property.models.GenericProperty.objects.create_int_property', 'GenericProperty.objects.create_int_property', (), '', False, 'from apps.property.models import GenericProperty\n'), ((92, 8, 94, 76), 'apps.property.models.GenericProperty.objects.create_str_property', 'GenericProperty.objects.create_str_property', (), '', False, 'from apps.property.models import GenericProperty\n'), ((95, 8, 97, 79), 'apps.property.models.GenericProperty.objects.create_bool_property', 'GenericProperty.objects.create_bool_property', (), '', False, 'from apps.property.models import GenericProperty\n'), ((110, 17, 110, 113), 'apps.physicaldevice.models.Device.objects.create_device', 'Device.objects.create_device', (), '', False, 'from apps.physicaldevice.models import Device\n'), ((112, 18, 114, 9), 'apps.stream.models.StreamId.objects.create_stream', 'StreamId.objects.create_stream', (), '', False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((115, 18, 117, 9), 'apps.stream.models.StreamId.objects.create_stream', 'StreamId.objects.create_stream', (), '', False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((118, 15, 120, 9), 'apps.stream.models.StreamVariable.objects.create_variable', 'StreamVariable.objects.create_variable', (), '', False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((121, 18, 123, 9), 'apps.stream.models.StreamId.objects.create_stream', 'StreamId.objects.create_stream', (), '', False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((125, 17, 125, 41), 'apps.stream.models.StreamId.objects.count', 'StreamId.objects.count', ({}, {}), '()', False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((136, 17, 136, 113), 'apps.physicaldevice.models.Device.objects.create_device', 'Device.objects.create_device', (), '', False, 'from apps.physicaldevice.models import Device\n'), ((138, 18, 140, 9), 'apps.stream.models.StreamId.objects.create_stream', 'StreamId.objects.create_stream', (), '', False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((141, 18, 143, 9), 'apps.stream.models.StreamId.objects.create_stream', 
'StreamId.objects.create_stream', (), '', False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((203, 17, 203, 113), 'apps.physicaldevice.models.Device.objects.create_device', 'Device.objects.create_device', (), '', False, 'from apps.physicaldevice.models import Device\n'), ((205, 18, 207, 9), 'apps.stream.models.StreamId.objects.create_stream', 'StreamId.objects.create_stream', (), '', False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((208, 18, 210, 9), 'apps.stream.models.StreamId.objects.create_stream', 'StreamId.objects.create_stream', (), '', False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((251, 17, 251, 113), 'apps.physicaldevice.models.Device.objects.create_device', 'Device.objects.create_device', (), '', False, 'from apps.physicaldevice.models import Device\n'), ((253, 18, 255, 9), 'apps.stream.models.StreamId.objects.create_stream', 'StreamId.objects.create_stream', (), '', False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((256, 8, 258, 9), 'apps.stream.models.StreamId.objects.create_stream', 'StreamId.objects.create_stream', (), '', False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((307, 17, 307, 113), 'apps.physicaldevice.models.Device.objects.create_device', 'Device.objects.create_device', (), '', False, 'from apps.physicaldevice.models import Device\n'), ((346, 8, 351, 9), 'apps.report.models.GeneratedUserReport.objects.create', 'GeneratedUserReport.objects.create', (), '', False, 'from apps.report.models import GeneratedUserReport\n'), ((352, 8, 357, 9), 'apps.report.models.GeneratedUserReport.objects.create', 'GeneratedUserReport.objects.create', (), '', False, 'from apps.report.models import GeneratedUserReport\n'), ((373, 17, 373, 120), 'apps.physicaldevice.models.Device.objects.create_device', 'Device.objects.create_device', (), '', False, 'from apps.physicaldevice.models import Device\n'), ((375, 18, 377, 9), 'apps.stream.models.StreamId.objects.create_stream', 'StreamId.objects.create_stream', (), '', False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((378, 18, 380, 9), 'apps.stream.models.StreamId.objects.create_stream', 'StreamId.objects.create_stream', (), '', False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((382, 8, 384, 74), 'apps.property.models.GenericProperty.objects.create_int_property', 'GenericProperty.objects.create_int_property', (), '', False, 'from apps.property.models import GenericProperty\n'), ((385, 8, 387, 76), 'apps.property.models.GenericProperty.objects.create_str_property', 'GenericProperty.objects.create_str_property', (), '', False, 'from apps.property.models import GenericProperty\n'), ((388, 8, 390, 78), 'apps.property.models.GenericProperty.objects.create_bool_property', 'GenericProperty.objects.create_bool_property', (), '', False, 'from apps.property.models import GenericProperty\n'), ((518, 17, 518, 120), 'apps.physicaldevice.models.Device.objects.create_device', 'Device.objects.create_device', (), '', False, 'from apps.physicaldevice.models import Device\n'), ((520, 18, 522, 9), 'apps.stream.models.StreamId.objects.create_stream', 'StreamId.objects.create_stream', (), '', False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((530, 8, 530, 94), 'apps.utils.data_mask.mask_utils.set_data_mask', 'set_data_mask', ({(530, 22, 530, 28): 'device', (530, 30, 530, 52): '"""2017-09-28T10:30:00Z"""', (530, 54, 530, 76): '"""2017-09-30T10:15:00Z"""', (530, 78, 530, 80): '[]', (530, 82, 530, 84): 
'[]', (530, 86, 530, 93): 'self.u1'}, {}), "(device, '2017-09-28T10:30:00Z', '2017-09-30T10:15:00Z', [], [\n ], self.u1)", False, 'from apps.utils.data_mask.mask_utils import get_data_mask_event, set_data_mask\n'), ((532, 8, 537, 9), 'apps.streamdata.models.StreamData.objects.create', 'StreamData.objects.create', (), '', False, 'from apps.streamdata.models import StreamData\n'), ((538, 8, 543, 9), 'apps.streamdata.models.StreamData.objects.create', 'StreamData.objects.create', (), '', False, 'from apps.streamdata.models import StreamData\n'), ((544, 8, 549, 9), 'apps.streamdata.models.StreamData.objects.create', 'StreamData.objects.create', (), '', False, 'from apps.streamdata.models import StreamData\n'), ((550, 8, 555, 9), 'apps.streamdata.models.StreamData.objects.create', 'StreamData.objects.create', (), '', False, 'from apps.streamdata.models import StreamData\n'), ((556, 8, 561, 9), 'apps.streamdata.models.StreamData.objects.create', 'StreamData.objects.create', (), '', False, 'from apps.streamdata.models import StreamData\n'), ((565, 26, 565, 53), 'apps.utils.data_mask.mask_utils.get_data_mask_event', 'get_data_mask_event', ({(565, 46, 565, 52): 'device'}, {}), '(device)', False, 'from apps.utils.data_mask.mask_utils import get_data_mask_event, set_data_mask\n'), ((580, 26, 580, 52), 'apps.utils.data_mask.mask_utils.get_data_mask_event', 'get_data_mask_event', ({(580, 46, 580, 51): 'block'}, {}), '(block)', False, 'from apps.utils.data_mask.mask_utils import get_data_mask_event, set_data_mask\n'), ((132, 25, 132, 49), 'apps.stream.models.StreamId.objects.count', 'StreamId.objects.count', ({}, {}), '()', False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((285, 25, 285, 51), 'apps.streamnote.models.StreamNote.objects.count', 'StreamNote.objects.count', ({}, {}), '()', False, 'from apps.streamnote.models import StreamNote\n'), ((301, 25, 301, 51), 'apps.streamnote.models.StreamNote.objects.count', 'StreamNote.objects.count', ({}, {}), '()', False, 'from apps.streamnote.models import StreamNote\n'), ((329, 25, 329, 55), 'apps.devicelocation.models.DeviceLocation.objects.count', 'DeviceLocation.objects.count', ({}, {}), '()', False, 'from apps.devicelocation.models import DeviceLocation\n'), ((46, 18, 46, 59), 'apps.stream.models.StreamId.objects.filter', 'StreamId.objects.filter', (), '', False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((47, 18, 47, 59), 'apps.stream.models.StreamId.objects.filter', 'StreamId.objects.filter', (), '', False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((53, 8, 53, 30), 'apps.stream.models.StreamId.objects.all', 'StreamId.objects.all', ({}, {}), '()', False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((54, 8, 54, 36), 'apps.stream.models.StreamVariable.objects.all', 'StreamVariable.objects.all', ({}, {}), '()', False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((55, 8, 55, 37), 'apps.property.models.GenericProperty.objects.all', 'GenericProperty.objects.all', ({}, {}), '()', False, 'from apps.property.models import GenericProperty\n'), ((56, 8, 56, 28), 'apps.physicaldevice.models.Device.objects.all', 'Device.objects.all', ({}, {}), '()', False, 'from apps.physicaldevice.models import Device\n'), ((57, 8, 57, 32), 'apps.streamdata.models.StreamData.objects.all', 'StreamData.objects.all', ({}, {}), '()', False, 'from apps.streamdata.models import StreamData\n'), ((58, 8, 58, 37), 'apps.streamevent.models.StreamEventData.objects.all', 
'StreamEventData.objects.all', ({}, {}), '()', False, 'from apps.streamevent.models import StreamEventData\n'), ((148, 22, 148, 36), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((155, 22, 155, 36), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((162, 22, 162, 36), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((169, 22, 169, 36), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((176, 22, 176, 36), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((213, 22, 213, 36), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((219, 22, 219, 36), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((225, 22, 225, 36), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((262, 22, 262, 36), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((269, 22, 269, 36), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((275, 22, 275, 36), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((281, 22, 281, 36), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((311, 22, 311, 36), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((317, 22, 317, 36), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((323, 22, 323, 36), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((392, 22, 392, 36), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((398, 22, 398, 36), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((404, 22, 404, 36), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((412, 22, 412, 36), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((419, 22, 419, 36), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((426, 22, 426, 36), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((433, 22, 433, 36), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((440, 22, 440, 36), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((446, 22, 446, 36), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((452, 22, 452, 36), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((458, 22, 458, 36), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((464, 22, 464, 36), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 
'from django.utils import timezone\n'), ((98, 25, 98, 79), 'apps.property.models.GenericProperty.objects.object_properties_qs', 'GenericProperty.objects.object_properties_qs', ({(98, 70, 98, 78): 'self.pd1'}, {}), '(self.pd1)', False, 'from apps.property.models import GenericProperty\n'), ((99, 25, 99, 74), 'apps.property.models.GenericProperty.objects.object_properties_qs', 'GenericProperty.objects.object_properties_qs', ({(99, 70, 99, 73): 'db1'}, {}), '(db1)', False, 'from apps.property.models import GenericProperty\n'), ((105, 25, 105, 79), 'apps.property.models.GenericProperty.objects.object_properties_qs', 'GenericProperty.objects.object_properties_qs', ({(105, 70, 105, 78): 'self.pd1'}, {}), '(self.pd1)', False, 'from apps.property.models import GenericProperty\n'), ((106, 25, 106, 74), 'apps.property.models.GenericProperty.objects.object_properties_qs', 'GenericProperty.objects.object_properties_qs', ({(106, 70, 106, 73): 'db1'}, {}), '(db1)', False, 'from apps.property.models import GenericProperty\n'), ((186, 25, 186, 76), 'apps.streamdata.models.StreamData.objects.filter', 'StreamData.objects.filter', (), '', False, 'from apps.streamdata.models import StreamData\n'), ((187, 25, 187, 76), 'apps.streamdata.models.StreamData.objects.filter', 'StreamData.objects.filter', (), '', False, 'from apps.streamdata.models import StreamData\n'), ((191, 25, 191, 76), 'apps.streamdata.models.StreamData.objects.filter', 'StreamData.objects.filter', (), '', False, 'from apps.streamdata.models import StreamData\n'), ((192, 25, 192, 76), 'apps.streamdata.models.StreamData.objects.filter', 'StreamData.objects.filter', (), '', False, 'from apps.streamdata.models import StreamData\n'), ((195, 25, 195, 75), 'apps.streamdata.models.StreamData.objects.filter', 'StreamData.objects.filter', (), '', False, 'from apps.streamdata.models import StreamData\n'), ((197, 25, 197, 75), 'apps.streamdata.models.StreamData.objects.filter', 'StreamData.objects.filter', (), '', False, 'from apps.streamdata.models import StreamData\n'), ((236, 25, 236, 81), 'apps.streamevent.models.StreamEventData.objects.filter', 'StreamEventData.objects.filter', (), '', False, 'from apps.streamevent.models import StreamEventData\n'), ((237, 25, 237, 81), 'apps.streamevent.models.StreamEventData.objects.filter', 'StreamEventData.objects.filter', (), '', False, 'from apps.streamevent.models import StreamEventData\n'), ((241, 25, 241, 81), 'apps.streamevent.models.StreamEventData.objects.filter', 'StreamEventData.objects.filter', (), '', False, 'from apps.streamevent.models import StreamEventData\n'), ((242, 25, 242, 81), 'apps.streamevent.models.StreamEventData.objects.filter', 'StreamEventData.objects.filter', (), '', False, 'from apps.streamevent.models import StreamEventData\n'), ((245, 25, 245, 80), 'apps.streamevent.models.StreamEventData.objects.filter', 'StreamEventData.objects.filter', (), '', False, 'from apps.streamevent.models import StreamEventData\n'), ((247, 25, 247, 80), 'apps.streamevent.models.StreamEventData.objects.filter', 'StreamEventData.objects.filter', (), '', False, 'from apps.streamevent.models import StreamEventData\n'), ((292, 25, 292, 76), 'apps.streamnote.models.StreamNote.objects.filter', 'StreamNote.objects.filter', (), '', False, 'from apps.streamnote.models import StreamNote\n'), ((293, 25, 293, 75), 'apps.streamnote.models.StreamNote.objects.filter', 'StreamNote.objects.filter', (), '', False, 'from apps.streamnote.models import StreamNote\n'), ((297, 25, 297, 76), 
'apps.streamnote.models.StreamNote.objects.filter', 'StreamNote.objects.filter', (), '', False, 'from apps.streamnote.models import StreamNote\n'), ((298, 25, 298, 75), 'apps.streamnote.models.StreamNote.objects.filter', 'StreamNote.objects.filter', (), '', False, 'from apps.streamnote.models import StreamNote\n'), ((302, 25, 302, 75), 'apps.streamnote.models.StreamNote.objects.filter', 'StreamNote.objects.filter', (), '', False, 'from apps.streamnote.models import StreamNote\n'), ((303, 25, 303, 74), 'apps.streamnote.models.StreamNote.objects.filter', 'StreamNote.objects.filter', (), '', False, 'from apps.streamnote.models import StreamNote\n'), ((335, 25, 335, 79), 'apps.devicelocation.models.DeviceLocation.objects.filter', 'DeviceLocation.objects.filter', (), '', False, 'from apps.devicelocation.models import DeviceLocation\n'), ((339, 25, 339, 79), 'apps.devicelocation.models.DeviceLocation.objects.filter', 'DeviceLocation.objects.filter', (), '', False, 'from apps.devicelocation.models import DeviceLocation\n'), ((340, 25, 340, 78), 'apps.devicelocation.models.DeviceLocation.objects.filter', 'DeviceLocation.objects.filter', (), '', False, 'from apps.devicelocation.models import DeviceLocation\n'), ((359, 25, 359, 85), 'apps.report.models.GeneratedUserReport.objects.filter', 'GeneratedUserReport.objects.filter', (), '', False, 'from apps.report.models import GeneratedUserReport\n'), ((360, 25, 360, 80), 'apps.report.models.GeneratedUserReport.objects.filter', 'GeneratedUserReport.objects.filter', (), '', False, 'from apps.report.models import GeneratedUserReport\n'), ((366, 25, 366, 85), 'apps.report.models.GeneratedUserReport.objects.filter', 'GeneratedUserReport.objects.filter', (), '', False, 'from apps.report.models import GeneratedUserReport\n'), ((367, 25, 367, 80), 'apps.report.models.GeneratedUserReport.objects.filter', 'GeneratedUserReport.objects.filter', (), '', False, 'from apps.report.models import GeneratedUserReport\n'), ((469, 25, 469, 77), 'apps.property.models.GenericProperty.objects.object_properties_qs', 'GenericProperty.objects.object_properties_qs', ({(469, 70, 469, 76): 'device'}, {}), '(device)', False, 'from apps.property.models import GenericProperty\n'), ((470, 25, 470, 76), 'apps.property.models.GenericProperty.objects.object_properties_qs', 'GenericProperty.objects.object_properties_qs', ({(470, 70, 470, 75): 'block'}, {}), '(block)', False, 'from apps.property.models import GenericProperty\n'), ((474, 25, 474, 76), 'apps.streamdata.models.StreamData.objects.filter', 'StreamData.objects.filter', (), '', False, 'from apps.streamdata.models import StreamData\n'), ((475, 25, 475, 76), 'apps.streamdata.models.StreamData.objects.filter', 'StreamData.objects.filter', (), '', False, 'from apps.streamdata.models import StreamData\n'), ((476, 25, 476, 81), 'apps.streamevent.models.StreamEventData.objects.filter', 'StreamEventData.objects.filter', (), '', False, 'from apps.streamevent.models import StreamEventData\n'), ((477, 25, 477, 81), 'apps.streamevent.models.StreamEventData.objects.filter', 'StreamEventData.objects.filter', (), '', False, 'from apps.streamevent.models import StreamEventData\n'), ((478, 25, 478, 76), 'apps.streamnote.models.StreamNote.objects.filter', 'StreamNote.objects.filter', (), '', False, 'from apps.streamnote.models import StreamNote\n'), ((479, 25, 479, 75), 'apps.streamnote.models.StreamNote.objects.filter', 'StreamNote.objects.filter', (), '', False, 'from apps.streamnote.models import StreamNote\n'), ((486, 25, 486, 77), 
'apps.property.models.GenericProperty.objects.object_properties_qs', 'GenericProperty.objects.object_properties_qs', ({(486, 70, 486, 76): 'device'}, {}), '(device)', False, 'from apps.property.models import GenericProperty\n'), ((487, 25, 487, 76), 'apps.property.models.GenericProperty.objects.object_properties_qs', 'GenericProperty.objects.object_properties_qs', ({(487, 70, 487, 75): 'block'}, {}), '(block)', False, 'from apps.property.models import GenericProperty\n'), ((491, 25, 491, 76), 'apps.streamdata.models.StreamData.objects.filter', 'StreamData.objects.filter', (), '', False, 'from apps.streamdata.models import StreamData\n'), ((492, 25, 492, 76), 'apps.streamdata.models.StreamData.objects.filter', 'StreamData.objects.filter', (), '', False, 'from apps.streamdata.models import StreamData\n'), ((493, 25, 493, 81), 'apps.streamevent.models.StreamEventData.objects.filter', 'StreamEventData.objects.filter', (), '', False, 'from apps.streamevent.models import StreamEventData\n'), ((494, 25, 494, 81), 'apps.streamevent.models.StreamEventData.objects.filter', 'StreamEventData.objects.filter', (), '', False, 'from apps.streamevent.models import StreamEventData\n'), ((495, 25, 495, 76), 'apps.streamnote.models.StreamNote.objects.filter', 'StreamNote.objects.filter', (), '', False, 'from apps.streamnote.models import StreamNote\n'), ((496, 25, 496, 75), 'apps.streamnote.models.StreamNote.objects.filter', 'StreamNote.objects.filter', (), '', False, 'from apps.streamnote.models import StreamNote\n'), ((499, 25, 499, 66), 'apps.stream.models.StreamId.objects.filter', 'StreamId.objects.filter', (), '', False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((501, 25, 501, 66), 'apps.stream.models.StreamId.objects.filter', 'StreamId.objects.filter', (), '', False, 'from apps.stream.models import StreamId, StreamVariable\n'), ((503, 25, 503, 75), 'apps.streamdata.models.StreamData.objects.filter', 'StreamData.objects.filter', (), '', False, 'from apps.streamdata.models import StreamData\n'), ((504, 25, 504, 80), 'apps.streamevent.models.StreamEventData.objects.filter', 'StreamEventData.objects.filter', (), '', False, 'from apps.streamevent.models import StreamEventData\n'), ((505, 25, 505, 75), 'apps.streamnote.models.StreamNote.objects.filter', 'StreamNote.objects.filter', (), '', False, 'from apps.streamnote.models import StreamNote\n'), ((506, 25, 506, 75), 'apps.streamdata.models.StreamData.objects.filter', 'StreamData.objects.filter', (), '', False, 'from apps.streamdata.models import StreamData\n'), ((507, 25, 507, 80), 'apps.streamevent.models.StreamEventData.objects.filter', 'StreamEventData.objects.filter', (), '', False, 'from apps.streamevent.models import StreamEventData\n'), ((567, 25, 567, 76), 'apps.streamdata.models.StreamData.objects.filter', 'StreamData.objects.filter', (), '', False, 'from apps.streamdata.models import StreamData\n'), ((568, 25, 568, 78), 'apps.streamevent.models.StreamEventData.objects.filter', 'StreamEventData.objects.filter', (), '', False, 'from apps.streamevent.models import StreamEventData\n'), ((577, 25, 577, 76), 'apps.streamdata.models.StreamData.objects.filter', 'StreamData.objects.filter', (), '', False, 'from apps.streamdata.models import StreamData\n'), ((578, 25, 578, 78), 'apps.streamevent.models.StreamEventData.objects.filter', 'StreamEventData.objects.filter', (), '', False, 'from apps.streamevent.models import StreamEventData\n'), ((581, 25, 581, 96), 'apps.streamevent.models.StreamEventData.objects.filter', 
'StreamEventData.objects.filter', (), '', False, 'from apps.streamevent.models import StreamEventData\n'), ((199, 25, 199, 75), 'apps.streamdata.models.StreamData.objects.filter', 'StreamData.objects.filter', (), '', False, 'from apps.streamdata.models import StreamData\n')] |
maya2250/nova | nova/policies/servers.py | e483ca1cd9a5db5856f87fc69ca07c42d2be5def | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
RULE_AOO = base.RULE_ADMIN_OR_OWNER
SERVERS = 'os_compute_api:servers:%s'
NETWORK_ATTACH_EXTERNAL = 'network:attach_external_network'
ZERO_DISK_FLAVOR = SERVERS % 'create:zero_disk_flavor'
REQUESTED_DESTINATION = 'compute:servers:create:requested_destination'
CROSS_CELL_RESIZE = 'compute:servers:resize:cross_cell'
rules = [
policy.DocumentedRuleDefault(
SERVERS % 'index',
RULE_AOO,
"List all servers",
[
{
'method': 'GET',
'path': '/servers'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'detail',
RULE_AOO,
"List all servers with detailed information",
[
{
'method': 'GET',
'path': '/servers/detail'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'index:get_all_tenants',
base.RULE_ADMIN_API,
"List all servers for all projects",
[
{
'method': 'GET',
'path': '/servers'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'detail:get_all_tenants',
base.RULE_ADMIN_API,
"List all servers with detailed information for all projects",
[
{
'method': 'GET',
'path': '/servers/detail'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'allow_all_filters',
base.RULE_ADMIN_API,
"Allow all filters when listing servers",
[
{
'method': 'GET',
'path': '/servers'
},
{
'method': 'GET',
'path': '/servers/detail'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'show',
RULE_AOO,
"Show a server",
[
{
'method': 'GET',
'path': '/servers/{server_id}'
}
]),
# the details in host_status are pretty sensitive, only admins
# should do that by default.
policy.DocumentedRuleDefault(
SERVERS % 'show:host_status',
base.RULE_ADMIN_API,
"""
Show a server with additional host status information.
This means host_status will be shown irrespective of status value. If showing
only host_status UNKNOWN is desired, use the
``os_compute_api:servers:show:host_status:unknown-only`` policy rule.
Microversion 2.75 added the ``host_status`` attribute in the
``PUT /servers/{server_id}`` and ``POST /servers/{server_id}/action (rebuild)``
API responses which are also controlled by this policy rule, like the
``GET /servers*`` APIs.
""",
[
{
'method': 'GET',
'path': '/servers/{server_id}'
},
{
'method': 'GET',
'path': '/servers/detail'
},
{
'method': 'PUT',
'path': '/servers/{server_id}'
},
{
'method': 'POST',
'path': '/servers/{server_id}/action (rebuild)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'show:host_status:unknown-only',
base.RULE_ADMIN_API,
"""
Show a server with additional host status information, only if host status is
UNKNOWN.
This policy rule will only be enforced when the
``os_compute_api:servers:show:host_status`` policy rule does not pass for the
request. An example policy configuration could be where the
``os_compute_api:servers:show:host_status`` rule is set to allow admin-only and
the ``os_compute_api:servers:show:host_status:unknown-only`` rule is set to
allow everyone.
""",
[
{
'method': 'GET',
'path': '/servers/{server_id}'
},
{
'method': 'GET',
'path': '/servers/detail'
}
]),
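    # Editor's note: a hedged example of the configuration described in the
    # docstring above, as it could appear in an operator's policy.yaml
    # (illustrative only, not part of the upstream defaults):
    #
    #     "os_compute_api:servers:show:host_status": "rule:admin_api"
    #     "os_compute_api:servers:show:host_status:unknown-only": "@"
    #
    # In oslo.policy syntax "@" allows every caller, so non-admins would only
    # see host_status when its value is UNKNOWN.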
policy.DocumentedRuleDefault(
SERVERS % 'create',
RULE_AOO,
"Create a server",
[
{
'method': 'POST',
'path': '/servers'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'create:forced_host',
base.RULE_ADMIN_API,
"""
Create a server on the specified host and/or node.
In this case, the server is forced to launch on the specified
host and/or node by bypassing the scheduler filters unlike the
``compute:servers:create:requested_destination`` rule.
""",
[
{
'method': 'POST',
'path': '/servers'
}
]),
policy.DocumentedRuleDefault(
REQUESTED_DESTINATION,
base.RULE_ADMIN_API,
"""
Create a server on the requested compute service host and/or
hypervisor_hostname.
In this case, the requested host and/or hypervisor_hostname is
validated by the scheduler filters unlike the
``os_compute_api:servers:create:forced_host`` rule.
""",
[
{
'method': 'POST',
'path': '/servers'
}
]),
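    # Editor's note (request shapes are an assumption for illustration, not taken
    # from this file): the legacy "forced host" form encodes the target in the
    # availability_zone value and bypasses scheduler filters, while microversion
    # 2.74 added explicit fields that are still validated by the scheduler and
    # therefore hit the requested_destination rule above:
    #
    #     {"server": {..., "availability_zone": "nova:host1:node1"}}          # create:forced_host
    #     {"server": {..., "host": "host1", "hypervisor_hostname": "node1"}}  # requested_destination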
policy.DocumentedRuleDefault(
SERVERS % 'create:attach_volume',
RULE_AOO,
"Create a server with the requested volume attached to it",
[
{
'method': 'POST',
'path': '/servers'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'create:attach_network',
RULE_AOO,
"Create a server with the requested network attached to it",
[
{
'method': 'POST',
'path': '/servers'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'create:trusted_certs',
RULE_AOO,
"Create a server with trusted image certificate IDs",
[
{
'method': 'POST',
'path': '/servers'
}
]),
policy.DocumentedRuleDefault(
ZERO_DISK_FLAVOR,
base.RULE_ADMIN_API,
"""
This rule controls the compute API validation behavior of creating a server
with a flavor that has 0 disk, indicating the server should be volume-backed.
For a flavor with disk=0, the root disk will be set to exactly the size of the
image used to deploy the instance. However, in this case the filter_scheduler
cannot select the compute host based on the virtual image size. Therefore, 0
should only be used for volume booted instances or for testing purposes.
WARNING: It is a potential security exposure to enable this policy rule
if users can upload their own images since repeated attempts to
create a disk=0 flavor instance with a large image can exhaust
the local disk of the compute (or shared storage cluster). See bug
https://bugs.launchpad.net/nova/+bug/1739646 for details.
""",
[
{
'method': 'POST',
'path': '/servers'
}
]),
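    # Editor's note: an illustrative policy.yaml override relaxing the rule above
    # to project owners; given the warning in the docstring, keeping the
    # admin-only default is the safer choice when users can upload images.
    #
    #     "os_compute_api:servers:create:zero_disk_flavor": "rule:admin_or_owner"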
policy.DocumentedRuleDefault(
NETWORK_ATTACH_EXTERNAL,
'is_admin:True',
"Attach an unshared external network to a server",
[
# Create a server with a requested network or port.
{
'method': 'POST',
'path': '/servers'
},
# Attach a network or port to an existing server.
{
'method': 'POST',
'path': '/servers/{server_id}/os-interface'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'delete',
RULE_AOO,
"Delete a server",
[
{
'method': 'DELETE',
'path': '/servers/{server_id}'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'update',
RULE_AOO,
"Update a server",
[
{
'method': 'PUT',
'path': '/servers/{server_id}'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'confirm_resize',
RULE_AOO,
"Confirm a server resize",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (confirmResize)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'revert_resize',
RULE_AOO,
"Revert a server resize",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (revertResize)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'reboot',
RULE_AOO,
"Reboot a server",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (reboot)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'resize',
RULE_AOO,
"Resize a server",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (resize)'
}
]),
policy.DocumentedRuleDefault(
CROSS_CELL_RESIZE,
base.RULE_NOBODY,
"Resize a server across cells. By default, this is disabled for all "
"users and recommended to be tested in a deployment for admin users "
"before opening it up to non-admin users. Resizing within a cell is "
"the default preferred behavior even if this is enabled. ",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (resize)'
}
]),
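    # Editor's note: an illustrative policy.yaml override that enables cross-cell
    # resize for admins only, matching the staged roll-out suggested in the
    # description above:
    #
    #     "compute:servers:resize:cross_cell": "rule:admin_api"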
policy.DocumentedRuleDefault(
SERVERS % 'rebuild',
RULE_AOO,
"Rebuild a server",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (rebuild)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'rebuild:trusted_certs',
RULE_AOO,
"Rebuild a server with trusted image certificate IDs",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (rebuild)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'create_image',
RULE_AOO,
"Create an image from a server",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (createImage)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'create_image:allow_volume_backed',
RULE_AOO,
"Create an image from a volume backed server",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (createImage)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'start',
RULE_AOO,
"Start a server",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (os-start)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'stop',
RULE_AOO,
"Stop a server",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (os-stop)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'trigger_crash_dump',
RULE_AOO,
"Trigger crash dump in a server",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (trigger_crash_dump)'
}
]),
]
def list_rules():
return rules
| [((27, 4, 36, 10), 'oslo_policy.policy.DocumentedRuleDefault', 'policy.DocumentedRuleDefault', ({(28, 8, 28, 25): "(SERVERS % 'index')", (29, 8, 29, 16): 'RULE_AOO', (30, 8, 30, 26): '"""List all servers"""', (31, 8, 36, 9): "[{'method': 'GET', 'path': '/servers'}]"}, {}), "(SERVERS % 'index', RULE_AOO,\n 'List all servers', [{'method': 'GET', 'path': '/servers'}])", False, 'from oslo_policy import policy\n'), ((37, 4, 46, 10), 'oslo_policy.policy.DocumentedRuleDefault', 'policy.DocumentedRuleDefault', ({(38, 8, 38, 26): "(SERVERS % 'detail')", (39, 8, 39, 16): 'RULE_AOO', (40, 8, 40, 52): '"""List all servers with detailed information"""', (41, 8, 46, 9): "[{'method': 'GET', 'path': '/servers/detail'}]"}, {}), "(SERVERS % 'detail', RULE_AOO,\n 'List all servers with detailed information', [{'method': 'GET', 'path':\n '/servers/detail'}])", False, 'from oslo_policy import policy\n'), ((47, 4, 56, 10), 'oslo_policy.policy.DocumentedRuleDefault', 'policy.DocumentedRuleDefault', ({(48, 8, 48, 41): "(SERVERS % 'index:get_all_tenants')", (49, 8, 49, 27): 'base.RULE_ADMIN_API', (50, 8, 50, 43): '"""List all servers for all projects"""', (51, 8, 56, 9): "[{'method': 'GET', 'path': '/servers'}]"}, {}), "(SERVERS % 'index:get_all_tenants', base.\n RULE_ADMIN_API, 'List all servers for all projects', [{'method': 'GET',\n 'path': '/servers'}])", False, 'from oslo_policy import policy\n'), ((57, 4, 66, 10), 'oslo_policy.policy.DocumentedRuleDefault', 'policy.DocumentedRuleDefault', ({(58, 8, 58, 42): "(SERVERS % 'detail:get_all_tenants')", (59, 8, 59, 27): 'base.RULE_ADMIN_API', (60, 8, 60, 69): '"""List all servers with detailed information for all projects"""', (61, 8, 66, 9): "[{'method': 'GET', 'path': '/servers/detail'}]"}, {}), "(SERVERS % 'detail:get_all_tenants', base.\n RULE_ADMIN_API,\n 'List all servers with detailed information for all projects', [{\n 'method': 'GET', 'path': '/servers/detail'}])", False, 'from oslo_policy import policy\n'), ((67, 4, 80, 10), 'oslo_policy.policy.DocumentedRuleDefault', 'policy.DocumentedRuleDefault', ({(68, 8, 68, 37): "(SERVERS % 'allow_all_filters')", (69, 8, 69, 27): 'base.RULE_ADMIN_API', (70, 8, 70, 48): '"""Allow all filters when listing servers"""', (71, 8, 80, 9): "[{'method': 'GET', 'path': '/servers'}, {'method': 'GET', 'path':\n '/servers/detail'}]"}, {}), "(SERVERS % 'allow_all_filters', base.\n RULE_ADMIN_API, 'Allow all filters when listing servers', [{'method':\n 'GET', 'path': '/servers'}, {'method': 'GET', 'path': '/servers/detail'}])", False, 'from oslo_policy import policy\n'), ((81, 4, 90, 10), 'oslo_policy.policy.DocumentedRuleDefault', 'policy.DocumentedRuleDefault', ({(82, 8, 82, 24): "(SERVERS % 'show')", (83, 8, 83, 16): 'RULE_AOO', (84, 8, 84, 23): '"""Show a server"""', (85, 8, 90, 9): "[{'method': 'GET', 'path': '/servers/{server_id}'}]"}, {}), "(SERVERS % 'show', RULE_AOO, 'Show a server', [\n {'method': 'GET', 'path': '/servers/{server_id}'}])", False, 'from oslo_policy import policy\n'), ((93, 4, 125, 10), 'oslo_policy.policy.DocumentedRuleDefault', 'policy.DocumentedRuleDefault', ({(94, 8, 94, 36): "(SERVERS % 'show:host_status')", (95, 8, 95, 27): 'base.RULE_ADMIN_API', (96, 8, 107, 3): '"""\nShow a server with additional host status information.\n\nThis means host_status will be shown irrespective of status value. 
If showing\nonly host_status UNKNOWN is desired, use the\n``os_compute_api:servers:show:host_status:unknown-only`` policy rule.\n\nMicrovision 2.75 added the ``host_status`` attribute in the\n``PUT /servers/{server_id}`` and ``POST /servers/{server_id}/action (rebuild)``\nAPI responses which are also controlled by this policy rule, like the\n``GET /servers*`` APIs.\n"""', (108, 8, 125, 9): "[{'method': 'GET', 'path': '/servers/{server_id}'}, {'method': 'GET',\n 'path': '/servers/detail'}, {'method': 'PUT', 'path':\n '/servers/{server_id}'}, {'method': 'POST', 'path':\n '/servers/{server_id}/action (rebuild)'}]"}, {}), '(SERVERS % \'show:host_status\', base.\n RULE_ADMIN_API,\n """\nShow a server with additional host status information.\n\nThis means host_status will be shown irrespective of status value. If showing\nonly host_status UNKNOWN is desired, use the\n``os_compute_api:servers:show:host_status:unknown-only`` policy rule.\n\nMicrovision 2.75 added the ``host_status`` attribute in the\n``PUT /servers/{server_id}`` and ``POST /servers/{server_id}/action (rebuild)``\nAPI responses which are also controlled by this policy rule, like the\n``GET /servers*`` APIs.\n"""\n , [{\'method\': \'GET\', \'path\': \'/servers/{server_id}\'}, {\'method\': \'GET\',\n \'path\': \'/servers/detail\'}, {\'method\': \'PUT\', \'path\':\n \'/servers/{server_id}\'}, {\'method\': \'POST\', \'path\':\n \'/servers/{server_id}/action (rebuild)\'}])', False, 'from oslo_policy import policy\n'), ((126, 4, 149, 10), 'oslo_policy.policy.DocumentedRuleDefault', 'policy.DocumentedRuleDefault', ({(127, 12, 127, 53): "(SERVERS % 'show:host_status:unknown-only')", (128, 8, 128, 27): 'base.RULE_ADMIN_API', (129, 8, 139, 3): '"""\nShow a server with additional host status information, only if host status is\nUNKNOWN.\n\nThis policy rule will only be enforced when the\n``os_compute_api:servers:show:host_status`` policy rule does not pass for the\nrequest. An example policy configuration could be where the\n``os_compute_api:servers:show:host_status`` rule is set to allow admin-only and\nthe ``os_compute_api:servers:show:host_status:unknown-only`` rule is set to\nallow everyone.\n"""', (140, 8, 149, 9): "[{'method': 'GET', 'path': '/servers/{server_id}'}, {'method': 'GET',\n 'path': '/servers/detail'}]"}, {}), '(SERVERS % \'show:host_status:unknown-only\',\n base.RULE_ADMIN_API,\n """\nShow a server with additional host status information, only if host status is\nUNKNOWN.\n\nThis policy rule will only be enforced when the\n``os_compute_api:servers:show:host_status`` policy rule does not pass for the\nrequest. 
An example policy configuration could be where the\n``os_compute_api:servers:show:host_status`` rule is set to allow admin-only and\nthe ``os_compute_api:servers:show:host_status:unknown-only`` rule is set to\nallow everyone.\n"""\n , [{\'method\': \'GET\', \'path\': \'/servers/{server_id}\'}, {\'method\': \'GET\',\n \'path\': \'/servers/detail\'}])', False, 'from oslo_policy import policy\n'), ((150, 4, 159, 10), 'oslo_policy.policy.DocumentedRuleDefault', 'policy.DocumentedRuleDefault', ({(151, 8, 151, 26): "(SERVERS % 'create')", (152, 8, 152, 16): 'RULE_AOO', (153, 8, 153, 25): '"""Create a server"""', (154, 8, 159, 9): "[{'method': 'POST', 'path': '/servers'}]"}, {}), "(SERVERS % 'create', RULE_AOO,\n 'Create a server', [{'method': 'POST', 'path': '/servers'}])", False, 'from oslo_policy import policy\n'), ((160, 4, 175, 10), 'oslo_policy.policy.DocumentedRuleDefault', 'policy.DocumentedRuleDefault', ({(161, 8, 161, 38): "(SERVERS % 'create:forced_host')", (162, 8, 162, 27): 'base.RULE_ADMIN_API', (163, 8, 169, 3): '"""\nCreate a server on the specified host and/or node.\n\nIn this case, the server is forced to launch on the specified\nhost and/or node by bypassing the scheduler filters unlike the\n``compute:servers:create:requested_destination`` rule.\n"""', (170, 8, 175, 9): "[{'method': 'POST', 'path': '/servers'}]"}, {}), '(SERVERS % \'create:forced_host\', base.\n RULE_ADMIN_API,\n """\nCreate a server on the specified host and/or node.\n\nIn this case, the server is forced to launch on the specified\nhost and/or node by bypassing the scheduler filters unlike the\n``compute:servers:create:requested_destination`` rule.\n"""\n , [{\'method\': \'POST\', \'path\': \'/servers\'}])', False, 'from oslo_policy import policy\n'), ((176, 4, 192, 10), 'oslo_policy.policy.DocumentedRuleDefault', 'policy.DocumentedRuleDefault', ({(177, 8, 177, 29): 'REQUESTED_DESTINATION', (178, 8, 178, 27): 'base.RULE_ADMIN_API', (179, 8, 186, 3): '"""\nCreate a server on the requested compute service host and/or\nhypervisor_hostname.\n\nIn this case, the requested host and/or hypervisor_hostname is\nvalidated by the scheduler filters unlike the\n``os_compute_api:servers:create:forced_host`` rule.\n"""', (187, 8, 192, 9): "[{'method': 'POST', 'path': '/servers'}]"}, {}), '(REQUESTED_DESTINATION, base.RULE_ADMIN_API,\n """\nCreate a server on the requested compute service host and/or\nhypervisor_hostname.\n\nIn this case, the requested host and/or hypervisor_hostname is\nvalidated by the scheduler filters unlike the\n``os_compute_api:servers:create:forced_host`` rule.\n"""\n , [{\'method\': \'POST\', \'path\': \'/servers\'}])', False, 'from oslo_policy import policy\n'), ((193, 4, 202, 10), 'oslo_policy.policy.DocumentedRuleDefault', 'policy.DocumentedRuleDefault', ({(194, 8, 194, 40): "(SERVERS % 'create:attach_volume')", (195, 8, 195, 16): 'RULE_AOO', (196, 8, 196, 66): '"""Create a server with the requested volume attached to it"""', (197, 8, 202, 9): "[{'method': 'POST', 'path': '/servers'}]"}, {}), "(SERVERS % 'create:attach_volume', RULE_AOO,\n 'Create a server with the requested volume attached to it', [{'method':\n 'POST', 'path': '/servers'}])", False, 'from oslo_policy import policy\n'), ((203, 4, 212, 10), 'oslo_policy.policy.DocumentedRuleDefault', 'policy.DocumentedRuleDefault', ({(204, 8, 204, 41): "(SERVERS % 'create:attach_network')", (205, 8, 205, 16): 'RULE_AOO', (206, 8, 206, 67): '"""Create a server with the requested network attached to it"""', (207, 8, 212, 9): "[{'method': 'POST', 
'path': '/servers'}]"}, {}), "(SERVERS % 'create:attach_network', RULE_AOO,\n 'Create a server with the requested network attached to it', [{'method':\n 'POST', 'path': '/servers'}])", False, 'from oslo_policy import policy\n'), ((213, 4, 222, 10), 'oslo_policy.policy.DocumentedRuleDefault', 'policy.DocumentedRuleDefault', ({(214, 8, 214, 40): "(SERVERS % 'create:trusted_certs')", (215, 8, 215, 16): 'RULE_AOO', (216, 8, 216, 60): '"""Create a server with trusted image certificate IDs"""', (217, 8, 222, 9): "[{'method': 'POST', 'path': '/servers'}]"}, {}), "(SERVERS % 'create:trusted_certs', RULE_AOO,\n 'Create a server with trusted image certificate IDs', [{'method':\n 'POST', 'path': '/servers'}])", False, 'from oslo_policy import policy\n'), ((223, 4, 246, 10), 'oslo_policy.policy.DocumentedRuleDefault', 'policy.DocumentedRuleDefault', ({(224, 8, 224, 24): 'ZERO_DISK_FLAVOR', (225, 8, 225, 27): 'base.RULE_ADMIN_API', (226, 8, 240, 3): '"""\nThis rule controls the compute API validation behavior of creating a server\nwith a flavor that has 0 disk, indicating the server should be volume-backed.\n\nFor a flavor with disk=0, the root disk will be set to exactly the size of the\nimage used to deploy the instance. However, in this case the filter_scheduler\ncannot select the compute host based on the virtual image size. Therefore, 0\nshould only be used for volume booted instances or for testing purposes.\n\nWARNING: It is a potential security exposure to enable this policy rule\nif users can upload their own images since repeated attempts to\ncreate a disk=0 flavor instance with a large image can exhaust\nthe local disk of the compute (or shared storage cluster). See bug\nhttps://bugs.launchpad.net/nova/+bug/1739646 for details.\n"""', (241, 8, 246, 9): "[{'method': 'POST', 'path': '/servers'}]"}, {}), '(ZERO_DISK_FLAVOR, base.RULE_ADMIN_API,\n """\nThis rule controls the compute API validation behavior of creating a server\nwith a flavor that has 0 disk, indicating the server should be volume-backed.\n\nFor a flavor with disk=0, the root disk will be set to exactly the size of the\nimage used to deploy the instance. However, in this case the filter_scheduler\ncannot select the compute host based on the virtual image size. Therefore, 0\nshould only be used for volume booted instances or for testing purposes.\n\nWARNING: It is a potential security exposure to enable this policy rule\nif users can upload their own images since repeated attempts to\ncreate a disk=0 flavor instance with a large image can exhaust\nthe local disk of the compute (or shared storage cluster). 
See bug\nhttps://bugs.launchpad.net/nova/+bug/1739646 for details.\n"""\n , [{\'method\': \'POST\', \'path\': \'/servers\'}])', False, 'from oslo_policy import policy\n'), ((247, 4, 262, 10), 'oslo_policy.policy.DocumentedRuleDefault', 'policy.DocumentedRuleDefault', ({(248, 8, 248, 31): 'NETWORK_ATTACH_EXTERNAL', (249, 8, 249, 23): '"""is_admin:True"""', (250, 8, 250, 57): '"""Attach an unshared external network to a server"""', (251, 8, 262, 9): "[{'method': 'POST', 'path': '/servers'}, {'method': 'POST', 'path':\n '/servers/{server_id}/os-interface'}]"}, {}), "(NETWORK_ATTACH_EXTERNAL, 'is_admin:True',\n 'Attach an unshared external network to a server', [{'method': 'POST',\n 'path': '/servers'}, {'method': 'POST', 'path':\n '/servers/{server_id}/os-interface'}])", False, 'from oslo_policy import policy\n'), ((263, 4, 272, 10), 'oslo_policy.policy.DocumentedRuleDefault', 'policy.DocumentedRuleDefault', ({(264, 8, 264, 26): "(SERVERS % 'delete')", (265, 8, 265, 16): 'RULE_AOO', (266, 8, 266, 25): '"""Delete a server"""', (267, 8, 272, 9): "[{'method': 'DELETE', 'path': '/servers/{server_id}'}]"}, {}), "(SERVERS % 'delete', RULE_AOO,\n 'Delete a server', [{'method': 'DELETE', 'path': '/servers/{server_id}'}])", False, 'from oslo_policy import policy\n'), ((273, 4, 282, 10), 'oslo_policy.policy.DocumentedRuleDefault', 'policy.DocumentedRuleDefault', ({(274, 8, 274, 26): "(SERVERS % 'update')", (275, 8, 275, 16): 'RULE_AOO', (276, 8, 276, 25): '"""Update a server"""', (277, 8, 282, 9): "[{'method': 'PUT', 'path': '/servers/{server_id}'}]"}, {}), "(SERVERS % 'update', RULE_AOO,\n 'Update a server', [{'method': 'PUT', 'path': '/servers/{server_id}'}])", False, 'from oslo_policy import policy\n'), ((283, 4, 292, 10), 'oslo_policy.policy.DocumentedRuleDefault', 'policy.DocumentedRuleDefault', ({(284, 8, 284, 34): "(SERVERS % 'confirm_resize')", (285, 8, 285, 16): 'RULE_AOO', (286, 8, 286, 33): '"""Confirm a server resize"""', (287, 8, 292, 9): "[{'method': 'POST', 'path': '/servers/{server_id}/action (confirmResize)'}]"}, {}), "(SERVERS % 'confirm_resize', RULE_AOO,\n 'Confirm a server resize', [{'method': 'POST', 'path':\n '/servers/{server_id}/action (confirmResize)'}])", False, 'from oslo_policy import policy\n'), ((293, 4, 302, 10), 'oslo_policy.policy.DocumentedRuleDefault', 'policy.DocumentedRuleDefault', ({(294, 8, 294, 33): "(SERVERS % 'revert_resize')", (295, 8, 295, 16): 'RULE_AOO', (296, 8, 296, 32): '"""Revert a server resize"""', (297, 8, 302, 9): "[{'method': 'POST', 'path': '/servers/{server_id}/action (revertResize)'}]"}, {}), "(SERVERS % 'revert_resize', RULE_AOO,\n 'Revert a server resize', [{'method': 'POST', 'path':\n '/servers/{server_id}/action (revertResize)'}])", False, 'from oslo_policy import policy\n'), ((303, 4, 312, 10), 'oslo_policy.policy.DocumentedRuleDefault', 'policy.DocumentedRuleDefault', ({(304, 8, 304, 26): "(SERVERS % 'reboot')", (305, 8, 305, 16): 'RULE_AOO', (306, 8, 306, 25): '"""Reboot a server"""', (307, 8, 312, 9): "[{'method': 'POST', 'path': '/servers/{server_id}/action (reboot)'}]"}, {}), "(SERVERS % 'reboot', RULE_AOO,\n 'Reboot a server', [{'method': 'POST', 'path':\n '/servers/{server_id}/action (reboot)'}])", False, 'from oslo_policy import policy\n'), ((313, 4, 322, 10), 'oslo_policy.policy.DocumentedRuleDefault', 'policy.DocumentedRuleDefault', ({(314, 8, 314, 26): "(SERVERS % 'resize')", (315, 8, 315, 16): 'RULE_AOO', (316, 8, 316, 25): '"""Resize a server"""', (317, 8, 322, 9): "[{'method': 'POST', 'path': '/servers/{server_id}/action 
(resize)'}]"}, {}), "(SERVERS % 'resize', RULE_AOO,\n 'Resize a server', [{'method': 'POST', 'path':\n '/servers/{server_id}/action (resize)'}])", False, 'from oslo_policy import policy\n'), ((323, 4, 335, 10), 'oslo_policy.policy.DocumentedRuleDefault', 'policy.DocumentedRuleDefault', ({(324, 8, 324, 25): 'CROSS_CELL_RESIZE', (325, 8, 325, 24): 'base.RULE_NOBODY', (326, 8, 329, 66): '"""Resize a server across cells. By default, this is disabled for all users and recommended to be tested in a deployment for admin users before opening it up to non-admin users. Resizing within a cell is the default preferred behavior even if this is enabled. """', (330, 8, 335, 9): "[{'method': 'POST', 'path': '/servers/{server_id}/action (resize)'}]"}, {}), "(CROSS_CELL_RESIZE, base.RULE_NOBODY,\n 'Resize a server across cells. By default, this is disabled for all users and recommended to be tested in a deployment for admin users before opening it up to non-admin users. Resizing within a cell is the default preferred behavior even if this is enabled. '\n , [{'method': 'POST', 'path': '/servers/{server_id}/action (resize)'}])", False, 'from oslo_policy import policy\n'), ((336, 4, 345, 10), 'oslo_policy.policy.DocumentedRuleDefault', 'policy.DocumentedRuleDefault', ({(337, 8, 337, 27): "(SERVERS % 'rebuild')", (338, 8, 338, 16): 'RULE_AOO', (339, 8, 339, 26): '"""Rebuild a server"""', (340, 8, 345, 9): "[{'method': 'POST', 'path': '/servers/{server_id}/action (rebuild)'}]"}, {}), "(SERVERS % 'rebuild', RULE_AOO,\n 'Rebuild a server', [{'method': 'POST', 'path':\n '/servers/{server_id}/action (rebuild)'}])", False, 'from oslo_policy import policy\n'), ((346, 4, 355, 10), 'oslo_policy.policy.DocumentedRuleDefault', 'policy.DocumentedRuleDefault', ({(347, 8, 347, 41): "(SERVERS % 'rebuild:trusted_certs')", (348, 8, 348, 16): 'RULE_AOO', (349, 8, 349, 61): '"""Rebuild a server with trusted image certificate IDs"""', (350, 8, 355, 9): "[{'method': 'POST', 'path': '/servers/{server_id}/action (rebuild)'}]"}, {}), "(SERVERS % 'rebuild:trusted_certs', RULE_AOO,\n 'Rebuild a server with trusted image certificate IDs', [{'method':\n 'POST', 'path': '/servers/{server_id}/action (rebuild)'}])", False, 'from oslo_policy import policy\n'), ((356, 4, 365, 10), 'oslo_policy.policy.DocumentedRuleDefault', 'policy.DocumentedRuleDefault', ({(357, 8, 357, 32): "(SERVERS % 'create_image')", (358, 8, 358, 16): 'RULE_AOO', (359, 8, 359, 39): '"""Create an image from a server"""', (360, 8, 365, 9): "[{'method': 'POST', 'path': '/servers/{server_id}/action (createImage)'}]"}, {}), "(SERVERS % 'create_image', RULE_AOO,\n 'Create an image from a server', [{'method': 'POST', 'path':\n '/servers/{server_id}/action (createImage)'}])", False, 'from oslo_policy import policy\n'), ((366, 4, 375, 10), 'oslo_policy.policy.DocumentedRuleDefault', 'policy.DocumentedRuleDefault', ({(367, 8, 367, 52): "(SERVERS % 'create_image:allow_volume_backed')", (368, 8, 368, 16): 'RULE_AOO', (369, 8, 369, 53): '"""Create an image from a volume backed server"""', (370, 8, 375, 9): "[{'method': 'POST', 'path': '/servers/{server_id}/action (createImage)'}]"}, {}), "(SERVERS % 'create_image:allow_volume_backed',\n RULE_AOO, 'Create an image from a volume backed server', [{'method':\n 'POST', 'path': '/servers/{server_id}/action (createImage)'}])", False, 'from oslo_policy import policy\n'), ((376, 4, 385, 10), 'oslo_policy.policy.DocumentedRuleDefault', 'policy.DocumentedRuleDefault', ({(377, 8, 377, 25): "(SERVERS % 'start')", (378, 8, 378, 16): 'RULE_AOO', 
(379, 8, 379, 24): '"""Start a server"""', (380, 8, 385, 9): "[{'method': 'POST', 'path': '/servers/{server_id}/action (os-start)'}]"}, {}), "(SERVERS % 'start', RULE_AOO, 'Start a server',\n [{'method': 'POST', 'path': '/servers/{server_id}/action (os-start)'}])", False, 'from oslo_policy import policy\n'), ((386, 4, 395, 10), 'oslo_policy.policy.DocumentedRuleDefault', 'policy.DocumentedRuleDefault', ({(387, 8, 387, 24): "(SERVERS % 'stop')", (388, 8, 388, 16): 'RULE_AOO', (389, 8, 389, 23): '"""Stop a server"""', (390, 8, 395, 9): "[{'method': 'POST', 'path': '/servers/{server_id}/action (os-stop)'}]"}, {}), "(SERVERS % 'stop', RULE_AOO, 'Stop a server', [\n {'method': 'POST', 'path': '/servers/{server_id}/action (os-stop)'}])", False, 'from oslo_policy import policy\n'), ((396, 4, 405, 10), 'oslo_policy.policy.DocumentedRuleDefault', 'policy.DocumentedRuleDefault', ({(397, 8, 397, 38): "(SERVERS % 'trigger_crash_dump')", (398, 8, 398, 16): 'RULE_AOO', (399, 8, 399, 40): '"""Trigger crash dump in a server"""', (400, 8, 405, 9): "[{'method': 'POST', 'path': '/servers/{server_id}/action (trigger_crash_dump)'}\n ]"}, {}), "(SERVERS % 'trigger_crash_dump', RULE_AOO,\n 'Trigger crash dump in a server', [{'method': 'POST', 'path':\n '/servers/{server_id}/action (trigger_crash_dump)'}])", False, 'from oslo_policy import policy\n')] |
sajaldebnath/vrops-custom-group-creation | set-env.py | e3c821336832445e93706ad29afe216867660123 | #!/usr/bin/env python
"""
#
# set-env - a small python program to setup the configuration environment for data-push.py
# data-push.py contains the python program to push attribute values to vROps
# Author Sajal Debnath <[email protected]>
#
"""
# Importing the required modules
import json
import base64
import os,sys
# Getting the absolute path from where the script is being run
def get_script_path():
return os.path.dirname(os.path.realpath(sys.argv[0]))
# Getting the inputs from user
def get_the_inputs():
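    # Interactively collect the adapter kind, resource kind and vROps server details (Python 2 raw_input prompts).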
adapterkind = raw_input("Please enter Adapter Kind: ")
resourceKind = raw_input("Please enter Resource Kind: ")
servername = raw_input("Enter enter Server IP/FQDN: ")
serveruid = raw_input("Please enter user id: ")
serverpasswd = raw_input("Please enter vRops password: ")
encryptedvar = base64.b64encode(serverpasswd)
data = {}
data["adapterKind"] = adapterkind
data["resourceKind"] = resourceKind
serverdetails = {}
serverdetails["name"] = servername
serverdetails["userid"] = serveruid
serverdetails["password"] = encryptedvar
data["server"] = serverdetails
return data
# Getting the path where env.json file should be kept
path = get_script_path()
fullpath = path+"/"+"env.json"
# Getting the data for the env.json file
final_data = get_the_inputs()
# Saving the data to env.json file
with open(fullpath, 'w') as outfile:
json.dump(final_data, outfile, sort_keys = True, indent = 2, separators=(',', ':'), ensure_ascii=False) | [((29, 19, 29, 49), 'base64.b64encode', 'base64.b64encode', ({(29, 36, 29, 48): 'serverpasswd'}, {}), '(serverpasswd)', False, 'import base64\n'), ((55, 4, 55, 107), 'json.dump', 'json.dump', (), '', False, 'import json\n'), ((19, 27, 19, 56), 'os.path.realpath', 'os.path.realpath', ({(19, 44, 19, 55): 'sys.argv[0]'}, {}), '(sys.argv[0])', False, 'import os, sys\n')] |
gtadeus/LeetCodeChallenge2009 | week02/day08.py | 81d3fae205fb9071d7a98260df9bbeb1c8c8ffe0 | import unittest
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def sumRootToLeaf(self, root: TreeNode) -> int:
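        # self.c returns each root-to-leaf path as a binary string (or a bare int for a lone root node); sum their values.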
m = self.c(root)
r=0
for n in m:
if n != 0:
if n== 1:
r+=1
else:
r+=int(n,2)
return r
def c(self, l):
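        # Recursively build root-to-leaf paths: a leaf returns [val], inner nodes prefix their value onto every child path.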
if l.left is None and l.right is None:
return [l.val]
else:
p, p2 = [], []
if not l.left is None:
p=self.c(l.left)
if not l.right is None:
p2=self.c(l.right)
v=f'{l.val}'
#v = l.val << 1
for i, x in enumerate(p):
if not l.left is None:
p[i]=f'{v}{x}'
for i, x in enumerate(p2):
if not l.right is None:
p2[i]=f'{v}{x}'
return p+p2
class TestDay08(unittest.TestCase):
S = Solution()
input_ = [ TreeNode(1, TreeNode(0, TreeNode(0,None,None), TreeNode(1,None,None)), TreeNode(1, TreeNode(0,None,None), TreeNode(1,None,None))) ]
solutions = [22]
def testSumRoot(self):
for indx, val in enumerate(self.input_):
self.assertEqual(self.solutions[indx], self.S.sumRootToLeaf(val))
if __name__ == "__main__":
unittest.main()
| [((51, 4, 51, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n')] |
tiuD/cross-prom | config.py | 8b987138ec32e0ac64ca6ffe13d0e1cd0d18aef3 | TOKEN = "1876415562:AAEsX_c9k3Fot2IT0BYRqkCCQ5vFEHQDLDQ"
CHAT_ID = [957539786] # e.g. [1234567, 2233445, 3466123...]
| [] |
mik2k2/buchschloss | buchschloss/gui2/__init__.py | 8a9d17de5847ccab48a0de48aa4b60af2a7cc045 | """entry point"""
from . import main
start = main.app.launch
| [] |
francesco-p/FACIL | src/tests/test_stop_at_task.py | e719deebb6d2acb5778b60759294c23ea5e2b454 | from tests import run_main_and_assert
FAST_LOCAL_TEST_ARGS = "--exp-name local_test --datasets mnist" \
" --network LeNet --num-tasks 5 --seed 1 --batch-size 32" \
" --nepochs 2 --num-workers 0 --stop-at-task 3"
def test_finetuning_stop_at_task():
args_line = FAST_LOCAL_TEST_ARGS
args_line += " --approach finetuning"
run_main_and_assert(args_line)
| [((11, 4, 11, 34), 'tests.run_main_and_assert', 'run_main_and_assert', ({(11, 24, 11, 33): 'args_line'}, {}), '(args_line)', False, 'from tests import run_main_and_assert\n')] |
shreyventure/LeetCode-Solutions | Python/contains-duplicate.py | 74423d65702b78974e390f17c9d6365d17e6eed5 | # Author: Anuj Sharma (@optider)
# Github Profile: https://github.com/Optider/
# Problem Link: https://leetcode.com/problems/contains-duplicate/
class Solution:
def containsDuplicate(self, nums: List[int]) -> bool:
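        # Record each value in a dict; the first value seen twice proves a duplicate.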
count = {}
for n in nums :
            if count.get(n) is not None:
return True
count[n] = 1
return False
| [] |
google-ar/chromium | build/android/gyp/dex.py | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import optparse
import os
import sys
import tempfile
import zipfile
from util import build_utils
def _CheckFilePathEndsWithJar(parser, file_path):
if not file_path.endswith(".jar"):
# dx ignores non .jar files.
parser.error("%s does not end in .jar" % file_path)
def _CheckFilePathsEndWithJar(parser, file_paths):
for file_path in file_paths:
_CheckFilePathEndsWithJar(parser, file_path)
def _RemoveUnwantedFilesFromZip(dex_path):
iz = zipfile.ZipFile(dex_path, 'r')
tmp_dex_path = '%s.tmp.zip' % dex_path
oz = zipfile.ZipFile(tmp_dex_path, 'w', zipfile.ZIP_DEFLATED)
for i in iz.namelist():
if i.endswith('.dex'):
oz.writestr(i, iz.read(i))
os.remove(dex_path)
os.rename(tmp_dex_path, dex_path)
def _ParseArgs(args):
args = build_utils.ExpandFileArgs(args)
parser = optparse.OptionParser()
build_utils.AddDepfileOption(parser)
parser.add_option('--android-sdk-tools',
help='Android sdk build tools directory.')
parser.add_option('--output-directory',
default=os.getcwd(),
help='Path to the output build directory.')
parser.add_option('--dex-path', help='Dex output path.')
parser.add_option('--configuration-name',
help='The build CONFIGURATION_NAME.')
parser.add_option('--proguard-enabled',
help='"true" if proguard is enabled.')
parser.add_option('--debug-build-proguard-enabled',
help='"true" if proguard is enabled for debug build.')
parser.add_option('--proguard-enabled-input-path',
help=('Path to dex in Release mode when proguard '
'is enabled.'))
parser.add_option('--no-locals', default='0',
help='Exclude locals list from the dex file.')
parser.add_option('--incremental',
action='store_true',
help='Enable incremental builds when possible.')
parser.add_option('--inputs', help='A list of additional input paths.')
parser.add_option('--excluded-paths',
help='A list of paths to exclude from the dex file.')
parser.add_option('--main-dex-list-path',
help='A file containing a list of the classes to '
'include in the main dex.')
parser.add_option('--multidex-configuration-path',
help='A JSON file containing multidex build configuration.')
parser.add_option('--multi-dex', default=False, action='store_true',
help='Generate multiple dex files.')
options, paths = parser.parse_args(args)
required_options = ('android_sdk_tools',)
build_utils.CheckOptions(options, parser, required=required_options)
if options.multidex_configuration_path:
with open(options.multidex_configuration_path) as multidex_config_file:
multidex_config = json.loads(multidex_config_file.read())
options.multi_dex = multidex_config.get('enabled', False)
if options.multi_dex and not options.main_dex_list_path:
logging.warning('multidex cannot be enabled without --main-dex-list-path')
options.multi_dex = False
elif options.main_dex_list_path and not options.multi_dex:
logging.warning('--main-dex-list-path is unused if multidex is not enabled')
if options.inputs:
options.inputs = build_utils.ParseGnList(options.inputs)
_CheckFilePathsEndWithJar(parser, options.inputs)
if options.excluded_paths:
options.excluded_paths = build_utils.ParseGnList(options.excluded_paths)
if options.proguard_enabled_input_path:
_CheckFilePathEndsWithJar(parser, options.proguard_enabled_input_path)
_CheckFilePathsEndWithJar(parser, paths)
return options, paths
def _AllSubpathsAreClassFiles(paths, changes):
for path in paths:
if any(not p.endswith('.class') for p in changes.IterChangedSubpaths(path)):
return False
return True
def _DexWasEmpty(paths, changes):
for path in paths:
if any(p.endswith('.class')
for p in changes.old_metadata.IterSubpaths(path)):
return False
return True
def _IterAllClassFiles(changes):
for path in changes.IterAllPaths():
for subpath in changes.IterAllSubpaths(path):
if subpath.endswith('.class'):
yield path
def _MightHitDxBug(changes):
# We've seen dx --incremental fail for small libraries. It's unlikely a
# speed-up anyways in this case.
num_classes = sum(1 for x in _IterAllClassFiles(changes))
if num_classes < 10:
return True
# We've also been able to consistently produce a failure by adding an empty
# line to the top of the first .java file of a library.
# https://crbug.com/617935
first_file = next(_IterAllClassFiles(changes))
for path in changes.IterChangedPaths():
for subpath in changes.IterChangedSubpaths(path):
if first_file == subpath:
return True
return False
def _RunDx(changes, options, dex_cmd, paths):
with build_utils.TempDir() as classes_temp_dir:
# --multi-dex is incompatible with --incremental.
if options.multi_dex:
dex_cmd.append('--main-dex-list=%s' % options.main_dex_list_path)
else:
# --incremental tells dx to merge all newly dex'ed .class files with
# what that already exist in the output dex file (existing classes are
# replaced).
# Use --incremental when .class files are added or modified, but not when
# any are removed (since it won't know to remove them).
if (options.incremental
and not _MightHitDxBug(changes)
and changes.AddedOrModifiedOnly()):
changed_inputs = set(changes.IterChangedPaths())
changed_paths = [p for p in paths if p in changed_inputs]
if not changed_paths:
return
# When merging in other dex files, there's no easy way to know if
# classes were removed from them.
if (_AllSubpathsAreClassFiles(changed_paths, changes)
and not _DexWasEmpty(changed_paths, changes)):
dex_cmd.append('--incremental')
for path in changed_paths:
changed_subpaths = set(changes.IterChangedSubpaths(path))
# Note: |changed_subpaths| may be empty if nothing changed.
if changed_subpaths:
build_utils.ExtractAll(path, path=classes_temp_dir,
predicate=lambda p: p in changed_subpaths)
paths = [classes_temp_dir]
dex_cmd += paths
build_utils.CheckOutput(dex_cmd, print_stderr=False)
if options.dex_path.endswith('.zip'):
_RemoveUnwantedFilesFromZip(options.dex_path)
def _OnStaleMd5(changes, options, dex_cmd, paths):
_RunDx(changes, options, dex_cmd, paths)
build_utils.WriteJson(
[os.path.relpath(p, options.output_directory) for p in paths],
options.dex_path + '.inputs')
def main(args):
options, paths = _ParseArgs(args)
if ((options.proguard_enabled == 'true'
and options.configuration_name == 'Release')
or (options.debug_build_proguard_enabled == 'true'
and options.configuration_name == 'Debug')):
paths = [options.proguard_enabled_input_path]
if options.inputs:
paths += options.inputs
if options.excluded_paths:
# Excluded paths are relative to the output directory.
exclude_paths = options.excluded_paths
paths = [p for p in paths if not
os.path.relpath(p, options.output_directory) in exclude_paths]
input_paths = list(paths)
dx_binary = os.path.join(options.android_sdk_tools, 'dx')
# See http://crbug.com/272064 for context on --force-jumbo.
# See https://github.com/android/platform_dalvik/commit/dd140a22d for
# --num-threads.
# See http://crbug.com/658782 for why -JXmx2G was added.
dex_cmd = [dx_binary, '-JXmx2G', '--num-threads=8', '--dex', '--force-jumbo',
'--output', options.dex_path]
if options.no_locals != '0':
dex_cmd.append('--no-locals')
if options.multi_dex:
input_paths.append(options.main_dex_list_path)
dex_cmd += [
'--multi-dex',
'--minimal-main-dex',
]
output_paths = [
options.dex_path,
options.dex_path + '.inputs',
]
# An escape hatch to be able to check if incremental dexing is causing
# problems.
force = int(os.environ.get('DISABLE_INCREMENTAL_DX', 0))
build_utils.CallAndWriteDepfileIfStale(
lambda changes: _OnStaleMd5(changes, options, dex_cmd, paths),
options,
input_paths=input_paths,
input_strings=dex_cmd,
output_paths=output_paths,
force=force,
pass_changes=True)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| [((30, 7, 30, 37), 'zipfile.ZipFile', 'zipfile.ZipFile', ({(30, 23, 30, 31): 'dex_path', (30, 33, 30, 36): '"""r"""'}, {}), "(dex_path, 'r')", False, 'import zipfile\n'), ((32, 7, 32, 63), 'zipfile.ZipFile', 'zipfile.ZipFile', ({(32, 23, 32, 35): 'tmp_dex_path', (32, 37, 32, 40): '"""w"""', (32, 42, 32, 62): 'zipfile.ZIP_DEFLATED'}, {}), "(tmp_dex_path, 'w', zipfile.ZIP_DEFLATED)", False, 'import zipfile\n'), ((36, 2, 36, 21), 'os.remove', 'os.remove', ({(36, 12, 36, 20): 'dex_path'}, {}), '(dex_path)', False, 'import os\n'), ((37, 2, 37, 35), 'os.rename', 'os.rename', ({(37, 12, 37, 24): 'tmp_dex_path', (37, 26, 37, 34): 'dex_path'}, {}), '(tmp_dex_path, dex_path)', False, 'import os\n'), ((41, 9, 41, 41), 'util.build_utils.ExpandFileArgs', 'build_utils.ExpandFileArgs', ({(41, 36, 41, 40): 'args'}, {}), '(args)', False, 'from util import build_utils\n'), ((43, 11, 43, 34), 'optparse.OptionParser', 'optparse.OptionParser', ({}, {}), '()', False, 'import optparse\n'), ((44, 2, 44, 38), 'util.build_utils.AddDepfileOption', 'build_utils.AddDepfileOption', ({(44, 31, 44, 37): 'parser'}, {}), '(parser)', False, 'from util import build_utils\n'), ((80, 2, 80, 70), 'util.build_utils.CheckOptions', 'build_utils.CheckOptions', (), '', False, 'from util import build_utils\n'), ((210, 14, 210, 59), 'os.path.join', 'os.path.join', ({(210, 27, 210, 52): 'options.android_sdk_tools', (210, 54, 210, 58): '"""dx"""'}, {}), "(options.android_sdk_tools, 'dx')", False, 'import os\n'), ((88, 4, 88, 78), 'logging.warning', 'logging.warning', ({(88, 20, 88, 77): '"""multidex cannot be enabled without --main-dex-list-path"""'}, {}), "('multidex cannot be enabled without --main-dex-list-path')", False, 'import logging\n'), ((94, 21, 94, 60), 'util.build_utils.ParseGnList', 'build_utils.ParseGnList', ({(94, 45, 94, 59): 'options.inputs'}, {}), '(options.inputs)', False, 'from util import build_utils\n'), ((97, 29, 97, 76), 'util.build_utils.ParseGnList', 'build_utils.ParseGnList', ({(97, 53, 97, 75): 'options.excluded_paths'}, {}), '(options.excluded_paths)', False, 'from util import build_utils\n'), ((147, 7, 147, 28), 'util.build_utils.TempDir', 'build_utils.TempDir', ({}, {}), '()', False, 'from util import build_utils\n'), ((178, 4, 178, 56), 'util.build_utils.CheckOutput', 'build_utils.CheckOutput', (), '', False, 'from util import build_utils\n'), ((234, 14, 234, 57), 'os.environ.get', 'os.environ.get', ({(234, 29, 234, 53): '"""DISABLE_INCREMENTAL_DX"""', (234, 55, 234, 56): '0'}, {}), "('DISABLE_INCREMENTAL_DX', 0)", False, 'import os\n'), ((49, 28, 49, 39), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((91, 4, 91, 80), 'logging.warning', 'logging.warning', ({(91, 20, 91, 79): '"""--main-dex-list-path is unused if multidex is not enabled"""'}, {}), "('--main-dex-list-path is unused if multidex is not enabled')", False, 'import logging\n'), ((187, 7, 187, 51), 'os.path.relpath', 'os.path.relpath', ({(187, 23, 187, 24): 'p', (187, 26, 187, 50): 'options.output_directory'}, {}), '(p, options.output_directory)', False, 'import os\n'), ((206, 13, 206, 57), 'os.path.relpath', 'os.path.relpath', ({(206, 29, 206, 30): 'p', (206, 32, 206, 56): 'options.output_directory'}, {}), '(p, options.output_directory)', False, 'import os\n'), ((173, 14, 174, 79), 'util.build_utils.ExtractAll', 'build_utils.ExtractAll', (), '', False, 'from util import build_utils\n')] |
Edwardhgj/meiduo | apps/views.py | 38796f5caf54676eb5620f50ade5474ee8700ad8 | from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.hashers import check_password, make_password
from django.views import View
from utils.response_code import RET, error_map
from rest_framework.views import APIView
from rest_framework.response import Response
from apps.serializers import *
from datetime import datetime
# Create your views here.
# Render the login page
def login(request):
    return render(request, 'admin/login.html')
# Handle the login form submission
import json
class SubmitLogin(View):
    def post(self, request): # reflection
mes = {}
name = request.POST.get('name')
passwd = request.POST.get('passwd')
# print(name,passwd)
if not all([name, passwd]):
mes['code'] = RET.DATAERR
mes['message'] = error_map[RET.DATAERR]
else:
            # look up the account by username
            admin = Sadmin.objects.filter(username=name).first()
            print(admin.username)
            if admin:
                # compare the password with the stored hash
                if check_password(passwd, admin.password):
                    # login succeeded
request.session['admin_id'] = admin.id
mes['code'] = RET.OK
mes['message'] = error_map[RET.OK]
else:
mes['code'] = RET.PWDERR
mes['message'] = error_map[RET.PWDERR]
else:
mes['code'] = RET.USERERR
mes['message'] = error_map[RET.USERERR]
print('sdfsdfssdf')
return HttpResponse(json.dumps(mes))
# Register a default admin account
def reg(request):
    password = make_password('123')
    admin = Sadmin(username='admin', password=password, is_admin=True)
    admin.save()
    return HttpResponse('ok')
# Render the admin home page
def index(request):
admin_id = request.session.get('admin_id')
if admin_id:
admin = Sadmin.objects.get(id=admin_id)
return render(request, 'admin/index.html', locals())
# Render the category list page
def showCate(request):
    return render(request, "admin/cate_list.html")
# Render the news list page
def showNews(request):
    return render(request, "admin/news_list.html")
# Render the banner (focus image) list page
def bannersCate(request):
    return render(request, "admin/point_list.html")
# Render the tag list page
def tagCate(request):
    return render(request, "admin/tag_list.html")
# Render the goods list page
def goodsCate(request):
    return render(request, "admin/goods_list.html")
# Render the news list page
def newsCate(request):
    return render(request, "admin/news_list.html")
# Render the banner (focus image) list page
def bannersCate(request):
    return render(request, "admin/point_list.html")
# Category list API
class CateList(APIView):
def get(self, request):
cate = Cate.objects.all()
c = CateModelSerializer(cate, many=True)
mes = {}
mes['code'] = RET.OK
mes['cateList'] = c.data
return Response(mes)
# Tag list API
class TagList(APIView):
def get(self, request):
tags = Tags.objects.all()
c = TagModelSerializer(tags, many=True)
mes = {}
mes['code'] = RET.OK
mes['tagList'] = c.data
return Response(mes)
# Goods list API
class GoodsList(APIView):
def get(self, request):
goods = Goods.objects.all()
g = GoodsModelSerializer(goods, many=True)
mes = {}
mes['code'] = RET.OK
mes['goodsList'] = g.data
return Response(mes)
# News list API
class NewsList(APIView):
def get(self, request):
news = News.objects.all()
n=NewsModelSerializer(news,many=True)
mes = {}
mes['code'] = RET.OK
mes['newsList'] = n.data
return Response(mes)
# Banner (focus image) list API
class BannersList(APIView):
def get(self, request):
banners = Banners.objects.all()
n=BannersModelSerializer(banners,many=True)
mes = {}
mes['code'] = RET.OK
mes['bannersList'] = n.data
return Response(mes)
# Render the add-category page
def addCate(request):
    # fetch the top-level categories
    cate = Cate.objects.filter(pid=0).all()
    id=request.GET.get('id')
    try:
        # editing an existing category
one_cate=Cate.objects.get(id=id)
print(one_cate)
except:
id=""
return render(request, "admin/add_cate.html", locals())
# Render the add-tag page
def addTag(request):
    # print('sdf')
    cate_list = Cate.objects.all()
    id=request.GET.get('id')
    try:
        # editing an existing tag
one_tag=Tags.objects.get(id=id)
except:
id=""
return render(request, "admin/add_tag.html", locals())
# Render the add-goods page
def addGoods(request):
    # print('ceshi')
    # fetch all goods
goods = Goods.objects.all()
cates = Cate.objects.all()
tag_list=Tags.objects.all()
id=request.GET.get('id')
print(id)
try:
one_goods=Goods.objects.get(id=id)
# print(one_goods)
except:
id=""
return render(request, "admin/add_goods.html", locals())
# Render the add-news page
def addNews(request):
    # print('ceshi')
    # fetch all news items
    news = News.objects.all()
    # an id must be passed when editing
id=request.GET.get('id')
print(id)
try:
one_news=News.objects.get(id=id)
# print(one_goods)
except:
id=""
return render(request, "admin/add_news.html", locals())
# Render the add-banner page
def addBanners(request):
    # print('ceshi')
    # fetch all banners
    banners = Banners.objects.all()
    # an id must be passed when editing
id=request.GET.get('id')
print(id)
try:
one_banner=Banners.objects.get(id=id)
# print(one_goods)
except:
id=""
return render(request, "admin/add_banners.html", locals())
from day01.settings import UPLOADFILES
import os
# Helper that saves an uploaded image
def upload_img(img):
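    # Save the uploaded file chunk-by-chunk under its original name and return a timestamped URL under /static/upload/.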
if img:
f = open(os.path.join(UPLOADFILES, '', img.name),'wb')
for chunk in img.chunks():
f.write(chunk)
f.close()
img=datetime.now().strftime("%Y-%m-%d-%H-%M-%S")+img.name
return 'http://127.0.0.1:8000/static/upload/'+img
return ' '
# Image upload endpoint for the rich-text editor
def addnews_upload(request):
files = request.FILES.get('file')
path = upload_img(files)
mes = {
'path': path,
'error': False
}
return HttpResponse(json.dumps(mes))
# API to add a category
class SubmitAddCate(APIView):
def post(self, request):
content = request.data
print(content)
        # handle the uploaded image
img = request.FILES.get('img')
path=upload_img(img)
content['picture']=path
try:
pid=int(content['pid'])
except:
pid=0
        # derive type and top_id from pid
if pid == 0:
type = 1
top_id = 0
else:
cate = Cate.objects.get(id=pid)
type = cate.type + 1
if cate.top_id==0:
top_id = cate.id
else:
top_id = cate.top_id
print(top_id,pid,type)
content['type'] = type
content['top_id'] = top_id
try:
id=int(content['id'])
except:
id=0
if id>0:
cc=Cate.objects.get(id=id)
c=CateSerializer(cc,data=content)
            # editing an existing category
else:
c = CateSerializer(data=content)
mes={}
if c.is_valid():
try:
c.save()
mes['code'] = RET.OK
except:
mes['code'] = RET.DATAERR
else:
print(c.errors)
mes['code'] = RET.DATAERR
return Response(mes)
# Delete a category
def deleteCate(request):
id=request.GET.get('id')
Cate.objects.get(id=id).delete()
return render(request, "admin/cate_list.html")
# API to add a tag
class SubmitAddTag(APIView):
def post(self, request):
content = request.data
print(content)
try:
            id = int(content['id'])  # extract the id
            print(id)
            print('got here')
except:
id = 0
if id > 0:
dd = Tags.objects.get(id=id)
d = TagSerializer(dd, data=content)
            # editing an existing tag
else:
d = TagSerializer(data=content)
mes = {}
if d.is_valid():
try:
d.save()
mes['code'] = RET.OK
except:
mes['code'] = RET.DATAERR
else:
mes['code'] = RET.DATAERR
return Response(mes)
# Delete a tag
def deleteTag(request):
    id=request.GET.get('id')
    Tags.objects.get(id=id).delete()
return render(request, "admin/tag_list.html")
# API to add a goods item
class SubmitAddGoods(APIView):
def post(self, request):
# print('eerw')
content = request.data
print(content)
print(content['id'])
print(content['cid_id'])
        # handle the uploaded image
img = request.FILES.get('img')
path=upload_img(img)
content['picture']=path
one_cate=Cate.objects.get(id=int(content['cid_id']))
print(one_cate)
content['top_id'] = one_cate.top_id
try:
            print('test code')
id=int(content['id'])
print(id)
except:
id=0
if id>0:
            # editing an existing goods item
instance = Goods.objects.get(id=id)
c = GoodsSerializer(instance, data=content)
else:
c = GoodsSerializer(data=content)
mes={}
if c.is_valid():
c.save()
mes['code'] = RET.OK
else:
print(c.errors)
mes['code'] = RET.DATAERR
return Response(mes)
# Delete a goods item
def deleteGoods(request):
id=request.GET.get('id')
Goods.objects.get(id=id).delete()
return render(request, "admin/goods_list.html")
# API to add a news item
class SubmitAddNews(APIView):
def post(self,request):
content=request.data
print(content)
try:
            id = int(content['id'])  # extract the id
except:
id = 0
if id > 0:
print(id)
nn = News.objects.get(id=id)
d = NewsSerializer(nn, data=content)
            # editing an existing news item
else:
d = NewsSerializer(data=content)
mes = {}
if d.is_valid():
try:
d.save()
mes['code'] = RET.OK
except:
mes['code'] = RET.DATAERR
else:
mes['code'] = RET.DATAERR
return Response(mes)
# Delete a news item
def deleteNews(request):
id=request.GET.get('id')
News.objects.get(id=id).delete()
return render(request,"admin/news_list.html")
# Delete a banner (focus image)
def deleteBanners(request):
id=request.GET.get('id')
Banners.objects.get(id=id).delete()
return render(request,"admin/point_list.html")
# API to add a banner (focus image)
class SubmitAddBanner(APIView):
def post(self,request):
content=request.data
print(content)
try:
            id = int(content['id'])  # extract the id
except:
id = 0
if id > 0:
print(id)
nn = Banners.objects.get(id=id)
d = BannersSerializer(nn, data=content)
            # editing an existing banner
else:
d = BannersSerializer(data=content)
mes = {}
if d.is_valid():
try:
d.save()
mes['code'] = RET.OK
except:
mes['code'] = RET.DATAERR
else:
mes['code'] = RET.DATAERR
return Response(mes)
def user_count(request):
return render(request,'admin/user_count.html')
| [((17, 11, 17, 46), 'django.shortcuts.render', 'render', ({(17, 18, 17, 25): 'request', (17, 27, 17, 45): '"""admin/login.html"""'}, {}), "(request, 'admin/login.html')", False, 'from django.shortcuts import render\n'), ((58, 15, 58, 35), 'django.contrib.auth.hashers.make_password', 'make_password', ({(58, 29, 58, 34): '"""123"""'}, {}), "('123')", False, 'from django.contrib.auth.hashers import check_password, make_password\n'), ((61, 11, 61, 29), 'django.http.HttpResponse', 'HttpResponse', ({(61, 24, 61, 28): '"""ok"""'}, {}), "('ok')", False, 'from django.http import HttpResponse\n'), ((74, 11, 74, 50), 'django.shortcuts.render', 'render', ({(74, 18, 74, 25): 'request', (74, 27, 74, 49): '"""admin/cate_list.html"""'}, {}), "(request, 'admin/cate_list.html')", False, 'from django.shortcuts import render\n'), ((79, 11, 79, 50), 'django.shortcuts.render', 'render', ({(79, 18, 79, 25): 'request', (79, 27, 79, 49): '"""admin/news_list.html"""'}, {}), "(request, 'admin/news_list.html')", False, 'from django.shortcuts import render\n'), ((84, 11, 84, 51), 'django.shortcuts.render', 'render', ({(84, 18, 84, 25): 'request', (84, 27, 84, 50): '"""admin/point_list.html"""'}, {}), "(request, 'admin/point_list.html')", False, 'from django.shortcuts import render\n'), ((88, 11, 88, 49), 'django.shortcuts.render', 'render', ({(88, 18, 88, 25): 'request', (88, 27, 88, 48): '"""admin/tag_list.html"""'}, {}), "(request, 'admin/tag_list.html')", False, 'from django.shortcuts import render\n'), ((92, 11, 92, 51), 'django.shortcuts.render', 'render', ({(92, 18, 92, 25): 'request', (92, 27, 92, 50): '"""admin/goods_list.html"""'}, {}), "(request, 'admin/goods_list.html')", False, 'from django.shortcuts import render\n'), ((96, 11, 96, 50), 'django.shortcuts.render', 'render', ({(96, 18, 96, 25): 'request', (96, 27, 96, 49): '"""admin/news_list.html"""'}, {}), "(request, 'admin/news_list.html')", False, 'from django.shortcuts import render\n'), ((100, 11, 100, 51), 'django.shortcuts.render', 'render', ({(100, 18, 100, 25): 'request', (100, 27, 100, 50): '"""admin/point_list.html"""'}, {}), "(request, 'admin/point_list.html')", False, 'from django.shortcuts import render\n'), ((328, 11, 328, 50), 'django.shortcuts.render', 'render', ({(328, 18, 328, 25): 'request', (328, 27, 328, 49): '"""admin/cate_list.html"""'}, {}), "(request, 'admin/cate_list.html')", False, 'from django.shortcuts import render\n'), ((379, 11, 379, 49), 'django.shortcuts.render', 'render', ({(379, 18, 379, 25): 'request', (379, 27, 379, 48): '"""admin/tag_list.html"""'}, {}), "(request, 'admin/tag_list.html')", False, 'from django.shortcuts import render\n'), ((428, 11, 428, 51), 'django.shortcuts.render', 'render', ({(428, 18, 428, 25): 'request', (428, 27, 428, 50): '"""admin/goods_list.html"""'}, {}), "(request, 'admin/goods_list.html')", False, 'from django.shortcuts import render\n'), ((469, 11, 469, 49), 'django.shortcuts.render', 'render', ({(469, 18, 469, 25): 'request', (469, 26, 469, 48): '"""admin/news_list.html"""'}, {}), "(request, 'admin/news_list.html')", False, 'from django.shortcuts import render\n'), ((476, 11, 476, 50), 'django.shortcuts.render', 'render', ({(476, 18, 476, 25): 'request', (476, 26, 476, 49): '"""admin/point_list.html"""'}, {}), "(request, 'admin/point_list.html')", False, 'from django.shortcuts import render\n'), ((520, 11, 520, 50), 'django.shortcuts.render', 'render', ({(520, 18, 520, 25): 'request', (520, 26, 520, 49): '"""admin/user_count.html"""'}, {}), "(request, 'admin/user_count.html')", 
False, 'from django.shortcuts import render\n'), ((110, 15, 110, 28), 'rest_framework.response.Response', 'Response', ({(110, 24, 110, 27): 'mes'}, {}), '(mes)', False, 'from rest_framework.response import Response\n'), ((122, 15, 122, 28), 'rest_framework.response.Response', 'Response', ({(122, 24, 122, 27): 'mes'}, {}), '(mes)', False, 'from rest_framework.response import Response\n'), ((134, 15, 134, 28), 'rest_framework.response.Response', 'Response', ({(134, 24, 134, 27): 'mes'}, {}), '(mes)', False, 'from rest_framework.response import Response\n'), ((145, 15, 145, 28), 'rest_framework.response.Response', 'Response', ({(145, 24, 145, 27): 'mes'}, {}), '(mes)', False, 'from rest_framework.response import Response\n'), ((157, 15, 157, 28), 'rest_framework.response.Response', 'Response', ({(157, 24, 157, 27): 'mes'}, {}), '(mes)', False, 'from rest_framework.response import Response\n'), ((267, 24, 267, 39), 'json.dumps', 'json.dumps', ({(267, 35, 267, 38): 'mes'}, {}), '(mes)', False, 'import json\n'), ((322, 15, 322, 28), 'rest_framework.response.Response', 'Response', ({(322, 24, 322, 27): 'mes'}, {}), '(mes)', False, 'from rest_framework.response import Response\n'), ((370, 15, 370, 28), 'rest_framework.response.Response', 'Response', ({(370, 24, 370, 27): 'mes'}, {}), '(mes)', False, 'from rest_framework.response import Response\n'), ((422, 15, 422, 28), 'rest_framework.response.Response', 'Response', ({(422, 24, 422, 27): 'mes'}, {}), '(mes)', False, 'from rest_framework.response import Response\n'), ((463, 15, 463, 28), 'rest_framework.response.Response', 'Response', ({(463, 24, 463, 27): 'mes'}, {}), '(mes)', False, 'from rest_framework.response import Response\n'), ((514, 15, 514, 28), 'rest_framework.response.Response', 'Response', ({(514, 24, 514, 27): 'mes'}, {}), '(mes)', False, 'from rest_framework.response import Response\n'), ((53, 28, 53, 43), 'json.dumps', 'json.dumps', ({(53, 39, 53, 42): 'mes'}, {}), '(mes)', False, 'import json\n'), ((250, 17, 250, 56), 'os.path.join', 'os.path.join', ({(250, 30, 250, 41): 'UPLOADFILES', (250, 43, 250, 45): '""""""', (250, 47, 250, 55): 'img.name'}, {}), "(UPLOADFILES, '', img.name)", False, 'import os\n'), ((41, 19, 41, 57), 'django.contrib.auth.hashers.check_password', 'check_password', ({(41, 34, 41, 40): 'passwd', (41, 42, 41, 56): 'admin.password'}, {}), '(passwd, admin.password)', False, 'from django.contrib.auth.hashers import check_password, make_password\n'), ((254, 12, 254, 26), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n')] |
realtwister/LearnedEvolution | learnedevolution/targets/covariance/amalgam_covariance.py | 2ec49b50a49acae9693cfb05ac114dfbcc4aa337 | import numpy as np;
from .covariance_target import CovarianceTarget;
class AMaLGaMCovariance(CovarianceTarget):
_API=2.
def __init__(self,
theta_SDR = 1.,
eta_DEC = 0.9,
alpha_Sigma = [-1.1,1.2,1.6],
NIS_MAX = 25,
tau = 0.35,
epsilon = 1e-30,
condition_number_epsilon = 1e6):
self.epsilon = epsilon;
self.theta_SDR = theta_SDR;
self.eta_DEC = eta_DEC;
self.eta_INC = 1./eta_DEC;
self.NIS_MAX = NIS_MAX;
self.alpha_Sigma = alpha_Sigma;
self.tau = tau;
self.condition_number_epsilon = condition_number_epsilon;
def _reset(self, initial_mean, initial_covariance):
self.mean = initial_mean;
self.old_mean = initial_mean;
self.covariance = initial_covariance;
self.d = len(initial_mean);
self.Sigma = initial_covariance;
self.c_multiplier = 1.;
self.NIS = 0;
self.t = 0;
self.best_f = -float('inf');
def _update_mean(self, mean):
self.old_mean = self.mean;
self.mean = mean;
def _calculate(self, population):
self.update_matrix(population);
self.update_multiplier(population);
self.t += 1;
self.best_f = max(self.best_f, np.max(population.fitness));
new_covariance = self.Sigma*self.c_multiplier;
u,s,_ = np.linalg.svd(new_covariance);
s_max = np.max(s)
s_max = np.clip(s_max, self.epsilon*self.condition_number_epsilon, 1e3);
s = np.clip(s, s_max/self.condition_number_epsilon, s_max);
new_covariance = u*[email protected]
self.covariance = new_covariance
return self.covariance;
def update_matrix(self, population):
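        # Blend Sigma toward the averaged outer product of (individual - previous mean) over the top-tau fraction, with rate eta_Sigma.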
F = population.fitness;
sel_idx = F.argsort()[-np.ceil(self.tau*len(population)).astype(int):][::-1]
alpha = self.alpha_Sigma;
eta_Sigma = 1.-np.exp(alpha[0]*len(sel_idx)**alpha[1]/self.d**alpha[2]);
current_update = np.zeros((self.d,self.d));
selection = population.population[sel_idx];
for individual in selection:
delta = individual-self.old_mean;
current_update += np.outer(delta,delta)
current_update /= (selection.shape[0]);
self.Sigma *= (1-eta_Sigma);
self.Sigma += eta_Sigma*current_update;
# We need to ensure the condition number is OK to avoid singular matrix.
u,s,_ = np.linalg.svd(self.Sigma);
s_max = np.max(s)
s_max = np.clip(s_max, self.epsilon*self.condition_number_epsilon, None);
s = np.clip(s, s_max/self.condition_number_epsilon, s_max);
self.Sigma = u*[email protected]
def update_multiplier(self, population):
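        # On improvement, reset the stagnation counter and possibly grow the multiplier (SDR); otherwise shrink it while it exceeds 1 or after NIS_MAX stagnant generations.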
if np.any(population.fitness>self.best_f):
self.NIS = 0;
self.c_multiplier = max(1., self.c_multiplier);
self.SDR(population);
else:
if self.c_multiplier <= 1:
self.NIS += 1;
if self.c_multiplier > 1 or self.NIS >= self.NIS_MAX:
self.c_multiplier *= self.eta_DEC;
if self.c_multiplier < 1 and self.NIS < self.NIS_MAX:
self.c_multiplier = 1;
def SDR(self, population):
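        # Standard-deviation ratio: if improving samples lie more than theta_SDR standard deviations from the mean, enlarge the multiplier.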
x_avg = np.mean(population.population[population.fitness>self.best_f], axis=0);
delta = np.abs(self.mean-x_avg);
variances = np.abs(np.diag(self.covariance));
if np.any(delta/np.sqrt(variances)>self.theta_SDR):
self.c_multiplier *= self.eta_INC;
def _calculate_deterministic(self,population):
return self._calculate(population);
def _terminating(self, population):
pass;
@classmethod
def _get_kwargs(cls, config, key = ""):
cls._config_required(
'theta_SDR',
'eta_DEC',
'alpha_Sigma',
'NIS_MAX',
'tau',
'epsilon',
'condition_number_epsilon'
)
cls._config_defaults(
theta_SDR = 1.,
eta_DEC = 0.9,
alpha_Sigma = [-1.1,1.2,1.6],
NIS_MAX = 25,
tau = 0.35,
epsilon = 1e-30,
condition_number_epsilon = 1e6
)
return super()._get_kwargs(config, key = key);
| [((53, 16, 53, 45), 'numpy.linalg.svd', 'np.linalg.svd', ({(53, 30, 53, 44): 'new_covariance'}, {}), '(new_covariance)', True, 'import numpy as np\n'), ((54, 17, 54, 26), 'numpy.max', 'np.max', ({(54, 24, 54, 25): 's'}, {}), '(s)', True, 'import numpy as np\n'), ((55, 17, 55, 80), 'numpy.clip', 'np.clip', ({(55, 25, 55, 30): 's_max', (55, 32, 55, 74): 'self.epsilon * self.condition_number_epsilon', (55, 76, 55, 79): '1000.0'}, {}), '(s_max, self.epsilon * self.condition_number_epsilon, 1000.0)', True, 'import numpy as np\n'), ((56, 12, 56, 66), 'numpy.clip', 'np.clip', ({(56, 20, 56, 21): 's', (56, 23, 56, 58): 's_max / self.condition_number_epsilon', (56, 60, 56, 65): 's_max'}, {}), '(s, s_max / self.condition_number_epsilon, s_max)', True, 'import numpy as np\n'), ((70, 25, 70, 50), 'numpy.zeros', 'np.zeros', ({(70, 34, 70, 49): '(self.d, self.d)'}, {}), '((self.d, self.d))', True, 'import numpy as np\n'), ((81, 16, 81, 41), 'numpy.linalg.svd', 'np.linalg.svd', ({(81, 30, 81, 40): 'self.Sigma'}, {}), '(self.Sigma)', True, 'import numpy as np\n'), ((82, 17, 82, 26), 'numpy.max', 'np.max', ({(82, 24, 82, 25): 's'}, {}), '(s)', True, 'import numpy as np\n'), ((83, 17, 83, 81), 'numpy.clip', 'np.clip', ({(83, 25, 83, 30): 's_max', (83, 32, 83, 74): 'self.epsilon * self.condition_number_epsilon', (83, 76, 83, 80): 'None'}, {}), '(s_max, self.epsilon * self.condition_number_epsilon, None)', True, 'import numpy as np\n'), ((84, 12, 84, 66), 'numpy.clip', 'np.clip', ({(84, 20, 84, 21): 's', (84, 23, 84, 58): 's_max / self.condition_number_epsilon', (84, 60, 84, 65): 's_max'}, {}), '(s, s_max / self.condition_number_epsilon, s_max)', True, 'import numpy as np\n'), ((88, 11, 88, 49), 'numpy.any', 'np.any', ({(88, 18, 88, 48): '(population.fitness > self.best_f)'}, {}), '(population.fitness > self.best_f)', True, 'import numpy as np\n'), ((102, 16, 102, 86), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((103, 16, 103, 39), 'numpy.abs', 'np.abs', ({(103, 23, 103, 38): 'self.mean - x_avg'}, {}), '(self.mean - x_avg)', True, 'import numpy as np\n'), ((49, 39, 49, 65), 'numpy.max', 'np.max', ({(49, 46, 49, 64): 'population.fitness'}, {}), '(population.fitness)', True, 'import numpy as np\n'), ((74, 30, 74, 51), 'numpy.outer', 'np.outer', ({(74, 39, 74, 44): 'delta', (74, 45, 74, 50): 'delta'}, {}), '(delta, delta)', True, 'import numpy as np\n'), ((104, 27, 104, 51), 'numpy.diag', 'np.diag', ({(104, 35, 104, 50): 'self.covariance'}, {}), '(self.covariance)', True, 'import numpy as np\n'), ((105, 24, 105, 42), 'numpy.sqrt', 'np.sqrt', ({(105, 32, 105, 41): 'variances'}, {}), '(variances)', True, 'import numpy as np\n')] |
terrorizer1980/fs-admin | binding.gyp | e21216161c56def4ca76a3ef4e71844e2ba26074 | {
'target_defaults': {
'win_delay_load_hook': 'false',
'conditions': [
['OS=="win"', {
'msvs_disabled_warnings': [
4530, # C++ exception handler used, but unwind semantics are not enabled
4506, # no definition for inline function
],
}],
],
},
'targets': [
{
'target_name': 'fs_admin',
'defines': [
"NAPI_VERSION=<(napi_build_version)",
],
'cflags!': [ '-fno-exceptions' ],
'cflags_cc!': [ '-fno-exceptions' ],
'xcode_settings': { 'GCC_ENABLE_CPP_EXCEPTIONS': 'YES',
'CLANG_CXX_LIBRARY': 'libc++',
'MACOSX_DEPLOYMENT_TARGET': '10.7',
},
'msvs_settings': {
'VCCLCompilerTool': { 'ExceptionHandling': 1 },
},
'sources': [
'src/main.cc',
],
'include_dirs': [
'<!(node -p "require(\'node-addon-api\').include_dir")',
],
'conditions': [
['OS=="win"', {
'sources': [
'src/fs-admin-win.cc',
],
'libraries': [
'-lole32.lib',
'-lshell32.lib',
],
}],
['OS=="mac"', {
'sources': [
'src/fs-admin-darwin.cc',
],
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/Security.framework',
],
}],
['OS=="linux"', {
'sources': [
'src/fs-admin-linux.cc',
],
}],
],
}
]
}
| [] |
emoritzx/botw-tracker | src/botwtracker/settings.py | 9c096e62825f2ba2f0f66167b646eaf5a1b5b50a | """Django settings for botwtracker project.
Copyright (c) 2017, Evan Moritz.
botw-tracker is an open source software project released under the MIT License.
See the accompanying LICENSE file for terms.
"""
import os
from .config_local import *
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATA_DIR = os.path.join(BASE_DIR, '..', 'data')
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'quests.apps.QuestsConfig',
'user.apps.UserConfig',
]
if USE_SIGNUP:
INSTALLED_APPS.append('signup.apps.SignupConfig')
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'botwtracker.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'botwtracker.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(DATA_DIR, 'sqlite3.db'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "..", "static")
]
| [((14, 11, 14, 47), 'os.path.join', 'os.path.join', ({(14, 24, 14, 32): 'BASE_DIR', (14, 34, 14, 38): '""".."""', (14, 40, 14, 46): '"""data"""'}, {}), "(BASE_DIR, '..', 'data')", False, 'import os\n'), ((110, 4, 110, 42), 'os.path.join', 'os.path.join', ({(110, 17, 110, 25): 'BASE_DIR', (110, 27, 110, 31): '""".."""', (110, 33, 110, 41): '"""static"""'}, {}), "(BASE_DIR, '..', 'static')", False, 'import os\n'), ((13, 43, 13, 68), 'os.path.abspath', 'os.path.abspath', ({(13, 59, 13, 67): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((71, 16, 71, 52), 'os.path.join', 'os.path.join', ({(71, 29, 71, 37): 'DATA_DIR', (71, 39, 71, 51): '"""sqlite3.db"""'}, {}), "(DATA_DIR, 'sqlite3.db')", False, 'import os\n'), ((48, 12, 48, 47), 'os.path.join', 'os.path.join', ({(48, 25, 48, 33): 'BASE_DIR', (48, 35, 48, 46): '"""templates"""'}, {}), "(BASE_DIR, 'templates')", False, 'import os\n')] |
Geo-Gabriel/eccomerce_nestle_mongodb | app/domains/users/views.py | 97bf5dbdc7bee20a9ca2f7cad98afc6e8f11bd3e | from flask import Blueprint, request, jsonify
from app.domains.users.actions import get_all_users, insert_user, get_user_by_id, update_user, delete_user
app_users = Blueprint('app.users', __name__)
@app_users.route('/users', methods=['GET'])
def get_users():
return jsonify([user.serialize() for user in get_all_users()]), 200
@app_users.route('/users/<id>', methods=["GET"])
def get_by_id(id: str):
user = get_user_by_id(id_user=id)
return jsonify(user.serialize()), 200
@app_users.route('/users', methods=["POST"])
def post_user():
payload = request.get_json()
user = insert_user(payload)
return jsonify(user.serialize()), 201
@app_users.route('/users/<id>', methods=["PUT"])
def update(id: str):
payload = request.get_json()
user = update_user(id_user=id, data=payload)
return jsonify(user.serialize()), 200
@app_users.route('/users/<id>', methods=["DELETE"])
def delete(id: str):
delete_user(id_user=id)
return jsonify({"message": "user deleted"}), 200
| [((5, 12, 5, 44), 'flask.Blueprint', 'Blueprint', ({(5, 22, 5, 33): '"""app.users"""', (5, 35, 5, 43): '__name__'}, {}), "('app.users', __name__)", False, 'from flask import Blueprint, request, jsonify\n'), ((15, 11, 15, 37), 'app.domains.users.actions.get_user_by_id', 'get_user_by_id', (), '', False, 'from app.domains.users.actions import get_all_users, insert_user, get_user_by_id, update_user, delete_user\n'), ((21, 14, 21, 32), 'flask.request.get_json', 'request.get_json', ({}, {}), '()', False, 'from flask import Blueprint, request, jsonify\n'), ((22, 11, 22, 31), 'app.domains.users.actions.insert_user', 'insert_user', ({(22, 23, 22, 30): 'payload'}, {}), '(payload)', False, 'from app.domains.users.actions import get_all_users, insert_user, get_user_by_id, update_user, delete_user\n'), ((28, 14, 28, 32), 'flask.request.get_json', 'request.get_json', ({}, {}), '()', False, 'from flask import Blueprint, request, jsonify\n'), ((29, 11, 29, 48), 'app.domains.users.actions.update_user', 'update_user', (), '', False, 'from app.domains.users.actions import get_all_users, insert_user, get_user_by_id, update_user, delete_user\n'), ((35, 4, 35, 27), 'app.domains.users.actions.delete_user', 'delete_user', (), '', False, 'from app.domains.users.actions import get_all_users, insert_user, get_user_by_id, update_user, delete_user\n'), ((36, 11, 36, 47), 'flask.jsonify', 'jsonify', ({(36, 19, 36, 46): "{'message': 'user deleted'}"}, {}), "({'message': 'user deleted'})", False, 'from flask import Blueprint, request, jsonify\n'), ((10, 49, 10, 64), 'app.domains.users.actions.get_all_users', 'get_all_users', ({}, {}), '()', False, 'from app.domains.users.actions import get_all_users, insert_user, get_user_by_id, update_user, delete_user\n')] |
PerryXDeng/project_punyslayer | legacy_code/tf_cnn_siamese/model.py | 79529b020ca56a5473dbb85ac7155bc03dc5023a | import legacy_code.tf_cnn_siamese.configurations as conf
import tensorflow as tf
import numpy as np
def construct_cnn(x, conv_weights, conv_biases, fc_weights, fc_biases,
dropout = False):
"""
constructs the convolution graph for one image
:param x: input node
:param conv_weights: convolution weights
:param conv_biases: relu biases for each convolution
:param fc_weights: fully connected weights, only one set should be used here
:param fc_biases: fully connected biases, only one set should be used here
:param dropout: whether to add a dropout layer for the fully connected layer
:return: output node
"""
k = conf.NUM_POOL
for i in range(conf.NUM_CONVS):
x = tf.nn.conv2d(x, conv_weights[i], strides=[1, 1, 1, 1], padding='SAME',
data_format=conf.DATA_FORMAT)
x = tf.nn.relu(tf.nn.bias_add(x, conv_biases[i],
data_format=conf.DATA_FORMAT))
if k > 0:
      x = tf.nn.max_pool(x, ksize=conf.POOL_KDIM, strides=conf.POOL_KDIM,
padding='VALID', data_format=conf.DATA_FORMAT)
k -= 1
# Reshape the feature map cuboids into vectors for fc layers
features_shape = x.get_shape().as_list()
n = features_shape[0]
m = features_shape[1] * features_shape[2] * features_shape[3]
features = tf.reshape(x, [n, m])
# last fc_weights determine output dimensions
fc = tf.nn.sigmoid(tf.matmul(features, fc_weights[0]) + fc_biases[0])
# for actual training
if dropout:
fc = tf.nn.dropout(fc, conf.DROP_RATE)
return fc
def construct_logits_model(x_1, x_2, conv_weights, conv_biases, fc_weights,
fc_biases, dropout=False):
"""
constructs the logit node before the final sigmoid activation
:param x_1: input image node 1
:param x_2: input image node 2
:param conv_weights: nodes for convolution weights
:param conv_biases: nodes for convolution relu biases
:param fc_weights: nodes for fully connected weights
:param fc_biases: nodes for fully connected biases
:param dropout: whether to include dropout layers
:return: logit node
"""
with tf.name_scope("twin_1"):
twin_1 = construct_cnn(x_1, conv_weights, conv_biases,
fc_weights, fc_biases, dropout)
with tf.name_scope("twin_2"):
twin_2 = construct_cnn(x_2, conv_weights, conv_biases,
fc_weights, fc_biases, dropout)
# logits on squared difference
sq_diff = tf.squared_difference(twin_1, twin_2)
logits = tf.matmul(sq_diff, fc_weights[1]) + fc_biases[1]
return logits
def construct_full_model(x_1, x_2, conv_weights, conv_biases,fc_weights,
fc_biases):
"""
constructs the graph for the neural network without loss node or optimizer
:param x_1: input image node 1
:param x_2: input image node 2
:param conv_weights: nodes for convolution weights
:param conv_biases: nodes for convolution relu biases
:param fc_weights: nodes for fully connected weights
:param fc_biases: nodes for fully connected biases
:return: sigmoid output node
"""
logits = construct_logits_model(x_1, x_2, conv_weights, conv_biases,
fc_weights, fc_biases, dropout=False)
return tf.nn.sigmoid(logits)
def construct_loss_optimizer(x_1, x_2, labels, conv_weights, conv_biases,
fc_weights, fc_biases, dropout=False,
lagrange=False):
"""
constructs the neural network graph with the loss and optimizer node
:param x_1: input image node 1
:param x_2: input image node 2
:param labels: expected output
:param conv_weights: nodes for convolution weights
:param conv_biases: nodes for convolution relu biases
:param fc_weights: nodes for fully connected weights
:param fc_biases: nodes for fully connected biases
:param dropout: whether to use dropout
:param lagrange: whether to apply constraints
:return: the node for the optimizer as well as the loss
"""
logits = construct_logits_model(x_1, x_2, conv_weights, conv_biases,
fc_weights, fc_biases, dropout)
# cross entropy loss on sigmoids of joined output and labels
loss_vec = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels,
logits=logits)
loss = tf.reduce_mean(loss_vec)
if lagrange:
# constraints on sigmoid layers
regularizers = (tf.nn.l2_loss(fc_weights[0]) + tf.nn.l2_loss(fc_biases[0]) +
tf.nn.l2_loss(fc_weights[1]) + tf.nn.l2_loss(fc_biases[1]))
loss += conf.LAMBDA * regularizers
# setting up the optimization
batch = tf.Variable(0, dtype=conf.DTYPE)
# vanilla momentum optimizer
# accumulation = momentum * accumulation + gradient
# every epoch: variable -= learning_rate * accumulation
# batch_total = labels.shape[0]
# learning_rate = tf.train.exponential_decay(
# conf.BASE_LEARNING_RATE,
# batch * conf.BATCH_SIZE, # Current index into the dataset.
# batch_total,
# conf.DECAY_RATE, # Decay rate.
# staircase=True)
# trainer = tf.train.MomentumOptimizer(learning_rate, conf.MOMENTUM)\
# .minimize(loss, global_step=batch)
# adaptive momentum estimation optimizer
# default params: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08
trainer = tf.train.AdamOptimizer().minimize(loss, global_step=batch)
return trainer, loss
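# Hedged wiring sketch (TF1 style) showing how the pieces above fit together.
# The 28x28 spatial size is a placeholder and assumes an NHWC conf.DATA_FORMAT;
# the real dimensions come from legacy_code/tf_cnn_siamese/configurations.py.
def example_training_step():
  conv_w, conv_b, fc_w, fc_b = initialize_weights()
  x_1 = tf.placeholder(conf.DTYPE, shape=(conf.BATCH_SIZE, 28, 28, conf.NUM_CHANNELS))
  x_2 = tf.placeholder(conf.DTYPE, shape=(conf.BATCH_SIZE, 28, 28, conf.NUM_CHANNELS))
  labels = tf.placeholder(conf.DTYPE, shape=(conf.BATCH_SIZE, 1))
  trainer, loss = construct_loss_optimizer(x_1, x_2, labels, conv_w, conv_b,
                                           fc_w, fc_b, dropout=True)
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # one optimization step would be:
    # sess.run([trainer, loss], feed_dict={x_1: ..., x_2: ..., labels: ...})
  return trainer, loss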
def construct_joined_model(twin_1, twin_2, fc_weights, fc_biases):
"""
constructs joined model for two sets of extracted features
:param twin_1: features node extracted from first image
:param twin_2: features node extracted from second image
:param fc_weights: nodes for fully connected weights
:param fc_biases: nodes for fully connected biases
:return: logit node
"""
# logits on squared difference
sq_diff = tf.squared_difference(twin_1, twin_2)
logits = tf.matmul(sq_diff, fc_weights[1]) + fc_biases[1]
return tf.nn.sigmoid(logits)
def initialize_weights():
"""
initializes the variable tensors to be trained in the neural network, decides
network dimensions
:return: nodes for the variables
"""
# twin network convolution and pooling variables
conv_weights = []
conv_biases = []
fc_weights = []
fc_biases = []
for i in range(conf.NUM_CONVS):
if i == 0:
inp = conf.NUM_CHANNELS
else:
inp = conf.NUM_FILTERS[i - 1]
out = conf.NUM_FILTERS[i]
conv_dim = [conf.FILTER_LEN, conf.FILTER_LEN, inp, out]
weight_name = "twin_conv" + str(i + 1) + "_weights"
bias_name = "twin_conv" + str(i + 1) + "_biases"
conv_weights.append(tf.Variable(tf.truncated_normal(conv_dim, stddev=0.1,
seed=conf.SEED, dtype=conf.DTYPE),
name=weight_name))
conv_biases.append(tf.Variable(tf.zeros([out], dtype=conf.DTYPE),
name=bias_name))
  # twin network fully connected variables
inp = conf.FEATURE_MAP_SIZE
out = conf.NUM_FC_NEURONS
fc_weights.append(tf.Variable(tf.truncated_normal([inp, out], stddev=0.1,
seed=conf.SEED, dtype=conf.DTYPE),
name="twin_fc_weights"))
fc_biases.append(tf.Variable(tf.constant(0.1, shape=[out], dtype=conf.DTYPE),
name="twin_fc_biases"))
# joined network fully connected variables
inp = conf.NUM_FC_NEURONS
out = 1
fc_weights.append(tf.Variable(tf.truncated_normal([inp, out], stddev=0.1,
seed=conf.SEED, dtype=conf.DTYPE),
name="joined_fc_weights"))
fc_biases.append(tf.Variable(tf.constant(0.1, shape=[out], dtype=conf.DTYPE),
name="joined_fc_biases"))
return conv_weights, conv_biases, fc_weights, fc_biases
def num_params():
"""
calculates the number of parameters in the model
:return: m, number of parameters
"""
m = 0
for i in range(conf.NUM_CONVS):
if i == 0:
inp = conf.NUM_CHANNELS
else:
inp = conf.NUM_FILTERS[i - 1]
out = conf.NUM_FILTERS[i]
conv_dim = [conf.FILTER_LEN, conf.FILTER_LEN, inp, out]
m += np.prod(conv_dim) + np.prod(out)
inp = conf.FEATURE_MAP_SIZE
out = conf.NUM_FC_NEURONS
m += inp * out + out
inp = conf.NUM_FC_NEURONS
out = 1
m += inp * out + out
return m
if __name__ == "__main__":
print("Number of Parameters: " + str(num_params()))
| [((32, 13, 32, 34), 'tensorflow.reshape', 'tf.reshape', ({(32, 24, 32, 25): 'x', (32, 27, 32, 33): '[n, m]'}, {}), '(x, [n, m])', True, 'import tensorflow as tf\n'), ((61, 12, 61, 49), 'tensorflow.squared_difference', 'tf.squared_difference', ({(61, 34, 61, 40): 'twin_1', (61, 42, 61, 48): 'twin_2'}, {}), '(twin_1, twin_2)', True, 'import tensorflow as tf\n'), ((80, 9, 80, 30), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', ({(80, 23, 80, 29): 'logits'}, {}), '(logits)', True, 'import tensorflow as tf\n'), ((102, 13, 103, 67), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', (), '', True, 'import tensorflow as tf\n'), ((104, 9, 104, 33), 'tensorflow.reduce_mean', 'tf.reduce_mean', ({(104, 24, 104, 32): 'loss_vec'}, {}), '(loss_vec)', True, 'import tensorflow as tf\n'), ((111, 10, 111, 42), 'tensorflow.Variable', 'tf.Variable', (), '', True, 'import tensorflow as tf\n'), ((142, 12, 142, 49), 'tensorflow.squared_difference', 'tf.squared_difference', ({(142, 34, 142, 40): 'twin_1', (142, 42, 142, 48): 'twin_2'}, {}), '(twin_1, twin_2)', True, 'import tensorflow as tf\n'), ((144, 9, 144, 30), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', ({(144, 23, 144, 29): 'logits'}, {}), '(logits)', True, 'import tensorflow as tf\n'), ((20, 8, 21, 50), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (), '', True, 'import tensorflow as tf\n'), ((37, 9, 37, 42), 'tensorflow.nn.dropout', 'tf.nn.dropout', ({(37, 23, 37, 25): 'fc', (37, 27, 37, 41): 'conf.DROP_RATE'}, {}), '(fc, conf.DROP_RATE)', True, 'import tensorflow as tf\n'), ((54, 7, 54, 30), 'tensorflow.name_scope', 'tf.name_scope', ({(54, 21, 54, 29): '"""twin_1"""'}, {}), "('twin_1')", True, 'import tensorflow as tf\n'), ((57, 7, 57, 30), 'tensorflow.name_scope', 'tf.name_scope', ({(57, 21, 57, 29): '"""twin_2"""'}, {}), "('twin_2')", True, 'import tensorflow as tf\n'), ((62, 11, 62, 44), 'tensorflow.matmul', 'tf.matmul', ({(62, 21, 62, 28): 'sq_diff', (62, 30, 62, 43): 'fc_weights[1]'}, {}), '(sq_diff, fc_weights[1])', True, 'import tensorflow as tf\n'), ((143, 11, 143, 44), 'tensorflow.matmul', 'tf.matmul', ({(143, 21, 143, 28): 'sq_diff', (143, 30, 143, 43): 'fc_weights[1]'}, {}), '(sq_diff, fc_weights[1])', True, 'import tensorflow as tf\n'), ((22, 19, 23, 63), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (), '', True, 'import tensorflow as tf\n'), ((25, 10, 26, 70), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (), '', True, 'import tensorflow as tf\n'), ((34, 21, 34, 55), 'tensorflow.matmul', 'tf.matmul', ({(34, 31, 34, 39): 'features', (34, 41, 34, 54): 'fc_weights[0]'}, {}), '(features, fc_weights[0])', True, 'import tensorflow as tf\n'), ((108, 51, 108, 78), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', ({(108, 65, 108, 77): 'fc_biases[1]'}, {}), '(fc_biases[1])', True, 'import tensorflow as tf\n'), ((128, 12, 128, 36), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((175, 32, 176, 65), 'tensorflow.truncated_normal', 'tf.truncated_normal', (), '', True, 'import tensorflow as tf\n'), ((178, 31, 178, 78), 'tensorflow.constant', 'tf.constant', (), '', True, 'import tensorflow as tf\n'), ((183, 32, 184, 65), 'tensorflow.truncated_normal', 'tf.truncated_normal', (), '', True, 'import tensorflow as tf\n'), ((186, 31, 186, 78), 'tensorflow.constant', 'tf.constant', (), '', True, 'import tensorflow as tf\n'), ((204, 9, 204, 26), 'numpy.prod', 'np.prod', ({(204, 17, 204, 25): 'conv_dim'}, {}), '(conv_dim)', True, 'import numpy as np\n'), ((204, 29, 204, 41), 
'numpy.prod', 'np.prod', ({(204, 37, 204, 40): 'out'}, {}), '(out)', True, 'import numpy as np\n'), ((108, 20, 108, 48), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', ({(108, 34, 108, 47): 'fc_weights[1]'}, {}), '(fc_weights[1])', True, 'import tensorflow as tf\n'), ((167, 36, 168, 69), 'tensorflow.truncated_normal', 'tf.truncated_normal', (), '', True, 'import tensorflow as tf\n'), ((170, 35, 170, 68), 'tensorflow.zeros', 'tf.zeros', (), '', True, 'import tensorflow as tf\n'), ((107, 20, 107, 48), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', ({(107, 34, 107, 47): 'fc_weights[0]'}, {}), '(fc_weights[0])', True, 'import tensorflow as tf\n'), ((107, 51, 107, 78), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', ({(107, 65, 107, 77): 'fc_biases[0]'}, {}), '(fc_biases[0])', True, 'import tensorflow as tf\n')] |
FingerCrunch/scrapy | tests/test_utils_log.py | 3225de725720bba246ba8c9845fe4b84bc0c82e7 | import sys
import logging
import unittest
from testfixtures import LogCapture
from twisted.python.failure import Failure
from scrapy.utils.log import (failure_to_exc_info, TopLevelFormatter,
LogCounterHandler, StreamLogger)
from scrapy.utils.test import get_crawler
from scrapy.extensions import telnet
class FailureToExcInfoTest(unittest.TestCase):
def test_failure(self):
try:
0 / 0
except ZeroDivisionError:
exc_info = sys.exc_info()
failure = Failure()
self.assertTupleEqual(exc_info, failure_to_exc_info(failure))
def test_non_failure(self):
self.assertIsNone(failure_to_exc_info('test'))
class TopLevelFormatterTest(unittest.TestCase):
def setUp(self):
self.handler = LogCapture()
self.handler.addFilter(TopLevelFormatter(['test']))
def test_top_level_logger(self):
logger = logging.getLogger('test')
with self.handler as log:
logger.warning('test log msg')
log.check(('test', 'WARNING', 'test log msg'))
def test_children_logger(self):
logger = logging.getLogger('test.test1')
with self.handler as log:
logger.warning('test log msg')
log.check(('test', 'WARNING', 'test log msg'))
def test_overlapping_name_logger(self):
logger = logging.getLogger('test2')
with self.handler as log:
logger.warning('test log msg')
log.check(('test2', 'WARNING', 'test log msg'))
def test_different_name_logger(self):
logger = logging.getLogger('different')
with self.handler as log:
logger.warning('test log msg')
log.check(('different', 'WARNING', 'test log msg'))
class LogCounterHandlerTest(unittest.TestCase):
def setUp(self):
settings = {'LOG_LEVEL': 'WARNING'}
if not telnet.TWISTED_CONCH_AVAILABLE:
# disable it to avoid the extra warning
settings['TELNETCONSOLE_ENABLED'] = False
self.logger = logging.getLogger('test')
self.logger.setLevel(logging.NOTSET)
self.logger.propagate = False
self.crawler = get_crawler(settings_dict=settings)
self.handler = LogCounterHandler(self.crawler)
self.logger.addHandler(self.handler)
def tearDown(self):
self.logger.propagate = True
self.logger.removeHandler(self.handler)
def test_init(self):
self.assertIsNone(self.crawler.stats.get_value('log_count/DEBUG'))
self.assertIsNone(self.crawler.stats.get_value('log_count/INFO'))
self.assertIsNone(self.crawler.stats.get_value('log_count/WARNING'))
self.assertIsNone(self.crawler.stats.get_value('log_count/ERROR'))
self.assertIsNone(self.crawler.stats.get_value('log_count/CRITICAL'))
def test_accepted_level(self):
self.logger.error('test log msg')
self.assertEqual(self.crawler.stats.get_value('log_count/ERROR'), 1)
def test_filtered_out_level(self):
self.logger.debug('test log msg')
self.assertIsNone(self.crawler.stats.get_value('log_count/INFO'))
class StreamLoggerTest(unittest.TestCase):
def setUp(self):
self.stdout = sys.stdout
logger = logging.getLogger('test')
logger.setLevel(logging.WARNING)
sys.stdout = StreamLogger(logger, logging.ERROR)
def tearDown(self):
sys.stdout = self.stdout
def test_redirect(self):
with LogCapture() as log:
print('test log msg')
log.check(('test', 'ERROR', 'test log msg'))
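# Illustrative helper (not part of the original tests): the typical way
# failure_to_exc_info is used, forwarding a twisted Failure into stdlib logging.
# Relies on the imports already present at the top of this module.
def log_failure(failure, message="Operation failed"):
    logger = logging.getLogger(__name__)
    logger.error(message, exc_info=failure_to_exc_info(failure))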
| [((32, 23, 32, 35), 'testfixtures.LogCapture', 'LogCapture', ({}, {}), '()', False, 'from testfixtures import LogCapture\n'), ((36, 17, 36, 42), 'logging.getLogger', 'logging.getLogger', ({(36, 35, 36, 41): '"""test"""'}, {}), "('test')", False, 'import logging\n'), ((42, 17, 42, 48), 'logging.getLogger', 'logging.getLogger', ({(42, 35, 42, 47): '"""test.test1"""'}, {}), "('test.test1')", False, 'import logging\n'), ((48, 17, 48, 43), 'logging.getLogger', 'logging.getLogger', ({(48, 35, 48, 42): '"""test2"""'}, {}), "('test2')", False, 'import logging\n'), ((54, 17, 54, 47), 'logging.getLogger', 'logging.getLogger', ({(54, 35, 54, 46): '"""different"""'}, {}), "('different')", False, 'import logging\n'), ((67, 22, 67, 47), 'logging.getLogger', 'logging.getLogger', ({(67, 40, 67, 46): '"""test"""'}, {}), "('test')", False, 'import logging\n'), ((70, 23, 70, 58), 'scrapy.utils.test.get_crawler', 'get_crawler', (), '', False, 'from scrapy.utils.test import get_crawler\n'), ((71, 23, 71, 54), 'scrapy.utils.log.LogCounterHandler', 'LogCounterHandler', ({(71, 41, 71, 53): 'self.crawler'}, {}), '(self.crawler)', False, 'from scrapy.utils.log import failure_to_exc_info, TopLevelFormatter, LogCounterHandler, StreamLogger\n'), ((98, 17, 98, 42), 'logging.getLogger', 'logging.getLogger', ({(98, 35, 98, 41): '"""test"""'}, {}), "('test')", False, 'import logging\n'), ((100, 21, 100, 56), 'scrapy.utils.log.StreamLogger', 'StreamLogger', ({(100, 34, 100, 40): 'logger', (100, 42, 100, 55): 'logging.ERROR'}, {}), '(logger, logging.ERROR)', False, 'from scrapy.utils.log import failure_to_exc_info, TopLevelFormatter, LogCounterHandler, StreamLogger\n'), ((23, 40, 23, 68), 'scrapy.utils.log.failure_to_exc_info', 'failure_to_exc_info', ({(23, 60, 23, 67): 'failure'}, {}), '(failure)', False, 'from scrapy.utils.log import failure_to_exc_info, TopLevelFormatter, LogCounterHandler, StreamLogger\n'), ((26, 26, 26, 53), 'scrapy.utils.log.failure_to_exc_info', 'failure_to_exc_info', ({(26, 46, 26, 52): '"""test"""'}, {}), "('test')", False, 'from scrapy.utils.log import failure_to_exc_info, TopLevelFormatter, LogCounterHandler, StreamLogger\n'), ((33, 31, 33, 58), 'scrapy.utils.log.TopLevelFormatter', 'TopLevelFormatter', ({(33, 49, 33, 57): "['test']"}, {}), "(['test'])", False, 'from scrapy.utils.log import failure_to_exc_info, TopLevelFormatter, LogCounterHandler, StreamLogger\n'), ((106, 13, 106, 25), 'testfixtures.LogCapture', 'LogCapture', ({}, {}), '()', False, 'from testfixtures import LogCapture\n'), ((20, 23, 20, 37), 'sys.exc_info', 'sys.exc_info', ({}, {}), '()', False, 'import sys\n'), ((21, 22, 21, 31), 'twisted.python.failure.Failure', 'Failure', ({}, {}), '()', False, 'from twisted.python.failure import Failure\n')] |
jeff012345/clue-part-duo | astar.py | bd9ccd2ccdbc2fe358a696b31644b93e70ff874b | import heapq
from typing import Dict, List, Optional
from definitions import RoomPosition, Position
import random
import sys
class PriorityQueue:
def __init__(self):
        self.elements: List = []  # (priority, tie_breaker, item) tuples
def empty(self) -> bool:
return len(self.elements) == 0
def put(self, item, priority: float):
heapq.heappush(self.elements, (priority, random.randint(1, 9999999999999999), item))
def get(self):
return heapq.heappop(self.elements)[2]
def heuristic(a: Position, b: Position) -> float:
if a == b:
return 0
if isinstance(a, RoomPosition):
if isinstance(b, RoomPosition):
raise Exception("Cannot calculate heuristic between two rooms")
return 1 # (1^2 + 0^2)
if isinstance(b, RoomPosition):
return 1 # (1^2 + 0^2)
# both are Space
return (a.col - b.col) ** 2 + (a.row - b.row) ** 2
def a_star_search(start: Position, goal: Position) -> List[Position]:
if start is None:
raise Exception("Start is None")
if goal is None:
raise Exception("goal is None")
if start == goal:
raise Exception('Start and goal are the same')
frontier = PriorityQueue()
frontier.put(start, 0)
came_from: Dict[Position, Optional[Position]] = {}
cost_so_far: Dict[Position, float] = {}
came_from[start] = None
cost_so_far[start] = 0
while not frontier.empty():
current: Position = frontier.get()
if current == goal:
break
for next in current.connections:
if isinstance(next, RoomPosition) and next != goal:
# once you enter a room, it's a dead end
continue
new_cost = cost_so_far[current] + 1
if next not in cost_so_far or new_cost < cost_so_far[next]:
cost_so_far[next] = new_cost
priority = new_cost + heuristic(goal, next)
frontier.put(next, priority)
came_from[next] = current
    if goal not in came_from:
        # the frontier was exhausted without ever reaching the goal
        print(str(start) + " to " + str(goal))
        raise Exception('no path found')
shortest_path = []
prev = goal
while prev is not None:
shortest_path.append(prev)
prev = came_from[prev]
shortest_path.reverse()
return shortest_path
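# Self-contained usage sketch. _GridSpace is a hypothetical stand-in for the real
# Space positions in definitions.py; the search only needs `row`, `col` and a
# `connections` list on each node.
class _GridSpace:
    def __init__(self, row: int, col: int):
        self.row = row
        self.col = col
        self.connections: List["_GridSpace"] = []

    def __repr__(self) -> str:
        return "(%d,%d)" % (self.row, self.col)

if __name__ == "__main__":
    a, b, c = _GridSpace(0, 0), _GridSpace(0, 1), _GridSpace(0, 2)
    a.connections = [b]
    b.connections = [a, c]
    c.connections = [b]
    # expected: [(0,0), (0,1), (0,2)], the straight line from a to c
    print(a_star_search(a, c))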
| [((18, 15, 18, 43), 'heapq.heappop', 'heapq.heappop', ({(18, 29, 18, 42): 'self.elements'}, {}), '(self.elements)', False, 'import heapq\n'), ((15, 49, 15, 84), 'random.randint', 'random.randint', ({(15, 64, 15, 65): '(1)', (15, 67, 15, 83): '(9999999999999999)'}, {}), '(1, 9999999999999999)', False, 'import random\n')] |
pb-jchin/FALCON_unzip | src/py_scripts/fc_phasing.py | 21b1df3491e3bb7b9d8ecd13fc0c9c1a45b6393f | from pypeflow.common import *
from pypeflow.data import PypeLocalFile, makePypeLocalFile, fn
from pypeflow.task import PypeTask, PypeThreadTaskBase, PypeTaskBase
from pypeflow.controller import PypeWorkflow, PypeThreadWorkflow
from falcon_kit.FastaReader import FastaReader
import subprocess, shlex
import os, re
cigar_re = r"(\d+)([MIDNSHP=X])"
def make_het_call(self):
bam_fn = fn(self.bam_file)
ctg_id = self.parameters["ctg_id"]
ref_seq = self.parameters["ref_seq"]
base_dir = self.parameters["base_dir"]
vmap_fn = fn(self.vmap_file)
vpos_fn = fn(self.vpos_file)
q_id_map_fn = fn(self.q_id_map_file)
p = subprocess.Popen(shlex.split("samtools view %s %s" % (bam_fn, ctg_id) ), stdout=subprocess.PIPE)
pileup = {}
q_id_map = {}
q_max_id = 0
q_id = 0
q_name_to_id = {}
try:
os.makedirs("%s/%s" % (base_dir, ctg_id))
except OSError:
pass
vmap = open(vmap_fn, "w")
vpos = open(vpos_fn, "w")
for l in p.stdout:
l = l.strip().split()
if l[0][0] == "@":
continue
QNAME = l[0]
if QNAME not in q_name_to_id:
q_id = q_max_id
q_name_to_id[QNAME] = q_id
q_max_id += 1
q_id = q_name_to_id[QNAME]
q_id_map[q_id] = QNAME
FLAG = int(l[1])
RNAME = l[2]
POS = int(l[3]) - 1 # convert to zero base
CIGAR = l[5]
SEQ = l[9]
rp = POS
qp = 0
skip_base = 0
total_aln_pos = 0
for m in re.finditer(cigar_re, CIGAR):
adv = int(m.group(1))
total_aln_pos += adv
if m.group(2) == "S":
skip_base += adv
if 1.0 - 1.0 * skip_base / total_aln_pos < 0.1:
continue
if total_aln_pos < 2000:
continue
for m in re.finditer(cigar_re, CIGAR):
adv = int(m.group(1))
if m.group(2) == "S":
qp += adv
if m.group(2) == "M":
matches = []
for i in range(adv):
matches.append( (rp, SEQ[qp]) )
rp += 1
qp += 1
matches = matches[1:-1]
for pos, b in matches:
pileup.setdefault(pos, {})
pileup[pos].setdefault(b, [])
pileup[pos][b].append(q_id)
elif m.group(2) == "I":
for i in range(adv):
qp += 1
elif m.group(2) == "D":
for i in range(adv):
rp += 1
pos_k = pileup.keys()
pos_k.sort()
th = 0.25
for pos in pos_k:
if pos < POS:
if len(pileup[pos]) < 2:
del pileup[pos]
continue
base_count = []
total_count = 0
for b in ["A", "C", "G", "T"]:
count = len(pileup[pos].get(b,[]))
base_count.append( (count, b) )
total_count += count
if total_count < 10:
del pileup[pos]
continue
base_count.sort()
base_count.reverse()
p0 = 1.0 * base_count[0][0] / total_count
p1 = 1.0 * base_count[1][0] / total_count
if p0 < 1.0 - th and p1 > th:
b0 = base_count[0][1]
b1 = base_count[1][1]
ref_base = ref_seq[pos]
print >> vpos, pos+1, ref_base, total_count, " ".join(["%s %d" % (x[1], x[0]) for x in base_count])
for q_id_ in pileup[pos][b0]:
print >> vmap, pos+1, ref_base, b0, q_id_
for q_id_ in pileup[pos][b1]:
print >> vmap, pos+1, ref_base, b1, q_id_
del pileup[pos]
q_id_map_f = open(q_id_map_fn, "w")
for q_id, q_name in q_id_map.items():
print >> q_id_map_f, q_id, q_name
def generate_association_table(self):
vmap_fn = fn(self.vmap_file)
atable_fn = fn(self.atable_file)
ctg_id = self.parameters["ctg_id"]
base_dir = self.parameters["base_dir"]
vmap = {}
v_positions = []
with open(vmap_fn) as f:
for l in f:
l = l.strip().split()
pos = int(l[0])
ref_b = l[1]
v_b = l[2]
q_id = int(l[3])
if (pos, ref_b) not in vmap:
v_positions.append( (pos, ref_b) )
vmap.setdefault( (pos, ref_b), {} )
vmap[ (pos, ref_b) ].setdefault(v_b, [])
vmap[ (pos, ref_b) ][v_b].append( q_id )
#xary = []
#yary = []
with open(atable_fn, "w") as out_f:
for i1 in xrange(len(v_positions)):
link_count = 0
for i2 in xrange(i1+1, len(v_positions)):
pos1, rb1 = v_positions[i1]
pos2, rb2 = v_positions[i2]
if pos2 - pos1 > (1 << 16):
continue
ct = {}
p1table = []
p2table = []
s1 = 0
list1 = vmap[ (pos1, rb1) ].items()
for b1, qids1 in list1:
p1table.append( (b1, len(qids1) ) )
s1 += len(qids1)
s2 = 0
list2 = vmap[ (pos2, rb2) ].items()
for b2, qids2 in list2:
p2table.append( (b2, len(qids2) ) )
s2 += len(qids2)
total_s = 0
for b1, qids1 in list1:
for b2, qids2 in list2:
s = len(set(qids1) & set(qids2))
ct[(b1,b2)] = s
total_s += s
if total_s < 6:
continue
b11 = p1table[0][0]
b12 = p1table[1][0]
b21 = p2table[0][0]
b22 = p2table[1][0]
print >> out_f, pos1, b11, b12, pos2, b21, b22, ct[(b11,b21)], ct[(b11,b22)], ct[(b12,b21)], ct[(b12,b22)]
#xary.append(pos1)
#yary.append(pos2)
link_count += 1
if link_count > 500:
break
def get_score( c_score, pos1, pos2, s1, s2 ):
if pos1 > pos2:
pos1, pos2 = pos2, pos1
s1, s2 = s2, s1
b11, b12 = s1
b21, b22 = s2
return c_score[ (pos1, pos2) ][ (b11+b21, b12+b22) ]
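# Illustrative helper (hypothetical counts, not part of the original script)
# showing the c_score layout that get_score() expects.
def _example_get_score_usage():
    # het SNPs at positions 100 (A/G) and 250 (C/T); counts are keyed by the
    # concatenated base pairs in both orientations, as built in
    # get_phased_blocks() below from the atable file.
    c_score = {(100, 250): {("AC", "GT"): 30, ("GT", "AC"): 30,
                            ("GC", "AT"): 2, ("AT", "GC"): 2}}
    assert get_score(c_score, 100, 250, ("A", "G"), ("C", "T")) == 30
    # positions given in reverse order are swapped back before the lookup
    assert get_score(c_score, 250, 100, ("C", "T"), ("A", "G")) == 30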
def get_phased_blocks(self):
vmap_fn = fn(self.vmap_file)
atable_fn = fn(self.atable_file)
p_variant_fn = fn(self.phased_variant_file)
left_connect = {}
right_connect = {}
c_score = {}
states = {}
positions = set()
ref_base = {}
with open(vmap_fn) as f:
for l in f:
l = l.strip().split()
pos = int(l[0])
ref_b = l[1]
v_b = l[2]
q_id = int(l[3])
ref_base[pos] = ref_b
with open(atable_fn) as f:
for l in f:
l = l.strip().split()
pos1, b11, b12, pos2, b21, b22, s11, s12, s21, s22 = l
s11, s12, s21, s22 = int(s11), int(s12), int(s21), int(s22)
if abs(s11+s22-s12-s21) < 6:
continue
pos1 = int(pos1)
pos2 = int(pos2)
positions.add(pos1)
positions.add(pos2)
right_connect.setdefault(pos1, [])
right_connect[pos1].append(pos2)
left_connect.setdefault(pos2, [])
left_connect[pos2].append(pos1)
c_score[ (pos1, pos2) ] = { (b11+b21, b12+b22): s11 + s22, (b12+b22, b11+b21): s11 + s22,
(b12+b21, b11+b22): s12 + s21, (b11+b22, b12+b21): s12 + s21 }
if pos1 not in states:
st1 = (b11, b12)
st2 = (b12, b11)
score1 = 0
score2 = 0
for pp in left_connect.get(pos1,[]):
if pp in states:
st0 = states[pp]
else:
continue
score1 += get_score( c_score, pp, pos1, st0, st1 )
score2 += get_score( c_score, pp, pos1, st0, st2 )
for pp in right_connect.get(pos1,[]):
if pp in states:
st0 = states[pp]
else:
continue
score1 += get_score( c_score, pos1, pp, st1, st0 )
score2 += get_score( c_score, pos1, pp, st2, st0 )
if score1 >= score2:
states[pos1] = st1
else:
states[pos1] = st2
if pos2 not in states:
st1 = (b21, b22)
st2 = (b22, b21)
score1 = 0
score2 = 0
for pp in left_connect.get(pos2,[]):
if pp in states:
st0 = states[pp]
else:
continue
score1 += get_score( c_score, pp, pos2, st0, st1 )
score2 += get_score( c_score, pp, pos2, st0, st2 )
for pp in right_connect.get(pos2,[]):
if pp in states:
st0 = states[pp]
else:
continue
score1 += get_score( c_score, pos2, pp, st1, st0 )
score2 += get_score( c_score, pos2, pp, st2, st0 )
if score1 >= score2:
states[pos2] = st1
else:
states[pos2] = st2
positions = list(positions)
positions.sort()
iter_count = 0
while 1:
iter_count += 1
if iter_count > 10:
break
update_count = 0
for p in positions:
b1, b2 = states[p]
st1 = (b1, b2)
st2 = (b2, b1)
score1 = 0
score2 = 0
for pp in left_connect.get(p,[]):
st0 = states[pp]
score1 += get_score( c_score, pp, p, st0 ,st1)
score2 += get_score( c_score, pp, p, st0, st2)
#for pp in right_connect.get(p,[]):
# st0 = states[pp]
# score1 += get_score( c_score, p, pp, st1 ,st0)
# score2 += get_score( c_score, p, pp, st2, st0)
if score1 >= score2:
states[p] = st1
else:
states[p] = st2
update_count += 1
if update_count == 0:
break
right_extent = {}
right_score = {}
left_extent = {}
left_score = {}
for p in positions:
left_extent[p] = p
left_score[p] = 0
if p in left_connect:
left = p
st0 = states[p]
st0_ = st0[1], st0[0]
for pp in left_connect[p]:
st1 = states[pp]
s = get_score( c_score, pp, p, st1, st0)
s_ = get_score( c_score, pp, p, st1, st0_)
left_score[p] += s - s_
if s - s_ > 0 and pp < left:
left = pp
left_extent[p] = left
right_extent[p] = p
right_score[p] = 0
if p in right_connect:
right = p
st0 = states[p]
st0_ = st0[1], st0[0]
for pp in right_connect[p]:
st1 = states[pp]
s = get_score( c_score, p, pp, st0, st1)
s_ = get_score( c_score, p, pp, st0_, st1)
right_score[p] += s - s_
if s - s_ > 0 and pp > right:
right = pp
right_extent[p] = right
phase_block_id = 1
phase_blocks = {}
pb = []
max_right_ext = 0
for p in positions:
if right_score[p] < 10 or left_score[p] < 10:
continue
b1, b2 = states[p]
if max_right_ext < left_extent[p]:
if len(pb) > 3:
phase_blocks[phase_block_id] = pb
phase_block_id += 1
pb = []
pb.append( (p, b1, b2) )
if right_extent[p] > max_right_ext:
max_right_ext = right_extent[p]
if len(pb) > 3:
phase_blocks[phase_block_id] = pb
else:
phase_block_id -= 1
with open(p_variant_fn, "w") as out_f:
for pid in xrange(1, phase_block_id+1):
if len(phase_blocks[pid]) == 0:
continue
min_ = min( [x[0] for x in phase_blocks[pid]] )
max_ = max( [x[0] for x in phase_blocks[pid]] )
print >>out_f, "P", pid, min_, max_, max_ - min_, len(phase_blocks[pid]), 1.0 * (max_-min_)/len(phase_blocks[pid])
for p, b1, b2 in phase_blocks[pid]:
rb = ref_base[p]
print >>out_f, "V", pid, p, "%d_%s_%s" % (p,rb,b1), "%d_%s_%s" % (p,rb,b2), left_extent[p], right_extent[p], left_score[p], right_score[p]
def get_phased_reads(self):
q_id_map_fn = fn(self.q_id_map_file)
vmap_fn = fn(self.vmap_file)
p_variant_fn = fn(self.phased_variant_file)
    ctg_id = self.parameters["ctg_id"]
phased_read_fn = fn(self.phased_read_file)
rid_map = {}
with open(q_id_map_fn) as f:
for l in f:
l = l.strip().split()
rid_map[int(l[0])] = l[1]
read_to_variants = {}
variant_to_reads = {}
with open(vmap_fn) as f:
for l in f:
l = l.strip().split()
variant = "_".join(l[:3])
read_id = int(l[3])
read_to_variants.setdefault(read_id, set())
read_to_variants[read_id].add(variant)
variant_to_reads.setdefault(variant, set())
variant_to_reads[variant].add(read_id)
variant_to_phase = {}
with open(p_variant_fn) as f:
for l in f:
"""line format example: V 1 6854 6854_A_A 6854_A_G 6854 22781"""
l = l.strip().split()
if l[0] != "V":
continue
pb_id = int(l[1])
variant_to_phase[ l[3] ] = (pb_id, 0)
variant_to_phase[ l[4] ] = (pb_id, 1)
with open(phased_read_fn, "w") as out_f:
for r in read_to_variants:
vl = {}
pl = set()
for v in list( read_to_variants[r] ):
if v in variant_to_phase:
p = variant_to_phase[v]
vl[ p ] = vl.get(p, 0) + 1
pl.add(p[0])
pl = list(pl)
pl.sort()
for p in pl:
if vl.get( (p,0), 0) - vl.get( (p,1), 0) > 1:
print >> out_f, r, ctg_id, p, 0, vl.get( (p,0), 0), vl.get( (p,1), 0), rid_map[r]
elif vl.get( (p,1), 0) - vl.get( (p,0), 0) > 1:
print >> out_f, r, ctg_id, p, 1, vl.get( (p,0), 0), vl.get( (p,1), 0), rid_map[r]
if __name__ == "__main__":
import argparse
import re
parser = argparse.ArgumentParser(description='phasing variants and reads from a bam file')
# we can run this in parallel mode in the furture
#parser.add_argument('--n_core', type=int, default=4,
# help='number of processes used for generating consensus')
parser.add_argument('--bam', type=str, help='path to sorted bam file', required=True)
parser.add_argument('--fasta', type=str, help='path to the fasta file of contain the contig', required=True)
parser.add_argument('--ctg_id', type=str, help='contig identifier in the bam file', required=True)
parser.add_argument('--base_dir', type=str, default="./", help='the output base_dir, default to current working directory')
args = parser.parse_args()
bam_fn = args.bam
fasta_fn = args.fasta
ctg_id = args.ctg_id
base_dir = args.base_dir
ref_seq = ""
for r in FastaReader(fasta_fn):
rid = r.name.split()[0]
if rid != ctg_id:
continue
ref_seq = r.sequence.upper()
PypeThreadWorkflow.setNumThreadAllowed(1, 1)
wf = PypeThreadWorkflow()
bam_file = makePypeLocalFile(bam_fn)
vmap_file = makePypeLocalFile( os.path.join(base_dir, ctg_id, "variant_map") )
vpos_file = makePypeLocalFile( os.path.join(base_dir, ctg_id, "variant_pos") )
q_id_map_file = makePypeLocalFile( os.path.join(base_dir, ctg_id, "q_id_map") )
parameters = {}
parameters["ctg_id"] = ctg_id
parameters["ref_seq"] = ref_seq
parameters["base_dir"] = base_dir
make_het_call_task = PypeTask( inputs = { "bam_file": bam_file },
outputs = { "vmap_file": vmap_file, "vpos_file": vpos_file, "q_id_map_file": q_id_map_file },
parameters = parameters,
TaskType = PypeThreadTaskBase,
URL = "task://localhost/het_call") (make_het_call)
wf.addTasks([make_het_call_task])
atable_file = makePypeLocalFile( os.path.join(base_dir, ctg_id, "atable") )
parameters = {}
parameters["ctg_id"] = ctg_id
parameters["base_dir"] = base_dir
generate_association_table_task = PypeTask( inputs = { "vmap_file": vmap_file },
outputs = { "atable_file": atable_file },
parameters = parameters,
TaskType = PypeThreadTaskBase,
URL = "task://localhost/g_atable") (generate_association_table)
wf.addTasks([generate_association_table_task])
phased_variant_file = makePypeLocalFile( os.path.join(base_dir, ctg_id, "phased_variants") )
get_phased_blocks_task = PypeTask( inputs = { "vmap_file": vmap_file, "atable_file": atable_file },
outputs = { "phased_variant_file": phased_variant_file },
TaskType = PypeThreadTaskBase,
URL = "task://localhost/get_phased_blocks") (get_phased_blocks)
wf.addTasks([get_phased_blocks_task])
phased_read_file = makePypeLocalFile( os.path.join(base_dir, ctg_id, "phased_reads") )
get_phased_reads_task = PypeTask( inputs = { "vmap_file": vmap_file,
"q_id_map_file": q_id_map_file,
"phased_variant_file": phased_variant_file },
outputs = { "phased_read_file": phased_read_file },
parameters = {"ctg_id": ctg_id},
TaskType = PypeThreadTaskBase,
URL = "task://localhost/get_phased_reads") (get_phased_reads)
wf.addTasks([get_phased_reads_task])
wf.refreshTargets()
#with open("fc_phasing_wf.dot", "w") as f:
# print >>f, wf.graphvizDot
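    # Example invocation (paths hypothetical; the flags are the argparse options
    # defined above):
    #
    #   python fc_phasing.py --bam aligned.ctg.sorted.bam --fasta p_ctg.fa \
    #          --ctg_id 000000F --base_dir ./phasing
    #
    # Results are written under <base_dir>/<ctg_id>/ as: variant_map, variant_pos,
    # q_id_map, atable, phased_variants and phased_reads.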
| [((13, 13, 13, 30), 'pypeflow.data.fn', 'fn', ({(13, 16, 13, 29): 'self.bam_file'}, {}), '(self.bam_file)', False, 'from pypeflow.data import PypeLocalFile, makePypeLocalFile, fn\n'), ((17, 14, 17, 32), 'pypeflow.data.fn', 'fn', ({(17, 17, 17, 31): 'self.vmap_file'}, {}), '(self.vmap_file)', False, 'from pypeflow.data import PypeLocalFile, makePypeLocalFile, fn\n'), ((18, 14, 18, 32), 'pypeflow.data.fn', 'fn', ({(18, 17, 18, 31): 'self.vpos_file'}, {}), '(self.vpos_file)', False, 'from pypeflow.data import PypeLocalFile, makePypeLocalFile, fn\n'), ((19, 18, 19, 40), 'pypeflow.data.fn', 'fn', ({(19, 21, 19, 39): 'self.q_id_map_file'}, {}), '(self.q_id_map_file)', False, 'from pypeflow.data import PypeLocalFile, makePypeLocalFile, fn\n'), ((134, 14, 134, 32), 'pypeflow.data.fn', 'fn', ({(134, 17, 134, 31): 'self.vmap_file'}, {}), '(self.vmap_file)', False, 'from pypeflow.data import PypeLocalFile, makePypeLocalFile, fn\n'), ((135, 16, 135, 36), 'pypeflow.data.fn', 'fn', ({(135, 19, 135, 35): 'self.atable_file'}, {}), '(self.atable_file)', False, 'from pypeflow.data import PypeLocalFile, makePypeLocalFile, fn\n'), ((212, 14, 212, 32), 'pypeflow.data.fn', 'fn', ({(212, 17, 212, 31): 'self.vmap_file'}, {}), '(self.vmap_file)', False, 'from pypeflow.data import PypeLocalFile, makePypeLocalFile, fn\n'), ((213, 16, 213, 36), 'pypeflow.data.fn', 'fn', ({(213, 19, 213, 35): 'self.atable_file'}, {}), '(self.atable_file)', False, 'from pypeflow.data import PypeLocalFile, makePypeLocalFile, fn\n'), ((214, 19, 214, 47), 'pypeflow.data.fn', 'fn', ({(214, 22, 214, 46): 'self.phased_variant_file'}, {}), '(self.phased_variant_file)', False, 'from pypeflow.data import PypeLocalFile, makePypeLocalFile, fn\n'), ((420, 18, 420, 40), 'pypeflow.data.fn', 'fn', ({(420, 21, 420, 39): 'self.q_id_map_file'}, {}), '(self.q_id_map_file)', False, 'from pypeflow.data import PypeLocalFile, makePypeLocalFile, fn\n'), ((421, 14, 421, 32), 'pypeflow.data.fn', 'fn', ({(421, 17, 421, 31): 'self.vmap_file'}, {}), '(self.vmap_file)', False, 'from pypeflow.data import PypeLocalFile, makePypeLocalFile, fn\n'), ((422, 19, 422, 47), 'pypeflow.data.fn', 'fn', ({(422, 22, 422, 46): 'self.phased_variant_file'}, {}), '(self.phased_variant_file)', False, 'from pypeflow.data import PypeLocalFile, makePypeLocalFile, fn\n'), ((426, 21, 426, 46), 'pypeflow.data.fn', 'fn', ({(426, 24, 426, 45): 'self.phased_read_file'}, {}), '(self.phased_read_file)', False, 'from pypeflow.data import PypeLocalFile, makePypeLocalFile, fn\n'), ((479, 13, 479, 94), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((495, 13, 495, 34), 'falcon_kit.FastaReader.FastaReader', 'FastaReader', ({(495, 25, 495, 33): 'fasta_fn'}, {}), '(fasta_fn)', False, 'from falcon_kit.FastaReader import FastaReader\n'), ((501, 4, 501, 48), 'pypeflow.controller.PypeThreadWorkflow.setNumThreadAllowed', 'PypeThreadWorkflow.setNumThreadAllowed', ({(501, 43, 501, 44): '(1)', (501, 46, 501, 47): '(1)'}, {}), '(1, 1)', False, 'from pypeflow.controller import PypeWorkflow, PypeThreadWorkflow\n'), ((502, 9, 502, 29), 'pypeflow.controller.PypeThreadWorkflow', 'PypeThreadWorkflow', ({}, {}), '()', False, 'from pypeflow.controller import PypeWorkflow, PypeThreadWorkflow\n'), ((506, 15, 506, 40), 'pypeflow.data.makePypeLocalFile', 'makePypeLocalFile', ({(506, 33, 506, 39): 'bam_fn'}, {}), '(bam_fn)', False, 'from pypeflow.data import PypeLocalFile, makePypeLocalFile, fn\n'), ((21, 25, 21, 79), 'shlex.split', 'shlex.split', ({(21, 37, 21, 77): "'samtools 
view %s %s' % (bam_fn, ctg_id)"}, {}), "('samtools view %s %s' % (bam_fn, ctg_id))", False, 'import subprocess, shlex\n'), ((29, 8, 29, 49), 'os.makedirs', 'os.makedirs', ({(29, 20, 29, 48): "('%s/%s' % (base_dir, ctg_id))"}, {}), "('%s/%s' % (base_dir, ctg_id))", False, 'import os, re\n'), ((59, 17, 59, 45), 're.finditer', 're.finditer', ({(59, 29, 59, 37): 'cigar_re', (59, 39, 59, 44): 'CIGAR'}, {}), '(cigar_re, CIGAR)', False, 'import re\n'), ((71, 17, 71, 45), 're.finditer', 're.finditer', ({(71, 29, 71, 37): 'cigar_re', (71, 39, 71, 44): 'CIGAR'}, {}), '(cigar_re, CIGAR)', False, 'import re\n'), ((507, 35, 507, 80), 'os.path.join', 'os.path.join', ({(507, 48, 507, 56): 'base_dir', (507, 58, 507, 64): 'ctg_id', (507, 66, 507, 79): '"""variant_map"""'}, {}), "(base_dir, ctg_id, 'variant_map')", False, 'import os, re\n'), ((508, 35, 508, 80), 'os.path.join', 'os.path.join', ({(508, 48, 508, 56): 'base_dir', (508, 58, 508, 64): 'ctg_id', (508, 66, 508, 79): '"""variant_pos"""'}, {}), "(base_dir, ctg_id, 'variant_pos')", False, 'import os, re\n'), ((509, 39, 509, 81), 'os.path.join', 'os.path.join', ({(509, 52, 509, 60): 'base_dir', (509, 62, 509, 68): 'ctg_id', (509, 70, 509, 80): '"""q_id_map"""'}, {}), "(base_dir, ctg_id, 'q_id_map')", False, 'import os, re\n'), ((515, 25, 519, 59), 'pypeflow.task.PypeTask', 'PypeTask', (), '', False, 'from pypeflow.task import PypeTask, PypeThreadTaskBase, PypeTaskBase\n'), ((526, 37, 526, 77), 'os.path.join', 'os.path.join', ({(526, 50, 526, 58): 'base_dir', (526, 60, 526, 66): 'ctg_id', (526, 68, 526, 76): '"""atable"""'}, {}), "(base_dir, ctg_id, 'atable')", False, 'import os, re\n'), ((530, 38, 534, 72), 'pypeflow.task.PypeTask', 'PypeTask', (), '', False, 'from pypeflow.task import PypeTask, PypeThreadTaskBase, PypeTaskBase\n'), ((541, 45, 541, 94), 'os.path.join', 'os.path.join', ({(541, 58, 541, 66): 'base_dir', (541, 68, 541, 74): 'ctg_id', (541, 76, 541, 93): '"""phased_variants"""'}, {}), "(base_dir, ctg_id, 'phased_variants')", False, 'import os, re\n'), ((542, 29, 545, 81), 'pypeflow.task.PypeTask', 'PypeTask', (), '', False, 'from pypeflow.task import PypeTask, PypeThreadTaskBase, PypeTaskBase\n'), ((551, 42, 551, 88), 'os.path.join', 'os.path.join', ({(551, 55, 551, 63): 'base_dir', (551, 65, 551, 71): 'ctg_id', (551, 73, 551, 87): '"""phased_reads"""'}, {}), "(base_dir, ctg_id, 'phased_reads')", False, 'import os, re\n'), ((552, 28, 558, 80), 'pypeflow.task.PypeTask', 'PypeTask', (), '', False, 'from pypeflow.task import PypeTask, PypeThreadTaskBase, PypeTaskBase\n')] |
pabloduque0/cnn_deconv_viz | augmentation/combineds/wgan_gp_straight.py | 3fc3d8a9dbad8e8e28d4df4023bdb438e4c9cf85 | from keras.datasets import mnist
from keras.layers.merge import _Merge
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import RMSprop
from functools import partial
from augmentation.discriminators import wasserstein_discriminator
from augmentation.generators import wasserstein_generator
import keras.backend as K
import matplotlib.pyplot as plt
import sys
import numpy as np
class RandomWeightedAverage(_Merge):
"""Provides a (random) weighted average between real and generated image samples"""
def _merge_function(self, inputs):
alpha = K.random_uniform((32, 1, 1, 1))
return (alpha * inputs[0]) + ((1 - alpha) * inputs[1])
class WGANGP():
def __init__(self, img_shape, noise_shape):
        self.img_shape = img_shape
        self.noise_shape = noise_shape
        self.channels = img_shape[-1]  # assumed channels-last; needed by the local build_generator/build_critic
# Following parameter and optimizer set as recommended in paper
self.n_critic = 5
optimizer = RMSprop(lr=0.00005)
# Build the generator and critic
self.generator = wasserstein_generator.create_model(noise_shape)
self.critic = wasserstein_discriminator.create_model(img_shape)
#-------------------------------
# Construct Computational Graph
# for the Critic
#-------------------------------
# Freeze generator's layers while training critic
self.generator.trainable = False
# Image input (real sample)
real_img = Input(shape=self.img_shape)
# Noise input
z_disc = Input(shape=(self.noise_shape,))
# Generate image based of noise (fake sample)
fake_img = self.generator(z_disc)
# Discriminator determines validity of the real and fake images
fake = self.critic(fake_img)
valid = self.critic(real_img)
# Construct weighted average between real and fake images
interpolated_img = RandomWeightedAverage()([real_img, fake_img])
# Determine validity of weighted sample
validity_interpolated = self.critic(interpolated_img)
# Use Python partial to provide loss function with additional
# 'averaged_samples' argument
partial_gp_loss = partial(self.gradient_penalty_loss,
averaged_samples=interpolated_img)
partial_gp_loss.__name__ = 'gradient_penalty' # Keras requires function names
self.critic_model = Model(inputs=[real_img, z_disc],
outputs=[valid, fake, validity_interpolated])
self.critic_model.compile(loss=[self.wasserstein_loss,
self.wasserstein_loss,
partial_gp_loss],
optimizer=optimizer,
loss_weights=[1, 1, 10])
#-------------------------------
# Construct Computational Graph
# for Generator
#-------------------------------
# For the generator we freeze the critic's layers
self.critic.trainable = False
self.generator.trainable = True
# Sampled noise for input to generator
        z_gen = Input(shape=(self.noise_shape,))
# Generate images based of noise
img = self.generator(z_gen)
# Discriminator determines validity
valid = self.critic(img)
# Defines generator model
self.generator_model = Model(z_gen, valid)
self.generator_model.compile(loss=self.wasserstein_loss, optimizer=optimizer)
def gradient_penalty_loss(self, y_true, y_pred, averaged_samples):
"""
Computes gradient penalty based on prediction and weighted real / fake samples
"""
gradients = K.gradients(y_pred, averaged_samples)[0]
# compute the euclidean norm by squaring ...
gradients_sqr = K.square(gradients)
# ... summing over the rows ...
gradients_sqr_sum = K.sum(gradients_sqr,
axis=np.arange(1, len(gradients_sqr.shape)))
# ... and sqrt
gradient_l2_norm = K.sqrt(gradients_sqr_sum)
# compute lambda * (1 - ||grad||)^2 still for each single sample
gradient_penalty = K.square(1 - gradient_l2_norm)
# return the mean as loss over all the batch samples
return K.mean(gradient_penalty)
def wasserstein_loss(self, y_true, y_pred):
return K.mean(y_true * y_pred)
def build_generator(self):
model = Sequential()
model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.noise_shape))
model.add(Reshape((7, 7, 128)))
model.add(UpSampling2D())
model.add(Conv2D(128, kernel_size=4, padding="same"))
model.add(BatchNormalization(momentum=0.8))
model.add(Activation("relu"))
model.add(UpSampling2D())
model.add(Conv2D(64, kernel_size=4, padding="same"))
model.add(BatchNormalization(momentum=0.8))
model.add(Activation("relu"))
model.add(Conv2D(self.channels, kernel_size=4, padding="same"))
model.add(Activation("tanh"))
model.summary()
noise = Input(shape=(self.noise_shape,))
img = model(noise)
return Model(noise, img)
def build_critic(self):
model = Sequential()
model.add(Conv2D(16, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Conv2D(32, kernel_size=3, strides=2, padding="same"))
model.add(ZeroPadding2D(padding=((0,1),(0,1))))
model.add(BatchNormalization(momentum=0.8))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
model.add(BatchNormalization(momentum=0.8))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Conv2D(128, kernel_size=3, strides=1, padding="same"))
model.add(BatchNormalization(momentum=0.8))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(1))
model.summary()
img = Input(shape=self.img_shape)
validity = model(img)
return Model(img, validity)
    def train(self, X_train, epochs, batch_size, sample_interval=50):
        # X_train: array of training images, expected to be scaled to [-1, 1]
# Adversarial ground truths
valid = -np.ones((batch_size, 1))
fake = np.ones((batch_size, 1))
dummy = np.zeros((batch_size, 1)) # Dummy gt for gradient penalty
for epoch in range(epochs):
for _ in range(self.n_critic):
# ---------------------
# Train Discriminator
# ---------------------
# Select a random batch of images
idx = np.random.randint(0, X_train.shape[0], batch_size)
imgs = X_train[idx]
# Sample generator input
noise = np.random.normal(0, 1, (batch_size, self.noise_shape))
# Train the critic
d_loss = self.critic_model.train_on_batch([imgs, noise],
[valid, fake, dummy])
# ---------------------
# Train Generator
# ---------------------
g_loss = self.generator_model.train_on_batch(noise, valid)
# Plot the progress
print ("%d [D loss: %f] [G loss: %f]" % (epoch, d_loss[0], g_loss))
# If at save interval => save generated image samples
if epoch % sample_interval == 0:
self.sample_images(epoch)
def sample_images(self, epoch):
r, c = 5, 5
noise = np.random.normal(0, 1, (r * c, self.noise_shape))
gen_imgs = self.generator.predict(noise)
# Rescale images 0 - 1
gen_imgs = 0.5 * gen_imgs + 0.5
fig, axs = plt.subplots(r, c)
cnt = 0
for i in range(r):
for j in range(c):
axs[i,j].imshow(gen_imgs[cnt, :,:,0], cmap='gray')
axs[i,j].axis('off')
cnt += 1
fig.savefig("images/mnist_%d.png" % epoch)
plt.close()
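# Minimal usage sketch. Assumptions: the external wasserstein_generator /
# wasserstein_discriminator factories build networks for 28x28x1 images, inputs
# are scaled to [-1, 1], batch_size is 32 to match the hard-coded
# RandomWeightedAverage above, and an images/ directory exists for sample_images.
# mnist and np come from this module's own imports.
if __name__ == '__main__':
    (X_train, _), (_, _) = mnist.load_data()
    X_train = (X_train.astype('float32') - 127.5) / 127.5
    X_train = np.expand_dims(X_train, axis=3)
    gan = WGANGP(img_shape=(28, 28, 1), noise_shape=100)
    gan.train(X_train, epochs=10000, batch_size=32, sample_interval=200)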
| [((24, 16, 24, 47), 'keras.backend.random_uniform', 'K.random_uniform', ({(24, 33, 24, 46): '(32, 1, 1, 1)'}, {}), '((32, 1, 1, 1))', True, 'import keras.backend as K\n'), ((34, 20, 34, 39), 'keras.optimizers.RMSprop', 'RMSprop', (), '', False, 'from keras.optimizers import RMSprop\n'), ((37, 25, 37, 72), 'augmentation.generators.wasserstein_generator.create_model', 'wasserstein_generator.create_model', ({(37, 60, 37, 71): 'noise_shape'}, {}), '(noise_shape)', False, 'from augmentation.generators import wasserstein_generator\n'), ((38, 22, 38, 71), 'augmentation.discriminators.wasserstein_discriminator.create_model', 'wasserstein_discriminator.create_model', ({(38, 61, 38, 70): 'img_shape'}, {}), '(img_shape)', False, 'from augmentation.discriminators import wasserstein_discriminator\n'), ((49, 19, 49, 46), 'keras.layers.Input', 'Input', (), '', False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout\n'), ((52, 17, 52, 49), 'keras.layers.Input', 'Input', (), '', False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout\n'), ((67, 26, 68, 60), 'functools.partial', 'partial', (), '', False, 'from functools import partial\n'), ((71, 28, 72, 73), 'keras.models.Model', 'Model', (), '', False, 'from keras.models import Sequential, Model\n'), ((88, 16, 88, 35), 'keras.layers.Input', 'Input', (), '', False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout\n'), ((94, 31, 94, 50), 'keras.models.Model', 'Model', ({(94, 37, 94, 42): 'z_gen', (94, 44, 94, 49): 'valid'}, {}), '(z_gen, valid)', False, 'from keras.models import Sequential, Model\n'), ((104, 24, 104, 43), 'keras.backend.square', 'K.square', ({(104, 33, 104, 42): 'gradients'}, {}), '(gradients)', True, 'import keras.backend as K\n'), ((109, 27, 109, 52), 'keras.backend.sqrt', 'K.sqrt', ({(109, 34, 109, 51): 'gradients_sqr_sum'}, {}), '(gradients_sqr_sum)', True, 'import keras.backend as K\n'), ((111, 27, 111, 57), 'keras.backend.square', 'K.square', ({(111, 36, 111, 56): '1 - gradient_l2_norm'}, {}), '(1 - gradient_l2_norm)', True, 'import keras.backend as K\n'), ((113, 15, 113, 39), 'keras.backend.mean', 'K.mean', ({(113, 22, 113, 38): 'gradient_penalty'}, {}), '(gradient_penalty)', True, 'import keras.backend as K\n'), ((117, 15, 117, 38), 'keras.backend.mean', 'K.mean', ({(117, 22, 117, 37): '(y_true * y_pred)'}, {}), '(y_true * y_pred)', True, 'import keras.backend as K\n'), ((121, 16, 121, 28), 'keras.models.Sequential', 'Sequential', ({}, {}), '()', False, 'from keras.models import Sequential, Model\n'), ((138, 16, 138, 48), 'keras.layers.Input', 'Input', (), '', False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout\n'), ((141, 15, 141, 32), 'keras.models.Model', 'Model', ({(141, 21, 141, 26): 'noise', (141, 28, 141, 31): 'img'}, {}), '(noise, img)', False, 'from keras.models import Sequential, Model\n'), ((145, 16, 145, 28), 'keras.models.Sequential', 'Sequential', ({}, {}), '()', False, 'from keras.models import Sequential, Model\n'), ((168, 14, 168, 41), 'keras.layers.Input', 'Input', (), '', False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout\n'), ((171, 15, 171, 35), 'keras.models.Model', 'Model', ({(171, 21, 171, 24): 'img', (171, 26, 171, 34): 'validity'}, {}), '(img, validity)', False, 'from keras.models import Sequential, Model\n'), ((177, 16, 177, 40), 'numpy.ones', 'np.ones', ({(177, 24, 177, 39): '(batch_size, 1)'}, {}), '((batch_size, 1))', True, 'import numpy as np\n'), ((178, 16, 178, 41), 'numpy.zeros', 'np.zeros', ({(178, 25, 
178, 40): '(batch_size, 1)'}, {}), '((batch_size, 1))', True, 'import numpy as np\n'), ((211, 16, 211, 65), 'numpy.random.normal', 'np.random.normal', ({(211, 33, 211, 34): '0', (211, 36, 211, 37): '1', (211, 39, 211, 64): '(r * c, self.noise_shape)'}, {}), '(0, 1, (r * c, self.noise_shape))', True, 'import numpy as np\n'), ((217, 19, 217, 37), 'matplotlib.pyplot.subplots', 'plt.subplots', ({(217, 32, 217, 33): 'r', (217, 35, 217, 36): 'c'}, {}), '(r, c)', True, 'import matplotlib.pyplot as plt\n'), ((225, 8, 225, 19), 'matplotlib.pyplot.close', 'plt.close', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((102, 20, 102, 57), 'keras.backend.gradients', 'K.gradients', ({(102, 32, 102, 38): 'y_pred', (102, 40, 102, 56): 'averaged_samples'}, {}), '(y_pred, averaged_samples)', True, 'import keras.backend as K\n'), ((123, 18, 123, 83), 'keras.layers.Dense', 'Dense', (), '', False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout\n'), ((124, 18, 124, 38), 'keras.layers.Reshape', 'Reshape', ({(124, 26, 124, 37): '(7, 7, 128)'}, {}), '((7, 7, 128))', False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout\n'), ((125, 18, 125, 32), 'keras.layers.convolutional.UpSampling2D', 'UpSampling2D', ({}, {}), '()', False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((126, 18, 126, 60), 'keras.layers.convolutional.Conv2D', 'Conv2D', (), '', False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((127, 18, 127, 50), 'keras.layers.BatchNormalization', 'BatchNormalization', (), '', False, 'from keras.layers import BatchNormalization, Activation, ZeroPadding2D\n'), ((128, 18, 128, 36), 'keras.layers.Activation', 'Activation', ({(128, 29, 128, 35): '"""relu"""'}, {}), "('relu')", False, 'from keras.layers import BatchNormalization, Activation, ZeroPadding2D\n'), ((129, 18, 129, 32), 'keras.layers.convolutional.UpSampling2D', 'UpSampling2D', ({}, {}), '()', False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((130, 18, 130, 59), 'keras.layers.convolutional.Conv2D', 'Conv2D', (), '', False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((131, 18, 131, 50), 'keras.layers.BatchNormalization', 'BatchNormalization', (), '', False, 'from keras.layers import BatchNormalization, Activation, ZeroPadding2D\n'), ((132, 18, 132, 36), 'keras.layers.Activation', 'Activation', ({(132, 29, 132, 35): '"""relu"""'}, {}), "('relu')", False, 'from keras.layers import BatchNormalization, Activation, ZeroPadding2D\n'), ((133, 18, 133, 70), 'keras.layers.convolutional.Conv2D', 'Conv2D', (), '', False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((134, 18, 134, 36), 'keras.layers.Activation', 'Activation', ({(134, 29, 134, 35): '"""tanh"""'}, {}), "('tanh')", False, 'from keras.layers import BatchNormalization, Activation, ZeroPadding2D\n'), ((147, 18, 147, 98), 'keras.layers.convolutional.Conv2D', 'Conv2D', (), '', False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((148, 18, 148, 38), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', (), '', False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((149, 18, 149, 31), 'keras.layers.Dropout', 'Dropout', ({(149, 26, 149, 30): '(0.25)'}, {}), '(0.25)', False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout\n'), ((150, 18, 150, 70), 'keras.layers.convolutional.Conv2D', 'Conv2D', (), '', False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((151, 18, 151, 54), 
'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (), '', False, 'from keras.layers import BatchNormalization, Activation, ZeroPadding2D\n'), ((152, 18, 152, 50), 'keras.layers.BatchNormalization', 'BatchNormalization', (), '', False, 'from keras.layers import BatchNormalization, Activation, ZeroPadding2D\n'), ((153, 18, 153, 38), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', (), '', False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((154, 18, 154, 31), 'keras.layers.Dropout', 'Dropout', ({(154, 26, 154, 30): '(0.25)'}, {}), '(0.25)', False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout\n'), ((155, 18, 155, 70), 'keras.layers.convolutional.Conv2D', 'Conv2D', (), '', False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((156, 18, 156, 50), 'keras.layers.BatchNormalization', 'BatchNormalization', (), '', False, 'from keras.layers import BatchNormalization, Activation, ZeroPadding2D\n'), ((157, 18, 157, 38), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', (), '', False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((158, 18, 158, 31), 'keras.layers.Dropout', 'Dropout', ({(158, 26, 158, 30): '(0.25)'}, {}), '(0.25)', False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout\n'), ((159, 18, 159, 71), 'keras.layers.convolutional.Conv2D', 'Conv2D', (), '', False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((160, 18, 160, 50), 'keras.layers.BatchNormalization', 'BatchNormalization', (), '', False, 'from keras.layers import BatchNormalization, Activation, ZeroPadding2D\n'), ((161, 18, 161, 38), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', (), '', False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((162, 18, 162, 31), 'keras.layers.Dropout', 'Dropout', ({(162, 26, 162, 30): '(0.25)'}, {}), '(0.25)', False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout\n'), ((163, 18, 163, 27), 'keras.layers.Flatten', 'Flatten', ({}, {}), '()', False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout\n'), ((164, 18, 164, 26), 'keras.layers.Dense', 'Dense', ({(164, 24, 164, 25): '(1)'}, {}), '(1)', False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout\n'), ((176, 17, 176, 41), 'numpy.ones', 'np.ones', ({(176, 25, 176, 40): '(batch_size, 1)'}, {}), '((batch_size, 1))', True, 'import numpy as np\n'), ((188, 22, 188, 72), 'numpy.random.randint', 'np.random.randint', ({(188, 40, 188, 41): '0', (188, 43, 188, 59): 'X_train.shape[0]', (188, 61, 188, 71): 'batch_size'}, {}), '(0, X_train.shape[0], batch_size)', True, 'import numpy as np\n'), ((191, 24, 191, 78), 'numpy.random.normal', 'np.random.normal', ({(191, 41, 191, 42): '0', (191, 44, 191, 45): '1', (191, 47, 191, 77): '(batch_size, self.noise_shape)'}, {}), '(0, 1, (batch_size, self.noise_shape))', True, 'import numpy as np\n')] |
BernardoB95/Extrator_SPEDFiscal | Core/Block_C/RC480_Factory.py | 10b4697833c561d24654251da5f22d044f03fc16 | from Core.IFactory import IFactory
from Regs.Block_C import RC480
class RC480Factory(IFactory):
def create_block_object(self, line):
self.rc480 = _rc480 = RC480()
_rc480.reg_list = line
return _rc480
| [((8, 30, 8, 37), 'Regs.Block_C.RC480', 'RC480', ({}, {}), '()', False, 'from Regs.Block_C import RC480\n')] |
mgoldchild/keras-onnx | keras2onnx/proto/__init__.py | 8e700572b89a907ca21a3096556f64b62b7aa76c | ###############################################################################
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
###############################################################################
import os
import onnx
from distutils.version import StrictVersion
# Rather than using ONNX protobuf definition throughout our codebase, we import ONNX protobuf definition here so that
# we can conduct quick fixes by overwriting ONNX functions without changing any lines elsewhere.
from onnx import onnx_pb as onnx_proto
from onnx import helper
def get_opset_number_from_onnx():
return onnx.defs.onnx_opset_version()
def _check_onnx_version():
import pkg_resources
min_required_version = pkg_resources.parse_version('1.0.1')
current_version = pkg_resources.get_distribution('onnx').parsed_version
    assert current_version >= min_required_version, 'Keras2ONNX requires ONNX version 1.0.1 or a newer one'
_check_onnx_version()
is_tf_keras = False
if os.environ.get('TF_KERAS', '0') != '0':
is_tf_keras = True
if is_tf_keras:
from tensorflow.python import keras
else:
try:
import keras
except ImportError:
is_tf_keras = True
from tensorflow.python import keras
def is_keras_older_than(version_str):
return StrictVersion(keras.__version__.split('-')[0]) < StrictVersion(version_str)
def is_keras_later_than(version_str):
return StrictVersion(keras.__version__.split('-')[0]) > StrictVersion(version_str)
| [((17, 11, 17, 41), 'onnx.defs.onnx_opset_version', 'onnx.defs.onnx_opset_version', ({}, {}), '()', False, 'import onnx\n'), ((21, 27, 21, 63), 'pkg_resources.parse_version', 'pkg_resources.parse_version', ({(21, 55, 21, 62): '"""1.0.1"""'}, {}), "('1.0.1')", False, 'import pkg_resources\n'), ((28, 3, 28, 34), 'os.environ.get', 'os.environ.get', ({(28, 18, 28, 28): '"""TF_KERAS"""', (28, 30, 28, 33): '"""0"""'}, {}), "('TF_KERAS', '0')", False, 'import os\n'), ((22, 22, 22, 60), 'pkg_resources.get_distribution', 'pkg_resources.get_distribution', ({(22, 53, 22, 59): '"""onnx"""'}, {}), "('onnx')", False, 'import pkg_resources\n'), ((42, 60, 42, 86), 'distutils.version.StrictVersion', 'StrictVersion', ({(42, 74, 42, 85): 'version_str'}, {}), '(version_str)', False, 'from distutils.version import StrictVersion\n'), ((46, 60, 46, 86), 'distutils.version.StrictVersion', 'StrictVersion', ({(46, 74, 46, 85): 'version_str'}, {}), '(version_str)', False, 'from distutils.version import StrictVersion\n'), ((42, 25, 42, 53), 'tensorflow.python.keras.__version__.split', 'keras.__version__.split', ({(42, 49, 42, 52): '"""-"""'}, {}), "('-')", False, 'from tensorflow.python import keras\n'), ((46, 25, 46, 53), 'tensorflow.python.keras.__version__.split', 'keras.__version__.split', ({(46, 49, 46, 52): '"""-"""'}, {}), "('-')", False, 'from tensorflow.python import keras\n')] |
ocefpaf/xroms | tests/test_load.py | 763d6e678e28fe074e0aaab26fecd2b74e51a8b0 | '''Test package.'''
import xroms
from glob import glob
import os
def test_open_netcdf():
'''Test xroms.open_netcdf().'''
base = os.path.join(xroms.__path__[0],'..','tests','input')
files = glob('%s/ocean_his_000?.nc' % base)
ds = xroms.open_netcdf(files)
assert ds
def test_open_zarr():
'''Test xroms.open_zarr().'''
base = os.path.join(xroms.__path__[0],'..','tests','input')
files = glob('%s/ocean_his_000?' % base)
ds = xroms.open_zarr(files, chunks={'ocean_time':2})
assert ds
| [((11, 11, 11, 63), 'os.path.join', 'os.path.join', ({(11, 24, 11, 41): 'xroms.__path__[0]', (11, 42, 11, 46): '""".."""', (11, 47, 11, 54): '"""tests"""', (11, 55, 11, 62): '"""input"""'}, {}), "(xroms.__path__[0], '..', 'tests', 'input')", False, 'import os\n'), ((12, 12, 12, 47), 'glob.glob', 'glob', ({(12, 17, 12, 46): "'%s/ocean_his_000?.nc' % base"}, {}), "('%s/ocean_his_000?.nc' % base)", False, 'from glob import glob\n'), ((13, 9, 13, 33), 'xroms.open_netcdf', 'xroms.open_netcdf', ({(13, 27, 13, 32): 'files'}, {}), '(files)', False, 'import xroms\n'), ((20, 11, 20, 63), 'os.path.join', 'os.path.join', ({(20, 24, 20, 41): 'xroms.__path__[0]', (20, 42, 20, 46): '""".."""', (20, 47, 20, 54): '"""tests"""', (20, 55, 20, 62): '"""input"""'}, {}), "(xroms.__path__[0], '..', 'tests', 'input')", False, 'import os\n'), ((21, 12, 21, 44), 'glob.glob', 'glob', ({(21, 17, 21, 43): "'%s/ocean_his_000?' % base"}, {}), "('%s/ocean_his_000?' % base)", False, 'from glob import glob\n'), ((22, 9, 22, 56), 'xroms.open_zarr', 'xroms.open_zarr', (), '', False, 'import xroms\n')] |
alvnary18/django-nvd3 | demoproject/demoproject/urls.py | 4b7dffb1107b8202698212b99c26d1d0097afd1d | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.home, name='home'),
url(r'^piechart/', views.demo_piechart, name='demo_piechart'),
url(r'^linechart/', views.demo_linechart, name='demo_linechart'),
url(r'^linechart_without_date/', views.demo_linechart_without_date, name='demo_linechart_without_date'),
url(r'^linewithfocuschart/', views.demo_linewithfocuschart, name='demo_linewithfocuschart'),
url(r'^multibarchart/', views.demo_multibarchart, name='demo_multibarchart'),
url(r'^stackedareachart/', views.demo_stackedareachart, name='demo_stackedareachart'),
url(r'^multibarhorizontalchart/', views.demo_multibarhorizontalchart, name='demo_multibarhorizontalchart'),
url(r'^lineplusbarchart/', views.demo_lineplusbarchart, name='demo_lineplusbarchart'),
url(r'^cumulativelinechart/', views.demo_cumulativelinechart, name='demo_cumulativelinechart'),
url(r'^discretebarchart/', views.demo_discretebarchart, name='demo_discretebarchart'),
url(r'^discretebarchart_with_date/', views.demo_discretebarchart_with_date, name='demo_discretebarchart_date'),
url(r'^scatterchart/', views.demo_scatterchart, name='demo_scatterchart'),
url(r'^linechart_with_ampm/', views.demo_linechart_with_ampm, name='demo_linechart_with_ampm'),
# url(r'^demoproject/', include('demoproject.foo.urls')),
]
| [((6, 4, 6, 39), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import url\n'), ((7, 4, 7, 65), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import url\n'), ((8, 4, 8, 68), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import url\n'), ((9, 4, 9, 107), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import url\n'), ((10, 4, 10, 95), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import url\n'), ((11, 4, 11, 80), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import url\n'), ((12, 4, 12, 89), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import url\n'), ((13, 4, 13, 110), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import url\n'), ((14, 4, 14, 89), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import url\n'), ((15, 4, 15, 98), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import url\n'), ((16, 4, 16, 89), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import url\n'), ((17, 4, 17, 114), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import url\n'), ((18, 4, 18, 77), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import url\n'), ((19, 4, 19, 98), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import url\n')] |
Harri-Renney/Mind_Control_Synth | wired_version/mcs_wired.py | 5a892a81a3f37444ef154f29a62d44fa1476bfbd | import time
import mido
from pinaps.piNapsController import PiNapsController
from NeuroParser import NeuroParser
"""
Equation of motion used to modify vibrato.
"""
def positionStep(pos, vel, acc):
return pos + vel * 2 + (1/2) * acc * 4
def velocityStep(vel, acc):
return acc * 2 + vel
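# Illustrative check (assuming Python 3 true division, so (1/2) == 0.5): starting from rest,
# positionStep(0, 0, 4) = 0 + 0*2 + 0.5*4*4 = 8 and velocityStep(0, 4) = 4*2 + 0 = 8, so
# sustained attention ramps the vibrato depth up quadratically until it is clamped to the
# 0..100 range in parserUpdateVibrato below.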
CTRL_LFO_PITCH = 26
CTRL_LFO_RATE = 29
MIDI_MESSAGE_PERIOD = 1
vibratoPos = 0
vibratoVel = 0
vibratoAcc = 4
def parserUpdateVibrato(packet):
global vibratoPos
global vibratoVel
global vibratoAcc
if(packet.code == NeuroParser.DataPacket.kPoorQuality):
print("Poor quality: " + str(packet.poorQuality))
if(packet.code == NeuroParser.DataPacket.kAttention):
print("Attention: " + str(packet.attention))
        ##Change in vibrato strength depending on attention values##
##@ToDo - Change to include more momentum build up etc##
if(packet.attention > 50):
vibratoPos = positionStep(vibratoPos, vibratoVel, vibratoAcc)
vibratoVel = velocityStep(vibratoVel, vibratoAcc)
vibratoPos = 100 if vibratoPos > 100 else vibratoPos
vibratoPos = 0 if vibratoPos < 0 else vibratoPos
else:
vibratoPos = positionStep(vibratoPos, vibratoVel, -vibratoAcc)
vibratoVel = velocityStep(vibratoVel, -vibratoAcc)
vibratoPos = 100 if vibratoPos > 100 else vibratoPos
vibratoPos = 0 if vibratoPos < 0 else vibratoPos
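        # Note: vibratoPos is sent later (in main) as the value of a MIDI control-change
        # message, so it is clamped here; CC values must stay within 0-127.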
def main():
#Init USB:MIDI interface.
#print(mido.get_output_names()) #Used to originally find correct serial port.
port = mido.open_output('USB Midi:USB Midi MIDI 1 20:0')
msgModulate = mido.Message('control_change', control=CTRL_LFO_PITCH, value=100)
port.send(msgModulate)
#Init Pinaps.
pinapsController = PiNapsController()
pinapsController.defaultInitialise()
pinapsController.deactivateAllLEDs()
aParser = NeuroParser()
#Parse all available Pinaps EEG data. Calculate vibrato value and send as MIDI message.
while True:
data = pinapsController.readEEGSensor()
aParser.parse(data, parserUpdateVibrato)
print("Message vibrato strength: ", vibratoPos)
msgModulate = mido.Message('control_change', control=CTRL_LFO_RATE, value=vibratoPos)
port.send(msgModulate)
#Sleep for defined message period.
time.sleep(MIDI_MESSAGE_PERIOD)
if __name__ == '__main__':
main() | [((49, 11, 49, 60), 'mido.open_output', 'mido.open_output', ({(49, 28, 49, 59): '"""USB Midi:USB Midi MIDI 1 20:0"""'}, {}), "('USB Midi:USB Midi MIDI 1 20:0')", False, 'import mido\n'), ((50, 18, 50, 83), 'mido.Message', 'mido.Message', (), '', False, 'import mido\n'), ((54, 23, 54, 41), 'pinaps.piNapsController.PiNapsController', 'PiNapsController', ({}, {}), '()', False, 'from pinaps.piNapsController import PiNapsController\n'), ((58, 14, 58, 27), 'NeuroParser.NeuroParser', 'NeuroParser', ({}, {}), '()', False, 'from NeuroParser import NeuroParser\n'), ((66, 22, 66, 93), 'mido.Message', 'mido.Message', (), '', False, 'import mido\n'), ((70, 8, 70, 39), 'time.sleep', 'time.sleep', ({(70, 19, 70, 38): 'MIDI_MESSAGE_PERIOD'}, {}), '(MIDI_MESSAGE_PERIOD)', False, 'import time\n')] |
windblood/kafka_stock | pipeline/visualization/single_tab.py | 8dbe4a1cf5c367b3c210683d4027bbfaf955ed41 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 31 11:47:47 2019
@author: yanyanyu
"""
"""
Tab1-plot1: candlestick
"""
import json
import datetime
import pandas as pd
from math import pi
from random import choice
from pytz import timezone
from bokeh.plotting import figure,show
from bokeh.palettes import all_palettes,Set3
from bokeh.models import ColumnDataSource, Select,HoverTool,LinearAxis, LabelSet,Range1d,PreText,Div
from warehouse import CassandraStorage
from util.util import pandas_factory,symbol_list,splitTextToTriplet,prev_weekday
from util.config import path,timeZone
def read_company(symbol):
with open(path+'visualization/company/{}.json'.format(symbol),'r') as f:
company=json.load(f)
companyOfficers=company['assetProfile']['companyOfficers']
officerString=''
for officer in companyOfficers:
officerString+=str('<br>      '+officer['name']+' - '+officer['title'])
buzzsummary='\n'.join(splitTextToTriplet('.'.join(company['summaryProfile']['longBusinessSummary'].split('.')[:3]),8))
institutionOwnership=company['institutionOwnership']['ownershipList']
institution_list=[]
for institution in institutionOwnership:
institution_list.append([institution['organization'],institution['position']['raw'],institution['pctHeld']['fmt']])
institution_df=pd.DataFrame(institution_list,columns=['organization','position','pctHeld'])
institution_df['organization']=[i.split(',')[0] for i in institution_df['organization']]
return company,buzzsummary,officerString,institution_df
def candlestick():
if '^GSPC' in symbol_list:
symbol_list.remove('^GSPC')
stock_select=Select(value=symbol_list[0],options=symbol_list)
summaryText = Div(text="",width=400)
financialText=Div(text="",width=180)
def update_summary(symbol):
company,buzzsummary,officerString,institution_df=read_company(symbol)
summaryText.text ="""<b><p style="color:blue;">Overview: </p></b>
<b>Company:</b> {}<br>
<b>Address:</b> {} <br>
<b>City:</b> {} <br>
<b>State:</b> {} <br>
<b>Website:</b> <a href="{}">{}</a> <br>
<b>Industry:</b> {} <br>
<b>Sector:</b> {} <br>
<b>Company Officers:</b> {} <br>
<b>Summary:</b> {} <br>""".format(company['price']['longName'],
company['summaryProfile']['address1'],
company['summaryProfile']['city'],
company['summaryProfile']['state'],
company['summaryProfile']['website'],
company['summaryProfile']['website'],
company['summaryProfile']['industry'],
company['summaryProfile']['sector'],
officerString,
buzzsummary)
financialText.text="""<b><p style="color:blue;">Financial: </p></b>
<b>Recommendation: {}</b> <br>
<b>Enterprise Value:</b> {} <br>
<b>Profit Margins:</b> {} <br>
<b>Beta:</b> {} <br>
<b>EBITDA:</b> {} <br>
<b>Total Debt:</b> {} <br>
<b>Total Revenue:</b> {}<br>
<b>DebtToEquity:</b> {}<br>
<b>Revenue Growth:</b> {} <br>
<b>Current Ratio:</b> {} <br>
<b>ROE:</b> {} <br>
<b>ROA:</b> {} <br>
<b>Gross Profits:</b> {} <br>
<b>Quick Ratio:</b> {} <br>
<b>Free Cashflow:</b> {} <br>
""".format(company['financialData']['recommendationKey'].upper(),
company['defaultKeyStatistics']['enterpriseValue']['fmt'],
company['defaultKeyStatistics']['profitMargins']['fmt'],
company['defaultKeyStatistics']['beta']['fmt'],
company['financialData']['ebitda']['fmt'],
company['financialData']['totalDebt']['fmt'],
company['financialData']['totalRevenue']['fmt'],
company['financialData']['debtToEquity']['fmt'],
company['financialData']['revenueGrowth']['fmt'],
company['financialData']['currentRatio']['fmt'],
                                   company['financialData']['returnOnEquity']['fmt'],
                                   company['financialData']['returnOnAssets']['fmt'],
company['financialData']['grossProfits']['fmt'],
company['financialData']['quickRatio']['fmt'],
company['financialData']['freeCashflow']['fmt'])
update_summary(stock_select.value)
# connect to Cassandra database
database=CassandraStorage(symbol_list[0])
database.session.row_factory = pandas_factory
database.session.default_fetch_size = None
query="SELECT * FROM {} WHERE time>'2015-01-01' ALLOW FILTERING;".format('{}_historical'.format(symbol_list[0]))
rslt = database.session.execute(query, timeout=None)
df = rslt._current_rows
# create color list
color=df.close>df.open
color=color.replace(True,'green')
color=color.replace(False,'red')
# set data source
source = ColumnDataSource(data=dict(close=list(df.close.values),
adjusted_close=list(df.adjusted_close.values),
open=list(df.open.values),
high=list(df.high.values),
low=list(df.low.values),
volume=list(df.volume.values),
time=list(df.time.dt.date.values),
color=list(color.values)))
# hover setting
TOOLTIPS = [
("time", "@time{%F}"),
("adjusted close", "$@adjusted_close"),
("close", "$@close"),
("open", "$@open"),
("high", "$@high"),
("low", "$@low"),
("volume","@volume")]
formatters={
'time' : 'datetime'}
hover = HoverTool(tooltips=TOOLTIPS,formatters=formatters,mode='vline')
# create figure
p = figure(title='{} Candlestick'.format(stock_select.value),plot_height=400,
tools="crosshair,save,undo,xpan,xwheel_zoom,xbox_zoom,reset",
active_scroll='xwheel_zoom',
x_axis_type="datetime")
p.add_tools(hover)
p.line('time', 'close', alpha=0.2, line_width=1, color='navy', source=source)
p.segment('time', 'high', 'time', 'low', line_width=1,color="black", source=source)
p.segment('time', 'open', 'time', 'close', line_width=3, color='color', source=source)
p.y_range = Range1d(min(source.data['close'])*0.3, max(source.data['close'])*1.05)
p.extra_y_ranges = {"volumes": Range1d(start=min(source.data['volume'])/2,
end=max(source.data['volume'])*2)}
p.add_layout(LinearAxis(y_range_name="volumes"), 'right')
p.vbar('time', width=3,top='volume', color=choice(all_palettes['Set2'][8]),alpha=0.5, y_range_name="volumes",source=source)
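    # Volume bars are drawn against the secondary 'volumes' y-range registered above, so
    # price and volume share the x-axis but keep independent vertical scales.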
p.xaxis.axis_label = 'Time'
# set data source
_,_,_,institution_df=read_company(symbol_list[0])
source_ins = ColumnDataSource(data=dict(organization=list(institution_df.organization.values),
pctHeld=list(institution_df.pctHeld.values),
position=list(institution_df.position.values),
color=Set3[12][:len(institution_df)]))
s1=figure(x_range=source_ins.data['organization'],plot_height=300,plot_width=700,title='Institution Ownership')
s1.vbar(x='organization', top='position', width=0.8, color='color', source=source_ins)
s1.xaxis.major_label_orientation = pi/7
labels = LabelSet(x='organization', y='position', text='pctHeld', level='glyph',
x_offset=-15, y_offset=-10, source=source_ins, render_mode='canvas',text_font_size="8pt")
s1.add_layout(labels)
    # callback function for Select tool 'stock_select'
def callback(attr,old,new):
symbol=stock_select.value
_,_,_,institution=read_company(symbol)
if symbol=='S&P500':
symbol='^GSPC'
database=CassandraStorage(symbol)
database.session.row_factory = pandas_factory
database.session.default_fetch_size = None
if symbol=='^GSPC':
symbol='GSPC'
query="SELECT * FROM {} WHERE time>'2015-01-01' ALLOW FILTERING;".format(symbol+'_historical')
rslt = database.session.execute(query, timeout=None)
df = rslt._current_rows
color=df.close>df.open
color=color.replace(True,'green')
color=color.replace(False,'red')
# update source data
source.data=dict(close=list(df.close.values),
adjusted_close=list(df.adjusted_close.values),
open=list(df.open.values),
high=list(df.high.values),
low=list(df.low.values),
volume=list(df.volume.values),
time=list(df.time.dt.date.values),
color=list(color.values))
source_ins.data=dict(organization=list(institution.organization.values),
pctHeld=list(institution.pctHeld.values),
position=list(institution.position.values),
color=Set3[12][:len(institution)])
p.title.text=symbol+' Candlestick'
p.y_range.start=min(source.data['close'])*0.3
p.y_range.end=max(source.data['close'])*1.05
p.extra_y_ranges['volumes'].start=min(source.data['volume'])/2.
p.extra_y_ranges['volumes'].end=max(source.data['volume'])*2.
s1.x_range.factors=source_ins.data['organization']
update_summary(symbol)
stock_select.on_change('value', callback)
return p,stock_select,summaryText,financialText,s1
def stream_price():
# connect to s&p500's database
plot_symbol='^GSPC'
database=CassandraStorage(plot_symbol)
database.session.row_factory = pandas_factory
database.session.default_fetch_size = None
# if datetime.datetime.now(timezone('US/Eastern')).time()<datetime.time(9,30):
# query_time=str(datetime.datetime.now().date())
last_trading_day= datetime.datetime.now(timezone(timeZone)).date()
query="SELECT * FROM {} WHERE time>='{}' ALLOW FILTERING;".format(plot_symbol[1:]+'_tick',last_trading_day)
rslt = database.session.execute(query, timeout=None)
df = rslt._current_rows
# wrangle timezone (Cassandra will change datetime to UTC time)
trans_time=pd.DatetimeIndex(pd.to_datetime(df.time,unit='ms')).tz_localize('GMT').tz_convert('US/Pacific').to_pydatetime()
trans_time=[i.replace(tzinfo=None) for i in trans_time]
source= ColumnDataSource()
# hover setting
TOOLTIPS = [
("time", "@time{%F %T}"),
("close", "$@close"),
("volume","@volume")]
formatters={
'time' : 'datetime'}
hover = HoverTool(tooltips=TOOLTIPS,formatters=formatters,mode='vline')
# create plot
p = figure(title='S&P500 Realtime Price',
plot_height=200,
tools="crosshair,save,undo,xpan,xwheel_zoom,ybox_zoom,reset",
x_axis_type="datetime",
y_axis_location="left")
p.add_tools(hover)
p.x_range.follow = "end"
p.x_range.follow_interval = 1000000
p.x_range.range_padding = 0
# during trading
if len(df)>0 \
and datetime.datetime.now(timezone(timeZone)).time()<datetime.time(16,0,0) \
and datetime.datetime.now(timezone(timeZone)).time()>datetime.time(9,30,0):
        # init source data to what is already stored in the Cassandra database table '{}_tick', so the streaming plot does not start over after refreshing
source= ColumnDataSource(dict(time=list(trans_time),
close=list(df.close.values),
volume=list(df.volume.values)))
p.y_range = Range1d(min(source.data['close'])/1.005, max(source.data['close'])*1.005)
p.extra_y_ranges = {"volumes": Range1d(start=min(source.data['volume'])*0.5,
end=max(source.data['volume'])*2)}
# no trading history or not during trading hour
else:
source= ColumnDataSource(dict(time=[],
close=[],
volume=[]))
p.y_range = Range1d(0,1e4)
p.extra_y_ranges = {"volumes": Range1d(start=0,
end=1e10)}
p.line(x='time', y='close', alpha=0.2, line_width=3, color='blue', source=source)
p.add_layout(LinearAxis(y_range_name="volumes"), 'right')
p.vbar('time', width=3,top='volume', color=choice(all_palettes['Set2'][8]),alpha=0.5, y_range_name="volumes",source=source)
    # get updated data from a json file that is overwritten every ~18 seconds
def _create_prices():
with open(path+'cache/data.json','r') as f:
dict_data = json.load(f)
return float(dict_data['close']),dict_data['volume'],dict_data['time']
# update function for stream plot
def update():
close,volume,time=_create_prices()
new_data = dict(
time=[datetime.datetime.strptime(time[:19], "%Y-%m-%d %H:%M:%S")],
close=[close],
volume=[volume]
)
#print(new_data)
source.stream(new_data)
#print ('update source data',str(time))
return p,update
| [((40, 19, 40, 95), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((48, 17, 48, 65), 'bokeh.models.Select', 'Select', (), '', False, 'from bokeh.models import ColumnDataSource, Select, HoverTool, LinearAxis, LabelSet, Range1d, PreText, Div\n'), ((49, 18, 49, 40), 'bokeh.models.Div', 'Div', (), '', False, 'from bokeh.models import ColumnDataSource, Select, HoverTool, LinearAxis, LabelSet, Range1d, PreText, Div\n'), ((50, 18, 50, 40), 'bokeh.models.Div', 'Div', (), '', False, 'from bokeh.models import ColumnDataSource, Select, HoverTool, LinearAxis, LabelSet, Range1d, PreText, Div\n'), ((107, 13, 107, 45), 'warehouse.CassandraStorage', 'CassandraStorage', ({(107, 30, 107, 44): 'symbol_list[0]'}, {}), '(symbol_list[0])', False, 'from warehouse import CassandraStorage\n'), ((143, 12, 143, 75), 'bokeh.models.HoverTool', 'HoverTool', (), '', False, 'from bokeh.models import ColumnDataSource, Select, HoverTool, LinearAxis, LabelSet, Range1d, PreText, Div\n'), ((173, 7, 173, 115), 'bokeh.plotting.figure', 'figure', (), '', False, 'from bokeh.plotting import figure, show\n'), ((176, 13, 177, 103), 'bokeh.models.LabelSet', 'LabelSet', (), '', False, 'from bokeh.models import ColumnDataSource, Select, HoverTool, LinearAxis, LabelSet, Range1d, PreText, Div\n'), ((230, 13, 230, 42), 'warehouse.CassandraStorage', 'CassandraStorage', ({(230, 30, 230, 41): 'plot_symbol'}, {}), '(plot_symbol)', False, 'from warehouse import CassandraStorage\n'), ((247, 12, 247, 30), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', ({}, {}), '()', False, 'from bokeh.models import ColumnDataSource, Select, HoverTool, LinearAxis, LabelSet, Range1d, PreText, Div\n'), ((256, 12, 256, 75), 'bokeh.models.HoverTool', 'HoverTool', (), '', False, 'from bokeh.models import ColumnDataSource, Select, HoverTool, LinearAxis, LabelSet, Range1d, PreText, Div\n'), ((259, 8, 263, 39), 'bokeh.plotting.figure', 'figure', (), '', False, 'from bokeh.plotting import figure, show\n'), ((28, 16, 28, 28), 'json.load', 'json.load', ({(28, 26, 28, 27): 'f'}, {}), '(f)', False, 'import json\n'), ((47, 8, 47, 35), 'util.util.symbol_list.remove', 'symbol_list.remove', ({(47, 27, 47, 34): '"""^GSPC"""'}, {}), "('^GSPC')", False, 'from util.util import pandas_factory, symbol_list, splitTextToTriplet, prev_weekday\n'), ((162, 17, 162, 51), 'bokeh.models.LinearAxis', 'LinearAxis', (), '', False, 'from bokeh.models import ColumnDataSource, Select, HoverTool, LinearAxis, LabelSet, Range1d, PreText, Div\n'), ((186, 17, 186, 41), 'warehouse.CassandraStorage', 'CassandraStorage', ({(186, 34, 186, 40): 'symbol'}, {}), '(symbol)', False, 'from warehouse import CassandraStorage\n'), ((286, 20, 286, 34), 'bokeh.models.Range1d', 'Range1d', ({(286, 28, 286, 29): '0', (286, 30, 286, 33): '10000.0'}, {}), '(0, 10000.0)', False, 'from bokeh.models import ColumnDataSource, Select, HoverTool, LinearAxis, LabelSet, Range1d, PreText, Div\n'), ((291, 17, 291, 51), 'bokeh.models.LinearAxis', 'LinearAxis', (), '', False, 'from bokeh.models import ColumnDataSource, Select, HoverTool, LinearAxis, LabelSet, Range1d, PreText, Div\n'), ((163, 47, 163, 78), 'random.choice', 'choice', ({(163, 54, 163, 77): "all_palettes['Set2'][8]"}, {}), "(all_palettes['Set2'][8])", False, 'from random import choice\n'), ((271, 61, 271, 82), 'datetime.time', 'datetime.time', ({(271, 75, 271, 77): '(16)', (271, 78, 271, 79): '(0)', (271, 80, 271, 81): '(0)'}, {}), '(16, 0, 0)', False, 'import datetime\n'), ((272, 61, 272, 82), 'datetime.time', 'datetime.time', 
({(272, 75, 272, 76): '(9)', (272, 77, 272, 79): '(30)', (272, 80, 272, 81): '(0)'}, {}), '(9, 30, 0)', False, 'import datetime\n'), ((287, 39, 288, 56), 'bokeh.models.Range1d', 'Range1d', (), '', False, 'from bokeh.models import ColumnDataSource, Select, HoverTool, LinearAxis, LabelSet, Range1d, PreText, Div\n'), ((292, 47, 292, 78), 'random.choice', 'choice', ({(292, 54, 292, 77): "all_palettes['Set2'][8]"}, {}), "(all_palettes['Set2'][8])", False, 'from random import choice\n'), ((298, 24, 298, 36), 'json.load', 'json.load', ({(298, 34, 298, 35): 'f'}, {}), '(f)', False, 'import json\n'), ((238, 44, 238, 62), 'pytz.timezone', 'timezone', ({(238, 53, 238, 61): 'timeZone'}, {}), '(timeZone)', False, 'from pytz import timezone\n'), ((305, 18, 305, 76), 'datetime.datetime.strptime', 'datetime.datetime.strptime', ({(305, 45, 305, 54): 'time[:19]', (305, 56, 305, 75): '"""%Y-%m-%d %H:%M:%S"""'}, {}), "(time[:19], '%Y-%m-%d %H:%M:%S')", False, 'import datetime\n'), ((271, 34, 271, 52), 'pytz.timezone', 'timezone', ({(271, 43, 271, 51): 'timeZone'}, {}), '(timeZone)', False, 'from pytz import timezone\n'), ((272, 34, 272, 52), 'pytz.timezone', 'timezone', ({(272, 43, 272, 51): 'timeZone'}, {}), '(timeZone)', False, 'from pytz import timezone\n'), ((245, 32, 245, 65), 'pandas.to_datetime', 'pd.to_datetime', (), '', True, 'import pandas as pd\n')] |
Wangjw6/project | traffic_predict/model.py | daae9de42fe7bf7ff29c20246e1164b62b7cef4a | # -*- coding:utf-8 -*-
import tensorflow as tf
class CNN:
def __init__(self, save_or_load_path=None, trainable=True, learning_rate = 0.00002,timestep=9,road=189,predstep=1):
self.trainable = trainable
self.learning_rate = learning_rate
self.road = road
self.input_size = timestep * road
self.output_size = predstep * road
self.bottom = tf.placeholder(tf.float32, shape=[None, self.input_size], name='input') # 25*2*6
self.target = tf.placeholder(tf.float32, shape=[None, self.output_size], name='target')
self.timestep = timestep
def weight_variable(self,shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(self,shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return initial
def conv2d(self,x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def conv1d(self,x, W):
return tf.nn.conv1d(x, W, stride=2, padding='SAME')
def max_pool_2x2(self,x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def build_CNN(self, ):
# conv first
bottom = tf.reshape(self.bottom, [-1, self.road, self.timestep, 1])
W_conv1 = self.weight_variable([3, 3, 1, 64])
b_conv1 = self.bias_variable([64])
h_conv1 = tf.nn.elu(self.conv2d(bottom, W_conv1) + b_conv1)
h_pool1 = self.max_pool_2x2(h_conv1)
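        # Shape check: with road=189 and timestep=9 the input is (189, 9, 1); the SAME-padded
        # 3x3 conv keeps 189x9, and the 2x2 max-pool (stride 2, SAME) yields ceil(189/2) x
        # ceil(9/2) = 95 x 5 maps, which is where the 95 * 5 * 64 flatten size below comes from.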
h_flat3 = tf.reshape(h_pool1, [-1, 95 * 5 * 64])
W_fc2 = self.weight_variable([95 * 5 * 64, 1200])
b_fc2 = self.bias_variable([1200])
h = tf.nn.elu(tf.matmul(h_flat3, W_fc2) + b_fc2)
# h_flat3 = tf.reshape(h_pool3, [-1, 400])
W_fc2 = self.weight_variable([1200, self.output_size])
b_fc2 = self.bias_variable([self.output_size])
self.predict = tf.nn.elu(tf.matmul(h, W_fc2) + b_fc2)
global_step = tf.Variable(0, trainable=False)
self.learning_rate = 0.0002 #tf.train.exponential_decay(0.001, global_step, 500, 0.9,staircase=True)
self.loss = tf.reduce_mean(tf.squared_difference(self.target, self.predict))
self.accuracy = 1. - tf.reduce_mean(abs(self.target-self.predict)/self.target)
self.trainop = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss, global_step=global_step)
# self.trainop = tf.train.RMSPropOptimizer(self.learning_rate, 0.99, 0.0, 1e-6).minimize(self.loss)
return self.predict
class CNN15:
def __init__(self, save_or_load_path=None, trainable=True, learning_rate = 0.00002,timestep=9,road=189,predstep=3):
self.trainable = trainable
self.learning_rate = learning_rate
self.road = road
self.input_size = timestep * road
self.output_size = predstep * road
self.bottom = tf.placeholder(tf.float32, shape=[None, self.input_size], name='input') # 25*2*6
self.target = tf.placeholder(tf.float32, shape=[None, self.output_size], name='target')
self.timestep = timestep
def weight_variable(self,shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(self,shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return initial
def conv2d(self,x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def conv1d(self,x, W):
return tf.nn.conv1d(x, W, stride=2, padding='SAME')
def max_pool_2x2(self,x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def build_CNN(self, ):
# conv first
bottom = tf.reshape(self.bottom, [-1, self.road, self.timestep, 1])
W_conv1 = self.weight_variable([3, 3, 1, 64])
b_conv1 = self.bias_variable([64])
h_conv1 = tf.nn.elu(self.conv2d(bottom, W_conv1) + b_conv1)
h_pool1 = self.max_pool_2x2(h_conv1)
h_flat3 = tf.reshape(h_pool1, [-1, 95 * 5 * 64])
W_fc2 = self.weight_variable([95 * 5 * 64, 1200])
b_fc2 = self.bias_variable([1200])
h = tf.nn.elu(tf.matmul(h_flat3, W_fc2) + b_fc2)
# h_flat3 = tf.reshape(h_pool3, [-1, 400])
W_fc2 = self.weight_variable([1200, self.output_size])
b_fc2 = self.bias_variable([self.output_size])
self.predict = tf.nn.elu(tf.matmul(h, W_fc2) + b_fc2)
global_step = tf.Variable(0, trainable=False)
self.learning_rate = 0.0002 #tf.train.exponential_decay(0.001, global_step, 500, 0.9,staircase=True)
self.loss = tf.reduce_mean(tf.squared_difference(self.target, self.predict))
self.accuracy = 1. - tf.reduce_mean(abs(self.target-self.predict)/self.target)
self.trainop = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss, global_step=global_step)
# self.trainop = tf.train.RMSPropOptimizer(self.learning_rate, 0.99, 0.0, 1e-6).minimize(self.loss)
return self.predict
class CNN30:
def __init__(self, save_or_load_path=None, trainable=True, learning_rate=0.00002,timestep=9,road=189,predstep=6):
self.trainable = trainable
self.learning_rate = learning_rate
self.road = road
self.input_size = timestep * road
self.output_size = predstep * road
self.bottom = tf.placeholder(tf.float32, shape=[None, self.input_size], name='input') # 25*2*6
self.target = tf.placeholder(tf.float32, shape=[None, self.output_size], name='target')
self.timestep = timestep
def weight_variable(self,shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(self,shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return initial
def conv2d(self,x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def conv1d(self,x, W):
return tf.nn.conv1d(x, W, stride=2, padding='SAME')
def max_pool_2x2(self,x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def build_CNN(self, ):
# conv first
bottom = tf.reshape(self.bottom, [-1, self.road, self.timestep, 1])
W_conv1 = self.weight_variable([3, 3, 1, 64])
b_conv1 = self.bias_variable([64])
h_conv1 = tf.nn.elu(self.conv2d(bottom, W_conv1) + b_conv1)
h_pool1 = self.max_pool_2x2(h_conv1)
h_flat3 = tf.reshape(h_pool1, [-1, 95 * 5 * 64])
W_fc2 = self.weight_variable([95 * 5 * 64, 1200])
b_fc2 = self.bias_variable([1200])
h = tf.nn.elu(tf.matmul(h_flat3, W_fc2) + b_fc2)
# h_flat3 = tf.reshape(h_pool3, [-1, 400])
W_fc2 = self.weight_variable([1200, self.output_size])
b_fc2 = self.bias_variable([self.output_size])
self.predict = tf.nn.elu(tf.matmul(h, W_fc2) + b_fc2)
global_step = tf.Variable(0, trainable=False)
self.learning_rate = 0.0002 # tf.train.exponential_decay(0.001, global_step, 500, 0.9,staircase=True)
self.loss = tf.reduce_mean(tf.squared_difference(self.target, self.predict))
self.accuracy = 1. - tf.reduce_mean(abs(self.target - self.predict) / self.target)
self.trainop = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss, global_step=global_step)
# self.trainop = tf.train.RMSPropOptimizer(self.learning_rate, 0.99, 0.0, 1e-6).minimize(self.loss)
return self.predict | [((13, 22, 13, 93), 'tensorflow.placeholder', 'tf.placeholder', (), '', True, 'import tensorflow as tf\n'), ((14, 22, 14, 95), 'tensorflow.placeholder', 'tf.placeholder', (), '', True, 'import tensorflow as tf\n'), ((18, 18, 18, 56), 'tensorflow.truncated_normal', 'tf.truncated_normal', (), '', True, 'import tensorflow as tf\n'), ((19, 15, 19, 35), 'tensorflow.Variable', 'tf.Variable', ({(19, 27, 19, 34): 'initial'}, {}), '(initial)', True, 'import tensorflow as tf\n'), ((22, 18, 22, 56), 'tensorflow.truncated_normal', 'tf.truncated_normal', (), '', True, 'import tensorflow as tf\n'), ((26, 15, 26, 71), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (), '', True, 'import tensorflow as tf\n'), ((29, 15, 29, 59), 'tensorflow.nn.conv1d', 'tf.nn.conv1d', (), '', True, 'import tensorflow as tf\n'), ((32, 15, 32, 90), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (), '', True, 'import tensorflow as tf\n'), ((37, 17, 37, 75), 'tensorflow.reshape', 'tf.reshape', ({(37, 28, 37, 39): 'self.bottom', (37, 41, 37, 74): '[-1, self.road, self.timestep, 1]'}, {}), '(self.bottom, [-1, self.road, self.timestep, 1])', True, 'import tensorflow as tf\n'), ((43, 18, 43, 56), 'tensorflow.reshape', 'tf.reshape', ({(43, 29, 43, 36): 'h_pool1', (43, 38, 43, 55): '[-1, 95 * 5 * 64]'}, {}), '(h_pool1, [-1, 95 * 5 * 64])', True, 'import tensorflow as tf\n'), ((53, 22, 53, 53), 'tensorflow.Variable', 'tf.Variable', (), '', True, 'import tensorflow as tf\n'), ((70, 22, 70, 93), 'tensorflow.placeholder', 'tf.placeholder', (), '', True, 'import tensorflow as tf\n'), ((71, 22, 71, 95), 'tensorflow.placeholder', 'tf.placeholder', (), '', True, 'import tensorflow as tf\n'), ((75, 18, 75, 56), 'tensorflow.truncated_normal', 'tf.truncated_normal', (), '', True, 'import tensorflow as tf\n'), ((76, 15, 76, 35), 'tensorflow.Variable', 'tf.Variable', ({(76, 27, 76, 34): 'initial'}, {}), '(initial)', True, 'import tensorflow as tf\n'), ((79, 18, 79, 56), 'tensorflow.truncated_normal', 'tf.truncated_normal', (), '', True, 'import tensorflow as tf\n'), ((83, 15, 83, 71), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (), '', True, 'import tensorflow as tf\n'), ((86, 15, 86, 59), 'tensorflow.nn.conv1d', 'tf.nn.conv1d', (), '', True, 'import tensorflow as tf\n'), ((89, 15, 89, 90), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (), '', True, 'import tensorflow as tf\n'), ((94, 17, 94, 75), 'tensorflow.reshape', 'tf.reshape', ({(94, 28, 94, 39): 'self.bottom', (94, 41, 94, 74): '[-1, self.road, self.timestep, 1]'}, {}), '(self.bottom, [-1, self.road, self.timestep, 1])', True, 'import tensorflow as tf\n'), ((100, 18, 100, 56), 'tensorflow.reshape', 'tf.reshape', ({(100, 29, 100, 36): 'h_pool1', (100, 38, 100, 55): '[-1, 95 * 5 * 64]'}, {}), '(h_pool1, [-1, 95 * 5 * 64])', True, 'import tensorflow as tf\n'), ((110, 22, 110, 53), 'tensorflow.Variable', 'tf.Variable', (), '', True, 'import tensorflow as tf\n'), ((128, 22, 128, 93), 'tensorflow.placeholder', 'tf.placeholder', (), '', True, 'import tensorflow as tf\n'), ((129, 22, 129, 95), 'tensorflow.placeholder', 'tf.placeholder', (), '', True, 'import tensorflow as tf\n'), ((133, 18, 133, 56), 'tensorflow.truncated_normal', 'tf.truncated_normal', (), '', True, 'import tensorflow as tf\n'), ((134, 15, 134, 35), 'tensorflow.Variable', 'tf.Variable', ({(134, 27, 134, 34): 'initial'}, {}), '(initial)', True, 'import tensorflow as tf\n'), ((137, 18, 137, 56), 'tensorflow.truncated_normal', 'tf.truncated_normal', (), '', True, 'import tensorflow as tf\n'), ((141, 15, 141, 71), 
'tensorflow.nn.conv2d', 'tf.nn.conv2d', (), '', True, 'import tensorflow as tf\n'), ((144, 15, 144, 59), 'tensorflow.nn.conv1d', 'tf.nn.conv1d', (), '', True, 'import tensorflow as tf\n'), ((147, 15, 147, 90), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (), '', True, 'import tensorflow as tf\n'), ((152, 17, 152, 75), 'tensorflow.reshape', 'tf.reshape', ({(152, 28, 152, 39): 'self.bottom', (152, 41, 152, 74): '[-1, self.road, self.timestep, 1]'}, {}), '(self.bottom, [-1, self.road, self.timestep, 1])', True, 'import tensorflow as tf\n'), ((158, 18, 158, 56), 'tensorflow.reshape', 'tf.reshape', ({(158, 29, 158, 36): 'h_pool1', (158, 38, 158, 55): '[-1, 95 * 5 * 64]'}, {}), '(h_pool1, [-1, 95 * 5 * 64])', True, 'import tensorflow as tf\n'), ((167, 22, 167, 53), 'tensorflow.Variable', 'tf.Variable', (), '', True, 'import tensorflow as tf\n'), ((55, 35, 55, 83), 'tensorflow.squared_difference', 'tf.squared_difference', ({(55, 57, 55, 68): 'self.target', (55, 70, 55, 82): 'self.predict'}, {}), '(self.target, self.predict)', True, 'import tensorflow as tf\n'), ((112, 35, 112, 83), 'tensorflow.squared_difference', 'tf.squared_difference', ({(112, 57, 112, 68): 'self.target', (112, 70, 112, 82): 'self.predict'}, {}), '(self.target, self.predict)', True, 'import tensorflow as tf\n'), ((169, 35, 169, 83), 'tensorflow.squared_difference', 'tf.squared_difference', ({(169, 57, 169, 68): 'self.target', (169, 70, 169, 82): 'self.predict'}, {}), '(self.target, self.predict)', True, 'import tensorflow as tf\n'), ((46, 22, 46, 47), 'tensorflow.matmul', 'tf.matmul', ({(46, 32, 46, 39): 'h_flat3', (46, 41, 46, 46): 'W_fc2'}, {}), '(h_flat3, W_fc2)', True, 'import tensorflow as tf\n'), ((50, 33, 50, 52), 'tensorflow.matmul', 'tf.matmul', ({(50, 43, 50, 44): 'h', (50, 46, 50, 51): 'W_fc2'}, {}), '(h, W_fc2)', True, 'import tensorflow as tf\n'), ((57, 23, 57, 65), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ({(57, 46, 57, 64): 'self.learning_rate'}, {}), '(self.learning_rate)', True, 'import tensorflow as tf\n'), ((103, 22, 103, 47), 'tensorflow.matmul', 'tf.matmul', ({(103, 32, 103, 39): 'h_flat3', (103, 41, 103, 46): 'W_fc2'}, {}), '(h_flat3, W_fc2)', True, 'import tensorflow as tf\n'), ((107, 33, 107, 52), 'tensorflow.matmul', 'tf.matmul', ({(107, 43, 107, 44): 'h', (107, 46, 107, 51): 'W_fc2'}, {}), '(h, W_fc2)', True, 'import tensorflow as tf\n'), ((114, 23, 114, 65), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ({(114, 46, 114, 64): 'self.learning_rate'}, {}), '(self.learning_rate)', True, 'import tensorflow as tf\n'), ((161, 22, 161, 47), 'tensorflow.matmul', 'tf.matmul', ({(161, 32, 161, 39): 'h_flat3', (161, 41, 161, 46): 'W_fc2'}, {}), '(h_flat3, W_fc2)', True, 'import tensorflow as tf\n'), ((165, 33, 165, 52), 'tensorflow.matmul', 'tf.matmul', ({(165, 43, 165, 44): 'h', (165, 46, 165, 51): 'W_fc2'}, {}), '(h, W_fc2)', True, 'import tensorflow as tf\n'), ((171, 23, 171, 65), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ({(171, 46, 171, 64): 'self.learning_rate'}, {}), '(self.learning_rate)', True, 'import tensorflow as tf\n')] |
SanLiWuXun/Virtual-Control | VirtualMouse-mediapipe.py | c3b38d4e2df201af851ca70a90de1fdc770158e4 | import cv2
import mediapipe as mp
from time import sleep
import numpy as np
import autopy
import pynput
wCam, hCam = 1280, 720
wScr, hScr = autopy.screen.size()
cap = cv2.VideoCapture(0)
cap.set(3, wCam)
cap.set(4, hCam)
mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
mouse = pynput.mouse.Controller()
def findNodeDistance(imgHeight, imgWidth, landmarks, index1, index2):
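    # Returns (dis, z_dis): the in-plane pixel distance between two hand landmarks and the
    # absolute difference of their relative depths. MediaPipe's normalized z is scaled by the
    # image width here so it is roughly comparable to the x/y pixel scale.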
x1 = int(landmarks[index1].x*imgWidth)
y1 = int(landmarks[index1].y*imgHeight)
z1 = int(landmarks[index1].z*imgWidth)
x2 = int(landmarks[index2].x*imgWidth)
y2 = int(landmarks[index2].y*imgHeight)
z2 = int(landmarks[index2].z*imgWidth)
dis = ((x1-x2)**2.0+(y1-y2)**2.0)**0.5
z_dis = abs(z1-z2)
return dis, z_dis
with mp_hands.Hands(
min_detection_confidence=0.8,
min_tracking_confidence=0.5) as hands:
while cap.isOpened():
success, image = cap.read()
if not success:
print("Ignoring empty camera frame.")
# If loading a video, use 'break' instead of 'continue'.
continue
# Flip the image horizontally for a later selfie-view display, and convert
# the BGR image to RGB.
image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
# To improve performance, optionally mark the image as not writeable to
# pass by reference.
image.flags.writeable = False
results = hands.process(image)
# Draw the hand annotations on the image.
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
if results.multi_hand_landmarks:
for hand_landmarks in results.multi_hand_landmarks:
mp_drawing.draw_landmarks(
image,
hand_landmarks,
mp_hands.HAND_CONNECTIONS)
#cx, cy = int(hand_landmarks.landmark[8].x*wCam), int(hand_landmarks.landmark[8].y*hCam)
targetX, targetY = int(hand_landmarks.landmark[8].x*wScr), int(hand_landmarks.landmark[8].y*hScr)
mouse.position = (targetX, targetY)
xy_dis_8_12, z_dis_8_12 = findNodeDistance(hCam, wCam, hand_landmarks.landmark, 8, 12)
xy_dis_12_16, z_dis_12_16 = findNodeDistance(hCam, wCam, hand_landmarks.landmark, 12, 16)
if xy_dis_8_12 < 40 and z_dis_8_12 < 20:
mouse.click(pynput.mouse.Button.left)
sleep(0.3)
if xy_dis_12_16 < 40 and z_dis_12_16 < 20:
mouse.click(pynput.mouse.Button.left, 2)
sleep(0.3)
cv2.imshow('MediaPipe Hands', image)
if cv2.waitKey(5) & 0xFF == 27:
break
cap.release() | [((9, 13, 9, 33), 'autopy.screen.size', 'autopy.screen.size', ({}, {}), '()', False, 'import autopy\n'), ((11, 6, 11, 25), 'cv2.VideoCapture', 'cv2.VideoCapture', ({(11, 23, 11, 24): '0'}, {}), '(0)', False, 'import cv2\n'), ((18, 8, 18, 33), 'pynput.mouse.Controller', 'pynput.mouse.Controller', ({}, {}), '()', False, 'import pynput\n'), ((52, 16, 52, 54), 'cv2.cvtColor', 'cv2.cvtColor', ({(52, 29, 52, 34): 'image', (52, 36, 52, 53): 'cv2.COLOR_RGB2BGR'}, {}), '(image, cv2.COLOR_RGB2BGR)', False, 'import cv2\n'), ((75, 8, 75, 44), 'cv2.imshow', 'cv2.imshow', ({(75, 19, 75, 36): '"""MediaPipe Hands"""', (75, 38, 75, 43): 'image'}, {}), "('MediaPipe Hands', image)", False, 'import cv2\n'), ((44, 29, 44, 47), 'cv2.flip', 'cv2.flip', ({(44, 38, 44, 43): 'image', (44, 45, 44, 46): '1'}, {}), '(image, 1)', False, 'import cv2\n'), ((76, 11, 76, 25), 'cv2.waitKey', 'cv2.waitKey', ({(76, 23, 76, 24): '(5)'}, {}), '(5)', False, 'import cv2\n'), ((70, 20, 70, 30), 'time.sleep', 'sleep', ({(70, 26, 70, 29): '(0.3)'}, {}), '(0.3)', False, 'from time import sleep\n'), ((73, 20, 73, 30), 'time.sleep', 'sleep', ({(73, 26, 73, 29): '(0.3)'}, {}), '(0.3)', False, 'from time import sleep\n')] |
JochenZoellner/tf_neiss-1 | util/tools/split_train_val.py | c91019e5bce6d3c7512237eec5ea997fd95304ac | import glob
import logging
import os
import shutil
import sys
"""script to divide a folder with generated/training data into a train and val folder
- val folder contains 500 Samples if not changed in source code
- DOES NOT work if images structured in subfolders, see below
- if there is no dir in the given folder -> split this folder
- if there are dir/s in the folder -> perform split on each folder
- split on sorted list -> repeated runs should give the same result
"""
def main(args):
foldername = args[1]
print("CWD: {}".format(os.getcwd()))
print("foldername: {}".format(foldername))
    dirs = next(os.walk(foldername))[1]  # immediate sub-directories (next() instead of the Python-2-only .next())
dirs = [os.path.join(foldername, x) for x in dirs]
print(dirs)
if len(dirs) == 0:
print("no subdirs found -> run directly on {}".format(foldername))
dirs = [foldername]
for dir in dirs:
print("perform split on {}".format(dir))
dir_path = dir
# image_list = sorted(glob.glob1(os.path.join(foldername, dir_path), "*.jpg"))
image_list = sorted(glob.glob1(dir_path, "*.jpg"))
# image_list = sorted(glob.glob1(dir_path , "*.png"))
if len(image_list) == 0:
logging.error("Could not find any '*.jpg' in {}".format(dir_path))
exit(1)
else:
print(" found {} images".format(len(image_list)))
# val_len = int(len(image_list) * 0.1)
val_len = int(500)
val_list = image_list[:val_len]
train_list = image_list[val_len:]
# save first 10%/500 of list to val list
for subdir, part_list in zip(["val", "train"], [val_list, train_list]):
os.makedirs(os.path.join(dir_path, subdir))
print(" move files in {}...".format(subdir))
for image_file in part_list:
shutil.move(os.path.join(dir_path, image_file), os.path.join(dir_path, subdir, image_file))
try:
shutil.move(os.path.join(dir_path, image_file + ".txt"),
os.path.join(dir_path, subdir, image_file + ".txt"))
except IOError as ex:
print(ex)
try:
shutil.move(os.path.join(dir_path, image_file + ".info"),
os.path.join(dir_path, subdir, image_file + ".info"))
except IOError as ex:
pass
print(" write list: {}...".format(os.path.join(dir_path, "{}_{}.lst".format(dir_path, subdir))))
with open(os.path.join(foldername, "{}_{}.lst".format(os.path.basename(dir_path), subdir)), "w") as fobj:
fobj.writelines([os.path.join(dir_path, subdir, x) + "\n" for x in part_list])
if __name__ == '__main__':
main(sys.argv)
| [((22, 12, 22, 39), 'os.path.join', 'os.path.join', ({(22, 25, 22, 35): 'foldername', (22, 37, 22, 38): 'x'}, {}), '(foldername, x)', False, 'import os\n'), ((18, 27, 18, 38), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((32, 28, 32, 57), 'glob.glob1', 'glob.glob1', ({(32, 39, 32, 47): 'dir_path', (32, 49, 32, 56): '"""*.jpg"""'}, {}), "(dir_path, '*.jpg')", False, 'import glob\n'), ((21, 11, 21, 30), 'os.walk', 'os.walk', ({(21, 19, 21, 29): 'foldername'}, {}), '(foldername)', False, 'import os\n'), ((48, 24, 48, 54), 'os.path.join', 'os.path.join', ({(48, 37, 48, 45): 'dir_path', (48, 47, 48, 53): 'subdir'}, {}), '(dir_path, subdir)', False, 'import os\n'), ((51, 28, 51, 62), 'os.path.join', 'os.path.join', ({(51, 41, 51, 49): 'dir_path', (51, 51, 51, 61): 'image_file'}, {}), '(dir_path, image_file)', False, 'import os\n'), ((51, 64, 51, 106), 'os.path.join', 'os.path.join', ({(51, 77, 51, 85): 'dir_path', (51, 87, 51, 93): 'subdir', (51, 95, 51, 105): 'image_file'}, {}), '(dir_path, subdir, image_file)', False, 'import os\n'), ((53, 32, 53, 75), 'os.path.join', 'os.path.join', ({(53, 45, 53, 53): 'dir_path', (53, 55, 53, 74): "(image_file + '.txt')"}, {}), "(dir_path, image_file + '.txt')", False, 'import os\n'), ((54, 32, 54, 83), 'os.path.join', 'os.path.join', ({(54, 45, 54, 53): 'dir_path', (54, 55, 54, 61): 'subdir', (54, 63, 54, 82): "(image_file + '.txt')"}, {}), "(dir_path, subdir, image_file + '.txt')", False, 'import os\n'), ((58, 32, 58, 76), 'os.path.join', 'os.path.join', ({(58, 45, 58, 53): 'dir_path', (58, 55, 58, 75): "(image_file + '.info')"}, {}), "(dir_path, image_file + '.info')", False, 'import os\n'), ((59, 32, 59, 84), 'os.path.join', 'os.path.join', ({(59, 45, 59, 53): 'dir_path', (59, 55, 59, 61): 'subdir', (59, 63, 59, 83): "(image_file + '.info')"}, {}), "(dir_path, subdir, image_file + '.info')", False, 'import os\n'), ((64, 66, 64, 92), 'os.path.basename', 'os.path.basename', ({(64, 83, 64, 91): 'dir_path'}, {}), '(dir_path)', False, 'import os\n'), ((65, 33, 65, 66), 'os.path.join', 'os.path.join', ({(65, 46, 65, 54): 'dir_path', (65, 56, 65, 62): 'subdir', (65, 64, 65, 65): 'x'}, {}), '(dir_path, subdir, x)', False, 'import os\n')] |
KonstantinKlepikov/Hands-on-Neuroevolution-with-Python | Chapter10/neuroevolution/distributed_helpers.py | cdd35fa21f2a091d176c140427ab1644d9ecd1f2 |
import threading
from queue import Queue
from multiprocessing.pool import ApplyResult
import tabular_logger as tlogger
class AsyncWorker(object):
@property
def concurrent_tasks(self):
raise NotImplementedError()
def run_async(self, task_id, task, callback):
raise NotImplementedError()
class WorkerHub(object):
def __init__(self, workers, input_queue, done_queue):
self.done_buffer = Queue()
self.workers = workers
self.available_workers = Queue()
self.done_queue = done_queue
self._cache = {}
self.input_queue = input_queue
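        # Queue wiring: tasks arrive on input_queue, idle (worker, subworker) slots are held
        # in available_workers, finished results are buffered in done_buffer and forwarded to
        # done_queue; _cache maps an in-flight (worker, subworker) pair back to its task_id.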
for w in workers:
for t in w.concurrent_tasks:
self.available_workers.put((w, t))
self.__initialize_handlers()
def __initialize_handlers(self):
self._input_handler = threading.Thread(
target=WorkerHub._handle_input,
args=(self,)
)
self._input_handler._state = 0
tlogger.info('WorkerHub: _input_handler initialized')
self._output_handler = threading.Thread(
target=WorkerHub._handle_output,
args=(self,)
)
self._output_handler._state = 0
tlogger.info('WorkerHub: _output_handler initialized')
def worker_callback(self, worker, subworker, result):
worker_task = (worker, subworker)
if worker_task in self._cache:
task_id = self._cache[worker_task]
del self._cache[worker_task]
self.done_buffer.put((task_id, result))
else:
tlogger.warn('WorkerHub: Worker task not found in cache', worker_task)
tlogger.warn('WorkerHub: Subworker', subworker)
tlogger.warn('WorkerHub: Unable to process result', result)
# Return worker back
self.available_workers.put(worker_task)
@staticmethod
def _handle_input(self):
try:
while True:
worker_task = self.available_workers.get()
if worker_task is None:
tlogger.info('WorkerHub._handle_input NO MORE WORKERS AWAILABLE')
break
worker, subworker = worker_task
task = self.input_queue.get()
if task is None:
tlogger.info('WorkerHub._handle_input NO MORE INPUTS AWAILABLE')
break
task_id, task = task
self._cache[worker_task] = task_id
# tlogger.info('WorkerHub: put task id: %s in cache keyed by worker task: %s' % (task_id, worker_task))
worker.run_async(subworker, task, callback=self.worker_callback)
except:
tlogger.exception('WorkerHub._handle_input exception thrown')
raise
@staticmethod
def _handle_output(self):
try:
while True:
result = self.done_buffer.get()
if result is None:
tlogger.info('WorkerHub._handle_output done')
break
self.done_queue.put(result)
except:
tlogger.exception('WorkerHub._handle_output exception thrown')
raise
def initialize(self):
self._input_handler.start()
self._output_handler.start()
def close(self):
self.available_workers.put(None)
self.input_queue.put(None)
self.done_buffer.put(None)
class AsyncTaskHub(object):
def __init__(self, input_queue=None, results_queue=None):
if input_queue is None:
input_queue = Queue(64)
self.input_queue = input_queue
self._cache = {}
self.results_queue = None
if results_queue is not None:
self.results_queue = results_queue
self._output_handler = threading.Thread(
target=AsyncTaskHub._handle_output,
args=(self,)
)
self._output_handler.daemon = True
self._output_handler._state = 0
self._output_handler.start()
@staticmethod
def _handle_output(self):
try:
while True:
result = self.results_queue.get()
if result is None:
tlogger.info('AsyncTaskHub._handle_output done')
break
self.put(result)
except:
tlogger.exception('AsyncTaskHub._handle_output exception thrown')
raise
def run_async(self, task, callback=None, error_callback=None):
result = ApplyResult(self._cache, callback, error_callback)
self.input_queue.put((result._job, task))
return result
def put(self, result):
job, result=result
self._cache[job]._set(0, (True, result))
| [((19, 27, 19, 34), 'queue.Queue', 'Queue', ({}, {}), '()', False, 'from queue import Queue\n'), ((21, 33, 21, 40), 'queue.Queue', 'Queue', ({}, {}), '()', False, 'from queue import Queue\n'), ((33, 30, 36, 13), 'threading.Thread', 'threading.Thread', (), '', False, 'import threading\n'), ((38, 8, 38, 61), 'tabular_logger.info', 'tlogger.info', ({(38, 21, 38, 60): '"""WorkerHub: _input_handler initialized"""'}, {}), "('WorkerHub: _input_handler initialized')", True, 'import tabular_logger as tlogger\n'), ((40, 31, 43, 13), 'threading.Thread', 'threading.Thread', (), '', False, 'import threading\n'), ((45, 8, 45, 62), 'tabular_logger.info', 'tlogger.info', ({(45, 21, 45, 61): '"""WorkerHub: _output_handler initialized"""'}, {}), "('WorkerHub: _output_handler initialized')", True, 'import tabular_logger as tlogger\n'), ((138, 17, 138, 67), 'multiprocessing.pool.ApplyResult', 'ApplyResult', ({(138, 29, 138, 40): 'self._cache', (138, 42, 138, 50): 'callback', (138, 52, 138, 66): 'error_callback'}, {}), '(self._cache, callback, error_callback)', False, 'from multiprocessing.pool import ApplyResult\n'), ((54, 12, 54, 82), 'tabular_logger.warn', 'tlogger.warn', ({(54, 25, 54, 68): '"""WorkerHub: Worker task not found in cache"""', (54, 70, 54, 81): 'worker_task'}, {}), "('WorkerHub: Worker task not found in cache', worker_task)", True, 'import tabular_logger as tlogger\n'), ((55, 12, 55, 59), 'tabular_logger.warn', 'tlogger.warn', ({(55, 25, 55, 47): '"""WorkerHub: Subworker"""', (55, 49, 55, 58): 'subworker'}, {}), "('WorkerHub: Subworker', subworker)", True, 'import tabular_logger as tlogger\n'), ((56, 12, 56, 71), 'tabular_logger.warn', 'tlogger.warn', ({(56, 25, 56, 62): '"""WorkerHub: Unable to process result"""', (56, 64, 56, 70): 'result'}, {}), "('WorkerHub: Unable to process result', result)", True, 'import tabular_logger as tlogger\n'), ((109, 26, 109, 35), 'queue.Queue', 'Queue', ({(109, 32, 109, 34): '64'}, {}), '(64)', False, 'from queue import Queue\n'), ((116, 35, 119, 17), 'threading.Thread', 'threading.Thread', (), '', False, 'import threading\n'), ((81, 12, 81, 73), 'tabular_logger.exception', 'tlogger.exception', ({(81, 30, 81, 72): '"""WorkerHub._handle_input exception thrown"""'}, {}), "('WorkerHub._handle_input exception thrown')", True, 'import tabular_logger as tlogger\n'), ((94, 12, 94, 74), 'tabular_logger.exception', 'tlogger.exception', ({(94, 30, 94, 73): '"""WorkerHub._handle_output exception thrown"""'}, {}), "('WorkerHub._handle_output exception thrown')", True, 'import tabular_logger as tlogger\n'), ((134, 12, 134, 77), 'tabular_logger.exception', 'tlogger.exception', ({(134, 30, 134, 76): '"""AsyncTaskHub._handle_output exception thrown"""'}, {}), "('AsyncTaskHub._handle_output exception thrown')", True, 'import tabular_logger as tlogger\n'), ((67, 20, 67, 85), 'tabular_logger.info', 'tlogger.info', ({(67, 33, 67, 84): '"""WorkerHub._handle_input NO MORE WORKERS AWAILABLE"""'}, {}), "('WorkerHub._handle_input NO MORE WORKERS AWAILABLE')", True, 'import tabular_logger as tlogger\n'), ((73, 20, 73, 84), 'tabular_logger.info', 'tlogger.info', ({(73, 33, 73, 83): '"""WorkerHub._handle_input NO MORE INPUTS AWAILABLE"""'}, {}), "('WorkerHub._handle_input NO MORE INPUTS AWAILABLE')", True, 'import tabular_logger as tlogger\n'), ((90, 20, 90, 65), 'tabular_logger.info', 'tlogger.info', ({(90, 33, 90, 64): '"""WorkerHub._handle_output done"""'}, {}), "('WorkerHub._handle_output done')", True, 'import tabular_logger as tlogger\n'), ((130, 20, 130, 68), 
'tabular_logger.info', 'tlogger.info', ({(130, 33, 130, 67): '"""AsyncTaskHub._handle_output done"""'}, {}), "('AsyncTaskHub._handle_output done')", True, 'import tabular_logger as tlogger\n')] |
tompis/casual | make/platform/registry.py | d838716c7052a906af8a19e945a496acdc7899a2 |
import os
registry = {}
class RegisterPlatform(object):
'''
    Class decorator that registers a platform implementation class in the module-level registry.
'''
def __init__(self, platform):
'''
        Store the platform key under which the decorated class will be registered.
'''
self.platform = platform
    def __call__(self, clazz):
        registry[self.platform] = clazz
        return clazz  # return the class so it remains bound to its name when used as a decorator
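# Hypothetical usage sketch (not part of the original file): decorating a platform-specific
# class registers it under its platform key so platform() below can look it up, e.g.
#
#   @RegisterPlatform("osx")
#   class OsxPlatform(object):
#       ...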
def platform():
# Decide on which platform this runs
platform = os.uname()[0].lower()
if platform == "darwin":
platform = "osx"
if not registry:
        raise SyntaxError("No platforms are registered.")
    return registry[platform]()
| [] |
everyvoter/everyvoter | mailer/admin.py | 65d9b8bdf9b5c64057135c279f6e03b6c207e0fa | """Django Admin Panels for App"""
from django.contrib import admin
from mailer import models
@admin.register(models.SendingAddress)
class SendingAddressAdmin(admin.ModelAdmin):
"""Admin View for SendingAddress"""
list_display = ('address', 'organization')
list_filter = ('organization__name',)
actions = None
def has_delete_permission(self, request, obj=None):
"""The primary address can not be deleted via the django admin"""
if obj and obj.pk == 1:
return False
else:
return True
| [((6, 1, 6, 38), 'django.contrib.admin.register', 'admin.register', ({(6, 16, 6, 37): 'models.SendingAddress'}, {}), '(models.SendingAddress)', False, 'from django.contrib import admin\n')] |
systemallica/django-belt | tests/settings.py | 3035a8bad26a108d9c78daaccb81ab8a9a9ebd41 | DEBUG = True
USE_TZ = True
SECRET_KEY = "dummy"
DATABASES = {"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:"}}
INSTALLED_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"rest_framework",
"django_filters",
"belt",
"tests.app",
]
SITE_ID = 1
ROOT_URLCONF = "tests.app.urls"
MIDDLEWARE = ()
REST_FRAMEWORK = {
"DEFAULT_FILTER_BACKENDS": ("django_filters.rest_framework.DjangoFilterBackend",)
}
| [] |
ALIGN-analoglayout/2018-01-ALIGN | Cell_Generation/fabric_CMC_NMOS.py | 931263cec2efc05d58657af9ecca88ae0040c3a5 | import sys
import json
import transformation
class StopPointGrid:
def __init__( self, nm, layer, direction, width, pitch, offset=0):
self.nm = nm
self.layer = layer
self.direction = direction
assert direction in ['v','h']
self.width = width
self.pitch = pitch
self.offset = offset
self.grid = []
self.legalStopVector = []
self.legalStopIndices = set()
def addGridPoint( self, value, isLegal):
self.grid.append( value)
self.legalStopVector.append( isLegal)
if isLegal:
self.legalStopIndices.add( len(self.grid)-1)
@property
def n( self):
return len(self.grid)-1
def value( self, idx):
whole = idx // self.n
fract = idx % self.n
while fract < 0:
whole -= 1
fract += self.n
assert fract in self.legalStopIndices
return whole * self.grid[-1] + self.grid[fract]
def segment( self, netName, pinName, center, bIdx, eIdx):
c = center*self.pitch + self.offset
c0 = c - self.width/2
c1 = c + self.width/2
if self.direction == 'h':
rect = [ bIdx, c0, eIdx, c1]
else:
rect = [ c0, bIdx, c1, eIdx]
return { 'netName' : netName, 'pin' : pinName, 'layer' : self.layer, 'rect' : rect}
def segment1( self, netName, pinName, bIdy, eIdy, bIdx, eIdx):
rect = [bIdx, bIdy, eIdx, eIdy]
return { 'netName' : netName, 'pin' : pinName, 'layer' : self.layer, 'rect' : rect}
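
# Illustrative sketch (not part of the original generator): a StopPointGrid
# models one layer as evenly pitched tracks plus a repeating list of legal
# stop offsets. The numbers below are made up for illustration, not PDK data.
#
#   m1 = StopPointGrid('m1', 'M1', 'v', width=18, pitch=54, offset=37)
#   m1.addGridPoint(0, False)    # period start, not a legal stop
#   m1.addGridPoint(27, True)    # legal stop inside the period
#   m1.addGridPoint(54, False)   # period end (equals grid[-1])
#   m1.value(1)                  # -> 27; value(3) -> 81, wrapping by period
#   m1.segment('net0', 'PIN0', 2, 0, 100)  # vertical wire on track 2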
class UnitCell:
def computeBbox( self):
self.bbox = transformation.Rect(None,None,None,None)
for term in self.terminals:
r = transformation.Rect( *term['rect'])
if self.bbox.llx is None or self.bbox.llx > r.llx: self.bbox.llx = r.llx
if self.bbox.lly is None or self.bbox.lly > r.lly: self.bbox.lly = r.lly
if self.bbox.urx is None or self.bbox.urx < r.urx: self.bbox.urx = r.urx
if self.bbox.ury is None or self.bbox.ury < r.ury: self.bbox.ury = r.ury
def __init__( self ):
self.terminals = []
m0Pitch = 54
m1Pitch = 54
m2Pitch = 54
m3Pitch = 54
plPitch = 54
plOffset = 10
m1Offset = 37
m2Offset = 9
m3Offset = 37
v0Pitch = 36
v1Pitch = m2Pitch
v2Pitch = m2Pitch
dcPitch = 36
finPitch = 27
m0Width = 18
m1Width = 18
m2Width = 18
m3Width = 18
dcWidth = 18
plWidth = 20
lisdWidth = 24
sdtWidth = 24
v0Width = 18
v1Width = 18
v2Width = 18
finWidth = 7
gcutWidth = 18
pcWidth = 18
finDummy = 4
pc_gateExtension = 1 ###Fig. 1 of Ref. [1]
pcLength = (gate_u-1)*plPitch + plWidth + (2*pc_gateExtension)
plActive = 25 ###Fig. 1 of Ref. [1]
extension_y = 0
K_space = extension_y // finPitch
fin_enclosure = 10
activeWidth1 = finPitch*fin_u
activeWidth = finPitch*fin_u1
activePitch = activeWidth1 + (2*finDummy + K_space)*finPitch + extension_y
activeOffset = (activeWidth/2) + finDummy*finPitch - fin_enclosure
pcPitch = activePitch
gcutPitch = activePitch
pc_activeDistance = 30
pc_gcutDistance = 7
pcOffset = activeOffset + pc_activeDistance + pcWidth/2 + activeWidth/2
gcutOffset = activePitch - gcutWidth/2
stoppoint = (dcWidth//2 + plOffset-plWidth//2)//2
self.m0 = StopPointGrid( 'm0', 'M0', 'h', width=m0Width, pitch=m0Pitch)
self.m0.addGridPoint( 0, False)
self.m0.addGridPoint( stoppoint, True)
self.m0.addGridPoint( plOffset, False)
self.m0.addGridPoint( dcPitch-stoppoint, True)
self.m0.addGridPoint( dcPitch, False)
self.m1 = StopPointGrid( 'm1', 'M1', 'v', width=m1Width, pitch=m1Pitch, offset=m1Offset)
self.m1.addGridPoint( 0, False)
self.m1.addGridPoint( stoppoint, True)
self.m1.addGridPoint( 2*m0Pitch, False)
self.m1.addGridPoint( 4*m0Pitch-stoppoint, True)
self.m1.addGridPoint( 4*m0Pitch, False)
self.m2 = StopPointGrid( 'm2', 'M2', 'h', width=m2Width, pitch=m2Pitch, offset=m2Offset)
self.m2.addGridPoint( 0, False)
self.m2.addGridPoint( stoppoint, True)
self.m2.addGridPoint( 2*m0Pitch, False)
self.m2.addGridPoint( 4*m0Pitch-stoppoint, True)
self.m2.addGridPoint( 4*m0Pitch, False)
self.m3 = StopPointGrid( 'm3', 'M3', 'v', width=m3Width, pitch=m3Pitch, offset=m3Offset)
self.m3.addGridPoint( 0, False)
self.m3.addGridPoint( stoppoint, True)
self.m3.addGridPoint( 2*m0Pitch, False)
self.m3.addGridPoint( 4*m0Pitch-stoppoint, True)
self.m3.addGridPoint( 4*m0Pitch, False)
self.pl = StopPointGrid( 'pl', 'poly', 'v', width=plWidth, pitch=plPitch, offset=plOffset)
self.pl.addGridPoint( 0, False)
self.pl.addGridPoint( stoppoint, True)
self.pl.addGridPoint( 2*m0Pitch, False)
self.pl.addGridPoint( 4*m0Pitch-stoppoint, True)
self.pl.addGridPoint( 4*m0Pitch, False)
self.dc = StopPointGrid( 'dc', 'diffcon', 'v', width=dcWidth, pitch=dcPitch)
self.dc.addGridPoint( 0, False)
self.dc.addGridPoint( stoppoint, True)
self.dc.addGridPoint( 2*m0Pitch, False)
self.dc.addGridPoint( 4*m0Pitch-stoppoint, True)
self.dc.addGridPoint( 4*m0Pitch, False)
self.v0 = StopPointGrid( 'v0', 'via0', 'v', width=v0Width, pitch=v0Pitch)
self.v0.addGridPoint( 0, False)
self.v0.addGridPoint( stoppoint, True)
self.v0.addGridPoint( 2*m0Pitch, False)
self.v0.addGridPoint( 4*m0Pitch-stoppoint, True)
self.v0.addGridPoint( 4*m0Pitch, False)
self.v1 = StopPointGrid( 'v1', 'via1', 'h', width=v1Width, pitch=v1Pitch, offset=m2Offset)
self.v1.addGridPoint( 0, False)
self.v1.addGridPoint( stoppoint, True)
self.v1.addGridPoint( 2*m0Pitch, False)
self.v1.addGridPoint( 4*m0Pitch-stoppoint, True)
self.v1.addGridPoint( 4*m0Pitch, False)
self.v2 = StopPointGrid( 'v2', 'via2', 'h', width=v2Width, pitch=v2Pitch, offset=m2Offset)
self.v2.addGridPoint( 0, False)
self.v2.addGridPoint( stoppoint, True)
self.v2.addGridPoint( 2*m0Pitch, False)
self.v2.addGridPoint( 4*m0Pitch-stoppoint, True)
self.v2.addGridPoint( 4*m0Pitch, False)
self.fin = StopPointGrid( 'fin', 'fin', 'h', width=finWidth, pitch=finPitch, offset=finWidth/2)
self.fin.addGridPoint( 0, False)
self.fin.addGridPoint( stoppoint, True)
self.fin.addGridPoint( plOffset, False)
self.fin.addGridPoint( dcPitch-stoppoint, True)
self.fin.addGridPoint( dcPitch, False)
self.active = StopPointGrid( 'active', 'active', 'h', width=activeWidth, pitch=activePitch, offset=activeOffset)
self.active.addGridPoint( 0, False)
self.active.addGridPoint( stoppoint, True)
self.active.addGridPoint( plOffset, False)
self.active.addGridPoint( dcPitch-stoppoint, True)
self.active.addGridPoint( dcPitch, False)
self.nselect = StopPointGrid( 'nselect', 'nselect', 'v', width=activeWidth, pitch=activePitch, offset=activeOffset)
self.gcut = StopPointGrid( 'GCUT', 'GCUT', 'h', width=gcutWidth, pitch=gcutPitch, offset=gcutOffset)
self.gcut.addGridPoint( 0, False)
self.gcut.addGridPoint( stoppoint, True)
self.gcut.addGridPoint( plOffset, False)
self.gcut.addGridPoint( dcPitch-stoppoint, True)
self.gcut.addGridPoint( dcPitch, False)
self.gcut1 = StopPointGrid( 'GCUT', 'GCUT', 'h', width=gcutWidth, pitch=gcutPitch, offset=gcutWidth/2)
self.gcut1.addGridPoint( 0, False)
self.gcut1.addGridPoint( stoppoint, True)
self.gcut1.addGridPoint( plOffset, False)
self.gcut1.addGridPoint( dcPitch-stoppoint, True)
self.gcut1.addGridPoint( dcPitch, False)
self.pc = StopPointGrid( 'pc', 'polycon', 'h', width=pcWidth, pitch=pcPitch, offset=pcOffset)
self.pc.addGridPoint( 0, False)
self.pc.addGridPoint( stoppoint, True)
self.pc.addGridPoint( dcPitch//2, False)
self.pc.addGridPoint( dcPitch-stoppoint, True)
self.pc.addGridPoint( dcPitch, False)
self.lisd = StopPointGrid( 'LISD', 'LISD', 'v', width=lisdWidth, pitch=m1Pitch, offset=m1Offset)
self.lisd.addGridPoint( 0, False)
self.lisd.addGridPoint( stoppoint, True)
self.lisd.addGridPoint( 2*m0Pitch, False)
self.lisd.addGridPoint( 4*m0Pitch-stoppoint, True)
self.lisd.addGridPoint( 4*m0Pitch, False)
self.sdt = StopPointGrid( 'SDT', 'SDT', 'v', width=sdtWidth, pitch=m1Pitch, offset=m1Offset)
self.sdt.addGridPoint( 0, False)
self.sdt.addGridPoint( stoppoint, True)
self.sdt.addGridPoint( 2*m0Pitch, False)
self.sdt.addGridPoint( 4*m0Pitch-stoppoint, True)
self.sdt.addGridPoint( 4*m0Pitch, False)
def addSegment( self, grid, netName, pinName, c, bIdx, eIdx):
segment = grid.segment( netName, pinName, c, bIdx, eIdx)
self.terminals.append( segment)
return segment
def addSegment1( self, grid, netName, pinName, bIdy, eIdy, bIdx, eIdx):
segment1 = grid.segment1( netName, pinName, bIdy, eIdy, bIdx, eIdx)
self.terminals.append( segment1)
return segment1
def m0Segment( self, netName, pinName, x0, x1, y): return self.addSegment( self.m0, netName, pinName, y, x0, x1)
def m1Segment( self, netName, pinName, x, y0, y1): return self.addSegment( self.m1, netName, pinName, x, y0, y1)
def m2Segment( self, netName, pinName, x0, x1, y): return self.addSegment( self.m2, netName, pinName, y, x0, x1)
def m3Segment( self, netName, pinName, x, y0, y1): return self.addSegment( self.m3, netName, pinName, x, y0, y1)
def plSegment( self, netName, pinName, x, y0, y1): return self.addSegment( self.pl, netName, pinName, x, y0, y1)
def dcSegment( self, netName, pinName, x, y0, y1): return self.addSegment( self.dc, netName, pinName, x, y0, y1)
def finSegment( self, netName, pinName, x0, x1, y): return self.addSegment( self.fin, netName, pinName, y, x0, x1)
def activeSegment( self, netName, pinName, x0, x1, y): return self.addSegment( self.active, netName, pinName, y, x0, x1)
def nselectSegment( self, netName, pinName, y0, y1, x0, x1): return self.addSegment1( self.nselect, netName, pinName, y0, y1, x0, x1)
def gcutSegment( self, netName, pinName, x0, x1, y): return self.addSegment( self.gcut, netName, pinName, y, x0, x1)
def gcut1Segment( self, netName, pinName, x0, x1, y): return self.addSegment( self.gcut1, netName, pinName, y, x0, x1)
def pcSegment( self, netName, pinName, x0, x1, y): return self.addSegment( self.pc, netName, pinName, y, x0, x1)
def v0Segment( self, netName, pinName, y0, y1, x0, x1): return self.addSegment1( self.v0, netName, pinName, y0, y1, x0, x1)
def lisdSegment( self, netName, pinName, x, y0, y1): return self.addSegment( self.lisd, netName, pinName, x, y0, y1)
def sdtSegment( self, netName, pinName, x, y0, y1): return self.addSegment( self.sdt, netName, pinName, x, y0, y1)
def v1Segment( self, netName, pinName, x0, x1, y): return self.addSegment( self.v1, netName, pinName, y, x0, x1)
def v2Segment( self, netName, pinName, x0, x1, y): return self.addSegment( self.v2, netName, pinName, y, x0, x1)
def unit( self, x, y):
######## Basic data #############
m1Pitch = 54
m1Offset = 37
m1Width = 18
m2Pitch = 54
m2Width = 18
lisdWidth = 24
sdtWidth = 24
v0Width = 18
v1Width = 18
gcutWidth = 18
v0Pitch = 36
v_enclosure = 7
poly_enclosure = 7
plPitch = 54
finPitch = 27
finWidth = 7
plWidth = 20
plActive = 25
plActive_s = 29
pcWidth = 18
pc_gateExtension = 1
pc_activeDistance = 30
pcLength = (gate_u-1)*plPitch + plWidth + (2*pc_gateExtension)
extension_x = (plPitch - plWidth)/2
extension_y = 0
K_space = extension_y // finPitch
fin_enclosure = 10
######## Derived from Basic data ###########
finDummy = 4
fin = int(round(fin_u + 2*finDummy))
fin1 = int(round(fin_u + 1))
gate = int(round(gate_u + 2))
activeWidth_h = ((gate - 3)) * plPitch + (plActive * 2) + plWidth
activeWidth1 = finPitch*fin_u
activeWidth = finPitch*fin_u1
activePitch = activeWidth1 + (2*finDummy + K_space)*finPitch + extension_y
activeOffset = (activeWidth/2) + finDummy*finPitch - fin_enclosure
pcOffset = activeOffset + pc_activeDistance + pcWidth/2 + activeWidth/2
cont_no = (activeWidth//v0Pitch -1)
pcPitch = activePitch
x_length = ((gate-1)*plPitch) + plWidth + extension_x
y_length = fin * finPitch + extension_y
y_total = y_length*y_cells
m1Length = m2Width + (2*v_enclosure) + (m2Pitch*((fin_u+2)//2))
m1PCLength = m2Width + (2*v_enclosure) + (m2Pitch*((fin_u+4)//2))
m2_tracks = int(round(y_total/m2Pitch))
SA = []
SB = []
DA = []
DB = []
GA = []
GB = []
for k in range(x_cells//2):
if k%2 == 0:
p = 0
else:
p = 4
lS = 8*k + p
lG = lS + 1
lD = lS + gate_u
SA.append(lS)
GA.append(lG)
DA.append(lD)
for k in range(x_cells//2):
if k%2 == 0:
p = 4
else:
p = 0
lS = 8*k + p
lG = lS + 1
lD = lS + gate_u
SB.append(lS)
GB.append(lG)
DB.append(lD)
for i in range(gate):
uc.plSegment( 'g', 'NA', (i+(x*gate)), ((y*y_length)+((y-1)*extension_y)), (((1+y)*y_length)+(y*extension_y)))
if i < (gate-1):
if i == 0 or i == gate_u:
uc.lisdSegment( 'LISD', 'NA', (i+(x*gate)), (finDummy*finPitch - fin_enclosure + y*activePitch), (finDummy*finPitch - fin_enclosure + activeWidth + y*activePitch))
uc.sdtSegment( 'SDT', 'NA', (i+(x*gate)), (finDummy*finPitch - fin_enclosure + y*activePitch), (finDummy*finPitch - fin_enclosure + activeWidth + y*activePitch))
for j in range(cont_no):
uc.v0Segment( 'v0', 'NA', (finDummy*finPitch - fin_enclosure + j*v0Pitch + y*activePitch + v0Width), (finDummy*finPitch - fin_enclosure + j*v0Pitch + y*activePitch + 2*v0Width ), (m1Offset - (m1Width/2) + i*m1Pitch + x*gate*plPitch), (m1Offset - (m1Width/2) + i*m1Pitch +x*gate*plPitch + v0Width) )
else:
uc.v0Segment( 'v0', 'NA', ( pcOffset - pcWidth/2 + y*activePitch), (pcOffset - pcWidth/2 + y*activePitch + v0Width ), (m1Offset - (m1Width/2) + i*m1Pitch + x*gate*plPitch), (m1Offset - (m1Width/2) + i*m1Pitch + x*gate*plPitch + v0Width) )
for i in range(fin):
uc.finSegment( 'fin', 'NA', (((x-1)*extension_x)+ x*x_length), ((1+x)*(x_length)+x*extension_x), (i+(y*fin) + (2*K_space)*y))
uc.gcutSegment( 'GCUT', 'NA', (((x-1)*extension_x)+ x*x_length), ((1+x)*(x_length)+x*extension_x), y)
if y == 0:
uc.gcut1Segment( 'GCUT', 'NA', (((x-1)*extension_x)+ x*x_length), ((1+x)*(x_length)+x*extension_x), 0)
uc.activeSegment( 'active', 'NA', (plActive_s+ x*(plPitch*gate)), ( activeWidth_h + plActive_s + x*(plPitch * gate)), y)
uc.pcSegment( 'PC', 'NA', ( plPitch - pc_gateExtension + x*(gate*plPitch)), (plPitch - pc_gateExtension + x*(gate*plPitch) + pcLength), y)
if x == x_cells -1 and y == y_cells -1:
uc.nselectSegment( 'nselect', 'NA', 0, (((y+1)*y_length)+((y)*extension_y)), (((0-1)*extension_x)), ((1+x)*(x_length)+x*extension_x))
##### Routing for CMC Load
############### M3 routing ###########################
for i in range(3):
if x == 0 and y_cells > 1 and i == 0:
if y == 0:
uc.m3Segment( 'm3', 'SA', (i+(x*gate)), ((m2Pitch*(finDummy//2-1)) - v_enclosure), ((y_cells-1)*m2Pitch*(K_space + fin//2) - v_enclosure + m1Length + (m2Pitch*(finDummy//2))))
uc.v2Segment( 'v2', 'NA', ((i+x*gate)*m1Pitch + m1Offset - v1Width/2), ((i+x*gate)*m1Pitch + m1Offset + v1Width - v1Width/2), (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 1))
if x == 0 and y_cells > 1 and i == 1:
if y == 0:
uc.m3Segment( 'm3', 'G', (i+(x*gate)), ((m2Pitch*(finDummy//2-1)) - v_enclosure), ((y_cells-1)*m2Pitch*(K_space + fin//2) - v_enclosure + m1Length + (m2Pitch*(finDummy//2))))
uc.v2Segment( 'v2', 'NA', ((i+x*gate)*m1Pitch + m1Offset - v1Width/2), ((i+x*gate)*m1Pitch + m1Offset + v1Width - v1Width/2), (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 0))
if x == 0 and y_cells > 1 and i == 2:
if y == 0:
uc.m3Segment( 'm3', 'DA', (i+(x*gate)), ((m2Pitch*(finDummy//2-1)) - v_enclosure), ((y_cells-1)*m2Pitch*(K_space + fin//2) - v_enclosure + m1Length + (m2Pitch*(finDummy//2))))
uc.v2Segment( 'v2', 'NA', ((i+x*gate)*m1Pitch + m1Offset - v1Width/2), ((i+x*gate)*m1Pitch + m1Offset + v1Width - v1Width/2), (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 2))
if x == 1 and y_cells > 1 and i == 0:
if y == 0:
uc.m3Segment( 'm3', 'SB', (i+(x*gate)), ((m2Pitch*(finDummy//2-1)) - v_enclosure), ((y_cells-1)*m2Pitch*(K_space + fin//2) - v_enclosure + m1Length + (m2Pitch*(finDummy//2))))
uc.v2Segment( 'v2', 'NA', ((i+x*gate)*m1Pitch + m1Offset - v1Width/2), ((i+x*gate)*m1Pitch + m1Offset + v1Width - v1Width/2), (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 3))
if x == 1 and y_cells > 1 and i == 2:
if y == 0:
uc.m3Segment( 'm3', 'DB', (i+(x*gate)), ((m2Pitch*(finDummy//2-1)) - v_enclosure), ((y_cells-1)*m2Pitch*(K_space + fin//2) - v_enclosure + m1Length + (m2Pitch*(finDummy//2))))
uc.v2Segment( 'v2', 'NA', ((i+x*gate)*m1Pitch + m1Offset - v1Width/2), ((i+x*gate)*m1Pitch + m1Offset + v1Width - v1Width/2), (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 4))
############### M2 routing ###########################
for i in range((m2_tracks+1)):
if i == (2*y*(m2_tracks //y_cells + K_space)):
uc.m2Segment( 'm2', 'GND', (((x-1)*extension_x)+ x*x_length), ((1+x)*(x_length)+x*extension_x), i)
if i == ((2*y+1)*(m2_tracks //y_cells + K_space)):
uc.m2Segment( 'm2', 'VDD', (((x-1)*extension_x)+ x*x_length), ((1+x)*(x_length)+x*extension_x), i)
if x_cells > 1 and x == 0 and i == (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 0):
uc.m2Segment( 'm2', 'G', (m1Offset - m1Width/2 - v_enclosure), (m1Offset + m1Width/2 + v_enclosure + (x_cells*gate-2)*m1Pitch), i)
if x_cells > 1 and x == 0 and i == (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 1):
uc.m2Segment( 'm2', 'SA', (m1Offset - m1Width/2 - v_enclosure), (m1Offset + m1Width/2 + v_enclosure + (x_cells*gate-2)*m1Pitch), i)
if x_cells > 1 and x == 0 and i == (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 3):
uc.m2Segment( 'm2', 'SB', (m1Offset - m1Width/2 - v_enclosure), (m1Offset + m1Width/2 + v_enclosure + (x_cells*gate-2)*m1Pitch), i)
if x_cells > 1 and x == 0 and i == (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 2):
uc.m2Segment( 'm2', 'DA', (m1Offset - m1Width/2 - v_enclosure), (m1Offset + m1Width/2 + v_enclosure + (x_cells*gate-2)*m1Pitch), i)
if x_cells > 1 and x == 0 and i == (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 4):
uc.m2Segment( 'm2', 'DB', (m1Offset - m1Width/2 - v_enclosure), (m1Offset + m1Width/2 + v_enclosure + (x_cells*gate-2)*m1Pitch), i)
################# M1 routing ######################
if (x_cells - 1 - x) == 0:
if (y % 2) == 0:
for i in DA:
uc.v1Segment( 'v1', 'NA', (i*m1Pitch + m1Offset - v1Width/2), (i*m1Pitch + m1Offset + v1Width - v1Width/2), (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 2))
uc.m1Segment( 'D', 'DA', i, ((m2Pitch*(finDummy//2-1)) - v_enclosure + y*m2Pitch*(K_space + fin//2) ), (y*m2Pitch*(K_space + fin//2) - v_enclosure + m1Length + (m2Pitch*(finDummy//2-1))))
for i in DB:
uc.v1Segment( 'v1', 'NA', (i*m1Pitch + m1Offset - v1Width/2), (i*m1Pitch + m1Offset + v1Width - v1Width/2), (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 4))
uc.m1Segment( 'D', 'DB', i, ((m2Pitch*(finDummy//2-1)) - v_enclosure + y*m2Pitch*(K_space + fin//2) ), (y*m2Pitch*(K_space + fin//2) - v_enclosure + m1Length + (m2Pitch*(finDummy//2-1))))
for i in SA:
uc.v1Segment( 'v1', 'NA', (i*m1Pitch + m1Offset - v1Width/2), (i*m1Pitch + m1Offset + v1Width - v1Width/2), (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 1))
uc.m1Segment( 'S', 'SA', i, ((m2Pitch*(finDummy//2-1)) - v_enclosure + y*m2Pitch*(K_space + fin//2) ), (y*m2Pitch*(K_space + fin//2) - v_enclosure + m1Length + (m2Pitch*(finDummy//2-1))))
for i in SB:
uc.v1Segment( 'v1', 'NA', (i*m1Pitch + m1Offset - v1Width/2), (i*m1Pitch + m1Offset + v1Width - v1Width/2), (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 3))
uc.m1Segment( 'S', 'SB', i, ((m2Pitch*(finDummy//2-1)) - v_enclosure + y*m2Pitch*(K_space + fin//2) ), (y*m2Pitch*(K_space + fin//2) - v_enclosure + m1Length + (m2Pitch*(finDummy//2-1))))
else:
for i in DA:
uc.v1Segment( 'v1', 'NA', (i*m1Pitch + m1Offset - v1Width/2), (i*m1Pitch + m1Offset + v1Width - v1Width/2), (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 4))
uc.m1Segment( 'D', 'DA', i, ((m2Pitch*(finDummy//2-1)) - v_enclosure + y*m2Pitch*(K_space + fin//2) ), (y*m2Pitch*(K_space + fin//2) - v_enclosure + m1Length + (m2Pitch*(finDummy//2-1))))
for i in DB:
uc.v1Segment( 'v1', 'NA', (i*m1Pitch + m1Offset - v1Width/2), (i*m1Pitch + m1Offset + v1Width - v1Width/2), (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 2))
uc.m1Segment( 'D', 'DB', i, ((m2Pitch*(finDummy//2-1)) - v_enclosure + y*m2Pitch*(K_space + fin//2) ), (y*m2Pitch*(K_space + fin//2) - v_enclosure + m1Length + (m2Pitch*(finDummy//2-1))))
for i in SA:
uc.v1Segment( 'v1', 'NA', (i*m1Pitch + m1Offset - v1Width/2), (i*m1Pitch + m1Offset + v1Width - v1Width/2), (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 3))
uc.m1Segment( 'S', 'SA', i, ((m2Pitch*(finDummy//2-1)) - v_enclosure + y*m2Pitch*(K_space + fin//2) ), (y*m2Pitch*(K_space + fin//2) - v_enclosure + m1Length + (m2Pitch*(finDummy//2-1))))
for i in SB:
uc.v1Segment( 'v1', 'NA', (i*m1Pitch + m1Offset - v1Width/2), (i*m1Pitch + m1Offset + v1Width - v1Width/2), (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 1))
uc.m1Segment( 'S', 'SB', i, ((m2Pitch*(finDummy//2-1)) - v_enclosure + y*m2Pitch*(K_space + fin//2) ), (y*m2Pitch*(K_space + fin//2) - v_enclosure + m1Length + (m2Pitch*(finDummy//2-1))))
if (x_cells - 1 - x) == 0:
for i in GA:
uc.v1Segment( 'v1', 'NA', (i*m1Pitch + m1Offset - v1Width/2), (i*m1Pitch + m1Offset + v1Width - v1Width/2), (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 0))
uc.m1Segment( 'G', 'G', i, (0 - v_enclosure + (m2Pitch*(finDummy//2-1)) + y*m2Pitch*(K_space + fin//2) ), (y*m2Pitch*(K_space + fin//2)+ (m2Pitch*(finDummy//2-1)) - v_enclosure + m1PCLength))
for i in GB:
uc.v1Segment( 'v1', 'NA', (i*m1Pitch + m1Offset - v1Width/2), (i*m1Pitch + m1Offset + v1Width - v1Width/2), (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 0))
uc.m1Segment( 'G', 'G', i, (0 - v_enclosure + (m2Pitch*(finDummy//2-1)) + y*m2Pitch*(K_space + fin//2) ), (y*m2Pitch*(K_space + fin//2)+ (m2Pitch*(finDummy//2-1)) - v_enclosure + m1PCLength))
if __name__ == "__main__":
fin_u1 = int(sys.argv[1])
x_cells = int(sys.argv[2])
y_cells = int(sys.argv[3])
assert (x_cells%2) == 0
gate_u = 2
if fin_u1%2 != 0:
fin_u = fin_u1 + 1
else:
fin_u = fin_u1
uc = UnitCell()
for (x,y) in ( (x,y) for x in range(x_cells) for y in range(y_cells)):
uc.unit( x, y)
uc.computeBbox()
with open( "./mydesign_dr_globalrouting.json", "wt") as fp:
data = { 'bbox' : uc.bbox.toList(), 'globalRoutes' : [], 'globalRouteGrid' : [], 'terminals' : uc.terminals}
fp.write( json.dumps( data, indent=2) + '\n')
| [((57, 20, 57, 60), 'transformation.Rect', 'transformation.Rect', ({(57, 40, 57, 44): 'None', (57, 45, 57, 49): 'None', (57, 50, 57, 54): 'None', (57, 55, 57, 59): 'None'}, {}), '(None, None, None, None)', False, 'import transformation\n'), ((59, 16, 59, 51), 'transformation.Rect', 'transformation.Rect', ({(59, 37, 59, 50): "*term['rect']"}, {}), "(*term['rect'])", False, 'import transformation\n'), ((472, 18, 472, 45), 'json.dumps', 'json.dumps', (), '', False, 'import json\n')] |
tiramtaramta/conduit | docs/testcases/all_in_one.py | ae4ca8e64fe64c2b6702d803d799e380fda84a92 | from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
import os
import time
import csv
from webdriver_manager.chrome import ChromeDriverManager
import math
from basic_function import basic_login, find_element
class TestConduit(object):
def setup(self):
browser_options = Options()
browser_options.headless = True
self.driver = webdriver.Chrome(ChromeDriverManager().install(), options=browser_options)
self.driver.get("http://localhost:1667/#/")
def teardown(self):
self.driver.quit()
    # -------- A028, TC-0037 Cookie policy notice --------
def test_cookie_process(self):
assert self.driver.find_element_by_id("cookie-policy-panel").is_displayed()
        # Cookie acceptance flow
self.driver.find_element_by_xpath(
"//button[@class='cookie__bar__buttons__button cookie__bar__buttons__button--accept']").click()
time.sleep(2)
        # # Cookie rejection flow
# self.driver.find_element_by_xpath(
# "//button[@class='cookie__bar__buttons__button cookie__bar__buttons__button--decline']").click()
#
# time.sleep(2)
try:
self.driver.find_element_by_id("cookie-policy-panel")
time.sleep(2)
except NoSuchElementException:
return True
return False
    # -------- A002, TC-0002 Registration with valid data --------
def test_registration_process(self):
user_input_data = ["user200", "[email protected]", "Userpass1"]
self.driver.find_element_by_xpath("//a[@href='#/register']").click()
        # Fill the input fields with the random user data
for i in range(len(user_input_data)):
self.driver.find_element_by_xpath(f"//fieldset[{i + 1}]/input").send_keys(user_input_data[i])
self.driver.find_element_by_tag_name("button").click()
time.sleep(2)
        # Verify the text of the successful-registration notification dialog
swal_text = find_element(self.driver, By.CLASS_NAME, "swal-text")
assert swal_text.text == "Your registration was successful!"
# time.sleep(2)
        # Close the notification dialog
close_btn = find_element(self.driver, By.XPATH, "//button[normalize-space()='OK']")
close_btn.click()
# self.driver.find_element_by_xpath("//button[normalize-space()='OK']").click()
time.sleep(1)
        # Verify that the user is logged in
username_check = self.driver.find_element_by_xpath("//a[starts-with(@href, '#/@')]").text
assert username_check == user_input_data[
0], f"Test Failed: Username did not match expected ({user_input_data[0]})."
# time.sleep(2)
    # -------- A004, TC-0010 Login with valid data --------
def test_login_process(self):
user_input_data = ["user200", "[email protected]", "Userpass1"]
self.driver.find_element_by_xpath("//a[@href='#/login']").click()
        # Fill in the login form
for i in range(len(user_input_data) - 1):
self.driver.find_element_by_xpath(f"//fieldset[{i + 1}]/input").send_keys(user_input_data[i + 1])
time.sleep(1)
self.driver.find_element_by_tag_name("button").click()
time.sleep(3)
        # Verify that the user is logged in
username_check = self.driver.find_element_by_xpath("//a[starts-with(@href, '#/@')]").text
assert username_check == user_input_data[0], f"Test Failed: User is not logged in ({user_input_data[0]})."
time.sleep(2)
    # -------- A010, TC-0034 Editing own profile, changing the picture --------
def test_edit_settings_process(self):
basic_login(self.driver)
self.driver.find_element_by_xpath("//a[@href='#/settings']").click()
time.sleep(2)
        # Verify that the Your Settings page is displayed
settings_check = self.driver.find_element_by_tag_name("h1").text
assert settings_check == "Your Settings", f"Test Failed: Page names did not match expected ({settings_check})."
time.sleep(3)
        # Read in the prepared data
with open('edit_user.csv') as article_file:
csv_reader = csv.reader(article_file, delimiter=';')
for row in csv_reader:
user_update = row
time.sleep(2)
        # Fill each field of the input form with the data
user_picture = self.driver.find_element_by_class_name("form-control")
user_bio = self.driver.find_element_by_xpath("//textarea[@placeholder='Short bio about you']")
user_picture.clear()
user_picture.send_keys(user_update[0])
user_bio.clear()
user_bio.send_keys(user_update[1])
time.sleep(1)
self.driver.find_element_by_xpath("//button[normalize-space()='Update Settings']").click()
time.sleep(2)
        # Verify the text of the successful-update notification dialog
assert self.driver.find_element_by_class_name("swal-title").text == "Update successful!"
time.sleep(2)
        # Close the notification dialog
self.driver.find_element_by_xpath("//button[normalize-space()='OK']").click()
time.sleep(1)
        # Verify the changes made to the user's profile
self.driver.find_element_by_xpath("//a[starts-with(@href, '#/@')]").click()
time.sleep(2)
img_check = self.driver.find_element_by_class_name("user-img").get_attribute("src")
assert img_check == user_update[
0], f"Test Failed: Image did not match expected ({user_update[0]})."
bio_check = self.driver.find_element_by_css_selector("div[class='user-info'] p").text
assert bio_check == user_update[
1], f"Test Failed: User's bio did not match expected ({user_update[1]})."
time.sleep(2)
    # -------- A005, TC-0003 Logout --------
def test_logout_process(self):
basic_login(self.driver)
self.driver.find_element_by_xpath("//i[@class='ion-android-exit']").click()
time.sleep(2)
        # Verify that the user is logged out
sign_out_check = self.driver.find_element_by_xpath("//a[starts-with(@href, '#/login')]").text
assert sign_out_check == f"{sign_out_check}", f"Test Failed: User is logged in."
time.sleep(1)
    # -------- A006, TC-0015 Creating a new post with valid data --------
def test_create_post(self):
basic_login(self.driver)
self.driver.find_element_by_xpath("//a[@href='#/editor']").click()
with open('new_post_content.csv') as article_file:
csv_reader = csv.reader(article_file, delimiter=';')
for row in csv_reader:
new_article_data = row
time.sleep(2)
        # Fill in the input form
self.driver.find_element_by_xpath("//input[starts-with(@placeholder,'Article')]").send_keys(new_article_data[0])
self.driver.find_element_by_xpath("//input[starts-with(@placeholder,'What')]").send_keys(new_article_data[1])
self.driver.find_element_by_xpath("//textarea[starts-with(@placeholder,'Write')]").send_keys(
new_article_data[2])
self.driver.find_element_by_xpath("//input[@placeholder='Enter tags']").send_keys(new_article_data[3])
time.sleep(1)
self.driver.find_element_by_css_selector("button[type='submit']").click()
time.sleep(2)
        # Verify that the post was created
title_check = self.driver.find_element_by_tag_name("h1").text
assert title_check == new_article_data[
0], f"Test Failed: Content title did not match expected ({new_article_data[0]})."
time.sleep(2)
    # -------- A006, TC-0015 New data entry with valid data (repeated) --------
def test_create_posts_process(self):
basic_login(self.driver)
for i in range(1):
with open('contents.csv') as article_file:
csv_reader = csv.reader(article_file, delimiter=';')
for row in csv_reader:
new_article_data = row
                    # Fill in the input form
self.driver.find_element_by_xpath("//a[@href='#/editor']").click()
time.sleep(4)
self.driver.find_element_by_xpath("//input[@placeholder='Article Title']").send_keys(
new_article_data[0])
self.driver.find_element_by_xpath("//input[starts-with(@placeholder,'What')]").send_keys(
new_article_data[1])
self.driver.find_element_by_xpath("//textarea[starts-with(@placeholder,'Write')]").send_keys(
new_article_data[2])
self.driver.find_element_by_xpath("//input[@placeholder='Enter tags']").send_keys(
new_article_data[3])
time.sleep(1)
self.driver.find_element_by_css_selector("button[type='submit']").click()
time.sleep(2)
                    # Verify that the post was created
title_check = self.driver.find_element_by_tag_name("h1").text
assert title_check == new_article_data[
0], f"Test Failed: Content title did not match expected ({new_article_data[0]})."
time.sleep(4)
    # -------- A015, TC-0024 Deleting own post --------
def test_delete_post_process(self):
basic_login(self.driver)
my_articles = self.driver.find_element_by_xpath("//a[starts-with(@href, '#/@')]")
my_articles.click()
time.sleep(2)
articles_list = self.driver.find_elements_by_tag_name("h1")
if len(articles_list) > 0:
articles_list[0].click()
time.sleep(3)
self.driver.find_element_by_xpath("//*[@id='app']/div/div[1]/div/div/span/button/span").click()
time.sleep(2)
            # Verify that the post has really been deleted
my_articles.click()
time.sleep(2)
new_articles_list = self.driver.find_elements_by_tag_name("h1")
assert not new_articles_list[0] == articles_list[
0], f"Test Failed: Content is not deleted ({articles_list[0]})."
    # -------- A029 Exporting data from the UI --------
def test_export_my_last_post(self):
basic_login(self.driver)
self.driver.find_element_by_xpath("//a[starts-with(@href, '#/@')]").click()
time.sleep(2)
articles_list = self.driver.find_elements_by_tag_name("h1")
if os.path.exists("my_last_article.txt"):
os.remove("my_last_article.txt")
else:
pass
articles_list[0].click()
time.sleep(2)
article_title = self.driver.find_element_by_tag_name("h1").text
article_text = self.driver.find_element_by_tag_name("p").text
with open("my_last_article.txt", "a") as my_txt:
my_txt.write(f"{article_title};{article_text};\n")
time.sleep(3)
        # verify the exported content
with open("my_last_article.txt", "r") as my_txt2:
my_txt = my_txt2.readline()
my_txt_list = my_txt.split(";")
assert my_txt_list[0] == article_title, f"Test Failed: Content title is not exported."
assert my_txt_list[1] == article_text, f"Test Failed: Content text is not exported."
    # -------- A007, TC-0025 Viewing the list of posts --------
def test_global_feed_list(self):
basic_login(self.driver)
self.driver.find_element_by_xpath("//a[starts-with(@href, '#/')]").click()
time.sleep(2)
articles_list = self.driver.find_elements_by_xpath("//div[@class='article-preview']/a/h1")
if os.path.exists("titles_list.csv"):
os.remove("titles_list.csv")
else:
pass
for i in range(len(articles_list)):
article_title = articles_list[i].text
with open('titles_list.csv', 'a', encoding="utf-8") as csv_titles:
csv_titles.write(f"{article_title};")
        # verify the length of the list
with open('titles_list.csv', 'r', encoding="utf-8") as csv_titles2:
check_articles = csv.reader(csv_titles2, delimiter=';')
for row in check_articles:
check_articles_list = row
assert len(articles_list) == len(
check_articles_list) - 1, f"Test Failed: The length of the lists are not exactly the same."
    # -------- A007, TC-0025 Viewing the list of posts (with pagination) --------
def test_global_feed_pagination(self):
basic_login(self.driver)
self.driver.find_element_by_xpath("//a[starts-with(@href, '#/')]").click()
time.sleep(2)
articles_list = self.driver.find_elements_by_xpath("//div[@class='article-preview']/a/h1")
        # use the pagination buttons
pages = self.driver.find_elements_by_class_name("page-link")
for page in pages:
page.click()
time.sleep(1)
        # Verify that all pages were traversed
assert len(pages) == int(math.ceil(
len(articles_list) / 10)), f"Test Failed: The length of the list and pagination not exactly the same."
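
# Illustrative sketch (assumption, not the real basic_function module): the
# imported helpers are only known here through their call sites --
# basic_login(driver) signs a prepared user in, and find_element(driver, by,
# value) returns a single element, presumably after an explicit wait, e.g.:
#
#   from selenium.webdriver.support.ui import WebDriverWait
#   from selenium.webdriver.support import expected_conditions as EC
#
#   def find_element(driver, by, value):
#       return WebDriverWait(driver, 10).until(
#           EC.presence_of_element_located((by, value)))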
| [((15, 26, 15, 35), 'selenium.webdriver.chrome.options.Options', 'Options', ({}, {}), '()', False, 'from selenium.webdriver.chrome.options import Options\n'), ((32, 8, 32, 21), 'time.sleep', 'time.sleep', ({(32, 19, 32, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((61, 8, 61, 21), 'time.sleep', 'time.sleep', ({(61, 19, 61, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((64, 20, 64, 73), 'basic_function.find_element', 'find_element', ({(64, 33, 64, 44): 'self.driver', (64, 46, 64, 59): 'By.CLASS_NAME', (64, 61, 64, 72): '"""swal-text"""'}, {}), "(self.driver, By.CLASS_NAME, 'swal-text')", False, 'from basic_function import basic_login, find_element\n'), ((70, 20, 70, 91), 'basic_function.find_element', 'find_element', ({(70, 33, 70, 44): 'self.driver', (70, 46, 70, 54): 'By.XPATH', (70, 56, 70, 90): '"""//button[normalize-space()=\'OK\']"""'}, {}), '(self.driver, By.XPATH, "//button[normalize-space()=\'OK\']")', False, 'from basic_function import basic_login, find_element\n'), ((74, 8, 74, 21), 'time.sleep', 'time.sleep', ({(74, 19, 74, 20): '(1)'}, {}), '(1)', False, 'import time\n'), ((93, 8, 93, 21), 'time.sleep', 'time.sleep', ({(93, 19, 93, 20): '(1)'}, {}), '(1)', False, 'import time\n'), ((97, 8, 97, 21), 'time.sleep', 'time.sleep', ({(97, 19, 97, 20): '(3)'}, {}), '(3)', False, 'import time\n'), ((103, 8, 103, 21), 'time.sleep', 'time.sleep', ({(103, 19, 103, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((107, 8, 107, 32), 'basic_function.basic_login', 'basic_login', ({(107, 20, 107, 31): 'self.driver'}, {}), '(self.driver)', False, 'from basic_function import basic_login, find_element\n'), ((111, 8, 111, 21), 'time.sleep', 'time.sleep', ({(111, 19, 111, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((117, 8, 117, 21), 'time.sleep', 'time.sleep', ({(117, 19, 117, 20): '(3)'}, {}), '(3)', False, 'import time\n'), ((125, 8, 125, 21), 'time.sleep', 'time.sleep', ({(125, 19, 125, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((136, 8, 136, 21), 'time.sleep', 'time.sleep', ({(136, 19, 136, 20): '(1)'}, {}), '(1)', False, 'import time\n'), ((140, 8, 140, 21), 'time.sleep', 'time.sleep', ({(140, 19, 140, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((145, 8, 145, 21), 'time.sleep', 'time.sleep', ({(145, 19, 145, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((150, 8, 150, 21), 'time.sleep', 'time.sleep', ({(150, 19, 150, 20): '(1)'}, {}), '(1)', False, 'import time\n'), ((155, 8, 155, 21), 'time.sleep', 'time.sleep', ({(155, 19, 155, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((164, 8, 164, 21), 'time.sleep', 'time.sleep', ({(164, 19, 164, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((168, 8, 168, 32), 'basic_function.basic_login', 'basic_login', ({(168, 20, 168, 31): 'self.driver'}, {}), '(self.driver)', False, 'from basic_function import basic_login, find_element\n'), ((172, 8, 172, 21), 'time.sleep', 'time.sleep', ({(172, 19, 172, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((178, 8, 178, 21), 'time.sleep', 'time.sleep', ({(178, 19, 178, 20): '(1)'}, {}), '(1)', False, 'import time\n'), ((182, 8, 182, 32), 'basic_function.basic_login', 'basic_login', ({(182, 20, 182, 31): 'self.driver'}, {}), '(self.driver)', False, 'from basic_function import basic_login, find_element\n'), ((191, 8, 191, 21), 'time.sleep', 'time.sleep', ({(191, 19, 191, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((200, 8, 200, 21), 'time.sleep', 'time.sleep', ({(200, 19, 200, 20): '(1)'}, {}), '(1)', False, 'import time\n'), ((204, 8, 204, 21), 'time.sleep', 
'time.sleep', ({(204, 19, 204, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((211, 8, 211, 21), 'time.sleep', 'time.sleep', ({(211, 19, 211, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((215, 8, 215, 32), 'basic_function.basic_login', 'basic_login', ({(215, 20, 215, 31): 'self.driver'}, {}), '(self.driver)', False, 'from basic_function import basic_login, find_element\n'), ((250, 8, 250, 32), 'basic_function.basic_login', 'basic_login', ({(250, 20, 250, 31): 'self.driver'}, {}), '(self.driver)', False, 'from basic_function import basic_login, find_element\n'), ((254, 8, 254, 21), 'time.sleep', 'time.sleep', ({(254, 19, 254, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((260, 8, 260, 21), 'time.sleep', 'time.sleep', ({(260, 19, 260, 20): '(3)'}, {}), '(3)', False, 'import time\n'), ((264, 8, 264, 21), 'time.sleep', 'time.sleep', ({(264, 19, 264, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((269, 8, 269, 21), 'time.sleep', 'time.sleep', ({(269, 19, 269, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((278, 8, 278, 32), 'basic_function.basic_login', 'basic_login', ({(278, 20, 278, 31): 'self.driver'}, {}), '(self.driver)', False, 'from basic_function import basic_login, find_element\n'), ((282, 8, 282, 21), 'time.sleep', 'time.sleep', ({(282, 19, 282, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((286, 11, 286, 48), 'os.path.exists', 'os.path.exists', ({(286, 26, 286, 47): '"""my_last_article.txt"""'}, {}), "('my_last_article.txt')", False, 'import os\n'), ((293, 8, 293, 21), 'time.sleep', 'time.sleep', ({(293, 19, 293, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((300, 8, 300, 21), 'time.sleep', 'time.sleep', ({(300, 19, 300, 20): '(3)'}, {}), '(3)', False, 'import time\n'), ((312, 8, 312, 32), 'basic_function.basic_login', 'basic_login', ({(312, 20, 312, 31): 'self.driver'}, {}), '(self.driver)', False, 'from basic_function import basic_login, find_element\n'), ((316, 8, 316, 21), 'time.sleep', 'time.sleep', ({(316, 19, 316, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((320, 11, 320, 44), 'os.path.exists', 'os.path.exists', ({(320, 26, 320, 43): '"""titles_list.csv"""'}, {}), "('titles_list.csv')", False, 'import os\n'), ((341, 8, 341, 32), 'basic_function.basic_login', 'basic_login', ({(341, 20, 341, 31): 'self.driver'}, {}), '(self.driver)', False, 'from basic_function import basic_login, find_element\n'), ((345, 8, 345, 21), 'time.sleep', 'time.sleep', ({(345, 19, 345, 20): '(2)'}, {}), '(2)', False, 'import time\n'), ((43, 12, 43, 25), 'time.sleep', 'time.sleep', ({(43, 23, 43, 24): '(2)'}, {}), '(2)', False, 'import time\n'), ((121, 25, 121, 64), 'csv.reader', 'csv.reader', (), '', False, 'import csv\n'), ((187, 25, 187, 64), 'csv.reader', 'csv.reader', (), '', False, 'import csv\n'), ((287, 12, 287, 44), 'os.remove', 'os.remove', ({(287, 22, 287, 43): '"""my_last_article.txt"""'}, {}), "('my_last_article.txt')", False, 'import os\n'), ((321, 12, 321, 40), 'os.remove', 'os.remove', ({(321, 22, 321, 39): '"""titles_list.csv"""'}, {}), "('titles_list.csv')", False, 'import os\n'), ((332, 29, 332, 67), 'csv.reader', 'csv.reader', (), '', False, 'import csv\n'), ((354, 12, 354, 25), 'time.sleep', 'time.sleep', ({(354, 23, 354, 24): '(1)'}, {}), '(1)', False, 'import time\n'), ((219, 29, 219, 68), 'csv.reader', 'csv.reader', (), '', False, 'import csv\n'), ((17, 39, 17, 60), 'webdriver_manager.chrome.ChromeDriverManager', 'ChromeDriverManager', ({}, {}), '()', False, 'from webdriver_manager.chrome import ChromeDriverManager\n'), ((225, 20, 225, 33), 
'time.sleep', 'time.sleep', ({(225, 31, 225, 32): '(4)'}, {}), '(4)', False, 'import time\n'), ((235, 20, 235, 33), 'time.sleep', 'time.sleep', ({(235, 31, 235, 32): '(1)'}, {}), '(1)', False, 'import time\n'), ((239, 20, 239, 33), 'time.sleep', 'time.sleep', ({(239, 31, 239, 32): '(2)'}, {}), '(2)', False, 'import time\n'), ((246, 20, 246, 33), 'time.sleep', 'time.sleep', ({(246, 31, 246, 32): '(4)'}, {}), '(4)', False, 'import time\n')] |
amalshaji/python-playground | config.py | bd3a88a493f36230958613d60a9d70d64f971dba | from pydantic import BaseSettings
class Settings(BaseSettings):
deta_project_key: str
settings = Settings()
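
# Illustrative usage (assumption): pydantic's BaseSettings fills fields from
# environment variables by name, so instantiating Settings needs
# DETA_PROJECT_KEY to be set (otherwise a validation error is raised at import
# time here).
#
#   import os
#   os.environ["DETA_PROJECT_KEY"] = "dummy-key"
#   print(Settings().deta_project_key)  # -> "dummy-key"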
| [] |
dainiusjocas/labs | IV_semester/os/configs.py | 25aa0ae2032681dbaf0afd83f3d80bedddea6407 | #!/usr/bin/env python
''' This module provides configuration options for the OS project. No more magic numbers! '''
BLOCK_SIZE = 16 # words
WORD_SIZE = 4 # bytes
# length of RS in blocks
RESTRICTED_LENGTH = 1
# length of DS in blocks
DS_LENGTH = 6
# timer value
TIMER_VALUE = 10
# buffer size
BUFFER_SIZE = 16
# number of blocks in HD
HD_BLOCKS_SIZE = 500
# default priorities
ROOT_PRIORITY = 40
VM_PRIORITY = 50
LOADER_PRIORITY = 60
INTERRUPT_PRIORITY = 70
PRINT_PRIORITY = 70
# Process states
RUNNING_STATE = 'running'
READY_STATE = 'ready'
BLOCKED_STATE = 'blocked'
# Page tables
PAGE_TABLE_STARTING_BLOCK = 0
PAGE_TABLE_ENDING_BLOCK = 14
# Shared memory
SH_MEMEORY_STARTING_BLOCK = 15
SH_MEMORY_ENDING_BLOCK = 31
# blocks dedicated for user tasks are from
USER_STARTING_BLOCK = 32
USER_ENDING_BLOCK = 255
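
# Illustrative arithmetic (not part of the original config): with the values
# above, user memory spans blocks 32..255, i.e. 224 blocks, which is
# 224 * BLOCK_SIZE (16 words) = 3584 words = 3584 * WORD_SIZE (4 bytes)
# = 14336 bytes available for user tasks.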
| [] |
Russ76/robotics-toolbox-python | roboticstoolbox/models/URDF/Puma560.py | 4b3e82a6522757ffde1f83aef8d05b3ad475e9de | #!/usr/bin/env python
import numpy as np
from roboticstoolbox.robot.ERobot import ERobot
from math import pi
class Puma560(ERobot):
"""
Class that imports a Puma 560 URDF model
``Puma560()`` is a class which imports a Unimation Puma560 robot definition
from a URDF file. The model describes its kinematic and graphical
characteristics.
.. runblock:: pycon
>>> import roboticstoolbox as rtb
>>> robot = rtb.models.URDF.Puma560()
>>> print(robot)
Defined joint configurations are:
- qz, zero joint angle configuration, 'L' shaped configuration
- qr, vertical 'READY' configuration
- qs, arm is stretched out in the x-direction
- qn, arm is at a nominal non-singular configuration
.. warning:: This file has been modified so that the zero-angle pose is the
same as the DH model in the toolbox. ``j3`` rotation is changed from
-𝜋/2 to 𝜋/2. Dimensions are also slightly different. Both models
include the pedestal height.
.. note:: The original file is from https://github.com/nimasarli/puma560_description/blob/master/urdf/puma560_robot.urdf.xacro
.. codeauthor:: Jesse Haviland
.. sectionauthor:: Peter Corke
"""
def __init__(self):
links, name, urdf_string, urdf_filepath = self.URDF_read(
"puma560_description/urdf/puma560_robot.urdf.xacro"
)
super().__init__(
links,
name=name,
urdf_string=urdf_string,
urdf_filepath=urdf_filepath,
)
self.manufacturer = "Unimation"
# self.ee_link = self.ets[9]
# ready pose, arm up
self.qr = np.array([0, pi / 2, -pi / 2, 0, 0, 0])
self.qz = np.zeros(6)
self.addconfiguration("qr", self.qr)
self.addconfiguration("qz", self.qz)
# zero angles, upper arm horizontal, lower up straight up
self.addconfiguration_attr("qz", np.array([0, 0, 0, 0, 0, 0]))
# reference pose, arm to the right, elbow up
self.addconfiguration_attr(
"ru", np.array([-0.0000, 0.7854, 3.1416, -0.0000, 0.7854, 0.0000])
)
# reference pose, arm to the right, elbow up
self.addconfiguration_attr(
"rd", np.array([-0.0000, -0.8335, 0.0940, -3.1416, 0.8312, 3.1416])
)
# reference pose, arm to the left, elbow up
self.addconfiguration_attr(
"lu", np.array([2.6486, -3.9270, 0.0940, 2.5326, 0.9743, 0.3734])
)
# reference pose, arm to the left, elbow down
self.addconfiguration_attr(
"ld", np.array([2.6486, -2.3081, 3.1416, 0.6743, 0.8604, 2.6611])
)
# straight and horizontal
self.addconfiguration_attr("qs", np.array([0, 0, -pi / 2, 0, 0, 0]))
# nominal table top picking pose
self.addconfiguration_attr("qn", np.array([0, pi / 4, pi, 0, pi / 4, 0]))
if __name__ == "__main__": # pragma nocover
robot = Puma560()
print(robot)
| [((57, 18, 57, 57), 'numpy.array', 'np.array', ({(57, 27, 57, 56): '[0, pi / 2, -pi / 2, 0, 0, 0]'}, {}), '([0, pi / 2, -pi / 2, 0, 0, 0])', True, 'import numpy as np\n'), ((58, 18, 58, 29), 'numpy.zeros', 'np.zeros', ({(58, 27, 58, 28): '6'}, {}), '(6)', True, 'import numpy as np\n'), ((64, 41, 64, 69), 'numpy.array', 'np.array', ({(64, 50, 64, 68): '[0, 0, 0, 0, 0, 0]'}, {}), '([0, 0, 0, 0, 0, 0])', True, 'import numpy as np\n'), ((68, 18, 68, 78), 'numpy.array', 'np.array', ({(68, 27, 68, 77): '[-0.0, 0.7854, 3.1416, -0.0, 0.7854, 0.0]'}, {}), '([-0.0, 0.7854, 3.1416, -0.0, 0.7854, 0.0])', True, 'import numpy as np\n'), ((73, 18, 73, 79), 'numpy.array', 'np.array', ({(73, 27, 73, 78): '[-0.0, -0.8335, 0.094, -3.1416, 0.8312, 3.1416]'}, {}), '([-0.0, -0.8335, 0.094, -3.1416, 0.8312, 3.1416])', True, 'import numpy as np\n'), ((78, 18, 78, 77), 'numpy.array', 'np.array', ({(78, 27, 78, 76): '[2.6486, -3.927, 0.094, 2.5326, 0.9743, 0.3734]'}, {}), '([2.6486, -3.927, 0.094, 2.5326, 0.9743, 0.3734])', True, 'import numpy as np\n'), ((83, 18, 83, 77), 'numpy.array', 'np.array', ({(83, 27, 83, 76): '[2.6486, -2.3081, 3.1416, 0.6743, 0.8604, 2.6611]'}, {}), '([2.6486, -2.3081, 3.1416, 0.6743, 0.8604, 2.6611])', True, 'import numpy as np\n'), ((87, 41, 87, 75), 'numpy.array', 'np.array', ({(87, 50, 87, 74): '[0, 0, -pi / 2, 0, 0, 0]'}, {}), '([0, 0, -pi / 2, 0, 0, 0])', True, 'import numpy as np\n'), ((90, 41, 90, 80), 'numpy.array', 'np.array', ({(90, 50, 90, 79): '[0, pi / 4, pi, 0, pi / 4, 0]'}, {}), '([0, pi / 4, pi, 0, pi / 4, 0])', True, 'import numpy as np\n')] |
hashnfv/hashnfv-functest | functest/tests/unit/odl/test_odl.py | ff34df7ec7be6cd5fcf0f7557b393bd5d6266047 | #!/usr/bin/env python
# Copyright (c) 2016 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
"""Define the classes required to fully cover odl."""
import errno
import logging
import os
import unittest
from keystoneauth1.exceptions import auth_plugins
import mock
from robot.errors import DataError, RobotError
from robot.result import model
from robot.utils.robottime import timestamp_to_secs
import six
from six.moves import urllib
from functest.core import testcase
from functest.opnfv_tests.sdn.odl import odl
__author__ = "Cedric Ollivier <[email protected]>"
class ODLVisitorTesting(unittest.TestCase):
"""The class testing ODLResultVisitor."""
# pylint: disable=missing-docstring
def setUp(self):
self.visitor = odl.ODLResultVisitor()
def test_empty(self):
self.assertFalse(self.visitor.get_data())
def test_ok(self):
data = {'name': 'foo',
'parent': 'bar',
'status': 'PASS',
'starttime': "20161216 16:00:00.000",
'endtime': "20161216 16:00:01.000",
'elapsedtime': 1000,
'text': 'Hello, World!',
'critical': True}
test = model.TestCase(
name=data['name'], status=data['status'], message=data['text'],
starttime=data['starttime'], endtime=data['endtime'])
test.parent = mock.Mock()
config = {'name': data['parent'],
'criticality.test_is_critical.return_value': data[
'critical']}
test.parent.configure_mock(**config)
self.visitor.visit_test(test)
self.assertEqual(self.visitor.get_data(), [data])
class ODLTesting(unittest.TestCase):
"""The super class which testing classes could inherit."""
# pylint: disable=missing-docstring
logging.disable(logging.CRITICAL)
_keystone_ip = "127.0.0.1"
_neutron_url = "http://127.0.0.2:9696"
_sdn_controller_ip = "127.0.0.3"
_os_auth_url = "http://{}:5000/v3".format(_keystone_ip)
_os_projectname = "admin"
_os_username = "admin"
_os_password = "admin"
_odl_webport = "8080"
_odl_restconfport = "8181"
_odl_username = "admin"
_odl_password = "admin"
_os_userdomainname = 'Default'
_os_projectdomainname = 'Default'
def setUp(self):
for var in ("INSTALLER_TYPE", "SDN_CONTROLLER", "SDN_CONTROLLER_IP"):
if var in os.environ:
del os.environ[var]
os.environ["OS_AUTH_URL"] = self._os_auth_url
os.environ["OS_USERNAME"] = self._os_username
os.environ["OS_USER_DOMAIN_NAME"] = self._os_userdomainname
os.environ["OS_PASSWORD"] = self._os_password
os.environ["OS_PROJECT_NAME"] = self._os_projectname
os.environ["OS_PROJECT_DOMAIN_NAME"] = self._os_projectdomainname
os.environ["OS_PASSWORD"] = self._os_password
self.test = odl.ODLTests(case_name='odl', project_name='functest')
self.defaultargs = {'odlusername': self._odl_username,
'odlpassword': self._odl_password,
'neutronurl': "http://{}:9696".format(
self._keystone_ip),
'osauthurl': self._os_auth_url,
'osusername': self._os_username,
'osuserdomainname': self._os_userdomainname,
'osprojectname': self._os_projectname,
'osprojectdomainname': self._os_projectdomainname,
'ospassword': self._os_password,
'odlip': self._keystone_ip,
'odlwebport': self._odl_webport,
'odlrestconfport': self._odl_restconfport,
'pushtodb': False}
class ODLParseResultTesting(ODLTesting):
"""The class testing ODLTests.parse_results()."""
# pylint: disable=missing-docstring
_config = {'name': 'dummy', 'starttime': '20161216 16:00:00.000',
'endtime': '20161216 16:00:01.000'}
@mock.patch('robot.api.ExecutionResult', side_effect=DataError)
def test_raises_exc(self, mock_method):
with self.assertRaises(DataError):
self.test.parse_results()
mock_method.assert_called_once_with(
os.path.join(odl.ODLTests.res_dir, 'output.xml'))
def _test_result(self, config, result):
suite = mock.Mock()
suite.configure_mock(**config)
with mock.patch('robot.api.ExecutionResult',
return_value=mock.Mock(suite=suite)):
self.test.parse_results()
self.assertEqual(self.test.result, result)
self.assertEqual(self.test.start_time,
timestamp_to_secs(config['starttime']))
self.assertEqual(self.test.stop_time,
timestamp_to_secs(config['endtime']))
self.assertEqual(self.test.details,
{'description': config['name'], 'tests': []})
def test_null_passed(self):
self._config.update({'statistics.critical.passed': 0,
'statistics.critical.total': 20})
self._test_result(self._config, 0)
def test_no_test(self):
self._config.update({'statistics.critical.passed': 20,
'statistics.critical.total': 0})
self._test_result(self._config, 0)
def test_half_success(self):
self._config.update({'statistics.critical.passed': 10,
'statistics.critical.total': 20})
self._test_result(self._config, 50)
def test_success(self):
self._config.update({'statistics.critical.passed': 20,
'statistics.critical.total': 20})
self._test_result(self._config, 100)
class ODLRobotTesting(ODLTesting):
"""The class testing ODLTests.set_robotframework_vars()."""
# pylint: disable=missing-docstring
@mock.patch('fileinput.input', side_effect=Exception())
def test_set_vars_ko(self, mock_method):
self.assertFalse(self.test.set_robotframework_vars())
mock_method.assert_called_once_with(
os.path.join(odl.ODLTests.odl_test_repo,
'csit/variables/Variables.robot'), inplace=True)
@mock.patch('fileinput.input', return_value=[])
def test_set_vars_empty(self, mock_method):
self.assertTrue(self.test.set_robotframework_vars())
mock_method.assert_called_once_with(
os.path.join(odl.ODLTests.odl_test_repo,
'csit/variables/Variables.robot'), inplace=True)
@mock.patch('sys.stdout', new_callable=six.StringIO)
def _test_set_vars(self, msg1, msg2, *args):
line = mock.MagicMock()
line.__iter__.return_value = [msg1]
with mock.patch('fileinput.input', return_value=line) as mock_method:
self.assertTrue(self.test.set_robotframework_vars())
mock_method.assert_called_once_with(
os.path.join(odl.ODLTests.odl_test_repo,
'csit/variables/Variables.robot'), inplace=True)
self.assertEqual(args[0].getvalue(), "{}\n".format(msg2))
def test_set_vars_auth_default(self):
self._test_set_vars(
"@{AUTH} ",
"@{AUTH} admin admin")
def test_set_vars_auth1(self):
self._test_set_vars(
"@{AUTH1} foo bar",
"@{AUTH1} foo bar")
@mock.patch('sys.stdout', new_callable=six.StringIO)
def test_set_vars_auth_foo(self, *args):
line = mock.MagicMock()
line.__iter__.return_value = ["@{AUTH} "]
with mock.patch('fileinput.input', return_value=line) as mock_method:
self.assertTrue(self.test.set_robotframework_vars('foo', 'bar'))
mock_method.assert_called_once_with(
os.path.join(odl.ODLTests.odl_test_repo,
'csit/variables/Variables.robot'), inplace=True)
self.assertEqual(
args[0].getvalue(),
"@{AUTH} foo bar\n")
class ODLMainTesting(ODLTesting):
"""The class testing ODLTests.run_suites()."""
# pylint: disable=missing-docstring
def _get_run_suites_kwargs(self, key=None):
kwargs = {'odlusername': self._odl_username,
'odlpassword': self._odl_password,
'neutronurl': self._neutron_url,
'osauthurl': self._os_auth_url,
'osusername': self._os_username,
'osuserdomainname': self._os_userdomainname,
'osprojectname': self._os_projectname,
'osprojectdomainname': self._os_projectdomainname,
'ospassword': self._os_password,
'odlip': self._sdn_controller_ip,
'odlwebport': self._odl_webport,
'odlrestconfport': self._odl_restconfport}
if key:
del kwargs[key]
return kwargs
def _test_run_suites(self, status, *args):
kwargs = self._get_run_suites_kwargs()
self.assertEqual(self.test.run_suites(**kwargs), status)
if len(args) > 0:
args[0].assert_called_once_with(
odl.ODLTests.res_dir)
if len(args) > 1:
variable = [
'KEYSTONEURL:{}://{}'.format(
urllib.parse.urlparse(self._os_auth_url).scheme,
urllib.parse.urlparse(self._os_auth_url).netloc),
'NEUTRONURL:{}'.format(self._neutron_url),
'OS_AUTH_URL:"{}"'.format(self._os_auth_url),
'OSUSERNAME:"{}"'.format(self._os_username),
'OSUSERDOMAINNAME:"{}"'.format(self._os_userdomainname),
'OSTENANTNAME:"{}"'.format(self._os_projectname),
'OSPROJECTDOMAINNAME:"{}"'.format(self._os_projectdomainname),
'OSPASSWORD:"{}"'.format(self._os_password),
'ODL_SYSTEM_IP:{}'.format(self._sdn_controller_ip),
'PORT:{}'.format(self._odl_webport),
'RESTCONFPORT:{}'.format(self._odl_restconfport)]
args[1].assert_called_once_with(
odl.ODLTests.basic_suite_dir,
odl.ODLTests.neutron_suite_dir,
log='NONE',
output=os.path.join(odl.ODLTests.res_dir, 'output.xml'),
report='NONE',
stdout=mock.ANY,
variable=variable)
if len(args) > 2:
args[2].assert_called_with(
os.path.join(odl.ODLTests.res_dir, 'stdout.txt'))
def _test_no_keyword(self, key):
kwargs = self._get_run_suites_kwargs(key)
self.assertEqual(self.test.run_suites(**kwargs),
testcase.TestCase.EX_RUN_ERROR)
def test_no_odlusername(self):
self._test_no_keyword('odlusername')
def test_no_odlpassword(self):
self._test_no_keyword('odlpassword')
def test_no_neutronurl(self):
self._test_no_keyword('neutronurl')
def test_no_osauthurl(self):
self._test_no_keyword('osauthurl')
def test_no_osusername(self):
self._test_no_keyword('osusername')
def test_no_osprojectname(self):
self._test_no_keyword('osprojectname')
def test_no_ospassword(self):
self._test_no_keyword('ospassword')
def test_no_odlip(self):
self._test_no_keyword('odlip')
def test_no_odlwebport(self):
self._test_no_keyword('odlwebport')
def test_no_odlrestconfport(self):
self._test_no_keyword('odlrestconfport')
def test_set_vars_ko(self):
with mock.patch.object(self.test, 'set_robotframework_vars',
return_value=False) as mock_object:
self._test_run_suites(testcase.TestCase.EX_RUN_ERROR)
mock_object.assert_called_once_with(
self._odl_username, self._odl_password)
@mock.patch('os.makedirs', side_effect=Exception)
def test_makedirs_exc(self, mock_method):
with mock.patch.object(self.test, 'set_robotframework_vars',
return_value=True), \
self.assertRaises(Exception):
self._test_run_suites(testcase.TestCase.EX_RUN_ERROR,
mock_method)
@mock.patch('os.makedirs', side_effect=OSError)
def test_makedirs_oserror(self, mock_method):
with mock.patch.object(self.test, 'set_robotframework_vars',
return_value=True):
self._test_run_suites(testcase.TestCase.EX_RUN_ERROR,
mock_method)
@mock.patch('robot.run', side_effect=RobotError)
@mock.patch('os.makedirs')
def test_run_ko(self, *args):
with mock.patch.object(self.test, 'set_robotframework_vars',
return_value=True), \
self.assertRaises(RobotError):
self._test_run_suites(testcase.TestCase.EX_RUN_ERROR, *args)
@mock.patch('robot.run')
@mock.patch('os.makedirs')
def test_parse_results_ko(self, *args):
with mock.patch.object(self.test, 'set_robotframework_vars',
return_value=True), \
mock.patch.object(self.test, 'parse_results',
side_effect=RobotError):
self._test_run_suites(testcase.TestCase.EX_RUN_ERROR, *args)
@mock.patch('robot.run')
@mock.patch('os.makedirs')
def test_ok(self, *args):
with mock.patch.object(self.test, 'set_robotframework_vars',
return_value=True), \
mock.patch.object(self.test, 'parse_results'):
self._test_run_suites(testcase.TestCase.EX_OK, *args)
@mock.patch('robot.run')
@mock.patch('os.makedirs', side_effect=OSError(errno.EEXIST, ''))
def test_makedirs_oserror17(self, *args):
with mock.patch.object(self.test, 'set_robotframework_vars',
return_value=True), \
mock.patch.object(self.test, 'parse_results'):
self._test_run_suites(testcase.TestCase.EX_OK, *args)
@mock.patch('robot.run', return_value=1)
@mock.patch('os.makedirs')
def test_testcases_in_failure(self, *args):
with mock.patch.object(self.test, 'set_robotframework_vars',
return_value=True), \
mock.patch.object(self.test, 'parse_results'):
self._test_run_suites(testcase.TestCase.EX_OK, *args)
class ODLRunTesting(ODLTesting):
"""The class testing ODLTests.run()."""
# pylint: disable=missing-docstring
def _test_no_env_var(self, var):
with mock.patch('functest.utils.openstack_utils.get_endpoint',
return_value=ODLTesting._neutron_url):
del os.environ[var]
self.assertEqual(self.test.run(),
testcase.TestCase.EX_RUN_ERROR)
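    # Checks that run() forwards the expected ODL/OpenStack connection settings to
    # run_suites(); odlip, odlwebport and odlrestconfport fall back to the generic
    # defaults unless the test passes installer-specific values.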
def _test_run(self, status=testcase.TestCase.EX_OK,
exception=None, **kwargs):
odlip = kwargs['odlip'] if 'odlip' in kwargs else '127.0.0.3'
odlwebport = kwargs['odlwebport'] if 'odlwebport' in kwargs else '8080'
odlrestconfport = (kwargs['odlrestconfport']
if 'odlrestconfport' in kwargs else '8181')
with mock.patch('functest.utils.openstack_utils.get_endpoint',
return_value=ODLTesting._neutron_url):
if exception:
self.test.run_suites = mock.Mock(side_effect=exception)
else:
self.test.run_suites = mock.Mock(return_value=status)
self.assertEqual(self.test.run(), status)
self.test.run_suites.assert_called_once_with(
odl.ODLTests.default_suites,
neutronurl=self._neutron_url,
odlip=odlip, odlpassword=self._odl_password,
odlrestconfport=odlrestconfport,
odlusername=self._odl_username, odlwebport=odlwebport,
osauthurl=self._os_auth_url,
ospassword=self._os_password,
osprojectname=self._os_projectname,
osusername=self._os_username,
osprojectdomainname=self._os_projectdomainname,
osuserdomainname=self._os_userdomainname)
def _test_multiple_suites(self, suites,
status=testcase.TestCase.EX_OK, **kwargs):
odlip = kwargs['odlip'] if 'odlip' in kwargs else '127.0.0.3'
odlwebport = kwargs['odlwebport'] if 'odlwebport' in kwargs else '8080'
odlrestconfport = (kwargs['odlrestconfport']
if 'odlrestconfport' in kwargs else '8181')
with mock.patch('functest.utils.openstack_utils.get_endpoint',
return_value=ODLTesting._neutron_url):
self.test.run_suites = mock.Mock(return_value=status)
self.assertEqual(self.test.run(suites=suites), status)
self.test.run_suites.assert_called_once_with(
suites,
neutronurl=self._neutron_url,
odlip=odlip, odlpassword=self._odl_password,
odlrestconfport=odlrestconfport,
odlusername=self._odl_username, odlwebport=odlwebport,
osauthurl=self._os_auth_url,
ospassword=self._os_password,
osprojectname=self._os_projectname,
osusername=self._os_username,
osprojectdomainname=self._os_projectdomainname,
osuserdomainname=self._os_userdomainname)
def test_exc(self):
with mock.patch('functest.utils.openstack_utils.get_endpoint',
side_effect=auth_plugins.MissingAuthPlugin()):
self.assertEqual(self.test.run(),
testcase.TestCase.EX_RUN_ERROR)
def test_no_os_auth_url(self):
self._test_no_env_var("OS_AUTH_URL")
def test_no_os_username(self):
self._test_no_env_var("OS_USERNAME")
def test_no_os_password(self):
self._test_no_env_var("OS_PASSWORD")
    def test_no_os_project_name(self):
self._test_no_env_var("OS_PROJECT_NAME")
def test_run_suites_false(self):
os.environ["SDN_CONTROLLER_IP"] = self._sdn_controller_ip
self._test_run(testcase.TestCase.EX_RUN_ERROR,
odlip=self._sdn_controller_ip,
odlwebport=self._odl_webport)
def test_run_suites_exc(self):
with self.assertRaises(Exception):
os.environ["SDN_CONTROLLER_IP"] = self._sdn_controller_ip
self._test_run(status=testcase.TestCase.EX_RUN_ERROR,
exception=Exception(),
odlip=self._sdn_controller_ip,
odlwebport=self._odl_webport)
def test_no_sdn_controller_ip(self):
with mock.patch('functest.utils.openstack_utils.get_endpoint',
return_value=ODLTesting._neutron_url):
self.assertEqual(self.test.run(),
testcase.TestCase.EX_RUN_ERROR)
def test_without_installer_type(self):
os.environ["SDN_CONTROLLER_IP"] = self._sdn_controller_ip
self._test_run(testcase.TestCase.EX_OK,
odlip=self._sdn_controller_ip,
odlwebport=self._odl_webport)
def test_suites(self):
os.environ["SDN_CONTROLLER_IP"] = self._sdn_controller_ip
self._test_multiple_suites(
[odl.ODLTests.basic_suite_dir],
testcase.TestCase.EX_OK,
odlip=self._sdn_controller_ip,
odlwebport=self._odl_webport)
def test_fuel(self):
os.environ["INSTALLER_TYPE"] = "fuel"
self._test_run(testcase.TestCase.EX_OK,
odlip=urllib.parse.urlparse(self._neutron_url).hostname,
odlwebport='8181',
odlrestconfport='8282')
def test_apex_no_controller_ip(self):
with mock.patch('functest.utils.openstack_utils.get_endpoint',
return_value=ODLTesting._neutron_url):
os.environ["INSTALLER_TYPE"] = "apex"
self.assertEqual(self.test.run(),
testcase.TestCase.EX_RUN_ERROR)
def test_apex(self):
os.environ["SDN_CONTROLLER_IP"] = self._sdn_controller_ip
os.environ["INSTALLER_TYPE"] = "apex"
self._test_run(testcase.TestCase.EX_OK,
odlip=self._sdn_controller_ip, odlwebport='8081',
odlrestconfport='8081')
def test_netvirt_no_controller_ip(self):
with mock.patch('functest.utils.openstack_utils.get_endpoint',
return_value=ODLTesting._neutron_url):
os.environ["INSTALLER_TYPE"] = "netvirt"
self.assertEqual(self.test.run(),
testcase.TestCase.EX_RUN_ERROR)
def test_netvirt(self):
os.environ["SDN_CONTROLLER_IP"] = self._sdn_controller_ip
os.environ["INSTALLER_TYPE"] = "netvirt"
self._test_run(testcase.TestCase.EX_OK,
odlip=self._sdn_controller_ip, odlwebport='8081',
odlrestconfport='8081')
def test_joid_no_controller_ip(self):
with mock.patch('functest.utils.openstack_utils.get_endpoint',
return_value=ODLTesting._neutron_url):
os.environ["INSTALLER_TYPE"] = "joid"
self.assertEqual(self.test.run(),
testcase.TestCase.EX_RUN_ERROR)
def test_joid(self):
os.environ["SDN_CONTROLLER"] = self._sdn_controller_ip
os.environ["INSTALLER_TYPE"] = "joid"
self._test_run(testcase.TestCase.EX_OK,
odlip=self._sdn_controller_ip, odlwebport='8080')
def test_compass(self):
os.environ["INSTALLER_TYPE"] = "compass"
self._test_run(testcase.TestCase.EX_OK,
odlip=urllib.parse.urlparse(self._neutron_url).hostname,
odlrestconfport='8080')
def test_daisy_no_controller_ip(self):
with mock.patch('functest.utils.openstack_utils.get_endpoint',
return_value=ODLTesting._neutron_url):
os.environ["INSTALLER_TYPE"] = "daisy"
self.assertEqual(self.test.run(),
testcase.TestCase.EX_RUN_ERROR)
def test_daisy(self):
os.environ["SDN_CONTROLLER_IP"] = self._sdn_controller_ip
os.environ["INSTALLER_TYPE"] = "daisy"
self._test_run(testcase.TestCase.EX_OK,
odlip=self._sdn_controller_ip, odlwebport='8181',
odlrestconfport='8087')
class ODLArgParserTesting(ODLTesting):
"""The class testing ODLParser."""
# pylint: disable=missing-docstring
def setUp(self):
self.parser = odl.ODLParser()
super(ODLArgParserTesting, self).setUp()
def test_default(self):
self.assertEqual(self.parser.parse_args(), self.defaultargs)
def test_basic(self):
self.defaultargs['neutronurl'] = self._neutron_url
self.defaultargs['odlip'] = self._sdn_controller_ip
self.assertEqual(
self.parser.parse_args(
["--neutronurl={}".format(self._neutron_url),
"--odlip={}".format(self._sdn_controller_ip)]),
self.defaultargs)
@mock.patch('sys.stderr', new_callable=six.StringIO)
def test_fail(self, mock_method):
self.defaultargs['foo'] = 'bar'
with self.assertRaises(SystemExit):
self.parser.parse_args(["--foo=bar"])
self.assertTrue(mock_method.getvalue().startswith("usage:"))
def _test_arg(self, arg, value):
self.defaultargs[arg] = value
self.assertEqual(
self.parser.parse_args(["--{}={}".format(arg, value)]),
self.defaultargs)
def test_odlusername(self):
self._test_arg('odlusername', 'foo')
def test_odlpassword(self):
self._test_arg('odlpassword', 'foo')
def test_osauthurl(self):
self._test_arg('osauthurl', 'http://127.0.0.4:5000/v2')
def test_neutronurl(self):
self._test_arg('neutronurl', 'http://127.0.0.4:9696')
def test_osusername(self):
self._test_arg('osusername', 'foo')
def test_osuserdomainname(self):
self._test_arg('osuserdomainname', 'domain')
def test_osprojectname(self):
self._test_arg('osprojectname', 'foo')
def test_osprojectdomainname(self):
self._test_arg('osprojectdomainname', 'domain')
def test_ospassword(self):
self._test_arg('ospassword', 'foo')
def test_odlip(self):
self._test_arg('odlip', '127.0.0.4')
def test_odlwebport(self):
self._test_arg('odlwebport', '80')
def test_odlrestconfport(self):
self._test_arg('odlrestconfport', '80')
def test_pushtodb(self):
self.defaultargs['pushtodb'] = True
self.assertEqual(self.parser.parse_args(["--{}".format('pushtodb')]),
self.defaultargs)
def test_multiple_args(self):
self.defaultargs['neutronurl'] = self._neutron_url
self.defaultargs['odlip'] = self._sdn_controller_ip
self.assertEqual(
self.parser.parse_args(
["--neutronurl={}".format(self._neutron_url),
"--odlip={}".format(self._sdn_controller_ip)]),
self.defaultargs)
if __name__ == "__main__":
logging.disable(logging.CRITICAL)
unittest.main(verbosity=2)
| [((68, 4, 68, 37), 'logging.disable', 'logging.disable', ({(68, 20, 68, 36): 'logging.CRITICAL'}, {}), '(logging.CRITICAL)', False, 'import logging\n'), ((120, 5, 120, 67), 'mock.patch', 'mock.patch', (), '', False, 'import mock\n'), ((174, 5, 174, 51), 'mock.patch', 'mock.patch', (), '', False, 'import mock\n'), ((181, 5, 181, 56), 'mock.patch', 'mock.patch', (), '', False, 'import mock\n'), ((202, 5, 202, 56), 'mock.patch', 'mock.patch', (), '', False, 'import mock\n'), ((313, 5, 313, 53), 'mock.patch', 'mock.patch', (), '', False, 'import mock\n'), ((321, 5, 321, 51), 'mock.patch', 'mock.patch', (), '', False, 'import mock\n'), ((328, 5, 328, 52), 'mock.patch', 'mock.patch', (), '', False, 'import mock\n'), ((329, 5, 329, 30), 'mock.patch', 'mock.patch', ({(329, 16, 329, 29): '"""os.makedirs"""'}, {}), "('os.makedirs')", False, 'import mock\n'), ((336, 5, 336, 28), 'mock.patch', 'mock.patch', ({(336, 16, 336, 27): '"""robot.run"""'}, {}), "('robot.run')", False, 'import mock\n'), ((337, 5, 337, 30), 'mock.patch', 'mock.patch', ({(337, 16, 337, 29): '"""os.makedirs"""'}, {}), "('os.makedirs')", False, 'import mock\n'), ((345, 5, 345, 28), 'mock.patch', 'mock.patch', ({(345, 16, 345, 27): '"""robot.run"""'}, {}), "('robot.run')", False, 'import mock\n'), ((346, 5, 346, 30), 'mock.patch', 'mock.patch', ({(346, 16, 346, 29): '"""os.makedirs"""'}, {}), "('os.makedirs')", False, 'import mock\n'), ((353, 5, 353, 28), 'mock.patch', 'mock.patch', ({(353, 16, 353, 27): '"""robot.run"""'}, {}), "('robot.run')", False, 'import mock\n'), ((361, 5, 361, 44), 'mock.patch', 'mock.patch', (), '', False, 'import mock\n'), ((362, 5, 362, 30), 'mock.patch', 'mock.patch', ({(362, 16, 362, 29): '"""os.makedirs"""'}, {}), "('os.makedirs')", False, 'import mock\n'), ((574, 5, 574, 56), 'mock.patch', 'mock.patch', (), '', False, 'import mock\n'), ((639, 4, 639, 37), 'logging.disable', 'logging.disable', ({(639, 20, 639, 36): 'logging.CRITICAL'}, {}), '(logging.CRITICAL)', False, 'import logging\n'), ((640, 4, 640, 30), 'unittest.main', 'unittest.main', (), '', False, 'import unittest\n'), ((37, 23, 37, 45), 'functest.opnfv_tests.sdn.odl.odl.ODLResultVisitor', 'odl.ODLResultVisitor', ({}, {}), '()', False, 'from functest.opnfv_tests.sdn.odl import odl\n'), ((51, 15, 53, 65), 'robot.result.model.TestCase', 'model.TestCase', (), '', False, 'from robot.result import model\n'), ((54, 22, 54, 33), 'mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'import mock\n'), ((95, 20, 95, 74), 'functest.opnfv_tests.sdn.odl.odl.ODLTests', 'odl.ODLTests', (), '', False, 'from functest.opnfv_tests.sdn.odl import odl\n'), ((128, 16, 128, 27), 'mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'import mock\n'), ((183, 15, 183, 31), 'mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'import mock\n'), ((204, 15, 204, 31), 'mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'import mock\n'), ((559, 22, 559, 37), 'functest.opnfv_tests.sdn.odl.odl.ODLParser', 'odl.ODLParser', ({}, {}), '()', False, 'from functest.opnfv_tests.sdn.odl import odl\n'), ((125, 12, 125, 60), 'os.path.join', 'os.path.join', ({(125, 25, 125, 45): 'odl.ODLTests.res_dir', (125, 47, 125, 59): '"""output.xml"""'}, {}), "(odl.ODLTests.res_dir, 'output.xml')", False, 'import os\n'), ((171, 12, 172, 58), 'os.path.join', 'os.path.join', ({(171, 25, 171, 51): 'odl.ODLTests.odl_test_repo', (172, 25, 172, 57): '"""csit/variables/Variables.robot"""'}, {}), "(odl.ODLTests.odl_test_repo, 'csit/variables/Variables.robot')", False, 'import os\n'), ((178, 12, 179, 
58), 'os.path.join', 'os.path.join', ({(178, 25, 178, 51): 'odl.ODLTests.odl_test_repo', (179, 25, 179, 57): '"""csit/variables/Variables.robot"""'}, {}), "(odl.ODLTests.odl_test_repo, 'csit/variables/Variables.robot')", False, 'import os\n'), ((185, 13, 185, 61), 'mock.patch', 'mock.patch', (), '', False, 'import mock\n'), ((206, 13, 206, 61), 'mock.patch', 'mock.patch', (), '', False, 'import mock\n'), ((307, 13, 308, 50), 'mock.patch.object', 'mock.patch.object', (), '', False, 'import mock\n'), ((315, 13, 316, 49), 'mock.patch.object', 'mock.patch.object', (), '', False, 'import mock\n'), ((323, 13, 324, 49), 'mock.patch.object', 'mock.patch.object', (), '', False, 'import mock\n'), ((331, 13, 332, 49), 'mock.patch.object', 'mock.patch.object', (), '', False, 'import mock\n'), ((339, 13, 340, 49), 'mock.patch.object', 'mock.patch.object', (), '', False, 'import mock\n'), ((341, 16, 342, 57), 'mock.patch.object', 'mock.patch.object', (), '', False, 'import mock\n'), ((348, 13, 349, 49), 'mock.patch.object', 'mock.patch.object', (), '', False, 'import mock\n'), ((350, 16, 350, 61), 'mock.patch.object', 'mock.patch.object', ({(350, 34, 350, 43): 'self.test', (350, 45, 350, 60): '"""parse_results"""'}, {}), "(self.test, 'parse_results')", False, 'import mock\n'), ((356, 13, 357, 49), 'mock.patch.object', 'mock.patch.object', (), '', False, 'import mock\n'), ((358, 16, 358, 61), 'mock.patch.object', 'mock.patch.object', ({(358, 34, 358, 43): 'self.test', (358, 45, 358, 60): '"""parse_results"""'}, {}), "(self.test, 'parse_results')", False, 'import mock\n'), ((364, 13, 365, 49), 'mock.patch.object', 'mock.patch.object', (), '', False, 'import mock\n'), ((366, 16, 366, 61), 'mock.patch.object', 'mock.patch.object', ({(366, 34, 366, 43): 'self.test', (366, 45, 366, 60): '"""parse_results"""'}, {}), "(self.test, 'parse_results')", False, 'import mock\n'), ((376, 13, 377, 61), 'mock.patch', 'mock.patch', (), '', False, 'import mock\n'), ((389, 13, 390, 61), 'mock.patch', 'mock.patch', (), '', False, 'import mock\n'), ((415, 13, 416, 61), 'mock.patch', 'mock.patch', (), '', False, 'import mock\n'), ((417, 35, 417, 65), 'mock.Mock', 'mock.Mock', (), '', False, 'import mock\n'), ((465, 13, 466, 61), 'mock.patch', 'mock.patch', (), '', False, 'import mock\n'), ((492, 13, 493, 61), 'mock.patch', 'mock.patch', (), '', False, 'import mock\n'), ((506, 13, 507, 61), 'mock.patch', 'mock.patch', (), '', False, 'import mock\n'), ((520, 13, 521, 61), 'mock.patch', 'mock.patch', (), '', False, 'import mock\n'), ((539, 13, 540, 61), 'mock.patch', 'mock.patch', (), '', False, 'import mock\n'), ((135, 29, 135, 67), 'robot.utils.robottime.timestamp_to_secs', 'timestamp_to_secs', ({(135, 47, 135, 66): "config['starttime']"}, {}), "(config['starttime'])", False, 'from robot.utils.robottime import timestamp_to_secs\n'), ((137, 29, 137, 65), 'robot.utils.robottime.timestamp_to_secs', 'timestamp_to_secs', ({(137, 47, 137, 64): "config['endtime']"}, {}), "(config['endtime'])", False, 'from robot.utils.robottime import timestamp_to_secs\n'), ((188, 16, 189, 62), 'os.path.join', 'os.path.join', ({(188, 29, 188, 55): 'odl.ODLTests.odl_test_repo', (189, 29, 189, 61): '"""csit/variables/Variables.robot"""'}, {}), "(odl.ODLTests.odl_test_repo, 'csit/variables/Variables.robot')", False, 'import os\n'), ((209, 16, 210, 62), 'os.path.join', 'os.path.join', ({(209, 29, 209, 55): 'odl.ODLTests.odl_test_repo', (210, 29, 210, 61): '"""csit/variables/Variables.robot"""'}, {}), "(odl.ODLTests.odl_test_repo, 
'csit/variables/Variables.robot')", False, 'import os\n'), ((269, 16, 269, 64), 'os.path.join', 'os.path.join', ({(269, 29, 269, 49): 'odl.ODLTests.res_dir', (269, 51, 269, 63): '"""stdout.txt"""'}, {}), "(odl.ODLTests.res_dir, 'stdout.txt')", False, 'import os\n'), ((392, 39, 392, 71), 'mock.Mock', 'mock.Mock', (), '', False, 'import mock\n'), ((394, 39, 394, 69), 'mock.Mock', 'mock.Mock', (), '', False, 'import mock\n'), ((131, 37, 131, 59), 'mock.Mock', 'mock.Mock', (), '', False, 'import mock\n'), ((263, 23, 263, 71), 'os.path.join', 'os.path.join', ({(263, 36, 263, 56): 'odl.ODLTests.res_dir', (263, 58, 263, 70): '"""output.xml"""'}, {}), "(odl.ODLTests.res_dir, 'output.xml')", False, 'import os\n'), ((434, 36, 434, 68), 'keystoneauth1.exceptions.auth_plugins.MissingAuthPlugin', 'auth_plugins.MissingAuthPlugin', ({}, {}), '()', False, 'from keystoneauth1.exceptions import auth_plugins\n'), ((487, 29, 487, 69), 'six.moves.urllib.parse.urlparse', 'urllib.parse.urlparse', ({(487, 51, 487, 68): 'self._neutron_url'}, {}), '(self._neutron_url)', False, 'from six.moves import urllib\n'), ((535, 29, 535, 69), 'six.moves.urllib.parse.urlparse', 'urllib.parse.urlparse', ({(535, 51, 535, 68): 'self._neutron_url'}, {}), '(self._neutron_url)', False, 'from six.moves import urllib\n'), ((247, 20, 247, 60), 'six.moves.urllib.parse.urlparse', 'urllib.parse.urlparse', ({(247, 42, 247, 59): 'self._os_auth_url'}, {}), '(self._os_auth_url)', False, 'from six.moves import urllib\n'), ((248, 20, 248, 60), 'six.moves.urllib.parse.urlparse', 'urllib.parse.urlparse', ({(248, 42, 248, 59): 'self._os_auth_url'}, {}), '(self._os_auth_url)', False, 'from six.moves import urllib\n')] |
OptimalRanging/NTPsec | ntpclients/ntptrace.py | 7fa9b38c3e91f96b173ffa02bafa29cf81173cf7 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
ntptrace - trace peers of an NTP server
Usage: ntptrace [-n | --numeric] [-m number | --max-hosts=number]
[-r hostname | --host=hostname] [--help | --more-help]
hostname
See the manual page for details.
"""
# SPDX-License-Identifier: BSD-2-Clause
from __future__ import print_function
import getopt
import re
import subprocess
import sys
try:
import ntp.util
except ImportError as e:
sys.stderr.write(
"ntptrace: can't find Python NTP library.\n")
sys.stderr.write("%s\n" % e)
sys.exit(1)
def get_info(host):
info = ntp_read_vars(0, [], host)
if info is None or 'stratum' not in info:
return
info['offset'] = round(float(info['offset']) / 1000, 6)
info['syncdistance'] = \
(float(info['rootdisp']) + (float(info['rootdelay']) / 2)) / 1000
return info
def get_next_host(peer, host):
info = ntp_read_vars(peer, ["srcadr"], host)
if info is None:
return
return info['srcadr']
def ntp_read_vars(peer, vars, host):
obsolete = {'phase': 'offset',
'rootdispersion': 'rootdisp'}
if not len(vars):
do_all = True
else:
do_all = False
outvars = {}.fromkeys(vars)
if do_all:
outvars['status_line'] = {}
cmd = ["ntpq", "-n", "-c", "rv %s %s" % (peer, ",".join(vars))]
if host is not None:
cmd.append(host)
try:
# sadly subprocess.check_output() is not in Python 2.6
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out = proc.communicate()[0]
output = out.decode('utf-8').splitlines()
except subprocess.CalledProcessError as e:
print("Could not start ntpq: %s" % e.output, file=sys.stderr)
raise SystemExit(1)
except OSError as e:
print("Could not start ntpq: %s" % e.strerror, file=sys.stderr)
raise SystemExit(1)
for line in output:
if re.search(r'Connection refused', line):
return
match = re.search(r'^asso?c?id=0 status=(\S{4}) (\S+), (\S+),', line,
flags=re.IGNORECASE)
if match:
outvars['status_line']['status'] = match.group(1)
outvars['status_line']['leap'] = match.group(2)
outvars['status_line']['sync'] = match.group(3)
iterator = re.finditer(r'(\w+)=([^,]+),?\s?', line)
for match in iterator:
key = match.group(1)
val = match.group(2)
val = re.sub(r'^"([^"]+)"$', r'\1', val)
if key in obsolete:
key = obsolete[key]
if do_all or key in outvars:
outvars[key] = val
return outvars
usage = r"""ntptrace - trace peers of an NTP server
USAGE: ntptrace [-<flag> [<val>] | --<name>[{=| }<val>]]... [host]
-n, --numeric Print IP addresses instead of hostnames
-m, --max-hosts=num Maximum number of peers to trace
-r, --host=str Single remote host
-?, --help Display usage information and exit
--more-help Pass the extended usage text through a pager
Options are specified by doubled hyphens and their name or by a single
hyphen and the flag character.""" + "\n"
try:
(options, arguments) = getopt.getopt(
sys.argv[1:], "m:nr:?",
["help", "host=", "max-hosts=", "more-help", "numeric"])
except getopt.GetoptError as err:
sys.stderr.write(str(err) + "\n")
raise SystemExit(1)
numeric = False
maxhosts = 99
host = '127.0.0.1'
for (switch, val) in options:
if switch == "-m" or switch == "--max-hosts":
errmsg = "Error: -m parameter '%s' not a number\n"
maxhosts = ntp.util.safeargcast(val, int, errmsg, usage)
elif switch == "-n" or switch == "--numeric":
numeric = True
elif switch == "-r" or switch == "--host":
host = val
elif switch == "-?" or switch == "--help" or switch == "--more-help":
print(usage, file=sys.stderr)
raise SystemExit(0)
if len(arguments):
host = arguments[0]
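# Follow the chain of servers: query each host, report its stratum, offset and
# synch distance, then move on to its system peer until a stratum-1 server, a
# reference clock or the hop limit is reached.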
hostcount = 0
while True:
hostcount += 1
info = get_info(host)
if info is None:
break
if not numeric:
host = ntp.util.canonicalize_dns(host)
print("%s: stratum %d, offset %f, synch distance %f" %
(host, int(info['stratum']), info['offset'], info['syncdistance']),
end='')
if int(info['stratum']) == 1:
print(", refid '%s'" % info['refid'], end='')
print()
if (int(info['stratum']) == 0 or int(info['stratum']) == 1 or
int(info['stratum']) == 16):
break
if re.search(r'^127\.127\.\d{1,3}\.\d{1,3}$', info['refid']):
break
if hostcount == maxhosts:
break
next_host = get_next_host(info['peer'], host)
if next_host is None:
break
if re.search(r'^127\.127\.\d{1,3}\.\d{1,3}$', next_host):
break
host = next_host
| [((118, 27, 120, 64), 'getopt.getopt', 'getopt.getopt', ({(119, 8, 119, 20): 'sys.argv[1:]', (119, 22, 119, 30): '"""m:nr:?"""', (120, 8, 120, 63): "['help', 'host=', 'max-hosts=', 'more-help', 'numeric']"}, {}), "(sys.argv[1:], 'm:nr:?', ['help', 'host=', 'max-hosts=',\n 'more-help', 'numeric'])", False, 'import getopt\n'), ((168, 7, 168, 64), 're.search', 're.search', ({(168, 17, 168, 48): '"""^127\\\\.127\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}$"""', (168, 50, 168, 63): "info['refid']"}, {}), "('^127\\\\.127\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}$', info['refid'])", False, 'import re\n'), ((178, 7, 178, 60), 're.search', 're.search', ({(178, 17, 178, 48): '"""^127\\\\.127\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}$"""', (178, 50, 178, 59): 'next_host'}, {}), "('^127\\\\.127\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}$', next_host)", False, 'import re\n'), ((24, 4, 25, 53), 'sys.stderr.write', 'sys.stderr.write', ({(25, 8, 25, 52): '"""ntptrace: can\'t find Python NTP library.\n"""'}, {}), '("ntptrace: can\'t find Python NTP library.\\n")', False, 'import sys\n'), ((26, 4, 26, 32), 'sys.stderr.write', 'sys.stderr.write', ({(26, 21, 26, 31): "('%s\\n' % e)"}, {}), "('%s\\n' % e)", False, 'import sys\n'), ((27, 4, 27, 15), 'sys.exit', 'sys.exit', ({(27, 13, 27, 14): '(1)'}, {}), '(1)', False, 'import sys\n'), ((68, 15, 71, 37), 'subprocess.Popen', 'subprocess.Popen', (), '', False, 'import subprocess\n'), ((82, 11, 82, 49), 're.search', 're.search', ({(82, 21, 82, 42): '"""Connection refused"""', (82, 44, 82, 48): 'line'}, {}), "('Connection refused', line)", False, 'import re\n'), ((85, 16, 86, 46), 're.search', 're.search', (), '', False, 'import re\n'), ((92, 19, 92, 59), 're.finditer', 're.finditer', ({(92, 31, 92, 52): '"""(\\\\w+)=([^,]+),?\\\\s?"""', (92, 54, 92, 58): 'line'}, {}), "('(\\\\w+)=([^,]+),?\\\\s?', line)", False, 'import re\n'), ((96, 18, 96, 52), 're.sub', 're.sub', ({(96, 25, 96, 39): '"""^"([^"]+)"$"""', (96, 41, 96, 46): '"""\\\\1"""', (96, 48, 96, 51): 'val'}, {}), '(\'^"([^"]+)"$\', \'\\\\1\', val)', False, 'import re\n')] |
abueide/lbry | lbrynet/wallet/server/block_processor.py | 7f5deaf6c80422a30b3714d4bf12e028756ed9fe | import struct
import msgpack
from lbrynet.wallet.transaction import Transaction, Output
from torba.server.hash import hash_to_hex_str
from torba.server.block_processor import BlockProcessor
from lbrynet.schema.claim import Claim
from lbrynet.wallet.server.model import ClaimInfo
class LBRYBlockProcessor(BlockProcessor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.env.coin.NET == "regtest":
self.prefetcher.polling_delay = 0.5
self.should_validate_signatures = self.env.boolean('VALIDATE_CLAIM_SIGNATURES', False)
self.logger.info("LbryumX Block Processor - Validating signatures: {}".format(self.should_validate_signatures))
def advance_blocks(self, blocks):
# save height, advance blocks as usual, then hook our claim tx processing
height = self.height + 1
super().advance_blocks(blocks)
pending_undo = []
for index, block in enumerate(blocks):
undo = self.advance_claim_txs(block.transactions, height + index)
pending_undo.append((height+index, undo,))
self.db.write_undo(pending_undo)
def advance_claim_txs(self, txs, height):
# TODO: generate claim undo info!
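        # undo_info collects (claim_id, previous ClaimInfo or None) pairs so that
        # backup_from_undo_info() can restore the prior state on a reorg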
undo_info = []
add_undo = undo_info.append
update_inputs = set()
for etx, txid in txs:
update_inputs.clear()
tx = Transaction(etx.serialize())
for index, output in enumerate(tx.outputs):
if not output.is_claim:
continue
if output.script.is_claim_name:
add_undo(self.advance_claim_name_transaction(output, height, txid, index))
elif output.script.is_update_claim:
update_input = self.db.get_update_input(output.claim_hash, tx.inputs)
if update_input:
update_inputs.add(update_input)
add_undo(self.advance_update_claim(output, height, txid, index))
else:
info = (hash_to_hex_str(txid), output.claim_id,)
self.logger.error("REJECTED: {} updating {}".format(*info))
for txin in tx.inputs:
if txin not in update_inputs:
abandoned_claim_id = self.db.abandon_spent(txin.txo_ref.tx_ref.hash, txin.txo_ref.position)
if abandoned_claim_id:
add_undo((abandoned_claim_id, self.db.get_claim_info(abandoned_claim_id)))
return undo_info
def advance_update_claim(self, output: Output, height, txid, nout):
claim_id = output.claim_hash
claim_info = self.claim_info_from_output(output, txid, nout, height)
old_claim_info = self.db.get_claim_info(claim_id)
self.db.put_claim_id_for_outpoint(old_claim_info.txid, old_claim_info.nout, None)
if old_claim_info.cert_id:
self.db.remove_claim_from_certificate_claims(old_claim_info.cert_id, claim_id)
if claim_info.cert_id:
self.db.put_claim_id_signed_by_cert_id(claim_info.cert_id, claim_id)
self.db.put_claim_info(claim_id, claim_info)
self.db.put_claim_id_for_outpoint(txid, nout, claim_id)
return claim_id, old_claim_info
def advance_claim_name_transaction(self, output: Output, height, txid, nout):
claim_id = output.claim_hash
claim_info = self.claim_info_from_output(output, txid, nout, height)
if claim_info.cert_id:
self.db.put_claim_id_signed_by_cert_id(claim_info.cert_id, claim_id)
self.db.put_claim_info(claim_id, claim_info)
self.db.put_claim_id_for_outpoint(txid, nout, claim_id)
return claim_id, None
def backup_from_undo_info(self, claim_id, undo_claim_info):
"""
Undo information holds a claim state **before** a transaction changes it
There are 4 possibilities when processing it, of which only 3 are valid ones:
1. the claim is known and the undo info has info, it was an update
2. the claim is known and the undo info doesn't hold any info, it was claimed
        3. the claim is unknown and the undo info has info, it was abandoned
        4. the claim is unknown and the undo info doesn't hold info, error!
"""
undo_claim_info = ClaimInfo(*undo_claim_info) if undo_claim_info else None
current_claim_info = self.db.get_claim_info(claim_id)
if current_claim_info and undo_claim_info:
# update, remove current claim
self.db.remove_claim_id_for_outpoint(current_claim_info.txid, current_claim_info.nout)
if current_claim_info.cert_id:
self.db.remove_claim_from_certificate_claims(current_claim_info.cert_id, claim_id)
elif current_claim_info and not undo_claim_info:
# claim, abandon it
self.db.abandon_spent(current_claim_info.txid, current_claim_info.nout)
elif not current_claim_info and undo_claim_info:
# abandon, reclaim it (happens below)
pass
else:
# should never happen, unless the database got into an inconsistent state
raise Exception("Unexpected situation occurred on backup, this means the database is inconsistent. "
"Please report. Resetting the data folder (reindex) solves it for now.")
if undo_claim_info:
self.db.put_claim_info(claim_id, undo_claim_info)
if undo_claim_info.cert_id:
cert_id = self._checksig(undo_claim_info.value, undo_claim_info.address)
self.db.put_claim_id_signed_by_cert_id(cert_id, claim_id)
self.db.put_claim_id_for_outpoint(undo_claim_info.txid, undo_claim_info.nout, claim_id)
def backup_txs(self, txs):
self.logger.info("Reorg at height {} with {} transactions.".format(self.height, len(txs)))
undo_info = msgpack.loads(self.db.claim_undo_db.get(struct.pack(">I", self.height)), use_list=False)
for claim_id, undo_claim_info in reversed(undo_info):
self.backup_from_undo_info(claim_id, undo_claim_info)
return super().backup_txs(txs)
def backup_blocks(self, raw_blocks):
self.db.batched_flush_claims()
super().backup_blocks(raw_blocks=raw_blocks)
self.db.batched_flush_claims()
async def flush(self, flush_utxos):
self.db.batched_flush_claims()
return await super().flush(flush_utxos)
def claim_info_from_output(self, output: Output, txid, nout, height):
address = self.coin.address_from_script(output.script.source)
name, value, cert_id = output.script.values['claim_name'], output.script.values['claim'], None
assert txid and address
cert_id = self._checksig(value, address)
return ClaimInfo(name, value, txid, nout, output.amount, address, height, cert_id)
def _checksig(self, value, address):
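        # Returns the signing channel (certificate) claim id, if any; when
        # VALIDATE_CLAIM_SIGNATURES is enabled the signature is also verified
        # against the channel certificate. Any failure is treated as unsigned.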
try:
claim_dict = Claim.from_bytes(value)
cert_id = claim_dict.signing_channel_hash
if not self.should_validate_signatures:
return cert_id
if cert_id:
cert_claim = self.db.get_claim_info(cert_id)
if cert_claim:
certificate = Claim.from_bytes(cert_claim.value)
claim_dict.validate_signature(address, certificate)
return cert_id
except Exception:
pass
| [((139, 15, 139, 90), 'lbrynet.wallet.server.model.ClaimInfo', 'ClaimInfo', ({(139, 25, 139, 29): 'name', (139, 31, 139, 36): 'value', (139, 38, 139, 42): 'txid', (139, 44, 139, 48): 'nout', (139, 50, 139, 63): 'output.amount', (139, 65, 139, 72): 'address', (139, 74, 139, 80): 'height', (139, 82, 139, 89): 'cert_id'}, {}), '(name, value, txid, nout, output.amount, address, height, cert_id)', False, 'from lbrynet.wallet.server.model import ClaimInfo\n'), ((94, 26, 94, 53), 'lbrynet.wallet.server.model.ClaimInfo', 'ClaimInfo', ({(94, 36, 94, 52): '*undo_claim_info'}, {}), '(*undo_claim_info)', False, 'from lbrynet.wallet.server.model import ClaimInfo\n'), ((143, 25, 143, 48), 'lbrynet.schema.claim.Claim.from_bytes', 'Claim.from_bytes', ({(143, 42, 143, 47): 'value'}, {}), '(value)', False, 'from lbrynet.schema.claim import Claim\n'), ((120, 60, 120, 90), 'struct.pack', 'struct.pack', ({(120, 72, 120, 76): '""">I"""', (120, 78, 120, 89): 'self.height'}, {}), "('>I', self.height)", False, 'import struct\n'), ((150, 34, 150, 68), 'lbrynet.schema.claim.Claim.from_bytes', 'Claim.from_bytes', ({(150, 51, 150, 67): 'cert_claim.value'}, {}), '(cert_claim.value)', False, 'from lbrynet.schema.claim import Claim\n'), ((53, 32, 53, 53), 'torba.server.hash.hash_to_hex_str', 'hash_to_hex_str', ({(53, 48, 53, 52): 'txid'}, {}), '(txid)', False, 'from torba.server.hash import hash_to_hex_str\n')] |
jihunroh/ProjectEuler-Python | ProjectEuler.Problem.013.py | 2fceaf5c3dd61038004b6128c5d9ee7a76142bca | from ProjectEulerCommons.Base import *
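# Project Euler 13: work out the first ten digits of the sum of the following
# one hundred 50-digit numbers.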
numbers_list = """37107287533902102798797998220837590246510135740250
46376937677490009712648124896970078050417018260538
74324986199524741059474233309513058123726617309629
91942213363574161572522430563301811072406154908250
23067588207539346171171980310421047513778063246676
89261670696623633820136378418383684178734361726757
28112879812849979408065481931592621691275889832738
44274228917432520321923589422876796487670272189318
47451445736001306439091167216856844588711603153276
70386486105843025439939619828917593665686757934951
62176457141856560629502157223196586755079324193331
64906352462741904929101432445813822663347944758178
92575867718337217661963751590579239728245598838407
58203565325359399008402633568948830189458628227828
80181199384826282014278194139940567587151170094390
35398664372827112653829987240784473053190104293586
86515506006295864861532075273371959191420517255829
71693888707715466499115593487603532921714970056938
54370070576826684624621495650076471787294438377604
53282654108756828443191190634694037855217779295145
36123272525000296071075082563815656710885258350721
45876576172410976447339110607218265236877223636045
17423706905851860660448207621209813287860733969412
81142660418086830619328460811191061556940512689692
51934325451728388641918047049293215058642563049483
62467221648435076201727918039944693004732956340691
15732444386908125794514089057706229429197107928209
55037687525678773091862540744969844508330393682126
18336384825330154686196124348767681297534375946515
80386287592878490201521685554828717201219257766954
78182833757993103614740356856449095527097864797581
16726320100436897842553539920931837441497806860984
48403098129077791799088218795327364475675590848030
87086987551392711854517078544161852424320693150332
59959406895756536782107074926966537676326235447210
69793950679652694742597709739166693763042633987085
41052684708299085211399427365734116182760315001271
65378607361501080857009149939512557028198746004375
35829035317434717326932123578154982629742552737307
94953759765105305946966067683156574377167401875275
88902802571733229619176668713819931811048770190271
25267680276078003013678680992525463401061632866526
36270218540497705585629946580636237993140746255962
24074486908231174977792365466257246923322810917141
91430288197103288597806669760892938638285025333403
34413065578016127815921815005561868836468420090470
23053081172816430487623791969842487255036638784583
11487696932154902810424020138335124462181441773470
63783299490636259666498587618221225225512486764533
67720186971698544312419572409913959008952310058822
95548255300263520781532296796249481641953868218774
76085327132285723110424803456124867697064507995236
37774242535411291684276865538926205024910326572967
23701913275725675285653248258265463092207058596522
29798860272258331913126375147341994889534765745501
18495701454879288984856827726077713721403798879715
38298203783031473527721580348144513491373226651381
34829543829199918180278916522431027392251122869539
40957953066405232632538044100059654939159879593635
29746152185502371307642255121183693803580388584903
41698116222072977186158236678424689157993532961922
62467957194401269043877107275048102390895523597457
23189706772547915061505504953922979530901129967519
86188088225875314529584099251203829009407770775672
11306739708304724483816533873502340845647058077308
82959174767140363198008187129011875491310547126581
97623331044818386269515456334926366572897563400500
42846280183517070527831839425882145521227251250327
55121603546981200581762165212827652751691296897789
32238195734329339946437501907836945765883352399886
75506164965184775180738168837861091527357929701337
62177842752192623401942399639168044983993173312731
32924185707147349566916674687634660915035914677504
99518671430235219628894890102423325116913619626622
73267460800591547471830798392868535206946944540724
76841822524674417161514036427982273348055556214818
97142617910342598647204516893989422179826088076852
87783646182799346313767754307809363333018982642090
10848802521674670883215120185883543223812876952786
71329612474782464538636993009049310363619763878039
62184073572399794223406235393808339651327408011116
66627891981488087797941876876144230030984490851411
60661826293682836764744779239180335110989069790714
85786944089552990653640447425576083659976645795096
66024396409905389607120198219976047599490197230297
64913982680032973156037120041377903785566085089252
16730939319872750275468906903707539413042652315011
94809377245048795150954100921645863754710598436791
78639167021187492431995700641917969777599028300699
15368713711936614952811305876380278410754449733078
40789923115535562561142322423255033685442488917353
44889911501440648020369068063960672322193204149535
41503128880339536053299340368006977710650566631954
81234880673210146739058568557934581403627822703280
82616570773948327592232845941706525094512325230608
22918802058777319719839450180888072429661980811197
77158542502016545090413245809786882778948721859617
72107838435069186155435662884062257473692284509516
20849603980134001723930671666823555245252804609722
53503534226472524250874054075591789781264330331690""".splitlines()
Answer(
str(sum([int(line) for line in numbers_list]))[0:10]
)
"""
------------------------------------------------
ProjectEuler.Problem.013.py
The Answer is: 5537376230
Time Elasped: 0.005984783172607422sec
------------------------------------------------
"""
| [] |
historeno/enermaps | api/app/endpoints/datasets.py | ad3a97636baa153a56367e374d0fef7f009bf19d | """Endpoint for the manipulation of datasets
"""
import hashlib
from flask import Response
from flask_restx import Namespace, Resource, abort
from app.common import client
from app.common import datasets as datasets_fcts
from app.common import path
api = Namespace("datasets", description="Datasets related endpoints")
@api.route("/")
class Datasets(Resource):
def get(self):
"""Return a list of all datasets known by the platform"""
datasets = client.get_dataset_list()
if len(datasets) == 0:
abort(404)
add_openaire_links(datasets)
return datasets
@api.route("/full/")
class DatasetsFull(Resource):
def get(self):
"""Return a list of all datasets known by the platform, along with their
variables and time periods"""
datasets = client.get_dataset_list()
if len(datasets) == 0:
abort(404)
for dataset in datasets:
dataset["info"] = client.get_parameters(dataset["ds_id"])
if dataset["info"] is None:
abort(404)
datasets_fcts.process_parameters(
dataset["info"],
dataset_id=dataset["ds_id"],
is_raster=dataset["is_raster"],
)
add_openaire_links(datasets)
return datasets
@api.route("/<int:id>/parameters/")
class DatasetParameters(Resource):
def get(self, id):
"""Return the variables and time periods available in a dataset"""
parameters = client.get_parameters(id)
if parameters is None:
abort(404)
datasets_fcts.process_parameters(parameters)
return parameters
@api.route(
"/layer_name/vector/<int:id>/", defaults={"variable": None, "time_period": None}
)
@api.route("/layer_name/vector/<int:id>/<string:variable>/<string:time_period>/")
@api.route(
"/layer_name/vector/<int:id>/<string:variable>/", defaults={"time_period": None}
)
@api.route(
"/layer_name/vector/<int:id>/-/<string:time_period>/",
defaults={"variable": None},
)
class VectorLayerName(Resource):
def get(self, id, variable=None, time_period=None):
"""Return an unique layer name"""
if variable is not None:
variable = variable.replace("__SLASH__", "/")
layer_name = path.make_unique_layer_name(
path.VECTOR, id, variable=variable, time_period=time_period
)
return Response(layer_name, mimetype="text/plain")
@api.route(
"/layer_name/raster/<int:id>/", defaults={"variable": None, "time_period": None}
)
@api.route("/layer_name/raster/<int:id>/<string:variable>/<string:time_period>/")
@api.route(
"/layer_name/raster/<int:id>/<string:variable>/", defaults={"time_period": None}
)
@api.route(
"/layer_name/raster/<int:id>/-/<string:time_period>/",
defaults={"variable": None},
)
class RasterLayerName(Resource):
def get(self, id, variable=None, time_period=None):
"""Return an unique layer name"""
if variable is not None:
variable = variable.replace("__SLASH__", "/")
layer_name = path.make_unique_layer_name(
path.RASTER, id, variable=variable, time_period=time_period
)
return Response(layer_name, mimetype="text/plain")
@api.route("/legend/<path:layer_name>/")
class Legend(Resource):
def get(self, layer_name):
"""Return the legend of the layer"""
legend = client.get_legend(layer_name)
if legend is None:
abort(404)
return legend
@api.route("/geojson/<path:layer_name>/")
class GeoJSON(Resource):
def get(self, layer_name):
"""Return the GeoJSON file corresponding to the layer"""
geojson = client.get_geojson(layer_name, ignore_intersecting=True)
if geojson is None:
abort(404)
return geojson
@api.route("/areas/")
class Areas(Resource):
def get(self):
"""Return a list of all areas known by the platform"""
areas = client.get_areas()
if len(areas) == 0:
abort(404)
return areas
def add_openaire_links(datasets):
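    """Attach an OpenAIRE search link to each dataset, built from the MD5 hash
    of its shared_id (or the generic OpenAIRE gateway when there is none)."""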
for dataset in datasets:
shared_id = dataset.get("shared_id")
if not shared_id:
dataset["openaireLink"] = "https://enermaps.openaire.eu/"
else:
shared_id_hash = hashlib.md5(shared_id.encode()) # nosec
dataset["openaireLink"] = (
"https://enermaps.openaire.eu/search/dataset?datasetId=enermaps____::{}"
.format(shared_id_hash.hexdigest())
)
| [((13, 6, 13, 69), 'flask_restx.Namespace', 'Namespace', (), '', False, 'from flask_restx import Namespace, Resource, abort\n'), ((20, 19, 20, 44), 'app.common.client.get_dataset_list', 'client.get_dataset_list', ({}, {}), '()', False, 'from app.common import client\n'), ((34, 19, 34, 44), 'app.common.client.get_dataset_list', 'client.get_dataset_list', ({}, {}), '()', False, 'from app.common import client\n'), ((58, 21, 58, 46), 'app.common.client.get_parameters', 'client.get_parameters', ({(58, 43, 58, 45): 'id'}, {}), '(id)', False, 'from app.common import client\n'), ((62, 8, 62, 52), 'app.common.datasets.process_parameters', 'datasets_fcts.process_parameters', ({(62, 41, 62, 51): 'parameters'}, {}), '(parameters)', True, 'from app.common import datasets as datasets_fcts\n'), ((84, 21, 86, 9), 'app.common.path.make_unique_layer_name', 'path.make_unique_layer_name', (), '', False, 'from app.common import path\n'), ((88, 15, 88, 58), 'flask.Response', 'Response', (), '', False, 'from flask import Response\n'), ((108, 21, 110, 9), 'app.common.path.make_unique_layer_name', 'path.make_unique_layer_name', (), '', False, 'from app.common import path\n'), ((112, 15, 112, 58), 'flask.Response', 'Response', (), '', False, 'from flask import Response\n'), ((119, 17, 119, 46), 'app.common.client.get_legend', 'client.get_legend', ({(119, 35, 119, 45): 'layer_name'}, {}), '(layer_name)', False, 'from app.common import client\n'), ((130, 18, 130, 74), 'app.common.client.get_geojson', 'client.get_geojson', (), '', False, 'from app.common import client\n'), ((141, 16, 141, 34), 'app.common.client.get_areas', 'client.get_areas', ({}, {}), '()', False, 'from app.common import client\n'), ((22, 12, 22, 22), 'flask_restx.abort', 'abort', ({(22, 18, 22, 21): '(404)'}, {}), '(404)', False, 'from flask_restx import Namespace, Resource, abort\n'), ((36, 12, 36, 22), 'flask_restx.abort', 'abort', ({(36, 18, 36, 21): '(404)'}, {}), '(404)', False, 'from flask_restx import Namespace, Resource, abort\n'), ((39, 30, 39, 69), 'app.common.client.get_parameters', 'client.get_parameters', ({(39, 52, 39, 68): "dataset['ds_id']"}, {}), "(dataset['ds_id'])", False, 'from app.common import client\n'), ((43, 12, 47, 13), 'app.common.datasets.process_parameters', 'datasets_fcts.process_parameters', (), '', True, 'from app.common import datasets as datasets_fcts\n'), ((60, 12, 60, 22), 'flask_restx.abort', 'abort', ({(60, 18, 60, 21): '(404)'}, {}), '(404)', False, 'from flask_restx import Namespace, Resource, abort\n'), ((121, 12, 121, 22), 'flask_restx.abort', 'abort', ({(121, 18, 121, 21): '(404)'}, {}), '(404)', False, 'from flask_restx import Namespace, Resource, abort\n'), ((132, 12, 132, 22), 'flask_restx.abort', 'abort', ({(132, 18, 132, 21): '(404)'}, {}), '(404)', False, 'from flask_restx import Namespace, Resource, abort\n'), ((143, 12, 143, 22), 'flask_restx.abort', 'abort', ({(143, 18, 143, 21): '(404)'}, {}), '(404)', False, 'from flask_restx import Namespace, Resource, abort\n'), ((41, 16, 41, 26), 'flask_restx.abort', 'abort', ({(41, 22, 41, 25): '(404)'}, {}), '(404)', False, 'from flask_restx import Namespace, Resource, abort\n')] |
forewing/lc | python/p45.py | 314468a1a3bb7d38eccf1f34b0d1b7da04a34784 | from typing import List
class Solution:
def jump(self, nums: List[int]) -> int:
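        # dp[j] = minimum number of jumps needed to reach index j; tail tracks how
        # far earlier positions already reached, so the inner loop avoids
        # re-scanning the whole prefix on every iteration.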
n = len(nums)
dp = [float('inf')] * n
dp[0] = 0
tail = 1
for i in range(n):
limit = min(n, i + nums[i] + 1)
for j in range(tail, limit):
dp[j] = min(dp[j], dp[i] + 1)
tail = limit - 1
return dp[-1]
| [] |
doycode/mlgorithms | mlgorithms/knn/__init__.py | b187efad474acdc9b7c6defe4761f101530bd1a3 | from .knn import KNNClassifier
__all__ = ['KNNClassifier'] | [] |
sotkonstantinidis/testcircle | apps/configuration/fields.py | 448aa2148fbc2c969e60f0b33ce112d4740a8861 | import unicodedata
from django.forms import fields
class XMLCompatCharField(fields.CharField):
"""
Strip 'control characters', as XML 1.0 does not allow them and the API may
return data in XML.
"""
def to_python(self, value):
value = super().to_python(value=value)
return self.remove_control_characters(value)
@staticmethod
def remove_control_characters(input):
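        # Unicode general categories beginning with "C" (Cc, Cf, Cs, Co, Cn) are the
        # "Other"/control-type characters stripped here; \n and \r are kept explicitly.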
valid_chars = ['\n', '\r']
return "".join(ch for ch in input if
unicodedata.category(ch)[0] != "C" or ch in valid_chars)
| [((20, 23, 20, 47), 'unicodedata.category', 'unicodedata.category', ({(20, 44, 20, 46): 'ch'}, {}), '(ch)', False, 'import unicodedata\n')] |
erikdelange/MicroPython-HTTP-Server | ademo.py | 54bda9d55ac65b9a6bbf2189098a788add52b344 | import sys
import time
import uasyncio as asyncio
from ahttpserver import sendfile, Server
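# Demo: serves index.html and favicon.ico, exposes /api/time and /api/stop, and
# shows the difference between locally handled and globally handled exceptions.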
app = Server()
@app.route("GET", "/")
async def root(reader, writer, request):
writer.write(b"HTTP/1.1 200 OK\r\n")
writer.write(b"Connection: close\r\n")
writer.write(b"Content-Type: text/html\r\n")
writer.write(b"\r\n")
await writer.drain()
await sendfile(writer, "index.html")
try:
print(1/0)
except Exception as e:
print("exception in function root():", e) # exception handled locally
# @app.route("GET", "/") # if uncommented raises route already declared exception
# async def also_root(reader, writer, request):
# return
@app.route("GET", "/favicon.ico")
async def favicon(reader, writer, request):
writer.write(b"HTTP/1.1 200 OK\r\n")
writer.write(b"Connection: close\r\n")
writer.write(b"Content-Type: image/x-icon\r\n")
writer.write(b"\r\n")
await writer.drain()
await sendfile(writer, "favicon.ico")
@app.route("GET", "/api/time")
async def get_time(reader, writer, request):
writer.write(b"HTTP/1.1 200 OK\r\n")
writer.write(b"Connection: close\r\n")
writer.write(b"Content-Type: text/html\r\n")
writer.write(b"\r\n")
await writer.drain()
t = time.localtime()
writer.write(f"{t[2]:02d}-{t[1]:02d}-{t[0]:04d} {t[3]:02d}:{t[4]:02d}:{t[5]:02d}")
print(1/0) # will be caught by global exception handler
@app.route("GET", "/api/stop")
async def stop(reader, writer, request):
writer.write(b"HTTP/1.1 200 OK\r\n")
writer.write(b"Connection: close\r\n")
writer.write(b"\r\n")
await writer.drain()
raise(KeyboardInterrupt)
async def hello():
""" For demo purposes show system is still alive """
count = 0
while True:
print("hello", count)
count += 1
await asyncio.sleep(60)
def set_global_exception_handler():
def handle_exception(loop, context):
# uncaught exceptions raised in route handlers end up here
print("global exception handler:", context)
sys.print_exception(context["exception"])
loop = asyncio.get_event_loop()
loop.set_exception_handler(handle_exception)
if __name__ == "__main__":
try:
set_global_exception_handler()
asyncio.create_task(hello())
asyncio.run(app.start()) # must be last, does not return
except KeyboardInterrupt:
pass
finally:
asyncio.run(app.stop())
asyncio.new_event_loop()
| [((7, 6, 7, 14), 'ahttpserver.Server', 'Server', ({}, {}), '()', False, 'from ahttpserver import sendfile, Server\n'), ((46, 8, 46, 24), 'time.localtime', 'time.localtime', ({}, {}), '()', False, 'import time\n'), ((75, 11, 75, 35), 'uasyncio.get_event_loop', 'asyncio.get_event_loop', ({}, {}), '()', True, 'import uasyncio as asyncio\n'), ((17, 10, 17, 40), 'ahttpserver.sendfile', 'sendfile', ({(17, 19, 17, 25): 'writer', (17, 27, 17, 39): '"""index.html"""'}, {}), "(writer, 'index.html')", False, 'from ahttpserver import sendfile, Server\n'), ((36, 10, 36, 41), 'ahttpserver.sendfile', 'sendfile', ({(36, 19, 36, 25): 'writer', (36, 27, 36, 40): '"""favicon.ico"""'}, {}), "(writer, 'favicon.ico')", False, 'from ahttpserver import sendfile, Server\n'), ((73, 8, 73, 49), 'sys.print_exception', 'sys.print_exception', ({(73, 28, 73, 48): "context['exception']"}, {}), "(context['exception'])", False, 'import sys\n'), ((89, 8, 89, 32), 'uasyncio.new_event_loop', 'asyncio.new_event_loop', ({}, {}), '()', True, 'import uasyncio as asyncio\n'), ((66, 14, 66, 31), 'uasyncio.sleep', 'asyncio.sleep', ({(66, 28, 66, 30): '(60)'}, {}), '(60)', True, 'import uasyncio as asyncio\n')] |
vipulSharma18/Deep-Self-Supervised-Audio-Video-Cosegmentation-with-Adaptive-Noise-Cancellation | models/audio_net.py | d52695be31a1552d0785f3b6634bde6ef9276a90 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Unet(nn.Module):
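    # U-Net encoder/decoder built from nested UnetBlocks: num_downs levels of
    # down/up-sampling, a 1-channel input (e.g. a spectrogram) and fc_dim output channels.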
def __init__(self, fc_dim=64, num_downs=5, ngf=64, use_dropout=False):
super(Unet, self).__init__()
# construct unet structure
unet_block = UnetBlock(
ngf * 8, ngf * 8, input_nc=None,
submodule=None, innermost=True)
for i in range(num_downs - 5):
unet_block = UnetBlock(
ngf * 8, ngf * 8, input_nc=None,
submodule=unet_block, use_dropout=use_dropout)
unet_block = UnetBlock(
ngf * 4, ngf * 8, input_nc=None,
submodule=unet_block)
unet_block = UnetBlock(
ngf * 2, ngf * 4, input_nc=None,
submodule=unet_block)
unet_block = UnetBlock(
ngf, ngf * 2, input_nc=None,
submodule=unet_block)
unet_block = UnetBlock(
fc_dim, ngf, input_nc=1,
submodule=unet_block, outermost=True)
self.bn0 = nn.BatchNorm2d(1)
self.unet_block = unet_block
def forward(self, x):
x = self.bn0(x)
x = self.unet_block(x)
return x
# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
# |-- downsampling -- |submodule| -- upsampling --|
class UnetBlock(nn.Module):
def __init__(self, outer_nc, inner_input_nc, input_nc=None,
submodule=None, outermost=False, innermost=False,
use_dropout=False, inner_output_nc=None, noskip=False):
super(UnetBlock, self).__init__()
self.outermost = outermost
self.noskip = noskip
use_bias = False
if input_nc is None:
input_nc = outer_nc
if innermost:
inner_output_nc = inner_input_nc
elif inner_output_nc is None:
inner_output_nc = 2 * inner_input_nc
downrelu = nn.LeakyReLU(0.2, True)
downnorm = nn.BatchNorm2d(inner_input_nc)
uprelu = nn.ReLU(True)
upnorm = nn.BatchNorm2d(outer_nc)
upsample = nn.Upsample(
scale_factor=2, mode='bilinear', align_corners=True)
if outermost:
downconv = nn.Conv2d(
input_nc, inner_input_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
upconv = nn.Conv2d(
inner_output_nc, outer_nc, kernel_size=3, padding=1)
down = [downconv]
up = [uprelu, upsample, upconv]
model = down + [submodule] + up
elif innermost:
downconv = nn.Conv2d(
input_nc, inner_input_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
upconv = nn.Conv2d(
inner_output_nc, outer_nc, kernel_size=3,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upsample, upconv, upnorm]
model = down + up
else:
downconv = nn.Conv2d(
input_nc, inner_input_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
upconv = nn.Conv2d(
inner_output_nc, outer_nc, kernel_size=3,
padding=1, bias=use_bias)
down = [downrelu, downconv, downnorm]
up = [uprelu, upsample, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost or self.noskip:
return self.model(x)
else:
return torch.cat([x, self.model(x)], 1)
| [((31, 19, 31, 36), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ({(31, 34, 31, 35): '1'}, {}), '(1)', True, 'import torch.nn as nn\n'), ((58, 19, 58, 42), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ({(58, 32, 58, 35): '0.2', (58, 37, 58, 41): 'True'}, {}), '(0.2, True)', True, 'import torch.nn as nn\n'), ((59, 19, 59, 49), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ({(59, 34, 59, 48): 'inner_input_nc'}, {}), '(inner_input_nc)', True, 'import torch.nn as nn\n'), ((60, 17, 60, 30), 'torch.nn.ReLU', 'nn.ReLU', ({(60, 25, 60, 29): 'True'}, {}), '(True)', True, 'import torch.nn as nn\n'), ((61, 17, 61, 41), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ({(61, 32, 61, 40): 'outer_nc'}, {}), '(outer_nc)', True, 'import torch.nn as nn\n'), ((62, 19, 63, 64), 'torch.nn.Upsample', 'nn.Upsample', (), '', True, 'import torch.nn as nn\n'), ((101, 21, 101, 42), 'torch.nn.Sequential', 'nn.Sequential', ({(101, 35, 101, 41): '*model'}, {}), '(*model)', True, 'import torch.nn as nn\n'), ((66, 23, 68, 51), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((69, 21, 70, 68), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((76, 23, 78, 51), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((79, 21, 81, 41), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((87, 23, 89, 51), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((90, 21, 92, 41), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((97, 51, 97, 66), 'torch.nn.Dropout', 'nn.Dropout', ({(97, 62, 97, 65): '(0.5)'}, {}), '(0.5)', True, 'import torch.nn as nn\n')] |
cnschema/kgtool | tests/test_core.py | 599e23a9e8a856625143b171f9c36eb5b00623f6 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Path hack
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
try:
import unittest2 as unittest
except ImportError:
import unittest
from kgtool.core import * # noqa
class CoreTestCase(unittest.TestCase):
def setUp(self):
pass
def test_file2abspath(self):
tin = "test.json"
tout = file2abspath(tin, __file__)
logging.info(" {} => {}".format(tin, tout))
assert tout.endswith(u"tests/" + tin), tout
tin = "../test.json"
tout = file2abspath(tin)
logging.info(" {} => {}".format(tin, tout))
assert tout.endswith(
u"kgtool/" + os.path.basename(tin)), tout
def test_file2json(self):
filename = "test_core_file.json"
filename = file2abspath(filename, __file__)
ret = file2json(filename)
assert len(ret) == 3
def test_file2iter(self):
filename = "test_core_file.json"
filename = file2abspath(filename, __file__)
str_iter = file2iter(filename)
assert len(list(str_iter)) == 5
def test_json_get(self):
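        # json_get walks a list of keys through nested dicts and returns the
        # default when the path is missing or crosses a non-dict value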
json_data = {"a": {"b": 1}, "c": ["d"], "e": "f"}
assert type(json_get(json_data, ["a"])) == dict
assert json_get(json_data, ["k"]) is None
assert json_get(json_data, ["k"], 10) == 10
assert json_get(json_data, ["a", "b"], 10) == 1
assert json_get(json_data, ["a", "k"], 10) == 10
assert json_get(json_data, ["c", "d"], 10) is None
assert json_get(json_data, ["e", "k"], 10) is None
assert type(json_get(json_data, ["c"])) == list
json_data = {
"father": {"name": "john"},
"birthPlace": "Beijing"
}
assert json_get(json_data, ["father", "name"]) == "john"
assert json_get(json_data, ["father", "image"], default="n/a") == "n/a"
assert json_get(json_data, ["father", "father"]) is None
assert json_get(json_data, ["birthPlace"]) == "Beijing"
assert json_get(
json_data, ["birthPlace", "name"], default="n/a") is None
def test_json_get_list(self):
json_data = {
"name": "john",
"age": None,
"birthPlace": ["Beijing"]
}
assert json_get_list(json_data, "name") == ["john"]
assert json_get_list(json_data, "birthPlace") == ["Beijing"]
assert json_get_list(json_data, "age") == []
def test_json_get_first_item(self):
json_data = {
"name": "john",
"birthPlace": ["Beijing"],
"interests": []
}
assert json_get_first_item(json_data, "name") == "john"
assert json_get_first_item(json_data, "birthPlace") == "Beijing"
assert json_get_first_item(json_data, "birthDate") == ''
assert json_get_first_item(json_data, "interests", defaultValue=None) is None
def test_json_append(self):
json_data = {
"name": "john",
"birthPlace": ["Beijing"],
"interests": []
}
json_append(json_data, "name", "a")
assert json_data["name"] == "john"
json_append(json_data, "birthPlace", "a")
assert json_data["birthPlace"] == ["Beijing","a"]
json_append(json_data, "keywords", "a")
assert json_data["keywords"] == ["a"]
def test_any2utf8(self):
tin = "你好世界"
tout = any2utf8(tin)
logging.info(" {} => {}".format(tin, tout))
tin = u"你好世界"
tout = any2utf8(tin)
logging.info((tin, tout))
tin = "hello world"
tout = any2utf8(tin)
logging.info((tin, tout))
tin = ["hello", "世界"]
tout = any2utf8(tin)
logging.info((tin, tout))
tin = {"hello": u"世界"}
tout = any2utf8(tin)
logging.info((tin, tout))
tin = {"hello": u"世界", "number": 90}
tout = any2utf8(tin)
logging.info((tin, tout))
def test_any2unicode(self):
tin = "你好世界"
tout = any2unicode(tin)
logging.info((tin, tout))
tin = u"你好世界"
tout = any2unicode(tin)
logging.info((tin, tout))
tin = "hello world"
tout = any2unicode(tin)
logging.info((tin, tout))
tin = ["hello", "世界"]
tout = any2unicode(tin)
logging.info((tin, tout))
tin = {"hello": u"世界"}
tout = any2unicode(tin)
logging.info((tin, tout))
def test_any2sha256(self):
tin = "你好世界"
tout = any2sha256(tin)
assert "beca6335b20ff57ccc47403ef4d9e0b8fccb4442b3151c2e7d50050673d43172" == tout, tout
def test_any2sha1(self):
tin = "你好世界"
tout = any2sha1(tin)
assert "dabaa5fe7c47fb21be902480a13013f16a1ab6eb" == tout, tout
tin = u"你好世界"
tout = any2sha1(tin)
assert "dabaa5fe7c47fb21be902480a13013f16a1ab6eb" == tout, tout
tin = "hello world"
tout = any2sha1(tin)
assert "2aae6c35c94fcfb415dbe95f408b9ce91ee846ed" == tout, tout
tin = ["hello", "world"]
tout = any2sha1(tin)
assert "2ed0a51bbdbc4f57378e8c64a1c7a0cd4386cc09" == tout, tout
tin = {"hello": "world"}
tout = any2sha1(tin)
assert "d3b09abe30cfe2edff4ee9e0a141c93bf5b3af87" == tout, tout
def test_json_dict_copy(self):
property_list = [
{ "name":"name", "alternateName": ["name","title"]},
{ "name":"birthDate", "alternateName": ["dob","dateOfBirth"] },
{ "name":"description" }
]
json_object = {"dob":"2010-01-01","title":"John","interests":"data","description":"a person"}
ret = json_dict_copy(json_object, property_list)
assert json_object["title"] == ret["name"]
assert json_object["dob"] == ret["birthDate"]
assert json_object["description"] == ret["description"]
assert ret.get("interests") is None
def test_parse_list_value(self):
ret = parse_list_value(u"原文,正文")
assert len(ret) == 2
if __name__ == '__main__':
unittest.main()
| [((6, 19, 6, 40), 'os.path.abspath', 'os.path.abspath', ({(6, 35, 6, 39): '""".."""'}, {}), "('..')", False, 'import os\n'), ((204, 4, 204, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((30, 25, 30, 46), 'os.path.basename', 'os.path.basename', ({(30, 42, 30, 45): 'tin'}, {}), '(tin)', False, 'import os\n')] |
ricardoavelino/compas_ags | ui/Rhino/AGS/dev/AGS_toolbar_display_cmd.py | 1c9e496bc4b72b11adc80ea97288ddc27e92c08e | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import scriptcontext as sc
import compas_rhino
from compas_ags.rhino import SettingsForm
from compas_ags.rhino import FormObject
from compas_ags.rhino import ForceObject
__commandname__ = "AGS_toolbar_display"
def RunCommand(is_interactive):
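    """Open the settings form for the form and force diagram objects in the active AGS scene."""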
if 'AGS' not in sc.sticky:
compas_rhino.display_message('AGS has not been initialised yet.')
return
scene = sc.sticky['AGS']['scene']
if not scene:
return
# TODO: deal with undo redo
SettingsForm.from_scene(scene, object_types=[FormObject, ForceObject], global_settings=['AGS'])
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
RunCommand(True)
| [((26, 4, 26, 99), 'compas_ags.rhino.SettingsForm.from_scene', 'SettingsForm.from_scene', (), '', False, 'from compas_ags.rhino import SettingsForm\n'), ((18, 8, 18, 73), 'compas_rhino.display_message', 'compas_rhino.display_message', ({(18, 37, 18, 72): '"""AGS has not been initialised yet."""'}, {}), "('AGS has not been initialised yet.')", False, 'import compas_rhino\n')] |
VidoniJorge/c-interprete | lpp/evaluator.py | 4f026d093b26289d3f692cd64d52069fdd1d954c | from typing import (
Any,
cast,
List,
Optional,
Type
)
import lpp.ast as ast
from lpp.builtins import BUILTINS
from lpp.object import(
Boolean,
Builtin,
Environment,
Error,
Function,
Integer,
Null,
Object,
ObjectType,
String,
Return
)
TRUE = Boolean(True)
FALSE = Boolean(False)
NULL = Null()
_NOT_A_FUNCTION = 'No es una funcion: {}'
_TYPE_MISMATCH = 'Discrepancia de tipos: {} {} {}'
_UNKNOWN_PREFIX_OPERATOR = 'Operador desconocido: {}{}'
_UNKNOWN_INFIX_OPERATOR = 'Operador desconocido: {} {} {}'
_UNKNOWN_IDENTIFIER = 'Identificador no encontrado: {}'
def evaluate(node:ast.ASTNode, env: Environment) -> Optional[Object]:
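    """Evaluate an AST node by dispatching on its concrete node type; returns the resulting object, or None for let statements and unrecognized nodes."""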
node_type: Type = type(node)
if node_type == ast.Program:
node = cast(ast.Program, node)
return _evaluate_program(node, env)
elif node_type == ast.ExpressionStatement:
node = cast(ast.ExpressionStatement, node)
assert node.expression is not None
return evaluate(node.expression, env)
elif node_type == ast.Integer:
node = cast(ast.Integer, node)
assert node.value is not None
return Integer(node.value)
elif node_type == ast.Boolean:
node = cast(ast.Boolean, node)
assert node.value is not None
return _to_boolean_object(node.value)
elif node_type == ast.Prefix:
node = cast(ast.Prefix, node)
assert node.right is not None
right = evaluate(node.right, env)
assert right is not None
return _evaluate_prifix_expression(node.operator, right, node.right.token.line)
elif node_type == ast.Infix:
node = cast(ast.Infix, node)
assert node.left is not None and node.right is not None
left = evaluate(node.left, env)
right = evaluate(node.right, env)
assert right is not None and left is not None
return _evaluate_infix_expression(node.operator, left, right, node.left.token.line)
elif node_type == ast.Block:
node = cast(ast.Block, node)
return _evaluate_block_statement(node, env)
elif node_type == ast.If:
node = cast(ast.If, node)
return _evaluate_if_expression(node, env)
elif node_type == ast.ReturnStatement:
node = cast(ast.ReturnStatement, node)
assert node.return_value is not None
value = evaluate(node.return_value, env)
assert value is not None
return Return(value)
elif node_type == ast.LetStatement:
node = cast(ast.LetStatement, node)
assert node.value is not None
value = evaluate(node.value, env)
assert node.name is not None
env[node.name.value] = value
elif node_type == ast.Identifier:
node = cast(ast.Identifier, node)
return _evaluate_identifier(node, env, node.token.line)
elif node_type == ast.Function:
node = cast(ast.Function, node)
assert node.body is not None
return Function(node.parameters,
node.body,
env)
elif node_type == ast.Call:
node = cast(ast.Call, node)
function = evaluate(node.function, env)
assert function is not None
assert node.arguments is not None
args = _evaluate_expression(node.arguments, env)
assert function is not None
return _apply_function(function, args, node.token.line)
elif node_type == ast.StringLiteral:
node = cast(ast.StringLiteral, node)
return String(node.value)
return None
def _apply_function(fn: Object, args: List[Object],line_evaluated: int) -> Object:
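    """Call a user-defined function in an environment extended with its arguments, invoke a builtin directly, or report a not-a-function error."""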
if type(fn) == Function:
fn = cast(Function, fn)
extended_enviroment = _extended_function_enviroment(fn, args)
evaluated = evaluate(fn.body, extended_enviroment)
assert evaluated is not None
return _unwrap_return_value(evaluated)
elif type(fn) == Builtin:
fn = cast(Builtin, fn)
return fn.fn(*args)
else:
return _new_error(_NOT_A_FUNCTION, args, line_evaluated)
def _evaluate_bang_operator_expression(right: Object) -> Object:
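    """Logical negation: TRUE and any other object become FALSE, while FALSE and NULL become TRUE."""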
if right is TRUE:
return FALSE
elif right is FALSE:
return TRUE
elif right is NULL:
return TRUE
else:
return FALSE
def _evaluate_expression(expressions: List[ast.Expression], env: Environment) -> List[Object]:
result: List[Object] = []
for expression in expressions:
evaluated = evaluate(expression, env)
assert evaluated is not None
result.append(evaluated)
return result
def _extended_function_enviroment(fn: Function, args: List[Object]) -> Environment:
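    """Create an environment enclosed by the function's closure environment and bind each parameter to its argument."""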
env = Environment(outer=fn.env)
for idx, param in enumerate(fn.parameters):
        env[param.value] = args[idx]  # bind parameters positionally to the evaluated arguments
return env
def _evaluate_identifier(node: ast.Identifier, env: Environment, line_evaluated:int) -> Object:
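    """Look up an identifier in the environment, fall back to the built-in functions, or return an unknown-identifier error."""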
try:
return env[node.value]
except KeyError:
return BUILTINS.get(node.value,
_new_error(_UNKNOWN_IDENTIFIER, [node.value], line_evaluated))
def _evaluate_if_expression(if_expression: ast.If, env: Environment) -> Optional[Object]:
assert if_expression.condition is not None
condition = evaluate(if_expression.condition, env)
assert condition is not None
if _is_truthy(condition):
assert if_expression.consequence is not None
return evaluate(if_expression.consequence, env)
elif if_expression.alternative is not None:
return evaluate(if_expression.alternative, env)
else:
return NULL
def _is_truthy(obj: Object) -> bool:
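    """NULL and FALSE are falsy; every other object is considered truthy."""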
if obj is NULL:
return False
elif obj is TRUE:
return True
elif obj is FALSE:
return False
else:
return True
def _evaluate_block_statement(block: ast.Block, env: Environment) -> Optional[Object]:
result: Optional[Object] = None
for statement in block.statements:
result = evaluate(statement, env)
if result is not None and \
(result.type() == ObjectType.RETURN or result.type() == ObjectType.ERROR):
return result
return result
def _evaluate_infix_expression(operator:str, left:Object, right:Object, line_evaluated:int) -> Object:
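    """Dispatch an infix operation on the operand types: integer and string operands get dedicated handlers, equality operators fall back to identity comparison, and mismatched or unsupported combinations produce an error."""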
if left.type() == ObjectType.INTEGER \
and right.type() == ObjectType.INTEGER:
return _evaluate_integer_infix_expression(operator, left, right, line_evaluated)
if left.type() == ObjectType.STRING \
and right.type() == ObjectType.STRING:
return _evaluate_string_infix_expression(operator, left, right, line_evaluated)
elif operator == '==':
return _to_boolean_object(left is right)
elif operator == '!=':
return _to_boolean_object(left is not right)
elif left.type() != right.type():
return _new_error(_TYPE_MISMATCH, [left.type().name,
operator,
right.type().name
], line_evaluated)
else:
return _new_error(_UNKNOWN_INFIX_OPERATOR,[left.type().name,
operator,
right.type().name
], line_evaluated)
def _evaluate_integer_infix_expression(operator: str, left: Object, right: Object, line_evaluated:int) -> Object:
    left_value: int = cast(Integer, left).value
    right_value: int = cast(Integer, right).value
if operator == '+':
return Integer(left_value + right_value)
elif operator == '-':
return Integer(left_value - right_value)
elif operator == '*':
return Integer(left_value * right_value)
elif operator == '/':
        return Integer(left_value // right_value)  # integer division
elif operator == '<':
return _to_boolean_object(left_value < right_value)
elif operator == '>':
return _to_boolean_object(left_value > right_value)
elif operator == '==':
return _to_boolean_object(left_value == right_value)
elif operator == '!=':
return _to_boolean_object(left_value != right_value)
else:
return _new_error(_UNKNOWN_INFIX_OPERATOR,[left.type().name,
operator,
right.type().name
], line_evaluated)
def _evaluate_string_infix_expression(operator: str, left: Object, right: Object, line_evaluated:int) -> Object:
    left_value: str = cast(String, left).value
    right_value: str = cast(String, right).value
if operator == '+':
return String(left_value + right_value)
elif operator == '==':
return _to_boolean_object(left_value == right_value)
elif operator == '!=':
return _to_boolean_object(left_value != right_value)
else:
return _new_error(_UNKNOWN_INFIX_OPERATOR,[left.type().name,
operator,
right.type().name
], line_evaluated)
def _evaluate_minus_operator_expression(right: Object, line_evaluated:int) -> Object:
if type(right) != Integer:
return _new_error(_UNKNOWN_PREFIX_OPERATOR, ['-', right.type().name], line_evaluated)
right = cast(Integer, right)
return Integer(-right.value)
def _evaluate_prifix_expression(operator: str, right: Object,line_evaluated:int) -> Object:
if operator == '!':
return _evaluate_bang_operator_expression(right)
elif operator == '-':
return _evaluate_minus_operator_expression(right, line_evaluated)
else:
return _new_error(_UNKNOWN_PREFIX_OPERATOR,[operator, right.type().name],line_evaluated)
def _evaluate_program(program: ast.Program, env) -> Optional[Object]:
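    """Evaluate each top-level statement in order, unwrapping an early Return value and stopping on the first Error."""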
result: Optional[Object] = None
for statement in program.statements:
result = evaluate(statement, env)
if type(result) == Return:
result = cast(Return, result)
return result.value
elif type(result) == Error:
return result
return result
def _new_error(message: str, args:List[Any], error_line: int) -> Error:
return Error(message.format(*args), error_line)
def _unwrap_return_value(obj: Object) -> Object:
if type(obj) == Return:
obj = cast(Return, obj)
return obj.value
return obj
def _to_boolean_object(value: bool) -> Boolean:
return TRUE if value else FALSE | [((25, 7, 25, 20), 'lpp.object.Boolean', 'Boolean', ({(25, 15, 25, 19): 'True'}, {}), '(True)', False, 'from lpp.object import Boolean, Builtin, Environment, Error, Function, Integer, Null, Object, ObjectType, String, Return\n'), ((26, 8, 26, 22), 'lpp.object.Boolean', 'Boolean', ({(26, 16, 26, 21): 'False'}, {}), '(False)', False, 'from lpp.object import Boolean, Builtin, Environment, Error, Function, Integer, Null, Object, ObjectType, String, Return\n'), ((27, 7, 27, 13), 'lpp.object.Null', 'Null', ({}, {}), '()', False, 'from lpp.object import Boolean, Builtin, Environment, Error, Function, Integer, Null, Object, ObjectType, String, Return\n'), ((155, 10, 155, 35), 'lpp.object.Environment', 'Environment', (), '', False, 'from lpp.object import Boolean, Builtin, Environment, Error, Function, Integer, Null, Object, ObjectType, String, Return\n'), ((273, 12, 273, 32), 'typing.cast', 'cast', ({(273, 17, 273, 24): 'Integer', (273, 26, 273, 31): 'right'}, {}), '(Integer, right)', False, 'from typing import Any, cast, List, Optional, Type\n'), ((275, 11, 275, 32), 'lpp.object.Integer', 'Integer', ({(275, 19, 275, 31): '(-right.value)'}, {}), '(-right.value)', False, 'from lpp.object import Boolean, Builtin, Environment, Error, Function, Integer, Null, Object, ObjectType, String, Return\n'), ((39, 15, 39, 38), 'typing.cast', 'cast', ({(39, 20, 39, 31): 'ast.Program', (39, 33, 39, 37): 'node'}, {}), '(ast.Program, node)', False, 'from typing import Any, cast, List, Optional, Type\n'), ((121, 13, 121, 31), 'typing.cast', 'cast', ({(121, 18, 121, 26): 'Function', (121, 28, 121, 30): 'fn'}, {}), '(Function, fn)', False, 'from typing import Any, cast, List, Optional, Type\n'), ((227, 23, 227, 42), 'typing.cast', 'cast', ({(227, 28, 227, 35): 'Integer', (227, 37, 227, 41): 'left'}, {}), '(Integer, left)', False, 'from typing import Any, cast, List, Optional, Type\n'), ((228, 24, 228, 44), 'typing.cast', 'cast', ({(228, 29, 228, 36): 'Integer', (228, 38, 228, 43): 'right'}, {}), '(Integer, right)', False, 'from typing import Any, cast, List, Optional, Type\n'), ((231, 15, 231, 48), 'lpp.object.Integer', 'Integer', ({(231, 23, 231, 47): '(left_value + right_value)'}, {}), '(left_value + right_value)', False, 'from lpp.object import Boolean, Builtin, Environment, Error, Function, Integer, Null, Object, ObjectType, String, Return\n'), ((253, 23, 253, 41), 'typing.cast', 'cast', ({(253, 28, 253, 34): 'String', (253, 36, 253, 40): 'left'}, {}), '(String, left)', False, 'from typing import Any, cast, List, Optional, Type\n'), ((254, 24, 254, 43), 'typing.cast', 'cast', ({(254, 29, 254, 35): 'String', (254, 37, 254, 42): 'right'}, {}), '(String, right)', False, 'from typing import Any, cast, List, Optional, Type\n'), ((257, 15, 257, 47), 'lpp.object.String', 'String', ({(257, 22, 257, 46): '(left_value + right_value)'}, {}), '(left_value + right_value)', False, 'from lpp.object import Boolean, Builtin, Environment, Error, Function, Integer, Null, Object, ObjectType, String, Return\n'), ((304, 14, 304, 31), 'typing.cast', 'cast', ({(304, 19, 304, 25): 'Return', (304, 27, 304, 30): 'obj'}, {}), '(Return, obj)', False, 'from typing import Any, cast, List, Optional, Type\n'), ((42, 15, 42, 50), 'typing.cast', 'cast', ({(42, 20, 42, 43): 'ast.ExpressionStatement', (42, 45, 42, 49): 'node'}, {}), '(ast.ExpressionStatement, node)', False, 'from typing import Any, cast, List, Optional, Type\n'), ((129, 13, 129, 30), 'typing.cast', 'cast', ({(129, 18, 129, 25): 'Builtin', (129, 27, 
129, 29): 'fn'}, {}), '(Builtin, fn)', False, 'from typing import Any, cast, List, Optional, Type\n'), ((233, 15, 233, 48), 'lpp.object.Integer', 'Integer', ({(233, 23, 233, 47): '(left_value - right_value)'}, {}), '(left_value - right_value)', False, 'from lpp.object import Boolean, Builtin, Environment, Error, Function, Integer, Null, Object, ObjectType, String, Return\n'), ((292, 21, 292, 41), 'typing.cast', 'cast', ({(292, 26, 292, 32): 'Return', (292, 34, 292, 40): 'result'}, {}), '(Return, result)', False, 'from typing import Any, cast, List, Optional, Type\n'), ((47, 15, 47, 38), 'typing.cast', 'cast', ({(47, 20, 47, 31): 'ast.Integer', (47, 33, 47, 37): 'node'}, {}), '(ast.Integer, node)', False, 'from typing import Any, cast, List, Optional, Type\n'), ((49, 15, 49, 34), 'lpp.object.Integer', 'Integer', ({(49, 23, 49, 33): 'node.value'}, {}), '(node.value)', False, 'from lpp.object import Boolean, Builtin, Environment, Error, Function, Integer, Null, Object, ObjectType, String, Return\n'), ((235, 15, 235, 48), 'lpp.object.Integer', 'Integer', ({(235, 23, 235, 47): '(left_value * right_value)'}, {}), '(left_value * right_value)', False, 'from lpp.object import Boolean, Builtin, Environment, Error, Function, Integer, Null, Object, ObjectType, String, Return\n'), ((51, 15, 51, 38), 'typing.cast', 'cast', ({(51, 20, 51, 31): 'ast.Boolean', (51, 33, 51, 37): 'node'}, {}), '(ast.Boolean, node)', False, 'from typing import Any, cast, List, Optional, Type\n'), ((237, 15, 237, 49), 'lpp.object.Integer', 'Integer', ({(237, 23, 237, 48): '(left_value // right_value)'}, {}), '(left_value // right_value)', False, 'from lpp.object import Boolean, Builtin, Environment, Error, Function, Integer, Null, Object, ObjectType, String, Return\n'), ((56, 15, 56, 37), 'typing.cast', 'cast', ({(56, 20, 56, 30): 'ast.Prefix', (56, 32, 56, 36): 'node'}, {}), '(ast.Prefix, node)', False, 'from typing import Any, cast, List, Optional, Type\n'), ((63, 15, 63, 36), 'typing.cast', 'cast', ({(63, 20, 63, 29): 'ast.Infix', (63, 31, 63, 35): 'node'}, {}), '(ast.Infix, node)', False, 'from typing import Any, cast, List, Optional, Type\n'), ((71, 15, 71, 36), 'typing.cast', 'cast', ({(71, 20, 71, 29): 'ast.Block', (71, 31, 71, 35): 'node'}, {}), '(ast.Block, node)', False, 'from typing import Any, cast, List, Optional, Type\n'), ((75, 15, 75, 33), 'typing.cast', 'cast', ({(75, 20, 75, 26): 'ast.If', (75, 28, 75, 32): 'node'}, {}), '(ast.If, node)', False, 'from typing import Any, cast, List, Optional, Type\n'), ((79, 15, 79, 46), 'typing.cast', 'cast', ({(79, 20, 79, 39): 'ast.ReturnStatement', (79, 41, 79, 45): 'node'}, {}), '(ast.ReturnStatement, node)', False, 'from typing import Any, cast, List, Optional, Type\n'), ((83, 15, 83, 28), 'lpp.object.Return', 'Return', ({(83, 22, 83, 27): 'value'}, {}), '(value)', False, 'from lpp.object import Boolean, Builtin, Environment, Error, Function, Integer, Null, Object, ObjectType, String, Return\n'), ((86, 15, 86, 43), 'typing.cast', 'cast', ({(86, 20, 86, 36): 'ast.LetStatement', (86, 38, 86, 42): 'node'}, {}), '(ast.LetStatement, node)', False, 'from typing import Any, cast, List, Optional, Type\n'), ((93, 15, 93, 41), 'typing.cast', 'cast', ({(93, 20, 93, 34): 'ast.Identifier', (93, 36, 93, 40): 'node'}, {}), '(ast.Identifier, node)', False, 'from typing import Any, cast, List, Optional, Type\n'), ((97, 15, 97, 39), 'typing.cast', 'cast', ({(97, 20, 97, 32): 'ast.Function', (97, 34, 97, 38): 'node'}, {}), '(ast.Function, node)', False, 'from typing import Any, cast, List, 
Optional, Type\n'), ((99, 15, 101, 28), 'lpp.object.Function', 'Function', ({(99, 24, 99, 39): 'node.parameters', (100, 24, 100, 33): 'node.body', (101, 24, 101, 27): 'env'}, {}), '(node.parameters, node.body, env)', False, 'from lpp.object import Boolean, Builtin, Environment, Error, Function, Integer, Null, Object, ObjectType, String, Return\n'), ((104, 15, 104, 35), 'typing.cast', 'cast', ({(104, 20, 104, 28): 'ast.Call', (104, 30, 104, 34): 'node'}, {}), '(ast.Call, node)', False, 'from typing import Any, cast, List, Optional, Type\n'), ((114, 15, 114, 44), 'typing.cast', 'cast', ({(114, 20, 114, 37): 'ast.StringLiteral', (114, 39, 114, 43): 'node'}, {}), '(ast.StringLiteral, node)', False, 'from typing import Any, cast, List, Optional, Type\n'), ((115, 15, 115, 33), 'lpp.object.String', 'String', ({(115, 22, 115, 32): 'node.value'}, {}), '(node.value)', False, 'from lpp.object import Boolean, Builtin, Environment, Error, Function, Integer, Null, Object, ObjectType, String, Return\n')] |
QPC-database/aws-parallelcluster | cli/tests/pcluster/config/test_validators.py | 8c2e9595ca171340df21695c27d85dc00f19d3e4 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import os
import re
import configparser
import pytest
from assertpy import assert_that
import tests.pcluster.config.utils as utils
from pcluster.config.cfn_param_types import CfnParam, CfnSection
from pcluster.config.mappings import ALLOWED_VALUES, FSX
from pcluster.config.validators import (
DCV_MESSAGES,
EBS_VOLUME_TYPE_TO_VOLUME_SIZE_BOUNDS,
FSX_MESSAGES,
FSX_SUPPORTED_ARCHITECTURES_OSES,
LOGFILE_LOGGER,
architecture_os_validator,
check_usage_class,
cluster_type_validator,
compute_resource_validator,
disable_hyperthreading_architecture_validator,
efa_gdr_validator,
efa_os_arch_validator,
fsx_ignored_parameters_validator,
instances_architecture_compatibility_validator,
intel_hpc_architecture_validator,
queue_compute_type_validator,
queue_validator,
region_validator,
s3_bucket_region_validator,
settings_validator,
)
from pcluster.constants import FSX_HDD_THROUGHPUT, FSX_SSD_THROUGHPUT
from tests.common import MockedBoto3Request
from tests.pcluster.config.defaults import DefaultDict
@pytest.fixture()
def boto3_stubber_path():
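    """Tell the shared boto3_stubber fixture which module's boto3 reference to patch."""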
return "pcluster.config.validators.boto3"
@pytest.mark.parametrize(
"section_dict, expected_message, expected_warning",
[
# traditional scheduler
({"scheduler": "sge", "initial_queue_size": 1, "max_queue_size": 2, "maintain_initial_size": True}, None, None),
(
{"scheduler": "sge", "initial_queue_size": 3, "max_queue_size": 2, "maintain_initial_size": True},
"initial_queue_size must be fewer than or equal to max_queue_size",
None,
),
(
{"scheduler": "sge", "initial_queue_size": 3, "max_queue_size": 2, "maintain_initial_size": False},
"initial_queue_size must be fewer than or equal to max_queue_size",
None,
),
# awsbatch
({"scheduler": "awsbatch", "min_vcpus": 1, "desired_vcpus": 2, "max_vcpus": 3}, None, None),
(
{"scheduler": "awsbatch", "min_vcpus": 3, "desired_vcpus": 2, "max_vcpus": 3},
"desired_vcpus must be greater than or equal to min_vcpus",
None,
),
(
{"scheduler": "awsbatch", "min_vcpus": 1, "desired_vcpus": 4, "max_vcpus": 3},
"desired_vcpus must be fewer than or equal to max_vcpus",
None,
),
(
{"scheduler": "awsbatch", "min_vcpus": 4, "desired_vcpus": 4, "max_vcpus": 3},
"max_vcpus must be greater than or equal to min_vcpus",
None,
),
# key pair not provided
({"scheduler": "awsbatch"}, None, "If you do not specify a key pair"),
],
)
def test_cluster_validator(mocker, capsys, section_dict, expected_message, expected_warning):
config_parser_dict = {"cluster default": section_dict}
utils.assert_param_validator(
mocker, config_parser_dict, expected_message, capsys, expected_warning=expected_warning
)
@pytest.mark.parametrize(
"instance_type, expected_message", [("t2.micro", None), ("c4.xlarge", None), ("c5.xlarge", "is not supported")]
)
def test_ec2_instance_type_validator(mocker, instance_type, expected_message):
config_parser_dict = {"cluster default": {"compute_instance_type": instance_type}}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
@pytest.mark.parametrize("instance_type, expected_message", [("t2.micro", None), ("c4.xlarge", None)])
def test_head_node_instance_type_validator(mocker, instance_type, expected_message):
config_parser_dict = {"cluster default": {"master_instance_type": instance_type}}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
@pytest.mark.parametrize(
"scheduler, instance_type, expected_message, expected_warnings",
[
("sge", "t2.micro", None, None),
("sge", "c4.xlarge", None, None),
("sge", "c5.xlarge", "is not supported", None),
# NOTE: compute_instance_type_validator calls ec2_instance_type_validator only if the scheduler is not awsbatch
("awsbatch", "t2.micro", None, None),
("awsbatch", "c4.xlarge", "is not supported", None),
("awsbatch", "t2", None, None), # t2 family
("awsbatch", "optimal", None, None),
("sge", "p4d.24xlarge", None, "has 4 Network Interfaces."),
("slurm", "p4d.24xlarge", None, None),
],
)
def test_compute_instance_type_validator(mocker, scheduler, instance_type, expected_message, expected_warnings):
config_parser_dict = {"cluster default": {"scheduler": scheduler, "compute_instance_type": instance_type}}
extra_patches = {
"pcluster.config.validators.InstanceTypeInfo.max_network_interface_count": 4
if instance_type == "p4d.24xlarge"
else 1,
}
utils.assert_param_validator(
mocker, config_parser_dict, expected_message, expected_warnings, extra_patches=extra_patches
)
def test_ec2_key_pair_validator(mocker, boto3_stubber):
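    # The EC2 key pair validator should pass when DescribeKeyPairs returns the requested key.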
describe_key_pairs_response = {
"KeyPairs": [
{"KeyFingerprint": "12:bf:7c:56:6c:dd:4f:8c:24:45:75:f1:1b:16:54:89:82:09:a4:26", "KeyName": "key1"}
]
}
mocked_requests = [
MockedBoto3Request(
method="describe_key_pairs", response=describe_key_pairs_response, expected_params={"KeyNames": ["key1"]}
)
]
boto3_stubber("ec2", mocked_requests)
# TODO test with invalid key
config_parser_dict = {"cluster default": {"key_name": "key1"}}
utils.assert_param_validator(mocker, config_parser_dict)
@pytest.mark.parametrize(
"image_architecture, bad_ami_message, bad_architecture_message",
[
("x86_64", None, None),
(
"arm64",
None,
"incompatible with the architecture supported by the instance type chosen for the head node",
),
(
"arm64",
"Unable to get information for AMI",
"incompatible with the architecture supported by the instance type chosen for the head node",
),
],
)
def test_ec2_ami_validator(mocker, boto3_stubber, image_architecture, bad_ami_message, bad_architecture_message):
describe_images_response = {
"Images": [
{
"VirtualizationType": "paravirtual",
"Name": "My server",
"Hypervisor": "xen",
"ImageId": "ami-12345678",
"RootDeviceType": "ebs",
"State": "available",
"BlockDeviceMappings": [
{
"DeviceName": "/dev/sda1",
"Ebs": {
"DeleteOnTermination": True,
"SnapshotId": "snap-1234567890abcdef0",
"VolumeSize": 8,
"VolumeType": "standard",
},
}
],
"Architecture": image_architecture,
"ImageLocation": "123456789012/My server",
"KernelId": "aki-88aa75e1",
"OwnerId": "123456789012",
"RootDeviceName": "/dev/sda1",
"Public": False,
"ImageType": "machine",
"Description": "An AMI for my server",
}
]
}
mocked_requests = [
MockedBoto3Request(
method="describe_images",
response=describe_images_response,
expected_params={"ImageIds": ["ami-12345678"]},
generate_error=bad_ami_message,
)
]
boto3_stubber("ec2", mocked_requests)
# TODO test with invalid key
config_parser_dict = {"cluster default": {"custom_ami": "ami-12345678"}}
expected_message = bad_ami_message or bad_architecture_message
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
@pytest.mark.parametrize(
"section_dict, expected_message",
[
({"tags": {"key": "value", "key2": "value2"}}, None),
(
{"tags": {"key": "value", "Version": "value2"}},
r"Version.*reserved",
),
],
)
def test_tags_validator(mocker, capsys, section_dict, expected_message):
config_parser_dict = {"cluster default": section_dict}
utils.assert_param_validator(mocker, config_parser_dict, expected_error=expected_message)
def test_ec2_volume_validator(mocker, boto3_stubber):
describe_volumes_response = {
"Volumes": [
{
"AvailabilityZone": "us-east-1a",
"Attachments": [
{
"AttachTime": "2013-12-18T22:35:00.000Z",
"InstanceId": "i-1234567890abcdef0",
"VolumeId": "vol-12345678",
"State": "attached",
"DeleteOnTermination": True,
"Device": "/dev/sda1",
}
],
"Encrypted": False,
"VolumeType": "gp2",
"VolumeId": "vol-049df61146c4d7901",
"State": "available", # TODO add test with "in-use"
"SnapshotId": "snap-1234567890abcdef0",
"CreateTime": "2013-12-18T22:35:00.084Z",
"Size": 8,
}
]
}
mocked_requests = [
MockedBoto3Request(
method="describe_volumes",
response=describe_volumes_response,
expected_params={"VolumeIds": ["vol-12345678"]},
)
]
boto3_stubber("ec2", mocked_requests)
# TODO test with invalid key
config_parser_dict = {
"cluster default": {"ebs_settings": "default"},
"ebs default": {"shared_dir": "test", "ebs_volume_id": "vol-12345678"},
}
utils.assert_param_validator(mocker, config_parser_dict)
@pytest.mark.parametrize(
"region, base_os, scheduler, expected_message",
[
# verify awsbatch supported regions
(
"ap-northeast-3",
"alinux2",
"awsbatch",
"Region 'ap-northeast-3' is not yet officially supported by ParallelCluster",
),
("us-gov-east-1", "alinux2", "awsbatch", None),
("us-gov-west-1", "alinux2", "awsbatch", None),
("eu-west-1", "alinux2", "awsbatch", None),
("us-east-1", "alinux2", "awsbatch", None),
("eu-north-1", "alinux2", "awsbatch", None),
("cn-north-1", "alinux2", "awsbatch", None),
("cn-northwest-1", "alinux2", "awsbatch", None),
# verify traditional schedulers are supported in all the regions but ap-northeast-3
("cn-northwest-1", "alinux2", "sge", None),
("us-gov-east-1", "alinux2", "sge", None),
("cn-northwest-1", "alinux2", "slurm", None),
("us-gov-east-1", "alinux2", "slurm", None),
("cn-northwest-1", "alinux2", "torque", None),
("us-gov-east-1", "alinux2", "torque", None),
(
"ap-northeast-3",
"alinux2",
"sge",
"Region 'ap-northeast-3' is not yet officially supported by ParallelCluster",
),
# verify awsbatch supported OSes
("eu-west-1", "centos7", "awsbatch", "scheduler supports the following Operating Systems"),
("eu-west-1", "centos8", "awsbatch", "scheduler supports the following Operating Systems"),
("eu-west-1", "ubuntu1804", "awsbatch", "scheduler supports the following Operating Systems"),
("eu-west-1", "alinux2", "awsbatch", None),
# verify sge supports all the OSes
("eu-west-1", "centos7", "sge", None),
("eu-west-1", "centos8", "sge", None),
("eu-west-1", "ubuntu1804", "sge", None),
("eu-west-1", "alinux2", "sge", None),
# verify slurm supports all the OSes
("eu-west-1", "centos7", "slurm", None),
("eu-west-1", "centos8", "slurm", None),
("eu-west-1", "ubuntu1804", "slurm", None),
("eu-west-1", "alinux2", "slurm", None),
# verify torque supports all the OSes
("eu-west-1", "centos7", "torque", None),
("eu-west-1", "centos8", "torque", None),
("eu-west-1", "ubuntu1804", "torque", None),
("eu-west-1", "alinux2", "torque", None),
],
)
def test_scheduler_validator(mocker, capsys, region, base_os, scheduler, expected_message):
    # we need to set the region in the environment because it takes precedence over the config file
os.environ["AWS_DEFAULT_REGION"] = region
config_parser_dict = {"cluster default": {"base_os": base_os, "scheduler": scheduler}}
# Deprecation warning should be printed for sge and torque
expected_warning = None
wiki_url = "https://github.com/aws/aws-parallelcluster/wiki/Deprecation-of-SGE-and-Torque-in-ParallelCluster"
if scheduler in ["sge", "torque"]:
expected_warning = ".{0}. is scheduled to be deprecated.*{1}".format(scheduler, wiki_url)
utils.assert_param_validator(mocker, config_parser_dict, expected_message, capsys, expected_warning)
def test_placement_group_validator(mocker, boto3_stubber):
describe_placement_groups_response = {
"PlacementGroups": [{"GroupName": "my-cluster", "State": "available", "Strategy": "cluster"}]
}
mocked_requests = [
MockedBoto3Request(
method="describe_placement_groups",
response=describe_placement_groups_response,
expected_params={"GroupNames": ["my-cluster"]},
)
]
boto3_stubber("ec2", mocked_requests)
# TODO test with invalid group name
config_parser_dict = {"cluster default": {"placement_group": "my-cluster"}}
utils.assert_param_validator(mocker, config_parser_dict)
def test_url_validator(mocker, boto3_stubber, capsys):
head_object_response = {
"AcceptRanges": "bytes",
"ContentType": "text/html",
"LastModified": "Thu, 16 Apr 2015 18:19:14 GMT",
"ContentLength": 77,
"VersionId": "null",
"ETag": '"30a6ec7e1a9ad79c203d05a589c8b400"',
"Metadata": {},
}
mocked_requests = [
MockedBoto3Request(
method="head_object", response=head_object_response, expected_params={"Bucket": "test", "Key": "test.json"}
)
]
boto3_stubber("s3", mocked_requests)
mocker.patch("pcluster.config.validators.urllib.request.urlopen")
tests = [("s3://test/test.json", None), ("http://test/test.json", None)]
for template_url, expected_message in tests:
config_parser_dict = {"cluster default": {"template_url": template_url}}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
# Test S3 URI in custom_chef_cookbook.
tests = [
(
"s3://test/cookbook.tgz",
None,
MockedBoto3Request(
method="head_object",
response=head_object_response,
expected_params={"Bucket": "test", "Key": "cookbook.tgz"},
),
),
(
"s3://failure/cookbook.tgz",
(
"WARNING: The configuration parameter 'custom_chef_cookbook' generated the following warnings:\n"
"The S3 object does not exist or you do not have access to it.\n"
"Please make sure the cluster nodes have access to it."
),
MockedBoto3Request(
method="head_object",
response=head_object_response,
expected_params={"Bucket": "failure", "Key": "cookbook.tgz"},
generate_error=True,
error_code=404,
),
),
]
for custom_chef_cookbook_url, expected_message, mocked_request in tests:
boto3_stubber("s3", mocked_request)
mocker.patch("pcluster.config.validators.urllib.request.urlopen")
config_parser_dict = {
"cluster default": {
"scheduler": "slurm",
"s3_read_resource": "arn:aws:s3:::test*",
"custom_chef_cookbook": custom_chef_cookbook_url,
}
}
utils.assert_param_validator(mocker, config_parser_dict, capsys=capsys, expected_warning=expected_message)
@pytest.mark.parametrize(
"config, num_calls, error_code, bucket, expected_message",
[
(
{
"cluster default": {"fsx_settings": "fsx"},
"fsx fsx": {
"storage_capacity": 1200,
"import_path": "s3://test/test1/test2",
"export_path": "s3://test/test1/test2",
"auto_import_policy": "NEW",
},
},
2,
None,
{"Bucket": "test"},
"AutoImport is not supported for cross-region buckets.",
),
(
{
"cluster default": {"fsx_settings": "fsx"},
"fsx fsx": {
"storage_capacity": 1200,
"import_path": "s3://test/test1/test2",
"export_path": "s3://test/test1/test2",
"auto_import_policy": "NEW",
},
},
2,
"NoSuchBucket",
{"Bucket": "test"},
"The S3 bucket 'test' does not appear to exist.",
),
(
{
"cluster default": {"fsx_settings": "fsx"},
"fsx fsx": {
"storage_capacity": 1200,
"import_path": "s3://test/test1/test2",
"export_path": "s3://test/test1/test2",
"auto_import_policy": "NEW",
},
},
2,
"AccessDenied",
{"Bucket": "test"},
"You do not have access to the S3 bucket",
),
],
)
def test_auto_import_policy_validator(mocker, boto3_stubber, config, num_calls, error_code, bucket, expected_message):
os.environ["AWS_DEFAULT_REGION"] = "eu-west-1"
head_bucket_response = {
"ResponseMetadata": {
"AcceptRanges": "bytes",
"ContentType": "text/html",
"LastModified": "Thu, 16 Apr 2015 18:19:14 GMT",
"ContentLength": 77,
"VersionId": "null",
"ETag": '"30a6ec7e1a9ad79c203d05a589c8b400"',
"Metadata": {},
}
}
get_bucket_location_response = {
"ResponseMetadata": {
"LocationConstraint": "af-south1",
}
}
mocked_requests = []
for _ in range(num_calls):
mocked_requests.append(
MockedBoto3Request(method="head_bucket", response=head_bucket_response, expected_params=bucket)
)
if error_code is None:
mocked_requests.append(
MockedBoto3Request(
method="get_bucket_location", response=get_bucket_location_response, expected_params=bucket
)
)
else:
mocked_requests.append(
MockedBoto3Request(
method="get_bucket_location",
response=get_bucket_location_response,
expected_params=bucket,
generate_error=error_code is not None,
error_code=error_code,
)
)
boto3_stubber("s3", mocked_requests)
utils.assert_param_validator(mocker, config, expected_message)
@pytest.mark.parametrize(
"config, num_calls, bucket, expected_message",
[
(
{
"cluster default": {"fsx_settings": "fsx"},
"fsx fsx": {
"storage_capacity": 1200,
"import_path": "s3://test/test1/test2",
"export_path": "s3://test/test1/test2",
},
},
2,
{"Bucket": "test"},
None,
),
(
{
"cluster default": {"fsx_settings": "fsx"},
"fsx fsx": {
"storage_capacity": 1200,
"import_path": "http://test/test.json",
"export_path": "s3://test/test1/test2",
},
},
1,
{"Bucket": "test"},
"The value 'http://test/test.json' used for the parameter 'import_path' is not a valid S3 URI.",
),
],
)
def test_s3_validator(mocker, boto3_stubber, config, num_calls, bucket, expected_message):
if bucket:
_head_bucket_stubber(mocker, boto3_stubber, bucket, num_calls)
utils.assert_param_validator(mocker, config, expected_message)
@pytest.mark.parametrize(
"bucket, region, error_code, expected_message, client_error",
[
(
"bucket",
"us-east-1",
None,
None,
False,
),
(
"bucket",
"us-west-1",
None,
None,
False,
),
(
"bucket",
"eu-west-1",
None,
"cluster_resource_bucket must be in the same region of the cluster.",
False,
),
(
"not_existed_bucket",
"af-south-1",
"NoSuchBucket",
"The S3 bucket 'not_existed_bucket' does not appear to exist",
True,
),
(
"access_denied_bucket",
"af-south-1",
"AccessDenied",
"You do not have access to the S3 bucket 'access_denied_bucket'",
True,
),
(
"unexpected_error_bucket",
"af-south-1",
None,
"Unexpected error for S3 bucket",
True,
),
],
)
def test_s3_bucket_region_validator(mocker, boto3_stubber, error_code, bucket, region, client_error, expected_message):
os.environ["AWS_DEFAULT_REGION"] = "us-west-1" if region == "us-west-1" else "us-east-1"
if region == "us-east-1":
# The actual response when region is us-east-1 is
# {'ResponseMetadata': {...}, 'LocationConstraint': None}
        # But botocore doesn't support mocking a None response, so we mock the return value as follows
get_bucket_location_response = {
"ResponseMetadata": {},
}
else:
get_bucket_location_response = {
"ResponseMetadata": {},
"LocationConstraint": region,
}
mocked_requests = []
if error_code is None:
mocked_requests.append(
MockedBoto3Request(
method="get_bucket_location",
response=get_bucket_location_response,
expected_params={"Bucket": bucket},
generate_error=client_error is True,
)
)
else:
mocked_requests.append(
MockedBoto3Request(
method="get_bucket_location",
response=get_bucket_location_response,
expected_params={"Bucket": bucket},
generate_error=error_code is not None,
error_code=error_code,
)
)
boto3_stubber("s3", mocked_requests)
config = {
"cluster default": {"cluster_resource_bucket": bucket},
}
config_parser = configparser.ConfigParser()
config_parser.read_dict(config)
pcluster_config = utils.init_pcluster_config_from_configparser(config_parser, False, auto_refresh=False)
errors, warnings = s3_bucket_region_validator("cluster_resource_bucket", bucket, pcluster_config)
if expected_message:
assert_that(errors[0]).contains(expected_message)
else:
assert_that(errors).is_empty()
def test_ec2_vpc_id_validator(mocker, boto3_stubber):
mocked_requests = []
# mock describe_vpc boto3 call
describe_vpc_response = {
"Vpcs": [
{
"VpcId": "vpc-12345678",
"InstanceTenancy": "default",
"Tags": [{"Value": "Default VPC", "Key": "Name"}],
"State": "available",
"DhcpOptionsId": "dopt-4ef69c2a",
"CidrBlock": "172.31.0.0/16",
"IsDefault": True,
}
]
}
mocked_requests.append(
MockedBoto3Request(
method="describe_vpcs", response=describe_vpc_response, expected_params={"VpcIds": ["vpc-12345678"]}
)
)
# mock describe_vpc_attribute boto3 call
describe_vpc_attribute_response = {
"VpcId": "vpc-12345678",
"EnableDnsSupport": {"Value": True},
"EnableDnsHostnames": {"Value": True},
}
mocked_requests.append(
MockedBoto3Request(
method="describe_vpc_attribute",
response=describe_vpc_attribute_response,
expected_params={"VpcId": "vpc-12345678", "Attribute": "enableDnsSupport"},
)
)
mocked_requests.append(
MockedBoto3Request(
method="describe_vpc_attribute",
response=describe_vpc_attribute_response,
expected_params={"VpcId": "vpc-12345678", "Attribute": "enableDnsHostnames"},
)
)
boto3_stubber("ec2", mocked_requests)
# TODO mock and test invalid vpc-id
for vpc_id, expected_message in [("vpc-12345678", None)]:
config_parser_dict = {"cluster default": {"vpc_settings": "default"}, "vpc default": {"vpc_id": vpc_id}}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
def test_ec2_subnet_id_validator(mocker, boto3_stubber):
describe_subnets_response = {
"Subnets": [
{
"AvailabilityZone": "us-east-2c",
"AvailabilityZoneId": "use2-az3",
"AvailableIpAddressCount": 248,
"CidrBlock": "10.0.1.0/24",
"DefaultForAz": False,
"MapPublicIpOnLaunch": False,
"State": "available",
"SubnetId": "subnet-12345678",
"VpcId": "vpc-06e4ab6c6cEXAMPLE",
"OwnerId": "111122223333",
"AssignIpv6AddressOnCreation": False,
"Ipv6CidrBlockAssociationSet": [],
"Tags": [{"Key": "Name", "Value": "MySubnet"}],
"SubnetArn": "arn:aws:ec2:us-east-2:111122223333:subnet/subnet-12345678",
}
]
}
mocked_requests = [
MockedBoto3Request(
method="describe_subnets",
response=describe_subnets_response,
expected_params={"SubnetIds": ["subnet-12345678"]},
)
]
boto3_stubber("ec2", mocked_requests)
# TODO test with invalid key
config_parser_dict = {
"cluster default": {"vpc_settings": "default"},
"vpc default": {"master_subnet_id": "subnet-12345678"},
}
utils.assert_param_validator(mocker, config_parser_dict)
def test_ec2_security_group_validator(mocker, boto3_stubber):
describe_security_groups_response = {
"SecurityGroups": [
{
"IpPermissionsEgress": [],
"Description": "My security group",
"IpPermissions": [
{
"PrefixListIds": [],
"FromPort": 22,
"IpRanges": [{"CidrIp": "203.0.113.0/24"}],
"ToPort": 22,
"IpProtocol": "tcp",
"UserIdGroupPairs": [],
}
],
"GroupName": "MySecurityGroup",
"OwnerId": "123456789012",
"GroupId": "sg-12345678",
}
]
}
mocked_requests = [
MockedBoto3Request(
method="describe_security_groups",
response=describe_security_groups_response,
expected_params={"GroupIds": ["sg-12345678"]},
)
]
boto3_stubber("ec2", mocked_requests)
# TODO test with invalid key
config_parser_dict = {
"cluster default": {"vpc_settings": "default"},
"vpc default": {"vpc_security_group_id": "sg-12345678"},
}
utils.assert_param_validator(mocker, config_parser_dict)
@pytest.mark.parametrize(
"section_dict, expected_message",
[
(
{"throughput_mode": "bursting", "provisioned_throughput": 1024},
"When specifying 'provisioned_throughput', the 'throughput_mode' must be set to 'provisioned'",
),
({"throughput_mode": "provisioned", "provisioned_throughput": 1024}, None),
({"shared_dir": "NONE"}, "NONE cannot be used as a shared directory"),
({"shared_dir": "/NONE"}, "/NONE cannot be used as a shared directory"),
({"shared_dir": "/efs"}, None),
],
)
def test_efs_validator(mocker, section_dict, expected_message):
config_parser_dict = {"cluster default": {"efs_settings": "default"}, "efs default": section_dict}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
@pytest.mark.parametrize(
"section_dict, expected_message",
[
({"volume_type": "io1", "volume_size": 20, "volume_iops": 120}, None),
(
{"volume_type": "io1", "volume_size": 20, "volume_iops": 90},
"IOPS rate must be between 100 and 64000 when provisioning io1 volumes.",
),
(
{"volume_type": "io1", "volume_size": 20, "volume_iops": 64001},
"IOPS rate must be between 100 and 64000 when provisioning io1 volumes.",
),
({"volume_type": "io1", "volume_size": 20, "volume_iops": 1001}, "IOPS to volume size ratio of .* is too high"),
({"volume_type": "io2", "volume_size": 20, "volume_iops": 120}, None),
(
{"volume_type": "io2", "volume_size": 20, "volume_iops": 90},
"IOPS rate must be between 100 and 256000 when provisioning io2 volumes.",
),
(
{"volume_type": "io2", "volume_size": 20, "volume_iops": 256001},
"IOPS rate must be between 100 and 256000 when provisioning io2 volumes.",
),
(
{"volume_type": "io2", "volume_size": 20, "volume_iops": 20001},
"IOPS to volume size ratio of .* is too high",
),
({"volume_type": "gp3", "volume_size": 20, "volume_iops": 3000}, None),
(
{"volume_type": "gp3", "volume_size": 20, "volume_iops": 2900},
"IOPS rate must be between 3000 and 16000 when provisioning gp3 volumes.",
),
(
{"volume_type": "gp3", "volume_size": 20, "volume_iops": 16001},
"IOPS rate must be between 3000 and 16000 when provisioning gp3 volumes.",
),
(
{"volume_type": "gp3", "volume_size": 20, "volume_iops": 10001},
"IOPS to volume size ratio of .* is too high",
),
],
)
def test_raid_validators(mocker, section_dict, expected_message):
config_parser_dict = {"cluster default": {"raid_settings": "default"}, "raid default": section_dict}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
@pytest.mark.parametrize(
"kms_key_id, expected_message",
[
("9e8a129be-0e46-459d-865b-3a5bf974a22k", None),
(
"9e7a129be-0e46-459d-865b-3a5bf974a22k",
"Key 'arn:aws:kms:us-east-1:12345678:key/9e7a129be-0e46-459d-865b-3a5bf974a22k' does not exist",
),
],
)
def test_kms_key_validator(mocker, boto3_stubber, kms_key_id, expected_message):
_kms_key_stubber(mocker, boto3_stubber, kms_key_id, expected_message, 1)
config_parser_dict = {
"cluster default": {"fsx_settings": "fsx"},
"fsx fsx": {
"storage_capacity": 1200,
"fsx_kms_key_id": kms_key_id,
"deployment_type": "PERSISTENT_1",
"per_unit_storage_throughput": 50,
},
}
utils.assert_param_validator(
mocker, config_parser_dict, expected_error=expected_message if expected_message else None
)
def _kms_key_stubber(mocker, boto3_stubber, kms_key_id, expected_message, num_calls):
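    # Stub the KMS describe_key call num_calls times, returning either a valid key description or the given error.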
describe_key_response = {
"KeyMetadata": {
"AWSAccountId": "1234567890",
"Arn": "arn:aws:kms:us-east-1:1234567890:key/{0}".format(kms_key_id),
"CreationDate": datetime.datetime(2019, 1, 10, 11, 25, 59, 128000),
"Description": "",
"Enabled": True,
"KeyId": kms_key_id,
"KeyManager": "CUSTOMER",
"KeyState": "Enabled",
"KeyUsage": "ENCRYPT_DECRYPT",
"Origin": "AWS_KMS",
}
}
mocked_requests = [
MockedBoto3Request(
method="describe_key",
response=expected_message if expected_message else describe_key_response,
expected_params={"KeyId": kms_key_id},
generate_error=True if expected_message else False,
)
] * num_calls
boto3_stubber("kms", mocked_requests)
@pytest.mark.parametrize(
"section_dict, bucket, expected_error, num_calls",
[
(
{"imported_file_chunk_size": 1024, "import_path": "s3://test", "storage_capacity": 1200},
{"Bucket": "test"},
None,
1,
),
(
{"imported_file_chunk_size": 1024, "storage_capacity": 1200},
None,
"When specifying 'imported_file_chunk_size', the 'import_path' option must be specified",
0,
),
(
{"export_path": "s3://test", "import_path": "s3://test", "storage_capacity": 1200},
{"Bucket": "test"},
None,
2,
),
(
{"export_path": "s3://test", "storage_capacity": 1200},
{"Bucket": "test"},
"When specifying 'export_path', the 'import_path' option must be specified",
0,
),
({"shared_dir": "NONE", "storage_capacity": 1200}, None, "NONE cannot be used as a shared directory", 0),
({"shared_dir": "/NONE", "storage_capacity": 1200}, None, "/NONE cannot be used as a shared directory", 0),
({"shared_dir": "/fsx"}, None, "the 'storage_capacity' option must be specified", 0),
({"shared_dir": "/fsx", "storage_capacity": 1200}, None, None, 0),
(
{
"deployment_type": "PERSISTENT_1",
"fsx_kms_key_id": "9e8a129be-0e46-459d-865b-3a5bf974a22k",
"storage_capacity": 1200,
"per_unit_storage_throughput": 50,
},
None,
None,
0,
),
(
{"deployment_type": "PERSISTENT_1", "per_unit_storage_throughput": 200, "storage_capacity": 1200},
None,
None,
0,
),
(
{
"deployment_type": "SCRATCH_2",
"fsx_kms_key_id": "9e8a129be-0e46-459d-865b-3a5bf974a22k",
"storage_capacity": 1200,
},
None,
"'fsx_kms_key_id' can only be used when 'deployment_type = PERSISTENT_1'",
1,
),
(
{"deployment_type": "SCRATCH_1", "per_unit_storage_throughput": 200, "storage_capacity": 1200},
None,
"'per_unit_storage_throughput' can only be used when 'deployment_type = PERSISTENT_1'",
0,
),
(
{"deployment_type": "PERSISTENT_1", "storage_capacity": 1200},
None,
"'per_unit_storage_throughput' must be specified when 'deployment_type = PERSISTENT_1'",
0,
),
(
{
"storage_capacity": 1200,
"per_unit_storage_throughput": "50",
"deployment_type": "PERSISTENT_1",
"automatic_backup_retention_days": 2,
},
None,
None,
0,
),
(
{
"storage_capacity": 1200,
"deployment_type": "PERSISTENT_1",
"per_unit_storage_throughput": "50",
"automatic_backup_retention_days": 2,
"daily_automatic_backup_start_time": "03:00",
"copy_tags_to_backups": True,
},
None,
None,
0,
),
(
{"automatic_backup_retention_days": 2, "deployment_type": "SCRATCH_1"},
None,
"FSx automatic backup features can be used only with 'PERSISTENT_1' file systems",
0,
),
(
{"daily_automatic_backup_start_time": "03:00"},
None,
"When specifying 'daily_automatic_backup_start_time', "
"the 'automatic_backup_retention_days' option must be specified",
0,
),
(
{"storage_capacity": 1200, "deployment_type": "PERSISTENT_1", "copy_tags_to_backups": True},
None,
"When specifying 'copy_tags_to_backups', the 'automatic_backup_retention_days' option must be specified",
0,
),
(
{"storage_capacity": 1200, "deployment_type": "PERSISTENT_1", "copy_tags_to_backups": False},
None,
"When specifying 'copy_tags_to_backups', the 'automatic_backup_retention_days' option must be specified",
0,
),
(
{"daily_automatic_backup_start_time": "03:00", "copy_tags_to_backups": True},
None,
"When specifying 'daily_automatic_backup_start_time', "
"the 'automatic_backup_retention_days' option must be specified",
0,
),
(
{
"deployment_type": "PERSISTENT_1",
"automatic_backup_retention_days": 2,
"imported_file_chunk_size": 1024,
"export_path": "s3://test",
"import_path": "s3://test",
"storage_capacity": 1200,
},
{"Bucket": "test"},
"Backups cannot be created on S3-linked file systems",
0,
),
(
{
"deployment_type": "PERSISTENT_1",
"automatic_backup_retention_days": 2,
"export_path": "s3://test",
"import_path": "s3://test",
"storage_capacity": 1200,
},
{"Bucket": "test"},
"Backups cannot be created on S3-linked file systems",
0,
),
(
{
"deployment_type": "SCRATCH_1",
"storage_type": "HDD",
"per_unit_storage_throughput": 12,
"storage_capacity": 1200,
"drive_cache_type": "READ",
},
None,
"For HDD filesystems, 'deployment_type' must be 'PERSISTENT_1'",
0,
),
(
{
"deployment_type": "PERSISTENT_1",
"storage_type": "HDD",
"per_unit_storage_throughput": 50,
"storage_capacity": 1200,
"drive_cache_type": "READ",
},
None,
"For HDD filesystems, 'per_unit_storage_throughput' can only have the following values: {0}".format(
FSX_HDD_THROUGHPUT
),
0,
),
(
{
"deployment_type": "PERSISTENT_1",
"storage_type": "SSD",
"per_unit_storage_throughput": 12,
"storage_capacity": 1200,
},
None,
"For SSD filesystems, 'per_unit_storage_throughput' can only have the following values: {0}".format(
FSX_SSD_THROUGHPUT
),
0,
),
(
{
"deployment_type": "PERSISTENT_1",
"storage_type": "SSD",
"per_unit_storage_throughput": 50,
"storage_capacity": 1200,
"drive_cache_type": "NONE",
},
None,
"The configuration parameter 'drive_cache_type' has an invalid value 'NONE'",
0,
),
(
{
"deployment_type": "PERSISTENT_1",
"storage_type": "SSD",
"per_unit_storage_throughput": 50,
"storage_capacity": 1200,
},
None,
None,
0,
),
(
{
"deployment_type": "PERSISTENT_1",
"per_unit_storage_throughput": 50,
"storage_capacity": 1200,
"drive_cache_type": "READ",
},
None,
"'drive_cache_type' features can be used only with HDD filesystems",
0,
),
(
{
"data_compression_type": "LZ4",
"fsx_backup_id": "backup-12345678",
},
None,
"FSx data compression option (LZ4) cannot be specified when creating a filesystem from backup",
0,
),
(
{
"data_compression_type": "NONE",
"fsx_backup_id": "backup-12345678",
},
None,
"The configuration parameter 'data_compression_type' has an invalid value 'NONE'",
0,
),
(
{
"data_compression_type": "LZ4",
"storage_capacity": 1200,
},
None,
None,
0,
),
],
)
def test_fsx_validator(mocker, boto3_stubber, section_dict, bucket, expected_error, num_calls):
if bucket:
_head_bucket_stubber(mocker, boto3_stubber, bucket, num_calls)
if "fsx_kms_key_id" in section_dict:
_kms_key_stubber(mocker, boto3_stubber, section_dict.get("fsx_kms_key_id"), None, 0 if expected_error else 1)
config_parser_dict = {"cluster default": {"fsx_settings": "default"}, "fsx default": section_dict}
if expected_error:
expected_error = re.escape(expected_error)
utils.assert_param_validator(mocker, config_parser_dict, expected_error=expected_error)
@pytest.mark.parametrize(
"section_dict, expected_error, expected_warning",
[
(
{"storage_capacity": 1, "deployment_type": "SCRATCH_1"},
"Capacity for FSx SCRATCH_1 filesystem is 1,200 GB, 2,400 GB or increments of 3,600 GB",
None,
),
({"storage_capacity": 1200, "deployment_type": "SCRATCH_1"}, None, None),
({"storage_capacity": 2400, "deployment_type": "SCRATCH_1"}, None, None),
({"storage_capacity": 3600, "deployment_type": "SCRATCH_1"}, None, None),
(
{"storage_capacity": 3600, "deployment_type": "SCRATCH_2"},
"Capacity for FSx SCRATCH_2 and PERSISTENT_1 filesystems is 1,200 GB or increments of 2,400 GB",
None,
),
(
{"storage_capacity": 3600, "deployment_type": "PERSISTENT_1", "per_unit_storage_throughput": 50},
"Capacity for FSx SCRATCH_2 and PERSISTENT_1 filesystems is 1,200 GB or increments of 2,400 GB",
None,
),
(
{"storage_capacity": 3601, "deployment_type": "PERSISTENT_1", "per_unit_storage_throughput": 50},
"Capacity for FSx SCRATCH_2 and PERSISTENT_1 filesystems is 1,200 GB or increments of 2,400 GB",
None,
),
({"storage_capacity": 7200}, None, None),
(
{"deployment_type": "SCRATCH_1"},
"When specifying 'fsx' section, the 'storage_capacity' option must be specified",
None,
),
(
{
"storage_type": "HDD",
"deployment_type": "PERSISTENT_1",
"storage_capacity": 1801,
"per_unit_storage_throughput": 40,
},
"Capacity for FSx PERSISTENT HDD 40 MB/s/TiB file systems is increments of 1,800 GiB",
None,
),
(
{
"storage_type": "HDD",
"deployment_type": "PERSISTENT_1",
"storage_capacity": 6001,
"per_unit_storage_throughput": 12,
},
"Capacity for FSx PERSISTENT HDD 12 MB/s/TiB file systems is increments of 6,000 GiB",
None,
),
(
{
"storage_type": "HDD",
"deployment_type": "PERSISTENT_1",
"storage_capacity": 1800,
"per_unit_storage_throughput": 40,
},
None,
None,
),
(
{
"storage_type": "HDD",
"deployment_type": "PERSISTENT_1",
"storage_capacity": 6000,
"per_unit_storage_throughput": 12,
},
None,
None,
),
],
)
def test_fsx_storage_capacity_validator(mocker, boto3_stubber, capsys, section_dict, expected_error, expected_warning):
config_parser_dict = {"cluster default": {"fsx_settings": "default"}, "fsx default": section_dict}
utils.assert_param_validator(
mocker, config_parser_dict, capsys=capsys, expected_error=expected_error, expected_warning=expected_warning
)
def _head_bucket_stubber(mocker, boto3_stubber, bucket, num_calls):
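    # Stub successful S3 head_bucket responses num_calls times and patch urlopen to avoid real network access.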
head_bucket_response = {
"ResponseMetadata": {
"AcceptRanges": "bytes",
"ContentType": "text/html",
"LastModified": "Thu, 16 Apr 2015 18:19:14 GMT",
"ContentLength": 77,
"VersionId": "null",
"ETag": '"30a6ec7e1a9ad79c203d05a589c8b400"',
"Metadata": {},
}
}
mocked_requests = [
MockedBoto3Request(method="head_bucket", response=head_bucket_response, expected_params=bucket)
] * num_calls
boto3_stubber("s3", mocked_requests)
mocker.patch("pcluster.config.validators.urllib.request.urlopen")
@pytest.mark.parametrize(
"fsx_vpc, ip_permissions, network_interfaces, expected_message",
[
( # working case, right vpc and sg, multiple network interfaces
"vpc-06e4ab6c6cEXAMPLE",
[{"IpProtocol": "-1", "UserIdGroupPairs": [{"UserId": "123456789012", "GroupId": "sg-12345678"}]}],
["eni-09b9460295ddd4e5f", "eni-001b3cef7c78b45c4"],
None,
),
( # working case, right vpc and sg, single network interface
"vpc-06e4ab6c6cEXAMPLE",
[{"IpProtocol": "-1", "UserIdGroupPairs": [{"UserId": "123456789012", "GroupId": "sg-12345678"}]}],
["eni-09b9460295ddd4e5f"],
None,
),
( # not working case --> no network interfaces
"vpc-06e4ab6c6cEXAMPLE",
[{"IpProtocol": "-1", "UserIdGroupPairs": [{"UserId": "123456789012", "GroupId": "sg-12345678"}]}],
[],
"doesn't have Elastic Network Interfaces attached",
),
( # not working case --> wrong vpc
"vpc-06e4ab6c6ccWRONG",
[{"IpProtocol": "-1", "UserIdGroupPairs": [{"UserId": "123456789012", "GroupId": "sg-12345678"}]}],
["eni-09b9460295ddd4e5f"],
"only support using FSx file system that is in the same VPC as the stack",
),
( # not working case --> wrong ip permissions in security group
"vpc-06e4ab6c6cWRONG",
[
{
"PrefixListIds": [],
"FromPort": 22,
"IpRanges": [{"CidrIp": "203.0.113.0/24"}],
"ToPort": 22,
"IpProtocol": "tcp",
"UserIdGroupPairs": [],
}
],
["eni-09b9460295ddd4e5f"],
"does not satisfy mounting requirement",
),
],
)
def test_fsx_id_validator(mocker, boto3_stubber, fsx_vpc, ip_permissions, network_interfaces, expected_message):
describe_file_systems_response = {
"FileSystems": [
{
"VpcId": fsx_vpc,
"NetworkInterfaceIds": network_interfaces,
"SubnetIds": ["subnet-12345678"],
"FileSystemType": "LUSTRE",
"CreationTime": 1567636453.038,
"ResourceARN": "arn:aws:fsx:us-west-2:111122223333:file-system/fs-0ff8da96d57f3b4e3",
"StorageCapacity": 3600,
"LustreConfiguration": {"WeeklyMaintenanceStartTime": "4:07:00"},
"FileSystemId": "fs-0ff8da96d57f3b4e3",
"DNSName": "fs-0ff8da96d57f3b4e3.fsx.us-west-2.amazonaws.com",
"OwnerId": "059623208481",
"Lifecycle": "AVAILABLE",
}
]
}
fsx_mocked_requests = [
MockedBoto3Request(
method="describe_file_systems",
response=describe_file_systems_response,
expected_params={"FileSystemIds": ["fs-0ff8da96d57f3b4e3"]},
)
]
boto3_stubber("fsx", fsx_mocked_requests)
describe_subnets_response = {
"Subnets": [
{
"AvailabilityZone": "us-east-2c",
"AvailabilityZoneId": "use2-az3",
"AvailableIpAddressCount": 248,
"CidrBlock": "10.0.1.0/24",
"DefaultForAz": False,
"MapPublicIpOnLaunch": False,
"State": "available",
"SubnetId": "subnet-12345678",
"VpcId": "vpc-06e4ab6c6cEXAMPLE",
"OwnerId": "111122223333",
"AssignIpv6AddressOnCreation": False,
"Ipv6CidrBlockAssociationSet": [],
"Tags": [{"Key": "Name", "Value": "MySubnet"}],
"SubnetArn": "arn:aws:ec2:us-east-2:111122223333:subnet/subnet-12345678",
}
]
}
ec2_mocked_requests = [
MockedBoto3Request(
method="describe_subnets",
response=describe_subnets_response,
expected_params={"SubnetIds": ["subnet-12345678"]},
)
] * 2
if network_interfaces:
network_interfaces_in_response = []
for network_interface in network_interfaces:
network_interfaces_in_response.append(
{
"Association": {
"AllocationId": "eipalloc-01564b674a1a88a47",
"AssociationId": "eipassoc-02726ee370e175cea",
"IpOwnerId": "111122223333",
"PublicDnsName": "ec2-34-248-114-123.eu-west-1.compute.amazonaws.com",
"PublicIp": "34.248.114.123",
},
"Attachment": {
"AttachmentId": "ela-attach-0cf98331",
"DeleteOnTermination": False,
"DeviceIndex": 1,
"InstanceOwnerId": "amazon-aws",
"Status": "attached",
},
"AvailabilityZone": "eu-west-1a",
"Description": "Interface for NAT Gateway nat-0a8b0e0d28266841f",
"Groups": [{"GroupName": "default", "GroupId": "sg-12345678"}],
"InterfaceType": "nat_gateway",
"Ipv6Addresses": [],
"MacAddress": "0a:e5:8a:82:fd:24",
"NetworkInterfaceId": network_interface,
"OwnerId": "111122223333",
"PrivateDnsName": "ip-10-0-124-85.eu-west-1.compute.internal",
"PrivateIpAddress": "10.0.124.85",
"PrivateIpAddresses": [
{
"Association": {
"AllocationId": "eipalloc-01564b674a1a88a47",
"AssociationId": "eipassoc-02726ee370e175cea",
"IpOwnerId": "111122223333",
"PublicDnsName": "ec2-34-248-114-123.eu-west-1.compute.amazonaws.com",
"PublicIp": "34.248.114.123",
},
"Primary": True,
"PrivateDnsName": "ip-10-0-124-85.eu-west-1.compute.internal",
"PrivateIpAddress": "10.0.124.85",
}
],
"RequesterId": "036872051663",
"RequesterManaged": True,
"SourceDestCheck": False,
"Status": "in-use",
"SubnetId": "subnet-12345678",
"TagSet": [],
"VpcId": fsx_vpc,
}
)
describe_network_interfaces_response = {"NetworkInterfaces": network_interfaces_in_response}
ec2_mocked_requests.append(
MockedBoto3Request(
method="describe_network_interfaces",
response=describe_network_interfaces_response,
expected_params={"NetworkInterfaceIds": network_interfaces},
)
)
if fsx_vpc == "vpc-06e4ab6c6cEXAMPLE":
            # the describe_security_groups call is performed only if the VPC of the network
            # interface is the same as the FSx file system's VPC
describe_security_groups_response = {
"SecurityGroups": [
{
"IpPermissionsEgress": ip_permissions,
"Description": "My security group",
"IpPermissions": ip_permissions,
"GroupName": "MySecurityGroup",
"OwnerId": "123456789012",
"GroupId": "sg-12345678",
}
]
}
ec2_mocked_requests.append(
MockedBoto3Request(
method="describe_security_groups",
response=describe_security_groups_response,
expected_params={"GroupIds": ["sg-12345678"]},
)
)
boto3_stubber("ec2", ec2_mocked_requests)
fsx_spy = mocker.patch(
"pcluster.config.cfn_param_types.get_fsx_info",
return_value={"DNSName": "my.fsx.dns.name", "LustreConfiguration": {"MountName": "somemountname"}},
)
config_parser_dict = {
"cluster default": {"fsx_settings": "default", "vpc_settings": "default"},
"vpc default": {"master_subnet_id": "subnet-12345678"},
"fsx default": {"fsx_fs_id": "fs-0ff8da96d57f3b4e3"},
}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
fsx_spy.assert_called_with("fs-0ff8da96d57f3b4e3")
@pytest.mark.parametrize(
"section_dict, expected_message",
[
({"enable_intel_hpc_platform": "true", "base_os": "centos7"}, None),
({"enable_intel_hpc_platform": "true", "base_os": "centos8"}, None),
({"enable_intel_hpc_platform": "true", "base_os": "alinux2"}, "it is required to set the 'base_os'"),
({"enable_intel_hpc_platform": "true", "base_os": "ubuntu1804"}, "it is required to set the 'base_os'"),
        # Intel HPC platform disabled: any OS can be used
({"enable_intel_hpc_platform": "false", "base_os": "alinux2"}, None),
],
)
def test_intel_hpc_os_validator(mocker, section_dict, expected_message):
config_parser_dict = {"cluster default": section_dict}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
@pytest.mark.parametrize(
"section_dict, expected_message",
[
(
{"disable_hyperthreading": True, "extra_json": '{"cluster": {"cfn_scheduler_slots": "vcpus"}}'},
"cfn_scheduler_slots cannot be set in addition to disable_hyperthreading = true",
),
(
{"disable_hyperthreading": True, "extra_json": '{"cluster": {"cfn_scheduler_slots": "cores"}}'},
"cfn_scheduler_slots cannot be set in addition to disable_hyperthreading = true",
),
(
{"disable_hyperthreading": True, "extra_json": '{"cluster": {"cfn_scheduler_slots": 3}}'},
"cfn_scheduler_slots cannot be set in addition to disable_hyperthreading = true",
),
({"disable_hyperthreading": True, "extra_json": '{"cluster": {"other_param": "fake_value"}}'}, None),
({"disable_hyperthreading": True}, None),
({"disable_hyperthreading": False, "extra_json": '{"cluster": {"cfn_scheduler_slots": "vcpus"}}'}, None),
({"disable_hyperthreading": False, "extra_json": '{"cluster": {"cfn_scheduler_slots": "cores"}}'}, None),
({"disable_hyperthreading": False, "extra_json": '{"cluster": {"cfn_scheduler_slots": 3}}'}, None),
],
)
def test_disable_hyperthreading_validator(mocker, section_dict, expected_message):
config_parser_dict = {"cluster default": section_dict}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
@pytest.mark.parametrize(
"section_dict, bucket, expected_message",
[
(
{"imported_file_chunk_size": 0, "import_path": "s3://test-import", "storage_capacity": 1200},
None,
"has a minimum size of 1 MiB, and max size of 512,000 MiB",
),
(
{"imported_file_chunk_size": 1, "import_path": "s3://test-import", "storage_capacity": 1200},
{"Bucket": "test-import"},
None,
),
(
{"imported_file_chunk_size": 10, "import_path": "s3://test-import", "storage_capacity": 1200},
{"Bucket": "test-import"},
None,
),
(
{"imported_file_chunk_size": 512000, "import_path": "s3://test-import", "storage_capacity": 1200},
{"Bucket": "test-import"},
None,
),
(
{"imported_file_chunk_size": 512001, "import_path": "s3://test-import", "storage_capacity": 1200},
None,
"has a minimum size of 1 MiB, and max size of 512,000 MiB",
),
],
)
def test_fsx_imported_file_chunk_size_validator(mocker, boto3_stubber, section_dict, bucket, expected_message):
if bucket:
_head_bucket_stubber(mocker, boto3_stubber, bucket, num_calls=1)
config_parser_dict = {"cluster default": {"fsx_settings": "default"}, "fsx default": section_dict}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
@pytest.mark.parametrize(
"section_dict, expected_error, expected_warning",
[
({"enable_efa": "NONE"}, "invalid value", None),
({"enable_efa": "compute", "scheduler": "sge"}, "is required to set the 'compute_instance_type'", None),
(
{"enable_efa": "compute", "compute_instance_type": "t2.large", "scheduler": "sge"},
None,
"You may see better performance using a cluster placement group",
),
(
{
"enable_efa": "compute",
"compute_instance_type": "t2.large",
"base_os": "alinux2",
"scheduler": "awsbatch",
},
"it is required to set the 'scheduler'",
None,
),
(
{
"enable_efa": "compute",
"compute_instance_type": "t2.large",
"base_os": "centos7",
"scheduler": "sge",
"placement_group": "DYNAMIC",
},
None,
None,
),
(
{
"enable_efa": "compute",
"compute_instance_type": "t2.large",
"base_os": "alinux2",
"scheduler": "sge",
"placement_group": "DYNAMIC",
},
None,
None,
),
# Additional instance type
(
{
"enable_efa": "compute",
"compute_instance_type": "additional-instance-type",
"base_os": "alinux2",
"scheduler": "sge",
"placement_group": "DYNAMIC",
"instance_types_data": json.dumps(
{
"additional-instance-type": {
"InstanceType": "additional-instance-type",
"NetworkInfo": {"EfaSupported": True},
}
}
),
},
None,
None,
),
],
)
def test_efa_validator(boto3_stubber, mocker, capsys, section_dict, expected_error, expected_warning):
if section_dict.get("enable_efa") != "NONE":
mocked_requests = [
MockedBoto3Request(
method="describe_instance_types",
response={"InstanceTypes": [{"InstanceType": "t2.large"}]},
expected_params={"Filters": [{"Name": "network-info.efa-supported", "Values": ["true"]}]},
)
]
boto3_stubber("ec2", mocked_requests)
config_parser_dict = {"cluster default": section_dict}
    # Patch to prevent instance type validators from failing with the additional instance type
extra_patches = {
"pcluster.config.validators.get_supported_instance_types": ["t2.large", "additional-instance-type"],
}
utils.assert_param_validator(
mocker,
config_parser_dict,
expected_error,
capsys,
expected_warning,
extra_patches=extra_patches,
use_mock_instance_type_info=False,
)
@pytest.mark.parametrize(
"cluster_dict, expected_error",
[
# EFAGDR without EFA
(
{"enable_efa_gdr": "compute"},
"The parameter 'enable_efa_gdr' can be used only in combination with 'enable_efa'",
),
# EFAGDR with EFA
({"enable_efa": "compute", "enable_efa_gdr": "compute"}, None),
        # EFA without EFAGDR
({"enable_efa": "compute"}, None),
],
)
def test_efa_gdr_validator(cluster_dict, expected_error):
config_parser_dict = {
"cluster default": cluster_dict,
}
config_parser = configparser.ConfigParser()
config_parser.read_dict(config_parser_dict)
pcluster_config = utils.init_pcluster_config_from_configparser(config_parser, False, auto_refresh=False)
enable_efa_gdr_value = pcluster_config.get_section("cluster").get_param_value("enable_efa_gdr")
errors, warnings = efa_gdr_validator("enable_efa_gdr", enable_efa_gdr_value, pcluster_config)
if expected_error:
assert_that(errors[0]).matches(expected_error)
else:
assert_that(errors).is_empty()
@pytest.mark.parametrize(
"ip_permissions, ip_permissions_egress, expected_message",
[
([], [], "must allow all traffic in and out from itself"),
(
[{"IpProtocol": "-1", "UserIdGroupPairs": [{"UserId": "123456789012", "GroupId": "sg-12345678"}]}],
[],
"must allow all traffic in and out from itself",
),
(
[{"IpProtocol": "-1", "UserIdGroupPairs": [{"UserId": "123456789012", "GroupId": "sg-12345678"}]}],
[{"IpProtocol": "-1", "UserIdGroupPairs": [{"UserId": "123456789012", "GroupId": "sg-12345678"}]}],
None,
),
(
[
{
"PrefixListIds": [],
"FromPort": 22,
"IpRanges": [{"CidrIp": "203.0.113.0/24"}],
"ToPort": 22,
"IpProtocol": "tcp",
"UserIdGroupPairs": [],
}
],
[],
"must allow all traffic in and out from itself",
),
],
)
def test_efa_validator_with_vpc_security_group(
boto3_stubber, mocker, ip_permissions, ip_permissions_egress, expected_message
):
describe_security_groups_response = {
"SecurityGroups": [
{
"IpPermissionsEgress": ip_permissions_egress,
"Description": "My security group",
"IpPermissions": ip_permissions,
"GroupName": "MySecurityGroup",
"OwnerId": "123456789012",
"GroupId": "sg-12345678",
}
]
}
mocked_requests = [
MockedBoto3Request(
method="describe_security_groups",
response=describe_security_groups_response,
expected_params={"GroupIds": ["sg-12345678"]},
),
MockedBoto3Request(
method="describe_instance_types",
response={"InstanceTypes": [{"InstanceType": "t2.large"}]},
expected_params={"Filters": [{"Name": "network-info.efa-supported", "Values": ["true"]}]},
),
MockedBoto3Request(
method="describe_security_groups",
response=describe_security_groups_response,
expected_params={"GroupIds": ["sg-12345678"]},
        ),  # it is called twice: once for vpc_security_group_id validation and once to validate EFA
]
boto3_stubber("ec2", mocked_requests)
config_parser_dict = {
"cluster default": {
"enable_efa": "compute",
"compute_instance_type": "t2.large",
"placement_group": "DYNAMIC",
"vpc_settings": "default",
"scheduler": "sge",
},
"vpc default": {"vpc_security_group_id": "sg-12345678"},
}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
@pytest.mark.parametrize(
"cluster_section_dict, ebs_section_dict, expected_message",
[
(
{"ebs_settings": "vol1, vol2, vol3, vol4, vol5, vol6"},
{
"vol1": {"shared_dir": "/vol1"},
"vol2": {"shared_dir": "/vol2"},
"vol3": {"shared_dir": "/vol3"},
"vol4": {"shared_dir": "/vol4"},
"vol5": {"shared_dir": "/vol5"},
"vol6": {"shared_dir": "/vol6"},
},
"Invalid number of 'ebs' sections specified. Max 5 expected.",
),
(
{"ebs_settings": "vol1, vol2 "},
{"vol1": {"shared_dir": "vol1"}, "vol2": {"volume_type": "io1"}},
"When using more than 1 EBS volume, shared_dir is required under each EBS section",
),
(
{"ebs_settings": "vol1,vol2"},
{"vol1": {"shared_dir": "/NONE"}, "vol2": {"shared_dir": "vol2"}},
"/NONE cannot be used as a shared directory",
),
(
{"ebs_settings": "vol1, vol2 "},
{"vol1": {"shared_dir": "/vol1"}, "vol2": {"shared_dir": "NONE"}},
"NONE cannot be used as a shared directory",
),
],
)
def test_ebs_settings_validator(mocker, cluster_section_dict, ebs_section_dict, expected_message):
config_parser_dict = {"cluster default": cluster_section_dict}
if ebs_section_dict:
for vol in ebs_section_dict:
config_parser_dict["ebs {0}".format(vol)] = ebs_section_dict.get(vol)
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
@pytest.mark.parametrize(
"section_dict, expected_message",
[
({"shared_dir": "NONE"}, "NONE cannot be used as a shared directory"),
({"shared_dir": "/NONE"}, "/NONE cannot be used as a shared directory"),
({"shared_dir": "/NONEshared"}, None),
],
)
def test_shared_dir_validator(mocker, section_dict, expected_message):
config_parser_dict = {"cluster default": section_dict}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
@pytest.mark.parametrize(
"base_os, instance_type, access_from, expected_error, expected_warning",
[
("centos7", "t2.medium", None, None, None),
("centos8", "t2.medium", None, None, None),
("ubuntu1804", "t2.medium", None, None, None),
("ubuntu1804", "t2.medium", "1.2.3.4/32", None, None),
("centos7", "t2.medium", "0.0.0.0/0", None, None),
("centos8", "t2.medium", "0.0.0.0/0", None, None),
("alinux2", "t2.medium", None, None, None),
("alinux2", "t2.nano", None, None, "is recommended to use an instance type with at least"),
("alinux2", "t2.micro", None, None, "is recommended to use an instance type with at least"),
("ubuntu1804", "m6g.xlarge", None, None, None),
("alinux2", "m6g.xlarge", None, None, None),
("centos7", "m6g.xlarge", None, None, None),
("centos8", "m6g.xlarge", None, None, None),
],
)
def test_dcv_enabled_validator(
mocker, base_os, instance_type, expected_error, expected_warning, access_from, caplog, capsys
):
config_parser_dict = {
"cluster default": {"base_os": base_os, "dcv_settings": "dcv"},
"dcv dcv": {"enable": "master"},
}
if access_from:
config_parser_dict["dcv dcv"]["access_from"] = access_from
architectures = ["x86_64"] if instance_type.startswith("t2") else ["arm64"]
extra_patches = {
"pcluster.config.validators.get_supported_instance_types": ["t2.nano", "t2.micro", "t2.medium", "m6g.xlarge"],
"pcluster.config.validators.get_supported_architectures_for_instance_type": architectures,
"pcluster.config.cfn_param_types.get_supported_architectures_for_instance_type": architectures,
}
utils.assert_param_validator(
mocker, config_parser_dict, expected_error, capsys, expected_warning, extra_patches=extra_patches
)
access_from_error_msg = DCV_MESSAGES["warnings"]["access_from_world"].format(port=8443)
assert_that(access_from_error_msg in caplog.text).is_equal_to(not access_from or access_from == "0.0.0.0/0")
@pytest.mark.parametrize(
"architecture, base_os, expected_message",
[
# Supported combinations
("x86_64", "alinux2", None),
("x86_64", "centos7", None),
("x86_64", "centos8", None),
("x86_64", "ubuntu1804", None),
("arm64", "ubuntu1804", None),
("arm64", "alinux2", None),
("arm64", "centos7", None),
("arm64", "centos8", None),
# Unsupported combinations
(
"UnsupportedArchitecture",
"alinux2",
FSX_MESSAGES["errors"]["unsupported_architecture"].format(
supported_architectures=list(FSX_SUPPORTED_ARCHITECTURES_OSES.keys())
),
),
],
)
def test_fsx_architecture_os_validator(mocker, architecture, base_os, expected_message):
config_parser_dict = {
"cluster default": {"base_os": base_os, "fsx_settings": "fsx"},
"fsx fsx": {"storage_capacity": 3200},
}
expected_message = re.escape(expected_message) if expected_message else None
extra_patches = {
"pcluster.config.cfn_param_types.get_supported_architectures_for_instance_type": [architecture],
"pcluster.config.validators.get_supported_architectures_for_instance_type": [architecture],
}
utils.assert_param_validator(mocker, config_parser_dict, expected_message, extra_patches=extra_patches)
@pytest.mark.parametrize(
"section_dict, expected_message",
[
(
{"initial_queue_size": "0", "maintain_initial_size": True},
"maintain_initial_size cannot be set to true if initial_queue_size is 0",
),
(
{"scheduler": "awsbatch", "maintain_initial_size": True},
"maintain_initial_size is not supported when using awsbatch as scheduler",
),
],
)
def test_maintain_initial_size_validator(mocker, section_dict, expected_message):
config_parser_dict = {"cluster default": section_dict}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
@pytest.mark.parametrize(
"cluster_section_dict, expected_message",
[
# SIT cluster, perfectly fine
({"scheduler": "slurm"}, None),
# HIT cluster with one queue
({"scheduler": "slurm", "queue_settings": "queue1"}, None),
({"scheduler": "slurm", "queue_settings": "queue1,queue2,queue3,queue4,queue5"}, None),
({"scheduler": "slurm", "queue_settings": "queue1, queue2"}, None),
(
{"scheduler": "slurm", "queue_settings": "queue1,queue2,queue3,queue4,queue5,queue6"},
"Invalid number of 'queue' sections specified. Max 5 expected.",
),
(
{"scheduler": "slurm", "queue_settings": "queue_1"},
(
"Invalid queue name 'queue_1'. Queue section names can be at most 30 chars long, must begin with"
" a letter and only contain lowercase letters, digits and hyphens. It is forbidden to use"
" 'default' as a queue section name."
),
),
(
{"scheduler": "slurm", "queue_settings": "default"},
(
"Invalid queue name 'default'. Queue section names can be at most 30 chars long, must begin with"
" a letter and only contain lowercase letters, digits and hyphens. It is forbidden to use"
" 'default' as a queue section name."
),
),
(
{"scheduler": "slurm", "queue_settings": "queue1, default"},
(
"Invalid queue name '.*'. Queue section names can be at most 30 chars long, must begin with"
" a letter and only contain lowercase letters, digits and hyphens. It is forbidden to use"
" 'default' as a queue section name."
),
),
(
{"scheduler": "slurm", "queue_settings": "QUEUE"},
(
"Invalid queue name 'QUEUE'. Queue section names can be at most 30 chars long, must begin with"
" a letter and only contain lowercase letters, digits and hyphens. It is forbidden to use"
" 'default' as a queue section name."
),
),
(
{"scheduler": "slurm", "queue_settings": "aQUEUEa"},
(
"Invalid queue name 'aQUEUEa'. Queue section names can be at most 30 chars long, must begin with"
" a letter and only contain lowercase letters, digits and hyphens. It is forbidden to use"
" 'default' as a queue section name."
),
),
({"scheduler": "slurm", "queue_settings": "my-default-queue"}, None),
],
)
def test_queue_settings_validator(mocker, cluster_section_dict, expected_message):
config_parser_dict = {"cluster default": cluster_section_dict}
if cluster_section_dict.get("queue_settings"):
for i, queue_name in enumerate(cluster_section_dict["queue_settings"].split(",")):
config_parser_dict["queue {0}".format(queue_name.strip())] = {
"compute_resource_settings": "cr{0}".format(i),
"disable_hyperthreading": True,
"enable_efa": True,
}
config_parser_dict["compute_resource cr{0}".format(i)] = {"instance_type": "t2.micro"}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
@pytest.mark.parametrize(
"cluster_dict, queue_dict, expected_error_messages, expected_warning_messages",
[
(
{"queue_settings": "default"},
{"compute_resource_settings": "cr1,cr2", "enable_efa": True, "disable_hyperthreading": True},
[
"Duplicate instance type 't2.micro' found in queue 'default'. "
"Compute resources in the same queue must use different instance types"
],
[
"EFA was enabled on queue 'default', but instance type 't2.micro' "
"defined in compute resource settings cr1 does not support EFA.",
"EFA was enabled on queue 'default', but instance type 't2.micro' "
"defined in compute resource settings cr2 does not support EFA.",
],
),
(
{"queue_settings": "default"},
{"compute_resource_settings": "cr3,cr4", "enable_efa": True, "disable_hyperthreading": True},
[
"Duplicate instance type 'c4.xlarge' found in queue 'default'. "
"Compute resources in the same queue must use different instance types"
],
[
"EFA was enabled on queue 'default', but instance type 'c4.xlarge' "
"defined in compute resource settings cr3 does not support EFA.",
"EFA was enabled on queue 'default', but instance type 'c4.xlarge' "
"defined in compute resource settings cr4 does not support EFA.",
],
),
(
{"queue_settings": "default"},
{"compute_resource_settings": "cr1,cr3", "enable_efa": True, "disable_hyperthreading": True},
None,
[
"EFA was enabled on queue 'default', but instance type 't2.micro' "
"defined in compute resource settings cr1 does not support EFA.",
"EFA was enabled on queue 'default', but instance type 'c4.xlarge' "
"defined in compute resource settings cr3 does not support EFA.",
],
),
(
{"queue_settings": "default"},
{"compute_resource_settings": "cr2,cr4", "enable_efa": True, "disable_hyperthreading": True},
None,
[
"EFA was enabled on queue 'default', but instance type 't2.micro' "
"defined in compute resource settings cr2 does not support EFA.",
"EFA was enabled on queue 'default', but instance type 'c4.xlarge' "
"defined in compute resource settings cr4 does not support EFA.",
],
),
(
{"queue_settings": "default"},
{"compute_resource_settings": "cr2,cr4", "enable_efa": True, "enable_efa_gdr": True},
None,
[
"EFA was enabled on queue 'default', but instance type 't2.micro' "
"defined in compute resource settings cr2 does not support EFA.",
"EFA GDR was enabled on queue 'default', but instance type 't2.micro' "
"defined in compute resource settings cr2 does not support EFA GDR.",
"EFA was enabled on queue 'default', but instance type 'c4.xlarge' "
"defined in compute resource settings cr4 does not support EFA.",
"EFA GDR was enabled on queue 'default', but instance type 'c4.xlarge' "
"defined in compute resource settings cr4 does not support EFA GDR.",
],
),
(
{"queue_settings": "default"},
{"compute_resource_settings": "efa_instance", "enable_efa_gdr": True},
["The parameter 'enable_efa_gdr' can be used only in combination with 'enable_efa'"],
None,
),
({"queue_settings": "default"}, {"compute_resource_settings": "cr1"}, None, None),
(
{"queue_settings": "default", "enable_efa": "compute", "disable_hyperthreading": True},
{"compute_resource_settings": "cr1", "enable_efa": True, "disable_hyperthreading": True},
[
"Parameter 'enable_efa' can be used only in 'cluster' or in 'queue' section",
"Parameter 'disable_hyperthreading' can be used only in 'cluster' or in 'queue' section",
],
[
"EFA was enabled on queue 'default', but instance type 't2.micro' "
"defined in compute resource settings cr1 does not support EFA."
],
),
(
{
"queue_settings": "default",
"enable_efa": "compute",
"enable_efa_gdr": "compute",
"disable_hyperthreading": True,
},
{
"compute_resource_settings": "cr1",
"enable_efa": False,
"enable_efa_gdr": False,
"disable_hyperthreading": False,
},
[
"Parameter 'enable_efa' can be used only in 'cluster' or in 'queue' section",
"Parameter 'enable_efa_gdr' can be used only in 'cluster' or in 'queue' section",
"Parameter 'disable_hyperthreading' can be used only in 'cluster' or in 'queue' section",
],
None,
),
(
{"queue_settings": "default"},
{"compute_resource_settings": "efa_instance", "enable_efa": True},
None,
None,
),
],
)
def test_queue_validator(cluster_dict, queue_dict, expected_error_messages, expected_warning_messages):
config_parser_dict = {
"cluster default": cluster_dict,
"queue default": queue_dict,
"compute_resource cr1": {"instance_type": "t2.micro"},
"compute_resource cr2": {"instance_type": "t2.micro"},
"compute_resource cr3": {"instance_type": "c4.xlarge"},
"compute_resource cr4": {"instance_type": "c4.xlarge"},
"compute_resource efa_instance": {"instance_type": "p3dn.24xlarge"},
}
config_parser = configparser.ConfigParser()
config_parser.read_dict(config_parser_dict)
pcluster_config = utils.init_pcluster_config_from_configparser(config_parser, False, auto_refresh=False)
efa_instance_compute_resource = pcluster_config.get_section("compute_resource", "efa_instance")
if efa_instance_compute_resource:
        # Override the `enable_efa` and `enable_efa_gdr` default values for the instance type with EFA support
efa_instance_compute_resource.get_param("enable_efa").value = True
efa_instance_compute_resource.get_param("enable_efa_gdr").value = True
errors, warnings = queue_validator("queue", "default", pcluster_config)
if expected_error_messages:
assert_that(expected_error_messages).is_equal_to(errors)
else:
assert_that(errors).is_empty()
if expected_warning_messages:
assert_that(expected_warning_messages).is_equal_to(warnings)
else:
assert_that(warnings).is_empty()
@pytest.mark.parametrize(
"param_value, expected_message",
[
(
"section1!2",
"Invalid label 'section1!2' in param 'queue_settings'. "
"Section labels can only contain alphanumeric characters, dashes or underscores.",
),
(
"section!123456789abcdefghijklmnopqrstuvwxyz_123456789abcdefghijklmnopqrstuvwxyz_",
"Invalid label 'section!123456789...' in param 'queue_settings'. "
"Section labels can only contain alphanumeric characters, dashes or underscores.",
),
("section-1", None),
("section_1", None),
(
"section_123456789abcdefghijklmnopqrstuvwxyz_123456789abcdefghijklmnopqrstuvwxyz_",
"Invalid label 'section_123456789...' in param 'queue_settings'. "
"The maximum length allowed for section labels is 64 characters",
),
],
)
def test_settings_validator(param_value, expected_message):
errors, warnings = settings_validator("queue_settings", param_value, None)
if expected_message:
assert_that(errors and len(errors) == 1).is_true()
assert_that(errors[0]).is_equal_to(expected_message)
else:
assert_that(errors).is_empty()
@pytest.mark.parametrize(
"section_dict, expected_message",
[
({"min_count": -1, "initial_count": -1}, "Parameter 'min_count' must be 0 or greater than 0"),
(
{"min_count": 0, "initial_count": 1, "spot_price": -1.1},
"Parameter 'spot_price' must be 0 or greater than 0",
),
(
{"min_count": 1, "max_count": 0, "initial_count": 1},
"Parameter 'max_count' must be greater than or equal to 'min_count'",
),
({"min_count": 0, "max_count": 0, "initial_count": 0}, "Parameter 'max_count' must be 1 or greater than 1"),
({"min_count": 1, "max_count": 2, "spot_price": 1.5, "initial_count": 1}, None),
(
{"min_count": 2, "max_count": 4, "initial_count": 1},
"Parameter 'initial_count' must be greater than or equal to 'min_count'",
),
(
{"min_count": 2, "max_count": 4, "initial_count": 5},
"Parameter 'initial_count' must be lower than or equal to 'max_count'",
),
],
)
def test_compute_resource_validator(mocker, section_dict, expected_message):
config_parser_dict = {
"cluster default": {"queue_settings": "default"},
"queue default": {"compute_resource_settings": "default"},
"compute_resource default": section_dict,
}
config_parser = configparser.ConfigParser()
config_parser.read_dict(config_parser_dict)
mocker.patch(
"pcluster.config.cfn_param_types.get_supported_architectures_for_instance_type", return_value=["x86_64"]
)
instance_type_info_mock = mocker.MagicMock()
mocker.patch(
"pcluster.config.cfn_param_types.InstanceTypeInfo.init_from_instance_type", return_value=instance_type_info_mock
)
instance_type_info_mock.max_network_interface_count.return_value = 1
mocker.patch("pcluster.config.validators.get_supported_architectures_for_instance_type", return_value=["x86_64"])
pcluster_config = utils.init_pcluster_config_from_configparser(config_parser, False)
errors, warnings = compute_resource_validator("compute_resource", "default", pcluster_config)
if expected_message:
        assert_that(expected_message in errors).is_true()
else:
assert_that(errors).is_empty()
@pytest.mark.parametrize(
"cluster_section_dict, sections_dict, expected_message",
[
(
{"vpc_settings": "vpc1, vpc2"},
{"vpc vpc1": {}, "vpc vpc2": {}},
"The value of 'vpc_settings' parameter is invalid. It can only contain a single vpc section label",
),
(
{"efs_settings": "efs1, efs2"},
{"efs efs1": {}, "efs efs2": {}},
"The value of 'efs_settings' parameter is invalid. It can only contain a single efs section label",
),
],
)
def test_single_settings_validator(mocker, cluster_section_dict, sections_dict, expected_message):
config_parser_dict = {"cluster default": cluster_section_dict}
if sections_dict:
for key, section in sections_dict.items():
config_parser_dict[key] = section
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
#########
#
# architecture validator tests
#
# Two things make it difficult to test validators that key on architecture in the same way
# as the other validators:
# 1) architecture is a derived parameter and cannot be configured directly via the config file
# 2) many validators key on the architecture, which makes it impossible to test some combinations of
#    parameters for validators that run later than others, because the earlier ones will have
#    already raised exceptions.
#
# Thus, the following code mocks the pcluster_config object passed to the validator functions
# and calls those functions directly (as opposed to patching functions and instantiating a config
# as would be done when running `pcluster create/update`).
#
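# A minimal sketch of that pattern (illustrative only; it assumes the helpers defined right
# below and the intel_hpc_architecture_validator exercised further down):
#
#     pcluster_config_mock = make_pcluster_config_mock(
#         mocker, {"cluster": {"architecture": "arm64", "enable_intel_hpc_platform": True}}
#     )
#     errors, warnings = intel_hpc_architecture_validator(
#         "enable_intel_hpc_platform", True, pcluster_config_mock
#     )
#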
#########
def get_default_pcluster_sections_dict():
"""Return a dict similar in structure to that of a cluster config file."""
default_pcluster_sections_dict = {}
for section_default_dict in DefaultDict:
if section_default_dict.name == "pcluster": # Get rid of the extra layer in this case
default_pcluster_sections_dict["cluster"] = section_default_dict.value.get("cluster")
else:
default_pcluster_sections_dict[section_default_dict.name] = section_default_dict.value
return default_pcluster_sections_dict
def make_pcluster_config_mock(mocker, config_dict):
"""Mock the calls that made on a pcluster_config by validator functions."""
cluster_config_dict = get_default_pcluster_sections_dict()
for section_key in config_dict:
cluster_config_dict = utils.merge_dicts(cluster_config_dict.get(section_key), config_dict.get(section_key))
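    # Each configured section is then backed by a MagicMock whose get_param_value returns the raw
    # values from config_dict; pcluster_config.get_section dispatches to these per-section mocks.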
section_to_mocks = {}
for section_key, section_dict in config_dict.items():
section_mock = mocker.MagicMock()
section_mock.get_param_value.side_effect = lambda param: section_dict.get(param)
section_to_mocks[section_key] = section_mock
pcluster_config_mock = mocker.MagicMock()
pcluster_config_mock.get_section.side_effect = lambda section: section_to_mocks.get(section)
return pcluster_config_mock
def run_architecture_validator_test(
mocker,
config,
constrained_param_section,
constrained_param_name,
param_name,
param_val,
validator,
expected_warnings,
expected_errors,
):
"""Run a test for a validator that's concerned with the architecture param."""
mocked_pcluster_config = make_pcluster_config_mock(mocker, config)
errors, warnings = validator(param_name, param_val, mocked_pcluster_config)
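    # The validator is expected to have read the constraining parameter: get_section must have been
    # called with its section, and get_param_value with its name on that section's mock. Note that
    # get_section.side_effect is the lambda set in make_pcluster_config_mock, so calling it here
    # returns the same section mock the validator saw.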
mocked_pcluster_config.get_section.assert_called_once_with(constrained_param_section)
mocked_pcluster_config.get_section.side_effect(constrained_param_section).get_param_value.assert_called_with(
constrained_param_name
)
assert_that(len(warnings)).is_equal_to(len(expected_warnings))
    for warning, expected_warning in zip(warnings, expected_warnings):
        assert_that(warning).matches(re.escape(expected_warning))
    assert_that(len(errors)).is_equal_to(len(expected_errors))
    for error, expected_error in zip(errors, expected_errors):
        assert_that(error).matches(re.escape(expected_error))
@pytest.mark.parametrize(
"enabled, architecture, expected_errors",
[
(True, "x86_64", []),
(True, "arm64", ["instance types and an AMI that support these architectures"]),
(False, "x86_64", []),
(False, "arm64", []),
],
)
def test_intel_hpc_architecture_validator(mocker, enabled, architecture, expected_errors):
"""Verify that setting enable_intel_hpc_platform is invalid when architecture != x86_64."""
config_dict = {"cluster": {"enable_intel_hpc_platform": enabled, "architecture": architecture}}
run_architecture_validator_test(
mocker,
config_dict,
"cluster",
"architecture",
"enable_intel_hpc_platform",
enabled,
intel_hpc_architecture_validator,
[],
expected_errors,
)
@pytest.mark.parametrize(
"base_os, architecture, expected_warnings, expected_errors",
[
# All OSes supported for x86_64
("alinux2", "x86_64", [], []),
("centos7", "x86_64", [], []),
("centos8", "x86_64", [], []),
("ubuntu1804", "x86_64", [], []),
# Only a subset of OSes supported for arm64
("alinux2", "arm64", [], []),
(
"centos7",
"arm64",
[
"Warning: The aarch64 CentOS 7 OS is not validated for the 6th generation aarch64 instances "
"(M6g, C6g, etc.). To proceed please provide a custom_ami, "
"for more info see: https://wiki.centos.org/Cloud/AWS#aarch64_notes"
],
[],
),
("centos8", "arm64", [], []),
("ubuntu1804", "arm64", [], []),
],
)
def test_architecture_os_validator(mocker, base_os, architecture, expected_warnings, expected_errors):
"""Verify that the correct set of OSes is supported for each supported architecture."""
config_dict = {"cluster": {"base_os": base_os, "architecture": architecture}}
run_architecture_validator_test(
mocker,
config_dict,
"cluster",
"architecture",
"base_os",
base_os,
architecture_os_validator,
expected_warnings,
expected_errors,
)
@pytest.mark.parametrize(
"disable_hyperthreading, architecture, expected_errors",
[
(True, "x86_64", []),
(False, "x86_64", []),
(
True,
"arm64",
["disable_hyperthreading is only supported on instance types that support these architectures"],
),
(False, "arm64", []),
],
)
def test_disable_hyperthreading_architecture_validator(mocker, disable_hyperthreading, architecture, expected_errors):
config_dict = {"cluster": {"architecture": architecture, "disable_hyperthreading": disable_hyperthreading}}
run_architecture_validator_test(
mocker,
config_dict,
"cluster",
"architecture",
"disable_hyperthreading",
disable_hyperthreading,
disable_hyperthreading_architecture_validator,
[],
expected_errors,
)
@pytest.mark.parametrize(
"head_node_architecture, compute_architecture, compute_instance_type, expected_errors",
[
# Single compute_instance_type
("x86_64", "x86_64", "c5.xlarge", []),
(
"x86_64",
"arm64",
"m6g.xlarge",
["none of which are compatible with the architecture supported by the master_instance_type"],
),
(
"arm64",
"x86_64",
"c5.xlarge",
["none of which are compatible with the architecture supported by the master_instance_type"],
),
("arm64", "arm64", "m6g.xlarge", []),
("x86_64", "x86_64", "optimal", []),
        # The function to get supported architectures shouldn't be called here, because these
        # compute_instance_type values are instance families rather than concrete instance types.
("x86_64", None, "m6g", []),
("x86_64", None, "c5", []),
# The validator must handle the case where compute_instance_type is a CSV list
("arm64", "arm64", "m6g.xlarge,r6g.xlarge", []),
(
"x86_64",
"arm64",
"m6g.xlarge,r6g.xlarge",
["none of which are compatible with the architecture supported by the master_instance_type"] * 2,
),
],
)
def test_instances_architecture_compatibility_validator(
mocker, caplog, head_node_architecture, compute_architecture, compute_instance_type, expected_errors
):
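    # Local heuristic mirroring what is_instance_type_format is mocked to do: dotted names and
    # "optimal" count as concrete instance types, bare names (e.g. "m6g", "c5") as instance families.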
def internal_is_instance_type(itype):
return "." in itype or itype == "optimal"
supported_architectures_patch = mocker.patch(
"pcluster.config.validators.get_supported_architectures_for_instance_type", return_value=[compute_architecture]
)
is_instance_type_patch = mocker.patch(
"pcluster.config.validators.is_instance_type_format", side_effect=internal_is_instance_type
)
logger_patch = mocker.patch.object(LOGFILE_LOGGER, "debug")
run_architecture_validator_test(
mocker,
{"cluster": {"architecture": head_node_architecture}},
"cluster",
"architecture",
"compute_instance_type",
compute_instance_type,
instances_architecture_compatibility_validator,
[],
expected_errors,
)
compute_instance_types = compute_instance_type.split(",")
non_instance_families = [
instance_type for instance_type in compute_instance_types if internal_is_instance_type(instance_type)
]
assert_that(supported_architectures_patch.call_count).is_equal_to(len(non_instance_families))
assert_that(logger_patch.call_count).is_equal_to(len(compute_instance_types) - len(non_instance_families))
assert_that(is_instance_type_patch.call_count).is_equal_to(len(compute_instance_types))
@pytest.mark.parametrize(
"section_dict, bucket, num_calls, expected_error",
[
(
{
"fsx_backup_id": "backup-0ff8da96d57f3b4e3",
"deployment_type": "PERSISTENT_1",
"per_unit_storage_throughput": 50,
},
None,
0,
"When restoring an FSx Lustre file system from backup, 'deployment_type' cannot be specified.",
),
(
{"fsx_backup_id": "backup-0ff8da96d57f3b4e3", "storage_capacity": 7200},
None,
0,
"When restoring an FSx Lustre file system from backup, 'storage_capacity' cannot be specified.",
),
(
{
"fsx_backup_id": "backup-0ff8da96d57f3b4e3",
"deployment_type": "PERSISTENT_1",
"per_unit_storage_throughput": 100,
},
None,
0,
"When restoring an FSx Lustre file system from backup, 'per_unit_storage_throughput' cannot be specified.",
),
(
{
"fsx_backup_id": "backup-0ff8da96d57f3b4e3",
"imported_file_chunk_size": 1024,
"export_path": "s3://test",
"import_path": "s3://test",
},
{"Bucket": "test"},
2,
"When restoring an FSx Lustre file system from backup, 'imported_file_chunk_size' cannot be specified.",
),
(
{
"fsx_backup_id": "backup-0ff8da96d57f3b4e3",
"fsx_kms_key_id": "somekey",
"deployment_type": "PERSISTENT_1",
"per_unit_storage_throughput": 50,
},
None,
0,
"When restoring an FSx Lustre file system from backup, 'fsx_kms_key_id' cannot be specified.",
),
(
{
"fsx_backup_id": "backup-00000000000000000",
"deployment_type": "PERSISTENT_1",
"per_unit_storage_throughput": 50,
},
None,
0,
"Failed to retrieve backup with Id 'backup-00000000000000000'",
),
],
)
def test_fsx_lustre_backup_validator(mocker, boto3_stubber, section_dict, bucket, num_calls, expected_error):
valid_key_id = "backup-0ff8da96d57f3b4e3"
describe_backups_response = {
"Backups": [
{
"BackupId": valid_key_id,
"Lifecycle": "AVAILABLE",
"Type": "USER_INITIATED",
"CreationTime": 1594159673.559,
"FileSystem": {
"StorageCapacity": 7200,
"StorageType": "SSD",
"LustreConfiguration": {"DeploymentType": "PERSISTENT_1", "PerUnitStorageThroughput": 200},
},
}
]
}
if bucket:
_head_bucket_stubber(mocker, boto3_stubber, bucket, num_calls)
generate_describe_backups_error = section_dict.get("fsx_backup_id") != valid_key_id
fsx_mocked_requests = [
MockedBoto3Request(
method="describe_backups",
response=expected_error if generate_describe_backups_error else describe_backups_response,
expected_params={"BackupIds": [section_dict.get("fsx_backup_id")]},
generate_error=generate_describe_backups_error,
)
]
boto3_stubber("fsx", fsx_mocked_requests)
if "fsx_kms_key_id" in section_dict:
describe_key_response = {"KeyMetadata": {"KeyId": section_dict.get("fsx_kms_key_id")}}
kms_mocked_requests = [
MockedBoto3Request(
method="describe_key",
response=describe_key_response,
expected_params={"KeyId": section_dict.get("fsx_kms_key_id")},
)
]
boto3_stubber("kms", kms_mocked_requests)
config_parser_dict = {"cluster default": {"fsx_settings": "default"}, "fsx default": section_dict}
utils.assert_param_validator(mocker, config_parser_dict, expected_error=expected_error)
#########
#
# ignored FSx params validator test
#
# Testing a validator that needs the fsx_fs_id parameter to be specified requires a lot of
# boto3 stubbing, due to the complexity of fsx_id_validator.
#
# Thus, the following code mocks the pcluster_config object passed to the validator functions
# and calls the validator directly.
#
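# A rough shape of that direct call (illustrative only, mirroring the test below):
#
#     mocked_pcluster_config = utils.get_mocked_pcluster_config(mocker)
#     # ...attach an "fsx" CfnSection carrying fsx_fs_id plus the parameter under test...
#     errors, warnings = fsx_ignored_parameters_validator("fsx", "default", mocked_pcluster_config)
#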
#########
@pytest.mark.parametrize(
"section_dict, expected_error",
[
({"fsx_fs_id": "fs-0123456789abcdef0", "shared_dir": "/fsx"}, None),
(
{"fsx_fs_id": "fs-0123456789abcdef0", "shared_dir": "/fsx", "storage_capacity": 3600},
"storage_capacity is ignored when specifying an existing Lustre file system",
),
],
)
def test_fsx_ignored_parameters_validator(mocker, section_dict, expected_error):
mocked_pcluster_config = utils.get_mocked_pcluster_config(mocker)
fsx_section = CfnSection(FSX, mocked_pcluster_config, "default")
for param_key, param_value in section_dict.items():
param = FSX.get("params").get(param_key).get("type", CfnParam)
param.value = param_value
fsx_section.set_param(param_key, param)
mocked_pcluster_config.add_section(fsx_section)
errors, warnings = fsx_ignored_parameters_validator("fsx", "default", mocked_pcluster_config)
assert_that(warnings).is_empty()
if expected_error:
assert_that(errors[0]).matches(expected_error)
else:
assert_that(errors).is_empty()
@pytest.mark.parametrize(
"section_dict, expected_error",
[
({"volume_type": "standard", "volume_size": 15}, None),
({"volume_type": "standard", "volume_size": 0}, "The size of standard volumes must be at least 1 GiB"),
({"volume_type": "standard", "volume_size": 1025}, "The size of standard volumes can not exceed 1024 GiB"),
({"volume_type": "io1", "volume_size": 15}, None),
({"volume_type": "io1", "volume_size": 3}, "The size of io1 volumes must be at least 4 GiB"),
({"volume_type": "io1", "volume_size": 16385}, "The size of io1 volumes can not exceed 16384 GiB"),
({"volume_type": "io2", "volume_size": 15}, None),
({"volume_type": "io2", "volume_size": 3}, "The size of io2 volumes must be at least 4 GiB"),
({"volume_type": "io2", "volume_size": 65537}, "The size of io2 volumes can not exceed 65536 GiB"),
({"volume_type": "gp2", "volume_size": 15}, None),
({"volume_type": "gp2", "volume_size": 0}, "The size of gp2 volumes must be at least 1 GiB"),
({"volume_type": "gp2", "volume_size": 16385}, "The size of gp2 volumes can not exceed 16384 GiB"),
({"volume_type": "gp3", "volume_size": 15}, None),
({"volume_type": "gp3", "volume_size": 0}, "The size of gp3 volumes must be at least 1 GiB"),
({"volume_type": "gp3", "volume_size": 16385}, "The size of gp3 volumes can not exceed 16384 GiB"),
({"volume_type": "st1", "volume_size": 500}, None),
({"volume_type": "st1", "volume_size": 20}, "The size of st1 volumes must be at least 500 GiB"),
({"volume_type": "st1", "volume_size": 16385}, "The size of st1 volumes can not exceed 16384 GiB"),
({"volume_type": "sc1", "volume_size": 500}, None),
({"volume_type": "sc1", "volume_size": 20}, "The size of sc1 volumes must be at least 500 GiB"),
({"volume_type": "sc1", "volume_size": 16385}, "The size of sc1 volumes can not exceed 16384 GiB"),
],
)
def test_ebs_volume_type_size_validator(mocker, section_dict, caplog, expected_error):
config_parser_dict = {"cluster default": {"ebs_settings": "default"}, "ebs default": section_dict}
utils.assert_param_validator(mocker, config_parser_dict, expected_error)
def test_ebs_allowed_values_all_have_volume_size_bounds():
"""Ensure that all known EBS volume types are accounted for by the volume size validator."""
allowed_values_all_have_volume_size_bounds = set(ALLOWED_VALUES["volume_types"]) <= set(
EBS_VOLUME_TYPE_TO_VOLUME_SIZE_BOUNDS.keys()
)
assert_that(allowed_values_all_have_volume_size_bounds).is_true()
@pytest.mark.parametrize(
"section_dict, expected_message",
[
({"volume_type": "io1", "volume_size": 20, "volume_iops": 120}, None),
(
{"volume_type": "io1", "volume_size": 20, "volume_iops": 90},
"IOPS rate must be between 100 and 64000 when provisioning io1 volumes.",
),
(
{"volume_type": "io1", "volume_size": 20, "volume_iops": 64001},
"IOPS rate must be between 100 and 64000 when provisioning io1 volumes.",
),
({"volume_type": "io1", "volume_size": 20, "volume_iops": 1001}, "IOPS to volume size ratio of .* is too high"),
({"volume_type": "io2", "volume_size": 20, "volume_iops": 120}, None),
(
{"volume_type": "io2", "volume_size": 20, "volume_iops": 90},
"IOPS rate must be between 100 and 256000 when provisioning io2 volumes.",
),
(
{"volume_type": "io2", "volume_size": 20, "volume_iops": 256001},
"IOPS rate must be between 100 and 256000 when provisioning io2 volumes.",
),
(
{"volume_type": "io2", "volume_size": 20, "volume_iops": 20001},
"IOPS to volume size ratio of .* is too high",
),
({"volume_type": "gp3", "volume_size": 20, "volume_iops": 3000}, None),
(
{"volume_type": "gp3", "volume_size": 20, "volume_iops": 2900},
"IOPS rate must be between 3000 and 16000 when provisioning gp3 volumes.",
),
(
{"volume_type": "gp3", "volume_size": 20, "volume_iops": 16001},
"IOPS rate must be between 3000 and 16000 when provisioning gp3 volumes.",
),
(
{"volume_type": "gp3", "volume_size": 20, "volume_iops": 10001},
"IOPS to volume size ratio of .* is too high",
),
],
)
def test_ebs_volume_iops_validator(mocker, section_dict, expected_message):
config_parser_dict = {"cluster default": {"ebs_settings": "default"}, "ebs default": section_dict}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
@pytest.mark.parametrize(
"section_dict, snapshot_size, state, partition, expected_warning, expected_error, "
"raise_error_when_getting_snapshot_info",
[
(
{"volume_size": 100, "ebs_snapshot_id": "snap-1234567890abcdef0"},
50,
"completed",
"aws-cn",
"The specified volume size is larger than snapshot size. In order to use the full capacity of the "
"volume, you'll need to manually resize the partition "
"according to this doc: "
"https://docs.amazonaws.cn/AWSEC2/latest/UserGuide/recognize-expanded-volume-linux.html",
None,
False,
),
(
{"volume_size": 100, "ebs_snapshot_id": "snap-1234567890abcdef0"},
50,
"completed",
"aws-us-gov",
"The specified volume size is larger than snapshot size. In order to use the full capacity of the "
"volume, you'll need to manually resize the partition "
"according to this doc: "
"https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/recognize-expanded-volume-linux.html",
None,
False,
),
(
{"volume_size": 100, "ebs_snapshot_id": "snap-1234567890abcdef0"},
50,
"incompleted",
"aws-us-gov",
"Snapshot snap-1234567890abcdef0 is in state 'incompleted' not 'completed'",
None,
False,
),
({"ebs_snapshot_id": "snap-1234567890abcdef0"}, 50, "completed", "partition", None, None, False),
(
{"volume_size": 100, "ebs_snapshot_id": "snap-1234567891abcdef0"},
120,
"completed",
"aws-us-gov",
None,
"The EBS volume size of the section 'default' must not be smaller than 120, because it is the size of the "
"provided snapshot snap-1234567891abcdef0",
False,
),
(
{"volume_size": 100, "ebs_snapshot_id": "snap-1234567890abcdef0"},
None,
"completed",
"aws-cn",
None,
"Unable to get volume size for snapshot snap-1234567890abcdef0",
False,
),
(
{"ebs_snapshot_id": "snap-1234567890abcdef0"},
20,
"completed",
"aws",
None,
"some message",
True,
),
],
)
def test_ebs_volume_size_snapshot_validator(
section_dict,
snapshot_size,
state,
partition,
mocker,
expected_warning,
expected_error,
raise_error_when_getting_snapshot_info,
capsys,
):
ebs_snapshot_id = section_dict["ebs_snapshot_id"]
describe_snapshots_response = {
"Description": "This is my snapshot",
"Encrypted": False,
"VolumeId": "vol-049df61146c4d7901",
"State": state,
"VolumeSize": snapshot_size,
"StartTime": "2014-02-28T21:28:32.000Z",
"Progress": "100%",
"OwnerId": "012345678910",
"SnapshotId": ebs_snapshot_id,
}
mocker.patch("pcluster.config.cfn_param_types.get_ebs_snapshot_info", return_value=describe_snapshots_response)
if raise_error_when_getting_snapshot_info:
mocker.patch("pcluster.config.validators.get_ebs_snapshot_info", side_effect=Exception(expected_error))
else:
mocker.patch("pcluster.config.validators.get_ebs_snapshot_info", return_value=describe_snapshots_response)
mocker.patch(
"pcluster.config.validators.get_partition", return_value="aws-cn" if partition == "aws-cn" else "aws-us-gov"
)
config_parser_dict = {"cluster default": {"ebs_settings": "default"}, "ebs default": section_dict}
utils.assert_param_validator(
mocker, config_parser_dict, expected_error=expected_error, capsys=capsys, expected_warning=expected_warning
)
@pytest.mark.parametrize(
"cluster_section_dict, ebs_section_dict1, ebs_section_dict2, expected_message",
[
(
{"shared_dir": "shared_directory", "ebs_settings": "vol1"},
{"volume_size": 30},
{},
None,
),
(
{"shared_dir": "shared_directory", "ebs_settings": "vol1"},
{"shared_dir": "shared_directory1"},
{},
"'shared_dir' can not be specified both in cluster section and EBS section",
),
(
{"shared_dir": "shared_directory", "ebs_settings": "vol1, vol2"},
{"shared_dir": "shared_directory1", "volume_size": 30},
{"shared_dir": "shared_directory2", "volume_size": 30},
"'shared_dir' can not be specified in cluster section when using multiple EBS volumes",
),
(
{"ebs_settings": "vol1, vol2"},
{"shared_dir": "shared_directory1", "volume_size": 30},
{"shared_dir": "shared_directory2", "volume_size": 30},
None,
),
(
{"ebs_settings": "vol1"},
{"volume_size": 30},
{},
None,
),
(
{"ebs_settings": "vol1"},
{},
{},
None,
),
(
{"shared_dir": "shared_directory"},
{},
{},
None,
),
],
)
def test_duplicate_shared_dir_validator(
mocker, cluster_section_dict, ebs_section_dict1, ebs_section_dict2, expected_message
):
config_parser_dict = {
"cluster default": cluster_section_dict,
"ebs vol1": ebs_section_dict1,
"ebs vol2": ebs_section_dict2,
}
utils.assert_param_validator(mocker, config_parser_dict, expected_error=expected_message)
@pytest.mark.parametrize(
"extra_json, expected_message",
[
(
{"extra_json": {"cluster": {"cfn_scheduler_slots": "1"}}},
"It is highly recommended to use the disable_hyperthreading parameter in order to control the "
"hyper-threading configuration in the cluster rather than using cfn_scheduler_slots in extra_json",
),
(
{"extra_json": {"cluster": {"cfn_scheduler_slots": "vcpus"}}},
"It is highly recommended to use the disable_hyperthreading parameter in order to control the "
"hyper-threading configuration in the cluster rather than using cfn_scheduler_slots in extra_json",
),
(
{"extra_json": {"cluster": {"cfn_scheduler_slots": "cores"}}},
"It is highly recommended to use the disable_hyperthreading parameter in order to control the "
"hyper-threading configuration in the cluster rather than using cfn_scheduler_slots in extra_json",
),
],
)
def test_extra_json_validator(mocker, capsys, extra_json, expected_message):
config_parser_dict = {"cluster default": extra_json}
utils.assert_param_validator(mocker, config_parser_dict, capsys=capsys, expected_warning=expected_message)
@pytest.mark.parametrize(
"cluster_dict, architecture, expected_error",
[
({"base_os": "alinux2", "enable_efa": "compute"}, "x86_64", None),
({"base_os": "alinux2", "enable_efa": "compute"}, "arm64", None),
({"base_os": "centos8", "enable_efa": "compute"}, "x86_64", None),
({"base_os": "centos8"}, "x86_64", None),
(
{"base_os": "centos8", "enable_efa": "compute"},
"arm64",
"EFA currently not supported on centos8 for arm64 architecture",
),
({"base_os": "centos8"}, "arm64", None), # must not fail because by default EFA is disabled
({"base_os": "ubuntu1804", "enable_efa": "compute"}, "x86_64", None),
({"base_os": "ubuntu1804", "enable_efa": "compute"}, "arm64", None),
],
)
def test_efa_os_arch_validator(mocker, cluster_dict, architecture, expected_error):
mocker.patch(
"pcluster.config.cfn_param_types.BaseOSCfnParam.get_instance_type_architecture", return_value=architecture
)
config_parser_dict = {"cluster default": cluster_dict}
config_parser = configparser.ConfigParser()
config_parser.read_dict(config_parser_dict)
pcluster_config = utils.init_pcluster_config_from_configparser(config_parser, False, auto_refresh=False)
pcluster_config.get_section("cluster").get_param("architecture").value = architecture
enable_efa_value = pcluster_config.get_section("cluster").get_param_value("enable_efa")
errors, warnings = efa_os_arch_validator("enable_efa", enable_efa_value, pcluster_config)
if expected_error:
assert_that(errors[0]).matches(expected_error)
else:
assert_that(errors).is_empty()
@pytest.mark.parametrize(
"section_dict, expected_message",
[
({"volume_type": "gp3", "volume_throughput": 125}, None),
(
{"volume_type": "gp3", "volume_throughput": 100},
"Throughput must be between 125 MB/s and 1000 MB/s when provisioning gp3 volumes.",
),
(
{"volume_type": "gp3", "volume_throughput": 1001},
"Throughput must be between 125 MB/s and 1000 MB/s when provisioning gp3 volumes.",
),
({"volume_type": "gp3", "volume_throughput": 125, "volume_iops": 3000}, None),
(
{"volume_type": "gp3", "volume_throughput": 760, "volume_iops": 3000},
"Throughput to IOPS ratio of .* is too high",
),
({"volume_type": "gp3", "volume_throughput": 760, "volume_iops": 10000}, None),
],
)
def test_ebs_volume_throughput_validator(mocker, section_dict, expected_message):
config_parser_dict = {"cluster default": {"ebs_settings": "default"}, "ebs default": section_dict}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
@pytest.mark.parametrize(
"region, expected_message",
[
("invalid-region", "Region 'invalid-region' is not yet officially supported "),
("us-east-1", None),
],
)
def test_region_validator(mocker, region, expected_message):
pcluster_config = utils.get_mocked_pcluster_config(mocker)
pcluster_config.region = region
errors, warnings = region_validator("aws", None, pcluster_config)
if expected_message:
assert_that(len(errors)).is_greater_than(0)
assert_that(errors[0]).matches(expected_message)
else:
assert_that(errors).is_empty()
@pytest.mark.parametrize(
"usage_class, supported_usage_classes, expected_error_message, expected_warning_message",
[
("ondemand", ["ondemand", "spot"], None, None),
("spot", ["ondemand", "spot"], None, None),
("ondemand", ["ondemand"], None, None),
("spot", ["spot"], None, None),
("spot", [], None, "Could not check support for usage class 'spot' with instance type 'instance-type'"),
("ondemand", [], None, "Could not check support for usage class 'ondemand' with instance type 'instance-type'"),
("spot", ["ondemand"], "Usage type 'spot' not supported with instance type 'instance-type'", None),
("ondemand", ["spot"], "Usage type 'ondemand' not supported with instance type 'instance-type'", None),
],
)
def test_check_usage_class(
mocker, usage_class, supported_usage_classes, expected_error_message, expected_warning_message
):
# This test checks the common logic triggered from cluster_type_validator and queue_compute_type_validator.
instance_type_info_mock = mocker.MagicMock()
mocker.patch(
"pcluster.config.cfn_param_types.InstanceTypeInfo.init_from_instance_type", return_value=instance_type_info_mock
)
instance_type_info_mock.supported_usage_classes.return_value = supported_usage_classes
errors = []
warnings = []
check_usage_class("instance-type", usage_class, errors, warnings)
if expected_error_message:
assert_that(errors).contains(expected_error_message)
else:
assert_that(errors).is_empty()
if expected_warning_message:
assert_that(warnings).contains(expected_warning_message)
else:
assert_that(warnings).is_empty()
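

# Behaviour exercised above, restated as a sketch inferred from the parametrization (not
# copied from the real implementation):
#
#   supported = instance_type_info.supported_usage_classes()
#   if not supported:
#       warnings.append("Could not check support for usage class '<usage_class>' ...")
#   elif usage_class not in supported:
#       errors.append("Usage type '<usage_class>' not supported with instance type '<type>'")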
@pytest.mark.parametrize(
"scheduler, expected_usage_class_check", [("sge", True), ("torque", True), ("slurm", True), ("awsbatch", False)]
)
def test_cluster_type_validator(mocker, scheduler, expected_usage_class_check):
# Usage class validation logic is tested in `test_check_usage_class`.
# This test only makes sure that the logic is triggered from validator.
mock = mocker.patch("pcluster.config.validators.check_usage_class", return_value=None)
cluster_dict = {"compute_instance_type": "t2.micro", "scheduler": scheduler}
config_parser_dict = {"cluster default": cluster_dict}
config_parser = configparser.ConfigParser()
config_parser.read_dict(config_parser_dict)
pcluster_config = utils.init_pcluster_config_from_configparser(config_parser, False, auto_refresh=False)
errors, warnings = cluster_type_validator("compute_type", "spot", pcluster_config)
if expected_usage_class_check:
mock.assert_called_with("t2.micro", "spot", [], [])
else:
mock.assert_not_called()
assert_that(errors).is_equal_to([])
assert_that(warnings).is_equal_to([])
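

# Illustrative note: for sge/torque/slurm the "compute_type" value ("spot" here) is expected
# to be forwarded to a single check_usage_class call for the cluster's compute_instance_type,
# while awsbatch skips the usage-class check entirely (asserted via mock.assert_not_called()).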
@pytest.mark.parametrize("compute_type", [("ondemand"), ("spot")])
def test_queue_compute_type_validator(mocker, compute_type):
# Usage class validation logic is tested in `test_check_usage_class`.
# This test only makes sure that the logic is triggered from validator.
mock = mocker.patch("pcluster.config.validators.check_usage_class", return_value=None)
config_parser_dict = {
"cluster default": {
"queue_settings": "q1",
},
"queue q1": {"compute_resource_settings": "q1cr1, q1cr2", "compute_type": compute_type},
"compute_resource q1cr1": {"instance_type": "q1cr1_instance_type"},
"compute_resource q1cr2": {"instance_type": "q1cr2_instance_type"},
}
config_parser = configparser.ConfigParser()
config_parser.read_dict(config_parser_dict)
pcluster_config = utils.init_pcluster_config_from_configparser(config_parser, False, auto_refresh=False)
errors, warnings = queue_compute_type_validator("queue", "q1", pcluster_config)
mock.assert_has_calls(
[
mocker.call("q1cr1_instance_type", compute_type, [], []),
mocker.call("q1cr2_instance_type", compute_type, [], []),
],
any_order=True,
)
assert_that(errors).is_equal_to([])
assert_that(warnings).is_equal_to([])
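

# Illustrative note: with queue_settings = q1 pointing at two compute resources, the queue
# validator is expected to fan out one check_usage_class call per compute resource's
# instance_type, forwarding the queue-level compute_type to each (asserted above with
# mock.assert_has_calls(..., any_order=True)).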
Lustre file system from backup, \'fsx_kms_key_id\' cannot be specified."\n ), ({\'fsx_backup_id\': \'backup-00000000000000000\', \'deployment_type\':\n \'PERSISTENT_1\', \'per_unit_storage_throughput\': 50}, None, 0,\n "Failed to retrieve backup with Id \'backup-00000000000000000\'")])', False, 'import pytest\n'), ((2572, 1, 2581, 1), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(2573, 4, 2573, 34): '"""section_dict, expected_error"""', (2574, 4, 2580, 5): "[({'fsx_fs_id': 'fs-0123456789abcdef0', 'shared_dir': '/fsx'}, None), ({\n 'fsx_fs_id': 'fs-0123456789abcdef0', 'shared_dir': '/fsx',\n 'storage_capacity': 3600},\n 'storage_capacity is ignored when specifying an existing Lustre file system'\n )]"}, {}), "('section_dict, expected_error', [({'fsx_fs_id':\n 'fs-0123456789abcdef0', 'shared_dir': '/fsx'}, None), ({'fsx_fs_id':\n 'fs-0123456789abcdef0', 'shared_dir': '/fsx', 'storage_capacity': 3600},\n 'storage_capacity is ignored when specifying an existing Lustre file system'\n )])", False, 'import pytest\n'), ((2598, 1, 2623, 1), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(2599, 4, 2599, 34): '"""section_dict, expected_error"""', (2600, 4, 2622, 5): "[({'volume_type': 'standard', 'volume_size': 15}, None), ({'volume_type':\n 'standard', 'volume_size': 0},\n 'The size of standard volumes must be at least 1 GiB'), ({'volume_type':\n 'standard', 'volume_size': 1025},\n 'The size of standard volumes can not exceed 1024 GiB'), ({\n 'volume_type': 'io1', 'volume_size': 15}, None), ({'volume_type': 'io1',\n 'volume_size': 3}, 'The size of io1 volumes must be at least 4 GiB'), (\n {'volume_type': 'io1', 'volume_size': 16385},\n 'The size of io1 volumes can not exceed 16384 GiB'), ({'volume_type':\n 'io2', 'volume_size': 15}, None), ({'volume_type': 'io2', 'volume_size':\n 3}, 'The size of io2 volumes must be at least 4 GiB'), ({'volume_type':\n 'io2', 'volume_size': 65537},\n 'The size of io2 volumes can not exceed 65536 GiB'), ({'volume_type':\n 'gp2', 'volume_size': 15}, None), ({'volume_type': 'gp2', 'volume_size':\n 0}, 'The size of gp2 volumes must be at least 1 GiB'), ({'volume_type':\n 'gp2', 'volume_size': 16385},\n 'The size of gp2 volumes can not exceed 16384 GiB'), ({'volume_type':\n 'gp3', 'volume_size': 15}, None), ({'volume_type': 'gp3', 'volume_size':\n 0}, 'The size of gp3 volumes must be at least 1 GiB'), ({'volume_type':\n 'gp3', 'volume_size': 16385},\n 'The size of gp3 volumes can not exceed 16384 GiB'), ({'volume_type':\n 'st1', 'volume_size': 500}, None), ({'volume_type': 'st1',\n 'volume_size': 20}, 'The size of st1 volumes must be at least 500 GiB'),\n ({'volume_type': 'st1', 'volume_size': 16385},\n 'The size of st1 volumes can not exceed 16384 GiB'), ({'volume_type':\n 'sc1', 'volume_size': 500}, None), ({'volume_type': 'sc1',\n 'volume_size': 20}, 'The size of sc1 volumes must be at least 500 GiB'),\n ({'volume_type': 'sc1', 'volume_size': 16385},\n 'The size of sc1 volumes can not exceed 16384 GiB')]"}, {}), "('section_dict, expected_error', [({'volume_type':\n 'standard', 'volume_size': 15}, None), ({'volume_type': 'standard',\n 'volume_size': 0},\n 'The size of standard volumes must be at least 1 GiB'), ({'volume_type':\n 'standard', 'volume_size': 1025},\n 'The size of standard volumes can not exceed 1024 GiB'), ({\n 'volume_type': 'io1', 'volume_size': 15}, None), ({'volume_type': 'io1',\n 'volume_size': 3}, 'The size of io1 volumes must be at least 4 GiB'), (\n {'volume_type': 'io1', 'volume_size': 16385},\n 'The size of io1 volumes can 
not exceed 16384 GiB'), ({'volume_type':\n 'io2', 'volume_size': 15}, None), ({'volume_type': 'io2', 'volume_size':\n 3}, 'The size of io2 volumes must be at least 4 GiB'), ({'volume_type':\n 'io2', 'volume_size': 65537},\n 'The size of io2 volumes can not exceed 65536 GiB'), ({'volume_type':\n 'gp2', 'volume_size': 15}, None), ({'volume_type': 'gp2', 'volume_size':\n 0}, 'The size of gp2 volumes must be at least 1 GiB'), ({'volume_type':\n 'gp2', 'volume_size': 16385},\n 'The size of gp2 volumes can not exceed 16384 GiB'), ({'volume_type':\n 'gp3', 'volume_size': 15}, None), ({'volume_type': 'gp3', 'volume_size':\n 0}, 'The size of gp3 volumes must be at least 1 GiB'), ({'volume_type':\n 'gp3', 'volume_size': 16385},\n 'The size of gp3 volumes can not exceed 16384 GiB'), ({'volume_type':\n 'st1', 'volume_size': 500}, None), ({'volume_type': 'st1',\n 'volume_size': 20}, 'The size of st1 volumes must be at least 500 GiB'),\n ({'volume_type': 'st1', 'volume_size': 16385},\n 'The size of st1 volumes can not exceed 16384 GiB'), ({'volume_type':\n 'sc1', 'volume_size': 500}, None), ({'volume_type': 'sc1',\n 'volume_size': 20}, 'The size of sc1 volumes must be at least 500 GiB'),\n ({'volume_type': 'sc1', 'volume_size': 16385},\n 'The size of sc1 volumes can not exceed 16384 GiB')])", False, 'import pytest\n'), ((2637, 1, 2677, 1), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(2638, 4, 2638, 36): '"""section_dict, expected_message"""', (2639, 4, 2676, 5): "[({'volume_type': 'io1', 'volume_size': 20, 'volume_iops': 120}, None), ({\n 'volume_type': 'io1', 'volume_size': 20, 'volume_iops': 90},\n 'IOPS rate must be between 100 and 64000 when provisioning io1 volumes.'\n ), ({'volume_type': 'io1', 'volume_size': 20, 'volume_iops': 64001},\n 'IOPS rate must be between 100 and 64000 when provisioning io1 volumes.'\n ), ({'volume_type': 'io1', 'volume_size': 20, 'volume_iops': 1001},\n 'IOPS to volume size ratio of .* is too high'), ({'volume_type': 'io2',\n 'volume_size': 20, 'volume_iops': 120}, None), ({'volume_type': 'io2',\n 'volume_size': 20, 'volume_iops': 90},\n 'IOPS rate must be between 100 and 256000 when provisioning io2 volumes.'\n ), ({'volume_type': 'io2', 'volume_size': 20, 'volume_iops': 256001},\n 'IOPS rate must be between 100 and 256000 when provisioning io2 volumes.'\n ), ({'volume_type': 'io2', 'volume_size': 20, 'volume_iops': 20001},\n 'IOPS to volume size ratio of .* is too high'), ({'volume_type': 'gp3',\n 'volume_size': 20, 'volume_iops': 3000}, None), ({'volume_type': 'gp3',\n 'volume_size': 20, 'volume_iops': 2900},\n 'IOPS rate must be between 3000 and 16000 when provisioning gp3 volumes.'\n ), ({'volume_type': 'gp3', 'volume_size': 20, 'volume_iops': 16001},\n 'IOPS rate must be between 3000 and 16000 when provisioning gp3 volumes.'\n ), ({'volume_type': 'gp3', 'volume_size': 20, 'volume_iops': 10001},\n 'IOPS to volume size ratio of .* is too high')]"}, {}), "('section_dict, expected_message', [({'volume_type':\n 'io1', 'volume_size': 20, 'volume_iops': 120}, None), ({'volume_type':\n 'io1', 'volume_size': 20, 'volume_iops': 90},\n 'IOPS rate must be between 100 and 64000 when provisioning io1 volumes.'\n ), ({'volume_type': 'io1', 'volume_size': 20, 'volume_iops': 64001},\n 'IOPS rate must be between 100 and 64000 when provisioning io1 volumes.'\n ), ({'volume_type': 'io1', 'volume_size': 20, 'volume_iops': 1001},\n 'IOPS to volume size ratio of .* is too high'), ({'volume_type': 'io2',\n 'volume_size': 20, 'volume_iops': 120}, None), ({'volume_type': 
'io2',\n 'volume_size': 20, 'volume_iops': 90},\n 'IOPS rate must be between 100 and 256000 when provisioning io2 volumes.'\n ), ({'volume_type': 'io2', 'volume_size': 20, 'volume_iops': 256001},\n 'IOPS rate must be between 100 and 256000 when provisioning io2 volumes.'\n ), ({'volume_type': 'io2', 'volume_size': 20, 'volume_iops': 20001},\n 'IOPS to volume size ratio of .* is too high'), ({'volume_type': 'gp3',\n 'volume_size': 20, 'volume_iops': 3000}, None), ({'volume_type': 'gp3',\n 'volume_size': 20, 'volume_iops': 2900},\n 'IOPS rate must be between 3000 and 16000 when provisioning gp3 volumes.'\n ), ({'volume_type': 'gp3', 'volume_size': 20, 'volume_iops': 16001},\n 'IOPS rate must be between 3000 and 16000 when provisioning gp3 volumes.'\n ), ({'volume_type': 'gp3', 'volume_size': 20, 'volume_iops': 10001},\n 'IOPS to volume size ratio of .* is too high')])", False, 'import pytest\n'), ((2683, 1, 2750, 1), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(2684, 4, 2685, 44): '"""section_dict, snapshot_size, state, partition, expected_warning, expected_error, raise_error_when_getting_snapshot_info"""', (2686, 4, 2749, 5): '[({\'volume_size\': 100, \'ebs_snapshot_id\': \'snap-1234567890abcdef0\'}, 50,\n \'completed\', \'aws-cn\',\n "The specified volume size is larger than snapshot size. In order to use the full capacity of the volume, you\'ll need to manually resize the partition according to this doc: https://docs.amazonaws.cn/AWSEC2/latest/UserGuide/recognize-expanded-volume-linux.html"\n , None, False), ({\'volume_size\': 100, \'ebs_snapshot_id\':\n \'snap-1234567890abcdef0\'}, 50, \'completed\', \'aws-us-gov\',\n "The specified volume size is larger than snapshot size. In order to use the full capacity of the volume, you\'ll need to manually resize the partition according to this doc: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/recognize-expanded-volume-linux.html"\n , None, False), ({\'volume_size\': 100, \'ebs_snapshot_id\':\n \'snap-1234567890abcdef0\'}, 50, \'incompleted\', \'aws-us-gov\',\n "Snapshot snap-1234567890abcdef0 is in state \'incompleted\' not \'completed\'"\n , None, False), ({\'ebs_snapshot_id\': \'snap-1234567890abcdef0\'}, 50,\n \'completed\', \'partition\', None, None, False), ({\'volume_size\': 100,\n \'ebs_snapshot_id\': \'snap-1234567891abcdef0\'}, 120, \'completed\',\n \'aws-us-gov\', None,\n "The EBS volume size of the section \'default\' must not be smaller than 120, because it is the size of the provided snapshot snap-1234567891abcdef0"\n , False), ({\'volume_size\': 100, \'ebs_snapshot_id\':\n \'snap-1234567890abcdef0\'}, None, \'completed\', \'aws-cn\', None,\n \'Unable to get volume size for snapshot snap-1234567890abcdef0\', False),\n ({\'ebs_snapshot_id\': \'snap-1234567890abcdef0\'}, 20, \'completed\', \'aws\',\n None, \'some message\', True)]'}, {}), '(\n \'section_dict, snapshot_size, state, partition, expected_warning, expected_error, raise_error_when_getting_snapshot_info\'\n , [({\'volume_size\': 100, \'ebs_snapshot_id\': \'snap-1234567890abcdef0\'}, \n 50, \'completed\', \'aws-cn\',\n "The specified volume size is larger than snapshot size. 
In order to use the full capacity of the volume, you\'ll need to manually resize the partition according to this doc: https://docs.amazonaws.cn/AWSEC2/latest/UserGuide/recognize-expanded-volume-linux.html"\n , None, False), ({\'volume_size\': 100, \'ebs_snapshot_id\':\n \'snap-1234567890abcdef0\'}, 50, \'completed\', \'aws-us-gov\',\n "The specified volume size is larger than snapshot size. In order to use the full capacity of the volume, you\'ll need to manually resize the partition according to this doc: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/recognize-expanded-volume-linux.html"\n , None, False), ({\'volume_size\': 100, \'ebs_snapshot_id\':\n \'snap-1234567890abcdef0\'}, 50, \'incompleted\', \'aws-us-gov\',\n "Snapshot snap-1234567890abcdef0 is in state \'incompleted\' not \'completed\'"\n , None, False), ({\'ebs_snapshot_id\': \'snap-1234567890abcdef0\'}, 50,\n \'completed\', \'partition\', None, None, False), ({\'volume_size\': 100,\n \'ebs_snapshot_id\': \'snap-1234567891abcdef0\'}, 120, \'completed\',\n \'aws-us-gov\', None,\n "The EBS volume size of the section \'default\' must not be smaller than 120, because it is the size of the provided snapshot snap-1234567891abcdef0"\n , False), ({\'volume_size\': 100, \'ebs_snapshot_id\':\n \'snap-1234567890abcdef0\'}, None, \'completed\', \'aws-cn\', None,\n \'Unable to get volume size for snapshot snap-1234567890abcdef0\', False),\n ({\'ebs_snapshot_id\': \'snap-1234567890abcdef0\'}, 20, \'completed\', \'aws\',\n None, \'some message\', True)])', False, 'import pytest\n'), ((2789, 1, 2835, 1), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(2790, 4, 2790, 82): '"""cluster_section_dict, ebs_section_dict1, ebs_section_dict2, expected_message"""', (2791, 4, 2834, 5): '[({\'shared_dir\': \'shared_directory\', \'ebs_settings\': \'vol1\'}, {\n \'volume_size\': 30}, {}, None), ({\'shared_dir\': \'shared_directory\',\n \'ebs_settings\': \'vol1\'}, {\'shared_dir\': \'shared_directory1\'}, {},\n "\'shared_dir\' can not be specified both in cluster section and EBS section"\n ), ({\'shared_dir\': \'shared_directory\', \'ebs_settings\': \'vol1, vol2\'}, {\n \'shared_dir\': \'shared_directory1\', \'volume_size\': 30}, {\'shared_dir\':\n \'shared_directory2\', \'volume_size\': 30},\n "\'shared_dir\' can not be specified in cluster section when using multiple EBS volumes"\n ), ({\'ebs_settings\': \'vol1, vol2\'}, {\'shared_dir\': \'shared_directory1\',\n \'volume_size\': 30}, {\'shared_dir\': \'shared_directory2\', \'volume_size\': \n 30}, None), ({\'ebs_settings\': \'vol1\'}, {\'volume_size\': 30}, {}, None),\n ({\'ebs_settings\': \'vol1\'}, {}, {}, None), ({\'shared_dir\':\n \'shared_directory\'}, {}, {}, None)]'}, {}), '(\n \'cluster_section_dict, ebs_section_dict1, ebs_section_dict2, expected_message\'\n , [({\'shared_dir\': \'shared_directory\', \'ebs_settings\': \'vol1\'}, {\n \'volume_size\': 30}, {}, None), ({\'shared_dir\': \'shared_directory\',\n \'ebs_settings\': \'vol1\'}, {\'shared_dir\': \'shared_directory1\'}, {},\n "\'shared_dir\' can not be specified both in cluster section and EBS section"\n ), ({\'shared_dir\': \'shared_directory\', \'ebs_settings\': \'vol1, vol2\'}, {\n \'shared_dir\': \'shared_directory1\', \'volume_size\': 30}, {\'shared_dir\':\n \'shared_directory2\', \'volume_size\': 30},\n "\'shared_dir\' can not be specified in cluster section when using multiple EBS volumes"\n ), ({\'ebs_settings\': \'vol1, vol2\'}, {\'shared_dir\': \'shared_directory1\',\n \'volume_size\': 30}, {\'shared_dir\': 
\'shared_directory2\', \'volume_size\': \n 30}, None), ({\'ebs_settings\': \'vol1\'}, {\'volume_size\': 30}, {}, None),\n ({\'ebs_settings\': \'vol1\'}, {}, {}, None), ({\'shared_dir\':\n \'shared_directory\'}, {}, {}, None)])', False, 'import pytest\n'), ((2848, 1, 2867, 1), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(2849, 4, 2849, 34): '"""extra_json, expected_message"""', (2850, 4, 2866, 5): "[({'extra_json': {'cluster': {'cfn_scheduler_slots': '1'}}},\n 'It is highly recommended to use the disable_hyperthreading parameter in order to control the hyper-threading configuration in the cluster rather than using cfn_scheduler_slots in extra_json'\n ), ({'extra_json': {'cluster': {'cfn_scheduler_slots': 'vcpus'}}},\n 'It is highly recommended to use the disable_hyperthreading parameter in order to control the hyper-threading configuration in the cluster rather than using cfn_scheduler_slots in extra_json'\n ), ({'extra_json': {'cluster': {'cfn_scheduler_slots': 'cores'}}},\n 'It is highly recommended to use the disable_hyperthreading parameter in order to control the hyper-threading configuration in the cluster rather than using cfn_scheduler_slots in extra_json'\n )]"}, {}), "('extra_json, expected_message', [({'extra_json': {\n 'cluster': {'cfn_scheduler_slots': '1'}}},\n 'It is highly recommended to use the disable_hyperthreading parameter in order to control the hyper-threading configuration in the cluster rather than using cfn_scheduler_slots in extra_json'\n ), ({'extra_json': {'cluster': {'cfn_scheduler_slots': 'vcpus'}}},\n 'It is highly recommended to use the disable_hyperthreading parameter in order to control the hyper-threading configuration in the cluster rather than using cfn_scheduler_slots in extra_json'\n ), ({'extra_json': {'cluster': {'cfn_scheduler_slots': 'cores'}}},\n 'It is highly recommended to use the disable_hyperthreading parameter in order to control the hyper-threading configuration in the cluster rather than using cfn_scheduler_slots in extra_json'\n )])", False, 'import pytest\n'), ((2873, 1, 2889, 1), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(2874, 4, 2874, 48): '"""cluster_dict, architecture, expected_error"""', (2875, 4, 2888, 5): "[({'base_os': 'alinux2', 'enable_efa': 'compute'}, 'x86_64', None), ({\n 'base_os': 'alinux2', 'enable_efa': 'compute'}, 'arm64', None), ({\n 'base_os': 'centos8', 'enable_efa': 'compute'}, 'x86_64', None), ({\n 'base_os': 'centos8'}, 'x86_64', None), ({'base_os': 'centos8',\n 'enable_efa': 'compute'}, 'arm64',\n 'EFA currently not supported on centos8 for arm64 architecture'), ({\n 'base_os': 'centos8'}, 'arm64', None), ({'base_os': 'ubuntu1804',\n 'enable_efa': 'compute'}, 'x86_64', None), ({'base_os': 'ubuntu1804',\n 'enable_efa': 'compute'}, 'arm64', None)]"}, {}), "('cluster_dict, architecture, expected_error', [({\n 'base_os': 'alinux2', 'enable_efa': 'compute'}, 'x86_64', None), ({\n 'base_os': 'alinux2', 'enable_efa': 'compute'}, 'arm64', None), ({\n 'base_os': 'centos8', 'enable_efa': 'compute'}, 'x86_64', None), ({\n 'base_os': 'centos8'}, 'x86_64', None), ({'base_os': 'centos8',\n 'enable_efa': 'compute'}, 'arm64',\n 'EFA currently not supported on centos8 for arm64 architecture'), ({\n 'base_os': 'centos8'}, 'arm64', None), ({'base_os': 'ubuntu1804',\n 'enable_efa': 'compute'}, 'x86_64', None), ({'base_os': 'ubuntu1804',\n 'enable_efa': 'compute'}, 'arm64', None)])", False, 'import pytest\n'), ((2910, 1, 2929, 1), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(2911, 4, 2911, 36): 
'"""section_dict, expected_message"""', (2912, 4, 2928, 5): "[({'volume_type': 'gp3', 'volume_throughput': 125}, None), ({'volume_type':\n 'gp3', 'volume_throughput': 100},\n 'Throughput must be between 125 MB/s and 1000 MB/s when provisioning gp3 volumes.'\n ), ({'volume_type': 'gp3', 'volume_throughput': 1001},\n 'Throughput must be between 125 MB/s and 1000 MB/s when provisioning gp3 volumes.'\n ), ({'volume_type': 'gp3', 'volume_throughput': 125, 'volume_iops': \n 3000}, None), ({'volume_type': 'gp3', 'volume_throughput': 760,\n 'volume_iops': 3000}, 'Throughput to IOPS ratio of .* is too high'), ({\n 'volume_type': 'gp3', 'volume_throughput': 760, 'volume_iops': 10000},\n None)]"}, {}), "('section_dict, expected_message', [({'volume_type':\n 'gp3', 'volume_throughput': 125}, None), ({'volume_type': 'gp3',\n 'volume_throughput': 100},\n 'Throughput must be between 125 MB/s and 1000 MB/s when provisioning gp3 volumes.'\n ), ({'volume_type': 'gp3', 'volume_throughput': 1001},\n 'Throughput must be between 125 MB/s and 1000 MB/s when provisioning gp3 volumes.'\n ), ({'volume_type': 'gp3', 'volume_throughput': 125, 'volume_iops': \n 3000}, None), ({'volume_type': 'gp3', 'volume_throughput': 760,\n 'volume_iops': 3000}, 'Throughput to IOPS ratio of .* is too high'), ({\n 'volume_type': 'gp3', 'volume_throughput': 760, 'volume_iops': 10000},\n None)])", False, 'import pytest\n'), ((2935, 1, 2941, 1), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(2936, 4, 2936, 30): '"""region, expected_message"""', (2937, 4, 2940, 5): '[(\'invalid-region\',\n "Region \'invalid-region\' is not yet officially supported "), (\n \'us-east-1\', None)]'}, {}), '(\'region, expected_message\', [(\'invalid-region\',\n "Region \'invalid-region\' is not yet officially supported "), (\n \'us-east-1\', None)])', False, 'import pytest\n'), ((2954, 1, 2966, 1), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(2955, 4, 2955, 92): '"""usage_class, supported_usage_classes, expected_error_message, expected_warning_message"""', (2956, 4, 2965, 5): '[(\'ondemand\', [\'ondemand\', \'spot\'], None, None), (\'spot\', [\'ondemand\',\n \'spot\'], None, None), (\'ondemand\', [\'ondemand\'], None, None), (\'spot\',\n [\'spot\'], None, None), (\'spot\', [], None,\n "Could not check support for usage class \'spot\' with instance type \'instance-type\'"\n ), (\'ondemand\', [], None,\n "Could not check support for usage class \'ondemand\' with instance type \'instance-type\'"\n ), (\'spot\', [\'ondemand\'],\n "Usage type \'spot\' not supported with instance type \'instance-type\'",\n None), (\'ondemand\', [\'spot\'],\n "Usage type \'ondemand\' not supported with instance type \'instance-type\'",\n None)]'}, {}), '(\n \'usage_class, supported_usage_classes, expected_error_message, expected_warning_message\'\n , [(\'ondemand\', [\'ondemand\', \'spot\'], None, None), (\'spot\', [\'ondemand\',\n \'spot\'], None, None), (\'ondemand\', [\'ondemand\'], None, None), (\'spot\',\n [\'spot\'], None, None), (\'spot\', [], None,\n "Could not check support for usage class \'spot\' with instance type \'instance-type\'"\n ), (\'ondemand\', [], None,\n "Could not check support for usage class \'ondemand\' with instance type \'instance-type\'"\n ), (\'spot\', [\'ondemand\'],\n "Usage type \'spot\' not supported with instance type \'instance-type\'",\n None), (\'ondemand\', [\'spot\'],\n "Usage type \'ondemand\' not supported with instance type \'instance-type\'",\n None)])', False, 'import pytest\n'), ((2992, 1, 2994, 1), 
'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(2993, 4, 2993, 43): '"""scheduler, expected_usage_class_check"""', (2993, 45, 2993, 116): "[('sge', True), ('torque', True), ('slurm', True), ('awsbatch', False)]"}, {}), "('scheduler, expected_usage_class_check', [('sge', \n True), ('torque', True), ('slurm', True), ('awsbatch', False)])", False, 'import pytest\n'), ((3015, 1, 3015, 66), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(3015, 25, 3015, 39): '"""compute_type"""', (3015, 41, 3015, 65): "['ondemand', 'spot']"}, {}), "('compute_type', ['ondemand', 'spot'])", False, 'import pytest\n'), ((93, 4, 95, 5), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', (), '', True, 'import tests.pcluster.config.utils as utils\n'), ((103, 4, 103, 78), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', ({(103, 33, 103, 39): 'mocker', (103, 41, 103, 59): 'config_parser_dict', (103, 61, 103, 77): 'expected_message'}, {}), '(mocker, config_parser_dict, expected_message)', True, 'import tests.pcluster.config.utils as utils\n'), ((109, 4, 109, 78), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', ({(109, 33, 109, 39): 'mocker', (109, 41, 109, 59): 'config_parser_dict', (109, 61, 109, 77): 'expected_message'}, {}), '(mocker, config_parser_dict, expected_message)', True, 'import tests.pcluster.config.utils as utils\n'), ((134, 4, 136, 5), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', (), '', True, 'import tests.pcluster.config.utils as utils\n'), ((154, 4, 154, 60), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', ({(154, 33, 154, 39): 'mocker', (154, 41, 154, 59): 'config_parser_dict'}, {}), '(mocker, config_parser_dict)', True, 'import tests.pcluster.config.utils as utils\n'), ((218, 4, 218, 78), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', ({(218, 33, 218, 39): 'mocker', (218, 41, 218, 59): 'config_parser_dict', (218, 61, 218, 77): 'expected_message'}, {}), '(mocker, config_parser_dict, expected_message)', True, 'import tests.pcluster.config.utils as utils\n'), ((233, 4, 233, 93), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', (), '', True, 'import tests.pcluster.config.utils as utils\n'), ((275, 4, 275, 60), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', ({(275, 33, 275, 39): 'mocker', (275, 41, 275, 59): 'config_parser_dict'}, {}), '(mocker, config_parser_dict)', True, 'import tests.pcluster.config.utils as utils\n'), ((339, 4, 339, 104), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', ({(339, 33, 339, 39): 'mocker', (339, 41, 339, 59): 'config_parser_dict', (339, 61, 339, 77): 'expected_message', (339, 79, 339, 85): 'capsys', (339, 87, 339, 103): 'expected_warning'}, {}), '(mocker, config_parser_dict, expected_message,\n capsys, expected_warning)', True, 'import tests.pcluster.config.utils as utils\n'), ((357, 4, 357, 60), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', ({(357, 33, 357, 39): 'mocker', (357, 41, 357, 59): 'config_parser_dict'}, {}), '(mocker, config_parser_dict)', True, 'import tests.pcluster.config.utils as utils\n'), ((516, 4, 516, 66), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', ({(516, 33, 516, 39): 'mocker', (516, 41, 516, 47): 'config', (516, 49, 
516, 65): 'expected_message'}, {}), '(mocker, config, expected_message)', True, 'import tests.pcluster.config.utils as utils\n'), ((553, 4, 553, 66), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', ({(553, 33, 553, 39): 'mocker', (553, 41, 553, 47): 'config', (553, 49, 553, 65): 'expected_message'}, {}), '(mocker, config, expected_message)', True, 'import tests.pcluster.config.utils as utils\n'), ((643, 20, 643, 47), 'configparser.ConfigParser', 'configparser.ConfigParser', ({}, {}), '()', False, 'import configparser\n'), ((646, 22, 646, 108), 'tests.pcluster.config.utils.init_pcluster_config_from_configparser', 'utils.init_pcluster_config_from_configparser', (), '', True, 'import tests.pcluster.config.utils as utils\n'), ((647, 23, 647, 101), 'pcluster.config.validators.s3_bucket_region_validator', 's3_bucket_region_validator', ({(647, 50, 647, 75): '"""cluster_resource_bucket"""', (647, 77, 647, 83): 'bucket', (647, 85, 647, 100): 'pcluster_config'}, {}), "('cluster_resource_bucket', bucket, pcluster_config)", False, 'from pcluster.config.validators import DCV_MESSAGES, EBS_VOLUME_TYPE_TO_VOLUME_SIZE_BOUNDS, FSX_MESSAGES, FSX_SUPPORTED_ARCHITECTURES_OSES, LOGFILE_LOGGER, architecture_os_validator, check_usage_class, cluster_type_validator, compute_resource_validator, disable_hyperthreading_architecture_validator, efa_gdr_validator, efa_os_arch_validator, fsx_ignored_parameters_validator, instances_architecture_compatibility_validator, intel_hpc_architecture_validator, queue_compute_type_validator, queue_validator, region_validator, s3_bucket_region_validator, settings_validator\n'), ((741, 4, 741, 60), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', ({(741, 33, 741, 39): 'mocker', (741, 41, 741, 59): 'config_parser_dict'}, {}), '(mocker, config_parser_dict)', True, 'import tests.pcluster.config.utils as utils\n'), ((780, 4, 780, 60), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', ({(780, 33, 780, 39): 'mocker', (780, 41, 780, 59): 'config_parser_dict'}, {}), '(mocker, config_parser_dict)', True, 'import tests.pcluster.config.utils as utils\n'), ((798, 4, 798, 78), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', ({(798, 33, 798, 39): 'mocker', (798, 41, 798, 59): 'config_parser_dict', (798, 61, 798, 77): 'expected_message'}, {}), '(mocker, config_parser_dict, expected_message)', True, 'import tests.pcluster.config.utils as utils\n'), ((844, 4, 844, 78), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', ({(844, 33, 844, 39): 'mocker', (844, 41, 844, 59): 'config_parser_dict', (844, 61, 844, 77): 'expected_message'}, {}), '(mocker, config_parser_dict, expected_message)', True, 'import tests.pcluster.config.utils as utils\n'), ((869, 4, 871, 5), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', (), '', True, 'import tests.pcluster.config.utils as utils\n'), ((1161, 4, 1161, 91), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', (), '', True, 'import tests.pcluster.config.utils as utils\n'), ((1240, 4, 1242, 5), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', (), '', True, 'import tests.pcluster.config.utils as utils\n'), ((1457, 4, 1457, 78), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', ({(1457, 33, 1457, 39): 'mocker', (1457, 41, 1457, 59): 
'config_parser_dict', (1457, 61, 1457, 77): 'expected_message'}, {}), '(mocker, config_parser_dict, expected_message)', True, 'import tests.pcluster.config.utils as utils\n'), ((1474, 4, 1474, 78), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', ({(1474, 33, 1474, 39): 'mocker', (1474, 41, 1474, 59): 'config_parser_dict', (1474, 61, 1474, 77): 'expected_message'}, {}), '(mocker, config_parser_dict, expected_message)', True, 'import tests.pcluster.config.utils as utils\n'), ((1501, 4, 1501, 78), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', ({(1501, 33, 1501, 39): 'mocker', (1501, 41, 1501, 59): 'config_parser_dict', (1501, 61, 1501, 77): 'expected_message'}, {}), '(mocker, config_parser_dict, expected_message)', True, 'import tests.pcluster.config.utils as utils\n'), ((1538, 4, 1538, 78), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', ({(1538, 33, 1538, 39): 'mocker', (1538, 41, 1538, 59): 'config_parser_dict', (1538, 61, 1538, 77): 'expected_message'}, {}), '(mocker, config_parser_dict, expected_message)', True, 'import tests.pcluster.config.utils as utils\n'), ((1622, 4, 1630, 5), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', (), '', True, 'import tests.pcluster.config.utils as utils\n'), ((1652, 20, 1652, 47), 'configparser.ConfigParser', 'configparser.ConfigParser', ({}, {}), '()', False, 'import configparser\n'), ((1655, 22, 1655, 108), 'tests.pcluster.config.utils.init_pcluster_config_from_configparser', 'utils.init_pcluster_config_from_configparser', (), '', True, 'import tests.pcluster.config.utils as utils\n'), ((1658, 23, 1658, 97), 'pcluster.config.validators.efa_gdr_validator', 'efa_gdr_validator', ({(1658, 41, 1658, 57): '"""enable_efa_gdr"""', (1658, 59, 1658, 79): 'enable_efa_gdr_value', (1658, 81, 1658, 96): 'pcluster_config'}, {}), "('enable_efa_gdr', enable_efa_gdr_value, pcluster_config)", False, 'from pcluster.config.validators import DCV_MESSAGES, EBS_VOLUME_TYPE_TO_VOLUME_SIZE_BOUNDS, FSX_MESSAGES, FSX_SUPPORTED_ARCHITECTURES_OSES, LOGFILE_LOGGER, architecture_os_validator, check_usage_class, cluster_type_validator, compute_resource_validator, disable_hyperthreading_architecture_validator, efa_gdr_validator, efa_os_arch_validator, fsx_ignored_parameters_validator, instances_architecture_compatibility_validator, intel_hpc_architecture_validator, queue_compute_type_validator, queue_validator, region_validator, s3_bucket_region_validator, settings_validator\n'), ((1740, 4, 1740, 78), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', ({(1740, 33, 1740, 39): 'mocker', (1740, 41, 1740, 59): 'config_parser_dict', (1740, 61, 1740, 77): 'expected_message'}, {}), '(mocker, config_parser_dict, expected_message)', True, 'import tests.pcluster.config.utils as utils\n'), ((1780, 4, 1780, 78), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', ({(1780, 33, 1780, 39): 'mocker', (1780, 41, 1780, 59): 'config_parser_dict', (1780, 61, 1780, 77): 'expected_message'}, {}), '(mocker, config_parser_dict, expected_message)', True, 'import tests.pcluster.config.utils as utils\n'), ((1793, 4, 1793, 78), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', ({(1793, 33, 1793, 39): 'mocker', (1793, 41, 1793, 59): 'config_parser_dict', (1793, 61, 1793, 77): 'expected_message'}, {}), '(mocker, config_parser_dict, expected_message)', True, 
'import tests.pcluster.config.utils as utils\n'), ((1830, 4, 1832, 5), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', (), '', True, 'import tests.pcluster.config.utils as utils\n'), ((1869, 4, 1869, 107), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', (), '', True, 'import tests.pcluster.config.utils as utils\n'), ((1887, 4, 1887, 78), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', ({(1887, 33, 1887, 39): 'mocker', (1887, 41, 1887, 59): 'config_parser_dict', (1887, 61, 1887, 77): 'expected_message'}, {}), '(mocker, config_parser_dict, expected_message)', True, 'import tests.pcluster.config.utils as utils\n'), ((1957, 4, 1957, 78), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', ({(1957, 33, 1957, 39): 'mocker', (1957, 41, 1957, 59): 'config_parser_dict', (1957, 61, 1957, 77): 'expected_message'}, {}), '(mocker, config_parser_dict, expected_message)', True, 'import tests.pcluster.config.utils as utils\n'), ((2086, 20, 2086, 47), 'configparser.ConfigParser', 'configparser.ConfigParser', ({}, {}), '()', False, 'import configparser\n'), ((2089, 22, 2089, 108), 'tests.pcluster.config.utils.init_pcluster_config_from_configparser', 'utils.init_pcluster_config_from_configparser', (), '', True, 'import tests.pcluster.config.utils as utils\n'), ((2097, 23, 2097, 75), 'pcluster.config.validators.queue_validator', 'queue_validator', ({(2097, 39, 2097, 46): '"""queue"""', (2097, 48, 2097, 57): '"""default"""', (2097, 59, 2097, 74): 'pcluster_config'}, {}), "('queue', 'default', pcluster_config)", False, 'from pcluster.config.validators import DCV_MESSAGES, EBS_VOLUME_TYPE_TO_VOLUME_SIZE_BOUNDS, FSX_MESSAGES, FSX_SUPPORTED_ARCHITECTURES_OSES, LOGFILE_LOGGER, architecture_os_validator, check_usage_class, cluster_type_validator, compute_resource_validator, disable_hyperthreading_architecture_validator, efa_gdr_validator, efa_os_arch_validator, fsx_ignored_parameters_validator, instances_architecture_compatibility_validator, intel_hpc_architecture_validator, queue_compute_type_validator, queue_validator, region_validator, s3_bucket_region_validator, settings_validator\n'), ((2133, 23, 2133, 78), 'pcluster.config.validators.settings_validator', 'settings_validator', ({(2133, 42, 2133, 58): '"""queue_settings"""', (2133, 60, 2133, 71): 'param_value', (2133, 73, 2133, 77): 'None'}, {}), "('queue_settings', param_value, None)", False, 'from pcluster.config.validators import DCV_MESSAGES, EBS_VOLUME_TYPE_TO_VOLUME_SIZE_BOUNDS, FSX_MESSAGES, FSX_SUPPORTED_ARCHITECTURES_OSES, LOGFILE_LOGGER, architecture_os_validator, check_usage_class, cluster_type_validator, compute_resource_validator, disable_hyperthreading_architecture_validator, efa_gdr_validator, efa_os_arch_validator, fsx_ignored_parameters_validator, instances_architecture_compatibility_validator, intel_hpc_architecture_validator, queue_compute_type_validator, queue_validator, region_validator, s3_bucket_region_validator, settings_validator\n'), ((2172, 20, 2172, 47), 'configparser.ConfigParser', 'configparser.ConfigParser', ({}, {}), '()', False, 'import configparser\n'), ((2185, 22, 2185, 88), 'tests.pcluster.config.utils.init_pcluster_config_from_configparser', 'utils.init_pcluster_config_from_configparser', ({(2185, 67, 2185, 80): 'config_parser', (2185, 82, 2185, 87): 'False'}, {}), '(config_parser, False)', True, 'import tests.pcluster.config.utils as utils\n'), ((2187, 23, 2187, 97), 
'pcluster.config.validators.compute_resource_validator', 'compute_resource_validator', ({(2187, 50, 2187, 68): '"""compute_resource"""', (2187, 70, 2187, 79): '"""default"""', (2187, 81, 2187, 96): 'pcluster_config'}, {}), "('compute_resource', 'default', pcluster_config)", False, 'from pcluster.config.validators import DCV_MESSAGES, EBS_VOLUME_TYPE_TO_VOLUME_SIZE_BOUNDS, FSX_MESSAGES, FSX_SUPPORTED_ARCHITECTURES_OSES, LOGFILE_LOGGER, architecture_os_validator, check_usage_class, cluster_type_validator, compute_resource_validator, disable_hyperthreading_architecture_validator, efa_gdr_validator, efa_os_arch_validator, fsx_ignored_parameters_validator, instances_architecture_compatibility_validator, intel_hpc_architecture_validator, queue_compute_type_validator, queue_validator, region_validator, s3_bucket_region_validator, settings_validator\n'), ((2215, 4, 2215, 78), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', ({(2215, 33, 2215, 39): 'mocker', (2215, 41, 2215, 59): 'config_parser_dict', (2215, 61, 2215, 77): 'expected_message'}, {}), '(mocker, config_parser_dict, expected_message)', True, 'import tests.pcluster.config.utils as utils\n'), ((2556, 4, 2556, 91), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', (), '', True, 'import tests.pcluster.config.utils as utils\n'), ((2583, 29, 2583, 69), 'tests.pcluster.config.utils.get_mocked_pcluster_config', 'utils.get_mocked_pcluster_config', ({(2583, 62, 2583, 68): 'mocker'}, {}), '(mocker)', True, 'import tests.pcluster.config.utils as utils\n'), ((2584, 18, 2584, 68), 'pcluster.config.cfn_param_types.CfnSection', 'CfnSection', ({(2584, 29, 2584, 32): 'FSX', (2584, 34, 2584, 56): 'mocked_pcluster_config', (2584, 58, 2584, 67): '"""default"""'}, {}), "(FSX, mocked_pcluster_config, 'default')", False, 'from pcluster.config.cfn_param_types import CfnParam, CfnSection\n'), ((2590, 23, 2590, 97), 'pcluster.config.validators.fsx_ignored_parameters_validator', 'fsx_ignored_parameters_validator', ({(2590, 56, 2590, 61): '"""fsx"""', (2590, 63, 2590, 72): '"""default"""', (2590, 74, 2590, 96): 'mocked_pcluster_config'}, {}), "('fsx', 'default', mocked_pcluster_config)", False, 'from pcluster.config.validators import DCV_MESSAGES, EBS_VOLUME_TYPE_TO_VOLUME_SIZE_BOUNDS, FSX_MESSAGES, FSX_SUPPORTED_ARCHITECTURES_OSES, LOGFILE_LOGGER, architecture_os_validator, check_usage_class, cluster_type_validator, compute_resource_validator, disable_hyperthreading_architecture_validator, efa_gdr_validator, efa_os_arch_validator, fsx_ignored_parameters_validator, instances_architecture_compatibility_validator, intel_hpc_architecture_validator, queue_compute_type_validator, queue_validator, region_validator, s3_bucket_region_validator, settings_validator\n'), ((2626, 4, 2626, 76), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', ({(2626, 33, 2626, 39): 'mocker', (2626, 41, 2626, 59): 'config_parser_dict', (2626, 61, 2626, 75): 'expected_error'}, {}), '(mocker, config_parser_dict, expected_error)', True, 'import tests.pcluster.config.utils as utils\n'), ((2680, 4, 2680, 78), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', ({(2680, 33, 2680, 39): 'mocker', (2680, 41, 2680, 59): 'config_parser_dict', (2680, 61, 2680, 77): 'expected_message'}, {}), '(mocker, config_parser_dict, expected_message)', True, 'import tests.pcluster.config.utils as utils\n'), ((2784, 4, 2786, 5), 
'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', (), '', True, 'import tests.pcluster.config.utils as utils\n'), ((2845, 4, 2845, 93), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', (), '', True, 'import tests.pcluster.config.utils as utils\n'), ((2870, 4, 2870, 110), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', (), '', True, 'import tests.pcluster.config.utils as utils\n'), ((2896, 20, 2896, 47), 'configparser.ConfigParser', 'configparser.ConfigParser', ({}, {}), '()', False, 'import configparser\n'), ((2899, 22, 2899, 108), 'tests.pcluster.config.utils.init_pcluster_config_from_configparser', 'utils.init_pcluster_config_from_configparser', (), '', True, 'import tests.pcluster.config.utils as utils\n'), ((2903, 23, 2903, 93), 'pcluster.config.validators.efa_os_arch_validator', 'efa_os_arch_validator', ({(2903, 45, 2903, 57): '"""enable_efa"""', (2903, 59, 2903, 75): 'enable_efa_value', (2903, 77, 2903, 92): 'pcluster_config'}, {}), "('enable_efa', enable_efa_value, pcluster_config)", False, 'from pcluster.config.validators import DCV_MESSAGES, EBS_VOLUME_TYPE_TO_VOLUME_SIZE_BOUNDS, FSX_MESSAGES, FSX_SUPPORTED_ARCHITECTURES_OSES, LOGFILE_LOGGER, architecture_os_validator, check_usage_class, cluster_type_validator, compute_resource_validator, disable_hyperthreading_architecture_validator, efa_gdr_validator, efa_os_arch_validator, fsx_ignored_parameters_validator, instances_architecture_compatibility_validator, intel_hpc_architecture_validator, queue_compute_type_validator, queue_validator, region_validator, s3_bucket_region_validator, settings_validator\n'), ((2932, 4, 2932, 78), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', ({(2932, 33, 2932, 39): 'mocker', (2932, 41, 2932, 59): 'config_parser_dict', (2932, 61, 2932, 77): 'expected_message'}, {}), '(mocker, config_parser_dict, expected_message)', True, 'import tests.pcluster.config.utils as utils\n'), ((2943, 22, 2943, 62), 'tests.pcluster.config.utils.get_mocked_pcluster_config', 'utils.get_mocked_pcluster_config', ({(2943, 55, 2943, 61): 'mocker'}, {}), '(mocker)', True, 'import tests.pcluster.config.utils as utils\n'), ((2946, 23, 2946, 69), 'pcluster.config.validators.region_validator', 'region_validator', ({(2946, 40, 2946, 45): '"""aws"""', (2946, 47, 2946, 51): 'None', (2946, 53, 2946, 68): 'pcluster_config'}, {}), "('aws', None, pcluster_config)", False, 'from pcluster.config.validators import DCV_MESSAGES, EBS_VOLUME_TYPE_TO_VOLUME_SIZE_BOUNDS, FSX_MESSAGES, FSX_SUPPORTED_ARCHITECTURES_OSES, LOGFILE_LOGGER, architecture_os_validator, check_usage_class, cluster_type_validator, compute_resource_validator, disable_hyperthreading_architecture_validator, efa_gdr_validator, efa_os_arch_validator, fsx_ignored_parameters_validator, instances_architecture_compatibility_validator, intel_hpc_architecture_validator, queue_compute_type_validator, queue_validator, region_validator, s3_bucket_region_validator, settings_validator\n'), ((2979, 4, 2979, 69), 'pcluster.config.validators.check_usage_class', 'check_usage_class', ({(2979, 22, 2979, 37): '"""instance-type"""', (2979, 39, 2979, 50): 'usage_class', (2979, 52, 2979, 58): 'errors', (2979, 60, 2979, 68): 'warnings'}, {}), "('instance-type', usage_class, errors, warnings)", False, 'from pcluster.config.validators import DCV_MESSAGES, EBS_VOLUME_TYPE_TO_VOLUME_SIZE_BOUNDS, FSX_MESSAGES, FSX_SUPPORTED_ARCHITECTURES_OSES, LOGFILE_LOGGER, 
architecture_os_validator, check_usage_class, cluster_type_validator, compute_resource_validator, disable_hyperthreading_architecture_validator, efa_gdr_validator, efa_os_arch_validator, fsx_ignored_parameters_validator, instances_architecture_compatibility_validator, intel_hpc_architecture_validator, queue_compute_type_validator, queue_validator, region_validator, s3_bucket_region_validator, settings_validator\n'), ((3001, 20, 3001, 47), 'configparser.ConfigParser', 'configparser.ConfigParser', ({}, {}), '()', False, 'import configparser\n'), ((3004, 22, 3004, 108), 'tests.pcluster.config.utils.init_pcluster_config_from_configparser', 'utils.init_pcluster_config_from_configparser', (), '', True, 'import tests.pcluster.config.utils as utils\n'), ((3005, 23, 3005, 86), 'pcluster.config.validators.cluster_type_validator', 'cluster_type_validator', ({(3005, 46, 3005, 60): '"""compute_type"""', (3005, 62, 3005, 68): '"""spot"""', (3005, 70, 3005, 85): 'pcluster_config'}, {}), "('compute_type', 'spot', pcluster_config)", False, 'from pcluster.config.validators import DCV_MESSAGES, EBS_VOLUME_TYPE_TO_VOLUME_SIZE_BOUNDS, FSX_MESSAGES, FSX_SUPPORTED_ARCHITECTURES_OSES, LOGFILE_LOGGER, architecture_os_validator, check_usage_class, cluster_type_validator, compute_resource_validator, disable_hyperthreading_architecture_validator, efa_gdr_validator, efa_os_arch_validator, fsx_ignored_parameters_validator, instances_architecture_compatibility_validator, intel_hpc_architecture_validator, queue_compute_type_validator, queue_validator, region_validator, s3_bucket_region_validator, settings_validator\n'), ((3030, 20, 3030, 47), 'configparser.ConfigParser', 'configparser.ConfigParser', ({}, {}), '()', False, 'import configparser\n'), ((3033, 22, 3033, 108), 'tests.pcluster.config.utils.init_pcluster_config_from_configparser', 'utils.init_pcluster_config_from_configparser', (), '', True, 'import tests.pcluster.config.utils as utils\n'), ((3034, 23, 3034, 83), 'pcluster.config.validators.queue_compute_type_validator', 'queue_compute_type_validator', ({(3034, 52, 3034, 59): '"""queue"""', (3034, 61, 3034, 65): '"""q1"""', (3034, 67, 3034, 82): 'pcluster_config'}, {}), "('queue', 'q1', pcluster_config)", False, 'from pcluster.config.validators import DCV_MESSAGES, EBS_VOLUME_TYPE_TO_VOLUME_SIZE_BOUNDS, FSX_MESSAGES, FSX_SUPPORTED_ARCHITECTURES_OSES, LOGFILE_LOGGER, architecture_os_validator, check_usage_class, cluster_type_validator, compute_resource_validator, disable_hyperthreading_architecture_validator, efa_gdr_validator, efa_os_arch_validator, fsx_ignored_parameters_validator, instances_architecture_compatibility_validator, intel_hpc_architecture_validator, queue_compute_type_validator, queue_validator, region_validator, s3_bucket_region_validator, settings_validator\n'), ((146, 8, 148, 9), 'tests.common.MockedBoto3Request', 'MockedBoto3Request', (), '', False, 'from tests.common import MockedBoto3Request\n'), ((206, 8, 211, 9), 'tests.common.MockedBoto3Request', 'MockedBoto3Request', (), '', False, 'from tests.common import MockedBoto3Request\n'), ((262, 8, 266, 9), 'tests.common.MockedBoto3Request', 'MockedBoto3Request', (), '', False, 'from tests.common import MockedBoto3Request\n'), ((347, 8, 351, 9), 'tests.common.MockedBoto3Request', 'MockedBoto3Request', (), '', False, 'from tests.common import MockedBoto3Request\n'), ((371, 8, 373, 9), 'tests.common.MockedBoto3Request', 'MockedBoto3Request', (), '', False, 'from tests.common import MockedBoto3Request\n'), ((381, 8, 381, 82), 
'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', ({(381, 37, 381, 43): 'mocker', (381, 45, 381, 63): 'config_parser_dict', (381, 65, 381, 81): 'expected_message'}, {}), '(mocker, config_parser_dict, expected_message)', True, 'import tests.pcluster.config.utils as utils\n'), ((421, 8, 421, 114), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', (), '', True, 'import tests.pcluster.config.utils as utils\n'), ((673, 8, 675, 9), 'tests.common.MockedBoto3Request', 'MockedBoto3Request', (), '', False, 'from tests.common import MockedBoto3Request\n'), ((685, 8, 689, 9), 'tests.common.MockedBoto3Request', 'MockedBoto3Request', (), '', False, 'from tests.common import MockedBoto3Request\n'), ((692, 8, 696, 9), 'tests.common.MockedBoto3Request', 'MockedBoto3Request', (), '', False, 'from tests.common import MockedBoto3Request\n'), ((703, 8, 703, 82), 'tests.pcluster.config.utils.assert_param_validator', 'utils.assert_param_validator', ({(703, 37, 703, 43): 'mocker', (703, 45, 703, 63): 'config_parser_dict', (703, 65, 703, 81): 'expected_message'}, {}), '(mocker, config_parser_dict, expected_message)', True, 'import tests.pcluster.config.utils as utils\n'), ((728, 8, 732, 9), 'tests.common.MockedBoto3Request', 'MockedBoto3Request', (), '', False, 'from tests.common import MockedBoto3Request\n'), ((767, 8, 771, 9), 'tests.common.MockedBoto3Request', 'MockedBoto3Request', (), '', False, 'from tests.common import MockedBoto3Request\n'), ((1160, 25, 1160, 50), 're.escape', 're.escape', ({(1160, 35, 1160, 49): 'expected_error'}, {}), '(expected_error)', False, 'import re\n'), ((1328, 8, 1332, 9), 'tests.common.MockedBoto3Request', 'MockedBoto3Request', (), '', False, 'from tests.common import MockedBoto3Request\n'), ((1711, 8, 1715, 9), 'tests.common.MockedBoto3Request', 'MockedBoto3Request', (), '', False, 'from tests.common import MockedBoto3Request\n'), ((1716, 8, 1720, 9), 'tests.common.MockedBoto3Request', 'MockedBoto3Request', (), '', False, 'from tests.common import MockedBoto3Request\n'), ((1721, 8, 1725, 9), 'tests.common.MockedBoto3Request', 'MockedBoto3Request', (), '', False, 'from tests.common import MockedBoto3Request\n'), ((1864, 23, 1864, 50), 're.escape', 're.escape', ({(1864, 33, 1864, 49): 'expected_message'}, {}), '(expected_message)', False, 'import re\n'), ((2190, 8, 2190, 47), 'assertpy.assert_that', 'assert_that', ({(2190, 20, 2190, 46): '(expected_message in errors)'}, {}), '(expected_message in errors)', False, 'from assertpy import assert_that\n'), ((388, 12, 392, 13), 'tests.common.MockedBoto3Request', 'MockedBoto3Request', (), '', False, 'from tests.common import MockedBoto3Request\n'), ((401, 12, 407, 13), 'tests.common.MockedBoto3Request', 'MockedBoto3Request', (), '', False, 'from tests.common import MockedBoto3Request\n'), ((495, 12, 495, 107), 'tests.common.MockedBoto3Request', 'MockedBoto3Request', (), '', False, 'from tests.common import MockedBoto3Request\n'), ((499, 12, 501, 13), 'tests.common.MockedBoto3Request', 'MockedBoto3Request', (), '', False, 'from tests.common import MockedBoto3Request\n'), ((505, 12, 511, 13), 'tests.common.MockedBoto3Request', 'MockedBoto3Request', (), '', False, 'from tests.common import MockedBoto3Request\n'), ((621, 12, 626, 13), 'tests.common.MockedBoto3Request', 'MockedBoto3Request', (), '', False, 'from tests.common import MockedBoto3Request\n'), ((630, 12, 636, 13), 'tests.common.MockedBoto3Request', 'MockedBoto3Request', (), '', False, 'from tests.common import 
MockedBoto3Request\n'), ((879, 28, 879, 78), 'datetime.datetime', 'datetime.datetime', ({(879, 46, 879, 50): '(2019)', (879, 52, 879, 53): '(1)', (879, 55, 879, 57): '(10)', (879, 59, 879, 61): '(11)', (879, 63, 879, 65): '(25)', (879, 67, 879, 69): '(59)', (879, 71, 879, 77): '(128000)'}, {}), '(2019, 1, 10, 11, 25, 59, 128000)', False, 'import datetime\n'), ((890, 8, 895, 9), 'tests.common.MockedBoto3Request', 'MockedBoto3Request', (), '', False, 'from tests.common import MockedBoto3Request\n'), ((1258, 8, 1258, 103), 'tests.common.MockedBoto3Request', 'MockedBoto3Request', (), '', False, 'from tests.common import MockedBoto3Request\n'), ((1357, 8, 1361, 9), 'tests.common.MockedBoto3Request', 'MockedBoto3Request', (), '', False, 'from tests.common import MockedBoto3Request\n'), ((1418, 12, 1422, 13), 'tests.common.MockedBoto3Request', 'MockedBoto3Request', (), '', False, 'from tests.common import MockedBoto3Request\n'), ((1608, 12, 1612, 13), 'tests.common.MockedBoto3Request', 'MockedBoto3Request', (), '', False, 'from tests.common import MockedBoto3Request\n'), ((1834, 4, 1834, 53), 'assertpy.assert_that', 'assert_that', ({(1834, 16, 1834, 52): '(access_from_error_msg in caplog.text)'}, {}), '(access_from_error_msg in caplog.text)', False, 'from assertpy import assert_that\n'), ((2284, 38, 2284, 66), 're.escape', 're.escape', ({(2284, 48, 2284, 65): 'expected_warnings'}, {}), '(expected_warnings)', False, 'import re\n'), ((2288, 36, 2288, 62), 're.escape', 're.escape', ({(2288, 46, 2288, 61): 'expected_errors'}, {}), '(expected_errors)', False, 'import re\n'), ((2445, 4, 2445, 57), 'assertpy.assert_that', 'assert_that', ({(2445, 16, 2445, 56): 'supported_architectures_patch.call_count'}, {}), '(supported_architectures_patch.call_count)', False, 'from assertpy import assert_that\n'), ((2446, 4, 2446, 40), 'assertpy.assert_that', 'assert_that', ({(2446, 16, 2446, 39): 'logger_patch.call_count'}, {}), '(logger_patch.call_count)', False, 'from assertpy import assert_that\n'), ((2447, 4, 2447, 50), 'assertpy.assert_that', 'assert_that', ({(2447, 16, 2447, 49): 'is_instance_type_patch.call_count'}, {}), '(is_instance_type_patch.call_count)', False, 'from assertpy import assert_that\n'), ((2591, 4, 2591, 25), 'assertpy.assert_that', 'assert_that', ({(2591, 16, 2591, 24): 'warnings'}, {}), '(warnings)', False, 'from assertpy import assert_that\n'), ((2632, 8, 2632, 52), 'pcluster.config.validators.EBS_VOLUME_TYPE_TO_VOLUME_SIZE_BOUNDS.keys', 'EBS_VOLUME_TYPE_TO_VOLUME_SIZE_BOUNDS.keys', ({}, {}), '()', False, 'from pcluster.config.validators import DCV_MESSAGES, EBS_VOLUME_TYPE_TO_VOLUME_SIZE_BOUNDS, FSX_MESSAGES, FSX_SUPPORTED_ARCHITECTURES_OSES, LOGFILE_LOGGER, architecture_os_validator, check_usage_class, cluster_type_validator, compute_resource_validator, disable_hyperthreading_architecture_validator, efa_gdr_validator, efa_os_arch_validator, fsx_ignored_parameters_validator, instances_architecture_compatibility_validator, intel_hpc_architecture_validator, queue_compute_type_validator, queue_validator, region_validator, s3_bucket_region_validator, settings_validator\n'), ((2634, 4, 2634, 59), 'assertpy.assert_that', 'assert_that', ({(2634, 16, 2634, 58): 'allowed_values_all_have_volume_size_bounds'}, {}), '(allowed_values_all_have_volume_size_bounds)', False, 'from assertpy import assert_that\n'), ((3011, 4, 3011, 23), 'assertpy.assert_that', 'assert_that', ({(3011, 16, 3011, 22): 'errors'}, {}), '(errors)', False, 'from assertpy import assert_that\n'), ((3012, 4, 3012, 25), 
'assertpy.assert_that', 'assert_that', ({(3012, 16, 3012, 24): 'warnings'}, {}), '(warnings)', False, 'from assertpy import assert_that\n'), ((3043, 4, 3043, 23), 'assertpy.assert_that', 'assert_that', ({(3043, 16, 3043, 22): 'errors'}, {}), '(errors)', False, 'from assertpy import assert_that\n'), ((3044, 4, 3044, 25), 'assertpy.assert_that', 'assert_that', ({(3044, 16, 3044, 24): 'warnings'}, {}), '(warnings)', False, 'from assertpy import assert_that\n'), ((650, 8, 650, 30), 'assertpy.assert_that', 'assert_that', ({(650, 20, 650, 29): 'errors[0]'}, {}), '(errors[0])', False, 'from assertpy import assert_that\n'), ((652, 8, 652, 27), 'assertpy.assert_that', 'assert_that', ({(652, 20, 652, 26): 'errors'}, {}), '(errors)', False, 'from assertpy import assert_that\n'), ((1440, 16, 1444, 17), 'tests.common.MockedBoto3Request', 'MockedBoto3Request', (), '', False, 'from tests.common import MockedBoto3Request\n'), ((1591, 39, 1598, 17), 'json.dumps', 'json.dumps', ({(1592, 20, 1597, 21): "{'additional-instance-type': {'InstanceType': 'additional-instance-type',\n 'NetworkInfo': {'EfaSupported': True}}}"}, {}), "({'additional-instance-type': {'InstanceType':\n 'additional-instance-type', 'NetworkInfo': {'EfaSupported': True}}})", False, 'import json\n'), ((1660, 8, 1660, 30), 'assertpy.assert_that', 'assert_that', ({(1660, 20, 1660, 29): 'errors[0]'}, {}), '(errors[0])', False, 'from assertpy import assert_that\n'), ((1662, 8, 1662, 27), 'assertpy.assert_that', 'assert_that', ({(1662, 20, 1662, 26): 'errors'}, {}), '(errors)', False, 'from assertpy import assert_that\n'), ((2100, 8, 2100, 44), 'assertpy.assert_that', 'assert_that', ({(2100, 20, 2100, 43): 'expected_error_messages'}, {}), '(expected_error_messages)', False, 'from assertpy import assert_that\n'), ((2102, 8, 2102, 27), 'assertpy.assert_that', 'assert_that', ({(2102, 20, 2102, 26): 'errors'}, {}), '(errors)', False, 'from assertpy import assert_that\n'), ((2105, 8, 2105, 46), 'assertpy.assert_that', 'assert_that', ({(2105, 20, 2105, 45): 'expected_warning_messages'}, {}), '(expected_warning_messages)', False, 'from assertpy import assert_that\n'), ((2107, 8, 2107, 29), 'assertpy.assert_that', 'assert_that', ({(2107, 20, 2107, 28): 'warnings'}, {}), '(warnings)', False, 'from assertpy import assert_that\n'), ((2136, 8, 2136, 30), 'assertpy.assert_that', 'assert_that', ({(2136, 20, 2136, 29): 'errors[0]'}, {}), '(errors[0])', False, 'from assertpy import assert_that\n'), ((2138, 8, 2138, 27), 'assertpy.assert_that', 'assert_that', ({(2138, 20, 2138, 26): 'errors'}, {}), '(errors)', False, 'from assertpy import assert_that\n'), ((2192, 8, 2192, 27), 'assertpy.assert_that', 'assert_that', ({(2192, 20, 2192, 26): 'errors'}, {}), '(errors)', False, 'from assertpy import assert_that\n'), ((2284, 8, 2284, 29), 'assertpy.assert_that', 'assert_that', ({(2284, 20, 2284, 28): 'warnings'}, {}), '(warnings)', False, 'from assertpy import assert_that\n'), ((2288, 8, 2288, 27), 'assertpy.assert_that', 'assert_that', ({(2288, 20, 2288, 26): 'errors'}, {}), '(errors)', False, 'from assertpy import assert_that\n'), ((2593, 8, 2593, 30), 'assertpy.assert_that', 'assert_that', ({(2593, 20, 2593, 29): 'errors[0]'}, {}), '(errors[0])', False, 'from assertpy import assert_that\n'), ((2595, 8, 2595, 27), 'assertpy.assert_that', 'assert_that', ({(2595, 20, 2595, 26): 'errors'}, {}), '(errors)', False, 'from assertpy import assert_that\n'), ((2905, 8, 2905, 30), 'assertpy.assert_that', 'assert_that', ({(2905, 20, 2905, 29): 'errors[0]'}, {}), '(errors[0])', 
False, 'from assertpy import assert_that\n'), ((2907, 8, 2907, 27), 'assertpy.assert_that', 'assert_that', ({(2907, 20, 2907, 26): 'errors'}, {}), '(errors)', False, 'from assertpy import assert_that\n'), ((2949, 8, 2949, 30), 'assertpy.assert_that', 'assert_that', ({(2949, 20, 2949, 29): 'errors[0]'}, {}), '(errors[0])', False, 'from assertpy import assert_that\n'), ((2951, 8, 2951, 27), 'assertpy.assert_that', 'assert_that', ({(2951, 20, 2951, 26): 'errors'}, {}), '(errors)', False, 'from assertpy import assert_that\n'), ((2982, 8, 2982, 27), 'assertpy.assert_that', 'assert_that', ({(2982, 20, 2982, 26): 'errors'}, {}), '(errors)', False, 'from assertpy import assert_that\n'), ((2984, 8, 2984, 27), 'assertpy.assert_that', 'assert_that', ({(2984, 20, 2984, 26): 'errors'}, {}), '(errors)', False, 'from assertpy import assert_that\n'), ((2987, 8, 2987, 29), 'assertpy.assert_that', 'assert_that', ({(2987, 20, 2987, 28): 'warnings'}, {}), '(warnings)', False, 'from assertpy import assert_that\n'), ((2989, 8, 2989, 29), 'assertpy.assert_that', 'assert_that', ({(2989, 20, 2989, 28): 'warnings'}, {}), '(warnings)', False, 'from assertpy import assert_that\n'), ((1854, 45, 1854, 84), 'pcluster.config.validators.FSX_SUPPORTED_ARCHITECTURES_OSES.keys', 'FSX_SUPPORTED_ARCHITECTURES_OSES.keys', ({}, {}), '()', False, 'from pcluster.config.validators import DCV_MESSAGES, EBS_VOLUME_TYPE_TO_VOLUME_SIZE_BOUNDS, FSX_MESSAGES, FSX_SUPPORTED_ARCHITECTURES_OSES, LOGFILE_LOGGER, architecture_os_validator, check_usage_class, cluster_type_validator, compute_resource_validator, disable_hyperthreading_architecture_validator, efa_gdr_validator, efa_os_arch_validator, fsx_ignored_parameters_validator, instances_architecture_compatibility_validator, intel_hpc_architecture_validator, queue_compute_type_validator, queue_validator, region_validator, s3_bucket_region_validator, settings_validator\n'), ((2586, 16, 2586, 33), 'pcluster.config.mappings.FSX.get', 'FSX.get', ({(2586, 24, 2586, 32): '"""params"""'}, {}), "('params')", False, 'from pcluster.config.mappings import ALLOWED_VALUES, FSX\n')] |
takamatsu-shyo/yolo-microservice | flask_app.py | b8ab03b98c0939ab1849d0da938d0878b0ec441f | from flask import Flask
from flask import request
from flask import Response
from resources import resourcePing, resourceResolution
from message_protocol.resolution_input import parseResolutionInput
import json
app = Flask(__name__)
@app.route('/ping', methods=['GET'])
def ping():
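    """Health-check endpoint: return the ping resource serialized as JSON."""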
output = resourcePing.main()
    payload = output.toJSON()  # avoid shadowing the imported json module
    return Response(payload, mimetype='application/json')
@app.route('/resolution', methods=['POST'])
def resolution():
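    """Parse the posted resolution request, run the resolution resource, and return its JSON result."""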
    resolution_input = parseResolutionInput(request.json)  # avoid shadowing the input() builtin
    output = resourceResolution.main(resolution_input)
output_json = json.dumps(output)
    return Response(output_json, mimetype='application/json')
| [((8, 6, 8, 21), 'flask.Flask', 'Flask', ({(8, 12, 8, 20): '__name__'}, {}), '(__name__)', False, 'from flask import Flask\n'), ((12, 13, 12, 32), 'resources.resourcePing.main', 'resourcePing.main', ({}, {}), '()', False, 'from resources import resourcePing, resourceResolution\n'), ((14, 11, 14, 53), 'flask.Response', 'Response', (), '', False, 'from flask import Response\n'), ((18, 12, 18, 46), 'message_protocol.resolution_input.parseResolutionInput', 'parseResolutionInput', ({(18, 33, 18, 45): 'request.json'}, {}), '(request.json)', False, 'from message_protocol.resolution_input import parseResolutionInput\n'), ((19, 13, 19, 43), 'resources.resourceResolution.main', 'resourceResolution.main', ({(19, 37, 19, 42): 'input'}, {}), '(input)', False, 'from resources import resourcePing, resourceResolution\n'), ((21, 11, 21, 62), 'flask.Response', 'Response', (), '', False, 'from flask import Response\n')] |
Stinger101/my_uno_ml_service | backend/server/server/wsgi.py | 47d19f6e5e19e73c465b7ddca889324c9bd5862f | """
WSGI config for server project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'server.settings')
application = get_wsgi_application()
import inspect
from apps.ml.registry import MLRegistry
from apps.ml.income_classifier.random_forest import RandomForestClassifier
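# Register the income classifier in the ML registry at module import so the endpoint can serve it.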
try:
registry = MLRegistry()
rf = RandomForestClassifier()
    registry.add_algorithm(
        endpoint_name="income_classifier",
        algorithm_object=rf,
        algorithm_name="random forest",
        algorithm_status="production",
        algorithm_version="0.0.1",
        owner="Piotr",
        algorithm_description="Random forest with simple pre and post processing",
        algorithm_code=inspect.getsource(RandomForestClassifier),
    )
except Exception as e:
print ("Error while loading algorithm to the registry",str(e))
| [((14, 0, 14, 66), 'os.environ.setdefault', 'os.environ.setdefault', ({(14, 22, 14, 46): '"""DJANGO_SETTINGS_MODULE"""', (14, 48, 14, 65): '"""server.settings"""'}, {}), "('DJANGO_SETTINGS_MODULE', 'server.settings')", False, 'import os\n'), ((16, 14, 16, 36), 'django.core.wsgi.get_wsgi_application', 'get_wsgi_application', ({}, {}), '()', False, 'from django.core.wsgi import get_wsgi_application\n'), ((23, 15, 23, 27), 'apps.ml.registry.MLRegistry', 'MLRegistry', ({}, {}), '()', False, 'from apps.ml.registry import MLRegistry\n'), ((25, 9, 25, 33), 'apps.ml.income_classifier.random_forest.RandomForestClassifier', 'RandomForestClassifier', ({}, {}), '()', False, 'from apps.ml.income_classifier.random_forest import RandomForestClassifier\n'), ((27, 273, 27, 314), 'inspect.getsource', 'inspect.getsource', ({(27, 291, 27, 313): 'RandomForestClassifier'}, {}), '(RandomForestClassifier)', False, 'import inspect\n')] |
mchestr/pycbc | pycbc/config.py | c215c1f177fe383ec6e797437fa2d5f4727eb9f3 | import os
from functools import reduce
import boto3
import yaml
from copy import deepcopy
from cryptography.fernet import Fernet
from pycbc import json
from pycbc.utils import AttrDict as d
s3 = boto3.client('s3')
_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
_DEFAULTS = d({
'users': [],
'encrypt_key': Fernet.generate_key().decode('utf-8'),
'api_gateway': None,
'sender_email': None,
'logging': d({
'version': 1,
'formatters': d({
'default': d({
'format': '%(asctime)-15s - %(levelname)-7s - %(message)s',
}),
}),
'handlers': d({
'console': d({
'class': 'logging.StreamHandler',
'formatter': 'default',
'level': 'DEBUG',
'stream': 'ext://sys.stderr',
}),
}),
'loggers': d({
'pycbc': d({
'handlers': ['console'],
'level': 'INFO',
})
})
})
})
def load(event):
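    """Build the effective config: defaults, then the S3 YAML file, then environment variables, then event overrides."""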
event_override = event.get('config', d())
env_prefix = event_override.get(
'env_prefix', os.getenv('ENV_PREFIX', 'PYCBC_'))
s3_bucket = event_override.get(
's3_bucket', os.getenv(f'{env_prefix}S3_BUCKET', 'pycbc'))
s3_filename = event_override.get(
's3_filename',
os.getenv(f'{env_prefix}S3_FILENAME', 'pycbc-config.yaml')
)
return json.loads(json.dumps(reduce(
_merge,
[
deepcopy(_DEFAULTS),
_from_s3(s3_bucket, s3_filename),
_from_env(env_prefix),
event_override,
{'s3_bucket': s3_bucket, 's3_filename': s3_filename}
])
))
def _merge(a, b, path=None):
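    """Recursively merge mapping ``b`` into ``a``; on conflicts, values from ``b`` win."""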
if path is None:
path = []
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
_merge(a[key], b[key], path + [str(key)])
else:
a[key] = b[key]
else:
a[key] = b[key]
return a
def _yaml_load(data):
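    # Parse YAML, constructing mappings as AttrDict so values are attribute-accessible.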
yaml.add_constructor(
_mapping_tag,
lambda loader, node: d(loader.construct_pairs(node)),
)
return yaml.load(data, Loader=yaml.FullLoader)
def _from_env(prefix):
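    # Collect environment variables starting with ``prefix``, stripping the prefix and lower-casing the keys.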
env_vars = (k for k in os.environ if k.startswith(prefix))
return d({
k[len(prefix):].lower(): os.environ[k] for k in env_vars
})
def _from_s3(bucket, filename):
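    # Download the config file from S3 and parse its YAML body.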
fileobj = s3.get_object(
Bucket=bucket,
Key=filename,
)
return _yaml_load(fileobj['Body'].read())
| [((12, 5, 12, 23), 'boto3.client', 'boto3.client', ({(12, 18, 12, 22): '"""s3"""'}, {}), "('s3')", False, 'import boto3\n'), ((86, 11, 86, 50), 'yaml.load', 'yaml.load', (), '', False, 'import yaml\n'), ((45, 41, 45, 44), 'pycbc.utils.AttrDict', 'd', ({}, {}), '()', True, 'from pycbc.utils import AttrDict as d\n'), ((47, 22, 47, 55), 'os.getenv', 'os.getenv', ({(47, 32, 47, 44): '"""ENV_PREFIX"""', (47, 46, 47, 54): '"""PYCBC_"""'}, {}), "('ENV_PREFIX', 'PYCBC_')", False, 'import os\n'), ((49, 21, 49, 65), 'os.getenv', 'os.getenv', ({(49, 31, 49, 55): 'f"""{env_prefix}S3_BUCKET"""', (49, 57, 49, 64): '"""pycbc"""'}, {}), "(f'{env_prefix}S3_BUCKET', 'pycbc')", False, 'import os\n'), ((52, 8, 52, 66), 'os.getenv', 'os.getenv', ({(52, 18, 52, 44): 'f"""{env_prefix}S3_FILENAME"""', (52, 46, 52, 65): '"""pycbc-config.yaml"""'}, {}), "(f'{env_prefix}S3_FILENAME', 'pycbc-config.yaml')", False, 'import os\n'), ((16, 19, 16, 40), 'cryptography.fernet.Fernet.generate_key', 'Fernet.generate_key', ({}, {}), '()', False, 'from cryptography.fernet import Fernet\n'), ((57, 12, 57, 31), 'copy.deepcopy', 'deepcopy', ({(57, 21, 57, 30): '_DEFAULTS'}, {}), '(_DEFAULTS)', False, 'from copy import deepcopy\n'), ((22, 23, 24, 14), 'pycbc.utils.AttrDict', 'd', ({(22, 25, 24, 13): "{'format': '%(asctime)-15s - %(levelname)-7s - %(message)s'}"}, {}), "({'format': '%(asctime)-15s - %(levelname)-7s - %(message)s'})", True, 'from pycbc.utils import AttrDict as d\n'), ((27, 23, 32, 14), 'pycbc.utils.AttrDict', 'd', ({(27, 25, 32, 13): "{'class': 'logging.StreamHandler', 'formatter': 'default', 'level': 'DEBUG',\n 'stream': 'ext://sys.stderr'}"}, {}), "({'class': 'logging.StreamHandler', 'formatter': 'default', 'level':\n 'DEBUG', 'stream': 'ext://sys.stderr'})", True, 'from pycbc.utils import AttrDict as d\n'), ((35, 21, 38, 14), 'pycbc.utils.AttrDict', 'd', ({(35, 23, 38, 13): "{'handlers': ['console'], 'level': 'INFO'}"}, {}), "({'handlers': ['console'], 'level': 'INFO'})", True, 'from pycbc.utils import AttrDict as d\n')] |
vinirossa/password_generator_test | models/toolscontext/errorhandler.py | dd2f43540c6f58ff9217320c21b246c0be3fc55f | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Module Name
Description...
"""
__author__ = "Vinícius Pereira"
__copyright__ = "Copyright 2021, Vinícius Pereira"
__credits__ = ["Vinícius Pereira","etc."]
__date__ = "2021/04/12"
__license__ = "GPL"
__version__ = "1.0.0"
__pythonversion__ = "3.9.1"
__maintainer__ = "Vinícius Pereira"
__contact__ = "[email protected]"
__status__ = "Development"
import sys, os
import logging
import inspect
import datetime
STD_LOG_FORMAT = ("%(asctime)s - %(levelname)s - %(name)s - %(filename)s - %(funcName)s() - ln.%(lineno)d"
" - %(message)s")
def file_logger(filename: str,
                level: int = logging.DEBUG,
format: str = STD_LOG_FORMAT):
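    """Return the module logger with a file handler that writes ``level`` records to ``filename`` in the given ``format``."""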
logger = logging.getLogger(__name__)
logger.setLevel(level)
formatter = logging.Formatter(format)
file_handler = logging.FileHandler(filename)
file_handler.setLevel(level)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
return logger
def prompt_logger(error):
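    """Print a formatted report for ``error`` to stdout and return the details as a dict."""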
caller = inspect.getframeinfo(inspect.stack()[1][0])
error_log = {"error_type": error.__class__.__name__,
"error_info": error.__doc__,
"error_line": error.__traceback__.tb_lineno,
"error_file": os.path.basename(caller.filename),
"error_time": datetime.datetime.now(),
"error_details": str(error).capitalize()}
print("----- ERROR -----")
print("Type:",error_log["error_type"])
print("Info:",error_log["error_info"])
print("Line:",error_log["error_line"])
print("File:",error_log["error_file"])
print("Time:",error_log["error_time"])
print("Details:",error_log["error_details"])
return error_log
def error_box():
pass
def sql_logger():
pass
if __name__ == "__main__":
pass | [((34, 13, 34, 40), 'logging.getLogger', 'logging.getLogger', ({(34, 31, 34, 39): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((37, 16, 37, 41), 'logging.Formatter', 'logging.Formatter', ({(37, 34, 37, 40): 'format'}, {}), '(format)', False, 'import logging\n'), ((39, 19, 39, 48), 'logging.FileHandler', 'logging.FileHandler', ({(39, 39, 39, 47): 'filename'}, {}), '(filename)', False, 'import logging\n'), ((55, 31, 55, 64), 'os.path.basename', 'os.path.basename', ({(55, 48, 55, 63): 'caller.filename'}, {}), '(caller.filename)', False, 'import sys, os\n'), ((56, 31, 56, 54), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n'), ((50, 34, 50, 49), 'inspect.stack', 'inspect.stack', ({}, {}), '()', False, 'import inspect\n')] |
henchc/Rediscovering-Text-as-Data | 05-Intro-to-SpaCy/scripts/choropleth.py | 3e14fa7a4bd82899ea564d4f7857a5dbdc616a4f | def us_choropleth(t):
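    """Plot a US map from table ``t``: scatter and label each named point, then shade states by how many rows fall in each."""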
import matplotlib.cm
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
from matplotlib.colors import Normalize
import shapefile
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import numpy as np
import random
import pandas as pd
from collections import Counter
plt.title("NER", fontsize=12)
us_locations_map = Basemap(
resolution="l",
llcrnrlon=-128.94,
llcrnrlat=23.52,
urcrnrlon=-60.12,
urcrnrlat=50.93,
lat_0=37.26,
lon_0=-94.53)
us_locations_map.drawmapboundary(
fill_color="#46bcec") # Fills in the oceans
us_locations_map.fillcontinents(
color="#eabc77",
lake_color="#46bcec") # Defines the continents
us_locations_map.drawcoastlines()
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(15.5, 12.5) # Sets the size of the map
# Converts the coordinates to map points
lons, lats = us_locations_map(t["longitude"], t["latitude"])
us_locations_map.scatter(
lons,
lats,
color="black",
zorder=10) # Draws the points on the map
# Labels each point with the location name
for i in range(t.num_rows):
lat_lon = (
t.row(i).item("longitude") + .2,
t.row(i).item("latitude") - .1)
plt.annotate(np.array(t.row(i).item("name")), lat_lon, fontsize=10)
# Here we are reading in a shape file, which places state boundary
# information for our Basemap
us_locations_map.readshapefile(
"data/us_shapefiles/cb_2016_us_state_20m", "us_states")
state_names = []
for shape_dict in us_locations_map.us_states_info:
state_names.append(shape_dict['NAME'])
ax = plt.gca() # get current axes instance
cmap = plt.get_cmap('Reds')
names = []
shapes = []
counts = []
state_counts = Counter(t["state"])
for index, state in enumerate(state_names):
seg = us_locations_map.us_states[index]
poly = Polygon(seg)
names.append(state)
shapes.append(poly)
if state in t['state']:
counts.append(state_counts[state])
else:
counts.append(0)
# Loading our lists into the DataFrame
shape_table = pd.DataFrame()
shape_table["State Name"] = np.array(names)
shape_table["Shapes"] = np.array(shapes)
shape_table["Count"] = np.array(counts)
pc = PatchCollection(shape_table["Shapes"], zorder=2)
norm = Normalize()
pc.set_facecolor(cmap(norm(shape_table['Count'].fillna(0).values)))
pc.set_edgecolor("black")
ax.add_collection(pc)
# Adds colorbar showing the scale
mapper = matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap)
mapper.set_array(shape_table['Count'])
plt.colorbar(mapper, shrink=0.4)
| [((14, 4, 14, 33), 'matplotlib.pyplot.title', 'plt.title', (), '', True, 'import matplotlib.pyplot as plt\n'), ((16, 23, 23, 21), 'mpl_toolkits.basemap.Basemap', 'Basemap', (), '', False, 'from mpl_toolkits.basemap import Basemap\n'), ((58, 9, 58, 18), 'matplotlib.pyplot.gca', 'plt.gca', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((59, 11, 59, 31), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', ({(59, 24, 59, 30): '"""Reds"""'}, {}), "('Reds')", True, 'import matplotlib.pyplot as plt\n'), ((65, 19, 65, 38), 'collections.Counter', 'Counter', ({(65, 27, 65, 37): "t['state']"}, {}), "(t['state'])", False, 'from collections import Counter\n'), ((78, 18, 78, 32), 'pandas.DataFrame', 'pd.DataFrame', ({}, {}), '()', True, 'import pandas as pd\n'), ((79, 32, 79, 47), 'numpy.array', 'np.array', ({(79, 41, 79, 46): 'names'}, {}), '(names)', True, 'import numpy as np\n'), ((80, 28, 80, 44), 'numpy.array', 'np.array', ({(80, 37, 80, 43): 'shapes'}, {}), '(shapes)', True, 'import numpy as np\n'), ((81, 27, 81, 43), 'numpy.array', 'np.array', ({(81, 36, 81, 42): 'counts'}, {}), '(counts)', True, 'import numpy as np\n'), ((83, 9, 83, 57), 'matplotlib.collections.PatchCollection', 'PatchCollection', (), '', False, 'from matplotlib.collections import PatchCollection\n'), ((84, 11, 84, 22), 'matplotlib.colors.Normalize', 'Normalize', ({}, {}), '()', False, 'from matplotlib.colors import Normalize\n'), ((93, 4, 93, 36), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (), '', True, 'import matplotlib.pyplot as plt\n'), ((69, 15, 69, 27), 'matplotlib.patches.Polygon', 'Polygon', ({(69, 23, 69, 26): 'seg'}, {}), '(seg)', False, 'from matplotlib.patches import Polygon\n')] |
ntmoore/skycamera | take_day_and_night_pictures.py | c8c67970b0e3a52ce008dbd6b34df20cdda786b7 | import time
import os
#parameters
sunset_hr=8
dawn_hr=7
daytime_period_min=60
nighttime_period_min=1
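
# Take a long-exposure photo every nighttime_period_min minutes at night and a normal
# photo every daytime_period_min minutes during the day; each shot is uploaded with rclone.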
print("program starts at ", time.localtime())
while True:
    # Is it day or night? Read the clock once so hour and minute come from the same instant.
    now = time.localtime()
    hour = now[3]
    minute = now[4]
hour_float = 1.0*hour+minute/60.0
if( hour_float>(sunset_hr+12) or hour_float<dawn_hr ):
daytime=0
else :
daytime=1
print("Is it day? ",daytime)
# night
if( daytime==0): # night
filename='sky-{:d}-{:02d}-{:02d}-{:02d}-{:02d}-{:02d}.jpg'.format(
time.localtime()[0], # year
time.localtime()[1], # month
time.localtime()[2], # day of month
time.localtime()[3], # hr
time.localtime()[4], # min
time.localtime()[5] # sec
)
path="/home/pi/skyphotos/data/night/"
command = ("raspistill --shutter 30000000 --analoggain 12.0" +
" --digitalgain 1.0 --nopreview --mode 3 "+
" --annotate "+filename+" -o "+path+filename )
print("running command: ",command)
os.system(command)
print("took picture ",filename)
command = "rclone copy " +path+filename+ " wsu-physics-skycamera:23817_camera/night/ "
os.system(command)
print("uploaded picture ",filename)
        if time.localtime()[3] > sunset_hr:
            time.sleep(30*60)  # extra 30 min wait if it's before midnight
# normal wait
time.sleep(nighttime_period_min*60)
# day
if(daytime==1): #implicit else
filename='sky-{:d}-{:02d}-{:02d}-{:02d}-{:02d}-{:02d}.jpg'.format(
time.localtime()[0], # year
time.localtime()[1], # month
time.localtime()[2], # day of month
time.localtime()[3], # hr
time.localtime()[4], # min
time.localtime()[5] # sec
)
path="/home/pi/skyphotos/data/day/"
command="raspistill -annotate "+filename+" --nopreview --mode 3 -o " + path + filename
os.system(command)
print("took picture ",filename)
command = "rclone copy " +path+filename+ " wsu-physics-skycamera:23817_camera/day/ "
os.system(command)
print("uploaded picture ",filename)
time.sleep(daytime_period_min*60)
# program (never) ends
| [((10, 0, 10, 16), 'time.localtime', 'time.localtime', ({}, {}), '()', False, 'import time\n'), ((11, 27, 11, 43), 'time.localtime', 'time.localtime', ({}, {}), '()', False, 'import time\n'), ((16, 4, 16, 20), 'time.localtime', 'time.localtime', ({}, {}), '()', False, 'import time\n'), ((17, 11, 17, 27), 'time.localtime', 'time.localtime', ({}, {}), '()', False, 'import time\n'), ((18, 13, 18, 29), 'time.localtime', 'time.localtime', ({}, {}), '()', False, 'import time\n'), ((43, 8, 43, 26), 'os.system', 'os.system', ({(43, 18, 43, 25): 'command'}, {}), '(command)', False, 'import os\n'), ((47, 8, 47, 26), 'os.system', 'os.system', ({(47, 18, 47, 25): 'command'}, {}), '(command)', False, 'import os\n'), ((53, 8, 53, 43), 'time.sleep', 'time.sleep', ({(53, 19, 53, 42): '(nighttime_period_min * 60)'}, {}), '(nighttime_period_min * 60)', False, 'import time\n'), ((69, 8, 69, 26), 'os.system', 'os.system', ({(69, 18, 69, 25): 'command'}, {}), '(command)', False, 'import os\n'), ((73, 8, 73, 26), 'os.system', 'os.system', ({(73, 18, 73, 25): 'command'}, {}), '(command)', False, 'import os\n'), ((76, 8, 76, 41), 'time.sleep', 'time.sleep', ({(76, 19, 76, 40): '(daytime_period_min * 60)'}, {}), '(daytime_period_min * 60)', False, 'import time\n'), ((51, 12, 51, 29), 'time.sleep', 'time.sleep', ({(51, 23, 51, 28): '(30 * 60)'}, {}), '(30 * 60)', False, 'import time\n'), ((30, 12, 30, 28), 'time.localtime', 'time.localtime', ({}, {}), '()', False, 'import time\n'), ((31, 12, 31, 28), 'time.localtime', 'time.localtime', ({}, {}), '()', False, 'import time\n'), ((32, 12, 32, 28), 'time.localtime', 'time.localtime', ({}, {}), '()', False, 'import time\n'), ((33, 12, 33, 28), 'time.localtime', 'time.localtime', ({}, {}), '()', False, 'import time\n'), ((34, 12, 34, 28), 'time.localtime', 'time.localtime', ({}, {}), '()', False, 'import time\n'), ((35, 12, 35, 28), 'time.localtime', 'time.localtime', ({}, {}), '()', False, 'import time\n'), ((50, 11, 50, 27), 'time.localtime', 'time.localtime', ({}, {}), '()', False, 'import time\n'), ((59, 12, 59, 28), 'time.localtime', 'time.localtime', ({}, {}), '()', False, 'import time\n'), ((60, 12, 60, 28), 'time.localtime', 'time.localtime', ({}, {}), '()', False, 'import time\n'), ((61, 12, 61, 28), 'time.localtime', 'time.localtime', ({}, {}), '()', False, 'import time\n'), ((62, 12, 62, 28), 'time.localtime', 'time.localtime', ({}, {}), '()', False, 'import time\n'), ((63, 12, 63, 28), 'time.localtime', 'time.localtime', ({}, {}), '()', False, 'import time\n'), ((64, 12, 64, 28), 'time.localtime', 'time.localtime', ({}, {}), '()', False, 'import time\n')] |