# ===== Inverted_Pendulum/antlr3/dfa.py (repo: ghazaleh-mahmoodi/-Preceptron-Implementation-without-framework, license: MIT) =====
"""ANTLR3 runtime package"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2012 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
from .constants import EOF
from .exceptions import NoViableAltException, BacktrackingFailed
class DFA(object):
"""@brief A DFA implemented as a set of transition tables.
Any state that has a semantic predicate edge is special; those states
are generated with if-then-else structures in a specialStateTransition()
which is generated by cyclicDFA template.
"""
def __init__(
self,
recognizer, decisionNumber,
eot, eof, min, max, accept, special, transition
):
## Which recognizer encloses this DFA? Needed to check backtracking
self.recognizer = recognizer
self.decisionNumber = decisionNumber
self.eot = eot
self.eof = eof
self.min = min
self.max = max
self.accept = accept
self.special = special
self.transition = transition
def predict(self, input):
"""
From the input stream, predict what alternative will succeed
using this DFA (representing the covering regular approximation
to the underlying CFL). Return an alternative number 1..n. Throw
an exception upon error.
"""
mark = input.mark()
s = 0 # we always start at s0
try:
for _ in range(50000):
specialState = self.special[s]
if specialState >= 0:
s = self.specialStateTransition(specialState, input)
if s == -1:
self.noViableAlt(s, input)
return 0
input.consume()
continue
if self.accept[s] >= 1:
return self.accept[s]
# look for a normal char transition
c = input.LA(1)
if c >= self.min[s] and c <= self.max[s]:
# move to next state
snext = self.transition[s][c-self.min[s]]
if snext < 0:
# was in range but not a normal transition
# must check EOT, which is like the else clause.
# eot[s]>=0 indicates that an EOT edge goes to another
# state.
if self.eot[s] >= 0: # EOT Transition to accept state?
s = self.eot[s]
input.consume()
# TODO: I had this as return accept[eot[s]]
# which assumed here that the EOT edge always
# went to an accept...faster to do this, but
# what about predicated edges coming from EOT
# target?
continue
self.noViableAlt(s, input)
return 0
s = snext
input.consume()
continue
if self.eot[s] >= 0:
s = self.eot[s]
input.consume()
continue
# EOF Transition to accept state?
if c == EOF and self.eof[s] >= 0:
return self.accept[self.eof[s]]
# not in range and not EOF/EOT, must be invalid symbol
self.noViableAlt(s, input)
return 0
else:
raise RuntimeError("DFA bang!")
finally:
input.rewind(mark)
def noViableAlt(self, s, input):
if self.recognizer._state.backtracking > 0:
raise BacktrackingFailed
nvae = NoViableAltException(
self.getDescription(),
self.decisionNumber,
s,
input
)
self.error(nvae)
raise nvae
def error(self, nvae):
"""A hook for debugging interface"""
pass
def specialStateTransition(self, s, input):
return -1
def getDescription(self):
return "n/a"
## def specialTransition(self, state, symbol):
## return 0
@classmethod
def unpack(cls, string):
"""@brief Unpack the runlength encoded table data.
Terence implemented packed table initializers, because Java has a
size restriction on .class files and the lookup tables can grow
pretty large. The generated JavaLexer.java of the Java.g example
would be about 15MB with uncompressed array initializers.
Python does not have any size restrictions, but the compilation of
such large source files seems to be pretty memory hungry. The memory
consumption of the python process grew to >1.5GB when importing a
15MB lexer, eating all my swap space, and I was too impatient to see
if it could finish at all. With packed initializers that are unpacked
at import time of the lexer module, everything works like a charm.
"""
ret = []
for i in range(0, len(string) - 1, 2):
(n, v) = ord(string[i]), ord(string[i + 1])
if v == 0xFFFF:
v = -1
ret += [v] * n
return ret
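# --- Added illustration (not part of the original dfa.py) ---
# A minimal sketch of the run-length decoding performed by DFA.unpack() above,
# applied to a small hand-built packed string; the sample values are invented.
def _unpack_demo():
    packed = u"\u0003\u0005\u0002\uffff"  # 3 copies of 5, then 2 copies of 0xFFFF (-1)
    ret = []
    for i in range(0, len(packed) - 1, 2):
        n, v = ord(packed[i]), ord(packed[i + 1])
        if v == 0xFFFF:
            v = -1
        ret += [v] * n
    assert ret == [5, 5, 5, -1, -1]
    return ret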
# ===== control/src/global_services.py (repo: mnemonia/transprotobot, license: Apache-2.0) =====
import logging
from mqtt_service import MqttService
import serial
import json
import platform
class GlobalServices():
LOG = logging.getLogger('GlobalServices')
def __init__(self):
self.mqtt = MqttService()
def on(self):
self.LOG.info('Starting GlobalServices')
self.mqtt.on()
if platform.system() == 'Linux':
self._ser = serial.Serial('/dev/ttyACM0',baudrate=9600,timeout=1)
else:
self._ser = serial.Serial('COM5',baudrate=9600,timeout=1,parity=serial.PARITY_NONE,bytesize=serial.EIGHTBITS,rtscts=1)
self.LOG.info('USB-port: {}'.format(self._ser.name))
def publish(self, topic, message):
self.mqtt.publish('/654baff5-cd72-472a-859a-925afe5056f3/transprotobot/' + topic, message)
def readFromVehicle(self):
# self.LOG.info('readFromVehicle')
if not self._ser.in_waiting:
return None
return self._ser.readline().rstrip()
def publishToVehicle(self, message):
self.LOG.info('publishToVehicle {}'.format(message))
msg = bytearray(json.dumps(message) + '\n', encoding='utf-8')
self._ser.write(msg)
self._ser.flush()
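# --- Added illustration (not part of the original global_services.py) ---
# Hedged sketch of how this MQTT/serial bridge might be driven; the topic name
# 'vehicle/raw' is invented and the loop is never started at import time.
def _bridge_loop_demo(services):
    while True:
        line = services.readFromVehicle()
        if line is not None:
            services.publish('vehicle/raw', line.decode('utf-8', errors='replace'))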
4a1efc9c2f03b6dc0185a54823774925604e94fc | 2,056 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/sensu/sensu_go/tests/unit/plugins/modules/test_datastore_info.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/sensu/sensu_go/tests/unit/plugins/modules/test_datastore_info.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/sensu/sensu_go/tests/unit/plugins/modules/test_datastore_info.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division, print_function
__metaclass__ = type
import sys
import pytest
from ansible_collections.sensu.sensu_go.plugins.module_utils import (
errors, utils,
)
from ansible_collections.sensu.sensu_go.plugins.modules import datastore_info
from .common.utils import (
AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args,
)
pytestmark = pytest.mark.skipif(
sys.version_info < (2, 7), reason="requires python2.7 or higher"
)
class TestDatastoreInfo(ModuleTestCase):
def test_get_all_datastores(self, mocker):
get_mock = mocker.patch.object(utils, "get")
get_mock.return_value = [dict(spec=1), dict(spec=2)]
set_module_args()
with pytest.raises(AnsibleExitJson) as context:
datastore_info.main()
_client, path = get_mock.call_args[0]
assert path == "/api/enterprise/store/v1/provider"
assert context.value.args[0]["objects"] == [1, 2]
def test_get_single_datastore(self, mocker):
get_mock = mocker.patch.object(utils, "get")
get_mock.return_value = dict(spec=4)
set_module_args(name="sample-datastore")
with pytest.raises(AnsibleExitJson) as context:
datastore_info.main()
_client, path = get_mock.call_args[0]
assert path == "/api/enterprise/store/v1/provider/sample-datastore"
assert context.value.args[0]["objects"] == [4]
def test_missing_single_datastore(self, mocker):
get_mock = mocker.patch.object(utils, "get")
get_mock.return_value = None
set_module_args(name="sample-datastore")
with pytest.raises(AnsibleExitJson) as context:
datastore_info.main()
assert context.value.args[0]["objects"] == []
def test_failure(self, mocker):
get_mock = mocker.patch.object(utils, "get")
get_mock.side_effect = errors.Error("Bad error")
set_module_args(name="sample-datastore")
with pytest.raises(AnsibleFailJson):
datastore_info.main()
# ===== athena.py (repo: lordgrenville/old_blog, license: MIT) =====
import datetime
import fileinput
import glob
import os
import sys
import uuid
from datetime import timedelta, timezone
from urllib.parse import urljoin
from feedgen.feed import FeedGenerator
from flask import Flask, make_response, render_template, request
from flask_flatpages import FlatPages
from flask_frozen import Freezer
from flask_static_compress import FlaskStaticCompress
import config
from flatpandoc import FlatPagesPandoc
DEBUG = True
FLATPAGES_AUTO_RELOAD = DEBUG
FLATPAGES_EXTENSION = ".md"
FREEZER_REMOVE_EXTRA_FILES = False
FREEZER_BASE_URL = "http://localhost/"
LOCAL_TIMEZONE = timezone(timedelta(seconds=7200), 'IST')
athena = Flask(__name__
# , root_path='/Users/josh/Documents/research/dev/athena'
)
athena.config.from_object(__name__)
pages = FlatPages(athena)
freezer = Freezer(athena)
athena.jinja_env.comment_start_string = "{##}"
FlatPagesPandoc("markdown+raw_tex+yaml_metadata_block", athena, pre_render=True)
compress = FlaskStaticCompress(athena)
@athena.route("/feed.rss")
def recent_feed():
fg = FeedGenerator()
fg.title(config.config["title"])
fg.subtitle(config.config["title"] + " Atom Feed")
fg.link(
{
"href": config.config["url"] + "/feed.rss",
"rel": "self",
"type": "application/rss+xml",
}
)
for page in pages:
if not page.meta.get("ispage"):
fe = fg.add_entry()
fe.title(page["title"])
fe.description((str(page.__html__())))
fe.link({"href": config.config["url"] + "/posts/" + page.path})
fe.guid(str(uuid.uuid4()))
fe.author({"name": config.config["author"]})
fe.updated(
datetime.datetime.combine(
page["date"], datetime.datetime.min.time(), tzinfo=LOCAL_TIMEZONE
)
)
fe.published(
datetime.datetime.combine(
page["date"], datetime.datetime.min.time(), tzinfo=LOCAL_TIMEZONE
)
)
response = make_response(fg.rss_str(pretty=True))
response.headers.set("Content-Type", "application/rss+xml")
return response
@athena.route("/")
def index():
posts = [page for page in pages if "ispage" not in page.meta]
hpages = [page for page in pages if "ispage" in page.meta]
return render_template(
"index.html", pages=posts, hpages=hpages, config=config.config
)
@athena.route("/<path:path>/")
def hardpagelink(path):
hpage = ""
for page in pages:
if page.path == path:
if page.meta["ispage"]:
hpage = page
hpages = [page for page in pages if "ispage" in page.meta]
return render_template("hard.html", page=hpage, hpages=hpages, config=config.config)
@athena.route("/posts/<path:path>/")
def page(path):
page = pages.get_or_404(path)
hpages = [page for page in pages if "ispage" in page.meta]
return render_template("page.html", page=page, hpages=hpages, config=config.config)
def cat():
allp = os.path.join(os.getcwd(), "pages", "all.bib")
bibs = os.path.join(os.getcwd(), "pages", "*.bib")
with open(allp, "w") as f:
for line in fileinput.input(glob.glob(bibs)):
f.write(line)
fileinput.close()
if __name__ == "__main__":
# cat()
if len(sys.argv) > 1 and sys.argv[1] == "build":
freezer.freeze()
else:
athena.run(port=5000)
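# --- Added illustration (not part of the original athena.py) ---
# Hedged sketch of the front-matter fields this app reads from each flatpage
# (page["title"], page["date"], page.meta["ispage"]); the values are invented.
_EXAMPLE_PAGE_META = {
    "title": "Example post",
    "date": datetime.date(2020, 1, 1),  # feeds the RSS updated/published timestamps
    # "ispage": True,                   # present only on "hard" pages, absent on posts
}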
4a1efde2cd811695280d0256de7b236457bfbb7a | 2,905 | py | Python | Python_For_Maya_Vol_02/Code/benchmark.py | justinfx/tutorials | 318447a34221492066a308d88dd424f9da5ba93c | [
"MIT"
] | 24 | 2017-04-16T16:03:57.000Z | 2021-07-16T08:45:42.000Z | Python_For_Maya_Vol_02/Code/benchmark.py | justinfx/tutorials | 318447a34221492066a308d88dd424f9da5ba93c | [
"MIT"
] | null | null | null | Python_For_Maya_Vol_02/Code/benchmark.py | justinfx/tutorials | 318447a34221492066a308d88dd424f9da5ba93c | [
"MIT"
] | 36 | 2017-04-17T03:28:33.000Z | 2021-09-22T15:38:35.000Z | """
Simple benchmark test:
maya.cmds vs PyMel vs Python API
1. Create a polyHelix that has about 20k vertices.
2. Loop over each vertex and move it by a random amount
3. Delete the helix
"""
import sys
import time
import random
# python commands test
import maya.cmds as cmds
# pymel test
import pymel.core as pm
# python api test
import maya.OpenMaya as OpenMaya
# options for cmds.polyHelix()
# 20020 vertices
HELIX_OPTS = dict( ch=True, o=True, c=20, h=30, w=4, r=.3, sa=20, sco=50 )
# The faster test. Produces even more of a speed difference.
# Less subdivisions ( ~3000 vertices )
HELIX_OPTS["sa"] = 2
# random movement range
RAND = random.Random(0)
LOW = -4
HIGH = 4
def testPyCmds():
start = time.time()
helix = cmds.polyHelix(**HELIX_OPTS)
pHelix = helix[0]
size = cmds.polyEvaluate(v=True)
for i in xrange(size):
x = RAND.uniform(LOW, HIGH)
attrib = '%s.vtx[%s]' % (pHelix, i)
cmds.move(x, attrib, x=True)
cmds.delete(pHelix)
end = time.time()
return end-start
def testPyApi():
start = time.time()
# creating the helix via the cmds module, for consistency
# in the helix object and number of vertices
helix = cmds.polyHelix(**HELIX_OPTS)
pHelix = helix[0]
sel = OpenMaya.MSelectionList()
node = OpenMaya.MObject()
sel.add(pHelix)
sel.getDependNode( 0, node )
vector = OpenMaya.MVector()
iter = OpenMaya.MItMeshVertex(node)
while not iter.isDone():
vector.x = RAND.uniform(LOW, HIGH)
iter.translateBy(vector)
iter.next()
OpenMaya.MGlobal.deleteNode(node)
end = time.time()
return end-start
def testPyMel():
start = time.time()
helix = pm.polyHelix(**HELIX_OPTS)
pHelix = helix[0]
# 20020 loops
for v in pHelix.vtx:
# strangely, it's faster to make a new vector
# object every time, as opposed to creating it
# once and changing the x value each time???
vector = pm.dt.Vector(x=RAND.uniform(LOW, HIGH))
v.translateBy(vector)
pm.delete(pHelix)
end = time.time()
return end-start
def testAll():
results = []
sys.stdout.write("Testing testPyCmds()\n")
sys.stdout.flush()
r = testPyCmds()
results.append((r, "PyCmds"))
sys.stdout.write("Testing testPyMel()\n")
sys.stdout.flush()
r = testPyMel()
results.append((r, "PyMel"))
sys.stdout.write("Testing testPyApi()\n")
sys.stdout.flush()
r = testPyApi()
results.append((r, "PyApi"))
results.sort()
fastest = results.pop(0)
print "\nResults from fastest to slowest..."
print "%s:\t%0.4f sec" % (fastest[1], fastest[0])
for r in results:
diff = r[0] / fastest[0]
print "%s:\t%0.4f sec (%0.2fx slower than %s)" % (r[1], r[0], diff, fastest[1])
# ===== python/day23-2.py (repo: kdrag0n/aoc2020, license: MIT) =====
#!/usr/bin/env python3
import sys
def ints(itr):
return [int(i) for i in itr]
with open(sys.argv[1], "r") as f:
cups = ints(f.read().replace("\n", ""))
class Node:
def __init__(self, value):
self.value = value
self.next = None
self.prev = None
def __str__(self):
return f'<{self.value}>'
def __repr__(self):
return str(self)
cm = max(cups) + 1
for i in range(1000000 - len(cups)):
cups.append(cm + i)
cupss = set(cups)
maxc = max(cups)
nodes = [(v, Node(v)) for v in cups]
nlut = dict(nodes)
cups = nodes[0][1]
#link
for i, (_, n) in enumerate(nodes[:-1]):
n.next = nodes[i+1][1]
last_node = nodes[-1][1]
def llist_to_list(n):
lst = []
while n != None:
lst.append(n.value)
n = n.next
return lst
for m in range(10000000):
if m % 10000 == 0:
print(f'-- move {m+1} --')
#print('popl')
cur = cups.value
#print('cups:', ' '.join(f'({c})' if c == cur else str(c) for c in llist_to_list(cups)))
cur_node = cups
cups = cups.next
pickup = [cups.value, cups.next.value, cups.next.next.value]
cups = cups.next.next.next
#print('pick up:', ', '.join(str(c) for c in pickup))
destl = cur - 1
##print('destsel')
###print(' init dest', destl)
while destl not in cupss or destl in pickup:
###print(' cand dest', destl)
if destl in pickup:
destl -= 1
else:
destl = maxc
#print('destination:', destl)
##print('insert')
destn = nlut[destl]
old_next = destn.next
#print('on', old_next)
destn.next = nlut[pickup[0]]
destn.next.next = nlut[pickup[1]]
destn.next.next.next = nlut[pickup[2]]
destn.next.next.next.next = old_next
if old_next == None:
last_node = destn.next.next.next
#print('nxts', destn, destn.next, destn.next.next, destn.next.next.next, destn.next.next.next.next)
##print('app')
last_node.next = cur_node
cur_node.next = None
last_node = cur_node
#print('last', last_node, last_node.next)
#print()
cupl = llist_to_list(cups)
print(cupl[cupl.index(1)+1], cupl[cupl.index(1)+2])
print(cupl[cupl.index(1)+1] * cupl[cupl.index(1)+2])
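# --- Added illustration (not part of the original day23-2.py) ---
# Hedged sketch of the destination-selection rule used in the main loop above:
# step down from current-1, skipping picked-up cups and wrapping to the
# highest label; the sample values are invented.
def _pick_destination(cur, pickup, labels, maxc):
    dest = cur - 1
    while dest not in labels or dest in pickup:
        if dest in pickup:
            dest -= 1
        else:
            dest = maxc
    return dest

assert _pick_destination(3, [8, 9, 1], set(range(1, 10)), 9) == 2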
4a1efed6321525583a6083e5d25b9cec0dd2e281 | 2,521 | py | Python | planemo/ci.py | gallardoalba/planemo | 3201c415a5d637984620d86b07bb9407d33833b5 | [
"CC-BY-3.0"
] | null | null | null | planemo/ci.py | gallardoalba/planemo | 3201c415a5d637984620d86b07bb9407d33833b5 | [
"CC-BY-3.0"
] | null | null | null | planemo/ci.py | gallardoalba/planemo | 3201c415a5d637984620d86b07bb9407d33833b5 | [
"CC-BY-3.0"
] | null | null | null | """Utilities for dealing with continous integration systems."""
from __future__ import print_function
import copy
import math
import os
import yaml
from planemo import git
from planemo import io
from planemo.shed import REPO_METADATA_FILES
def filter_paths(ctx, raw_paths, path_type="repo", **kwds):
"""Filter ``paths``.
``path_type`` is ``repo`` or ``file``.
"""
cwd = os.getcwd()
filter_kwds = copy.deepcopy(kwds)
changed_in_commit_range = kwds.get("changed_in_commit_range", None)
diff_paths = None
if changed_in_commit_range is not None:
diff_files = git.diff(ctx, cwd, changed_in_commit_range)
if path_type == "repo":
diff_dirs = set(os.path.dirname(p) for p in diff_files)
diff_paths = set()
for diff_dir in diff_dirs:
diff_path = metadata_file_in_path(diff_dir)
if diff_path:
diff_paths.add(diff_path)
break
else:
diff_paths = diff_files
unique_paths = set(os.path.relpath(p, cwd) for p in raw_paths)
if diff_paths is not None:
unique_paths = list(diff_paths)
filtered_paths = sorted(io.filter_paths(unique_paths, cwd=cwd, **filter_kwds))
excluded_paths = sorted(set(unique_paths) - set(filtered_paths))
if excluded_paths:
ctx.log("List of excluded paths: %s" % excluded_paths)
path_count = len(filtered_paths)
chunk_size = ((1.0 * path_count) / kwds["chunk_count"])
chunk = kwds["chunk"]
chunked_paths = []
for i, path in enumerate(filtered_paths):
if int(math.floor(i / chunk_size)) == chunk:
chunked_paths.append(path)
return chunked_paths
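# --- Added illustration (not part of the original planemo ci.py) ---
# Hedged sketch of the chunk-selection arithmetic used in filter_paths() above:
# with chunk_count=3 over 7 paths, chunk_size is 7/3, and path i falls into the
# chunk where floor(i / chunk_size) == chunk, giving splits [0-2], [3-4], [5-6].
def _chunk_demo():
    paths = ["p%d" % i for i in range(7)]
    chunk_size = (1.0 * len(paths)) / 3
    return [
        [p for i, p in enumerate(paths) if int(math.floor(i / chunk_size)) == c]
        for c in range(3)
    ]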
def metadata_file_in_path(diff_dir):
while diff_dir:
for metadata_file in REPO_METADATA_FILES:
if os.path.isfile(os.path.join(diff_dir, metadata_file)):
return diff_dir
diff_dir = os.path.dirname(diff_dir)
def group_paths(paths):
repos = {}
for path in paths:
repo = os.path.split(path)[0]
if repo not in repos:
repos[repo] = []
repos[repo].append(path)
return [" ".join(repos[_]) for _ in repos]
def print_path_list(paths, **kwds):
with io.open_file_or_standard_output(kwds["output"], "w") as f:
for path in paths:
print(path, file=f)
def print_as_yaml(item, **kwds):
with io.open_file_or_standard_output(kwds["output"], "w") as f:
f.write(yaml.safe_dump(item))
4a1f007531966697567814e6ace902b804359b3b | 2,501 | py | Python | models/points_rcnn/backbone_utils.py | periakiva/finding_berries | 1dfc7cf00c384321e39872921051dc9535355e53 | [
"MIT"
] | 11 | 2020-05-11T21:57:44.000Z | 2022-01-05T14:44:28.000Z | models/points_rcnn/backbone_utils.py | periakiva/finding_berries | 1dfc7cf00c384321e39872921051dc9535355e53 | [
"MIT"
] | 2 | 2020-07-29T10:08:36.000Z | 2022-01-18T15:38:57.000Z | models/points_rcnn/backbone_utils.py | periakiva/finding_berries | 1dfc7cf00c384321e39872921051dc9535355e53 | [
"MIT"
] | 2 | 2021-08-29T17:20:38.000Z | 2021-09-21T21:07:30.000Z | from collections import OrderedDict
from torch import nn
from torchvision.ops.feature_pyramid_network import FeaturePyramidNetwork, LastLevelMaxPool
from torchvision.ops import misc as misc_nn_ops
from ._utils import IntermediateLayerGetter
from . import resnet
class BackboneWithFPN(nn.Module):
"""
Adds a FPN on top of a model.
Internally, it uses torchvision.models._utils.IntermediateLayerGetter to
extract a submodel that returns the feature maps specified in return_layers.
The same limitations of IntermediateLayerGetter apply here.
Arguments:
backbone (nn.Module)
return_layers (Dict[name, new_name]): a dict containing the names
of the modules for which the activations will be returned as
the key of the dict, and the value of the dict is the name
of the returned activation (which the user can specify).
in_channels_list (List[int]): number of channels for each feature map
that is returned, in the order they are present in the OrderedDict
out_channels (int): number of channels in the FPN.
Attributes:
out_channels (int): the number of channels in the FPN
"""
def __init__(self, backbone, return_layers, in_channels_list, out_channels):
super(BackboneWithFPN, self).__init__()
self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
self.fpn = FeaturePyramidNetwork(
in_channels_list=in_channels_list,
out_channels=out_channels,
extra_blocks=LastLevelMaxPool(),
)
self.out_channels = out_channels
def forward(self, x):
x = self.body(x)
x = self.fpn(x)
return x
def resnet_fpn_backbone(backbone_name, pretrained):
backbone = resnet.__dict__[backbone_name](
pretrained=pretrained,
norm_layer=misc_nn_ops.FrozenBatchNorm2d)
# freeze layers
for name, parameter in backbone.named_parameters():
if 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:
parameter.requires_grad_(False)
return_layers = {'layer1': '0', 'layer2': '1', 'layer3': '2', 'layer4': '3'}
in_channels_stage2 = backbone.inplanes // 8
in_channels_list = [
in_channels_stage2,
in_channels_stage2 * 2,
in_channels_stage2 * 4,
in_channels_stage2 * 8,
]
out_channels = 256
return BackboneWithFPN(backbone, return_layers, in_channels_list, out_channels)
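# --- Added illustration (not part of the original backbone_utils.py) ---
# Hedged usage sketch of resnet_fpn_backbone() above, assuming the vendored
# resnet module exposes 'resnet50'; shapes assume a 224x224 input and nothing
# is executed at import time.
def _fpn_demo():
    import torch
    backbone = resnet_fpn_backbone('resnet50', pretrained=False)
    feats = backbone(torch.rand(1, 3, 224, 224))
    # OrderedDict keyed by return_layers ('0'..'3') plus 'pool' from
    # LastLevelMaxPool, each map with out_channels=256 channels,
    # e.g. ('0', (1, 256, 56, 56)) down to ('pool', (1, 256, 4, 4)).
    return [(k, tuple(v.shape)) for k, v in feats.items()]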
4a1f00bca2c520db2cee089bb2e0bcb4d94c4d91 | 270 | py | Python | tests/artificial/transf_Fisher/trend_MovingMedian/cycle_30/ar_12/test_artificial_32_Fisher_MovingMedian_30_12_20.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | tests/artificial/transf_Fisher/trend_MovingMedian/cycle_30/ar_12/test_artificial_32_Fisher_MovingMedian_30_12_20.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | 1 | 2019-11-30T23:39:38.000Z | 2019-12-01T04:34:35.000Z | tests/artificial/transf_Fisher/trend_MovingMedian/cycle_30/ar_12/test_artificial_32_Fisher_MovingMedian_30_12_20.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 30, transform = "Fisher", sigma = 0.0, exog_count = 20, ar_order = 12);
# ===== genemunge/data/gtex/__init__.py (repo: unlearnai/genemunge, license: CC-BY-4.0) =====
from . import process_gtex
# ===== tests/unit_tests/test_lm/test_mpirun.py (repo: eirrgang/radical.pilot, license: MIT) =====
# pylint: disable=protected-access, unused-argument, no-value-for-parameter
from unittest import mock, TestCase
import radical.utils as ru
from .test_common import setUp
from radical.pilot.agent.launch_method.mpirun import MPIRun
class TestMPIRun(TestCase):
# ------------------------------------------------------------------------------
#
@mock.patch.object(MPIRun, '__init__', return_value=None)
@mock.patch.object(MPIRun, '_get_mpi_info', return_value=[5,'ORTE'])
@mock.patch('radical.utils.raise_on')
@mock.patch('radical.utils.which', return_value='/usr/bin/mpirun')
def test_configure(self, mocked_init, mocked_get_mpi_info, mocked_raise_on,
mocked_which):
component = MPIRun(name=None, cfg=None, session=None)
component.name = 'MPIRun'
component._cfg = mock.Mock(resource='localhost')
component._configure()
self.assertEqual('mpirun', component.launch_command)
self.assertEqual(5, component.mpi_version)
self.assertEqual('ORTE', component.mpi_flavor)
# ------------------------------------------------------------------------------
#
@mock.patch.object(MPIRun, '__init__', return_value=None)
@mock.patch.object(MPIRun, '_get_mpi_info', return_value=[5,'ORTE'])
@mock.patch('radical.utils.raise_on')
@mock.patch('radical.utils.which', return_value='/usr/bin/mpirun')
def test_configure_rsh(self, mocked_init, mocked_get_mpi_info, mocked_raise_on,
mocked_which):
component = MPIRun(name=None, cfg=None, session=None)
component.name = 'MPIRun_rsh'
component._cfg = mock.Mock(resource='localhost')
component._configure()
self.assertEqual('mpirun', component.launch_command)
self.assertEqual(5, component.mpi_version)
self.assertEqual('ORTE', component.mpi_flavor)
# ------------------------------------------------------------------------------
#
@mock.patch.object(MPIRun, '__init__', return_value=None)
@mock.patch.object(MPIRun, '_get_mpi_info', return_value=[5,'ORTE'])
@mock.patch('radical.utils.raise_on')
@mock.patch('radical.utils.which', return_value='/usr/bin/mpirun')
def test_configure_mpt(self, mocked_init, mocked_get_mpi_info, mocked_raise_on,
mocked_which):
component = MPIRun(name=None, cfg=None, session=None)
component.name = 'MPIRun_mpt'
component._cfg = mock.Mock(resource='localhost')
component._configure()
self.assertEqual('mpirun', component.launch_command)
self.assertEqual(5, component.mpi_version)
self.assertEqual('ORTE', component.mpi_flavor)
# ------------------------------------------------------------------------------
#
@mock.patch.object(MPIRun, '__init__', return_value=None)
@mock.patch.object(MPIRun, '_get_mpi_info', return_value=[5,'ORTE'])
@mock.patch('radical.utils.raise_on')
@mock.patch('radical.utils.which', return_value='/usr/bin/mpirun')
def test_configure_ccmrun(self, mocked_init, mocked_get_mpi_info, mocked_raise_on,
mocked_which):
component = MPIRun(name=None, cfg=None, session=None)
component.name = 'MPIRun_ccmrun'
component._cfg = mock.Mock(resource='localhost')
component._configure()
self.assertEqual('mpirun', component.launch_command)
self.assertEqual(5, component.mpi_version)
self.assertEqual('ORTE', component.mpi_flavor)
# ------------------------------------------------------------------------------
#
@mock.patch.object(MPIRun, '__init__', return_value=None)
@mock.patch.object(MPIRun, '_get_mpi_info', return_value=[5,'ORTE'])
@mock.patch('radical.utils.raise_on')
@mock.patch('radical.utils.which', return_value='/usr/bin/mpirun')
def test_configure_dplace(self, mocked_init, mocked_get_mpi_info, mocked_raise_on,
mocked_which):
component = MPIRun(name=None, cfg=None, session=None)
component.name = 'MPIRun_dplace'
component._cfg = mock.Mock(resource='localhost')
component._configure()
self.assertEqual('mpirun', component.launch_command)
self.assertEqual(5, component.mpi_version)
self.assertEqual('ORTE', component.mpi_flavor)
# ------------------------------------------------------------------------------
#
@mock.patch.object(MPIRun, '__init__', return_value=None)
@mock.patch.object(MPIRun, '_get_mpi_info', return_value=[5, 'ORTE'])
@mock.patch('radical.utils.raise_on')
def test_construct_command(self, mocked_init,
mocked_get_mpi_info,
mocked_raise_on):
test_cases = setUp('lm', 'mpirun')
component = MPIRun(name=None, cfg=None, session=None)
component.name = 'MPIRun'
component._log = ru.Logger('dummy')
component._cfg = mock.Mock(resource='localhost')
component._mpt = False
component._rsh = False
component._ccmrun = ''
component._dplace = ''
component._configure()
component.launch_command = 'mpirun'
component.mpi_flavor = None
for task, result in test_cases:
command, hop = component.construct_command(task, None)
self.assertEqual([command, hop], result, task['uid'])
if __name__ == '__main__':
tc = TestMPIRun()
tc.test_construct_command()
tc.test_configure()
# ------------------------------------------------------------------------------
# pylint: enable=protected-access, unused-argument, no-value-for-parameter
# ===== rally_openstack/task/scenarios/vm/vmtasks.py (repo: jogeo/rally-openstack, license: Apache-2.0) =====
# Copyright 2014: Rackspace UK
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import pkgutil
import re
from rally.common import logging
from rally.common import validation
from rally import exceptions
from rally.plugins.common import validators
from rally.task import atomic
from rally.task import types
from rally.task import utils as rally_utils
from rally.utils import sshutils
from rally_openstack.common import consts
from rally_openstack.common.services import heat
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.cinder import utils as cinder_utils
from rally_openstack.task.scenarios.vm import utils as vm_utils
"""Scenarios that are to be run inside VM instances."""
LOG = logging.getLogger(__name__)
# TODO(andreykurilin): replace by advanced jsonschema(lollipop?!) someday
@validation.configure(name="valid_command", platform="openstack")
class ValidCommandValidator(validators.FileExistsValidator):
def __init__(self, param_name, required=True):
"""Checks that parameter is a proper command-specifying dictionary.
Ensure that the command dictionary is a proper command-specifying
dictionary described in 'vmtasks.VMTasks.boot_runcommand_delete'
docstring.
:param param_name: Name of parameter to validate
:param required: Boolean indicating that the command dictionary is
required
"""
super(ValidCommandValidator, self).__init__(param_name=param_name)
self.required = required
def check_command_dict(self, command):
"""Check command-specifying dict `command'
:raises ValueError: on error
"""
if not isinstance(command, dict):
self.fail("Command must be a dictionary")
# NOTE(pboldin): Here we check for the values not for presence of the
# keys due to template-driven configuration generation that can leave
# keys defined but values empty.
if command.get("interpreter"):
script_file = command.get("script_file")
if script_file:
if "script_inline" in command:
self.fail(
"Exactly one of script_inline or script_file with "
"interpreter is expected: %r" % command)
# User tries to upload a shell? Make sure it is the same as the interpreter
interpreter = command.get("interpreter")
interpreter = (interpreter[-1]
if isinstance(interpreter, (tuple, list))
else interpreter)
if (command.get("local_path")
and command.get("remote_path") != interpreter):
self.fail(
"When uploading an interpreter its path should be as well"
" specified as the `remote_path' string: %r" % command)
elif not command.get("remote_path"):
# No interpreter and no remote command to execute is given
self.fail(
"Supplied dict specifies no command to execute, either "
"interpreter or remote_path is required: %r" % command)
unexpected_keys = set(command) - {"script_file", "script_inline",
"interpreter", "remote_path",
"local_path", "command_args"}
if unexpected_keys:
self.fail(
"Unexpected command parameters: %s" % ", ".join(
unexpected_keys))
def validate(self, context, config, plugin_cls, plugin_cfg):
command = config.get("args", {}).get(self.param_name)
if command is None and not self.required:
return
try:
self.check_command_dict(command)
except ValueError as e:
return self.fail(str(e))
for key in "script_file", "local_path":
if command.get(key):
self._file_access_ok(
filename=command[key], mode=os.R_OK,
param_name=self.param_name, required=self.required)
@types.convert(image={"type": "glance_image"},
flavor={"type": "nova_flavor"})
@validation.add("image_valid_on_flavor", flavor_param="flavor",
image_param="image", fail_on_404_image=False)
@validation.add("valid_command", param_name="command")
@validation.add("number", param_name="port", minval=1, maxval=65535,
nullable=True, integer_only=True)
@validation.add("external_network_exists", param_name="floating_network")
@validation.add("required_services", services=[consts.Service.NOVA,
consts.Service.CINDER])
@validation.add("required_param_or_context",
param_name="image", ctx_name="image_command_customizer")
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["nova", "cinder"],
"keypair@openstack": {},
"allow_ssh@openstack": None},
name="VMTasks.boot_runcommand_delete",
platform="openstack")
class BootRuncommandDelete(vm_utils.VMScenario, cinder_utils.CinderBasic):
def run(self, flavor, username, password=None,
image=None,
command=None,
volume_args=None, floating_network=None, port=22,
use_floating_ip=True, force_delete=False, wait_for_ping=True,
max_log_length=None, **kwargs):
"""Boot a server, run script specified in command and delete server.
:param image: glance image name to use for the vm. Optional
in case of specified "image_command_customizer" context
:param flavor: VM flavor name
:param username: ssh username on server, str
:param password: Password on SSH authentication
:param command: Command-specifying dictionary that either specifies
remote command path via `remote_path' (can be uploaded from a
local file specified by `local_path`), an inline script via
`script_inline' or a local script file path using `script_file'.
Both `script_file' and `local_path' are checked to be accessible
by the `file_exists' validator code.
The `script_inline' and `script_file' both require an `interpreter'
value to specify the interpreter script should be run with.
Note that any of `interpreter' and `remote_path' can be an array
prefixed with environment variables and suffixed with args for
the `interpreter' command. `remote_path's last component must be
a path to a command to execute (also upload destination if a
`local_path' is given). Uploading an interpreter is possible
but requires that `remote_path' and `interpreter' path do match.
Examples:
.. code-block:: python
# Run a `local_script.pl' file sending it to a remote
# Perl interpreter
command = {
"script_file": "local_script.pl",
"interpreter": "/usr/bin/perl"
}
# Run an inline script sending it to a remote interpreter
command = {
"script_inline": "echo 'Hello, World!'",
"interpreter": "/bin/sh"
}
# Run a remote command
command = {
"remote_path": "/bin/false"
}
# Copy a local command and run it
command = {
"remote_path": "/usr/local/bin/fio",
"local_path": "/home/foobar/myfiodir/bin/fio"
}
# Copy a local command and run it with environment variable
command = {
"remote_path": ["HOME=/root", "/usr/local/bin/fio"],
"local_path": "/home/foobar/myfiodir/bin/fio"
}
# Run an inline script sending it to a remote interpreter
command = {
"script_inline": "echo \"Hello, ${NAME:-World}\"",
"interpreter": ["NAME=Earth", "/bin/sh"]
}
# Run an inline script sending it to an uploaded remote
# interpreter
command = {
"script_inline": "echo \"Hello, ${NAME:-World}\"",
"interpreter": ["NAME=Earth", "/tmp/sh"],
"remote_path": "/tmp/sh",
"local_path": "/home/user/work/cve/sh-1.0/bin/sh"
}
:param volume_args: volume args for booting server from volume
:param floating_network: external network name, for floating ip
:param port: ssh port for SSH connection
:param use_floating_ip: bool, floating or fixed IP for SSH connection
:param force_delete: whether to use force_delete for servers
:param wait_for_ping: whether to check connectivity on server creation
:param max_log_length: The number of tail nova console-log lines user
would like to retrieve
:param kwargs: extra arguments for booting the server
"""
if volume_args:
volume = self.cinder.create_volume(volume_args["size"],
imageRef=None)
kwargs["block_device_mapping"] = {"vdrally": "%s:::1" % volume.id}
if not image:
image = self.context["tenant"]["custom_image"]["id"]
server, fip = self._boot_server_with_fip(
image, flavor, use_floating_ip=use_floating_ip,
floating_network=floating_network,
key_name=self.context["user"]["keypair"]["name"],
**kwargs)
try:
if wait_for_ping:
self._wait_for_ping(fip["ip"])
code, out, err = self._run_command(
fip["ip"], port, username, password, command=command)
text_area_output = ["StdErr: %s" % (err or "(none)"),
"StdOut:"]
if code:
raise exceptions.ScriptError(
"Error running command %(command)s. "
"Error %(code)s: %(error)s" % {
"command": command, "code": code, "error": err})
# Let's try to load output data
try:
data = json.loads(out)
# 'echo 42' produces very json-compatible result
# - check it here
if not isinstance(data, dict):
raise ValueError
except ValueError:
# It's not a JSON, probably it's 'script_inline' result
data = []
except (exceptions.TimeoutException,
exceptions.SSHTimeout):
console_logs = self._get_server_console_output(server,
max_log_length)
LOG.debug("VM console logs:\n%s" % console_logs)
raise
finally:
self._delete_server_with_fip(server, fip,
force_delete=force_delete)
if isinstance(data, dict) and set(data) == {"additive", "complete"}:
for chart_type, charts in data.items():
for chart in charts:
self.add_output(**{chart_type: chart})
else:
# it's a dict with several unknown lines
text_area_output.extend(out.split("\n"))
self.add_output(complete={"title": "Script Output",
"chart_plugin": "TextArea",
"data": text_area_output})
@scenario.configure(context={"cleanup@openstack": ["nova", "heat"],
"keypair@openstack": {}, "network@openstack": {}},
name="VMTasks.runcommand_heat")
class RuncommandHeat(vm_utils.VMScenario):
def run(self, workload, template, files, parameters):
"""Run workload on stack deployed by heat.
Workload can be either file or resource:
.. code-block:: json
{"file": "/path/to/file.sh"}
{"resource": ["package.module", "workload.py"]}
Also it should contain "username" key.
Given file will be uploaded to `gate_node` and started. This script
should print `key` `value` pairs separated by colon. These pairs will
be presented in results.
Gate node should be accessible via ssh with keypair `key_name`, so
heat template should accept parameter `key_name`.
:param workload: workload to run
:param template: path to heat template file
:param files: additional template files
:param parameters: parameters for heat template
"""
keypair = self.context["user"]["keypair"]
parameters["key_name"] = keypair["name"]
network = self.context["tenant"]["networks"][0]
parameters["router_id"] = network["router_id"]
self.stack = heat.main.Stack(self, self.task,
template, files=files,
parameters=parameters)
self.stack.create()
for output in self.stack.stack.outputs:
if output["output_key"] == "gate_node":
ip = output["output_value"]
break
ssh = sshutils.SSH(workload["username"], ip, pkey=keypair["private"])
ssh.wait()
script = workload.get("resource")
if script:
script = pkgutil.get_data(*script)
else:
script = open(workload["file"]).read()
ssh.execute("cat > /tmp/.rally-workload", stdin=script)
ssh.execute("chmod +x /tmp/.rally-workload")
with atomic.ActionTimer(self, "runcommand_heat.workload"):
status, out, err = ssh.execute(
"/tmp/.rally-workload",
stdin=json.dumps(self.stack.stack.outputs))
rows = []
for line in out.splitlines():
row = line.split(":")
if len(row) != 2:
raise exceptions.ScriptError("Invalid data '%s'" % line)
rows.append(row)
if not rows:
raise exceptions.ScriptError("No data returned. Original error "
"message is %s" % err)
self.add_output(
complete={"title": "Workload summary",
"description": "Data generated by workload",
"chart_plugin": "Table",
"data": {
"cols": ["key", "value"],
"rows": rows}}
)
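# --- Added illustration (not part of the original vmtasks.py) ---
# Hedged sketch of a workload accepted by RuncommandHeat.run() above: the dict
# names a local script plus the ssh username, and the script prints key:value
# pairs that become table rows. The path and printed values are invented.
_EXAMPLE_WORKLOAD = {"username": "ubuntu", "file": "/path/to/workload.sh"}
_EXAMPLE_WORKLOAD_SCRIPT = (
    "#!/bin/sh\n"
    "# heat stack outputs arrive as JSON on stdin\n"
    "echo requests_per_second:1200\n"
    "echo errors:0\n"
)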
BASH_DD_LOAD_TEST = """
#!/bin/sh
# Load server and output JSON results ready to be processed
# by Rally scenario
for ex in awk top grep free tr df dc dd gzip
do
if ! type ${ex} >/dev/null
then
echo "Executable is required by script but not available\
on a server: ${ex}" >&2
return 1
fi
done
get_used_cpu_percent() {
echo 100\
$(top -b -n 1 | grep -i CPU | head -n 1 | awk '{print $8}' | tr -d %)\
- p | dc
}
get_used_ram_percent() {
local total=$(free | grep Mem: | awk '{print $2}')
local used=$(free | grep -- -/+\\ buffers | awk '{print $3}')
echo ${used} 100 \\* ${total} / p | dc
}
get_used_disk_percent() {
df -P / | grep -v Filesystem | awk '{print $5}' | tr -d %
}
get_seconds() {
(time -p ${1}) 2>&1 | awk '/real/{print $2}'
}
complete_load() {
local script_file=${LOAD_SCRIPT_FILE:-/tmp/load.sh}
local stop_file=${LOAD_STOP_FILE:-/tmp/load.stop}
local processes_num=${LOAD_PROCESSES_COUNT:-20}
local size=${LOAD_SIZE_MB:-5}
cat << EOF > ${script_file}
until test -e ${stop_file}
do dd if=/dev/urandom bs=1M count=${size} 2>/dev/null | gzip >/dev/null ; done
EOF
local sep
local cpu
local ram
local dis
rm -f ${stop_file}
for i in $(seq ${processes_num})
do
i=$((i-1))
sh ${script_file} &
cpu="${cpu}${sep}[${i}, $(get_used_cpu_percent)]"
ram="${ram}${sep}[${i}, $(get_used_ram_percent)]"
dis="${dis}${sep}[${i}, $(get_used_disk_percent)]"
sep=", "
done
> ${stop_file}
cat << EOF
{
"title": "Generate load by spawning processes",
"description": "Each process runs gzip for ${size}M urandom data\
in a loop",
"chart_plugin": "Lines",
"axis_label": "Number of processes",
"label": "Usage, %",
"data": [
["CPU", [${cpu}]],
["Memory", [${ram}]],
["Disk", [${dis}]]]
}
EOF
}
additive_dd() {
local c=${1:-50} # Megabytes
local file=/tmp/dd_test.img
local write=$(get_seconds "dd if=/dev/zero of=${file} bs=1M count=${c}")
local read=$(get_seconds "dd if=${file} of=/dev/null bs=1M count=${c}")
local gzip=$(get_seconds "gzip ${file}")
rm ${file}.gz
cat << EOF
{
"title": "Write, read and gzip file",
"description": "Using file '${file}', size ${c}Mb.",
"chart_plugin": "StackedArea",
"data": [
["write_${c}M", ${write}],
["read_${c}M", ${read}],
["gzip_${c}M", ${gzip}]]
},
{
"title": "Statistics for write/read/gzip",
"chart_plugin": "StatsTable",
"data": [
["write_${c}M", ${write}],
["read_${c}M", ${read}],
["gzip_${c}M", ${gzip}]]
}
EOF
}
cat << EOF
{
"additive": [$(additive_dd)],
"complete": [$(complete_load)]
}
EOF
"""
@types.convert(image={"type": "glance_image"},
flavor={"type": "nova_flavor"})
@validation.add("image_valid_on_flavor", flavor_param="flavor",
image_param="image")
@validation.add("number", param_name="port", minval=1, maxval=65535,
nullable=True, integer_only=True)
@validation.add("external_network_exists", param_name="floating_network")
@validation.add("required_services", services=[consts.Service.NOVA,
consts.Service.CINDER])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["nova", "cinder"],
"keypair@openstack": {},
"allow_ssh@openstack": None},
name="VMTasks.dd_load_test",
platform="openstack")
class DDLoadTest(BootRuncommandDelete):
@logging.log_deprecated_args(
"Use 'interpreter' to specify the interpreter to execute script from.",
"0.10.0", ["command"], once=True)
def run(self, flavor, username, password=None,
image=None, command=None, interpreter="/bin/sh",
volume_args=None, floating_network=None, port=22,
use_floating_ip=True, force_delete=False, wait_for_ping=True,
max_log_length=None, **kwargs):
"""Boot a server from a custom image and performs dd load test.
.. note:: dd load test is prepared script by Rally team. It checks
writing and reading metrics from the VM.
:param image: glance image name to use for the vm. Optional
in case of specified "image_command_customizer" context
:param flavor: VM flavor name
:param username: ssh username on server, str
:param password: Password on SSH authentication
:param interpreter: the interpreter to execute script with dd load test
(defaults to /bin/sh)
:param command: DEPRECATED. use interpreter instead.
:param volume_args: volume args for booting server from volume
:param floating_network: external network name, for floating ip
:param port: ssh port for SSH connection
:param use_floating_ip: bool, floating or fixed IP for SSH connection
:param force_delete: whether to use force_delete for servers
:param wait_for_ping: whether to check connectivity on server creation
:param max_log_length: The number of tail nova console-log lines user
would like to retrieve
:param kwargs: extra arguments for booting the server
"""
cmd = {"interpreter": interpreter,
"script_inline": BASH_DD_LOAD_TEST}
if command and "interpreter" in command:
cmd["interpreter"] = command["interpreter"]
return super(DDLoadTest, self).run(
flavor=flavor, username=username, password=password,
image=image, command=cmd,
volume_args=volume_args, floating_network=floating_network,
port=port, use_floating_ip=use_floating_ip,
force_delete=force_delete,
wait_for_ping=wait_for_ping, max_log_length=max_log_length,
**kwargs)
@types.convert(image={"type": "glance_image"},
flavor={"type": "nova_flavor"})
@validation.add("image_valid_on_flavor", flavor_param="flavor",
image_param="image", fail_on_404_image=False)
@validation.add("number", param_name="port", minval=1, maxval=65535,
nullable=True, integer_only=True)
@validation.add("external_network_exists", param_name="floating_network")
@validation.add("required_services", services=[consts.Service.DESIGNATE,
consts.Service.NEUTRON,
consts.Service.NOVA])
@validation.add("required_contexts", contexts=["network", "zones"])
@validation.add("required_platform", platform="openstack", users=True)
@validation.add("required_context_config", context_name="zones",
context_config={"set_zone_in_network": True})
@scenario.configure(context={"cleanup@openstack": ["designate",
"nova", "neutron"],
"keypair@openstack": {},
"allow_ssh@openstack": None},
name="VMTasks.check_designate_dns_resolving",
platform="openstack")
class CheckDesignateDNSResolving(vm_utils.VMScenario):
def run(self, image, flavor, username, password=None,
floating_network=None, port=22,
use_floating_ip=True, force_delete=False, max_log_length=None,
**kwargs):
"""Try to resolve hostname from VM against existing designate DNS.
- requires zone context with set_zone_in_network parameter
> zones:
> set_zone_in_network: True
- designate IP should be in default dns_nameservers list for new
networks or it can be specified in a network context
> network:
> dns_nameservers:
> - 8.8.8.8
> - 192.168.210.45
:param image: glance image name to use for the vm
:param flavor: VM flavor name
:param username: ssh username on server
:param password: Password on SSH authentication
:param floating_network: external network name, for floating ip
:param port: ssh port for SSH connection
:param use_floating_ip: bool, floating or fixed IP for SSH connection
:param force_delete: whether to use force_delete for servers
:param max_log_length: The number of tail nova console-log lines user
would like to retrieve
:param kwargs: optional args
"""
zone = self.context["tenant"]["zones"][0]["name"]
server, fip = self._boot_server_with_fip(
image, flavor, use_floating_ip=use_floating_ip,
floating_network=floating_network,
key_name=self.context["user"]["keypair"]["name"],
**kwargs)
script = f"cloud-init status -w; systemd-resolve --status; "\
f"dig $(hostname).{zone}"
command = {
"script_inline": script,
"interpreter": "/bin/bash"
}
try:
rally_utils.wait_for_status(
server,
ready_statuses=["ACTIVE"],
update_resource=rally_utils.get_from_manager(),
)
code, out, err = self._run_command(
fip["ip"], port, username, password, command=command)
if code:
raise exceptions.ScriptError(
"Error running command %(command)s. "
"Error %(code)s: %(error)s" % {
"command": command, "code": code, "error": err})
else:
if not re.findall(".*ANSWER SECTION.*", out, re.MULTILINE):
raise exceptions.ScriptError(
f"Error running {script}. "
f"Error: Missing ANSWER section in the output {out}")
except (exceptions.TimeoutException,
exceptions.SSHTimeout):
console_logs = self._get_server_console_output(server,
max_log_length)
LOG.debug("VM console logs:\n%s" % console_logs)
raise
finally:
self._delete_server_with_fip(server, fip,
force_delete=force_delete)
self.add_output(complete={
"title": "Script StdOut",
"chart_plugin": "TextArea",
"data": str(out).split("\n")
})
if err:
self.add_output(complete={
"title": "Script StdErr",
"chart_plugin": "TextArea",
"data": err.split("\n")
})
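# --- Added illustration (not part of the original vmtasks.py) ---
# Hedged sketch of the task context that CheckDesignateDNSResolving's docstring
# asks for; the second nameserver address is taken from that docstring.
_EXAMPLE_DESIGNATE_CONTEXT = {
    "zones": {"set_zone_in_network": True},
    "network": {"dns_nameservers": ["8.8.8.8", "192.168.210.45"]},
}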
# ===== tools/harness-automation/doc/conf.py (repo: tpmanley/openthread, license: BSD-3-Clause) =====
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#
# Thread Harness Automation documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 29 15:18:24 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import sphinx_rtd_theme
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinxcontrib.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Thread Harness Automation'
copyright = u'2019, OpenThread'
author = u'OpenThread'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.5'
# The full version, including alpha/beta/rc tags.
release = u'0.5'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'Thread Harness Automation v0.5'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'ThreadHarnessAutomationdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
'ThreadHarnessAutomation.tex',
u'Thread Harness Automation Documentation',
u'OpenThread',
'manual',
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
'threadharnessautomation',
u'Thread Harness Automation Documentation',
[author],
1,
)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
'ThreadHarnessAutomation',
u'Thread Harness Automation Documentation',
author,
'ThreadHarnessAutomation',
'One line description of project.',
'Miscellaneous',
)
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
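# ---------------------------------------------------------------------------
# Editor's illustrative note (not part of the original conf.py): with this
# configuration in place, the HTML documentation is usually generated from the
# doc/ directory with a standard Sphinx invocation, for example
#
#     sphinx-build -b html . _build/html
#
# The output path "_build/html" is an assumption for the example; the
# exclude_patterns entry above only assumes that builds land in "_build".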
| 30.55409 | 80 | 0.711917 |
4a1f02cea704c1def874e6c2354258be2209169b | 4,593 | py | Python | setup.py | clayrisser/syncrepl | 9b7b93acda5e2652860ede882f48e07228d823fb | ["CC0-1.0", "BSD-3-Clause"] | 13 | 2017-05-27T00:20:11.000Z | 2021-05-31T14:53:32.000Z | setup.py | clayrisser/syncrepl | 9b7b93acda5e2652860ede882f48e07228d823fb | ["CC0-1.0", "BSD-3-Clause"] | 33 | 2017-06-06T05:47:12.000Z | 2021-02-03T15:43:44.000Z | setup.py | clayrisser/syncrepl | 9b7b93acda5e2652860ede882f48e07228d823fb | ["CC0-1.0", "BSD-3-Clause"] | 7 | 2017-09-21T11:49:11.000Z | 2021-11-10T10:36:30.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: sw=4 ts=4 et
# syncrepl_client installer code.
#
# Refer to the AUTHORS file for copyright statements.
#
# This file contains only factual information.
# Therefore, this file is likely not copyrightable.
# As such, this file is in the public domain.
# For locations where public domain does not exist, this file is licensed
# under the Creative Commons CC0 Public Domain Dedication, the text of which
# may be found in the file `LICENSE_others.md` that was included with this
# distribution, and also at
# https://github.com/akkornel/syncrepl/blob/master/LICENSE_others.md
import re
import setuptools
from setuptools import setup, find_packages
from sys import argv, version_info
# Thanks to https://hynek.me/articles/conditional-python-dependencies/
# for helping me understand the mess that is requirements specifications.
setuptools_under_18 = False
if int(setuptools.__version__.split('.', 1)[0]) < 18:
setuptools_under_18 = True
install_requirements = list()
extra_requirements = dict()
# Block wheel creation on old setuptools
if ((setuptools_under_18 is True) and
('bdist_wheel' in argv)
):
raise OSError('setuptools is too old to create good wheel files.')
# Make sure we have Python 2.7, or 3.3+
# This is covered again later in the 'python_requires' option, but let's be
# safe.
if ((version_info[0] == 2) and
(version_info[1] < 7)
):
raise OSError('With Python 2, Python 2.7 is required.')
if ((version_info[0] == 3) and
(version_info[1] < 3)
):
raise OSError('With Python 3, Python 3.3 or later is required.')
# Python 3.3 and lower require enum34
if setuptools_under_18 is True:
if ((version_info[0] == 2) or
((version_info[0] == 3) and
(version_info[1] < 4)
)
):
install_requirements.append('enum34')
else:
extra_requirements[":python_version<'3.4'"] = ['enum34']
# Python 2 requires python-ldap; Python 3 requires pyldap
if setuptools_under_18 is True:
if version_info[0] == 2:
install_requirements.append('python-ldap')
else:
install_requirements.append('pyldap')
else:
extra_requirements[":python_version<='2.7'"] = ['python-ldap>=99']
extra_requirements[":python_version>='3'"] = ['pyldap>=2.4.37']
# We need pyasn1.
# Well, actually python-ldap/pyldap require pyasn1, but it's an optional
# dependency for them, as it is only used with syncrepl. So, we require it!
# NOTE: We can't use the newest versions.
# See https://github.com/akkornel/syncrepl/issues/18
install_requirements.append('pyasn1<0.3.1,>=0.2.2')
# Have code pull the version number from _version.py
def version():
with open('syncrepl_client/_version.py', encoding='utf8') as file:
regex = r"^__version__ = '(.+)'$"
matches = re.search(regex, file.read(), re.M)
if matches:
return matches.group(1)
else:
raise LookupError('Unable to find version number')
# Have code pull the long description from our README
def readme():
with open('README.rst', encoding='utf8') as file:
return file.read()
# Let setuptools handle the rest
setup(
name = 'syncrepl-client',
version = version(),
description = 'An easier-to-use LDAP syncrepl client',
long_description = readme(),
keywords = 'ldap syncrepl',
author = 'A. Karl Kornel',
author_email = '[email protected]',
url = 'http://github.com/akkornel/syncrepl',
packages = find_packages(),
scripts = ['syncrepl-client'],
zip_safe = True,
include_package_data = True,
python_requires = '>=2.7,!=3.0.*,!=3.1.*,!=3.2.*',
install_requires = install_requirements,
extras_require = extra_requirements,
provides = ['syncrepl_client'],
license = 'BSD 3-Clause',
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Operating System :: Unix',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: System :: Systems Administration :: Authentication/Directory :: LDAP'
]
)
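# ---------------------------------------------------------------------------
# Editor's illustrative note (not part of the original setup.py): version()
# above scrapes syncrepl_client/_version.py with the regex
# ^__version__ = '(.+)'$, so that file is expected to contain a single-quoted
# assignment such as the following (the exact number shown is an assumption,
# not the real release):
#
#     __version__ = '1.0.0'
#
# Any other layout (double quotes, a computed string) would not match the
# regex and version() would raise LookupError.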
| 32.118881 | 87 | 0.66732 |
4a1f037b50523584988f70adf3dc0001d807ec0b | 1,097 | py | Python | setup.py | jimwwalker/perfrunner | fda19ce9d860f8548b3feeb761b9a70598500cce | ["Apache-2.0"] | null | null | null | setup.py | jimwwalker/perfrunner | fda19ce9d860f8548b3feeb761b9a70598500cce | ["Apache-2.0"] | 4 | 2021-04-20T17:14:17.000Z | 2022-02-11T03:42:49.000Z | setup.py | jimwwalker/perfrunner | fda19ce9d860f8548b3feeb761b9a70598500cce | ["Apache-2.0"] | 1 | 2018-06-25T18:57:10.000Z | 2018-06-25T18:57:10.000Z |
from setuptools import setup, Extension
fastdocgen = Extension('fastdocgen', sources=['spring/fastdocgen.c'])
setup(
name='perfrunner',
entry_points={
'console_scripts': [
'cloudrunner = perfrunner.utils.cloudrunner:main',
'cluster = perfrunner.utils.cluster:main',
'debug = perfrunner.utils.debug:main',
'go_dependencies = perfrunner.utils.go_dependencies:main',
'jenkins = perfrunner.utils.jenkins:main',
'hidefast = perfrunner.utils.hidefast:main',
'install = perfrunner.utils.install:main',
'perfrunner = perfrunner.__main__:main',
'recovery = perfrunner.utils.recovery:main',
'spring = spring.__main__:main',
'stats = perfrunner.utils.stats:main',
'templater = perfrunner.utils.templater:main',
'trigger = perfrunner.utils.trigger:main',
'verify_logs = perfrunner.utils.verify_logs:main',
'weekly = perfrunner.utils.weekly:main',
],
},
ext_modules=[
fastdocgen
],
)
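# ---------------------------------------------------------------------------
# Editor's illustrative note (not part of the original setup.py): installing
# the package is what exposes the console_scripts listed above as shell
# commands. A typical development install is assumed to look like
#
#     pip install -e .
#     perfrunner --help
#
# The fastdocgen extension additionally requires a C toolchain, since it is
# compiled from spring/fastdocgen.c during installation.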
| 36.566667 | 70 | 0.609845 |
4a1f04acc5daace43ca82f3ca958c2067b88d92b | 5,827 | py | Python | python/GafferDispatchUI/PythonCommandUI.py | ddesmond/gaffer | 4f25df88103b7893df75865ea919fb035f92bac0 | ["BSD-3-Clause"] | 561 | 2016-10-18T04:30:48.000Z | 2022-03-30T06:52:04.000Z | python/GafferDispatchUI/PythonCommandUI.py | ddesmond/gaffer | 4f25df88103b7893df75865ea919fb035f92bac0 | ["BSD-3-Clause"] | 1,828 | 2016-10-14T19:01:46.000Z | 2022-03-30T16:07:19.000Z | python/GafferDispatchUI/PythonCommandUI.py | ddesmond/gaffer | 4f25df88103b7893df75865ea919fb035f92bac0 | ["BSD-3-Clause"] | 120 | 2016-10-18T15:19:13.000Z | 2021-12-20T16:28:23.000Z |
##########################################################################
#
# Copyright (c) 2015, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import inspect
import Gaffer
import GafferUI
import GafferDispatch
Gaffer.Metadata.registerNode(
GafferDispatch.PythonCommand,
"description",
"""
Runs python code.
""",
plugs = {
"command" : (
"description",
"""
The command to run. This may reference any of the
variables by name, and also the node itself as `self`
and the current Context as `context`.
""",
"plugValueWidget:type", "GafferDispatchUI.PythonCommandUI._CommandPlugValueWidget",
"layout:label", "",
),
"variables" : (
"description",
"""
An arbitrary set of variables which can be accessed via
the `variables` dictionary within the python command.
""",
"layout:section", "Variables",
),
"sequence" : (
"description",
"""
Calls the command once for each sequence, instead of once
per frame. In this mode, an additional variable called `frames`
is available to the command, containing a list of all frame
numbers for which execution should be performed. The Context may
be updated to reference any frame from this list, and accessing
a variable returns the value for the current frame.
A typical structure for the command might look something like this :
```
# Do some one-time initialization
...
# Process all frames
for frame in frames :
context.setFrame( frame )
# Read variables after setting the frame to get
# the right values for that frame.
v = variables["v"]
...
# Do some one-time finalization
...
```
""",
"layout:section", "Advanced",
),
}
)
##########################################################################
# _CodePlugValueWidget
##########################################################################
class _CommandPlugValueWidget( GafferUI.PlugValueWidget ) :
def __init__( self, plug, **kw ) :
self.__codeWidget = GafferUI.CodeWidget()
GafferUI.PlugValueWidget.__init__( self, self.__codeWidget, plug, **kw )
self.__codeWidget._qtWidget().setPlaceholderText(
inspect.cleandoc(
"""
# Global variables :
#
# `context` : Context the command is being executed in.
# `variables` : Contents of the Variables tab.
"""
)
)
self.__codeWidget.setHighlighter( GafferUI.CodeWidget.PythonHighlighter() )
self.__codeWidget.setCommentPrefix( "#" )
self.__codeWidget.activatedSignal().connect( Gaffer.WeakMethod( self.__setPlugValue ), scoped = False )
self.__codeWidget.editingFinishedSignal().connect( Gaffer.WeakMethod( self.__setPlugValue ), scoped = False )
node = self.__pythonCommandNode()
if node is not None :
node.plugDirtiedSignal().connect( Gaffer.WeakMethod( self.__pythonCommandPlugDirtied ), scoped = False )
self.__updateCompleter()
self._updateFromPlug()
def _updateFromPlug( self ) :
if self.getPlug() is not None :
with self.getContext() :
try :
value = self.getPlug().getValue()
except :
value = None
if value is not None :
self.__codeWidget.setText( value )
self.__codeWidget.setErrored( value is None )
self.__codeWidget.setEditable( self._editable() )
def __setPlugValue( self, *unused ) :
if not self._editable() :
return
text = self.__codeWidget.getText()
with Gaffer.UndoScope( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
self.getPlug().setValue( text )
def __pythonCommandNode( self ) :
def walk( plug ) :
if isinstance( plug.parent(), GafferDispatch.PythonCommand ) :
return plug.parent()
for output in plug.outputs() :
r = walk( output )
if r is not None :
return r
return None
return walk( self.getPlug() )
def __pythonCommandPlugDirtied( self, plug ) :
if plug == plug.node()["variables"] :
self.__updateCompleter()
def __updateCompleter( self ) :
node = self.__pythonCommandNode()
if node is not None :
with self.getContext() :
self.__codeWidget.setCompleter(
GafferUI.CodeWidget.PythonCompleter( node._executionDict() )
)
else :
self.__codeWidget.setCompleter( None )
| 28.014423 | 111 | 0.666381 |
4a1f0502fa6184468c6b97026d8ee6d918602ff1 | 1,819 | py | Python | online_judge/blueprints/auth.py | ashimaathri/online-judge | 596ac26ad9b6ffd44fbee16274a45d39d14ed8e8 | ["Apache-2.0"] | null | null | null | online_judge/blueprints/auth.py | ashimaathri/online-judge | 596ac26ad9b6ffd44fbee16274a45d39d14ed8e8 | ["Apache-2.0"] | null | null | null | online_judge/blueprints/auth.py | ashimaathri/online-judge | 596ac26ad9b6ffd44fbee16274a45d39d14ed8e8 | ["Apache-2.0"] | 3 | 2021-04-08T14:57:34.000Z | 2021-04-09T07:34:07.000Z |
import json
from flask import (render_template, Blueprint, request, session, current_app, redirect, url_for)
from online_judge.db.user import User
from online_judge.helpers.session import redirect_if_authenticated
auth = Blueprint('auth', __name__)
def validate_form():
username = request.form['username']
password = request.form['password']
error = None
if not username:
error = json.dumps({'error': 'Username absent'})
if not password:
error = json.dumps({'error': 'Password absent'})
return username, password, error
@auth.route('/', methods=['GET'])
@redirect_if_authenticated
def display_login_form():
return render_template('login_register.html')
@auth.route('/login', methods=['POST'])
def login():
username, password, error = validate_form()
if error:
return error, 400
if not User.exists(username):
return json.dumps({'error': 'Invalid credentials'}), 400
user = User(username)
if user.verify(password):
session['username'] = username
try:
return redirect(request.args['next'])
except KeyError:
return redirect(url_for('home_page.display_problem_list'))
else:
return json.dumps({'error': 'Invalid credentials'}), 400
@auth.route('/signup', methods=['POST'])
def signup():
username, password, error = validate_form()
if error:
return error, 400
if User.exists(username):
return json.dumps({'error': 'Username exists'}), 400
else:
User(username, password).save()
return json.dumps({'status': 'success'}), 200
@auth.route('/logout', methods=['GET'])
def logout():
current_app.session_interface.store.remove({'sid': session.sid})
session.clear()
return redirect(url_for('.display_login_form'))
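# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original blueprint module):
# the routes above only become reachable once the blueprint is registered on
# a Flask application. The app object, secret key and debug run below are
# assumptions for the example, not the project's real wiring (which also
# configures a server-side session store used by logout()).
if __name__ == "__main__":  # pragma: no cover - example only
    from flask import Flask

    example_app = Flask(__name__)
    example_app.secret_key = "change-me"  # placeholder value (assumption)
    example_app.register_blueprint(auth)
    example_app.run(debug=True)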
| 27.560606 | 96 | 0.661902 |
4a1f0539ee6e2a07582be0b622591c8f92a5b5bf | 15,510 | py | Python | airflow/providers/cncf/kubernetes/utils/pod_manager.py | MoBagel/airflow | 0905e386f17e34d96f6ee575404c62b13242c75d | ["Apache-2.0"] | null | null | null | airflow/providers/cncf/kubernetes/utils/pod_manager.py | MoBagel/airflow | 0905e386f17e34d96f6ee575404c62b13242c75d | ["Apache-2.0"] | null | null | null | airflow/providers/cncf/kubernetes/utils/pod_manager.py | MoBagel/airflow | 0905e386f17e34d96f6ee575404c62b13242c75d | ["Apache-2.0"] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Launches PODs"""
import json
import math
import time
import warnings
from contextlib import closing
from dataclasses import dataclass
from datetime import datetime
from typing import TYPE_CHECKING, Iterable, Optional, Tuple, cast
import pendulum
import tenacity
from kubernetes import client, watch
from kubernetes.client.models.v1_pod import V1Pod
from kubernetes.client.rest import ApiException
from kubernetes.stream import stream as kubernetes_stream
from pendulum import DateTime
from pendulum.parsing.exceptions import ParserError
from urllib3.exceptions import HTTPError as BaseHTTPError
from airflow.exceptions import AirflowException
from airflow.kubernetes.kube_client import get_kube_client
from airflow.kubernetes.pod_generator import PodDefaults
from airflow.utils.log.logging_mixin import LoggingMixin
if TYPE_CHECKING:
from kubernetes.client.models.core_v1_event_list import CoreV1EventList
class PodLaunchFailedException(AirflowException):
"""When pod launching fails in KubernetesPodOperator."""
def should_retry_start_pod(exception: BaseException) -> bool:
"""Check if an Exception indicates a transient error and warrants retrying"""
if isinstance(exception, ApiException):
return exception.status == 409
return False
class PodPhase:
"""
Possible pod phases
See https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase.
"""
PENDING = 'Pending'
RUNNING = 'Running'
FAILED = 'Failed'
SUCCEEDED = 'Succeeded'
terminal_states = {FAILED, SUCCEEDED}
def container_is_running(pod: V1Pod, container_name: str) -> bool:
"""
Examines V1Pod ``pod`` to determine whether ``container_name`` is running.
If that container is present and running, returns True. Returns False otherwise.
"""
container_statuses = pod.status.container_statuses if pod and pod.status else None
if not container_statuses:
return False
container_status = next(iter([x for x in container_statuses if x.name == container_name]), None)
if not container_status:
return False
return container_status.state.running is not None
def get_container_termination_message(pod: V1Pod, container_name: str):
try:
container_statuses = pod.status.container_statuses
container_status = next(iter([x for x in container_statuses if x.name == container_name]), None)
return container_status.state.terminated.message if container_status else None
except (AttributeError, TypeError):
return None
@dataclass
class PodLoggingStatus:
"""Used for returning the status of the pod and last log time when exiting from `fetch_container_logs`"""
running: bool
last_log_time: Optional[DateTime]
class PodManager(LoggingMixin):
"""
Helper class for creating, monitoring, and otherwise interacting with Kubernetes pods
for use with the KubernetesPodOperator
"""
def __init__(
self,
kube_client: client.CoreV1Api = None,
in_cluster: bool = True,
cluster_context: Optional[str] = None,
):
"""
Creates the launcher.
:param kube_client: kubernetes client
:param in_cluster: whether we are in cluster
:param cluster_context: context of the cluster
"""
super().__init__()
self._client = kube_client or get_kube_client(in_cluster=in_cluster, cluster_context=cluster_context)
self._watch = watch.Watch()
def run_pod_async(self, pod: V1Pod, **kwargs) -> V1Pod:
"""Runs POD asynchronously"""
sanitized_pod = self._client.api_client.sanitize_for_serialization(pod)
json_pod = json.dumps(sanitized_pod, indent=2)
self.log.debug('Pod Creation Request: \n%s', json_pod)
try:
resp = self._client.create_namespaced_pod(
body=sanitized_pod, namespace=pod.metadata.namespace, **kwargs
)
self.log.debug('Pod Creation Response: %s', resp)
except Exception as e:
self.log.exception(
'Exception when attempting to create Namespaced Pod: %s', str(json_pod).replace("\n", " ")
)
raise e
return resp
def delete_pod(self, pod: V1Pod) -> None:
"""Deletes POD"""
try:
self._client.delete_namespaced_pod(
pod.metadata.name, pod.metadata.namespace, body=client.V1DeleteOptions()
)
except ApiException as e:
# If the pod is already deleted
if e.status != 404:
raise
@tenacity.retry(
stop=tenacity.stop_after_attempt(3),
wait=tenacity.wait_random_exponential(),
reraise=True,
retry=tenacity.retry_if_exception(should_retry_start_pod),
)
def create_pod(self, pod: V1Pod) -> V1Pod:
"""Launches the pod asynchronously."""
return self.run_pod_async(pod)
def await_pod_start(self, pod: V1Pod, startup_timeout: int = 120) -> None:
"""
Waits for the pod to reach phase other than ``Pending``
:param pod:
:param startup_timeout: Timeout (in seconds) for startup of the pod
(if pod is pending for too long, fails task)
:return:
"""
curr_time = datetime.now()
while True:
remote_pod = self.read_pod(pod)
if remote_pod.status.phase != PodPhase.PENDING:
break
self.log.warning("Pod not yet started: %s", pod.metadata.name)
delta = datetime.now() - curr_time
if delta.total_seconds() >= startup_timeout:
msg = (
f"Pod took longer than {startup_timeout} seconds to start. "
"Check the pod events in kubernetes to determine why."
)
raise PodLaunchFailedException(msg)
time.sleep(1)
def follow_container_logs(self, pod: V1Pod, container_name: str) -> PodLoggingStatus:
warnings.warn(
"Method `follow_container_logs` is deprecated. Use `fetch_container_logs` instead"
"with option `follow=True`.",
DeprecationWarning,
)
return self.fetch_container_logs(pod=pod, container_name=container_name, follow=True)
def fetch_container_logs(
self, pod: V1Pod, container_name: str, *, follow=False, since_time: Optional[DateTime] = None
) -> PodLoggingStatus:
"""
Follows the logs of container and streams to airflow logging.
Returns when container exits.
"""
def consume_logs(*, since_time: Optional[DateTime] = None, follow: bool = True) -> Optional[DateTime]:
"""
Tries to follow container logs until container completes.
For a long-running container, sometimes the log read may be interrupted
Such errors of this kind are suppressed.
Returns the last timestamp observed in logs.
"""
timestamp = None
try:
logs = self.read_pod_logs(
pod=pod,
container_name=container_name,
timestamps=True,
since_seconds=(
math.ceil((pendulum.now() - since_time).total_seconds()) if since_time else None
),
follow=follow,
)
for raw_line in logs:
line = raw_line.decode('utf-8', errors="backslashreplace")
timestamp, message = self.parse_log_line(line)
self.log.info(message)
except BaseHTTPError as e:
self.log.warning(
"Reading of logs interrupted with error %r; will retry. "
"Set log level to DEBUG for traceback.",
e,
)
self.log.debug(
"Traceback for interrupted logs read for pod %r",
pod.metadata.name,
exc_info=True,
)
return timestamp or since_time
# note: `read_pod_logs` follows the logs, so we shouldn't necessarily *need* to
# loop as we do here. But in a long-running process we might temporarily lose connectivity.
# So the looping logic is there to let us resume following the logs.
last_log_time = since_time
while True:
last_log_time = consume_logs(since_time=last_log_time, follow=follow)
if not self.container_is_running(pod, container_name=container_name):
return PodLoggingStatus(running=False, last_log_time=last_log_time)
if not follow:
return PodLoggingStatus(running=True, last_log_time=last_log_time)
else:
self.log.warning(
'Pod %s log read interrupted but container %s still running',
pod.metadata.name,
container_name,
)
time.sleep(1)
def await_container_completion(self, pod: V1Pod, container_name: str) -> None:
while not self.container_is_running(pod=pod, container_name=container_name):
time.sleep(1)
def await_pod_completion(self, pod: V1Pod) -> V1Pod:
"""
Monitors a pod and returns the final state
:param pod: pod spec that will be monitored
:return: Tuple[State, Optional[str]]
"""
while True:
remote_pod = self.read_pod(pod)
if remote_pod.status.phase in PodPhase.terminal_states:
break
self.log.info('Pod %s has phase %s', pod.metadata.name, remote_pod.status.phase)
time.sleep(2)
return remote_pod
def parse_log_line(self, line: str) -> Tuple[Optional[DateTime], str]:
"""
Parse K8s log line and returns the final state
:param line: k8s log line
:return: timestamp and log message
:rtype: Tuple[str, str]
"""
split_at = line.find(' ')
if split_at == -1:
self.log.error(
"Error parsing timestamp (no timestamp in message %r). "
"Will continue execution but won't update timestamp",
line,
)
return None, line
timestamp = line[:split_at]
message = line[split_at + 1 :].rstrip()
try:
last_log_time = cast(DateTime, pendulum.parse(timestamp))
except ParserError:
self.log.error("Error parsing timestamp. Will continue execution but won't update timestamp")
return None, line
return last_log_time, message
def container_is_running(self, pod: V1Pod, container_name: str) -> bool:
"""Reads pod and checks if container is running"""
remote_pod = self.read_pod(pod)
return container_is_running(pod=remote_pod, container_name=container_name)
@tenacity.retry(stop=tenacity.stop_after_attempt(3), wait=tenacity.wait_exponential(), reraise=True)
def read_pod_logs(
self,
pod: V1Pod,
container_name: str,
tail_lines: Optional[int] = None,
timestamps: bool = False,
since_seconds: Optional[int] = None,
follow=True,
) -> Iterable[bytes]:
"""Reads log from the POD"""
additional_kwargs = {}
if since_seconds:
additional_kwargs['since_seconds'] = since_seconds
if tail_lines:
additional_kwargs['tail_lines'] = tail_lines
try:
return self._client.read_namespaced_pod_log(
name=pod.metadata.name,
namespace=pod.metadata.namespace,
container=container_name,
follow=follow,
timestamps=timestamps,
_preload_content=False,
**additional_kwargs,
)
except BaseHTTPError:
self.log.exception('There was an error reading the kubernetes API.')
raise
@tenacity.retry(stop=tenacity.stop_after_attempt(3), wait=tenacity.wait_exponential(), reraise=True)
def read_pod_events(self, pod: V1Pod) -> "CoreV1EventList":
"""Reads events from the POD"""
try:
return self._client.list_namespaced_event(
namespace=pod.metadata.namespace, field_selector=f"involvedObject.name={pod.metadata.name}"
)
except BaseHTTPError as e:
raise AirflowException(f'There was an error reading the kubernetes API: {e}')
@tenacity.retry(stop=tenacity.stop_after_attempt(3), wait=tenacity.wait_exponential(), reraise=True)
def read_pod(self, pod: V1Pod) -> V1Pod:
"""Read POD information"""
try:
return self._client.read_namespaced_pod(pod.metadata.name, pod.metadata.namespace)
except BaseHTTPError as e:
raise AirflowException(f'There was an error reading the kubernetes API: {e}')
def extract_xcom(self, pod: V1Pod) -> str:
"""Retrieves XCom value and kills xcom sidecar container"""
with closing(
kubernetes_stream(
self._client.connect_get_namespaced_pod_exec,
pod.metadata.name,
pod.metadata.namespace,
container=PodDefaults.SIDECAR_CONTAINER_NAME,
command=['/bin/sh'],
stdin=True,
stdout=True,
stderr=True,
tty=False,
_preload_content=False,
)
) as resp:
result = self._exec_pod_command(resp, f'cat {PodDefaults.XCOM_MOUNT_PATH}/return.json')
self._exec_pod_command(resp, 'kill -s SIGINT 1')
if result is None:
raise AirflowException(f'Failed to extract xcom from pod: {pod.metadata.name}')
return result
def _exec_pod_command(self, resp, command: str) -> Optional[str]:
res = None
if resp.is_open():
self.log.info('Running command... %s\n', command)
resp.write_stdin(command + '\n')
while resp.is_open():
resp.update(timeout=1)
while resp.peek_stdout():
res = res + resp.read_stdout() if res else resp.read_stdout()
error_res = None
while resp.peek_stderr():
error_res = error_res + resp.read_stderr() if error_res else resp.read_stderr()
if error_res:
self.log.info("stderr from command: %s", error_res)
break
if res:
return res
return res
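# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): a typical
# create / wait / stream-logs / wait-for-completion / clean-up sequence with
# PodManager, using only methods defined above. The container name "base" and
# the unconditional delete at the end are assumptions for the example, not
# behaviour promised by this module.
#
#     manager = PodManager(kube_client=get_kube_client())
#     remote_pod = manager.create_pod(pod)
#     manager.await_pod_start(remote_pod, startup_timeout=120)
#     manager.fetch_container_logs(remote_pod, container_name="base", follow=True)
#     final_pod = manager.await_pod_completion(remote_pod)
#     manager.delete_pod(remote_pod)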
| 39.06801 | 110 | 0.624178 |
4a1f058e729d375d347162dd6d10f65dccdb0d7f | 6,871 | py | Python | app_static.py | tonywu71/AirFrance-ST7 | 2600db76cc1361bfc0f9d12fdd0a9a7d5cf938bd | ["MIT"] | null | null | null | app_static.py | tonywu71/AirFrance-ST7 | 2600db76cc1361bfc0f9d12fdd0a9a7d5cf938bd | ["MIT"] | null | null | null | app_static.py | tonywu71/AirFrance-ST7 | 2600db76cc1361bfc0f9d12fdd0a9a7d5cf938bd | ["MIT"] | 3 | 2021-04-08T14:57:34.000Z | 2021-04-09T07:34:07.000Z |
## ------ Dash application for visualizing solutions to the static problem ------
import os
import json
import re
import pandas as pd
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px
import plotly.graph_objects as go
from utils_static import *
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
## ------ Collect the list of output files ------
pattern = '^solution_([a-zA-Z0-9]*)_(\w+).csv$'
# dates_avion is a dictionary whose keys are the instance dates and
# whose values are strings giving the chosen aircraft
dates_avion = dict()
for filename in os.listdir('output'):
ans = re.findall(pattern=pattern, string=filename)
    if len(ans) == 1: # Sanity check to make sure we really have a solution file...
date = ans[0][0]
avion = ans[0][1]
dates_avion[date] = avion
# Check whether or not we managed to retrieve any data
assert len(dates_avion) != 0, 'Pas de données correctes trouvées dans le dossier "output" !'
# Extract the keys of the dates_avion dictionary to read the dates more easily:
list_dates = list(dates_avion.keys())
## ------ Widgets ------
date_dropdown = dcc.Dropdown(
id='date-dropdown',
options=[
{'label': f"{date} - {avion}", 'value': f"{date}_{avion}"} for date, avion in dates_avion.items()
],
value=f"{list_dates[0]}_{dates_avion[list_dates[0]]}" # on choisit d'avoir par défaut la 1ère date trouvée
)
## ------ Layout ------
app.layout = html.Div([
html.Div(
[
html.H1("Projet AirFrance (ST7) - Groupe 2", className="app__header__title",
style={'color': '#990000', 'text-align': 'center'}),
html.P(
dcc.Markdown(
"Thomas Bouquet, Caio De Prospero Iglesias, Quentin Guilhot, Thomas Melkior, Tony Wu"),
style={
'fontSize': 16,
'color': '#990000',
'text-align': 'center'
},
className="app__header__title--grey",
),
],
className="app__header__desc",
),
html.Div(
[
html.Img(
src=app.get_asset_url("AirFrance_logo.png"),
style={
'width': '20%',
'position': 'absolute',
'right': '4%',
'top': '6%',
},
className="app__menu__img",
)
],
className="app__header__logo",
),
html.Div(
[
html.Img(
src=app.get_asset_url("cs_logo.png"),
style={
'width': '13%',
'position': 'absolute',
'right': '85%',
'top': '2%',
},
className="app__menu__img",
)
],
className="app__header__logo",
),
html.Div(
style={"padding": "10px"}
),
html.P(
dcc.Markdown("Visualisation des solutions optimales"),
style={
'fontSize': 30,
'color': '#000099',
'text-align': 'left'
},
className="app__header__title--grey",
),
html.P(
dcc.Markdown(
"Sélectionnez la date pour laquelle vous voulez regarder la solution ci-dessous : "),
style={
'fontSize': 18,
'color': 'black',
'text-align': 'left'
},
className="app__header__title--grey",
),
date_dropdown,
dcc.Graph(id="scatter-plot"),
html.Div(
[
html.Img(
src=app.get_asset_url("legend2.png"),
style={
'width': '53%',
'position': 'absolute',
'left': '5%',
'bottom': '2%',
},
className="app__menu__img",
)
],
className="app__header__logo",
),
html.Div(
style={"padding": "15px"}
),
])
@app.callback(
Output("scatter-plot", "figure"),
[Input("date-dropdown", "value")])
def update_bar_chart(value):
    ## --- Retrieve the data of the instance selected in the Dropdown ---
date, AVION = value.split("_")
    ## --- Read the CSV ---
filename = f'solution_{date}_{AVION}.csv'
df_ans = pd.read_csv(os.path.join('output', filename))
df_ans = df_ans.astype({"Transit Time": str}).replace({'inf': '∞'}, regex=True)
    ## --- Compute the barycentre directly from df_ans
barycentre_x, barycentre_y = calcul_barycentre(df_ans)
    ## --- Retrieve the markers for the Plotly plot
marker_list = get_markers_passagers(df_ans)
    ## --- Retrieve some metadata needed by Plotly
with open('./'+AVION+'.json') as f:
preprocess = json.load(f)
avion = {
'x_max': preprocess['x_max'],
'y_max': preprocess['y_max'],
'exit': preprocess['exit'],
'hallway': preprocess['hallway'],
'barycentre': preprocess['barycentre'],
'background': preprocess['background'],
'seats': {
'real': [],
'fictive': [],
'business': [],
'exit': [],
'eco': []
}
}
    ## --- Plot the figure with Plotly ---
fig = px.scatter(
df_ans,
x='x',
y='y',
hover_name='Siège',
color= 'ID Groupe',
size='Poids',
hover_data=df_ans.columns,
template="plotly_white",
color_continuous_scale=px.colors.diverging.RdBu)
fig.update_traces(marker=dict(line=dict(width=2, color='black')),
marker_symbol=marker_list,
selector=dict(mode='markers'))
    ## Add the barycentre
fig.add_trace(
go.Scatter(x=[barycentre_x],
y=[barycentre_y],
name="Barycentre",
showlegend=False,
marker_symbol=["star-triangle-up-dot"],
mode="markers",
marker=dict(size=20,
color="green",
line=dict(width=2, color='DarkSlateGrey'))))
# fig.add_layout_image(source=f"cabine{AVION}AF.jpg")
    # Legend positioning
fig.update_layout(legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
# Add images
fig.add_layout_image(avion['background'])
return fig
app.run_server(debug=False)
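# ---------------------------------------------------------------------------
# Editor's illustrative note (not part of the original app): the dashboard is
# launched by running this file directly, and it expects an "output/" folder
# containing files named like solution_<date>_<aircraft>.csv plus the matching
# <aircraft>.json preprocessing file next to the script, e.g.
#
#     python app_static.py     # then open http://127.0.0.1:8050 in a browser
#
# Port 8050 is Dash's default and is an assumption here, since no explicit
# port is passed to app.run_server().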
| 27.051181 | 114 | 0.525106 |
4a1f06f744c362cb03cb7e8296f6cc5b7f90cbeb | 5,761 | py | Python | 3.DLWorkFlow/TrainingAndValidation/ignite_with_checkpointing.py | sdhnshu/HandsOnDeepLearningWithPytorch | 2292a952a4cb112b03d5db4048c78bc503eb858d | ["MIT"] | 87 | 2018-07-19T20:15:27.000Z | 2022-03-27T18:01:59.000Z | 3.DLWorkFlow/TrainingAndValidation/ignite_with_checkpointing.py | sdhnshu/HandsOnDeepLearningWithPytorch | 2292a952a4cb112b03d5db4048c78bc503eb858d | ["MIT"] | 2 | 2019-06-07T13:49:13.000Z | 2022-01-11T14:45:01.000Z | 3.DLWorkFlow/TrainingAndValidation/ignite_with_checkpointing.py | sdhnshu/HandsOnDeepLearningWithPytorch | 2292a952a4cb112b03d5db4048c78bc503eb858d | ["MIT"] | 41 | 2018-10-16T22:16:37.000Z | 2022-03-13T03:44:57.000Z |
from argparse import ArgumentParser
import logging
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
import torch.nn.functional as F
from torchvision.transforms import Compose, ToTensor, Normalize
from torchvision.datasets import MNIST
from ignite.engine import (
Events, create_supervised_trainer, create_supervised_evaluator)
from ignite.metrics import Accuracy, Loss
from ignite.handlers import ModelCheckpoint
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
def get_data_loaders(train_batch_size, val_batch_size):
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
MNIST(
download=True, root=".", transform=data_transform, train=True),
batch_size=train_batch_size, shuffle=True)
val_loader = DataLoader(
MNIST(
download=False, root=".", transform=data_transform, train=False),
batch_size=val_batch_size, shuffle=False)
return train_loader, val_loader
def run(train_batch_size, val_batch_size,
epochs, lr, momentum,
log_interval, restore_from, crash_iteration=1000):
train_loader, val_loader = get_data_loaders(
train_batch_size, val_batch_size)
model = Net()
device = 'cpu'
optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
trainer = create_supervised_trainer(
model, optimizer, F.nll_loss, device=device)
evaluator = create_supervised_evaluator(model,
metrics={'accuracy': Accuracy(),
'nll': Loss(F.nll_loss)},
device=device)
# Setup debug level of engine logger:
trainer._logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
"%(asctime)s|%(name)s|%(levelname)s| %(message)s")
ch.setFormatter(formatter)
trainer._logger.addHandler(ch)
@trainer.on(Events.ITERATION_COMPLETED)
def log_training_loss(engine):
iter = (engine.state.iteration - 1) % len(train_loader) + 1
if iter % log_interval == 0:
print("Epoch[{}] Iteration[{}/{}] Loss: {:.2f}"
"".format(
engine.state.epoch, iter,
len(train_loader), engine.state.output))
if engine.state.iteration == crash_iteration:
raise Exception("STOP at {}".format(engine.state.iteration))
@trainer.on(Events.EPOCH_COMPLETED)
def log_training_results(engine):
evaluator.run(train_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics['accuracy']
avg_nll = metrics['nll']
print(
"Training Results - Epoch: {}\
Avg accuracy: {:.2f} Avg loss: {:.2f}".format(
engine.state.epoch, avg_accuracy, avg_nll))
@trainer.on(Events.EPOCH_COMPLETED)
def log_validation_results(engine):
evaluator.run(val_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics['accuracy']
avg_nll = metrics['nll']
print(
"Validation Results - Epoch: {}\
Avg accuracy: {:.2f} Avg loss: {:.2f}".format(
engine.state.epoch, avg_accuracy, avg_nll))
objects_to_checkpoint = {"model": model, "optimizer": optimizer}
engine_checkpoint = ModelCheckpoint(
dirname="engine_checkpoint",
filename_prefix='ignite_checking',
require_empty=False,
save_interval=100)
trainer.add_event_handler(
Events.ITERATION_COMPLETED, engine_checkpoint, objects_to_checkpoint)
if restore_from == "":
trainer.run(train_loader, max_epochs=epochs)
else:
raise NotImplementedError('Not implemented yet')
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument('--batch_size', type=int, default=64,
help='input batch size for training (default: 64)')
parser.add_argument('--val_batch_size', type=int, default=1000,
help='input batch size for validation (default: 1000)')
parser.add_argument('--epochs', type=int, default=10,
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01,
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5,
help='SGD momentum (default: 0.5)')
parser.add_argument('--log_interval', type=int, default=300,
help='how many batches to wait before logging')
parser.add_argument('--restore_from', type=str, default="",
help='location from where the model can be reloaded')
parser.add_argument(
'--crash_iteration', type=int, default=1000,
help='Iteration to suddenly raise as exception')
args = parser.parse_args()
run(args.batch_size, args.val_batch_size,
args.epochs, args.lr, args.momentum,
args.log_interval, args.restore_from, args.crash_iteration)
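# ---------------------------------------------------------------------------
# Editor's illustrative note (not part of the original script): the flags
# consumed by run() are the ones declared in the ArgumentParser above, so a
# typical invocation that hits the deliberate crash early might look like
#
#     python ignite_with_checkpointing.py --epochs 2 --crash_iteration 500
#
# Checkpoints are then written to the "engine_checkpoint" directory every 100
# iterations by the ModelCheckpoint handler; resuming via --restore_from is
# still a NotImplementedError in this version of the script.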
| 38.925676 | 79 | 0.629057 |
4a1f086a507d59e95e87530efd9b6f35f4a24918 | 31,278 | py | Python | test/ext/test_baked.py | KaaleppiVirtanen/Testi | 3b6004e6ab3fef8e37fb42981c02f4dfa34fe3b7 | ["MIT"] | null | null | null | test/ext/test_baked.py | KaaleppiVirtanen/Testi | 3b6004e6ab3fef8e37fb42981c02f4dfa34fe3b7 | ["MIT"] | null | null | null | test/ext/test_baked.py | KaaleppiVirtanen/Testi | 3b6004e6ab3fef8e37fb42981c02f4dfa34fe3b7 | ["MIT"] | null | null | null |
from sqlalchemy.orm import Session, subqueryload, \
mapper, relationship, lazyload, clear_mappers, backref
from sqlalchemy.testing import eq_, is_, is_not_
from sqlalchemy.testing import assert_raises, assert_raises_message
from sqlalchemy import testing
from test.orm import _fixtures
from sqlalchemy.ext.baked import BakedQuery, baked_lazyload, BakedLazyLoader
from sqlalchemy.ext import baked
from sqlalchemy import bindparam, func, literal_column
from sqlalchemy.orm import exc as orm_exc
import itertools
from sqlalchemy.testing import mock
from sqlalchemy.testing.assertsql import CompiledSQL
class BakedTest(_fixtures.FixtureTest):
run_setup_mappers = 'once'
run_inserts = 'once'
run_deletes = None
def setup(self):
self.bakery = baked.bakery()
class StateChangeTest(BakedTest):
@classmethod
def setup_mappers(cls):
User = cls.classes.User
mapper(User, cls.tables.users)
def _assert_cache_key(self, key, elements):
eq_(
key,
tuple(elem.__code__ for elem in elements)
)
def test_initial_key(self):
User = self.classes.User
session = Session()
def l1(): return session.query(User)
q1 = self.bakery(l1)
self._assert_cache_key(
q1._cache_key,
[l1]
)
eq_(q1.steps, [l1])
def test_inplace_add(self):
User = self.classes.User
session = Session()
def l1(): return session.query(User)
def l2(q): return q.filter(User.name == bindparam('name'))
q1 = self.bakery(l1)
self._assert_cache_key(
q1._cache_key,
[l1]
)
eq_(q1.steps, [l1])
q2 = q1.add_criteria(l2)
is_(q2, q1)
self._assert_cache_key(
q1._cache_key,
[l1, l2]
)
eq_(q1.steps, [l1, l2])
def test_inplace_add_operator(self):
User = self.classes.User
session = Session()
def l1(): return session.query(User)
def l2(q): return q.filter(User.name == bindparam('name'))
q1 = self.bakery(l1)
self._assert_cache_key(
q1._cache_key,
[l1]
)
q1 += l2
self._assert_cache_key(
q1._cache_key,
[l1, l2]
)
def test_chained_add(self):
User = self.classes.User
session = Session()
def l1(): return session.query(User)
def l2(q): return q.filter(User.name == bindparam('name'))
q1 = self.bakery(l1)
q2 = q1.with_criteria(l2)
is_not_(q2, q1)
self._assert_cache_key(
q1._cache_key,
[l1]
)
self._assert_cache_key(
q2._cache_key,
[l1, l2]
)
def test_chained_add_operator(self):
User = self.classes.User
session = Session()
def l1(): return session.query(User)
def l2(q): return q.filter(User.name == bindparam('name'))
q1 = self.bakery(l1)
q2 = q1 + l2
is_not_(q2, q1)
self._assert_cache_key(
q1._cache_key,
[l1]
)
self._assert_cache_key(
q2._cache_key,
[l1, l2]
)
class LikeQueryTest(BakedTest):
@classmethod
def setup_mappers(cls):
User = cls.classes.User
mapper(User, cls.tables.users)
def test_first_no_result(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
bq += lambda q: q.filter(User.name == 'asdf')
eq_(
bq(Session()).first(),
None
)
def test_first_multiple_result(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User.id))
bq += lambda q: q.filter(User.name.like('%ed%')).order_by(User.id)
eq_(
bq(Session()).first(),
(8, )
)
def test_one_or_none_no_result(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
bq += lambda q: q.filter(User.name == 'asdf')
eq_(
bq(Session()).one_or_none(),
None
)
def test_one_or_none_result(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
bq += lambda q: q.filter(User.name == 'ed')
u1 = bq(Session()).one_or_none()
eq_(u1.name, 'ed')
def test_one_or_none_multiple_result(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
bq += lambda q: q.filter(User.name.like('%ed%'))
assert_raises_message(
orm_exc.MultipleResultsFound,
"Multiple rows were found for one_or_none()",
bq(Session()).one_or_none
)
def test_one_no_result(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
bq += lambda q: q.filter(User.name == 'asdf')
assert_raises_message(
orm_exc.NoResultFound,
"No row was found for one()",
bq(Session()).one
)
def test_one_result(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
bq += lambda q: q.filter(User.name == 'ed')
u1 = bq(Session()).one()
eq_(u1.name, 'ed')
def test_one_multiple_result(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
bq += lambda q: q.filter(User.name.like('%ed%'))
assert_raises_message(
orm_exc.MultipleResultsFound,
"Multiple rows were found for one()",
bq(Session()).one
)
def test_get(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
sess = Session()
def go():
u1 = bq(sess).get(7)
eq_(u1.name, 'jack')
self.assert_sql_count(testing.db, go, 1)
u1 = sess.query(User).get(7) # noqa
def go():
u2 = bq(sess).get(7)
eq_(u2.name, 'jack')
self.assert_sql_count(testing.db, go, 0)
def go():
u2 = bq(sess).get(8)
eq_(u2.name, 'ed')
self.assert_sql_count(testing.db, go, 1)
def test_scalar(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User.id))
sess = Session()
bq += lambda q: q.filter(User.id == 7)
eq_(
bq(sess).scalar(), 7
)
def test_count(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
sess = Session()
eq_(
bq(sess).count(),
4
)
bq += lambda q: q.filter(User.id.in_([8, 9]))
eq_(
bq(sess).count(), 2
)
# original query still works
eq_(
set([(u.id, u.name) for u in bq(sess).all()]),
set([(8, 'ed'), (9, 'fred')])
)
def test_get_pk_w_null(self):
"""test the re-implementation of logic to do get with IS NULL."""
class AddressUser(object):
pass
mapper(
AddressUser,
self.tables.users.outerjoin(self.tables.addresses),
properties={
"id": self.tables.users.c.id,
"address_id": self.tables.addresses.c.id
}
)
bq = self.bakery(lambda s: s.query(AddressUser))
sess = Session()
def go():
u1 = bq(sess).get((10, None))
eq_(u1.name, 'chuck')
self.assert_sql_count(testing.db, go, 1)
u1 = sess.query(AddressUser).get((10, None)) # noqa
def go():
u2 = bq(sess).get((10, None))
eq_(u2.name, 'chuck')
self.assert_sql_count(testing.db, go, 0)
def test_get_includes_getclause(self):
# test issue #3597
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
for i in range(5):
sess = Session()
u1 = bq(sess).get(7)
eq_(u1.name, 'jack')
sess.close()
eq_(len(bq._bakery), 2)
# simulate race where mapper._get_clause
# may be generated more than once
from sqlalchemy import inspect
del inspect(User).__dict__['_get_clause']
for i in range(5):
sess = Session()
u1 = bq(sess).get(7)
eq_(u1.name, 'jack')
sess.close()
eq_(len(bq._bakery), 4)
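# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original test module): the
# pattern exercised throughout these tests is the three-step baked-query
# recipe, shown here with the same fixtures the tests use.
#
#     bakery = baked.bakery()
#     bq = bakery(lambda s: s.query(User))          # build the base query
#     bq += lambda q: q.filter(User.name == bindparam('name'))
#     result = bq(session).params(name='ed').all()  # execute against a Session
#
# The variable names are assumptions for the example; only bakery(), the +=
# criteria step, and calling the BakedQuery with a Session come from the API
# under test.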
class ResultTest(BakedTest):
__backend__ = True
@classmethod
def setup_mappers(cls):
User = cls.classes.User
Address = cls.classes.Address
Order = cls.classes.Order
mapper(User, cls.tables.users, properties={
"addresses": relationship(
Address, order_by=cls.tables.addresses.c.id),
"orders": relationship(
Order, order_by=cls.tables.orders.c.id)
})
mapper(Address, cls.tables.addresses)
mapper(Order, cls.tables.orders)
def test_cachekeys_on_constructor(self):
User = self.classes.User
queue = [7, 8]
def fn(s): return s.query(User.id).filter_by(id=queue.pop(0))
bq1 = self.bakery(fn, 7)
bq2 = self.bakery(fn, 8)
for i in range(3):
session = Session(autocommit=True)
eq_(
bq1(session).all(),
[(7,)]
)
eq_(
bq2(session).all(),
[(8,)]
)
def test_no_steps(self):
User = self.classes.User
bq = self.bakery(
lambda s: s.query(User.id, User.name).order_by(User.id))
for i in range(3):
session = Session(autocommit=True)
eq_(
bq(session).all(),
[(7, 'jack'), (8, 'ed'), (9, 'fred'), (10, 'chuck')]
)
def test_different_limits(self):
User = self.classes.User
bq = self.bakery(
lambda s: s.query(User.id, User.name).order_by(User.id))
bq += lambda q: q.limit(bindparam('limit')).offset(bindparam('offset'))
session = Session(autocommit=True)
for i in range(4):
for limit, offset, exp in [
(2, 1, [(8, 'ed'), (9, 'fred')]),
(3, 0, [(7, 'jack'), (8, 'ed'), (9, 'fred')]),
(1, 2, [(9, 'fred')])
]:
eq_(
bq(session).params(limit=limit, offset=offset).all(),
exp
)
def test_spoiled_full_w_params(self):
User = self.classes.User
canary = mock.Mock()
def fn1(s):
canary.fn1()
return s.query(User.id, User.name).order_by(User.id)
def fn2(q):
canary.fn2()
return q.filter(User.id == bindparam('id'))
def fn3(q):
canary.fn3()
return q
for x in range(3):
bq = self.bakery(fn1)
bq += fn2
sess = Session(autocommit=True)
eq_(
bq.spoil(full=True).add_criteria(fn3)(sess).params(id=7).all(),
[(7, 'jack')]
)
eq_(
canary.mock_calls,
[mock.call.fn1(), mock.call.fn2(), mock.call.fn3(),
mock.call.fn1(), mock.call.fn2(), mock.call.fn3(),
mock.call.fn1(), mock.call.fn2(), mock.call.fn3()]
)
def test_spoiled_half_w_params(self):
User = self.classes.User
canary = mock.Mock()
def fn1(s):
canary.fn1()
return s.query(User.id, User.name).order_by(User.id)
def fn2(q):
canary.fn2()
return q.filter(User.id == bindparam('id'))
def fn3(q):
canary.fn3()
return q
bq = self.bakery(fn1)
bq += fn2
for x in range(3):
bq = self.bakery(fn1)
bq += fn2
sess = Session(autocommit=True)
eq_(
bq.spoil().add_criteria(fn3)(sess).params(id=7).all(),
[(7, 'jack')]
)
eq_(
canary.mock_calls,
[mock.call.fn1(), mock.call.fn2(),
mock.call.fn3(), mock.call.fn3(), mock.call.fn3()]
)
def test_w_new_entities(self):
"""Test that the query can have its entities modified in
an arbitrary callable, and that this new entity list is preserved
when the query is invoked.
"""
User = self.classes.User
bq = self.bakery(
lambda s: s.query(User.id, User.name))
bq += lambda q: q.from_self().with_entities(
func.count(User.id))
for i in range(3):
session = Session(autocommit=True)
eq_(
bq(session).all(),
[(4, )]
)
def test_conditional_step(self):
"""Test a large series of conditionals and assert that
results remain correct between all of them within a series
of loops.
"""
User = self.classes.User
base_bq = self.bakery(
lambda s: s.query(User.id, User.name))
base_bq += lambda q: q.order_by(User.id)
for i in range(4):
for cond1, cond2, cond3, cond4 in itertools.product(
*[(False, True) for j in range(4)]):
bq = base_bq._clone()
if cond1:
bq += lambda q: q.filter(User.name != 'jack')
if cond2:
bq += lambda q: q.join(User.addresses)
else:
bq += lambda q: q.outerjoin(User.addresses)
elif cond3:
bq += lambda q: q.filter(User.name.like('%ed%'))
else:
bq += lambda q: q.filter(User.name == 'jack')
if cond4:
bq += lambda q: q.from_self().with_entities(
func.count(User.id))
sess = Session(autocommit=True)
result = bq(sess).all()
if cond4:
if cond1:
if cond2:
eq_(result, [(4,)])
else:
eq_(result, [(5,)])
elif cond3:
eq_(result, [(2,)])
else:
eq_(result, [(1,)])
else:
if cond1:
if cond2:
eq_(
result,
[(8, 'ed'), (8, 'ed'), (8, 'ed'),
(9, 'fred')]
)
else:
eq_(
result,
[(8, 'ed'), (8, 'ed'), (8, 'ed'),
(9, 'fred'), (10, 'chuck')]
)
elif cond3:
eq_(result, [(8, 'ed'), (9, 'fred')])
else:
eq_(result, [(7, 'jack')])
sess.close()
def test_conditional_step_oneline(self):
User = self.classes.User
base_bq = self.bakery(
lambda s: s.query(User.id, User.name))
base_bq += lambda q: q.order_by(User.id)
for i in range(4):
for cond1 in (False, True):
bq = base_bq._clone()
# we were using (filename, firstlineno) as cache key,
# which fails for this kind of thing!
bq += (lambda q: q.filter(User.name != 'jack')) if cond1 else (lambda q: q.filter(User.name == 'jack')) # noqa
sess = Session(autocommit=True)
result = bq(sess).all()
if cond1:
eq_(result, [(8, u'ed'), (9, u'fred'), (10, u'chuck')])
else:
eq_(result, [(7, 'jack')])
sess.close()
def test_subquery_eagerloading(self):
User = self.classes.User
Address = self.classes.Address
Order = self.classes.Order
# Override the default bakery for one with a smaller size. This used to
# trigger a bug when unbaking subqueries.
self.bakery = baked.bakery(size=3)
base_bq = self.bakery(lambda s: s.query(User))
base_bq += lambda q: q.options(subqueryload(User.addresses),
subqueryload(User.orders))
base_bq += lambda q: q.order_by(User.id)
assert_result = [
User(id=7,
addresses=[Address(id=1, email_address='[email protected]')],
orders=[Order(id=1), Order(id=3), Order(id=5)]),
User(id=8, addresses=[
Address(id=2, email_address='[email protected]'),
Address(id=3, email_address='[email protected]'),
Address(id=4, email_address='[email protected]'),
]),
User(id=9,
addresses=[Address(id=5)],
orders=[Order(id=2), Order(id=4)]),
User(id=10, addresses=[])
]
for i in range(4):
for cond1, cond2 in itertools.product(
*[(False, True) for j in range(2)]):
bq = base_bq._clone()
sess = Session()
if cond1:
bq += lambda q: q.filter(User.name == 'jack')
else:
bq += lambda q: q.filter(User.name.like('%ed%'))
if cond2:
ct = func.count(Address.id).label('count')
subq = sess.query(
ct,
Address.user_id).group_by(Address.user_id).\
having(ct > 2).subquery()
bq += lambda q: q.join(subq)
if cond2:
if cond1:
def go():
result = bq(sess).all()
eq_([], result)
self.assert_sql_count(testing.db, go, 1)
else:
def go():
result = bq(sess).all()
eq_(assert_result[1:2], result)
self.assert_sql_count(testing.db, go, 3)
else:
if cond1:
def go():
result = bq(sess).all()
eq_(assert_result[0:1], result)
self.assert_sql_count(testing.db, go, 3)
else:
def go():
result = bq(sess).all()
eq_(assert_result[1:3], result)
self.assert_sql_count(testing.db, go, 3)
sess.close()
class LazyLoaderTest(testing.AssertsCompiledSQL, BakedTest):
run_setup_mappers = 'each'
def _o2m_fixture(self, lazy="select", **kw):
User = self.classes.User
Address = self.classes.Address
mapper(User, self.tables.users, properties={
'addresses': relationship(
Address, order_by=self.tables.addresses.c.id,
lazy=lazy, **kw)
})
mapper(Address, self.tables.addresses)
return User, Address
def _m2o_fixture(self):
User = self.classes.User
Address = self.classes.Address
mapper(User, self.tables.users)
mapper(Address, self.tables.addresses, properties={
'user': relationship(User)
})
return User, Address
def test_strategy_lookup(self):
"""test that the lazy loader strategies aren't getting mixed up
with BakedLazyLoader as a subclass.
"""
User, Address = self._o2m_fixture()
ll = User.addresses.property._get_strategy((('lazy', 'select'),))
assert not isinstance(ll, BakedLazyLoader)
eq_(ll._strategy_keys, [(('lazy', 'select'),), (('lazy', True),)])
ll = User.addresses.property._get_strategy((('lazy', True),))
assert not isinstance(ll, BakedLazyLoader)
eq_(ll._strategy_keys, [(('lazy', 'select'),), (('lazy', True),)])
bl = User.addresses.property._get_strategy((('lazy', 'baked_select'),))
assert isinstance(bl, BakedLazyLoader)
eq_(bl._strategy_keys, [(('lazy', 'baked_select'),)])
def test_invocation_per_state(self):
"""test that BakedLazyLoader is getting invoked with the
baked_lazyload() loader.
"""
User, Address = self._o2m_fixture()
sess = Session()
q = sess.query(User)
with mock.patch.object(BakedLazyLoader, "_emit_lazyload") as el:
u1 = q.first()
u1.addresses
# not invoked
eq_(el.mock_calls, [])
sess = Session()
q = sess.query(User).options(baked_lazyload(User.addresses))
with mock.patch.object(BakedLazyLoader, "_emit_lazyload") as el:
u1 = q.first()
u1.addresses
# invoked
is_(
el.mock_calls[0][1][1],
u1._sa_instance_state
)
def test_invocation_per_mapper(self):
"""test that BakedLazyLoader is getting invoked with the
"baked_select" lazy setting.
"""
User, Address = self._o2m_fixture(lazy="baked_select")
sess = Session()
q = sess.query(User).options(lazyload(User.addresses))
with mock.patch.object(BakedLazyLoader, "_emit_lazyload") as el:
u1 = q.first()
u1.addresses
# not invoked
eq_(el.mock_calls, [])
sess = Session()
q = sess.query(User)
with mock.patch.object(BakedLazyLoader, "_emit_lazyload") as el:
u1 = q.first()
u1.addresses
# invoked
is_(
el.mock_calls[0][1][1],
u1._sa_instance_state
)
def test_systemwide_loaders_loadable_via_lazyloader(self):
from sqlalchemy.orm import configure_mappers
baked.bake_lazy_loaders()
try:
User, Address = self._o2m_fixture(lazy='joined')
configure_mappers()
is_(
User.addresses.property.
_get_strategy((('lazy', 'select'), )).__class__,
BakedLazyLoader
)
finally:
baked.unbake_lazy_loaders()
def test_invocation_systemwide_loaders(self):
baked.bake_lazy_loaders()
try:
User, Address = self._o2m_fixture()
sess = Session()
q = sess.query(User).options(lazyload(User.addresses))
with mock.patch.object(BakedLazyLoader, "_emit_lazyload") as el:
u1 = q.first()
u1.addresses
# invoked
is_(
el.mock_calls[0][1][1],
u1._sa_instance_state
)
finally:
baked.unbake_lazy_loaders()
clear_mappers()
User, Address = self._o2m_fixture()
sess = Session()
q = sess.query(User).options(lazyload(User.addresses))
with mock.patch.object(BakedLazyLoader, "_emit_lazyload") as el:
u1 = q.first()
u1.addresses
# not invoked
eq_(el.mock_calls, [])
def test_baked_lazy_loading_relationship_flag_true(self):
self._test_baked_lazy_loading_relationship_flag(True)
def test_baked_lazy_loading_relationship_flag_false(self):
self._test_baked_lazy_loading_relationship_flag(False)
def _test_baked_lazy_loading_relationship_flag(self, flag):
baked.bake_lazy_loaders()
try:
User, Address = self._o2m_fixture(bake_queries=flag)
sess = Session()
u1 = sess.query(User).first()
from sqlalchemy.orm import Query
canary = mock.Mock()
# I would think Mock can do this but apparently
# it cannot (wrap / autospec don't work together)
real_compile_context = Query._compile_context
def _my_compile_context(*arg, **kw):
if arg[0].column_descriptions[0]['entity'] is Address:
canary()
return real_compile_context(*arg, **kw)
with mock.patch.object(
Query,
"_compile_context",
_my_compile_context
):
u1.addresses
sess.expire(u1)
u1.addresses
finally:
baked.unbake_lazy_loaders()
if flag:
eq_(canary.call_count, 1)
else:
eq_(canary.call_count, 2)
def test_baked_lazy_loading_option_o2m(self):
User, Address = self._o2m_fixture()
self._test_baked_lazy_loading(set_option=True)
def test_baked_lazy_loading_mapped_o2m(self):
User, Address = self._o2m_fixture(lazy="baked_select")
self._test_baked_lazy_loading(set_option=False)
def _test_baked_lazy_loading(self, set_option):
User, Address = self.classes.User, self.classes.Address
base_bq = self.bakery(
lambda s: s.query(User))
if set_option:
base_bq += lambda q: q.options(baked_lazyload(User.addresses))
base_bq += lambda q: q.order_by(User.id)
assert_result = self.static.user_address_result
for i in range(4):
for cond1, cond2 in itertools.product(
*[(False, True) for j in range(2)]):
bq = base_bq._clone()
sess = Session()
if cond1:
bq += lambda q: q.filter(User.name == 'jack')
else:
bq += lambda q: q.filter(User.name.like('%ed%'))
if cond2:
ct = func.count(Address.id).label('count')
subq = sess.query(
ct,
Address.user_id).group_by(Address.user_id).\
having(ct > 2).subquery()
bq += lambda q: q.join(subq)
if cond2:
if cond1:
def go():
result = bq(sess).all()
eq_([], result)
self.assert_sql_count(testing.db, go, 1)
else:
def go():
result = bq(sess).all()
eq_(assert_result[1:2], result)
self.assert_sql_count(testing.db, go, 2)
else:
if cond1:
def go():
result = bq(sess).all()
eq_(assert_result[0:1], result)
self.assert_sql_count(testing.db, go, 2)
else:
def go():
result = bq(sess).all()
eq_(assert_result[1:3], result)
self.assert_sql_count(testing.db, go, 3)
sess.close()
def test_baked_lazy_loading_m2o(self):
User, Address = self._m2o_fixture()
base_bq = self.bakery(
lambda s: s.query(Address))
base_bq += lambda q: q.options(baked_lazyload(Address.user))
base_bq += lambda q: q.order_by(Address.id)
assert_result = self.static.address_user_result
for i in range(4):
for cond1 in (False, True):
bq = base_bq._clone()
sess = Session()
if cond1:
bq += lambda q: q.filter(
Address.email_address == '[email protected]')
else:
bq += lambda q: q.filter(
Address.email_address.like('ed@%'))
if cond1:
def go():
result = bq(sess).all()
eq_(assert_result[0:1], result)
self.assert_sql_count(testing.db, go, 2)
else:
def go():
result = bq(sess).all()
eq_(assert_result[1:4], result)
self.assert_sql_count(testing.db, go, 2)
sess.close()
def test_useget_cancels_eager(self):
"""test that a one to many lazyload cancels the unnecessary
eager many-to-one join on the other side."""
User = self.classes.User
Address = self.classes.Address
mapper(User, self.tables.users)
mapper(Address, self.tables.addresses, properties={
'user': relationship(
User, lazy='joined',
backref=backref('addresses', lazy='baked_select')
)
})
sess = Session()
u1 = sess.query(User).filter(User.id == 8).one()
def go():
eq_(u1.addresses[0].user, u1)
self.assert_sql_execution(
testing.db, go,
CompiledSQL(
"SELECT addresses.id AS addresses_id, addresses.user_id AS "
"addresses_user_id, addresses.email_address AS "
"addresses_email_address FROM addresses WHERE :param_1 = "
"addresses.user_id",
{'param_1': 8})
)
def test_useget_cancels_eager_propagated_present(self):
"""test that a one to many lazyload cancels the unnecessary
eager many-to-one join on the other side, even when a propagated
option is present."""
User = self.classes.User
Address = self.classes.Address
mapper(User, self.tables.users)
mapper(Address, self.tables.addresses, properties={
'user': relationship(
User, lazy='joined',
backref=backref('addresses', lazy='baked_select')
)
})
from sqlalchemy.orm.interfaces import MapperOption
class MyBogusOption(MapperOption):
propagate_to_loaders = True
sess = Session()
u1 = sess.query(User).options(MyBogusOption()).filter(User.id == 8) \
.one()
def go():
eq_(u1.addresses[0].user, u1)
self.assert_sql_execution(
testing.db, go,
CompiledSQL(
"SELECT addresses.id AS addresses_id, addresses.user_id AS "
"addresses_user_id, addresses.email_address AS "
"addresses_email_address FROM addresses WHERE :param_1 = "
"addresses.user_id",
{'param_1': 8})
)
# additional tests:
# 1. m2m w lazyload
# 2. o2m lazyload where m2o backrefs have an eager load, test
# that eager load is canceled out
# 3. uselist = False, uselist=False assertion
| 29.988495 | 127 | 0.50016 |
4a1f0929ac68fa2c3e96a447844e112012c7beb9 | 403 | py | Python | theaterportal/asgi.py | fross123/Theater-Admin-Portal | 68f8b33d2d25ad287efb6038255425a6d64a0b67 | [
"MIT"
] | null | null | null | theaterportal/asgi.py | fross123/Theater-Admin-Portal | 68f8b33d2d25ad287efb6038255425a6d64a0b67 | [
"MIT"
] | 26 | 2021-02-08T07:57:42.000Z | 2022-03-01T02:07:58.000Z | theaterportal/asgi.py | fross123/Theater-Admin-Portal | 68f8b33d2d25ad287efb6038255425a6d64a0b67 | [
"MIT"
] | null | null | null | """
ASGI config for theaterportal project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'theaterportal.settings')
application = get_asgi_application()
| 23.705882 | 78 | 0.791563 |
4a1f0930b0dc60e7188bf2e488ed1a8dc403ea24 | 5,586 | py | Python | legacy/code/scripts/new_diversity_places.py | GLOMICON/emp | c1f752d1ae4c009328bbdcecf9666dbd4dac39b6 | [
"BSD-3-Clause"
] | 1 | 2020-01-30T15:06:26.000Z | 2020-01-30T15:06:26.000Z | legacy/code/scripts/new_diversity_places.py | GLOMICON/emp | c1f752d1ae4c009328bbdcecf9666dbd4dac39b6 | [
"BSD-3-Clause"
] | null | null | null | legacy/code/scripts/new_diversity_places.py | GLOMICON/emp | c1f752d1ae4c009328bbdcecf9666dbd4dac39b6 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from __future__ import division
__author__ = "Jai Ram Rideout"
__copyright__ = "Copyright 2012, The QIIME project"
__credits__ = ["Jai Ram Rideout", "Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.5.0-dev"
__maintainer__ = "Jai Ram Rideout"
__email__ = "[email protected]"
__status__ = "Development"
from glob import glob
from os import makedirs
from os.path import join
from pickle import dump
from qiime.util import (add_filename_suffix, parse_command_line_parameters,
get_options_lookup, make_option, qiime_system_call)
from emp.new_diversity_places import generate_new_diversity_plots
options_lookup = get_options_lookup()
script_info = {}
script_info['brief_description'] = ""
script_info['script_description'] = ""
script_info['script_usage'] = [("Generate plots for new diversity", "The "
"following command generates two plots that compare new, or novel, OTUs in "
"samples grouped by the mapping category 'Environment'. The plot '"
"num_novel_otus_by_Environment.pdf compares the number of unique novel OTUs "
"in each environment, and 'percent_novel_seqs_by_Environment.pdf' "
"compares the percentage of novel sequences (i.e. sequences that were not "
"that were assigned to a GG reference OTU) in each environment.",
"%prog -i otu_table1.biom,otu_table2.biom -g ref_seqs.fasta -m map.txt -c "
"Environment -o new_diversity_out")]
script_info['output_description'] = ""
script_info['required_options'] = [
make_option('-i','--otu_table_fps',type="existing_filepaths",
help='paths to the input OTU tables (i.e., the output from '
'make_otu_table.py)'),
make_option('-g','--gg_fasta_fp',type="existing_filepath",
help='ref db otus were picked against'),
options_lookup['mapping_fp'],
options_lookup['output_dir']
]
script_info['optional_options'] = [
make_option('-c', '--mapping_category', type='string',
help='', default='SAMPLE_TYPE'),
make_option('-n', '--min_num_samples', type='int',
help='', default=11),
make_option('-e', '--category_values_to_exclude', type='string',
help='comma-separated list of values within '
'--mapping_category to exclude from the plots', default='NA')
# make_option('-l', '--taxonomic_levels', type='string',
# help='for summarize_taxa.py\'s output files (L2, L3, ...)',
# default='Kindom,Phylum,Class,Order,Family')
]
script_info['version'] = __version__
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
output_dir = opts.output_dir
mapping_category = opts.mapping_category
try:
makedirs(output_dir)
except OSError:
pass
percent_failures_data, percent_failures_plot, num_new_otus_data, \
num_new_otus_plot = generate_new_diversity_plots(
[open(otu_table_fp, 'U') for otu_table_fp in opts.otu_table_fps],
open(opts.gg_fasta_fp, 'U'), open(opts.mapping_fp, 'U'),
mapping_category, opts.min_num_samples,
opts.category_values_to_exclude.split(','), opts.verbose)
# Save plots as PDFs.
percent_failures_plot.savefig(join(output_dir,
'percent_novel_seqs_by_%s.pdf' %
mapping_category))
num_new_otus_plot.savefig(join(output_dir,
'num_novel_otus_by_%s.pdf' %
mapping_category))
# Pickle plot raw data in case we need to load up the data again into new
# plots and interactively tweak them (it'll take too long to rerun the
# whole script for these tweaks).
dump(percent_failures_data, open(join(output_dir,
'percent_novel_seqs_by_%s.p' % mapping_category), 'wb'))
dump(num_new_otus_data, open(join(output_dir,
'num_novel_otus_by_%s.p' % mapping_category), 'wb'))
# Not sure if we'll need the following code...
# # Filter otu table to include only new otus.
# novel_otu_table_fp = add_filename_suffix(opts.otu_table_fp, '_novel')
# stdout, stderr, ret_val = qiime_system_call(
# 'filter_otus_from_otu_table.py -i %s -o %s -e %s' % (opts.otu_table_fp,
# novel_otu_table_fp, opts.gg_fasta_fp))
# print stdout
# print stderr
# if ret_val != 0:
# exit(1)
#
# # Summarize taxa, making sure to report absolute abundances so that we can
# # report raw numbers of classified versus unclassified.
# stdout, stderr, ret_val = qiime_system_call(
# 'summarize_taxa.py -i %s -o %s -a' % (novel_otu_table_fp, output_dir))
# print stdout
# print stderr
# if ret_val != 0:
# exit(1)
#
# # Determine the abundance of unclassifiable otus at each level.
# unclassified_report_f = open(join(output_dir, 'unclassified_report.txt'))
# unclassified_report_f.write('Taxonomic rank\t% unclassified OTUs\n')
#
# ts_fps = glob(join(output_dir, '%s_L*.txt' % novel_otu_table_fp))
# tax_levels = opts.taxonomic_levels.split(',')
#
# if len(ts_fps) != len(tax_levels):
# raise ValueError("The number of taxa summary files does not match the "
# "number of taxonomic levels passed with --taxonomic_levels.")
# for tax_level, ts_fp in zip(tax_levels, ts_fps):
# percent_unclassified = summarize_unclassified(open(ts_fp, 'U'))
# unclassified_report_f.write('%s\t%.2f\n' %
# (tax_level, percent_unclassified))
# unclassified_report_f.close()
if __name__ == "__main__":
main()
| 41.686567 | 80 | 0.675976 |
4a1f0951ed1de0d89f8f150b749a406f66c80f84 | 2,479 | py | Python | alembic/versions/c9b7605de680_fresh_start.py | luther38/XivDbWeb | 204d066232c04dce0ea5a03ec55f160cfbc62659 | [
"MIT"
] | null | null | null | alembic/versions/c9b7605de680_fresh_start.py | luther38/XivDbWeb | 204d066232c04dce0ea5a03ec55f160cfbc62659 | [
"MIT"
] | null | null | null | alembic/versions/c9b7605de680_fresh_start.py | luther38/XivDbWeb | 204d066232c04dce0ea5a03ec55f160cfbc62659 | [
"MIT"
] | null | null | null | """fresh start
Revision ID: c9b7605de680
Revises:
Create Date: 2020-04-26 09:40:53.477070
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c9b7605de680'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
#op.drop_table('stats')
#op.drop_table('materia')
#op.drop_table('repair')
#op.drop_table('weapon')
op.create_table(
'stats'
,sa.Column('id', sa.String, primary_key=True)
,sa.Column("str", sa.Integer)
,sa.Column("vit", sa.Integer)
,sa.Column("dex", sa.Integer)
,sa.Column('int', sa.Integer)
,sa.Column('mnd', sa.Integer)
,sa.Column('pie', sa.Integer)
,sa.Column('det', sa.Integer)
,sa.Column('spl', sa.Integer)
,sa.Column('skl', sa.Integer)
,sa.Column('dhr', sa.Integer)
,sa.Column('ten', sa.Integer)
)
op.create_table(
'materia'
,sa.Column('id', sa.String, primary_key=True)
,sa.Column('slots', sa.Integer)
,sa.Column('melderJob', sa.String)
,sa.Column('melderLevel', sa.Integer)
,sa.Column('advancedMelding', sa.Boolean)
)
op.create_table(
'repair'
,sa.Column('id', sa.String, primary_key=True)
,sa.Column('job', sa.String)
,sa.Column('level', sa.Integer)
,sa.Column('material', sa.String)
)
op.create_table(
'weapon'
,sa.Column('id', sa.String, primary_key=True)
,sa.Column('url', sa.String)
,sa.Column('pictureUrl', sa.String)
,sa.Column('name',sa.String)
,sa.Column('rarity', sa.String)
,sa.Column('untradeable', sa.Boolean)
,sa.Column('unique', sa.Boolean)
,sa.Column('slot', sa.String)
,sa.Column('itemLevel', sa.Integer)
,sa.Column('jobs', sa.String)
,sa.Column('level', sa.Integer)
,sa.Column('companyCrest', sa.Boolean)
,sa.Column('armorie', sa.Boolean)
,sa.Column('glamourChest', sa.Boolean)
,sa.Column('dyeable', sa.Boolean)
,sa.Column('extractable', sa.Boolean)
,sa.Column('projectable', sa.Boolean)
,sa.Column('desynth', sa.Float)
,sa.Column('materia_id', sa.String, sa.ForeignKey('materia.id'))
,sa.Column('stats_id', sa.String, sa.ForeignKey('stats.id'))
,sa.Column('repair_id', sa.String, sa.ForeignKey('repair.id'))
)
pass
def downgrade():
pass
| 29.86747 | 72 | 0.588544 |
4a1f09ad0200608fbe4a04f34b65e40d68c8a6ab | 194 | py | Python | profiles_api/serializers.py | feemagdev/profiles-rest-api | 44d819dbee8bb3af41f1eaeceda35e461845740d | [
"MIT"
] | null | null | null | profiles_api/serializers.py | feemagdev/profiles-rest-api | 44d819dbee8bb3af41f1eaeceda35e461845740d | [
"MIT"
] | null | null | null | profiles_api/serializers.py | feemagdev/profiles-rest-api | 44d819dbee8bb3af41f1eaeceda35e461845740d | [
"MIT"
] | null | null | null | from rest_framework import serializers
class HelloSerializers(serializers.Serializer):
"""Serializes a name field to test our api view """
name = serializers.CharField(max_length=10)
| 24.25 | 55 | 0.768041 |
4a1f09bfe50033f3de8eb1b9c0b0326e0a71e5fa | 516 | py | Python | tests/binding_tests/local_minimum_list_tests/test_scanbeams.py | synapticarbors/wagyu | b98354611dceda8888f2951e9704f843a4e88c27 | [
"MIT"
] | 1 | 2021-01-20T05:49:13.000Z | 2021-01-20T05:49:13.000Z | tests/binding_tests/local_minimum_list_tests/test_scanbeams.py | synapticarbors/wagyu | b98354611dceda8888f2951e9704f843a4e88c27 | [
"MIT"
] | 1 | 2020-11-20T18:21:24.000Z | 2020-11-20T18:21:37.000Z | tests/binding_tests/local_minimum_list_tests/test_scanbeams.py | synapticarbors/wagyu | b98354611dceda8888f2951e9704f843a4e88c27 | [
"MIT"
] | 2 | 2020-11-20T18:17:31.000Z | 2021-01-20T14:58:22.000Z | from _wagyu import LocalMinimumList
from hypothesis import given
from wagyu.hints import Coordinate
from . import strategies
@given(strategies.local_minimum_lists)
def test_basic(local_minimum_list: LocalMinimumList) -> None:
assert all(isinstance(element, Coordinate)
for element in local_minimum_list.scanbeams)
@given(strategies.local_minimum_lists)
def test_properties(local_minimum_list: LocalMinimumList) -> None:
assert len(local_minimum_list.scanbeams) == len(local_minimum_list)
| 30.352941 | 71 | 0.802326 |
4a1f09ea3645f76fe0481bfc3e943a21ec347db9 | 2,711 | py | Python | tests/test_util.py | DavidHulsman/hdfs | 59f481b69af801ed252c546cafef4a54f86cf061 | [
"MIT"
] | null | null | null | tests/test_util.py | DavidHulsman/hdfs | 59f481b69af801ed252c546cafef4a54f86cf061 | [
"MIT"
] | null | null | null | tests/test_util.py | DavidHulsman/hdfs | 59f481b69af801ed252c546cafef4a54f86cf061 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
"""Test Hdfs client interactions with HDFS."""
from hdfs.util import *
from tests.util import eq_

import pytest
class TestAsyncWriter(object):
def test_basic(self):
result = []
def consumer(gen):
result.append(list(gen))
with AsyncWriter(consumer) as writer:
writer.write("one")
writer.write("two")
eq_(result, [["one", "two"]])
def test_multiple_writer_uses(self):
result = []
def consumer(gen):
result.append(list(gen))
writer = AsyncWriter(consumer)
with writer:
writer.write("one")
writer.write("two")
with writer:
writer.write("three")
writer.write("four")
eq_(result, [["one", "two"], ["three", "four"]])
def test_multiple_consumer_uses(self):
result = []
def consumer(gen):
result.append(list(gen))
with AsyncWriter(consumer) as writer:
writer.write("one")
writer.write("two")
with AsyncWriter(consumer) as writer:
writer.write("three")
writer.write("four")
eq_(result, [["one", "two"], ["three", "four"]])
def test_nested(self):
with pytest.raises(ValueError):
result = []
def consumer(gen):
result.append(list(gen))
with AsyncWriter(consumer) as _writer:
_writer.write("one")
with _writer as writer:
writer.write("two")
def test_child_error(self):
def consumer(gen):
for value in gen:
if value == "two":
raise HdfsError("Yo")
with pytest.raises(HdfsError):
with AsyncWriter(consumer) as writer:
writer.write("one")
writer.write("two")
def test_parent_error(self):
def consumer(gen):
for value in gen:
pass
def invalid(w):
w.write("one")
raise HdfsError("Ya")
with pytest.raises(HdfsError):
with AsyncWriter(consumer) as writer:
invalid(writer)
class TestTemppath(object):
def test_new(self):
with temppath() as tpath:
assert not osp.exists(tpath)
def test_cleanup(self):
with temppath() as tpath:
with open(tpath, "w") as writer:
writer.write("hi")
assert not osp.exists(tpath)
def test_dpath(self):
with temppath() as dpath:
os.mkdir(dpath)
with temppath(dpath) as tpath:
eq_(osp.dirname(tpath), dpath)
| 26.067308 | 56 | 0.526743 |
4a1f0a1d3d277d06f3df34d2d0431fb95a1f92a4 | 5,749 | py | Python | tools/read_lidar.py | reinforcementdriving/MV3D_TF | 431f49b92e2057afea73b9cd647bc36de6587d77 | [
"MIT"
] | null | null | null | tools/read_lidar.py | reinforcementdriving/MV3D_TF | 431f49b92e2057afea73b9cd647bc36de6587d77 | [
"MIT"
] | null | null | null | tools/read_lidar.py | reinforcementdriving/MV3D_TF | 431f49b92e2057afea73b9cd647bc36de6587d77 | [
"MIT"
] | null | null | null | import numpy as np
import os
import matplotlib.pyplot as plt
# ==============================================================================
# POINT_CLOUD_2_BIRDSEYE
# ==============================================================================
def point_cloud_2_top(points,
res=0.1,
zres=0.3,
side_range=(-10., 10.), # left-most to right-most
fwd_range=(-10., 10.), # back-most to forward-most
height_range=(-2., 2.), # bottom-most to upper-most
):
""" Creates an birds eye view representation of the point cloud data for MV3D.
Args:
points: (numpy array)
N rows of points data
Each point should be specified by at least 3 elements x,y,z
res: (float)
Desired resolution in metres to use. Each output pixel will
represent an square region res x res in size.
zres: (float)
Desired resolution on Z-axis in metres to use.
side_range: (tuple of two floats)
(-left, right) in metres
left and right limits of rectangle to look at.
fwd_range: (tuple of two floats)
(-behind, front) in metres
back and front limits of rectangle to look at.
height_range: (tuple of two floats)
(min, max) heights (in metres) relative to the origin.
All height values will be clipped to this min and max value,
such that anything below min will be truncated to min, and
the same for values above max.
Returns:
        numpy array encoding the height features and the intensity channel.
"""
# EXTRACT THE POINTS FOR EACH AXIS
x_points = points[:, 0]
y_points = points[:, 1]
z_points = points[:, 2]
reflectance = points[:,3]
# INITIALIZE EMPTY ARRAY - of the dimensions we want
x_max = int((side_range[1] - side_range[0]) / res)
y_max = int((fwd_range[1] - fwd_range[0]) / res)
z_max = int((height_range[1] - height_range[0]) / zres)
# z_max =
top = np.zeros([y_max+1, x_max+1, z_max+1], dtype=np.float32)
# FILTER - To return only indices of points within desired cube
# Three filters for: Front-to-back, side-to-side, and height ranges
# Note left side is positive y axis in LIDAR coordinates
f_filt = np.logical_and(
(x_points > fwd_range[0]), (x_points < fwd_range[1]))
s_filt = np.logical_and(
(y_points > -side_range[1]), (y_points < -side_range[0]))
filter = np.logical_and(f_filt, s_filt)
# # ASSIGN EACH POINT TO A HEIGHT SLICE
# # n_slices-1 is used because values above max_height get assigned to an
# # extra index when we call np.digitize().
# bins = np.linspace(height_range[0], height_range[1], num=n_slices-1)
# slice_indices = np.digitize(z_points, bins=bins, right=False)
# # RESCALE THE REFLECTANCE VALUES - to be between the range 0-255
# pixel_values = scale_to_255(r_points, min=0.0, max=1.0)
# FILL PIXEL VALUES IN IMAGE ARRAY
# -y is used because images start from top left
# x_max = int((side_range[1] - side_range[0]) / res)
# y_max = int((fwd_range[1] - fwd_range[0]) / res)
# im = np.zeros([y_max, x_max, n_slices], dtype=np.uint8)
# im[-y_img, x_img, slice_indices] = pixel_values
for i, height in enumerate(np.arange(height_range[0], height_range[1], zres)):
z_filt = np.logical_and((z_points >= height),
(z_points < height + zres))
zfilter = np.logical_and(filter, z_filt)
indices = np.argwhere(zfilter).flatten()
# KEEPERS
xi_points = x_points[indices]
yi_points = y_points[indices]
zi_points = z_points[indices]
ref_i = reflectance[indices]
# print(f_filt.shape)
# CONVERT TO PIXEL POSITION VALUES - Based on resolution
x_img = (-yi_points / res).astype(np.int32) # x axis is -y in LIDAR
y_img = (-xi_points / res).astype(np.int32) # y axis is -x in LIDAR
# SHIFT PIXELS TO HAVE MINIMUM BE (0,0)
# floor & ceil used to prevent anything being rounded to below 0 after
# shift
x_img -= int(np.floor(side_range[0] / res))
y_img += int(np.floor(fwd_range[1] / res))
# CLIP HEIGHT VALUES - to between min and max heights
pixel_values = zi_points - height_range[0]
# pixel_values = zi_points
# FILL PIXEL VALUES IN IMAGE ARRAY
top[y_img, x_img, i] = pixel_values
# max_intensity = np.max(prs[idx])
top[y_img, x_img, z_max] = ref_i
return top
root_dir = "/home1/liumeng/object_detect/MV3D_TF/data/KITTI/object/training"
velodyne = os.path.join(root_dir, "velodyne/")
bird = os.path.join(root_dir, "lidar_bv/")
side_range = (-30., 30.)
fwd_range = (0., 60)
height_range = (-2, 0.4) #
for i in range(7481):
filename = velodyne + str(i).zfill(6) + ".bin"
print("Processing: ", filename)
scan = np.fromfile(filename, dtype=np.float32)
scan = scan.reshape((-1, 4))
bird_view = point_cloud_2_top(scan, res=0.1, zres=0.3,
side_range=side_range, # left-most to right-most
fwd_range=fwd_range, # back-most to forward-most
height_range=height_range)
#save
np.save(bird+str(i).zfill(6)+".npy",bird_view)
# test
test = np.load(bird + "000008.npy")
print(test.shape)
plt.imshow(test[:,:,8])
plt.show()
| 39.376712 | 84 | 0.571926 |
4a1f0a9e7838a0c280d3fdf2228b7b5bb32ad70d | 783 | py | Python | Python/possible-bipartition.py | se77enn/LeetCode-Solution | d29ef5358cae592b63952c3d293897a176fb75e1 | [
"MIT"
] | 1 | 2020-10-27T03:22:31.000Z | 2020-10-27T03:22:31.000Z | Python/possible-bipartition.py | se77enn/LeetCode-Solution | d29ef5358cae592b63952c3d293897a176fb75e1 | [
"MIT"
] | null | null | null | Python/possible-bipartition.py | se77enn/LeetCode-Solution | d29ef5358cae592b63952c3d293897a176fb75e1 | [
"MIT"
] | 1 | 2021-03-22T18:58:23.000Z | 2021-03-22T18:58:23.000Z | # Time: O(|V| + |E|)
# Space: O(|V| + |E|)
import collections
class Solution(object):
def possibleBipartition(self, N, dislikes):
"""
:type N: int
:type dislikes: List[List[int]]
:rtype: bool
"""
adj = [[] for _ in xrange(N)]
for u, v in dislikes:
adj[u-1].append(v-1)
adj[v-1].append(u-1)
        color = [0]*N
        # start a BFS from every uncolored person so that disconnected groups
        # of dislikes are also checked (colouring only from person 0 would
        # silently skip components that do not contain it)
        for start in xrange(N):
            if color[start]:
                continue
            color[start] = 1
            q = collections.deque([start])
            while q:
                cur = q.popleft()
                for nei in adj[cur]:
                    if color[nei] == color[cur]:
                        return False
                    elif color[nei] == -color[cur]:
                        continue
                    color[nei] = -color[cur]
                    q.append(nei)
return True
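
if __name__ == "__main__":
    # Added usage illustration (not part of the original solution file):
    # an odd dislike-cycle among persons 1, 2 and 3 cannot be split in two,
    # while a "star" of dislikes around person 1 can.
    s = Solution()
    print(s.possibleBipartition(4, [[1, 2], [1, 3], [2, 3]]))  # expected: False
    print(s.possibleBipartition(4, [[1, 2], [1, 3], [1, 4]]))  # expected: True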
| 24.46875 | 47 | 0.43295 |
4a1f0acc1ee2d55f19e5ad7923466bacc0ce4179 | 6,332 | py | Python | algs/regularized_evolution/main.py | Beautyya/BenchENA | 5f5491614fc2f00ca26dc29f35f44c334db4718c | [
"MIT"
] | null | null | null | algs/regularized_evolution/main.py | Beautyya/BenchENA | 5f5491614fc2f00ca26dc29f35f44c334db4718c | [
"MIT"
] | null | null | null | algs/regularized_evolution/main.py | Beautyya/BenchENA | 5f5491614fc2f00ca26dc29f35f44c334db4718c | [
"MIT"
] | null | null | null | import os
import sys
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(os.path.split(rootPath)[0])
from algs.regularized_evolution.genetic.statusupdatetool import StatusUpdateTool
from compute import Config_ini
from comm.log import Log
from comm.utils import GPUFitness
from compute.file import get_algo_local_dir
from algs.regularized_evolution.genetic.population import Population
from algs.regularized_evolution.genetic.evaluate import FitnessEvaluate
from algs.regularized_evolution.genetic.mutation import Mutation
from algs.regularized_evolution.utils import Utils
import collections
import random
import copy
import os
class EvolveCNN(object):
def __init__(self, params):
self.params = params
self.pops = None
self.history = None
def initialize_population(self):
StatusUpdateTool.begin_evolution()
pops = Population(0, self.params)
pops.initialize()
self.pops = pops
Utils.save_population_at_begin(str(pops), 0)
def fitness_evaluate(self):
fitness = FitnessEvaluate(self.pops.individuals, self.params, Log)
fitness.generate_to_python_file()
fitness.evaluate()
fitness_map = GPUFitness.read()
for indi in self.pops.individuals:
if indi.acc == -1:
indi.acc = fitness_map[indi.id]
def mutation(self, parent):
cm = Mutation(self.pops.individuals, parent, Log)
offspring = cm.do_mutation()
self.history.append(offspring[-1])
self.parent_pops = copy.deepcopy(self.pops)
self.pops.individuals = copy.deepcopy(offspring)
Utils.save_population_after_mutation(str(self.pops), self.pops.gen_no)
def environment_selection(self):
v_list = []
indi_list = []
_str = []
for indi in self.pops.individuals:
indi_list.append(indi)
v_list.append(indi.acc)
_t_str = 'Indi-%s-%.5f-%s' % (indi.id, indi.acc, indi.uuid()[0])
_str.append(_t_str)
for indi in self.parent_pops.individuals:
indi_list.append(indi)
v_list.append(indi.acc)
_t_str = 'Pare-%s-%.5f-%s' % (indi.id, indi.acc, indi.uuid()[0])
_str.append(_t_str)
_file = '%s/ENVI_%05d.txt' % (os.path.join(get_algo_local_dir(), 'populations'), self.pops.gen_no)
Utils.write_to_file('\n'.join(_str), _file)
self.pops.individuals.popleft()
offspring = copy.deepcopy(self.pops.individuals)
next_gen_pops = Population(self.pops.gen_no + 1, self.pops.params)
next_gen_pops.create_from_offspring(offspring)
self.pops = next_gen_pops
for _, indi in enumerate(self.pops.individuals):
_t_str = 'new -%s-%.5f-%s' % (indi.id, indi.acc, indi.uuid()[0])
_str.append(_t_str)
_file = '%s/ENVI_%05d.txt' % (os.path.join(get_algo_local_dir(), 'populations'), self.pops.gen_no - 1)
Utils.write_to_file('\n'.join(_str), _file)
Utils.save_population_at_begin(str(self.pops), self.pops.gen_no)
def create_necessary_folders(self):
sub_folders = [os.path.join(get_algo_local_dir(), v) for v in ['populations', 'log', 'scripts']]
if not os.path.exists(get_algo_local_dir()):
os.mkdir(get_algo_local_dir())
for each_sub_folder in sub_folders:
if not os.path.exists(each_sub_folder):
os.mkdir(each_sub_folder)
def do_work(self):
Log.info('*' * 25)
self.create_necessary_folders()
# the step 1
if StatusUpdateTool.is_evolution_running():
Log.info('Initialize from existing population data')
gen_no = Utils.get_newest_file_based_on_prefix('begin')
if gen_no is not None:
Log.info('Initialize from %d-th generation' % (gen_no))
pops = Utils.load_population('begin', gen_no)
self.pops = pops
else:
raise ValueError('The running flag is set to be running, but there is no generated population stored')
else:
gen_no = 0
Log.info('Initialize...')
self.initialize_population()
Log.info('EVOLVE[%d-gen]-Begin to evaluate the fitness' % (gen_no))
self.fitness_evaluate()
Log.info('EVOLVE[%d-gen]-Finish the evaluation' % (gen_no))
self.history = self.pops.individuals
while len(self.history) < self.params['cycles']:
self.params['gen_no'] = gen_no
self.pops.gen_no = gen_no
sample = []
while len(sample) < self.params['sample_size']:
candidate = random.choice(list(self.pops.individuals))
sample.append(candidate)
parent = max(sample, key=lambda i: i.acc)
# step 3
Log.info('EVOLVE[%d-gen]-Begin to crossover and mutation' % (self.pops.gen_no))
self.mutation(parent)
Log.info('EVOLVE[%d-gen]-Finish crossover and mutation' % (self.pops.gen_no))
Log.info('EVOLVE[%d-gen]-Begin to evaluate the fitness' % (self.pops.gen_no))
self.fitness_evaluate()
Log.info('EVOLVE[%d-gen]-Finish the evaluation' % (self.pops.gen_no))
self.history.append(self.pops.individuals[-1])
self.environment_selection()
Log.info('EVOLVE[%d-gen]-Finish the environment selection' % (
self.pops.gen_no - 1)) # in environment_selection, gen_no increase 1
gen_no = gen_no + 1
pop_history = Population(self.pops.gen_no + 1, self.pops.params)
pop_history.create_from_offspring(self.history)
Utils.save_population_at_name(str(pop_history), 'history')
StatusUpdateTool.end_evolution()
class Run(object):
def __init__(self, alg_list, train_list, gpu_info_list):
Config_ini.amend(alg_list, train_list, gpu_info_list)
from algs.regularized_evolution.genetic.statusupdatetool import StatusUpdateTool
StatusUpdateTool.change_cycles(alg_list['max_gen'])
def do(self):
from algs.regularized_evolution.utils import Utils
params = StatusUpdateTool.get_init_params()
evoCNN = EvolveCNN(params)
evoCNN.do_work() | 41.657895 | 118 | 0.642767 |
4a1f0ae4ea586de83d41f103c7b599cc1bd051dc | 2,892 | py | Python | txshark/service.py | beenje/txshark | 87a67e63df57099ce2bf6622671a3401ea5ed5c9 | [
"MIT"
] | null | null | null | txshark/service.py | beenje/txshark | 87a67e63df57099ce2bf6622671a3401ea5ed5c9 | [
"MIT"
] | 3 | 2021-04-27T08:25:35.000Z | 2022-02-13T14:36:21.000Z | txshark/service.py | beenje/txshark | 87a67e63df57099ce2bf6622671a3401ea5ed5c9 | [
"MIT"
] | 1 | 2021-12-03T16:33:22.000Z | 2021-12-03T16:33:22.000Z | # -*- coding: utf-8 -*-
"""
txshark.service
~~~~~~~~~~~~~~~
This module defines the tshark service.
:license: MIT, see LICENSE for more details.
"""
import os
from twisted.application import service
from twisted.internet import reactor
from twisted.python import log
from txshark.protocol import TsharkProtocol
class TsharkService(service.Service, object):
"""Service to stop and start tshark
You should extend this class to override the
packetReceived method.
"""
def __init__(self, interfaces):
"""Initialize the tshark service.
Several interfaces can be given for live capture.
A capture filter can be specified for each interface.
A file can be given instead of an interface. In this case
a display filter can be used (syntax different from
capture filter).
:param interfaces: list of interfaces to listen to
with its associated filter
{"name": <name>, "filter": <filter>}
"""
self.interfaces = interfaces
self.proto = TsharkProtocol(callback=self.packetReceived)
def _get_executable(self):
"""Return tshark full path.
Use the PATH environment variable to find tshark.
:returns: tshark full path
"""
path = os.environ.get('PATH', '').split(os.pathsep)
for directory in path:
exe = os.path.join(directory, 'tshark')
if os.path.exists(exe):
return exe
return None
def _get_args(self):
"""Return tshark arguments"""
args = ['tshark', '-T', 'pdml']
for interface in self.interfaces:
name = interface.get('name', '')
interface_filter = interface.get('filter')
if os.path.isfile(name):
args.extend(['-r', name])
filter_flag = '-Y'
else:
args.extend(['-i', name])
filter_flag = '-f'
if interface_filter:
args.extend([filter_flag, interface_filter])
return args
def packetReceived(self, packet):
"""Method to override to handle incoming packets"""
raise NotImplementedError
def startService(self):
log.msg("Starting tshark service")
super(TsharkService, self).startService()
executable = self._get_executable()
args = self._get_args()
log.msg("Running {} {}".format(executable,
' '.join(args[1:])))
reactor.spawnProcess(self.proto,
executable,
args,
env={'PATH': os.environ.get('PATH', '')})
def stopService(self):
log.msg("Stopping tshark service")
super(TsharkService, self).stopService()
return self.proto.killProcess()
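
# Illustrative subclass sketch (an assumption, not part of the original module):
# override packetReceived to consume each parsed packet, then hook the service
# into a Twisted application or reactor as usual. The interface name and
# capture filter below are placeholders.
#
#   class PrintingTsharkService(TsharkService):
#       def packetReceived(self, packet):
#           log.msg("packet: {!r}".format(packet))
#
#   service = PrintingTsharkService([{"name": "eth0", "filter": "tcp port 80"}])
#   service.startService()  # spawns tshark and starts parsing its PDML output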
| 31.434783 | 70 | 0.57538 |
4a1f0bbeaf9f3023ad8334d9c0b4eadee72daeea | 1,908 | py | Python | integration-tests/tests/model_principal_components_test.py | rmaldrix/DOC | 2c3be80d515fbf46516e16a81b7eaafe1ed2c6d9 | [
"Apache-2.0"
] | null | null | null | integration-tests/tests/model_principal_components_test.py | rmaldrix/DOC | 2c3be80d515fbf46516e16a81b7eaafe1ed2c6d9 | [
"Apache-2.0"
] | null | null | null | integration-tests/tests/model_principal_components_test.py | rmaldrix/DOC | 2c3be80d515fbf46516e16a81b7eaafe1ed2c6d9 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import trustedanalytics as ta
# show full stack traces
ta.errors.show_details = True
ta.loggers.set_api()
# TODO: port setup should move to a super class
if ta.server.port != 19099:
ta.server.port = 19099
ta.connect()
class ModelPrincipalComponentsTest(unittest.TestCase):
def test_principal_components(self):
print "define csv file"
schema= [("1", ta.float64),("2", ta.float64),("3", ta.float64),("4", ta.float64),("5", ta.float64),("6", ta.float64),
("7", ta.float64),("8", ta.float64),("9", ta.float64),("10", ta.float64),("11", ta.float64)]
train_file = ta.CsvFile("/datasets/pca_10rows.csv", schema= schema)
print "creating the frame"
train_frame = ta.Frame(train_file)
print "initializing the principalcomponents model"
p = ta.PrincipalComponentsModel()
print "training the model on the frame"
p.train(train_frame,["1","2","3","4","5","6","7","8","9","10","11"],k=9)
print "predicting the class using the model and the frame"
output = p.predict(train_frame,c=5,t_squared_index=True)
self.assertEqual(output.column_names,['1','2','3','4','5','6','7','8','9','10','11','p_1','p_2','p_3','p_4','p_5','t_squared_index'])
if __name__ == "__main__":
unittest.main()
| 36.692308 | 141 | 0.669287 |
4a1f0cff61096e49905a1e6fd98a7152c3dbc957 | 3,125 | py | Python | tetrahedron_rotation.py | NeoNeuron/slide-videos | e41de75324c7ae441feb6302f1f816d8d299a316 | [
"MIT"
] | 1 | 2022-02-12T07:14:16.000Z | 2022-02-12T07:14:16.000Z | tetrahedron_rotation.py | NeoNeuron/slide-videos | e41de75324c7ae441feb6302f1f816d8d299a316 | [
"MIT"
] | null | null | null | tetrahedron_rotation.py | NeoNeuron/slide-videos | e41de75324c7ae441feb6302f1f816d8d299a316 | [
"MIT"
] | null | null | null | # %%
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
# matplotlib parameters to ensure correctness of Chinese characters
plt.rcParams["font.family"] = 'sans-serif'
plt.rcParams['font.sans-serif']=['Arial Unicode MS', 'SimHei'] # Chinese font
plt.rcParams['axes.unicode_minus']=False # correct minus sign
plt.rcParams["font.size"] = 16
plt.rcParams["xtick.labelsize"] = 16
plt.rcParams["ytick.labelsize"] = 16
plt.rcParams["axes.spines.top"] = False
plt.rcParams["axes.spines.right"] = False
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
#%%
class UpdateFigure:
def __init__(self,
ax:plt.Axes,
frames:int=10):
"""Plot the first frame for the animation.
Args:
ax (plt.Axes): axes of flight icons.
"""
self.ax = ax
# plot tetrahedron
verts = [
[np.sqrt(8/9), 0, -1/3],
[-np.sqrt(2/9), np.sqrt(2/3), -1/3],
[-np.sqrt(2/9), -np.sqrt(2/3), -1/3],
[0, 0, 1],
[0, 0, -1/3],
]
# face
faces = [
# [0, 1, 2],
[0, 1, 3],
[0, 2, 3],
[1, 2, 3],
[4, 1, 2],
[0, 4, 2],
[0, 1, 4],
]
x, y, z = zip(*verts)
self.ax.scatter(x, y, z, c='grey')
poly3d = [[verts[vert_id] for vert_id in face] for face in faces]
self.collection = Poly3DCollection(poly3d, edgecolors= 'grey', facecolor= ['r', '#FFFB00', 'b', '#FFFB00', 'r', 'b'], linewidths=1, alpha=1)
self.ax.add_collection3d(self.collection)
self.ax.set_xlim(-1,1)
self.ax.set_ylim(-1,1)
self.ax.set_zlim(-1,1)
# self.ax.set_xlabel('X')
# self.ax.set_ylabel('Y')
# self.ax.set_zlabel('Z')
# self.ax.set_xticklabels([])
# self.ax.set_yticklabels([])
# self.ax.set_zticklabels([])
self.ax.axis('off')
self.ax.xaxis._axinfo["grid"]['linestyle'] = ":"
self.ax.yaxis._axinfo["grid"]['linestyle'] = ":"
self.ax.zaxis._axinfo["grid"]['linestyle'] = ":"
        self.dangle = 360/frames*1.4
        self.nframes = frames
self.split_frame = int(self.nframes/4*3)
self.line =self.ax.plot([],[])
def __call__(self, i):
# This way the plot can continuously run and we just keep
# watching new realizations of the process
if i < self.split_frame:
self.ax.view_init(15, 100+i*self.dangle)
else:
self.ax.view_init(15-(i-self.split_frame)*self.dangle, 100+self.split_frame*self.dangle)
return self.line
# %%
fig = plt.figure(figsize=(6,6),dpi=200,)
ax = fig.add_subplot(projection='3d')
# create a figure updater
nframes=360
ud = UpdateFigure(ax, nframes)
# user FuncAnimation to generate frames of animation
anim = FuncAnimation(fig, ud, frames=nframes+1, blit=True)
# save animation as *.mp4
anim.save('tetrahedron_movie.mp4', fps=60, dpi=200, codec='libx264', bitrate=-1, extra_args=['-pix_fmt', 'yuv420p'])
# %% | 34.340659 | 148 | 0.57728 |
4a1f0d5725797db3cb6906ec92ee98610757899e | 7,222 | py | Python | encuestas/encuesta/migrations/0001_initial.py | davidbmx/encuestas | 3a80a970fecd477e61ea0a51e4b3787226cbea19 | [
"MIT"
] | null | null | null | encuestas/encuesta/migrations/0001_initial.py | davidbmx/encuestas | 3a80a970fecd477e61ea0a51e4b3787226cbea19 | [
"MIT"
] | null | null | null | encuestas/encuesta/migrations/0001_initial.py | davidbmx/encuestas | 3a80a970fecd477e61ea0a51e4b3787226cbea19 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.4 on 2019-08-18 17:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Ciudad',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateField(auto_now_add=True, help_text='Fecha de creacion del modelo', verbose_name='created at')),
('modified', models.DateField(auto_now=True, help_text='Fecha de actualizacion del modelo', verbose_name='created at')),
('nombre', models.CharField(max_length=200)),
],
options={
'ordering': ['-created', '-modified'],
'get_latest_by': 'created',
'abstract': False,
},
),
migrations.CreateModel(
name='DatosEncuestado',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateField(auto_now_add=True, help_text='Fecha de creacion del modelo', verbose_name='created at')),
('modified', models.DateField(auto_now=True, help_text='Fecha de actualizacion del modelo', verbose_name='created at')),
('nombres', models.CharField(max_length=200)),
('apellidos', models.CharField(max_length=200)),
('edad', models.PositiveIntegerField()),
('genero', models.CharField(choices=[('H', 'HOMBRE'), ('M', 'MUJER')], default='H', max_length=1)),
('email', models.EmailField(max_length=200)),
('tiene_hijos', models.BooleanField(default=False)),
('edad_hijos', models.PositiveIntegerField(blank=True, default=0, null=True)),
('ciudad', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='encuesta.Ciudad')),
],
options={
'ordering': ['-created', '-modified'],
'get_latest_by': 'created',
'abstract': False,
},
),
migrations.CreateModel(
name='Encuesta',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateField(auto_now_add=True, help_text='Fecha de creacion del modelo', verbose_name='created at')),
('modified', models.DateField(auto_now=True, help_text='Fecha de actualizacion del modelo', verbose_name='created at')),
('nombre', models.CharField(max_length=250)),
('descripcion', models.CharField(blank=True, max_length=200, null=True)),
('slug', models.SlugField(max_length=200, unique=True)),
('fecha_desde', models.DateTimeField()),
('fecha_hasta', models.DateTimeField()),
],
options={
'ordering': ['-created', '-modified'],
'get_latest_by': 'created',
'abstract': False,
},
),
migrations.CreateModel(
name='OpcionRespuesta',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateField(auto_now_add=True, help_text='Fecha de creacion del modelo', verbose_name='created at')),
('modified', models.DateField(auto_now=True, help_text='Fecha de actualizacion del modelo', verbose_name='created at')),
('titulo', models.CharField(blank=True, max_length=150, null=True)),
('imagen', models.ImageField(blank=True, null=True, upload_to='opciones_respuestas/')),
],
options={
'ordering': ['id'],
'get_latest_by': 'created',
'abstract': False,
},
),
migrations.CreateModel(
name='Pregunta',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateField(auto_now_add=True, help_text='Fecha de creacion del modelo', verbose_name='created at')),
('modified', models.DateField(auto_now=True, help_text='Fecha de actualizacion del modelo', verbose_name='created at')),
('nombre', models.CharField(max_length=250)),
('descripcion', models.CharField(max_length=150)),
('respuestas_multiples', models.BooleanField(default=False)),
('max_respuestas', models.PositiveIntegerField(default=0)),
('tipo_respuesta', models.CharField(max_length=10)),
('especifica', models.BooleanField(default=False)),
('imagen', models.ImageField(blank=True, null=True, upload_to='preguntas/')),
('dependencia', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='encuesta.Pregunta')),
('encuesta', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='preguntas', to='encuesta.Encuesta')),
],
options={
'ordering': ['id'],
'get_latest_by': 'created',
'abstract': False,
},
),
migrations.CreateModel(
name='Respuesta',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateField(auto_now_add=True, help_text='Fecha de creacion del modelo', verbose_name='created at')),
('modified', models.DateField(auto_now=True, help_text='Fecha de actualizacion del modelo', verbose_name='created at')),
('detalle_respuesta', models.CharField(blank=True, max_length=250, null=True)),
('encuestado', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='encuesta.DatosEncuestado')),
('opcion_respuesta', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='encuesta.OpcionRespuesta')),
('pregunta', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='encuesta.Pregunta')),
],
options={
'ordering': ['-created', '-modified'],
'get_latest_by': 'created',
'abstract': False,
},
),
migrations.AddField(
model_name='opcionrespuesta',
name='pregunta',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='opciones', to='encuesta.Pregunta'),
),
migrations.AddField(
model_name='datosencuestado',
name='encuesta',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='encuesta.Encuesta'),
),
]
| 54.300752 | 156 | 0.582387 |
4a1f0e3708c51b7d3a891c3e3ace5abfac7c8405 | 8,778 | py | Python | ax/models/torch/botorch_defaults.py | dgiannelli/Ax | c2d17186c433ad6de542b10d4049956cb6ca0823 | [
"MIT"
] | null | null | null | ax/models/torch/botorch_defaults.py | dgiannelli/Ax | c2d17186c433ad6de542b10d4049956cb6ca0823 | [
"MIT"
] | null | null | null | ax/models/torch/botorch_defaults.py | dgiannelli/Ax | c2d17186c433ad6de542b10d4049956cb6ca0823 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.objective import ConstrainedMCObjective, LinearMCObjective
from botorch.acquisition.utils import get_acquisition_function, get_infeasible_cost
from botorch.fit import fit_gpytorch_model
from botorch.models.gp_regression import FixedNoiseGP
from botorch.models.gpytorch import GPyTorchModel
from botorch.models.model import Model
from botorch.models.model_list_gp_regression import ModelListGP
from botorch.models.multitask import FixedNoiseMultiTaskGP
from botorch.optim.optimize import joint_optimize, sequential_optimize
from botorch.utils import (
get_objective_weights_transform,
get_outcome_constraint_transforms,
)
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
from gpytorch.mlls.sum_marginal_log_likelihood import SumMarginalLogLikelihood
from torch import Tensor
MIN_OBSERVED_NOISE_LEVEL = 1e-7
def get_and_fit_model(
Xs: List[Tensor],
Ys: List[Tensor],
Yvars: List[Tensor],
task_features: List[int],
state_dict: Optional[Dict[str, Tensor]] = None,
**kwargs: Any,
) -> GPyTorchModel:
r"""Instantiates and fits a botorch ModelListGP using the given data.
Args:
Xs: List of X data, one tensor per outcome
Ys: List of Y data, one tensor per outcome
Yvars: List of observed variance of Ys.
task_features: List of columns of X that are tasks.
state_dict: If provided, will set model parameters to this state
dictionary. Otherwise, will fit the model.
Returns:
A fitted ModelListGP.
"""
model = None
if len(task_features) > 1:
raise ValueError(
f"This model only supports 1 task feature (got {task_features})"
)
elif len(task_features) == 1:
task_feature = task_features[0]
else:
task_feature = None
if task_feature is None:
if len(Xs) == 1:
# Use single output, single task GP
model = _get_model(
X=Xs[0], Y=Ys[0], Yvar=Yvars[0], task_feature=task_feature
)
elif all(torch.equal(Xs[0], X) for X in Xs[1:]):
# Use batched multioutput, single task GP
Y = torch.cat(Ys, dim=-1)
Yvar = torch.cat(Yvars, dim=-1)
model = _get_model(X=Xs[0], Y=Y, Yvar=Yvar, task_feature=task_feature)
if model is None:
# Use model list
models = [
_get_model(X=X, Y=Y, Yvar=Yvar, task_feature=task_feature)
for X, Y, Yvar in zip(Xs, Ys, Yvars)
]
model = ModelListGP(*models)
model.to(dtype=Xs[0].dtype, device=Xs[0].device) # pyre-ignore
if state_dict is None:
# TODO: Add bounds for optimization stability - requires revamp upstream
bounds = {}
if isinstance(model, ModelListGP):
mll = SumMarginalLogLikelihood(model.likelihood, model)
else:
# pyre-ignore: [16]
mll = ExactMarginalLogLikelihood(model.likelihood, model)
mll = fit_gpytorch_model(mll, bounds=bounds)
else:
model.load_state_dict(state_dict)
return model
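
# Illustrative sketch (an assumption, not part of the Ax source): fit a
# single-outcome model on toy double-precision data; the fitted model can then
# be queried with predict_from_model (defined below).
#
#   X = torch.rand(20, 3, dtype=torch.double)
#   Y = X.sum(dim=-1, keepdim=True) + 0.05 * torch.randn(20, 1, dtype=torch.double)
#   Yvar = torch.full_like(Y, 0.01)
#   model = get_and_fit_model(Xs=[X], Ys=[Y], Yvars=[Yvar], task_features=[])
#   mean, cov = predict_from_model(model, torch.rand(5, 3, dtype=torch.double))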
def predict_from_model(model: Model, X: Tensor) -> Tuple[Tensor, Tensor]:
r"""Predicts outcomes given a model and input tensor.
Args:
model: A botorch Model.
X: A `n x d` tensor of input parameters.
Returns:
Tensor: The predicted posterior mean as an `n x o`-dim tensor.
Tensor: The predicted posterior covariance as a `n x o x o`-dim tensor.
"""
with torch.no_grad():
posterior = model.posterior(X)
mean = posterior.mean.cpu().detach()
# TODO: Allow Posterior to (optionally) return the full covariance matrix
variance = posterior.variance.cpu().detach()
cov = variance.unsqueeze(-1) * torch.eye(variance.shape[-1], dtype=variance.dtype)
return mean, cov
def get_NEI(
model: Model,
objective_weights: Tensor,
outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
X_observed: Optional[Tensor] = None,
X_pending: Optional[Tensor] = None,
**kwargs: Any,
) -> AcquisitionFunction:
r"""Instantiates a qNoisyExpectedImprovement acquisition function.
    Args:
        model: A fitted botorch Model used to build the acquisition function.
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights.
outcome_constraints: A tuple of (A, b). For k outcome constraints
and m outputs at f(x), A is (k x m) and b is (k x 1) such that
A f(x) <= b. (Not used by single task models)
X_observed: A tensor containing points observed for all objective
outcomes and outcomes that appear in the outcome constraints (if
there are any).
X_pending: A tensor containing points whose evaluation is pending (i.e.
that have been submitted for evaluation) present for all objective
outcomes and outcomes that appear in the outcome constraints (if
there are any).
mc_samples: The number of MC samples to use (default: 500).
qmc: If True, use qMC instead of MC (default: True).
Returns:
qNoisyExpectedImprovement: The instantiated acquisition function.
"""
if X_observed is None:
raise ValueError("There are no feasible observed points.")
# construct Objective module
if outcome_constraints is None:
objective = LinearMCObjective(weights=objective_weights)
else:
obj_tf = get_objective_weights_transform(objective_weights)
con_tfs = get_outcome_constraint_transforms(outcome_constraints)
X_observed = torch.as_tensor(X_observed)
inf_cost = get_infeasible_cost(X=X_observed, model=model, objective=obj_tf)
objective = ConstrainedMCObjective(
objective=obj_tf, constraints=con_tfs or [], infeasible_cost=inf_cost
)
return get_acquisition_function(
acquisition_function_name="qNEI",
model=model,
objective=objective,
X_observed=X_observed,
X_pending=X_pending,
mc_samples=kwargs.get("mc_samples", 500),
qmc=kwargs.get("qmc", True),
seed=torch.randint(1, 10000, (1,)).item(),
)
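# Editor's note -- illustrative sketch only, not part of the original module.
# Given a fitted `model` with two outcomes and observed inputs `X_observed`,
# maximizing the first outcome subject to the second being non-positive could
# be encoded as follows (placeholder tensors, shapes per the docstring above):
#
#     objective_weights = torch.tensor([1.0, 0.0])
#     # one constraint row A (1 x 2) and bound b (1 x 1): 1.0 * f_2(x) <= 0
#     outcome_constraints = (torch.tensor([[0.0, 1.0]]), torch.tensor([[0.0]]))
#     acqf = get_NEI(
#         model=model,
#         objective_weights=objective_weights,
#         outcome_constraints=outcome_constraints,
#         X_observed=X_observed,
#         mc_samples=256,
#     )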
def scipy_optimizer(
acq_function: AcquisitionFunction,
bounds: Tensor,
n: int,
inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
fixed_features: Optional[Dict[int, float]] = None,
rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
**kwargs: Any,
) -> Tensor:
r"""Optimizer using scipy's minimize module on a numpy-adpator.
Args:
acq_function: A botorch AcquisitionFunction.
bounds: A `2 x d`-dim tensor, where `bounds[0]` (`bounds[1]`) are the
lower (upper) bounds of the feasible hyperrectangle.
n: The number of candidates to generate.
        inequality_constraints: A list of tuples (indices, coefficients, rhs),
with each tuple encoding an inequality constraint of the form
`\sum_i (X[indices[i]] * coefficients[i]) >= rhs`
fixed_features: A map {feature_index: value} for features that should
be fixed to a particular value during generation.
rounding_func: A function that rounds an optimization result
appropriately (i.e., according to `round-trip` transformations).
Returns:
Tensor: A `n x d`-dim tensor of generated candidates.
"""
num_restarts: int = kwargs.get("num_restarts", 20)
raw_samples: int = kwargs.get("num_raw_samples", 50 * num_restarts)
if kwargs.get("joint_optimization", False):
optimize = joint_optimize
else:
optimize = sequential_optimize
return optimize(
acq_function=acq_function,
bounds=bounds,
q=n,
num_restarts=num_restarts,
raw_samples=raw_samples,
options=kwargs,
inequality_constraints=inequality_constraints,
fixed_features=fixed_features,
post_processing_func=rounding_func,
)
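# Editor's note -- illustrative sketch only, not part of the original module.
# Using the acquisition function built above, two candidates on a 3-dimensional
# unit cube could be generated roughly as follows (d = 3 is an arbitrary choice;
# num_restarts and joint_optimization are forwarded through **kwargs):
#
#     bounds = torch.stack([torch.zeros(3), torch.ones(3)])
#     candidates = scipy_optimizer(
#         acq_function=acqf,
#         bounds=bounds,
#         n=2,
#         num_restarts=10,
#         joint_optimization=False,
#     )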
def _get_model(
X: Tensor, Y: Tensor, Yvar: Tensor, task_feature: Optional[int]
) -> GPyTorchModel:
"""Instantiate a model of type depending on the input data."""
Yvar = Yvar.clamp_min_(MIN_OBSERVED_NOISE_LEVEL) # pyre-ignore: [16]
if task_feature is None:
gp = FixedNoiseGP(train_X=X, train_Y=Y, train_Yvar=Yvar)
else:
gp = FixedNoiseMultiTaskGP(
train_X=X,
train_Y=Y.view(-1),
train_Yvar=Yvar.view(-1),
task_feature=task_feature,
)
return gp
| 38.669604 | 86 | 0.671793 |
4a1f0ed64e7d1fe433a743fbaed382e7834238d7 | 4,002 | py | Python | tests/cli/commands/test_pool_command.py | rayalex/airflow | 4a344f13d26ecbb627bb9968895b290bfd86e4da | ["Apache-2.0"] | null | null | null | tests/cli/commands/test_pool_command.py | rayalex/airflow | 4a344f13d26ecbb627bb9968895b290bfd86e4da | ["Apache-2.0"] | null | null | null | tests/cli/commands/test_pool_command.py | rayalex/airflow | 4a344f13d26ecbb627bb9968895b290bfd86e4da | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
import os
import unittest
from airflow import models, settings
from airflow.bin import cli
from airflow.cli.commands import pool_command
from airflow.models import Pool
from airflow.settings import Session
from airflow.utils.db import add_default_pool_if_not_exists
class TestCliPools(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dagbag = models.DagBag(include_examples=True)
cls.parser = cli.CLIFactory.get_parser()
def setUp(self):
super().setUp()
settings.configure_orm()
self.session = Session
self._cleanup()
def tearDown(self):
self._cleanup()
@staticmethod
def _cleanup(session=None):
if session is None:
session = Session()
session.query(Pool).filter(Pool.pool != Pool.DEFAULT_POOL_NAME).delete()
session.commit()
add_default_pool_if_not_exists()
session.close()
def test_pool_list(self):
pool_command.pool_set(self.parser.parse_args(['pools', 'set', 'foo', '1', 'test']))
with self.assertLogs(level='INFO') as cm:
pool_command.pool_list(self.parser.parse_args(['pools', 'list']))
stdout = cm.output
self.assertIn('foo', stdout[0])
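    # Editor's note -- illustrative comment only, not part of the original test.
    # The parse_args calls above mirror the equivalent CLI invocations, e.g.:
    #     airflow pools set foo 1 test
    #     airflow pools list
    # so each test exercises the same code path a user hits from the command line.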
def test_pool_list_with_args(self):
pool_command.pool_list(self.parser.parse_args(['pools', 'list', '--output', 'tsv']))
def test_pool_create(self):
pool_command.pool_set(self.parser.parse_args(['pools', 'set', 'foo', '1', 'test']))
self.assertEqual(self.session.query(Pool).count(), 2)
def test_pool_get(self):
pool_command.pool_set(self.parser.parse_args(['pools', 'set', 'foo', '1', 'test']))
pool_command.pool_get(self.parser.parse_args(['pools', 'get', 'foo']))
def test_pool_delete(self):
pool_command.pool_set(self.parser.parse_args(['pools', 'set', 'foo', '1', 'test']))
pool_command.pool_delete(self.parser.parse_args(['pools', 'delete', 'foo']))
self.assertEqual(self.session.query(Pool).count(), 1)
def test_pool_import_export(self):
# Create two pools first
pool_config_input = {
"foo": {
"description": "foo_test",
"slots": 1
},
'default_pool': {
'description': 'Default pool',
'slots': 128
},
"baz": {
"description": "baz_test",
"slots": 2
}
}
with open('pools_import.json', mode='w') as file:
json.dump(pool_config_input, file)
# Import json
pool_command.pool_import(self.parser.parse_args(['pools', 'import', 'pools_import.json']))
# Export json
pool_command.pool_export(self.parser.parse_args(['pools', 'export', 'pools_export.json']))
with open('pools_export.json', mode='r') as file:
pool_config_output = json.load(file)
self.assertEqual(
pool_config_input,
pool_config_output,
"Input and output pool files are not same")
os.remove('pools_import.json')
os.remove('pools_export.json')
| 35.105263 | 98 | 0.638681 |
4a1f0f90dee5b8c56d87a54275ef341fc20cda65 | 4,170 | py | Python | Tools/LibEnsemble/warpx_simf.py | hklion/WarpX | 3c2d0ee2815ab1df21b9f78d899fe7b1a9651758 | ["BSD-3-Clause-LBNL"] | 1 | 2021-06-23T23:38:50.000Z | 2021-06-23T23:38:50.000Z | Tools/LibEnsemble/warpx_simf.py | hklion/WarpX | 3c2d0ee2815ab1df21b9f78d899fe7b1a9651758 | ["BSD-3-Clause-LBNL"] | 106 | 2021-06-08T23:57:54.000Z | 2022-03-08T00:36:46.000Z | Tools/LibEnsemble/warpx_simf.py | hklion/WarpX | 3c2d0ee2815ab1df21b9f78d899fe7b1a9651758 | ["BSD-3-Clause-LBNL"] | 1 | 2021-06-21T18:50:43.000Z | 2021-06-21T18:50:43.000Z |
import os
import time
from libensemble.executors.executor import Executor
from libensemble.message_numbers import TASK_FAILED, WORKER_DONE
import numpy as np
from read_sim_output import read_sim_output
from write_sim_input import write_sim_input
"""
This file is part of the suite of scripts to use LibEnsemble on top of WarpX
simulations. It defines a sim_f function that takes LibEnsemble history and
input parameters, run a WarpX simulation and returns 'f'.
"""
def run_warpx(H, persis_info, sim_specs, libE_info):
"""
This function runs a WarpX simulation and returns quantity 'f' as well as
other physical quantities measured in the run for convenience. Status check
is done periodically on the simulation, provided by LibEnsemble.
"""
# Setting up variables needed for input and output
# keys = variable names
# x = variable values
# libE_output = what will be returned to libE
calc_status = 0 # Returns to worker
input_file = sim_specs['user']['input_filename']
time_limit = sim_specs['user']['sim_kill_minutes'] * 60.0
machine_specs = sim_specs['user']['machine_specs']
exctr = Executor.executor # Get Executor
# Modify WarpX input file with input parameters calculated by gen_f
# and passed to this sim_f.
write_sim_input(input_file, H['x'])
# Passed to command line in addition to the executable.
# Here, only input file
app_args = input_file
os.environ["OMP_NUM_THREADS"] = machine_specs['OMP_NUM_THREADS']
# Launch the executor to actually run the WarpX simulation
if machine_specs['name'] == 'summit':
task = exctr.submit(calc_type='sim',
extra_args=machine_specs['extra_args'],
app_args=app_args,
stdout='out.txt',
stderr='err.txt',
wait_on_run=True)
else:
task = exctr.submit(calc_type='sim',
num_procs=machine_specs['cores'],
app_args=app_args,
stdout='out.txt',
stderr='err.txt',
wait_on_run=True)
# Periodically check the status of the simulation
poll_interval = 1 # secs
while(not task.finished):
time.sleep(poll_interval)
task.poll()
if task.runtime > time_limit:
task.kill() # Timeout
# Set calc_status with optional prints.
if task.finished:
if task.state == 'FINISHED':
calc_status = WORKER_DONE
elif task.state == 'FAILED':
print("Warning: Task {} failed: Error code {}"
.format(task.name, task.errcode))
calc_status = TASK_FAILED
elif task.state == 'USER_KILLED':
print("Warning: Task {} has been killed"
.format(task.name))
else:
print("Warning: Task {} in unknown state {}. Error code {}"
.format(task.name, task.state, task.errcode))
# Safety
time.sleep(0.2)
# Get output from a run and delete output files
warpx_out = read_sim_output(task.workdir)
# Excluding results - NaN - from runs where beam was lost
if (warpx_out[0] != warpx_out[0]):
print(task.workdir, ' output led to NaN values (beam was lost or run did not finish)')
# Pass the sim output values to LibEnsemble.
# When optimization is ON, 'f' is then passed to the generating function
# gen_f to generate new inputs for next runs.
# All other parameters are here just for convenience.
libE_output = np.zeros(1, dtype=sim_specs['out'])
libE_output['f'] = warpx_out[0]
libE_output['energy_std'] = warpx_out[1]
libE_output['energy_avg'] = warpx_out[2]
libE_output['charge'] = warpx_out[3]
libE_output['emittance'] = warpx_out[4]
libE_output['ramp_down_1'] = H['x'][0][0]
libE_output['ramp_down_2'] = H['x'][0][1]
libE_output['zlens_1'] = H['x'][0][2]
libE_output['adjust_factor'] = H['x'][0][3]
return libE_output, persis_info, calc_status
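# Editor's note -- illustrative sketch only, not part of the original file.
# A calling script registers run_warpx as the sim_f; the actual sim_specs used
# by this suite is defined elsewhere, so the dictionary below is only a hedged
# example of the fields this function relies on ('x' in the history, the 'user'
# block, and the output names written into libE_output above).
#
#     sim_specs = {
#         'sim_f': run_warpx,
#         'in': ['x'],
#         'out': [('f', float), ('energy_std', float), ('energy_avg', float),
#                 ('charge', float), ('emittance', float), ('ramp_down_1', float),
#                 ('ramp_down_2', float), ('zlens_1', float),
#                 ('adjust_factor', float)],
#         'user': {'input_filename': 'inputs', 'sim_kill_minutes': 10,
#                  'machine_specs': {...}},
#     }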
| 37.567568 | 94 | 0.62494 |
4a1f107125e6fa3d8aac39a95037c8918313c280 | 1,604 | py | Python | app/controllers/base_controller.py | jattoabdul/andela-genie-backend | 62f49a3783a3dbf21f92449e83ca5c941053a690 | ["MIT"] | null | null | null | app/controllers/base_controller.py | jattoabdul/andela-genie-backend | 62f49a3783a3dbf21f92449e83ca5c941053a690 | ["MIT"] | null | null | null | app/controllers/base_controller.py | jattoabdul/andela-genie-backend | 62f49a3783a3dbf21f92449e83ca5c941053a690 | ["MIT"] | null | null | null |
from app.utils.auth import Auth
from flask import jsonify, make_response
class BaseController:
def __init__(self, request):
self.request = request
def user(self, *keys):
return Auth.user(*keys)
def request_params(self, *keys):
_json = self.get_json()
if keys:
values = list()
for key in keys:
values.append(_json[key]) if key in _json else values.append(None)
return values
return _json
def get_params(self, *keys):
values = list()
for key in keys:
values.append(self.request.args.get(key))
return values
def post_params(self, *keys):
values = list()
for key in keys:
values.append(self.request.data.get(key))
return values
def get_json(self):
return self.request.get_json()
def handle_response(self, msg='OK', payload=None, status_code=200, slack_response=None):
# If there is no specific slack formatted response, default to WEB API Response format
if slack_response is None:
data = {'msg': msg}
if payload is not None:
data['payload'] = payload
else:
data = slack_response
response = jsonify(data)
response.status_code = status_code
return response
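    # Editor's note -- illustrative sketch only, not part of the original class.
    # A subclass would typically end a request handler with something like:
    #
    #     def create_item(self):
    #         name, price = self.request_params('name', 'price')
    #         if self.missing_required([name, price]):
    #             return self.missing_response()
    #         return self.handle_response('OK', payload={'item': {'name': name}},
    #                                     status_code=201)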
def missing_required(self, params):
return True if None in params or '' in params else False
def missing_response(self, msg='Missing Required Parameters'):
return self.handle_response(msg=msg, status_code=400)
def prettify_response_dates(self, created_at, updated_at=None):
return {'createdAt': created_at, 'updatedAt': updated_at,
                'datePrettyShort': created_at.strftime('%b %d, %Y'), 'datePretty': created_at.strftime('%B %d, %Y')}
 | 27.186441 | 104 | 0.711347 |
4a1f10d1371bd57584aae5c6f376eb702e0a08a5 | 19,142 | py | Python | plugins/modules/oci_database_database_facts.py | A7rMtWE57x/oci-ansible-collection | 80548243a085cd53fd5dddaa8135b5cb43612c66 | ["Apache-2.0"] | null | null | null | plugins/modules/oci_database_database_facts.py | A7rMtWE57x/oci-ansible-collection | 80548243a085cd53fd5dddaa8135b5cb43612c66 | ["Apache-2.0"] | null | null | null | plugins/modules/oci_database_database_facts.py | A7rMtWE57x/oci-ansible-collection | 80548243a085cd53fd5dddaa8135b5cb43612c66 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2017, 2020 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_database_database_facts
short_description: Fetches details about one or multiple Database resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple Database resources in Oracle Cloud Infrastructure
- Gets a list of the databases in the specified Database Home.
- If I(database_id) is specified, the details of a single Database will be returned.
version_added: "2.9"
author: Oracle (@oracle)
options:
database_id:
description:
- The database L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm).
- Required to get a specific database.
type: str
aliases: ["id"]
compartment_id:
description:
- The compartment L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm).
- Required to list multiple databases.
type: str
db_home_id:
description:
- A Database Home L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm).
type: str
system_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the Exadata DB system that you want to filter the database
results by. Applies only to Exadata DB systems.
type: str
sort_by:
description:
- The field to sort by. You can provide one sort order (`sortOrder`). Default order for TIMECREATED is descending. Default order for DBNAME is
ascending. The DBNAME sort order is case sensitive.
type: str
choices:
- "DBNAME"
- "TIMECREATED"
sort_order:
description:
- The sort order to use, either ascending (`ASC`) or descending (`DESC`).
type: str
choices:
- "ASC"
- "DESC"
lifecycle_state:
description:
- A filter to return only resources that match the given lifecycle state exactly.
type: str
choices:
- "PROVISIONING"
- "AVAILABLE"
- "UPDATING"
- "BACKUP_IN_PROGRESS"
- "TERMINATING"
- "TERMINATED"
- "RESTORE_FAILED"
- "FAILED"
db_name:
description:
- A filter to return only resources that match the entire database name given. The match is not case sensitive.
type: str
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: List databases
oci_database_database_facts:
compartment_id: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
- name: Get a specific database
oci_database_database_facts:
database_id: ocid1.database.oc1..xxxxxxEXAMPLExxxxxx
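# The task below is an editor-added sketch, not from the original module docs;
# it combines only options documented above to list databases filtered by
# lifecycle state and sorted by name.
- name: List available databases in a Database Home, sorted by name
  oci_database_database_facts:
    compartment_id: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
    db_home_id: ocid1.dbhome.oc1..xxxxxxEXAMPLExxxxxx
    lifecycle_state: AVAILABLE
    sort_by: DBNAME
    sort_order: ASC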
"""
RETURN = """
databases:
description:
- List of Database resources
returned: on success
type: complex
contains:
id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the database.
returned: on success
type: string
sample: ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment.
returned: on success
type: string
sample: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
character_set:
description:
- The character set for the database.
returned: on success
type: string
sample: character_set_example
ncharacter_set:
description:
- The national character set for the database.
returned: on success
type: string
sample: ncharacter_set_example
db_home_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the Database Home.
returned: on success
type: string
sample: ocid1.dbhome.oc1..xxxxxxEXAMPLExxxxxx
db_system_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the DB system.
returned: on success
type: string
sample: ocid1.dbsystem.oc1..xxxxxxEXAMPLExxxxxx
vm_cluster_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the VM cluster.
returned: on success
type: string
sample: ocid1.vmcluster.oc1..xxxxxxEXAMPLExxxxxx
db_name:
description:
- The database name.
returned: on success
type: string
sample: db_name_example
pdb_name:
description:
- The name of the pluggable database. The name must begin with an alphabetic character and can contain a maximum of eight alphanumeric
characters. Special characters are not permitted. Pluggable database should not be same as database name.
returned: on success
type: string
sample: pdb_name_example
db_workload:
description:
- The database workload type.
returned: on success
type: string
sample: db_workload_example
db_unique_name:
description:
- A system-generated name for the database to ensure uniqueness within an Oracle Data Guard group (a primary database and its standby
databases). The unique name cannot be changed.
returned: on success
type: string
sample: db_unique_name_example
lifecycle_details:
description:
- Additional information about the current lifecycleState.
returned: on success
type: string
sample: lifecycle_details_example
lifecycle_state:
description:
- The current state of the database.
returned: on success
type: string
sample: PROVISIONING
time_created:
description:
- The date and time the database was created.
returned: on success
type: string
sample: 2013-10-20T19:20:30+01:00
last_backup_timestamp:
description:
- The date and time when the latest database backup was created.
returned: on success
type: string
sample: 2013-10-20T19:20:30+01:00
db_backup_config:
description:
- ""
returned: on success
type: complex
contains:
auto_backup_enabled:
description:
- If set to true, configures automatic backups. If you previously used RMAN or dbcli to configure backups and then you switch to using
the Console or the API for backups, a new backup configuration is created and associated with your database. This means that you can
no longer rely on your previously configured unmanaged backups to work.
returned: on success
type: bool
sample: true
recovery_window_in_days:
description:
- Number of days between the current and the earliest point of recoverability covered by automatic backups.
This value applies to automatic backups only. After a new automatic backup has been created, Oracle removes old automatic backups that
are created before the window.
When the value is updated, it is applied to all existing automatic backups.
returned: on success
type: int
sample: 56
auto_backup_window:
description:
- Time window selected for initiating automatic backup for the database system. There are twelve available two-hour time windows. If no
option is selected, a start time between 12:00 AM to 7:00 AM in the region of the database is automatically chosen. For example, if
the user selects SLOT_TWO from the enum list, the automatic backup job will start in between 2:00 AM (inclusive) to 4:00 AM
(exclusive).
- "Example: `SLOT_TWO`"
returned: on success
type: string
sample: SLOT_TWO
backup_destination_details:
description:
- Backup destination details.
returned: on success
type: complex
contains:
type:
description:
- Type of the database backup destination.
returned: on success
type: string
sample: NFS
id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the backup destination.
returned: on success
type: string
sample: ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx
vpc_user:
description:
- For a RECOVERY_APPLIANCE backup destination, the Virtual Private Catalog (VPC) user that is used to access the Recovery
Appliance.
returned: on success
type: string
sample: vpc_user_example
vpc_password:
description:
- For a RECOVERY_APPLIANCE backup destination, the password for the VPC user that is used to access the Recovery Appliance.
returned: on success
type: string
sample: vpc_password_example
internet_proxy:
description:
- Proxy URL to connect to object store.
returned: on success
type: string
sample: internet_proxy_example
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
connection_strings:
description:
- The Connection strings used to connect to the Oracle Database.
returned: on success
type: complex
contains:
cdb_default:
description:
- Host name based CDB Connection String.
returned: on success
type: string
sample: cdb_default_example
cdb_ip_default:
description:
- IP based CDB Connection String.
returned: on success
type: string
sample: cdb_ip_default_example
all_connection_strings:
description:
- All connection strings to use to connect to the Database.
returned: on success
type: dict
sample: {}
source_database_point_in_time_recovery_timestamp:
description:
- Point in time recovery timeStamp of the source database at which cloned database system is cloned from the source database system, as
described in L(RFC 3339,https://tools.ietf.org/rfc/rfc3339)
returned: on success
type: string
sample: 2013-10-20T19:20:30+01:00
database_software_image_id:
description:
- The database software image L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm)
returned: on success
type: string
sample: ocid1.databasesoftwareimage.oc1..xxxxxxEXAMPLExxxxxx
sample: [{
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"character_set": "character_set_example",
"ncharacter_set": "ncharacter_set_example",
"db_home_id": "ocid1.dbhome.oc1..xxxxxxEXAMPLExxxxxx",
"db_system_id": "ocid1.dbsystem.oc1..xxxxxxEXAMPLExxxxxx",
"vm_cluster_id": "ocid1.vmcluster.oc1..xxxxxxEXAMPLExxxxxx",
"db_name": "db_name_example",
"pdb_name": "pdb_name_example",
"db_workload": "db_workload_example",
"db_unique_name": "db_unique_name_example",
"lifecycle_details": "lifecycle_details_example",
"lifecycle_state": "PROVISIONING",
"time_created": "2013-10-20T19:20:30+01:00",
"last_backup_timestamp": "2013-10-20T19:20:30+01:00",
"db_backup_config": {
"auto_backup_enabled": true,
"recovery_window_in_days": 56,
"auto_backup_window": "SLOT_TWO",
"backup_destination_details": [{
"type": "NFS",
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"vpc_user": "vpc_user_example",
"vpc_password": "vpc_password_example",
"internet_proxy": "internet_proxy_example"
}]
},
"freeform_tags": {'Department': 'Finance'},
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"connection_strings": {
"cdb_default": "cdb_default_example",
"cdb_ip_default": "cdb_ip_default_example",
"all_connection_strings": {}
},
"source_database_point_in_time_recovery_timestamp": "2013-10-20T19:20:30+01:00",
"database_software_image_id": "ocid1.databasesoftwareimage.oc1..xxxxxxEXAMPLExxxxxx"
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.database import DatabaseClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class DatabaseFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get, list"""
def get_required_params_for_get(self):
return [
"database_id",
]
def get_required_params_for_list(self):
return [
"compartment_id",
]
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_database, database_id=self.module.params.get("database_id"),
)
def list_resources(self):
optional_list_method_params = [
"db_home_id",
"system_id",
"sort_by",
"sort_order",
"lifecycle_state",
"db_name",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.list_all_resources(
self.client.list_databases,
compartment_id=self.module.params.get("compartment_id"),
**optional_kwargs
)
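# Editor's note -- illustrative comment only, not part of the original module.
# With module params such as lifecycle_state=AVAILABLE and sort_by=DBNAME (and
# the other optional filters unset), optional_kwargs above reduces to
# {'lifecycle_state': 'AVAILABLE', 'sort_by': 'DBNAME'}, so only user-supplied
# filters are forwarded to client.list_databases.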
DatabaseFactsHelperCustom = get_custom_class("DatabaseFactsHelperCustom")
class ResourceFactsHelper(DatabaseFactsHelperCustom, DatabaseFactsHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
database_id=dict(aliases=["id"], type="str"),
compartment_id=dict(type="str"),
db_home_id=dict(type="str"),
system_id=dict(type="str"),
sort_by=dict(type="str", choices=["DBNAME", "TIMECREATED"]),
sort_order=dict(type="str", choices=["ASC", "DESC"]),
lifecycle_state=dict(
type="str",
choices=[
"PROVISIONING",
"AVAILABLE",
"UPDATING",
"BACKUP_IN_PROGRESS",
"TERMINATING",
"TERMINATED",
"RESTORE_FAILED",
"FAILED",
],
),
db_name=dict(type="str"),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="database",
service_client_class=DatabaseClient,
namespace="database",
)
result = []
if resource_facts_helper.is_get():
result = [resource_facts_helper.get()]
elif resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(databases=result)
if __name__ == "__main__":
main()
| 40.555085 | 160 | 0.576586 |
4a1f11162e1ed3a2a8ac54dbc3338cf4e1216d1d | 82,737 | py | Python | cinder/tests/unit/test_hplefthand.py | potsmaster/cinder | 275c2acdfb4282b0ec0314c9875b759958c093f8 | ["Apache-2.0"] | null | null | null | cinder/tests/unit/test_hplefthand.py | potsmaster/cinder | 275c2acdfb4282b0ec0314c9875b759958c093f8 | ["Apache-2.0"] | null | null | null | cinder/tests/unit/test_hplefthand.py | potsmaster/cinder | 275c2acdfb4282b0ec0314c9875b759958c093f8 | ["Apache-2.0"] | null | null | null |
# (c) Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Unit tests for OpenStack Cinder volume drivers."""
import mock
from oslo_utils import units
import six
from cinder import context
from cinder import exception
from cinder import test
from cinder.tests.unit import fake_hp_lefthand_client as hplefthandclient
from cinder.volume.drivers.san.hp import hp_lefthand_iscsi
from cinder.volume.drivers.san.hp import hp_lefthand_rest_proxy
from cinder.volume import volume_types
hpexceptions = hplefthandclient.hpexceptions
GOODNESS_FUNCTION = \
"capabilities.capacity_utilization < 0.6? 100 : 25"
FILTER_FUNCTION = \
"capabilities.total_volumes < 400 && capabilities.capacity_utilization"
class HPLeftHandBaseDriver(object):
cluster_id = 1
volume_name = "fakevolume"
volume_id = 1
volume = {
'name': volume_name,
'display_name': 'Foo Volume',
'provider_location': ('10.0.1.6 iqn.2003-10.com.lefthandnetworks:'
'group01:25366:fakev 0'),
'id': volume_id,
'provider_auth': None,
'size': 1}
serverName = 'fakehost'
server_id = 0
server_uri = '/lhos/servers/0'
snapshot_name = "fakeshapshot"
snapshot_id = 3
snapshot = {
'name': snapshot_name,
'volume_name': volume_name}
cloned_volume_name = "clone_volume"
cloned_volume = {'name': cloned_volume_name}
cloned_snapshot_name = "clonedshapshot"
cloned_snapshot_id = 5
cloned_snapshot = {
'name': cloned_snapshot_name,
'volume_name': volume_name}
volume_type_id = 4
init_iqn = 'iqn.1993-08.org.debian:01:222'
volume_type = {'name': 'gold',
'deleted': False,
'updated_at': None,
'extra_specs': {'hplh:provisioning': 'thin',
'hplh:ao': 'true',
'hplh:data_pl': 'r-0'},
'deleted_at': None,
'id': 'gold'}
connector = {
'ip': '10.0.0.2',
'initiator': 'iqn.1993-08.org.debian:01:222',
'host': serverName}
driver_startup_call_stack = [
mock.call.login('foo1', 'bar2'),
mock.call.getClusterByName('CloudCluster1'),
mock.call.getCluster(1),
]
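# Editor's note -- illustrative sketch only, not part of the original tests.
# The driver tests below all follow the same pattern: build an `expected` list
# of mock.call objects (usually driver_startup_call_stack plus the calls under
# test) and verify it against the mocked client or CLIQ runner, e.g.:
#
#     expected = self.driver_startup_call_stack + [
#         mock.call.getVolumeByName('fakevolume'),
#         mock.call.logout()]
#     mock_client.assert_has_calls(expected)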
class TestHPLeftHandCLIQISCSIDriver(HPLeftHandBaseDriver, test.TestCase):
def _fake_cliq_run(self, verb, cliq_args, check_exit_code=True):
"""Return fake results for the various methods."""
def create_volume(cliq_args):
"""Create volume CLIQ input for test.
input = "createVolume description="fake description"
clusterName=Cluster01 volumeName=fakevolume
thinProvision=0 output=XML size=1GB"
"""
output = """<gauche version="1.0">
<response description="Operation succeeded."
name="CliqSuccess" processingTime="181" result="0"/>
</gauche>"""
self.assertEqual(self.volume_name, cliq_args['volumeName'])
self.assertEqual('1', cliq_args['thinProvision'])
self.assertEqual('1GB', cliq_args['size'])
return output, None
def delete_volume(cliq_args):
"""Delete volume CLIQ input for test.
input = "deleteVolume volumeName=fakevolume prompt=false
output=XML"
"""
output = """<gauche version="1.0">
<response description="Operation succeeded."
name="CliqSuccess" processingTime="164" result="0"/>
</gauche>"""
self.assertEqual(self.volume_name, cliq_args['volumeName'])
self.assertEqual('false', cliq_args['prompt'])
return output, None
def extend_volume(cliq_args):
"""Extend volume CLIQ input for test.
input = "modifyVolume description="fake description"
volumeName=fakevolume
output=XML size=2GB"
"""
output = """<gauche version="1.0">
<response description="Operation succeeded."
name="CliqSuccess" processingTime="181" result="0"/>
</gauche>"""
self.assertEqual(self.volume_name, cliq_args['volumeName'])
self.assertEqual('2GB', cliq_args['size'])
return output, None
def assign_volume(cliq_args):
"""Assign volume CLIQ input for test.
input = "assignVolumeToServer volumeName=fakevolume
serverName=fakehost
output=XML"
"""
output = """<gauche version="1.0">
<response description="Operation succeeded."
name="CliqSuccess" processingTime="174" result="0"/>
</gauche>"""
self.assertEqual(self.volume_name, cliq_args['volumeName'])
self.assertEqual(self.connector['host'],
cliq_args['serverName'])
return output, None
def unassign_volume(cliq_args):
"""Unassign volume CLIQ input for test.
input = "unassignVolumeToServer volumeName=fakevolume
serverName=fakehost output=XML
"""
output = """<gauche version="1.0">
<response description="Operation succeeded."
name="CliqSuccess" processingTime="205" result="0"/>
</gauche>"""
self.assertEqual(self.volume_name, cliq_args['volumeName'])
self.assertEqual(self.connector['host'],
cliq_args['serverName'])
return output, None
def create_snapshot(cliq_args):
"""Create snapshot CLIQ input for test.
input = "createSnapshot description="fake description"
snapshotName=fakesnapshot
volumeName=fakevolume
output=XML"
"""
output = """<gauche version="1.0">
<response description="Operation succeeded."
name="CliqSuccess" processingTime="181" result="0"/>
</gauche>"""
self.assertEqual(self.snapshot_name, cliq_args['snapshotName'])
self.assertEqual(self.volume_name, cliq_args['volumeName'])
return output, None
def delete_snapshot(cliq_args):
"""Delete shapshot CLIQ input for test.
input = "deleteSnapshot snapshotName=fakesnapshot prompt=false
output=XML"
"""
output = """<gauche version="1.0">
<response description="Operation succeeded."
name="CliqSuccess" processingTime="164" result="0"/>
</gauche>"""
self.assertEqual(self.snapshot_name, cliq_args['snapshotName'])
self.assertEqual('false', cliq_args['prompt'])
return output, None
def create_volume_from_snapshot(cliq_args):
"""Create volume from snapshot CLIQ input for test.
input = "cloneSnapshot description="fake description"
snapshotName=fakesnapshot
volumeName=fakevolume
output=XML"
"""
output = """<gauche version="1.0">
<response description="Operation succeeded."
name="CliqSuccess" processingTime="181" result="0"/>
</gauche>"""
self.assertEqual(self.snapshot_name, cliq_args['snapshotName'])
self.assertEqual(self.volume_name, cliq_args['volumeName'])
return output, None
def get_cluster_info(cliq_args):
"""Get cluster info CLIQ input for test.
input = "getClusterInfo clusterName=Cluster01 searchDepth=1
verbose=0 output=XML"
"""
output = """<gauche version="1.0">
<response description="Operation succeeded." name="CliqSuccess"
processingTime="1164" result="0">
<cluster blockSize="1024" description=""
maxVolumeSizeReplication1="622957690"
maxVolumeSizeReplication2="311480287"
minVolumeSize="262144" name="Cluster01"
pageSize="262144" spaceTotal="633697992"
storageNodeCount="2" unprovisionedSpace="622960574"
useVip="true">
<nsm ipAddress="10.0.1.7" name="111-vsa"/>
<nsm ipAddress="10.0.1.8" name="112-vsa"/>
<vip ipAddress="10.0.1.6" subnetMask="255.255.255.0"/>
</cluster></response></gauche>"""
return output, None
def get_volume_info(cliq_args):
"""Get volume info CLIQ input for test.
input = "getVolumeInfo volumeName=fakevolume output=XML"
"""
output = """<gauche version="1.0">
<response description="Operation succeeded." name="CliqSuccess"
processingTime="87" result="0">
<volume autogrowPages="4" availability="online"
blockSize="1024" bytesWritten="0" checkSum="false"
clusterName="Cluster01" created="2011-02-08T19:56:53Z"
deleting="false" description="" groupName="Group01"
initialQuota="536870912" isPrimary="true"
iscsiIqn="iqn.2003-10.com.lefthandnetworks:group01:25366:fakev"
maxSize="6865387257856" md5="9fa5c8b2cca54b2948a63d833097e1ca"
minReplication="1" name="vol-b" parity="0" replication="2"
reserveQuota="536870912" scratchQuota="4194304"
serialNumber="9fa5c8b2cca54b2948a63d8"
size="1073741824" stridePages="32" thinProvision="true">
<status description="OK" value="2"/>
<permission access="rw" authGroup="api-1"
chapName="chapusername" chapRequired="true"
id="25369" initiatorSecret="" iqn=""
iscsiEnabled="true" loadBalance="true"
targetSecret="supersecret"/>
</volume></response></gauche>"""
return output, None
def get_snapshot_info(cliq_args):
"""Get snapshot info CLIQ input for test.
input = "getSnapshotInfo snapshotName=fakesnapshot output=XML"
"""
output = """<gauche version="1.0">
<response description="Operation succeeded." name="CliqSuccess"
processingTime="87" result="0">
<snapshot applicationManaged="false" autogrowPages="32768"
automatic="false" availability="online" bytesWritten="0"
clusterName="CloudCluster1" created="2013-08-26T07:03:44Z"
deleting="false" description="" groupName="CloudGroup1"
id="730" initialQuota="536870912" isPrimary="true"
iscsiIqn="iqn.2003-10.com.lefthandnetworks:cloudgroup1:73"
md5="a64b4f850539c07fb5ce3cee5db1fcce" minReplication="1"
name="snapshot-7849288e-e5e8-42cb-9687-9af5355d674b"
replication="2" reserveQuota="536870912" scheduleId="0"
scratchQuota="4194304" scratchWritten="0"
serialNumber="a64b4f850539c07fb5ce3cee5db1fcce"
size="2147483648" stridePages="32"
volumeSerial="a64b4f850539c07fb5ce3cee5db1fcce">
<status description="OK" value="2"/>
<permission access="rw"
authGroup="api-34281B815713B78-(trimmed)51ADD4B7030853AA7"
chapName="chapusername" chapRequired="true" id="25369"
initiatorSecret="" iqn="" iscsiEnabled="true"
loadBalance="true" targetSecret="supersecret"/>
</snapshot></response></gauche>"""
return output, None
def get_server_info(cliq_args):
"""Get server info CLIQ input for test.
input = "getServerInfo serverName=fakeName"
"""
output = """<gauche version="1.0"><response result="0"/>
</gauche>"""
return output, None
def create_server(cliq_args):
"""Create server CLIQ input for test.
input = "createServer serverName=fakeName initiator=something"
"""
output = """<gauche version="1.0"><response result="0"/>
</gauche>"""
return output, None
def test_error(cliq_args):
output = """<gauche version="1.0">
<response description="Volume '134234' not found."
name="CliqVolumeNotFound" processingTime="1083"
result="8000100c"/>
</gauche>"""
return output, None
def test_paramiko_1_13_0(cliq_args):
# paramiko 1.13.0 now returns unicode
output = six.text_type(
'<?xml version="1.0" encoding="UTF-8" standalone="no" ?>\n'
'<gauche version="1.0">\n\n <response description="Operation'
' succeeded." name="CliqSuccess" processingTime="423" '
'result="0">\n <cluster adaptiveOptimization="false" '
'blockSize="1024" description="" maxVolumeSizeReplication1='
'"114594676736" minVolumeSize="262144" name="clusterdemo" '
'pageSize="262144" spaceTotal="118889644032" storageNodeCount='
'"1" unprovisionedSpace="114594676736" useVip="true">\n'
' <nsm ipAddress="10.10.29.102" name="lefdemo1"/>\n'
' <vip ipAddress="10.10.22.87" subnetMask='
'"255.255.224.0"/>\n </cluster>\n </response>\n\n'
'</gauche>\n ')
return output, None
def test_paramiko_1_10_0(cliq_args):
# paramiko 1.10.0 returns python default encoding.
output = (
'<?xml version="1.0" encoding="UTF-8" standalone="no" ?>\n'
'<gauche version="1.0">\n\n <response description="Operation'
' succeeded." name="CliqSuccess" processingTime="423" '
'result="0">\n <cluster adaptiveOptimization="false" '
'blockSize="1024" description="" maxVolumeSizeReplication1='
'"114594676736" minVolumeSize="262144" name="clusterdemo" '
'pageSize="262144" spaceTotal="118889644032" storageNodeCount='
'"1" unprovisionedSpace="114594676736" useVip="true">\n'
' <nsm ipAddress="10.10.29.102" name="lefdemo1"/>\n'
' <vip ipAddress="10.10.22.87" subnetMask='
'"255.255.224.0"/>\n </cluster>\n </response>\n\n'
'</gauche>\n ')
return output, None
self.assertEqual('XML', cliq_args['output'])
try:
verbs = {'createVolume': create_volume,
'deleteVolume': delete_volume,
'modifyVolume': extend_volume,
'assignVolumeToServer': assign_volume,
'unassignVolumeToServer': unassign_volume,
'createSnapshot': create_snapshot,
'deleteSnapshot': delete_snapshot,
'cloneSnapshot': create_volume_from_snapshot,
'getClusterInfo': get_cluster_info,
'getVolumeInfo': get_volume_info,
'getSnapshotInfo': get_snapshot_info,
'getServerInfo': get_server_info,
'createServer': create_server,
'testError': test_error,
'testParamiko_1.10.1': test_paramiko_1_10_0,
'testParamiko_1.13.1': test_paramiko_1_13_0}
except KeyError:
raise NotImplementedError()
return verbs[verb](cliq_args)
def setUp(self):
super(TestHPLeftHandCLIQISCSIDriver, self).setUp()
self.properties = {
'target_discoverd': True,
'target_portal': '10.0.1.6:3260',
'target_iqn':
'iqn.2003-10.com.lefthandnetworks:group01:25366:fakev',
'volume_id': self.volume_id}
def default_mock_conf(self):
mock_conf = mock.Mock()
mock_conf.san_ip = '10.10.10.10'
mock_conf.san_login = 'foo'
mock_conf.san_password = 'bar'
mock_conf.san_ssh_port = 16022
mock_conf.san_clustername = 'CloudCluster1'
mock_conf.hplefthand_api_url = None
return mock_conf
def setup_driver(self, config=None):
if config is None:
config = self.default_mock_conf()
self.driver = hp_lefthand_iscsi.HPLeftHandISCSIDriver(
configuration=config)
self.driver.do_setup(None)
self.driver.proxy._cliq_run = mock.Mock(
side_effect=self._fake_cliq_run)
return self.driver.proxy._cliq_run
def test_create_volume(self):
# set up driver with default config
mock_cliq_run = self.setup_driver()
volume = {'name': self.volume_name, 'size': 1}
model_update = self.driver.create_volume(volume)
expected_iqn = "iqn.2003-10.com.lefthandnetworks:group01:25366:fakev 0"
expected_location = "10.0.1.6:3260,1 %s" % expected_iqn
self.assertEqual(expected_location, model_update['provider_location'])
expected = [
mock.call(
'createVolume', {
'clusterName': 'CloudCluster1',
'volumeName': 'fakevolume',
'thinProvision': '1',
'output': 'XML',
'size': '1GB'},
True),
mock.call(
'getVolumeInfo', {
'volumeName': 'fakevolume',
'output': 'XML'},
True),
mock.call(
'getClusterInfo', {
'clusterName': 'Cluster01',
'searchDepth': '1',
'verbose': '0',
'output': 'XML'},
True)]
# validate call chain
mock_cliq_run.assert_has_calls(expected)
def test_delete_volume(self):
# set up driver with default config
mock_cliq_run = self.setup_driver()
volume = {'name': self.volume_name}
self.driver.delete_volume(volume)
expected = [
mock.call(
'getVolumeInfo', {
'volumeName': 'fakevolume',
'output': 'XML'},
True),
mock.call(
'deleteVolume', {
'volumeName': 'fakevolume',
'prompt': 'false',
'output': 'XML'},
True)]
# validate call chain
mock_cliq_run.assert_has_calls(expected)
def test_extend_volume(self):
# set up driver with default config
mock_cliq_run = self.setup_driver()
volume = {'name': self.volume_name}
self.driver.extend_volume(volume, 2)
expected = [
mock.call(
'modifyVolume', {
'volumeName': 'fakevolume',
'output': 'XML',
'size': '2GB'},
True)]
# validate call chain
mock_cliq_run.assert_has_calls(expected)
def test_initialize_connection(self):
# set up driver with default config
mock_cliq_run = self.setup_driver()
self.driver.proxy._get_iscsi_properties = mock.Mock(
return_value=self.properties)
volume = {'name': self.volume_name}
result = self.driver.initialize_connection(volume,
self.connector)
self.assertEqual('iscsi', result['driver_volume_type'])
self.assertDictMatch(result['data'], self.properties)
expected = [
mock.call(
'getServerInfo', {
'output': 'XML',
'serverName': 'fakehost'},
False),
mock.call(
'assignVolumeToServer', {
'volumeName': 'fakevolume',
'serverName': 'fakehost',
'output': 'XML'},
True)]
# validate call chain
mock_cliq_run.assert_has_calls(expected)
def test_terminate_connection(self):
# set up driver with default config
mock_cliq_run = self.setup_driver()
volume = {'name': self.volume_name}
self.driver.terminate_connection(volume, self.connector)
expected = [
mock.call(
'unassignVolumeToServer', {
'volumeName': 'fakevolume',
'serverName': 'fakehost',
'output': 'XML'},
True)]
# validate call chain
mock_cliq_run.assert_has_calls(expected)
def test_create_snapshot(self):
# set up driver with default config
mock_cliq_run = self.setup_driver()
snapshot = {'name': self.snapshot_name,
'volume_name': self.volume_name}
self.driver.create_snapshot(snapshot)
expected = [
mock.call(
'createSnapshot', {
'snapshotName': 'fakeshapshot',
'output': 'XML',
'inheritAccess': 1,
'volumeName': 'fakevolume'},
True)]
# validate call chain
mock_cliq_run.assert_has_calls(expected)
def test_delete_snapshot(self):
# set up driver with default config
mock_cliq_run = self.setup_driver()
snapshot = {'name': self.snapshot_name}
self.driver.delete_snapshot(snapshot)
expected = [
mock.call(
'getSnapshotInfo', {
'snapshotName': 'fakeshapshot',
'output': 'XML'},
True),
mock.call(
'deleteSnapshot', {
'snapshotName': 'fakeshapshot',
'prompt': 'false',
'output': 'XML'},
True)]
# validate call chain
mock_cliq_run.assert_has_calls(expected)
def test_create_volume_from_snapshot(self):
# set up driver with default config
mock_cliq_run = self.setup_driver()
volume = {'name': self.volume_name}
snapshot = {'name': self.snapshot_name}
model_update = self.driver.create_volume_from_snapshot(volume,
snapshot)
expected_iqn = "iqn.2003-10.com.lefthandnetworks:group01:25366:fakev 0"
expected_location = "10.0.1.6:3260,1 %s" % expected_iqn
self.assertEqual(expected_location, model_update['provider_location'])
expected = [
mock.call(
'cloneSnapshot', {
'snapshotName': 'fakeshapshot',
'output': 'XML',
'volumeName': 'fakevolume'},
True),
mock.call(
'getVolumeInfo', {
'volumeName': 'fakevolume',
'output': 'XML'},
True),
mock.call(
'getClusterInfo', {
'clusterName': 'Cluster01',
'searchDepth': '1',
'verbose': '0',
'output': 'XML'},
True)]
# validate call chain
mock_cliq_run.assert_has_calls(expected)
def test_get_volume_stats(self):
# set up driver with default config
mock_cliq_run = self.setup_driver()
volume_stats = self.driver.get_volume_stats(True)
self.assertEqual('Hewlett-Packard', volume_stats['vendor_name'])
self.assertEqual('iSCSI', volume_stats['storage_protocol'])
expected = [
mock.call('getClusterInfo', {
'searchDepth': 1,
'clusterName': 'CloudCluster1',
'output': 'XML'}, True)]
# validate call chain
mock_cliq_run.assert_has_calls(expected)
def test_cliq_run_xml_paramiko_1_13_0(self):
# set up driver with default config
self.setup_driver()
xml = self.driver.proxy._cliq_run_xml('testParamiko_1.13.1', {})
self.assertIsNotNone(xml)
def test_cliq_run_xml_paramiko_1_10_0(self):
# set up driver with default config
self.setup_driver()
xml = self.driver.proxy._cliq_run_xml('testParamiko_1.10.1', {})
self.assertIsNotNone(xml)
class TestHPLeftHandRESTISCSIDriver(HPLeftHandBaseDriver, test.TestCase):
driver_startup_call_stack = [
mock.call.login('foo1', 'bar2'),
mock.call.getClusterByName('CloudCluster1'),
mock.call.getCluster(1),
mock.call.getVolumes(
cluster='CloudCluster1',
fields=['members[id]', 'members[clusterName]', 'members[size]']),
]
def default_mock_conf(self):
mock_conf = mock.Mock()
mock_conf.hplefthand_api_url = 'http://fake.foo:8080/lhos'
mock_conf.hplefthand_username = 'foo1'
mock_conf.hplefthand_password = 'bar2'
mock_conf.hplefthand_iscsi_chap_enabled = False
mock_conf.hplefthand_debug = False
mock_conf.hplefthand_clustername = "CloudCluster1"
mock_conf.goodness_function = GOODNESS_FUNCTION
mock_conf.filter_function = FILTER_FUNCTION
mock_conf.reserved_percentage = 25
def safe_get(attr):
try:
return mock_conf.__getattribute__(attr)
except AttributeError:
return None
mock_conf.safe_get = safe_get
return mock_conf
@mock.patch('hplefthandclient.client.HPLeftHandClient', spec=True)
def setup_driver(self, _mock_client, config=None):
if config is None:
config = self.default_mock_conf()
_mock_client.return_value.getClusterByName.return_value = {
'id': 1, 'virtualIPAddresses': [{'ipV4Address': '10.0.1.6'}]}
_mock_client.return_value.getCluster.return_value = {
'spaceTotal': units.Gi * 500,
'spaceAvailable': units.Gi * 250}
self.driver = hp_lefthand_iscsi.HPLeftHandISCSIDriver(
configuration=config)
self.driver.do_setup(None)
self.cluster_name = config.hplefthand_clustername
return _mock_client.return_value
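    # Editor's note -- illustrative comment only, not part of the original tests.
    # Each REST test below combines this helper with a patched proxy factory:
    #
    #     mock_client = self.setup_driver()
    #     with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
    #                            '_create_client') as mock_do_setup:
    #         mock_do_setup.return_value = mock_client
    #         ...  # call the driver API under test, then assert_has_calls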
@mock.patch('hplefthandclient.version', "1.0.0")
def test_unsupported_client_version(self):
self.assertRaises(exception.InvalidInput,
self.setup_driver)
@mock.patch('hplefthandclient.version', "3.0.0")
def test_supported_client_version(self):
self.setup_driver()
def test_create_volume(self):
# setup drive with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
mock_client.getVolumes.return_value = {'total': 1, 'members': []}
# mock return value of createVolume
mock_client.createVolume.return_value = {
'iscsiIqn': self.connector['initiator']}
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
# execute driver
volume_info = self.driver.create_volume(self.volume)
self.assertEqual('10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0',
volume_info['provider_location'])
expected = self.driver_startup_call_stack + [
mock.call.createVolume(
'fakevolume',
1,
units.Gi,
{'isThinProvisioned': True,
'clusterName': 'CloudCluster1'}),
mock.call.logout()]
mock_client.assert_has_calls(expected)
# mock HTTPServerError
mock_client.createVolume.side_effect =\
hpexceptions.HTTPServerError()
# ensure the raised exception is a cinder exception
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume, self.volume)
@mock.patch.object(
volume_types,
'get_volume_type',
return_value={'extra_specs': {'hplh:provisioning': 'full'}})
def test_create_volume_with_es(self, _mock_volume_type):
# setup drive with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
volume_with_vt = self.volume
volume_with_vt['volume_type_id'] = 1
# mock return value of createVolume
mock_client.createVolume.return_value = {
'iscsiIqn': self.connector['initiator']}
mock_client.getVolumes.return_value = {'total': 1, 'members': []}
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
# execute create_volume
volume_info = self.driver.create_volume(volume_with_vt)
self.assertEqual('10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0',
volume_info['provider_location'])
expected = self.driver_startup_call_stack + [
mock.call.createVolume(
'fakevolume',
1,
units.Gi,
{'isThinProvisioned': False,
'clusterName': 'CloudCluster1'}),
mock.call.logout()]
mock_client.assert_has_calls(expected)
def test_delete_volume(self):
# setup drive with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
# mock return value of getVolumeByName
mock_client.getVolumeByName.return_value = {'id': self.volume_id}
mock_client.getVolumes.return_value = {'total': 1, 'members': []}
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
# execute delete_volume
self.driver.delete_volume(self.volume)
expected = self.driver_startup_call_stack + [
mock.call.getVolumeByName('fakevolume'),
mock.call.deleteVolume(self.volume_id),
mock.call.logout()]
mock_client.assert_has_calls(expected)
# mock HTTPNotFound (volume not found)
mock_client.getVolumeByName.side_effect =\
hpexceptions.HTTPNotFound()
# no exception should escape method
self.driver.delete_volume(self.volume)
# mock HTTPConflict
mock_client.deleteVolume.side_effect = hpexceptions.HTTPConflict()
# ensure the raised exception is a cinder exception
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_volume, self.volume_id)
def test_extend_volume(self):
# setup drive with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
# mock return value of getVolumeByName
mock_client.getVolumeByName.return_value = {'id': self.volume_id}
mock_client.getVolumes.return_value = {'total': 1, 'members': []}
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
# execute extend_volume
self.driver.extend_volume(self.volume, 2)
expected = self.driver_startup_call_stack + [
mock.call.getVolumeByName('fakevolume'),
mock.call.modifyVolume(1, {'size': 2 * units.Gi}),
mock.call.logout()]
# validate call chain
mock_client.assert_has_calls(expected)
# mock HTTPServerError (array failure)
mock_client.modifyVolume.side_effect =\
hpexceptions.HTTPServerError()
# ensure the raised exception is a cinder exception
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.extend_volume, self.volume, 2)
def test_initialize_connection(self):
# setup drive with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
# mock return value of getVolumeByName
mock_client.getServerByName.side_effect = hpexceptions.HTTPNotFound()
mock_client.createServer.return_value = {'id': self.server_id}
mock_client.getVolumeByName.return_value = {
'id': self.volume_id,
'iscsiSessions': None
}
mock_client.getVolumes.return_value = {'total': 1, 'members': []}
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
# execute initialize_connection
result = self.driver.initialize_connection(
self.volume,
self.connector)
# validate
self.assertEqual('iscsi', result['driver_volume_type'])
self.assertFalse(result['data']['target_discovered'])
self.assertEqual(self.volume_id, result['data']['volume_id'])
self.assertTrue('auth_method' not in result['data'])
expected = self.driver_startup_call_stack + [
mock.call.getServerByName('fakehost'),
mock.call.createServer
(
'fakehost',
'iqn.1993-08.org.debian:01:222',
None
),
mock.call.getVolumeByName('fakevolume'),
mock.call.addServerAccess(1, 0),
mock.call.logout()]
# validate call chain
mock_client.assert_has_calls(expected)
# mock HTTPServerError (array failure)
mock_client.createServer.side_effect =\
hpexceptions.HTTPServerError()
# ensure the raised exception is a cinder exception
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.initialize_connection, self.volume, self.connector)
def test_initialize_connection_session_exists(self):
# setup drive with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
# mock return value of getVolumeByName
mock_client.getServerByName.side_effect = hpexceptions.HTTPNotFound()
mock_client.createServer.return_value = {'id': self.server_id}
mock_client.getVolumeByName.return_value = {
'id': self.volume_id,
'iscsiSessions': [{'server': {'uri': self.server_uri}}]
}
mock_client.getVolumes.return_value = {'total': 1, 'members': []}
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
# execute initialize_connection
result = self.driver.initialize_connection(
self.volume,
self.connector)
# validate
self.assertEqual('iscsi', result['driver_volume_type'])
self.assertFalse(result['data']['target_discovered'])
self.assertEqual(self.volume_id, result['data']['volume_id'])
self.assertTrue('auth_method' not in result['data'])
expected = self.driver_startup_call_stack + [
mock.call.getServerByName('fakehost'),
mock.call.createServer
(
'fakehost',
'iqn.1993-08.org.debian:01:222',
None
),
mock.call.getVolumeByName('fakevolume'),
mock.call.logout()]
# validate call chain
mock_client.assert_has_calls(expected)
def test_initialize_connection_with_chaps(self):
# setup drive with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
# mock return value of getVolumeByName
mock_client.getServerByName.side_effect = hpexceptions.HTTPNotFound()
mock_client.createServer.return_value = {
'id': self.server_id,
'chapAuthenticationRequired': True,
'chapTargetSecret': 'dont_tell'}
mock_client.getVolumeByName.return_value = {
'id': self.volume_id,
'iscsiSessions': None
}
mock_client.getVolumes.return_value = {'total': 1, 'members': []}
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
# execute initialize_connection
result = self.driver.initialize_connection(
self.volume,
self.connector)
# validate
self.assertEqual('iscsi', result['driver_volume_type'])
self.assertFalse(result['data']['target_discovered'])
self.assertEqual(self.volume_id, result['data']['volume_id'])
self.assertEqual('CHAP', result['data']['auth_method'])
expected = self.driver_startup_call_stack + [
mock.call.getServerByName('fakehost'),
mock.call.createServer
(
'fakehost',
'iqn.1993-08.org.debian:01:222',
None
),
mock.call.getVolumeByName('fakevolume'),
mock.call.addServerAccess(1, 0),
mock.call.logout()]
# validate call chain
mock_client.assert_has_calls(expected)
def test_terminate_connection(self):
# setup drive with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
mock_client.getVolumeByName.return_value = {'id': self.volume_id}
mock_client.getServerByName.return_value = {
'id': self.server_id,
'name': self.serverName}
mock_client.findServerVolumes.return_value = [{'id': self.volume_id}]
mock_client.getVolumes.return_value = {'total': 1, 'members': []}
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
# execute terminate_connection
self.driver.terminate_connection(self.volume, self.connector)
expected = self.driver_startup_call_stack + [
mock.call.getVolumeByName('fakevolume'),
mock.call.getServerByName('fakehost'),
mock.call.findServerVolumes('fakehost'),
mock.call.removeServerAccess(1, 0),
mock.call.deleteServer(0)]
# validate call chain
mock_client.assert_has_calls(expected)
mock_client.getVolumeByName.side_effect = (
hpexceptions.HTTPNotFound())
# ensure the raised exception is a cinder exception
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.terminate_connection,
self.volume,
self.connector)
def test_terminate_connection_multiple_volumes_on_server(self):
# setup drive with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
mock_client.getVolumeByName.return_value = {'id': self.volume_id}
mock_client.getServerByName.return_value = {
'id': self.server_id,
'name': self.serverName}
mock_client.findServerVolumes.return_value = [
{'id': self.volume_id},
{'id': 99999}]
mock_client.getVolumes.return_value = {'total': 1, 'members': []}
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
# execute terminate_connection
self.driver.terminate_connection(self.volume, self.connector)
expected = self.driver_startup_call_stack + [
mock.call.getVolumeByName('fakevolume'),
mock.call.getServerByName('fakehost'),
mock.call.findServerVolumes('fakehost'),
mock.call.removeServerAccess(1, 0)]
# validate call chain
mock_client.assert_has_calls(expected)
self.assertFalse(mock_client.deleteServer.called)
mock_client.getVolumeByName.side_effect = (
hpexceptions.HTTPNotFound())
# ensure the raised exception is a cinder exception
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.terminate_connection,
self.volume,
self.connector)
def test_create_snapshot(self):
# setup drive with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
mock_client.getVolumeByName.return_value = {'id': self.volume_id}
mock_client.getVolumes.return_value = {'total': 1, 'members': []}
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
# execute create_snapshot
self.driver.create_snapshot(self.snapshot)
mock_client.getVolumes.return_value = {'total': 1, 'members': []}
expected = self.driver_startup_call_stack + [
mock.call.getVolumeByName('fakevolume'),
mock.call.createSnapshot(
'fakeshapshot',
1,
{'inheritAccess': True}),
mock.call.logout()]
# validate call chain
mock_client.assert_has_calls(expected)
            # mock HTTPNotFound (volume not found on the backend)
mock_client.getVolumeByName.side_effect =\
hpexceptions.HTTPNotFound()
# ensure the raised exception is a cinder exception
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.create_snapshot, self.snapshot)
def test_delete_snapshot(self):
# setup drive with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
mock_client.getSnapshotByName.return_value = {'id': self.snapshot_id}
mock_client.getVolumes.return_value = {'total': 1, 'members': []}
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
# execute delete_snapshot
self.driver.delete_snapshot(self.snapshot)
expected = self.driver_startup_call_stack + [
mock.call.getSnapshotByName('fakeshapshot'),
mock.call.deleteSnapshot(3),
mock.call.logout()]
# validate call chain
mock_client.assert_has_calls(expected)
mock_client.getSnapshotByName.side_effect =\
hpexceptions.HTTPNotFound()
# no exception is thrown, just error msg is logged
self.driver.delete_snapshot(self.snapshot)
# mock HTTPServerError (array failure)
ex = hpexceptions.HTTPServerError({'message': 'Some message.'})
mock_client.getSnapshotByName.side_effect = ex
# ensure the raised exception is a cinder exception
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.delete_snapshot,
self.snapshot)
# mock HTTPServerError because the snap is in use
ex = hpexceptions.HTTPServerError({
'message':
'Hey, dude cannot be deleted because it is a clone point'
' duh.'})
mock_client.getSnapshotByName.side_effect = ex
# ensure the raised exception is a cinder exception
self.assertRaises(
exception.SnapshotIsBusy,
self.driver.delete_snapshot,
self.snapshot)
def test_create_volume_from_snapshot(self):
# setup drive with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
mock_client.getSnapshotByName.return_value = {'id': self.snapshot_id}
mock_client.cloneSnapshot.return_value = {
'iscsiIqn': self.connector['initiator']}
mock_client.getVolumes.return_value = {'total': 1, 'members': []}
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
# execute create_volume_from_snapshot
model_update = self.driver.create_volume_from_snapshot(
self.volume, self.snapshot)
expected_iqn = 'iqn.1993-08.org.debian:01:222 0'
expected_location = "10.0.1.6:3260,1 %s" % expected_iqn
self.assertEqual(expected_location,
model_update['provider_location'])
expected = self.driver_startup_call_stack + [
mock.call.getSnapshotByName('fakeshapshot'),
mock.call.cloneSnapshot('fakevolume', 3),
mock.call.logout()]
# validate call chain
mock_client.assert_has_calls(expected)
def test_create_cloned_volume(self):
# setup drive with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
mock_client.getVolumeByName.return_value = {'id': self.volume_id}
mock_client.cloneVolume.return_value = {
'iscsiIqn': self.connector['initiator']}
mock_client.getVolumes.return_value = {'total': 1, 'members': []}
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
# execute create_cloned_volume
model_update = self.driver.create_cloned_volume(
self.cloned_volume, self.volume)
expected_iqn = 'iqn.1993-08.org.debian:01:222 0'
expected_location = "10.0.1.6:3260,1 %s" % expected_iqn
self.assertEqual(expected_location,
model_update['provider_location'])
expected = self.driver_startup_call_stack + [
mock.call.getVolumeByName('fakevolume'),
mock.call.cloneVolume('clone_volume', 1),
mock.call.logout()]
# validate call chain
mock_client.assert_has_calls(expected)
@mock.patch.object(volume_types, 'get_volume_type')
def test_extra_spec_mapping(self, _mock_get_volume_type):
# setup drive with default configuration
self.setup_driver()
# 2 extra specs we don't care about, and
# 1 that will get mapped
_mock_get_volume_type.return_value = {
'extra_specs': {
'foo:bar': 'fake',
'bar:foo': 1234,
'hplh:provisioning': 'full'}}
volume_with_vt = self.volume
volume_with_vt['volume_type_id'] = self.volume_type_id
# get the extra specs of interest from this volume's volume type
volume_extra_specs = self.driver.proxy._get_volume_extra_specs(
volume_with_vt)
extra_specs = self.driver.proxy._get_lh_extra_specs(
volume_extra_specs,
hp_lefthand_rest_proxy.extra_specs_key_map.keys())
# map the extra specs key/value pairs to key/value pairs
# used as optional configuration values by the LeftHand backend
optional = self.driver.proxy._map_extra_specs(extra_specs)
self.assertDictMatch({'isThinProvisioned': False}, optional)
@mock.patch.object(volume_types, 'get_volume_type')
def test_extra_spec_mapping_invalid_value(self, _mock_get_volume_type):
# setup drive with default configuration
self.setup_driver()
volume_with_vt = self.volume
volume_with_vt['volume_type_id'] = self.volume_type_id
_mock_get_volume_type.return_value = {
'extra_specs': {
                # r-07 is an invalid value for hplh:data_pl
'hplh:data_pl': 'r-07',
'hplh:ao': 'true'}}
# get the extra specs of interest from this volume's volume type
volume_extra_specs = self.driver.proxy._get_volume_extra_specs(
volume_with_vt)
extra_specs = self.driver.proxy._get_lh_extra_specs(
volume_extra_specs,
hp_lefthand_rest_proxy.extra_specs_key_map.keys())
# map the extra specs key/value pairs to key/value pairs
# used as optional configuration values by the LeftHand backend
optional = self.driver.proxy._map_extra_specs(extra_specs)
# {'hplh:ao': 'true'} should map to
# {'isAdaptiveOptimizationEnabled': True}
# without hplh:data_pl since r-07 is an invalid value
self.assertDictMatch({'isAdaptiveOptimizationEnabled': True}, optional)
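    # Editor's sketch (not part of the original tests): the extra-spec tests
    # above exercise hp_lefthand_rest_proxy.extra_specs_key_map. That mapping is
    # assumed to look roughly like this illustrative dict; the authoritative
    # version lives in the driver module, not here.
    _assumed_extra_specs_key_map = {
        'hplh:provisioning': 'isThinProvisioned',
        'hplh:ao': 'isAdaptiveOptimizationEnabled',
        'hplh:data_pl': 'dataProtectionLevel',
    }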
def test_retype_with_no_LH_extra_specs(self):
# setup drive with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
mock_client.getVolumes.return_value = {'total': 1, 'members': []}
ctxt = context.get_admin_context()
host = {'host': self.serverName}
key_specs_old = {'foo': False, 'bar': 2, 'error': True}
key_specs_new = {'foo': True, 'bar': 5, 'error': False}
old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
diff, equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'],
new_type_ref['id'])
volume = dict.copy(self.volume)
old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
volume['volume_type'] = old_type
volume['host'] = host
new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
self.driver.retype(ctxt, volume, new_type, diff, host)
expected = self.driver_startup_call_stack + [
mock.call.getVolumeByName('fakevolume'),
mock.call.logout()]
# validate call chain
mock_client.assert_has_calls(expected)
def test_retype_with_only_LH_extra_specs(self):
# setup drive with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
mock_client.getVolumeByName.return_value = {'id': self.volume_id}
mock_client.getVolumes.return_value = {'total': 1, 'members': []}
ctxt = context.get_admin_context()
host = {'host': self.serverName}
key_specs_old = {'hplh:provisioning': 'thin'}
key_specs_new = {'hplh:provisioning': 'full', 'hplh:ao': 'true'}
old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
diff, equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'],
new_type_ref['id'])
volume = dict.copy(self.volume)
old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
volume['volume_type'] = old_type
volume['host'] = host
new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
self.driver.retype(ctxt, volume, new_type, diff, host)
expected = self.driver_startup_call_stack + [
mock.call.getVolumeByName('fakevolume'),
mock.call.modifyVolume(
1, {
'isThinProvisioned': False,
'isAdaptiveOptimizationEnabled': True}),
mock.call.logout()]
# validate call chain
mock_client.assert_has_calls(expected)
def test_retype_with_both_extra_specs(self):
# setup drive with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
mock_client.getVolumeByName.return_value = {'id': self.volume_id}
mock_client.getVolumes.return_value = {'total': 1, 'members': []}
ctxt = context.get_admin_context()
host = {'host': self.serverName}
key_specs_old = {'hplh:provisioning': 'full', 'foo': 'bar'}
key_specs_new = {'hplh:provisioning': 'thin', 'foo': 'foobar'}
old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
diff, equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'],
new_type_ref['id'])
volume = dict.copy(self.volume)
old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
volume['volume_type'] = old_type
volume['host'] = host
new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
self.driver.retype(ctxt, volume, new_type, diff, host)
expected = self.driver_startup_call_stack + [
mock.call.getVolumeByName('fakevolume'),
mock.call.modifyVolume(1, {'isThinProvisioned': True}),
mock.call.logout()]
# validate call chain
mock_client.assert_has_calls(expected)
def test_retype_same_extra_specs(self):
# setup drive with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
mock_client.getVolumeByName.return_value = {'id': self.volume_id}
mock_client.getVolumes.return_value = {'total': 1, 'members': []}
ctxt = context.get_admin_context()
host = {'host': self.serverName}
key_specs_old = {'hplh:provisioning': 'full', 'hplh:ao': 'true'}
key_specs_new = {'hplh:provisioning': 'full', 'hplh:ao': 'false'}
old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
diff, equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'],
new_type_ref['id'])
volume = dict.copy(self.volume)
old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
volume['volume_type'] = old_type
volume['host'] = host
new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
self.driver.retype(ctxt, volume, new_type, diff, host)
expected = self.driver_startup_call_stack + [
mock.call.getVolumeByName('fakevolume'),
mock.call.modifyVolume(
1,
{'isAdaptiveOptimizationEnabled': False}),
mock.call.logout()]
# validate call chain
mock_client.assert_has_calls(expected)
def test_migrate_no_location(self):
# setup drive with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
host = {'host': self.serverName, 'capabilities': {}}
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
(migrated, update) = self.driver.migrate_volume(
None,
self.volume,
host)
self.assertFalse(migrated)
mock_client.assert_has_calls([])
self.assertEqual(0, len(mock_client.method_calls))
def test_migrate_incorrect_vip(self):
# setup drive with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
mock_client.getClusterByName.return_value = {
"virtualIPAddresses": [{
"ipV4Address": "10.10.10.10",
"ipV4NetMask": "255.255.240.0"}],
"id": self.cluster_id}
mock_client.getVolumeByName.return_value = {'id': self.volume_id}
mock_client.getVolumes.return_value = {'total': 1, 'members': []}
location = (self.driver.proxy.DRIVER_LOCATION % {
'cluster': 'New_CloudCluster',
'vip': '10.10.10.111'})
host = {
'host': self.serverName,
'capabilities': {'location_info': location}}
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
(migrated, update) = self.driver.migrate_volume(
None,
self.volume,
host)
self.assertFalse(migrated)
expected = self.driver_startup_call_stack + [
mock.call.getClusterByName('New_CloudCluster'),
mock.call.logout()]
mock_client.assert_has_calls(expected)
# and nothing else
self.assertEqual(
len(expected),
len(mock_client.method_calls))
def test_migrate_with_location(self):
# setup drive with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
mock_client.getClusterByName.return_value = {
"virtualIPAddresses": [{
"ipV4Address": "10.10.10.111",
"ipV4NetMask": "255.255.240.0"}],
"id": self.cluster_id}
mock_client.getVolumeByName.return_value = {'id': self.volume_id,
'iscsiSessions': None}
mock_client.getVolume.return_value = {'snapshots': {
'resource': None}}
mock_client.getVolumes.return_value = {'total': 1, 'members': []}
location = (self.driver.proxy.DRIVER_LOCATION % {
'cluster': 'New_CloudCluster',
'vip': '10.10.10.111'})
host = {
'host': self.serverName,
'capabilities': {'location_info': location}}
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
(migrated, update) = self.driver.migrate_volume(
None,
self.volume,
host)
self.assertTrue(migrated)
expected = self.driver_startup_call_stack + [
mock.call.getClusterByName('New_CloudCluster'),
mock.call.logout()] + self.driver_startup_call_stack + [
mock.call.getVolumeByName('fakevolume'),
mock.call.getVolume(
1,
'fields=snapshots,snapshots[resource[members[name]]]'),
mock.call.modifyVolume(1, {'clusterName': 'New_CloudCluster'}),
mock.call.logout()]
mock_client.assert_has_calls(expected)
# and nothing else
self.assertEqual(
len(expected),
len(mock_client.method_calls))
def test_migrate_with_Snapshots(self):
# setup drive with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
mock_client.getClusterByName.return_value = {
"virtualIPAddresses": [{
"ipV4Address": "10.10.10.111",
"ipV4NetMask": "255.255.240.0"}],
"id": self.cluster_id}
mock_client.getVolumeByName.return_value = {
'id': self.volume_id,
'iscsiSessions': None}
mock_client.getVolume.return_value = {'snapshots': {
'resource': 'snapfoo'}}
mock_client.getVolumes.return_value = {'total': 1, 'members': []}
location = (self.driver.proxy.DRIVER_LOCATION % {
'cluster': 'New_CloudCluster',
'vip': '10.10.10.111'})
host = {
'host': self.serverName,
'capabilities': {'location_info': location}}
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
(migrated, update) = self.driver.migrate_volume(
None,
self.volume,
host)
self.assertFalse(migrated)
expected = self.driver_startup_call_stack + [
mock.call.getClusterByName('New_CloudCluster'),
mock.call.logout()] + self.driver_startup_call_stack + [
mock.call.getVolumeByName('fakevolume'),
mock.call.getVolume(
1,
'fields=snapshots,snapshots[resource[members[name]]]'),
mock.call.logout()]
mock_client.assert_has_calls(expected)
# and nothing else
self.assertEqual(
len(expected),
len(mock_client.method_calls))
@mock.patch.object(volume_types, 'get_volume_type',
return_value={'extra_specs': {'hplh:ao': 'true'}})
def test_create_volume_with_ao_true(self, _mock_volume_type):
# setup drive with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
volume_with_vt = self.volume
volume_with_vt['volume_type_id'] = 1
# mock return value of createVolume
mock_client.createVolume.return_value = {
'iscsiIqn': self.connector['initiator']}
mock_client.getVolumes.return_value = {'total': 1, 'members': []}
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
volume_info = self.driver.create_volume(volume_with_vt)
self.assertEqual('10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0',
volume_info['provider_location'])
            # make sure createVolume is called without an
            # isAdaptiveOptimizationEnabled entry when hplh:ao is 'true'
expected = self.driver_startup_call_stack + [
mock.call.createVolume(
'fakevolume',
1,
units.Gi,
{'isThinProvisioned': True,
'clusterName': 'CloudCluster1'}),
mock.call.logout()]
mock_client.assert_has_calls(expected)
@mock.patch.object(volume_types, 'get_volume_type',
return_value={'extra_specs': {'hplh:ao': 'false'}})
def test_create_volume_with_ao_false(self, _mock_volume_type):
# setup drive with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
volume_with_vt = self.volume
volume_with_vt['volume_type_id'] = 1
# mock return value of createVolume
mock_client.createVolume.return_value = {
'iscsiIqn': self.connector['initiator']}
mock_client.getVolumes.return_value = {'total': 1, 'members': []}
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
volume_info = self.driver.create_volume(volume_with_vt)
self.assertEqual('10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0',
volume_info['provider_location'])
# make sure createVolume is called with
# isAdaptiveOptimizationEnabled == false
expected = self.driver_startup_call_stack + [
mock.call.createVolume(
'fakevolume',
1,
units.Gi,
{'isThinProvisioned': True,
'clusterName': 'CloudCluster1',
'isAdaptiveOptimizationEnabled': False}),
mock.call.logout()]
mock_client.assert_has_calls(expected)
def test__get_existing_volume_ref_name(self):
self.setup_driver()
existing_ref = {'source-name': self.volume_name}
result = self.driver.proxy._get_existing_volume_ref_name(
existing_ref)
self.assertEqual(self.volume_name, result)
existing_ref = {'bad-key': 'foo'}
self.assertRaises(
exception.ManageExistingInvalidReference,
self.driver.proxy._get_existing_volume_ref_name,
existing_ref)
def test_manage_existing(self):
mock_client = self.setup_driver()
self.driver.proxy.api_version = "1.1"
volume = {'display_name': 'Foo Volume',
'volume_type': None,
'volume_type_id': None,
'id': '12345'}
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
mock_client.getVolumeByName.return_value = {'id': self.volume_id}
mock_client.getVolumes.return_value = {
"type": "volume",
"total": 1,
"members": [{
"id": self.volume_id,
"clusterName": self.cluster_name,
"size": 1
}]
}
existing_ref = {'source-name': self.volume_name}
expected_obj = {'display_name': 'Foo Volume'}
obj = self.driver.manage_existing(volume, existing_ref)
mock_client.assert_has_calls(
self.driver_startup_call_stack + [
mock.call.getVolumeByName(self.volume_name),
mock.call.logout()] +
self.driver_startup_call_stack + [
mock.call.modifyVolume(self.volume_id,
{'name': 'volume-12345'}),
mock.call.logout()])
self.assertEqual(expected_obj, obj)
@mock.patch.object(volume_types, 'get_volume_type')
def test_manage_existing_retype(self, _mock_volume_types):
mock_client = self.setup_driver()
_mock_volume_types.return_value = {
'name': 'gold',
'id': 'gold-id',
'extra_specs': {
'hplh:provisioning': 'thin',
'hplh:ao': 'true',
'hplh:data_pl': 'r-0',
'volume_type': self.volume_type}}
self.driver.proxy.api_version = "1.1"
volume = {'display_name': 'Foo Volume',
'host': 'stack@lefthand#lefthand',
'volume_type': 'gold',
'volume_type_id': 'bcfa9fa4-54a0-4340-a3d8-bfcf19aea65e',
'id': '12345'}
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
mock_client.getVolumeByName.return_value = {'id': self.volume_id}
mock_client.getVolumes.return_value = {
"type": "volume",
"total": 1,
"members": [{
"id": self.volume_id,
"clusterName": self.cluster_name,
"size": 1
}]
}
existing_ref = {'source-name': self.volume_name}
expected_obj = {'display_name': 'Foo Volume'}
obj = self.driver.manage_existing(volume, existing_ref)
mock_client.assert_has_calls(
self.driver_startup_call_stack + [
mock.call.getVolumeByName(self.volume_name),
mock.call.logout()] +
self.driver_startup_call_stack + [
mock.call.modifyVolume(self.volume_id,
{'name': 'volume-12345'}),
mock.call.logout()])
self.assertEqual(expected_obj, obj)
@mock.patch.object(volume_types, 'get_volume_type')
def test_manage_existing_retype_exception(self, _mock_volume_types):
mock_client = self.setup_driver()
_mock_volume_types.return_value = {
'name': 'gold',
'id': 'gold-id',
'extra_specs': {
'hplh:provisioning': 'thin',
'hplh:ao': 'true',
'hplh:data_pl': 'r-0',
'volume_type': self.volume_type}}
self.driver.proxy.retype = mock.Mock(
side_effect=exception.VolumeNotFound(volume_id="fake"))
self.driver.proxy.api_version = "1.1"
volume = {'display_name': 'Foo Volume',
'host': 'stack@lefthand#lefthand',
'volume_type': 'gold',
'volume_type_id': 'bcfa9fa4-54a0-4340-a3d8-bfcf19aea65e',
'id': '12345'}
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
mock_client.getVolumeByName.return_value = {'id': self.volume_id}
mock_client.getVolumes.return_value = {
"type": "volume",
"total": 1,
"members": [{
"id": self.volume_id,
"clusterName": self.cluster_name,
"size": 1
}]
}
existing_ref = {'source-name': self.volume_name}
self.assertRaises(exception.VolumeNotFound,
self.driver.manage_existing,
volume,
existing_ref)
mock_client.assert_has_calls(
self.driver_startup_call_stack + [
mock.call.getVolumeByName(self.volume_name),
mock.call.logout()] +
self.driver_startup_call_stack + [
mock.call.modifyVolume(self.volume_id,
{'name': 'volume-12345'}),
mock.call.logout()] +
self.driver_startup_call_stack + [
mock.call.modifyVolume(self.volume_id,
{'name': 'fakevolume'}),
mock.call.logout()])
def test_manage_existing_volume_type_exception(self):
mock_client = self.setup_driver()
self.driver.proxy.api_version = "1.1"
volume = {'display_name': 'Foo Volume',
'volume_type': 'gold',
'volume_type_id': 'bcfa9fa4-54a0-4340-a3d8-bfcf19aea65e',
'id': '12345'}
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
mock_client.getVolumeByName.return_value = {'id': self.volume_id}
mock_client.getVolumes.return_value = {
"type": "volume",
"total": 1,
"members": [{
"id": self.volume_id,
"clusterName": self.cluster_name,
"size": 1
}]
}
existing_ref = {'source-name': self.volume_name}
self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
self.driver.manage_existing,
volume=volume,
existing_ref=existing_ref)
mock_client.assert_has_calls(
self.driver_startup_call_stack + [
mock.call.getVolumeByName(self.volume_name),
mock.call.logout()])
def test_manage_existing_get_size(self):
mock_client = self.setup_driver()
mock_client.getVolumeByName.return_value = {'size': 2147483648}
self.driver.proxy.api_version = "1.1"
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
mock_client.getVolumes.return_value = {
"type": "volume",
"total": 1,
"members": [{
"id": self.volume_id,
"clusterName": self.cluster_name,
"size": 1
}]
}
volume = {}
existing_ref = {'source-name': self.volume_name}
size = self.driver.manage_existing_get_size(volume, existing_ref)
expected_size = 2
expected = [mock.call.getVolumeByName(existing_ref['source-name']),
mock.call.logout()]
mock_client.assert_has_calls(
self.driver_startup_call_stack +
expected)
self.assertEqual(expected_size, size)
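    # Editor's sketch (not part of the original tests): manage_existing_get_size
    # is assumed to convert the backend-reported byte count to whole GiB, e.g.
    #     int(math.ceil(float(2147483648) / units.Gi)) == 2
    # which is why the 2147483648-byte volume above reports a size of 2.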
def test_manage_existing_get_size_invalid_reference(self):
mock_client = self.setup_driver()
mock_client.getVolumeByName.return_value = {'size': 2147483648}
self.driver.proxy.api_version = "1.1"
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
volume = {}
existing_ref = {'source-name': "volume-12345"}
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size,
volume=volume,
existing_ref=existing_ref)
mock_client.assert_has_calls([])
existing_ref = {}
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size,
volume=volume,
existing_ref=existing_ref)
mock_client.assert_has_calls([])
def test_manage_existing_get_size_invalid_input(self):
mock_client = self.setup_driver()
mock_client.getVolumeByName.side_effect = (
hpexceptions.HTTPNotFound('fake'))
self.driver.proxy.api_version = "1.1"
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
mock_client.getVolumes.return_value = {
"type": "volume",
"total": 1,
"members": [{
"id": self.volume_id,
"clusterName": self.cluster_name,
"size": 1
}]
}
volume = {}
existing_ref = {'source-name': self.volume_name}
self.assertRaises(exception.InvalidInput,
self.driver.manage_existing_get_size,
volume=volume,
existing_ref=existing_ref)
expected = [mock.call.getVolumeByName(existing_ref['source-name'])]
mock_client.assert_has_calls(
self.driver_startup_call_stack +
expected)
def test_unmanage(self):
mock_client = self.setup_driver()
mock_client.getVolumeByName.return_value = {'id': self.volume_id}
# mock return value of getVolumes
mock_client.getVolumes.return_value = {
"type": "volume",
"total": 1,
"members": [{
"id": self.volume_id,
"clusterName": self.cluster_name,
"size": 1
}]
}
self.driver.proxy.api_version = "1.1"
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
self.driver.unmanage(self.volume)
new_name = 'unm-' + str(self.volume['id'])
expected = [
mock.call.getVolumeByName(self.volume['name']),
mock.call.modifyVolume(self.volume['id'], {'name': new_name}),
mock.call.logout()
]
mock_client.assert_has_calls(
self.driver_startup_call_stack +
expected)
def test_api_version(self):
self.setup_driver()
self.driver.proxy.api_version = "1.1"
self.driver.proxy._check_api_version()
self.driver.proxy.api_version = "1.0"
self.assertRaises(exception.InvalidInput,
self.driver.proxy._check_api_version)
def test_get_volume_stats(self):
# set up driver with default config
mock_client = self.setup_driver()
# mock return value of getVolumes
mock_client.getVolumes.return_value = {
"type": "volume",
"total": 1,
"members": [{
"id": 12345,
"clusterName": self.cluster_name,
"size": 1 * units.Gi
}]
}
with mock.patch.object(hp_lefthand_rest_proxy.HPLeftHandRESTProxy,
'_create_client') as mock_do_setup:
mock_do_setup.return_value = mock_client
# execute driver
stats = self.driver.get_volume_stats(True)
self.assertEqual('iSCSI', stats['storage_protocol'])
self.assertEqual(GOODNESS_FUNCTION, stats['goodness_function'])
self.assertEqual(FILTER_FUNCTION, stats['filter_function'])
self.assertEqual(1, int(stats['total_volumes']))
self.assertEqual(True, stats['thin_provisioning_support'])
self.assertEqual(True, stats['thick_provisioning_support'])
self.assertEqual(1, int(stats['provisioned_capacity_gb']))
self.assertEqual(25, int(stats['reserved_percentage']))
cap_util = (
float(units.Gi * 500 - units.Gi * 250) / float(units.Gi * 500)
) * 100
self.assertEqual(cap_util, float(stats['capacity_utilization']))
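            # Editor's note (not part of the original test): the mocked cluster
            # configured by setup_driver is assumed to report 500 GiB of total
            # space and 250 GiB free, so capacity_utilization works out to
            # (500 - 250) / 500 * 100 = 50 percent here.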
expected = self.driver_startup_call_stack + [
mock.call.getCluster(1),
mock.call.getVolumes(fields=['members[id]',
'members[clusterName]',
'members[size]'],
cluster=self.cluster_name),
mock.call.logout()]
mock_client.assert_has_calls(expected)
| 39.682014 | 79 | 0.57047 |
4a1f11abcca31dca3a2f5991d4217520a710fc54 | 9,787 | py | Python | magenta/models/pianoroll_rnn_nade/pianoroll_rnn_nade_generate.py | nkjulia/magenta | 063d320d59276a15afa0f8a3a8d386ad74594070 | ["Apache-2.0"] | 2,785 | 2020-06-05T03:00:48.000Z | 2022-03-31T20:59:43.000Z | magenta/models/pianoroll_rnn_nade/pianoroll_rnn_nade_generate.py | nkjulia/magenta | 063d320d59276a15afa0f8a3a8d386ad74594070 | ["Apache-2.0"] | 242 | 2020-06-04T18:35:42.000Z | 2022-03-30T09:14:18.000Z | magenta/models/pianoroll_rnn_nade/pianoroll_rnn_nade_generate.py | nkjulia/magenta | 063d320d59276a15afa0f8a3a8d386ad74594070 | ["Apache-2.0"] | 745 | 2020-06-05T02:32:45.000Z | 2022-03-30T04:44:20.000Z |
# Copyright 2021 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Generate pianoroll tracks from a trained RNN-NADE checkpoint.
Uses flags to define operation.
"""
import ast
import os
import time
from magenta.models.pianoroll_rnn_nade import pianoroll_rnn_nade_model
from magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_sequence_generator import PianorollRnnNadeSequenceGenerator
from magenta.models.shared import sequence_generator
from magenta.models.shared import sequence_generator_bundle
import note_seq
from note_seq.protobuf import generator_pb2
from note_seq.protobuf import music_pb2
import tensorflow.compat.v1 as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
'run_dir', None,
'Path to the directory where the latest checkpoint will be loaded from.')
tf.app.flags.DEFINE_string(
'bundle_file', None,
'Path to the bundle file. If specified, this will take priority over '
'run_dir, unless save_generator_bundle is True, in which case both this '
'flag and run_dir are required')
tf.app.flags.DEFINE_boolean(
'save_generator_bundle', False,
'If true, instead of generating a sequence, will save this generator as a '
'bundle file in the location specified by the bundle_file flag')
tf.app.flags.DEFINE_string(
'bundle_description', None,
'A short, human-readable text description of the bundle (e.g., training '
'data, hyper parameters, etc.).')
tf.app.flags.DEFINE_string(
'config', 'rnn-nade', 'Config to use. Ignored if bundle is provided.')
tf.app.flags.DEFINE_string(
'output_dir', '/tmp/pianoroll_rnn_nade/generated',
'The directory where MIDI files will be saved to.')
tf.app.flags.DEFINE_integer(
'num_outputs', 10,
'The number of tracks to generate. One MIDI file will be created for '
'each.')
tf.app.flags.DEFINE_integer(
'num_steps', 128,
'The total number of steps the generated track should be, priming '
'track length + generated steps. Each step is a 16th of a bar.')
tf.app.flags.DEFINE_string(
'primer_pitches', '',
'A string representation of a Python list of pitches that will be used as '
'a starting chord with a quarter note duration. For example: '
'"[60, 64, 67]"')
tf.app.flags.DEFINE_string(
'primer_pianoroll', '', 'A string representation of a Python list of '
'`note_seq.PianorollSequence` event values (tuples of active MIDI'
'pitches for a sequence of steps). For example: '
'"[(55,), (54,), (55, 53), (50,), (62, 52), (), (63, 55)]".')
tf.app.flags.DEFINE_string(
'primer_midi', '',
'The path to a MIDI file containing a polyphonic track that will be used '
'as a priming track.')
tf.app.flags.DEFINE_float(
'qpm', None,
'The quarters per minute to play generated output at. If a primer MIDI is '
'given, the qpm from that will override this flag. If qpm is None, qpm '
'will default to 60.')
tf.app.flags.DEFINE_integer(
'beam_size', 1,
'The beam size to use for beam search when generating tracks.')
tf.app.flags.DEFINE_integer(
'branch_factor', 1,
'The branch factor to use for beam search when generating tracks.')
tf.app.flags.DEFINE_string(
'log', 'INFO',
'The threshold for what messages will be logged DEBUG, INFO, WARN, ERROR, '
'or FATAL.')
tf.app.flags.DEFINE_string(
'hparams', '',
'Comma-separated list of `name=value` pairs. For each pair, the value of '
'the hyperparameter named `name` is set to `value`. This mapping is merged '
'with the default hyperparameters.')
def get_checkpoint():
"""Get the training dir or checkpoint path to be used by the model."""
if FLAGS.run_dir and FLAGS.bundle_file and not FLAGS.save_generator_bundle:
raise sequence_generator.SequenceGeneratorError(
'Cannot specify both bundle_file and run_dir')
if FLAGS.run_dir:
train_dir = os.path.join(os.path.expanduser(FLAGS.run_dir), 'train')
return train_dir
else:
return None
def get_bundle():
"""Returns a generator_pb2.GeneratorBundle object based read from bundle_file.
Returns:
Either a generator_pb2.GeneratorBundle or None if the bundle_file flag is
not set or the save_generator_bundle flag is set.
"""
if FLAGS.save_generator_bundle:
return None
if FLAGS.bundle_file is None:
return None
bundle_file = os.path.expanduser(FLAGS.bundle_file)
return sequence_generator_bundle.read_bundle_file(bundle_file)
def run_with_flags(generator):
"""Generates pianoroll tracks and saves them as MIDI files.
Uses the options specified by the flags defined in this module.
Args:
generator: The PianorollRnnNadeSequenceGenerator to use for generation.
"""
if not FLAGS.output_dir:
tf.logging.fatal('--output_dir required')
return
output_dir = os.path.expanduser(FLAGS.output_dir)
primer_midi = None
if FLAGS.primer_midi:
primer_midi = os.path.expanduser(FLAGS.primer_midi)
if not tf.gfile.Exists(output_dir):
tf.gfile.MakeDirs(output_dir)
primer_sequence = None
qpm = FLAGS.qpm if FLAGS.qpm else 60
if FLAGS.primer_pitches:
primer_sequence = music_pb2.NoteSequence()
primer_sequence.tempos.add().qpm = qpm
primer_sequence.ticks_per_quarter = note_seq.STANDARD_PPQ
for pitch in ast.literal_eval(FLAGS.primer_pitches):
note = primer_sequence.notes.add()
note.start_time = 0
note.end_time = 60.0 / qpm
note.pitch = pitch
note.velocity = 100
primer_sequence.total_time = primer_sequence.notes[-1].end_time
elif FLAGS.primer_pianoroll:
primer_pianoroll = note_seq.PianorollSequence(
events_list=ast.literal_eval(FLAGS.primer_pianoroll),
steps_per_quarter=4,
shift_range=True)
primer_sequence = primer_pianoroll.to_sequence(qpm=qpm)
elif primer_midi:
primer_sequence = note_seq.midi_file_to_sequence_proto(primer_midi)
if primer_sequence.tempos and primer_sequence.tempos[0].qpm:
qpm = primer_sequence.tempos[0].qpm
else:
tf.logging.warning(
'No priming sequence specified. Defaulting to empty sequence.')
primer_sequence = music_pb2.NoteSequence()
primer_sequence.tempos.add().qpm = qpm
primer_sequence.ticks_per_quarter = note_seq.STANDARD_PPQ
# Derive the total number of seconds to generate.
seconds_per_step = 60.0 / qpm / generator.steps_per_quarter
generate_end_time = FLAGS.num_steps * seconds_per_step
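  # Worked example (editor's note, not part of the original script): with the
  # default qpm of 60 and steps_per_quarter of 4, seconds_per_step is
  # 60.0 / 60 / 4 = 0.25, so the default num_steps of 128 yields a
  # generate_end_time of 32 seconds.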
# Specify start/stop time for generation based on starting generation at the
# end of the priming sequence and continuing until the sequence is num_steps
# long.
generator_options = generator_pb2.GeneratorOptions()
# Set the start time to begin when the last note ends.
generate_section = generator_options.generate_sections.add(
start_time=primer_sequence.total_time,
end_time=generate_end_time)
if generate_section.start_time >= generate_section.end_time:
tf.logging.fatal(
'Priming sequence is longer than the total number of steps '
'requested: Priming sequence length: %s, Total length '
'requested: %s',
generate_section.start_time, generate_end_time)
return
generator_options.args['beam_size'].int_value = FLAGS.beam_size
generator_options.args['branch_factor'].int_value = FLAGS.branch_factor
tf.logging.info('primer_sequence: %s', primer_sequence)
tf.logging.info('generator_options: %s', generator_options)
# Make the generate request num_outputs times and save the output as midi
# files.
date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
digits = len(str(FLAGS.num_outputs))
for i in range(FLAGS.num_outputs):
generated_sequence = generator.generate(primer_sequence, generator_options)
midi_filename = '%s_%s.mid' % (date_and_time, str(i + 1).zfill(digits))
midi_path = os.path.join(output_dir, midi_filename)
note_seq.sequence_proto_to_midi_file(generated_sequence, midi_path)
tf.logging.info('Wrote %d MIDI files to %s',
FLAGS.num_outputs, output_dir)
def main(unused_argv):
"""Saves bundle or runs generator based on flags."""
tf.logging.set_verbosity(FLAGS.log)
bundle = get_bundle()
config_id = bundle.generator_details.id if bundle else FLAGS.config
config = pianoroll_rnn_nade_model.default_configs[config_id]
config.hparams.parse(FLAGS.hparams)
# Having too large of a batch size will slow generation down unnecessarily.
config.hparams.batch_size = min(
config.hparams.batch_size, FLAGS.beam_size * FLAGS.branch_factor)
generator = PianorollRnnNadeSequenceGenerator(
model=pianoroll_rnn_nade_model.PianorollRnnNadeModel(config),
details=config.details,
steps_per_quarter=config.steps_per_quarter,
checkpoint=get_checkpoint(),
bundle=bundle)
if FLAGS.save_generator_bundle:
bundle_filename = os.path.expanduser(FLAGS.bundle_file)
if FLAGS.bundle_description is None:
tf.logging.warning('No bundle description provided.')
tf.logging.info('Saving generator bundle to %s', bundle_filename)
generator.create_bundle_file(bundle_filename, FLAGS.bundle_description)
else:
run_with_flags(generator)
def console_entry_point():
tf.disable_v2_behavior()
tf.app.run(main)
if __name__ == '__main__':
console_entry_point()
| 38.380392 | 117 | 0.74037 |
4a1f142db5ab086323da6f16f1e9d27c5b9dd559 | 4,355 | py | Python | user_gui/user_verify.py | pantelisantonoudiou/deep-seizure-detect | 040d746833f5d35172fe944da0798f4909770f9c | ["Apache-2.0"] | 1 | 2022-01-24T10:29:20.000Z | 2022-01-24T10:29:20.000Z | user_gui/user_verify.py | pantelisantonoudiou/deep-seizure-detect | 040d746833f5d35172fe944da0798f4909770f9c | ["Apache-2.0"] | null | null | null | user_gui/user_verify.py | pantelisantonoudiou/deep-seizure-detect | 040d746833f5d35172fe944da0798f4909770f9c | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 29 15:10:48 2020
@author: panton01
"""
### -------- IMPORTS ---------- ###
import os, tables
from pick import pick
import numpy as np
# User
from array_helper import find_szr_idx
### ------------------------------------------ ####
class UserVerify:
"""
Class for User verification of detected seizures.
"""
# class constructor (data retrieval)
def __init__(self, prop_dict):
"""
lab2mat(main_path)
Parameters
----------
prop_dict : Dict, Path to raw data.
"""
# get general path (parent)
self.gen_path = prop_dict['main_path']
# get data path
self.org_rawpath = os.path.join(self.gen_path, prop_dict['org_rawpath'])
# get raw prediction path
self.rawpred_path = os.path.join(self.gen_path, prop_dict['rawpred_path'])
# create user verified path
self.verpred_path = os.path.join(self.gen_path, prop_dict['verpred_path'])
# make path if it doesn't exist
if os.path.exists(self.verpred_path) is False:
os.mkdir( self.verpred_path)
# get sampling rate
self.fs = prop_dict['fs']
# get win in seconds
self.win = prop_dict['win']
def select_file(self):
"""
select_file(self)
Returns
-------
option : Str, selection of file id
"""
# get all files in raw predictions folder
rawpredlist = list(filter(lambda k: '.csv' in k, os.listdir(self.rawpred_path)))
# get all files in user verified predictions
verpredlist = list(filter(lambda k: '.csv' in k, os.listdir(self.verpred_path)))
# get unique list
not_analyzed_filelist = list(set(rawpredlist) - set(verpredlist))
# remaining filelist
analyzed_filelist = list(set(rawpredlist) - set(not_analyzed_filelist))
# filelist
filelist = [' *** ' + s for s in analyzed_filelist] + not_analyzed_filelist
# select from command list
title = 'Please select file for analysis: '
option, index = pick(filelist, title, indicator = '-> ')
return option.replace(' *** ','')
def main_func(self, file_id):
"""
main_func(self, file_id)
Parameters
----------
file_id : String
Returns
-------
data : 3d Numpy Array (1D = segments, 2D = time, 3D = channel)
idx_bounds : 2D Numpy Array (rows = seizures, cols = start and end points of detected seizures)
"""
print('-> File being analyzed: ', file_id,'\n')
# Get predictions
pred_path = os.path.join(self.rawpred_path, file_id) # get path
bin_pred = np.loadtxt(pred_path, delimiter=',', skiprows=0) # get predictions
idx_bounds = find_szr_idx(bin_pred[:,1]>0.5, np.array([0,1])) # find seizure boundaries
# load raw data for visualization
data_path = os.path.join(self.org_rawpath, file_id.replace('.csv','.h5'))
f = tables.open_file(data_path, mode='r')
data = f.root.data[:]
f.close()
# check whether to continue
print('>>>>',idx_bounds.shape[0] ,'seizures detected')
return data, idx_bounds
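    # Editor's sketch (not part of the original module): find_szr_idx is assumed
    # to convert the boolean prediction trace into a 2D array of window indices,
    # one [start, stop] row per detected seizure, for example something like:
    #     find_szr_idx(np.array([0, 0, 1, 1, 0]) > 0.5, np.array([0, 1]))
    #     -> array([[2, 3]])
    # Those bounds are what get displayed for user verification.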
def save_emptyidx(self, data_len,file_id):
"""
Save user predictions to csv file as binary
Returns
-------
None.
"""
# pre allocate file with zeros
ver_pred = np.zeros(data_len)
# save file
np.savetxt(os.path.join(self.verpred_path, file_id), ver_pred, delimiter=',',fmt='%i')
print('Verified predictions for ', file_id, ' were saved\n')
| 24.060773 | 103 | 0.493685 |
4a1f147ec81ed1eac1a12c2b5737c86105ede20b | 1,003 | py | Python | setup.py | jpleger/atklite | 802e68c00bb1a1c2da325a6d57f898ad05f599d3 | ["BSD-2-Clause"] | null | null | null | setup.py | jpleger/atklite | 802e68c00bb1a1c2da325a6d57f898ad05f599d3 | ["BSD-2-Clause"] | null | null | null | setup.py | jpleger/atklite | 802e68c00bb1a1c2da325a6d57f898ad05f599d3 | ["BSD-2-Clause"] | null | null | null |
from distribute_setup import use_setuptools
use_setuptools()
from setuptools import setup
version = "1.0-release"
setup(
name = "atklite",
version = version,
install_requires = [
"ssdeep",
"python-magic",
],
include_package_data = True,
py_modules = ["atklite"],
entry_points = {
'console_scripts': [
'atk-info = atklite:main',
],
},
author = "James Pleger",
author_email = "[email protected]",
url = "https://bitbucket.org/jpleger/atk/",
description = "Library to simplify process of gathering identifiable attributes about files",
license = "ISC",
long_description = open("README.txt").read(),
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: ISC License (ISCL)",
"Operating System :: POSIX",
"Programming Language :: Python :: 2 :: Only",
"Topic :: Security",
],
)
| 27.861111 | 97 | 0.60319 |
4a1f149557039217fee53a4fbb0e2c9263492074 | 24,236 | py | Python | chrome/tools/build/win/create_installer_archive.py | junmin-zhu/chromium-rivertrail | eb1a57aca71fe68d96e48af8998dcfbe45171ee1 | ["BSD-3-Clause"] | 5 | 2018-03-10T13:08:42.000Z | 2021-07-26T15:02:11.000Z | chrome/tools/build/win/create_installer_archive.py | quisquous/chromium | b25660e05cddc9d0c3053b3514f07037acc69a10 | ["BSD-3-Clause"] | 1 | 2015-07-21T08:02:01.000Z | 2015-07-21T08:02:01.000Z | chrome/tools/build/win/create_installer_archive.py | jianglong0156/chromium.src | d496dfeebb0f282468827654c2b3769b3378c087 | ["BSD-3-Clause"] | 6 | 2016-11-14T10:13:35.000Z | 2021-01-23T15:29:53.000Z |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to create Chrome Installer archive.
This script is used to create an archive of all the files required for a
Chrome install in appropriate directory structure. It reads chrome.release
file as input, creates chrome.7z archive, compresses setup.exe and
generates packed_files.txt for mini_installer project.
"""
import ConfigParser
import glob
import optparse
import os
import shutil
import subprocess
import sys
ARCHIVE_DIR = "installer_archive"
# suffix to uncompressed full archive file, appended to options.output_name
ARCHIVE_SUFFIX = ".7z"
BSDIFF_EXEC = "bsdiff.exe"
CHROME_DIR = "Chrome-bin"
CHROME_PATCH_FILE_SUFFIX = "_patch" # prefixed by options.output_name
# compressed full archive suffix, will be prefixed by options.output_name
COMPRESSED_ARCHIVE_SUFFIX = ".packed.7z"
COMPRESSED_FILE_EXT = ".packed.7z" # extension of patch archive file
COURGETTE_EXEC = "courgette.exe"
MINI_INSTALLER_INPUT_FILE = "packed_files.txt"
PATCH_FILE_EXT = '.diff'
SETUP_EXEC = "setup.exe"
SETUP_PATCH_FILE_PREFIX = "setup_patch"
TEMP_ARCHIVE_DIR = "temp_installer_archive"
VERSION_FILE = "VERSION"
def BuildVersion(build_dir):
"""Returns the full build version string constructed from information in
VERSION_FILE. Any segment not found in that file will default to '0'.
"""
major = 0
minor = 0
build = 0
patch = 0
for line in open(os.path.join(build_dir, '../../chrome', VERSION_FILE), 'r'):
line = line.rstrip()
if line.startswith('MAJOR='):
major = line[6:]
elif line.startswith('MINOR='):
minor = line[6:]
elif line.startswith('BUILD='):
build = line[6:]
elif line.startswith('PATCH='):
patch = line[6:]
return '%s.%s.%s.%s' % (major, minor, build, patch)
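# Editor's note (illustrative, not part of the original script): given a VERSION
# file containing, for example,
#   MAJOR=23
#   MINOR=0
#   BUILD=1271
#   PATCH=95
# BuildVersion() returns '23.0.1271.95'; any segment missing from the file
# defaults to '0'.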
def CompressUsingLZMA(build_dir, compressed_file, input_file):
lzma_exec = GetLZMAExec(build_dir)
cmd = [lzma_exec,
'a', '-t7z',
# Flags equivalent to -mx9 (ultra) but with the bcj2 turned on (exe
# pre-filter). This results in a ~2.3MB decrease in installer size on
# a 24MB installer.
# Additionally, these settings reflect a 7zip 4.42 and up change in
         # the definition of -mx9, increasing the dictionary size, moving to
# 26bit = 64MB. This results in an additional ~3.5MB decrease.
# Older 7zip versions can support these settings, as these changes
# rely on existing functionality in the lzma format.
'-m0=BCJ2',
'-m1=LZMA:d27:fb128',
'-m2=LZMA:d22:fb128:mf=bt2',
'-m3=LZMA:d22:fb128:mf=bt2',
'-mb0:1',
'-mb0s1:2',
'-mb0s2:3',
compressed_file,
input_file,]
if os.path.exists(compressed_file):
os.remove(compressed_file)
RunSystemCommand(cmd)
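# Editor's note (illustrative, not part of the original script): a typical call
# compresses the uncompressed archive produced by CreateArchiveFile, e.g.
#   CompressUsingLZMA('out/Release', 'out/Release/chrome.packed.7z',
#                     'out/Release/chrome.7z')
# where the paths are hypothetical; it shells out to third_party/lzma_sdk's
# 7za.exe with the BCJ2/LZMA settings listed above.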
def CopyAllFilesToStagingDir(config, distribution, staging_dir, build_dir,
enable_hidpi, enable_touch_ui):
"""Copies the files required for installer archive.
Copies all common files required for various distributions of Chromium and
also files for the specific Chromium build specified by distribution.
"""
CopySectionFilesToStagingDir(config, 'GENERAL', staging_dir, build_dir)
if distribution:
if len(distribution) > 1 and distribution[0] == '_':
distribution = distribution[1:]
CopySectionFilesToStagingDir(config, distribution.upper(),
staging_dir, build_dir)
if enable_hidpi == '1':
CopySectionFilesToStagingDir(config, 'HIDPI', staging_dir, build_dir)
if enable_touch_ui == '1':
CopySectionFilesToStagingDir(config, 'TOUCH', staging_dir, build_dir)
def CopySectionFilesToStagingDir(config, section, staging_dir, src_dir):
"""Copies installer archive files specified in section from src_dir to
staging_dir. This method reads section from config and copies all the
files specified from src_dir to staging dir.
"""
for option in config.options(section):
if option.endswith('dir'):
continue
dst_dir = os.path.join(staging_dir, config.get(section, option))
src_paths = glob.glob(os.path.join(src_dir, option))
if src_paths and not os.path.exists(dst_dir):
os.makedirs(dst_dir)
for src_path in src_paths:
dst_path = os.path.join(dst_dir, os.path.basename(src_path))
if not os.path.exists(dst_path):
shutil.copy(src_path, dst_dir)
def GenerateDiffPatch(options, orig_file, new_file, patch_file):
if (options.diff_algorithm == "COURGETTE"):
exe_file = os.path.join(options.last_chrome_installer, COURGETTE_EXEC)
cmd = '%s -gen "%s" "%s" "%s"' % (exe_file, orig_file, new_file, patch_file)
else:
exe_file = os.path.join(options.build_dir, BSDIFF_EXEC)
cmd = [exe_file, orig_file, new_file, patch_file,]
RunSystemCommand(cmd)
def GetLZMAExec(build_dir):
lzma_exec = os.path.join(build_dir, "..", "..", "third_party",
"lzma_sdk", "Executable", "7za.exe")
return lzma_exec
def GetPrevVersion(build_dir, temp_dir, last_chrome_installer, output_name):
if not last_chrome_installer:
return ''
lzma_exec = GetLZMAExec(build_dir)
prev_archive_file = os.path.join(last_chrome_installer,
output_name + ARCHIVE_SUFFIX)
cmd = [lzma_exec,
'x',
'-o"%s"' % temp_dir,
prev_archive_file,
'Chrome-bin/*/chrome.dll',]
RunSystemCommand(cmd)
dll_path = glob.glob(os.path.join(temp_dir, 'Chrome-bin', '*', 'chrome.dll'))
return os.path.split(os.path.split(dll_path[0])[0])[1]
def MakeStagingDirectories(staging_dir):
"""Creates a staging path for installer archive. If directory exists already,
deletes the existing directory.
"""
file_path = os.path.join(staging_dir, TEMP_ARCHIVE_DIR)
if os.path.exists(file_path):
shutil.rmtree(file_path)
os.makedirs(file_path)
temp_file_path = os.path.join(staging_dir, TEMP_ARCHIVE_DIR)
if os.path.exists(temp_file_path):
shutil.rmtree(temp_file_path)
os.makedirs(temp_file_path)
return (file_path, temp_file_path)
def Readconfig(input_file, current_version):
"""Reads config information from input file after setting default value of
global variabes.
"""
variables = {}
variables['ChromeDir'] = CHROME_DIR
variables['VersionDir'] = os.path.join(variables['ChromeDir'],
current_version)
config = ConfigParser.SafeConfigParser(variables)
config.read(input_file)
return config
def RunSystemCommand(cmd, **kw):
print 'Running', cmd
exit_code = subprocess.call(cmd, **kw)
if (exit_code != 0):
raise Exception("Error while running cmd: %s, exit_code: %s" %
(cmd, exit_code))
def CreateArchiveFile(options, staging_dir, current_version, prev_version):
"""Creates a new installer archive file after deleting any existing old file.
"""
# First create an uncompressed archive file for the current build (chrome.7z)
lzma_exec = GetLZMAExec(options.build_dir)
archive_file = os.path.join(options.output_dir,
options.output_name + ARCHIVE_SUFFIX)
cmd = [lzma_exec,
'a',
'-t7z',
archive_file,
os.path.join(staging_dir, CHROME_DIR),
'-mx0',]
  # There doesn't seem to be any way in 7za.exe to overwrite an existing file, so
# we always delete before creating a new one.
if not os.path.exists(archive_file):
RunSystemCommand(cmd)
elif options.skip_rebuild_archive != "true":
os.remove(archive_file)
RunSystemCommand(cmd)
# Do not compress the archive in developer (component) builds.
if options.component_build == '1':
compressed_file = os.path.join(
options.output_dir, options.output_name + COMPRESSED_ARCHIVE_SUFFIX)
if os.path.exists(compressed_file):
os.remove(compressed_file)
return os.path.basename(archive_file)
# If we are generating a patch, run bsdiff against previous build and
# compress the resulting patch file. If this is not a patch just compress the
# uncompressed archive file.
patch_name_prefix = options.output_name + CHROME_PATCH_FILE_SUFFIX
if options.last_chrome_installer:
prev_archive_file = os.path.join(options.last_chrome_installer,
options.output_name + ARCHIVE_SUFFIX)
patch_file = os.path.join(options.build_dir, patch_name_prefix +
PATCH_FILE_EXT)
GenerateDiffPatch(options, prev_archive_file, archive_file, patch_file)
compressed_archive_file = patch_name_prefix + '_' + \
current_version + '_from_' + prev_version + \
COMPRESSED_FILE_EXT
orig_file = patch_file
else:
compressed_archive_file = options.output_name + COMPRESSED_ARCHIVE_SUFFIX
orig_file = archive_file
compressed_archive_file_path = os.path.join(options.output_dir,
compressed_archive_file)
CompressUsingLZMA(options.build_dir, compressed_archive_file_path, orig_file)
return compressed_archive_file
def PrepareSetupExec(options, current_version, prev_version):
"""Prepares setup.exe for bundling in mini_installer based on options."""
if options.setup_exe_format == "FULL":
setup_file = SETUP_EXEC
elif options.setup_exe_format == "DIFF":
if not options.last_chrome_installer:
raise Exception(
"To use DIFF for setup.exe, --last_chrome_installer is needed.")
prev_setup_file = os.path.join(options.last_chrome_installer, SETUP_EXEC)
new_setup_file = os.path.join(options.build_dir, SETUP_EXEC)
patch_file = os.path.join(options.build_dir, SETUP_PATCH_FILE_PREFIX +
PATCH_FILE_EXT)
GenerateDiffPatch(options, prev_setup_file, new_setup_file, patch_file)
setup_file = SETUP_PATCH_FILE_PREFIX + '_' + current_version + \
'_from_' + prev_version + COMPRESSED_FILE_EXT
setup_file_path = os.path.join(options.build_dir, setup_file)
CompressUsingLZMA(options.build_dir, setup_file_path, patch_file)
else:
cmd = ['makecab.exe',
'/D', 'CompressionType=LZX',
'/V1',
'/L', options.output_dir,
os.path.join(options.build_dir, SETUP_EXEC),]
# Send useless makecab progress on stdout to the bitbucket.
RunSystemCommand(cmd, stdout=open(os.devnull, "w"))
setup_file = SETUP_EXEC[:-1] + "_"
return setup_file
_RESOURCE_FILE_TEMPLATE = """\
// This file is automatically generated by create_installer_archive.py.
// It contains the resource entries that are going to be linked inside
// mini_installer.exe. For each file to be linked there should be two
// lines:
// - The first line contains the output filename (without path) and the
//   type of the resource ('BN' - not compressed, 'BL' - LZ compressed,
// 'B7' - LZMA compressed)
// - The second line contains the path to the input file. Uses '/' to
// separate path components.
%(setup_file)s %(setup_file_resource_type)s
"%(setup_file_path)s"
%(archive_file)s B7
"%(archive_file_path)s"
"""
def CreateResourceInputFile(
output_dir, setup_format, archive_file, setup_file, resource_file_path):
"""Creates resource input file (packed_files.txt) for mini_installer project.
This method checks the format of setup.exe being used and according sets
its resource type.
"""
setup_resource_type = "BL"
if (setup_format == "FULL"):
setup_resource_type = "BN"
elif (setup_format == "DIFF"):
setup_resource_type = "B7"
# Expand the resource file template.
args = {
'setup_file': setup_file,
'setup_file_resource_type': setup_resource_type,
'setup_file_path':
os.path.join(output_dir, setup_file).replace("\\","/"),
'archive_file': archive_file,
'archive_file_path':
os.path.join(output_dir, archive_file).replace("\\","/"),
}
resource_file = _RESOURCE_FILE_TEMPLATE % args
with open(resource_file_path, 'w') as f:
f.write(resource_file)
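# Editor's note (illustrative, not part of the original script): for a FULL
# setup.exe and hypothetical output paths, the expanded packed_files.txt would
# contain entries along the lines of:
#   setup.exe BN
#   "C:/out/Release/setup.exe"
#   chrome.7z B7
#   "C:/out/Release/chrome.7z"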
# Reads |manifest_name| from |build_dir| and writes |manifest_name| to
# |output_dir| with the same content plus |inserted_string| added just before
# |insert_before|.
def CopyAndAugmentManifest(build_dir, output_dir, manifest_name,
inserted_string, insert_before):
manifest_file = open(os.path.join(build_dir, manifest_name), 'r')
manifest_lines = manifest_file.readlines()
manifest_file.close()
insert_line = -1
insert_pos = -1
for i in xrange(len(manifest_lines)):
insert_pos = manifest_lines[i].find(insert_before)
if insert_pos != -1:
insert_line = i
break
if insert_line == -1:
raise ValueError('Could not find {0} in the manifest:\n{1}'.format(
insert_before, ''.join(manifest_lines)))
old = manifest_lines[insert_line]
manifest_lines[insert_line] = (old[:insert_pos] + inserted_string +
old[insert_pos:])
modified_manifest_file = open(
os.path.join(output_dir, manifest_name), 'w')
modified_manifest_file.write(''.join(manifest_lines))
modified_manifest_file.close()
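# Example call (arguments are hypothetical): insert a dependency block just
# before the closing </assembly> tag of chrome.exe.manifest:
#   CopyAndAugmentManifest('out/Release', 'staging/Chrome-bin',
#                          'chrome.exe.manifest',
#                          '<dependency>...</dependency>', '</assembly>')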
# Copy the relevant CRT DLLs to |build_dir|. We copy DLLs from all versions
# of VS installed to make sure we have the correct CRT version, unused DLLs
# should not conflict with the others anyways.
def CopyVisualStudioRuntimeDLLs(build_dir):
is_debug = os.path.basename(build_dir) == 'Debug'
if not is_debug and os.path.basename(build_dir) != 'Release':
print ("Warning: could not determine build configuration from "
"output directory, assuming Release build.")
crt_dlls = []
if is_debug:
crt_dlls = glob.glob(
"C:/Program Files (x86)/Microsoft Visual Studio */VC/redist/"
"Debug_NonRedist/x86/Microsoft.*.DebugCRT/*.dll")
else:
crt_dlls = glob.glob(
"C:/Program Files (x86)/Microsoft Visual Studio */VC/redist/x86/"
"Microsoft.*.CRT/*.dll")
# Also handle the case where someone is building using only winsdk and
# doesn't have Visual Studio installed.
if not crt_dlls:
# On a 64-bit system, 32-bit dlls are in SysWOW64 (don't ask).
if os.access("C:/Windows/SysWOW64", os.F_OK):
sys_dll_dir = "C:/Windows/SysWOW64"
else:
sys_dll_dir = "C:/Windows/System32"
if is_debug:
crt_dlls = glob.glob(os.path.join(sys_dll_dir, "msvc*0d.dll"))
else:
crt_dlls = glob.glob(os.path.join(sys_dll_dir, "msvc*0.dll"))
if not crt_dlls:
print ("Warning: could not find CRT DLLs to copy to build dir - target "
"may not run on a system that doesn't have those DLLs.")
for dll in crt_dlls:
shutil.copy(dll, build_dir)
# Copies component build DLLs and generates required config files and manifests
# in order for chrome.exe and setup.exe to be able to find those DLLs at
# run-time.
# This is meant for developer builds only and should never be used to package
# an official build.
def DoComponentBuildTasks(staging_dir, build_dir, current_version):
# Get the required directories for the upcoming operations.
chrome_dir = os.path.join(staging_dir, CHROME_DIR)
version_dir = os.path.join(chrome_dir, current_version)
installer_dir = os.path.join(version_dir, 'Installer')
# |installer_dir| is technically only created post-install, but we need it
# now to add setup.exe's config and manifest to the archive.
if not os.path.exists(installer_dir):
os.mkdir(installer_dir)
# Copy the VS CRT DLLs to |build_dir|. This must be done before the general
# copy step below to ensure the CRT DLLs are added to the archive and marked
# as a dependency in the exe manifests generated below.
CopyVisualStudioRuntimeDLLs(build_dir)
# Copy all the DLLs in |build_dir| to the version directory. Simultaneously
# build a list of their names to mark them as dependencies of chrome.exe and
# setup.exe later.
dlls = glob.glob(os.path.join(build_dir, '*.dll'))
dll_names = []
for dll in dlls:
shutil.copy(dll, version_dir)
dll_names.append(os.path.splitext(os.path.basename(dll))[0])
exe_config = (
"<configuration>\n"
" <windows>\n"
" <assemblyBinding xmlns='urn:schemas-microsoft-com:asm.v1'>\n"
" <probing privatePath='{rel_path}'/>\n"
" </assemblyBinding>\n"
" </windows>\n"
"</configuration>")
# Write chrome.exe.config to point to the version directory.
chrome_exe_config_file = open(
os.path.join(chrome_dir, 'chrome.exe.config'), 'w')
chrome_exe_config_file.write(exe_config.format(rel_path=current_version))
chrome_exe_config_file.close()
# Write setup.exe.config to point to the version directory (which is one
# level up from setup.exe post-install).
setup_exe_config_file = open(
os.path.join(installer_dir, 'setup.exe.config'), 'w')
setup_exe_config_file.write(exe_config.format(rel_path='..'))
setup_exe_config_file.close()
# Add a dependency for each DLL in |dlls| to the existing manifests for
# chrome.exe and setup.exe. Some of these DLLs are not actually used by
# either process, but listing them all as dependencies doesn't hurt as it
# only makes them visible to the exes, just like they already are in the
# build output directory.
exe_manifest_dependencies_list = []
for name in dll_names:
exe_manifest_dependencies_list.append(
"<dependency>"
"<dependentAssembly>"
"<assemblyIdentity type='win32' name='chrome.{dll_name}' "
"version='0.0.0.0' processorArchitecture='x86' language='*'/>"
"</dependentAssembly>"
"</dependency>".format(dll_name=name))
exe_manifest_dependencies = ''.join(exe_manifest_dependencies_list)
# Write a modified chrome.exe.manifest beside chrome.exe.
CopyAndAugmentManifest(build_dir, chrome_dir, 'chrome.exe.manifest',
exe_manifest_dependencies, '</assembly>')
# Write a modified setup.exe.manifest beside setup.exe in
# |version_dir|/Installer.
CopyAndAugmentManifest(build_dir, installer_dir, 'setup.exe.manifest',
exe_manifest_dependencies, '</assembly>')
# Generate assembly manifests for each DLL in |dlls|. These do not interfere
# with the private manifests potentially embedded in each DLL. They simply
# allow chrome.exe and setup.exe to see those DLLs although they are in a
# separate directory post-install.
for name in dll_names:
dll_manifest = (
"<assembly\n"
" xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>\n"
" <assemblyIdentity name='chrome.{dll_name}' version='0.0.0.0'\n"
" type='win32' processorArchitecture='x86'/>\n"
" <file name='{dll_name}.dll'/>\n"
"</assembly>".format(dll_name=name))
dll_manifest_file = open(os.path.join(
version_dir, "chrome.{dll_name}.manifest".format(dll_name=name)), 'w')
dll_manifest_file.write(dll_manifest)
dll_manifest_file.close()
def main(options):
"""Main method that reads input file, creates archive file and write
resource input file.
"""
current_version = BuildVersion(options.build_dir)
config = Readconfig(options.input_file, current_version)
(staging_dir, temp_dir) = MakeStagingDirectories(options.staging_dir)
prev_version = GetPrevVersion(options.build_dir, temp_dir,
options.last_chrome_installer,
options.output_name)
# Preferentially copy the files we can find from the output_dir, as
# this is where we'll find the Syzygy-optimized executables when
# building the optimized mini_installer.
if options.build_dir != options.output_dir:
CopyAllFilesToStagingDir(config, options.distribution,
staging_dir, options.output_dir,
options.enable_hidpi, options.enable_touch_ui)
# Now copy the remainder of the files from the build dir.
CopyAllFilesToStagingDir(config, options.distribution,
staging_dir, options.build_dir,
options.enable_hidpi, options.enable_touch_ui)
if options.component_build == '1':
DoComponentBuildTasks(staging_dir, options.build_dir, current_version)
version_numbers = current_version.split('.')
current_build_number = version_numbers[2] + '.' + version_numbers[3]
prev_build_number = ''
if prev_version:
version_numbers = prev_version.split('.')
prev_build_number = version_numbers[2] + '.' + version_numbers[3]
  # Name of the archive file built (for example - chrome.7z or
  # patch-<old_version>-<new_version>.7z or patch-<new_version>.7z).
archive_file = CreateArchiveFile(options, staging_dir,
current_build_number, prev_build_number)
setup_file = PrepareSetupExec(options,
current_build_number, prev_build_number)
CreateResourceInputFile(options.output_dir, options.setup_exe_format,
archive_file, setup_file, options.resource_file_path)
def _ParseOptions():
parser = optparse.OptionParser()
parser.add_option('-i', '--input_file',
help='Input file describing which files to archive.')
parser.add_option('-b', '--build_dir',
help='Build directory. The paths in input_file are relative to this.')
parser.add_option('--staging_dir',
help='Staging directory where intermediate files and directories '
'will be created')
parser.add_option('-o', '--output_dir',
help='The output directory where the archives will be written. '
'Defaults to the build_dir.')
parser.add_option('--resource_file_path',
help='The path where the resource file will be output. '
'Defaults to %s in the build directory.' %
MINI_INSTALLER_INPUT_FILE)
parser.add_option('-d', '--distribution',
help='Name of Chromium Distribution. Optional.')
parser.add_option('-s', '--skip_rebuild_archive',
default="False", help='Skip re-building Chrome.7z archive if it exists.')
parser.add_option('-l', '--last_chrome_installer',
help='Generate differential installer. The value of this parameter '
'specifies the directory that contains base versions of '
'setup.exe, courgette.exe (if --diff_algorithm is COURGETTE) '
'& chrome.7z.')
parser.add_option('-f', '--setup_exe_format', default='COMPRESSED',
help='How setup.exe should be included {COMPRESSED|DIFF|FULL}.')
parser.add_option('-a', '--diff_algorithm', default='BSDIFF',
help='Diff algorithm to use when generating differential patches '
'{BSDIFF|COURGETTE}.')
parser.add_option('-n', '--output_name', default='chrome',
help='Name used to prefix names of generated archives.')
parser.add_option('--enable_hidpi', default='0',
help='Whether to include HiDPI resource files.')
parser.add_option('--enable_touch_ui', default='0',
help='Whether to include resource files from the "TOUCH" section of the '
'input file.')
parser.add_option('--component_build', default='0',
help='Whether this archive is packaging a component build. This will '
'also turn off compression of chrome.7z into chrome.packed.7z and '
'helpfully delete any old chrome.packed.7z in |output_dir|.')
options, _ = parser.parse_args()
if not options.build_dir:
parser.error('You must provide a build dir.')
options.build_dir = os.path.normpath(options.build_dir)
if not options.staging_dir:
parser.error('You must provide a staging dir.')
if not options.input_file:
parser.error('You must provide an input file')
if not options.output_dir:
options.output_dir = options.build_dir
if not options.resource_file_path:
options.resource_file_path = os.path.join(options.build_dir,
MINI_INSTALLER_INPUT_FILE)
return options
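# Example invocation (paths and values are illustrative only):
#   python create_installer_archive.py -i chrome.release -b out/Release \
#       --staging_dir staging -o out/Release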
if '__main__' == __name__:
print sys.argv
sys.exit(main(_ParseOptions()))
| 40.125828 | 80 | 0.69174 |
4a1f150746fe11db0886bf611029458b6aa34507 | 6,155 | py | Python | hojehatransportes/hat/migrations/0004_auto__add_userprofile.py | jpgneves/hojehatransportes | 00913462d997d6c1aabfa3b8292072c9f928939a | [
"MIT"
] | null | null | null | hojehatransportes/hat/migrations/0004_auto__add_userprofile.py | jpgneves/hojehatransportes | 00913462d997d6c1aabfa3b8292072c9f928939a | [
"MIT"
] | 1 | 2015-12-14T06:40:15.000Z | 2015-12-14T06:40:15.000Z | hojehatransportes/hat/migrations/0004_auto__add_userprofile.py | jpgneves/hojehatransportes | 00913462d997d6c1aabfa3b8292072c9f928939a | [
"MIT"
] | null | null | null | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UserProfile'
db.create_table('hat_userprofile', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], unique=True)),
('mail_notifications', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('hat', ['UserProfile'])
def backwards(self, orm):
# Deleting model 'UserProfile'
db.delete_table('hat_userprofile')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'hat.company': {
'Meta': {'object_name': 'Company'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'hat.region': {
'Meta': {'object_name': 'Region'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'hat.strike': {
'Meta': {'object_name': 'Strike'},
'all_day': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'canceled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'company': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['hat.Company']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'downvotes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['hat.Region']"}),
'source_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {}),
'submitter': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['auth.User']"}),
'upvotes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'hat.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mail_notifications': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['hat']
| 62.806122 | 182 | 0.559383 |
4a1f15b1cce640e3ed0755c21113753f4b3bc82b | 17,070 | py | Python | intersight/model/hyperflex_server_firmware_version_entry.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 5 | 2021-12-16T15:13:32.000Z | 2022-03-29T16:09:54.000Z | intersight/model/hyperflex_server_firmware_version_entry.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 4 | 2022-01-25T19:05:51.000Z | 2022-03-29T20:18:37.000Z | intersight/model/hyperflex_server_firmware_version_entry.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 2 | 2020-07-07T15:01:08.000Z | 2022-01-31T04:27:35.000Z | """
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from intersight.model.display_names import DisplayNames
from intersight.model.hyperflex_app_setting_constraint import HyperflexAppSettingConstraint
from intersight.model.hyperflex_server_firmware_version_entry_all_of import HyperflexServerFirmwareVersionEntryAllOf
from intersight.model.hyperflex_server_firmware_version_relationship import HyperflexServerFirmwareVersionRelationship
from intersight.model.mo_base_mo import MoBaseMo
from intersight.model.mo_base_mo_relationship import MoBaseMoRelationship
from intersight.model.mo_tag import MoTag
from intersight.model.mo_version_context import MoVersionContext
globals()['DisplayNames'] = DisplayNames
globals()['HyperflexAppSettingConstraint'] = HyperflexAppSettingConstraint
globals()['HyperflexServerFirmwareVersionEntryAllOf'] = HyperflexServerFirmwareVersionEntryAllOf
globals()['HyperflexServerFirmwareVersionRelationship'] = HyperflexServerFirmwareVersionRelationship
globals()['MoBaseMo'] = MoBaseMo
globals()['MoBaseMoRelationship'] = MoBaseMoRelationship
globals()['MoTag'] = MoTag
globals()['MoVersionContext'] = MoVersionContext
class HyperflexServerFirmwareVersionEntry(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('class_id',): {
'HYPERFLEX.SERVERFIRMWAREVERSIONENTRY': "hyperflex.ServerFirmwareVersionEntry",
},
('object_type',): {
'HYPERFLEX.SERVERFIRMWAREVERSIONENTRY': "hyperflex.ServerFirmwareVersionEntry",
},
('server_platform',): {
'M5': "M5",
'M3': "M3",
'M4': "M4",
'M6': "M6",
},
}
validations = {
('version',): {
'regex': {
'pattern': r'(^3\.[1-9]\([1-9][a-z]\)$|^[4-9]\.[0-9]\([1-9][a-z]\)$)', # noqa: E501
},
},
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'class_id': (str,), # noqa: E501
'object_type': (str,), # noqa: E501
'constraint': (HyperflexAppSettingConstraint,), # noqa: E501
'server_platform': (str,), # noqa: E501
'version': (str,), # noqa: E501
'server_firmware_version': (HyperflexServerFirmwareVersionRelationship,), # noqa: E501
'account_moid': (str,), # noqa: E501
'create_time': (datetime,), # noqa: E501
'domain_group_moid': (str,), # noqa: E501
'mod_time': (datetime,), # noqa: E501
'moid': (str,), # noqa: E501
'owners': ([str], none_type,), # noqa: E501
'shared_scope': (str,), # noqa: E501
'tags': ([MoTag], none_type,), # noqa: E501
'version_context': (MoVersionContext,), # noqa: E501
'ancestors': ([MoBaseMoRelationship], none_type,), # noqa: E501
'parent': (MoBaseMoRelationship,), # noqa: E501
'permission_resources': ([MoBaseMoRelationship], none_type,), # noqa: E501
'display_names': (DisplayNames,), # noqa: E501
}
@cached_property
def discriminator():
val = {
}
if not val:
return None
return {'class_id': val}
attribute_map = {
'class_id': 'ClassId', # noqa: E501
'object_type': 'ObjectType', # noqa: E501
'constraint': 'Constraint', # noqa: E501
'server_platform': 'ServerPlatform', # noqa: E501
'version': 'Version', # noqa: E501
'server_firmware_version': 'ServerFirmwareVersion', # noqa: E501
'account_moid': 'AccountMoid', # noqa: E501
'create_time': 'CreateTime', # noqa: E501
'domain_group_moid': 'DomainGroupMoid', # noqa: E501
'mod_time': 'ModTime', # noqa: E501
'moid': 'Moid', # noqa: E501
'owners': 'Owners', # noqa: E501
'shared_scope': 'SharedScope', # noqa: E501
'tags': 'Tags', # noqa: E501
'version_context': 'VersionContext', # noqa: E501
'ancestors': 'Ancestors', # noqa: E501
'parent': 'Parent', # noqa: E501
'permission_resources': 'PermissionResources', # noqa: E501
'display_names': 'DisplayNames', # noqa: E501
}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""HyperflexServerFirmwareVersionEntry - a model defined in OpenAPI
Args:
Keyword Args:
class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data.. defaults to "hyperflex.ServerFirmwareVersionEntry", must be one of ["hyperflex.ServerFirmwareVersionEntry", ] # noqa: E501
object_type (str): The fully-qualified name of the instantiated, concrete type. The value should be the same as the 'ClassId' property.. defaults to "hyperflex.ServerFirmwareVersionEntry", must be one of ["hyperflex.ServerFirmwareVersionEntry", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
constraint (HyperflexAppSettingConstraint): [optional] # noqa: E501
server_platform (str): The server platform type that is applicable for the server firmware bundle version. * `M5` - M5 generation of UCS server. * `M3` - M3 generation of UCS server. * `M4` - M4 generation of UCS server. * `M6` - M6 generation of UCS server.. [optional] if omitted the server will use the default value of "M5" # noqa: E501
version (str): The server firmware bundle version.. [optional] # noqa: E501
server_firmware_version (HyperflexServerFirmwareVersionRelationship): [optional] # noqa: E501
account_moid (str): The Account ID for this managed object.. [optional] # noqa: E501
create_time (datetime): The time when this managed object was created.. [optional] # noqa: E501
domain_group_moid (str): The DomainGroup ID for this managed object.. [optional] # noqa: E501
mod_time (datetime): The time when this managed object was last modified.. [optional] # noqa: E501
moid (str): The unique identifier of this Managed Object instance.. [optional] # noqa: E501
owners ([str], none_type): [optional] # noqa: E501
shared_scope (str): Intersight provides pre-built workflows, tasks and policies to end users through global catalogs. Objects that are made available through global catalogs are said to have a 'shared' ownership. Shared objects are either made globally available to all end users or restricted to end users based on their license entitlement. Users can use this property to differentiate the scope (global or a specific license tier) to which a shared MO belongs.. [optional] # noqa: E501
tags ([MoTag], none_type): [optional] # noqa: E501
version_context (MoVersionContext): [optional] # noqa: E501
ancestors ([MoBaseMoRelationship], none_type): An array of relationships to moBaseMo resources.. [optional] # noqa: E501
parent (MoBaseMoRelationship): [optional] # noqa: E501
permission_resources ([MoBaseMoRelationship], none_type): An array of relationships to moBaseMo resources.. [optional] # noqa: E501
display_names (DisplayNames): [optional] # noqa: E501
"""
class_id = kwargs.get('class_id', "hyperflex.ServerFirmwareVersionEntry")
object_type = kwargs.get('object_type', "hyperflex.ServerFirmwareVersionEntry")
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
required_args = {
'class_id': class_id,
'object_type': object_type,
}
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(
constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in kwargs.items():
if var_name in unused_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
not self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
HyperflexServerFirmwareVersionEntryAllOf,
MoBaseMo,
],
'oneOf': [
],
}
| 53.84858 | 1,678 | 0.641476 |
4a1f16563c1c010cd08e6e1608cba4b21d061add | 154 | py | Python | python_toolbox/wx_tools/drawing_tools/__init__.py | hboshnak/python_toolbox | cb9ef64b48f1d03275484d707dc5079b6701ad0c | [
"MIT"
] | 119 | 2015-02-05T17:59:47.000Z | 2022-02-21T22:43:40.000Z | python_toolbox/wx_tools/drawing_tools/__init__.py | hboshnak/python_toolbox | cb9ef64b48f1d03275484d707dc5079b6701ad0c | [
"MIT"
] | 4 | 2019-04-24T14:01:14.000Z | 2020-05-21T12:03:29.000Z | python_toolbox/wx_tools/drawing_tools/__init__.py | hboshnak/python_toolbox | cb9ef64b48f1d03275484d707dc5079b6701ad0c | [
"MIT"
] | 14 | 2015-03-30T06:30:42.000Z | 2021-12-24T23:45:11.000Z | # Copyright 2009-2017 Ram Rachum.
# This program is distributed under the MIT license.
'''Defines tools for drawing with wxPython.'''
from . import pens | 25.666667 | 52 | 0.753247 |
4a1f16e1715924ec0df702140bf161de6ff6d455 | 1,654 | py | Python | bot/exts/utils/error_handler.py | dhzdhd/Obsidian-Python | 9ab047aeb96bf79f60f7c268c3252528cbb992a7 | [
"MIT"
] | null | null | null | bot/exts/utils/error_handler.py | dhzdhd/Obsidian-Python | 9ab047aeb96bf79f60f7c268c3252528cbb992a7 | [
"MIT"
] | null | null | null | bot/exts/utils/error_handler.py | dhzdhd/Obsidian-Python | 9ab047aeb96bf79f60f7c268c3252528cbb992a7 | [
"MIT"
] | null | null | null | from discord.ext import commands
from utils.embed_helper import ErrorEmbed
class ErrorHandler(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
if isinstance(error, commands.CommandOnCooldown):
error_embed = ErrorEmbed(
description=f"Command on cooldown\n\nRetry after **{round(error.retry_after)}** seconds",
author=ctx.author
)
await ctx.send(embed=error_embed, delete_after=15)
return
if isinstance(error, commands.MissingRequiredArgument):
error_embed = ErrorEmbed(
description="Command missing required arguments\n\nFor more info refer to `>help <command>`",
author=ctx.author
)
await ctx.send(embed=error_embed, delete_after=15)
return
if isinstance(error, commands.TooManyArguments):
error_embed = ErrorEmbed(
description="Command received too many arguments\n\nFor more info refer to `>help <command>`",
author=ctx.author
)
await ctx.send(embed=error_embed, delete_after=15)
return
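        # CommandError is the base class of the branches above, so this generic
        # fallback must stay last.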
if isinstance(error, commands.CommandError):
error_embed = ErrorEmbed(
description="Command has an error\n\n For more info refer to `>help <command>`",
author=ctx.author
)
await ctx.send(embed=error_embed, delete_after=15)
def setup(bot):
"""ErrorEvents Cog setup."""
bot.add_cog(ErrorHandler(bot))
| 35.191489 | 110 | 0.606409 |
4a1f16efed8f845450c4a19a15a3884be301480f | 4,314 | py | Python | python/ctranslate2/converters/converter.py | aj7tesh/CTranslate2 | 8e424efdbcf40c89dca7e237a249464a95eeaf74 | [
"MIT"
] | null | null | null | python/ctranslate2/converters/converter.py | aj7tesh/CTranslate2 | 8e424efdbcf40c89dca7e237a249464a95eeaf74 | [
"MIT"
] | null | null | null | python/ctranslate2/converters/converter.py | aj7tesh/CTranslate2 | 8e424efdbcf40c89dca7e237a249464a95eeaf74 | [
"MIT"
] | null | null | null | import abc
import filecmp
import inspect
import os
import shutil
import six
from ctranslate2.specs import catalog
from ctranslate2.specs.model_spec import ModelSpec
def _list_specs():
return {symbol:getattr(catalog, symbol) for symbol in dir(catalog)
if inspect.isclass(getattr(catalog, symbol)) and not symbol.startswith("_")}
@six.add_metaclass(abc.ABCMeta)
class Converter(object):
@staticmethod
def declare_arguments(parser):
parser.add_argument("--output_dir", required=True,
help="Output model directory.")
parser.add_argument("--model_spec", required=True, choices=list(six.iterkeys(_list_specs())),
help="Type of model to convert.")
parser.add_argument("--vocab_mapping", default=None,
help="Vocabulary mapping file (optional).")
parser.add_argument("--quantization", default=None, choices=["int8", "int16", "float16"],
help="Weight quantization type.")
parser.add_argument("--force", action="store_true",
help="Force conversion even if the output directory already exists.")
return parser
def convert_from_args(self, args):
return self.convert(
args.output_dir,
args.model_spec,
vmap=args.vocab_mapping,
quantization=args.quantization,
force=args.force)
def convert(self, output_dir, model_spec, vmap=None, quantization=None, force=False):
if os.path.exists(output_dir) and not force:
raise RuntimeError(
"output directory %s already exists, use --force to override" % output_dir)
if isinstance(model_spec, six.string_types):
spec_class = _list_specs()[model_spec]
model_spec = spec_class()
if not isinstance(model_spec, ModelSpec):
raise TypeError("model_spec should extend ctranslate2.specs.ModelSpec")
try:
src_vocab, tgt_vocab = self._load(model_spec)
except NotImplementedError:
raise NotImplementedError("This converter does not support the model %s" % model_spec)
model_spec.validate()
self._check_vocabulary_size("source", src_vocab, model_spec.source_vocabulary_size)
self._check_vocabulary_size("target", tgt_vocab, model_spec.target_vocabulary_size)
model_spec.optimize(quantization=quantization)
# Create model directory.
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir)
model_spec.serialize(os.path.join(output_dir, "model.bin"))
if vmap is not None:
shutil.copy(vmap, os.path.join(output_dir, "vmap.txt"))
src_vocab_path = os.path.join(output_dir, "source_vocabulary.txt")
tgt_vocab_path = os.path.join(output_dir, "target_vocabulary.txt")
self._save_vocabulary(src_vocab, src_vocab_path)
self._save_vocabulary(tgt_vocab, tgt_vocab_path)
# For shared vocabularies, keep a single file in the model directory.
if filecmp.cmp(src_vocab_path, tgt_vocab_path, shallow=False):
os.remove(tgt_vocab_path)
os.rename(src_vocab_path, os.path.join(output_dir, "shared_vocabulary.txt"))
return output_dir
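    # Illustrative usage of a concrete subclass (class and file names below are
    # hypothetical):
    #   converter = MyFrameworkConverter("averaged-model.pt")
    #   converter.convert("ende_ctranslate2", "TransformerBase",
    #                     quantization="int8", force=True)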
@abc.abstractmethod
def _load(self, model_spec):
raise NotImplementedError()
@abc.abstractmethod
def _save_vocabulary(self, vocab, destination):
raise NotImplementedError()
def _vocabulary_size(self, vocab):
"""Returns the vocabulary size.
When defined, this enables additional error checking when converting models.
"""
return None
def _check_vocabulary_size(self, name, vocab, expected_size):
"""Raises an exception if expected and actual vocabulary sizes are known but
do not match.
"""
if expected_size is None:
return
vocab_size = self._vocabulary_size(vocab)
if vocab_size is None:
return
if vocab_size != expected_size:
raise ValueError("%s vocabulary has size %d but the model expected a vocabulary "
"of size %d" % (name.capitalize(), vocab_size, expected_size))
| 41.480769 | 101 | 0.654613 |
4a1f17582c983feb288ea44bf76cfed7a36e0274 | 5,297 | py | Python | Introduction to Python/9. Strings.py | RaghuDalal/Coding-Ninjas-Python-Course-Solutions | 14522c8191c03ed39510c4fc489d8b52652d7941 | [
"MIT"
] | 1 | 2020-06-29T16:16:33.000Z | 2020-06-29T16:16:33.000Z | Introduction to Python/9. Strings.py | RaghuDalal/Coding-Ninjas-Python-Course-Solutions | 14522c8191c03ed39510c4fc489d8b52652d7941 | [
"MIT"
] | null | null | null | Introduction to Python/9. Strings.py | RaghuDalal/Coding-Ninjas-Python-Course-Solutions | 14522c8191c03ed39510c4fc489d8b52652d7941 | [
"MIT"
] | null | null | null | #Check Palindrome
"""
Given a String s, check if it is a palindrome. Return true if the string is a palindrome, else return false.
Palindrome strings are those where the string s and its reverse are exactly the same.
Input Format :
String S
Output Format :
"true" if S is palindrome, else "false"
"""
Sample Input 1 :
abcdcba
Sample Output 1 :
true
Sample Input 2 :
abcd
Sample Output 2 :
false
Solution :
def reverse(s):
return s[::-1]
def isPalindrome(s):
rev = reverse(s)
if (s == rev):
return True
return False
s = input()
ans = isPalindrome(s)
if ans == 1:
print("true")
else:
print("false")
#Check Permutation
"""
Given two strings, S and T, check if they are permutations of each other. Return true or false.
Permutation means - the length of both the strings should be the same and they should contain the same set of characters.
Order of characters doesn't matter.
Note : Input strings contain only lowercase english alphabets.
Input format :
Line 1 : String 1
Line 2 : String 2
Output format :
'true' or 'false'
"""
Sample Input 1 :
abcde
baedc
Sample Output 1 :
true
Sample Input 2 :
abc
cbd
Sample Output 2 :
false
Solution :
NO_OF_CHARS = 256
def Permutation(str1, str2):
count1 = [0] * NO_OF_CHARS
count2 = [0] * NO_OF_CHARS
for i in str1:
count1[ord(i)] += 1
for i in str2:
count2[ord(i)] += 1
if len(str1) != len(str2):
return 0
for i in range(NO_OF_CHARS):
if count1[i] != count2[i]:
return 0
return 1
str1 = input()
str2 = input()
if Permutation(str1, str2):
print("true")
else:
print("false")
#Remove Consecutive Duplicates
"""
Given a string, S, remove all the consecutive duplicates that are present in the given string.
That means, if 'aaa' is present in the string then it should become 'a' in the output string.
Input format :
String S
Output format :
Modified string
"""
Sample Input 1:
aabccbaa
Sample Output 1:
abcba
Sample Input 2:
xxyyzxx
Sample Output 2:
xyzx
Solution :
def removeDuplicates(S):
n = len(S)
if (n < 2):
return
j = 0
for i in range(n):
if (S[j] != S[i]):
j += 1
S[j] = S[i]
j += 1
S = S[:j]
return S
if __name__ == '__main__':
S1 = input()
S1 = list(S1.rstrip())
S1 = removeDuplicates(S1)
print(*S1, sep="")
#Reverse Each Word
"""
Given a string S, reverse each word of a string individually.
For eg. if a string is "abc def", reversed string should be "cba fed".
Input Format :
String S
Output Format :
Modified string
"""
Sample Input 1:
Welcome to Coding Ninjas
Sample Output 1:
emocleW ot gnidoC sajniN
Sample Input 2:
Give proper names to variables and functions
Sample Output 2:
eviG reporp seman ot selbairav dna snoitcnuf
Solution :
def reverseWordSentence(Sentence):
return ' '.join(word[::-1] for word in Sentence.split(" "))
Sentence = input()
print(reverseWordSentence(Sentence))
#Remove character
"""
Given a string and a character x.
Write a function to remove all occurrences of x character from the given string.
Leave the string as it is, if the given character is not present in the string.
Input Format :
Line 1 : String S
Line 2 : Character c
Output Format :
Modified string
"""
Sample Input 1:
welcome to coding ninjas
o
Sample Output 1:
welcme t cding ninjas
Sample Input 2:
Think of edge cases before submitting solutions
x
Sample Output 2:
Think of edge cases before submitting solutions
Solution :
test_string = input()
x = input()
for i in x:
test_string = test_string.replace(i, '')
print(str(test_string))
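# Note: since x is a single character here, the loop is equivalent to:
#   print(test_string.replace(x, ''))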
#Highest Occurring Character
"""
Given a string, S, find and return the highest occurring character present in the given string.
If there are 2 characters in the input string with same frequency, return the character which comes first.
Note : Assume all the characters in the given string are lowercase.
Input format :
String S
Output format :
Highest occurring character
"""
Sample Input 1:
abdefgbabfba
Sample Output 1:
b
Sample Input 2:
xy
Sample Output 2:
x
Solution :
ASCII_SIZE = 256
def getMaxOccuringChar(str):
count = [0] * ASCII_SIZE
max = -1
c = ''
for i in str:
        count[ord(i)] += 1
for i in str:
if max < count[ord(i)]:
max = count[ord(i)]
c = i
return c
str = input()
print(getMaxOccuringChar(str))
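# Note: max() returns the first maximal element, so ties keep the character
# that appears first; an equivalent one-liner would be:
#   print(max(str, key=str.count))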
#Compress the String
"""
Write a program to do basic string compression.
For a character which is consecutively repeated more than once,
replace consecutive duplicate occurrences with the count of repetitions.
For e.g. if a String has 'x' repeated 5 times, replace this "xxxxx" with "x5".
Note : Consecutive count of every character in the input string is less than or equal to 9.
Input Format :
String S
Output Format :
Compressed string
"""
Sample Input 1 :
aaabbccdsa
Sample Output 1 :
a3b2c2dsa
Sample Input 2 :
aaabbcddeeeee
Sample Output 2 :
a3b2cd2e5
Solution :
str1 = input()
str2 = ''
i = 0
while i < len(str1):
count = 0
temp = str1[i]
for j in range(i, len(str1)):
if str1[j] == temp:
count += 1
else:
break
if count > 1:
str2 += str1[i] + str(count)
else:
str2 += str1[i]
i = i + count - 1
i += 1
print(str2)
| 17.539735 | 106 | 0.670757 |
4a1f17a1cae0ad9b0c38b26d71cf995fc94df3e4 | 1,256 | py | Python | code/tools/split_file.py | MichSchli/GCNQA | 67edf676aba526e9c38ec45446b45ea169b17eb3 | [
"MIT"
] | 2 | 2018-09-25T03:55:31.000Z | 2018-09-25T05:45:55.000Z | code/tools/split_file.py | MichSchli/GCNQA | 67edf676aba526e9c38ec45446b45ea169b17eb3 | [
"MIT"
] | 1 | 2018-09-25T03:56:17.000Z | 2018-09-25T03:56:17.000Z | code/tools/split_file.py | MichSchli/GCNQA | 67edf676aba526e9c38ec45446b45ea169b17eb3 | [
"MIT"
] | null | null | null | import argparse
parser = argparse.ArgumentParser(description='Split a file of blank-line-separated blocks into two files.')
parser.add_argument('--in_file')
parser.add_argument('--file_1')
parser.add_argument('--file_2')
parser.add_argument('--second_file_size')
args = parser.parse_args()
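# Example invocation (file names are hypothetical):
#   python split_file.py --in_file all_blocks.txt --file_1 train.txt \
#       --file_2 valid.txt --second_file_size 500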
items = [[]]
for line in open(args.in_file, 'r'):
line = line.strip()
if line:
items[-1].append(line)
else:
items.append([])
if not items[-1]:
items = items[:-1]
items = ["\n".join(lines) for lines in items]
split_idx = int(args.second_file_size)
first_file_items = items[:-split_idx]
second_file_items = items[-split_idx:]
with open(args.file_1, "w") as outfile:
first = True
for item in first_file_items:
if first:
first = False
else:
print("", file=outfile)
if item:
print("", file=outfile)
print(item, end="", file=outfile)
with open(args.file_2, "w") as outfile:
first = True
for item in second_file_items:
if first:
first = False
else:
print("", file=outfile)
if item:
print("", file=outfile)
print(item, end="", file=outfile) | 24.153846 | 116 | 0.607484 |
4a1f17fbe16e4f072d71315e9717616a43105105 | 549 | py | Python | arc/arc022/arc022c.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | 1 | 2019-08-21T00:49:34.000Z | 2019-08-21T00:49:34.000Z | arc/arc022/arc022c.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | null | null | null | arc/arc022/arc022c.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | null | null | null | from sys import setrecursionlimit, stdin
readline = stdin.readline
setrecursionlimit(10 ** 6)
N = int(readline())
links = [[] for _ in range(N)]
for _ in range(N - 1):
A, B = map(int, readline().split())
links[A - 1].append(B - 1)
links[B - 1].append(A - 1)
def dfs(c, p, d):
result = (d, c)
for a in links[c]:
if a == p:
continue
result = max(result, dfs(a, c, d + 1))
return result
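# Two-pass technique: the farthest vertex x from an arbitrary root (vertex 0)
# is one endpoint of the tree's longest path, and the farthest vertex y from x
# is the other endpoint.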
x = max(dfs(c, 0, 1) for c in links[0])[1]
y = max(dfs(c, x, 1) for c in links[x])[1]
print(x + 1, y + 1)
| 20.333333 | 46 | 0.544627 |
4a1f187d4c4524697a1fc19f9e561868212ebcc1 | 22,210 | py | Python | saleor/graphql/core/mutations.py | kayuapi/saleor | dfbcee80185d6681385d9d616c0c5318ec2fe60a | [
"CC-BY-4.0"
] | null | null | null | saleor/graphql/core/mutations.py | kayuapi/saleor | dfbcee80185d6681385d9d616c0c5318ec2fe60a | [
"CC-BY-4.0"
] | 10 | 2021-03-19T04:33:44.000Z | 2022-03-12T00:45:59.000Z | saleor/graphql/core/mutations.py | kayuapi/saleor | dfbcee80185d6681385d9d616c0c5318ec2fe60a | [
"CC-BY-4.0"
] | null | null | null | from itertools import chain
from typing import Tuple, Union
import graphene
from django.contrib.auth import get_user_model
from django.core.exceptions import (
NON_FIELD_ERRORS,
ImproperlyConfigured,
ValidationError,
)
from django.db.models.fields.files import FileField
from graphene import ObjectType
from graphene.types.mutation import MutationOptions
from graphene_django.registry import get_global_registry
from graphql.error import GraphQLError
from graphql_jwt import ObtainJSONWebToken, Verify
from graphql_jwt.exceptions import JSONWebTokenError, PermissionDenied
from ...account import models
from ...account.error_codes import AccountErrorCode
from ..account.types import User
from ..utils import get_nodes
from .types import Error, Upload
from .types.common import AccountError
from .utils import from_global_id_strict_type, snake_to_camel_case
from .utils.error_codes import get_error_code_from_error
registry = get_global_registry()
def get_model_name(model):
"""Return name of the model with first letter lowercase."""
model_name = model.__name__
return model_name[:1].lower() + model_name[1:]
def get_output_fields(model, return_field_name):
"""Return mutation output field for model instance."""
model_type = registry.get_type_for_model(model)
if not model_type:
raise ImproperlyConfigured(
"Unable to find type for model %s in graphene registry" % model.__name__
)
fields = {return_field_name: graphene.Field(model_type)}
return fields
def get_error_fields(error_type_class, error_type_field):
return {
error_type_field: graphene.Field(
graphene.List(
graphene.NonNull(error_type_class),
description="List of errors that occurred executing the mutation.",
),
default_value=[],
required=True,
)
}
def validation_error_to_error_type(validation_error: ValidationError) -> list:
"""Convert a ValidationError into a list of Error types."""
err_list = []
if hasattr(validation_error, "error_dict"):
# convert field errors
for field, field_errors in validation_error.error_dict.items():
field = None if field == NON_FIELD_ERRORS else snake_to_camel_case(field)
for err in field_errors:
err_list.append(
(
Error(field=field, message=err.messages[0]),
get_error_code_from_error(err),
err.params,
)
)
else:
# convert non-field errors
for err in validation_error.error_list:
err_list.append(
(
Error(message=err.messages[0]),
get_error_code_from_error(err),
err.params,
)
)
return err_list
class ModelMutationOptions(MutationOptions):
exclude = None
model = None
return_field_name = None
class BaseMutation(graphene.Mutation):
errors = graphene.List(
graphene.NonNull(Error),
description="List of errors that occurred executing the mutation.",
required=True,
)
class Meta:
abstract = True
@classmethod
def __init_subclass_with_meta__(
cls,
description=None,
permissions: Tuple = None,
_meta=None,
error_type_class=None,
error_type_field=None,
**options,
):
if not _meta:
_meta = MutationOptions(cls)
if not description:
raise ImproperlyConfigured("No description provided in Meta")
if isinstance(permissions, str):
permissions = (permissions,)
if permissions and not isinstance(permissions, tuple):
raise ImproperlyConfigured(
"Permissions should be a tuple or a string in Meta"
)
_meta.permissions = permissions
_meta.error_type_class = error_type_class
_meta.error_type_field = error_type_field
super().__init_subclass_with_meta__(
description=description, _meta=_meta, **options
)
if error_type_class and error_type_field:
cls._meta.fields.update(
get_error_fields(error_type_class, error_type_field)
)
@classmethod
def _update_mutation_arguments_and_fields(cls, arguments, fields):
cls._meta.arguments.update(arguments)
cls._meta.fields.update(fields)
@classmethod
def get_node_by_pk(
cls, info, graphene_type: ObjectType, pk: Union[int, str], qs=None
):
"""Attempt to resolve a node from the given internal ID.
Whether by using the provided query set object or by calling type's get_node().
"""
if qs is not None:
return qs.filter(pk=pk).first()
get_node = getattr(graphene_type, "get_node", None)
if get_node:
return get_node(info, pk)
return None
@classmethod
def get_node_or_error(cls, info, node_id, field="id", only_type=None, qs=None):
if not node_id:
return None
try:
if only_type is not None:
pk = from_global_id_strict_type(node_id, only_type, field=field)
else:
# FIXME: warn when supplied only_type is None?
only_type, pk = graphene.Node.from_global_id(node_id)
if isinstance(only_type, str):
only_type = info.schema.get_type(only_type).graphene_type
node = cls.get_node_by_pk(info, graphene_type=only_type, pk=pk, qs=qs)
except (AssertionError, GraphQLError) as e:
raise ValidationError(
{field: ValidationError(str(e), code="graphql_error")}
)
else:
if node is None:
raise ValidationError(
{
field: ValidationError(
"Couldn't resolve to a node: %s" % node_id, code="not_found"
)
}
)
return node
@classmethod
def get_nodes_or_error(cls, ids, field, only_type=None, qs=None):
try:
instances = get_nodes(ids, only_type, qs=qs)
except GraphQLError as e:
raise ValidationError(
{field: ValidationError(str(e), code="graphql_error")}
)
return instances
@classmethod
def clean_instance(cls, info, instance):
"""Clean the instance that was created using the input data.
Once an instance is created, this method runs `full_clean()` to perform
model validation.
"""
try:
instance.full_clean()
except ValidationError as error:
if hasattr(cls._meta, "exclude"):
# Ignore validation errors for fields that are specified as
# excluded.
new_error_dict = {}
for field, errors in error.error_dict.items():
if field not in cls._meta.exclude:
new_error_dict[field] = errors
error.error_dict = new_error_dict
if error.error_dict:
raise error
@classmethod
def construct_instance(cls, instance, cleaned_data):
"""Fill instance fields with cleaned data.
The `instance` argument is either an empty instance of a already
existing one which was fetched from the database. `cleaned_data` is
data to be set in instance fields. Returns `instance` with filled
fields, but not saved to the database.
"""
from django.db import models
opts = instance._meta
for f in opts.fields:
if any(
[
not f.editable,
isinstance(f, models.AutoField),
f.name not in cleaned_data,
]
):
continue
data = cleaned_data[f.name]
if data is None:
# We want to reset the file field value when None was passed
# in the input, but `FileField.save_form_data` ignores None
# values. In that case we manually pass False which clears
# the file.
if isinstance(f, FileField):
data = False
if not f.null:
data = f._get_default()
f.save_form_data(instance, data)
return instance
@classmethod
def check_permissions(cls, context, permissions=None):
"""Determine whether user or service account has rights to perform this mutation.
Default implementation assumes that account is allowed to perform any
mutation. By overriding this method or defining required permissions
in the meta-class, you can restrict access to it.
The `context` parameter is the Context instance associated with the request.
"""
permissions = permissions or cls._meta.permissions
if not permissions:
return True
if context.user.has_perms(permissions):
return True
service_account = getattr(context, "service_account", None)
if service_account and service_account.has_perms(permissions):
return True
return False
@classmethod
def mutate(cls, root, info, **data):
if not cls.check_permissions(info.context):
raise PermissionDenied()
try:
response = cls.perform_mutation(root, info, **data)
if response.errors is None:
response.errors = []
return response
except ValidationError as e:
return cls.handle_errors(e)
@classmethod
def perform_mutation(cls, root, info, **data):
pass
@classmethod
def handle_errors(cls, error: ValidationError, **extra):
errors = validation_error_to_error_type(error)
return cls.handle_typed_errors(errors, **extra)
@classmethod
def handle_typed_errors(cls, errors: list, **extra):
"""Return class instance with errors."""
if (
cls._meta.error_type_class is not None
and cls._meta.error_type_field is not None
):
typed_errors = [
cls._meta.error_type_class(field=e.field, message=e.message, code=code)
for e, code, _params in errors
]
extra.update({cls._meta.error_type_field: typed_errors})
return cls(errors=[e[0] for e in errors], **extra)
class ModelMutation(BaseMutation):
class Meta:
abstract = True
@classmethod
def __init_subclass_with_meta__(
cls,
arguments=None,
model=None,
exclude=None,
return_field_name=None,
_meta=None,
**options,
):
if not model:
raise ImproperlyConfigured("model is required for ModelMutation")
if not _meta:
_meta = ModelMutationOptions(cls)
if exclude is None:
exclude = []
if not return_field_name:
return_field_name = get_model_name(model)
if arguments is None:
arguments = {}
fields = get_output_fields(model, return_field_name)
_meta.model = model
_meta.return_field_name = return_field_name
_meta.exclude = exclude
super().__init_subclass_with_meta__(_meta=_meta, **options)
cls._update_mutation_arguments_and_fields(arguments=arguments, fields=fields)
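    # A minimal, illustrative subclass (the model, input type and error class
    # names below are hypothetical):
    #
    #   class CollectionCreate(ModelMutation):
    #       class Arguments:
    #           input = CollectionInput(required=True)
    #
    #       class Meta:
    #           description = "Creates a new collection."
    #           model = models.Collection
    #           error_type_class = CollectionError
    #           error_type_field = "collection_errors"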
@classmethod
def clean_input(cls, info, instance, data, input_cls=None):
"""Clean input data received from mutation arguments.
Fields containing IDs or lists of IDs are automatically resolved into
model instances. `instance` argument is the model instance the mutation
is operating on (before setting the input data). `input` is raw input
data the mutation receives.
Override this method to provide custom transformations of incoming
data.
"""
def is_list_of_ids(field):
return (
isinstance(field.type, graphene.List)
and field.type.of_type == graphene.ID
)
def is_id_field(field):
return (
field.type == graphene.ID
or isinstance(field.type, graphene.NonNull)
and field.type.of_type == graphene.ID
)
def is_upload_field(field):
if hasattr(field.type, "of_type"):
return field.type.of_type == Upload
return field.type == Upload
if not input_cls:
input_cls = getattr(cls.Arguments, "input")
cleaned_input = {}
for field_name, field_item in input_cls._meta.fields.items():
if field_name in data:
value = data[field_name]
# handle list of IDs field
if value is not None and is_list_of_ids(field_item):
instances = (
cls.get_nodes_or_error(value, field_name) if value else []
)
cleaned_input[field_name] = instances
# handle ID field
elif value is not None and is_id_field(field_item):
instance = cls.get_node_or_error(info, value, field_name)
cleaned_input[field_name] = instance
# handle uploaded files
elif value is not None and is_upload_field(field_item):
value = info.context.FILES.get(value)
cleaned_input[field_name] = value
# handle other fields
else:
cleaned_input[field_name] = value
return cleaned_input
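    # --- Added sketch (not in the original module): how clean_input's helpers
    # classify fields. The input type and field names below are invented purely
    # for illustration:
    #
    #     class CollectionInput(graphene.InputObjectType):
    #         products = graphene.List(graphene.ID)  # -> cls.get_nodes_or_error(...)
    #         parent = graphene.ID()                 # -> cls.get_node_or_error(...)
    #         image = Upload()                       # -> info.context.FILES.get(...)
    #         name = graphene.String()               # -> copied through unchanged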
@classmethod
def _save_m2m(cls, info, instance, cleaned_data):
opts = instance._meta
for f in chain(opts.many_to_many, opts.private_fields):
if not hasattr(f, "save_form_data"):
continue
if f.name in cleaned_data and cleaned_data[f.name] is not None:
f.save_form_data(instance, cleaned_data[f.name])
@classmethod
def success_response(cls, instance):
"""Return a success response."""
return cls(**{cls._meta.return_field_name: instance, "errors": []})
@classmethod
def save(cls, info, instance, cleaned_input):
instance.save()
@classmethod
def get_instance(cls, info, **data):
"""Retrieve an instance from the supplied global id.
The expected graphene type can be lazy (str).
"""
object_id = data.get("id")
if object_id:
model_type = registry.get_type_for_model(cls._meta.model)
instance = cls.get_node_or_error(info, object_id, only_type=model_type)
else:
instance = cls._meta.model()
return instance
@classmethod
def perform_mutation(cls, _root, info, **data):
"""Perform model mutation.
Depending on the input data, `mutate` either creates a new instance or
updates an existing one. If `id` argument is present, it is assumed
that this is an "update" mutation. Otherwise, a new instance is
created based on the model associated with this mutation.
"""
instance = cls.get_instance(info, **data)
data = data.get("input")
cleaned_input = cls.clean_input(info, instance, data)
instance = cls.construct_instance(instance, cleaned_input)
cls.clean_instance(info, instance)
cls.save(info, instance, cleaned_input)
cls._save_m2m(info, instance, cleaned_input)
return cls.success_response(instance)
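# --- Added sketch (not part of the original module): a hypothetical subclass
# showing how the pieces of ModelMutation fit together; the model, input type
# and permission string are invented for illustration only:
#
#     class CollectionCreate(ModelMutation):
#         class Arguments:
#             input = CollectionInput(required=True)
#
#         class Meta:
#             model = models.Collection
#             permissions = ("collection.manage_collections",)
#
# perform_mutation() then resolves and cleans the input, constructs and saves
# the instance, and returns it under the field name derived from the model.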
class ModelDeleteMutation(ModelMutation):
class Meta:
abstract = True
@classmethod
def clean_instance(cls, info, instance):
"""Perform additional logic before deleting the model instance.
Override this method to raise custom validation error and abort
the deletion process.
"""
@classmethod
def perform_mutation(cls, _root, info, **data):
"""Perform a mutation that deletes a model instance."""
if not cls.check_permissions(info.context):
raise PermissionDenied()
node_id = data.get("id")
model_type = registry.get_type_for_model(cls._meta.model)
instance = cls.get_node_or_error(info, node_id, only_type=model_type)
if instance:
cls.clean_instance(info, instance)
db_id = instance.id
instance.delete()
            # After the instance is deleted, restore its original database ID
            # so that the success response contains the ID of the deleted object.
instance.id = db_id
return cls.success_response(instance)
class BaseBulkMutation(BaseMutation):
count = graphene.Int(
required=True, description="Returns how many objects were affected."
)
class Meta:
abstract = True
@classmethod
def __init_subclass_with_meta__(cls, model=None, _meta=None, **kwargs):
if not model:
raise ImproperlyConfigured("model is required for bulk mutation")
if not _meta:
_meta = ModelMutationOptions(cls)
_meta.model = model
super().__init_subclass_with_meta__(_meta=_meta, **kwargs)
@classmethod
def clean_instance(cls, info, instance):
"""Perform additional logic.
Override this method to raise custom validation error and prevent
bulk action on the instance.
"""
@classmethod
def bulk_action(cls, queryset, **kwargs):
"""Implement action performed on queryset."""
raise NotImplementedError
@classmethod
def perform_mutation(cls, _root, info, ids, **data):
"""Perform a mutation that deletes a list of model instances."""
clean_instance_ids, errors = [], {}
        # Allow passing an empty list for a dummy mutation
if not ids:
return 0, errors
instance_model = cls._meta.model
model_type = registry.get_type_for_model(instance_model)
instances = cls.get_nodes_or_error(ids, "id", model_type)
for instance, node_id in zip(instances, ids):
instance_errors = []
# catch individual validation errors to raise them later as
# a single error
try:
cls.clean_instance(info, instance)
except ValidationError as e:
msg = ". ".join(e.messages)
instance_errors.append(msg)
if not instance_errors:
clean_instance_ids.append(instance.pk)
else:
instance_errors_msg = ". ".join(instance_errors)
ValidationError({node_id: instance_errors_msg}).update_error_dict(
errors
)
if errors:
errors = ValidationError(errors)
count = len(clean_instance_ids)
if count:
qs = instance_model.objects.filter(pk__in=clean_instance_ids)
cls.bulk_action(queryset=qs, **data)
return count, errors
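    # --- Added note (sketch): instances that fail clean_instance are collected
    # under their node id in a single ValidationError, e.g.
    # ValidationError({node_id: "reason it failed"}), while the ids that pass
    # are handled in one bulk_action() call on a filtered queryset.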
@classmethod
def mutate(cls, root, info, **data):
if not cls.check_permissions(info.context):
raise PermissionDenied()
count, errors = cls.perform_mutation(root, info, **data)
if errors:
return cls.handle_errors(errors, count=count)
return cls(errors=errors, count=count)
class ModelBulkDeleteMutation(BaseBulkMutation):
class Meta:
abstract = True
@classmethod
def bulk_action(cls, queryset):
queryset.delete()
class CreateToken(ObtainJSONWebToken):
"""Mutation that authenticates a user and returns token and user data.
It overrides the default graphql_jwt.ObtainJSONWebToken to wrap potential
    authentication errors in our Error type, which is consistent with how the
    rest of the mutations work.
"""
errors = graphene.List(graphene.NonNull(Error), required=True)
account_errors = graphene.List(
graphene.NonNull(AccountError),
description="List of errors that occurred executing the mutation.",
required=True,
)
user = graphene.Field(User, description="A user instance.")
@classmethod
def mutate(cls, root, info, **kwargs):
try:
result = super().mutate(root, info, **kwargs)
except JSONWebTokenError as e:
errors = [Error(message=str(e))]
account_errors = [
AccountError(
field="email",
message="Please, enter valid credentials",
code=AccountErrorCode.INVALID_CREDENTIALS,
)
]
return CreateToken(errors=errors, account_errors=account_errors)
except ValidationError as e:
errors = validation_error_to_error_type(e)
return cls.handle_typed_errors(errors)
else:
return result
@classmethod
def handle_typed_errors(cls, errors: list):
account_errors = [
AccountError(field=e.field, message=e.message, code=code)
for e, code, _params in errors
]
return cls(errors=[e[0] for e in errors], account_errors=account_errors)
@classmethod
def resolve(cls, root, info, **kwargs):
return cls(user=info.context.user, errors=[], account_errors=[])
class VerifyToken(Verify):
"""Mutation that confirms if token is valid and also returns user data."""
user = graphene.Field(User)
def resolve_user(self, _info, **_kwargs):
username_field = get_user_model().USERNAME_FIELD
kwargs = {username_field: self.payload.get(username_field)}
return models.User.objects.get(**kwargs)
@classmethod
def mutate(cls, root, info, token, **kwargs):
try:
return super().mutate(root, info, token, **kwargs)
except JSONWebTokenError:
return None
| 34.22188 | 89 | 0.613192 |
4a1f1980d97e7c5be659072d9a0d7b09e51553f1 | 326 | py | Python | ui-tests/jupyter_server_config.py | andrewhli/jupyterlab-git | 18b44796fd0a6b11cf8f6b1ac0a397a3aacce7ca | [
"BSD-3-Clause"
] | 1,097 | 2017-06-20T17:40:44.000Z | 2022-03-29T17:15:16.000Z | ui-tests/jupyter_server_config.py | andrewhli/jupyterlab-git | 18b44796fd0a6b11cf8f6b1ac0a397a3aacce7ca | [
"BSD-3-Clause"
] | 831 | 2017-06-20T19:28:23.000Z | 2022-03-31T12:37:55.000Z | ui-tests/jupyter_server_config.py | andrewhli/jupyterlab-git | 18b44796fd0a6b11cf8f6b1ac0a397a3aacce7ca | [
"BSD-3-Clause"
] | 270 | 2017-06-20T17:37:13.000Z | 2022-03-26T18:07:02.000Z | c.ServerApp.port = 8888
c.ServerApp.token = ""
c.ServerApp.password = ""
c.ServerApp.disable_check_xsrf = True
c.ServerApp.open_browser = False
c.LabServerApp.extra_labextensions_path = "/opt/labextension"
# Workaround bug: https://github.com/ipython/traitlets/issues/668
c.LabServerApp.extra_labextensions_path = "/dev/null"
| 36.222222 | 65 | 0.791411 |
4a1f19e845e4945cca022aee6fd2a4666199927a | 2,560 | py | Python | src/routes/book.py | jabuckle26/library-api | f52970585959be202b85a26e419fc994c273f21e | [
"MIT"
] | null | null | null | src/routes/book.py | jabuckle26/library-api | f52970585959be202b85a26e419fc994c273f21e | [
"MIT"
] | null | null | null | src/routes/book.py | jabuckle26/library-api | f52970585959be202b85a26e419fc994c273f21e | [
"MIT"
] | null | null | null | from config.db import conn
from fastapi import APIRouter, HTTPException
from models.book import books
from schemas.book import Book, BookIn
from typing import List, Optional
router = APIRouter(
prefix="/books",
tags=["books"],
responses={404: {"description": "Not found"}},
)
@router.get("/", response_model=List[Book])
async def get_all_books(title: Optional[str] = None):
if title:
print(f'Got.....{title}')
query = books.select().filter(books.c.title.contains(title))
queried_book = conn.execute(query)
returned_books = queried_book.fetchall()
if len(returned_books) == 0:
raise HTTPException(status_code=404, detail=f"No books found matching query.")
else:
return returned_books
else:
return conn.execute(books.select()).fetchall()
@router.get("/get-book/{book_id}", response_model=Book)
async def get_book(book_id: int):
query = books.select().where(book_id == books.c.id)
queried_book = conn.execute(query)
returned_book = queried_book.fetchone()
if returned_book is None:
raise HTTPException(status_code=404, detail=f"Book id #{book_id} not found")
else:
return returned_book
@router.post("/add-book", response_model=Book)
async def add_new_book(book_details: BookIn):
query = books.insert().values(title=book_details.title,
author=book_details.author,
page_count=book_details.page_count,
book_genre=book_details.book_genre)
last_record_id = conn.execute(query).lastrowid
return {**book_details.dict(), "id": last_record_id}
@router.delete("/detele-book/{book_id}")
async def delete_book(book_id: int):
query = books.delete().where(books.c.id == book_id)
conn.execute(query)
return {"data": f"Deleted book {book_id}."}
@router.put("/update-book/{book_id}")
async def update_book(book_id: int, book_details: Book):
query = books.select().where(book_id == books.c.id)
returned_book = conn.execute(query)
if returned_book.fetchone() is None:
raise HTTPException(status_code=404, detail=f"Book id #{book_id} not found")
else:
query = books.update().where(books.c.id == book_id).values(
id=book_id,
title=book_details.title,
author=book_details.author,
page_count=book_details.page_count,
book_genre=book_details.book_genre
)
conn.execute(query)
return book_details
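# --- Added usage sketch (not part of this module). It assumes the router is
# mounted on a FastAPI app; the "main" module below is a placeholder for the
# real entry point:
#
#     from fastapi.testclient import TestClient
#     from main import app
#
#     client = TestClient(app)
#     client.post("/books/add-book", json={"title": "Dune", "author": "Herbert",
#                                          "page_count": 412, "book_genre": "sci-fi"})
#     client.get("/books/", params={"title": "Dune"})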
| 33.684211 | 90 | 0.653516 |
4a1f1b318f1b7641d7b5da85b5cc2ad9379ff48e | 45,657 | py | Python | gistools/gdal2cesium.py | echemoo/awesome-bash | d8d88fca6fb7d24905b6c03ee4976486c263f145 | [
"MIT"
] | null | null | null | gistools/gdal2cesium.py | echemoo/awesome-bash | d8d88fca6fb7d24905b6c03ee4976486c263f145 | [
"MIT"
] | null | null | null | gistools/gdal2cesium.py | echemoo/awesome-bash | d8d88fca6fb7d24905b6c03ee4976486c263f145 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#******************************************************************************
# $Id: gdal2cesium.py 2014-10-01 12:01:23Z $
#
# Project: Cesium terrain generator for GDAL raster formats - S.I.T. Comune di Prato (Italy)
# Support: Gis3w s.a.s. (http://gis3w.it)
# Purpose: Convert a raster into a heightmap terrain for Cesium 3D Javascript library
# - generate a global geodetic TMS data structure
# - tiles are generated according to the Cesium heightmap binary format v1.0 (http://cesiumjs.org/data-and-assets/terrain/formats/heightmap-1.0.html)
# - the max zoom level is calculated on the base of the raster horizontal resolution
# - zoom levels up to the 0 level are always created to complete the parent-child relationships required by the Cesium format
# Author: Giovanni Allegri (http://giovanniallegri.it, http://gis3w.it)
#
###############################################################################
# Copyright (c) 2014, S.I.T. Comune di Prato (Italy)
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#******************************************************************************
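# Added usage sketch (not part of the original header). A typical invocation,
# based on the options defined in optparse_init() below; the file names are
# placeholders:
#
#     python gdal2cesium.py -s EPSG:3003 -z 0-12 -o ./tiles -i dem1.tif dem2.tif
#
# -s sets the input CRS, -z limits the zoom range, -o sets the output root and
# -i additionally writes a shapefile index of the generated tiles.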
import os,sys,math,glob,struct,shutil
import subprocess
try:
from osgeo import gdal
from osgeo import osr
from osgeo import gdal_array
except:
import gdal
print('You are using "old gen" bindings. gdal2cesium needs "new gen" bindings.')
sys.exit(1)
from shapely.geometry import mapping, Polygon, LineString
from osgeo import ogr
try:
import numpy
import osgeo.gdal_array as gdalarray
except:
print('gdal2cesium needs Numpy.')
sys.exit(1)
MAXZOOMLEVEL = 32
resampling_list = ('average','near','bilinear','cubic','cubicspline','lanczos')
def makepoly(ulx,uly,lrx,lry):
return Polygon([(ulx, uly), (lrx, uly), (lrx, lry), (ulx, lry), (ulx, uly)])
def makeline(ulx,uly,lrx,lry):
return LineString([(ulx, uly), (lrx, uly), (lrx, lry), (ulx, lry), (ulx, uly)])
def splitpath(path):
parts=[]
(path, tail)=os.path.split( path)
while path and tail:
parts.append( tail)
(path,tail)=os.path.split(path)
parts.append( os.path.join(path,tail) )
return map( os.path.normpath, parts)[::-1]
class GlobalGeodetic(object):
def __init__(self, tileSize = 64):
self.tileSize = tileSize
def LatLonToPixels(self, lat, lon, zoom):
"Converts lat/lon to pixel coordinates in given zoom of the EPSG:4326 pyramid"
res = 180.0 / self.tileSize / 2**zoom
px = (180 + lat) / res
py = (90 + lon) / res
return px, py
def PixelsToTile(self, px, py):
"Returns coordinates of the tile covering region in pixel coordinates"
tx = int( math.ceil( px / float(self.tileSize) ) - 1 )
ty = int( math.ceil( py / float(self.tileSize) ) - 1 )
return tx, ty
def LatLonToTile(self, lat, lon, zoom):
"Returns the tile for zoom which covers given lat/lon coordinates"
px, py = self.LatLonToPixels( lat, lon, zoom)
return self.PixelsToTile(px,py)
def Resolution(self, zoom ):
"Resolution (arc/pixel) for given zoom level (measured at Equator)"
return 180.0 / self.tileSize / 2**zoom
def ZoomForPixelSize(self, pixelSize ):
"Maximal scaledown zoom of the pyramid closest to the pixelSize."
for i in range(MAXZOOMLEVEL):
if pixelSize > self.Resolution(i):
if i!=0:
return i-1
else:
return 0 # We don't want to scale up
def TileBounds(self, tx, ty, zoom):
"Returns bounds of the given tile"
res = 180.0 / self.tileSize / 2**zoom
return (
tx*self.tileSize*res - 180,
ty*self.tileSize*res - 90,
(tx+1)*self.tileSize*res - 180,
(ty+1)*self.tileSize*res - 90
)
def TileBoundsForTileSize(self, tx, ty, zoom, extrapixels):
res = 180.0 / self.tileSize / 2**zoom
# we have to calculate a wider bound to consider the overlapping pixel according to Cesium format
extrafactor = res*extrapixels
return (
tx*self.tileSize*res - 180,
(ty*self.tileSize*res) - extrafactor - 90,
((tx+1)*self.tileSize*res - 180) + extrafactor,
(ty+1)*self.tileSize*res - 90
)
def TileLatLonBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in the SWNE form"
b = self.TileBounds(tx, ty, zoom)
return (b[1],b[0],b[3],b[2])
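# Added illustrative helper (not part of the original tool): a worked example of
# the geodetic tiling maths above. Note that, despite the parameter names, the
# rest of this module passes the x coordinate (longitude) first, e.g.
# geodetic.LatLonToTile(ominx, ominy, tz); the numbers below follow that
# convention.
def _geodetic_tiling_example():
    geodetic = GlobalGeodetic()       # tileSize = 64
    res0 = geodetic.Resolution(0)     # 180 / 64 = 2.8125 degrees per pixel
    # At zoom 5 the resolution is 180/64/2**5 ~= 0.0879 deg/px, so this point
    # falls into tile (tx, ty) == (33, 23).
    tx, ty = geodetic.LatLonToTile(11.25, 43.88, 5)
    return res0, tx, ty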
import fnmatch
class PostProcessor(object):
def __init__(self,outPath = "./",tmpOutPath = "./tmp"):
self.rootPath = tmpOutPath
self.pattern = '*.terrain'
self.processedPath = outPath
self.rtype = numpy.dtype('int16')
def walk_tiles(self,folder = '.'):
for root, _, files in os.walk(self.rootPath):
for filename in fnmatch.filter(files, self.pattern):
yield( os.path.join(root, filename))
def get_tiles(self):
terrains = []
for terrain in self.walk_tiles():
terrains.append(terrain)
return terrains
def extract_data(self,fin,rb):
data = numpy.fromfile(fin,dtype=self.rtype,count=4096)
data_mat = data.reshape((64,64))
if rb == 'r':
data_slice = data_mat[:,0] # left col
else:
data_slice = data_mat[0] # top row
return data_slice
def augment_tile(self,fin,data_slice_r,data_slice_b,pixel_br):
data_complete = numpy.fromfile(fin,dtype=self.rtype,count=4097)
data = data_complete[:4096]
maskbytes = data_complete[4096]
data_mat = data.reshape((64,64))
data_slice_b_br = numpy.c_[[data_slice_b],[pixel_br]] # add bottom right pixel to bottom row as new col
data_mat_r = numpy.c_[data_mat,data_slice_r] # add right col to data
data_mat_brpr = numpy.r_[data_mat_r,data_slice_b_br] # add bottom row to data
return data_mat_brpr,maskbytes
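    # Added sketch (not in the original class): the shape arithmetic performed
    # by augment_tile. Stitching one extra column and row onto a 64x64 tile
    # yields the 65x65 (4225-sample) grid with one-sample overlap used by the
    # Cesium heightmap layout; the extra int16 read above (index 4096) holds the
    # child/water-mask flag bytes of the source tile.
    @staticmethod
    def _overlap_shape_example():
        tile = numpy.zeros((64, 64), dtype=numpy.int16)
        right_col = numpy.zeros(64, dtype=numpy.int16)
        bottom_row = numpy.zeros((1, 65), dtype=numpy.int16)  # includes the corner pixel
        stitched = numpy.r_[numpy.c_[tile, right_col], bottom_row]
        assert stitched.shape == (65, 65)
        return stitched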
def write_tile(self,tilename,new_tile,maskbytes):
tilepath = os.path.join(self.processedPath,tilename)
if not os.path.exists(os.path.dirname(tilepath)):
os.makedirs(os.path.dirname(tilepath))
tilearrayint = new_tile.astype(numpy.int16)
data = tilearrayint.flatten()
data_with_mask = numpy.append(data,maskbytes)
data_with_mask.tofile(tilepath)
def run(self):
for terrain in self.get_tiles():
pathparts = splitpath(terrain)
idx = len(pathparts)
root = os.path.join(*pathparts[:idx-3])
y = int(pathparts[idx-1].split(".")[0])
x = int(pathparts[idx-2])
z = int(pathparts[idx-3])
right_tile = os.path.join(root,str(z),str(x+1),"%s.terrain" % y)
bottom_tile = os.path.join(root,str(z),str(x),"%s.terrain" % str(y-1))
bottom_right_tile = os.path.join(root,str(z),str(x+1),"%s.terrain" % str(y-1))
if os.path.exists(right_tile):
with open(right_tile, 'rb') as right_tile_f:
data_slice_r = self.extract_data(right_tile_f,'r')
else:
data_slice_r = numpy.empty(64)
data_slice_r.fill(5000)
if os.path.exists(bottom_tile):
with open(bottom_tile, 'rb') as bottom_tile_f:
data_slice_b = self.extract_data(bottom_tile_f,'t')
else:
data_slice_b = numpy.empty(64)
data_slice_b.fill(5000)
if os.path.exists(bottom_right_tile):
with open(bottom_right_tile, 'rb') as bottom_right_tile_f:
data = numpy.fromfile(bottom_right_tile_f,dtype=self.rtype,count=1)
pixel_br = data[0]
else:
pixel_br = 5000
with open(terrain, 'rb') as terrain_f:
new_tile,maskbytes = self.augment_tile(terrain_f,data_slice_r,data_slice_b,pixel_br)
tilename = os.path.join(*pathparts[idx-3:idx])
self.write_tile(tilename,new_tile,maskbytes)
class GDAL2Cesium(object):
# -------------------------------------------------------------------------
def process(self):
for inumpyut_file in self.inumpyuts:
self.inumpyut = inumpyut_file
self.pre_process_inumpyut(inumpyut_file)
self.merge_inumpyuts_data()
self.make_tiles()
print """Running post processing"""
pp = PostProcessor(self.output,self.tmpoutput)
pp.run()
print """Post processing terminated"""
shutil.rmtree(self.tmpoutput)
def merge_inumpyuts_data(self):
# Merge tminmax. We will use the extent containing all the layers for the lower zooms and only the higher resolution layer for the highest zooms
global_tminmax = []
for _inumpyut,inumpyut_data in self.inumpyuts_data.iteritems():
#print "Inumpyut: %s" % _inumpyut
minz = inumpyut_data[0]
maxz = inumpyut_data[1]
tminmax = inumpyut_data[2]
for tz,tminmax_values in enumerate(tminmax):
if (self.user_tminz is not None and tz < self.user_tminz) or (self.user_tmaxz is not None and tz > self.user_tmaxz):
continue
if tz <= maxz:
#print " tz: %s, tminmax: %s" % (tz,tminmax_values)
if len(global_tminmax)<=tz:
global_tminmax.append(list(tminmax_values))
else:
tminx = tminmax_values[0]
tminy = tminmax_values[1]
tmaxx = tminmax_values[2]
tmaxy = tminmax_values[3]
if tminx < global_tminmax[tz][0]:
global_tminmax[tz][0] = tminx
if tminy < global_tminmax[tz][1]:
global_tminmax[tz][1] = tminy
if tmaxx > global_tminmax[tz][2]:
global_tminmax[tz][2] = tmaxx
if tmaxy > global_tminmax[tz][3]:
global_tminmax[tz][3] = tmaxy
self.tminmax = global_tminmax
# Split zooms in zoom ranges based on resolutions (to build the related vrt files)
for _inumpyut,inumpyut_data in self.inumpyuts_data.iteritems():
minz = inumpyut_data[0]
maxz = inumpyut_data[1]
if self.tminz is None or minz < self.tminz:
self.tminz = minz
if self.tmaxz is None or maxz > self.tmaxz:
self.tmaxz = maxz
for zoom in range(minz,maxz+1):
                if (self.user_tminz is not None and zoom < self.user_tminz) or (self.user_tmaxz is not None and zoom > self.user_tmaxz):
continue
if self.zoom_resolutions.get(zoom) is None:
self.zoom_resolutions[zoom] = (inumpyut_data[3],inumpyut_data[4])
else:
# the worst resolution is assigned to the common zoom levels (we check only resx, because resy will be consequently correlated)
if self.zoom_resolutions[zoom][0] < inumpyut_data[3]:
self.zoom_resolutions[zoom] = (inumpyut_data[3],inumpyut_data[4])
'''print "MERGED"
for tz,tminmax_values in enumerate(self.global_tminmax):
print " tz: %s, tminmax: %s" % (tz,tminmax_values)
'''
# -------------------------------------------------------------------------
def error(self, msg, details = "" ):
"""Print an error message and stop the processing"""
if details:
self.parser.error(msg + "\n\n" + details)
else:
self.parser.error(msg)
exit(1)
# -------------------------------------------------------------------------
def progressbar(self, complete = 0.0):
"""Print progressbar for float value 0..1"""
gdal.TermProgress_nocb(complete)
# -------------------------------------------------------------------------
def stop(self):
"""Stop the rendering immediately"""
self.stopped = True
# -------------------------------------------------------------------------
def __init__(self, arguments ):
"""Constructor function - initialization"""
try:
subprocess.call(["gdalbuildvrt","--help"])
except:
print "gdalbuildvrt is required to run gdal2cesium in multi inumpyuts mode"
exit(1)
self.stopped = False
self.multi_suffix = ''
self.inumpyut = None
self.default_base_output = 'tiles'
self.min_tile_tz = None
self.inumpyuts_data = {}
self.inumpyuts_files_or_vrt = []
self.vrts = {}
self.tminmax = None
self.zoom_resolutions = {}
self.tminz = None
self.tmaxz = None
gdal.AllRegister()
self.mem_drv = gdal.GetDriverByName( 'MEM' )
self.geodetic = GlobalGeodetic()
# Tile format
self.tilesize = 64
self.tileext = 'terrain'
self.epsg4326 = "EPSG:4326"
self.tilelayer = None
self.scaledquery = True
        # How big should the query window be for scaling down
        # Reset later according to the chosen resampling algorithm
self.querysize = 4 * self.tilesize
        # pixel overlap between tiles according to the Cesium heightmap format
self.extrapixels = 0
# RUN THE ARGUMENT PARSER:
self.optparse_init()
self.options, self.args = self.parser.parse_args(args=arguments)
self.options.srcnodata = None
if not self.args:
self.error("No inumpyut file specified")
# POSTPROCESSING OF PARSED ARGUMENTS:
# Workaround for old versions of GDAL
try:
if (self.options.verbose and self.options.resampling == 'near') or gdal.TermProgress_nocb:
pass
except:
self.error("This version of GDAL is not supported. Please upgrade to 1.6+.")
#,"You can try run crippled version of gdal2tiles with parameters: -v -r 'near'")
self.inumpyuts = [i for i in self.args]
# Default values for not given options
if self.options.output:
self.output = self.options.output
else:
if len(self.inumpyuts)>0:
self.multi_suffix = '_multi'
self.output = os.path.join(self.default_base_output,os.path.basename( self.inumpyuts[0] ).split('.')[0]+self.multi_suffix)
self.options.title = os.path.basename( self.inumpyuts[0]+self.multi_suffix )
self.tmpoutput = os.path.join(self.output,'tmp')
# Supported options
self.resampling = None
if self.options.resampling == 'average':
try:
if gdal.RegenerateOverview:
pass
except:
self.error("'average' resampling algorithm is not available.", "Please use -r 'near' argument or upgrade to newer version of GDAL.")
elif self.options.resampling == 'near':
self.resampling = gdal.GRA_NearestNeighbour
self.querysize = self.tilesize
elif self.options.resampling == 'bilinear':
self.resampling = gdal.GRA_Bilinear
self.querysize = self.tilesize * 2
elif self.options.resampling == 'cubic':
self.resampling = gdal.GRA_Cubic
elif self.options.resampling == 'cubicspline':
self.resampling = gdal.GRA_CubicSpline
elif self.options.resampling == 'lanczos':
self.resampling = gdal.GRA_Lanczos
# User specified zoom levels
self.user_tminz = None
self.user_tmaxz = None
if self.options.zoom:
minmax = self.options.zoom.split('-',1)
minmax.extend([''])
min, max = minmax[:2]
self.user_tminz = int(min)
if max:
self.user_tmaxz = int(max)
else:
self.user_tmaxz = int(min)
# Output the results
if self.options.verbose:
print("Options:", self.options)
print("Inumpyut:", self.inumpyuts[0]+self.multi_suffix)
print("Output:", self.output)
print("Cache: %s MB" % (gdal.GetCacheMax() / 1024 / 1024))
print('')
# -------------------------------------------------------------------------
def optparse_init(self):
"""Prepare the option parser for inumpyut (argv)"""
from optparse import OptionParser, OptionGroup
usage = "Usage: %prog [options] inumpyut_file(s)"
p = OptionParser(usage, version="%prog ")
p.add_option("-s", "--s_srs", dest="s_srs",
help="Define inumpyut raster CRS (eg EPSG:3003)")
p.add_option('-z', '--zoom', dest="zoom",
help="Zoom levels to render (format:'2-5' or '10').")
p.add_option("-r", "--resampling", dest="resampling", type='choice', choices=resampling_list,
help="Resampling method (%s) - default 'average'" % ",".join(resampling_list))
p.add_option('-e', '--resume', dest="resume", action="store_true",
help="Resume mode. Generate only missing files.")
p.add_option("-v", "--verbose",
action="store_true", dest="verbose",
help="Print status messages to stdout")
p.add_option("-o", "--o_dir",dest="output",
help="Root output directory")
p.add_option("-i", "--index",dest="createtileindexshp",action="store_true",default=False,
help="Create the shapefile of tiles index (True or False)")
p.add_option("-k", "--keep",dest="keepfiles",action="store_true",default=False,
help="Keep temporary files reated by gdal2cesium")
p.set_defaults(resume=False,verbose=False,resampling='average')
self.parser = p
# -------------------------------------------------------------------------
def pre_process_inumpyut(self,_inumpyut):
"""Initialization of the inumpyut raster, reprojection if necessary"""
print "Processing: %s" % _inumpyut
inumpyut_or_vrt = _inumpyut
if not self.mem_drv:
raise Exception("The 'MEM' driver was not found, is it available in this GDAL build?")
# Open the inumpyut file
if self.inumpyut:
in_ds = gdal.Open(_inumpyut, gdal.GA_ReadOnly)
else:
raise Exception("No inumpyut file was specified")
if self.options.verbose:
print("Inumpyut file:", "( %sP x %sL - %s bands)" % (self.in_ds.RasterXSize, self.in_ds.RasterYSize, self.in_ds.RasterCount))
if not in_ds:
# Note: GDAL prints the ERROR message too
self.error("It is not possible to open the inumpyut file '%s'." % _inumpyut )
# Read metadata from the inumpyut file
if in_ds.RasterCount == 0:
self.error( "Inumpyut file '%s' has no raster band" % _inumpyut )
if in_ds.GetRasterBand(1).GetRasterColorTable():
# TODO: Process directly paletted dataset by generating VRT in memory
self.error( "Please convert this file to RGB/RGBA and run gdal2tiles on the result.",
"""From paletted file you can create RGBA file (temp.vrt) by:
gdal_translate -of vrt -expand rgba %s temp.vrt
then run:
gdal2tiles temp.vrt""" % _inumpyut )
# Get NODATA value
in_nodata = []
for i in range(1, in_ds.RasterCount+1):
if in_ds.GetRasterBand(i).GetNoDataValue() != None:
ndata = in_ds.GetRasterBand(i).GetNoDataValue()
if math.isnan(ndata):
ndata = 'none'
in_nodata.append( ndata )
if self.options.srcnodata:
nds = list(map( float, self.options.srcnodata.split(',')))
if len(nds) < in_ds.RasterCount:
in_nodata = (nds * in_ds.RasterCount)[:in_ds.RasterCount]
else:
in_nodata = nds
if self.options.verbose:
print("NODATA: %s" % in_nodata)
#
# Here we should have RGBA inumpyut dataset opened in in_ds
#
if self.options.verbose:
print("Preprocessed file:", "( %sP x %sL - %s bands)" % (in_ds.RasterXSize, in_ds.RasterYSize, in_ds.RasterCount))
# Spatial Reference System of the inumpyut raster
self.in_srs = None
if self.options.s_srs:
self.in_srs = osr.SpatialReference()
self.in_srs.SetFromUserInumpyut(self.options.s_srs)
self.in_srs_wkt = self.in_srs.ExportToWkt()
else:
self.in_srs_wkt = in_ds.GetProjection()
if not self.in_srs_wkt and in_ds.GetGCPCount() != 0:
self.in_srs_wkt = in_ds.GetGCPProjection()
if self.in_srs_wkt:
self.in_srs = osr.SpatialReference()
self.in_srs.ImportFromWkt(self.in_srs_wkt)
# Spatial Reference System of tiles
self.out_srs = osr.SpatialReference()
self.out_srs.ImportFromEPSG(4326)
# Are the reference systems the same? Reproject if necessary.
out_ds = None
if (in_ds.GetGeoTransform() == (0.0, 1.0, 0.0, 0.0, 0.0, 1.0)) and (in_ds.GetGCPCount() == 0):
self.error("There is no georeference - neither affine transformation (worldfile) nor GCPs. You can generate only 'raster' profile tiles.",
"Either gdal2tiles with parameter -p 'raster' or use another GIS software for georeference e.g. gdal_transform -gcp / -a_ullr / -a_srs")
in_srs_code = self.in_srs.GetAttrValue("AUTHORITY", 0)
in_ds_srs = osr.SpatialReference()
res = in_ds_srs.ImportFromWkt(in_ds.GetProjection())
if res != 0 and in_srs_code is None:
print "ERROR! The inumpyut file %s has no SRS associated and no SRS has been defined in inumpyut (-s parameter)" % _inumpyut
exit(1)
if self.in_srs:
if in_ds_srs.ExportToProj4() != self.out_srs.ExportToProj4():
if (self.in_srs.ExportToProj4() != self.out_srs.ExportToProj4()) or (in_ds.GetGCPCount() != 0):
print "WARNING! Inumpyut file %s has a SR different from EPSG:4326 (WGS84). This can make the processing significantly slow." % _inumpyut
# Generation of VRT dataset in tile projection, default 'nearest neighbour' warping
out_ds = gdal.AutoCreateWarpedVRT( in_ds, self.in_srs_wkt, self.out_srs.ExportToWkt() )
                    # TODO: HIGH PRIORITY: Correct AutoCreateWarpedVRT according to the max zoom level for correct direct warping!!!
if self.options.verbose:
print("Warping of the raster by AutoCreateWarpedVRT (result saved into 'tiles.vrt')")
out_ds.GetDriver().CreateCopy("%s.vrt" % _inumpyut, out_ds)
inumpyut_or_vrt = "%s.vrt" % _inumpyut
# Note: self.in_srs and self.in_srs_wkt contain still the non-warped reference system!!!
else:
self.error("Inumpyut file has unknown SRS.", "Use --s_srs ESPG:xyz (or similar) to provide source reference system." )
if out_ds and self.options.verbose:
print("Projected file:", "tiles.vrt", "( %sP x %sL - %s bands)" % (out_ds.RasterXSize, out_ds.RasterYSize, out_ds.RasterCount))
if not out_ds:
out_ds = in_ds
#
# Here we should have a raster (out_ds) in the correct Spatial Reference system
#
# Get alpha band (either directly or from NODATA value)
alphaband = out_ds.GetRasterBand(1).GetMaskBand()
if (alphaband.GetMaskFlags() & gdal.GMF_ALPHA) or out_ds.RasterCount==4 or out_ds.RasterCount==2:
# TODO: Better test for alpha band in the dataset
dataBandsCount = out_ds.RasterCount - 1
else:
dataBandsCount = out_ds.RasterCount
# Read the georeference
out_gt = out_ds.GetGeoTransform()
# Report error in case rotation/skew is in geotransform (possible only in 'raster' profile)
if (out_gt[2], out_gt[4]) != (0,0):
self.error("Georeference of the raster contains rotation or skew. Such raster is not supported. Please use gdalwarp first.")
            # TODO: Do the warping in this case automatically
#
# Here we expect: pixel is square, no rotation on the raster
#
# Output Bounds - coordinates in the output SRS
ominx = out_gt[0]
omaxx = out_gt[0]+out_ds.RasterXSize*out_gt[1]
omaxy = out_gt[3]
ominy = out_gt[3]-out_ds.RasterYSize*out_gt[1]
# Note: maybe round(x, 14) to avoid the gdal_translate behaviour, when 0 becomes -1e-15
if self.options.verbose:
print("Bounds (output srs):", round(ominx, 13), ominy, omaxx, omaxy)
#
# Calculating ranges for tiles in different zoom levels
#
geodetic = GlobalGeodetic() # from globalmaptiles.py
# Generate table with min max tile coordinates for all zoomlevels
tminmax = list(range(0,32))
for tz in range(0, 32):
tminx, tminy = geodetic.LatLonToTile( ominx, ominy, tz )
tmaxx, tmaxy = geodetic.LatLonToTile( omaxx, omaxy, tz )
# crop tiles extending world limits (+-180,+-90)
tminx, tminy = max(0, tminx), max(0, tminy)
tmaxx, tmaxy = min(2**(tz+1)-1, tmaxx), min(2**tz-1, tmaxy)
tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# Get the maximal zoom level (closest possible zoom level up on the resolution of raster)
tminz = geodetic.ZoomForPixelSize( out_gt[1] * max( out_ds.RasterXSize, out_ds.RasterYSize) / float(self.tilesize) )
if self.options.verbose:
print ('Min Zoom: %s' % tminz)
# Get the maximal zoom level (closest possible zoom level up on the resolution of raster)
tmaxz = geodetic.ZoomForPixelSize( out_gt[1] ) + 1
# tmaxz = geodetic.ZoomForPixelSize( out_gt[1] )
if self.options.verbose:
print ('Max Zoom: %s' % tmaxz)
self.inumpyuts_data[_inumpyut] = [tminz,tmaxz,tminmax,out_gt[1],out_gt[5]]
self.inumpyuts_files_or_vrt.append(inumpyut_or_vrt)
if self.options.verbose:
print("Bounds (latlong):", ominx, ominy, omaxx, omaxy)
def make_vrt(self,resx,resy,i):
inumpyuts = " ".join(self.inumpyuts_files_or_vrt)
if self.options.verbose:
print "Building VRT file cesium_%s.vrt" % s
try:
res = subprocess.check_output("gdalbuildvrt -srcnodata 0 -resolution user -tr %s %s cesium_%s.vrt %s" % (abs(resx),abs(resy),i,inumpyuts), shell=True)
except:
exit(1)
def make_tiles(self):
# Generate the vrt files for zoom ranges
i = 0
tmp_res = -1
vrt_file = None
for tz in range(self.tminz,self.tmaxz+1):
res = self.zoom_resolutions[tz][0] # I check only with resx, because resy will be positively correlated
if res != tmp_res:
if i>0:
self.vrts[vrt_file][1] = tz-1
tmp_res = res
resx = self.zoom_resolutions[tz][0]
resy = self.zoom_resolutions[tz][1]
self.make_vrt(resx,resy,i)
vrt_file = "cesium_%s.vrt" % i
self.vrts[vrt_file] = [tz,None]
i += 1
if tz == self.tmaxz:
self.vrts[vrt_file][1] = tz
self.ti_cum = 0
if self.options.createtileindexshp and self.tilelayer is None:
driver = ogr.GetDriverByName('Esri Shapefile')
shptileindexfile = os.path.join(self.output,'tilesindex.shp')
if os.path.exists(shptileindexfile):
for f in glob.glob(self.output+'/tilesindex.*'):
os.remove(f)
shptileindex = driver.CreateDataSource(shptileindexfile)
self.tilelayer = shptileindex.CreateLayer('tiles', None, ogr.wkbLineString)
self.tilelayer.CreateField(ogr.FieldDefn('id', ogr.OFTInteger))
self.tilelayer.CreateField(ogr.FieldDefn('zoom', ogr.OFTInteger))
self.tilelayer.CreateField(ogr.FieldDefn('tile', ogr.OFTString))
self.tilelayer.CreateField(ogr.FieldDefn('children', ogr.OFTInteger))
# Generate parent tiles
self.generate_fake_parent_tiles()
# For each vrt (i.e. zoom range) generate the tiles
self.steps = len(self.vrts)
self.step = 1
for vrt in self.vrts.keys():
self.process_vrt(vrt)
if not self.options.keepfiles:
try:
os.remove(vrt)
except:
pass
self.step += 1
self.create_layerjsonfile()
if self.options.createtileindexshp and self.tilelayer is not None:
shptileindex.Destroy()
shptileindex = self.tilelayer = feat = geom = None
print """Processing finished. Tiles written to "%s".""" % self.output
def process_vrt(self,vrt):
self.open_inumpyut(vrt)
self.generate_tiles(vrt)
def open_inumpyut(self,vrt):
if vrt:
self.in_ds = gdal.Open(vrt, gdal.GA_ReadOnly)
else:
raise Exception("No vrt file was specified")
if self.options.verbose:
print("Inumpyut file:", "( %sP x %sL - %s bands)" % (self.in_ds.RasterXSize, self.in_ds.RasterYSize, self.in_ds.RasterCount))
if not self.in_ds:
# Note: GDAL prints the ERROR message too
self.error("It is not possible to open the inumpyut file '%s'." % vrt )
if self.in_ds.RasterCount == 0:
self.error( "Inumpyut file '%s' has no raster band" % vrt )
self.out_ds = self.in_ds
# Get alpha band (either directly or from NODATA value)
self.alphaband = self.out_ds.GetRasterBand(1).GetMaskBand()
if (self.alphaband.GetMaskFlags() & gdal.GMF_ALPHA) or self.out_ds.RasterCount==4 or self.out_ds.RasterCount==2:
self.dataBandsCount = self.out_ds.RasterCount - 1
else:
self.dataBandsCount = self.out_ds.RasterCount
# -------------------------------------------------------------------------
def make_child_flags(self,N,S,E,W):
# Cesium format neighbor tiles flags
HAS_SW = 0x01
HAS_SE = 0x02
HAS_NW = 0x04
HAS_NE = 0x08
NB_FLAGS = 0x00
if N & W:
NB_FLAGS = NB_FLAGS | HAS_NW
if N & E:
NB_FLAGS = NB_FLAGS | HAS_NE
if S & W:
NB_FLAGS = NB_FLAGS | HAS_SW
if S & E:
NB_FLAGS = NB_FLAGS | HAS_SE
return NB_FLAGS
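    # Added note (sketch): worked examples of the flag word returned above.
    #   all four children present          -> 0x0F (SW | SE | NW | NE)
    #   only the north-west child present  -> 0x04 (requires N and W)
    #   no children                        -> 0x00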
def generate_fake_parent_tiles(self):
tx = None
for tz in range(self.tminz-1,-1,-1):
tminx, tminy, tmaxx, tmaxy = self.tminmax[tz]
tminx_c, tminy_c, tmaxx_c, tmaxy_c = self.tminmax[tz+1]
for ty in range(tmaxy, tminy-1, -1):
for tx in range(tminx, tmaxx+1):
tminx_cpot = tx * 2
tmaxx_cpot = tminx_cpot + 1
tminy_cpot = ty * 2
tmaxy_cpot = tminy_cpot + 1
N = S = E = W = False
if tminx_cpot >= tminx_c:
W = True
if tmaxx_cpot <= tmaxx_c:
E = True
if tminy_cpot >= tminy_c:
S = True
if tmaxy_cpot <= tmaxy_c:
N = True
NB_FLAGS = self.make_child_flags(N,S,E,W)
if self.options.verbose:
print "Fake tile %s,%s,%s" % (tz,tx,ty)
self.write_fake_tile(tz,tx,ty,NB_FLAGS)
# Write missing zero level tile with no children, tx 0 in case the zero level parent tileX is 1, 1 otherwise
if tx:
tx = 1-tx
if tx is None:
tx = 0
self.write_fake_tile(0,tx,0,0x00)
def write_fake_tile(self,tz,tx,ty,NB_FLAGS):
tilefilename = os.path.join(self.tmpoutput, str(tz), str(tx), "%s.%s" % (ty, self.tileext))
# Create directories for the tile
if not os.path.exists(os.path.dirname(tilefilename)):
os.makedirs(os.path.dirname(tilefilename))
if self.options.createtileindexshp and self.tilelayer is not None:
self.ti_cum += 1
tilelayerdefn = self.tilelayer.GetLayerDefn()
feat = ogr.Feature(tilelayerdefn)
feat.SetField('id', self.ti_cum)
feat.SetField('zoom', tz)
feat.SetField('tile', "%s_%s_%s" % (tz, tx, ty))
feat.SetField('children', NB_FLAGS)
b = self.geodetic.TileBounds(tx, ty, tz)
geom = ogr.CreateGeometryFromWkb(makeline(b[0], b[3], b[2], b[1]).wkb)
feat.SetGeometry(geom)
self.tilelayer.CreateFeature(feat)
feat = geom = None
        # convert to the integer heightmap representation according to the Cesium format and append the child-flags/water-mask bytes
tilearrayint = (numpy.zeros(4096,numpy.dtype('int16')) + 1000) * 5
tilearrayint.tofile(tilefilename)
child_water_bytes = struct.pack('<BB',NB_FLAGS,0x00)
with open(tilefilename,'ab') as outfile:
outfile.write(child_water_bytes)
def generate_tiles(self,vrt):
"""Generation of the Csium tiles from the inumpyut raster"""
print("Generating Tiles (round %s of %s):" % (self.step,self.steps))
# Cesium format neighbor tiles flags
HAS_SW = 0x01
HAS_SE = 0x02
HAS_NW = 0x04
HAS_NE = 0x08
tminz = self.vrts[vrt][0]
tmaxz = self.vrts[vrt][1]
tcount = 0
for tz in range(tmaxz, tminz-1, -1):
tminx, tminy, tmaxx, tmaxy = self.tminmax[tz]
tcount += (1+abs(tmaxx-tminx)) * (1+abs(tmaxy-tminy))
ti = 0
for tz in range(tmaxz, tminz-1, -1):
# do not overwrite any real tile with successive inputs' fake tiles
tminx, tminy, tmaxx, tmaxy = self.tminmax[tz]
if tz < self.tmaxz:
tminx_c, tminy_c, tmaxx_c, tmaxy_c = self.tminmax[tz+1]
if self.options.verbose:
tcount_zoom = (1+abs(tmaxx-tminx)) * (1+abs(tmaxy-tminy))
print ("Tminx - Tmax: %s-%s" % (tminx,tmaxx))
print ("Tminy - Tmaxy: %s-%s" % (tminy,tmaxy))
print("Tile count for zoom %s: %s" % (tz,tcount_zoom))
for ty in range(tmaxy, tminy-1, -1):
for tx in range(tminx, tmaxx+1):
if self.options.resume and os.path.exists(os.path.join(self.tmpoutput, str(tz), str(tx), "%s.%s" % (ty, self.tileext))):
continue
                    # By default the child flags are set to 0, which means no children tiles (Cesium format)
                    # Child flags are calculated for all zooms except the highest one (which has no children tiles)
# Child flags are calculated for all the zooms except the higher one (which has not children tiles)
if tz < self.tmaxz:
tminx_cpot = tx * 2
tmaxx_cpot = tminx_cpot + 1
tminy_cpot = ty * 2
tmaxy_cpot = tminy_cpot + 1
N = S = E = W = False
if tminx_cpot >= tminx_c and tminx_cpot <= tmaxx_c:
W = True
if tmaxx_cpot >= tminx_c and tmaxx_cpot <= tmaxx_c:
E = True
if tminy_cpot >= tminy_c and tminy_cpot <= tmaxy_c:
S = True
if tmaxy_cpot >= tminy_c and tmaxy_cpot <= tmaxy_c:
N = True
NB_FLAGS = self.make_child_flags(N,S,E,W)
if self.stopped:
break
ti += 1
self.ti_cum += 1
tilearray = self.process_tile(tz,tx,ty,ti,NB_FLAGS)
self.write_tile(tilearray,tz,tx,ty,NB_FLAGS)
if not self.options.verbose:
self.progressbar( ti / float(tcount) )
def process_tile(self,tz,tx,ty,ti,NB_FLAGS):
ds = self.out_ds
tilebands = self.dataBandsCount
querysize = self.querysize
b = self.geodetic.TileBounds(tx, ty, tz)
tilesize_aug = self.tilesize + self.extrapixels
b_aug = self.geodetic.TileBoundsForTileSize(tx, ty, tz, self.extrapixels)
if self.options.verbose:
print "Tile bounds %s,%s,%s,%s" % (b[0], b[1], b[2], b[3])
print "Tile bounds augomented %s,%s,%s,%s" % (b_aug[0], b_aug[1], b_aug[2], b_aug[3])
#print "Tile poly: %s" % makepoly(b_aug[0], b_aug[1], b_aug[2], b_aug[3]).wkt
if self.options.createtileindexshp and self.tilelayer is not None:
'''
shptileindex.write({
'geometry': mapping(makepoly(b[0], b[3], b[2], b[1])),
'properties': {'id': 123},
})
'''
tilelayerdefn = self.tilelayer.GetLayerDefn()
feat = ogr.Feature(tilelayerdefn)
feat.SetField('id', self.ti_cum)
feat.SetField('zoom', tz)
feat.SetField('tile', "%s_%s_%s" % (tz, tx, ty))
feat.SetField('children', NB_FLAGS)
geom = ogr.CreateGeometryFromWkb(makeline(b[0], b[3], b[2], b[1]).wkb)
feat.SetGeometry(geom)
self.tilelayer.CreateFeature(feat)
feat = geom = None
rb, wb = self.geo_query( ds, b_aug[0], b_aug[3], b_aug[2], b_aug[1])
nativesize = wb[0]+wb[2] # Pixel size in the raster covering query geo extent
if self.options.verbose:
print("\tNative Extent (querysize",nativesize,"): ", rb, wb)
# Tile bounds in raster coordinates for ReadRaster query with extrapixels for Cesium tiles overlap
querysize = self.querysize + ((self.querysize/self.tilesize) * self.extrapixels)
rb, wb = self.geo_query( ds, b_aug[0], b_aug[3], b_aug[2], b_aug[1], querysize=querysize)
rx, ry, rxsize, rysize = rb
wx, wy, wxsize, wysize = wb
if wxsize == 0:
wxsize = 1
if wysize == 0:
wysize = 1
if self.options.verbose:
print("\tReadRaster Extent: ", (rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize))
        # The query is in 'nearest neighbour' resolution but can be bigger than the tilesize
# We scale down the query to the tilesize by supplied algorithm.
# Tile dataset in memory
dstile = self.mem_drv.Create('', tilesize_aug, tilesize_aug, tilebands, gdal.GDT_Float32)
data = ds.ReadRaster(rx, ry, rxsize, rysize, wxsize, wysize, band_list=list(range(1,self.dataBandsCount+1)))
datatype = gdal_array.GDALTypeCodeToNumericTypeCode(ds.GetRasterBand(1).DataType)
if datatype != numpy.float32:
data = numpy.frombuffer(data, dtype=datatype).astype(numpy.float32).tostring()
if tilesize_aug == querysize:
# Use the ReadRaster result directly in tiles ('nearest neighbour' query)
dstile.WriteRaster(wx, wy, wxsize, wysize, data, band_list=list(range(1,self.dataBandsCount+1)))
else:
# Big ReadRaster query in memory scaled to the tilesize - all but 'near' algo
dsquery = self.mem_drv.Create('', querysize, querysize, tilebands, gdal.GDT_Float32)
dsquery.WriteRaster(wx, wy, wxsize, wysize, data, band_list=list(range(1,self.dataBandsCount+1)))
self.scale_query_to_tile(dsquery, dstile)
del dsquery
del data
tilearray = numpy.array(dstile.ReadAsArray())
del dstile
return tilearray
#return None
def write_tile(self,tilearray,tz,tx,ty,NB_FLAGS,WATER_MASK=0):
tilefilename = os.path.join(self.tmpoutput, str(tz), str(tx), "%s.%s" % (ty, self.tileext))
# Create directories for the tile
if not os.path.exists(os.path.dirname(tilefilename)):
os.makedirs(os.path.dirname(tilefilename))
        # convert to the integer heightmap representation according to the Cesium format and append the child-flags/water-mask bytes
tilearray = (tilearray+1000) * 5
tilearrayint = tilearray.astype(numpy.int16)
tilearrayint.tofile(tilefilename)
child_water_bytes = struct.pack('<BB',NB_FLAGS,WATER_MASK)
with open(tilefilename,'ab') as outfile:
outfile.write(child_water_bytes)
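    # Added note (sketch): the height encoding applied above stores each sample
    # as an int16 value of (height_m + 1000) * 5, so
    #   -1000 m -> 0,   0 m -> 5000,   250.4 m -> 6252,
    # and a reader recovers height_m = value / 5.0 - 1000.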
def create_layerjsonfile(self):
with open(os.path.join(self.output,'layer.json'),'w') as lj:
lj.write("""{
"tilejson": "2.1.0",
"format": "heightmap-1.0",
"version": "1.0.0",
"scheme": "tms",
"tiles": ["{z}/{x}/{y}.terrain"]
}""")
# -----------------------------------------------------------------------
def geo_query(self, ds, ulx, uly, lrx, lry, querysize = 0):
"""For given dataset and query in cartographic coordinates
returns parameters for ReadRaster() in raster coordinates and
x/y shifts (for border tiles). If the querysize is not given, the
extent is returned in the native resolution of dataset ds."""
geotran = ds.GetGeoTransform()
rx= int((ulx - geotran[0]) / geotran[1] + 0.001)
ry= int((uly - geotran[3]) / geotran[5] + 0.001)
rxsize= int((lrx - ulx) / geotran[1] + 0.5)
rysize= int((lry - uly) / geotran[5] + 0.5)
if not querysize:
wxsize, wysize = rxsize, rysize
else:
wxsize, wysize = querysize, querysize
# Coordinates should not go out of the bounds of the raster
wx = 0
if rx < 0:
rxshift = abs(rx)
wx = int( wxsize * (float(rxshift) / rxsize) )
wxsize = wxsize - wx
rxsize = rxsize - int( rxsize * (float(rxshift) / rxsize) )
rx = 0
if rx+rxsize > ds.RasterXSize:
wxsize = int( wxsize * (float(ds.RasterXSize - rx) / rxsize) )
rxsize = ds.RasterXSize - rx
wy = 0
if ry < 0:
ryshift = abs(ry)
wy = int( wysize * (float(ryshift) / rysize) )
wysize = wysize - wy
rysize = rysize - int( rysize * (float(ryshift) / rysize) )
ry = 0
if ry+rysize > ds.RasterYSize:
wysize = int( wysize * (float(ds.RasterYSize - ry) / rysize) )
rysize = ds.RasterYSize - ry
return (rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize)
# -------------------------------------------------------------------------
def scale_query_to_tile(self, dsquery, dstile):
"""Scales down query dataset to the tile dataset"""
querysize = dsquery.RasterXSize
tilesize = dstile.RasterXSize
tilebands = dstile.RasterCount
if self.options.resampling == 'average':
for i in range(1,tilebands+1):
res = gdal.RegenerateOverview( dsquery.GetRasterBand(i),
dstile.GetRasterBand(i), 'average' )
if res != 0:
self.error("RegenerateOverview() failed")
else:
# Other algorithms are implemented by gdal.ReprojectImage().
dsquery.SetGeoTransform( (0.0, tilesize / float(querysize), 0.0, 0.0, 0.0, tilesize / float(querysize)) )
dstile.SetGeoTransform( (0.0, 1.0, 0.0, 0.0, 0.0, 1.0) )
res = gdal.ReprojectImage(dsquery, dstile, None, None, self.resampling)
if res != 0:
self.error("ReprojectImage() failed on %s, error %d" % (tilefilename, res))
if __name__=='__main__':
argv = gdal.GeneralCmdLineProcessor( sys.argv )
if argv:
gdal2cesium = GDAL2Cesium( argv[1:] )
gdal2cesium.process()
| 42.910714 | 162 | 0.572749 |
4a1f1bc80706918cb82fcc8de48c1b17020584ca | 2,910 | py | Python | dust/events.py | MondoAurora/pydust | 29a363556cae6477795ccd97e604ef56aef8fbb9 | [
"Apache-2.0"
] | null | null | null | dust/events.py | MondoAurora/pydust | 29a363556cae6477795ccd97e604ef56aef8fbb9 | [
"Apache-2.0"
] | null | null | null | dust/events.py | MondoAurora/pydust | 29a363556cae6477795ccd97e604ef56aef8fbb9 | [
"Apache-2.0"
] | null | null | null | import json
import traceback
import calendar
import pytz
import datetime
from enum import Enum
from dateutil import parser
from dust import Datatypes, ValueTypes, Operation, MetaProps, FieldProps
from dust.entity import Store
UNIT_EVENTS = "events"
UNIT_EVENTS_META = "events_meta"
UNIT_ID = 3
UNIT_META_ID = 4
UTC = pytz.timezone('UTC')
FORMAT_DATETIME_EU = "%d.%m.%Y %H:%M:%S"
FORMAT_DATETIME_SHORT_EU = "%d.%m.%y %H:%M"
FORMAT_DATETIME_EU_TZ = "%d.%m.%Y %H:%M:%S %Z"
FORMAT_DATE_EU = "%d.%m.%Y"
FORMAT_DB_DATE = "%Y-%m-%d"
class RepeatTypes(Enum):
NO_REPEAT = 0
DAILY = 1
WEEKLY = 2
MONTHLY = 3
BYWEEKLY = 4
YEARLY = 5
DAYOFWEEK = 6
CUSTOM = 7
class EventType(Enum):
DATE = 0
TIME = 1
DATETIME = 2
class EventMeta(MetaProps):
start = (Datatypes.INT, ValueTypes.SINGLE, 1, 100)
duration_in_sec = (Datatypes.INT, ValueTypes.SINGLE, 2, 101)
repeat = (Datatypes.STRING, ValueTypes.SINGLE, 3, 102)
repeat_value = (Datatypes.INT, ValueTypes.LIST, 4, 103)
repeat_until = (Datatypes.INT, ValueTypes.LIST, 5, 104)
class EventTypes(FieldProps):
event = (UNIT_EVENTS_META, EventMeta, 1)
Store.create_unit(UNIT_EVENTS, UNIT_ID)
Store.load_types_from_enum(EventTypes, UNIT_META_ID)
def parse_event(event_value_start, event_type, iso=False, duration_in_sec=None, repeat_type=RepeatTypes.NO_REPEAT, repeat_value=None, repeat_until=None, ignoretz=False, tzinfos=None, tz=None):
try:
if iso:
dt = parser.isoparse(event_value_start)
else:
dt = parser.parse(event_value_start, ignoretz=ignoretz, tzinfos=tzinfos)
return get_event(dt, event_type, duration_in_sec, repeat_type, repeat_value, repeat_until, tz)
except:
traceback.print_exc()
return None
def get_event(dt, event_type, duration_in_sec=None, repeat_type=RepeatTypes.NO_REPEAT, repeat_value=None, repeat_until=None, tz=None):
event = Store.access(Operation.GET, None, UNIT_EVENTS, None, EventTypes.event)
if not tz is None:
if _is_naive(dt):
dt = tz.localize(dt, is_dst=None)
else:
dt = dt.astimezone(tz)
else:
if _is_naive(dt):
dt = UTC.localize(dt, is_dst=None)
event.access(Operation.SET, int(dt.timestamp()), EventMeta.start)
event.access(Operation.SET, duration_in_sec, EventMeta.duration_in_sec)
event.access(Operation.SET, repeat_type.name, EventMeta.repeat)
return event
def format_event(event, format_string=FORMAT_DATETIME_EU, tz=UTC):
repeat = RepeatTypes[event.access(Operation.GET, None, EventMeta.repeat)]
if repeat == RepeatTypes.NO_REPEAT:
dt = datetime.datetime.fromtimestamp(event.access(Operation.GET, None, EventMeta.start), tz)
return datetime.datetime.strftime(dt, format_string)
return ""
def _is_naive(dt):
return dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None
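# Added usage sketch (not part of this module). Assuming the dust Store has been
# initialised as above, a call might look like:
#
#     event = parse_event("2021-06-01T12:00:00+02:00", EventType.DATETIME,
#                         iso=True, duration_in_sec=3600)
#     print(format_event(event, FORMAT_DATETIME_EU, tz=UTC))
#
# parse_event returns None (and prints the traceback) if the string cannot be
# parsed.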
| 30.631579 | 192 | 0.704467 |
4a1f1c4f5b75e08c014bef56b92f776d82822075 | 4,123 | py | Python | build_dataset.py | torewulf/asip | 75cfe77adb5e52bff1e62e6a518d8cc997c329e7 | [
"MIT"
] | null | null | null | build_dataset.py | torewulf/asip | 75cfe77adb5e52bff1e62e6a518d8cc997c329e7 | [
"MIT"
] | null | null | null | build_dataset.py | torewulf/asip | 75cfe77adb5e52bff1e62e6a518d8cc997c329e7 | [
"MIT"
] | null | null | null | import os
import cv2
import netCDF4
import utils
import numpy as np
from tqdm import tqdm
DATA_DIR = '/data/users/twu/ds-2/dataset-2'
PATCHES_DIR = '/data/users/twu/ds-2/patches'
PATCH_SHAPE = (800, 800)
AMSR_PATCH_SHAPE = (16, 16)
OVERLAP = 0.25
ncs = [os.path.join(DATA_DIR, file) for file in os.listdir(DATA_DIR) if file.endswith('.nc')]
amsr_labels = ['btemp_6.9h', 'btemp_6.9v', 'btemp_7.3h', 'btemp_7.3v',
'btemp_10.7h', 'btemp_10.7v', 'btemp_18.7h', 'btemp_18.7v',
'btemp_23.8h', 'btemp_23.8v', 'btemp_36.5h', 'btemp_36.5v',
'btemp_89.0h', 'btemp_89.0v']
if not os.path.exists(PATCHES_DIR):
os.mkdir(PATCHES_DIR)
### PROCESSING ###
for nc_file in tqdm(ncs):
ncf = netCDF4.Dataset(nc_file)
scene_path = os.path.join(PATCHES_DIR, nc_file.split('/')[-1][:-3])
if not os.path.exists(scene_path):
os.mkdir(scene_path)
# Extracting variables from the .nc-file
HH = np.array(ncf.variables.get('sar_primary'))
HV = np.array(ncf.variables.get('sar_secondary'))
HH_nersc = np.array(ncf.variables.get('nersc_sar_primary'))
HV_nersc = np.array(ncf.variables.get('nersc_sar_secondary'))
IC = np.array(ncf.variables.get('polygon_icechart'))
DST = np.array(ncf.variables.get('distance_map')).astype('float32')
IC_codes = list(ncf.variables.get('polygon_codes'))
CT = utils.extract_IC_attribute(IC, IC_codes, attribute='CT')
INC = np.tile(np.array(ncf.variables.get('sar_incidenceangles')), (CT.shape[0], 1))
AMSR = [cv2.resize(np.array(ncf.variables.get(label)), (CT.shape[1], CT.shape[0]), interpolation=cv2.INTER_LINEAR) for label in amsr_labels]
# Replacing invalid data with NaNs
no_data = np.logical_or(np.isnan(HH), np.isnan(HH_nersc))
CT[no_data] = np.nan
DST[no_data] = np.nan
INC[no_data] = np.nan
HH_nersc[no_data] = np.nan
HV_nersc[no_data] = np.nan
for AMSR_channel in AMSR: AMSR_channel[no_data] = np.nan
# Extract all patches (shape=patch_shape) from HH and return indices of all patches without NaN values
HH_patches, non_nan_idxs = utils.extract_patches(HH, patch_shape=PATCH_SHAPE, return_non_nan_idxs=True, overlap=OVERLAP)
if not len(non_nan_idxs) == 0: # if valid non-NaN patches in scene
HH_patches = HH_patches[non_nan_idxs]
del HH
HV_patches = utils.extract_patches(HV, patch_shape=PATCH_SHAPE, overlap=OVERLAP)[non_nan_idxs]
del HV
HH_nersc_patches = utils.extract_patches(HH_nersc, patch_shape=PATCH_SHAPE, overlap=OVERLAP)[non_nan_idxs]
del HH_nersc
HV_nersc_patches = utils.extract_patches(HV_nersc, patch_shape=PATCH_SHAPE, overlap=OVERLAP)[non_nan_idxs]
del HV_nersc
CT_patches = utils.extract_patches(CT, patch_shape=PATCH_SHAPE, overlap=OVERLAP)[non_nan_idxs]
del CT
DST_patches = utils.extract_patches(DST, patch_shape=PATCH_SHAPE, overlap=OVERLAP)[non_nan_idxs]
del DST
INC_patches = utils.extract_patches(INC, patch_shape=PATCH_SHAPE, overlap=OVERLAP)[non_nan_idxs]
del INC
AMSR_patches = [utils.extract_patches(AMSR_channel, patch_shape=PATCH_SHAPE, overlap=OVERLAP, new_shape=AMSR_PATCH_SHAPE)[non_nan_idxs] for AMSR_channel in AMSR]
del AMSR
AMSR_patches = np.stack(AMSR_patches, axis=1)
for patch in range(HH_patches.shape[0]):
np.save(os.path.join(scene_path, str(patch) + '_S1.npy'), np.stack((HH_patches[patch], HV_patches[patch]), axis=0).astype('float32'))
np.save(os.path.join(scene_path, str(patch) + '_S1_nersc.npy'), np.stack((HH_nersc_patches[patch], HV_nersc_patches[patch]), axis=0).astype('float32'))
np.save(os.path.join(scene_path, str(patch) + '_INC.npy'), INC_patches[patch].astype('float32'))
np.save(os.path.join(scene_path, str(patch) + '_CT.npy'), CT_patches[patch].astype('uint8'))
np.save(os.path.join(scene_path, str(patch) + '_DST.npy'), DST_patches[patch].astype('uint8'))
np.save(os.path.join(scene_path, str(patch) + '_AMSR.npy'), AMSR_patches[patch].astype('float32'))
| 50.901235 | 169 | 0.690759 |
4a1f1cca5240098cd668172bb4b84cef8664ab97 | 344 | py | Python | pydar/__init__.py | MomsFriendlyRobotCompany/pydar | 20d5a6b382b4f047ba19f8f82a15a67ab3537543 | [
"MIT"
] | null | null | null | pydar/__init__.py | MomsFriendlyRobotCompany/pydar | 20d5a6b382b4f047ba19f8f82a15a67ab3537543 | [
"MIT"
] | 2 | 2018-10-14T20:45:52.000Z | 2018-10-14T20:47:10.000Z | pydar/__init__.py | MomsFriendlyRobotCompany/pydar | 20d5a6b382b4f047ba19f8f82a15a67ab3537543 | [
"MIT"
] | null | null | null |
from pydar.lidar_urg import URG04LX
from pydar.lidar_lds01 import LDS01
from pydar.rplidar import RPLidar
from pydar.format import Scan
#
# from collections import namedtuple
# Scan = namedtuple('Scan', 'scan timestamp')
# from pydar.rplidar import RPLidarA1, RPLidarA2
__author__ = "Kevin Walchko"
__license__ = "MIT"
__version__ = "0.0.7"
| 22.933333 | 48 | 0.77907 |
4a1f1ce2c11d6221736398175f4ae7f8de02ab0f | 6,687 | py | Python | pybind/slxos/v16r_1_00b/brocade_mpls_rpc/show_mpls_lsp_name_detail/output/lsp/show_mpls_lsp_detail_info/show_mpls_lsp_instances_info/lsp_instances/lsp_rsvp_session_rro_hops/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v16r_1_00b/brocade_mpls_rpc/show_mpls_lsp_name_detail/output/lsp/show_mpls_lsp_detail_info/show_mpls_lsp_instances_info/lsp_instances/lsp_rsvp_session_rro_hops/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v16r_1_00b/brocade_mpls_rpc/show_mpls_lsp_name_detail/output/lsp/show_mpls_lsp_detail_info/show_mpls_lsp_instances_info/lsp_instances/lsp_rsvp_session_rro_hops/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import show_mpls_lsp_hop_list
class lsp_rsvp_session_rro_hops(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-mpls - based on the path /brocade_mpls_rpc/show-mpls-lsp-name-detail/output/lsp/show-mpls-lsp-detail-info/show-mpls-lsp-instances-info/lsp-instances/lsp-rsvp-session-rro-hops. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__show_mpls_lsp_hop_list',)
_yang_name = 'lsp-rsvp-session-rro-hops'
_rest_name = 'lsp-rsvp-session-rro-hops'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__show_mpls_lsp_hop_list = YANGDynClass(base=show_mpls_lsp_hop_list.show_mpls_lsp_hop_list, is_container='container', presence=False, yang_name="show-mpls-lsp-hop-list", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'brocade_mpls_rpc', u'show-mpls-lsp-name-detail', u'output', u'lsp', u'show-mpls-lsp-detail-info', u'show-mpls-lsp-instances-info', u'lsp-instances', u'lsp-rsvp-session-rro-hops']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'show-mpls-lsp-name-detail', u'output', u'lsp', u'lsp-instances', u'lsp-rsvp-session-rro-hops']
def _get_show_mpls_lsp_hop_list(self):
"""
Getter method for show_mpls_lsp_hop_list, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_name_detail/output/lsp/show_mpls_lsp_detail_info/show_mpls_lsp_instances_info/lsp_instances/lsp_rsvp_session_rro_hops/show_mpls_lsp_hop_list (container)
"""
return self.__show_mpls_lsp_hop_list
def _set_show_mpls_lsp_hop_list(self, v, load=False):
"""
Setter method for show_mpls_lsp_hop_list, mapped from YANG variable /brocade_mpls_rpc/show_mpls_lsp_name_detail/output/lsp/show_mpls_lsp_detail_info/show_mpls_lsp_instances_info/lsp_instances/lsp_rsvp_session_rro_hops/show_mpls_lsp_hop_list (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_show_mpls_lsp_hop_list is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_show_mpls_lsp_hop_list() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=show_mpls_lsp_hop_list.show_mpls_lsp_hop_list, is_container='container', presence=False, yang_name="show-mpls-lsp-hop-list", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """show_mpls_lsp_hop_list must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=show_mpls_lsp_hop_list.show_mpls_lsp_hop_list, is_container='container', presence=False, yang_name="show-mpls-lsp-hop-list", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""",
})
self.__show_mpls_lsp_hop_list = t
if hasattr(self, '_set'):
self._set()
def _unset_show_mpls_lsp_hop_list(self):
self.__show_mpls_lsp_hop_list = YANGDynClass(base=show_mpls_lsp_hop_list.show_mpls_lsp_hop_list, is_container='container', presence=False, yang_name="show-mpls-lsp-hop-list", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
show_mpls_lsp_hop_list = __builtin__.property(_get_show_mpls_lsp_hop_list, _set_show_mpls_lsp_hop_list)
_pyangbind_elements = {'show_mpls_lsp_hop_list': show_mpls_lsp_hop_list, }
| 53.927419 | 466 | 0.741289 |
4a1f1d3e10cda9ef176d9ce9ecbd81968b1d8a44 | 95 | py | Python | pydiscourse/__init__.py | gregnewman/pydiscourse | 872cab43db7c81128a8ae5d8ee913a5c36d147c4 | [
"MIT"
] | null | null | null | pydiscourse/__init__.py | gregnewman/pydiscourse | 872cab43db7c81128a8ae5d8ee913a5c36d147c4 | [
"MIT"
] | null | null | null | pydiscourse/__init__.py | gregnewman/pydiscourse | 872cab43db7c81128a8ae5d8ee913a5c36d147c4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
__version__ = "1.1.0"
from pydiscourse.client import DiscourseClient
| 15.833333 | 46 | 0.694737 |
4a1f1d926f93a1b037b46ce6b25c861ea9ca88c7 | 1,436 | py | Python | automl/beta/import_dataset.py | m-abba/python-docs-samples | b00f00d734b89edae8ae6876d6261e19dc82cd34 | [
"Apache-2.0"
] | null | null | null | automl/beta/import_dataset.py | m-abba/python-docs-samples | b00f00d734b89edae8ae6876d6261e19dc82cd34 | [
"Apache-2.0"
] | null | null | null | automl/beta/import_dataset.py | m-abba/python-docs-samples | b00f00d734b89edae8ae6876d6261e19dc82cd34 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START automl_import_data_beta]
from google.cloud import automl_v1beta1 as automl
def import_dataset(
project_id="YOUR_PROJECT_ID",
dataset_id="YOUR_DATASET_ID",
path="gs://YOUR_BUCKET_ID/path/to/data.csv",
):
"""Import a dataset."""
client = automl.AutoMlClient()
# Get the full path of the dataset.
dataset_full_id = client.dataset_path(
project_id, "us-central1", dataset_id
)
# Get the multiple Google Cloud Storage URIs
input_uris = path.split(",")
gcs_source = automl.types.GcsSource(input_uris=input_uris)
input_config = automl.types.InputConfig(gcs_source=gcs_source)
# Import data from the input URI
response = client.import_data(dataset_full_id, input_config)
print("Processing import...")
print("Data imported. {}".format(response.result()))
# [END automl_import_data_beta]
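# Hypothetical usage sketch (identifiers below are placeholders, not real resources):
# import_dataset("my-project-id", "TBL1234567890", "gs://my-bucket/data/train.csv")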
| 35.9 | 74 | 0.735376 |
4a1f1dc641fc160e35d3238111feed4ff25a56bc | 30,750 | py | Python | pymatgen/analysis/defects/thermodynamics.py | molllyn1/pymatgen | 8ba60e6114cd8cd4ea818d3c9e84b71ebef0c654 | [
"MIT"
] | null | null | null | pymatgen/analysis/defects/thermodynamics.py | molllyn1/pymatgen | 8ba60e6114cd8cd4ea818d3c9e84b71ebef0c654 | [
"MIT"
] | null | null | null | pymatgen/analysis/defects/thermodynamics.py | molllyn1/pymatgen | 8ba60e6114cd8cd4ea818d3c9e84b71ebef0c654 | [
"MIT"
] | 1 | 2021-02-17T07:07:00.000Z | 2021-02-17T07:07:00.000Z | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Defect thermodynamics, such as defect phase diagrams, etc.
"""
import logging
from itertools import chain
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
from monty.json import MSONable
from scipy.optimize import bisect
from scipy.spatial import HalfspaceIntersection
from pymatgen.analysis.defects.core import DefectEntry
from pymatgen.analysis.structure_matcher import PointDefectComparator
from pymatgen.electronic_structure.dos import FermiDos
__author__ = "Danny Broberg, Shyam Dwaraknath"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyam Dwaraknath"
__email__ = "[email protected]"
__status__ = "Development"
__date__ = "Mar 15, 2018"
logger = logging.getLogger(__name__)
class DefectPhaseDiagram(MSONable):
"""
This is similar to a PhaseDiagram object in pymatgen,
but has ability to do quick analysis of defect formation energies
when fed DefectEntry objects.
uses many of the capabilities from PyCDT's DefectsAnalyzer class...
This class is able to get:
a) stability of charge states for a given defect,
b) list of all formation energies
c) transition levels in the gap
"""
def __init__(self, entries, vbm, band_gap, filter_compatible=True, metadata=None):
"""
Args:
dentries ([DefectEntry]): A list of DefectEntry objects
vbm (float): Valence Band energy to use for all defect entries.
NOTE if using band shifting-type correction then this VBM
should still be that of the GGA calculation
(the bandedgeshifting_correction accounts for shift's
contribution to formation energy).
band_gap (float): Band gap to use for all defect entries.
NOTE if using band shifting-type correction then this gap
should still be that of the Hybrid calculation you are shifting to.
filter_compatible (bool): Whether to consider entries which were ruled
incompatible by the DefectCompatibility class. Note this must be set to False
if you desire a suggestion for larger supercell sizes.
Default is True (to omit calculations which have "is_compatible"=False in
DefectEntry's parameters)
metadata (dict): Dictionary of metadata to store with the PhaseDiagram. Has
no impact on calculations
"""
self.vbm = vbm
self.band_gap = band_gap
self.filter_compatible = filter_compatible
if filter_compatible:
self.entries = [e for e in entries if e.parameters.get("is_compatible", True)]
else:
self.entries = entries
for ent_ind, ent in enumerate(self.entries):
if "vbm" not in ent.parameters.keys() or ent.parameters["vbm"] != vbm:
logger.info(
"Entry {} did not have vbm equal to given DefectPhaseDiagram value."
" Manually overriding.".format(ent.name)
)
new_ent = ent.copy()
new_ent.parameters["vbm"] = vbm
self.entries[ent_ind] = new_ent
self.metadata = metadata or {}
self.find_stable_charges()
def as_dict(self):
"""
Returns:
Json-serializable dict representation of DefectPhaseDiagram
"""
d = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"entries": [entry.as_dict() for entry in self.entries],
"vbm": self.vbm,
"band_gap": self.band_gap,
"filter_compatible": self.filter_compatible,
"metadata": self.metadata,
}
return d
@classmethod
def from_dict(cls, d):
"""
Reconstitute a DefectPhaseDiagram object from a dict representation created using
as_dict().
Args:
d (dict): dict representation of DefectPhaseDiagram.
Returns:
DefectPhaseDiagram object
"""
entries = [DefectEntry.from_dict(entry_dict) for entry_dict in d.get("entries")]
vbm = d["vbm"]
band_gap = d["band_gap"]
filter_compatible = d.get("filter_compatible", True)
metadata = d.get("metadata", {})
if "entry_id" in d.keys() and "entry_id" not in metadata:
metadata["entry_id"] = d["entry_id"]
return cls(
entries,
vbm,
band_gap,
filter_compatible=filter_compatible,
metadata=metadata,
)
def find_stable_charges(self):
"""
Sets the stable charges and transition states for a series of
defect entries. This function uses scipy's HalfspaceIntersection
to construct the polygons corresponding to defect stability as
a function of the Fermi-level. The Halfspace Intersection
constructs N-dimensional hyperplanes, in this case N=2, based
on the equation of defect formation energy with considering chemical
potentials:
E_form = E_0^{Corrected} + Q_{defect}*(E_{VBM} + E_{Fermi})
Extra hyperplanes are constructed to bound this space so that
the algorithm can actually find an enclosed region.
This code was modeled after the Halfspace Intersection code for
the Pourbaix Diagram
"""
def similar_defects(entryset):
"""
Used for grouping similar defects of different charges
Can distinguish identical defects even if they are not in the same position
"""
pdc = PointDefectComparator(check_charge=False, check_primitive_cell=True, check_lattice_scale=False)
grp_def_sets = []
grp_def_indices = []
for ent_ind, ent in enumerate(entryset):
# TODO: more pythonic way of grouping entry sets with PointDefectComparator.
# this is currently most time intensive part of DefectPhaseDiagram
matched_ind = None
for grp_ind, defgrp in enumerate(grp_def_sets):
if pdc.are_equal(ent.defect, defgrp[0].defect):
matched_ind = grp_ind
break
if matched_ind is not None:
grp_def_sets[matched_ind].append(ent.copy())
grp_def_indices[matched_ind].append(ent_ind)
else:
grp_def_sets.append([ent.copy()])
grp_def_indices.append([ent_ind])
return zip(grp_def_sets, grp_def_indices)
# Limits for search
# E_fermi = { -1 eV to band gap+1}
# E_formation = { (min(Eform) - 30) to (max(Eform) + 30)}
all_eform = [one_def.formation_energy(fermi_level=self.band_gap / 2.0) for one_def in self.entries]
min_y_lim = min(all_eform) - 30
max_y_lim = max(all_eform) + 30
limits = [[-1, self.band_gap + 1], [min_y_lim, max_y_lim]]
stable_entries = {}
finished_charges = {}
transition_level_map = {}
# Grouping by defect types
for defects, index_list in similar_defects(self.entries):
defects = list(defects)
# prepping coefficient matrix for half-space intersection
# [-Q, 1, -1*(E_form + Q*VBM)] -> -Q*E_fermi + E - (E_form + Q*VBM) <= 0, where E_fermi and E are the variables
# in the hyperplanes
hyperplanes = np.array(
[
[
-1.0 * entry.charge,
1,
-1.0 * (entry.energy + entry.charge * self.vbm),
]
for entry in defects
]
)
border_hyperplanes = [
[-1, 0, limits[0][0]],
[1, 0, -1 * limits[0][1]],
[0, -1, limits[1][0]],
[0, 1, -1 * limits[1][1]],
]
hs_hyperplanes = np.vstack([hyperplanes, border_hyperplanes])
interior_point = [self.band_gap / 2, min(all_eform) - 1.0]
hs_ints = HalfspaceIntersection(hs_hyperplanes, np.array(interior_point))
# Group the intersections and corresponding facets
ints_and_facets = zip(hs_ints.intersections, hs_ints.dual_facets)
# Only include the facets corresponding to entries, not the boundaries
total_entries = len(defects)
ints_and_facets = filter(
lambda int_and_facet: all(np.array(int_and_facet[1]) < total_entries),
ints_and_facets,
)
# sort based on transition level
ints_and_facets = list(sorted(ints_and_facets, key=lambda int_and_facet: int_and_facet[0][0]))
# log a defect name for tracking (using full index list to avoid naming
# inequivalent defects with the same name)
str_index_list = [str(ind) for ind in sorted(index_list)]
track_name = defects[0].name + "@" + str("-".join(str_index_list))
if len(ints_and_facets):
# Unpack into lists
_, facets = zip(*ints_and_facets)
# Map of transition level: charge states
transition_level_map[track_name] = {
intersection[0]: [defects[i].charge for i in facet] for intersection, facet in ints_and_facets
}
stable_entries[track_name] = list({defects[i] for dual in facets for i in dual})
finished_charges[track_name] = [defect.charge for defect in defects]
else:
# if ints_and_facets is empty, then there is likely only one defect...
if len(defects) != 1:
# confirm formation energies dominant for one defect over other identical defects
name_set = [one_def.name + "_chg" + str(one_def.charge) for one_def in defects]
vb_list = [one_def.formation_energy(fermi_level=limits[0][0]) for one_def in defects]
cb_list = [one_def.formation_energy(fermi_level=limits[0][1]) for one_def in defects]
vbm_def_index = vb_list.index(min(vb_list))
name_stable_below_vbm = name_set[vbm_def_index]
cbm_def_index = cb_list.index(min(cb_list))
name_stable_above_cbm = name_set[cbm_def_index]
if name_stable_below_vbm != name_stable_above_cbm:
raise ValueError(
"HalfSpace identified only one stable charge out of list: {}\n"
"But {} is stable below vbm and {} is "
"stable above cbm.\nList of VBM formation energies: {}\n"
"List of CBM formation energies: {}"
"".format(
name_set,
name_stable_below_vbm,
name_stable_above_cbm,
vb_list,
cb_list,
)
)
logger.info("{} is only stable defect out of {}".format(name_stable_below_vbm, name_set))
transition_level_map[track_name] = {}
stable_entries[track_name] = list([defects[vbm_def_index]])
finished_charges[track_name] = [one_def.charge for one_def in defects]
else:
transition_level_map[track_name] = {}
stable_entries[track_name] = list([defects[0]])
finished_charges[track_name] = [defects[0].charge]
self.transition_level_map = transition_level_map
self.transition_levels = {
defect_name: list(defect_tls.keys()) for defect_name, defect_tls in transition_level_map.items()
}
self.stable_entries = stable_entries
self.finished_charges = finished_charges
self.stable_charges = {
defect_name: [entry.charge for entry in entries] for defect_name, entries in stable_entries.items()
}
@property
def defect_types(self):
"""
List types of defects existing in the DefectPhaseDiagram
"""
return list(self.finished_charges.keys())
@property
def all_stable_entries(self):
"""
List all stable entries (defect+charge) in the DefectPhaseDiagram
"""
return set(chain.from_iterable(self.stable_entries.values()))
@property
def all_unstable_entries(self):
"""
List all unstable entries (defect+charge) in the DefectPhaseDiagram
"""
all_stable_entries = self.all_stable_entries
return [e for e in self.entries if e not in all_stable_entries]
def defect_concentrations(self, chemical_potentials, temperature=300, fermi_level=0.0):
"""
Give list of all concentrations at specified efermi in the DefectPhaseDiagram
args:
chemical_potentials = {Element: number} is dict of chemical potentials to provide formation energies for
temperature = temperature to produce concentrations from
fermi_level: (float) is fermi level relative to valence band maximum
Default efermi = 0 = VBM energy
returns:
list of dictionaries of defect concentrations
"""
concentrations = []
for dfct in self.all_stable_entries:
concentrations.append(
{
"conc": dfct.defect_concentration(
chemical_potentials=chemical_potentials,
temperature=temperature,
fermi_level=fermi_level,
),
"name": dfct.name,
"charge": dfct.charge,
}
)
return concentrations
def suggest_charges(self, tolerance=0.1):
"""
Suggest possible charges for defects to compute based on proximity
of known transitions from entries to the VBM and CBM
Args:
tolerance (float): tolerance with respect to the VBM and CBM to
continue to compute new charges
"""
recommendations = {}
for def_type in self.defect_types:
test_charges = np.arange(
np.min(self.stable_charges[def_type]) - 1,
np.max(self.stable_charges[def_type]) + 2,
)
test_charges = [charge for charge in test_charges if charge not in self.finished_charges[def_type]]
if len(self.transition_level_map[def_type].keys()):
# More positive charges will shift the minimum transition level down
# Max charge is limited by this if its transition level is close to VBM
min_tl = min(self.transition_level_map[def_type].keys())
if min_tl < tolerance:
max_charge = max(self.transition_level_map[def_type][min_tl])
test_charges = [charge for charge in test_charges if charge < max_charge]
# More negative charges will shift the maximum transition level up
# Minimum charge is limited by this if transition level is near CBM
max_tl = max(self.transition_level_map[def_type].keys())
if max_tl > (self.band_gap - tolerance):
min_charge = min(self.transition_level_map[def_type][max_tl])
test_charges = [charge for charge in test_charges if charge > min_charge]
else:
test_charges = [charge for charge in test_charges if charge not in self.stable_charges[def_type]]
recommendations[def_type] = test_charges
return recommendations
def suggest_larger_supercells(self, tolerance=0.1):
"""
Suggest larger supercells for different defect+chg combinations based on use of
compatibility analysis. Does this for any charged defects which have is_compatible = False,
and the defect+chg formation energy is stable at fermi levels within the band gap.
NOTE: Requires self.filter_compatible = False
Args:
tolerance (float): tolerance with respect to the VBM and CBM for considering
larger supercells for a given charge
"""
if self.filter_compatible:
raise ValueError("Cannot suggest larger supercells if filter_compatible is True.")
recommendations = {}
for def_type in self.defect_types:
template_entry = self.stable_entries[def_type][0].copy()
defect_indices = [int(def_ind) for def_ind in def_type.split("@")[-1].split("-")]
for charge in self.finished_charges[def_type]:
chg_defect = template_entry.defect.copy()
chg_defect.set_charge(charge)
for entry_index in defect_indices:
entry = self.entries[entry_index]
if entry.charge == charge:
break
if entry.parameters.get("is_compatible", True):
continue
# consider if transition level is within
# tolerance of band edges
suggest_bigger_supercell = True
for tl, chgset in self.transition_level_map[def_type].items():
sorted_chgset = list(chgset)
sorted_chgset.sort(reverse=True)
if charge == sorted_chgset[0] and tl < tolerance:
suggest_bigger_supercell = False
elif charge == sorted_chgset[1] and tl > (self.band_gap - tolerance):
suggest_bigger_supercell = False
if suggest_bigger_supercell:
if def_type not in recommendations:
recommendations[def_type] = []
recommendations[def_type].append(charge)
return recommendations
def solve_for_fermi_energy(self, temperature, chemical_potentials, bulk_dos):
"""
Solve for the Fermi energy self-consistently as a function of T
Observations are defect concentrations and electron and hole concentrations
Args:
temperature: Temperature to equilibrate fermi energies for
chemical_potentials: dict of chemical potentials to use for calculation fermi level
bulk_dos: bulk system dos (pymatgen Dos object)
Returns:
Fermi energy dictated by charge neutrality
"""
fdos = FermiDos(bulk_dos, bandgap=self.band_gap)
_, fdos_vbm = fdos.get_cbm_vbm()
def _get_total_q(ef):
qd_tot = sum(
[
d["charge"] * d["conc"]
for d in self.defect_concentrations(
chemical_potentials=chemical_potentials,
temperature=temperature,
fermi_level=ef,
)
]
)
qd_tot += fdos.get_doping(fermi_level=ef + fdos_vbm, temperature=temperature)
return qd_tot
return bisect(_get_total_q, -1.0, self.band_gap + 1.0)
def solve_for_non_equilibrium_fermi_energy(self, temperature, quench_temperature, chemical_potentials, bulk_dos):
"""
Solve for the Fermi energy after quenching in the defect concentrations at a higher
temperature (the quench temperature),
as outlined in P. Canepa et al (2017) Chemistry of Materials (doi: 10.1021/acs.chemmater.7b02909)
Args:
temperature: Temperature to equilibrate fermi energy at after quenching in defects
quench_temperature: Temperature to equilibrate defect concentrations at (higher temperature)
chemical_potentials: dict of chemical potentials to use for calculation fermi level
bulk_dos: bulk system dos (pymatgen Dos object)
Returns:
Fermi energy dictated by charge neutrality with respect to frozen in defect concentrations
"""
high_temp_fermi_level = self.solve_for_fermi_energy(quench_temperature, chemical_potentials, bulk_dos)
fixed_defect_charge = sum(
[
d["charge"] * d["conc"]
for d in self.defect_concentrations(
chemical_potentials=chemical_potentials,
temperature=quench_temperature,
fermi_level=high_temp_fermi_level,
)
]
)
fdos = FermiDos(bulk_dos, bandgap=self.band_gap)
_, fdos_vbm = fdos.get_cbm_vbm()
def _get_total_q(ef):
qd_tot = fixed_defect_charge
qd_tot += fdos.get_doping(fermi_level=ef + fdos_vbm, temperature=temperature)
return qd_tot
return bisect(_get_total_q, -1.0, self.band_gap + 1.0)
def get_dopability_limits(self, chemical_potentials):
"""
Find Dopability limits for a given chemical potential.
This is defined by the Fermi levels at which charged-defect formation
energies first cross zero.
This determines bounds on the Fermi level.
Does this by computing formation energy for every stable defect with non-zero charge.
If the formation energy value changes sign on either side of the band gap, then
compute the fermi level value where the formation energy is zero
(formation energies are lines and basic algebra shows: x_crossing = x1 - (y1 / q)
for fermi level, x1, producing formation energy y1)
Args:
chemical_potentials: dict of chemical potentials to use for calculation fermi level
Returns:
lower dopability limit, upper dopability limit
(returns None if no limit exists for upper or lower i.e. no negative defect
crossing before +/- 20 of band edges OR defect formation energies are entirely zero)
"""
min_fl_range = -20.0
max_fl_range = self.band_gap + 20.0
lower_lim = None
upper_lim = None
for def_entry in self.all_stable_entries:
min_fl_formen = def_entry.formation_energy(
chemical_potentials=chemical_potentials, fermi_level=min_fl_range
)
max_fl_formen = def_entry.formation_energy(
chemical_potentials=chemical_potentials, fermi_level=max_fl_range
)
if min_fl_formen < 0.0 and max_fl_formen < 0.0:
logger.error(
"Formation energy is negative through entire gap for entry {} q={}."
" Cannot return dopability limits.".format(def_entry.name, def_entry.charge)
)
return None, None
if np.sign(min_fl_formen) != np.sign(max_fl_formen):
x_crossing = min_fl_range - (min_fl_formen / def_entry.charge)
if min_fl_formen < 0.0:
if lower_lim is None or lower_lim < x_crossing:
lower_lim = x_crossing
else:
if upper_lim is None or upper_lim > x_crossing:
upper_lim = x_crossing
return lower_lim, upper_lim
def plot(
self,
mu_elts=None,
xlim=None,
ylim=None,
ax_fontsize=1.3,
lg_fontsize=1.0,
lg_position=None,
fermi_level=None,
title=None,
saved=False,
):
"""
Produce defect Formation energy vs Fermi energy plot
Args:
mu_elts:
a dictionary of {Element: value} giving the chemical
potential of each element
xlim:
Tuple (min,max) giving the range of the x (fermi energy) axis
ylim:
Tuple (min,max) giving the range for the formation energy axis
ax_fontsize:
float multiplier to change axis label fontsize
lg_fontsize:
float multiplier to change legend label fontsize
lg_position:
Tuple (horizontal-position, vertical-position) giving the position
to place the legend.
Example: (0.5,-0.75) will likely put it below the x-axis.
saved: whether to save the plot to a pdf file (named from the title) instead of returning the matplotlib object
Returns:
a matplotlib object
"""
if xlim is None:
xlim = (-0.5, self.band_gap + 0.5)
xy = {}
lower_cap = -100.0
upper_cap = 100.0
y_range_vals = [] # for finding max/min values on y-axis based on x-limits
for defnom, def_tl in self.transition_level_map.items():
xy[defnom] = [[], []]
if def_tl:
org_x = sorted(def_tl.keys()) # list of transition levels
# establish lower x-bound
first_charge = max(def_tl[org_x[0]])
for chg_ent in self.stable_entries[defnom]:
if chg_ent.charge == first_charge:
form_en = chg_ent.formation_energy(chemical_potentials=mu_elts, fermi_level=lower_cap)
fe_left = chg_ent.formation_energy(chemical_potentials=mu_elts, fermi_level=xlim[0])
xy[defnom][0].append(lower_cap)
xy[defnom][1].append(form_en)
y_range_vals.append(fe_left)
# iterate over stable charge state transitions
for fl in org_x:
charge = max(def_tl[fl])
for chg_ent in self.stable_entries[defnom]:
if chg_ent.charge == charge:
form_en = chg_ent.formation_energy(chemical_potentials=mu_elts, fermi_level=fl)
xy[defnom][0].append(fl)
xy[defnom][1].append(form_en)
y_range_vals.append(form_en)
# establish upper x-bound
last_charge = min(def_tl[org_x[-1]])
for chg_ent in self.stable_entries[defnom]:
if chg_ent.charge == last_charge:
form_en = chg_ent.formation_energy(chemical_potentials=mu_elts, fermi_level=upper_cap)
fe_right = chg_ent.formation_energy(chemical_potentials=mu_elts, fermi_level=xlim[1])
xy[defnom][0].append(upper_cap)
xy[defnom][1].append(form_en)
y_range_vals.append(fe_right)
else:
# no transition - just one stable charge
chg_ent = self.stable_entries[defnom][0]
for x_extrem in [lower_cap, upper_cap]:
xy[defnom][0].append(x_extrem)
xy[defnom][1].append(chg_ent.formation_energy(chemical_potentials=mu_elts, fermi_level=x_extrem))
for x_window in xlim:
y_range_vals.append(chg_ent.formation_energy(chemical_potentials=mu_elts, fermi_level=x_window))
if ylim is None:
window = max(y_range_vals) - min(y_range_vals)
spacer = 0.1 * window
ylim = (min(y_range_vals) - spacer, max(y_range_vals) + spacer)
if len(xy) <= 8:
colors = cm.Dark2(np.linspace(0, 1, len(xy))) # pylint: disable=E1101
else:
colors = cm.gist_rainbow(np.linspace(0, 1, len(xy))) # pylint: disable=E1101
plt.figure()
plt.clf()
width = 12
# plot formation energy lines
for_legend = []
for cnt, defnom in enumerate(xy.keys()):
plt.plot(xy[defnom][0], xy[defnom][1], linewidth=3, color=colors[cnt])
for_legend.append(self.stable_entries[defnom][0].copy())
# plot transition levels
for cnt, defnom in enumerate(xy.keys()):
x_trans, y_trans = [], []
for x_val, chargeset in self.transition_level_map[defnom].items():
x_trans.append(x_val)
for chg_ent in self.stable_entries[defnom]:
if chg_ent.charge == chargeset[0]:
form_en = chg_ent.formation_energy(chemical_potentials=mu_elts, fermi_level=x_val)
y_trans.append(form_en)
if len(x_trans):
plt.plot(
x_trans,
y_trans,
marker="*",
color=colors[cnt],
markersize=12,
fillstyle="full",
)
# get latex-like legend titles
legends_txt = []
for dfct in for_legend:
flds = dfct.name.split("_")
if flds[0] == "Vac":
base = "$Vac"
sub_str = "_{" + flds[1] + "}$"
elif flds[0] == "Sub":
flds = dfct.name.split("_")
base = "$" + flds[1]
sub_str = "_{" + flds[3] + "}$"
elif flds[0] == "Int":
base = "$" + flds[1]
sub_str = "_{inter}$"
else:
base = dfct.name
sub_str = ""
legends_txt.append(base + sub_str)
if not lg_position:
plt.legend(legends_txt, fontsize=lg_fontsize * width, loc=0)
else:
plt.legend(
legends_txt,
fontsize=lg_fontsize * width,
ncol=3,
loc="lower center",
bbox_to_anchor=lg_position,
)
plt.ylim(ylim)
plt.xlim(xlim)
plt.plot([xlim[0], xlim[1]], [0, 0], "k-")  # solid black line at E_formation = 0
plt.axvline(x=0.0, linestyle="--", color="k", linewidth=3) # black dashed lines for gap edges
plt.axvline(x=self.band_gap, linestyle="--", color="k", linewidth=3)
if fermi_level is not None:
plt.axvline(x=fermi_level, linestyle="-.", color="k", linewidth=2)  # dash-dot line marking the provided Fermi level
plt.xlabel("Fermi energy (eV)", size=ax_fontsize * width)
plt.ylabel("Defect Formation\nEnergy (eV)", size=ax_fontsize * width)
if title:
plt.title("{}".format(title), size=ax_fontsize * width)
if saved:
plt.savefig(str(title) + "FreyplnravgPlot.pdf")
else:
return plt
return None
| 42.239011 | 118 | 0.57987 |
4a1f1e1fbf51a77f27a9351e96a7739b0949bcb8 | 911 | py | Python | boilerio/tests/test_pwm.py | adpeace/boilerio | 730486b46a5b5a7b62d2a3d932378c3fa7dff2a0 | [
"MIT"
] | 4 | 2017-07-13T04:57:15.000Z | 2021-12-10T01:28:46.000Z | boilerio/tests/test_pwm.py | adpeace/boilerio | 730486b46a5b5a7b62d2a3d932378c3fa7dff2a0 | [
"MIT"
] | 14 | 2021-01-17T22:42:27.000Z | 2021-12-10T01:30:37.000Z | boilerio/tests/test_pwm.py | adpeace/boilerio | 730486b46a5b5a7b62d2a3d932378c3fa7dff2a0 | [
"MIT"
] | 3 | 2017-04-19T21:34:15.000Z | 2021-11-20T10:45:56.000Z | import mock
from datetime import datetime, timedelta
from boilerio import pwm
def test_start_off():
mock_device = mock.MagicMock()
c = pwm.PWM(0, timedelta(0, 600), mock_device)
now = datetime.now()
c.update(now)
mock_device.off.assert_called()
mock_device.on.assert_not_called()
def test_start_on():
mock_device = mock.MagicMock()
c = pwm.PWM(0.5, timedelta(0, 600), mock_device)
now = datetime.now()
c.update(now)
mock_device.off.assert_not_called()
mock_device.on.assert_called()
def test_device_modulated():
mock_device = mock.MagicMock()
period = timedelta(0,600)
off_before = timedelta(0,301)
c = pwm.PWM(0.5, timedelta(0, 600), mock_device)
now = datetime.now()
c.update(now)
mock_device.off.assert_not_called()
mock_device.on.assert_called()
now += off_before
c.update(now)
mock_device.off.assert_called()
| 26.028571 | 52 | 0.689352 |
4a1f1ea2e00aa0e12aea801de4207d1ffddb6303 | 5,837 | py | Python | hackjohn.py | ScottBishop/hackjohn | c276cffe76921d0e869022f9d6ae69d2d15e034e | [
"MIT"
] | null | null | null | hackjohn.py | ScottBishop/hackjohn | c276cffe76921d0e869022f9d6ae69d2d15e034e | [
"MIT"
] | null | null | null | hackjohn.py | ScottBishop/hackjohn | c276cffe76921d0e869022f9d6ae69d2d15e034e | [
"MIT"
] | null | null | null | """
Bot to monitor for southbound permit spaces on the John Muir Trail
Written by Daniel Himmelstein
Check whether any spaces are available for the
"Donohue Exit Quota and Trailhead Space Available".
This is for people hiking the John Muir Trail starting in Yosemite.
According to the reservations office,
the table is usually updated around 11 AM pacific time
and spaces are usually snatched within ten minutes.
Call the reservation number if there's availability at 209-372-0740.
"""
import pathlib
import re
import requests
import pandas
from pkg_resources import parse_version
# Minimum number of available spaces
spaces = 2
# Comment out trailheads you'd like to start from
exclude = [
# 'Happy Isles->Little Yosemite Valley',
# 'Happy Isles->Sunrise/Merced Lake (pass through)',
#"Glacier Point->Little Yosemite Valley",
#"Sunrise Lakes",
#"Lyell Canyon",
]
# Dates you'd like to start on (inclusive of end date)
dates = pandas.date_range(start="2020-7-12", end="2020-8-31", freq="D")
dates
# Write output to this file. If the generated output is identical to
# the existing output at this path, suppress notification. To disable
# writing any files, set output_path=None as shown below.
output_path = pathlib.Path("__file__").parent.joinpath("hackjohn-output.txt")
# output_path = None # None disables writing to a file
# If the Report Date is before this day, suppress Telegram notification.
# You probably do not need to change this setting unless you have disabled
# output_path
min_report_date = "2019-01-01"
def get_trailhead_df():
"""
Convert the current "Donohue Exit Quota and Trailhead Space Available" HTML table
to a pandas.DataFrame.
"""
pandas_version = parse_version(pandas.__version__)._version.release
if pandas_version[:2] == (0, 23):
# read_html malfunctions in pandas v0.23
# https://github.com/pandas-dev/pandas/issues/22135
raise ImportError("pandas v0.23 is not supported due to https://git.io/fp9Zn")
url = "https://www.nps.gov/yose/planyourvisit/fulltrailheads.htm"
response = requests.get(url)
response.raise_for_status()
(wide_df,) = pandas.read_html(
response.text,
header=2,
attrs={"id": "cs_idLayout2"},
flavor="html5lib",
parse_dates=["Date"],
)
wide_df = wide_df.iloc[:, :6]
trailhead_df = (
wide_df.melt(id_vars="Date", var_name="Trailhead", value_name="Spaces")
.dropna()
.sort_values(by=["Date"], kind="mergesort")
)
trailhead_df.Spaces = trailhead_df.Spaces.astype(int)
assert len(trailhead_df) > 0
return response, trailhead_df
yose_response, trailhead_df = get_trailhead_df()
trailhead_df.head(2)
# Extract report date. https://github.com/dhimmel/hackjohn/issues/1
try:
match = re.search(r"Report Date: ([0-9/]+)", yose_response.text)
report_date = match.group(1)
report_date = pandas.to_datetime(report_date, dayfirst=False)
except Exception:
report_date = yose_response.headers["Date"]
report_date = pandas.to_datetime(report_date, utc=True)
report_date = report_date.date().isoformat()
space_df = trailhead_df.query(
"Date in @dates and Spaces >= @spaces and Trailhead not in @exclude"
)
space_df
space_str = "NO VACANCY" if space_df.empty else space_df.to_string(index=False)
text = f"""Spaces available as of {report_date}:
{space_str}
According to {yose_response.url}
Yosemite Reservations: 209-372-0740 (Monday–Friday 9:00am–4:30pm)
"""
print(text)
# Detect if output_path has changed. If so, rewrite output.
output_has_changed = True
if output_path:
output_path = pathlib.Path(output_path)
if output_path.is_file():
previous_text = output_path.read_text()
output_has_changed = text != previous_text
if output_has_changed:
output_path.write_text(text)
print(f"output has changed: {output_has_changed}")
# determine whether to notify
notify = not space_df.empty and output_has_changed and min_report_date <= report_date
## Notifications using MiddlemanBot
# Uses https://github.com/n1try/telegram-middleman-bot
# Set enable_middleman to True to receive telegram notification
enable_middleman = False
# Get token from messaging /start to @MiddleManBot on Telegram
# https://telegram.me/MiddleManBot
token = "f58252f0-c58a-4bb6-9d8b-44c759218662"
hostname = "http://localhost:8080/"
mmb_url = hostname + "/api/messages"
payload = {
"recipient_token": token,
"text": text,
"origin": "hackjohn",
}
if notify and enable_middleman:
print("sending middleman request")
mmb_response = requests.post(mmb_url, json=payload)
print("middleman status code", mmb_response.status_code)
print(mmb_response.text)
## Notifications using IFTTT
enable_ifttt = True
event_name = "hackjohn"
# Set enable_ifttt to True and personalize ifttt_key to receive IFTTT notifications
# enable_ifttt = True
ifttt_key = "fJ09D0313TIjpguNi-w59LHheAO-51wYw3pTqnz8pLH"
ifttt_hostname = "https://maker.ifttt.com"
ifttt_url = ifttt_hostname + "/trigger/" + event_name + "/with/key/" + ifttt_key
if notify and enable_ifttt:
report = {
"value1": text,
"value2": "209-372-0740",
}
response = requests.post(ifttt_url, data=report)
print("ifttt status code", response.status_code)
print(response.text)
# Set enable_ifttt to True and personalize ifttt_key to receive IFTTT notifications
ifttt_key = "bXfB1siCKuPTjSHqVj3xrL"
ifttt_hostname = "https://maker.ifttt.com"
ifttt_url = ifttt_hostname + "/trigger/" + event_name + "/with/key/" + ifttt_key
if notify and enable_ifttt:
report = {
"value1": text,
"value2": "209-372-0740",
}
response = requests.post(ifttt_url, data=report)
print("ifttt status code", response.status_code)
print(response.text)
| 31.38172 | 86 | 0.723831 |
4a1f1ee683be45f7f538548810e2b151f0140bc5 | 8,814 | py | Python | tuframework/experiment_planning/experiment_planner_baseline_2DUNet.py | Magnety/tuFramework | b31cb34d476ef306b52da955021f93c91c14ddf4 | [
"Apache-2.0"
] | null | null | null | tuframework/experiment_planning/experiment_planner_baseline_2DUNet.py | Magnety/tuFramework | b31cb34d476ef306b52da955021f93c91c14ddf4 | [
"Apache-2.0"
] | null | null | null | tuframework/experiment_planning/experiment_planner_baseline_2DUNet.py | Magnety/tuFramework | b31cb34d476ef306b52da955021f93c91c14ddf4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import tuframework
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import load_pickle, subfiles
from multiprocessing.pool import Pool
from tuframework.configuration import default_num_threads
from tuframework.experiment_planning.common_utils import get_pool_and_conv_props
from tuframework.experiment_planning.experiment_planner_baseline_3DUNet import ExperimentPlanner
from tuframework.experiment_planning.utils import add_classes_in_slice_info
from tuframework.network_architecture.generic_UNet import Generic_UNet
from tuframework.paths import *
from tuframework.preprocessing.preprocessing import PreprocessorFor2D
from tuframework.training.model_restore import recursive_find_python_class
class ExperimentPlanner2D(ExperimentPlanner):
def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
super(ExperimentPlanner2D, self).__init__(folder_with_cropped_data,
preprocessed_output_folder)
self.data_identifier = default_data_identifier + "_2D"
self.plans_fname = join(self.preprocessed_output_folder, "tuframeworkPlans" + "_plans_2D.pkl")
self.unet_base_num_features = 30
self.unet_max_num_filters = 512
self.unet_max_numpool = 999
self.preprocessor_name = "PreprocessorFor2D"
def get_properties_for_stage(self, current_spacing, original_spacing, original_shape, num_cases,
num_modalities, num_classes):
new_median_shape = np.round(original_spacing / current_spacing * original_shape).astype(int)
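# Illustrative note (not part of the original code): for a hypothetical case with
# original_spacing = [1, 1, 1] mm, current_spacing = [1, 2, 2] mm and
# original_shape = [100, 512, 512], the line above yields
# round([1,1,1] / [1,2,2] * [100,512,512]) = [100, 256, 256].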
dataset_num_voxels = np.prod(new_median_shape, dtype=np.int64) * num_cases
input_patch_size = new_median_shape[1:]
network_numpool, net_pool_kernel_sizes, net_conv_kernel_sizes, input_patch_size, \
shape_must_be_divisible_by = get_pool_and_conv_props(current_spacing[1:], input_patch_size,
self.unet_featuremap_min_edge_length,
self.unet_max_numpool)
estimated_gpu_ram_consumption = Generic_UNet.compute_approx_vram_consumption(input_patch_size,
network_numpool,
self.unet_base_num_features,
self.unet_max_num_filters,
num_modalities, num_classes,
net_pool_kernel_sizes,
conv_per_stage=self.conv_per_stage)
batch_size = int(np.floor(Generic_UNet.use_this_for_batch_size_computation_2D /
estimated_gpu_ram_consumption * Generic_UNet.DEFAULT_BATCH_SIZE_2D))
if batch_size < self.unet_min_batch_size:
raise RuntimeError("This framework is not made to process patches this large. We will add patch-based "
"2D networks later. Sorry for the inconvenience")
# check if batch size is too large (more than 5 % of dataset)
max_batch_size = np.round(self.batch_size_covers_max_percent_of_dataset * dataset_num_voxels /
np.prod(input_patch_size, dtype=np.int64)).astype(int)
batch_size = min(batch_size, max_batch_size)
plan = {
'batch_size': batch_size,
'num_pool_per_axis': network_numpool,
'patch_size': input_patch_size,
'median_patient_size_in_voxels': new_median_shape,
'current_spacing': current_spacing,
'original_spacing': original_spacing,
'pool_op_kernel_sizes': net_pool_kernel_sizes,
'conv_kernel_sizes': net_conv_kernel_sizes,
'do_dummy_2D_data_aug': False
}
return plan
def plan_experiment(self):
use_nonzero_mask_for_normalization = self.determine_whether_to_use_mask_for_norm()
print("Are we using the nonzero maks for normalizaion?", use_nonzero_mask_for_normalization)
spacings = self.dataset_properties['all_spacings']
sizes = self.dataset_properties['all_sizes']
all_classes = self.dataset_properties['all_classes']
modalities = self.dataset_properties['modalities']
num_modalities = len(list(modalities.keys()))
target_spacing = self.get_target_spacing()
new_shapes = np.array([np.array(i) / target_spacing * np.array(j) for i, j in zip(spacings, sizes)])
max_spacing_axis = np.argmax(target_spacing)
remaining_axes = [i for i in list(range(3)) if i != max_spacing_axis]
self.transpose_forward = [max_spacing_axis] + remaining_axes
self.transpose_backward = [np.argwhere(np.array(self.transpose_forward) == i)[0][0] for i in range(3)]
# we base our calculations on the median shape of the datasets
median_shape = np.median(np.vstack(new_shapes), 0)
print("the median shape of the dataset is ", median_shape)
max_shape = np.max(np.vstack(new_shapes), 0)
print("the max shape in the dataset is ", max_shape)
min_shape = np.min(np.vstack(new_shapes), 0)
print("the min shape in the dataset is ", min_shape)
print("we don't want feature maps smaller than ", self.unet_featuremap_min_edge_length, " in the bottleneck")
# how many stages will the image pyramid have?
self.plans_per_stage = []
target_spacing_transposed = np.array(target_spacing)[self.transpose_forward]
median_shape_transposed = np.array(median_shape)[self.transpose_forward]
print("the transposed median shape of the dataset is ", median_shape_transposed)
self.plans_per_stage.append(
self.get_properties_for_stage(target_spacing_transposed, target_spacing_transposed, median_shape_transposed,
num_cases=len(self.list_of_cropped_npz_files),
num_modalities=num_modalities,
num_classes=len(all_classes) + 1),
)
print(self.plans_per_stage)
self.plans_per_stage = self.plans_per_stage[::-1]
self.plans_per_stage = {i: self.plans_per_stage[i] for i in range(len(self.plans_per_stage))} # convert to dict
normalization_schemes = self.determine_normalization_scheme()
# deprecated
only_keep_largest_connected_component, min_size_per_class, min_region_size_per_class = None, None, None
# these are independent of the stage
plans = {'num_stages': len(list(self.plans_per_stage.keys())), 'num_modalities': num_modalities,
'modalities': modalities, 'normalization_schemes': normalization_schemes,
'dataset_properties': self.dataset_properties, 'list_of_npz_files': self.list_of_cropped_npz_files,
'original_spacings': spacings, 'original_sizes': sizes,
'preprocessed_data_folder': self.preprocessed_output_folder, 'num_classes': len(all_classes),
'all_classes': all_classes, 'base_num_features': self.unet_base_num_features,
'use_mask_for_norm': use_nonzero_mask_for_normalization,
'keep_only_largest_region': only_keep_largest_connected_component,
'min_region_size_per_class': min_region_size_per_class, 'min_size_per_class': min_size_per_class,
'transpose_forward': self.transpose_forward, 'transpose_backward': self.transpose_backward,
'data_identifier': self.data_identifier, 'plans_per_stage': self.plans_per_stage,
'preprocessor_name': self.preprocessor_name,
}
self.plans = plans
self.save_my_plans()
| 55.433962 | 120 | 0.659746 |
4a1f1ee7c7a72581767dd4ba7701efc04662dd46 | 1,162 | py | Python | grouper/fe/handlers/role_user_view.py | TimYagan/merou | 3d3cd5e17eab5ffe259f7b41a925af6f47ec8988 | [
"Apache-2.0"
] | null | null | null | grouper/fe/handlers/role_user_view.py | TimYagan/merou | 3d3cd5e17eab5ffe259f7b41a925af6f47ec8988 | [
"Apache-2.0"
] | null | null | null | grouper/fe/handlers/role_user_view.py | TimYagan/merou | 3d3cd5e17eab5ffe259f7b41a925af6f47ec8988 | [
"Apache-2.0"
] | null | null | null | from typing import TYPE_CHECKING
from grouper.audit import get_group_audit_members_infos
from grouper.fe.handlers.template_variables import get_role_user_view_template_vars
from grouper.fe.util import GrouperHandler
from grouper.models.group import Group
from grouper.models.user import User
if TYPE_CHECKING:
from typing import Any, Optional
class RoleUserView(GrouperHandler):
def get(self, *args, **kwargs):
# type: (*Any, **Any) -> None
user_id = kwargs.get("user_id") # type: Optional[int]
name = kwargs.get("name") # type: Optional[str]
self.handle_refresh()
user = User.get(self.session, user_id, name)
if not user or not user.role_user:
return self.notfound()
group = Group.get(self.session, name=name)
actor = self.current_user
graph = self.graph
session = self.session
self.render(
"service.html",
user=user,
group=group,
audit_members_infos=get_group_audit_members_infos(self.session, group),
**get_role_user_view_template_vars(session, actor, user, group, graph)
)
| 32.277778 | 83 | 0.668675 |
4a1f213d7e5fffda04623f85cb74ac7ef0ec6214 | 2,457 | py | Python | software/ekf/ekf_real/main.py | tucuongbrt/PIFer | e2ac4d4443e1c6a6263f91c32f28dbe767590359 | [
"MIT"
] | 2 | 2021-03-17T18:23:15.000Z | 2021-03-18T06:19:44.000Z | software/ekf/ekf_real/main.py | tucuongbrt/PIFer | e2ac4d4443e1c6a6263f91c32f28dbe767590359 | [
"MIT"
] | 2 | 2021-04-03T08:50:46.000Z | 2021-04-03T08:50:57.000Z | software/ekf/ekf_real/main.py | tucuongbrt/PIFer | e2ac4d4443e1c6a6263f91c32f28dbe767590359 | [
"MIT"
] | 2 | 2021-04-14T00:18:23.000Z | 2021-05-06T05:57:54.000Z | from numpy import *
from math import *
from ekf import *
from utils import *
pule_per_revolution = 780
wheel_diameter = 0.065
file_control_measurement = open("control_measurement.txt")
file_prediction = open("prediction.txt", "w")
ranges = []
x = []
yaw = 0
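# Descriptive note (added for clarity): in the first pass below, 'M' lines carry a
# beacon measurement (anchor position, measured range r, robot yaw in degrees); the
# anchor height is taken as 3.0 m. Once three ranges are collected, the initial
# (x, y) is trilaterated. 'C' lines (wheel-encoder pulse counts) are consumed in
# the second pass further down.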
for line in file_control_measurement.readlines():
cm = line.split(" ")
if cm[0] == 'M':
x = float(cm[1])
y = float(cm[2])
z = 3.0
r = float(cm[4])
yaw = float(cm[5])*pi/180
ranges.append((x,y,z,r))
if(len(ranges)>=3):
P = trilaterate(ranges)
x = P[1]
break
print((x,yaw))
initial_state = array([x[0], x[1], yaw])
initial_covariance = diag([0.1**2, 0.1**2, (50.0 / 180.0 * pi) ** 2])
robot_width = 0.24
scanner_displacement = 0.05
control_motion_factor = 0.001
control_turn_factor = 0.001
measurement_distance_stddev = 0.05
measurement_angle_stddev = 10.0 / 180.0 * pi
ekf = ExtendedKalmanFilter(initial_state, initial_covariance)
ekf.set_control_params(robot_width, control_motion_factor, control_turn_factor)
ekf.set_measurement_params(scanner_displacement, measurement_distance_stddev, measurement_angle_stddev)
file_control_measurement = open("control_measurement.txt")
tmp = open("tmp.txt", 'w')
cnt = 0
for line in file_control_measurement.readlines():
cm = line.split(" ")
if cm[0] == 'C':
c0 = -(float(cm[1])/pule_per_revolution)*(pi*wheel_diameter)
c1 = -(float(cm[2])/pule_per_revolution)*(pi*wheel_diameter)
ekf.predict((c0,c1))
# file_prediction.write("F %f %f %f\n" % (ekf.state[0]*300+500, ekf.state[1]*300+1000, ekf.state[2]))
# file_prediction.write("F %f %f %f\n" % (ekf.state[0], ekf.state[1], ekf.state[2]))
elif cm[0] == 'M':
x = float(cm[1])
y = float(cm[2])
z = float(cm[3])
r = float(cm[4])
if x==0 and y==0:
continue
yaw = float(cm[5])*pi/180
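# Project the measured 3D range onto the ground plane before the EKF update,
# assuming the anchor sits ~3 m above the robot (matching z = 3.0 used for
# trilateration above): r_2d = sqrt(r^2 - 3^2).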
r = r**2-3**2
if r > 0:
r = sqrt(r)
m = (r,yaw)
l = (x,y)
tmp.write(str((m,l)) + '\r')
ekf.correct(m, l)
file_prediction.write("F %f %f %f\n" % (ekf.state[0]*300+500, ekf.state[1]*300+1000, ekf.state[2]))
e = ExtendedKalmanFilter.get_error_ellipse(ekf.covariance)
e[1]*=3*10**4
e[2]*=3*10**4
e.append(sqrt(ekf.covariance[2,2]))
file_prediction.write("E %f %f %f %f\n" % tuple(e)) | 31.5 | 111 | 0.590965 |
4a1f213fef5110a2b785dbece406128e01056056 | 124 | py | Python | tests/regression/diabetes/ws_diabetes_SVR_linear_hive_code_gen.py | antoinecarme/sklearn2sql_heroku | d680db10683daa419324461eeea851dd8b103ad5 | [
"BSD-3-Clause"
] | 1 | 2019-07-09T14:45:18.000Z | 2019-07-09T14:45:18.000Z | tests/regression/diabetes/ws_diabetes_SVR_linear_hive_code_gen.py | antoinecarme/sklearn2sql_heroku | d680db10683daa419324461eeea851dd8b103ad5 | [
"BSD-3-Clause"
] | 5 | 2017-11-13T13:35:37.000Z | 2021-11-11T12:57:20.000Z | tests/regression/diabetes/ws_diabetes_SVR_linear_hive_code_gen.py | antoinecarme/sklearn2sql_heroku | d680db10683daa419324461eeea851dd8b103ad5 | [
"BSD-3-Clause"
] | 1 | 2021-09-19T15:05:33.000Z | 2021-09-19T15:05:33.000Z | from sklearn2sql_heroku.tests.regression import generic as reg_gen
reg_gen.test_model("SVR_linear" , "diabetes" , "hive")
| 24.8 | 66 | 0.790323 |
4a1f2294791c93edd803bc168188cbd56182b122 | 11,522 | py | Python | TensorflowFL/Weight_Def.py | BUAA-BDA/FedShapley | d4b257c3070f85e81cc9bb93f98dd79472817cec | [
"MIT"
] | 16 | 2020-04-27T15:41:24.000Z | 2022-03-11T05:46:56.000Z | TensorflowFL/Weight_Def.py | syl18via/FL_intensive | bfc12da88f8f9ae04f1a1bb6da2a8835d70c66de | [
"MIT"
] | null | null | null | TensorflowFL/Weight_Def.py | syl18via/FL_intensive | bfc12da88f8f9ae04f1a1bb6da2a8835d70c66de | [
"MIT"
] | 6 | 2020-05-07T19:03:00.000Z | 2021-08-20T08:35:35.000Z | import tensorflow_federated as tff
import tensorflow.compat.v1 as tf
import numpy as np
import time
import random
from scipy.special import comb, perm
import os
# tf.compat.v1.enable_v2_behavior()
# tf.compat.v1.enable_eager_execution()
NUM_EXAMPLES_PER_USER = 1000
BATCH_SIZE = 100
NUM_AGENT = 5
def get_data_for_digit(source, digit):
output_sequence = []
all_samples = [i for i, d in enumerate(source[1]) if d == digit]
for i in range(0, len(all_samples), BATCH_SIZE):
batch_samples = all_samples[i:i + BATCH_SIZE]
output_sequence.append({
'x': np.array([source[0][i].flatten() / 255.0 for i in batch_samples],
dtype=np.float32),
'y': np.array([source[1][i] for i in batch_samples], dtype=np.int32)})
return output_sequence
def get_data_for_digit_test(source, digit):
output_sequence = []
all_samples = [i for i, d in enumerate(source[1]) if d == digit]
for i in range(0, len(all_samples)):
output_sequence.append({
'x': np.array(source[0][all_samples[i]].flatten() / 255.0,
dtype=np.float32),
'y': np.array(source[1][all_samples[i]], dtype=np.int32)})
return output_sequence
def get_data_for_federated_agents(source, num):
# add weights 2:3:4:5:6
PIECE = int(5421/20)
left=int((num+2)*(num+1)/2)-1
output_sequence = []
Samples = []
for digit in range(0, 10):
samples = [i for i, d in enumerate(source[1]) if d == digit]
samples = samples[0:5421]
Samples.append(samples)
all_samples = []
for sample in Samples:
for sample_index in range(left*PIECE,(left+num+2)*PIECE):
all_samples.append(sample[sample_index])
# all_samples = [i for i in range(int(num*(len(source[1])/NUM_AGENT)), int((num+1)*(len(source[1])/NUM_AGENT)))]
for i in range(0, len(all_samples), BATCH_SIZE):
batch_samples = all_samples[i:i + BATCH_SIZE]
output_sequence.append({
'x': np.array([source[0][i].flatten() / 255.0 for i in batch_samples],
dtype=np.float32),
'y': np.array([source[1][i] for i in batch_samples], dtype=np.int32)})
return output_sequence
BATCH_TYPE = tff.NamedTupleType([
('x', tff.TensorType(tf.float32, [None, 784])),
('y', tff.TensorType(tf.int32, [None]))])
MODEL_TYPE = tff.NamedTupleType([
('weights', tff.TensorType(tf.float32, [784, 10])),
('bias', tff.TensorType(tf.float32, [10]))])
@tff.tf_computation(MODEL_TYPE, BATCH_TYPE)
def batch_loss(model, batch):
predicted_y = tf.nn.softmax(tf.matmul(batch.x, model.weights) + model.bias)
return -tf.reduce_mean(tf.reduce_sum(
tf.one_hot(batch.y, 10) * tf.log(predicted_y), axis=[1]))
@tff.tf_computation(MODEL_TYPE, BATCH_TYPE, tf.float32)
def batch_train(initial_model, batch, learning_rate):
# Define a group of model variables and set them to `initial_model`.
model_vars = tff.utils.create_variables('v', MODEL_TYPE)
init_model = tff.utils.assign(model_vars, initial_model)
# Perform one step of gradient descent using loss from `batch_loss`.
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
with tf.control_dependencies([init_model]):
train_model = optimizer.minimize(batch_loss(model_vars, batch))
# Return the model vars after performing this gradient descent step.
with tf.control_dependencies([train_model]):
return tff.utils.identity(model_vars)
LOCAL_DATA_TYPE = tff.SequenceType(BATCH_TYPE)
@tff.federated_computation(MODEL_TYPE, tf.float32, LOCAL_DATA_TYPE)
def local_train(initial_model, learning_rate, all_batches):
# Mapping function to apply to each batch.
@tff.federated_computation(MODEL_TYPE, BATCH_TYPE)
def batch_fn(model, batch):
return batch_train(model, batch, learning_rate)
return tff.sequence_reduce(all_batches, initial_model, batch_fn)
@tff.federated_computation(MODEL_TYPE, LOCAL_DATA_TYPE)
def local_eval(model, all_batches):
#
return tff.sequence_sum(
tff.sequence_map(
tff.federated_computation(
lambda b: batch_loss(model, b), BATCH_TYPE),
all_batches))
SERVER_MODEL_TYPE = tff.FederatedType(MODEL_TYPE, tff.SERVER, all_equal=True)
CLIENT_DATA_TYPE = tff.FederatedType(LOCAL_DATA_TYPE, tff.CLIENTS)
@tff.federated_computation(SERVER_MODEL_TYPE, CLIENT_DATA_TYPE)
def federated_eval(model, data):
return tff.federated_mean(
tff.federated_map(local_eval, [tff.federated_broadcast(model), data]))
SERVER_FLOAT_TYPE = tff.FederatedType(tf.float32, tff.SERVER, all_equal=True)
@tff.federated_computation(
SERVER_MODEL_TYPE, SERVER_FLOAT_TYPE, CLIENT_DATA_TYPE)
def federated_train(model, learning_rate, data):
return tff.federated_map(
local_train,
[tff.federated_broadcast(model),
tff.federated_broadcast(learning_rate),
data])
def readTestImagesFromFile(distr_same):
ret = []
if distr_same:
f = open(os.path.join(os.path.dirname(__file__), "test_images1_.txt"), encoding="utf-8")
else:
f = open(os.path.join(os.path.dirname(__file__), "test_images1_.txt"), encoding="utf-8")
lines = f.readlines()
for line in lines:
tem_ret = []
p = line.replace("[", "").replace("]", "").replace(
"\n", "").split("\t")
for i in p:
if i != "":
tem_ret.append(float(i))
ret.append(tem_ret)
return np.asarray(ret)
def readTestLabelsFromFile(distr_same):
ret = []
if distr_same:
f = open(os.path.join(os.path.dirname(__file__), "test_labels_.txt"), encoding="utf-8")
else:
f = open(os.path.join(os.path.dirname(__file__), "test_labels_.txt"), encoding="utf-8")
lines = f.readlines()
for line in lines:
tem_ret = []
p = line.replace("[", "").replace("]", "").replace("\n", "").split(" ")
for i in p:
if i != "":
tem_ret.append(float(i))
ret.append(tem_ret)
return np.asarray(ret)
def remove_list_indexed(removed_ele, original_l, ll):
new_original_l = []
for i in original_l:
new_original_l.append(i)
for i in new_original_l:
if i == removed_ele:
new_original_l.remove(i)
for i in range(len(ll)):
if set(ll[i]) == set(new_original_l):
return i
return -1
def shapley_list_indexed(original_l, ll):
for i in range(len(ll)):
if set(ll[i]) == set(original_l):
return i
return -1
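# Enumerate all 2**N subsets of `items`, using the bits of i as membership
# flags.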
def PowerSetsBinary(items):
N = len(items)
set_all = []
for i in range(2 ** N):
combo = []
for j in range(N):
if (i >> j) % 2 == 1:
combo.append(items[j])
set_all.append(combo)
return set_all
if __name__ == "__main__":
start_time = time.time()
mnist_train, mnist_test = tf.keras.datasets.mnist.load_data()
    # add weights
# data_num = np.asarray([1000,2000,3000,4000,5000])
# agents_weights = np.divide(data_num, data_num.sum())
# agents_weights = 0.2032
DISTRIBUTION_TYPE = "SAME"
federated_train_data_divide = None
test_images = None
test_labels_onehot = None
if DISTRIBUTION_TYPE == "SAME":
federated_train_data_divide = [get_data_for_federated_agents(
mnist_train, d) for d in range(NUM_AGENT)]
test_images = readTestImagesFromFile(False)
test_labels_onehot = readTestLabelsFromFile(False)
    # add weights: weight each agent by the number of batches it holds
data_num = np.asarray([len(sample) for sample in federated_train_data_divide])
all_sets = PowerSetsBinary([i for i in range(NUM_AGENT)])
group_shapley_value = []
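    # Train and evaluate one federated model per coalition in the power set;
    # the resulting test accuracies feed the Shapley-value computation below.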
for ss in all_sets:
federated_train_data = []
data_num_sum = 0
agents_weights = []
for item in ss:
federated_train_data.append(federated_train_data_divide[item])
data_num_sum += data_num[item]
for item in ss:
agents_weights.append(data_num[item]/data_num_sum)
f_ini_p = open(os.path.join(os.path.dirname(__file__), "initial_model_parameters.txt"), "r")
para_lines = f_ini_p.readlines()
w_paras = para_lines[0].split("\t")
w_paras = [float(i) for i in w_paras]
b_paras = para_lines[1].split("\t")
b_paras = [float(i) for i in b_paras]
w_initial = np.asarray(w_paras, dtype=np.float32).reshape([784, 10])
b_initial = np.asarray(b_paras, dtype=np.float32).reshape([10])
f_ini_p.close()
initial_model = {
'weights': w_initial,
'bias': b_initial
}
agents_weights = np.asarray(agents_weights)
model = initial_model
learning_rate = 0.1
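        # 50 rounds of federated training with manual weighted averaging of the
        # client models and a learning rate that decays by 10% per round.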
for round_num in range(50):
local_models = federated_train(
model, learning_rate, federated_train_data)
# print(len(local_models))
print("learning rate: ", learning_rate)
m_w = np.zeros([784, 10], dtype=np.float32)
m_b = np.zeros([10], dtype=np.float32)
for local_model_index in range(len(local_models)):
m_w = np.add(np.multiply(local_models[local_model_index][0], agents_weights[local_model_index]), m_w)
m_b = np.add(np.multiply(local_models[local_model_index][1], agents_weights[local_model_index]), m_b)
model = {
'weights': m_w,
'bias': m_b
}
learning_rate = learning_rate * 0.9
loss = federated_eval(model, federated_train_data)
print('round {}, loss={}'.format(round_num, loss))
print(time.time() - start_time)
'''model = federated_train(model, learning_rate, federated_train_data)
learning_rate = learning_rate * 0.9
loss = federated_eval(model, federated_train_data)
print('round {}, loss={}'.format(round_num, loss))'''
m = np.dot(test_images, np.asarray(model['weights']))
test_result = m + np.asarray(model['bias'])
y = tf.nn.softmax(test_result)
correct_prediction = tf.equal(
            tf.argmax(y, 1), tf.argmax(test_labels_onehot, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
group_shapley_value.append(accuracy.numpy())
print("combination finished ", time.time() - start_time)
print(str(ss) + "\t" +
str(group_shapley_value[len(group_shapley_value) - 1]))
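    # Per-agent Shapley-style value: sum of the agent's marginal contributions
    # to test accuracy over all coalitions that contain it, weighted by
    # 1 / C(NUM_AGENT - 1, |coalition without the agent|).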
agent_shapley = []
for index in range(NUM_AGENT):
shapley = 0.0
for j in all_sets:
if index in j:
remove_list_index = remove_list_indexed(index, j, all_sets)
if remove_list_index != -1:
shapley += (group_shapley_value[shapley_list_indexed(j, all_sets)] - group_shapley_value[
remove_list_index]) / (comb(NUM_AGENT - 1, len(all_sets[remove_list_index])))
agent_shapley.append(shapley)
for ag_s in agent_shapley:
print(ag_s)
print("end_time", time.time() - start_time)
# --- Next file: Tests/test_geo.py (repo: mukhyala/biopython, license: BSD-3-Clause) ---
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
"""Tests the basic functionality of the GEO parsers."""
import os
import sys
import unittest
from Bio import Geo
if sys.version_info[0] >= 3:
# Python 3 problem: Can't use utf8 on Tests/Geo/soft_ex_*.txt
# due to micro (\xb5) and degrees (\xb0) symbols
import builtins
def open(path):
return builtins.open(path, encoding="latin")
class TestGeo(unittest.TestCase):
def test_soft_ex_dual(self):
path = "Geo/soft_ex_dual.txt"
with open(path) as handle:
records = Geo.parse(handle)
record = next(records)
self.assertEqual(record.entity_type, "SAMPLE")
self.assertEqual(record.entity_id, "Control Embyronic Stem Cell Replicate 1")
self.assertEqual(len(record.entity_attributes), 24)
self.assertEqual(record.entity_attributes["Sample_extract_protocol_ch2"], "TriZol procedure")
self.assertEqual(record.entity_attributes["Sample_hyb_protocol"], "Oligoarray control targets and hybridization buffer (Agilent In Situ Hybridization Kit Plus) were added, and samples were applied to microarrays enclosed in Agilent SureHyb-enabled hybridization chambers. After hybridization, slides were washed sequentially with 6x SSC/0.005% Triton X-102 and 0.1x SSC/0.005% Triton X-102 before scanning. Slides were hybridized for 17 h at 60\xb0C in a rotating oven, and washed.")
self.assertEqual(record.entity_attributes["Sample_extract_protocol_ch1"], "TriZol procedure")
self.assertEqual(record.entity_attributes["Sample_platform_id"], "GPL3759")
self.assertEqual(record.entity_attributes["Sample_title"], "Control Embyronic Stem Cell Replicate 1")
self.assertEqual(record.entity_attributes["Sample_supplementary_file"], "file1.gpr")
self.assertEqual(record.entity_attributes["Sample_organism_ch2"], "Mus musculus")
self.assertEqual(record.entity_attributes["Sample_organism_ch1"], "Mus musculus")
self.assertEqual(record.entity_attributes["Sample_label_ch1"], "Cy5")
self.assertEqual(len(record.entity_attributes["Sample_scan_protocol"]), 2)
self.assertEqual(record.entity_attributes["Sample_scan_protocol"][0], "Scanned on an Agilent G2565AA scanner.")
self.assertEqual(record.entity_attributes["Sample_scan_protocol"][1], "Images were quantified using Agilent Feature Extraction Software (version A.7.5).")
self.assertEqual(record.entity_attributes["sample_table_begin"], "")
self.assertEqual(record.entity_attributes["Sample_label_protocol_ch2"], "10 \xb5g of total RNA were primed with 2 \xb5l of 100 \xb5M T16N2 DNA primer at 70\xb0C for 10 min, then reversed transcribed at 42\xb0C for 1 h in the presence of 400 U SuperScript II RTase (Invitrogen), and 100 \xb5M each dATP, dTTP, dGTP, with 25 \xb5M dCTP, 25 \xb5M Cy5-labeled dCTP (NEN Life Science, Boston, MA), and RNase inhibitor (Invitrogen). RNA was then degraded with RNase A, and labeled cDNAs were purified using QIAquick PCR columns (Qiagen).")
self.assertEqual(record.entity_attributes["Sample_label_protocol_ch1"], "10 \xb5g of total RNA were primed with 2 \xb5l of 100 \xb5M T16N2 DNA primer at 70\xb0C for 10 min, then reversed transcribed at 42\xb0C for 1 h in the presence of 400 U SuperScript II RTase (Invitrogen), and 100 \xb5M each dATP, dTTP, dGTP, with 25 \xb5M dCTP, 25 \xb5M Cy5-labeled dCTP (NEN Life Science, Boston, MA), and RNase inhibitor (Invitrogen). RNA was then degraded with RNase A, and labeled cDNAs were purified using QIAquick PCR columns (Qiagen).")
self.assertEqual(record.entity_attributes["Sample_data_processing"], "LOWESS normalized, background subtracted VALUE data obtained from log of processed Red signal/processed Green signal.")
self.assertEqual(record.entity_attributes["sample_table_end"], "")
self.assertEqual(record.entity_attributes["Sample_label_ch2"], "Cy3")
self.assertEqual(record.entity_attributes["Sample_description"], "Biological replicate 1 of 4. Control embryonic stem cells, untreated, harvested after several passages.")
self.assertEqual(record.entity_attributes["Sample_source_name_ch1"], "Total RNA from murine ES-D3 embryonic stem cells labeled with Cyanine-5 (red).")
self.assertEqual(record.entity_attributes["Sample_source_name_ch2"], "Total RNA from pooled whole mouse embryos e17.5, labeled with Cyanine-3 (green).")
self.assertEqual(record.entity_attributes["Sample_molecule_ch1"], "total RNA")
self.assertEqual(record.entity_attributes["Sample_molecule_ch2"], "total RNA")
self.assertEqual(record.entity_attributes["Sample_growth_protocol_ch1"], "ES cells were kept in an undifferentiated, pluripotent state by using 1000 IU/ml leukemia inhibitory factor (LIF; Chemicon, ESGRO, ESG1107), and grown on top of murine embryonic fibroblasts feeder layer inactivated by 10 ug/ml of mitomycin C (Sigma, St. Louis). ES cells were cultured on 0.1% gelatin-coated plastic dishes in ES medium containing Dulbecco modified Eagle medium supplemented with 15% fetal calf serum, 0.1 mM beta-mercaptoethanol, 2 mM glutamine, and 0.1 mN non-essential amino acids.")
self.assertEqual(len(record.entity_attributes["Sample_characteristics_ch1"]), 4)
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][0], "ES-D3 cell line (CRL-1934)")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][1], "Age: day 4")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][2], "Tissue: blastocytes")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][3], "Strain: 129/Sv mice")
self.assertEqual(len(record.entity_attributes["Sample_characteristics_ch2"]), 3)
self.assertEqual(record.entity_attributes["Sample_characteristics_ch2"][0], "Strain: C57BL/6")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch2"][1], "Age: e17.5 d")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch2"][2], "Tissue: whole embryo")
self.assertEqual(len(record.col_defs), 6)
self.assertEqual(record.col_defs["ID_REF"], "")
self.assertEqual(record.col_defs["VALUE"], "log(REDsignal/GREENsignal) per feature (processed signals used).")
self.assertEqual(record.col_defs["gProcessedSignal"], 'Dye-normalized signal after surrogate "algorithm," green "channel," used for computation of log ratio.')
self.assertEqual(record.col_defs["LogRatioError"], "error of the log ratio calculated according to the error model chosen.")
self.assertEqual(record.col_defs["PValueLogRatio"], "Significance level of the Log Ratio computed for a feature.")
self.assertEqual(record.col_defs["rProcessedSignal"], 'Dye-normalized signal after surrogate "algorithm," red "channel," used for computation of log ratio.')
self.assertEqual(len(record.table_rows), 21)
self.assertEqual(len(record.table_rows[0]), 6)
self.assertEqual(record.table_rows[0][0], "ID_REF")
self.assertEqual(record.table_rows[0][1], "VALUE")
self.assertEqual(record.table_rows[0][2], "LogRatioError")
self.assertEqual(record.table_rows[0][3], "PValueLogRatio")
self.assertEqual(record.table_rows[0][4], "gProcessedSignal")
self.assertEqual(record.table_rows[0][5], "rProcessedSignal")
self.assertEqual(len(record.table_rows[1]), 6)
self.assertEqual(record.table_rows[1][0], "1")
self.assertEqual(record.table_rows[1][1], "-1.6274758")
self.assertEqual(record.table_rows[1][2], "1.36E-01")
self.assertEqual(record.table_rows[1][3], "6.41E-33")
self.assertEqual(record.table_rows[1][4], "9.13E+03")
self.assertEqual(record.table_rows[1][5], "2.15E+02")
self.assertEqual(len(record.table_rows[2]), 6)
self.assertEqual(record.table_rows[2][0], "2")
self.assertEqual(record.table_rows[2][1], "0.1412248")
self.assertEqual(record.table_rows[2][2], "1.34E+00")
self.assertEqual(record.table_rows[2][3], "1.00E+00")
self.assertEqual(record.table_rows[2][4], "4.14E+01")
self.assertEqual(record.table_rows[2][5], "5.72E+01")
self.assertEqual(len(record.table_rows[3]), 6)
self.assertEqual(record.table_rows[3][0], "3")
self.assertEqual(record.table_rows[3][1], "0.1827684")
self.assertEqual(record.table_rows[3][2], "5.19E-02")
self.assertEqual(record.table_rows[3][3], "4.33E-04")
self.assertEqual(record.table_rows[3][4], "5.13E+03")
self.assertEqual(record.table_rows[3][5], "7.81E+03")
self.assertEqual(len(record.table_rows[4]), 6)
self.assertEqual(record.table_rows[4][0], "4")
self.assertEqual(record.table_rows[4][1], "-0.3932267")
self.assertEqual(record.table_rows[4][2], "6.08E-02")
self.assertEqual(record.table_rows[4][3], "1.02E-10")
self.assertEqual(record.table_rows[4][4], "4.65E+03")
self.assertEqual(record.table_rows[4][5], "1.88E+03")
self.assertEqual(len(record.table_rows[5]), 6)
self.assertEqual(record.table_rows[5][0], "5")
self.assertEqual(record.table_rows[5][1], "-0.9865994")
self.assertEqual(record.table_rows[5][2], "1.05E-01")
self.assertEqual(record.table_rows[5][3], "6.32E-21")
self.assertEqual(record.table_rows[5][4], "2.91E+03")
self.assertEqual(record.table_rows[5][5], "3.01E+02")
self.assertEqual(len(record.table_rows[6]), 6)
self.assertEqual(record.table_rows[6][0], "6")
self.assertEqual(record.table_rows[6][1], "0.0238812")
self.assertEqual(record.table_rows[6][2], "1.02E-01")
self.assertEqual(record.table_rows[6][3], "8.15E-01")
self.assertEqual(record.table_rows[6][4], "7.08E+02")
self.assertEqual(record.table_rows[6][5], "7.48E+02")
self.assertEqual(len(record.table_rows[7]), 6)
self.assertEqual(record.table_rows[7][0], "7")
self.assertEqual(record.table_rows[7][1], "-1.4841822")
self.assertEqual(record.table_rows[7][2], "1.25E-01")
self.assertEqual(record.table_rows[7][3], "1.42E-32")
self.assertEqual(record.table_rows[7][4], "1.02E+04")
self.assertEqual(record.table_rows[7][5], "3.36E+02")
self.assertEqual(len(record.table_rows[8]), 6)
self.assertEqual(record.table_rows[8][0], "8")
self.assertEqual(record.table_rows[8][1], "-1.8261356")
self.assertEqual(record.table_rows[8][2], "4.15E-01")
self.assertEqual(record.table_rows[8][3], "1.10E-05")
self.assertEqual(record.table_rows[8][4], "7.19E+02")
self.assertEqual(record.table_rows[8][5], "1.07E+01")
self.assertEqual(len(record.table_rows[9]), 6)
self.assertEqual(record.table_rows[9][0], "9")
self.assertEqual(record.table_rows[9][1], "-1.0344779")
self.assertEqual(record.table_rows[9][2], "1.78E+00")
self.assertEqual(record.table_rows[9][3], "1.00E+00")
self.assertEqual(record.table_rows[9][4], "9.62E+01")
self.assertEqual(record.table_rows[9][5], "8.89E+00")
self.assertEqual(len(record.table_rows[10]), 6)
self.assertEqual(record.table_rows[10][0], "10")
self.assertEqual(record.table_rows[10][1], "0.2405891")
self.assertEqual(record.table_rows[10][2], "3.09E-01")
self.assertEqual(record.table_rows[10][3], "4.36E-01")
self.assertEqual(record.table_rows[10][4], "1.61E+02")
self.assertEqual(record.table_rows[10][5], "2.80E+02")
self.assertEqual(len(record.table_rows[11]), 6)
self.assertEqual(record.table_rows[11][0], "11")
self.assertEqual(record.table_rows[11][1], "0.3209366")
self.assertEqual(record.table_rows[11][2], "3.59E-01")
self.assertEqual(record.table_rows[11][3], "3.71E-01")
self.assertEqual(record.table_rows[11][4], "1.25E+02")
self.assertEqual(record.table_rows[11][5], "2.61E+02")
self.assertEqual(len(record.table_rows[12]), 6)
self.assertEqual(record.table_rows[12][0], "12")
self.assertEqual(record.table_rows[12][1], "0.358304")
self.assertEqual(record.table_rows[12][2], "2.06E+00")
self.assertEqual(record.table_rows[12][3], "1.00E+00")
self.assertEqual(record.table_rows[12][4], "2.04E+01")
self.assertEqual(record.table_rows[12][5], "4.66E+01")
self.assertEqual(len(record.table_rows[13]), 6)
self.assertEqual(record.table_rows[13][0], "13")
self.assertEqual(record.table_rows[13][1], "-0.0122072")
self.assertEqual(record.table_rows[13][2], "3.64E-01")
self.assertEqual(record.table_rows[13][3], "9.73E-01")
self.assertEqual(record.table_rows[13][4], "1.84E+02")
self.assertEqual(record.table_rows[13][5], "1.79E+02")
self.assertEqual(len(record.table_rows[14]), 6)
self.assertEqual(record.table_rows[14][0], "14")
self.assertEqual(record.table_rows[14][1], "-1.5480396")
self.assertEqual(record.table_rows[14][2], "1.30E-01")
self.assertEqual(record.table_rows[14][3], "7.21E-33")
self.assertEqual(record.table_rows[14][4], "1.02E+04")
self.assertEqual(record.table_rows[14][5], "2.90E+02")
self.assertEqual(len(record.table_rows[15]), 6)
self.assertEqual(record.table_rows[15][0], "15")
self.assertEqual(record.table_rows[15][1], "0.0073419")
self.assertEqual(record.table_rows[15][2], "2.98E-01")
self.assertEqual(record.table_rows[15][3], "9.80E-01")
self.assertEqual(record.table_rows[15][4], "2.21E+02")
self.assertEqual(record.table_rows[15][5], "2.25E+02")
self.assertEqual(len(record.table_rows[16]), 6)
self.assertEqual(record.table_rows[16][0], "16")
self.assertEqual(record.table_rows[16][1], "-0.2267015")
self.assertEqual(record.table_rows[16][2], "9.44E-01")
self.assertEqual(record.table_rows[16][3], "8.10E-01")
self.assertEqual(record.table_rows[16][4], "8.90E+01")
self.assertEqual(record.table_rows[16][5], "5.28E+01")
self.assertEqual(len(record.table_rows[17]), 6)
self.assertEqual(record.table_rows[17][0], "17")
self.assertEqual(record.table_rows[17][1], "-0.1484023")
self.assertEqual(record.table_rows[17][2], "8.01E-01")
self.assertEqual(record.table_rows[17][3], "8.53E-01")
self.assertEqual(record.table_rows[17][4], "9.65E+01")
self.assertEqual(record.table_rows[17][5], "6.86E+01")
self.assertEqual(len(record.table_rows[18]), 6)
self.assertEqual(record.table_rows[18][0], "18")
self.assertEqual(record.table_rows[18][1], "-0.6122195")
self.assertEqual(record.table_rows[18][2], "1.28E-01")
self.assertEqual(record.table_rows[18][3], "1.69E-06")
self.assertEqual(record.table_rows[18][4], "1.12E+03")
self.assertEqual(record.table_rows[18][5], "2.73E+02")
self.assertEqual(len(record.table_rows[19]), 6)
self.assertEqual(record.table_rows[19][0], "19")
self.assertEqual(record.table_rows[19][1], "0.0796905")
self.assertEqual(record.table_rows[19][2], "8.78E-02")
self.assertEqual(record.table_rows[19][3], "3.64E-01")
self.assertEqual(record.table_rows[19][4], "8.21E+02")
self.assertEqual(record.table_rows[19][5], "9.87E+02")
self.assertEqual(len(record.table_rows[20]), 6)
self.assertEqual(record.table_rows[20][0], "20")
self.assertEqual(record.table_rows[20][1], "-0.084895")
self.assertEqual(record.table_rows[20][2], "9.38E-01")
self.assertEqual(record.table_rows[20][3], "9.28E-01")
self.assertEqual(record.table_rows[20][4], "7.68E+01")
self.assertEqual(record.table_rows[20][5], "6.32E+01")
record = next(records)
self.assertEqual(record.entity_type, "SAMPLE")
self.assertEqual(record.entity_id, "Control Embyronic Stem Cell Replicate 2")
self.assertEqual(len(record.entity_attributes), 24)
self.assertEqual(record.entity_attributes["Sample_extract_protocol_ch2"], "TriZol procedure")
self.assertEqual(record.entity_attributes["Sample_hyb_protocol"], "Oligoarray control targets and hybridization buffer (Agilent In Situ Hybridization Kit Plus) were added, and samples were applied to microarrays enclosed in Agilent SureHyb-enabled hybridization chambers. After hybridization, slides were washed sequentially with 6x SSC/0.005% Triton X-102 and 0.1x SSC/0.005% Triton X-102 before scanning. Slides were hybridized for 17 h at 60\xb0C in a rotating oven, and washed.")
self.assertEqual(record.entity_attributes["Sample_extract_protocol_ch1"], "TriZol procedure")
self.assertEqual(record.entity_attributes["Sample_platform_id"], "GPL3759")
self.assertEqual(record.entity_attributes["Sample_title"], "Control Embyronic Stem Cell Replicate 2")
self.assertEqual(record.entity_attributes["Sample_supplementary_file"], "file2.gpr")
self.assertEqual(record.entity_attributes["Sample_organism_ch2"], "Mus musculus")
self.assertEqual(record.entity_attributes["Sample_organism_ch1"], "Mus musculus")
self.assertEqual(record.entity_attributes["Sample_label_ch1"], "Cy5")
self.assertEqual(len(record.entity_attributes["Sample_scan_protocol"]), 2)
self.assertEqual(record.entity_attributes["Sample_scan_protocol"][0], "Scanned on an Agilent G2565AA scanner.")
self.assertEqual(record.entity_attributes["Sample_scan_protocol"][1], "Images were quantified using Agilent Feature Extraction Software (version A.7.5).")
self.assertEqual(record.entity_attributes["sample_table_begin"], "")
self.assertEqual(record.entity_attributes["Sample_label_protocol_ch2"], "10 \xb5g of total RNA were primed with 2 \xb5l of 100 \xb5M T16N2 DNA primer at 70\xb0C for 10 min, then reversed transcribed at 42\xb0C for 1 h in the presence of 400 U SuperScript II RTase (Invitrogen), and 100 \xb5M each dATP, dTTP, dGTP, with 25 \xb5M dCTP, 25 \xb5M Cy5-labeled dCTP (NEN Life Science, Boston, MA), and RNase inhibitor (Invitrogen). RNA was then degraded with RNase A, and labeled cDNAs were purified using QIAquick PCR columns (Qiagen).")
self.assertEqual(record.entity_attributes["Sample_label_protocol_ch1"], "10 \xb5g of total RNA were primed with 2 \xb5l of 100 \xb5M T16N2 DNA primer at 70\xb0C for 10 min, then reversed transcribed at 42\xb0C for 1 h in the presence of 400 U SuperScript II RTase (Invitrogen), and 100 \xb5M each dATP, dTTP, dGTP, with 25 \xb5M dCTP, 25 \xb5M Cy5-labeled dCTP (NEN Life Science, Boston, MA), and RNase inhibitor (Invitrogen). RNA was then degraded with RNase A, and labeled cDNAs were purified using QIAquick PCR columns (Qiagen).")
self.assertEqual(record.entity_attributes["Sample_data_processing"], "LOWESS normalized, background subtracted VALUE data obtained from log of processed Red signal/processed Green signal.")
self.assertEqual(record.entity_attributes["sample_table_end"], "")
self.assertEqual(record.entity_attributes["Sample_label_ch2"], "Cy3")
self.assertEqual(record.entity_attributes["Sample_description"], "Biological replicate 2 of 4. Control embryonic stem cells, untreated, harvested after several passages.")
self.assertEqual(record.entity_attributes["Sample_source_name_ch1"], "Total RNA from murine ES-D3 embryonic stem cells labeled with Cyanine-5 (red).")
self.assertEqual(record.entity_attributes["Sample_source_name_ch2"], "Total RNA from pooled whole mouse embryos e17.5, labeled with Cyanine-3 (green).")
self.assertEqual(record.entity_attributes["Sample_molecule_ch1"], "total RNA")
self.assertEqual(record.entity_attributes["Sample_molecule_ch2"], "total RNA")
self.assertEqual(record.entity_attributes["Sample_growth_protocol_ch1"], "ES cells were kept in an undifferentiated, pluripotent state by using 1000 IU/ml leukemia inhibitory factor (LIF; Chemicon, ESGRO, ESG1107), and grown on top of murine embryonic fibroblasts feeder layer inactivated by 10 ug/ml of mitomycin C (Sigma, St. Louis). ES cells were cultured on 0.1% gelatin-coated plastic dishes in ES medium containing Dulbecco modified Eagle medium supplemented with 15% fetal calf serum, 0.1 mM beta-mercaptoethanol, 2 mM glutamine, and 0.1 mN non-essential amino acids.")
self.assertEqual(len(record.entity_attributes["Sample_characteristics_ch1"]), 4)
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][0], "ES-D3 cell line (CRL-1934)")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][1], "Age: day 4")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][2], "Tissue: blastocytes")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][3], "Strain: 129/Sv mice")
self.assertEqual(len(record.entity_attributes["Sample_characteristics_ch2"]), 3)
self.assertEqual(record.entity_attributes["Sample_characteristics_ch2"][0], "Strain: C57BL/6")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch2"][1], "Age: e17.5 d")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch2"][2], "Tissue: whole embryo")
self.assertEqual(len(record.col_defs), 6)
self.assertEqual(record.col_defs["ID_REF"], "")
self.assertEqual(record.col_defs["VALUE"], "log(REDsignal/GREENsignal) per feature (processed signals used).")
self.assertEqual(record.col_defs["gProcessedSignal"], 'Dye-normalized signal after surrogate "algorithm," green "channel," used for computation of log ratio.')
self.assertEqual(record.col_defs["LogRatioError"], "error of the log ratio calculated according to the error model chosen.")
self.assertEqual(record.col_defs["PValueLogRatio"], "Significance level of the Log Ratio computed for a feature.")
self.assertEqual(record.col_defs["rProcessedSignal"], 'Dye-normalized signal after surrogate "algorithm," red "channel," used for computation of log ratio.')
self.assertEqual(len(record.table_rows), 21)
self.assertEqual(len(record.table_rows[0]), 6)
self.assertEqual(record.table_rows[0][0], "ID_REF")
self.assertEqual(record.table_rows[0][1], "VALUE")
self.assertEqual(record.table_rows[0][2], "LogRatioError")
self.assertEqual(record.table_rows[0][3], "PValueLogRatio")
self.assertEqual(record.table_rows[0][4], "gProcessedSignal")
self.assertEqual(record.table_rows[0][5], "rProcessedSignal")
self.assertEqual(len(record.table_rows[1]), 6)
self.assertEqual(record.table_rows[1][0], "1")
self.assertEqual(record.table_rows[1][1], "-1.1697263")
self.assertEqual(record.table_rows[1][2], "1.23E-01")
self.assertEqual(record.table_rows[1][3], "2.14E-21")
self.assertEqual(record.table_rows[1][4], "3.17E+03")
self.assertEqual(record.table_rows[1][5], "2.14E+02")
self.assertEqual(len(record.table_rows[2]), 6)
self.assertEqual(record.table_rows[2][0], "2")
self.assertEqual(record.table_rows[2][1], "-0.1111353")
self.assertEqual(record.table_rows[2][2], "1.63E+00")
self.assertEqual(record.table_rows[2][3], "9.46E-01")
self.assertEqual(record.table_rows[2][4], "5.43E+01")
self.assertEqual(record.table_rows[2][5], "4.20E+01")
self.assertEqual(len(record.table_rows[3]), 6)
self.assertEqual(record.table_rows[3][0], "3")
self.assertEqual(record.table_rows[3][1], "0.1400597")
self.assertEqual(record.table_rows[3][2], "5.11E-02")
self.assertEqual(record.table_rows[3][3], "6.17E-03")
self.assertEqual(record.table_rows[3][4], "6.72E+03")
self.assertEqual(record.table_rows[3][5], "9.28E+03")
self.assertEqual(len(record.table_rows[4]), 6)
self.assertEqual(record.table_rows[4][0], "4")
self.assertEqual(record.table_rows[4][1], "-0.4820633")
self.assertEqual(record.table_rows[4][2], "6.38E-02")
self.assertEqual(record.table_rows[4][3], "4.06E-14")
self.assertEqual(record.table_rows[4][4], "6.46E+03")
self.assertEqual(record.table_rows[4][5], "2.13E+03")
self.assertEqual(len(record.table_rows[5]), 6)
self.assertEqual(record.table_rows[5][0], "5")
self.assertEqual(record.table_rows[5][1], "-1.2116196")
self.assertEqual(record.table_rows[5][2], "1.22E-01")
self.assertEqual(record.table_rows[5][3], "2.31E-23")
self.assertEqual(record.table_rows[5][4], "3.62E+03")
self.assertEqual(record.table_rows[5][5], "2.22E+02")
self.assertEqual(len(record.table_rows[6]), 6)
self.assertEqual(record.table_rows[6][0], "6")
self.assertEqual(record.table_rows[6][1], "-0.0230528")
self.assertEqual(record.table_rows[6][2], "1.04E-01")
self.assertEqual(record.table_rows[6][3], "8.24E-01")
self.assertEqual(record.table_rows[6][4], "8.76E+02")
self.assertEqual(record.table_rows[6][5], "8.31E+02")
self.assertEqual(len(record.table_rows[7]), 6)
self.assertEqual(record.table_rows[7][0], "7")
self.assertEqual(record.table_rows[7][1], "-1.1380152")
self.assertEqual(record.table_rows[7][2], "1.13E-01")
self.assertEqual(record.table_rows[7][3], "9.23E-24")
self.assertEqual(record.table_rows[7][4], "3.94E+03")
self.assertEqual(record.table_rows[7][5], "2.86E+02")
self.assertEqual(len(record.table_rows[8]), 6)
self.assertEqual(record.table_rows[8][0], "8")
self.assertEqual(record.table_rows[8][1], "-1.834596")
self.assertEqual(record.table_rows[8][2], "5.40E-01")
self.assertEqual(record.table_rows[8][3], "6.74E-04")
self.assertEqual(record.table_rows[8][4], "6.44E+02")
self.assertEqual(record.table_rows[8][5], "9.43E+00")
self.assertEqual(len(record.table_rows[9]), 6)
self.assertEqual(record.table_rows[9][0], "9")
self.assertEqual(record.table_rows[9][1], "-0.9747637")
self.assertEqual(record.table_rows[9][2], "2.14E+00")
self.assertEqual(record.table_rows[9][3], "1.00E+00")
self.assertEqual(record.table_rows[9][4], "9.17E+01")
self.assertEqual(record.table_rows[9][5], "9.72E+00")
self.assertEqual(len(record.table_rows[10]), 6)
self.assertEqual(record.table_rows[10][0], "10")
self.assertEqual(record.table_rows[10][1], "0.3874005")
self.assertEqual(record.table_rows[10][2], "2.92E-01")
self.assertEqual(record.table_rows[10][3], "1.85E-01")
self.assertEqual(record.table_rows[10][4], "1.69E+02")
self.assertEqual(record.table_rows[10][5], "4.11E+02")
self.assertEqual(len(record.table_rows[11]), 6)
self.assertEqual(record.table_rows[11][0], "11")
self.assertEqual(record.table_rows[11][1], "0.5340442")
self.assertEqual(record.table_rows[11][2], "3.29E-01")
self.assertEqual(record.table_rows[11][3], "1.04E-01")
self.assertEqual(record.table_rows[11][4], "1.23E+02")
self.assertEqual(record.table_rows[11][5], "4.20E+02")
self.assertEqual(len(record.table_rows[12]), 6)
self.assertEqual(record.table_rows[12][0], "12")
self.assertEqual(record.table_rows[12][1], "0.3260696")
self.assertEqual(record.table_rows[12][2], "1.92E+00")
self.assertEqual(record.table_rows[12][3], "8.65E-01")
self.assertEqual(record.table_rows[12][4], "2.73E+01")
self.assertEqual(record.table_rows[12][5], "5.77E+01")
self.assertEqual(len(record.table_rows[13]), 6)
self.assertEqual(record.table_rows[13][0], "13")
self.assertEqual(record.table_rows[13][1], "0.3010618")
self.assertEqual(record.table_rows[13][2], "2.84E-01")
self.assertEqual(record.table_rows[13][3], "2.90E-01")
self.assertEqual(record.table_rows[13][4], "1.93E+02")
self.assertEqual(record.table_rows[13][5], "3.87E+02")
self.assertEqual(len(record.table_rows[14]), 6)
self.assertEqual(record.table_rows[14][0], "14")
self.assertEqual(record.table_rows[14][1], "-1.0760413")
self.assertEqual(record.table_rows[14][2], "1.08E-01")
self.assertEqual(record.table_rows[14][3], "1.63E-23")
self.assertEqual(record.table_rows[14][4], "4.06E+03")
self.assertEqual(record.table_rows[14][5], "3.41E+02")
self.assertEqual(len(record.table_rows[15]), 6)
self.assertEqual(record.table_rows[15][0], "15")
self.assertEqual(record.table_rows[15][1], "-0.1167371")
self.assertEqual(record.table_rows[15][2], "3.87E-01")
self.assertEqual(record.table_rows[15][3], "7.63E-01")
self.assertEqual(record.table_rows[15][4], "2.32E+02")
self.assertEqual(record.table_rows[15][5], "1.77E+02")
self.assertEqual(len(record.table_rows[16]), 6)
self.assertEqual(record.table_rows[16][0], "16")
self.assertEqual(record.table_rows[16][1], "-0.1936322")
self.assertEqual(record.table_rows[16][2], "9.44E-01")
self.assertEqual(record.table_rows[16][3], "8.38E-01")
self.assertEqual(record.table_rows[16][4], "1.02E+02")
self.assertEqual(record.table_rows[16][5], "6.56E+01")
self.assertEqual(len(record.table_rows[17]), 6)
self.assertEqual(record.table_rows[17][0], "17")
self.assertEqual(record.table_rows[17][1], "-0.3275898")
self.assertEqual(record.table_rows[17][2], "7.87E-01")
self.assertEqual(record.table_rows[17][3], "6.77E-01")
self.assertEqual(record.table_rows[17][4], "1.41E+02")
self.assertEqual(record.table_rows[17][5], "6.65E+01")
self.assertEqual(len(record.table_rows[18]), 6)
self.assertEqual(record.table_rows[18][0], "18")
self.assertEqual(record.table_rows[18][1], "-0.4805853")
self.assertEqual(record.table_rows[18][2], "1.14E-01")
self.assertEqual(record.table_rows[18][3], "2.41E-05")
self.assertEqual(record.table_rows[18][4], "1.34E+03")
self.assertEqual(record.table_rows[18][5], "4.42E+02")
self.assertEqual(len(record.table_rows[19]), 6)
self.assertEqual(record.table_rows[19][0], "19")
self.assertEqual(record.table_rows[19][1], "0.1109524")
self.assertEqual(record.table_rows[19][2], "9.56E-02")
self.assertEqual(record.table_rows[19][3], "2.46E-01")
self.assertEqual(record.table_rows[19][4], "8.38E+02")
self.assertEqual(record.table_rows[19][5], "1.08E+03")
self.assertEqual(len(record.table_rows[20]), 6)
self.assertEqual(record.table_rows[20][0], "20")
self.assertEqual(record.table_rows[20][1], "0.1677912")
self.assertEqual(record.table_rows[20][2], "6.51E-01")
self.assertEqual(record.table_rows[20][3], "7.97E-01")
self.assertEqual(record.table_rows[20][4], "9.84E+01")
self.assertEqual(record.table_rows[20][5], "1.45E+02")
record = next(records)
self.assertEqual(record.entity_type, "SAMPLE")
self.assertEqual(record.entity_id, "Triple-Fusion Transfected Embryonic Stem Cells Replicate 1")
self.assertEqual(len(record.entity_attributes), 25)
self.assertEqual(record.entity_attributes["Sample_extract_protocol_ch2"], "TriZol procedure")
self.assertEqual(record.entity_attributes["Sample_hyb_protocol"], "Oligoarray control targets and hybridization buffer (Agilent In Situ Hybridization Kit Plus) were added, and samples were applied to microarrays enclosed in Agilent SureHyb-enabled hybridization chambers. After hybridization, slides were washed sequentially with 6x SSC/0.005% Triton X-102 and 0.1x SSC/0.005% Triton X-102 before scanning. Slides were hybridized for 17 h at 60\xb0C in a rotating oven, and washed.")
self.assertEqual(record.entity_attributes["Sample_extract_protocol_ch1"], "TriZol procedure")
self.assertEqual(record.entity_attributes["Sample_platform_id"], "GPL3759")
self.assertEqual(record.entity_attributes["Sample_title"], "Triple-Fusion Transfected Embryonic Stem Cells Replicate 1")
self.assertEqual(record.entity_attributes["Sample_supplementary_file"], "file3.gpr")
self.assertEqual(record.entity_attributes["Sample_organism_ch2"], "Mus musculus")
self.assertEqual(record.entity_attributes["Sample_organism_ch1"], "Mus musculus")
self.assertEqual(record.entity_attributes["Sample_label_ch1"], "Cy5")
self.assertEqual(record.entity_attributes["Sample_scan_protocol"], "Scanned on an Agilent G2565AA scanner.")
self.assertEqual(record.entity_attributes["sample_table_begin"], "")
self.assertEqual(record.entity_attributes["Sample_label_protocol_ch2"], "10 \xb5g of total RNA were primed with 2 \xb5l of 100 \xb5M T16N2 DNA primer at 70\xb0C for 10 min, then reversed transcribed at 42\xb0C for 1 h in the presence of 400 U SuperScript II RTase (Invitrogen), and 100 \xb5M each dATP, dTTP, dGTP, with 25 \xb5M dCTP, 25 \xb5M Cy5-labeled dCTP (NEN Life Science, Boston, MA), and RNase inhibitor (Invitrogen). RNA was then degraded with RNase A, and labeled cDNAs were purified using QIAquick PCR columns (Qiagen).")
self.assertEqual(record.entity_attributes["Sample_label_protocol_ch1"], "10 \xb5g of total RNA were primed with 2 \xb5l of 100 \xb5M T16N2 DNA primer at 70\xb0C for 10 min, then reversed transcribed at 42\xb0C for 1 h in the presence of 400 U SuperScript II RTase (Invitrogen), and 100 \xb5M each dATP, dTTP, dGTP with 25 \xb5M dCTP, 25 \xb5M Cy5-labeled dCTP (NEN Life Science, Boston, MA), and RNase inhibitor (Invitrogen). RNA was then degraded with RNase A, and labeled cDNAs were purified using QIAquick PCR columns (Qiagen).")
self.assertEqual(record.entity_attributes["Sample_data_processing"], "LOWESS normalized, background subtracted VALUE data obtained from log of processed Red signal/processed Green signal.")
self.assertEqual(record.entity_attributes["sample_table_end"], "")
self.assertEqual(record.entity_attributes["Sample_label_ch2"], "Cy3")
self.assertEqual(record.entity_attributes["Sample_description"], "Biological replicate 1 of 3. Stable triple-fusion-reporter-gene transfected embryonic stem cells, harvested after several passages.")
self.assertEqual(record.entity_attributes["Sample_source_name_ch1"], "Total RNA from murine ES-D3 triple-transfected embryonic stem cells labeled with Cyanine-5 (red).")
self.assertEqual(record.entity_attributes["Sample_source_name_ch2"], "Total RNA from pooled whole mouse embryos e17.5, labeled with Cyanine-3 (green).")
self.assertEqual(record.entity_attributes["Sample_molecule_ch1"], "total RNA")
self.assertEqual(record.entity_attributes["Sample_molecule_ch2"], "total RNA")
self.assertEqual(record.entity_attributes["Sample_growth_protocol_ch1"], "ES cells were kept in an undifferentiated, pluripotent state by using 1000 IU/ml leukemia inhibitory factor (LIF; Chemicon, ESGRO, ESG1107), and grown on top of murine embryonic fibroblasts feeder layer inactivated by 10 ug/ml of mitomycin C (Sigma, St. Louis). ES cells were cultured on 0.1% gelatin-coated plastic dishes in ES medium containing Dulbecco modified Eagle medium supplemented with 15% fetal calf serum, 0.1 mM beta-mercaptoethanol, 2 mM glutamine, and 0.1 mN non-essential amino acids.")
self.assertEqual(len(record.entity_attributes["Sample_characteristics_ch1"]), 5)
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][0], "ES-D3 cell line (CRL-1934)")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][1], "Transfected with pUb-fluc-mrfp-ttk triple fusion reporter gene.")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][2], "Age: day 4")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][3], "Tissue: blastocytes")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][4], "Strain: 129/Sv mice")
self.assertEqual(len(record.entity_attributes["Sample_characteristics_ch2"]), 3)
self.assertEqual(record.entity_attributes["Sample_characteristics_ch2"][0], "Strain: C57BL/6")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch2"][1], "Age: e17.5 d")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch2"][2], "Tissue: whole embryo")
self.assertEqual(record.entity_attributes["Sample_treatment_protocol_ch1"], "PCR amplification and standard cloning techniques were used to insert fluc and mrfp genes from plasmids pCDNA 3.1-CMV-fluc (Promega, Madison, WI) and pCDNA3.1-CMV-mrfp in frame with the ttk gene into the pCDNA3.1-truncated sr39tk. This triple fusion (TF) reporter gene fragment (3.3 kbp) was released from the plasmid with Not1 and BamH1 restriction enzymes before blunt-end ligation into the multiple cloning site of lentiviral transfer vector, FUG, driven by the human ubiquitin-C promoter. Self-inactivating (SIN) lentivirus was prepared by transient transfection of 293T cells. Briefly, pFUG-TF containing the triple fusion reporter gene was co-transfected into 293T cells with HIV-1 packaging vector (?8.9) and vesicular stomatitis virus G glycoprotein-pseudotyped envelop vector (pVSVG). Lentivirus supernatant was concentrated by sediment centrifugation using a SW29 rotor at 50,000 x g for two hours. Concentrated virus was titered on 293T cells. Murine ES cells were transfected with LV-pUb-fluc-mrfp-ttk at a multiplicity of infection (MOI) of 10.")
self.assertEqual(len(record.col_defs), 6)
self.assertEqual(record.col_defs["ID_REF"], "")
self.assertEqual(record.col_defs["VALUE"], "log(REDsignal/GREENsignal) per feature (processed signals used).")
self.assertEqual(record.col_defs["gProcessedSignal"], 'Dye-normalized signal after surrogate "algorithm," green "channel," used for computation of log ratio.')
self.assertEqual(record.col_defs["LogRatioError"], "error of the log ratio calculated according to the error model chosen.")
self.assertEqual(record.col_defs["PValueLogRatio"], "Significance level of the Log Ratio computed for a feature.")
self.assertEqual(record.col_defs["rProcessedSignal"], 'Dye-normalized signal after surrogate "algorithm," red "channel," used for computation of log ratio.')
self.assertEqual(len(record.table_rows), 21)
self.assertEqual(len(record.table_rows[0]), 6)
self.assertEqual(record.table_rows[0][0], "ID_REF")
self.assertEqual(record.table_rows[0][1], "VALUE")
self.assertEqual(record.table_rows[0][2], "LogRatioError")
self.assertEqual(record.table_rows[0][3], "PValueLogRatio")
self.assertEqual(record.table_rows[0][4], "gProcessedSignal")
self.assertEqual(record.table_rows[0][5], "rProcessedSignal")
self.assertEqual(len(record.table_rows[1]), 6)
self.assertEqual(record.table_rows[1][0], "1")
self.assertEqual(record.table_rows[1][1], "-0.7837546")
self.assertEqual(record.table_rows[1][2], "1.30E-01")
self.assertEqual(record.table_rows[1][3], "1.70E-09")
self.assertEqual(record.table_rows[1][4], "2.10E+03")
self.assertEqual(record.table_rows[1][5], "3.46E+02")
self.assertEqual(len(record.table_rows[2]), 6)
self.assertEqual(record.table_rows[2][0], "2")
self.assertEqual(record.table_rows[2][1], "0.3797837")
self.assertEqual(record.table_rows[2][2], "1.15E+00")
self.assertEqual(record.table_rows[2][3], "7.41E-01")
self.assertEqual(record.table_rows[2][4], "5.59E+01")
self.assertEqual(record.table_rows[2][5], "1.34E+02")
self.assertEqual(len(record.table_rows[3]), 6)
self.assertEqual(record.table_rows[3][0], "3")
self.assertEqual(record.table_rows[3][1], "0.2079269")
self.assertEqual(record.table_rows[3][2], "5.38E-02")
self.assertEqual(record.table_rows[3][3], "1.12E-04")
self.assertEqual(record.table_rows[3][4], "5.04E+03")
self.assertEqual(record.table_rows[3][5], "8.14E+03")
self.assertEqual(len(record.table_rows[4]), 6)
self.assertEqual(record.table_rows[4][0], "4")
self.assertEqual(record.table_rows[4][1], "-0.4730291")
self.assertEqual(record.table_rows[4][2], "6.71E-02")
self.assertEqual(record.table_rows[4][3], "1.86E-12")
self.assertEqual(record.table_rows[4][4], "5.66E+03")
self.assertEqual(record.table_rows[4][5], "1.91E+03")
self.assertEqual(len(record.table_rows[5]), 6)
self.assertEqual(record.table_rows[5][0], "5")
self.assertEqual(record.table_rows[5][1], "-0.9481128")
self.assertEqual(record.table_rows[5][2], "1.19E-01")
self.assertEqual(record.table_rows[5][3], "1.30E-15")
self.assertEqual(record.table_rows[5][4], "3.10E+03")
self.assertEqual(record.table_rows[5][5], "3.49E+02")
self.assertEqual(len(record.table_rows[6]), 6)
self.assertEqual(record.table_rows[6][0], "6")
self.assertEqual(record.table_rows[6][1], "-0.0159867")
self.assertEqual(record.table_rows[6][2], "1.33E-01")
self.assertEqual(record.table_rows[6][3], "9.05E-01")
self.assertEqual(record.table_rows[6][4], "8.45E+02")
self.assertEqual(record.table_rows[6][5], "8.14E+02")
self.assertEqual(len(record.table_rows[7]), 6)
self.assertEqual(record.table_rows[7][0], "7")
self.assertEqual(record.table_rows[7][1], "-0.819922")
self.assertEqual(record.table_rows[7][2], "1.14E-01")
self.assertEqual(record.table_rows[7][3], "7.01E-13")
self.assertEqual(record.table_rows[7][4], "2.75E+03")
self.assertEqual(record.table_rows[7][5], "4.16E+02")
self.assertEqual(len(record.table_rows[8]), 6)
self.assertEqual(record.table_rows[8][0], "8")
self.assertEqual(record.table_rows[8][1], "-0.1559774")
self.assertEqual(record.table_rows[8][2], "9.16E-01")
self.assertEqual(record.table_rows[8][3], "8.65E-01")
self.assertEqual(record.table_rows[8][4], "1.34E+02")
self.assertEqual(record.table_rows[8][5], "9.34E+01")
self.assertEqual(len(record.table_rows[9]), 6)
self.assertEqual(record.table_rows[9][0], "9")
self.assertEqual(record.table_rows[9][1], "0.145267")
self.assertEqual(record.table_rows[9][2], "3.90E+00")
self.assertEqual(record.table_rows[9][3], "1.00E+00")
self.assertEqual(record.table_rows[9][4], "2.22E+01")
self.assertEqual(record.table_rows[9][5], "3.10E+01")
self.assertEqual(len(record.table_rows[10]), 6)
self.assertEqual(record.table_rows[10][0], "10")
self.assertEqual(record.table_rows[10][1], "0.3611211")
self.assertEqual(record.table_rows[10][2], "3.40E-01")
self.assertEqual(record.table_rows[10][3], "2.88E-01")
self.assertEqual(record.table_rows[10][4], "1.97E+02")
self.assertEqual(record.table_rows[10][5], "4.52E+02")
self.assertEqual(len(record.table_rows[11]), 6)
self.assertEqual(record.table_rows[11][0], "11")
self.assertEqual(record.table_rows[11][1], "0.5092089")
self.assertEqual(record.table_rows[11][2], "4.39E-01")
self.assertEqual(record.table_rows[11][3], "2.46E-01")
self.assertEqual(record.table_rows[11][4], "1.24E+02")
self.assertEqual(record.table_rows[11][5], "4.01E+02")
self.assertEqual(len(record.table_rows[12]), 6)
self.assertEqual(record.table_rows[12][0], "12")
self.assertEqual(record.table_rows[12][1], "0.3715387")
self.assertEqual(record.table_rows[12][2], "1.69E+00")
self.assertEqual(record.table_rows[12][3], "8.26E-01")
self.assertEqual(record.table_rows[12][4], "3.84E+01")
self.assertEqual(record.table_rows[12][5], "9.04E+01")
self.assertEqual(len(record.table_rows[13]), 6)
self.assertEqual(record.table_rows[13][0], "13")
self.assertEqual(record.table_rows[13][1], "0.1734934")
self.assertEqual(record.table_rows[13][2], "3.57E-01")
self.assertEqual(record.table_rows[13][3], "6.27E-01")
self.assertEqual(record.table_rows[13][4], "2.37E+02")
self.assertEqual(record.table_rows[13][5], "3.53E+02")
self.assertEqual(len(record.table_rows[14]), 6)
self.assertEqual(record.table_rows[14][0], "14")
self.assertEqual(record.table_rows[14][1], "-0.9340707")
self.assertEqual(record.table_rows[14][2], "1.20E-01")
self.assertEqual(record.table_rows[14][3], "6.90E-15")
self.assertEqual(record.table_rows[14][4], "2.96E+03")
self.assertEqual(record.table_rows[14][5], "3.45E+02")
self.assertEqual(len(record.table_rows[15]), 6)
self.assertEqual(record.table_rows[15][0], "15")
self.assertEqual(record.table_rows[15][1], "-0.2956317")
self.assertEqual(record.table_rows[15][2], "5.78E-01")
self.assertEqual(record.table_rows[15][3], "6.09E-01")
self.assertEqual(record.table_rows[15][4], "2.46E+02")
self.assertEqual(record.table_rows[15][5], "1.25E+02")
self.assertEqual(len(record.table_rows[16]), 6)
self.assertEqual(record.table_rows[16][0], "16")
self.assertEqual(record.table_rows[16][1], "-0.2321102")
self.assertEqual(record.table_rows[16][2], "1.22E+00")
self.assertEqual(record.table_rows[16][3], "8.49E-01")
self.assertEqual(record.table_rows[16][4], "1.09E+02")
self.assertEqual(record.table_rows[16][5], "6.37E+01")
self.assertEqual(len(record.table_rows[17]), 6)
self.assertEqual(record.table_rows[17][0], "17")
self.assertEqual(record.table_rows[17][1], "-0.1603561")
self.assertEqual(record.table_rows[17][2], "1.16E+00")
self.assertEqual(record.table_rows[17][3], "8.90E-01")
self.assertEqual(record.table_rows[17][4], "1.06E+02")
self.assertEqual(record.table_rows[17][5], "7.34E+01")
self.assertEqual(len(record.table_rows[18]), 6)
self.assertEqual(record.table_rows[18][0], "18")
self.assertEqual(record.table_rows[18][1], "-0.5063897")
self.assertEqual(record.table_rows[18][2], "1.63E-01")
self.assertEqual(record.table_rows[18][3], "1.95E-03")
self.assertEqual(record.table_rows[18][4], "1.15E+03")
self.assertEqual(record.table_rows[18][5], "3.58E+02")
self.assertEqual(len(record.table_rows[19]), 6)
self.assertEqual(record.table_rows[19][0], "19")
self.assertEqual(record.table_rows[19][1], "0.1990761")
self.assertEqual(record.table_rows[19][2], "1.32E-01")
self.assertEqual(record.table_rows[19][3], "1.32E-01")
self.assertEqual(record.table_rows[19][4], "6.65E+02")
self.assertEqual(record.table_rows[19][5], "1.05E+03")
self.assertEqual(len(record.table_rows[20]), 6)
self.assertEqual(record.table_rows[20][0], "20")
self.assertEqual(record.table_rows[20][1], "0.2985912")
self.assertEqual(record.table_rows[20][2], "8.89E-01")
self.assertEqual(record.table_rows[20][3], "7.37E-01")
self.assertEqual(record.table_rows[20][4], "8.06E+01")
self.assertEqual(record.table_rows[20][5], "1.60E+02")
def test_soft_ex_affy(self):
path = "Geo/soft_ex_affy.txt"
with open(path) as handle:
records = Geo.parse(handle)
record = next(records)
self.assertEqual(record.entity_type, "SAMPLE")
self.assertEqual(record.entity_id, "Drosophila_T0-1")
self.assertEqual(len(record.entity_attributes), 18)
self.assertEqual(record.entity_attributes["Sample_organism_ch1"], "Drosophila melanogaster")
self.assertEqual(record.entity_attributes["Sample_label_ch1"], "biotin")
self.assertEqual(record.entity_attributes["Sample_description"], "Gene expression data from embryos younger than nuclear cycle 9, i.e. before zygotic genome activation.")
self.assertEqual(record.entity_attributes["Sample_table_end"], "")
self.assertEqual(record.entity_attributes["Sample_growth_protocol_ch1"], "30 min egg collections of OreR and yw flies at 25C were aged at room temperature (RT) according to the different temporal classes T0-T4.")
self.assertEqual(len(record.entity_attributes["Sample_characteristics_ch1"]), 2)
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][0], "Genotype: yellow white and Oregon R parents")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][1], "Age: embryos younger than nuclear cycle 9, i.e. before pole cells budding")
self.assertEqual(record.entity_attributes["Sample_scan_protocol"], "GeneChips were scanned using the Hewlett-Packard GeneArray Scanner G2500A.")
self.assertEqual(record.entity_attributes["Sample_hyb_protocol"], "Following fragmentation, 10 microg of cRNA were hybridized for 16 hr at 45C on GeneChip Drosophila Genome Array. GeneChips were washed and stained in the Affymetrix Fluidics Station 400.")
self.assertEqual(record.entity_attributes["Sample_extract_protocol_ch1"], "Trizol extraction of total RNA was performed according to the manufacturer's instructions.")
self.assertEqual(record.entity_attributes["Sample_source_name_ch1"], "Drosophila embryos before nuclear cycle 9 (maternal transcripts)")
self.assertEqual(record.entity_attributes["Sample_table_begin"], "")
self.assertEqual(record.entity_attributes["Sample_molecule_ch1"], "total RNA")
self.assertEqual(record.entity_attributes["Sample_label_protocol_ch1"], "Biotinylated cRNA were prepared according to the standard Affymetrix protocol from 6 microg total RNA (Expression Analysis Technical Manual, 2001, Affymetrix).")
self.assertEqual(record.entity_attributes["Sample_data_processing"], "The data were analyzed with Microarray Suite version 5.0 (MAS 5.0) using Affymetrix default analysis settings and global scaling as normalization method. The trimmed mean target intensity of each array was arbitrarily set to 100.")
self.assertEqual(record.entity_attributes["Sample_treatment_protocol_ch1"], "Embryos were dechorionated with 50% bleach, put on a cover slip and covered with Halocarbon oil 27 (Sigma). Embryos of the appropriate stage were manually selected under the dissecting scope. Selected embryos were transferred to a basket, rinsed with PBS with 0,7% NaCl, 0,04% triton-X100 and placed on ice in the Trizol solution (GibcoBRL).")
self.assertEqual(record.entity_attributes["Sample_title"], "embryo at T0, biological rep1")
self.assertEqual(record.entity_attributes["Sample_supplementary_file"], "Drosophila_T0-1.CEL")
self.assertEqual(record.entity_attributes["Sample_platform_id"], "GPL72")
self.assertEqual(len(record.col_defs), 4)
self.assertEqual(record.col_defs["DETECTION P-VALUE"], "'detection p-value', p-value that indicates the significance level of the detection call")
self.assertEqual(record.col_defs["ID_REF"], "")
self.assertEqual(record.col_defs["VALUE"], "MAS5-calculated Signal intensity")
self.assertEqual(record.col_defs["ABS_CALL"], "the call in an absolute analysis that indicates if the transcript was present (P), absent (A), marginal (M), or no call (NC)")
self.assertEqual(len(record.table_rows), 22)
self.assertEqual(len(record.table_rows[0]), 4)
self.assertEqual(record.table_rows[0][0], "ID_REF")
self.assertEqual(record.table_rows[0][1], "VALUE")
self.assertEqual(record.table_rows[0][2], "ABS_CALL")
self.assertEqual(record.table_rows[0][3], "DETECTION P-VALUE")
self.assertEqual(len(record.table_rows[1]), 4)
self.assertEqual(record.table_rows[1][0], "141200_at")
self.assertEqual(record.table_rows[1][1], "36.6")
self.assertEqual(record.table_rows[1][2], "A")
self.assertEqual(record.table_rows[1][3], "0.818657")
self.assertEqual(len(record.table_rows[2]), 4)
self.assertEqual(record.table_rows[2][0], "141201_at")
self.assertEqual(record.table_rows[2][1], "41.5")
self.assertEqual(record.table_rows[2][2], "A")
self.assertEqual(record.table_rows[2][3], "0.703191")
self.assertEqual(len(record.table_rows[3]), 4)
self.assertEqual(record.table_rows[3][0], "141202_at")
self.assertEqual(record.table_rows[3][1], "607.3")
self.assertEqual(record.table_rows[3][2], "P")
self.assertEqual(record.table_rows[3][3], "0.000944")
self.assertEqual(len(record.table_rows[4]), 4)
self.assertEqual(record.table_rows[4][0], "141203_at")
self.assertEqual(record.table_rows[4][1], "1509.1")
self.assertEqual(record.table_rows[4][2], "P")
self.assertEqual(record.table_rows[4][3], "0.000762")
self.assertEqual(len(record.table_rows[5]), 4)
self.assertEqual(record.table_rows[5][0], "141204_at")
self.assertEqual(record.table_rows[5][1], "837.3")
self.assertEqual(record.table_rows[5][2], "P")
self.assertEqual(record.table_rows[5][3], "0.000613")
self.assertEqual(len(record.table_rows[6]), 4)
self.assertEqual(record.table_rows[6][0], "141205_at")
self.assertEqual(record.table_rows[6][1], "363.2")
self.assertEqual(record.table_rows[6][2], "P")
self.assertEqual(record.table_rows[6][3], "0.003815")
self.assertEqual(len(record.table_rows[7]), 4)
self.assertEqual(record.table_rows[7][0], "141206_at")
self.assertEqual(record.table_rows[7][1], "1193.6")
self.assertEqual(record.table_rows[7][2], "P")
self.assertEqual(record.table_rows[7][3], "0.000491")
self.assertEqual(len(record.table_rows[8]), 4)
self.assertEqual(record.table_rows[8][0], "141207_at")
self.assertEqual(record.table_rows[8][1], "346.6")
self.assertEqual(record.table_rows[8][2], "P")
self.assertEqual(record.table_rows[8][3], "0.001165")
self.assertEqual(len(record.table_rows[9]), 4)
self.assertEqual(record.table_rows[9][0], "141208_at")
self.assertEqual(record.table_rows[9][1], "257.8")
self.assertEqual(record.table_rows[9][2], "P")
self.assertEqual(record.table_rows[9][3], "0.006575")
self.assertEqual(len(record.table_rows[10]), 4)
self.assertEqual(record.table_rows[10][0], "141209_at")
self.assertEqual(record.table_rows[10][1], "337.1")
self.assertEqual(record.table_rows[10][2], "P")
self.assertEqual(record.table_rows[10][3], "0.002607")
self.assertEqual(len(record.table_rows[11]), 4)
self.assertEqual(record.table_rows[11][0], "141210_at")
self.assertEqual(record.table_rows[11][1], "48")
self.assertEqual(record.table_rows[11][2], "A")
self.assertEqual(record.table_rows[11][3], "0.150145")
self.assertEqual(len(record.table_rows[12]), 4)
self.assertEqual(record.table_rows[12][0], "141211_at")
self.assertEqual(record.table_rows[12][1], "130.7")
self.assertEqual(record.table_rows[12][2], "P")
self.assertEqual(record.table_rows[12][3], "0.005504")
self.assertEqual(len(record.table_rows[13]), 4)
self.assertEqual(record.table_rows[13][0], "141212_at")
self.assertEqual(record.table_rows[13][1], "1454.3")
self.assertEqual(record.table_rows[13][2], "P")
self.assertEqual(record.table_rows[13][3], "0.000491")
self.assertEqual(len(record.table_rows[14]), 4)
self.assertEqual(record.table_rows[14][0], "141213_at")
self.assertEqual(record.table_rows[14][1], "21.2")
self.assertEqual(record.table_rows[14][2], "A")
self.assertEqual(record.table_rows[14][3], "0.635055")
self.assertEqual(len(record.table_rows[15]), 4)
self.assertEqual(record.table_rows[15][0], "142121_at")
self.assertEqual(record.table_rows[15][1], "133.7")
self.assertEqual(record.table_rows[15][2], "A")
self.assertEqual(record.table_rows[15][3], "0.889551")
self.assertEqual(len(record.table_rows[16]), 4)
self.assertEqual(record.table_rows[16][0], "142122_at")
self.assertEqual(record.table_rows[16][1], "275.3")
self.assertEqual(record.table_rows[16][2], "A")
self.assertEqual(record.table_rows[16][3], "0.611218")
self.assertEqual(len(record.table_rows[17]), 4)
self.assertEqual(record.table_rows[17][0], "142123_at")
self.assertEqual(record.table_rows[17][1], "307.6")
self.assertEqual(record.table_rows[17][2], "A")
self.assertEqual(record.table_rows[17][3], "0.611218")
self.assertEqual(len(record.table_rows[18]), 4)
self.assertEqual(record.table_rows[18][0], "142124_at")
self.assertEqual(record.table_rows[18][1], "132.6")
self.assertEqual(record.table_rows[18][2], "A")
self.assertEqual(record.table_rows[18][3], "0.437646")
self.assertEqual(len(record.table_rows[19]), 4)
self.assertEqual(record.table_rows[19][0], "142125_at")
self.assertEqual(record.table_rows[19][1], "195.8")
self.assertEqual(record.table_rows[19][2], "A")
self.assertEqual(record.table_rows[19][3], "0.110449")
self.assertEqual(len(record.table_rows[20]), 4)
self.assertEqual(record.table_rows[20][0], "142126_at")
self.assertEqual(record.table_rows[20][1], "174.1")
self.assertEqual(record.table_rows[20][2], "A")
self.assertEqual(record.table_rows[20][3], "0.681117")
self.assertEqual(len(record.table_rows[21]), 4)
self.assertEqual(record.table_rows[21][0], "142127_at")
self.assertEqual(record.table_rows[21][1], "316.3")
self.assertEqual(record.table_rows[21][2], "A")
self.assertEqual(record.table_rows[21][3], "0.65838")
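
        # Second SAMPLE record: Drosophila_T0-2 ("embryo at T0, biological rep2").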
record = next(records)
self.assertEqual(record.entity_type, "SAMPLE")
self.assertEqual(record.entity_id, "Drosophila_T0-2")
self.assertEqual(len(record.entity_attributes), 18)
self.assertEqual(record.entity_attributes["Sample_organism_ch1"], "Drosophila melanogaster")
self.assertEqual(record.entity_attributes["Sample_label_ch1"], "biotin")
self.assertEqual(record.entity_attributes["Sample_description"], "Gene expression data from embryos younger than nuclear cycle 9, i.e. before zygotic genome activation.")
self.assertEqual(record.entity_attributes["Sample_table_end"], "")
self.assertEqual(record.entity_attributes["Sample_growth_protocol_ch1"], "30 min egg collections of OreR and yw flies at 25C were aged at room temperature (RT) according to the different temporal classes T0-T4.")
self.assertEqual(len(record.entity_attributes["Sample_characteristics_ch1"]), 2)
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][0], "Genotype: yellow white and Oregon R parents")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][1], "Age: embryos younger than nuclear cycle 9, i.e. before pole cells budding")
self.assertEqual(record.entity_attributes["Sample_scan_protocol"], "GeneChips were scanned using the Hewlett-Packard GeneArray Scanner G2500A.")
self.assertEqual(record.entity_attributes["Sample_hyb_protocol"], "Following fragmentation, 10 microg of cRNA were hybridized for 16 hr at 45C on GeneChip Drosophila Genome Array. GeneChips were washed and stained in the Affymetrix Fluidics Station 400.")
self.assertEqual(record.entity_attributes["Sample_extract_protocol_ch1"], "Trizol extraction of total RNA was performed according to the manufacturer's instructions.")
self.assertEqual(record.entity_attributes["Sample_source_name_ch1"], "Drosophila embryos before nuclear cycle 9 (maternal transcripts)")
self.assertEqual(record.entity_attributes["Sample_table_begin"], "")
self.assertEqual(record.entity_attributes["Sample_molecule_ch1"], "total RNA")
self.assertEqual(record.entity_attributes["Sample_label_protocol_ch1"], "Biotinylated cRNA were prepared according to the standard Affymetrix protocol from 6 microg total RNA (Expression Analysis Technical Manual, 2001, Affymetrix).")
self.assertEqual(record.entity_attributes["Sample_data_processing"], "The data were analyzed with Microarray Suite version 5.0 (MAS 5.0) using Affymetrix default analysis settings and global scaling as normalization method. The trimmed mean target intensity of each array was arbitrarily set to 100.")
self.assertEqual(record.entity_attributes["Sample_treatment_protocol_ch1"], "Embryos were dechorionated with 50% bleach, put on a cover slip and covered with Halocarbon oil 27 (Sigma). Embryos of the appropriate stage were manually selected under the dissecting scope. Selected embryos were transferred to a basket, rinsed with PBS with 0,7% NaCl, 0,04% triton-X100 and placed on ice in the Trizol solution (GibcoBRL).")
self.assertEqual(record.entity_attributes["Sample_title"], "embryo at T0, biological rep2")
self.assertEqual(record.entity_attributes["Sample_supplementary_file"], "Drosophila_T0-2.CEL")
self.assertEqual(record.entity_attributes["Sample_platform_id"], "GPL72")
self.assertEqual(len(record.col_defs), 4)
self.assertEqual(record.col_defs["DETECTION P-VALUE"], "'detection p-value', p-value that indicates the significance level of the detection call")
self.assertEqual(record.col_defs["ID_REF"], "")
self.assertEqual(record.col_defs["VALUE"], "MAS5-calculated Signal intensity")
self.assertEqual(record.col_defs["ABS_CALL"], "the call in an absolute analysis that indicates if the transcript was present (P), absent (A), marginal (M), or no call (NC)")
self.assertEqual(len(record.table_rows), 22)
self.assertEqual(len(record.table_rows[0]), 4)
self.assertEqual(record.table_rows[0][0], "ID_REF")
self.assertEqual(record.table_rows[0][1], "VALUE")
self.assertEqual(record.table_rows[0][2], "ABS_CALL")
self.assertEqual(record.table_rows[0][3], "DETECTION P-VALUE")
self.assertEqual(len(record.table_rows[1]), 4)
self.assertEqual(record.table_rows[1][0], "141200_at")
self.assertEqual(record.table_rows[1][1], "70.3")
self.assertEqual(record.table_rows[1][2], "A")
self.assertEqual(record.table_rows[1][3], "0.216313")
self.assertEqual(len(record.table_rows[2]), 4)
self.assertEqual(record.table_rows[2][0], "141201_at")
self.assertEqual(record.table_rows[2][1], "38")
self.assertEqual(record.table_rows[2][2], "A")
self.assertEqual(record.table_rows[2][3], "0.635055")
self.assertEqual(len(record.table_rows[3]), 4)
self.assertEqual(record.table_rows[3][0], "141202_at")
self.assertEqual(record.table_rows[3][1], "831.8")
self.assertEqual(record.table_rows[3][2], "P")
self.assertEqual(record.table_rows[3][3], "0.000613")
self.assertEqual(len(record.table_rows[4]), 4)
self.assertEqual(record.table_rows[4][0], "141203_at")
self.assertEqual(record.table_rows[4][1], "2215.5")
self.assertEqual(record.table_rows[4][2], "P")
self.assertEqual(record.table_rows[4][3], "0.000944")
self.assertEqual(len(record.table_rows[5]), 4)
self.assertEqual(record.table_rows[5][0], "141204_at")
self.assertEqual(record.table_rows[5][1], "965.6")
self.assertEqual(record.table_rows[5][2], "P")
self.assertEqual(record.table_rows[5][3], "0.000491")
self.assertEqual(len(record.table_rows[6]), 4)
self.assertEqual(record.table_rows[6][0], "141205_at")
self.assertEqual(record.table_rows[6][1], "383.2")
self.assertEqual(record.table_rows[6][2], "P")
self.assertEqual(record.table_rows[6][3], "0.001432")
self.assertEqual(len(record.table_rows[7]), 4)
self.assertEqual(record.table_rows[7][0], "141206_at")
self.assertEqual(record.table_rows[7][1], "1195")
self.assertEqual(record.table_rows[7][2], "P")
self.assertEqual(record.table_rows[7][3], "0.000491")
self.assertEqual(len(record.table_rows[8]), 4)
self.assertEqual(record.table_rows[8][0], "141207_at")
self.assertEqual(record.table_rows[8][1], "413.7")
self.assertEqual(record.table_rows[8][2], "P")
self.assertEqual(record.table_rows[8][3], "0.000613")
self.assertEqual(len(record.table_rows[9]), 4)
self.assertEqual(record.table_rows[9][0], "141208_at")
self.assertEqual(record.table_rows[9][1], "447.3")
self.assertEqual(record.table_rows[9][2], "P")
self.assertEqual(record.table_rows[9][3], "0.000762")
self.assertEqual(len(record.table_rows[10]), 4)
self.assertEqual(record.table_rows[10][0], "141209_at")
self.assertEqual(record.table_rows[10][1], "294.4")
self.assertEqual(record.table_rows[10][2], "P")
self.assertEqual(record.table_rows[10][3], "0.004591")
self.assertEqual(len(record.table_rows[11]), 4)
self.assertEqual(record.table_rows[11][0], "141210_at")
self.assertEqual(record.table_rows[11][1], "81.7")
self.assertEqual(record.table_rows[11][2], "M")
self.assertEqual(record.table_rows[11][3], "0.054711")
self.assertEqual(len(record.table_rows[12]), 4)
self.assertEqual(record.table_rows[12][0], "141211_at")
self.assertEqual(record.table_rows[12][1], "84.9")
self.assertEqual(record.table_rows[12][2], "P")
self.assertEqual(record.table_rows[12][3], "0.005504")
self.assertEqual(len(record.table_rows[13]), 4)
self.assertEqual(record.table_rows[13][0], "141212_at")
self.assertEqual(record.table_rows[13][1], "1456.4")
self.assertEqual(record.table_rows[13][2], "P")
self.assertEqual(record.table_rows[13][3], "0.000491")
self.assertEqual(len(record.table_rows[14]), 4)
self.assertEqual(record.table_rows[14][0], "141213_at")
self.assertEqual(record.table_rows[14][1], "37")
self.assertEqual(record.table_rows[14][2], "A")
self.assertEqual(record.table_rows[14][3], "0.122747")
self.assertEqual(len(record.table_rows[15]), 4)
self.assertEqual(record.table_rows[15][0], "142121_at")
self.assertEqual(record.table_rows[15][1], "133.7")
self.assertEqual(record.table_rows[15][2], "A")
self.assertEqual(record.table_rows[15][3], "0.889551")
self.assertEqual(len(record.table_rows[16]), 4)
self.assertEqual(record.table_rows[16][0], "142122_at")
self.assertEqual(record.table_rows[16][1], "275.3")
self.assertEqual(record.table_rows[16][2], "A")
self.assertEqual(record.table_rows[16][3], "0.611218")
self.assertEqual(len(record.table_rows[17]), 4)
self.assertEqual(record.table_rows[17][0], "142123_at")
self.assertEqual(record.table_rows[17][1], "307.6")
self.assertEqual(record.table_rows[17][2], "A")
self.assertEqual(record.table_rows[17][3], "0.611218")
self.assertEqual(len(record.table_rows[18]), 4)
self.assertEqual(record.table_rows[18][0], "142124_at")
self.assertEqual(record.table_rows[18][1], "132.6")
self.assertEqual(record.table_rows[18][2], "A")
self.assertEqual(record.table_rows[18][3], "0.437646")
self.assertEqual(len(record.table_rows[19]), 4)
self.assertEqual(record.table_rows[19][0], "142125_at")
self.assertEqual(record.table_rows[19][1], "195.8")
self.assertEqual(record.table_rows[19][2], "A")
self.assertEqual(record.table_rows[19][3], "0.110449")
self.assertEqual(len(record.table_rows[20]), 4)
self.assertEqual(record.table_rows[20][0], "142126_at")
self.assertEqual(record.table_rows[20][1], "174.1")
self.assertEqual(record.table_rows[20][2], "A")
self.assertEqual(record.table_rows[20][3], "0.681117")
self.assertEqual(len(record.table_rows[21]), 4)
self.assertEqual(record.table_rows[21][0], "142127_at")
self.assertEqual(record.table_rows[21][1], "316.3")
self.assertEqual(record.table_rows[21][2], "A")
self.assertEqual(record.table_rows[21][3], "0.65838")
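
        # Third SAMPLE record: Drosophila_T1-1 ("embryo at T1, biological rep1").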
record = next(records)
self.assertEqual(record.entity_type, "SAMPLE")
self.assertEqual(record.entity_id, "Drosophila_T1-1")
self.assertEqual(len(record.entity_attributes), 18)
self.assertEqual(record.entity_attributes["Sample_organism_ch1"], "Drosophila melanogaster")
self.assertEqual(record.entity_attributes["Sample_label_ch1"], "biotin")
self.assertEqual(record.entity_attributes["Sample_description"], "Gene expression data from embryos in slow phase of cellularisation.")
self.assertEqual(record.entity_attributes["Sample_table_end"], "")
self.assertEqual(record.entity_attributes["Sample_growth_protocol_ch1"], "30 min egg collections of OreR and yw flies at 25C were aged at room temperature (RT) according to the different temporal classes T0-T4.")
self.assertEqual(len(record.entity_attributes["Sample_characteristics_ch1"]), 2)
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][0], "Genotype: yellow white and Oregon R parents")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][1], "Age: embryos in slow phase of cellularisation")
self.assertEqual(record.entity_attributes["Sample_scan_protocol"], "GeneChips were scanned using the Hewlett-Packard GeneArray Scanner G2500A.")
self.assertEqual(record.entity_attributes["Sample_hyb_protocol"], "Following fragmentation, 10 microg of cRNA were hybridized for 16 hr at 45C on GeneChip Drosophila Genome Array. GeneChips were washed and stained in the Affymetrix Fluidics Station 400.")
self.assertEqual(record.entity_attributes["Sample_extract_protocol_ch1"], "Trizol extraction of total RNA was performed according to the manufacturer's instructions.")
self.assertEqual(record.entity_attributes["Sample_source_name_ch1"], "Drosophila embryos in slow phase of cellularisation")
self.assertEqual(record.entity_attributes["Sample_table_begin"], "")
self.assertEqual(record.entity_attributes["Sample_molecule_ch1"], "total RNA")
self.assertEqual(record.entity_attributes["Sample_label_protocol_ch1"], "Biotinylated cRNA were prepared according to the standard Affymetrix protocol from 6 microg total RNA (Expression Analysis Technical Manual, 2001, Affymetrix).")
self.assertEqual(record.entity_attributes["Sample_data_processing"], "The data were analyzed with Microarray Suite version 5.0 (MAS 5.0) using Affymetrix default analysis settings and global scaling as normalization method. The trimmed mean target intensity of each array was arbitrarily set to 100.")
self.assertEqual(record.entity_attributes["Sample_treatment_protocol_ch1"], "Embryos were dechorionated with 50% bleach, put on a cover slip and covered with Halocarbon oil 27 (Sigma). Embryos of the appropriate stage were manually selected under the dissecting scope. Selected embryos were transferred to a basket, rinsed with PBS with 0,7% NaCl, 0,04% triton-X100 and placed on ice in the Trizol solution (GibcoBRL).")
self.assertEqual(record.entity_attributes["Sample_title"], "embryo at T1, biological rep1")
self.assertEqual(record.entity_attributes["Sample_supplementary_file"], "Drosophila_T1-1.CEL")
self.assertEqual(record.entity_attributes["Sample_platform_id"], "GPL72")
self.assertEqual(len(record.col_defs), 4)
self.assertEqual(record.col_defs["DETECTION P-VALUE"], "'detection p-value', p-value that indicates the significance level of the detection call")
self.assertEqual(record.col_defs["ID_REF"], "")
self.assertEqual(record.col_defs["VALUE"], "MAS5-calculated Signal intensity")
self.assertEqual(record.col_defs["ABS_CALL"], "the call in an absolute analysis that indicates if the transcript was present (P), absent (A), marginal (M), or no call (NC)")
self.assertEqual(len(record.table_rows), 22)
self.assertEqual(len(record.table_rows[0]), 4)
self.assertEqual(record.table_rows[0][0], "ID_REF")
self.assertEqual(record.table_rows[0][1], "VALUE")
self.assertEqual(record.table_rows[0][2], "ABS_CALL")
self.assertEqual(record.table_rows[0][3], "DETECTION P-VALUE")
self.assertEqual(len(record.table_rows[1]), 4)
self.assertEqual(record.table_rows[1][0], "141200_at")
self.assertEqual(record.table_rows[1][1], "20.8")
self.assertEqual(record.table_rows[1][2], "A")
self.assertEqual(record.table_rows[1][3], "0.801637")
self.assertEqual(len(record.table_rows[2]), 4)
self.assertEqual(record.table_rows[2][0], "141201_at")
self.assertEqual(record.table_rows[2][1], "85.8")
self.assertEqual(record.table_rows[2][2], "A")
self.assertEqual(record.table_rows[2][3], "0.48748")
self.assertEqual(len(record.table_rows[3]), 4)
self.assertEqual(record.table_rows[3][0], "141202_at")
self.assertEqual(record.table_rows[3][1], "704.8")
self.assertEqual(record.table_rows[3][2], "P")
self.assertEqual(record.table_rows[3][3], "0.000613")
self.assertEqual(len(record.table_rows[4]), 4)
self.assertEqual(record.table_rows[4][0], "141203_at")
self.assertEqual(record.table_rows[4][1], "1036.6")
self.assertEqual(record.table_rows[4][2], "P")
self.assertEqual(record.table_rows[4][3], "0.000944")
self.assertEqual(len(record.table_rows[5]), 4)
self.assertEqual(record.table_rows[5][0], "141204_at")
self.assertEqual(record.table_rows[5][1], "700.3")
self.assertEqual(record.table_rows[5][2], "P")
self.assertEqual(record.table_rows[5][3], "0.000491")
self.assertEqual(len(record.table_rows[6]), 4)
self.assertEqual(record.table_rows[6][0], "141205_at")
self.assertEqual(record.table_rows[6][1], "462.4")
self.assertEqual(record.table_rows[6][2], "P")
self.assertEqual(record.table_rows[6][3], "0.003159")
self.assertEqual(len(record.table_rows[7]), 4)
self.assertEqual(record.table_rows[7][0], "141206_at")
self.assertEqual(record.table_rows[7][1], "1301.9")
self.assertEqual(record.table_rows[7][2], "P")
self.assertEqual(record.table_rows[7][3], "0.000491")
self.assertEqual(len(record.table_rows[8]), 4)
self.assertEqual(record.table_rows[8][0], "141207_at")
self.assertEqual(record.table_rows[8][1], "454.8")
self.assertEqual(record.table_rows[8][2], "P")
self.assertEqual(record.table_rows[8][3], "0.000944")
self.assertEqual(len(record.table_rows[9]), 4)
self.assertEqual(record.table_rows[9][0], "141208_at")
self.assertEqual(record.table_rows[9][1], "438.6")
self.assertEqual(record.table_rows[9][2], "P")
self.assertEqual(record.table_rows[9][3], "0.000944")
self.assertEqual(len(record.table_rows[10]), 4)
self.assertEqual(record.table_rows[10][0], "141209_at")
self.assertEqual(record.table_rows[10][1], "264.4")
self.assertEqual(record.table_rows[10][2], "P")
self.assertEqual(record.table_rows[10][3], "0.004591")
self.assertEqual(len(record.table_rows[11]), 4)
self.assertEqual(record.table_rows[11][0], "141210_at")
self.assertEqual(record.table_rows[11][1], "65.6")
self.assertEqual(record.table_rows[11][2], "A")
self.assertEqual(record.table_rows[11][3], "0.150145")
self.assertEqual(len(record.table_rows[12]), 4)
self.assertEqual(record.table_rows[12][0], "141211_at")
self.assertEqual(record.table_rows[12][1], "72.2")
self.assertEqual(record.table_rows[12][2], "A")
self.assertEqual(record.table_rows[12][3], "0.070073")
self.assertEqual(len(record.table_rows[13]), 4)
self.assertEqual(record.table_rows[13][0], "141212_at")
self.assertEqual(record.table_rows[13][1], "1200")
self.assertEqual(record.table_rows[13][2], "P")
self.assertEqual(record.table_rows[13][3], "0.000491")
self.assertEqual(len(record.table_rows[14]), 4)
self.assertEqual(record.table_rows[14][0], "141213_at")
self.assertEqual(record.table_rows[14][1], "13.7")
self.assertEqual(record.table_rows[14][2], "A")
self.assertEqual(record.table_rows[14][3], "0.635055")
self.assertEqual(len(record.table_rows[15]), 4)
self.assertEqual(record.table_rows[15][0], "142121_at")
self.assertEqual(record.table_rows[15][1], "133.7")
self.assertEqual(record.table_rows[15][2], "A")
self.assertEqual(record.table_rows[15][3], "0.889551")
self.assertEqual(len(record.table_rows[16]), 4)
self.assertEqual(record.table_rows[16][0], "142122_at")
self.assertEqual(record.table_rows[16][1], "275.3")
self.assertEqual(record.table_rows[16][2], "A")
self.assertEqual(record.table_rows[16][3], "0.611218")
self.assertEqual(len(record.table_rows[17]), 4)
self.assertEqual(record.table_rows[17][0], "142123_at")
self.assertEqual(record.table_rows[17][1], "307.6")
self.assertEqual(record.table_rows[17][2], "A")
self.assertEqual(record.table_rows[17][3], "0.611218")
self.assertEqual(len(record.table_rows[18]), 4)
self.assertEqual(record.table_rows[18][0], "142124_at")
self.assertEqual(record.table_rows[18][1], "132.6")
self.assertEqual(record.table_rows[18][2], "A")
self.assertEqual(record.table_rows[18][3], "0.437646")
self.assertEqual(len(record.table_rows[19]), 4)
self.assertEqual(record.table_rows[19][0], "142125_at")
self.assertEqual(record.table_rows[19][1], "195.8")
self.assertEqual(record.table_rows[19][2], "A")
self.assertEqual(record.table_rows[19][3], "0.110449")
self.assertEqual(len(record.table_rows[20]), 4)
self.assertEqual(record.table_rows[20][0], "142126_at")
self.assertEqual(record.table_rows[20][1], "174.1")
self.assertEqual(record.table_rows[20][2], "A")
self.assertEqual(record.table_rows[20][3], "0.681117")
self.assertEqual(len(record.table_rows[21]), 4)
self.assertEqual(record.table_rows[21][0], "142127_at")
self.assertEqual(record.table_rows[21][1], "316.3")
self.assertEqual(record.table_rows[21][2], "A")
self.assertEqual(record.table_rows[21][3], "0.65838")
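
        # Final record: the SERIES entry (Dros_embryo_timecourse) grouping the three Drosophila samples.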
record = next(records)
self.assertEqual(record.entity_type, "SERIES")
self.assertEqual(record.entity_id, "Dros_embryo_timecourse")
self.assertEqual(len(record.entity_attributes), 6)
self.assertEqual(len(record.entity_attributes["Series_sample_id"]), 3)
self.assertEqual(record.entity_attributes["Series_sample_id"][0], "Drosophila_T0-1")
self.assertEqual(record.entity_attributes["Series_sample_id"][1], "Drosophila_T0-2")
self.assertEqual(record.entity_attributes["Series_sample_id"][2], "Drosophila_T1-1")
self.assertEqual(len(record.entity_attributes["Series_contributor"]), 5)
self.assertEqual(record.entity_attributes["Series_contributor"][0], "Jane,Doe")
self.assertEqual(record.entity_attributes["Series_contributor"][1], "John,A,Smith")
self.assertEqual(record.entity_attributes["Series_contributor"][2], "Hans,van Elton")
self.assertEqual(record.entity_attributes["Series_contributor"][3], "John,Smithers Jr")
self.assertEqual(record.entity_attributes["Series_contributor"][4], "Jie,D,Chen")
self.assertEqual(len(record.entity_attributes["Series_summary"]), 2)
self.assertEqual(record.entity_attributes["Series_summary"][0], "Morphogenesis of epithelial tissues relies on the precise developmental control of cell polarity and architecture. In the early Drosophila embryo, the primary epithelium forms during cellularisation, following a tightly controlled genetic programme where specific sets of genes are up-regulated. Some of them, for instance, control membrane invagination between the nuclei anchored at the apical surface of the syncytium.")
self.assertEqual(record.entity_attributes["Series_summary"][1], "We used microarrays to detail the global programme of gene expression underlying cellularisation and identified distinct classes of up-regulated genes during this process.")
self.assertEqual(record.entity_attributes["Series_type"], "time course")
self.assertEqual(record.entity_attributes["Series_title"], "Expression data from early Drosophila embryo")
self.assertEqual(record.entity_attributes["Series_overall_design"], "Drosophila embryos were selected at successive stages of early development for RNA extraction and hybridization on Affymetrix microarrays. We sought to obtain homogeneous populations of embryos at each developmental stage in order to increase the temporal resolution of expression profiles. To that end, we hand-selected embryos according to morphological criteria at five time-points: before pole cell formation, i.e. before zygotic transcription (T0), during the slow phase (T1) and the fast phase (T2) of cellularisation and at the beginning (T3) and the end (T4) of gastrulation.")
self.assertEqual(len(record.col_defs), 0)
self.assertEqual(len(record.table_rows), 0)
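
    # GSE16.txt: a dual channel genomic (CGH) SAMPLE record, GSM804, with a five-column data table.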
def test_GSE16(self):
path = "Geo/GSE16.txt"
with open(path) as handle:
records = Geo.parse(handle)
record = next(records)
self.assertEqual(record.entity_type, "SAMPLE")
self.assertEqual(record.entity_id, "GSM804")
self.assertEqual(len(record.entity_attributes), 18)
self.assertEqual(record.entity_attributes["Sample_pubmed_id"], "11687795")
self.assertEqual(record.entity_attributes["Sample_submitter_institute"], "University of California San Francisco")
self.assertEqual(len(record.entity_attributes["Sample_author"]), 19)
self.assertEqual(record.entity_attributes["Sample_author"][0], "Antoine,M,Snijders")
self.assertEqual(record.entity_attributes["Sample_author"][1], "Norma,,Nowak")
self.assertEqual(record.entity_attributes["Sample_author"][2], "Richard,,Segraves")
self.assertEqual(record.entity_attributes["Sample_author"][3], "Stephanie,,Blackwood")
self.assertEqual(record.entity_attributes["Sample_author"][4], "Nils,,Brown")
self.assertEqual(record.entity_attributes["Sample_author"][5], "Jeffery,,Conroy")
self.assertEqual(record.entity_attributes["Sample_author"][6], "Greg,,Hamilton")
self.assertEqual(record.entity_attributes["Sample_author"][7], "Anna,K,Hindle")
self.assertEqual(record.entity_attributes["Sample_author"][8], "Bing,,Huey")
self.assertEqual(record.entity_attributes["Sample_author"][9], "Karen,,Kimura")
self.assertEqual(record.entity_attributes["Sample_author"][10], "Sindy,,Law")
self.assertEqual(record.entity_attributes["Sample_author"][11], "Ken,,Myambo")
self.assertEqual(record.entity_attributes["Sample_author"][12], "Joel,,Palmer")
self.assertEqual(record.entity_attributes["Sample_author"][13], "Bauke,,Ylstra")
self.assertEqual(record.entity_attributes["Sample_author"][14], "Jingzhu,P,Yue")
self.assertEqual(record.entity_attributes["Sample_author"][15], "Joe,W,Gray")
self.assertEqual(record.entity_attributes["Sample_author"][16], "Ajay,N,Jain")
self.assertEqual(record.entity_attributes["Sample_author"][17], "Daniel,,Pinkel")
self.assertEqual(record.entity_attributes["Sample_author"][18], "Donna,G,Albertson")
self.assertEqual(record.entity_attributes["Sample_submitter_phone"], "415 502-8463")
self.assertEqual(record.entity_attributes["Sample_submitter_department"], "Comprehensive Cancer Center")
self.assertEqual(len(record.entity_attributes["Sample_description"]), 4)
self.assertEqual(record.entity_attributes["Sample_description"][0], 'Coriell Cell Repositories cell line <a href="http://locus.umdnj.edu/nigms/nigms_cgi/display.cgi?GM05296">GM05296</a>.')
self.assertEqual(record.entity_attributes["Sample_description"][1], "Fibroblast cell line derived from a 1 month old female with multiple congenital malformations, dysmorphic features, intrauterine growth retardation, heart murmur, cleft palate, equinovarus deformity, microcephaly, coloboma of right iris, clinodactyly, reduced RBC catalase activity, and 1 copy of catalase gene.")
self.assertEqual(record.entity_attributes["Sample_description"][2], "Chromosome abnormalities are present.")
self.assertEqual(record.entity_attributes["Sample_description"][3], "Karyotype is 46,XX,-11,+der(11)inv ins(11;10)(11pter> 11p13::10q21>10q24::11p13>11qter)mat")
self.assertEqual(record.entity_attributes["Sample_target_source2"], "normal male reference genomic DNA")
self.assertEqual(record.entity_attributes["Sample_target_source1"], "Cell line GM05296")
self.assertEqual(record.entity_attributes["Sample_submitter_name"], "Donna,G,Albertson")
self.assertEqual(record.entity_attributes["Sample_platform_id"], "GPL28")
self.assertEqual(record.entity_attributes["Sample_type"], "dual channel genomic")
self.assertEqual(record.entity_attributes["Sample_status"], "Public on Feb 12 2002")
self.assertEqual(record.entity_attributes["Sample_submitter_email"], "[email protected]")
self.assertEqual(record.entity_attributes["Sample_title"], "CGH_Albertson_GM05296-001218")
self.assertEqual(record.entity_attributes["Sample_organism"], "Homo sapiens")
self.assertEqual(record.entity_attributes["Sample_series_id"], "GSE16")
self.assertEqual(record.entity_attributes["Sample_submission_date"], "Jan 17 2002")
self.assertEqual(record.entity_attributes["Sample_submitter_city"], "San Francisco,CA,94143,USA")
self.assertEqual(len(record.col_defs), 5)
self.assertEqual(record.col_defs["NO_REPLICATES"], "Number of replicate spot measurements")
self.assertEqual(record.col_defs["LOG2STDDEV"], "Standard deviation of VALUE")
self.assertEqual(record.col_defs["ID_REF"], "Unique row identifier, genome position order")
self.assertEqual(record.col_defs["VALUE"], "aka LOG2RATIO, mean of log base 2 of LINEAR_RATIO")
self.assertEqual(record.col_defs["LINEAR_RATIO"], "Mean of replicate Cy3/Cy5 ratios")
self.assertEqual(len(record.table_rows), 21)
self.assertEqual(len(record.table_rows[0]), 5)
self.assertEqual(record.table_rows[0][0], "ID_REF")
self.assertEqual(record.table_rows[0][1], "VALUE")
self.assertEqual(record.table_rows[0][2], "LINEAR_RATIO")
self.assertEqual(record.table_rows[0][3], "LOG2STDDEV")
self.assertEqual(record.table_rows[0][4], "NO_REPLICATES")
self.assertEqual(len(record.table_rows[1]), 5)
self.assertEqual(record.table_rows[1][0], "1")
self.assertEqual(record.table_rows[1][1], "")
self.assertEqual(record.table_rows[1][2], "1.047765")
self.assertEqual(record.table_rows[1][3], "0.011853")
self.assertEqual(record.table_rows[1][4], "3")
self.assertEqual(len(record.table_rows[2]), 5)
self.assertEqual(record.table_rows[2][0], "2")
self.assertEqual(record.table_rows[2][1], "")
self.assertEqual(record.table_rows[2][2], "")
self.assertEqual(record.table_rows[2][3], "")
self.assertEqual(record.table_rows[2][4], "0")
self.assertEqual(len(record.table_rows[3]), 5)
self.assertEqual(record.table_rows[3][0], "3")
self.assertEqual(record.table_rows[3][1], "0.008824")
self.assertEqual(record.table_rows[3][2], "1.006135")
self.assertEqual(record.table_rows[3][3], "0.00143")
self.assertEqual(record.table_rows[3][4], "3")
self.assertEqual(len(record.table_rows[4]), 5)
self.assertEqual(record.table_rows[4][0], "4")
self.assertEqual(record.table_rows[4][1], "-0.000894")
self.assertEqual(record.table_rows[4][2], "0.99938")
self.assertEqual(record.table_rows[4][3], "0.001454")
self.assertEqual(record.table_rows[4][4], "3")
self.assertEqual(len(record.table_rows[5]), 5)
self.assertEqual(record.table_rows[5][0], "5")
self.assertEqual(record.table_rows[5][1], "0.075875")
self.assertEqual(record.table_rows[5][2], "1.054")
self.assertEqual(record.table_rows[5][3], "0.003077")
self.assertEqual(record.table_rows[5][4], "3")
self.assertEqual(len(record.table_rows[6]), 5)
self.assertEqual(record.table_rows[6][0], "6")
self.assertEqual(record.table_rows[6][1], "0.017303")
self.assertEqual(record.table_rows[6][2], "1.012066")
self.assertEqual(record.table_rows[6][3], "0.005876")
self.assertEqual(record.table_rows[6][4], "2")
self.assertEqual(len(record.table_rows[7]), 5)
self.assertEqual(record.table_rows[7][0], "7")
self.assertEqual(record.table_rows[7][1], "-0.006766")
self.assertEqual(record.table_rows[7][2], "0.995321")
self.assertEqual(record.table_rows[7][3], "0.013881")
self.assertEqual(record.table_rows[7][4], "3")
self.assertEqual(len(record.table_rows[8]), 5)
self.assertEqual(record.table_rows[8][0], "8")
self.assertEqual(record.table_rows[8][1], "0.020755")
self.assertEqual(record.table_rows[8][2], "1.014491")
self.assertEqual(record.table_rows[8][3], "0.005506")
self.assertEqual(record.table_rows[8][4], "3")
self.assertEqual(len(record.table_rows[9]), 5)
self.assertEqual(record.table_rows[9][0], "9")
self.assertEqual(record.table_rows[9][1], "-0.094938")
self.assertEqual(record.table_rows[9][2], "0.936313")
self.assertEqual(record.table_rows[9][3], "0.012662")
self.assertEqual(record.table_rows[9][4], "3")
self.assertEqual(len(record.table_rows[10]), 5)
self.assertEqual(record.table_rows[10][0], "10")
self.assertEqual(record.table_rows[10][1], "-0.054527")
self.assertEqual(record.table_rows[10][2], "0.96291")
self.assertEqual(record.table_rows[10][3], "0.01073")
self.assertEqual(record.table_rows[10][4], "3")
self.assertEqual(len(record.table_rows[11]), 5)
self.assertEqual(record.table_rows[11][0], "11")
self.assertEqual(record.table_rows[11][1], "-0.025057")
self.assertEqual(record.table_rows[11][2], "0.982782")
self.assertEqual(record.table_rows[11][3], "0.003855")
self.assertEqual(record.table_rows[11][4], "3")
self.assertEqual(len(record.table_rows[12]), 5)
self.assertEqual(record.table_rows[12][0], "12")
self.assertEqual(record.table_rows[12][1], "")
self.assertEqual(record.table_rows[12][2], "")
self.assertEqual(record.table_rows[12][3], "")
self.assertEqual(record.table_rows[12][4], "0")
self.assertEqual(len(record.table_rows[13]), 5)
self.assertEqual(record.table_rows[13][0], "13")
self.assertEqual(record.table_rows[13][1], "0.108454")
self.assertEqual(record.table_rows[13][2], "1.078072")
self.assertEqual(record.table_rows[13][3], "0.005196")
self.assertEqual(record.table_rows[13][4], "3")
self.assertEqual(len(record.table_rows[14]), 5)
self.assertEqual(record.table_rows[14][0], "14")
self.assertEqual(record.table_rows[14][1], "0.078633")
self.assertEqual(record.table_rows[14][2], "1.056017")
self.assertEqual(record.table_rows[14][3], "0.009165")
self.assertEqual(record.table_rows[14][4], "3")
self.assertEqual(len(record.table_rows[15]), 5)
self.assertEqual(record.table_rows[15][0], "15")
self.assertEqual(record.table_rows[15][1], "0.098571")
self.assertEqual(record.table_rows[15][2], "1.070712")
self.assertEqual(record.table_rows[15][3], "0.007834")
self.assertEqual(record.table_rows[15][4], "3")
self.assertEqual(len(record.table_rows[16]), 5)
self.assertEqual(record.table_rows[16][0], "16")
self.assertEqual(record.table_rows[16][1], "0.044048")
self.assertEqual(record.table_rows[16][2], "1.031003")
self.assertEqual(record.table_rows[16][3], "0.013651")
self.assertEqual(record.table_rows[16][4], "3")
self.assertEqual(len(record.table_rows[17]), 5)
self.assertEqual(record.table_rows[17][0], "17")
self.assertEqual(record.table_rows[17][1], "0.018039")
self.assertEqual(record.table_rows[17][2], "1.012582")
self.assertEqual(record.table_rows[17][3], "0.005471")
self.assertEqual(record.table_rows[17][4], "3")
self.assertEqual(len(record.table_rows[18]), 5)
self.assertEqual(record.table_rows[18][0], "18")
self.assertEqual(record.table_rows[18][1], "-0.088807")
self.assertEqual(record.table_rows[18][2], "0.9403")
self.assertEqual(record.table_rows[18][3], "0.010571")
self.assertEqual(record.table_rows[18][4], "3")
self.assertEqual(len(record.table_rows[19]), 5)
self.assertEqual(record.table_rows[19][0], "19")
self.assertEqual(record.table_rows[19][1], "0.016349")
self.assertEqual(record.table_rows[19][2], "1.011397")
self.assertEqual(record.table_rows[19][3], "0.007113")
self.assertEqual(record.table_rows[19][4], "3")
self.assertEqual(len(record.table_rows[20]), 5)
self.assertEqual(record.table_rows[20][0], "20")
self.assertEqual(record.table_rows[20][1], "0.030977")
self.assertEqual(record.table_rows[20][2], "1.021704")
self.assertEqual(record.table_rows[20][3], "0.016798")
self.assertEqual(record.table_rows[20][4], "3")
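
    # soft_ex_platform.txt: a PLATFORM record for a spotted oligonucleotide array.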
def test_soft_ex_platform(self):
path = "Geo/soft_ex_platform.txt"
with open(path) as handle:
records = Geo.parse(handle)
record = next(records)
self.assertEqual(record.entity_type, "PLATFORM")
self.assertEqual(record.entity_id, "Murine 15K long oligo array version 2.0")
self.assertEqual(len(record.entity_attributes), 12)
self.assertEqual(record.entity_attributes["Platform_title"], "Murine 15K long oligo array version 2.0")
self.assertEqual(record.entity_attributes["Platform_web_link"], "http://www.microarray.protocols.html")
self.assertEqual(record.entity_attributes["platform_table_end"], "")
self.assertEqual(record.entity_attributes["Platform_support"], "glass")
self.assertEqual(record.entity_attributes["Platform_manufacturer"], "Un. London microarray facility")
self.assertEqual(record.entity_attributes["Platform_coating"], "polysine")
self.assertEqual(record.entity_attributes["Platform_technology"], "spotted oligonucleotide")
self.assertEqual(record.entity_attributes["platform_table_begin"], "")
self.assertEqual(len(record.entity_attributes["Platform_manufacture_protocol"]), 12)
self.assertEqual(record.entity_attributes["Platform_manufacture_protocol"][0], "1. Oligos are arrayed in Greiner 384-well flat-bottom plates. Each well contains 600 pmol of 70-mer oligo.")
self.assertEqual(record.entity_attributes["Platform_manufacture_protocol"][1], "2. Resuspend oligos in water to 20 uM and rearray 5 \xb5L into 384-well, Genetix polystyrene V-bottom plates (cat# X6004).")
self.assertEqual(record.entity_attributes["Platform_manufacture_protocol"][2], "3. Allow Genetix plates to dry through passive water evaporation in a protected environment (e.g., chemical hood).")
self.assertEqual(record.entity_attributes["Platform_manufacture_protocol"][3], "4. Before printing, add 5 \xb5L of 1X Printing Buffer to each well. This can be done the night before a print run is started.")
self.assertEqual(record.entity_attributes["Platform_manufacture_protocol"][4], "5. Seal plates with Corning seals.")
self.assertEqual(record.entity_attributes["Platform_manufacture_protocol"][5], "6. Incubate at 37\xb0C for 30 minutes to aid resuspension of DNA.")
self.assertEqual(record.entity_attributes["Platform_manufacture_protocol"][6], "7. Shake plates near maximum rotational speed on flat-bed shaker for 1 minute.")
self.assertEqual(record.entity_attributes["Platform_manufacture_protocol"][7], "8. Centrifuge plates at 2000 rpm for 3 minutes.")
self.assertEqual(record.entity_attributes["Platform_manufacture_protocol"][8], "9. Remove seals and cover with plate lids. Place in appropriate location of plate cassette. This should be done with first plates just before print run is started to minimize evaporation time before printing. For second and third cassettes, wait until 30 minutes before next cassette is needed to begin centrifugation.")
self.assertEqual(record.entity_attributes["Platform_manufacture_protocol"][9], "10. Make sure plates rest behind both holding clips in the cassettes. Push plates back into the cassettes as far as they will go, putting them in the proper position for the server arm.")
self.assertEqual(record.entity_attributes["Platform_manufacture_protocol"][10], "11. After the print run is completed, allow plates to dry through passive evaporation in a protected environment.")
self.assertEqual(record.entity_attributes["Platform_manufacture_protocol"][11], "12. For each subsequent preparation of these plates for a print run, add water to the wells instead of sodium phosphate buffer. The amount of water should be decreased by 0.25 \xb5L per print run, as this is the amount drawn up by the pin capillary during each dip.")
self.assertEqual(record.entity_attributes["Platform_organism"], "Mus musculus")
self.assertEqual(len(record.entity_attributes["Platform_contributor"]), 5)
self.assertEqual(record.entity_attributes["Platform_contributor"][0], "Jane,Doe")
self.assertEqual(record.entity_attributes["Platform_contributor"][1], "John,A,Smith")
self.assertEqual(record.entity_attributes["Platform_contributor"][2], "Hans,van Elton")
self.assertEqual(record.entity_attributes["Platform_contributor"][3], "John,Smithers Jr")
self.assertEqual(record.entity_attributes["Platform_contributor"][4], "Jie,D,Chen")
self.assertEqual(record.entity_attributes["Platform_distribution"], "non-commercial")
self.assertEqual(len(record.col_defs), 6)
self.assertEqual(record.col_defs["Gene_Desc"], "Gene description")
self.assertEqual(record.col_defs["SEQUENCE"], "Probe sequence information")
self.assertEqual(record.col_defs["Gene_Sym"], "Gene symbols")
self.assertEqual(record.col_defs["GB_ACC"], 'GenBank accession number of sequence used to design oligonucleotide probe LINK_PRE:"http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?cmd=Search&db=Nucleotide&term="')
self.assertEqual(record.col_defs["SPOT_ID"], "alternative identifier")
self.assertEqual(record.col_defs["ID"], "")
self.assertEqual(len(record.table_rows), 21)
self.assertEqual(len(record.table_rows[0]), 6)
self.assertEqual(record.table_rows[0][0], "ID")
self.assertEqual(record.table_rows[0][1], "GB_ACC")
self.assertEqual(record.table_rows[0][2], "Gene_Desc")
self.assertEqual(record.table_rows[0][3], "Gene_Sym")
self.assertEqual(record.table_rows[0][4], "SPOT_ID")
self.assertEqual(record.table_rows[0][5], "SEQUENCE")
self.assertEqual(len(record.table_rows[1]), 6)
self.assertEqual(record.table_rows[1][0], "1")
self.assertEqual(record.table_rows[1][1], "U02079")
self.assertEqual(record.table_rows[1][2], "nuclear factor of activated T-cells, cytoplasmic 2")
self.assertEqual(record.table_rows[1][3], "Nfatc2")
self.assertEqual(record.table_rows[1][4], "")
self.assertEqual(record.table_rows[1][5], "ACCTGGATGACGCAGCCACTTCAGAAAGCTGGGTTGGGACAGAAAGGTATATAGAGAGAAAATTTTGGAA")
self.assertEqual(len(record.table_rows[2]), 6)
self.assertEqual(record.table_rows[2][0], "2")
self.assertEqual(record.table_rows[2][1], "NM_008154")
self.assertEqual(record.table_rows[2][2], "G-protein coupled receptor 3")
self.assertEqual(record.table_rows[2][3], "Gpr3")
self.assertEqual(record.table_rows[2][4], "")
self.assertEqual(record.table_rows[2][5], "CTGTACAATGCTCTCACTTACTACTCAGAGACAACGGTAACTCGGACTTATGTGATGCTGGCCTTGGTGT")
self.assertEqual(len(record.table_rows[3]), 6)
self.assertEqual(record.table_rows[3][0], "3")
self.assertEqual(record.table_rows[3][1], "AK015719")
self.assertEqual(record.table_rows[3][2], "tropomodulin 2")
self.assertEqual(record.table_rows[3][3], "Tmod2")
self.assertEqual(record.table_rows[3][4], "")
self.assertEqual(record.table_rows[3][5], "CACCAGGCTCAGTGCCTAGTATCGGCTTCACCTAGTGTGGTTACTCAGGGCACGCAGAGCTACAGAACAC")
self.assertEqual(len(record.table_rows[4]), 6)
self.assertEqual(record.table_rows[4][0], "4")
self.assertEqual(record.table_rows[4][1], "AK003367")
self.assertEqual(record.table_rows[4][2], "mitochondrial ribosomal protein L15")
self.assertEqual(record.table_rows[4][3], "Mrpl15")
self.assertEqual(record.table_rows[4][4], "")
self.assertEqual(record.table_rows[4][5], "CAAGAAGTCTAGAAATTCTGTGCAAGCCTATTCCATTCTTTCTGCGGGGACAACCAATTCCGAAAAGAAT")
self.assertEqual(len(record.table_rows[5]), 6)
self.assertEqual(record.table_rows[5][0], "5")
self.assertEqual(record.table_rows[5][1], "BC003333")
self.assertEqual(record.table_rows[5][2], "RIKEN cDNA 0610033I05 gene")
self.assertEqual(record.table_rows[5][3], "0610033I05Rik")
self.assertEqual(record.table_rows[5][4], "")
self.assertEqual(record.table_rows[5][5], "AGAACTGGGTGGCAGATATCCTAGAGTTTTGACCAACGTTCACAGCACACATATTGATCTTATAGGACCT")
self.assertEqual(len(record.table_rows[6]), 6)
self.assertEqual(record.table_rows[6][0], "6")
self.assertEqual(record.table_rows[6][1], "NM_008462")
self.assertEqual(record.table_rows[6][2], "killer cell lectin-like receptor, subfamily A, member 2")
self.assertEqual(record.table_rows[6][3], "Klra2")
self.assertEqual(record.table_rows[6][4], "")
self.assertEqual(record.table_rows[6][5], "TGAATTGAAGTTCCTTAAATCCCAACTTCAAAGAAACACATACTGGATTTCACTGACACATCATAAAAGC")
self.assertEqual(len(record.table_rows[7]), 6)
self.assertEqual(record.table_rows[7][0], "7")
self.assertEqual(record.table_rows[7][1], "NM_008029")
self.assertEqual(record.table_rows[7][2], "FMS-like tyrosine kinase 4")
self.assertEqual(record.table_rows[7][3], "Flt4")
self.assertEqual(record.table_rows[7][4], "")
self.assertEqual(record.table_rows[7][5], "GAGGTGCTGTGGGATGACCGCCGGGGCATGCGGGTGCCCACTCAACTGTTGCGCGATGCCCTGTACCTGC")
self.assertEqual(len(record.table_rows[8]), 6)
self.assertEqual(record.table_rows[8][0], "8")
self.assertEqual(record.table_rows[8][1], "NM_054088")
self.assertEqual(record.table_rows[8][2], "adiponutrin")
self.assertEqual(record.table_rows[8][3], "Adpn")
self.assertEqual(record.table_rows[8][4], "")
self.assertEqual(record.table_rows[8][5], "GTCTGAGTTCCATTCCAAAGACGAAGTCGTGGATGCCCTGGTGTGTTCCTGCTTCATTCCCCTCTTCTCT")
self.assertEqual(len(record.table_rows[9]), 6)
self.assertEqual(record.table_rows[9][0], "9")
self.assertEqual(record.table_rows[9][1], "NM_009750")
self.assertEqual(record.table_rows[9][2], "nerve growth factor receptor (TNFRSF16) associated protein 1")
self.assertEqual(record.table_rows[9][3], "Ngfrap1")
self.assertEqual(record.table_rows[9][4], "")
self.assertEqual(record.table_rows[9][5], "TACAGCTGAGAAATTGTCTACGCATCCTTATGGGGGAGCTGTCTAACCACCACGATCACCATGATGAATT")
self.assertEqual(len(record.table_rows[10]), 6)
self.assertEqual(record.table_rows[10][0], "10")
self.assertEqual(record.table_rows[10][1], "AB045323")
self.assertEqual(record.table_rows[10][2], "DNA segment, Chr 8, ERATO Doi 594, expressed")
self.assertEqual(record.table_rows[10][3], "D8Ertd594e")
self.assertEqual(record.table_rows[10][4], "")
self.assertEqual(record.table_rows[10][5], "GATTCAGACTCGGGAGGAGCATCCCAACCTCTCCTTGAGGATAAAGGCCTGAGCGATTGCCCTGGGGAGC")
self.assertEqual(len(record.table_rows[11]), 6)
self.assertEqual(record.table_rows[11][0], "11")
self.assertEqual(record.table_rows[11][1], "AK005789")
self.assertEqual(record.table_rows[11][2], "dynein, cytoplasmic, light chain 2B")
self.assertEqual(record.table_rows[11][3], "Dncl2b")
self.assertEqual(record.table_rows[11][4], "")
self.assertEqual(record.table_rows[11][5], "TGCAGAAGGCATTCCAATCCGAACAACCCTGGACAACTCCACAACGGTTCAGTATGCGGGTCTTCTCCAC")
self.assertEqual(len(record.table_rows[12]), 6)
self.assertEqual(record.table_rows[12][0], "12")
self.assertEqual(record.table_rows[12][1], "NM_010517")
self.assertEqual(record.table_rows[12][2], "insulin-like growth factor binding protein 4")
self.assertEqual(record.table_rows[12][3], "Igfbp4")
self.assertEqual(record.table_rows[12][4], "")
self.assertEqual(record.table_rows[12][5], "GGAGAAGCTGGCGCGCTGCCGCCCCCCCGTGGGTTGCGAGGAGTTGGTGCGGGAGCCAGGCTGCGGTTGT")
self.assertEqual(len(record.table_rows[13]), 6)
self.assertEqual(record.table_rows[13][0], "13")
self.assertEqual(record.table_rows[13][1], "AK010722")
self.assertEqual(record.table_rows[13][2], "RIKEN cDNA 2410075D05 gene")
self.assertEqual(record.table_rows[13][3], "2410075D05Rik")
self.assertEqual(record.table_rows[13][4], "")
self.assertEqual(record.table_rows[13][5], "GGAGCATCTGGAGTTCCGCTTACCGGAAATAAAGTCTTTACTATCGGTGATTGGAGGGCAGTTCACTAAC")
self.assertEqual(len(record.table_rows[14]), 6)
self.assertEqual(record.table_rows[14][0], "14")
self.assertEqual(record.table_rows[14][1], "AK003755")
self.assertEqual(record.table_rows[14][2], "DNA segment, Chr 4, ERATO Doi 421, expressed")
self.assertEqual(record.table_rows[14][3], "D4Ertd421e")
self.assertEqual(record.table_rows[14][4], "")
self.assertEqual(record.table_rows[14][5], "AGCAAAGAGATCTCCCTCAGTGTGCCCATAGGTGGCGGTGCGAGCTTGCGGTTATTGGCCAGTGACTTGC")
self.assertEqual(len(record.table_rows[15]), 6)
self.assertEqual(record.table_rows[15][0], "15")
self.assertEqual(record.table_rows[15][1], "BC003241")
self.assertEqual(record.table_rows[15][2], "cleavage stimulation factor, 3' pre-RNA, subunit 3")
self.assertEqual(record.table_rows[15][3], "Cstf3")
self.assertEqual(record.table_rows[15][4], "")
self.assertEqual(record.table_rows[15][5], "AAATTAGAAGAAAATCCATATGACCTTGATGCTTGGAGCATTCTCATTCGAGAGGCACAGAATCAACCTA")
self.assertEqual(len(record.table_rows[16]), 6)
self.assertEqual(record.table_rows[16][0], "16")
self.assertEqual(record.table_rows[16][1], "AK004937")
self.assertEqual(record.table_rows[16][2], "RIKEN cDNA 1300007O09 gene")
self.assertEqual(record.table_rows[16][3], "1300007O09Rik")
self.assertEqual(record.table_rows[16][4], "")
self.assertEqual(record.table_rows[16][5], "CAGACACAAACCCTAGGTTGTATTGTAGACCGGAGTTTAAGCAGGCACTACCTGTCTGTCTTTTCTTCAT")
self.assertEqual(len(record.table_rows[17]), 6)
self.assertEqual(record.table_rows[17][0], "17")
self.assertEqual(record.table_rows[17][1], "AK004524")
self.assertEqual(record.table_rows[17][2], "unnamed protein product; hypothetical SOCS domain")
self.assertEqual(record.table_rows[17][3], "")
self.assertEqual(record.table_rows[17][4], "")
self.assertEqual(record.table_rows[17][5], "CGGAGCCCTGCGCGCCCAGAGCCCCCTCCCACCCGCTTCCACCAAGTGCATGGAGCCAACATCCGCATGG")
self.assertEqual(len(record.table_rows[18]), 6)
self.assertEqual(record.table_rows[18][0], "18")
self.assertEqual(record.table_rows[18][1], "NM_025999")
self.assertEqual(record.table_rows[18][2], "RIKEN cDNA 2610110L04 gene")
self.assertEqual(record.table_rows[18][3], "2610110L04Rik")
self.assertEqual(record.table_rows[18][4], "")
self.assertEqual(record.table_rows[18][5], "TGCATTGATAAATGGAGTGATCGACACAGGAACTGCCCCATTTGTCGCCTACAGATGACTGGAGCAAATG")
self.assertEqual(len(record.table_rows[19]), 6)
self.assertEqual(record.table_rows[19][0], "19")
self.assertEqual(record.table_rows[19][1], "")
self.assertEqual(record.table_rows[19][2], "")
self.assertEqual(record.table_rows[19][3], "")
self.assertEqual(record.table_rows[19][4], "-- CONTROL")
self.assertEqual(record.table_rows[19][5], "")
self.assertEqual(len(record.table_rows[20]), 6)
self.assertEqual(record.table_rows[20][0], "20")
self.assertEqual(record.table_rows[20][1], "NM_023120")
self.assertEqual(record.table_rows[20][2], "guanine nucleotide binding protein (G protein), beta polypeptide 1-like")
self.assertEqual(record.table_rows[20][3], "Gnb1l")
self.assertEqual(record.table_rows[20][4], "")
self.assertEqual(record.table_rows[20][5], "ACCGCCTGGTCCCAGATTTGTCCTCCGAGGCACACAGTCGGCTGTGAACACGCTCCATTTCTGCCCACCA")
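
    # GSM700.txt: a SAGE SAMPLE record (SAGE_Duke_H247_Hypoxia) with TAG/COUNT/TPM columns.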
def test_GSM700(self):
path = "Geo/GSM700.txt"
with open(path) as handle:
records = Geo.parse(handle)
record = next(records)
self.assertEqual(record.entity_type, "SAMPLE")
self.assertEqual(record.entity_id, "GSM700")
self.assertEqual(len(record.entity_attributes), 20)
self.assertEqual(record.entity_attributes["Sample_submitter_institute"], "National Cancer Institute")
self.assertEqual(record.entity_attributes["Sample_submitter_department"], "Cancer Genome Anatomy Project")
self.assertEqual(record.entity_attributes["Sample_submitter_web_link"], "http://cgap.nci.nih.gov/")
self.assertEqual(len(record.entity_attributes["Sample_description"]), 14)
self.assertEqual(record.entity_attributes["Sample_description"][0], "This library represents a Cancer Genome Anatomy Project library, which was either produced through CGAP funding, or donated to CGAP.")
self.assertEqual(record.entity_attributes["Sample_description"][1], "The Cancer Genome Anatomy Project (CGAP: http://cgap.nci.nih.gov) is an interdisciplinary program established and administered by the National Cancer Institute (NCI: http://www.nci.nih.gov) to generate the information and technological tools needed to decipher the molecular anatomy of the cancer cell.")
self.assertEqual(record.entity_attributes["Sample_description"][2], "Cell line grown under 1.5% oxygen conditions for 24 hours prior to harvesting in zinc option media with 10% RBS and harvested at passage 102. Library constructed in the laboratory of G. Riggins, M.D., Ph.D. (Duke University).")
self.assertEqual(record.entity_attributes["Sample_description"][3], "Organ: brain")
self.assertEqual(record.entity_attributes["Sample_description"][4], "Tissue_type: glioblastoma multiforme")
self.assertEqual(record.entity_attributes["Sample_description"][5], "Cell_line: H247")
self.assertEqual(record.entity_attributes["Sample_description"][6], "Lab host: DH10B")
self.assertEqual(record.entity_attributes["Sample_description"][7], "Vector: pZErO-1")
self.assertEqual(record.entity_attributes["Sample_description"][8], "Vector type: plasmid")
self.assertEqual(record.entity_attributes["Sample_description"][9], "R. Site 1: Sph1")
self.assertEqual(record.entity_attributes["Sample_description"][10], "R. Site 2: Sph1")
self.assertEqual(record.entity_attributes["Sample_description"][11], "Library treatment: non-normalized")
self.assertEqual(record.entity_attributes["Sample_description"][12], "Tissue description: Brain, Duke glioblastoma multiforme cell line, H247, grown under 1.5% oxygen conditions for 24 hours prior to harvesting.")
self.assertEqual(record.entity_attributes["Sample_description"][13], "Tissue")
self.assertEqual(len(record.entity_attributes["Sample_author"]), 2)
self.assertEqual(record.entity_attributes["Sample_author"][0], "Gregory,J,Riggins")
self.assertEqual(record.entity_attributes["Sample_author"][1], "Robert,L,Strausberg")
self.assertEqual(record.entity_attributes["Sample_web_link"], "http://cgap.nci.nih.gov")
self.assertEqual(record.entity_attributes["Sample_submitter_phone"], "301-496-1550")
self.assertEqual(record.entity_attributes["Sample_series_id"], "GSE14")
self.assertEqual(record.entity_attributes["Sample_tag_count"], "72031")
self.assertEqual(record.entity_attributes["Sample_type"], "sage")
self.assertEqual(record.entity_attributes["Sample_submitter_name"], "Robert,L,Strausberg")
self.assertEqual(record.entity_attributes["Sample_platform_id"], "GPL4")
self.assertEqual(record.entity_attributes["Sample_submitter_city"], "Bethesda,MD,20892,USA")
self.assertEqual(record.entity_attributes["Sample_status"], "Public on Nov 28 2001")
self.assertEqual(record.entity_attributes["Sample_anchor"], "NlaIII")
self.assertEqual(record.entity_attributes["Sample_title"], "SAGE_Duke_H247_Hypoxia")
self.assertEqual(record.entity_attributes["Sample_organism"], "Homo sapiens")
self.assertEqual(record.entity_attributes["Sample_target_source"], "Brain, glioblastoma multiforme, cell-line H247")
self.assertEqual(record.entity_attributes["Sample_submission_date"], "Nov 28 2001")
self.assertEqual(record.entity_attributes["Sample_submitter_email"], "[email protected]")
self.assertEqual(len(record.col_defs), 3)
self.assertEqual(record.col_defs["COUNT"], "Absolute tag count")
self.assertEqual(record.col_defs["TPM"], "Tags per million, or (1000000*COUNT)/(Total tags)")
self.assertEqual(record.col_defs["TAG"], 'Ten base SAGE tag, LINK_PRE:"http://www.ncbi.nlm.nih.gov/SAGE/SAGEtag.cgi?tag="')
self.assertEqual(len(record.table_rows), 21)
self.assertEqual(len(record.table_rows[0]), 3)
self.assertEqual(record.table_rows[0][0], "TAG")
self.assertEqual(record.table_rows[0][1], "COUNT")
self.assertEqual(record.table_rows[0][2], "TPM")
self.assertEqual(len(record.table_rows[1]), 3)
self.assertEqual(record.table_rows[1][0], "TCCAAATCGA")
self.assertEqual(record.table_rows[1][1], "520")
self.assertEqual(record.table_rows[1][2], "7219.11")
self.assertEqual(len(record.table_rows[2]), 3)
self.assertEqual(record.table_rows[2][0], "TACCATCAAT")
self.assertEqual(record.table_rows[2][1], "434")
self.assertEqual(record.table_rows[2][2], "6025.18")
self.assertEqual(len(record.table_rows[3]), 3)
self.assertEqual(record.table_rows[3][0], "TTGGGGTTTC")
self.assertEqual(record.table_rows[3][1], "389")
self.assertEqual(record.table_rows[3][2], "5400.45")
self.assertEqual(len(record.table_rows[4]), 3)
self.assertEqual(record.table_rows[4][0], "CCCATCGTCC")
self.assertEqual(record.table_rows[4][1], "367")
self.assertEqual(record.table_rows[4][2], "5095.03")
self.assertEqual(len(record.table_rows[5]), 3)
self.assertEqual(record.table_rows[5][0], "GTGAAACCCC")
self.assertEqual(record.table_rows[5][1], "365")
self.assertEqual(record.table_rows[5][2], "5067.26")
self.assertEqual(len(record.table_rows[6]), 3)
self.assertEqual(record.table_rows[6][0], "GGGGAAATCG")
self.assertEqual(record.table_rows[6][1], "357")
self.assertEqual(record.table_rows[6][2], "4956.2")
self.assertEqual(len(record.table_rows[7]), 3)
self.assertEqual(record.table_rows[7][0], "CCTGTAATCC")
self.assertEqual(record.table_rows[7][1], "346")
self.assertEqual(record.table_rows[7][2], "4803.49")
self.assertEqual(len(record.table_rows[8]), 3)
self.assertEqual(record.table_rows[8][0], "TGATTTCACT")
self.assertEqual(record.table_rows[8][1], "334")
self.assertEqual(record.table_rows[8][2], "4636.89")
self.assertEqual(len(record.table_rows[9]), 3)
self.assertEqual(record.table_rows[9][0], "TGTGTTGAGA")
self.assertEqual(record.table_rows[9][1], "315")
self.assertEqual(record.table_rows[9][2], "4373.12")
self.assertEqual(len(record.table_rows[10]), 3)
self.assertEqual(record.table_rows[10][0], "GCCCCCAATA")
self.assertEqual(record.table_rows[10][1], "303")
self.assertEqual(record.table_rows[10][2], "4206.52")
self.assertEqual(len(record.table_rows[11]), 3)
self.assertEqual(record.table_rows[11][0], "CTAAGACTTC")
self.assertEqual(record.table_rows[11][1], "279")
self.assertEqual(record.table_rows[11][2], "3873.33")
self.assertEqual(len(record.table_rows[12]), 3)
self.assertEqual(record.table_rows[12][0], "GCGACCGTCA")
self.assertEqual(record.table_rows[12][1], "276")
self.assertEqual(record.table_rows[12][2], "3831.68")
self.assertEqual(len(record.table_rows[13]), 3)
self.assertEqual(record.table_rows[13][0], "TTGGTCCTCT")
self.assertEqual(record.table_rows[13][1], "276")
self.assertEqual(record.table_rows[13][2], "3831.68")
self.assertEqual(len(record.table_rows[14]), 3)
self.assertEqual(record.table_rows[14][0], "CCTAGCTGGA")
self.assertEqual(record.table_rows[14][1], "268")
self.assertEqual(record.table_rows[14][2], "3720.62")
self.assertEqual(len(record.table_rows[15]), 3)
self.assertEqual(record.table_rows[15][0], "GATGAGGAGA")
self.assertEqual(record.table_rows[15][1], "251")
self.assertEqual(record.table_rows[15][2], "3484.61")
self.assertEqual(len(record.table_rows[16]), 3)
self.assertEqual(record.table_rows[16][0], "ACTTTTTCAA")
self.assertEqual(record.table_rows[16][1], "244")
self.assertEqual(record.table_rows[16][2], "3387.43")
self.assertEqual(len(record.table_rows[17]), 3)
self.assertEqual(record.table_rows[17][0], "CCACTGCACT")
self.assertEqual(record.table_rows[17][1], "223")
self.assertEqual(record.table_rows[17][2], "3095.89")
self.assertEqual(len(record.table_rows[18]), 3)
self.assertEqual(record.table_rows[18][0], "GTGTGTTTGT")
self.assertEqual(record.table_rows[18][1], "223")
self.assertEqual(record.table_rows[18][2], "3095.89")
self.assertEqual(len(record.table_rows[19]), 3)
self.assertEqual(record.table_rows[19][0], "GAAATACAGT")
self.assertEqual(record.table_rows[19][1], "218")
self.assertEqual(record.table_rows[19][2], "3026.47")
self.assertEqual(len(record.table_rows[20]), 3)
self.assertEqual(record.table_rows[20][0], "GCTTTATTTG")
self.assertEqual(record.table_rows[20][1], "218")
self.assertEqual(record.table_rows[20][2], "3026.47")
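
    # GSM645.txt: a single-channel Affymetrix SAMPLE record with a 14-column data table.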
def test_GSM645(self):
path = "Geo/GSM645.txt"
with open(path) as handle:
records = Geo.parse(handle)
record = next(records)
self.assertEqual(record.entity_type, "SAMPLE")
self.assertEqual(record.entity_id, "GSM645")
self.assertEqual(len(record.entity_attributes), 17)
self.assertEqual(record.entity_attributes["Sample_submitter_institute"], "Max von Pettenkofer Institut")
self.assertEqual(record.entity_attributes["Sample_submitter_department"], "Bacteriology")
self.assertEqual(len(record.entity_attributes["Sample_author"]), 4)
self.assertEqual(record.entity_attributes["Sample_author"][0], "Reinhard,,Hoffmann")
self.assertEqual(record.entity_attributes["Sample_author"][1], "Thomas,,Seidl")
self.assertEqual(record.entity_attributes["Sample_author"][2], "Ton,,Rolink")
self.assertEqual(record.entity_attributes["Sample_author"][3], "Fritz,,Melchers")
self.assertEqual(record.entity_attributes["Sample_submitter_phone"], "+49-89-5160-5424")
self.assertEqual(record.entity_attributes["Sample_series_id"], "GSE13")
self.assertEqual(record.entity_attributes["Sample_description"], "B220+CD25+sIg- Large Pre BII cells sorted out of mouse bone marrow, sort no. 8")
self.assertEqual(record.entity_attributes["Sample_type"], "single channel")
self.assertEqual(record.entity_attributes["Sample_submitter_name"], "Reinhard,,Hoffmann")
self.assertEqual(record.entity_attributes["Sample_platform_id"], "GPL22")
self.assertEqual(record.entity_attributes["Sample_submitter_city"], "Munich,80336,Germany")
self.assertEqual(record.entity_attributes["Sample_status"], "Public on Dec 17 2001")
self.assertEqual(record.entity_attributes["Sample_submitter_email"], "[email protected]")
self.assertEqual(record.entity_attributes["Sample_title"], "Large Pre-BII cells 8b")
self.assertEqual(record.entity_attributes["Sample_organism"], "Mus musculus")
self.assertEqual(record.entity_attributes["Sample_target_source"], "Large Pre-BII cells")
self.assertEqual(record.entity_attributes["Sample_submission_date"], "Nov 27 2001")
self.assertEqual(record.entity_attributes["Sample_submitter_address"], "Pettenkoferstr. 9a")
self.assertEqual(len(record.col_defs), 14)
self.assertEqual(record.col_defs["PAIRS"], "number of probe set specific probe pairs on the array")
self.assertEqual(record.col_defs["ABS_CALL"], "Whether a probe set is present, marginal, or absent; see Affymetrix Literature")
self.assertEqual(record.col_defs["PM Excess"], "number of probe pairs where PM/MM exceeds the ratio limit (10 by default)")
self.assertEqual(record.col_defs["POSITIVE"], "number of poisitive probe pairs")
self.assertEqual(record.col_defs["MM Excess"], "Number of probe peirs where MM/PM exceeds 1/ratio limit (10 by default)")
self.assertEqual(record.col_defs["ID_REF"], "Affymetrix Probe Set Identifier")
self.assertEqual(record.col_defs["NEGATIVE"], "number of negative probe pairs")
self.assertEqual(record.col_defs["VALUE"], "Average Difference Intensity")
self.assertEqual(record.col_defs["POS_FRACTION"], "Positive/Pairs Used")
self.assertEqual(record.col_defs["Experiment Name"], "Experiment Name")
self.assertEqual(record.col_defs["POS/NEG"], "Positive/Negative")
self.assertEqual(record.col_defs["PAIRS_USED"], "")
self.assertEqual(record.col_defs["Log Avg"], "")
self.assertEqual(record.col_defs["PAIRS_IN_AVG"], "Trimmed probe pair set")
self.assertEqual(len(record.table_rows), 21)
self.assertEqual(len(record.table_rows[0]), 14)
self.assertEqual(record.table_rows[0][0], "ID_REF")
self.assertEqual(record.table_rows[0][1], "Experiment Name")
self.assertEqual(record.table_rows[0][2], "POSITIVE")
self.assertEqual(record.table_rows[0][3], "NEGATIVE")
self.assertEqual(record.table_rows[0][4], "PAIRS")
self.assertEqual(record.table_rows[0][5], "PAIRS_USED")
self.assertEqual(record.table_rows[0][6], "PAIRS_IN_AVG")
self.assertEqual(record.table_rows[0][7], "POS_FRACTION")
self.assertEqual(record.table_rows[0][8], "Log Avg")
self.assertEqual(record.table_rows[0][9], "PM Excess")
self.assertEqual(record.table_rows[0][10], "MM Excess")
self.assertEqual(record.table_rows[0][11], "POS/NEG")
self.assertEqual(record.table_rows[0][12], "VALUE")
self.assertEqual(record.table_rows[0][13], "ABS_CALL")
self.assertEqual(len(record.table_rows[1]), 14)
self.assertEqual(record.table_rows[1][0], "IL2_at")
self.assertEqual(record.table_rows[1][1], "RHMu8LarB")
self.assertEqual(record.table_rows[1][2], "4")
self.assertEqual(record.table_rows[1][3], "4")
self.assertEqual(record.table_rows[1][4], "19")
self.assertEqual(record.table_rows[1][5], "19")
self.assertEqual(record.table_rows[1][6], "19")
self.assertEqual(record.table_rows[1][7], "0.21")
self.assertEqual(record.table_rows[1][8], "-0.58")
self.assertEqual(record.table_rows[1][9], "0")
self.assertEqual(record.table_rows[1][10], "0")
self.assertEqual(record.table_rows[1][11], "1.0")
self.assertEqual(record.table_rows[1][12], "-78")
self.assertEqual(record.table_rows[1][13], "A")
self.assertEqual(len(record.table_rows[2]), 14)
self.assertEqual(record.table_rows[2][0], "IL10_at")
self.assertEqual(record.table_rows[2][1], "RHMu8LarB")
self.assertEqual(record.table_rows[2][2], "7")
self.assertEqual(record.table_rows[2][3], "4")
self.assertEqual(record.table_rows[2][4], "20")
self.assertEqual(record.table_rows[2][5], "20")
self.assertEqual(record.table_rows[2][6], "18")
self.assertEqual(record.table_rows[2][7], "0.35")
self.assertEqual(record.table_rows[2][8], "1.87")
self.assertEqual(record.table_rows[2][9], "1")
self.assertEqual(record.table_rows[2][10], "0")
self.assertEqual(record.table_rows[2][11], "1.8")
self.assertEqual(record.table_rows[2][12], "161")
self.assertEqual(record.table_rows[2][13], "A")
self.assertEqual(len(record.table_rows[3]), 14)
self.assertEqual(record.table_rows[3][0], "GMCSF_at")
self.assertEqual(record.table_rows[3][1], "RHMu8LarB")
self.assertEqual(record.table_rows[3][2], "4")
self.assertEqual(record.table_rows[3][3], "4")
self.assertEqual(record.table_rows[3][4], "20")
self.assertEqual(record.table_rows[3][5], "20")
self.assertEqual(record.table_rows[3][6], "19")
self.assertEqual(record.table_rows[3][7], "0.20")
self.assertEqual(record.table_rows[3][8], "0.39")
self.assertEqual(record.table_rows[3][9], "0")
self.assertEqual(record.table_rows[3][10], "0")
self.assertEqual(record.table_rows[3][11], "1.0")
self.assertEqual(record.table_rows[3][12], "-11")
self.assertEqual(record.table_rows[3][13], "A")
self.assertEqual(len(record.table_rows[4]), 14)
self.assertEqual(record.table_rows[4][0], "TNFRII_at")
self.assertEqual(record.table_rows[4][1], "RHMu8LarB")
self.assertEqual(record.table_rows[4][2], "2")
self.assertEqual(record.table_rows[4][3], "2")
self.assertEqual(record.table_rows[4][4], "20")
self.assertEqual(record.table_rows[4][5], "20")
self.assertEqual(record.table_rows[4][6], "18")
self.assertEqual(record.table_rows[4][7], "0.10")
self.assertEqual(record.table_rows[4][8], "0.48")
self.assertEqual(record.table_rows[4][9], "0")
self.assertEqual(record.table_rows[4][10], "0")
self.assertEqual(record.table_rows[4][11], "1.0")
self.assertEqual(record.table_rows[4][12], "52")
self.assertEqual(record.table_rows[4][13], "A")
self.assertEqual(len(record.table_rows[5]), 14)
self.assertEqual(record.table_rows[5][0], "MIP1-B_at")
self.assertEqual(record.table_rows[5][1], "RHMu8LarB")
self.assertEqual(record.table_rows[5][2], "6")
self.assertEqual(record.table_rows[5][3], "4")
self.assertEqual(record.table_rows[5][4], "20")
self.assertEqual(record.table_rows[5][5], "20")
self.assertEqual(record.table_rows[5][6], "19")
self.assertEqual(record.table_rows[5][7], "0.30")
self.assertEqual(record.table_rows[5][8], "0.43")
self.assertEqual(record.table_rows[5][9], "0")
self.assertEqual(record.table_rows[5][10], "0")
self.assertEqual(record.table_rows[5][11], "1.5")
self.assertEqual(record.table_rows[5][12], "373")
self.assertEqual(record.table_rows[5][13], "A")
self.assertEqual(len(record.table_rows[6]), 14)
self.assertEqual(record.table_rows[6][0], "IL4_at")
self.assertEqual(record.table_rows[6][1], "RHMu8LarB")
self.assertEqual(record.table_rows[6][2], "3")
self.assertEqual(record.table_rows[6][3], "3")
self.assertEqual(record.table_rows[6][4], "20")
self.assertEqual(record.table_rows[6][5], "20")
self.assertEqual(record.table_rows[6][6], "19")
self.assertEqual(record.table_rows[6][7], "0.15")
self.assertEqual(record.table_rows[6][8], "0.29")
self.assertEqual(record.table_rows[6][9], "0")
self.assertEqual(record.table_rows[6][10], "0")
self.assertEqual(record.table_rows[6][11], "1.0")
self.assertEqual(record.table_rows[6][12], "27")
self.assertEqual(record.table_rows[6][13], "A")
self.assertEqual(len(record.table_rows[7]), 14)
self.assertEqual(record.table_rows[7][0], "IL12_P40_at")
self.assertEqual(record.table_rows[7][1], "RHMu8LarB")
self.assertEqual(record.table_rows[7][2], "3")
self.assertEqual(record.table_rows[7][3], "5")
self.assertEqual(record.table_rows[7][4], "20")
self.assertEqual(record.table_rows[7][5], "20")
self.assertEqual(record.table_rows[7][6], "19")
self.assertEqual(record.table_rows[7][7], "0.15")
self.assertEqual(record.table_rows[7][8], "-0.22")
self.assertEqual(record.table_rows[7][9], "0")
self.assertEqual(record.table_rows[7][10], "0")
self.assertEqual(record.table_rows[7][11], "0.6")
self.assertEqual(record.table_rows[7][12], "-163")
self.assertEqual(record.table_rows[7][13], "A")
self.assertEqual(len(record.table_rows[8]), 14)
self.assertEqual(record.table_rows[8][0], "TNFa_at")
self.assertEqual(record.table_rows[8][1], "RHMu8LarB")
self.assertEqual(record.table_rows[8][2], "3")
self.assertEqual(record.table_rows[8][3], "4")
self.assertEqual(record.table_rows[8][4], "20")
self.assertEqual(record.table_rows[8][5], "20")
self.assertEqual(record.table_rows[8][6], "20")
self.assertEqual(record.table_rows[8][7], "0.15")
self.assertEqual(record.table_rows[8][8], "-0.57")
self.assertEqual(record.table_rows[8][9], "1")
self.assertEqual(record.table_rows[8][10], "0")
self.assertEqual(record.table_rows[8][11], "0.8")
self.assertEqual(record.table_rows[8][12], "-95")
self.assertEqual(record.table_rows[8][13], "A")
self.assertEqual(len(record.table_rows[9]), 14)
self.assertEqual(record.table_rows[9][0], "TCRa_at")
self.assertEqual(record.table_rows[9][1], "RHMu8LarB")
self.assertEqual(record.table_rows[9][2], "1")
self.assertEqual(record.table_rows[9][3], "4")
self.assertEqual(record.table_rows[9][4], "20")
self.assertEqual(record.table_rows[9][5], "20")
self.assertEqual(record.table_rows[9][6], "19")
self.assertEqual(record.table_rows[9][7], "0.05")
self.assertEqual(record.table_rows[9][8], "-0.50")
self.assertEqual(record.table_rows[9][9], "0")
self.assertEqual(record.table_rows[9][10], "0")
self.assertEqual(record.table_rows[9][11], "0.3")
self.assertEqual(record.table_rows[9][12], "-186")
self.assertEqual(record.table_rows[9][13], "A")
self.assertEqual(len(record.table_rows[10]), 14)
self.assertEqual(record.table_rows[10][0], "AFFX-BioB-5_at")
self.assertEqual(record.table_rows[10][1], "RHMu8LarB")
self.assertEqual(record.table_rows[10][2], "0")
self.assertEqual(record.table_rows[10][3], "1")
self.assertEqual(record.table_rows[10][4], "20")
self.assertEqual(record.table_rows[10][5], "20")
self.assertEqual(record.table_rows[10][6], "19")
self.assertEqual(record.table_rows[10][7], "0.00")
self.assertEqual(record.table_rows[10][8], "0.35")
self.assertEqual(record.table_rows[10][9], "0")
self.assertEqual(record.table_rows[10][10], "0")
self.assertEqual(record.table_rows[10][11], "0.0")
self.assertEqual(record.table_rows[10][12], "120")
self.assertEqual(record.table_rows[10][13], "A")
self.assertEqual(len(record.table_rows[11]), 14)
self.assertEqual(record.table_rows[11][0], "AFFX-BioB-M_at")
self.assertEqual(record.table_rows[11][1], "RHMu8LarB")
self.assertEqual(record.table_rows[11][2], "0")
self.assertEqual(record.table_rows[11][3], "1")
self.assertEqual(record.table_rows[11][4], "20")
self.assertEqual(record.table_rows[11][5], "20")
self.assertEqual(record.table_rows[11][6], "19")
self.assertEqual(record.table_rows[11][7], "0.00")
self.assertEqual(record.table_rows[11][8], "0.02")
self.assertEqual(record.table_rows[11][9], "0")
self.assertEqual(record.table_rows[11][10], "0")
self.assertEqual(record.table_rows[11][11], "0.0")
self.assertEqual(record.table_rows[11][12], "-13")
self.assertEqual(record.table_rows[11][13], "A")
self.assertEqual(len(record.table_rows[12]), 14)
self.assertEqual(record.table_rows[12][0], "AFFX-BioB-3_at")
self.assertEqual(record.table_rows[12][1], "RHMu8LarB")
self.assertEqual(record.table_rows[12][2], "2")
self.assertEqual(record.table_rows[12][3], "0")
self.assertEqual(record.table_rows[12][4], "20")
self.assertEqual(record.table_rows[12][5], "20")
self.assertEqual(record.table_rows[12][6], "19")
self.assertEqual(record.table_rows[12][7], "0.10")
self.assertEqual(record.table_rows[12][8], "0.38")
self.assertEqual(record.table_rows[12][9], "0")
self.assertEqual(record.table_rows[12][10], "0")
self.assertEqual(record.table_rows[12][11], "Undef")
self.assertEqual(record.table_rows[12][12], "136")
self.assertEqual(record.table_rows[12][13], "A")
self.assertEqual(len(record.table_rows[13]), 14)
self.assertEqual(record.table_rows[13][0], "AFFX-BioC-5_at")
self.assertEqual(record.table_rows[13][1], "RHMu8LarB")
self.assertEqual(record.table_rows[13][2], "9")
self.assertEqual(record.table_rows[13][3], "0")
self.assertEqual(record.table_rows[13][4], "20")
self.assertEqual(record.table_rows[13][5], "20")
self.assertEqual(record.table_rows[13][6], "20")
self.assertEqual(record.table_rows[13][7], "0.45")
self.assertEqual(record.table_rows[13][8], "1.33")
self.assertEqual(record.table_rows[13][9], "0")
self.assertEqual(record.table_rows[13][10], "0")
self.assertEqual(record.table_rows[13][11], "Undef")
self.assertEqual(record.table_rows[13][12], "606")
self.assertEqual(record.table_rows[13][13], "P")
self.assertEqual(len(record.table_rows[14]), 14)
self.assertEqual(record.table_rows[14][0], "AFFX-BioC-3_at")
self.assertEqual(record.table_rows[14][1], "RHMu8LarB")
self.assertEqual(record.table_rows[14][2], "2")
self.assertEqual(record.table_rows[14][3], "0")
self.assertEqual(record.table_rows[14][4], "20")
self.assertEqual(record.table_rows[14][5], "20")
self.assertEqual(record.table_rows[14][6], "19")
self.assertEqual(record.table_rows[14][7], "0.10")
self.assertEqual(record.table_rows[14][8], "0.64")
self.assertEqual(record.table_rows[14][9], "0")
self.assertEqual(record.table_rows[14][10], "0")
self.assertEqual(record.table_rows[14][11], "Undef")
self.assertEqual(record.table_rows[14][12], "257")
self.assertEqual(record.table_rows[14][13], "A")
self.assertEqual(len(record.table_rows[15]), 14)
self.assertEqual(record.table_rows[15][0], "AFFX-BioDn-5_at")
self.assertEqual(record.table_rows[15][1], "RHMu8LarB")
self.assertEqual(record.table_rows[15][2], "8")
self.assertEqual(record.table_rows[15][3], "0")
self.assertEqual(record.table_rows[15][4], "20")
self.assertEqual(record.table_rows[15][5], "20")
self.assertEqual(record.table_rows[15][6], "20")
self.assertEqual(record.table_rows[15][7], "0.40")
self.assertEqual(record.table_rows[15][8], "1.23")
self.assertEqual(record.table_rows[15][9], "0")
self.assertEqual(record.table_rows[15][10], "0")
self.assertEqual(record.table_rows[15][11], "Undef")
self.assertEqual(record.table_rows[15][12], "380")
self.assertEqual(record.table_rows[15][13], "P")
self.assertEqual(len(record.table_rows[16]), 14)
self.assertEqual(record.table_rows[16][0], "AFFX-BioDn-3_at")
self.assertEqual(record.table_rows[16][1], "RHMu8LarB")
self.assertEqual(record.table_rows[16][2], "16")
self.assertEqual(record.table_rows[16][3], "0")
self.assertEqual(record.table_rows[16][4], "20")
self.assertEqual(record.table_rows[16][5], "20")
self.assertEqual(record.table_rows[16][6], "19")
self.assertEqual(record.table_rows[16][7], "0.80")
self.assertEqual(record.table_rows[16][8], "2.79")
self.assertEqual(record.table_rows[16][9], "0")
self.assertEqual(record.table_rows[16][10], "0")
self.assertEqual(record.table_rows[16][11], "Undef")
self.assertEqual(record.table_rows[16][12], "2764")
self.assertEqual(record.table_rows[16][13], "P")
self.assertEqual(len(record.table_rows[17]), 14)
self.assertEqual(record.table_rows[17][0], "AFFX-CreX-5_at")
self.assertEqual(record.table_rows[17][1], "RHMu8LarB")
self.assertEqual(record.table_rows[17][2], "19")
self.assertEqual(record.table_rows[17][3], "0")
self.assertEqual(record.table_rows[17][4], "20")
self.assertEqual(record.table_rows[17][5], "20")
self.assertEqual(record.table_rows[17][6], "19")
self.assertEqual(record.table_rows[17][7], "0.95")
self.assertEqual(record.table_rows[17][8], "5.65")
self.assertEqual(record.table_rows[17][9], "0")
self.assertEqual(record.table_rows[17][10], "0")
self.assertEqual(record.table_rows[17][11], "Undef")
self.assertEqual(record.table_rows[17][12], "4391")
self.assertEqual(record.table_rows[17][13], "P")
self.assertEqual(len(record.table_rows[18]), 14)
self.assertEqual(record.table_rows[18][0], "AFFX-CreX-3_at")
self.assertEqual(record.table_rows[18][1], "RHMu8LarB")
self.assertEqual(record.table_rows[18][2], "19")
self.assertEqual(record.table_rows[18][3], "0")
self.assertEqual(record.table_rows[18][4], "20")
self.assertEqual(record.table_rows[18][5], "20")
self.assertEqual(record.table_rows[18][6], "20")
self.assertEqual(record.table_rows[18][7], "0.95")
self.assertEqual(record.table_rows[18][8], "6.42")
self.assertEqual(record.table_rows[18][9], "2")
self.assertEqual(record.table_rows[18][10], "0")
self.assertEqual(record.table_rows[18][11], "Undef")
self.assertEqual(record.table_rows[18][12], "10787")
self.assertEqual(record.table_rows[18][13], "P")
self.assertEqual(len(record.table_rows[19]), 14)
self.assertEqual(record.table_rows[19][0], "AFFX-BioB-5_st")
self.assertEqual(record.table_rows[19][1], "RHMu8LarB")
self.assertEqual(record.table_rows[19][2], "5")
self.assertEqual(record.table_rows[19][3], "3")
self.assertEqual(record.table_rows[19][4], "20")
self.assertEqual(record.table_rows[19][5], "20")
self.assertEqual(record.table_rows[19][6], "19")
self.assertEqual(record.table_rows[19][7], "0.25")
self.assertEqual(record.table_rows[19][8], "0.48")
self.assertEqual(record.table_rows[19][9], "0")
self.assertEqual(record.table_rows[19][10], "0")
self.assertEqual(record.table_rows[19][11], "1.7")
self.assertEqual(record.table_rows[19][12], "80")
self.assertEqual(record.table_rows[19][13], "A")
self.assertEqual(len(record.table_rows[20]), 14)
self.assertEqual(record.table_rows[20][0], "AFFX-BioB-M_st")
self.assertEqual(record.table_rows[20][1], "RHMu8LarB")
self.assertEqual(record.table_rows[20][2], "2")
self.assertEqual(record.table_rows[20][3], "3")
self.assertEqual(record.table_rows[20][4], "20")
self.assertEqual(record.table_rows[20][5], "20")
self.assertEqual(record.table_rows[20][6], "17")
self.assertEqual(record.table_rows[20][7], "0.10")
self.assertEqual(record.table_rows[20][8], "0.16")
self.assertEqual(record.table_rows[20][9], "0")
self.assertEqual(record.table_rows[20][10], "0")
self.assertEqual(record.table_rows[20][11], "0.7")
self.assertEqual(record.table_rows[20][12], "24")
self.assertEqual(record.table_rows[20][13], "A")
def test_soft_ex_series(self):
path = "Geo/soft_ex_series.txt"
with open(path) as handle:
records = Geo.parse(handle)
record = next(records)
self.assertEqual(record.entity_type, "SERIES")
self.assertEqual(record.entity_id, "Bone_marrow_stromal_cells")
self.assertEqual(len(record.entity_attributes), 13)
self.assertEqual(record.entity_attributes["Series_variable_description_1"], "HS-5")
self.assertEqual(record.entity_attributes["Series_variable_sample_list_1"], "GSM10001, GSM10002")
self.assertEqual(len(record.entity_attributes["Series_sample_id"]), 4)
self.assertEqual(record.entity_attributes["Series_sample_id"][0], "GSM10001")
self.assertEqual(record.entity_attributes["Series_sample_id"][1], "GSM10002")
self.assertEqual(record.entity_attributes["Series_sample_id"][2], "GSM10003")
self.assertEqual(record.entity_attributes["Series_sample_id"][3], "GSM10004")
self.assertEqual(record.entity_attributes["Series_variable_2"], "cell line")
self.assertEqual(record.entity_attributes["Series_pubmed_id"], "123456789")
self.assertEqual(len(record.entity_attributes["Series_contributor"]), 5)
self.assertEqual(record.entity_attributes["Series_contributor"][0], "Jane,Doe")
self.assertEqual(record.entity_attributes["Series_contributor"][1], "John,A,Smith")
self.assertEqual(record.entity_attributes["Series_contributor"][2], "Hans,van Elton")
self.assertEqual(record.entity_attributes["Series_contributor"][3], "John,Smithers Jr")
self.assertEqual(record.entity_attributes["Series_contributor"][4], "Jie,D,Chen")
self.assertEqual(record.entity_attributes["Series_summary"], "Two human stromal cell lines, HS-5 and HS-27a, represent functionally distinct components of the bone marrow microenvironment.1,2 HS-27a supports cobblestone area formation by early hematopoietic progenitors, whereas HS-5 secretes multiple cytokines that support the proliferation of committed progenitors. These cell lines have been distributed to research groups worldwide for use as a tool to understand interactions between hematopoietic cells and their microenvironment. We have used DNA microarray technology to characterize and compare the expression of over 17 000 genes in these cell lines. Gene expression differences in cytokines/chemokines, G-protein signaling molecules, and multiple extracellular matrix proteins add to the known protein and functional characterization of the lines, leading to new insight into the differences in their support function for hematopoietic progenitors.")
self.assertEqual(record.entity_attributes["Series_type"], "Cell Line Comparison")
self.assertEqual(record.entity_attributes["Series_variable_1"], "cell line")
self.assertEqual(record.entity_attributes["Series_variable_description_2"], "HS-27a")
self.assertEqual(record.entity_attributes["Series_title"], "Profiling of the functionally distinct human bone marrow stromal cell lines HS-5 and HS-27a.")
self.assertEqual(record.entity_attributes["Series_variable_sample_list_2"], "GSM10003, GSM10004")
self.assertEqual(record.entity_attributes["Series_overall_design"], "We analyzed 2 arrays for HS-5 cell line and 2 arrays for HS-27a cell line")
self.assertEqual(len(record.col_defs), 0)
self.assertEqual(len(record.table_rows), 0)
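
# GSM691: SAGE SAMPLE record with 20 header attributes, 3 column definitions
# (TAG, COUNT, TPM), and a 21-row tag-count table.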
def test_GSM691(self):
path = "Geo/GSM691.txt"
with open(path) as handle:
records = Geo.parse(handle)
record = next(records)
self.assertEqual(record.entity_type, "SAMPLE")
self.assertEqual(record.entity_id, "GSM691")
self.assertEqual(len(record.entity_attributes), 20)
self.assertEqual(record.entity_attributes["Sample_submitter_institute"], "National Cancer Institute")
self.assertEqual(record.entity_attributes["Sample_submitter_department"], "Cancer Genome Anatomy Project")
self.assertEqual(record.entity_attributes["Sample_submitter_web_link"], "http://cgap.nci.nih.gov/")
self.assertEqual(len(record.entity_attributes["Sample_description"]), 12)
self.assertEqual(record.entity_attributes["Sample_description"][0], "This library represents a Cancer Genome Anatomy Project library, which was either produced through CGAP funding, or donated to CGAP.")
self.assertEqual(record.entity_attributes["Sample_description"][1], "The Cancer Genome Anatomy Project (CGAP: http://cgap.nci.nih.gov) is an interdisciplinary program established and administered by the National Cancer Institute (NCI: http://www.nci.nih.gov) to generate the information and technological tools needed to decipher the molecular anatomy of the cancer cell.")
self.assertEqual(record.entity_attributes["Sample_description"][2], "Library constructed by Riggins laboratory Tissue supplied by Jeffrey Marks, Ph.D.")
self.assertEqual(record.entity_attributes["Sample_description"][3], "Organ: Breast")
self.assertEqual(record.entity_attributes["Sample_description"][4], "Tissue_type: normal epithelial organoids")
self.assertEqual(record.entity_attributes["Sample_description"][5], "Library treatment: non-normalized")
self.assertEqual(record.entity_attributes["Sample_description"][6], "Tissue description: Breast, Isolated normal epithelial organoids. Derived from a reduction mammoplasty.")
self.assertEqual(record.entity_attributes["Sample_description"][7], "Tissue supplier: Jeffrey Marks, Ph.D.")
self.assertEqual(record.entity_attributes["Sample_description"][8], "Sample type: Bulk")
self.assertEqual(record.entity_attributes["Sample_description"][9], "Producer: Riggins Laboratory")
self.assertEqual(record.entity_attributes["Sample_description"][10], "Clones generated to date: 768")
self.assertEqual(record.entity_attributes["Sample_description"][11], "Sequences generated to date: 572")
self.assertEqual(len(record.entity_attributes["Sample_author"]), 3)
self.assertEqual(record.entity_attributes["Sample_author"][0], "Jeffrey,,Marks")
self.assertEqual(record.entity_attributes["Sample_author"][1], "Gregory,J,Riggins")
self.assertEqual(record.entity_attributes["Sample_author"][2], "Robert,L,Strausberg")
self.assertEqual(record.entity_attributes["Sample_web_link"], "http://cgap.nci.nih.gov")
self.assertEqual(record.entity_attributes["Sample_submitter_phone"], "301-496-1550")
self.assertEqual(record.entity_attributes["Sample_series_id"], "GSE14")
self.assertEqual(record.entity_attributes["Sample_tag_count"], "7165")
self.assertEqual(record.entity_attributes["Sample_type"], "sage")
self.assertEqual(record.entity_attributes["Sample_submitter_name"], "Robert,L,Strausberg")
self.assertEqual(record.entity_attributes["Sample_platform_id"], "GPL4")
self.assertEqual(record.entity_attributes["Sample_submitter_city"], "Bethesda,MD,20892,USA")
self.assertEqual(record.entity_attributes["Sample_status"], "Public on Nov 28 2001")
self.assertEqual(record.entity_attributes["Sample_anchor"], "NlaIII")
self.assertEqual(record.entity_attributes["Sample_title"], "SAGE_Duke_40N")
self.assertEqual(record.entity_attributes["Sample_organism"], "Homo sapiens")
self.assertEqual(record.entity_attributes["Sample_target_source"], "Breast, isolated normal epithelial organoids")
self.assertEqual(record.entity_attributes["Sample_submission_date"], "Nov 28 2001")
self.assertEqual(record.entity_attributes["Sample_submitter_email"], "[email protected]")
self.assertEqual(len(record.col_defs), 3)
self.assertEqual(record.col_defs["COUNT"], "Absolute tag count")
self.assertEqual(record.col_defs["TPM"], "Tags per million, or (1000000*COUNT)/(Total tags)")
self.assertEqual(record.col_defs["TAG"], 'Ten base SAGE tag, LINK_PRE:"http://www.ncbi.nlm.nih.gov/SAGE/SAGEtag.cgi?tag="')
self.assertEqual(len(record.table_rows), 21)
self.assertEqual(len(record.table_rows[0]), 3)
self.assertEqual(record.table_rows[0][0], "TAG")
self.assertEqual(record.table_rows[0][1], "COUNT")
self.assertEqual(record.table_rows[0][2], "TPM")
self.assertEqual(len(record.table_rows[1]), 3)
self.assertEqual(record.table_rows[1][0], "TTGGGGTTTC")
self.assertEqual(record.table_rows[1][1], "202")
self.assertEqual(record.table_rows[1][2], "28192.6")
self.assertEqual(len(record.table_rows[2]), 3)
self.assertEqual(record.table_rows[2][0], "TAGGTTGTCT")
self.assertEqual(record.table_rows[2][1], "129")
self.assertEqual(record.table_rows[2][2], "18004.2")
self.assertEqual(len(record.table_rows[3]), 3)
self.assertEqual(record.table_rows[3][0], "GAGGGAGTTT")
self.assertEqual(record.table_rows[3][1], "109")
self.assertEqual(record.table_rows[3][2], "15212.8")
self.assertEqual(len(record.table_rows[4]), 3)
self.assertEqual(record.table_rows[4][0], "TGCACGTTTT")
self.assertEqual(record.table_rows[4][1], "92")
self.assertEqual(record.table_rows[4][2], "12840.2")
self.assertEqual(len(record.table_rows[5]), 3)
self.assertEqual(record.table_rows[5][0], "CTGGGTTAAT")
self.assertEqual(record.table_rows[5][1], "83")
self.assertEqual(record.table_rows[5][2], "11584.1")
self.assertEqual(len(record.table_rows[6]), 3)
self.assertEqual(record.table_rows[6][0], "GTTGTGGTTA")
self.assertEqual(record.table_rows[6][1], "82")
self.assertEqual(record.table_rows[6][2], "11444.5")
self.assertEqual(len(record.table_rows[7]), 3)
self.assertEqual(record.table_rows[7][0], "GATCCCAACT")
self.assertEqual(record.table_rows[7][1], "63")
self.assertEqual(record.table_rows[7][2], "8792.74")
self.assertEqual(len(record.table_rows[8]), 3)
self.assertEqual(record.table_rows[8][0], "TGCAGTCACT")
self.assertEqual(record.table_rows[8][1], "59")
self.assertEqual(record.table_rows[8][2], "8234.47")
self.assertEqual(len(record.table_rows[9]), 3)
self.assertEqual(record.table_rows[9][0], "GGATTTGGCC")
self.assertEqual(record.table_rows[9][1], "58")
self.assertEqual(record.table_rows[9][2], "8094.91")
self.assertEqual(len(record.table_rows[10]), 3)
self.assertEqual(record.table_rows[10][0], "GGGCTGGGGT")
self.assertEqual(record.table_rows[10][1], "56")
self.assertEqual(record.table_rows[10][2], "7815.77")
self.assertEqual(len(record.table_rows[11]), 3)
self.assertEqual(record.table_rows[11][0], "ATAATTCTTT")
self.assertEqual(record.table_rows[11][1], "44")
self.assertEqual(record.table_rows[11][2], "6140.96")
self.assertEqual(len(record.table_rows[12]), 3)
self.assertEqual(record.table_rows[12][0], "CTTCCTTGCC")
self.assertEqual(record.table_rows[12][1], "42")
self.assertEqual(record.table_rows[12][2], "5861.83")
self.assertEqual(len(record.table_rows[13]), 3)
self.assertEqual(record.table_rows[13][0], "TTGGTCCTCT")
self.assertEqual(record.table_rows[13][1], "40")
self.assertEqual(record.table_rows[13][2], "5582.69")
self.assertEqual(len(record.table_rows[14]), 3)
self.assertEqual(record.table_rows[14][0], "GGCAAGCCCC")
self.assertEqual(record.table_rows[14][1], "36")
self.assertEqual(record.table_rows[14][2], "5024.42")
self.assertEqual(len(record.table_rows[15]), 3)
self.assertEqual(record.table_rows[15][0], "AACTAAAAAA")
self.assertEqual(record.table_rows[15][1], "34")
self.assertEqual(record.table_rows[15][2], "4745.29")
self.assertEqual(len(record.table_rows[16]), 3)
self.assertEqual(record.table_rows[16][0], "AGGGCTTCCA")
self.assertEqual(record.table_rows[16][1], "34")
self.assertEqual(record.table_rows[16][2], "4745.29")
self.assertEqual(len(record.table_rows[17]), 3)
self.assertEqual(record.table_rows[17][0], "AGGCTACGGA")
self.assertEqual(record.table_rows[17][1], "33")
self.assertEqual(record.table_rows[17][2], "4605.72")
self.assertEqual(len(record.table_rows[18]), 3)
self.assertEqual(record.table_rows[18][0], "GTGAAACCCC")
self.assertEqual(record.table_rows[18][1], "32")
self.assertEqual(record.table_rows[18][2], "4466.15")
self.assertEqual(len(record.table_rows[19]), 3)
self.assertEqual(record.table_rows[19][0], "AACTAACAAA")
self.assertEqual(record.table_rows[19][1], "31")
self.assertEqual(record.table_rows[19][2], "4326.59")
self.assertEqual(len(record.table_rows[20]), 3)
self.assertEqual(record.table_rows[20][0], "GAAAAATGGT")
self.assertEqual(record.table_rows[20][1], "30")
self.assertEqual(record.table_rows[20][2], "4187.02")
def test_soft_ex_family(self):
path = "Geo/soft_ex_family.txt"
with open(path) as handle:
records = Geo.parse(handle)
record = next(records)
self.assertEqual(record.entity_type, "PLATFORM")
self.assertEqual(record.entity_id, "Murine 15K long oligo array version 2.0")
self.assertEqual(len(record.entity_attributes), 12)
self.assertEqual(record.entity_attributes["Platform_title"], "Murine 15K long oligo array version 2.0")
self.assertEqual(record.entity_attributes["Platform_web_link"], "http://www.microarray.protocols.html")
self.assertEqual(record.entity_attributes["platform_table_end"], "")
self.assertEqual(record.entity_attributes["Platform_support"], "glass")
self.assertEqual(record.entity_attributes["Platform_manufacturer"], "Un. London microarray facility")
self.assertEqual(record.entity_attributes["Platform_coating"], "polysine")
self.assertEqual(record.entity_attributes["Platform_technology"], "spotted oligonucleotide")
self.assertEqual(record.entity_attributes["platform_table_begin"], "")
self.assertEqual(len(record.entity_attributes["Platform_manufacture_protocol"]), 12)
self.assertEqual(record.entity_attributes["Platform_manufacture_protocol"][0], "1. Oligos are arrayed in Greiner 384-well flat-bottom plates. Each well contains 600 pmol of 70-mer oligo.")
self.assertEqual(record.entity_attributes["Platform_manufacture_protocol"][1], "2. Resuspend oligos in water to 20 uM and rearray 5 \xb5L into 384-well, Genetix polystyrene V-bottom plates (cat# X6004).")
self.assertEqual(record.entity_attributes["Platform_manufacture_protocol"][2], "3. Allow Genetix plates to dry through passive water evaporation in a protected environment (e.g., chemical hood).")
self.assertEqual(record.entity_attributes["Platform_manufacture_protocol"][3], "4. Before printing, add 5 \xb5L of 1X Printing Buffer to each well. This can be done the night before a print run is started.")
self.assertEqual(record.entity_attributes["Platform_manufacture_protocol"][4], "5. Seal plates with Corning seals.")
self.assertEqual(record.entity_attributes["Platform_manufacture_protocol"][5], "6. Incubate at 37\xb0C for 30 minutes to aid resuspension of DNA.")
self.assertEqual(record.entity_attributes["Platform_manufacture_protocol"][6], "7. Shake plates near maximum rotational speed on flat-bed shaker for 1 minute.")
self.assertEqual(record.entity_attributes["Platform_manufacture_protocol"][7], "8. Centrifuge plates at 2000 rpm for 3 minutes.")
self.assertEqual(record.entity_attributes["Platform_manufacture_protocol"][8], "9. Remove seals and cover with plate lids. Place in appropriate location of plate cassette. This should be done with first plates just before print run is started to minimize evaporation time before printing. For second and third cassettes, wait until 30 minutes before next cassette is needed to begin centrifugation.")
self.assertEqual(record.entity_attributes["Platform_manufacture_protocol"][9], "10. Make sure plates rest behind both holding clips in the cassettes. Push plates back into the cassettes as far as they will go, putting them in the proper position for the server arm.")
self.assertEqual(record.entity_attributes["Platform_manufacture_protocol"][10], "11. After the print run is completed, allow plates to dry through passive evaporation in a protected environment.")
self.assertEqual(record.entity_attributes["Platform_manufacture_protocol"][11], "12. For each subsequent preparation of these plates for a print run, add water to the wells instead of sodium phosphate buffer. The amount of water should be decreased by 0.25 \xb5L per print run, as this is the amount drawn up by the pin capillary during each dip.")
self.assertEqual(record.entity_attributes["Platform_organism"], "Mus musculus")
self.assertEqual(len(record.entity_attributes["Platform_contributor"]), 5)
self.assertEqual(record.entity_attributes["Platform_contributor"][0], "Jane,Doe")
self.assertEqual(record.entity_attributes["Platform_contributor"][1], "John,A,Smith")
self.assertEqual(record.entity_attributes["Platform_contributor"][2], "Hans,van Elton")
self.assertEqual(record.entity_attributes["Platform_contributor"][3], "John,Smithers Jr")
self.assertEqual(record.entity_attributes["Platform_contributor"][4], "Jie,D,Chen")
self.assertEqual(record.entity_attributes["Platform_distribution"], "non-commercial")
self.assertEqual(len(record.col_defs), 6)
self.assertEqual(record.col_defs["Gene_Desc"], "Gene description")
self.assertEqual(record.col_defs["SEQUENCE"], "Probe sequence information")
self.assertEqual(record.col_defs["Gene_Sym"], "Gene symbols")
self.assertEqual(record.col_defs["GB_ACC"], 'GenBank accession number of sequence used to design oligonucleotide probe LINK_PRE:"http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?cmd=Search&db=Nucleotide&term="')
self.assertEqual(record.col_defs["SPOT_ID"], "alternative identifier")
self.assertEqual(record.col_defs["ID"], "")
self.assertEqual(len(record.table_rows), 21)
self.assertEqual(len(record.table_rows[0]), 6)
self.assertEqual(record.table_rows[0][0], "ID")
self.assertEqual(record.table_rows[0][1], "GB_ACC")
self.assertEqual(record.table_rows[0][2], "Gene_Desc")
self.assertEqual(record.table_rows[0][3], "Gene_Sym")
self.assertEqual(record.table_rows[0][4], "SPOT_ID")
self.assertEqual(record.table_rows[0][5], "SEQUENCE")
self.assertEqual(len(record.table_rows[1]), 6)
self.assertEqual(record.table_rows[1][0], "1")
self.assertEqual(record.table_rows[1][1], "U02079")
self.assertEqual(record.table_rows[1][2], "nuclear factor of activated T-cells, cytoplasmic 2")
self.assertEqual(record.table_rows[1][3], "Nfatc2")
self.assertEqual(record.table_rows[1][4], "")
self.assertEqual(record.table_rows[1][5], "ACCTGGATGACGCAGCCACTTCAGAAAGCTGGGTTGGGACAGAAAGGTATATAGAGAGAAAATTTTGGAA")
self.assertEqual(len(record.table_rows[2]), 6)
self.assertEqual(record.table_rows[2][0], "2")
self.assertEqual(record.table_rows[2][1], "NM_008154")
self.assertEqual(record.table_rows[2][2], "G-protein coupled receptor 3")
self.assertEqual(record.table_rows[2][3], "Gpr3")
self.assertEqual(record.table_rows[2][4], "")
self.assertEqual(record.table_rows[2][5], "CTGTACAATGCTCTCACTTACTACTCAGAGACAACGGTAACTCGGACTTATGTGATGCTGGCCTTGGTGT")
self.assertEqual(len(record.table_rows[3]), 6)
self.assertEqual(record.table_rows[3][0], "3")
self.assertEqual(record.table_rows[3][1], "AK015719")
self.assertEqual(record.table_rows[3][2], "tropomodulin 2")
self.assertEqual(record.table_rows[3][3], "Tmod2")
self.assertEqual(record.table_rows[3][4], "")
self.assertEqual(record.table_rows[3][5], "CACCAGGCTCAGTGCCTAGTATCGGCTTCACCTAGTGTGGTTACTCAGGGCACGCAGAGCTACAGAACAC")
self.assertEqual(len(record.table_rows[4]), 6)
self.assertEqual(record.table_rows[4][0], "4")
self.assertEqual(record.table_rows[4][1], "AK003367")
self.assertEqual(record.table_rows[4][2], "mitochondrial ribosomal protein L15")
self.assertEqual(record.table_rows[4][3], "Mrpl15")
self.assertEqual(record.table_rows[4][4], "")
self.assertEqual(record.table_rows[4][5], "CAAGAAGTCTAGAAATTCTGTGCAAGCCTATTCCATTCTTTCTGCGGGGACAACCAATTCCGAAAAGAAT")
self.assertEqual(len(record.table_rows[5]), 6)
self.assertEqual(record.table_rows[5][0], "5")
self.assertEqual(record.table_rows[5][1], "BC003333")
self.assertEqual(record.table_rows[5][2], "RIKEN cDNA 0610033I05 gene")
self.assertEqual(record.table_rows[5][3], "0610033I05Rik")
self.assertEqual(record.table_rows[5][4], "")
self.assertEqual(record.table_rows[5][5], "AGAACTGGGTGGCAGATATCCTAGAGTTTTGACCAACGTTCACAGCACACATATTGATCTTATAGGACCT")
self.assertEqual(len(record.table_rows[6]), 6)
self.assertEqual(record.table_rows[6][0], "6")
self.assertEqual(record.table_rows[6][1], "NM_008462")
self.assertEqual(record.table_rows[6][2], "killer cell lectin-like receptor, subfamily A, member 2")
self.assertEqual(record.table_rows[6][3], "Klra2")
self.assertEqual(record.table_rows[6][4], "")
self.assertEqual(record.table_rows[6][5], "TGAATTGAAGTTCCTTAAATCCCAACTTCAAAGAAACACATACTGGATTTCACTGACACATCATAAAAGC")
self.assertEqual(len(record.table_rows[7]), 6)
self.assertEqual(record.table_rows[7][0], "7")
self.assertEqual(record.table_rows[7][1], "NM_008029")
self.assertEqual(record.table_rows[7][2], "FMS-like tyrosine kinase 4")
self.assertEqual(record.table_rows[7][3], "Flt4")
self.assertEqual(record.table_rows[7][4], "")
self.assertEqual(record.table_rows[7][5], "GAGGTGCTGTGGGATGACCGCCGGGGCATGCGGGTGCCCACTCAACTGTTGCGCGATGCCCTGTACCTGC")
self.assertEqual(len(record.table_rows[8]), 6)
self.assertEqual(record.table_rows[8][0], "8")
self.assertEqual(record.table_rows[8][1], "NM_054088")
self.assertEqual(record.table_rows[8][2], "adiponutrin")
self.assertEqual(record.table_rows[8][3], "Adpn")
self.assertEqual(record.table_rows[8][4], "")
self.assertEqual(record.table_rows[8][5], "GTCTGAGTTCCATTCCAAAGACGAAGTCGTGGATGCCCTGGTGTGTTCCTGCTTCATTCCCCTCTTCTCT")
self.assertEqual(len(record.table_rows[9]), 6)
self.assertEqual(record.table_rows[9][0], "9")
self.assertEqual(record.table_rows[9][1], "NM_009750")
self.assertEqual(record.table_rows[9][2], "nerve growth factor receptor (TNFRSF16) associated protein 1")
self.assertEqual(record.table_rows[9][3], "Ngfrap1")
self.assertEqual(record.table_rows[9][4], "")
self.assertEqual(record.table_rows[9][5], "TACAGCTGAGAAATTGTCTACGCATCCTTATGGGGGAGCTGTCTAACCACCACGATCACCATGATGAATT")
self.assertEqual(len(record.table_rows[10]), 6)
self.assertEqual(record.table_rows[10][0], "10")
self.assertEqual(record.table_rows[10][1], "AB045323")
self.assertEqual(record.table_rows[10][2], "DNA segment, Chr 8, ERATO Doi 594, expressed")
self.assertEqual(record.table_rows[10][3], "D8Ertd594e")
self.assertEqual(record.table_rows[10][4], "")
self.assertEqual(record.table_rows[10][5], "GATTCAGACTCGGGAGGAGCATCCCAACCTCTCCTTGAGGATAAAGGCCTGAGCGATTGCCCTGGGGAGC")
self.assertEqual(len(record.table_rows[11]), 6)
self.assertEqual(record.table_rows[11][0], "11")
self.assertEqual(record.table_rows[11][1], "AK005789")
self.assertEqual(record.table_rows[11][2], "dynein, cytoplasmic, light chain 2B")
self.assertEqual(record.table_rows[11][3], "Dncl2b")
self.assertEqual(record.table_rows[11][4], "")
self.assertEqual(record.table_rows[11][5], "TGCAGAAGGCATTCCAATCCGAACAACCCTGGACAACTCCACAACGGTTCAGTATGCGGGTCTTCTCCAC")
self.assertEqual(len(record.table_rows[12]), 6)
self.assertEqual(record.table_rows[12][0], "12")
self.assertEqual(record.table_rows[12][1], "NM_010517")
self.assertEqual(record.table_rows[12][2], "insulin-like growth factor binding protein 4")
self.assertEqual(record.table_rows[12][3], "Igfbp4")
self.assertEqual(record.table_rows[12][4], "")
self.assertEqual(record.table_rows[12][5], "GGAGAAGCTGGCGCGCTGCCGCCCCCCCGTGGGTTGCGAGGAGTTGGTGCGGGAGCCAGGCTGCGGTTGT")
self.assertEqual(len(record.table_rows[13]), 6)
self.assertEqual(record.table_rows[13][0], "13")
self.assertEqual(record.table_rows[13][1], "AK010722")
self.assertEqual(record.table_rows[13][2], "RIKEN cDNA 2410075D05 gene")
self.assertEqual(record.table_rows[13][3], "2410075D05Rik")
self.assertEqual(record.table_rows[13][4], "")
self.assertEqual(record.table_rows[13][5], "GGAGCATCTGGAGTTCCGCTTACCGGAAATAAAGTCTTTACTATCGGTGATTGGAGGGCAGTTCACTAAC")
self.assertEqual(len(record.table_rows[14]), 6)
self.assertEqual(record.table_rows[14][0], "14")
self.assertEqual(record.table_rows[14][1], "AK003755")
self.assertEqual(record.table_rows[14][2], "DNA segment, Chr 4, ERATO Doi 421, expressed")
self.assertEqual(record.table_rows[14][3], "D4Ertd421e")
self.assertEqual(record.table_rows[14][4], "")
self.assertEqual(record.table_rows[14][5], "AGCAAAGAGATCTCCCTCAGTGTGCCCATAGGTGGCGGTGCGAGCTTGCGGTTATTGGCCAGTGACTTGC")
self.assertEqual(len(record.table_rows[15]), 6)
self.assertEqual(record.table_rows[15][0], "15")
self.assertEqual(record.table_rows[15][1], "BC003241")
self.assertEqual(record.table_rows[15][2], "cleavage stimulation factor, 3' pre-RNA, subunit 3")
self.assertEqual(record.table_rows[15][3], "Cstf3")
self.assertEqual(record.table_rows[15][4], "")
self.assertEqual(record.table_rows[15][5], "AAATTAGAAGAAAATCCATATGACCTTGATGCTTGGAGCATTCTCATTCGAGAGGCACAGAATCAACCTA")
self.assertEqual(len(record.table_rows[16]), 6)
self.assertEqual(record.table_rows[16][0], "16")
self.assertEqual(record.table_rows[16][1], "AK004937")
self.assertEqual(record.table_rows[16][2], "RIKEN cDNA 1300007O09 gene")
self.assertEqual(record.table_rows[16][3], "1300007O09Rik")
self.assertEqual(record.table_rows[16][4], "")
self.assertEqual(record.table_rows[16][5], "CAGACACAAACCCTAGGTTGTATTGTAGACCGGAGTTTAAGCAGGCACTACCTGTCTGTCTTTTCTTCAT")
self.assertEqual(len(record.table_rows[17]), 6)
self.assertEqual(record.table_rows[17][0], "17")
self.assertEqual(record.table_rows[17][1], "AK004524")
self.assertEqual(record.table_rows[17][2], "unnamed protein product; hypothetical SOCS domain")
self.assertEqual(record.table_rows[17][3], "")
self.assertEqual(record.table_rows[17][4], "")
self.assertEqual(record.table_rows[17][5], "CGGAGCCCTGCGCGCCCAGAGCCCCCTCCCACCCGCTTCCACCAAGTGCATGGAGCCAACATCCGCATGG")
self.assertEqual(len(record.table_rows[18]), 6)
self.assertEqual(record.table_rows[18][0], "18")
self.assertEqual(record.table_rows[18][1], "NM_025999")
self.assertEqual(record.table_rows[18][2], "RIKEN cDNA 2610110L04 gene")
self.assertEqual(record.table_rows[18][3], "2610110L04Rik")
self.assertEqual(record.table_rows[18][4], "")
self.assertEqual(record.table_rows[18][5], "TGCATTGATAAATGGAGTGATCGACACAGGAACTGCCCCATTTGTCGCCTACAGATGACTGGAGCAAATG")
self.assertEqual(len(record.table_rows[19]), 6)
self.assertEqual(record.table_rows[19][0], "19")
self.assertEqual(record.table_rows[19][1], "")
self.assertEqual(record.table_rows[19][2], "")
self.assertEqual(record.table_rows[19][3], "")
self.assertEqual(record.table_rows[19][4], "-- CONTROL")
self.assertEqual(record.table_rows[19][5], "")
self.assertEqual(len(record.table_rows[20]), 6)
self.assertEqual(record.table_rows[20][0], "20")
self.assertEqual(record.table_rows[20][1], "NM_023120")
self.assertEqual(record.table_rows[20][2], "guanine nucleotide binding protein (G protein), beta polypeptide 1-like")
self.assertEqual(record.table_rows[20][3], "Gnb1l")
self.assertEqual(record.table_rows[20][4], "")
self.assertEqual(record.table_rows[20][5], "ACCGCCTGGTCCCAGATTTGTCCTCCGAGGCACACAGTCGGCTGTGAACACGCTCCATTTCTGCCCACCA")
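
# Second record in the family file: the first control embryonic stem cell
# SAMPLE replicate, a two-channel (Cy5/Cy3) hybridization with 24 header attributes.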
record = next(records)
self.assertEqual(record.entity_type, "SAMPLE")
self.assertEqual(record.entity_id, "Control Embyronic Stem Cell Replicate 1")
self.assertEqual(len(record.entity_attributes), 24)
self.assertEqual(record.entity_attributes["Sample_extract_protocol_ch2"], "TriZol procedure")
self.assertEqual(record.entity_attributes["Sample_hyb_protocol"], "Oligoarray control targets and hybridization buffer (Agilent In Situ Hybridization Kit Plus) were added, and samples were applied to microarrays enclosed in Agilent SureHyb-enabled hybridization chambers. After hybridization, slides were washed sequentially with 6x SSC/0.005% Triton X-102 and 0.1x SSC/0.005% Triton X-102 before scanning. Slides were hybridized for 17 h at 60\xb0C in a rotating oven, and washed.")
self.assertEqual(record.entity_attributes["Sample_extract_protocol_ch1"], "TriZol procedure")
self.assertEqual(record.entity_attributes["Sample_platform_id"], "Murine 15K long oligo array version 2.0")
self.assertEqual(record.entity_attributes["Sample_title"], "Control Embyronic Stem Cell Replicate 1")
self.assertEqual(record.entity_attributes["Sample_supplementary_file"], "file1.gpr")
self.assertEqual(record.entity_attributes["Sample_organism_ch2"], "Mus musculus")
self.assertEqual(record.entity_attributes["Sample_organism_ch1"], "Mus musculus")
self.assertEqual(record.entity_attributes["Sample_label_ch1"], "Cy5")
self.assertEqual(len(record.entity_attributes["Sample_scan_protocol"]), 2)
self.assertEqual(record.entity_attributes["Sample_scan_protocol"][0], "Scanned on an Agilent G2565AA scanner.")
self.assertEqual(record.entity_attributes["Sample_scan_protocol"][1], "Images were quantified using Agilent Feature Extraction Software (version A.7.5).")
self.assertEqual(record.entity_attributes["sample_table_begin"], "")
self.assertEqual(record.entity_attributes["Sample_label_protocol_ch2"], "10 \xb5g of total RNA were primed with 2 \xb5l of 100 \xb5M T16N2 DNA primer at 70\xb0C for 10 min, then reversed transcribed at 42\xb0C for 1 h in the presence of 400 U SuperScript II RTase (Invitrogen), and 100 \xb5M each dATP, dTTP, dGTP, with 25 \xb5M dCTP, 25 \xb5M Cy5-labeled dCTP (NEN Life Science, Boston, MA), and RNase inhibitor (Invitrogen). RNA was then degraded with RNase A, and labeled cDNAs were purified using QIAquick PCR columns (Qiagen).")
self.assertEqual(record.entity_attributes["Sample_label_protocol_ch1"], "10 \xb5g of total RNA were primed with 2 \xb5l of 100 \xb5M T16N2 DNA primer at 70\xb0C for 10 min, then reversed transcribed at 42\xb0C for 1 h in the presence of 400 U SuperScript II RTase (Invitrogen), and 100 \xb5M each dATP, dTTP, dGTP, with 25 \xb5M dCTP, 25 \xb5M Cy5-labeled dCTP (NEN Life Science, Boston, MA), and RNase inhibitor (Invitrogen). RNA was then degraded with RNase A, and labeled cDNAs were purified using QIAquick PCR columns (Qiagen).")
self.assertEqual(record.entity_attributes["Sample_data_processing"], "LOWESS normalized, background subtracted VALUE data obtained from log of processed Red signal/processed Green signal.")
self.assertEqual(record.entity_attributes["sample_table_end"], "")
self.assertEqual(record.entity_attributes["Sample_label_ch2"], "Cy3")
self.assertEqual(record.entity_attributes["Sample_description"], "Biological replicate 1 of 4. Control embryonic stem cells, untreated, harvested after several passages.")
self.assertEqual(record.entity_attributes["Sample_source_name_ch1"], "Total RNA from murine ES-D3 embryonic stem cells labeled with Cyanine-5 (red).")
self.assertEqual(record.entity_attributes["Sample_source_name_ch2"], "Total RNA from pooled whole mouse embryos e17.5, labeled with Cyanine-3 (green).")
self.assertEqual(record.entity_attributes["Sample_molecule_ch1"], "total RNA")
self.assertEqual(record.entity_attributes["Sample_molecule_ch2"], "total RNA")
self.assertEqual(record.entity_attributes["Sample_growth_protocol_ch1"], "ES cells were kept in an undifferentiated, pluripotent state by using 1000 IU/ml leukemia inhibitory factor (LIF; Chemicon, ESGRO, ESG1107), and grown on top of murine embryonic fibroblasts feeder layer inactivated by 10 ug/ml of mitomycin C (Sigma, St. Louis). ES cells were cultured on 0.1% gelatin-coated plastic dishes in ES medium containing Dulbecco modified Eagle medium supplemented with 15% fetal calf serum, 0.1 mM beta-mercaptoethanol, 2 mM glutamine, and 0.1 mN non-essential amino acids.")
self.assertEqual(len(record.entity_attributes["Sample_characteristics_ch1"]), 4)
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][0], "ES-D3 cell line (CRL-1934)")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][1], "Age: day 4")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][2], "Tissue: blastocytes")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][3], "Strain: 129/Sv mice")
self.assertEqual(len(record.entity_attributes["Sample_characteristics_ch2"]), 3)
self.assertEqual(record.entity_attributes["Sample_characteristics_ch2"][0], "Strain: C57BL/6")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch2"][1], "Age: e17.5 d")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch2"][2], "Tissue: whole embryo")
self.assertEqual(len(record.col_defs), 6)
self.assertEqual(record.col_defs["ID_REF"], "")
self.assertEqual(record.col_defs["VALUE"], "log(REDsignal/GREENsignal) per feature (processed signals used).")
self.assertEqual(record.col_defs["gProcessedSignal"], 'Dye-normalized signal after surrogate "algorithm," green "channel," used for computation of log ratio.')
self.assertEqual(record.col_defs["LogRatioError"], "error of the log ratio calculated according to the error model chosen.")
self.assertEqual(record.col_defs["PValueLogRatio"], "Significance level of the Log Ratio computed for a feature.")
self.assertEqual(record.col_defs["rProcessedSignal"], 'Dye-normalized signal after surrogate "algorithm," red "channel," used for computation of log ratio.')
self.assertEqual(len(record.table_rows), 21)
self.assertEqual(len(record.table_rows[0]), 6)
self.assertEqual(record.table_rows[0][0], "ID_REF")
self.assertEqual(record.table_rows[0][1], "VALUE")
self.assertEqual(record.table_rows[0][2], "LogRatioError")
self.assertEqual(record.table_rows[0][3], "PValueLogRatio")
self.assertEqual(record.table_rows[0][4], "gProcessedSignal")
self.assertEqual(record.table_rows[0][5], "rProcessedSignal")
self.assertEqual(len(record.table_rows[1]), 6)
self.assertEqual(record.table_rows[1][0], "1")
self.assertEqual(record.table_rows[1][1], "-1.6274758")
self.assertEqual(record.table_rows[1][2], "1.36E-01")
self.assertEqual(record.table_rows[1][3], "6.41E-33")
self.assertEqual(record.table_rows[1][4], "9.13E+03")
self.assertEqual(record.table_rows[1][5], "2.15E+02")
self.assertEqual(len(record.table_rows[2]), 6)
self.assertEqual(record.table_rows[2][0], "2")
self.assertEqual(record.table_rows[2][1], "0.1412248")
self.assertEqual(record.table_rows[2][2], "1.34E+00")
self.assertEqual(record.table_rows[2][3], "1.00E+00")
self.assertEqual(record.table_rows[2][4], "4.14E+01")
self.assertEqual(record.table_rows[2][5], "5.72E+01")
self.assertEqual(len(record.table_rows[3]), 6)
self.assertEqual(record.table_rows[3][0], "3")
self.assertEqual(record.table_rows[3][1], "0.1827684")
self.assertEqual(record.table_rows[3][2], "5.19E-02")
self.assertEqual(record.table_rows[3][3], "4.33E-04")
self.assertEqual(record.table_rows[3][4], "5.13E+03")
self.assertEqual(record.table_rows[3][5], "7.81E+03")
self.assertEqual(len(record.table_rows[4]), 6)
self.assertEqual(record.table_rows[4][0], "4")
self.assertEqual(record.table_rows[4][1], "-0.3932267")
self.assertEqual(record.table_rows[4][2], "6.08E-02")
self.assertEqual(record.table_rows[4][3], "1.02E-10")
self.assertEqual(record.table_rows[4][4], "4.65E+03")
self.assertEqual(record.table_rows[4][5], "1.88E+03")
self.assertEqual(len(record.table_rows[5]), 6)
self.assertEqual(record.table_rows[5][0], "5")
self.assertEqual(record.table_rows[5][1], "-0.9865994")
self.assertEqual(record.table_rows[5][2], "1.05E-01")
self.assertEqual(record.table_rows[5][3], "6.32E-21")
self.assertEqual(record.table_rows[5][4], "2.91E+03")
self.assertEqual(record.table_rows[5][5], "3.01E+02")
self.assertEqual(len(record.table_rows[6]), 6)
self.assertEqual(record.table_rows[6][0], "6")
self.assertEqual(record.table_rows[6][1], "0.0238812")
self.assertEqual(record.table_rows[6][2], "1.02E-01")
self.assertEqual(record.table_rows[6][3], "8.15E-01")
self.assertEqual(record.table_rows[6][4], "7.08E+02")
self.assertEqual(record.table_rows[6][5], "7.48E+02")
self.assertEqual(len(record.table_rows[7]), 6)
self.assertEqual(record.table_rows[7][0], "7")
self.assertEqual(record.table_rows[7][1], "-1.4841822")
self.assertEqual(record.table_rows[7][2], "1.25E-01")
self.assertEqual(record.table_rows[7][3], "1.42E-32")
self.assertEqual(record.table_rows[7][4], "1.02E+04")
self.assertEqual(record.table_rows[7][5], "3.36E+02")
self.assertEqual(len(record.table_rows[8]), 6)
self.assertEqual(record.table_rows[8][0], "8")
self.assertEqual(record.table_rows[8][1], "-1.8261356")
self.assertEqual(record.table_rows[8][2], "4.15E-01")
self.assertEqual(record.table_rows[8][3], "1.10E-05")
self.assertEqual(record.table_rows[8][4], "7.19E+02")
self.assertEqual(record.table_rows[8][5], "1.07E+01")
self.assertEqual(len(record.table_rows[9]), 6)
self.assertEqual(record.table_rows[9][0], "9")
self.assertEqual(record.table_rows[9][1], "-1.0344779")
self.assertEqual(record.table_rows[9][2], "1.78E+00")
self.assertEqual(record.table_rows[9][3], "1.00E+00")
self.assertEqual(record.table_rows[9][4], "9.62E+01")
self.assertEqual(record.table_rows[9][5], "8.89E+00")
self.assertEqual(len(record.table_rows[10]), 6)
self.assertEqual(record.table_rows[10][0], "10")
self.assertEqual(record.table_rows[10][1], "0.2405891")
self.assertEqual(record.table_rows[10][2], "3.09E-01")
self.assertEqual(record.table_rows[10][3], "4.36E-01")
self.assertEqual(record.table_rows[10][4], "1.61E+02")
self.assertEqual(record.table_rows[10][5], "2.80E+02")
self.assertEqual(len(record.table_rows[11]), 6)
self.assertEqual(record.table_rows[11][0], "11")
self.assertEqual(record.table_rows[11][1], "0.3209366")
self.assertEqual(record.table_rows[11][2], "3.59E-01")
self.assertEqual(record.table_rows[11][3], "3.71E-01")
self.assertEqual(record.table_rows[11][4], "1.25E+02")
self.assertEqual(record.table_rows[11][5], "2.61E+02")
self.assertEqual(len(record.table_rows[12]), 6)
self.assertEqual(record.table_rows[12][0], "12")
self.assertEqual(record.table_rows[12][1], "0.358304")
self.assertEqual(record.table_rows[12][2], "2.06E+00")
self.assertEqual(record.table_rows[12][3], "1.00E+00")
self.assertEqual(record.table_rows[12][4], "2.04E+01")
self.assertEqual(record.table_rows[12][5], "4.66E+01")
self.assertEqual(len(record.table_rows[13]), 6)
self.assertEqual(record.table_rows[13][0], "13")
self.assertEqual(record.table_rows[13][1], "-0.0122072")
self.assertEqual(record.table_rows[13][2], "3.64E-01")
self.assertEqual(record.table_rows[13][3], "9.73E-01")
self.assertEqual(record.table_rows[13][4], "1.84E+02")
self.assertEqual(record.table_rows[13][5], "1.79E+02")
self.assertEqual(len(record.table_rows[14]), 6)
self.assertEqual(record.table_rows[14][0], "14")
self.assertEqual(record.table_rows[14][1], "-1.5480396")
self.assertEqual(record.table_rows[14][2], "1.30E-01")
self.assertEqual(record.table_rows[14][3], "7.21E-33")
self.assertEqual(record.table_rows[14][4], "1.02E+04")
self.assertEqual(record.table_rows[14][5], "2.90E+02")
self.assertEqual(len(record.table_rows[15]), 6)
self.assertEqual(record.table_rows[15][0], "15")
self.assertEqual(record.table_rows[15][1], "0.0073419")
self.assertEqual(record.table_rows[15][2], "2.98E-01")
self.assertEqual(record.table_rows[15][3], "9.80E-01")
self.assertEqual(record.table_rows[15][4], "2.21E+02")
self.assertEqual(record.table_rows[15][5], "2.25E+02")
self.assertEqual(len(record.table_rows[16]), 6)
self.assertEqual(record.table_rows[16][0], "16")
self.assertEqual(record.table_rows[16][1], "-0.2267015")
self.assertEqual(record.table_rows[16][2], "9.44E-01")
self.assertEqual(record.table_rows[16][3], "8.10E-01")
self.assertEqual(record.table_rows[16][4], "8.90E+01")
self.assertEqual(record.table_rows[16][5], "5.28E+01")
self.assertEqual(len(record.table_rows[17]), 6)
self.assertEqual(record.table_rows[17][0], "17")
self.assertEqual(record.table_rows[17][1], "-0.1484023")
self.assertEqual(record.table_rows[17][2], "8.01E-01")
self.assertEqual(record.table_rows[17][3], "8.53E-01")
self.assertEqual(record.table_rows[17][4], "9.65E+01")
self.assertEqual(record.table_rows[17][5], "6.86E+01")
self.assertEqual(len(record.table_rows[18]), 6)
self.assertEqual(record.table_rows[18][0], "18")
self.assertEqual(record.table_rows[18][1], "-0.6122195")
self.assertEqual(record.table_rows[18][2], "1.28E-01")
self.assertEqual(record.table_rows[18][3], "1.69E-06")
self.assertEqual(record.table_rows[18][4], "1.12E+03")
self.assertEqual(record.table_rows[18][5], "2.73E+02")
self.assertEqual(len(record.table_rows[19]), 6)
self.assertEqual(record.table_rows[19][0], "19")
self.assertEqual(record.table_rows[19][1], "0.0796905")
self.assertEqual(record.table_rows[19][2], "8.78E-02")
self.assertEqual(record.table_rows[19][3], "3.64E-01")
self.assertEqual(record.table_rows[19][4], "8.21E+02")
self.assertEqual(record.table_rows[19][5], "9.87E+02")
self.assertEqual(len(record.table_rows[20]), 6)
self.assertEqual(record.table_rows[20][0], "20")
self.assertEqual(record.table_rows[20][1], "-0.084895")
self.assertEqual(record.table_rows[20][2], "9.38E-01")
self.assertEqual(record.table_rows[20][3], "9.28E-01")
self.assertEqual(record.table_rows[20][4], "7.68E+01")
self.assertEqual(record.table_rows[20][5], "6.32E+01")
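        # Next record: SAMPLE "Control Embyronic Stem Cell Replicate 2".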
record = next(records)
self.assertEqual(record.entity_type, "SAMPLE")
self.assertEqual(record.entity_id, "Control Embyronic Stem Cell Replicate 2")
self.assertEqual(len(record.entity_attributes), 24)
self.assertEqual(record.entity_attributes["Sample_extract_protocol_ch2"], "TriZol procedure")
self.assertEqual(record.entity_attributes["Sample_hyb_protocol"], "Oligoarray control targets and hybridization buffer (Agilent In Situ Hybridization Kit Plus) were added, and samples were applied to microarrays enclosed in Agilent SureHyb-enabled hybridization chambers. After hybridization, slides were washed sequentially with 6x SSC/0.005% Triton X-102 and 0.1x SSC/0.005% Triton X-102 before scanning. Slides were hybridized for 17 h at 60\xb0C in a rotating oven, and washed.")
self.assertEqual(record.entity_attributes["Sample_extract_protocol_ch1"], "TriZol procedure")
self.assertEqual(record.entity_attributes["Sample_platform_id"], "Murine 15K long oligo array version 2.0")
self.assertEqual(record.entity_attributes["Sample_title"], "Control Embyronic Stem Cell Replicate 2")
self.assertEqual(record.entity_attributes["Sample_supplementary_file"], "file2.gpr")
self.assertEqual(record.entity_attributes["Sample_organism_ch2"], "Mus musculus")
self.assertEqual(record.entity_attributes["Sample_organism_ch1"], "Mus musculus")
self.assertEqual(record.entity_attributes["Sample_label_ch1"], "Cy5")
self.assertEqual(len(record.entity_attributes["Sample_scan_protocol"]), 2)
self.assertEqual(record.entity_attributes["Sample_scan_protocol"][0], "Scanned on an Agilent G2565AA scanner.")
self.assertEqual(record.entity_attributes["Sample_scan_protocol"][1], "Images were quantified using Agilent Feature Extraction Software (version A.7.5).")
self.assertEqual(record.entity_attributes["sample_table_begin"], "")
self.assertEqual(record.entity_attributes["Sample_label_protocol_ch2"], "10 \xb5g of total RNA were primed with 2 \xb5l of 100 \xb5M T16N2 DNA primer at 70\xb0C for 10 min, then reversed transcribed at 42\xb0C for 1 h in the presence of 400 U SuperScript II RTase (Invitrogen), and 100 \xb5M each dATP, dTTP, dGTP, with 25 \xb5M dCTP, 25 \xb5M Cy5-labeled dCTP (NEN Life Science, Boston, MA), and RNase inhibitor (Invitrogen). RNA was then degraded with RNase A, and labeled cDNAs were purified using QIAquick PCR columns (Qiagen).")
self.assertEqual(record.entity_attributes["Sample_label_protocol_ch1"], "10 \xb5g of total RNA were primed with 2 \xb5l of 100 \xb5M T16N2 DNA primer at 70\xb0C for 10 min, then reversed transcribed at 42\xb0C for 1 h in the presence of 400 U SuperScript II RTase (Invitrogen), and 100 \xb5M each dATP, dTTP, dGTP, with 25 \xb5M dCTP, 25 \xb5M Cy5-labeled dCTP (NEN Life Science, Boston, MA), and RNase inhibitor (Invitrogen). RNA was then degraded with RNase A, and labeled cDNAs were purified using QIAquick PCR columns (Qiagen).")
self.assertEqual(record.entity_attributes["Sample_data_processing"], "LOWESS normalized, background subtracted VALUE data obtained from log of processed Red signal/processed Green signal.")
self.assertEqual(record.entity_attributes["sample_table_end"], "")
self.assertEqual(record.entity_attributes["Sample_label_ch2"], "Cy3")
self.assertEqual(record.entity_attributes["Sample_description"], "Biological replicate 2 of 4. Control embryonic stem cells, untreated, harvested after several passages.")
self.assertEqual(record.entity_attributes["Sample_source_name_ch1"], "Total RNA from murine ES-D3 embryonic stem cells labeled with Cyanine-5 (red).")
self.assertEqual(record.entity_attributes["Sample_source_name_ch2"], "Total RNA from pooled whole mouse embryos e17.5, labeled with Cyanine-3 (green).")
self.assertEqual(record.entity_attributes["Sample_molecule_ch1"], "total RNA")
self.assertEqual(record.entity_attributes["Sample_molecule_ch2"], "total RNA")
self.assertEqual(record.entity_attributes["Sample_growth_protocol_ch1"], "ES cells were kept in an undifferentiated, pluripotent state by using 1000 IU/ml leukemia inhibitory factor (LIF; Chemicon, ESGRO, ESG1107), and grown on top of murine embryonic fibroblasts feeder layer inactivated by 10 ug/ml of mitomycin C (Sigma, St. Louis). ES cells were cultured on 0.1% gelatin-coated plastic dishes in ES medium containing Dulbecco modified Eagle medium supplemented with 15% fetal calf serum, 0.1 mM beta-mercaptoethanol, 2 mM glutamine, and 0.1 mN non-essential amino acids.")
self.assertEqual(len(record.entity_attributes["Sample_characteristics_ch1"]), 4)
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][0], "ES-D3 cell line (CRL-1934)")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][1], "Age: day 4")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][2], "Tissue: blastocytes")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][3], "Strain: 129/Sv mice")
self.assertEqual(len(record.entity_attributes["Sample_characteristics_ch2"]), 3)
self.assertEqual(record.entity_attributes["Sample_characteristics_ch2"][0], "Strain: C57BL/6")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch2"][1], "Age: e17.5 d")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch2"][2], "Tissue: whole embryo")
self.assertEqual(len(record.col_defs), 6)
self.assertEqual(record.col_defs["ID_REF"], "")
self.assertEqual(record.col_defs["VALUE"], "log(REDsignal/GREENsignal) per feature (processed signals used).")
self.assertEqual(record.col_defs["gProcessedSignal"], 'Dye-normalized signal after surrogate "algorithm," green "channel," used for computation of log ratio.')
self.assertEqual(record.col_defs["LogRatioError"], "error of the log ratio calculated according to the error model chosen.")
self.assertEqual(record.col_defs["PValueLogRatio"], "Significance level of the Log Ratio computed for a feature.")
self.assertEqual(record.col_defs["rProcessedSignal"], 'Dye-normalized signal after surrogate "algorithm," red "channel," used for computation of log ratio.')
self.assertEqual(len(record.table_rows), 21)
self.assertEqual(len(record.table_rows[0]), 6)
self.assertEqual(record.table_rows[0][0], "ID_REF")
self.assertEqual(record.table_rows[0][1], "VALUE")
self.assertEqual(record.table_rows[0][2], "LogRatioError")
self.assertEqual(record.table_rows[0][3], "PValueLogRatio")
self.assertEqual(record.table_rows[0][4], "gProcessedSignal")
self.assertEqual(record.table_rows[0][5], "rProcessedSignal")
self.assertEqual(len(record.table_rows[1]), 6)
self.assertEqual(record.table_rows[1][0], "1")
self.assertEqual(record.table_rows[1][1], "-1.1697263")
self.assertEqual(record.table_rows[1][2], "1.23E-01")
self.assertEqual(record.table_rows[1][3], "2.14E-21")
self.assertEqual(record.table_rows[1][4], "3.17E+03")
self.assertEqual(record.table_rows[1][5], "2.14E+02")
self.assertEqual(len(record.table_rows[2]), 6)
self.assertEqual(record.table_rows[2][0], "2")
self.assertEqual(record.table_rows[2][1], "-0.1111353")
self.assertEqual(record.table_rows[2][2], "1.63E+00")
self.assertEqual(record.table_rows[2][3], "9.46E-01")
self.assertEqual(record.table_rows[2][4], "5.43E+01")
self.assertEqual(record.table_rows[2][5], "4.20E+01")
self.assertEqual(len(record.table_rows[3]), 6)
self.assertEqual(record.table_rows[3][0], "3")
self.assertEqual(record.table_rows[3][1], "0.1400597")
self.assertEqual(record.table_rows[3][2], "5.11E-02")
self.assertEqual(record.table_rows[3][3], "6.17E-03")
self.assertEqual(record.table_rows[3][4], "6.72E+03")
self.assertEqual(record.table_rows[3][5], "9.28E+03")
self.assertEqual(len(record.table_rows[4]), 6)
self.assertEqual(record.table_rows[4][0], "4")
self.assertEqual(record.table_rows[4][1], "-0.4820633")
self.assertEqual(record.table_rows[4][2], "6.38E-02")
self.assertEqual(record.table_rows[4][3], "4.06E-14")
self.assertEqual(record.table_rows[4][4], "6.46E+03")
self.assertEqual(record.table_rows[4][5], "2.13E+03")
self.assertEqual(len(record.table_rows[5]), 6)
self.assertEqual(record.table_rows[5][0], "5")
self.assertEqual(record.table_rows[5][1], "-1.2116196")
self.assertEqual(record.table_rows[5][2], "1.22E-01")
self.assertEqual(record.table_rows[5][3], "2.31E-23")
self.assertEqual(record.table_rows[5][4], "3.62E+03")
self.assertEqual(record.table_rows[5][5], "2.22E+02")
self.assertEqual(len(record.table_rows[6]), 6)
self.assertEqual(record.table_rows[6][0], "6")
self.assertEqual(record.table_rows[6][1], "-0.0230528")
self.assertEqual(record.table_rows[6][2], "1.04E-01")
self.assertEqual(record.table_rows[6][3], "8.24E-01")
self.assertEqual(record.table_rows[6][4], "8.76E+02")
self.assertEqual(record.table_rows[6][5], "8.31E+02")
self.assertEqual(len(record.table_rows[7]), 6)
self.assertEqual(record.table_rows[7][0], "7")
self.assertEqual(record.table_rows[7][1], "-1.1380152")
self.assertEqual(record.table_rows[7][2], "1.13E-01")
self.assertEqual(record.table_rows[7][3], "9.23E-24")
self.assertEqual(record.table_rows[7][4], "3.94E+03")
self.assertEqual(record.table_rows[7][5], "2.86E+02")
self.assertEqual(len(record.table_rows[8]), 6)
self.assertEqual(record.table_rows[8][0], "8")
self.assertEqual(record.table_rows[8][1], "-1.834596")
self.assertEqual(record.table_rows[8][2], "5.40E-01")
self.assertEqual(record.table_rows[8][3], "6.74E-04")
self.assertEqual(record.table_rows[8][4], "6.44E+02")
self.assertEqual(record.table_rows[8][5], "9.43E+00")
self.assertEqual(len(record.table_rows[9]), 6)
self.assertEqual(record.table_rows[9][0], "9")
self.assertEqual(record.table_rows[9][1], "-0.9747637")
self.assertEqual(record.table_rows[9][2], "2.14E+00")
self.assertEqual(record.table_rows[9][3], "1.00E+00")
self.assertEqual(record.table_rows[9][4], "9.17E+01")
self.assertEqual(record.table_rows[9][5], "9.72E+00")
self.assertEqual(len(record.table_rows[10]), 6)
self.assertEqual(record.table_rows[10][0], "10")
self.assertEqual(record.table_rows[10][1], "0.3874005")
self.assertEqual(record.table_rows[10][2], "2.92E-01")
self.assertEqual(record.table_rows[10][3], "1.85E-01")
self.assertEqual(record.table_rows[10][4], "1.69E+02")
self.assertEqual(record.table_rows[10][5], "4.11E+02")
self.assertEqual(len(record.table_rows[11]), 6)
self.assertEqual(record.table_rows[11][0], "11")
self.assertEqual(record.table_rows[11][1], "0.5340442")
self.assertEqual(record.table_rows[11][2], "3.29E-01")
self.assertEqual(record.table_rows[11][3], "1.04E-01")
self.assertEqual(record.table_rows[11][4], "1.23E+02")
self.assertEqual(record.table_rows[11][5], "4.20E+02")
self.assertEqual(len(record.table_rows[12]), 6)
self.assertEqual(record.table_rows[12][0], "12")
self.assertEqual(record.table_rows[12][1], "0.3260696")
self.assertEqual(record.table_rows[12][2], "1.92E+00")
self.assertEqual(record.table_rows[12][3], "8.65E-01")
self.assertEqual(record.table_rows[12][4], "2.73E+01")
self.assertEqual(record.table_rows[12][5], "5.77E+01")
self.assertEqual(len(record.table_rows[13]), 6)
self.assertEqual(record.table_rows[13][0], "13")
self.assertEqual(record.table_rows[13][1], "0.3010618")
self.assertEqual(record.table_rows[13][2], "2.84E-01")
self.assertEqual(record.table_rows[13][3], "2.90E-01")
self.assertEqual(record.table_rows[13][4], "1.93E+02")
self.assertEqual(record.table_rows[13][5], "3.87E+02")
self.assertEqual(len(record.table_rows[14]), 6)
self.assertEqual(record.table_rows[14][0], "14")
self.assertEqual(record.table_rows[14][1], "-1.0760413")
self.assertEqual(record.table_rows[14][2], "1.08E-01")
self.assertEqual(record.table_rows[14][3], "1.63E-23")
self.assertEqual(record.table_rows[14][4], "4.06E+03")
self.assertEqual(record.table_rows[14][5], "3.41E+02")
self.assertEqual(len(record.table_rows[15]), 6)
self.assertEqual(record.table_rows[15][0], "15")
self.assertEqual(record.table_rows[15][1], "-0.1167371")
self.assertEqual(record.table_rows[15][2], "3.87E-01")
self.assertEqual(record.table_rows[15][3], "7.63E-01")
self.assertEqual(record.table_rows[15][4], "2.32E+02")
self.assertEqual(record.table_rows[15][5], "1.77E+02")
self.assertEqual(len(record.table_rows[16]), 6)
self.assertEqual(record.table_rows[16][0], "16")
self.assertEqual(record.table_rows[16][1], "-0.1936322")
self.assertEqual(record.table_rows[16][2], "9.44E-01")
self.assertEqual(record.table_rows[16][3], "8.38E-01")
self.assertEqual(record.table_rows[16][4], "1.02E+02")
self.assertEqual(record.table_rows[16][5], "6.56E+01")
self.assertEqual(len(record.table_rows[17]), 6)
self.assertEqual(record.table_rows[17][0], "17")
self.assertEqual(record.table_rows[17][1], "-0.3275898")
self.assertEqual(record.table_rows[17][2], "7.87E-01")
self.assertEqual(record.table_rows[17][3], "6.77E-01")
self.assertEqual(record.table_rows[17][4], "1.41E+02")
self.assertEqual(record.table_rows[17][5], "6.65E+01")
self.assertEqual(len(record.table_rows[18]), 6)
self.assertEqual(record.table_rows[18][0], "18")
self.assertEqual(record.table_rows[18][1], "-0.4805853")
self.assertEqual(record.table_rows[18][2], "1.14E-01")
self.assertEqual(record.table_rows[18][3], "2.41E-05")
self.assertEqual(record.table_rows[18][4], "1.34E+03")
self.assertEqual(record.table_rows[18][5], "4.42E+02")
self.assertEqual(len(record.table_rows[19]), 6)
self.assertEqual(record.table_rows[19][0], "19")
self.assertEqual(record.table_rows[19][1], "0.1109524")
self.assertEqual(record.table_rows[19][2], "9.56E-02")
self.assertEqual(record.table_rows[19][3], "2.46E-01")
self.assertEqual(record.table_rows[19][4], "8.38E+02")
self.assertEqual(record.table_rows[19][5], "1.08E+03")
self.assertEqual(len(record.table_rows[20]), 6)
self.assertEqual(record.table_rows[20][0], "20")
self.assertEqual(record.table_rows[20][1], "0.1677912")
self.assertEqual(record.table_rows[20][2], "6.51E-01")
self.assertEqual(record.table_rows[20][3], "7.97E-01")
self.assertEqual(record.table_rows[20][4], "9.84E+01")
self.assertEqual(record.table_rows[20][5], "1.45E+02")
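        # Next record: SAMPLE "Triple-Fusion Transfected Embryonic Stem Cells Replicate 1".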
record = next(records)
self.assertEqual(record.entity_type, "SAMPLE")
self.assertEqual(record.entity_id, "Triple-Fusion Transfected Embryonic Stem Cells Replicate 1")
self.assertEqual(len(record.entity_attributes), 25)
self.assertEqual(record.entity_attributes["Sample_extract_protocol_ch2"], "TriZol procedure")
self.assertEqual(record.entity_attributes["Sample_hyb_protocol"], "Oligoarray control targets and hybridization buffer (Agilent In Situ Hybridization Kit Plus) were added, and samples were applied to microarrays enclosed in Agilent SureHyb-enabled hybridization chambers. After hybridization, slides were washed sequentially with 6x SSC/0.005% Triton X-102 and 0.1x SSC/0.005% Triton X-102 before scanning. Slides were hybridized for 17 h at 60\xb0C in a rotating oven, and washed.")
self.assertEqual(record.entity_attributes["Sample_extract_protocol_ch1"], "TriZol procedure")
self.assertEqual(record.entity_attributes["Sample_platform_id"], "Murine 15K long oligo array version 2.0")
self.assertEqual(record.entity_attributes["Sample_title"], "Triple-Fusion Transfected Embryonic Stem Cells Replicate 1")
self.assertEqual(record.entity_attributes["Sample_supplementary_file"], "file3.gpr")
self.assertEqual(record.entity_attributes["Sample_organism_ch2"], "Mus musculus")
self.assertEqual(record.entity_attributes["Sample_organism_ch1"], "Mus musculus")
self.assertEqual(record.entity_attributes["Sample_label_ch1"], "Cy5")
self.assertEqual(record.entity_attributes["Sample_scan_protocol"], "Scanned on an Agilent G2565AA scanner.")
self.assertEqual(record.entity_attributes["sample_table_begin"], "")
self.assertEqual(record.entity_attributes["Sample_label_protocol_ch2"], "10 \xb5g of total RNA were primed with 2 \xb5l of 100 \xb5M T16N2 DNA primer at 70\xb0C for 10 min, then reversed transcribed at 42\xb0C for 1 h in the presence of 400 U SuperScript II RTase (Invitrogen), and 100 \xb5M each dATP, dTTP, dGTP, with 25 \xb5M dCTP, 25 \xb5M Cy5-labeled dCTP (NEN Life Science, Boston, MA), and RNase inhibitor (Invitrogen). RNA was then degraded with RNase A, and labeled cDNAs were purified using QIAquick PCR columns (Qiagen).")
self.assertEqual(record.entity_attributes["Sample_label_protocol_ch1"], "10 \xb5g of total RNA were primed with 2 \xb5l of 100 \xb5M T16N2 DNA primer at 70\xb0C for 10 min, then reversed transcribed at 42\xb0C for 1 h in the presence of 400 U SuperScript II RTase (Invitrogen), and 100 \xb5M each dATP, dTTP, dGTP with 25 \xb5M dCTP, 25 \xb5M Cy5-labeled dCTP (NEN Life Science, Boston, MA), and RNase inhibitor (Invitrogen). RNA was then degraded with RNase A, and labeled cDNAs were purified using QIAquick PCR columns (Qiagen).")
self.assertEqual(record.entity_attributes["Sample_data_processing"], "LOWESS normalized, background subtracted VALUE data obtained from log of processed Red signal/processed Green signal.")
self.assertEqual(record.entity_attributes["sample_table_end"], "")
self.assertEqual(record.entity_attributes["Sample_label_ch2"], "Cy3")
self.assertEqual(record.entity_attributes["Sample_description"], "Biological replicate 1 of 3. Stable triple-fusion-reporter-gene transfected embryonic stem cells, harvested after several passages.")
self.assertEqual(record.entity_attributes["Sample_source_name_ch1"], "Total RNA from murine ES-D3 triple-transfected embryonic stem cells labeled with Cyanine-5 (red).")
self.assertEqual(record.entity_attributes["Sample_source_name_ch2"], "Total RNA from pooled whole mouse embryos e17.5, labeled with Cyanine-3 (green).")
self.assertEqual(record.entity_attributes["Sample_molecule_ch1"], "total RNA")
self.assertEqual(record.entity_attributes["Sample_molecule_ch2"], "total RNA")
self.assertEqual(record.entity_attributes["Sample_growth_protocol_ch1"], "ES cells were kept in an undifferentiated, pluripotent state by using 1000 IU/ml leukemia inhibitory factor (LIF; Chemicon, ESGRO, ESG1107), and grown on top of murine embryonic fibroblasts feeder layer inactivated by 10 ug/ml of mitomycin C (Sigma, St. Louis). ES cells were cultured on 0.1% gelatin-coated plastic dishes in ES medium containing Dulbecco modified Eagle medium supplemented with 15% fetal calf serum, 0.1 mM beta-mercaptoethanol, 2 mM glutamine, and 0.1 mN non-essential amino acids.")
self.assertEqual(len(record.entity_attributes["Sample_characteristics_ch1"]), 5)
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][0], "ES-D3 cell line (CRL-1934)")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][1], "Transfected with pUb-fluc-mrfp-ttk triple fusion reporter gene.")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][2], "Age: day 4")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][3], "Tissue: blastocytes")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][4], "Strain: 129/Sv mice")
self.assertEqual(len(record.entity_attributes["Sample_characteristics_ch2"]), 3)
self.assertEqual(record.entity_attributes["Sample_characteristics_ch2"][0], "Strain: C57BL/6")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch2"][1], "Age: e17.5 d")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch2"][2], "Tissue: whole embryo")
self.assertEqual(record.entity_attributes["Sample_treatment_protocol_ch1"], "PCR amplification and standard cloning techniques were used to insert fluc and mrfp genes from plasmids pCDNA 3.1-CMV-fluc (Promega, Madison, WI) and pCDNA3.1-CMV-mrfp in frame with the ttk gene into the pCDNA3.1-truncated sr39tk. This triple fusion (TF) reporter gene fragment (3.3 kbp) was released from the plasmid with Not1 and BamH1 restriction enzymes before blunt-end ligation into the multiple cloning site of lentiviral transfer vector, FUG, driven by the human ubiquitin-C promoter. Self-inactivating (SIN) lentivirus was prepared by transient transfection of 293T cells. Briefly, pFUG-TF containing the triple fusion reporter gene was co-transfected into 293T cells with HIV-1 packaging vector (?8.9) and vesicular stomatitis virus G glycoprotein-pseudotyped envelop vector (pVSVG). Lentivirus supernatant was concentrated by sediment centrifugation using a SW29 rotor at 50,000 x g for two hours. Concentrated virus was titered on 293T cells. Murine ES cells were transfected with LV-pUb-fluc-mrfp-ttk at a multiplicity of infection (MOI) of 10.")
self.assertEqual(len(record.col_defs), 6)
self.assertEqual(record.col_defs["ID_REF"], "")
self.assertEqual(record.col_defs["VALUE"], "log(REDsignal/GREENsignal) per feature (processed signals used).")
self.assertEqual(record.col_defs["gProcessedSignal"], 'Dye-normalized signal after surrogate "algorithm," green "channel," used for computation of log ratio.')
self.assertEqual(record.col_defs["LogRatioError"], "error of the log ratio calculated according to the error model chosen.")
self.assertEqual(record.col_defs["PValueLogRatio"], "Significance level of the Log Ratio computed for a feature.")
self.assertEqual(record.col_defs["rProcessedSignal"], 'Dye-normalized signal after surrogate "algorithm," red "channel," used for computation of log ratio.')
self.assertEqual(len(record.table_rows), 21)
self.assertEqual(len(record.table_rows[0]), 6)
self.assertEqual(record.table_rows[0][0], "ID_REF")
self.assertEqual(record.table_rows[0][1], "VALUE")
self.assertEqual(record.table_rows[0][2], "LogRatioError")
self.assertEqual(record.table_rows[0][3], "PValueLogRatio")
self.assertEqual(record.table_rows[0][4], "gProcessedSignal")
self.assertEqual(record.table_rows[0][5], "rProcessedSignal")
self.assertEqual(len(record.table_rows[1]), 6)
self.assertEqual(record.table_rows[1][0], "1")
self.assertEqual(record.table_rows[1][1], "-0.7837546")
self.assertEqual(record.table_rows[1][2], "1.30E-01")
self.assertEqual(record.table_rows[1][3], "1.70E-09")
self.assertEqual(record.table_rows[1][4], "2.10E+03")
self.assertEqual(record.table_rows[1][5], "3.46E+02")
self.assertEqual(len(record.table_rows[2]), 6)
self.assertEqual(record.table_rows[2][0], "2")
self.assertEqual(record.table_rows[2][1], "0.3797837")
self.assertEqual(record.table_rows[2][2], "1.15E+00")
self.assertEqual(record.table_rows[2][3], "7.41E-01")
self.assertEqual(record.table_rows[2][4], "5.59E+01")
self.assertEqual(record.table_rows[2][5], "1.34E+02")
self.assertEqual(len(record.table_rows[3]), 6)
self.assertEqual(record.table_rows[3][0], "3")
self.assertEqual(record.table_rows[3][1], "0.2079269")
self.assertEqual(record.table_rows[3][2], "5.38E-02")
self.assertEqual(record.table_rows[3][3], "1.12E-04")
self.assertEqual(record.table_rows[3][4], "5.04E+03")
self.assertEqual(record.table_rows[3][5], "8.14E+03")
self.assertEqual(len(record.table_rows[4]), 6)
self.assertEqual(record.table_rows[4][0], "4")
self.assertEqual(record.table_rows[4][1], "-0.4730291")
self.assertEqual(record.table_rows[4][2], "6.71E-02")
self.assertEqual(record.table_rows[4][3], "1.86E-12")
self.assertEqual(record.table_rows[4][4], "5.66E+03")
self.assertEqual(record.table_rows[4][5], "1.91E+03")
self.assertEqual(len(record.table_rows[5]), 6)
self.assertEqual(record.table_rows[5][0], "5")
self.assertEqual(record.table_rows[5][1], "-0.9481128")
self.assertEqual(record.table_rows[5][2], "1.19E-01")
self.assertEqual(record.table_rows[5][3], "1.30E-15")
self.assertEqual(record.table_rows[5][4], "3.10E+03")
self.assertEqual(record.table_rows[5][5], "3.49E+02")
self.assertEqual(len(record.table_rows[6]), 6)
self.assertEqual(record.table_rows[6][0], "6")
self.assertEqual(record.table_rows[6][1], "-0.0159867")
self.assertEqual(record.table_rows[6][2], "1.33E-01")
self.assertEqual(record.table_rows[6][3], "9.05E-01")
self.assertEqual(record.table_rows[6][4], "8.45E+02")
self.assertEqual(record.table_rows[6][5], "8.14E+02")
self.assertEqual(len(record.table_rows[7]), 6)
self.assertEqual(record.table_rows[7][0], "7")
self.assertEqual(record.table_rows[7][1], "-0.819922")
self.assertEqual(record.table_rows[7][2], "1.14E-01")
self.assertEqual(record.table_rows[7][3], "7.01E-13")
self.assertEqual(record.table_rows[7][4], "2.75E+03")
self.assertEqual(record.table_rows[7][5], "4.16E+02")
self.assertEqual(len(record.table_rows[8]), 6)
self.assertEqual(record.table_rows[8][0], "8")
self.assertEqual(record.table_rows[8][1], "-0.1559774")
self.assertEqual(record.table_rows[8][2], "9.16E-01")
self.assertEqual(record.table_rows[8][3], "8.65E-01")
self.assertEqual(record.table_rows[8][4], "1.34E+02")
self.assertEqual(record.table_rows[8][5], "9.34E+01")
self.assertEqual(len(record.table_rows[9]), 6)
self.assertEqual(record.table_rows[9][0], "9")
self.assertEqual(record.table_rows[9][1], "0.145267")
self.assertEqual(record.table_rows[9][2], "3.90E+00")
self.assertEqual(record.table_rows[9][3], "1.00E+00")
self.assertEqual(record.table_rows[9][4], "2.22E+01")
self.assertEqual(record.table_rows[9][5], "3.10E+01")
self.assertEqual(len(record.table_rows[10]), 6)
self.assertEqual(record.table_rows[10][0], "10")
self.assertEqual(record.table_rows[10][1], "0.3611211")
self.assertEqual(record.table_rows[10][2], "3.40E-01")
self.assertEqual(record.table_rows[10][3], "2.88E-01")
self.assertEqual(record.table_rows[10][4], "1.97E+02")
self.assertEqual(record.table_rows[10][5], "4.52E+02")
self.assertEqual(len(record.table_rows[11]), 6)
self.assertEqual(record.table_rows[11][0], "11")
self.assertEqual(record.table_rows[11][1], "0.5092089")
self.assertEqual(record.table_rows[11][2], "4.39E-01")
self.assertEqual(record.table_rows[11][3], "2.46E-01")
self.assertEqual(record.table_rows[11][4], "1.24E+02")
self.assertEqual(record.table_rows[11][5], "4.01E+02")
self.assertEqual(len(record.table_rows[12]), 6)
self.assertEqual(record.table_rows[12][0], "12")
self.assertEqual(record.table_rows[12][1], "0.3715387")
self.assertEqual(record.table_rows[12][2], "1.69E+00")
self.assertEqual(record.table_rows[12][3], "8.26E-01")
self.assertEqual(record.table_rows[12][4], "3.84E+01")
self.assertEqual(record.table_rows[12][5], "9.04E+01")
self.assertEqual(len(record.table_rows[13]), 6)
self.assertEqual(record.table_rows[13][0], "13")
self.assertEqual(record.table_rows[13][1], "0.1734934")
self.assertEqual(record.table_rows[13][2], "3.57E-01")
self.assertEqual(record.table_rows[13][3], "6.27E-01")
self.assertEqual(record.table_rows[13][4], "2.37E+02")
self.assertEqual(record.table_rows[13][5], "3.53E+02")
self.assertEqual(len(record.table_rows[14]), 6)
self.assertEqual(record.table_rows[14][0], "14")
self.assertEqual(record.table_rows[14][1], "-0.9340707")
self.assertEqual(record.table_rows[14][2], "1.20E-01")
self.assertEqual(record.table_rows[14][3], "6.90E-15")
self.assertEqual(record.table_rows[14][4], "2.96E+03")
self.assertEqual(record.table_rows[14][5], "3.45E+02")
self.assertEqual(len(record.table_rows[15]), 6)
self.assertEqual(record.table_rows[15][0], "15")
self.assertEqual(record.table_rows[15][1], "-0.2956317")
self.assertEqual(record.table_rows[15][2], "5.78E-01")
self.assertEqual(record.table_rows[15][3], "6.09E-01")
self.assertEqual(record.table_rows[15][4], "2.46E+02")
self.assertEqual(record.table_rows[15][5], "1.25E+02")
self.assertEqual(len(record.table_rows[16]), 6)
self.assertEqual(record.table_rows[16][0], "16")
self.assertEqual(record.table_rows[16][1], "-0.2321102")
self.assertEqual(record.table_rows[16][2], "1.22E+00")
self.assertEqual(record.table_rows[16][3], "8.49E-01")
self.assertEqual(record.table_rows[16][4], "1.09E+02")
self.assertEqual(record.table_rows[16][5], "6.37E+01")
self.assertEqual(len(record.table_rows[17]), 6)
self.assertEqual(record.table_rows[17][0], "17")
self.assertEqual(record.table_rows[17][1], "-0.1603561")
self.assertEqual(record.table_rows[17][2], "1.16E+00")
self.assertEqual(record.table_rows[17][3], "8.90E-01")
self.assertEqual(record.table_rows[17][4], "1.06E+02")
self.assertEqual(record.table_rows[17][5], "7.34E+01")
self.assertEqual(len(record.table_rows[18]), 6)
self.assertEqual(record.table_rows[18][0], "18")
self.assertEqual(record.table_rows[18][1], "-0.5063897")
self.assertEqual(record.table_rows[18][2], "1.63E-01")
self.assertEqual(record.table_rows[18][3], "1.95E-03")
self.assertEqual(record.table_rows[18][4], "1.15E+03")
self.assertEqual(record.table_rows[18][5], "3.58E+02")
self.assertEqual(len(record.table_rows[19]), 6)
self.assertEqual(record.table_rows[19][0], "19")
self.assertEqual(record.table_rows[19][1], "0.1990761")
self.assertEqual(record.table_rows[19][2], "1.32E-01")
self.assertEqual(record.table_rows[19][3], "1.32E-01")
self.assertEqual(record.table_rows[19][4], "6.65E+02")
self.assertEqual(record.table_rows[19][5], "1.05E+03")
self.assertEqual(len(record.table_rows[20]), 6)
self.assertEqual(record.table_rows[20][0], "20")
self.assertEqual(record.table_rows[20][1], "0.2985912")
self.assertEqual(record.table_rows[20][2], "8.89E-01")
self.assertEqual(record.table_rows[20][3], "7.37E-01")
self.assertEqual(record.table_rows[20][4], "8.06E+01")
self.assertEqual(record.table_rows[20][5], "1.60E+02")
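        # Last record in this file: the SERIES entry that groups the three SAMPLEs above.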
record = next(records)
self.assertEqual(record.entity_type, "SERIES")
self.assertEqual(record.entity_id, "Murine ES Cells")
self.assertEqual(len(record.entity_attributes), 7)
self.assertEqual(len(record.entity_attributes["Series_sample_id"]), 3)
self.assertEqual(record.entity_attributes["Series_sample_id"][0], "Control Embyronic Stem Cell Replicate 1")
self.assertEqual(record.entity_attributes["Series_sample_id"][1], "Control Embyronic Stem Cell Replicate 2")
self.assertEqual(record.entity_attributes["Series_sample_id"][2], "Triple-Fusion Transfected Embryonic Stem Cells Replicate 1")
self.assertEqual(record.entity_attributes["Series_pubmed_id"], "16390873")
self.assertEqual(len(record.entity_attributes["Series_contributor"]), 9)
self.assertEqual(record.entity_attributes["Series_contributor"][0], "Joseph,C,Wu")
self.assertEqual(record.entity_attributes["Series_contributor"][1], "Joshua,M,Spin")
self.assertEqual(record.entity_attributes["Series_contributor"][2], "Feng,,Cao")
self.assertEqual(record.entity_attributes["Series_contributor"][3], "Shaun,,Lin")
self.assertEqual(record.entity_attributes["Series_contributor"][4], "Olivier,,Gheysens")
self.assertEqual(record.entity_attributes["Series_contributor"][5], "Ian,Y,Chen")
self.assertEqual(record.entity_attributes["Series_contributor"][6], "Anya,,Tsalenko")
self.assertEqual(record.entity_attributes["Series_contributor"][7], "Sanjiv,S,Ghambhir")
self.assertEqual(record.entity_attributes["Series_contributor"][8], "Thomas,,Quertermous")
self.assertEqual(record.entity_attributes["Series_summary"], "Transcriptional profiling of mouse embryonic stem cells comparing control untreated ES cells with ES cells transfected with a pUb-fluc-mrfp-ttk triple fusion reporter gene. The latter makes ES visualization possible by FACS and single ce")
self.assertEqual(record.entity_attributes["Series_type"], "Genetic modification")
self.assertEqual(record.entity_attributes["Series_title"], "Murine ES Cells: Control vs. Triple-Fusion Transfected")
self.assertEqual(record.entity_attributes["Series_overall_design"], "Two-condition experiment, ES vs. TF-ES cells. Biological replicates: 4 control, 3 transfected, independently grown and harvested. One replicate per array.")
self.assertEqual(len(record.col_defs), 0)
self.assertEqual(len(record.table_rows), 0)
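    # GSM804.txt holds a single dual-channel genomic (CGH) SAMPLE record with a 20-row data table.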
def test_GSM804(self):
path = "Geo/GSM804.txt"
with open(path) as handle:
records = Geo.parse(handle)
record = next(records)
self.assertEqual(record.entity_type, "SAMPLE")
self.assertEqual(record.entity_id, "GSM804")
self.assertEqual(len(record.entity_attributes), 18)
self.assertEqual(record.entity_attributes["Sample_pubmed_id"], "11687795")
self.assertEqual(record.entity_attributes["Sample_submitter_institute"], "University of California San Francisco")
self.assertEqual(len(record.entity_attributes["Sample_author"]), 19)
self.assertEqual(record.entity_attributes["Sample_author"][0], "Antoine,M,Snijders")
self.assertEqual(record.entity_attributes["Sample_author"][1], "Norma,,Nowak")
self.assertEqual(record.entity_attributes["Sample_author"][2], "Richard,,Segraves")
self.assertEqual(record.entity_attributes["Sample_author"][3], "Stephanie,,Blackwood")
self.assertEqual(record.entity_attributes["Sample_author"][4], "Nils,,Brown")
self.assertEqual(record.entity_attributes["Sample_author"][5], "Jeffery,,Conroy")
self.assertEqual(record.entity_attributes["Sample_author"][6], "Greg,,Hamilton")
self.assertEqual(record.entity_attributes["Sample_author"][7], "Anna,K,Hindle")
self.assertEqual(record.entity_attributes["Sample_author"][8], "Bing,,Huey")
self.assertEqual(record.entity_attributes["Sample_author"][9], "Karen,,Kimura")
self.assertEqual(record.entity_attributes["Sample_author"][10], "Sindy,,Law")
self.assertEqual(record.entity_attributes["Sample_author"][11], "Ken,,Myambo")
self.assertEqual(record.entity_attributes["Sample_author"][12], "Joel,,Palmer")
self.assertEqual(record.entity_attributes["Sample_author"][13], "Bauke,,Ylstra")
self.assertEqual(record.entity_attributes["Sample_author"][14], "Jingzhu,P,Yue")
self.assertEqual(record.entity_attributes["Sample_author"][15], "Joe,W,Gray")
self.assertEqual(record.entity_attributes["Sample_author"][16], "Ajay,N,Jain")
self.assertEqual(record.entity_attributes["Sample_author"][17], "Daniel,,Pinkel")
self.assertEqual(record.entity_attributes["Sample_author"][18], "Donna,G,Albertson")
self.assertEqual(record.entity_attributes["Sample_submitter_phone"], "415 502-8463")
self.assertEqual(record.entity_attributes["Sample_submitter_department"], "Comprehensive Cancer Center")
self.assertEqual(len(record.entity_attributes["Sample_description"]), 4)
self.assertEqual(record.entity_attributes["Sample_description"][0], 'Coriell Cell Repositories cell line <a href="http://locus.umdnj.edu/nigms/nigms_cgi/display.cgi?GM05296">GM05296</a>.')
self.assertEqual(record.entity_attributes["Sample_description"][1], "Fibroblast cell line derived from a 1 month old female with multiple congenital malformations, dysmorphic features, intrauterine growth retardation, heart murmur, cleft palate, equinovarus deformity, microcephaly, coloboma of right iris, clinodactyly, reduced RBC catalase activity, and 1 copy of catalase gene.")
self.assertEqual(record.entity_attributes["Sample_description"][2], "Chromosome abnormalities are present.")
self.assertEqual(record.entity_attributes["Sample_description"][3], "Karyotype is 46,XX,-11,+der(11)inv ins(11;10)(11pter> 11p13::10q21>10q24::11p13>11qter)mat")
self.assertEqual(record.entity_attributes["Sample_target_source2"], "normal male reference genomic DNA")
self.assertEqual(record.entity_attributes["Sample_target_source1"], "Cell line GM05296")
self.assertEqual(record.entity_attributes["Sample_submitter_name"], "Donna,G,Albertson")
self.assertEqual(record.entity_attributes["Sample_platform_id"], "GPL28")
self.assertEqual(record.entity_attributes["Sample_type"], "dual channel genomic")
self.assertEqual(record.entity_attributes["Sample_status"], "Public on Feb 12 2002")
self.assertEqual(record.entity_attributes["Sample_submitter_email"], "[email protected]")
self.assertEqual(record.entity_attributes["Sample_title"], "CGH_Albertson_GM05296-001218")
self.assertEqual(record.entity_attributes["Sample_organism"], "Homo sapiens")
self.assertEqual(record.entity_attributes["Sample_series_id"], "GSE16")
self.assertEqual(record.entity_attributes["Sample_submission_date"], "Jan 17 2002")
self.assertEqual(record.entity_attributes["Sample_submitter_city"], "San Francisco,CA,94143,USA")
self.assertEqual(len(record.col_defs), 5)
self.assertEqual(record.col_defs["NO_REPLICATES"], "Number of replicate spot measurements")
self.assertEqual(record.col_defs["LOG2STDDEV"], "Standard deviation of VALUE")
self.assertEqual(record.col_defs["ID_REF"], "Unique row identifier, genome position order")
self.assertEqual(record.col_defs["VALUE"], "aka LOG2RATIO, mean of log base 2 of LINEAR_RATIO")
self.assertEqual(record.col_defs["LINEAR_RATIO"], "Mean of replicate Cy3/Cy5 ratios")
self.assertEqual(len(record.table_rows), 21)
self.assertEqual(len(record.table_rows[0]), 5)
self.assertEqual(record.table_rows[0][0], "ID_REF")
self.assertEqual(record.table_rows[0][1], "VALUE")
self.assertEqual(record.table_rows[0][2], "LINEAR_RATIO")
self.assertEqual(record.table_rows[0][3], "LOG2STDDEV")
self.assertEqual(record.table_rows[0][4], "NO_REPLICATES")
self.assertEqual(len(record.table_rows[1]), 5)
self.assertEqual(record.table_rows[1][0], "1")
self.assertEqual(record.table_rows[1][1], "")
self.assertEqual(record.table_rows[1][2], "1.047765")
self.assertEqual(record.table_rows[1][3], "0.011853")
self.assertEqual(record.table_rows[1][4], "3")
self.assertEqual(len(record.table_rows[2]), 5)
self.assertEqual(record.table_rows[2][0], "2")
self.assertEqual(record.table_rows[2][1], "")
self.assertEqual(record.table_rows[2][2], "")
self.assertEqual(record.table_rows[2][3], "")
self.assertEqual(record.table_rows[2][4], "0")
self.assertEqual(len(record.table_rows[3]), 5)
self.assertEqual(record.table_rows[3][0], "3")
self.assertEqual(record.table_rows[3][1], "0.008824")
self.assertEqual(record.table_rows[3][2], "1.006135")
self.assertEqual(record.table_rows[3][3], "0.00143")
self.assertEqual(record.table_rows[3][4], "3")
self.assertEqual(len(record.table_rows[4]), 5)
self.assertEqual(record.table_rows[4][0], "4")
self.assertEqual(record.table_rows[4][1], "-0.000894")
self.assertEqual(record.table_rows[4][2], "0.99938")
self.assertEqual(record.table_rows[4][3], "0.001454")
self.assertEqual(record.table_rows[4][4], "3")
self.assertEqual(len(record.table_rows[5]), 5)
self.assertEqual(record.table_rows[5][0], "5")
self.assertEqual(record.table_rows[5][1], "0.075875")
self.assertEqual(record.table_rows[5][2], "1.054")
self.assertEqual(record.table_rows[5][3], "0.003077")
self.assertEqual(record.table_rows[5][4], "3")
self.assertEqual(len(record.table_rows[6]), 5)
self.assertEqual(record.table_rows[6][0], "6")
self.assertEqual(record.table_rows[6][1], "0.017303")
self.assertEqual(record.table_rows[6][2], "1.012066")
self.assertEqual(record.table_rows[6][3], "0.005876")
self.assertEqual(record.table_rows[6][4], "2")
self.assertEqual(len(record.table_rows[7]), 5)
self.assertEqual(record.table_rows[7][0], "7")
self.assertEqual(record.table_rows[7][1], "-0.006766")
self.assertEqual(record.table_rows[7][2], "0.995321")
self.assertEqual(record.table_rows[7][3], "0.013881")
self.assertEqual(record.table_rows[7][4], "3")
self.assertEqual(len(record.table_rows[8]), 5)
self.assertEqual(record.table_rows[8][0], "8")
self.assertEqual(record.table_rows[8][1], "0.020755")
self.assertEqual(record.table_rows[8][2], "1.014491")
self.assertEqual(record.table_rows[8][3], "0.005506")
self.assertEqual(record.table_rows[8][4], "3")
self.assertEqual(len(record.table_rows[9]), 5)
self.assertEqual(record.table_rows[9][0], "9")
self.assertEqual(record.table_rows[9][1], "-0.094938")
self.assertEqual(record.table_rows[9][2], "0.936313")
self.assertEqual(record.table_rows[9][3], "0.012662")
self.assertEqual(record.table_rows[9][4], "3")
self.assertEqual(len(record.table_rows[10]), 5)
self.assertEqual(record.table_rows[10][0], "10")
self.assertEqual(record.table_rows[10][1], "-0.054527")
self.assertEqual(record.table_rows[10][2], "0.96291")
self.assertEqual(record.table_rows[10][3], "0.01073")
self.assertEqual(record.table_rows[10][4], "3")
self.assertEqual(len(record.table_rows[11]), 5)
self.assertEqual(record.table_rows[11][0], "11")
self.assertEqual(record.table_rows[11][1], "-0.025057")
self.assertEqual(record.table_rows[11][2], "0.982782")
self.assertEqual(record.table_rows[11][3], "0.003855")
self.assertEqual(record.table_rows[11][4], "3")
self.assertEqual(len(record.table_rows[12]), 5)
self.assertEqual(record.table_rows[12][0], "12")
self.assertEqual(record.table_rows[12][1], "")
self.assertEqual(record.table_rows[12][2], "")
self.assertEqual(record.table_rows[12][3], "")
self.assertEqual(record.table_rows[12][4], "0")
self.assertEqual(len(record.table_rows[13]), 5)
self.assertEqual(record.table_rows[13][0], "13")
self.assertEqual(record.table_rows[13][1], "0.108454")
self.assertEqual(record.table_rows[13][2], "1.078072")
self.assertEqual(record.table_rows[13][3], "0.005196")
self.assertEqual(record.table_rows[13][4], "3")
self.assertEqual(len(record.table_rows[14]), 5)
self.assertEqual(record.table_rows[14][0], "14")
self.assertEqual(record.table_rows[14][1], "0.078633")
self.assertEqual(record.table_rows[14][2], "1.056017")
self.assertEqual(record.table_rows[14][3], "0.009165")
self.assertEqual(record.table_rows[14][4], "3")
self.assertEqual(len(record.table_rows[15]), 5)
self.assertEqual(record.table_rows[15][0], "15")
self.assertEqual(record.table_rows[15][1], "0.098571")
self.assertEqual(record.table_rows[15][2], "1.070712")
self.assertEqual(record.table_rows[15][3], "0.007834")
self.assertEqual(record.table_rows[15][4], "3")
self.assertEqual(len(record.table_rows[16]), 5)
self.assertEqual(record.table_rows[16][0], "16")
self.assertEqual(record.table_rows[16][1], "0.044048")
self.assertEqual(record.table_rows[16][2], "1.031003")
self.assertEqual(record.table_rows[16][3], "0.013651")
self.assertEqual(record.table_rows[16][4], "3")
self.assertEqual(len(record.table_rows[17]), 5)
self.assertEqual(record.table_rows[17][0], "17")
self.assertEqual(record.table_rows[17][1], "0.018039")
self.assertEqual(record.table_rows[17][2], "1.012582")
self.assertEqual(record.table_rows[17][3], "0.005471")
self.assertEqual(record.table_rows[17][4], "3")
self.assertEqual(len(record.table_rows[18]), 5)
self.assertEqual(record.table_rows[18][0], "18")
self.assertEqual(record.table_rows[18][1], "-0.088807")
self.assertEqual(record.table_rows[18][2], "0.9403")
self.assertEqual(record.table_rows[18][3], "0.010571")
self.assertEqual(record.table_rows[18][4], "3")
self.assertEqual(len(record.table_rows[19]), 5)
self.assertEqual(record.table_rows[19][0], "19")
self.assertEqual(record.table_rows[19][1], "0.016349")
self.assertEqual(record.table_rows[19][2], "1.011397")
self.assertEqual(record.table_rows[19][3], "0.007113")
self.assertEqual(record.table_rows[19][4], "3")
self.assertEqual(len(record.table_rows[20]), 5)
self.assertEqual(record.table_rows[20][0], "20")
self.assertEqual(record.table_rows[20][1], "0.030977")
self.assertEqual(record.table_rows[20][2], "1.021704")
self.assertEqual(record.table_rows[20][3], "0.016798")
self.assertEqual(record.table_rows[20][4], "3")
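    # soft_ex_affy_chp.txt is an Affymetrix CHP-style submission: three SAMPLE records plus one
    # SERIES, with no embedded data tables (the data live in the referenced .CHP/.CEL files).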
def test_soft_ex_affy_chp(self):
path = "Geo/soft_ex_affy_chp.txt"
with open(path) as handle:
records = Geo.parse(handle)
record = next(records)
self.assertEqual(record.entity_type, "SAMPLE")
self.assertEqual(record.entity_id, "Drosophila_T0-1")
self.assertEqual(len(record.entity_attributes), 16)
self.assertEqual(record.entity_attributes["Sample_organism_ch1"], "Drosophila melanogaster")
self.assertEqual(record.entity_attributes["Sample_label_ch1"], "biotin")
self.assertEqual(record.entity_attributes["Sample_description"], "Gene expression data from embryos younger than nuclear cycle 9, i.e. before zygotic genome activation.")
self.assertEqual(record.entity_attributes["Sample_growth_protocol_ch1"], "30 min egg collections of OreR and yw flies at 25C were aged at room temperature (RT) according to the different temporal classes T0-T4.")
self.assertEqual(len(record.entity_attributes["Sample_characteristics_ch1"]), 2)
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][0], "Genotype: yellow white and Oregon R parents")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][1], "Age: embryos younger than nuclear cycle 9, i.e. before pole cells budding")
self.assertEqual(record.entity_attributes["Sample_scan_protocol"], "GeneChips were scanned using the Hewlett-Packard GeneArray Scanner G2500A.")
self.assertEqual(record.entity_attributes["Sample_hyb_protocol"], "Following fragmentation, 10 microg of cRNA were hybridized for 16 hr at 45C on GeneChip Drosophila Genome Array. GeneChips were washed and stained in the Affymetrix Fluidics Station 400.")
self.assertEqual(record.entity_attributes["Sample_extract_protocol_ch1"], "Trizol extraction of total RNA was performed according to the manufacturer's instructions.")
self.assertEqual(record.entity_attributes["Sample_source_name_ch1"], "Drosophila embryos before nuclear cycle 9 (maternal transcripts)")
self.assertEqual(record.entity_attributes["Sample_table"], "Drosophila_T0-1.CHP")
self.assertEqual(record.entity_attributes["Sample_molecule_ch1"], "total RNA")
self.assertEqual(record.entity_attributes["Sample_label_protocol_ch1"], "Biotinylated cRNA were prepared according to the standard Affymetrix protocol from 6 microg total RNA (Expression Analysis Technical Manual, 2001, Affymetrix).")
self.assertEqual(record.entity_attributes["Sample_data_processing"], "The data were analyzed with Microarray Suite version 5.0 (MAS 5.0) using Affymetrix default analysis settings and global scaling as normalization method. The trimmed mean target intensity of each array was arbitrarily set to 100.")
self.assertEqual(record.entity_attributes["Sample_treatment_protocol_ch1"], "Embryos were dechorionated with 50% bleach, put on a cover slip and covered with Halocarbon oil 27 (Sigma). Embryos of the appropriate stage were manually selected under the dissecting scope. Selected embryos were transferred to a basket, rinsed with PBS with 0,7% NaCl, 0,04% triton-X100 and placed on ice in the Trizol solution (GibcoBRL).")
self.assertEqual(record.entity_attributes["Sample_title"], "embryo at T0, biological rep1")
self.assertEqual(record.entity_attributes["Sample_supplementary_file"], "Drosophila_T0-1.CEL")
self.assertEqual(len(record.col_defs), 0)
self.assertEqual(len(record.table_rows), 0)
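        # Next record: SAMPLE "Drosophila_T0-2".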
record = next(records)
self.assertEqual(record.entity_type, "SAMPLE")
self.assertEqual(record.entity_id, "Drosophila_T0-2")
self.assertEqual(len(record.entity_attributes), 16)
self.assertEqual(record.entity_attributes["Sample_organism_ch1"], "Drosophila melanogaster")
self.assertEqual(record.entity_attributes["Sample_label_ch1"], "biotin")
self.assertEqual(record.entity_attributes["Sample_description"], "Gene expression data from embryos younger than nuclear cycle 9, i.e. before zygotic genome activation.")
self.assertEqual(record.entity_attributes["Sample_growth_protocol_ch1"], "30 min egg collections of OreR and yw flies at 25C were aged at room temperature (RT) according to the different temporal classes T0-T4.")
self.assertEqual(len(record.entity_attributes["Sample_characteristics_ch1"]), 2)
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][0], "Genotype: yellow white and Oregon R parents")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][1], "Age: embryos younger than nuclear cycle 9, i.e. before pole cells budding")
self.assertEqual(record.entity_attributes["Sample_scan_protocol"], "GeneChips were scanned using the Hewlett-Packard GeneArray Scanner G2500A.")
self.assertEqual(record.entity_attributes["Sample_hyb_protocol"], "Following fragmentation, 10 microg of cRNA were hybridized for 16 hr at 45C on GeneChip Drosophila Genome Array. GeneChips were washed and stained in the Affymetrix Fluidics Station 400.")
self.assertEqual(record.entity_attributes["Sample_extract_protocol_ch1"], "Trizol extraction of total RNA was performed according to the manufacturer's instructions.")
self.assertEqual(record.entity_attributes["Sample_source_name_ch1"], "Drosophila embryos before nuclear cycle 9 (maternal transcripts)")
self.assertEqual(record.entity_attributes["Sample_table"], "Drosophila_T0-2.CHP")
self.assertEqual(record.entity_attributes["Sample_molecule_ch1"], "total RNA")
self.assertEqual(record.entity_attributes["Sample_label_protocol_ch1"], "Biotinylated cRNA were prepared according to the standard Affymetrix protocol from 6 microg total RNA (Expression Analysis Technical Manual, 2001, Affymetrix).")
self.assertEqual(record.entity_attributes["Sample_data_processing"], "The data were analyzed with Microarray Suite version 5.0 (MAS 5.0) using Affymetrix default analysis settings and global scaling as normalization method. The trimmed mean target intensity of each array was arbitrarily set to 100.")
self.assertEqual(record.entity_attributes["Sample_treatment_protocol_ch1"], "Embryos were dechorionated with 50% bleach, put on a cover slip and covered with Halocarbon oil 27 (Sigma). Embryos of the appropriate stage were manually selected under the dissecting scope. Selected embryos were transferred to a basket, rinsed with PBS with 0,7% NaCl, 0,04% triton-X100 and placed on ice in the Trizol solution (GibcoBRL).")
self.assertEqual(record.entity_attributes["Sample_title"], "embryo at T0, biological rep2")
self.assertEqual(record.entity_attributes["Sample_supplementary_file"], "Drosophila_T0-2.CEL")
self.assertEqual(len(record.col_defs), 0)
self.assertEqual(len(record.table_rows), 0)
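        # Next record: SAMPLE "Drosophila_T1-1".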
record = next(records)
self.assertEqual(record.entity_type, "SAMPLE")
self.assertEqual(record.entity_id, "Drosophila_T1-1")
self.assertEqual(len(record.entity_attributes), 16)
self.assertEqual(record.entity_attributes["Sample_organism_ch1"], "Drosophila melanogaster")
self.assertEqual(record.entity_attributes["Sample_label_ch1"], "biotin")
self.assertEqual(record.entity_attributes["Sample_description"], "Gene expression data from embryos in slow phase of cellularisation.")
self.assertEqual(record.entity_attributes["Sample_growth_protocol_ch1"], "30 min egg collections of OreR and yw flies at 25C were aged at room temperature (RT) according to the different temporal classes T0-T4.")
self.assertEqual(len(record.entity_attributes["Sample_characteristics_ch1"]), 2)
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][0], "Genotype: yellow white and Oregon R parents")
self.assertEqual(record.entity_attributes["Sample_characteristics_ch1"][1], "Age: embryos in slow phase of cellularisation")
self.assertEqual(record.entity_attributes["Sample_scan_protocol"], "GeneChips were scanned using the Hewlett-Packard GeneArray Scanner G2500A.")
self.assertEqual(record.entity_attributes["Sample_hyb_protocol"], "Following fragmentation, 10 microg of cRNA were hybridized for 16 hr at 45C on GeneChip Drosophila Genome Array. GeneChips were washed and stained in the Affymetrix Fluidics Station 400.")
self.assertEqual(record.entity_attributes["Sample_extract_protocol_ch1"], "Trizol extraction of total RNA was performed according to the manufacturer's instructions.")
self.assertEqual(record.entity_attributes["Sample_source_name_ch1"], "Drosophila embryos in slow phase of cellularisation")
self.assertEqual(record.entity_attributes["Sample_table"], "Drosophila_T1-1.CHP")
self.assertEqual(record.entity_attributes["Sample_molecule_ch1"], "total RNA")
self.assertEqual(record.entity_attributes["Sample_label_protocol_ch1"], "Biotinylated cRNA were prepared according to the standard Affymetrix protocol from 6 microg total RNA (Expression Analysis Technical Manual, 2001, Affymetrix).")
self.assertEqual(record.entity_attributes["Sample_data_processing"], "The data were analyzed with Microarray Suite version 5.0 (MAS 5.0) using Affymetrix default analysis settings and global scaling as normalization method. The trimmed mean target intensity of each array was arbitrarily set to 100.")
self.assertEqual(record.entity_attributes["Sample_treatment_protocol_ch1"], "Embryos were dechorionated with 50% bleach, put on a cover slip and covered with Halocarbon oil 27 (Sigma). Embryos of the appropriate stage were manually selected under the dissecting scope. Selected embryos were transferred to a basket, rinsed with PBS with 0,7% NaCl, 0,04% triton-X100 and placed on ice in the Trizol solution (GibcoBRL).")
self.assertEqual(record.entity_attributes["Sample_title"], "embryo at T1, biological rep1")
self.assertEqual(record.entity_attributes["Sample_supplementary_file"], "Drosophila_T1-1.CEL")
self.assertEqual(len(record.col_defs), 0)
self.assertEqual(len(record.table_rows), 0)
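        # Last record: the SERIES entry "Dros_embryo_timecourse".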
record = next(records)
self.assertEqual(record.entity_type, "SERIES")
self.assertEqual(record.entity_id, "Dros_embryo_timecourse")
self.assertEqual(len(record.entity_attributes), 6)
self.assertEqual(len(record.entity_attributes["Series_sample_id"]), 3)
self.assertEqual(record.entity_attributes["Series_sample_id"][0], "Drosophila_T0-1")
self.assertEqual(record.entity_attributes["Series_sample_id"][1], "Drosophila_T0-2")
self.assertEqual(record.entity_attributes["Series_sample_id"][2], "Drosophila_T1-1")
self.assertEqual(len(record.entity_attributes["Series_contributor"]), 5)
self.assertEqual(record.entity_attributes["Series_contributor"][0], "Jane,Doe")
self.assertEqual(record.entity_attributes["Series_contributor"][1], "John,A,Smith")
self.assertEqual(record.entity_attributes["Series_contributor"][2], "Hans,van Elton")
self.assertEqual(record.entity_attributes["Series_contributor"][3], "John,Smithers Jr")
self.assertEqual(record.entity_attributes["Series_contributor"][4], "Jie,D,Chen")
self.assertEqual(len(record.entity_attributes["Series_summary"]), 2)
self.assertEqual(record.entity_attributes["Series_summary"][0], "Morphogenesis of epithelial tissues relies on the precise developmental control of cell polarity and architecture. In the early Drosophila embryo, the primary epithelium forms during cellularisation, following a tightly controlled genetic programme where specific sets of genes are up-regulated. Some of them, for instance, control membrane invagination between the nuclei anchored at the apical surface of the syncytium.")
self.assertEqual(record.entity_attributes["Series_summary"][1], "We used microarrays to detail the global programme of gene expression underlying cellularisation and identified distinct classes of up-regulated genes during this process.")
self.assertEqual(record.entity_attributes["Series_type"], "time course")
self.assertEqual(record.entity_attributes["Series_title"], "Expression data from early Drosophila embryo")
self.assertEqual(record.entity_attributes["Series_overall_design"], "Drosophila embryos were selected at successive stages of early development for RNA extraction and hybridization on Affymetrix microarrays. We sought to obtain homogeneous populations of embryos at each developmental stage in order to increase the temporal resolution of expression profiles. To that end, we hand-selected embryos according to morphological criteria at five time-points: before pole cell formation, i.e. before zygotic transcription (T0), during the slow phase (T1) and the fast phase (T2) of cellularisation and at the beginning (T3) and the end (T4) of gastrulation.")
self.assertEqual(len(record.col_defs), 0)
self.assertEqual(len(record.table_rows), 0)
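# Run the full suite with verbose output when this file is executed directly.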
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
| 81.899117 | 1,148 | 0.66434 |
4a1f22fbfad9203031f638d09ed379d2d3855990 | 9,585 | py | Python | lib/python/impala_py_lib/jenkins/generate_junitxml.py | garyli1019/impala | ea0e1def6160d596082b01365fcbbb6e24afb21d | ["Apache-2.0"] | 1,523 | 2015-01-01T03:42:24.000Z | 2022-02-06T22:24:04.000Z | lib/python/impala_py_lib/jenkins/generate_junitxml.py | garyli1019/impala | ea0e1def6160d596082b01365fcbbb6e24afb21d | ["Apache-2.0"] | 10 | 2015-01-09T06:46:05.000Z | 2022-03-29T21:57:57.000Z | lib/python/impala_py_lib/jenkins/generate_junitxml.py | garyli1019/impala | ea0e1def6160d596082b01365fcbbb6e24afb21d | ["Apache-2.0"] | 647 | 2015-01-02T04:01:40.000Z | 2022-03-30T15:57:35.000Z |
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
A script for generating arbitrary junit XML reports while building Impala.
These files will be consumed by jenkins.impala.io to generate reports for
easier triaging of build and setup errors.
"""
import argparse
import errno
import os
import textwrap
from xml.dom import minidom
from xml.etree import ElementTree as ET
from datetime import datetime as dt
IMPALA_HOME = os.getenv('IMPALA_HOME', '.')
SCRIPT_NAME, _ = os.path.splitext(os.path.basename(__file__))
JUNITXML_LOGDIR = os.path.join(os.getenv("IMPALA_LOGS_DIR", "."), 'extra_junit_xml_logs')
class JunitReport(object):
"""A Junit XML style report parseable by Jenkins for reporting build status.
Generally, a caller who invokes this script from bash doesn't need to do
more than supply the necessary command line parameters. The JunitReport
class is instantiated using those initial inputs, and a timestamped XML
file is output to the $IMPALA_HOME/logs/extra_junit_xml_logs/.
Log files are timestamped, so they will not overwrite previous files containing
output of the same step.
For use from within a python script (must be invoked with impala-python), an
example might look like:
>>> from impala_py_lib.jenkins.generate_junitxml import JunitReport
>>> report = JunitReport(phase='load_data', step='load_hbase', error_msg='oops')
>>> report.tofile()
For now, the class does not support adding more than one step (analogous to a
test case) to the same phase (analogous to a test suite). Each report should
be unique for a given junit XML file. This may be enhanced at some point.
"""
def __init__(self, phase, step, error_msg=None, stdout=None, stderr=None,
elapsed_time=0):
self.root_element = None
self.testsuite_element = None
self.testcase_element = None
self.phase = phase
self.step = step
self.error_msg = error_msg
self.stdout = stdout
self.stderr = stderr
self.elapsed_time = elapsed_time
self.utc_time = dt.utcnow()
self.create_root_element()
self.add_testsuite_element()
self.add_testcase_element()
if self.error_msg is not None:
self.set_error()
if self.stdout is not None:
self.add_output('out', self.stdout)
if self.stderr is not None:
self.add_output('err', self.stderr)
def create_root_element(self):
"""Create the testsuites root element."""
self.root_element = ET.Element("testsuites")
self.root_element.set("time", "{0:.1f}".format(float(self.elapsed_time)))
self.root_element.set("tests", "1")
self.root_element.set("failures", "0")
self.root_element.set("errors", "0")
def add_testsuite_element(self):
"""Create the testsuite element."""
self.testsuite_element = ET.SubElement(self.root_element, "testsuite")
self.testsuite_element.set("name", "{name}.{phase}.{step}".format(
name=SCRIPT_NAME, phase=self.phase, step=self.step))
self.testsuite_element.set(
"timestamp", "{ts}+00:00".format(ts=self.utc_time.strftime('%Y-%m-%d %H:%M:%S')))
self.testsuite_element.set("disabled", "0")
self.testsuite_element.set("errors", "0")
self.testsuite_element.set("failures", "0")
self.testsuite_element.set("skipped", "0")
self.testsuite_element.set("tests", "1")
self.testsuite_element.set("time", "0")
self.testsuite_element.set("file", "None")
self.testsuite_element.set("log", "None")
self.testsuite_element.set("url", "None")
def add_testcase_element(self):
"""Create the testcase element."""
self.testcase_element = ET.SubElement(self.testsuite_element, "testcase")
self.testcase_element.set("classname", "{name}.{phase}".format(
name=SCRIPT_NAME, phase=self.phase))
self.testcase_element.set("name", self.step)
def set_error(self):
"""Set an error msg if the step failed, and increment necessary error attributes."""
error = ET.SubElement(self.testcase_element, "error")
error.set("message", self.error_msg)
error.set("type", "error")
self.testsuite_element.set("errors", "1")
self.root_element.set("errors", "1")
def add_output(self, output_type, file_or_string):
"""
Add stdout or stderr content to testcase element.
Args:
output_type: [string] either out or err
file_or_string: a path to a file containing the content, or a plain string
"""
output = ET.SubElement(self.testcase_element,
"system-{output_type}".format(output_type=output_type))
output.text = JunitReport.get_xml_content(file_or_string)
def to_file(self, junitxml_logdir=JUNITXML_LOGDIR):
"""
Create a timestamped XML report file.
Args:
junitxml_logdir: path to directory where the file will be created
Return:
junit_log_file: path to the generated file
"""
# The equivalent of mkdir -p
try:
os.makedirs(junitxml_logdir)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(junitxml_logdir):
pass
else:
raise
filename = '{name}.{ts}.xml'.format(
name=self.testsuite_element.attrib['name'],
ts=self.utc_time.strftime('%Y%m%d_%H_%M_%S')
)
junit_log_file = os.path.join(junitxml_logdir, filename)
with open(junit_log_file, 'w') as f:
f.write(str(self))
return junit_log_file
@staticmethod
def get_xml_content(file_or_string=None):
"""
Derive additional content for the XML report.
If the supplied parameter is the path to a file, the contents will be inserted
into the XML report. If the parameter is just plain string, use that as the
content for the report.
Args:
file_or_string: a path to a file, or a plain string
Returns:
content as a string
"""
if file_or_string is None:
content = ''
elif os.path.exists(file_or_string):
with open(file_or_string, 'r') as f:
content = f.read()
else:
content = file_or_string
return content
def __str__(self):
"""
Generate and return a pretty-printable XML string.
"""
root_node_str = minidom.parseString(ET.tostring(self.root_element))
return root_node_str.toprettyxml(indent=' ' * 4)
def get_options():
"""Parse and return command line options."""
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Required options
parser.add_argument("--phase",
default="buildall",
help="General build phase or script.")
parser.add_argument("--step",
required=True,
help=textwrap.dedent(
"""Specific build step or child script being run.
Each step must be unique for the given build phase.""")
)
parser.add_argument("-t", "--time",
type=float,
default=0,
help="If known, the elapsed time in seconds for this step.")
parser.add_argument("--stdout",
help=textwrap.dedent(
"""Standard output to include in the XML report. Can be
either a string or the path to a file..""")
)
parser.add_argument("--stderr",
help=textwrap.dedent(
"""Standard error to include in the XML report. Can be
either a string or the path to a file.""")
)
parser.add_argument("--error",
help=textwrap.dedent(
"""If specified, the XML report will mark this as an error.
This should be a brief explanation for the error.""")
)
return parser.parse_args()
def main():
"""
Create a report for each invocation of the script, and output the results
of the test case to an XML file within $IMPALA_HOME/logs/extra_junit_xml_logs.
The log file name will use "phase" and "step" values provided on the command
line to structure the report. The XML report filename will follow the form:
    generate_junitxml.<phase>.<step>.<time_stamp>.xml
Phase can be repeated in a given test run, but the step leaf node, which is
equivalent to a "test case", must be unique within each phase.
"""
options = get_options()
junit_report = JunitReport(phase=options.phase,
step=options.step,
error_msg=options.error,
stdout=options.stdout,
stderr=options.stderr,
elapsed_time=options.time)
junit_log_file = junit_report.to_file()
print("Generated: {0}".format(junit_log_file))
if "__main__" == __name__:
main()
| 35.898876 | 90 | 0.663954 |
4a1f23b9f97620c668947b6c82db3cdb6abaf69d | 14,056 | py | Python | application/views.py | mohangcsm/secops | c1398a34bba4149c24511cc73a45cd14731abd04 | ["Apache-2.0"] | 8 | 2020-08-03T05:53:46.000Z | 2021-05-05T01:59:32.000Z | application/views.py | mohangcsm/secops | c1398a34bba4149c24511cc73a45cd14731abd04 | ["Apache-2.0"] | 4 | 2021-11-05T18:17:54.000Z | 2021-11-05T18:18:49.000Z | application/views.py | mohangcsm/secops | c1398a34bba4149c24511cc73a45cd14731abd04 | ["Apache-2.0"] | 1 | 2021-03-11T05:49:04.000Z | 2021-03-11T05:49:04.000Z |
import flask, time, sys, json, sqlite3, os, sys, random, string, hashlib, subprocess, requests
from flask import render_template, session, jsonify, request, Response, flash
from flask import flash, current_app, redirect, url_for, send_from_directory
from flask_oauth import OAuth
from application import app
from jira import JIRA
from jira_functions import *
reload(sys)
sys.setdefaultencoding('utf8')
app.config.from_object(__name__)
JIRA_SETTINGS = app.config['JIRA_SETTINGS']
JIRA_URL = JIRA_SETTINGS['JIRA_URL']
JIRA_USER = JIRA_SETTINGS['JIRA_USER']
JIRA_PASS = JIRA_SETTINGS['JIRA_PASS']
JIRA_PROJECT = JIRA_SETTINGS['JIRA_PROJECT']
peer_review_enabled = app.config['PEER_REVIEW_ENABLED']
JIRA_TRANSITIONS = JIRA_SETTINGS['JIRA_TRANSITIONS'][peer_review_enabled]
PEER_REVIEW_REQUIRED_FOR = app.config['PEER_REVIEW_REQUIRED_FOR']
JIRA_COMPONENTS = JIRA_SETTINGS['JIRA_COMPONENTS']
jira = JIRA(JIRA_URL, basic_auth=(JIRA_USER,JIRA_PASS))
oauth = OAuth()
google = oauth.remote_app(
app.config['OAUTH_CLIENT'],
base_url=app.config['BASE_URL'],
authorize_url=app.config['AUTHORIZE_URL'],
request_token_url=app.config['REQUEST_TOKEN_URL'],
request_token_params=app.config['REQUEST_TOKEN_PARAMS'],
access_token_url=app.config['ACCESS_TOKEN_URL'],
access_token_method=app.config['ACCESS_TOKEN_METHOD'],
access_token_params=app.config['ACCESS_TOKEN_PARAMS'],
consumer_key=app.config['GOOGLE_CLIENT_ID'],
consumer_secret=app.config['GOOGLE_CLIENT_SECRET']
)
@app.route('/', methods=['GET'])
def index():
if not session.get('access_token'):
return render_template("login.html"), 403
return render_template('index.html',message=" ",category=""), 200
@app.route('/new_secreview', methods=['GET'])
def new_secreview():
access_token = session.get('access_token')
if access_token is None:
return render_template("login.html",message="Please login to continue",category="info"), 403
return render_template('new_secreview.html',message="",category=""), 200
@app.route('/create_secreview', methods=['GET','POST'])
def create_secreview():
redirect_url = "/"
access_token = session.get('access_token')
if access_token is None:
return render_template("login.html",message="Please login to continue",category="info"), 403
args = request.form
if not args or "requestingfor" not in args:
return render_template('new_secreview.html',message="Please fill all details before submitting",category="warning"), 200
requestingfor = args.get('requestingfor')
Product_Title = requestingfor
if requestingfor not in ("others"):
Product_Title = "["+requestingfor+"] "+args.get('Product_Title')
component = JIRA_COMPONENTS["SECURITY_REVIEW"]
if requestingfor == "sec_bug":
component = JIRA_COMPONENTS["SECURITY_BUG"]
Product_Title = "["+requestingfor+"] "+args.get('Issue_Title')
description = ""
for key in args:
if key != "requestingfor":
value = args.get(key)
if key in ('steps to reproduce','Recommendation'):
value = "\n{code}"+value+"{code}"
if key in ("Environment Details"):
value = "\n"+value
description +="*"+key+"* : "+value+"\n"
description+="*Ticket Raised By* : "+session.get('email');
description = "*requestingfor* : "+requestingfor+"\n"+description
result = create_new_jira(jira,JIRA_SETTINGS,Product_Title,description,component,peer_review_enabled)
if result.key:
redirect_url = JIRA_URL+"/browse/"+result.key
# return redirect(redirect_url), 302
return render_template("index.html",message="Ticket raised successfully: "+result.key+".<br /><br /><a href='"+redirect_url+"' target='_blank'>click here to view the ticket.</a>")
return render_template('new_secreview.html',
message="JiraError: "+str(result)+"<br />Please contact @mohan.kk",
category="warning"), 200
@app.route('/close_tickets', methods=['GET','POST'])
def close_tickets():
access_token = session.get('access_token')
if access_token is None:
return render_template("login.html",message="Please login to continue",category="info"), 403
appsec_user = session.get('appsec_user')
if not appsec_user:
return render_template("index.html",message="You are not authorized",category="danger"), 403
if request.method == 'GET':
[secreview_string,secbugs_string] = get_open_secreviews(jira,JIRA_SETTINGS)
return render_template('close_tickets.html',
peer_review_enabled = str(peer_review_enabled).lower(),
PEER_REVIEW_REQUIRED_FOR = PEER_REVIEW_REQUIRED_FOR,
secreview_string = secreview_string,
secbugs_string = secbugs_string,
message="",
category=""), 200
if request.method == "POST" and "Action" in request.form:
args = request.form
category="success"
return_code = 200
ticket_id = args.get('ticket_id')
issue = jira.issue(ticket_id)
requestingfor = args.get('requestingfor')
comments = args.get('comments')
approver = args.get('approver')
action = args.get('Action')
status = check_status(str(issue.fields.status.name),requestingfor)
if not status:
message = "Operation not allowed. Please retry after changing the JIRA state from Backlog/Todo/ToStart."
category = "warning"
return_code = 403
return render_template('index.html',message=message, category=category), return_code
if not ticket_id or not requestingfor:
message = "Manadatory parameters missing. Please check and retry"
category = "warning"
return_code = 403
return render_template('index.html',message=message, category=category), return_code
peer_review_required = False
for review_id in PEER_REVIEW_REQUIRED_FOR:
if review_id in str(issue.fields.summary):
peer_review_required = True
not_allowed = (action == "Approve" and peer_review_required and status == "In Progress")
if not_allowed:
message = "Can not approve without peer review. Please check and retry"
category = "warning"
return_code = 403
return render_template('index.html',message=message, category=category), return_code
comment_message = ""
if comments:
comment_message = "The following are the Callouts/Feedback/Comments from Appsec side\n";
comment_message += "{code}"+comments+"{code}";
if action == "Approve" or action == "Send for Review":
checks = get_request_options(requestingfor)
checks_message = "\n{code}"
for arg in args:
for check in checks:
if arg == check:
checks_message += check+"\n"
checks_message.strip("\n")
checks_message += "{code}"
if action == "Approve":
message = "Ticket Approved successfully"
approve_message = "This is good to go from appsec side. The following checks have been verified."
signing_message = "\nReview Approved by : @["+session['email']+"]"
else:
message = "Ticket sent for approver review"
approve_message = "Initial review Completed. The following checks have been performed as part of the review."
signing_message = "\nInitial Review Completed by : @["+session['email']+"]"
if approver == "":
message = "Manadatory parameters 'approver' is missing. Please check and retry"
category = "warning"
return_code = 403
return render_template('index.html',message=message, category=category), return_code
# assign_to_approver(key,approver)
comment_message = approve_message+checks_message+"\n"+comment_message+signing_message
if action == "Reject":
if not comments or comments == "":
message = "Manadatory parameters 'comments' is missing. Please check and retry"
category = "warning"
return_code = 403
return render_template('index.html',message=message, category=category), return_code
message = "Ticket Rejected successfully"
reject_message = "For mote information pelase reach out to "+app.config['SECURITY_EMAIL']+" with review ID in subject line."
comment_message = comment_message+"\n"+reject_message
result = resolve_or_close_jira(jira,JIRA_TRANSITIONS,ticket_id,comment_message,action,approver)
if not result:
message = "Error occured. Please try again after checking Jira state"
category = "warning"
return_code = 403
return render_template('index.html',message=message, category=category), return_code
return render_template('index.html',message=message, category=category), return_code
return redirect(url_for('index')), 403
@app.route('/security_base')
def security_base():
return render_template('index.html',message="currently not available", category="warning"), 200
@app.route('/rfp_base')
def rfp_base():
return render_template('index.html',message="currently not available", category="warning"), 200
@app.route('/code_review')
def code_review():
    # NOTE: the original body was truncated here ("args = re"); a minimal placeholder
    # response is assumed so the route stays usable, mirroring the other stub routes.
    return render_template('index.html', message="currently not available", category="warning"), 200
@app.route('/options.json')
def options():
return send_from_directory(app.static_folder, "options.json")
@app.route('/request_options.json')
def request_options():
return send_from_directory(app.static_folder, "request_options.json")
############ support functions ############
with app.test_request_context('/'):
def is_appsec_user(email):
appsec_users = app.config['APPSEC_USERS']
if email in appsec_users:
return True
return False
with app.test_request_context('/'):
def get_request_options(requestingfor):
with open(app.static_folder+'/options.json') as options_file:
options_all = json.load(options_file)
secreview_options = options_all['others']
for key in options_all:
if key == requestingfor:
secreview_options = options_all[key]
if 'other_options' in secreview_options and secreview_options['other_options']:
for other_key in options_all['others']:
secreview_options[other_key] = options_all['others'][other_key]
secreview_options.pop('other_options')
return secreview_options
with app.test_request_context('/'):
def check_status(status,requestingfor):
if requestingfor != 'sec_bug' and status in ("Backlog","To Do", "ToStart"):
return None
return status
############ Do not modify these route/functions ############
@app.route('/login', methods=['GET'])
def login():
callback=url_for('authorized', _external=True)
return google.authorize(callback=callback)
@app.route('/logout')
def logout():
session.clear()
return redirect(url_for('index'))
@app.route(app.config['REDIRECT_URI'])
@google.authorized_handler
def authorized(resp):
access_token = resp['access_token']
session['access_token'] = access_token, ''
from urllib2 import Request, urlopen, URLError
headers = {'Authorization': 'OAuth '+access_token}
req = Request('https://www.googleapis.com/oauth2/v1/userinfo', None, headers)
try:
res = urlopen(req)
except URLError, e:
print e
if e.code == 401:
# Unauthorized - bad token
session.pop('access_token', None)
return redirect(url_for('login'))
return res.read()
res = json.loads(res.read())
res['access_token'] = access_token
allowed_domain = False
for domain in app.config['ALLOWED_DOMAINS']:
if domain in res['email']:
allowed_domain = True
break
if not allowed_domain:
session.clear()
return render_template('login.html',message="Email ids with this domain are not allowed.",category="danger"), 403
session['email'] = res['email']
session['picture'] = res['picture']
session['type'] = 'user'
session['loginType'] = 'oauth'
session['verified'] = True
session['oauth_uid'] = res['id']
session['appsec_user'] = is_appsec_user(res['email'])
if session['access_token']:
return redirect(url_for('index'))
return render_template('login.html',message="Something went wrong. Please try again.",category="danger"), 500
@google.tokengetter
def get_access_token():
return session.get('access_token')
@app.route('/robots.txt')
def robots():
return send_from_directory(app.static_folder, "robots.txt")
@app.errorhandler(404)
def page_not_found(e):
redirect = "login.html"
access_token = session.get('access_token')
if access_token:
redirect = "index.html"
return render_template(redirect,message="404 - Requested resource does not exist.",category="warning"), 404
@app.errorhandler(403)
def server_error_403(e):
redirect = "login.html"
access_token = session.get('access_token')
if access_token:
redirect = "index.html"
return render_template(redirect,message="User not authorized to view this resource.",category="warning"), 403
# @app.errorhandler(Exception)
@app.errorhandler(500)
def server_error_500(e):
redirect = "login.html"
access_token = session.get('access_token')
if access_token:
redirect = "index.html"
return render_template(redirect,message="Something went wrong ! Please try again.",category="danger"), 500
############ Do not modify these route/functions ############
| 36.041026 | 187 | 0.656588 |
4a1f2584dca54ea276f8f978f87f9e2976db8984 | 11,616 | py | Python | dash-app/app_FEIAT.py | brain-bzh/SilentCities | 3526d1716d4d7fa970a692bb8370f7d134fbd09c | ["MIT"] | 5 | 2020-09-27T20:40:32.000Z | 2021-12-10T04:35:00.000Z | dash-app/app_FEIAT.py | brain-bzh/SilentCities | 3526d1716d4d7fa970a692bb8370f7d134fbd09c | ["MIT"] | null | null | null | dash-app/app_FEIAT.py | brain-bzh/SilentCities | 3526d1716d4d7fa970a692bb8370f7d134fbd09c | ["MIT"] | null | null | null |
import sys
sys.path.insert(0, "../")
import os
from glob import glob
# import argparse
import csv
import dash
# import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
# from dash_html_components.Div import Div
# from librosa.core import audio
# import plotly.graph_objs as go
# import plotly.express as px
import numpy as np
import pandas as pd
import visualisation.plotly_figs as pf
# from pydub import AudioSegment
from datetime import datetime
import base64
##### Path
# PATH_MP3 = '/home/nfarrugi/bigdisk2/mp3_sl/'
# PATH_DATABASE = "/home/nfarrugi/SilentCities/database/public_final_metadata_geo_stats.csv"
# PATH_TAGSITE = "/home/nfarrugi/bigdisk2/meta_silentcities/site/"
PATH_MP3 = '/data/mp3_sl/'
PATH_DATABASE = "/data/meta_silentcities/public_final_metadata_geo_stats.csv"
PATH_TAGSITE = "/data/meta_silentcities/site/"
# PATH_MP3 = '/Users/nicolas/Downloads/mp3_sl/'
# PATH_DATABASE = "/Users/nicolas/Documents/SilentCities/database/public_final_metadata_geo_stats.csv"
# PATH_TAGSITE = "/Users/nicolas/Documents/SilentCities/database/meta_silentcities"
#### Initialization
database = pd.read_csv(PATH_DATABASE)
# get available site
available_site_mp3 = glob(os.path.join(PATH_MP3, '*/'))
available_site_mp3 = np.sort([int(i[-5:-1]) for i in available_site_mp3])
available_site_process = glob(os.path.join(PATH_TAGSITE, 'results*'))
available_site_process = np.sort([int(i[-8:-4]) for i in available_site_process])
available_site = list(set(available_site_process) & set(available_site_mp3))
database = database[database['partID'].isin(available_site)].reset_index(drop=True)
# Initialization first fig
current_partID = available_site[0]
figmap = pf.get_map_fig(database)
figindic, data = pf.get_heatmaps(available_site[0], path=PATH_TAGSITE)
wavefig, path_current_audio, error_audio_file = pf.get_sample_fig(available_site[0], f"{data['name'][0][:-4]}_{int(data['start'][0])}.mp3", path=PATH_MP3)
encoded_sound = base64.b64encode(open(path_current_audio, 'rb').read())
data_1 = data.copy()
# Init_csv_file
header = ["site", 'file', 'datetime', 'current_time', 'Antropophy','Geophony', 'Biophony', 'bruit','comm']
idx = 0
LOGFILENAME = f'''logfile/{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv'''
if not os.path.exists('logfile/'):
os.makedirs('logfile/')
with open(LOGFILENAME, 'w', encoding='UTF8') as f:
writer = csv.writer(f)
writer.writerow(header)
# styles
table_header_style = {
"backgroundColor": "rgb(2,21,70)",
"color": "white",
"textAlign": "center",
}
app = dash.Dash(__name__)
styles = {
'pre': {
'border': 'thin lightgrey solid',
'overflowX': 'scroll'
}}
app.title = "SilentCities"
server = app.server
##### Layout
app.layout = html.Div(
className="",
children=[
html.Div(
className="header",
children=[
# html.A(
# id="link silent",
# children=["Silent Cities project"],
# href="https://osf.io/h285u/",
# style={"color": "white"},
# ),
html.H2("Silent Cities Project : Visualisation des données", style={"color": "white"}),
# html.A(
# id="gh-link",
# children=["Source code Silent Cities"],
# href="https://github.com/brain-bzh/SilentCities",
# style={"color": "white", "border": "solid 1px white"},
# ),
],
style={"backgroundColor": "rgb(2,21,70)", "textAlign": "center"}
),
html.Div(
className='tot',
children=[
html.Div(
className="row",
children=[
html.Div(
className="one-third column alpha",
children=[
html.Div(className="row",children = [
dcc.Graph(id='map', figure = figmap),
html.H4(children=['Méta données:'],style={"color": "blue"}),
html.H6(
id="map-info",
children=[f"Site: {database['partID'][0]}", html.Br(),
f"Pays: {database['country'][0]}", html.Br(),
f"Ville : { database['city'][0]}",html.Br(),
f"Enregistreur : {database['recorder'][0]}",html.Br(),
f"Stat : {database['s_statsmean'][0]:.3f}"
],
style={"color": "black", "border": "solid 1px white"},
),
])
]
),
html.Div(
className="two-thirds column omega",
children=[dcc.Graph(id='heatmap', figure = figindic, style={
"width": "100%"})]
)
]
),
html.Div(
className="row",
children=[
html.H6(
id="wav-info",
children=[f"Information fichier wav : file name {data['name'][0]}, date : {datetime.strptime(data['datetime'][0], '%Y%m%d_%H%M%S')}, Geophony : {data['geophony'][0]*100:.1f} %, Biophony {data['biophony'][0]*100:.1f} %, Anthropophony {data['anthropophony'][0]*100:.1f} %"],
style={"color": "blue", "border": "solid 1px white"},
),
html.Audio(id="player", src='data:audio/mpeg;base64,{}'.format(encoded_sound.decode()), controls=True, style={
"width": "100%"}),
dcc.Graph(id='spactrogram', figure = wavefig, style={
"width": "100%"})
]
),
html.Div(className="row",
children=[
html.H3(id = 'text_what',
children=['''Qu'est ce que vous entendez ?''']),
html.Div(className="one-third column alpha",
children=[
dcc.Checklist(
id="checklist",
options=[{'label':'Antropophonie', 'value':'Antropophony'},
{'label':'Géophonie', 'value':'Geophony'},
{'label':'Biophonie', 'value':'Biophony'},
{'label':'Bruit', 'value':'Bruit'}],
value=[],
# labelStyle={"display": "inline-block",'font_size': '38px', 'color':"red"},
# inputStyle={"display": "inline-block",'font_size': '26px'},
)]
),
html.Div(className="row",
children=[
dcc.Textarea(
id='text_comm',
value='',
style={'width': '50%', 'height': 100}
)
# html.Button('Enregistrer', id='textarea-state-example-button', n_clicks=0),
]
)
]
)
]
)
]
)
##### call back
@app.callback([Output('heatmap', 'figure'),
Output('map-info', 'children')],
[Input('map', 'clickData')])
def Update_heatmap(clickData):
global data
global database
global current_partID
current_partID = clickData['points'][0]['customdata'][0]
idx = clickData['points'][0]['pointNumber']
figindic, data = pf.get_heatmaps(current_partID, path=PATH_TAGSITE)
text = [f"Site : {database['partID'][idx]}", html.Br(),
f"Pays : {database['country'][idx]}", html.Br(),
f"Ville : { database['city'][idx]}",html.Br(),
f"Enregistreur : {database['recorder'][idx]}",html.Br(),
f"Stat : {database['s_statsmean'][idx]:.3f}", html.Br(),
]
print(text)
return figindic, text
@app.callback([Output('spactrogram', 'figure'),
Output('player', 'src'),
Output('wav-info', 'children'),
Output('text_comm', 'value'),
Output('checklist', 'value')],
[ Input('heatmap', 'clickData')],
[State('text_comm', 'value'),
State('checklist', 'value')])
def Update_audio(clickData, val_text, val_check):
global data
global data_1
global current_partID
global idx
global error_audio_file
print(val_text)
print(val_check)
print([current_partID, str(data_1['name'][idx])[:-4], data_1['datetime'][idx], datetime.now().strftime('%Y%m%d_%H%M%S'), 'Antropophony' in val_check, 'Geophony' in val_check, 'Biophony' in val_check, 'Bruit' in val_check, val_text])
with open(LOGFILENAME, 'a', encoding='UTF8') as f:
writer = csv.writer(f)
if error_audio_file:
writer.writerow([current_partID, str(data_1['name'][idx])[:-4], data_1['datetime'][idx], datetime.now().strftime('%Y%m%d_%H%M%S'), 'Antropophony' in val_check, 'Geophony' in val_check, 'Biophony' in val_check, 'Bruit' in val_check, 'error'])
else:
writer.writerow([current_partID, str(data_1['name'][idx])[:-4], data_1['datetime'][idx], datetime.now().strftime('%Y%m%d_%H%M%S'), 'Antropophony' in val_check, 'Geophony' in val_check, 'Biophony' in val_check, 'Bruit' in val_check, val_text])
x = clickData['points'][0]['x']
try :
idx = data.index[data["datetime"]==x][0]
text = [f"Information fichier wav : file name {str(data['name'][idx])[:-4]}_{int(data['start'][idx])}.mp3, date : {datetime.strptime(data['datetime'][idx], '%Y%m%d_%H%M%S')}",html.Br() ,f"Anthropophonie : {data['anthropophony'][idx]*100:.1f} %", html.Br() ,f"Geophonie : {data['geophony'][idx]*100:.1f} %, ", html.Br() ,f"Biophonie : {data['biophony'][idx]*100:.1f} %,"]
error_audio_file = False
except :
wavefig, path_current_audio, error_audio_file = pf.get_sample_fig(current_partID, f"ERROR", path='ERROR', error=True)
text=['erreur fichier audio']
wavefig, path_current_audio, error_audio_file = pf.get_sample_fig(current_partID, f"{str(data['name'][idx])[:-4]}_{int(data['start'][idx])}.mp3", path=PATH_MP3)
if error_audio_file:
text=['erreur fichier audio']
return wavefig, 'None', text, '', []
encoded_sound = base64.b64encode(open('temp.mp3', 'rb').read())
src = 'data:audio/mpeg;base64,{}'.format(encoded_sound.decode())
data_1 = data.copy()
return wavefig, src, text, '', []
if __name__ == '__main__':
app.run_server(debug=False, host='127.0.0.1',port=os.getenv("PORT", "8051"))
| 42.086957 | 378 | 0.512052 |
4a1f27e94389cba14b2ade5e2e132603e0c9a822 | 2,125 | py | Python | test/Scanner/parallel-rescan.py | datalogics-staylor/scons | 4c48deb6947066e53aac7d86621a7ec17f3b4034 | ["MIT"] | 3 | 2017-01-06T09:26:23.000Z | 2017-03-04T04:13:20.000Z | test/Scanner/parallel-rescan.py | datalogics-staylor/scons | 4c48deb6947066e53aac7d86621a7ec17f3b4034 | ["MIT"] | 2 | 2015-10-27T20:17:24.000Z | 2016-08-04T21:49:56.000Z | test/Scanner/parallel-rescan.py | datalogics-staylor/scons | 4c48deb6947066e53aac7d86621a7ec17f3b4034 | ["MIT"] | 4 | 2015-03-31T16:09:15.000Z | 2021-08-04T12:41:47.000Z |
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that when a source file is generated and the -j option is used,
the source file correctly gets re-scanned for implicit dependencies
after it's built.
"""
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """\
env = Environment()
env['BUILDERS']['COPY'] = Builder(action = Copy("$TARGET", "$SOURCE"))
env.COPY('a.c', 'a.in')
env.COPY('b.c', 'b.in')
env.StaticLibrary('lib', ['a.c', 'b.c'])
""")
test.write("a.in", """\
#include "a.h"
""")
test.write("b.in", """\
#include "b.h"
""")
test.write("a.h", """\
char *A_FILE = "b.in";
""")
test.write("b.h", """\
char *B_FILE = "b.in";
""")
test.run(arguments = '-j4 .',
stderr=TestSCons.noisy_ar,
match=TestSCons.match_re_dotall)
# If the dependencies weren't re-scanned properly, the .h files won't
# show up in the previous run's dependency lists, and the .o files and
# library will get rebuilt here.
test.up_to_date(arguments = '.')
test.pass_test()
| 29.109589 | 73 | 0.715765 |
4a1f2803d339ab2d6f5073a24cc0f3cbca7521bd | 3,036 | py | Python | ARedSum/src/run_para.py | BoonthichaSaejia/ThaiSum | fdb99eab23e60a933acf4e84836f53ddf05b7c8b | ["Apache-2.0"] | 23 | 2020-11-20T09:10:10.000Z | 2022-01-30T16:44:39.000Z | ARedSum/src/run_para.py | BoonthichaSaejia/ThaiSum | fdb99eab23e60a933acf4e84836f53ddf05b7c8b | ["Apache-2.0"] | 2 | 2020-12-02T13:41:57.000Z | 2021-11-07T17:19:57.000Z | ARedSum/src/run_para.py | nakhunchumpolsathien/ThaiSum | c72b26799c60ff10f31d28af6df41ea97c732285 | ["Apache-2.0"] | 7 | 2020-12-03T08:16:38.000Z | 2021-12-05T13:05:06.000Z |
import os
import sys
DATA_ROOT_DIR="/net/home/kbi/ingham_disk/doc_summarization"
DATA_PATH="%s/CNNDM/bert_data/cnndm" % DATA_ROOT_DIR
MODEL_DIR="%s/models/cnndm/" % DATA_ROOT_DIR
LOG_DIR= "/net/home/kbi/projects/doc_summarization/ARedSumSentRank/logs"
RESULT_DIR="%s/outputs" % DATA_ROOT_DIR
#script_path = "sudo /data/anaconda/envs/py36/bin/python train.py"
script_path = "python train.py"
CONST_CMD_ARR = [("bert_data_path", DATA_PATH),
("visible_gpus", "2"),
("gpu_ranks", "0"),
("accum_count", 2),
("report_every", 50),
("save_checkpoint_steps", 2000),
("decay_method", "noam")]
CONST_CMD = " ".join(["-{} {}".format(x[0], x[1]) for x in CONST_CMD_ARR])
EVAL_CMD = "-test_all"
para_names = ['mode', 'model_name', 'max_epoch', 'train_steps', 'label_format', 'use_rouge_label', 'valid_by_rouge', \
'use_doc', 'rand_input_thre', 'temperature', \
'seg_count', 'ngram_seg_count', 'bilinear_out']
short_names = ['', 'mn', 'me', 'ts', 'if', 'rl', 'vbr', 'ud', 'rit', 'tprt',\
'sc', 'nsc', 'bo']
paras = [
('train', 'base', 2, 50000, 'soft', 'f','f', False, 1.0, 0, 1, '1,1,1',1),
('train', 'ctx', 2, 50000, 'soft', 't','t', False, 1.0, 20, 30, '20,20,20',20),
('train', 'seq', 2, 50000, 'soft', 't','t', True, 0.8, 20, 1, '1,1,1',1),
]
nyt_paras = [
('train', 'base', 2, 50000, 'soft', 'f','f', False, 1.0, 0, 1, '1,1,1',1),
('train', 'ctx', 2, 50000, 'soft', 't','t', False, 0.8, 20, 30, '10,10,10',20),
('train', 'seq', 2, 50000, 'soft', 't','t', True, 0.8, 20, 1, '1,1,1',1),
]
for para in paras:
cmd_arr = []
cmd_arr.append(script_path)
model_name = "_".join(["{}{}".format(x,y) for x,y in zip(short_names, para)][1:])
    # the train/valid/test mode does not need to be included in the model name
result_path = "%s/%s/cnndm" % (RESULT_DIR, model_name)
model_path = "%s/%s" % (MODEL_DIR, model_name)
cur_cmd_option = " ".join(["-{} {}".format(x,y) for x,y in zip(para_names, para)])
mode = para[0]
if mode == "train":
batch_size = 3000
elif mode == "validate":
batch_size = 30000
cmd_arr.append(EVAL_CMD)
else:
batch_size = 30000
#cmd_arr.append("-report_rouge False")
cmd_arr.append("-batch_size %s" % batch_size)
if para[1] == 'ctx':
cmd_arr.append("-fix_scorer")
saved_model_name ="/mnt/scratch/kbi/doc_summarization/models/cnndm/ectransformer_group_me2_ts50000_ifgroup_gs1_labelremove_te2_rlf_vbrf_losswsoftmax_scorerbilinear_oiTrue_ssfirst_udTrue_rit1.0_tprt0/model_step_48000.pt"
cmd_arr.append("-train_from %s" % saved_model_name)
cmd_arr.append(CONST_CMD)
cmd_arr.append(cur_cmd_option)
cmd_arr.append("-result_path %s" % result_path)
cmd_arr.append("-model_path %s" % model_path)
cmd_arr.append("-log_file %s/%s_%s.log" % (LOG_DIR, mode, model_name))
cmd_arr.append("&> %s_%s.log" % (mode, model_name))
cmd = " " .join(cmd_arr)
print(cmd)
os.system(cmd)
| 39.947368 | 227 | 0.608037 |
4a1f2a641f43e3e3204329d321d23e2c217df1b5 | 460 | py | Python | PYTHON/cipher.py | Sonu589/Hacktoberfest-2025 | 06397aa12a41967cb112722666e384007d87dbc4 | ["MIT"] | 1 | 2021-10-04T07:14:40.000Z | 2021-10-04T07:14:40.000Z | PYTHON/cipher.py | Sonu589/Hacktoberfest-2025 | 06397aa12a41967cb112722666e384007d87dbc4 | ["MIT"] | 11 | 2022-01-24T20:42:11.000Z | 2022-02-27T23:58:24.000Z | PYTHON/cipher.py | Sonu589/Hacktoberfest-2025 | 06397aa12a41967cb112722666e384007d87dbc4 | ["MIT"] | 1 | 2021-10-05T04:40:26.000Z | 2021-10-05T04:40:26.000Z |
import string
dict = {}
data = ""
file = open("op_file.txt", "w")
# Build the substitution table: each ASCII letter maps to the letter one position
# earlier in string.ascii_letters (index -1 wraps 'a' around to 'Z').
for i in range(len(string.ascii_letters)):
    dict[string.ascii_letters[i]] = string.ascii_letters[i - 1]
print(dict)
# Encode ip_file.txt one character at a time: letters are replaced through the
# table, every other character is copied unchanged into op_file.txt.
with open("ip_file.txt") as f:
    while True:
        c = f.read(1)
        if not c:
            print("end of file")
            break
        if c in dict:
            data = dict[c]
        else:
            data = c
        file.write(data)
        print(data)
file.close()
| 20 | 59 | 0.534783 |
4a1f2ab2f861018ca2351123b6e88ed693da3713 | 32 | py | Python | lib/python3.4/__future__.py | caiocsalvador/whats_the_craic | c49ef62f1acd7379f6fd90c2b93aa1fa00c8661d | ["MIT"] | 7 | 2017-04-26T12:28:22.000Z | 2021-02-09T18:59:50.000Z | django-ng/lib/python3.4/__future__.py | Arsalen/BusinessStrategies | 209e57340359af3ea063c064982198848dc36c5f | ["MIT"] | 13 | 2015-12-04T03:38:37.000Z | 2015-12-12T00:15:46.000Z | django-ng/lib/python3.4/__future__.py | Arsalen/BusinessStrategies | 209e57340359af3ea063c064982198848dc36c5f | ["MIT"] | 8 | 2017-06-01T08:42:16.000Z | 2020-07-23T12:30:19.000Z |
/usr/lib/python3.4/__future__.py
| 32 | 32 | 0.8125 |
4a1f2ad31c499634f7a41169f80bec5e2d28ba11 | 7,284 | py | Python | vendas/core/migrations/0001_initial.py | JacksonOsvaldo/bc_calcado-vendas | b688931256dad6dc54a1ea6ef94eb108dafb58f4 | ["MIT"] | null | null | null | vendas/core/migrations/0001_initial.py | JacksonOsvaldo/bc_calcado-vendas | b688931256dad6dc54a1ea6ef94eb108dafb58f4 | ["MIT"] | 2 | 2020-02-12T02:26:22.000Z | 2020-06-05T22:25:07.000Z | vendas/core/migrations/0001_initial.py | JacksonOsvaldo/bc_calcado-vendas | b688931256dad6dc54a1ea6ef94eb108dafb58f4 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2019-08-13 18:28
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Brand',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('brand', models.CharField(max_length=50, unique=True, verbose_name='Marca')),
],
options={
'verbose_name': 'marca',
'verbose_name_plural': 'marcas',
'ordering': ['brand'],
},
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.CharField(max_length=7, primary_key=True, serialize=False, verbose_name='Id')),
('category', models.CharField(max_length=50, unique=True, verbose_name='Categoria')),
],
options={
'verbose_name': 'categoria',
'verbose_name_plural': 'categorias',
},
),
migrations.CreateModel(
name='Customer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='criado em')),
('modified', models.DateTimeField(auto_now=True, verbose_name='modificado em')),
('gender', models.CharField(choices=[('M', 'masculino'), ('F', 'feminino')], max_length=1, verbose_name='gênero')),
('cpf', models.CharField(max_length=11, verbose_name='CPF')),
('firstname', models.CharField(max_length=20, verbose_name='Nome')),
('lastname', models.CharField(max_length=20, verbose_name='Sobrenome')),
('email', models.EmailField(max_length=254, unique=True, verbose_name='e-mail')),
('phone', models.CharField(max_length=18, verbose_name='Fone')),
('birthday', models.DateTimeField(verbose_name='Nascimento')),
],
options={
'verbose_name': 'cliente',
'verbose_name_plural': 'clientes',
},
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('imported', models.BooleanField(default=False, verbose_name='Importado')),
('outofline', models.BooleanField(default=False, verbose_name='Fora de linha')),
('ncm', models.CharField(max_length=8, verbose_name='NCM')),
('product', models.CharField(max_length=100, unique=True, verbose_name='Produto')),
('price', models.DecimalField(decimal_places=2, max_digits=7, verbose_name='Preço')),
('ipi', models.DecimalField(blank=True, decimal_places=2, max_digits=3, verbose_name='IPI')),
('stock', models.IntegerField(verbose_name='Estoque atual')),
('stock_min', models.PositiveIntegerField(default=0, verbose_name='Estoque mínimo')),
('brand', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Brand', verbose_name='marca')),
('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Category', verbose_name='categoria')),
],
options={
'verbose_name': 'produto',
'verbose_name_plural': 'produtos',
'ordering': ['product'],
},
),
migrations.CreateModel(
name='Sale',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='criado em')),
('modified', models.DateTimeField(auto_now=True, verbose_name='modificado em')),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='customer_sale', to='core.Customer', verbose_name='cliente')),
],
options={
'verbose_name': 'venda',
'verbose_name_plural': 'vendas',
},
),
migrations.CreateModel(
name='SaleDetail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.PositiveSmallIntegerField(verbose_name='quantidade')),
('price_sale', models.DecimalField(decimal_places=2, default=0, max_digits=6, verbose_name='Preço de venda')),
('ipi_sale', models.DecimalField(decimal_places=2, default=0.1, max_digits=3, verbose_name='IPI')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='product_det', to='core.Product', verbose_name='produto')),
('sale', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sales_det', to='core.Sale')),
],
),
migrations.CreateModel(
name='Seller',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='criado em')),
('modified', models.DateTimeField(auto_now=True, verbose_name='modificado em')),
('gender', models.CharField(choices=[('M', 'masculino'), ('F', 'feminino')], max_length=1, verbose_name='gênero')),
('cpf', models.CharField(max_length=11, verbose_name='CPF')),
('firstname', models.CharField(max_length=20, verbose_name='Nome')),
('lastname', models.CharField(max_length=20, verbose_name='Sobrenome')),
('email', models.EmailField(max_length=254, unique=True, verbose_name='e-mail')),
('phone', models.CharField(max_length=18, verbose_name='Fone')),
('birthday', models.DateTimeField(verbose_name='Nascimento')),
('active', models.BooleanField(default=True, verbose_name='ativo')),
('internal', models.BooleanField(default=True, verbose_name='interno')),
('commissioned', models.BooleanField(default=True, verbose_name='comissionado')),
('commission', models.DecimalField(blank=True, decimal_places=2, default=0.01, max_digits=6, verbose_name='comissão')),
],
options={
'verbose_name': 'vendedor',
'verbose_name_plural': 'vendedores',
},
),
migrations.AddField(
model_name='sale',
name='seller',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='seller_sale', to='core.Seller', verbose_name='vendedor'),
),
]
| 54.766917 | 167 | 0.586491 |
4a1f2b1f59fc729a33c58bbe5fe72ebc9fe01476 | 1,168 | py | Python | test/test_pixel.py | USGS-EROS/lcmap-gen | 1be50eb316f7d737d6bbd000bd6a8b5006730928 | ["Unlicense"] | 6 | 2018-07-09T00:33:52.000Z | 2019-11-14T16:36:39.000Z | test/test_pixel.py | USGS-EROS/lcmap-gen | 1be50eb316f7d737d6bbd000bd6a8b5006730928 | ["Unlicense"] | 1 | 2018-06-11T19:35:07.000Z | 2018-06-11T19:35:07.000Z | test/test_pixel.py | USGS-EROS/lcmap-gen | 1be50eb316f7d737d6bbd000bd6a8b5006730928 | ["Unlicense"] | 2 | 2018-06-11T17:59:03.000Z | 2018-07-09T00:33:54.000Z |
from ccdc import pixel
from pyspark.sql import Row
import datetime
import test
def test_table():
assert 'pixel' == pixel.table()
def test_schema():
s = pixel.schema().simpleString()
assert s == 'struct<cx:int,cy:int,px:int,py:int,mask:array<tinyint>>'
def test_dataframe(spark_context, sql_context):
rows = [Row(cx=0,
cy=1,
px=3,
py=4,
mask=[0, 1, 2, 3, 4],
extra=True)]
df = sql_context.createDataFrame(rows)
cdf = pixel.dataframe(spark_context, df).toJSON().collect()
assert cdf == ['{"cx":0,"cy":1,"px":3,"py":4,"mask":[0,1,2,3,4]}']
def test_read_write(spark_context, sql_context):
ids = [Row(cx=0, cy=1)]
idf = sql_context.createDataFrame(ids)
rows = [Row(cx=0,
cy=1,
px=3,
py=4,
mask=[0, 1, 2, 3, 4])]
df = sql_context.createDataFrame(rows)
pdf = pixel.dataframe(spark_context, df)
written = pixel.write(spark_context, pdf)
read = pixel.read(spark_context, idf)
assert read.toJSON().collect() == written.toJSON().collect()
| 25.955556 | 73 | 0.560788 |
4a1f2b24c019b6cab7bcd6ce2496bb484f6ce95d | 12,333 | py | Python | models/imagenet/resnet_ibn_cnsn.py | yhygao/crossnorm-selfnorm | 4db2656a2398025bd9c23283a3f76a8ea5a183a9 | ["Apache-2.0"] | 71 | 2021-08-14T01:30:25.000Z | 2022-03-31T15:05:23.000Z | models/imagenet/resnet_ibn_cnsn.py | yhygao/crossnorm-selfnorm | 4db2656a2398025bd9c23283a3f76a8ea5a183a9 | ["Apache-2.0"] | 1 | 2021-11-10T03:44:45.000Z | 2021-11-23T17:46:46.000Z | models/imagenet/resnet_ibn_cnsn.py | yhygao/crossnorm-selfnorm | 4db2656a2398025bd9c23283a3f76a8ea5a183a9 | ["Apache-2.0"] | 11 | 2021-08-14T01:36:12.000Z | 2022-02-11T05:23:24.000Z |
# Code is adapted from https://github.com/XingangPan/IBN-Net/blob/8efba2b20acf1f891386bfd2f8ffb5d69c491c6a/ibnnet/resnet_ibn.py
# which is originally licensed under MIT.
import math
import warnings
import torch
import torch.nn as nn
import numpy as np
from ..cnsn import CrossNorm, SelfNorm, CNSN
__all__ = ['ResNet_IBN', 'resnet50_ibn_a', 'resnet101_ibn_a', 'resnet152_ibn_a',
'resnet50_ibn_b', 'resnet101_ibn_b', 'resnet152_ibn_b']
model_urls = {
'resnet50_ibn_a': 'https://github.com/XingangPan/IBN-Net/releases/download/v1.0/resnet50_ibn_a-d9d0bb7b.pth',
'resnet101_ibn_a': 'https://github.com/XingangPan/IBN-Net/releases/download/v1.0/resnet101_ibn_a-59ea0ac6.pth',
'resnet50_ibn_b': 'https://github.com/XingangPan/IBN-Net/releases/download/v1.0/resnet50_ibn_b-9ca61e85.pth',
'resnet101_ibn_b': 'https://github.com/XingangPan/IBN-Net/releases/download/v1.0/resnet101_ibn_b-c55f6dba.pth',
}
class IBN(nn.Module):
r"""Instance-Batch Normalization layer from
`"Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net"
<https://arxiv.org/pdf/1807.09441.pdf>`
Args:
planes (int): Number of channels for the input tensor
ratio (float): Ratio of instance normalization in the IBN layer
"""
def __init__(self, planes, ratio=0.5):
super(IBN, self).__init__()
self.half = int(planes * ratio)
self.IN = nn.InstanceNorm2d(self.half, affine=True)
self.BN = nn.BatchNorm2d(planes - self.half)
def forward(self, x):
# print('excuting ibn with half: {}'.format(self.half))
split = torch.split(x, self.half, 1)
out1 = self.IN(split[0].contiguous())
out2 = self.BN(split[1].contiguous())
out = torch.cat((out1, out2), 1)
return out
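# Minimal usage sketch (shapes are illustrative, not taken from the original repository):
# IBN keeps the tensor shape unchanged, normalizing the first half of the channels with
# InstanceNorm2d and the remaining half with BatchNorm2d, e.g.
#   ibn = IBN(planes=64)                      # 32 channels -> IN, 32 channels -> BN
#   y = ibn(torch.randn(8, 64, 56, 56))       # y.shape == (8, 64, 56, 56)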
class BottleneckCustom(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, pos, beta, crop, cnsn_type,
ibn=None, stride=1, downsample=None):
super(BottleneckCustom, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
if ibn == 'a':
self.bn1 = IBN(planes)
else:
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.IN = nn.InstanceNorm2d(planes * 4, affine=True) if ibn == 'b' else None
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
if self.IN is not None and pos == 'post':
self.cnsn = None
else:
assert cnsn_type in ['sn', 'cn', 'cnsn']
if 'cn' in cnsn_type:
print('using CrossNorm with crop: {}'.format(crop))
crossnorm = CrossNorm(crop=crop, beta=beta)
else:
crossnorm = None
if 'sn' in cnsn_type:
print('using SelfNorm')
if pos == 'pre':
selfnorm = SelfNorm(inplanes)
else:
selfnorm = SelfNorm(planes * self.expansion)
else:
selfnorm = None
self.cnsn = CNSN(crossnorm=crossnorm, selfnorm=selfnorm)
self.pos = pos
if pos is not None:
print('{} in residual module: {}'.format(cnsn_type, pos))
assert pos in ['residual', 'pre', 'post', 'identity']
def forward(self, x):
identity = x
if self.pos == 'pre':
x = self.cnsn(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
if self.pos == 'residual':
out = self.cnsn(out)
elif self.pos == 'identity':
identity = self.cnsn(identity)
out += identity
if self.IN is not None:
out = self.IN(out)
elif self.pos == 'post':
out = self.cnsn(out)
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self,
layers,
ibn_cfg=('a', 'a', 'a', None),
num_classes=1000, active_num=None, pos=None, beta=None,
crop=None, cnsn_type=None):
self.inplanes = 64
super(ResNet, self).__init__()
print('ResNet with ibn, selfnorm and crossnorm...')
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
if ibn_cfg[0] == 'b':
self.bn1 = nn.InstanceNorm2d(64, affine=True)
else:
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
if beta is not None:
print('beta: {}'.format(beta))
if crop is not None:
print('crop mode: {}'.format(crop))
self.layer1 = self._make_layer_custom(BottleneckCustom, 64, layers[0],
pos=pos, beta=beta,
crop=crop, cnsn_type=cnsn_type,
ibn=ibn_cfg[0])
self.layer2 = self._make_layer_custom(BottleneckCustom, 128, layers[1],
pos=pos, beta=beta,
crop=crop, cnsn_type=cnsn_type,
stride=2, ibn=ibn_cfg[1])
self.layer3 = self._make_layer_custom(BottleneckCustom, 256, layers[2],
pos=pos, beta=beta,
crop=crop, cnsn_type=cnsn_type,
stride=2, ibn=ibn_cfg[2])
self.layer4 = self._make_layer_custom(BottleneckCustom, 512, layers[3],
pos=pos, beta=beta,
crop=crop, cnsn_type=cnsn_type,
stride=2, ibn=ibn_cfg[3])
self.avgpool = nn.AvgPool2d(7)
self.fc = nn.Linear(512 * BottleneckCustom.expansion, num_classes)
self.cn_modules = []
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.InstanceNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, CrossNorm):
self.cn_modules.append(m)
if cnsn_type is not None and 'cn' in cnsn_type:
self.active_num = active_num
assert self.active_num > 0
print('active_num: {}'.format(self.active_num))
self.cn_num = len(self.cn_modules)
assert self.cn_num > 0
print('cn_num: {}'.format(self.cn_num))
def _make_layer_custom(self, block, planes, blocks, pos, beta,
crop, cnsn_type, stride=1, ibn=None):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, pos=pos, beta=beta,
crop=crop, cnsn_type=cnsn_type,
ibn=None if ibn == 'b' else ibn,
stride=stride, downsample=downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, pos=pos, beta=beta,
crop=crop, cnsn_type=cnsn_type,
ibn=None if (ibn == 'b' and i < blocks-1) else ibn))
return nn.Sequential(*layers)
def _enable_cross_norm(self):
active_cn_idxs = np.random.choice(self.cn_num, self.active_num, replace=False).tolist()
assert len(set(active_cn_idxs)) == self.active_num
# print('active_cn_idxs: {}'.format(active_cn_idxs))
for idx in active_cn_idxs:
self.cn_modules[idx].active = True
def forward(self, x, aug=False):
if aug:
# print('forward cross norm...')
# exit()
self._enable_cross_norm()
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet50_ibn_a(config):
"""Constructs a ResNet-50-IBN-a model.
Args:
        config: configuration object providing active_num, pos, beta, crop and cnsn_type
"""
model = ResNet(
layers=[3, 4, 6, 3],
ibn_cfg=('a', 'a', 'a', None),
active_num=config.active_num,
pos=config.pos, beta=config.beta,
crop=config.crop,
cnsn_type=config.cnsn_type)
# if pretrained:
# model.load_state_dict(torch.hub.load_state_dict_from_url(model_urls['resnet50_ibn_a']))
return model
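# Construction sketch (the config fields below are inferred from how this module reads them
# and are assumptions, not a documented API): any object exposing active_num, pos, beta,
# crop and cnsn_type can be passed as config, e.g.
#   from types import SimpleNamespace
#   cfg = SimpleNamespace(active_num=None, pos='residual', beta=None, crop=None, cnsn_type='sn')
#   model = resnet50_ibn_a(cfg)                  # SelfNorm only; 'cnsn' also enables CrossNorm
#   logits = model(torch.randn(2, 3, 224, 224))  # logits.shape == (2, 1000)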
def resnet101_ibn_a(pretrained=False, **kwargs):
"""Constructs a ResNet-101-IBN-a model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet_IBN(block=Bottleneck_IBN,
layers=[3, 4, 23, 3],
ibn_cfg=('a', 'a', 'a', None),
**kwargs)
if pretrained:
model.load_state_dict(torch.hub.load_state_dict_from_url(model_urls['resnet101_ibn_a']))
return model
def resnet152_ibn_a(pretrained=False, **kwargs):
"""Constructs a ResNet-152-IBN-a model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet_IBN(block=Bottleneck_IBN,
layers=[3, 8, 36, 3],
ibn_cfg=('a', 'a', 'a', None),
**kwargs)
if pretrained:
warnings.warn("Pretrained model not available for ResNet-152-IBN-a!")
return model
def resnet50_ibn_b(config):
"""Constructs a ResNet-50-IBN-b model.
Args:
        config: configuration object providing active_num, pos, beta, crop and cnsn_type
"""
model = ResNet(
layers=[3, 4, 6, 3],
ibn_cfg=('b', 'b', None, None),
active_num=config.active_num,
pos=config.pos, beta=config.beta,
crop=config.crop, cnsn_type=config.cnsn_type)
# if pretrained:
# model.load_state_dict(torch.hub.load_state_dict_from_url(model_urls['resnet50_ibn_b']))
return model
def resnet101_ibn_b(pretrained=False, **kwargs):
"""Constructs a ResNet-101-IBN-b model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet_IBN(block=Bottleneck_IBN,
layers=[3, 4, 23, 3],
ibn_cfg=('b', 'b', None, None),
**kwargs)
if pretrained:
model.load_state_dict(torch.hub.load_state_dict_from_url(model_urls['resnet101_ibn_b']))
return model
def resnet152_ibn_b(pretrained=False, **kwargs):
"""Constructs a ResNet-152-IBN-b model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet_IBN(block=Bottleneck_IBN,
layers=[3, 8, 36, 3],
ibn_cfg=('b', 'b', None, None),
**kwargs)
if pretrained:
warnings.warn("Pretrained model not available for ResNet-152-IBN-b!")
    return model
| 35.747826 | 127 | 0.556393 |
4a1f2c139b2b1bec23aa9651f05ed186f9481996 | 4,981 | py | Python | samfp/sami/list_of_filters.py | b1quint/samfp | 1cd9b85851c02dc61a2294d67a309f62083d358d | ["BSD-3-Clause"] | null | null | null | samfp/sami/list_of_filters.py | b1quint/samfp | 1cd9b85851c02dc61a2294d67a309f62083d358d | ["BSD-3-Clause"] | 19 | 2016-07-15T21:32:59.000Z | 2017-09-12T00:31:26.000Z | samfp/sami/list_of_filters.py | b1quint/samfp | 1cd9b85851c02dc61a2294d67a309f62083d358d | ["BSD-3-Clause"] | null | null | null |
filters = {
'Open': ['s0000', 'Open', 'Open'],
'U': ['s0001', 'U_Bessell', 'U Bessell', 'U-Bessell'],
'B': ['s0002', 'B_Bessell', 'B Bessell', 'B-Bessell'],
'V': ['s0003', 'V_Bessell', 'V Bessell', 'V-Bessell'],
'R': ['s0004', 'R_Bessell', 'R Bessell', 'R-Bessell'],
'I': ['s0005', 'I_Bessell', 'I Bessell', 'I-Bessell'],
'SAMI_Ha': ['s0021', 'SAM Ha_6563/75-3x3', 'SAM Ha_6563/75-3x3', 'SAM Ha_6563/75-3x3'],
'SAMI_SII_Broad': ['s0022', 'SAM SII_6724/75-3x3', 'SAM SII_6724/75-3x3', 'SAM SII_6724/75-3x3'],
'SAMI_NII_Narrow': ['s0025', 'SAM NII 6584/20-3x3', 'SAM NII 6584/20-3x3', 'SAM NII 6584/20-3x3'],
'Bkc': ['s0026', 'SAM B_k-c-3x3', 'SAM B k-c-3x3', 'SAM B-k-c-3x3'],
'Vkc': ['s0027', 'SAM V_k-c-3x3', 'SAM V k-c-3x3', 'SAM V-k-c-3x3'],
'Rkc': ['s0028', 'SAM R_k-c-3x3', 'SAM R k-c-3x3', 'SAM R-k-c-3x3'],
'Ikc': ['s0029', 'SAM I_k-c-3x3', 'SAM I k-c-3x3', 'SAM I-k-c-3x3'],
'BTFI_6600_20': ['B0004', '6600.5/19.3-BTFI-3x3', '6600.5/19.3-BTFI-3x3', '6600.5/19.3-BTFI-3x3'],
'u': ['s0006', 'u_Stromgren', 'u Stromgren', 'u-Stromgren'],
'v': ['s0007', 'v_Stromgren', 'v Stromgren', 'v-Stromgren'],
'b': ['s0008', 'b_Stromgren', 'b Stromgren', 'b-Stromgren'],
'y': ['s0009', 'y_Stromgren', 'y Stromgren', 'y-Stromgren'],
'u-SDSS': ['s0010', 'u_SDSS', 'u SDSS', 'u-SDSS'],
'g-SDSS': ['s0011', 's0030', 'g_SDSS', 'g SDSS', 'g-SDSS'],
'r-SDSS': ['s0012', 's0031', 'r_SDSS', 'r SDSS', 'r-SDSS'],
'i-SDSS': ['s0013', 's0032', 'i_SDSS', 'i SDSS', 'i-SDSS'],
'z-SDSS': ['s0014', 's0033', 'z_SDSS', 'z SDSS', 'z-SDSS'],
'Ha': ['s0017', 'Ha', '6563_75'],
'Wing_TiO_778': ['s0015', 'Wing_TiO_778', 'Wing TiO 778'],
'Wing_CN_812': ['s0016', 'Wing_CN_812', 'Wing CN 812'],
'gunn-g': ['c0001', 'gunn_g', 'gunn g', 'gunn-g'],
'gunn-r': ['c0002', 'gunn_r', 'gunn r', 'gunn-r'],
'gunn-I': ['c0003', 'gunn_I', 'gunn I', 'gunn-I'],
'gunn-z': ['c0004', 'gunn_z', 'gunn z', 'gunn-z'],
'B0001': ['B0001', '6569.9/18.6-BTFI-3x3"', '6569.9/18.6-BTFI-3x3"'],
'B0002': ['B0002', '6578.8/19.9-BTFI-3x3"', '6578.8/19.9-BTFI-3x3"'],
'B0003': ['B0003', '6745.5/38.6-BTFI-3x3"', '6745.5/38.6-BTFI-3x3"'],
'B0004': ['B0004', '6600.5/19.3-BTFI-3x3"', '6600.5/19.3-BTFI-3x3"'],
'c0005': ['c0005', 'CTIO_OII_3727/45', 'CTIO OII 3727/45'],
'c0006': ['c0006', 'CTIO_OIII_5019/50', 'CTIO OIII 5019/50'],
'c0007': ['c0007', 'CTIO_656375-4_6563/75', 'CTIO 656375-4 6563/75'],
'c0008': ['c0008', 'CTIO_660075-4', 'CTIO 660075-4'],
'c0009': ['c0009', 'CTIO_SII_6738/50', 'CTIO SII 6738/50'],
'c0010': ['c0010', 'CTIO_red_6850/95', 'CTIO red 6850/95'],
'c0011': ['c0011', 'CTIO_grn_5130/155', 'CTIO grn 5130/155'],
'c0012': ['c0012', 'CTIO_6129/140', 'CTIO 6129/140'],
'c0013': ['c0013', 'CTIO_S8612_BG40', 'CTIO S8612 BG40'],
'c0014': ['c0014', 'CTIO_6826/78', 'CTIO 6826/78'],
'c0015': ['c0015', 'CTIO_6961/79', 'CTIO 6961/79'],
'c0016': ['c0016', 'CTIO_6871/78', 'CTIO 6871/78'],
'c0017': ['c0017', 'CTIO_6606/75', 'CTIO 6606/75'],
'c0018': ['c0018', 'CTIO_6693/76', 'CTIO 6693/76'],
'c0019': ['c0019', 'CTIO_6520/76', 'CTIO 6520/76'],
'c0020': ['c0020', 'CTIO_6649/76', 'CTIO 6649/76'],
'c0021': ['c0021', 'CTIO_OII_3727/44', 'CTIO OII 3727/44'],
'c0022': ['c0022', 'CTIO_6916/78', 'CTIO 6916/78'],
'c0023': ['c0023', 'CTIO_7007/79', 'CTIO 7007/79'],
'c0024': ['c0024', 'CTIO_7146/80', 'CTIO 7146/80'],
'c0025': ['c0025', 'CTIO_7384/84', 'CTIO 7384/84'],
'CTIO660075': ['c0047', 'CTIO 6600/75_3X3', 'CTIO 6600/75_3X3', 'CTIO 6600/75_3X3'],
'v0001': ['v0001', 'F.Winkler_OIII_5007/55', 'F.Winkler OIII 5007/55'],
'v0002': ['v0002', 'F.Winkler_grn_5135/90', 'F.Winkler grn 5135/90'],
'v0003': ['v0003', 'F.Winkler_Ha_6572/25', 'F.Winkler Ha 6572/25'],
'v0004': ['v0004', 'F.Winkler_SII_6734/48', 'F.Winkler SII 6734/48'],
'v0005': ['v0005', 'F.Winkler_red_6852/95', 'F.Winkler red 6852/95'],
'v0006': ['v0006', 'J.Rose_6660/100', 'J.Rose 6660/100'],
'v0007': ['v0007', 'J.Rose_6840/100', 'J.Rose 6840/100'],
'v0008': ['v0008', 'LAM 9000_ROUND', 'LAM 9000_ROUND'],
'x0001': ['x0001', 'special1', 'special1'],
'x0002': ['x0002', 'special2', 'special2'],
'x0003': ['x0003', 'special3', 'special3'],
'x0004': ['x0004', 'special4', 'special4'],
'x0005': ['x0005', 'special5', 'special5'],
'x0006': ['x0006', 'special6', 'special6'],
'x0007': ['x0007', 'special7', 'special7'],
'x0008': ['x0008', 'special8', 'special8'],
'x0009': ['x0009', 'special9', 'special9'],
'x0010': ['x0010', 'special10', 'special10']
}
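# --- Added illustration (not part of the original module) --------------------
# The mapping above goes from a canonical filter key to a list of aliases.
# A minimal reverse lookup, assuming only the structure visible above:
def _find_canonical_filter_name(alias):
    """Return the canonical key whose alias list contains `alias`, or None."""
    for key, aliases in filters.items():
        if alias == key or alias in aliases:
            return key
    return None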
| 64.688312 | 102 | 0.522987 |
4a1f2d875362f4d131bcd86af18b8eaff73c8e19 | 9,722 | py | Python | python/ccxt/async/__init__.py | born2net/ccxt | 9995e50ca28513b9a68f774a3517f2c396cc0001 | [
"MIT"
] | null | null | null | python/ccxt/async/__init__.py | born2net/ccxt | 9995e50ca28513b9a68f774a3517f2c396cc0001 | [
"MIT"
] | null | null | null | python/ccxt/async/__init__.py | born2net/ccxt | 9995e50ca28513b9a68f774a3517f2c396cc0001 | [
"MIT"
] | 1 | 2018-08-09T18:11:13.000Z | 2018-08-09T18:11:13.000Z | # -*- coding: utf-8 -*-
"""CCXT: CryptoCurrency eXchange Trading Library (Async)"""
# -----------------------------------------------------------------------------
__version__ = '1.9.306'
# -----------------------------------------------------------------------------
from ccxt.async.base.exchange import Exchange # noqa: F401
from ccxt.base import errors # noqa: F401
from ccxt.base.errors import BaseError # noqa: F401
from ccxt.base.errors import ExchangeError # noqa: F401
from ccxt.base.errors import NotSupported # noqa: F401
from ccxt.base.errors import AuthenticationError # noqa: F401
from ccxt.base.errors import InsufficientFunds # noqa: F401
from ccxt.base.errors import InvalidOrder # noqa: F401
from ccxt.base.errors import OrderNotFound # noqa: F401
from ccxt.base.errors import OrderNotCached # noqa: F401
from ccxt.base.errors import NetworkError # noqa: F401
from ccxt.base.errors import DDoSProtection # noqa: F401
from ccxt.base.errors import RequestTimeout # noqa: F401
from ccxt.base.errors import ExchangeNotAvailable # noqa: F401
from ccxt.async._1broker import _1broker # noqa: F401
from ccxt.async._1btcxe import _1btcxe # noqa: F401
from ccxt.async.acx import acx # noqa: F401
from ccxt.async.allcoin import allcoin # noqa: F401
from ccxt.async.anxpro import anxpro # noqa: F401
from ccxt.async.binance import binance # noqa: F401
from ccxt.async.bit2c import bit2c # noqa: F401
from ccxt.async.bitbay import bitbay # noqa: F401
from ccxt.async.bitcoincoid import bitcoincoid # noqa: F401
from ccxt.async.bitfinex import bitfinex # noqa: F401
from ccxt.async.bitfinex2 import bitfinex2 # noqa: F401
from ccxt.async.bitflyer import bitflyer # noqa: F401
from ccxt.async.bithumb import bithumb # noqa: F401
from ccxt.async.bitlish import bitlish # noqa: F401
from ccxt.async.bitmarket import bitmarket # noqa: F401
from ccxt.async.bitmex import bitmex # noqa: F401
from ccxt.async.bitso import bitso # noqa: F401
from ccxt.async.bitstamp1 import bitstamp1 # noqa: F401
from ccxt.async.bitstamp import bitstamp # noqa: F401
from ccxt.async.bittrex import bittrex # noqa: F401
from ccxt.async.bl3p import bl3p # noqa: F401
from ccxt.async.bleutrade import bleutrade # noqa: F401
from ccxt.async.btcbox import btcbox # noqa: F401
from ccxt.async.btcchina import btcchina # noqa: F401
from ccxt.async.btcmarkets import btcmarkets # noqa: F401
from ccxt.async.btctradeua import btctradeua # noqa: F401
from ccxt.async.btcturk import btcturk # noqa: F401
from ccxt.async.btcx import btcx # noqa: F401
from ccxt.async.bter import bter # noqa: F401
from ccxt.async.bxinth import bxinth # noqa: F401
from ccxt.async.ccex import ccex # noqa: F401
from ccxt.async.cex import cex # noqa: F401
from ccxt.async.chbtc import chbtc # noqa: F401
from ccxt.async.chilebit import chilebit # noqa: F401
from ccxt.async.coincheck import coincheck # noqa: F401
from ccxt.async.coinfloor import coinfloor # noqa: F401
from ccxt.async.coingi import coingi # noqa: F401
from ccxt.async.coinmarketcap import coinmarketcap # noqa: F401
from ccxt.async.coinmate import coinmate # noqa: F401
from ccxt.async.coinsecure import coinsecure # noqa: F401
from ccxt.async.coinspot import coinspot # noqa: F401
from ccxt.async.cryptopia import cryptopia # noqa: F401
from ccxt.async.dsx import dsx # noqa: F401
from ccxt.async.exmo import exmo # noqa: F401
from ccxt.async.flowbtc import flowbtc # noqa: F401
from ccxt.async.foxbit import foxbit # noqa: F401
from ccxt.async.fybse import fybse # noqa: F401
from ccxt.async.fybsg import fybsg # noqa: F401
from ccxt.async.gatecoin import gatecoin # noqa: F401
from ccxt.async.gateio import gateio # noqa: F401
from ccxt.async.gdax import gdax # noqa: F401
from ccxt.async.gemini import gemini # noqa: F401
from ccxt.async.hitbtc import hitbtc # noqa: F401
from ccxt.async.hitbtc2 import hitbtc2 # noqa: F401
from ccxt.async.huobi import huobi # noqa: F401
from ccxt.async.huobicny import huobicny # noqa: F401
from ccxt.async.huobipro import huobipro # noqa: F401
from ccxt.async.independentreserve import independentreserve # noqa: F401
from ccxt.async.itbit import itbit # noqa: F401
from ccxt.async.jubi import jubi # noqa: F401
from ccxt.async.kraken import kraken # noqa: F401
from ccxt.async.kuna import kuna # noqa: F401
from ccxt.async.lakebtc import lakebtc # noqa: F401
from ccxt.async.livecoin import livecoin # noqa: F401
from ccxt.async.liqui import liqui # noqa: F401
from ccxt.async.luno import luno # noqa: F401
from ccxt.async.mercado import mercado # noqa: F401
from ccxt.async.mixcoins import mixcoins # noqa: F401
from ccxt.async.nova import nova # noqa: F401
from ccxt.async.okcoincny import okcoincny # noqa: F401
from ccxt.async.okcoinusd import okcoinusd # noqa: F401
from ccxt.async.okex import okex # noqa: F401
from ccxt.async.paymium import paymium # noqa: F401
from ccxt.async.poloniex import poloniex # noqa: F401
from ccxt.async.quadrigacx import quadrigacx # noqa: F401
from ccxt.async.qryptos import qryptos # noqa: F401
from ccxt.async.quoine import quoine # noqa: F401
from ccxt.async.southxchange import southxchange # noqa: F401
from ccxt.async.surbitcoin import surbitcoin # noqa: F401
from ccxt.async.tidex import tidex # noqa: F401
from ccxt.async.therock import therock # noqa: F401
from ccxt.async.urdubit import urdubit # noqa: F401
from ccxt.async.vaultoro import vaultoro # noqa: F401
from ccxt.async.vbtc import vbtc # noqa: F401
from ccxt.async.virwox import virwox # noqa: F401
from ccxt.async.wex import wex # noqa: F401
from ccxt.async.xbtce import xbtce # noqa: F401
from ccxt.async.yobit import yobit # noqa: F401
from ccxt.async.yunbi import yunbi # noqa: F401
from ccxt.async.zaif import zaif # noqa: F401
exchanges = [
'_1broker',
'_1btcxe',
'acx',
'allcoin',
'anxpro',
'binance',
'bit2c',
'bitbay',
'bitcoincoid',
'bitfinex',
'bitfinex2',
'bitflyer',
'bithumb',
'bitlish',
'bitmarket',
'bitmex',
'bitso',
'bitstamp1',
'bitstamp',
'bittrex',
'bl3p',
'bleutrade',
'btcbox',
'btcchina',
'btcmarkets',
'btctradeua',
'btcturk',
'btcx',
'bter',
'bxinth',
'ccex',
'cex',
'chbtc',
'chilebit',
'coincheck',
'coinfloor',
'coingi',
'coinmarketcap',
'coinmate',
'coinsecure',
'coinspot',
'cryptopia',
'dsx',
'exmo',
'flowbtc',
'foxbit',
'fybse',
'fybsg',
'gatecoin',
'gateio',
'gdax',
'gemini',
'hitbtc',
'hitbtc2',
'huobi',
'huobicny',
'huobipro',
'independentreserve',
'itbit',
'jubi',
'kraken',
'kuna',
'lakebtc',
'livecoin',
'liqui',
'luno',
'mercado',
'mixcoins',
'nova',
'okcoincny',
'okcoinusd',
'okex',
'paymium',
'poloniex',
'quadrigacx',
'qryptos',
'quoine',
'southxchange',
'surbitcoin',
'tidex',
'therock',
'urdubit',
'vaultoro',
'vbtc',
'virwox',
'wex',
'xbtce',
'yobit',
'yunbi',
'zaif',
]
base = [
'Exchange',
'exchanges',
]
__all__ = base + errors.__all__ + exchanges
| 44.801843 | 79 | 0.517589 |
4a1f2dbbdc7090dcfb839bd59a458e1bb464d927 | 3,153 | py | Python | odata/tests/__init__.py | suhrawardi/python-odata | 8a8f88329ca0f5b893e114bcf7ab02f3a8106ef0 | [
"MIT"
] | 74 | 2015-04-13T15:12:44.000Z | 2022-01-24T08:06:16.000Z | odata/tests/__init__.py | suhrawardi/python-odata | 8a8f88329ca0f5b893e114bcf7ab02f3a8106ef0 | [
"MIT"
] | 43 | 2015-04-11T15:08:08.000Z | 2021-04-14T16:08:43.000Z | odata/tests/__init__.py | suhrawardi/python-odata | 8a8f88329ca0f5b893e114bcf7ab02f3a8106ef0 | [
"MIT"
] | 63 | 2016-06-22T03:52:39.000Z | 2022-02-25T10:56:34.000Z | # -*- coding: utf-8 -*-
from odata import ODataService
from odata.property import StringProperty, IntegerProperty, DecimalProperty, \
NavigationProperty, DatetimeProperty
from odata.enumtype import EnumType, EnumTypeProperty
url = 'http://unittest.server.local/odata/'
Service = ODataService(url)
class DemoActionWithParameters(Service.Action):
name = 'ODataTest.DemoActionParameters'
parameters = dict(
Name=StringProperty,
Price=DecimalProperty,
)
bound_to_collection = True
class DemoAction(Service.Action):
name = 'ODataTest.DemoAction'
parameters = {}
class DemoCollectionAction(Service.Action):
name = 'ODataTest.DemoCollectionAction'
parameters = {}
bound_to_collection = True
class _DemoUnboundAction(Service.Action):
name = 'ODataTest.DemoUnboundAction'
parameters = {}
DemoUnboundAction = _DemoUnboundAction()
class DemoFunction(Service.Function):
name = 'ODataTest.DemoFunction'
parameters = {}
bound_to_collection = True
class ColorSelection(EnumType):
Black = 0
Red = 1
Blue = 2
Green = 3
class Product(Service.Entity):
__odata_type__ = 'ODataTest.Objects.Product'
__odata_collection__ = 'ProductParts'
id = IntegerProperty('ProductID', primary_key=True)
name = StringProperty('ProductName')
category = StringProperty('Category')
price = DecimalProperty('Price')
color_selection = EnumTypeProperty('ColorSelection',
enum_class=ColorSelection)
DemoAction = DemoAction()
DemoCollectionAction = DemoCollectionAction()
DemoActionWithParameters = DemoActionWithParameters()
DemoFunction = DemoFunction()
class ProductPart(Service.Entity):
__odata_type__ = 'ODataTest.Objects.ProductPart'
__odata_collection__ = 'ProductParts'
id = IntegerProperty('PartID', primary_key=True)
name = StringProperty('PartName')
size = DecimalProperty('Size')
product_id = IntegerProperty('ProductID')
class Manufacturer(Service.Entity):
__odata_type__ = 'ODataTest.Objects.Manufacturer'
__odata_collection__ = 'Manufacturers'
id = IntegerProperty('ManufacturerID', primary_key=True)
name = StringProperty('Name')
established_date = DatetimeProperty('DateEstablished')
class ProductWithNavigation(Product):
__odata_type__ = 'ODataTest.Objects.ProductWithNavigation'
__odata_collection__ = 'ProductsWithNavigation'
manufacturer_id = IntegerProperty('ManufacturerID')
manufacturer = NavigationProperty('Manufacturer', Manufacturer, foreign_key=manufacturer_id)
parts = NavigationProperty('Parts', ProductPart, collection=True)
ProductPart.product = NavigationProperty('Product', ProductWithNavigation, foreign_key=ProductPart.product_id)
class ProductManufacturerSales(Service.Entity):
__odata_type__ = 'ODataTest.Objects.ProductManufacturerSales'
__odata_collection__ = 'Product_Manufacturer_Sales'
product_id = IntegerProperty('ProductID', primary_key=True)
manufacturer_id = IntegerProperty('ManufacturerID', primary_key=True)
sales_amount = DecimalProperty('SalesAmount')
| 29.745283 | 110 | 0.745956 |
4a1f2e17df6616bab9990edaee6a81fc4e0e7ffc | 4,995 | py | Python | src/cray/cfs/inventory/dynamic.py | Cray-HPE/cfs-operator | 16cd12155ba52b89e504ed668c49b544b92d3794 | [
"MIT"
] | null | null | null | src/cray/cfs/inventory/dynamic.py | Cray-HPE/cfs-operator | 16cd12155ba52b89e504ed668c49b544b92d3794 | [
"MIT"
] | 2 | 2021-12-16T19:29:28.000Z | 2022-03-02T22:38:35.000Z | src/cray/cfs/inventory/dynamic.py | Cray-HPE/cfs-operator | 16cd12155ba52b89e504ed668c49b544b92d3794 | [
"MIT"
] | 1 | 2021-11-10T22:28:36.000Z | 2021-11-10T22:28:36.000Z | #
# MIT License
#
# (C) Copyright 2019-2022 Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
"""
cray.cfs.inventory.dynamic - Generate an inventory from HSM data.
"""
import logging
import os
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from collections import defaultdict
from cray.cfs.inventory import CFSInventoryBase
LOGGER = logging.getLogger(__name__)
class DynamicInventory(CFSInventoryBase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# FIXME TODO CASMCMS-2777
self.hsm_host = os.getenv('CRAY_SMD_SERVICE_HOST', 'cray-smd')
self.ca_cert = os.getenv('SSL_CAINFO')
LOGGER.debug('API Gateway is: %s', self.hsm_host)
LOGGER.debug('CA Cert location is: %s', self.ca_cert)
self._init_session()
def generate(self):
"""
Generate from HSM.
"""
groups = self._get_groups()
groups.update(self._get_partitions())
groups.update(self._get_components())
LOGGER.info('Dynamic inventory found a total of %d groups', len(groups))
LOGGER.debug('Dynamic inventory found the following groups: %s', ','.join(groups.keys()))
inventory = {
'all': {
'children': groups
}
}
return inventory
def _init_session(self, retries=10, connect=10, backoff_factor=0.5,
status_forcelist=(500, 502, 503, 504)):
self.session = requests.Session()
retry = Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist,
)
adapter = HTTPAdapter(max_retries=retry)
self.session.mount('http', adapter)
def _get_groups(self):
inventory = {}
try:
data = self._get_data('groups')
for group in data:
members = group['members']['ids']
hosts = {}
hosts['hosts'] = {str(member): {} for member in members}
inventory[str(group['label'])] = hosts
return inventory
except Exception as e:
LOGGER.error('Encountered an unknown exception getting groups data: {}'.format(e))
return inventory
def _get_partitions(self):
inventory = {}
try:
data = self._get_data('partitions')
for group in data:
members = group['members']['ids']
hosts = {}
hosts['hosts'] = {str(member): {} for member in members}
inventory[str(group['name'])] = hosts
return inventory
except Exception as e:
LOGGER.error('Encountered an unknown exception getting partitions data: {}'.format(e))
return inventory
def _get_components(self):
try:
hosts = defaultdict(dict)
data = self._get_data('State/Components?type=node')
for component in data['Components']:
role = ''
if 'Role' in component:
role = str(component['Role'])
hosts[role][str(component['ID'])] = {}
if 'SubRole' in component:
subrole = str(component['SubRole'])
hosts[role + '_' + subrole][str(component['ID'])] = {}
return {group: {'hosts': host} for group, host in hosts.items()}
except Exception as e:
LOGGER.error('Encountered an unknown exception getting component data: {}'.format(e))
return {}
def _get_data(self, endpoint):
url = 'http://{}/hsm/v2/{}'.format(self.hsm_host, endpoint)
LOGGER.debug('Querying %s for inventory data.', url)
r = self.session.get(url, verify=self.ca_cert)
r.raise_for_status()
return r.json()
| 38.423077 | 98 | 0.61962 |
4a1f2e68cec9d4d328a17cf4a14b39045519e4ba | 1,811 | py | Python | script/train.py | FYP-2018/S2S_attention_1 | 416d91eb71f9dcda40b925dc2b0c59582786cb86 | [
"MIT"
] | null | null | null | script/train.py | FYP-2018/S2S_attention_1 | 416d91eb71f9dcda40b925dc2b0c59582786cb86 | [
"MIT"
] | null | null | null | script/train.py | FYP-2018/S2S_attention_1 | 416d91eb71f9dcda40b925dc2b0c59582786cb86 | [
"MIT"
] | null | null | null | import tensorflow as tf
import subprocess
import logging
import os
import sys
sys.path.insert(0, '/Users/user/PycharmProjects/Seq2Seq/TensorFlow-Summarization')
# from src import data_util
MAX_STEPS = 300000
STEPS_PER_VALIDATION = 1000
STEPS_PER_CHECKPOINT = 20000
TEST_THRESHOLD = 200000
MAX_STEPS = 240
STEPS_PER_VALIDATION = 10
STEPS_PER_CHECKPOINT = 20
TEST_THRESHOLD = 200
train_params = {
"--steps_per_validation": STEPS_PER_VALIDATION,
"--steps_per_checkpoint": STEPS_PER_CHECKPOINT,
}
############################################################
# extract the data-loading procedure here
# so that dont need to load data for every training epoch
# @Crystina
############################################################
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s",
datefmt='%b %d %H:%M')
try:
global_step = tf.contrib.framework.load_variable("model", "global_step")
except:
global_step = 0
logging.info("Training starts with global_step={}. ".format(global_step))
while global_step < MAX_STEPS:
terminate_step = max(global_step + STEPS_PER_CHECKPOINT, TEST_THRESHOLD)
logging.info("Train from {} to {}. ".format(global_step, terminate_step))
train_proc = ["python", "src/summarization.py", "--max_iter", str(terminate_step)]
test_proc = ["python", "script/test.py"]
for key, val in train_params.items():
train_proc.append(key)
train_proc.append(str(val))
subprocess.call(train_proc)
global_step = terminate_step
# subprocess.call(["python3", "script/test.py"])
subprocess.call(test_proc)
| 28.296875 | 101 | 0.631695 |
4a1f2e7410b8a66f6c0f8a4294c7a7fe4f6df6f6 | 4,630 | py | Python | fpga_interchange/constraints/tool.py | chipsalliance/python-fpga-interchange | 27fc1db2a37f5e224396c50f0770ea0caef708d9 | [
"ISC"
] | 3 | 2021-12-08T14:06:50.000Z | 2022-02-15T00:45:24.000Z | fpga_interchange/constraints/tool.py | chipsalliance/python-fpga-interchange | 27fc1db2a37f5e224396c50f0770ea0caef708d9 | [
"ISC"
] | 2 | 2022-03-16T14:07:29.000Z | 2022-03-21T15:04:31.000Z | fpga_interchange/constraints/tool.py | chipsalliance/python-fpga-interchange | 27fc1db2a37f5e224396c50f0770ea0caef708d9 | [
"ISC"
] | 3 | 2021-11-15T13:03:46.000Z | 2022-02-11T22:41:24.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The F4PGA Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import argparse
import pprint
from pysat.solvers import Solver
import sys
from fpga_interchange.interchange_capnp import Interchange
from fpga_interchange.constraints.placement_oracle import PlacementOracle
from fpga_interchange.constraints.model import CellInstance, Placement
def make_problem_from_device(device, allowed_sites):
""" Generate constraint problem from device database. """
model = device.get_constraints()
placement_oracle = PlacementOracle()
placement_oracle.add_sites_from_device(device)
placements = []
for tile, site, tile_type, site_type, bel, bel_type in device.yield_bels():
if site not in allowed_sites:
continue
placements.append(
Placement(
tile=tile,
site=site,
tile_type=tile_type,
site_type=site_type,
bel=bel))
return model, placement_oracle, placements
def create_constraint_cells_from_netlist(netlist, filtered_out=set()):
""" Generate cells from logical netlist. """
cells = []
for leaf_cell_name, cell_inst in netlist.yield_leaf_cells():
if cell_inst.cell_name in filtered_out:
continue
cells.append(
CellInstance(
cell=cell_inst.cell_name, name=leaf_cell_name, ports={}))
return cells
def main():
parser = argparse.ArgumentParser(
description="Run FPGA constraints placement engine.")
parser.add_argument('--schema_dir', required=True)
parser.add_argument(
'--assumptions', help='Comma seperated list of assumptions to hold')
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--allowed_sites', required=True)
parser.add_argument('--filtered_cells')
parser.add_argument('device')
parser.add_argument('netlist')
args = parser.parse_args()
interchange = Interchange(args.schema_dir)
with open(args.device, 'rb') as f:
device = interchange.read_device_resources(f)
with open(args.netlist, 'rb') as f:
netlist = interchange.read_logical_netlist(f)
allowed_sites = set(args.allowed_sites.split(','))
filtered_cells = set()
if args.filtered_cells is not None:
filtered_cells = set(cell for cell in args.filtered_cells.split(','))
model, placement_oracle, placements = make_problem_from_device(
device, allowed_sites)
cells = create_constraint_cells_from_netlist(netlist, filtered_cells)
solver = model.build_sat(placements, cells, placement_oracle)
if args.verbose:
print()
print("Preparing solver")
print()
clauses = solver.prepare_for_sat()
if args.verbose:
print()
print("Variable names ({} total):".format(len(solver.variable_names)))
print()
for variable in solver.variable_names:
print(variable)
print()
print("Clauses:")
print()
for clause in solver.abstract_clauses:
print(clause)
assumptions = []
if args.assumptions:
for assumption in args.assumptions.split(','):
assumptions.append(solver.get_variable(assumption))
with Solver() as sat:
for clause in clauses:
if args.verbose:
print(clause)
sat.add_clause(clause)
if args.verbose:
print()
print("Running SAT:")
print()
print("Assumptions:")
print(assumptions)
solved = sat.solve(assumptions=assumptions)
if args.verbose:
print(sat.time())
if solved:
model = sat.get_model()
else:
core = sat.get_core()
if solved:
if args.verbose:
print()
print("Raw Solution:")
print()
print(model)
print("Solution:")
state_groups_vars, other_vars = solver.decode_solution_model(model)
assert len(other_vars) == 0
pprint.pprint(state_groups_vars)
else:
print("Unsatifiable!")
if core is not None:
print("Core:")
print(core)
print("Core variables:")
for core_index in core:
print(solver.variable_names[core_index])
sys.exit(1)
if __name__ == "__main__":
main()
| 28.231707 | 79 | 0.631749 |
4a1f2f029eedf63f451068ef7ce97feb409c16de | 1,280 | py | Python | src/fvm/test/PARALLEL_TESTS/testPartMesh.py | drm42/fvm-drm | c9b940e593034f1aa3020d63ff1e09ebef9c182a | [
"MIT"
] | null | null | null | src/fvm/test/PARALLEL_TESTS/testPartMesh.py | drm42/fvm-drm | c9b940e593034f1aa3020d63ff1e09ebef9c182a | [
"MIT"
] | null | null | null | src/fvm/test/PARALLEL_TESTS/testPartMesh.py | drm42/fvm-drm | c9b940e593034f1aa3020d63ff1e09ebef9c182a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Usage: testPartMesh.py [options] infile
options are:
--type 'tri'[default], 'quad', 'hexa', or 'tetra'
--xdmf Dump data in xdmf
"""
import sys, fvm
import fvm.fvmbaseExt as fvmbaseExt
import fvm.importers as importers
import fvm.fvmparallel as fvmparallel
from mpi4py import MPI
from FluentCase import FluentCase
from optparse import OptionParser
etype = {
'tri' : 1,
'quad' : 2,
'tetra' : 3,
'hexa' : 4
}
numIterations = 10
def usage():
print __doc__
sys.exit(1)
parser = OptionParser()
parser.set_defaults(type='tri')
parser.add_option("--type", help="'tri'[default], 'quad', 'hexa', or 'tetra'")
parser.add_option("--xdmf", action='store_true', help="Dump data in xdmf")
(options, args) = parser.parse_args()
if len(args) != 1:
usage()
reader = FluentCase(args[0])
reader.read()
fluent_meshes = reader.getMeshList()
nmesh = MPI.COMM_WORLD.Get_size()
npart = [nmesh]
etype = [etype[options.type]]
part_mesh = fvmparallel.PartMesh( fluent_meshes, npart, etype );
part_mesh.setWeightType(0);
part_mesh.setNumFlag(0);
#actions
part_mesh.partition()
part_mesh.mesh()
part_mesh.mesh_debug()
part_mesh.debug_print()
if options.xdmf:
part_mesh.mesh_xdmfplot()
#meshes = part_mesh.meshList()
| 21.694915 | 78 | 0.696094 |
4a1f2fc0ae4af02f40808c77018a8d39708c904e | 1,023 | py | Python | kubernetes/test/test_v2alpha1_horizontal_pod_autoscaler_list.py | jraby/kubernetes-client-python | e6e7b710d0b15fbde686bc9dccf00da5951bef84 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v2alpha1_horizontal_pod_autoscaler_list.py | jraby/kubernetes-client-python | e6e7b710d0b15fbde686bc9dccf00da5951bef84 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v2alpha1_horizontal_pod_autoscaler_list.py | jraby/kubernetes-client-python | e6e7b710d0b15fbde686bc9dccf00da5951bef84 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v2alpha1_horizontal_pod_autoscaler_list import V2alpha1HorizontalPodAutoscalerList
class TestV2alpha1HorizontalPodAutoscalerList(unittest.TestCase):
""" V2alpha1HorizontalPodAutoscalerList unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV2alpha1HorizontalPodAutoscalerList(self):
"""
Test V2alpha1HorizontalPodAutoscalerList
"""
model = kubernetes.client.models.v2alpha1_horizontal_pod_autoscaler_list.V2alpha1HorizontalPodAutoscalerList()
if __name__ == '__main__':
unittest.main()
| 23.790698 | 118 | 0.755621 |
4a1f2fce7a5f17e577f0f8c7b37105ac4c232f6f | 591 | py | Python | Problem Solving/Algorithms/Warmup/A Very Big Sum.py | MonwarAdeeb/HackerRank-Solutions | 571327e9688061745000ae81c5fd74ff7a2976d4 | [
"MIT"
] | null | null | null | Problem Solving/Algorithms/Warmup/A Very Big Sum.py | MonwarAdeeb/HackerRank-Solutions | 571327e9688061745000ae81c5fd74ff7a2976d4 | [
"MIT"
] | null | null | null | Problem Solving/Algorithms/Warmup/A Very Big Sum.py | MonwarAdeeb/HackerRank-Solutions | 571327e9688061745000ae81c5fd74ff7a2976d4 | [
"MIT"
] | null | null | null | #!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'aVeryBigSum' function below.
#
# The function is expected to return a LONG_INTEGER.
# The function accepts LONG_INTEGER_ARRAY ar as parameter.
#
def aVeryBigSum(ar):
# Write your code here
sum_of_numbers = sum(ar)
return sum_of_numbers
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
ar_count = int(input().strip())
ar = list(map(int, input().rstrip().split()))
result = aVeryBigSum(ar)
fptr.write(str(result) + '\n')
fptr.close()
| 16.885714 | 58 | 0.673435 |
4a1f2fd483a7c96c5b86be28e9d48b671e25cabe | 3,754 | py | Python | src/plotMethods.py | Syuukakou/SCIS2022 | d6cf84d938b6afa022edb9c58629ca3f9490ea2c | [
"MIT"
] | null | null | null | src/plotMethods.py | Syuukakou/SCIS2022 | d6cf84d938b6afa022edb9c58629ca3f9490ea2c | [
"MIT"
] | null | null | null | src/plotMethods.py | Syuukakou/SCIS2022 | d6cf84d938b6afa022edb9c58629ca3f9490ea2c | [
"MIT"
] | null | null | null | import networkx as nx
import matplotlib.pyplot as plt
import json, collections
import seaborn as sns
import textwrap
import pandas as pd
import matplotlib as mpl
def plot_dict_data(sources, save_path, title, xlabel, ylabel, fig_w, fig_h, rotation, show_Barlabel=False, wrap_xticklabels=False):
"""plot dict data
Args:
        sources (dict): mapping from category label to numeric value, plotted as bars
        save_path (file path): where the plotted figure is saved
title (string): Plot image's title
xlabel (string): x axis label
ylabel (string): y axis label
fig_w (int): figure size's width
fig_h (int): figure size's height
rotation (float or int): x axis tick label's rotation
show_Barlabel (bool, optional): [Whether or not to show the value of the bar]. Defaults to False.
wrap_xticklabels (bool, optional): [Whether or not to wrap the x axis tick labels]. Defaults to False.
"""
x_data = list(sources.keys())
y_data = list(sources.values())
# seaborn
# Width, height in inches
plt.figure(figsize=(fig_w, fig_h))
sns.set_style("whitegrid")
ax = sns.barplot(x=x_data, y=y_data)
ax.set(xlabel=xlabel, ylabel=ylabel)
ax.set_title(title)
plt.setp(ax.get_xticklabels(), rotation=rotation, ha="center", rotation_mode="anchor", fontsize=10)
ax.tick_params(axis='x', which='major', pad=12)
plt.ticklabel_format(style='plain', axis='y')
ax.set_yticklabels(['{:,}'.format(int(x)) for x in ax.get_yticks().tolist()])
# ax.tick_params(axis='x', rotation=rotation, labelsize=15, horizontalalignment="right")
    # Wrap the x-axis tick labels
if wrap_xticklabels:
f = lambda x: textwrap.fill(x.get_text(), 10)
ax.set_xticklabels(map(f, ax.get_xticklabels()))
# add label
if show_Barlabel:
for p in ax.patches:
ax.annotate("%.0f" % p.get_height(), (p.get_x() + p.get_width() / 2., p.get_height()),
ha='center', va='center', fontsize=10, color='black', xytext=(0, 5),
textcoords='offset points', rotation=rotation)
plt.tight_layout()
plt.savefig(save_path)
plt.show()
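# Example call (added sketch; the output path is a placeholder):
#   plot_dict_data({"2019": 120, "2020": 340, "2021": 95}, "counts.png",
#                  "Samples per year", "Year", "Count",
#                  fig_w=8, fig_h=5, rotation=0, show_Barlabel=True)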
def plot_dict_data_DoubleBar(sourcedata, save_path, title, xlabel, ylabel, fig_w, fig_h, rotation, show_Barlabel=False, wrap_xticklabels=False):
"""[summary]
Args:
sourcedata ([type]): [description]
"""
# plot_data = pd.DataFrame.from_dict(sourcedata, orient="index")
plot_data = pd.DataFrame(sourcedata[1:], columns=sourcedata[0])
plt.figure(figsize=(fig_w, fig_h))
sns.set_style("darkgrid")
g = sns.barplot(data=plot_data, x="IP Address", y="Value", hue="Type")
g.set(xlabel=xlabel, ylabel=ylabel)
g.set_title(title)
g.tick_params(axis='x', rotation=rotation, labelsize=15)
# print(plot_data)
    # # Wrap the x-axis tick labels
if wrap_xticklabels:
f = lambda x: textwrap.fill(x.get_text(), 10)
g.set_xticklabels(map(f, g.get_xticklabels()))
# add label
if show_Barlabel:
for p in g.patches:
g.annotate("%.0f" % p.get_height(), (p.get_x() + p.get_width() / 2., p.get_height()),
ha='center', va='center', fontsize=10, color='black', xytext=(0, 5),
textcoords='offset points')
plt.tight_layout()
if len(save_path) == 0:
plt.show()
else:
plt.savefig(save_path)
plt.show()
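# Example of the expected `sourcedata` layout (added sketch; values are made up).
# The first row must carry the column names used inside the function:
#   rows = [["IP Address", "Value", "Type"],
#           ["10.0.0.1", 120, "download"],
#           ["10.0.0.1", 80, "upload"],
#           ["10.0.0.2", 60, "download"]]
#   plot_dict_data_DoubleBar(rows, "", "Traffic per host", "Host", "Requests", 8, 5, 45)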
def plot_pie_dict(dict_data, save_path):
data = list(dict_data.values())
labels = list(dict_data.keys())
colors = sns.color_palette("pastel")
plt.pie(data, labels=labels, colors=colors, autopct='%1.0f%%')
plt.savefig(save_path)
plt.show()
"""
vaddr=0x00000040 paddr=0x00000040 ord=011 fwd=NONE sz=11 bind=GLOBAL type=FUNC name=int_cmp
""" | 37.168317 | 144 | 0.643847 |
4a1f322bc34eb9673ed95c9dd4450b081ee1ad87 | 515 | py | Python | Templates/FuncApp-Http-sql-Example/tools/tools_math.py | mmaysami/azure-functions-python | e97b29204af83bc1fc81b886f841fe7b7bc0c8a3 | [
"MIT"
] | null | null | null | Templates/FuncApp-Http-sql-Example/tools/tools_math.py | mmaysami/azure-functions-python | e97b29204af83bc1fc81b886f841fe7b7bc0c8a3 | [
"MIT"
] | null | null | null | Templates/FuncApp-Http-sql-Example/tools/tools_math.py | mmaysami/azure-functions-python | e97b29204af83bc1fc81b886f841fe7b7bc0c8a3 | [
"MIT"
] | null | null | null | # import numpy as np
# import pandas as pd
# import sklearn
import time
def sum1(a, b):
start = time.time()
a = float(a)
b = float(b)
return a+b, time.time()-start
def sub1(a, b):
start = time.time()
a = float(a)
b = float(b)
return a-b, time.time()-start
def pow1(a, b):
start = time.time()
a = float(a)
b = float(b)
return a**b, time.time()-start
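# Note (added): every helper in this module returns a (result, elapsed_seconds)
# tuple, e.g.:
#   value, elapsed = sum1(2, 3)   # value == 5.0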
def div1(a, b):
start = time.time()
a = float(a)
b = float(b)
return a//b, time.time()-start
| 15.147059 | 34 | 0.547573 |
4a1f32b66c4411e2b628e98f5cbc4497a90b23fa | 774 | py | Python | solutions/1071-Greatest-Common-Divisor-of-Strings/1071.py | leetcode-notebook/wonz | 9ffd2ce9b5f3a544ee958f5a0673215afd176c2b | [
"MIT"
] | 12 | 2020-04-21T01:09:14.000Z | 2022-01-13T08:42:03.000Z | solutions/1071-Greatest-Common-Divisor-of-Strings/1071.py | leetcode-notebook/wonz | 9ffd2ce9b5f3a544ee958f5a0673215afd176c2b | [
"MIT"
] | null | null | null | solutions/1071-Greatest-Common-Divisor-of-Strings/1071.py | leetcode-notebook/wonz | 9ffd2ce9b5f3a544ee958f5a0673215afd176c2b | [
"MIT"
] | 4 | 2020-03-31T03:06:16.000Z | 2021-07-06T07:27:44.000Z | class Solution:
def gcdOfStrings(self, str1: str, str2: str) -> str:
m, n = len(str1), len(str2)
# solution one
i, j = 0, 0
while i < m or j < n:
if str1[i % m] != str2[j % n]:
return ""
i += 1
j += 1
        # Compute the greatest common divisor (GCD)
def gcd(a, b):
return a if b == 0 else gcd(b, a % b)
return str1[:gcd(m, n)]
# solution two
if str1 + str2 != str2 + str1:
return ""
        # Compute the greatest common divisor (GCD)
def gcd(a, b):
return a if b == 0 else gcd(b, a % b)
return str1[:gcd(m, n)]
if __name__ == "__main__":
str1 = "ABCABC"
str2 = "ABC"
print(Solution().gcdOfStrings(str1, str2)) | 24.1875 | 56 | 0.410853 |
4a1f32f523889f8112ab924cad4c4dd453c3dad3 | 1,911 | py | Python | src/krux/light.py | odudex/krux | db421a3f107c0263221e5f1e877e9c38925bb17c | [
"MIT"
] | null | null | null | src/krux/light.py | odudex/krux | db421a3f107c0263221e5f1e877e9c38925bb17c | [
"MIT"
] | null | null | null | src/krux/light.py | odudex/krux | db421a3f107c0263221e5f1e877e9c38925bb17c | [
"MIT"
] | null | null | null | # The MIT License (MIT)
# Copyright (c) 2021 Tom J. Sun
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import board
from Maix import GPIO
from fpioa_manager import fm
class Light:
"""Light is a singleton interface for interacting with the device's LED light"""
def __init__(self):
fm.register(board.config["krux"]["pins"]["LED_W"], fm.fpioa.GPIO3)
self.led_w = GPIO(GPIO.GPIO3, GPIO.OUT)
self.turn_off()
def is_on(self):
"""Returns a boolean indicating if the light is currently on"""
return self.led_w.value() == 0
def turn_on(self):
"""Turns on the light"""
self.led_w.value(0)
def turn_off(self):
"""Turns off the light"""
self.led_w.value(1)
def toggle(self):
"""Toggles the light on or off"""
if self.is_on():
self.turn_off()
else:
self.turn_on()
| 36.056604 | 84 | 0.698587 |
4a1f338100b935fc86d18b4762943134ab6f124a | 39,614 | py | Python | bioblend/cloudman/launch.py | fubar2/bioblend | 99ea9dae70dcda803a6465aa6ecae6f4ab39aef7 | [
"MIT"
] | 51 | 2015-01-23T20:45:01.000Z | 2022-01-31T10:46:31.000Z | bioblend/cloudman/launch.py | fubar2/bioblend | 99ea9dae70dcda803a6465aa6ecae6f4ab39aef7 | [
"MIT"
] | 288 | 2015-01-22T21:01:31.000Z | 2022-03-14T09:09:25.000Z | bioblend/cloudman/launch.py | fubar2/bioblend | 99ea9dae70dcda803a6465aa6ecae6f4ab39aef7 | [
"MIT"
] | 87 | 2015-02-02T06:31:54.000Z | 2022-03-31T02:39:31.000Z | """
Setup and launch a CloudMan instance.
"""
import datetime
import socket
from http.client import (
BadStatusLine,
HTTPConnection,
HTTPException,
)
from urllib.parse import urlparse
import boto
import yaml
from boto.ec2.regioninfo import RegionInfo
from boto.exception import EC2ResponseError, S3ResponseError
from boto.s3.connection import OrdinaryCallingFormat, S3Connection, SubdomainCallingFormat
import bioblend
from bioblend.util import Bunch
# Uncomment the following line if no logging from boto is desired
# bioblend.logging.getLogger('boto').setLevel(bioblend.logging.CRITICAL)
# Uncomment the following line if logging at the prompt is desired
# bioblend.set_stream_logger(__name__)
def instance_types(cloud_name='generic'):
"""
Return a list of dictionaries containing details about the available
instance types for the given `cloud_name`.
:type cloud_name: str
:param cloud_name: A name of the cloud for which the list of instance
types will be returned. Valid values are: `aws`,
`nectar`, `generic`.
:rtype: list
:return: A list of dictionaries describing instance types. Each dict will
contain the following keys: `name`, `model`, and `description`.
"""
instance_list = []
if cloud_name.lower() == 'aws':
instance_list.append({"model": "c3.large",
"name": "Compute optimized Large",
"description": "2 vCPU/4GB RAM"})
instance_list.append({"model": "c3.2xlarge",
"name": "Compute optimized 2xLarge",
"description": "8 vCPU/15GB RAM"})
instance_list.append({"model": "c3.8xlarge",
"name": "Compute optimized 8xLarge",
"description": "32 vCPU/60GB RAM"})
elif cloud_name.lower() in ['nectar', 'generic']:
instance_list.append({"model": "m1.small",
"name": "Small",
"description": "1 vCPU / 4GB RAM"})
instance_list.append({"model": "m1.medium",
"name": "Medium",
"description": "2 vCPU / 8GB RAM"})
instance_list.append({"model": "m1.large",
"name": "Large",
"description": "4 vCPU / 16GB RAM"})
instance_list.append({"model": "m1.xlarge",
"name": "Extra Large",
"description": "8 vCPU / 32GB RAM"})
instance_list.append({"model": "m1.xxlarge",
"name": "Extra-extra Large",
"description": "16 vCPU / 64GB RAM"})
return instance_list
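# --- Added example (not part of the original module) -------------------------
# A minimal sketch of consuming instance_types(); it relies only on the keys
# documented above ('name', 'model', 'description').
def _example_list_models(cloud_name='nectar'):
    """Return just the model identifiers for a cloud, e.g. ['m1.small', ...]."""
    return [itype['model'] for itype in instance_types(cloud_name)]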
class CloudManLauncher:
def __init__(self, access_key, secret_key, cloud=None):
"""
Define the environment in which this instance of CloudMan will be launched.
Besides providing the credentials, optionally provide the ``cloud``
object. This object must define the properties required to establish a
`boto <https://github.com/boto/boto/>`_ connection to that cloud. See
this method's implementation for an example of the required fields.
        Note that as long as the provided object defines the required fields,
        it can really be implemented as anything (e.g., a Bunch, a database
object, a custom class). If no value for the ``cloud`` argument is
provided, the default is to use the Amazon cloud.
"""
self.access_key = access_key
self.secret_key = secret_key
if cloud is None:
# Default to an EC2-compatible object
self.cloud = Bunch(id='1', # for compatibility w/ DB representation
name="Amazon",
cloud_type="ec2",
bucket_default="cloudman",
region_name="us-east-1",
region_endpoint="ec2.amazonaws.com",
ec2_port="",
ec2_conn_path="/",
cidr_range="",
is_secure=True,
s3_host="s3.amazonaws.com",
s3_port="",
s3_conn_path='/')
else:
self.cloud = cloud
self.ec2_conn = self.connect_ec2(
self.access_key,
self.secret_key,
self.cloud)
self.vpc_conn = self.connect_vpc(
self.access_key,
self.secret_key,
self.cloud)
# Define exceptions that we want to catch and retry
self.http_exceptions = (
HTTPException,
socket.error,
socket.gaierror,
BadStatusLine
)
def __repr__(self):
return f"Cloud: {self.cloud.name}; acct ID: {self.access_key}"
def launch(self, cluster_name, image_id, instance_type, password,
kernel_id=None, ramdisk_id=None, key_name='cloudman_key_pair',
security_groups=None, placement='', subnet_id=None,
ebs_optimized=False, **kwargs):
"""
Check all the prerequisites (key pair and security groups) for
launching a CloudMan instance, compose the user data based on the
parameters specified in the arguments and the cloud properties as
defined in the object's ``cloud`` field.
For the current list of user data fields that can be provided via
``kwargs``, see `<https://galaxyproject.org/cloudman/userdata/>`_
Return a dict containing the properties and info with which an instance
was launched, namely: ``sg_names`` containing the names of the security
groups, ``kp_name`` containing the name of the key pair, ``kp_material``
containing the private portion of the key pair (*note* that this portion
of the key is available and can be retrieved *only* at the time the key
is created, which will happen only if no key with the name provided in
the ``key_name`` argument exists), ``rs`` containing the
`boto <https://github.com/boto/boto/>`_ ``ResultSet`` object,
``instance_id`` containing the ID of a started instance, and
``error`` containing an error message if there was one.
"""
if security_groups is None:
security_groups = ['CloudMan']
ret = {'sg_names': [],
'sg_ids': [],
'kp_name': '',
'kp_material': '',
'rs': None,
'instance_id': '',
'error': None}
# First satisfy the prerequisites
for sg in security_groups:
# Get VPC ID in case we're launching into a VPC
vpc_id = None
if subnet_id:
try:
sn = self.vpc_conn.get_all_subnets(subnet_id)[0]
vpc_id = sn.vpc_id
except (EC2ResponseError, IndexError):
bioblend.log.exception("Trouble fetching subnet %s", subnet_id)
cmsg = self.create_cm_security_group(sg, vpc_id=vpc_id)
ret['error'] = cmsg['error']
if ret['error']:
return ret
if cmsg['name']:
ret['sg_names'].append(cmsg['name'])
ret['sg_ids'].append(cmsg['sg_id'])
if subnet_id:
# Must setup a network interface if launching into VPC
security_groups = None
interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
subnet_id=subnet_id, groups=[cmsg['sg_id']],
associate_public_ip_address=True)
network_interfaces = (boto.ec2.networkinterface.
NetworkInterfaceCollection(interface))
else:
network_interfaces = None
kp_info = self.create_key_pair(key_name)
ret['kp_name'] = kp_info['name']
ret['kp_material'] = kp_info['material']
ret['error'] = kp_info['error']
if ret['error']:
return ret
# If not provided, try to find a placement
# TODO: Should placement always be checked? To make sure it's correct
# for existing clusters.
if not placement:
placement = self._find_placement(
cluster_name).get('placement', None)
# Compose user data for launching an instance, ensuring we have the
# required fields
kwargs['access_key'] = self.access_key
kwargs['secret_key'] = self.secret_key
kwargs['cluster_name'] = cluster_name
kwargs['password'] = password
kwargs['cloud_name'] = self.cloud.name
ud = self._compose_user_data(kwargs)
# Now launch an instance
try:
rs = None
rs = self.ec2_conn.run_instances(image_id=image_id,
instance_type=instance_type,
key_name=key_name,
security_groups=security_groups,
# The following two arguments are
# provided in the network_interface
# instead of arguments:
# security_group_ids=security_group_ids,
# subnet_id=subnet_id,
network_interfaces=network_interfaces,
user_data=ud,
kernel_id=kernel_id,
ramdisk_id=ramdisk_id,
placement=placement,
ebs_optimized=ebs_optimized)
ret['rs'] = rs
except EC2ResponseError as e:
err_msg = f"Problem launching an instance: {e} (code {e.error_code}; status {e.status})"
bioblend.log.exception(err_msg)
ret['error'] = err_msg
return ret
else:
if rs:
try:
bioblend.log.info("Launched an instance with ID %s", rs.instances[0].id)
ret['instance_id'] = rs.instances[0].id
ret['instance_ip'] = rs.instances[0].ip_address
except EC2ResponseError as e:
err_msg = f"Problem with the launched instance object: {e} (code {e.error_code}; status {e.status})"
bioblend.log.exception(err_msg)
ret['error'] = err_msg
else:
ret['error'] = ("No response after launching an instance. Check "
"your account permissions and try again.")
return ret
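    # Example usage of launch() (added sketch; IDs, keys and passwords below are
    # placeholders, not real values):
    #   launcher = CloudManLauncher('<access_key>', '<secret_key>')
    #   result = launcher.launch(cluster_name='my-cluster',
    #                            image_id='ami-xxxxxxxx',
    #                            instance_type='m1.medium',
    #                            password='<cluster_password>')
    #   if not result['error']:
    #       print(result['instance_id'], result['kp_name'])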
def create_cm_security_group(self, sg_name='CloudMan', vpc_id=None):
"""
Create a security group with all authorizations required to run CloudMan.
If the group already exists, check its rules and add the missing ones.
:type sg_name: str
:param sg_name: A name for the security group to be created.
:type vpc_id: str
:param vpc_id: VPC ID under which to create the security group.
:rtype: dict
:return: A dictionary containing keys ``name`` (with the value being the
name of the security group that was created), ``error``
(with the value being the error message if there was an error
or ``None`` if no error was encountered), and ``ports``
(containing the list of tuples with port ranges that were
opened or attempted to be opened).
.. versionchanged:: 0.6.1
The return value changed from a string to a dict
"""
ports = (('20', '21'), # FTP
('22', '22'), # SSH
('80', '80'), # Web UI
('443', '443'), # SSL Web UI
('8800', '8800'), # NodeJS Proxy for Galaxy IPython IE
('9600', '9700'), # HTCondor
('30000', '30100')) # FTP transfer
progress = {'name': None,
'sg_id': None,
'error': None,
'ports': ports}
cmsg = None
filters = None
if vpc_id:
filters = {'vpc-id': vpc_id}
# Check if this security group already exists
try:
sgs = self.ec2_conn.get_all_security_groups(filters=filters)
except EC2ResponseError as e:
err_msg = f"Problem getting security groups. This could indicate a problem with your account credentials or permissions: {e} (code {e.error_code}; status {e.status})"
bioblend.log.exception(err_msg)
progress['error'] = err_msg
return progress
for sg in sgs:
if sg.name == sg_name:
cmsg = sg
bioblend.log.debug("Security group '%s' already exists; will add authorizations next.", sg_name)
break
# If it does not exist, create security group
if cmsg is None:
bioblend.log.debug("Creating Security Group %s", sg_name)
try:
cmsg = self.ec2_conn.create_security_group(sg_name, 'A security '
'group for CloudMan',
vpc_id=vpc_id)
except EC2ResponseError as e:
err_msg = f"Problem creating security group '{sg_name}': {e} (code {e.error_code}; status {e.status})"
bioblend.log.exception(err_msg)
progress['error'] = err_msg
if cmsg:
progress['name'] = cmsg.name
progress['sg_id'] = cmsg.id
# Add appropriate authorization rules
# If these rules already exist, nothing will be changed in the SG
for port in ports:
try:
if not self.rule_exists(
cmsg.rules, from_port=port[0], to_port=port[1]):
cmsg.authorize(
ip_protocol='tcp',
from_port=port[0],
to_port=port[1],
cidr_ip='0.0.0.0/0')
else:
bioblend.log.debug("Rule (%s:%s) already exists in the SG", port[0], port[1])
except EC2ResponseError as e:
err_msg = f"A problem adding security group authorizations: {e} (code {e.error_code}; status {e.status})"
bioblend.log.exception(err_msg)
progress['error'] = err_msg
# Add ICMP (i.e., ping) rule required by HTCondor
try:
if not self.rule_exists(
cmsg.rules, from_port='-1', to_port='-1', ip_protocol='icmp'):
cmsg.authorize(
ip_protocol='icmp',
from_port=-1,
to_port=-1,
cidr_ip='0.0.0.0/0')
else:
bioblend.log.debug(
f"ICMP rule already exists in {sg_name} SG.")
except EC2ResponseError as e:
err_msg = f"A problem with security ICMP rule authorization: {e} (code {e.error_code}; status {e.status})"
bioblend.log.exception(err_msg)
progress['err_msg'] = err_msg
# Add rule that allows communication between instances in the same
# SG
# A flag to indicate if group rule already exists
g_rule_exists = False
for rule in cmsg.rules:
for grant in rule.grants:
if grant.name == cmsg.name:
g_rule_exists = True
bioblend.log.debug(
"Group rule already exists in the SG.")
if g_rule_exists:
break
if not g_rule_exists:
try:
cmsg.authorize(
src_group=cmsg,
ip_protocol='tcp',
from_port=0,
to_port=65535)
except EC2ResponseError as e:
err_msg = f"A problem with security group group authorization: {e} (code {e.error_code}; status {e.status})"
bioblend.log.exception(err_msg)
progress['err_msg'] = err_msg
bioblend.log.info("Done configuring '%s' security group", cmsg.name)
else:
bioblend.log.warning(
f"Did not create security group '{sg_name}'")
return progress
def rule_exists(
self, rules, from_port, to_port, ip_protocol='tcp', cidr_ip='0.0.0.0/0'):
"""
A convenience method to check if an authorization rule in a security group
already exists.
"""
for rule in rules:
if rule.ip_protocol == ip_protocol and rule.from_port == from_port and \
rule.to_port == to_port and cidr_ip in [ip.cidr_ip for ip in rule.grants]:
return True
return False
def create_key_pair(self, key_name='cloudman_key_pair'):
"""
If a key pair with the provided ``key_name`` does not exist, create it.
:type sg_name: str
:param sg_name: A name for the key pair to be created.
:rtype: dict
:return: A dictionary containing keys ``name`` (with the value being the
name of the key pair that was created), ``error``
(with the value being the error message if there was an error
or ``None`` if no error was encountered), and ``material``
(containing the unencrypted PEM encoded RSA private key if the
                 key was created or ``None`` if the key already existed).
.. versionchanged:: 0.6.1
The return value changed from a tuple to a dict
"""
progress = {'name': None,
'material': None,
'error': None}
kp = None
# Check if a key pair under the given name already exists. If it does not,
# create it, else return.
try:
kps = self.ec2_conn.get_all_key_pairs()
except EC2ResponseError as e:
err_msg = f"Problem getting key pairs: {e} (code {e.error_code}; status {e.status})"
bioblend.log.exception(err_msg)
progress['error'] = err_msg
return progress
for akp in kps:
if akp.name == key_name:
bioblend.log.info("Key pair '%s' already exists; reusing it.", key_name)
progress['name'] = akp.name
return progress
try:
kp = self.ec2_conn.create_key_pair(key_name)
except EC2ResponseError as e:
err_msg = f"Problem creating key pair '{key_name}': {e} (code {e.error_code}; status {e.status})"
bioblend.log.exception(err_msg)
progress['error'] = err_msg
return progress
bioblend.log.info("Created key pair '%s'", kp.name)
progress['name'] = kp.name
progress['material'] = kp.material
return progress
def assign_floating_ip(self, ec2_conn, instance):
try:
bioblend.log.debug("Allocating a new floating IP address.")
address = ec2_conn.allocate_address()
except EC2ResponseError:
bioblend.log.exception("Exception allocating a new floating IP address")
bioblend.log.info("Associating floating IP %s to instance %s", address.public_ip, instance.id)
ec2_conn.associate_address(instance_id=instance.id,
public_ip=address.public_ip)
def get_status(self, instance_id):
"""
Check on the status of an instance. ``instance_id`` needs to be a
        ``boto``-library compatible instance ID (e.g., ``i-8fehrdss``). If
        ``instance_id`` is not provided, an error is returned in the ``state``
        dict. Note that this assumes the instance
being checked on was launched using this class. Also note that the same
class may be used to launch multiple instances but only the most recent
        ``instance_id`` is kept while any others will need to be explicitly specified.
        This method uses the ``ec2_conn`` connection object and the credentials
        defined for the class when looking up the instance.
Return a ``state`` dict containing the following keys: ``instance_state``,
``public_ip``, ``placement``, and ``error``, which capture CloudMan's
current state. For ``instance_state``, expected values are: ``pending``,
``booting``, ``running``, or ``error`` and represent the state of the
underlying instance. Other keys will return an empty value until the
``instance_state`` enters ``running`` state.
"""
ec2_conn = self.ec2_conn
rs = None
state = {'instance_state': "",
'public_ip': "",
'placement': "",
'error': ""}
# Make sure we have an instance ID
if instance_id is None:
err = "Missing instance ID, cannot check the state."
bioblend.log.error(err)
state['error'] = err
return state
try:
rs = ec2_conn.get_all_instances([instance_id])
if rs is not None:
inst_state = rs[0].instances[0].update()
public_ip = rs[0].instances[0].ip_address
state['public_ip'] = public_ip
if inst_state == 'running':
# if there's a private ip, but no public ip
# attempt auto allocation of floating IP
if rs[0].instances[0].private_ip_address and not public_ip:
self.assign_floating_ip(ec2_conn, rs[0].instances[0])
# Wait until the CloudMan URL is accessible to return
# the data - this may only be correct initially at
# bootup for instances that configure themselves for
# https - they may ultimately block port 80. However,
# we don't have a good way to determine whether to
# check using http or https.
cm_url = f"http://{public_ip}/cloud"
if self._checkURL(cm_url) is True:
state['instance_state'] = inst_state
state['placement'] = rs[0].instances[0].placement
else:
state['instance_state'] = 'booting'
else:
state['instance_state'] = inst_state
except Exception as e:
err = f"Problem updating instance '{instance_id}' state: {e}"
bioblend.log.error(err)
state['error'] = err
return state
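    # Example (added sketch): polling an instance started with launch() above.
    #   state = launcher.get_status(result['instance_id'])
    #   if state['instance_state'] == 'running':
    #       print("CloudMan UI at http://%s/cloud" % state['public_ip'])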
def get_clusters_pd(self, include_placement=True):
"""
Return *persistent data* of all existing clusters for this account.
:type include_placement: bool
:param include_placement: Whether or not to include region placement for
the clusters. Setting this option will lead
to a longer function runtime.
:rtype: dict
:return: A dictionary containing keys ``clusters`` and ``error``. The
value of ``clusters`` will be a dictionary with the following keys
``cluster_name``, ``persistent_data``, ``bucket_name`` and optionally
``placement`` or an empty list if no clusters were found or an
error was encountered. ``persistent_data`` key value is yet
another dictionary containing given cluster's persistent data.
The value for the ``error`` key will contain a string with the
error message.
.. versionadded:: 0.3
.. versionchanged:: 0.7.0
The return value changed from a list to a dictionary.
"""
clusters = []
response = {'clusters': clusters, 'error': None}
s3_conn = self.connect_s3(self.access_key, self.secret_key, self.cloud)
try:
buckets = s3_conn.get_all_buckets()
except S3ResponseError as e:
response['error'] = f"S3ResponseError getting buckets: {e}"
except self.http_exceptions as ex:
response['error'] = f"Exception getting buckets: {ex}"
if response['error']:
bioblend.log.exception(response['error'])
return response
for bucket in [b for b in buckets if b.name.startswith('cm-')]:
try:
# TODO: first lookup if persistent_data.yaml key exists
pd = bucket.get_key('persistent_data.yaml')
except S3ResponseError:
# This can fail for a number of reasons for non-us and/or
# CNAME'd buckets but it is not a terminal error
bioblend.log.warning("Problem fetching persistent_data.yaml from bucket %s", bucket)
continue
if pd:
# We are dealing with a CloudMan bucket
pd_contents = pd.get_contents_as_string()
pd = yaml.safe_load(pd_contents)
if 'cluster_name' in pd:
cluster_name = pd['cluster_name']
else:
for key in bucket.list():
if key.name.endswith('.clusterName'):
cluster_name = key.name.split('.clusterName')[0]
cluster = {'cluster_name': cluster_name,
'persistent_data': pd,
'bucket_name': bucket.name}
# Look for cluster's placement too
if include_placement:
placement = self._find_placement(cluster_name, cluster)
cluster['placement'] = placement
clusters.append(cluster)
response['clusters'] = clusters
return response
def get_cluster_pd(self, cluster_name):
"""
Return *persistent data* (as a dict) associated with a cluster with the
given ``cluster_name``. If a cluster with the given name is not found,
return an empty dict.
.. versionadded:: 0.3
"""
cluster = {}
clusters = self.get_clusters_pd().get('clusters', [])
for c in clusters:
if c['cluster_name'] == cluster_name:
cluster = c
break
return cluster
def connect_ec2(self, a_key, s_key, cloud=None):
"""
Create and return an EC2-compatible connection object for the given cloud.
See ``_get_cloud_info`` method for more details on the requirements for
the ``cloud`` parameter. If no value is provided, the class field is used.
"""
if cloud is None:
cloud = self.cloud
ci = self._get_cloud_info(cloud)
r = RegionInfo(name=ci['region_name'], endpoint=ci['region_endpoint'])
ec2_conn = boto.connect_ec2(aws_access_key_id=a_key,
aws_secret_access_key=s_key,
is_secure=ci['is_secure'],
region=r,
port=ci['ec2_port'],
path=ci['ec2_conn_path'],
validate_certs=False)
return ec2_conn
def connect_s3(self, a_key, s_key, cloud=None):
"""
Create and return an S3-compatible connection object for the given cloud.
See ``_get_cloud_info`` method for more details on the requirements for
the ``cloud`` parameter. If no value is provided, the class field is used.
"""
if cloud is None:
cloud = self.cloud
ci = self._get_cloud_info(cloud)
if ci['cloud_type'] == 'amazon':
calling_format = SubdomainCallingFormat()
else:
calling_format = OrdinaryCallingFormat()
s3_conn = S3Connection(
aws_access_key_id=a_key, aws_secret_access_key=s_key,
is_secure=ci['is_secure'], port=ci['s3_port'], host=ci['s3_host'],
path=ci['s3_conn_path'], calling_format=calling_format)
return s3_conn
def connect_vpc(self, a_key, s_key, cloud=None):
"""
Establish a connection to the VPC service.
TODO: Make this work with non-default clouds as well.
"""
if cloud is None:
cloud = self.cloud
ci = self._get_cloud_info(cloud)
r = RegionInfo(name=ci['region_name'], endpoint=ci['region_endpoint'])
vpc_conn = boto.connect_vpc(
aws_access_key_id=a_key,
aws_secret_access_key=s_key,
is_secure=ci['is_secure'],
region=r,
port=ci['ec2_port'],
path=ci['ec2_conn_path'],
validate_certs=False)
return vpc_conn
def _compose_user_data(self, user_provided_data):
"""
A convenience method used to compose and properly format the user data
required when requesting an instance.
        ``user_provided_data`` is the data provided by a user that is required to
        identify a cluster, along with any other user requirements.
"""
form_data = {}
# Do not include the following fields in the user data but do include
# any 'advanced startup fields' that might be added in the future
excluded_fields = ['sg_name', 'image_id', 'instance_id', 'kp_name',
'cloud', 'cloud_type', 'public_dns', 'cidr_range',
'kp_material', 'placement', 'flavor_id']
for key, value in user_provided_data.items():
if key not in excluded_fields:
form_data[key] = value
# If the following user data keys are empty, do not include them in the
# request user data
udkeys = [
'post_start_script_url',
'worker_post_start_script_url',
'bucket_default',
'share_string']
for udkey in udkeys:
if udkey in form_data and form_data[udkey] == '':
del form_data[udkey]
# If bucket_default was not provided, add a default value to the user data
# (missing value does not play nicely with CloudMan's ec2autorun.py)
if not form_data.get(
'bucket_default', None) and self.cloud.bucket_default:
form_data['bucket_default'] = self.cloud.bucket_default
# Reuse the ``password`` for the ``freenxpass`` user data option
if 'freenxpass' not in form_data and 'password' in form_data:
form_data['freenxpass'] = form_data['password']
# Convert form_data into the YAML format
ud = yaml.dump(form_data, default_flow_style=False, allow_unicode=False)
# Also include connection info about the selected cloud
ci = self._get_cloud_info(self.cloud, as_str=True)
return ud + "\n" + ci
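    # A hedged example of the composed user data; the input values are
    # illustrative assumptions, not defaults:
    #
    #   user_provided_data = {'cluster_name': 'demo', 'password': 'pwd',
    #                         'kp_name': 'some-keypair'}
    #
    # yields YAML roughly like
    #
    #   cluster_name: demo
    #   freenxpass: pwd
    #   password: pwd
    #
    # followed by the cloud connection info from ``_get_cloud_info(..., as_str=True)``;
    # ``kp_name`` is dropped because it is listed in ``excluded_fields`` above.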
def _get_cloud_info(self, cloud, as_str=False):
"""
Get connection information about a given cloud
"""
ci = {}
ci['cloud_type'] = cloud.cloud_type
ci['region_name'] = cloud.region_name
ci['region_endpoint'] = cloud.region_endpoint
ci['is_secure'] = cloud.is_secure
ci['ec2_port'] = cloud.ec2_port if cloud.ec2_port != '' else None
ci['ec2_conn_path'] = cloud.ec2_conn_path
# Include cidr_range only if not empty
if cloud.cidr_range != '':
ci['cidr_range'] = cloud.cidr_range
ci['s3_host'] = cloud.s3_host
ci['s3_port'] = cloud.s3_port if cloud.s3_port != '' else None
ci['s3_conn_path'] = cloud.s3_conn_path
if as_str:
ci = yaml.dump(ci, default_flow_style=False, allow_unicode=False)
return ci
def _get_volume_placement(self, vol_id):
"""
Returns the placement of a volume (or None, if it cannot be determined)
"""
try:
vol = self.ec2_conn.get_all_volumes(volume_ids=[vol_id])
except EC2ResponseError as ec2e:
bioblend.log.error(f"EC2ResponseError querying for volume {vol_id}: {ec2e}")
vol = None
if vol:
return vol[0].zone
else:
bioblend.log.error("Requested placement of a volume '%s' that does not exist.", vol_id)
return None
def _find_placement(self, cluster_name, cluster=None):
"""
Find a placement zone for a cluster with the name ``cluster_name``.
        By default, this method will search for and fetch the given cluster's
        *persistent data*; alternatively, *persistent data* can be provided via
        the ``cluster`` parameter. This dict needs to have a ``persistent_data``
        key with the contents of the cluster's *persistent data*.
If the cluster or the volume associated with the cluster cannot be found,
cluster placement is set to ``None``.
:rtype: dict
:return: A dictionary with ``placement`` and ``error`` keywords.
.. versionchanged:: 0.7.0
The return value changed from a list to a dictionary.
"""
placement = None
response = {'placement': placement, 'error': None}
cluster = cluster or self.get_cluster_pd(cluster_name)
if cluster and 'persistent_data' in cluster:
pd = cluster['persistent_data']
try:
if 'placement' in pd:
response['placement'] = pd['placement']
elif 'data_filesystems' in pd:
# We have v1 format persistent data so get the volume first and
# then the placement zone
vol_id = pd['data_filesystems']['galaxyData'][0]['vol_id']
response['placement'] = self._get_volume_placement(vol_id)
elif 'filesystems' in pd:
# V2 format.
for fs in [fs for fs in pd['filesystems'] if fs.get(
'kind', None) == 'volume' and 'ids' in fs]:
# All volumes must be in the same zone
vol_id = fs['ids'][0]
response['placement'] = self._get_volume_placement(
vol_id)
# No need to continue to iterate through
# filesystems, if we found one with a volume.
break
except Exception as exc:
response['error'] = \
f"Exception while finding placement for cluster '{cluster_name}'. This can indicate malformed instance data. Or that this method is broken: {exc}"
bioblend.log.error(response['error'])
response['placement'] = None
else:
bioblend.log.debug(
f"Insufficient info about cluster {cluster_name} to get placement."
)
return response
def find_placements(
self, ec2_conn, instance_type, cloud_type, cluster_name=None):
"""
Find a list of placement zones that support the specified instance type.
        If ``cluster_name`` is given and a cluster with the given name exists,
return a list with only one entry where the given cluster lives.
Searching for available zones for a given instance type is done by
checking the spot prices in the potential availability zones for
support before deciding on a region:
http://blog.piefox.com/2011/07/ec2-availability-zones-and-instance.html
Note that, currently, instance-type based zone selection applies only to
AWS. For other clouds, all the available zones are returned (unless a
cluster is being recreated, in which case the cluster's placement zone is
        returned, as stored in its persistent data).
:rtype: dict
:return: A dictionary with ``zones`` and ``error`` keywords.
.. versionchanged:: 0.3
Changed method name from ``_find_placements`` to ``find_placements``.
Also added ``cluster_name`` parameter.
.. versionchanged:: 0.7.0
The return value changed from a list to a dictionary.
"""
# First look for a specific zone a given cluster is bound to
zones = []
response = {'zones': zones, 'error': None}
if cluster_name:
placement = self._find_placement(cluster_name)
if placement.get('error'):
response['error'] = placement['error']
return response
response['zones'] = placement.get('placement', [])
# If placement is not found, look for a list of available zones
if not response['zones']:
in_the_past = datetime.datetime.now() - datetime.timedelta(hours=1)
back_compatible_zone = "us-east-1e"
for zone in [
z for z in ec2_conn.get_all_zones() if z.state == 'available']:
# Non EC2 clouds may not support get_spot_price_history
if instance_type is None or cloud_type != 'ec2':
zones.append(zone.name)
elif ec2_conn.get_spot_price_history(instance_type=instance_type,
end_time=in_the_past.isoformat(),
availability_zone=zone.name):
zones.append(zone.name)
# Higher-lettered zones seem to have more availability currently
zones.sort(reverse=True)
if back_compatible_zone in zones:
zones = [back_compatible_zone] + \
[z for z in zones if z != back_compatible_zone]
if len(zones) == 0:
response['error'] = f"Did not find availabilty zone for {instance_type}"
bioblend.log.error(response['error'])
zones.append(back_compatible_zone)
return response
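    # A hedged sketch of the returned structure (values are illustrative):
    #
    #   {'zones': ['us-east-1e', 'us-east-1d'], 'error': None}
    #
    # If no suitable zone is found, ``error`` is populated and the
    # back-compatible zone is appended so ``zones`` is never empty.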
def _checkURL(self, url):
"""
        Check if the ``url`` is *alive* (i.e., the remote server returns code
        200 (OK) or 401 (Unauthorized)).
"""
try:
p = urlparse(url)
h = HTTPConnection(p[1])
h.putrequest('HEAD', p[2])
h.endheaders()
r = h.getresponse()
# CloudMan UI is pwd protected so include 401
return r.status in (200, 401)
except Exception:
# No response or no good response
return False
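# A minimal usage sketch of the methods above (not from the original source).
# It assumes ``launcher`` is an instance of the enclosing launcher class and
# that ``a_key``/``s_key`` hold valid credentials; the instance type is an
# illustrative assumption:
#
#   ec2_conn = launcher.connect_ec2(a_key, s_key)
#   clusters = launcher.get_clusters_pd()['clusters']
#   zones = launcher.find_placements(ec2_conn, 'm1.small', 'ec2')['zones']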
| 46.386417 | 178 | 0.552179 |
4a1f34cc883c1bad8598636344745ddbbb1da586 | 707 | py | Python | coursera/test/test_commandline.py | AbderrhmanAbdellatif/coursera-dl-GUI | 11c5d1f75867ce7ed1398cce16b18777e6499431 | [
"MIT"
] | null | null | null | coursera/test/test_commandline.py | AbderrhmanAbdellatif/coursera-dl-GUI | 11c5d1f75867ce7ed1398cce16b18777e6499431 | [
"MIT"
] | null | null | null | coursera/test/test_commandline.py | AbderrhmanAbdellatif/coursera-dl-GUI | 11c5d1f75867ce7ed1398cce16b18777e6499431 | [
"MIT"
] | null | null | null | """
Test command line module.
"""
from coursera import commandline
from coursera.test import test_workflow
def test_class_name_arg_required():
args = {'list_courses': False, 'version': False}
mock_args = test_workflow.MockedCommandLineArgs(**args)
assert commandline.class_name_arg_required(mock_args)
def test_class_name_arg_not_required():
not_required_cases = [
{'list_courses': True, 'version': False},
{'list_courses': False, 'version': True},
{'list_courses': True, 'version': True},
]
for args in not_required_cases:
mock_args = test_workflow.MockedCommandLineArgs(**args)
assert not commandline.class_name_arg_required(mock_args)
| 29.458333 | 65 | 0.717115 |
4a1f3591f14c8e99af13e6d9f0feeae8128390f0 | 6,766 | py | Python | tensorflow/python/kernel_tests/matrix_solve_op_test.py | DHsLc/test | f286c78b619b81ca95ba9f738cc0de4e14440e44 | [
"Apache-2.0"
] | 5 | 2021-01-11T01:51:57.000Z | 2021-12-11T17:19:08.000Z | tensorflow/python/kernel_tests/matrix_solve_op_test.py | radi2015/tensorflow | 4b2fb49fd7578afe7e289936f347af581b5bdab1 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/kernel_tests/matrix_solve_op_test.py | radi2015/tensorflow | 4b2fb49fd7578afe7e289936f347af581b5bdab1 | [
"Apache-2.0"
] | 3 | 2020-07-02T13:46:32.000Z | 2021-01-11T01:52:01.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_solve."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class MatrixSolveOpTest(test.TestCase):
def _verifySolve(self, x, y, batch_dims=None):
for np_type in [np.float32, np.float64, np.complex64, np.complex128]:
if np_type == np.float32 or np_type == np.complex64:
tol = 1e-5
else:
tol = 1e-12
for adjoint in False, True:
        # Verify against the real part only when testing a real dtype.
        if np_type in (np.float32, np.float64):
          a = x.real.astype(np_type)
          b = y.real.astype(np_type)
else:
a = x.astype(np_type)
b = y.astype(np_type)
a_np = np.conj(np.transpose(a)) if adjoint else a
if batch_dims is not None:
a = np.tile(a, batch_dims + [1, 1])
a_np = np.tile(a_np, batch_dims + [1, 1])
b = np.tile(b, batch_dims + [1, 1])
np_ans = np.linalg.solve(a_np, b)
with self.test_session(use_gpu=True):
tf_ans = linalg_ops.matrix_solve(a, b, adjoint=adjoint)
out = tf_ans.eval()
self.assertEqual(tf_ans.get_shape(), out.shape)
self.assertEqual(np_ans.shape, out.shape)
self.assertAllClose(np_ans, out, atol=tol, rtol=tol)
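  # With adjoint=True, matrix_solve solves conj(transpose(A)) . x = b, which is
  # why a_np above is conjugate-transposed before calling np.linalg.solve.
  # A NumPy-only sketch of the same identity (illustrative, not from the
  # original file):
  #
  #   A = np.array([[1., 2.], [3., 4.]])
  #   b = np.array([[1.], [0.]])
  #   x = np.linalg.solve(A.T, b)   # real A, so adjoint reduces to transpose
  #   assert np.allclose(A.T.dot(x), b)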
def _generateMatrix(self, m, n):
matrix = (np.random.normal(-5, 5,
m * n).astype(np.complex128).reshape([m, n]))
matrix.imag = (np.random.normal(-5, 5, m * n).astype(np.complex128).reshape(
[m, n]))
return matrix
def testSolve(self):
for n in 1, 2, 4, 9:
matrix = self._generateMatrix(n, n)
for nrhs in 1, 2, n:
rhs = self._generateMatrix(n, nrhs)
self._verifySolve(matrix, rhs)
def testSolveBatch(self):
for n in 2, 5:
matrix = self._generateMatrix(n, n)
for nrhs in 1, n:
rhs = self._generateMatrix(n, nrhs)
for batch_dims in [[2], [2, 2], [7, 4]]:
self._verifySolve(matrix, rhs, batch_dims=batch_dims)
def testNonSquareMatrix(self):
# When the solve of a non-square matrix is attempted we should return
# an error
with self.test_session():
with self.assertRaises(ValueError):
matrix = constant_op.constant([[1., 2., 3.], [3., 4., 5.]])
linalg_ops.matrix_solve(matrix, matrix)
def testWrongDimensions(self):
# The matrix and right-hand sides should have the same number of rows.
with self.test_session():
matrix = constant_op.constant([[1., 0.], [0., 1.]])
rhs = constant_op.constant([[1., 0.]])
with self.assertRaises(ValueError):
linalg_ops.matrix_solve(matrix, rhs)
def testNotInvertible(self):
# The input should be invertible.
with self.test_session():
with self.assertRaisesOpError("Input matrix is not invertible."):
# All rows of the matrix below add to zero
matrix = constant_op.constant([[1., 0., -1.], [-1., 1., 0.],
[0., -1., 1.]])
linalg_ops.matrix_solve(matrix, matrix).eval()
class MatrixSolveBenchmark(test.Benchmark):
matrix_shapes = [
(4, 4),
(10, 10),
(16, 16),
(101, 101),
(256, 256),
(1001, 1001),
(1024, 1024),
(2048, 2048),
(513, 4, 4),
(513, 16, 16),
(513, 256, 256),
]
def _GenerateTestData(self, matrix_shape, num_rhs):
batch_shape = matrix_shape[:-2]
matrix_shape = matrix_shape[-2:]
assert matrix_shape[0] == matrix_shape[1]
n = matrix_shape[0]
matrix = (np.ones(matrix_shape).astype(np.float32) /
(2.0 * n) + np.diag(np.ones(n).astype(np.float32)))
rhs = np.ones([n, num_rhs]).astype(np.float32)
matrix = variables.Variable(
np.tile(matrix, batch_shape + (1, 1)), trainable=False)
rhs = variables.Variable(
np.tile(rhs, batch_shape + (1, 1)), trainable=False)
return matrix, rhs
def benchmarkMatrixSolveOp(self):
run_gpu_test = test.is_gpu_available(True)
for adjoint in False, True:
for matrix_shape in self.matrix_shapes:
for num_rhs in 1, 2, matrix_shape[-1]:
with ops.Graph().as_default(), \
session.Session() as sess, \
ops.device("/cpu:0"):
matrix, rhs = self._GenerateTestData(matrix_shape, num_rhs)
x = linalg_ops.matrix_solve(matrix, rhs, adjoint=adjoint)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(x),
min_iters=25,
store_memory_usage=False,
name=("matrix_solve_cpu_shape_{matrix_shape}_num_rhs_{num_rhs}_"
"adjoint_{adjoint}").format(
matrix_shape=matrix_shape,
num_rhs=num_rhs,
adjoint=adjoint))
if run_gpu_test:
with ops.Graph().as_default(), \
session.Session() as sess, \
ops.device("/gpu:0"):
matrix, rhs = self._GenerateTestData(matrix_shape, num_rhs)
x = linalg_ops.matrix_solve(matrix, rhs, adjoint=adjoint)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(x),
min_iters=25,
store_memory_usage=False,
name=("matrix_solve_gpu_shape_{matrix_shape}_num_rhs_"
"{num_rhs}_adjoint_{adjoint}").format(
matrix_shape=matrix_shape, num_rhs=num_rhs,
adjoint=adjoint))
if __name__ == "__main__":
test.main()
| 37.381215 | 80 | 0.604197 |
4a1f360f8a0276368ed4abf45911fdf4999bbe71 | 23 | py | Python | src/test/data/pa2/AdditionalTestCases_pa2/none_empty_assignment_comp.py | Leo-Enrique-Wu/chocopy_compiler_semantic_analysis | e89f6434dd7d274d4838457316143f312226495f | [
"BSD-2-Clause"
] | null | null | null | src/test/data/pa2/AdditionalTestCases_pa2/none_empty_assignment_comp.py | Leo-Enrique-Wu/chocopy_compiler_semantic_analysis | e89f6434dd7d274d4838457316143f312226495f | [
"BSD-2-Clause"
] | null | null | null | src/test/data/pa2/AdditionalTestCases_pa2/none_empty_assignment_comp.py | Leo-Enrique-Wu/chocopy_compiler_semantic_analysis | e89f6434dd7d274d4838457316143f312226495f | [
"BSD-2-Clause"
] | null | null | null | x:object = None
x = []
| 7.666667 | 15 | 0.521739 |
4a1f37c3649d500b638edd1a48aa74553be53ccd | 940 | py | Python | PDFtoImg.py | MihirGodbole96/PdftoImg | d8c8183a3f047b477a9eb838e393d23c49c6808d | [
"MIT"
] | null | null | null | PDFtoImg.py | MihirGodbole96/PdftoImg | d8c8183a3f047b477a9eb838e393d23c49c6808d | [
"MIT"
] | null | null | null | PDFtoImg.py | MihirGodbole96/PdftoImg | d8c8183a3f047b477a9eb838e393d23c49c6808d | [
"MIT"
] | null | null | null | import os
import tempfile
from pdf2image import convert_from_path
from PyPDF3 import PdfFileWriter, PdfFileReader
path="input_path"
output_folder_path="destination_path"
for filename in os.listdir(path):
i = 0
pdfname=path + filename
inputpdf = PdfFileReader(open(pdfname,"rb"))
maxPages = inputpdf.numPages
print("Number of pages in PDF="+str(maxPages))
for page in range(1, maxPages, 10):
pil_images = convert_from_path(poppler_path="C:/poppler-0.68.0/bin",pdf_path = pdfname, dpi=200, first_page=page,
last_page=min(page + 10 - 1, maxPages), fmt= 'jpg',
thread_count=1, userpw=None,
use_cropbox=False, strict=False)
for image in pil_images:
image.save(filename[:-4] + '_' + str(i) + '.jpg', 'JPEG')
i = i + 1
| 34.814815 | 119 | 0.571277 |
4a1f39a5f7361d2243afab86a74c4de135dfdb04 | 11,042 | py | Python | sampled_data/sampled_data233.py | bryan-ruddy/ENGSCI233_2021 | 97a9ede84183603ac7975d5692885921419608fa | [
"MIT"
] | 2 | 2022-02-09T02:15:39.000Z | 2022-02-09T02:22:42.000Z | sampled_data/sampled_data233.py | bryan-ruddy/ENGSCI233_2021 | 97a9ede84183603ac7975d5692885921419608fa | [
"MIT"
] | null | null | null | sampled_data/sampled_data233.py | bryan-ruddy/ENGSCI233_2021 | 97a9ede84183603ac7975d5692885921419608fa | [
"MIT"
] | 2 | 2021-05-03T09:25:11.000Z | 2022-02-09T02:15:57.000Z | # Supplementary classes and functions for ENGSCI233 notebook Sampling.ipynb
# author: David Dempsey
import numpy as np
from matplotlib import pyplot as plt
from numpy.linalg import inv # function for matrix inverse
from scipy.integrate import trapz
from scipy.interpolate import interp1d
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
TEXTSIZE = 12
# interpolation
def pop_mpl(kwargs):
color = kwargs.pop('color','k')
mplkwargs = {'color':color}
args = ['alpha','marker','mew','mfc','mec','ls','lw','label']
defs = [1.0,'o',1.0,color,color,'-',0.0,None]
for arg,default in zip(args,defs):
mplkwargs.update({arg:kwargs.pop(arg,default)})
return mplkwargs
def plot_data(xi,yi,ax, **kwargs):
mplkwargs = pop_mpl(kwargs)
ax.plot(xi,yi,**mplkwargs)
ax.set_xlabel('time',size = TEXTSIZE)
ax.set_ylabel('temperature',size = TEXTSIZE)
for t in ax.get_xticklabels()+ax.get_yticklabels():t.set_fontsize(TEXTSIZE)
ax.legend(loc=2, prop={'size':TEXTSIZE});
def plot_interpolation_lines(xj, ax):
ylim = ax.get_ylim()
ax.set_ylim(ylim)
for xji in xj[:-1]:
ax.plot([xji,xji], ylim, 'r--', lw=0.5, alpha=0.4)
# last line added separately so to provide legend label
ax.plot([xj[-1],xj[-1]], ylim, 'r--', lw=0.5, alpha=0.4, label='interpolate at')
ax.legend(loc=2, prop={'size':TEXTSIZE});
# POLYNOMIAL FITTING
# ------------------
# fit a polynomial and plot data and function
def plot_polynomial_elements(ax, xi, yi, xj, m=1, interpolate=False, extrapolate = False):
"""Fit polynomial of order M to data XI,YI and plot to axis AX
"""
    # construct Vandermonde matrix
A = vandermonde(xi,m)
# construct RHS vector
b = rhs(xi,yi,m)
# solve Ax=b
# (note: I am solving x = A^-1 b, which is not wildly efficient)
Ainv = inv(A)
ak = np.dot(Ainv, b)
# plotting
# i. data
plot_data(xi,yi,ax,label='data')
# ii. interpolating function
if not interpolate:
if extrapolate:
xm = (xi[0]+xi[-1])/2.
xr = -(xi[0]-xi[-1])
x = np.linspace(0,2*xi[-1],1001) # vector of x vals
else:
x = np.linspace(xi[0],xi[-1],1001) # vector of x vals
fx = polyval(ak,x) # compute f(x)
ax.plot(x,fx,'r-',label='{:d} order fit'.format(m))
# iii. interpolated data
if interpolate:
# show lines
plot_interpolation_lines(xj,ax)
# evaluate interpolating function at XJ
fxj = polyval(ak,xj)
plot_data(xj,fxj,ax,color='r',marker='o',label='interpolated data')
# add residual to plot
if not interpolate:
res = np.sum((polyval(ak,xi)-yi)**2)
ax.annotate('R$^2$={:3.2e}'.format(res), xy=(.05,.7), xycoords='axes fraction', ha='left')
# construct righthandside vector for data XI, YI and polynomial order M
def rhs(xi,yi,m):
"""Return least-squares righthand side vector for data XI, YI and polynomial order M
"""
# preallocate vector
rhs = np.zeros(m+1)
# compute terms
for i in range(m+1):
rhs[i] = np.sum(xi**i*yi)
return rhs
# construct Vandermonde matrix for data XI and polynomial order M
def vandermonde(xi,m):
"""Return Vandermonde matrix for data XI and polynomial order M
"""
# preallocate matrix
V = np.zeros((m+1,m+1))
# loop over rows
for i in range(m+1):
# loop over columns
for j in range(m+1):
V[i,j] = np.sum(xi**(i+j))
return V
# evaluate polynomial with coefficients A at locations XI
def polyval(a,xi):
    """Evaluates polynomial with coefficients A at points XI.
    """
    yi = 0.*xi
    for i,ai in enumerate(a):
        yi = yi + ai*xi**i
    return yi
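# A hedged worked example (not from the original module): for a first-order
# (straight line) fit to the points (0,1), (1,3), (2,5), the normal equations
# V a = b assembled by vandermonde() and rhs() are
#
#   [ 3  3 ] [a0]   [  9 ]      entry (i,j) of V holds sum(x**(i+j))
#   [ 3  5 ] [a1] = [ 13 ]      entry i of b holds sum(x**i * y)
#
# with solution a = [1, 2], i.e. y = 1 + 2x, and
# polyval([1, 2], np.array([0., 1., 2.])) returns array([1., 3., 5.]).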
# PIECEWISE LINEAR INTERPOLATION
# ------------------------------
# perform piecewise linear interpolation
def plot_piecewise_elements(ax, interpolate, xi, yi, xj):
"""Fit straight line segments between neighbouring data pairs.
"""
# for each subinterval
yj = []
for xi1, yi1, xi2, yi2 in zip(xi[:-1], yi[:-1], xi[1:], yi[1:]):
# compute gradient and intercept
mi,ci = mx_c(xi1,yi1,xi2,yi2)
# find interpolating points in subinterval
inds = np.where((xj>=xi1)&(xj<xi2))
# evaluate piecewise interpolating function at points
yj += list(mi*xj[inds] + ci)
# plot data
plot_data(xi,yi,ax,label='data')
# other plotting
if interpolate:
# plot interpolation points
plot_interpolation_lines(xj,ax)
# plot interpolation values
plot_data(xj,yj,ax,color='r',label='interpolated data')
else:
# plot interpolating function
plot_data(xi,yi,ax,color='r',marker=None,lw=1.,label='piecewise linear interpolation')
# linear interpolation between points
def mx_c(x1,y1,x2,y2):
"""Returns gradient and y-intercept for straight line segment between the points (X1,Y1) and (X2,Y2)
"""
# gradient
m = (y2-y1)/(x2-x1)
# y-intercept
c = y1-m*x1
return m,c
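# A hedged worked example (not from the original module):
# mx_c(1, 2, 3, 6) returns m = (6 - 2)/(3 - 1) = 2 and c = 2 - 2*1 = 0,
# i.e. the segment y = 2x joining (1, 2) and (3, 6).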
# CUBIC SPLINE INTERPOLATION
# --------------------------
# perform cubic spline interpolation
def plot_spline_elements(ax,interpolate,SubIntEqn,xi,yi,xj):
"""Fit cubic splines to data using built-in Python functions.
"""
# plot data
plot_data(xi,yi,ax,label='data')
f = interp1d(xi,yi,kind='cubic')
if interpolate:
# perform interpolation
yj = f(xj)
# plot interpolation points
plot_interpolation_lines(xj,ax)
# plot interpolation values
plot_data(xj,yj,ax,color='r',label='interpolated data')
else:
# plot interpolating function
xv = np.linspace(xi[0], xi[-1], 1001)
yv = f(xv)
plot_data(xv,yv,ax,color='r',lw=1.0,label='cubic spline interpolation', marker=None)
if SubIntEqn>0:
# this is not going to be very elegant...
# get subinterval
x1, x2 = xi[SubIntEqn-1], xi[SubIntEqn]
# evaluate spline at 1000 points inside interval
xk = np.linspace(x1,x2,1000)
# fit best cubic
a = np.polyfit(xk,f(xk),deg=3)
# show cubic for subinterval
poly_str = r'$y$=${:2.1f}x^3$+${:2.1f}x^2$+${:2.1f}x$+${:2.1f}$'.format(*a)
ls = '-'
if interpolate:
ls = '--'
plot_data(xk,f(xk),ax,color='g',lw=2.0,ls=ls,label=poly_str, marker=None)
# INTEGRATION
# -----------
# integration
def f_int(x): return (x-2)*(x-5.5)*(x-7)/8+8
def plot_integration_elements(ax, know_gx, subints, area):
# configure area boolean
if area == 'None': area = 0
elif area =='A0': area = 1
elif area =='A1': area = 2
elif area =='A2': area = 3
elif area =='Atot': area = -1
# plot function or data
if know_gx:
x = np.linspace(2,8,1001)
y = f_int(x)
ax.plot(x,y,'r-', label='known function, $g(x)$')
else:
xi = np.array([2, 3.5, 6.8, 8.])
yi = np.array([7.8, 8.5, 8.1, 10.0])
ax.plot(xi,yi,'kx',ms=5,mew=2,label='known data, $(x_i,y_i)$')
# show subintervals
if subints:
if know_gx:
N=3 # number of subintervals
xi = np.linspace(x[0],x[-1],N+1)
yi = f_int(xi)
ax.plot(xi,yi,'kx',ms=5,mew=2,label='eval. function, $g(x_i)$')
ax.plot(xi,yi,'k--')
# dashed vertical lines
label = 'three subintervals'
for xii,yii in zip(xi,yi):
ax.plot([xii,xii],[0,yii],'k--',label=label)
label=None
# subinterval numbering
if area == 0:
for xi1,xi2,yi1,yi2,i in zip(xi[:-1],xi[1:],yi[:-1],yi[1:], range(len(xi))):
ax.text(0.5*(xi1+xi2), 0.25*(yi1+yi2), '$I_'+'{:d}'.format(i+1)+'$', ha = 'center', va = 'center', size = 14)
if area > 0:
i = area - 1
patches = []
i1 = i
i2 = i+2
if i2 == len(xi):
poly = np.array([list(xi[i1:])+[xi[-1],xi[i1]],list(yi[i1:])+[0,0]]).T
else:
poly = np.array([list(xi[i1:i2])+[xi[i2-1],xi[i1]],list(yi[i1:i2])+[0,0]]).T
xint = xi[i1:i2]
yint = yi[i1:i2]
area = trapz(yint,xint)
polygon = Polygon(poly, zorder=1)
patches.append(polygon)
p = PatchCollection(patches, color = 'r', alpha = 0.2)
ax.add_collection(p)
ax.text(np.mean(xint), 0.5*np.mean(yint), '$A_'+'{:d}'.format(i)+'$'+'\n$=$\n${:3.1f}$'.format(area), ha = 'center', va = 'center', size = 12)
if area < 0:
patches = []
area = trapz(yi,xi)
poly = np.array([list(xi)+[xi[-1],xi[0]],list(yi)+[0,0]]).T
polygon = Polygon(poly, zorder=1)
patches.append(polygon)
p = PatchCollection(patches, color = 'r', alpha = 0.2)
ax.add_collection(p)
ax.text(np.mean(xi), 0.5*np.mean(yi), '$A_{tot}'+'$'+'\n$=$\n${:3.1f}$'.format(area), ha = 'center', va = 'center', size = 12)
else:
if area < 0:
patches = []
if know_gx:
poly = np.array([list(x)+[x[-1],x[0]],list(y)+[0,0]]).T
area = trapz(y,x)
else:
poly = np.array([list(xi)+[xi[-1],xi[0]],list(yi)+[0,0]]).T
area = trapz(yi,xi)
polygon = Polygon(poly, zorder=1)
patches.append(polygon)
p = PatchCollection(patches, color = 'r', alpha = 0.2)
ax.add_collection(p)
ax.text(5., 4, 'Area = {:3.1f}'.format(area), ha='center', va = 'center')
# plotting
ax.set_xlabel('time',size = TEXTSIZE)
ax.set_ylabel('temperature',size = TEXTSIZE)
ax.set_xlim([0,10])
ax.set_ylim([0, 15])
ax.legend(loc=2, prop={'size':TEXTSIZE})
# NEWTON-COTES METHODS
# --------------------
# interactive trapezium method demo
def plot_trapezium_elements(ax, N):
# fit polynomial to data
xi = np.array([2.5, 3.5, 4.5, 5.6, 8.6, 9.9, 13.0, 13.5])
yi = np.array([24.7, 21.5, 21.6, 22.2, 28.2, 26.3, 41.7, 54.8])
ak = fit_poly5(xi,yi)
trapezium(ak,[xi[0], xi[-1]],N,ax)
# fit a fifth order polynomial
def fit_poly5(xi,yi):
"""Return coefficients of fifth order polynomial fitted to data XI,YI.
"""
    # construct Vandermonde matrix
A = vandermonde(xi,5)
# construct RHS vector
b = rhs(xi,yi,5)
# solve Ax=b
# (note: I am solving x = A^-1 b, which is not wildly efficient)
Ainv = inv(A)
ak = np.dot(Ainv, b)
return ak
# integrate exactly a fifth order polynomial
def int_poly5(ak, xlim):
akint = np.array([0.,]+[aki/(i+1) for i,aki in enumerate(ak)])
return polyval(akint, xlim[1]) - polyval(akint, xlim[0])
# apply Trapezium method
def trapezium(ak,xlim,N,ax):
"""Apply Trapezium method with N subintervals to polynomial with coefficients
AK over the interval XLIM.
"""
# construct subintervals and function evaluations
xin = np.linspace(xlim[0], xlim[1], N+1)
yin = polyval(ak,xin)
# compute integral
dx = xin[1]-xin[0]
area = dx/2*(yin[0] + 2*np.sum(yin[1:-1]) + yin[-1])
area_true = int_poly5(ak,xlim)
# plotting
# data
xi = np.array([2.5, 3.5, 4.5, 5.6, 8.6, 9.9, 13.0, 13.5])
yi = np.array([24.7, 21.5, 21.6, 22.2, 28.2, 26.3, 41.7, 54.8])
#ax.plot(xi,yi,'ko',mfc='w',mew=1.5,label='data')
# interpolating function
xv = np.linspace(xi[0],xi[-1],1001)
yv = polyval(ak,xv)
ax.plot(xv,yv,'r-',label='$g(x)$')
# subintervals
ax.plot(xin,yin,'k--x',mec='r',mew=1.5,label='subintervals')
for xini,yini in zip(xin,yin):
ax.plot([xini,xini],[0,yini],'k--')
# plot upkeep
ax.legend(loc=2, prop={'size': TEXTSIZE})
ax.set_xlabel('time',size = TEXTSIZE)
ax.set_ylabel('temperature',size = TEXTSIZE)
str1 = '$A_{'+'{:d}'.format(N)+'}'+'={:3.1f}$'.format(area)
str2 = '$A_{\infty}=$'+'${:3.1f}$'.format(area_true)
str3 = '$\%\,\,err=$'+'${:3.1f}$'.format((area_true-area)/area_true*100)
ax.annotate(str1+'\n'+str2+'\n'+str3, xy=(.05,.7), xycoords='axes fraction', ha='left', va='top', size = 12)
ylim = ax.get_ylim()
ax.set_ylim([0, ylim[-1]])
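# A minimal way to exercise the trapezium demo outside the notebook (not from
# the original module; assumes an interactive matplotlib backend is available).
if __name__ == '__main__':
    fig, ax = plt.subplots()
    plot_trapezium_elements(ax, N=4)  # trapezium rule demo with 4 subintervals
    plt.show()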
| 28.981627 | 145 | 0.634487 |