text | meta
---|---|
from flask import Flask
from .core import db
from .helpers import register_blueprints
def create_app(package_name, package_path, settings_override=None):
"""Returns a :class:`Flask` application instance configured with common
functionality for the Fogspoon platform.
:param package_name: application package name
:param package_path: application package path
:param settings_override: a dictionary of settings to override
"""
app = Flask(package_name, instance_relative_config=True)
app.config.from_object('fogspoon.settings')
app.config.from_pyfile('settings.cfg', silent=True)
app.config.from_object(settings_override)
db.init_app(app)
register_blueprints(app, package_name, package_path)
return app
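# A minimal usage sketch of the factory above (illustrative only): the package
# passed in and the override class below are assumptions, not part of the
# platform; config.from_object reads UPPERCASE attributes from the override.
def _example_create_app():
    import fogspoon
    class SettingsOverride(object):
        DEBUG = True
    return create_app('fogspoon', fogspoon.__path__,
                      settings_override=SettingsOverride)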
| {
"content_hash": "e12f52b67a08ef3c54543f31dd8fa10a",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 75,
"avg_line_length": 31.708333333333332,
"alnum_prop": 0.7411300919842313,
"repo_name": "tkalus/fogspoon",
"id": "9f6950a88294662b166ad4f09c397cb630fa7100",
"size": "786",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fogspoon/factory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10766"
},
{
"name": "HTML",
"bytes": "1186"
},
{
"name": "JavaScript",
"bytes": "4189"
},
{
"name": "Python",
"bytes": "46761"
},
{
"name": "Shell",
"bytes": "132"
}
],
"symlink_target": ""
} |
"""
Definition of urls for QuickScrum.
"""
from django.conf.urls import include, url
from app.forms import BootstrapAuthenticationForm, JiraAuthenticationForm, BootstrapRegisterForm, BootstrapPasswordChangeForm, TeamLoginForm
from app.views import status_view, readstatus_view, dashboard_view, login_view, jiralogin_view, register_view, password_change_view, jiraIssues_view, teamlogin_view, teamcreate_view
# Uncomment the next lines to enable the admin:
# from django.conf.urls import include
from django.contrib import admin
from django.contrib.auth.views import logout, password_reset, password_reset_confirm, password_reset_done, password_reset_complete
from django.utils.timezone import now
from django.views.generic import RedirectView
from QuickScrum import settings
admin.autodiscover()
urlpatterns = [
url(r'^$', status_view, name='status'),
url(r'^status/$', status_view, name='status'),
url(r'^readstatus/(?P<status_id>\w+)/$', readstatus_view, name='readstatus'),
url(r'^dashboard$', dashboard_view, name='dashboard'),
url(r'^getJiraIssues$', jiraIssues_view, name='jiraIssues'),
#url(r'^home$', home, name='home'),
#url(r'^contact$', contact, name='contact'),
#url(r'^about$', about, name='about'),
url(r'^login$', login_view, {
'template_name': 'app/Signin.html',
'authentication_form': BootstrapAuthenticationForm,
'extra_context':
{
'title':'Sign in',
'year':now().year,
},
}, name='login'),
url(r'^register$', register_view, {
'template_name': 'app/register.html',
'register_form': BootstrapRegisterForm,
'extra_context':
{
'title':'Register',
'year':now().year,
},
}, name='register'),
url(r'^password_change/$', password_change_view, {
'template_name': 'app/passwordchange.html',
'password_change_form': BootstrapPasswordChangeForm,
'extra_context':
{
'title':'Change Password',
'year':now().year,
},
}, name='password_change'),
url(r'^jiralogin$', jiralogin_view, {
'template_name': 'app/jirasignin.html',
'authentication_form': BootstrapAuthenticationForm,
'extra_context':
{
'title':'Sign in to Jira',
'year':now().year,
},
}, name='jiralogin'),
url(r'^teamlogin$', teamlogin_view, {
'template_name': 'app/teamsignin.html',
'team_form': TeamLoginForm,
'extra_context':
{
'base_url':settings.SUBDOMAIN_URLCONFS.get('url_base_path'),
'title':'Enter your team name',
'year':now().year,
},
}, name='teamlogin'),
url(r'^teamcreate$', teamcreate_view, {
'template_name': 'app/teamcreate.html',
'team_form': TeamLoginForm,
'extra_context':
{
'base_url':settings.SUBDOMAIN_URLCONFS.get('url_base_path'),
'title':'Create a team',
'year':now().year,
},
}, name='teamcreate'),
# TODO - Password Reset
url(r'^password_reset/$', password_reset, {'post_reset_redirect' : '/password_reset/done/'}, name='password_reset'),
url(r'^password_reset/done/$', password_reset_done),
url(r'^reset/(?P<uidb64>[0-9A-Za-z]+)-(?P<token>.+)/$', password_reset_confirm, {'post_reset_redirect' : '/reset/done/'}),
url(r'^reset/done/$', password_reset_complete),
#url(r'^login$',
# login,
# {
# 'template_name': 'app/login.html',
# 'authentication_form': BootstrapAuthenticationForm,
# 'extra_context':
# {
# 'title':'Sign in',
# 'year':now().year,
# },
# },
# name='login'),
url(r'^logout$',
logout,
{
'next_page': '/',
},
name='logout'),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
]
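# A minimal sketch of how the extra kwargs passed by url() above reach a view.
# The real signatures in app.views may differ; this shape is only an assumption
# used for illustration.
def _example_login_view(request, template_name=None, authentication_form=None,
                        extra_context=None):
    from django.shortcuts import render
    context = dict(extra_context or {})
    context['form'] = authentication_form() if authentication_form else None
    return render(request, template_name, context)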
| {
"content_hash": "89e017789a4b1df4c6e578e8e289239a",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 181,
"avg_line_length": 34.413223140495866,
"alnum_prop": 0.5883765609990393,
"repo_name": "sunnytambi/QuickScrum",
"id": "308031450fdacf8cfdc8affbb78157970ffea817",
"size": "4166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "QuickScrum/QuickScrum/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5598"
},
{
"name": "HTML",
"bytes": "32727"
},
{
"name": "JavaScript",
"bytes": "16910"
},
{
"name": "Python",
"bytes": "265734"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import print_function
import six
from builtins import str, zip, range
from past.builtins import basestring
import json
from os import environ
import pickle
import re
import unittest2
from richenum import RichEnum
from richenum import RichEnumValue
import normalize.exc as exc
from normalize.record import Record
from normalize.record.json import from_json
from normalize.record.json import JsonRecord
from normalize.record.json import JsonRecordDict
from normalize.record.json import JsonRecordList
from normalize.record.json import to_json
from normalize.property import Property
from normalize.property import ROProperty
from normalize.property import SafeProperty
from normalize.property.coll import DictProperty
from normalize.property.coll import ListProperty
from normalize.property.json import JsonProperty
from normalize.property.json import JsonDictProperty
from normalize.property.json import JsonListProperty
class CheeseRecord(Record):
variety = SafeProperty(isa=str)
smelliness = SafeProperty(isa=float, check=lambda x: 0 < x < 100)
class CheeseCupboardRecord(Record):
id = ROProperty(required=True, isa=int)
name = SafeProperty(isa=str)
best_cheese = SafeProperty(isa=CheeseRecord)
cheeses = ListProperty(of=CheeseRecord)
favorites = DictProperty(of=CheeseRecord)
json_data_number_types = (basestring, float) + six.integer_types
def decode_json_number(str_or_num):
"""Returns a precise number object from a string or number"""
if isinstance(str_or_num, basestring):
if re.match(r'-?\d+$', str_or_num):
return six.integer_types[-1](str_or_num)
if not re.match(r'-?\d+(\.\d+)?([eE][\-+]?\d+)?$', str_or_num):
raise ValueError("invalid json number: '%s'" % str_or_num)
return float(str_or_num)
return str_or_num
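# Quick illustrative checks of decode_json_number, grounded in the regexes
# above (not part of the test suite proper):
def _decode_json_number_examples():
    assert decode_json_number("123") == 123           # all-digit string -> integer
    assert decode_json_number("-5e5") == -500000.0    # scientific notation -> float
    assert decode_json_number(42) == 42               # non-strings pass through
    try:
        decode_json_number("inf")                     # rejected: not a JSON number
    except ValueError:
        pass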
class TestRecordMarshaling(unittest2.TestCase):
def setUp(self):
self.primitive = {
"id": "123",
"name": "Fridge",
"best_cheese": dict(variety="Gouda", smelliness="12"),
"cheeses": [
dict(variety="Manchego", smelliness="38"),
dict(variety="Stilton", smelliness="82"),
dict(variety="Polkobin", smelliness="31"),
],
"favorites": {
"Dorothy": dict(variety="Stracchinata", smelliness="28"),
"Walter": dict(variety="Caciobufala", smelliness="32"),
"Albert": dict(variety="Quartirolo Lombardo", smelliness="53"),
},
}
def assertDataOK(self, ccr):
self.assertIsInstance(ccr, CheeseCupboardRecord)
self.assertEqual(ccr.id, 123)
self.assertEqual(len(ccr.cheeses), 3)
self.assertEqual(ccr.best_cheese.variety, "Gouda")
self.assertEqual(ccr.cheeses[1].smelliness, 82)
self.assertEqual(ccr.favorites['Walter'].variety, "Caciobufala")
def assertJsonDataEqual(self, got, wanted, path=""):
"""Test that two JSON-data structures are the same. We can't use
simple assertEqual, because '23' and 23 should compare the same."""
if isinstance(got, basestring):
got = six.text_type(got)
if isinstance(wanted, basestring):
wanted = six.text_type(wanted)
pdisp = path or "top level"
if type(got) != type(wanted):
if isinstance(got, json_data_number_types) and \
isinstance(wanted, json_data_number_types):
got = decode_json_number(got)
wanted = decode_json_number(wanted)
else:
raise AssertionError(
"types differ at %s: wanted %s, got %s" % (
pdisp, type(wanted).__name__, type(got).__name__
)
)
if type(got) == dict:
all_keys = sorted(set(got) | set(wanted))
for key in all_keys:
if (key in got) != (key in wanted):
raise AssertionError(
"dictionary differs at %s: key %s is %s" % (
pdisp, key,
"unexpected" if key in got else "missing"
)
)
else:
self.assertJsonDataEqual(
got[key], wanted[key], path + ("[%r]" % key)
)
elif type(got) == list:
for i in range(0, max((len(got), len(wanted)))):
if i >= len(got) or i >= len(wanted):
raise AssertionError(
"lists differs in length at %s: got %d elements, "
"wanted %d" % (pdisp, len(got), len(wanted))
)
else:
self.assertJsonDataEqual(
got[i], wanted[i], path + ("[%d]" % i)
)
elif got != wanted:
raise AssertionError(
"values differ at %s: wanted %r, got %r" % (
pdisp, wanted, got
)
)
elif "SHOW_JSON_TESTS" in environ:
print("%s: ok (%r)" % (pdisp, got))
def test_assertJsonDataEqual(self):
"""Answering the koan, "Who will test the tests themselves?"
"""
float("inf")
self.assertRaises(ValueError, decode_json_number, "inf")
matches = (
("123", "123"), ("123", 123), (123, 123.0), ("123.0", 123),
("9223372036854775783", 2**63-25), ("-5e5", -500000),
({}, {}), ([], []), ({"foo": "bar"}, {"foo": "bar"}),
([{}, "foo", 123], [{}, "foo", 123.0]),
({"foo": [1, 2, 3], "bar": {"foo": "baz"}},
{"foo": [1, 2, 3], "bar": {"foo": "baz"}}),
)
for a, b in matches:
self.assertJsonDataEqual(a, b)
mismatches = (
(123, 124), ("foo", "bar"), (123, "foo"), (123, {}),
({}, 123), ([], {}), ("inf", float("inf")),
(9.223372036854776e+18, 2**63-25),
({"foo": "bar"}, {"bar": "foo"}),
([1, 2, 3], [1, 2]), ([1, 2], [1, 2, 3]),
({"foo": [1, 2, 3], "bar": {"foo": "baz"}},
{"foo": [1, 2, 3], "bar": {"foo": "bat"}}),
)
for a, b in mismatches:
try:
self.assertJsonDataEqual(a, b)
except AssertionError:
pass
except ValueError:
pass
else:
raise Exception("Compared equal: %r vs %r" % (a, b))
def test_native_marshall(self):
"""Test coerce from python dicts & pickling"""
ccr = CheeseCupboardRecord(self.primitive)
for protocol in range(0, pickle.HIGHEST_PROTOCOL + 1):
pickled = pickle.dumps(ccr, protocol)
ccr_copy = pickle.loads(pickled)
self.assertDataOK(ccr_copy)
def test_json_marshall(self):
"""Test coerce from JSON & marshall out"""
json_struct = json.dumps(self.primitive)
ccr = from_json(CheeseCupboardRecord, json.loads(json_struct))
self.assertDataOK(ccr)
class RealWorldCCR(JsonRecord, CheeseCupboardRecord):
pass
ccr = RealWorldCCR.from_json(json_struct)
self.assertDataOK(ccr)
json_data = ccr.json_data()
json.dumps(json_data)
self.assertJsonDataEqual(json_data, self.primitive)
def test_custom_json_prop_marshall(self):
"""Test customizing JSON marshalling using functions"""
def date_in(struct):
return "%.4d-%.2d-%.2d" % (
struct.get("year", 0), struct.get("month", 0),
struct.get("day", 0),
)
def date_out(val):
return dict(
(k, int(v)) for k, v in zip(
("year", "month", "day"), val.split("-")
) if int(v) > 0
)
class PackedDate(Record):
created_date = Property(
check=lambda x: re.match(r'\d{4}-\d{2}-\d{2}', x),
isa=str,
json_in=date_in,
json_name="created",
json_out=date_out,
)
class JsonPackedDate(PackedDate, JsonRecord):
pass
json_in = {"created": {"year": 2012, "month": 7, "day": 12}}
pd = from_json(PackedDate, json_in)
self.assertEqual(pd.created_date, "2012-07-12")
self.assertJsonDataEqual(to_json(pd), json_in)
jpd = JsonPackedDate.from_json(json_in)
self.assertJsonDataEqual(jpd.json_data(), json_in)
json_in_2 = {"created": {"year": 2012, "month": 7}}
jpd = JsonPackedDate.from_json(json_in_2)
self.assertEqual(jpd.created_date, "2012-07-00")
self.assertJsonDataEqual(jpd.json_data(), json_in_2)
self.assertJsonDataEqual(
to_json(jpd, prop="created_date"), json_in_2['created'],
)
self.assertJsonDataEqual(
to_json(jpd, prop=PackedDate.created_date),
json_in_2['created'],
)
# to_json should not emit keys for undefined values
self.assertEqual(to_json(PackedDate()), {})
self.assertEqual(to_json(CheeseRecord()), {})
# unless they define defaults
class DefaultNone(Record):
none = Property(default=None)
emptystring = Property(default="")
false = Property(default=False)
self.assertJsonDataEqual(
to_json(DefaultNone()), {
"none": None,
"emptystring": "",
"false": False,
}
)
def test_custom_json_class_marshall(self):
class StreamChunk(JsonRecordList):
itemtype = CheeseRecord
next_url = Property()
previous_url = Property()
@classmethod
def json_to_initkwargs(cls, json_data, kwargs):
paging = json_data.get('paging', {})
kwargs['next_url'] = paging.get('next', None)
kwargs['previous_url'] = paging.get('previous', None)
kwargs = super(StreamChunk, cls).json_to_initkwargs(
json_data.get('data', []), kwargs,
)
return kwargs
def json_data(self):
return dict(
data=super(StreamChunk, self).json_data(),
paging=dict(
next=self.next_url,
previous=self.previous_url,
),
)
chunk = {"data": self.primitive['cheeses'][0:2],
"paging": {"next": "stream_token_3", "previous": None}}
sc = StreamChunk.from_json(chunk)
self.assertEqual(sc.next_url, "stream_token_3")
self.assertEqual(sc[0].smelliness, 38)
self.assertJsonDataEqual(sc.json_data(), chunk)
sc2 = StreamChunk(chunk)
self.assertEqual(sc2.next_url, "stream_token_3")
self.assertEqual(sc2[1].smelliness, 82)
self.assertJsonDataEqual(sc2.json_data(), chunk)
self.assertEqual(sc, sc2)
sc3 = eval(repr(sc))
self.assertEqual(sc, sc3)
def test_json_unknown_keys(self):
class JsonCheeseRecord(JsonRecord, CheeseRecord):
unknown_json_keys = Property(json_name=None)
input_json = dict(
variety="Manchego",
smelliness="38",
origin="Spain",
)
jcr = JsonCheeseRecord(input_json)
self.assertJsonDataEqual(jcr.json_data(), input_json)
class RegularJsonCheeseRecord(JsonRecord, CheeseRecord):
pass
rjcr = RegularJsonCheeseRecord(input_json)
diff = jcr.diff(rjcr, duck_type=True)
if diff:
self.fail("Found a difference: %s" % diff)
jcr2 = JsonCheeseRecord(input_json)
jcr2.variety += " (old)"
jcr2.smelliness -= 5
diff = jcr.diff(jcr2)
diff_json = diff.json_data()
self.assertEqual(len(diff_json), 2)
self.assertTrue(all(x['diff_type'] == 'modified' for x in diff_json))
sanitized = rjcr.json_data()
self.assertNotIn("origin", sanitized)
self.assertJsonDataEqual(rjcr.json_data(extraneous=True), input_json)
class NestedJsonRecord(JsonRecord):
cheese = Property(isa=JsonCheeseRecord)
cheese_list = ListProperty(of=JsonCheeseRecord)
nested_input = dict(
cheese=input_json,
cheese_list=[
{"variety": "Cream Havarti",
"type": "semi-soft",
"color": "pale yellow"},
{"variety": "Adelost",
"type": "semi-soft",
"color": "blue"},
],
)
nested_record = NestedJsonRecord(nested_input)
self.assertJsonDataEqual(
nested_record.json_data(extraneous=True),
nested_input,
)
def test_json_round_trip(self):
class Fruit(JsonRecord):
protein = Property()
fat = Property()
carb = Property()
class Banana(JsonRecord):
color = Property(isa=str)
length = Property(isa=int)
contents = JsonProperty(isa=Fruit)
vitamins = JsonProperty(isa=str)
banana = Banana(
color="yellow",
contents={
"carb": "23%",
"fat": "0.5%",
"protein": "1%",
},
vitamins={
"A": "144 IU",
"C": "19.6 mg",
"E": "0.2 mg",
},
length=6,
)
self.assertEqual(
Banana(banana.json_data(extraneous=True)),
banana
)
def test_marshall_exceptions(self):
class SomeRecordList(JsonRecordList):
itemtype = CheeseRecord
with self.assertRaisesRegexp(
exc.JsonCollectionCoerceError, r'array expected',
):
SomeRecordList({"foo": "bar"})
class SomeRecordMap(JsonRecordDict):
itemtype = CheeseRecord
with self.assertRaisesRegexp(
exc.JsonCollectionCoerceError, r'object expected',
):
SomeRecordMap([1, 2, 3])
class SomeRecord(JsonRecord):
some_list = JsonListProperty(of=CheeseRecord)
some_map = JsonDictProperty(of=CheeseRecord)
with self.assertRaisesRegexp(
exc.JsonConversionError, r'\.some_list\b.*array expected',
):
SomeRecord({"some_list": {"foo": "bar"}})
with self.assertRaisesRegexp(
exc.JsonConversionError, r'\.some_map\b.*object expected',
):
SomeRecord({"some_map": [1, 2, 3]})
class SomeOtherRecord(JsonRecord):
foo_bar = Property(isa=SomeRecord, json_name="fooBar")
with self.assertRaisesRegexp(
exc.JsonConversionError, r'\.fooBar\.some_list\b.*array expected',
):
SomeOtherRecord({"fooBar": {"some_list": {"foo": "bar"}}})
class WayRecord(JsonRecord):
down = JsonListProperty(of=SomeOtherRecord)
try:
WayRecord(
{"down": [
{"fooBar": {"some_list": {"foo": "bar"}}},
]}
)
except exc.JsonConversionError as e:
self.assertEqual(e.error_fs.path, ".down[0].fooBar.some_list")
class TurtlesRecord(JsonRecord):
all_the = JsonDictProperty(json_name="allThe", of=WayRecord)
try:
TurtlesRecord(
{"allThe": {"way": {"down": [
{"fooBar": {"some_list": {"foo": "bar"}}},
]}}}
)
except exc.JsonConversionError as e:
self.assertEqual(
e.error_fs.path,
".allThe.way.down[0].fooBar.some_list",
)
self.assertEqual(
e.sub_exception.passed, {"foo": "bar"},
)
def test_rich_enum(self):
class MyEnum(RichEnum):
class EnumValue(RichEnumValue):
def json_data(self):
return self.canonical_name
@classmethod
def from_json(self, string):
return MyEnum.from_canonical(string)
ONE = EnumValue('one', "One")
TWO = EnumValue('two', "Two")
class EnumsGalore(JsonRecord):
my_enum = JsonProperty(isa=MyEnum.EnumValue)
enum_list = JsonListProperty(of=MyEnum.EnumValue)
enum_map = JsonDictProperty(of=MyEnum.EnumValue)
json = {"my_enum": "one",
"enum_list": ["one", "two", "one"],
"enum_map": {"x": "one", "y": "two", "z": "two"}}
eg = EnumsGalore(json)
self.assertEqual(eg.my_enum, MyEnum.ONE)
self.assertEqual(eg.enum_list[2], MyEnum.ONE)
self.assertEqual(eg.enum_map["z"], MyEnum.TWO)
eg_json = eg.json_data()
self.assertEqual(eg_json, json)
| {
"content_hash": "0af42f5104bf83dfe35f092ac8f9f898",
"timestamp": "",
"source": "github",
"line_count": 498,
"max_line_length": 79,
"avg_line_length": 34.63253012048193,
"alnum_prop": 0.5280918420594886,
"repo_name": "hearsaycorp/normalize",
"id": "d417feb58a4879795d577f5dd5b35968a5e8b49a",
"size": "17809",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_marshal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "367584"
}
],
"symlink_target": ""
} |
import pygame
import pymunk
import math
import copy
import helper
from pymunk.vec2d import Vec2d
from pygame.constants import *
from ColorConstants import *
from Globals import *
############################################
############# SHADOW ENGINE ############### Info: http://forums.tigsource.com/index.php?topic=8803.0
############################################
def getProjectedPoint(light, point, radius = None, output = 'pygame', mode = None):
"""
Returns the projected point of the given point repective to the given light.
@light, point: tuples or vec2d
    @radius: int, if specified, the shadow will extend up to the radius (supposed
                  to be the light radius). IT'S IMPERFECT, there are leftovers,
                  small pieces left undrawn.
@output: can be pygame or pymunk (vec2d)
@mode:
- 1: don't check if the arguments given are vec2d's (much faster)
"""
if mode != 1:
if not isinstance(light, pymunk.vec2d.Vec2d):
light = toPymunk(light)
if not isinstance(point, pymunk.vec2d.Vec2d):
point = toPymunk(point)
light_to_point = point - light
projected_point = point + light_to_point
if radius:
extra_length = radius - light_to_point.get_length()
light_to_point = light_to_point.normalized()
vector_to_add = light_to_point * extra_length
projected_point = point + vector_to_add
# projected_point *= 1.0075
if output == 'pygame':
return toPygame(projected_point)
else:
return projected_point
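# A small, illustrative check of the projection maths above (pymunk output and
# mode=1, so the tuple-to-Vec2d conversion is skipped); values are arbitrary.
def _projected_point_examples():
    light = Vec2d(0, 0)
    point = Vec2d(3, 4)
    # Without a radius the point is simply mirrored away from the light:
    assert getProjectedPoint(light, point, output='pymunk', mode=1) == Vec2d(6, 8)
    # With a radius the shadow extends up to it: |light -> point| is 5, so the
    # projection is pushed 10 more units along (0.6, 0.8), landing near (9, 12).
    return getProjectedPoint(light, point, radius=15, output='pymunk', mode=1)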
def getProjectionVerticesForLine(light, a, b, radius = None, output = 'pygame', mode = None):
"""
Calculates and returns the projection vertices.
Returns: 4 values: a, b, c, d
- a = argument
- b = argument
- c = projected a vertex
- d = projected b vertex
The function is smart enough to recognize input format.
Info: http://forums.tigsource.com/index.php?topic=8803.0
@radius: int, if specified, the shadow will go up to the radius
@output: can be pygame or pymunk (vec2d)
@mode:
- 1: don't check if the arguments given are vec2d's (much faster)
"""
if mode != 1:
if not isinstance(light, pymunk.vec2d.Vec2d):
light = toPymunk(light)
if not isinstance(a, pymunk.vec2d.Vec2d):
a = toPymunk(a)
if not isinstance(b, pymunk.vec2d.Vec2d):
b = toPymunk(b)
c = getProjectedPoint(light, a, radius, output='pymunk', mode=1)
d = getProjectedPoint(light, b, radius, output='pymunk', mode=1)
if output == 'pygame':
return toPygame(a), toPygame(b), toPygame(c), toPygame(d)
else:
return a, b, c, d
def doesEdgeCastShadow(light, a, b, mode = None):
"""
Returns a boolean depending on whether or not the edge delimited by the
given arguments should cast a shadow based on the given light.
@light: origin, vec2d or (x, y) tuple.
@a: first point of the segment we're evaluating.
@b: second point of the segment we're evaluating.
@mode:
- 1: don't check if the arguments given are vec2d's (much faster)
"""
if mode != 1:
if not isinstance(light, pymunk.vec2d.Vec2d):
light = toPymunk(light)
if not isinstance(a, pymunk.vec2d.Vec2d):
a = toPymunk(a)
if not isinstance(b, pymunk.vec2d.Vec2d):
b = toPymunk(b)
start_to_end = b - a
normal = pymunk.vec2d.Vec2d(-1 * start_to_end.y, start_to_end.x)
light_to_start = a - light
if normal.dot(light_to_start) > 0:
return True
else:
return False
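# A tiny worked example of the winding test above: for light (0, 0) and the
# edge a=(1, 1) -> b=(2, 1), the normal is (0, 1) and its dot product with
# a - light = (1, 1) is 1 > 0, so the edge casts a shadow; reversing the
# winding flips the normal and therefore the answer.
def _edge_cast_examples():
    light, a, b = Vec2d(0, 0), Vec2d(1, 1), Vec2d(2, 1)
    assert doesEdgeCastShadow(light, a, b, mode=1) is True
    assert doesEdgeCastShadow(light, b, a, mode=1) is False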
def drawShadowFromVertices(surface, light, vertices, radius=None, color=BLACK, alpha=136, mode=None):
"""
Draws the shadow from the given vertices depending on their position relative
to the given light.
@light: light position in a (x, y) tuple or a vec2d.
@vertices: MUST be ordered clockwise, list/tuple of vertices.
@color: color in which the shadow will be drawn.
@alpha: the alpha (1..255) that will be applied to the shadow.
@mode:
- 1: don't check if the arguments given are vec2d's (much faster)
"""
vertices = list(vertices)
if mode != 1:
if not isinstance(light, pymunk.vec2d.Vec2d):
light = toPymunk(light)
for n in range(len(vertices)):
if not isinstance(vertices[n], pymunk.vec2d.Vec2d):
vertices[n] = toPymunk(vertices[n])
if not isinstance(color, pygame.Color):
r,g,b = color
        color = pygame.Color(r, g, b)
else:
color = copy.copy(color)
color.a = alpha
transparent_surface = surface.convert_alpha() #without this we can't draw transparencies
vertices.append(vertices[0])
for n in range(len(vertices)-1):
first, last = vertices[n], vertices[n+1]
if doesEdgeCastShadow(light, first, last): #if shadow should be drawn
# we get the vertices to draw the shadow polygon
a,b,c,d = getProjectionVerticesForLine(light, first, last, radius=radius,
output = 'pygame', mode = 1)
# we arrange the vertices clock wise order
vertices_to_draw = (a, c, d, b) # clock wise
pygame.draw.polygon(transparent_surface, color, vertices_to_draw)
surface.blit(transparent_surface, (0,0))
############################################
############## LIGHT ENGINE ###############
############################################
class Light:
"""
Provides a way to simulate light interfering with rectangles (other shapes
    may be smartly modeled as rectangles just for lighting purposes).
Works in PYGAME COORDINATES, aka 0,0 is topleft of the screen.
Usage:
- Create your light using the constructor: l = Light(x, y, size, alpha)
x and y are the center of the light, size is the radius.
- Create your mask: l.createMask()
    - Add your obstructors (shadow casters, things that light won't pass through)
- Whenever light position changes -> l.setLightPosition(x, y)
- Before blitting -> l.update()
WARNING: may not need to update every frame! it's expensive!
- Whenever you want to blit l.blit(surface_to_blit)
"""
def __init__(self, x, y, size = 100, alpha = None, color = WHITE, gradient = False):
"""
@x,y: light position (middle)
@size: radius of the light
@alpha: if given (1..255), light will be drawn with some transparency.
@color: color of the light.
@gradient: indicates whether light will be drawn with a gradient or not.
"""
self.x = x
self.y = y
self.size = size
self.obstructors = []
self.obstructor_segments = []
        # Data structures used for dealing with dynamic objects for extra efficiency
self.auxiliar_obstructors = []
self.auxiliar_obstructor_segments = []
# Data structures that allow us to update the obstructors with the camera
self.clean_obstructors = []
        self.mode = 'dirty' # changed to 'clean' by updateObstructors
self.light_rect = None
self.mask = None
self.color = color
self.gradient = gradient
if alpha:
if 1 <= alpha <= 255:
self.alpha = alpha
else:
raise ValueError('Alpha must be between 1 and 255')
else:
self.alpha = None
def addObstructor(self, rect, auxiliar = False):
"""
        Adds an obstructor to the light engine.
@rect: pygame.rect object.
@auxiliar: bool, adds the rect to a different list if given.
"""
top_left = rect.topleft
top_right = rect.topright
bottom_right = rect.bottomright
bottom_left = rect.bottomleft
if auxiliar:
self.auxiliar_obstructors.append(rect)
self.auxiliar_obstructor_segments.append((top_left, top_right))
self.auxiliar_obstructor_segments.append((top_right, bottom_right))
self.auxiliar_obstructor_segments.append((bottom_right, bottom_left))
self.auxiliar_obstructor_segments.append((bottom_left, top_left))
else:
self.obstructors.append(rect)
self.obstructor_segments.append((top_left, top_right))
self.obstructor_segments.append((top_right, bottom_right))
self.obstructor_segments.append((bottom_right, bottom_left))
self.obstructor_segments.append((bottom_left, top_left))
def setObstructors(self, rects, auxiliar = False):
"""
        Replaces the current obstructors with the ones inside the given
        iterable (set, list, etc).
@rects: iterator, contains pygame.rect objects.
@auxiliar: bool, adds the rect to a different list if given.
"""
if auxiliar:
self.auxiliar_obstructors = []
self.auxiliar_obstructor_segments = []
for rect in rects:
self.addObstructor(rect, auxiliar=True)
else:
self.obstructors = []
self.obstructor_segments = []
for rect in rects:
self.addObstructor(rect, auxiliar=False)
def cleanAuxiliar(self):
self.auxiliar_obstructors = []
self.auxiliar_obstructor_segments = []
def tracePoint(self,x1,y1,x2,y2,l):
"""
Only used from getPolygon
"""
theta = math.atan2((y2-y1),(x2-x1));
if theta<0:
d= (180*(theta+(math.pi*2))/math.pi)
else:
d= (180*(theta)/math.pi)
dx = math.cos(math.radians(d))
dy = math.sin(math.radians(d))
return (x2+dx*l,y2+dy*l)
def getPolygon(self,x,y,box):
"""
Only used from drawMask with arguments:
getPolygon(self.size,self.size,r)
where r is the new_rect that was first cropped and then reallocated.
"""
r = box.right
l = box.left
t = box.top
b = box.bottom
L = self.size+10
tracePoint = self.tracePoint
box = pygame.Rect(l,t,box.width-1,box.height-1)
lightPos = (self.size,self.size)
if x >= l and x <= r:
if y >= b: # directly under
#print "UNDER"
tp1 = tracePoint(x,y,l,b,L)
tp2 = tracePoint(x,y,r,b,L)
return ((box.bottomleft,tp1,[lightPos[0]-L,lightPos[1]-L],[lightPos[0]+L,lightPos[1]-L],tp2,box.bottomright))
else: # directly above
#print "ABOVE"
tp1 = tracePoint(x,y,l,t,L)
tp2 = tracePoint(x,y,r,t,L)
return ((box.topleft,tp1,[lightPos[0]-L,lightPos[1]+L],[lightPos[0]+L,lightPos[1]+L],tp2,box.topright))
elif y >= t and y <= b:
if x <= l: # directly to the left
#print "LEFT"
tp1 = tracePoint(x,y,l,b,L)
tp2 = tracePoint(x,y,l,t,L)
return ((box.bottomleft,tp1,[lightPos[0]+L,lightPos[1]+L],[lightPos[0]+L,lightPos[1]-L],tp2,box.topleft))
else: # directly to the right
#print "RIGHT"
tp1 = tracePoint(x,y,r,b,L)
tp2 = tracePoint(x,y,r,t,L)
return ((box.bottomright,tp1,[lightPos[0]-L,lightPos[1]+L],[lightPos[0]-L,lightPos[1]-L],tp2,box.topright))
if y <= t:
if x <= l: # upper left
#print "UPPER LEFT"
tp1 = tracePoint(x,y,r,t,L)
tp2 = tracePoint(x,y,l,b,L)
return ((box.topleft,box.topright,tp1,tp2,box.bottomleft))
else: # upper right
#print "UPPER RIGHT"
tp1 = tracePoint(x,y,l,t,L)
tp2 = tracePoint(x,y,r,b,L)
return ((box.topright,box.topleft,tp1,tp2,box.bottomright))
elif y >= b:
if x <= l: # lower left
#print "LOWER LEFT"
tp1 = tracePoint(x,y,r,b,L)
tp2 = tracePoint(x,y,l,t,L)
return ((box.bottomleft,box.bottomright,tp1,tp2,box.topleft))
else: # lower right
#print "LOWER RIGHT"
tp1 = tracePoint(x,y,l,b,L)
tp2 = tracePoint(x,y,r,t,L)
return ((box.bottomright,box.bottomleft,tp1,tp2,box.topright))
return None
def update(self):
"""
Previously drawMask.
The core of the engine, calculates the parts of the light which are
obfuscated by the obstructors and doesn't light those.
Stores the results in self.mask (which is a pygame.surface).
"""
img = self.mask
nrects = []
# Iterate over all the rects
# If one is colliding with light rect, it gets cropped and then moved,
# and added to nrects list
if self.mode == 'dirty':
obstructors_list = self.obstructors + self.auxiliar_obstructors
elif self.mode == 'clean':
obstructors_list = self.clean_obstructors + self.auxiliar_obstructors
else:
raise ValueError('Invalid mode')
for r in obstructors_list:
if self.light_rect.colliderect(r):
nr = r.clip(self.light_rect) # Returns a new rectangle that is cropped to be completely inside the argument Rect.
# Normalize the rectangle(move it near to 0,0 for following comparisons)
# Imagine a new rectangle at top left of size light_size*light_size,
# which is the mask, the rectangles are moved there.
nr.top = nr.top - self.light_rect.top
nr.left = nr.left - self.light_rect.left
nrects.append(nr)
img.fill(1) # black, which is set to transparent before
# draws the light circle
if self.gradient:
def f(x):
# return ((x*x))
return math.sqrt(x) - 0.1
# return math.exp(x)
# return -math.cos(x/1.2)
# return 0.49*math.cos(10*x)+0.5
# return math.exp(-x/10.)*math.sin(x)
# return math.ceil(x/10.)
# return math.exp(x-10)+math.exp(-x-10)
# return x**2-x**4
# return 10*x+10
def f2(x):
return x
                # return math.sqrt(x) - 0.1
# return math.exp(x)
start = (self.size, self.size)
end = (self.size*2, self.size)
start_color = self.color
end_color = (0,0,0)
mode = 1
g_func = f
r_func = f
b_func = f
a_func = f2
draw_circle(img, start, end, start_color, end_color, mode = mode, Afunc=a_func)
# Rfunc = r_func, Gfunc = g_func, Bfunc = b_func, Afunc = a_func)
else:
pygame.draw.circle(img, self.color, (self.size,self.size), self.size,0)
# iterates over all the rects (which were found colliding, were cropped and moved)
for r in nrects:
# if r.collidepoint(self.x, self.y):
# img.fill(1)
# return
p = self.getPolygon(self.size,self.size,r)
if p:
pygame.draw.polygon(img, 1, p, 0)
# draws the center of the light - the light 'producer'
# pygame.draw.circle(img, 3, (self.size,self.size), 2)
def updateObstructors(self, camera_x, camera_y):
"""
Updates position of the obstructors given a camera x and y.
@x,y: camera_x and camera_y.
"""
self.mode = 'clean'
self.clean_obstructors = []
for obstructor in self.obstructors:
x, y = obstructor.topleft
clean_obstructor = obstructor.copy()
x -= camera_x
y += camera_y
clean_obstructor.topleft = (x, y)
self.clean_obstructors.append(clean_obstructor)
def drawMap(self,surface, color = BLACK):
"""
Helper method, draws all the obstructors on the given surface.
Very useful for debugging.
"""
# img.fill((100,100,100))
for r in self.obstructors:
pygame.draw.rect(surface, color, r, 0)
def createMask(self):
"""
This method is highly customizable, serves the purpose of changing
the aesthetic of the light.
"""
mask = pygame.Surface([self.size*2,self.size*2],HWSURFACE)#|HWPALETTE,8)
# mask.set_palette([[0,0,0],[255,0,0],[180,180,180],[255,255,255]])
mask.set_colorkey(1, RLEACCEL) # 1 will be transparent
self.light_rect = mask.get_rect()
self.light_rect.center = (self.x, self.y)
if self.alpha:
mask.set_alpha(self.alpha)
self.mask = mask
def setLightPosition(self, x, y):
"""
Warning: give coordinates in pygame's mode.
"""
self.x = x
self.y = y
self.light_rect.center = (self.x, self.y)
def blit(self, surface):
"""
Paints the current mask into the given surface.
"""
surface.blit(self.mask, self.light_rect.topleft)
def isRectInsideLight(self, rect, x, y, camera_x=0, camera_y=0):
"""
        Warning: brute force approach! Assume low performance.
        Returns a boolean depending on whether the rect is inside the cast light
        or not.
Tests every rect vertex against every segment formed by the obstructors.
@rect: pygame.Rect instance.
@x,y: designed to be light x and y coordinates after applying camera
offsets.
"""
top_left = rect.topleft
top_right = rect.topright
bottom_right = rect.bottomright
bottom_left = rect.bottomleft
vertices = [top_left, top_right, bottom_right, bottom_left]
# vertices = [rect.center]
# adjust segments to camera values
# print(camera_x, camera_y)
if camera_x or camera_y:
segments_aux = []
for segment in self.obstructor_segments + self.auxiliar_obstructor_segments:
a = segment[0]
b = segment[1]
first_point = (a[0]-camera_x, a[1]+camera_y)
second_point = (b[0]-camera_x, b[1]+camera_y)
segments_aux.append((first_point, second_point))
else:
segments_aux = list(self.obstructor_segments)
# n = 1
# print(segments_aux[n], self.obstructor_segments[n])
# DEBUG
# pygame.draw.circle(DISPLAYSURF, RED, rect.center, 5)
# for segment in segments_aux:
# pygame.draw.line(DISPLAYSURF, RED, segment[0], segment[1], 5)
# pygame.draw.line(DISPLAYSURF, BLUE, rect.center,(x, y), 3)
# remove irrelevant vertices (far away)
for i in range(len(vertices)-1,-1,-1):
vertex = vertices[i]
# distance formula
if math.sqrt( (vertex[0]-x)**2 + (vertex[1]-y)**2 ) > self.size:
vertices.pop(i)
# we check if we have any vertex near light max range (optimization)
if len(vertices) > 0:
# iterate over obstructor segments
for segment in segments_aux:
# segment to test
C = segment[0]
D = segment[1]
# For each segment, test against each rect vertex.
# If they intersect, then that vertex isn't relevant anymore,
# because he's blocked from light (light doesn't reach that
# vertex), so we're free to remove it from the list.
for i in range(len(vertices)-1,-1,-1):
# second segment
A = vertices[i]
B = (x, y)
if Light.doSegmentsIntersect(A, B, C, D):
vertices.pop(i)
# At the end of the algorithm, we have removed all the irrelevant
# vertices of the list, so if there was any relevant one, then we can
# assume that the vertex of the rectangle (and thus, the rectangle
# itself) is affected by the light.
return len(vertices) > 0
@staticmethod
def doSegmentsIntersect(A, B, C, D):
"""
        Finds whether two segments intersect or not (returns bool).
        The two segments are AB and CD.
@A, B, C, D: points in tuple mode (x, y)
"""
def ccw(A,B,C):
"""Helper method."""
# return (C.y-A.y) * (B.x-A.x) > (B.y-A.y) * (C.x-A.x)
return (C[1]-A[1]) * (B[0]-A[0]) > (B[1]-A[1]) * (C[0]-A[0])
return ccw(A,C,D) != ccw(B,C,D) and ccw(A,B,C) != ccw(A,B,D)
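# A minimal end-to-end sketch of the workflow described in the Light docstring;
# the positions, sizes and the obstructor rect below are illustrative values only.
def _light_usage_example(target_surface):
    light = Light(200, 150, size=120, alpha=200)
    light.createMask()                                  # build the mask surface
    light.addObstructor(pygame.Rect(240, 120, 40, 40))  # a shadow caster
    light.setLightPosition(210, 160)                    # whenever the light moves
    light.update()                                      # recompute occlusion (expensive!)
    light.blit(target_surface)                          # paint the light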
###### LIGHT GRADIENT #######
def draw_circle(surface, startpoint, endpoint, startcolor, endcolor, Rfunc = (lambda x:x), Gfunc = (lambda x:x), Bfunc = (lambda x:x), Afunc = (lambda x:1), mode=0):
"""
Gradient.
Instead of returning an Surface, this function draw it directy onto the
given Surface and returns the rect.
"""
dx = endpoint[0]-startpoint[0]
dy = endpoint[1]-startpoint[1]
radius = int(round(math.hypot(dx, dy)))
pos = (startpoint[0]-radius, startpoint[1]-radius)
# if BLEND_MODES_AVAILABLE:
return surface.blit(radial_func(radius, startcolor, endcolor, Rfunc, Gfunc, Bfunc, Afunc), pos, None, mode)
# else:
# return surface.blit(radial_func(radius, startcolor, endcolor, Rfunc, Gfunc, Bfunc, Afunc), pos)
def radial_func(radius, startcolor, endcolor, Rfunc = (lambda x:x), Gfunc = (lambda x:x), Bfunc = (lambda x:x), Afunc = (lambda x:1), colorkey=(0,0,0,0)):
"""
    Draws a linear radial gradient on a square-sized surface and returns
that surface.
"""
bigSurf = pygame.Surface((2*radius, 2*radius)).convert_alpha()
if len(colorkey)==3:
colorkey += (0,)
bigSurf.fill(colorkey)
color = ColorInterpolator(radius, startcolor, endcolor, Rfunc, Gfunc, Bfunc, Afunc)
draw_circle = pygame.draw.circle
for rad in range(radius, 0, -1):
draw_circle(bigSurf, color.eval(rad), (radius, radius), rad)
return bigSurf
class ColorInterpolator(object):
'''
ColorInterpolator(distance, color1, color2, rfunc, gfunc, bfunc, afunc)
interpolates a color over the distance using different functions for r,g,b,a
separately (a= alpha).
'''
def __init__(self, distance, color1, color2, rfunc, gfunc, bfunc, afunc):
object.__init__(self)
self.rInterpolator = FunctionInterpolator(color1[0], color2[0], distance, rfunc)
self.gInterpolator = FunctionInterpolator(color1[1], color2[1], distance, gfunc)
self.bInterpolator = FunctionInterpolator(color1[2], color2[2], distance, bfunc)
if len(color1)==4 and len(color2)==4:
self.aInterpolator = FunctionInterpolator(color1[3], color2[3], distance, afunc)
else:
self.aInterpolator = FunctionInterpolator(255, 255, distance, afunc)
def eval(self, x):
'''
eval(x) -> color
returns the color at the position 0<=x<=d (actually not bound to this interval).
'''
return [self.rInterpolator.eval(x),
self.gInterpolator.eval(x),
self.bInterpolator.eval(x),
self.aInterpolator.eval(x)]
class FunctionInterpolator(object):
'''
    FunctionInterpolator(startvalue, endvalue, trange, func)
interpolates a function y=f(x) in the range trange with
startvalue = f(0)
endvalue = f(trange)
using the function func
'''
def __init__(self, startvalue, endvalue, trange, func):
object.__init__(self)
# function
self.func = func
# y-scaling
self.a = endvalue-startvalue
if self.a == 0:
self.a = 1.
# x-scaling
if trange!=0:
self.b = 1./abs(trange)
else:
self.b = 1.
# x-displacement
self.c = 0
# y-displacement
self.d = min(max(startvalue,0),255)
def eval(self, x):
'''
eval(x)->float
return value at position x
'''
# make sure that the returned value is in [0,255]
return int(min(max(self.a*self.func(self.b*(x+self.c))+self.d, 0), 255))
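# A small illustrative use of the interpolators above: fade from opaque red
# towards transparent black over a distance of 10, with the identity ramp on
# every channel (values chosen only for the example).
def _color_interpolator_example():
    identity = lambda x: x
    fade = ColorInterpolator(10, (255, 0, 0, 255), (0, 0, 0, 0),
                             identity, identity, identity, identity)
    return [fade.eval(d) for d in (0, 5, 10)]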
if __name__ == '__main__':
pass
| {
"content_hash": "e6fc7cff5c64cad41306d2201d409590",
"timestamp": "",
"source": "github",
"line_count": 644,
"max_line_length": 165,
"avg_line_length": 38.36801242236025,
"alnum_prop": 0.5623861750779068,
"repo_name": "GMadorell/Shades",
"id": "9940e45c155def0f24eb510139721d3f3df25f69",
"size": "24733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Code/LightEngine.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "169091"
}
],
"symlink_target": ""
} |
"""Data generators for VQA data sets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import csv
import json
import os
import random
import sys
import tarfile
import zipfile
import numpy as np
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import image_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators import vqa_utils
from tensor2tensor.layers import modalities
from tensor2tensor.utils import contrib
from tensor2tensor.utils import metrics
from tensor2tensor.utils import registry
import tensorflow.compat.v1 as tf
def _get_vqa_v2_annotations(directory,
annotation_url,
annotation_filename="vqa_v2.tar.gz"):
"""Extract the VQA V2 annotation files to directory unless it's there."""
annotation_file = generator_utils.maybe_download_from_drive(
directory, annotation_filename, annotation_url)
with tarfile.open(annotation_file, "r:gz") as annotation_tar:
annotation_tar.extractall(directory)
def _get_vqa_v2_image_raw_dataset(directory, image_root_url, image_urls):
"""Extract the VQA V2 image data set to directory unless it's there."""
for url in image_urls:
filename = os.path.basename(url)
download_url = os.path.join(image_root_url, url)
path = generator_utils.maybe_download(directory, filename, download_url)
unzip_dir = os.path.join(directory, filename.strip(".zip"))
if not tf.gfile.Exists(unzip_dir):
zipfile.ZipFile(path, "r").extractall(directory)
def _get_vqa_v2_image_feature_dataset(
directory, feature_url, feature_filename="mscoco_feat.tar.gz"):
"""Extract the VQA V2 feature data set to directory unless it's there."""
feature_file = generator_utils.maybe_download_from_drive(
directory, feature_filename, feature_url)
with tarfile.open(feature_file, "r:gz") as feature_tar:
feature_tar.extractall(directory)
class ImageQuestion2MultilabelProblem(image_utils.ImageProblem):
"""Base class for image question answer problem."""
@property
def target_space_id(self):
raise NotImplementedError()
@property
def vocab_size(self):
raise NotImplementedError
@property
def num_classes(self):
raise NotImplementedError()
@property
def vocab_filename(self):
raise NotImplementedError()
@property
def label_filename(self):
raise NotImplementedError()
@property
def train_shards(self):
raise NotImplementedError()
@property
def dev_shards(self):
raise NotImplementedError()
def source_data_files(self, dataset_split):
raise NotImplementedError()
def generator(self, data_dir, tmp_dir, dataset_split):
raise NotImplementedError()
def eval_metrics(self):
return [
metrics.Metrics.ACC_MULTILABEL_MATCH3,
]
def feature_encoders(self, data_dir):
input_encoder = text_encoder.ImageEncoder(channels=self.num_channels)
vocab_file = os.path.join(data_dir, self.vocab_filename)
question_encoder = text_encoder.TokenTextEncoder(
vocab_file, replace_oov="UNK")
label_file = os.path.join(data_dir, self.label_filename)
target_encoder = text_encoder.ClassLabelEncoder(
class_labels_fname=label_file)
return {"inputs": input_encoder,
"question": question_encoder,
"targets": target_encoder}
def hparams(self, defaults, unused_model_hparams):
p = defaults
question_encoder = self._encoders["question"]
targets_encoder = self._encoders["targets"]
p.modality = {
"inputs": modalities.ModalityType.IDENTITY,
"question": modalities.ModalityType.SYMBOL,
"targets": modalities.ModalityType.MULTI_LABEL,
}
p.vocab_size = {
"inputs": None,
"question": question_encoder.vocab_size,
"targets": targets_encoder.vocab_size,
}
p.input_space_id = problem.SpaceID.IMAGE # multiple input features?
p.target_space_id = self.target_space_id
def generate_data(self, data_dir, tmp_dir, task_id=-1):
generator_utils.generate_dataset_and_shuffle(
self.generator(data_dir, tmp_dir, problem.DatasetSplit.TRAIN),
self.training_filepaths(data_dir, self.train_shards, shuffled=False),
self.generator(data_dir, tmp_dir, problem.DatasetSplit.EVAL),
self.dev_filepaths(data_dir, self.dev_shards, shuffled=False))
@registry.register_problem
class ImageVqav2Tokens10kLabels3k(ImageQuestion2MultilabelProblem):
"""VQA V2, raw images, 10k question vocab, 3k answer label."""
_MSCOCO_ROOT_URL = "http://msvocds.blob.core.windows.net/"
_MSCOCO_IMAGE_URLS = [
"coco2014/train2014.zip", "coco2014/val2014.zip", "coco2014/test2014.zip",
]
_VQA_V2_ANNOTATION_URL = ("https://drive.google.com/uc?export=download&id="
"1xfMU54ObCLvMRAekT3cfcIg-AgY39fWB")
_VQA_V2_TRAIN_DATASETS = [
("trainval_resnet101_faster_rcnn_genome_36.tsv",
"v2_train2014_annotations.json"),
]
_VQA_V2_DEV_DATASETS = [
("trainval_resnet101_faster_rcnn_genome_36.tsv",
"v2_val2014_annotations.json"),
]
_VQA_V2_TEST_DATASETS = [
("test2015_resnet101_faster_rcnn_genome_36.tsv",
"v2_test2015_annotations.json"),
]
def source_data_files(self, dataset_split):
train = dataset_split == problem.DatasetSplit.TRAIN
return self._VQA_V2_TRAIN_DATASETS if train else self._VQA_V2_DEV_DATASETS
@property
def target_space_id(self):
return problem.SpaceID.GENERIC
@property
def vocab_size(self):
return 10000
@property
def num_classes(self):
return 3000
@property
def vocab_filename(self):
return "question.vocab.%d" % self.vocab_size
@property
def label_filename(self):
return "answer.label.%d" % self.num_classes
@property
def train_shards(self):
return 128
@property
def dev_shards(self):
return 64
def example_reading_spec(self):
data_fields, data_items_to_decoders = (
super(ImageVqav2Tokens10kLabels3k, self).example_reading_spec())
data_fields["image/image_id"] = tf.FixedLenFeature((), tf.int64)
data_fields["image/question_id"] = tf.FixedLenFeature((), tf.int64)
data_fields["image/question"] = tf.FixedLenSequenceFeature(
(), tf.int64, allow_missing=True)
data_fields["image/answer"] = tf.FixedLenSequenceFeature(
(), tf.int64, allow_missing=True)
slim = contrib.slim()
data_items_to_decoders["question"] = slim.tfexample_decoder.Tensor(
"image/question")
data_items_to_decoders["targets"] = slim.tfexample_decoder.Tensor(
"image/answer")
return data_fields, data_items_to_decoders
def preprocess_example(self, example, mode, hparams):
# hparams is model_hparams
image = example["inputs"]
example["inputs"] = vqa_utils.vqa_v2_preprocess_image(
image, hparams.height, hparams.width, mode,
resize_side=hparams.resize_side, distort=hparams.distort,
image_model_fn=hparams.image_model_fn)
return example
def generator(self, data_dir, tmp_dir, dataset_split):
datasets = self.source_data_files(dataset_split)
return self.vqa_v2_generator(data_dir, tmp_dir, datasets)
def vqa_v2_generator(self, data_dir, tmp_dir, datasets):
"""VQA v2 generator using raw images."""
_get_vqa_v2_annotations(tmp_dir, self._VQA_V2_ANNOTATION_URL)
_get_vqa_v2_image_raw_dataset(tmp_dir, self._MSCOCO_ROOT_URL,
self._MSCOCO_IMAGE_URLS)
vocab_path = os.path.join(data_dir, self.vocab_filename)
if not tf.gfile.Exists(vocab_path):
vocab_tmp_path = os.path.join(tmp_dir, self.vocab_filename)
tf.gfile.Copy(vocab_tmp_path, vocab_path)
with tf.gfile.GFile(vocab_path, mode="r") as f:
vocab_data = "<pad>\n<EOS>\n" + f.read() + "UNK\n"
with tf.gfile.GFile(vocab_path, mode="w") as f:
f.write(vocab_data)
label_path = os.path.join(data_dir, self.label_filename)
if not tf.gfile.Exists(label_path):
label_tmp_path = os.path.join(tmp_dir, self.label_filename)
tf.gfile.Copy(label_tmp_path, label_path)
vocab_encoder = text_encoder.TokenTextEncoder(vocab_path, replace_oov="UNK")
label_encoder = text_encoder.ClassLabelEncoder(
class_labels_fname=label_path)
prefix_annotation = []
for prefix, annotation_file in datasets:
annotation_path = os.path.join(tmp_dir, annotation_file)
with tf.gfile.Open(annotation_path) as f:
annotation_json = json.loads(f.read())
prefix_annotation += [(prefix, anno) for anno in annotation_json]
random.shuffle(prefix_annotation)
annotation_count = len(prefix_annotation)
tf.logging.info("Processing %d annotations for vqa v2" %(annotation_count))
for prefix, anno in prefix_annotation:
image_id = anno["image_id"]
question = vocab_encoder.encode(anno["question"])
answer = [label_encoder.encode(ans) for ans in anno["answer"]]
answer = answer if answer else [0] # 0 indicates padding
image_filename = "COCO_" + prefix + "_" + str(image_id).zfill(12) + ".jpg"
image_filepath = os.path.join(tmp_dir, prefix, image_filename)
with tf.gfile.Open(image_filepath, "r") as f:
encoded_image_data = f.read()
yield {
"image/encoded": [encoded_image_data],
"image/format": ["jpeg"],
"image/image_id": [image_id],
"image/question_id": [anno["question_id"]],
"image/question": question,
"image/answer": answer,
}
@registry.register_problem
class ImageVqav2RcnnFeatureTokens10kLabels3k(ImageVqav2Tokens10kLabels3k):
"""VQA V2, image feature, 10k question vocab, 3k answer label."""
_VQA_V2_FEATURE_URL = ("https://drive.google.com/uc?export=download&id="
"1yTTFUWqx1SScC-Whs2vRbF3tDsEEjrtt")
@property
def num_boxes(self):
return 36
@property
def feature_dimension(self):
return 2048
@property
def spatial_feature_dimension(self):
return 6
@property
def feature_file_field_names(self):
return ["image_id",
"image_w",
"image_h",
"num_boxes",
"boxes",
"features"]
def preprocess_example(self, example, mode, hparams):
# reshape some features
example["inputs"] = tf.reshape(
example["inputs"], [self.num_boxes, 1, self.feature_dimension])
example["spatial_feature"] = tf.reshape(
example["spatial_feature"],
[self.num_boxes, 1, self.spatial_feature_dimension])
return example
def example_reading_spec(self):
slim = contrib.slim()
data_fields, data_items_to_decoders = {}, {}
data_fields["image/feature"] = tf.FixedLenSequenceFeature(
(), tf.float32, allow_missing=True)
data_fields["image/spatial_feature"] = tf.FixedLenSequenceFeature(
(), tf.float32, allow_missing=True)
data_fields["image/image_id"] = tf.FixedLenFeature((), tf.int64)
data_fields["image/question_id"] = tf.FixedLenFeature((), tf.int64)
data_fields["image/question"] = tf.FixedLenSequenceFeature(
(), tf.int64, allow_missing=True)
data_fields["image/answer"] = tf.FixedLenSequenceFeature(
(), tf.int64, allow_missing=True)
data_items_to_decoders["inputs"] = slim.tfexample_decoder.Tensor(
"image/feature")
data_items_to_decoders["question_id"] = slim.tfexample_decoder.Tensor(
"image/question_id")
data_items_to_decoders["image_id"] = slim.tfexample_decoder.Tensor(
"image/image_id")
data_items_to_decoders["spatial_feature"] = slim.tfexample_decoder.Tensor(
"image/spatial_feature")
data_items_to_decoders["question"] = slim.tfexample_decoder.Tensor(
"image/question")
data_items_to_decoders["targets"] = slim.tfexample_decoder.Tensor(
"image/answer")
return data_fields, data_items_to_decoders
def vqa_v2_generator(self, data_dir, tmp_dir, datasets):
"""VQA v2 generator using image features."""
_get_vqa_v2_annotations(tmp_dir, self._VQA_V2_ANNOTATION_URL)
_get_vqa_v2_image_feature_dataset(tmp_dir, self._VQA_V2_FEATURE_URL)
vocab_path = os.path.join(data_dir, self.vocab_filename)
if not tf.gfile.Exists(vocab_path):
vocab_tmp_path = os.path.join(tmp_dir, self.vocab_filename)
tf.gfile.Copy(vocab_tmp_path, vocab_path)
with tf.gfile.GFile(vocab_path, mode="r") as f:
vocab_data = "<pad>\n<EOS>\n" + f.read() + "UNK\n"
with tf.gfile.GFile(vocab_path, mode="w") as f:
f.write(vocab_data)
label_path = os.path.join(data_dir, self.label_filename)
if not tf.gfile.Exists(label_path):
label_tmp_path = os.path.join(tmp_dir, self.label_filename)
tf.gfile.Copy(label_tmp_path, label_path)
vocab_encoder = text_encoder.TokenTextEncoder(vocab_path, replace_oov="UNK")
label_encoder = text_encoder.ClassLabelEncoder(
class_labels_fname=label_path)
# merge annotations
annotation_json = []
for _, annotation_file in datasets:
annotation_path = os.path.join(tmp_dir, annotation_file)
with tf.gfile.Open(annotation_path) as f:
annotation_json += json.loads(f.read())
annotation_count = len(annotation_json)
tf.logging.info("Processing %d annotations for vqa v2" %(annotation_count))
imageid2annotation = {}
for anno in annotation_json:
if anno["image_id"] not in imageid2annotation:
imageid2annotation[anno["image_id"]] = [anno]
else:
imageid2annotation[anno["image_id"]].append(anno)
csv.field_size_limit(sys.maxsize)
for feature_file, _ in datasets:
feature_file_path = os.path.join(tmp_dir, feature_file)
with open(feature_file_path, "r+b") as tsv_file:
csv_reader = csv.DictReader(
tsv_file, delimiter="\t", fieldnames=self.feature_file_field_names)
for item in csv_reader:
item["num_boxes"] = int(item["num_boxes"])
image_id = int(item["image_id"])
image_w = float(item["image_w"])
image_h = float(item["image_h"])
bboxes = np.frombuffer(base64.decodestring(item["boxes"]),
dtype=np.float32).reshape(
(item["num_boxes"], -1))
box_width = bboxes[:, 2] - bboxes[:, 0]
box_height = bboxes[:, 3] - bboxes[:, 1]
scaled_width = box_width / image_w
scaled_height = box_height / image_h
scaled_x = bboxes[:, 0] / image_w
scaled_y = bboxes[:, 1] / image_h
box_width = box_width[..., np.newaxis]
box_height = box_height[..., np.newaxis]
scaled_width = scaled_width[..., np.newaxis]
scaled_height = scaled_height[..., np.newaxis]
scaled_x = scaled_x[..., np.newaxis]
scaled_y = scaled_y[..., np.newaxis]
spatial_features = np.concatenate(
(scaled_x,
scaled_y,
scaled_x + scaled_width,
scaled_y + scaled_height,
scaled_width,
scaled_height),
axis=1)
if image_id in imageid2annotation:
for anno in imageid2annotation[image_id]:
question = vocab_encoder.encode(anno["question"])
answer = [label_encoder.encode(ans) for ans in anno["answer"]]
answer = answer if answer else [0] # 0 indicates padding
yield {
"image/feature":
np.frombuffer(base64.decodestring(item["features"]),
dtype=np.float32).tolist(),
"image/spatial_feature": spatial_features.flatten().tolist(),
"image/height": [image_h],
"image/width": [image_w],
"image/bboxes": bboxes.flatten().tolist(),
"image/image_id": [image_id],
"image/question_id": [anno["question_id"]],
"image/question": question,
"image/answer": answer,
}
del imageid2annotation[image_id]
# assert all annotations are included
assert not imageid2annotation
| {
"content_hash": "c055f47a2df9ad836627d4265a14e804",
"timestamp": "",
"source": "github",
"line_count": 439,
"max_line_length": 80,
"avg_line_length": 37.227790432801825,
"alnum_prop": 0.650186624242795,
"repo_name": "tensorflow/tensor2tensor",
"id": "cd932845ae432182138563385fd9b762b40dbdaf",
"size": "16949",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensor2tensor/data_generators/vqa.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "32015"
},
{
"name": "HTML",
"bytes": "34684"
},
{
"name": "JavaScript",
"bytes": "78408"
},
{
"name": "Jupyter Notebook",
"bytes": "2859453"
},
{
"name": "Python",
"bytes": "5109255"
},
{
"name": "Shell",
"bytes": "11941"
}
],
"symlink_target": ""
} |
from utils.vector_manager import VectorManager
from time import time
import multiprocessing as mp
import argparse
import numpy as np
import os
import sys
confidence = 0.8
def id2Word(param):
filename, id2w = param
file_words = "%s_clean" % filename.split("_num")[0]
print("Comparing original %s with %s" % (file_words, filename))
    def is_valid_numpy():
        """
        Compares the ids stored in `filename` (4D structure) against the
        original words in `file_words`; returns True if the equality ratio
        reaches `confidence`.
        """
docs_ids = VectorManager.read_vector(filename)
original = VectorManager.parse_into_4D(VectorManager.read_vector(file_words))
file_list = []
comparison = []
unknowns = 0
for d in range(0, len(docs_ids)):
doc_list = []
for p in range(0, len(docs_ids[d])):
par_list = []
for s in range(0, len(docs_ids[d][p])):
sent_list = []
for w in range(0, len(docs_ids[d][p][s])):
try:
translated = to_word(docs_ids[d][p][s][w])
if translated == '<unk>':
unknowns += 1
comparison.append(translated == original[d][p][s][w])
sent_list.append(translated)
except Exception as e:
print("[%s] Indices %s %s %s %s: %s" % (filename, d,p,s,w, e))
par_list.append(sent_list)
doc_list.append(par_list)
file_list.append(doc_list)
valid = False
try:
ratio = float(comparison.count(True)) / len(comparison)
u_ratio = round(float(unknowns) / len(comparison), 2)
if ratio < confidence:
print("[WARN] File %s equality ratio is %s with %s unknown ratio" % (filename, round(ratio, 2), u_ratio))
else:
print("[OK] File %s equality ratio is %s with %s unknown ratio" % (filename, round(ratio, 2), u_ratio))
valid = True
except KeyError as e:
print("[ERROR] File %s is completely different (%s) with %s unknown ratio" % (filename, e, u_ratio))
return valid
    def is_valid():
        """
        Flat comparison of the translated words against the originals; returns
        True if the equality ratio reaches `confidence`.
        """
with open(file_words) as f:
original = f.read().decode("latin-1").split()
with open(file_words) as f:
docs_ids = f.read().split()
doc_words = [id2w(id) for id in docs_ids]
        comparison = [original[i] == doc_words[i] for i in range(len(original))]
valid = False
try:
ratio = float(comparison.count(True)) / len(comparison)
if ratio < confidence:
print("[WARN] File %s equality ratio is %s." % (filename, round(ratio, 2)))
else:
print("[OK] File %s equality ratio is %s." % (filename, round(ratio, 2)))
valid = True
except KeyError as e:
            print("[ERROR] File %s is completely different (%s)" % (filename, e))
return valid
def to_word(id):
"""
Return Word associated with id
:param id: of the word to translate
:return: word associated with the ID
"""
try:
word = id2w[id]
except IndexError as e:
print("ID %s not found\n%s" % (id, e))
word = '<unk>'
return word
return is_valid()
class FileID2Word(object):
"""
    Auxiliary class which holds the filepaths and the id2w structure and yields
    them one at a time, in order to avoid replicating the id2w structure (which
    can be quite big).
"""
def __init__(self, filepaths, id2w):
self.filepaths = filepaths
self.id2w = id2w
def __iter__(self):
for file in self.filepaths:
yield (file, self.id2w)
def check_translated_files(data_path, w2Id):
"""
    Validates, in parallel, the word-to-id translated files in data_path against
    their original word files using the mapping w2Id.
    :param data_path: path of the files to validate. Can be called either from
    main or as a block of the pipeline
:param w2id: mappings to be used
"""
print("[BLOCK] Validating translated files from %s" % (data_path))
    sorted_list = sorted(w2Id.items(), key=lambda x: x[1])
id2words = [w for w,_ in sorted_list]
del w2Id, sorted_list
filepaths = []
for root, dirs, files in os.walk(data_path):
filepaths.extend(["%s/%s" % (root, file) for file in files if file.endswith("_num.npy")])
threads = mp.cpu_count() * 2
iter_file_w2id = FileID2Word(filepaths, id2words)
print("[BLOCK] Starting validation with %s processes and %s files" % (threads, len(filepaths)))
p = mp.Pool(threads, maxtasksperchild=1)
valids = p.map(id2Word, iter_file_w2id)
print("[BLOCK] Validation done. Correct files %s/%s. Confidence [%s]" % (valids.count(True), len(valids), confidence))
sys.stdout.flush()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--data', type=str, required=True,
                        help="Path of the translated data (_num.npy files) to validate against the originals.")
    parser.add_argument('-w', '--word_vector', type=str, required=True,
                        help="Word2ID vector to be used for doc reverse translation.")
args = parser.parse_args()
data_path = args.data
word2id_file = args.word_vector
begin = time()
w2Id = VectorManager.read_vector(word2id_file)
check_translated_files(data_path, w2Id)
end = time()
print("Total processing time: %d seconds" % (end - begin))
| {
"content_hash": "00ee828b051c1ca5bde4b23d20ef1f44",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 122,
"avg_line_length": 35.35625,
"alnum_prop": 0.55524129397207,
"repo_name": "kafkasl/contextualLSTM",
"id": "2c900ce67abd5cd2a8ca5d404f1a0c9cdbc12503",
"size": "5657",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/preprocess/words2ids_validator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "127138"
},
{
"name": "Shell",
"bytes": "8976"
}
],
"symlink_target": ""
} |
import sys
from rdkit import Chem,DataStructs
# our default rdkit fingerprinter parameters:
rdkitFpParams=dict(maxPath=5,fpSize=1024,nBitsPerHash=2)
#Fragmentation algorithm
#-----------------------
#identify acyclic bonds
#enumerate all single cuts
#make sure you chop off more than 1 atom
#keeps bits which are >60% query mol
#enumerate all double cuts
#keeps bits with 1 attachment point (i.e. throw middle bit away)
#need to be >60% query mol
#identify exocyclic bonds
#enumerate all single "ring" cuts
#Check if it results in more than one component
#keep correct bit if >40% query mol
#enumerate successful "rings" cuts with an acyclic cut
#Check if it results in more than one component
#keep correct if >60% query mol
#start
def delete_bonds(mol,bonds,ftype,hac):
#use the same parent mol object and create editable mol
em = Chem.EditableMol(mol)
#loop through the bonds to delete
#print "Breaking bonds between atoms: ",bonds
for b in bonds:
#remove the bond
em.RemoveBond(b[0],b[1])
        #now add attachment points
newAtomA = em.AddAtom(Chem.Atom(0))
em.AddBond(b[0],newAtomA,Chem.BondType.SINGLE)
newAtomB = em.AddAtom(Chem.Atom(0))
em.AddBond(b[1],newAtomB,Chem.BondType.SINGLE)
#should be able to get away without sanitising mol
#as the valencies should be okay
modifiedMol = em.GetMol()
#do not do a full sanitization, but do find rings and calculate valences:
Chem.SanitizeMol(modifiedMol,Chem.SanitizeFlags.SANITIZE_PROPERTIES|Chem.SanitizeFlags.SANITIZE_SYMMRINGS)
fragmented_smi = Chem.MolToSmiles(modifiedMol,True)
#print fragmented_smi
    fraggle_fragmentation = select_fragments(fragmented_smi,ftype,hac)
    return fraggle_fragmentation
def is_ring_cut_valid(smi):
    #to check if a fragment is a valid ring cut, it needs to match the
#smarts: [$([#0][r].[r][#0]),$([#0][r][#0])]
atom_count = 0
valid = False
m = Chem.MolFromSmiles(smi)
if m is not None:
        #use global smarts
if(m.HasSubstructMatch(cSma1) or m.HasSubstructMatch(cSma2)):
atom_count = m.GetNumAtoms()
valid = True
return valid,atom_count
def select_fragments(f_smi,ftype,hac):
result = ""
result_hcount = 0
fragments = f_smi.split(".")
if(ftype == "acyclic"):
for f in fragments:
attachments = f.count("*")
#check if terminal fragment
if(attachments == 1):
fMol = Chem.MolFromSmiles(f)
fhac = fMol.GetNumAtoms()
                #if the fragment is 2 atoms (or less - includes the attachment) it is too small
#to be interesting. This check has the additional benefit
#of pulling out the relevant single cuts as it discards
#fragments where we only chop off a small part of the input cmpd
if(fhac > 3):
result = "%s.%s" % (result,f)
result_hcount = result_hcount + fhac
#needs to be greater than 60% of parent mol
if( (result != "") and (result_hcount > 0.6*hac) ):
#remove first character as it will always be "."
result = result[1:]
else:
result = None
elif(ftype == "cyclic"):
result = None
#make sure it is 2 components
if( len(fragments) == 2):
for f in fragments:
#check if a valid cut
valid,result_hcount = is_ring_cut_valid(f)
if(valid):
                    #needs to be greater than 3 heavy atoms and greater than 40% of parent mol
if((result_hcount > 3) and (result_hcount > 0.4*hac)):
result = f
elif(ftype == "cyclic_and_acyclic"):
#print f_smi
result = ""
#need to find the fragments which are valid which means they must be:
# Terminal (one attachment point) or valid ring cut
for f in fragments:
attachments = f.count("*")
if(attachments >= 3):
continue
if(attachments == 2):
#check if a valid cut
valid,result_hcount = is_ring_cut_valid(f)
if(valid):
                    #needs to be greater than 3 heavy atoms
if(result_hcount > 3):
result = "%s.%s" % (result,f)
elif(attachments == 1):
fMol = Chem.MolFromSmiles(f)
fhac = fMol.GetNumAtoms()
                #needs to be greater than 3 heavy atoms
if(fhac > 3):
result = "%s.%s" % (result,f)
result_hcount = result_hcount + fhac
#print "F: %s" % (result)
#appropriate fragmentations must have 2 components
#result will always start with . because of the way it is constructed
        #hence 2 component result will contain 2 dots
if( (result != "") and (result.count(".") == 2) ):
#take off the starting dot when building smiles
fMol = Chem.MolFromSmiles(result[1:])
result_hcount = fMol.GetNumAtoms()
            #needs to be greater than 3 heavy atoms and greater than 60% of parent mol
if((result_hcount > 3) and (result_hcount > 0.6*hac)):
#take off the starting dot
result = result[1:]
else:
result = None
else:
result = None
return result
#Global smarts used by the program
#acyclic bond smarts
acyc_smarts = Chem.MolFromSmarts("[*]!@!=!#[*]")
#exocyclic/fused exocyclic bond smarts
cyc_smarts = Chem.MolFromSmarts("[R1,R2]@[r;!R1]")
#smarts used to find the appropriate ring-cut fragments
#would use SMARTS: [$([#0][r].[r][#0]),$([#0][r][#0])]
#but rdkit doesn't support component SMARTS in recursive one - $([#0][r].[r][#0])
#hence split into two
cSma1 = Chem.MolFromSmarts("[#0][r].[r][#0]")
cSma2 = Chem.MolFromSmarts("[#0][r][#0]")
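#illustrative, comment-only sketch of the ring-cut check (the SMILES below is a
#made-up fragment, not one from the tests): a double ring cut such as
#'[*]C1CCCCC1[*]' has attachment points on two different ring atoms, so it
#should match cSma1 and is_ring_cut_valid('[*]C1CCCCC1[*]') should return
#(True, 8) - six ring atoms plus the two attachment points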
def generate_fraggle_fragmentation(mol):
"""
>>> q = Chem.MolFromSmiles('COc1cc(CN2CCC(NC(=O)c3cncc(C)c3)CC2)c(OC)c2ccccc12')
>>> list(generate_fraggle_fragmentation(q))
['[*]C(=O)NC1CCN(Cc2cc(OC)c3ccccc3c2OC)CC1', '[*]C(=O)c1cncc(C)c1.[*]C1CCN(Cc2cc(OC)c3ccccc3c2OC)CC1', '[*]C(=O)c1cncc(C)c1.[*]Cc1cc(OC)c2ccccc2c1OC', '[*]C(=O)c1cncc(C)c1.[*]c1cc(OC)c2ccccc2c1OC', '[*]C1CCN(Cc2cc(OC)c3ccccc3c2OC)CC1', '[*]C1CCN(Cc2cc(OC)c3ccccc3c2OC)CC1.[*]c1cncc(C)c1', '[*]Cc1cc(OC)c2ccccc2c1OC.[*]NC(=O)c1cncc(C)c1', '[*]Cc1cc(OC)c2ccccc2c1OC.[*]c1cncc(C)c1', '[*]N1CCC(NC(=O)c2cncc(C)c2)CC1.[*]c1cc(OC)c2ccccc2c1OC', '[*]NC(=O)c1cncc(C)c1.[*]c1cc(OC)c2ccccc2c1OC', '[*]NC1CCN(Cc2cc(OC)c3ccccc3c2OC)CC1', '[*]NC1CCN(Cc2cc(OC)c3ccccc3c2OC)CC1.[*]c1cncc(C)c1', '[*]c1c(CN2CCC(NC(=O)c3cncc(C)c3)CC2)cc(OC)c2ccccc12', '[*]c1c(OC)cc(CN2CCC(NC(=O)c3cncc(C)c3)CC2)c(OC)c1[*]', '[*]c1cc(CN2CCC(NC(=O)c3cncc(C)c3)CC2)c(OC)c2ccccc12', '[*]c1cc(OC)c2ccccc2c1OC.[*]c1cncc(C)c1']
"""
#query mol heavy atom count
hac = mol.GetNumAtoms()
#different cuts can give the same fragments
    #so use out_fragments to remove them
out_fragments = set()
######################
# Single acyclic Cuts
######################
#find the relevant bonds to break
acyclic_matching_atoms = mol.GetSubstructMatches(acyc_smarts)
#print "Matching Atoms:"
#print("acyclic matching atoms: ",acyclic_matching_atoms)
total_acyclic = len(acyclic_matching_atoms)
bonds_selected = []
#loop to generate every single and double cut in the molecule
for x in range( total_acyclic ):
#single cuts are not required
#relevant single cut fragments can be found from the double cuts
        #for explanation see the select_fragments method
for y in range(x+1,total_acyclic):
#print matching_atoms[x],matching_atoms[y]
bonds_selected.append(acyclic_matching_atoms[x])
bonds_selected.append(acyclic_matching_atoms[y])
fragment = delete_bonds(mol,bonds_selected,"acyclic",hac)
if fragment is not None:
#print(fragment)
out_fragments.add(fragment)
bonds_selected = []
#print(out_fragments)
##################################
# Fused/Spiro exocyclic bond Cuts
##################################
#find the relevant bonds to break
cyclic_matching_atoms = mol.GetSubstructMatches(cyc_smarts)
#print("cyclic matching atoms: ",cyclic_matching_atoms)
#print "Matching Atoms:"
#print matching_atoms
total_cyclic = len(cyclic_matching_atoms)
bonds_selected = []
#loop to generate every double cut of relevant bonds
for x in range( total_cyclic ):
for y in range(x+1,total_cyclic):
#print matching_atoms[x],matching_atoms[y]
bonds_selected.append(cyclic_matching_atoms[x])
bonds_selected.append(cyclic_matching_atoms[y])
fragment = delete_bonds(mol,bonds_selected,"cyclic",hac)
bonds_selected = []
if fragment is not None:
#print "%s" % (fragment)
out_fragments.add(fragment)
#now do an acyclic cut with the successful cyclic cut
for z in range(total_acyclic):
bonds_selected.append(cyclic_matching_atoms[x])
bonds_selected.append(cyclic_matching_atoms[y])
bonds_selected.append(acyclic_matching_atoms[z])
fragment = delete_bonds(mol,bonds_selected,"cyclic_and_acyclic",hac)
if fragment is not None:
#print "%s" % (fragment)
out_fragments.add(fragment)
bonds_selected = []
return sorted(list(out_fragments))
#atomcontrib algorithm
#generate fp of query_substructs (qfp)
#
#loop through atoms of smiles
# For each atom
# Generate partial fp of the atom (pfp)
# Find Tversky sim of pfp in qfp
# If Tversky < 0.8, mark atom in smiles
#
#Loop thru marked atoms
# If marked atom in ring - turn all atoms in that ring to * (aromatic) or Sc (aliphatic)
# For each marked atom
# If aromatic turn to a *
# If aliphatic turn to a Sc
#
# Return modified smiles
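#note on the threshold below: the Tversky similarity is called as
#TverskySimilarity(subsFp, modifiedFP, 0, 1), i.e. alpha=0, beta=1, which reduces to
#the fraction of the atom's partial-fingerprint bits that are also set in the query
#substructure fingerprint; atoms whose environment is largely missing from the
#substructure fall below the default 0.8 threshold and get marked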
def atomContrib(subs,mol,tverskyThresh=0.8):
marked = {}
def partialFP(atomID,tverskyThresh):
#create empty fp
modifiedFP = DataStructs.ExplicitBitVect(1024)
modifiedFP.SetBitsFromList(aBits[atomID])
tverskySim = DataStructs.TverskySimilarity(subsFp,modifiedFP,0,1)
if(tverskySim < tverskyThresh):
#print "%i %s: %f" % (atomID+1, pMol.GetAtomWithIdx(atomID).GetSymbol(), tverskySim)
marked[atomID] = 1
#generate mol object & fp for input mol
    aBits = []
pMol = Chem.Mol(mol.ToBinary())
pMolFp = Chem.RDKFingerprint(pMol, atomBits=aBits, **rdkitFpParams)
#generate fp of query_substructs
qsMol = Chem.MolFromSmiles(subs)
subsFp = Chem.RDKFingerprint(qsMol, **rdkitFpParams)
#loop through atoms of smiles and mark
for atom in pMol.GetAtoms():
#store atoms to change
partialFP(atom.GetIdx(),tverskyThresh)
#get rings to change
ssr = pMol.GetRingInfo().AtomRings()
#loop thru rings and records rings to change
ringsToChange = {}
for ringList in range(len(ssr)):
#print "New ring"
for ringAtom in range(len(ssr[ringList])):
#print ssr[ringList][ringAtoms]
if ssr[ringList][ringAtom] in marked:
#print ssr[ringList][ringAtoms]
ringsToChange[ringList] = 1
#now add these ring atoms to marked
for ringList in ringsToChange:
for ringAtom in range(len(ssr[ringList])):
marked[ ssr[ringList][ringAtom] ] = 1
if(len(marked) > 0):
#now mutate the marked atoms
for key in marked:
#print key
if( pMol.GetAtomWithIdx(key).GetIsAromatic() ):
#pMol.GetAtomWithIdx(key).SetAtomicNum(91)
                #this works every time and causes far fewer problems
pMol.GetAtomWithIdx(key).SetAtomicNum(0)
pMol.GetAtomWithIdx(key).SetNoImplicit(True)
else:
#gives best sim
pMol.GetAtomWithIdx(key).SetAtomicNum(21)
                #works better but fails due to valency when replacing S
#pMol.GetAtomWithIdx(key).SetAtomicNum(6)
try:
Chem.SanitizeMol(pMol,sanitizeOps=Chem.SANITIZE_ALL^Chem.SANITIZE_KEKULIZE^Chem.SANITIZE_SETAROMATICITY)
except Exception:
sys.stderr.write("Can't parse smiles: %s\n" % (Chem.MolToSmiles(pMol)))
pMol = Chem.Mol(mol.ToBinary())
return pMol
modified_query_fps = {}
def compute_fraggle_similarity_for_subs(inMol,qMol,qSmi,qSubs,tverskyThresh=0.8):
qFP = Chem.RDKFingerprint(qMol, **rdkitFpParams)
iFP = Chem.RDKFingerprint(inMol, **rdkitFpParams)
rdkit_sim = DataStructs.TanimotoSimilarity(qFP,iFP)
qm_key = "%s_%s" % (qSubs,qSmi)
if qm_key in modified_query_fps:
qmMolFp = modified_query_fps[qm_key]
else:
qmMol = atomContrib(qSubs,qMol,tverskyThresh)
qmMolFp = Chem.RDKFingerprint(qmMol, **rdkitFpParams)
modified_query_fps[qm_key] = qmMolFp
rmMol = atomContrib(qSubs,inMol,tverskyThresh)
#wrap in a try, catch
try:
rmMolFp = Chem.RDKFingerprint(rmMol, **rdkitFpParams)
fraggle_sim=max(DataStructs.FingerprintSimilarity(qmMolFp,rmMolFp),
rdkit_sim)
#print '\t',qSubs,fraggle_sim,rdkit_sim
except Exception:
sys.stderr.write("Can't generate fp for: %s\n" % (Chem.MolToSmiles(rmMol,True)))
fraggle_sim = 0.0
return rdkit_sim,fraggle_sim
def GetFraggleSimilarity(queryMol,refMol,tverskyThresh=0.8):
""" return the Fraggle similarity between two molecules
>>> q = Chem.MolFromSmiles('COc1cc(CN2CCC(NC(=O)c3cncc(C)c3)CC2)c(OC)c2ccccc12')
>>> m = Chem.MolFromSmiles('COc1cc(CN2CCC(NC(=O)c3ccccc3)CC2)c(OC)c2ccccc12')
>>> sim,match = GetFraggleSimilarity(q,m)
>>> sim
0.980...
>>> match
'[*]C1CCN(Cc2cc(OC)c3ccccc3c2OC)CC1'
>>> m = Chem.MolFromSmiles('COc1cc(CN2CCC(Nc3nc4ccccc4s3)CC2)c(OC)c2ccccc12')
>>> sim,match = GetFraggleSimilarity(q,m)
>>> sim
0.794...
>>> match
'[*]C1CCN(Cc2cc(OC)c3ccccc3c2OC)CC1'
>>> q = Chem.MolFromSmiles('COc1ccccc1')
>>> sim,match = GetFraggleSimilarity(q,m)
>>> sim
0.347...
>>> match
'[*]c1ccccc1'
"""
if hasattr(queryMol,'_fraggleDecomp'):
frags = queryMol._fraggleDecomp
else:
frags = generate_fraggle_fragmentation(queryMol)
queryMol._fraggleDecomp = frags
qSmi = Chem.MolToSmiles(queryMol,True)
result=0.0
bestMatch=None
for frag in frags:
rdksim,fragsim= compute_fraggle_similarity_for_subs(refMol,queryMol,qSmi,frag,tverskyThresh)
if fragsim>result:
result=fragsim
bestMatch=frag
return result,bestMatch
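#note: the query decomposition is cached on the molecule itself (_fraggleDecomp),
#so repeated GetFraggleSimilarity calls against the same query molecule only pay
#the fragmentation cost once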
#------------------------------------
#
# doctest boilerplate
#
def _test():
import doctest,sys
return doctest.testmod(sys.modules["__main__"],optionflags=doctest.ELLIPSIS+doctest.NORMALIZE_WHITESPACE)
if __name__ == '__main__':
import sys
failed,tried = _test()
sys.exit(failed)
| {
"content_hash": "d0804163ddd3cb65b36c5cf8305814ff",
"timestamp": "",
"source": "github",
"line_count": 439,
"max_line_length": 792,
"avg_line_length": 35.16400911161731,
"alnum_prop": 0.6079549135194662,
"repo_name": "adalke/rdkit",
"id": "ed2f5cfcf47af7f81ad58a2f2cc1a82e94e78e90",
"size": "17117",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rdkit/Chem/Fraggle/FraggleSim.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "385"
},
{
"name": "C",
"bytes": "226290"
},
{
"name": "C#",
"bytes": "6745"
},
{
"name": "C++",
"bytes": "7847294"
},
{
"name": "CMake",
"bytes": "611343"
},
{
"name": "CSS",
"bytes": "3231"
},
{
"name": "FORTRAN",
"bytes": "7661"
},
{
"name": "HTML",
"bytes": "63047"
},
{
"name": "Java",
"bytes": "291444"
},
{
"name": "JavaScript",
"bytes": "11595"
},
{
"name": "LLVM",
"bytes": "29594"
},
{
"name": "Lex",
"bytes": "4508"
},
{
"name": "Makefile",
"bytes": "15435"
},
{
"name": "Objective-C",
"bytes": "298"
},
{
"name": "Python",
"bytes": "3138951"
},
{
"name": "QMake",
"bytes": "389"
},
{
"name": "SMT",
"bytes": "3010"
},
{
"name": "Shell",
"bytes": "12651"
},
{
"name": "Smarty",
"bytes": "5864"
},
{
"name": "Yacc",
"bytes": "49429"
}
],
"symlink_target": ""
} |
"""Common operations on Posix pathnames.
Instead of importing this module directly, import os and refer to
this module as os.path. The "os.path" name is an alias for this
module on Posix systems; on other systems (e.g. Mac, Windows),
os.path provides the same operations in a manner specific to that
platform, and is an alias to another module (e.g. macpath, ntpath).
Some of this can actually be useful on non-Posix systems too, e.g.
for manipulation of the pathname component of URLs.
"""
import os
import sys
import stat
import genericpath
import warnings
from genericpath import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime","islink","exists","lexists","isdir","isfile",
"ismount","walk","expanduser","expandvars","normpath","abspath",
"samefile","sameopenfile","samestat",
"curdir","pardir","sep","pathsep","defpath","altsep","extsep",
"devnull","realpath","supports_unicode_filenames","relpath"]
# strings representing various path-related bits and pieces
curdir = '.'
pardir = '..'
extsep = '.'
sep = '/'
pathsep = ':'
defpath = ':/bin:/usr/bin'
altsep = None
devnull = '/dev/null'
# Normalize the case of a pathname. Trivial in Posix, string.lower on Mac.
# On MS-DOS this may also turn slashes into backslashes; however, other
# normalizations (such as optimizing '../' away) are not allowed
# (another function should be defined to do that).
def normcase(s):
"""Normalize case of pathname. Has no effect under Posix"""
return s
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
def isabs(s):
"""Test whether a path is absolute"""
return s.startswith('/')
# Join pathnames.
# Ignore the previous parts if a part is absolute.
# Insert a '/' unless the first part is empty or already ends in '/'.
def join(a, *p):
"""Join two or more pathname components, inserting '/' as needed.
If any component is an absolute path, all previous path components
will be discarded."""
path = a
for b in p:
if b.startswith('/'):
path = b
elif path == '' or path.endswith('/'):
path += b
else:
path += '/' + b
return path
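# Illustrative behaviour of join() as implemented above:
#   join('/usr', 'lib', 'python')  ->  '/usr/lib/python'
#   join('/usr', '/opt', 'bin')    ->  '/opt/bin'      (absolute part discards the rest)
#   join('usr/', 'bin')            ->  'usr/bin'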
# Split a path in head (everything up to the last '/') and tail (the
# rest). If the path ends in '/', tail will be empty. If there is no
# '/' in the path, head will be empty.
# Trailing '/'es are stripped from head unless it is the root.
def split(p):
"""Split a pathname. Returns tuple "(head, tail)" where "tail" is
everything after the final slash. Either part may be empty."""
i = p.rfind('/') + 1
head, tail = p[:i], p[i:]
if head and head != '/'*len(head):
head = head.rstrip('/')
return head, tail
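# Illustrative behaviour of split() as implemented above:
#   split('/usr/bin/python')  ->  ('/usr/bin', 'python')
#   split('/usr/bin/')        ->  ('/usr/bin', '')
#   split('python')           ->  ('', 'python')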
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
return genericpath._splitext(p, sep, altsep, extsep)
splitext.__doc__ = genericpath._splitext.__doc__
# Split a pathname into a drive specification and the rest of the
# path. Useful on DOS/Windows/NT; on Unix, the drive is always empty.
def splitdrive(p):
"""Split a pathname into drive and path. On Posix, drive is always
empty."""
return '', p
# Return the tail (basename) part of a path, same as split(path)[1].
def basename(p):
"""Returns the final component of a pathname"""
i = p.rfind('/') + 1
return p[i:]
# Return the head (dirname) part of a path, same as split(path)[0].
def dirname(p):
"""Returns the directory component of a pathname"""
i = p.rfind('/') + 1
head = p[:i]
if head and head != '/'*len(head):
head = head.rstrip('/')
return head
def samestat(s1, s2):
"""Test whether two stat buffers reference the same file"""
return s1.st_ino == s2.st_ino and \
s1.st_dev == s2.st_dev
def normpath(path):
"""Normalize path, eliminating double slashes, etc."""
# Preserve unicode (if path is unicode)
slash, dot = (u'/', u'.') if isinstance(path, unicode) else ('/', '.')
if path == '':
return dot
initial_slashes = path.startswith('/')
# POSIX allows one or two initial slashes, but treats three or more
# as single slash.
if (initial_slashes and
path.startswith('//') and not path.startswith('///')):
initial_slashes = 2
comps = path.split('/')
new_comps = []
for comp in comps:
if comp in ('', '.'):
continue
if (comp != '..' or (not initial_slashes and not new_comps) or
(new_comps and new_comps[-1] == '..')):
new_comps.append(comp)
elif new_comps:
new_comps.pop()
comps = new_comps
path = slash.join(comps)
if initial_slashes:
path = slash*initial_slashes + path
return path or dot
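# Illustrative behaviour of normpath() as implemented above:
#   normpath('A//B/./C/../D')  ->  'A/B/D'
#   normpath('//foo/bar')      ->  '//foo/bar'   (two leading slashes are preserved)
#   normpath('')               ->  '.'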
def abspath(path):
"""Return an absolute path."""
return normpath(path)
def realpath(filename):
return abspath(filename)
#
# These funcs cannot be implemented in
# abstract pyjs, should be overloaded for
# pyv8 and pysm
#
def islink(path):
    raise NotImplementedError()
def lexists(path):
    raise NotImplementedError()
def samefile(f1, f2):
    raise NotImplementedError()
def sameopenfile(fp1, fp2):
    raise NotImplementedError()
def ismount(path):
    raise NotImplementedError()
def walk(top, func, arg):
    raise NotImplementedError()
def expanduser(path):
    raise NotImplementedError()
def expandvars(path):
    raise NotImplementedError()
supports_unicode_filenames = (sys.platform == 'darwin')
def relpath(path, start=curdir):
    raise NotImplementedError()
| {
"content_hash": "287619f8f068517cfdab7267f2cf1bec",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 78,
"avg_line_length": 29.236180904522612,
"alnum_prop": 0.6412856651770368,
"repo_name": "anandology/pyjamas",
"id": "9c1769096b9860419dc3b48f014979bcd13392a2",
"size": "5818",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyjs/src/pyjs/lib/pyjspath.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "325172"
},
{
"name": "PHP",
"bytes": "121841"
},
{
"name": "Python",
"bytes": "6383764"
},
{
"name": "Shell",
"bytes": "19448"
}
],
"symlink_target": ""
} |
import sys
import pytest
from collections import Counter
from io import BytesIO
import os
import tempfile
import warcio.utils as utils
from . import get_test_file
try:
from multidict import CIMultiDict, MultiDict
except ImportError:
pass
class TestUtils(object):
def test_headers_to_str_headers(self):
result = [('foo', 'bar'), ('baz', 'barf')]
header_dict = {'foo': b'bar', b'baz': 'barf'}
ret = utils.headers_to_str_headers(header_dict)
assert Counter(ret) == Counter(result)
aiohttp_raw_headers = ((b'foo', b'bar'), (b'baz', b'barf'))
assert Counter(utils.headers_to_str_headers(aiohttp_raw_headers)) == Counter(result)
@pytest.mark.skipif('multidict' not in sys.modules, reason='requires multidict be installed')
def test_multidict_headers_to_str_headers(self):
result = [('foo', 'bar'), ('baz', 'barf')]
aiohttp_headers = MultiDict(foo='bar', baz=b'barf')
ret = utils.headers_to_str_headers(aiohttp_headers)
assert Counter(ret) == Counter(result)
# This case-insensitive thingie titlecases the key
aiohttp_headers = CIMultiDict(Foo='bar', Baz=b'barf')
titlecase_result = [('Foo', 'bar'), ('Baz', 'barf')]
ret = utils.headers_to_str_headers(aiohttp_headers)
assert Counter(ret) == Counter(titlecase_result)
def test_open_or_default(self):
default_fh = BytesIO(b'NOTWARC/1.0\r\n')
with utils.open_or_default(get_test_file('example.warc'), 'rb', default_fh) as fh:
assert fh.readline().decode('utf-8') == 'WARC/1.0\r\n'
with utils.open_or_default(None, 'rb', default_fh) as fh:
assert fh.readline().decode('utf-8') == 'NOTWARC/1.0\r\n'
default_fh.seek(0)
with utils.open_or_default(b'-', 'rb', default_fh) as fh:
assert fh.readline().decode('utf-8') == 'NOTWARC/1.0\r\n'
default_fh.seek(0)
with utils.open_or_default(u'-', 'rb', default_fh) as fh:
assert fh.readline().decode('utf-8') == 'NOTWARC/1.0\r\n'
default_fh.seek(0)
with utils.open_or_default(default_fh, 'rb', None) as fh:
assert fh.readline().decode('utf-8') == 'NOTWARC/1.0\r\n'
def test_to_native_str(self):
# binary string
assert utils.to_native_str(b'10') == '10'
# unicode string
assert utils.to_native_str(u'10') == '10'
# default string
assert utils.to_native_str('10') == '10'
# not string, leave as is
assert utils.to_native_str(10) == 10
def test_open_exclusive(self):
temp_dir = tempfile.mkdtemp('warctest')
full_name = os.path.join(temp_dir, 'foo.txt')
with utils.open(full_name, 'xb') as fh:
fh.write(b'test\r\nfoo')
with pytest.raises(OSError):
with utils.open(full_name, 'xb') as fh:
fh.write(b'test\r\nfoo')
with utils.open(full_name, 'rb') as fh:
assert fh.read() == b'test\r\nfoo'
os.remove(full_name)
os.rmdir(temp_dir)
| {
"content_hash": "baf73926dfaa879e87422d3c1f70d4d7",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 97,
"avg_line_length": 33.16129032258065,
"alnum_prop": 0.5959792477302205,
"repo_name": "webrecorder/warcio",
"id": "b235e00e3002199df682ae1cdf810c372e68d8e6",
"size": "3084",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arc",
"bytes": "4012"
},
{
"name": "Python",
"bytes": "195932"
}
],
"symlink_target": ""
} |
import asyncio
import time
import logging
import json
from decimal import Decimal
from lbry.error import InvalidExchangeRateResponse, CurrencyConversionError
from lbry.utils import aiohttp_request
log = logging.getLogger(__name__)
CURRENCY_PAIRS = ["USDBTC", "BTCLBC"]
BITTREX_FEE = 0.0025
COINBASE_FEE = 0.0 # add fee
class ExchangeRate:
def __init__(self, market, spot, ts):
        if int(time.time()) - ts >= 600:
            raise ValueError('The timestamp is too old.')
        if spot <= 0:
            raise ValueError('Spot must be greater than 0.')
self.currency_pair = (market[0:3], market[3:6])
self.spot = spot
self.ts = ts
def __repr__(self):
out = "Currency pair:{}, spot:{}, ts:{}".format(
self.currency_pair, self.spot, self.ts)
return out
def as_dict(self):
return {'spot': self.spot, 'ts': self.ts}
class MarketFeed:
REQUESTS_TIMEOUT = 20
EXCHANGE_RATE_UPDATE_RATE_SEC = 300
def __init__(self, market: str, name: str, url: str, params, fee):
self.market = market
self.name = name
self.url = url
self.params = params
self.fee = fee
self.rate = None
self._task: asyncio.Task = None
self._online = True
def rate_is_initialized(self):
return self.rate is not None
def is_online(self):
return self._online
async def _make_request(self) -> str:
async with aiohttp_request('get', self.url, params=self.params) as response:
return (await response.read()).decode()
def _handle_response(self, response):
raise NotImplementedError()
def _subtract_fee(self, from_amount):
# increase amount to account for market fees
return from_amount / (1.0 - self.fee)
def _save_price(self, price):
log.debug("Saving price update %f for %s from %s" % (price, self.market, self.name))
self.rate = ExchangeRate(self.market, price, int(time.time()))
self._online = True
def _on_error(self, err):
log.warning("There was a problem updating %s exchange rate information from %s",
self.market, self.name)
log.debug("Exchange rate error (%s from %s): %s", self.market, self.name, err)
self._online = False
async def _update_price(self):
while True:
try:
response = await asyncio.wait_for(self._make_request(), self.REQUESTS_TIMEOUT)
self._save_price(self._subtract_fee(self._handle_response(response)))
except (asyncio.TimeoutError, InvalidExchangeRateResponse) as err:
self._on_error(err)
await asyncio.sleep(self.EXCHANGE_RATE_UPDATE_RATE_SEC)
def start(self):
if not self._task:
self._task = asyncio.create_task(self._update_price())
def stop(self):
if self._task and not self._task.done():
self._task.cancel()
self._task = None
class BittrexFeed(MarketFeed):
def __init__(self):
super().__init__(
"BTCLBC",
"Bittrex",
"https://bittrex.com/api/v1.1/public/getmarkethistory",
{'market': 'BTC-LBC', 'count': 50},
BITTREX_FEE
)
def _handle_response(self, response):
json_response = json.loads(response)
if 'result' not in json_response:
raise InvalidExchangeRateResponse(self.name, 'result not found')
trades = json_response['result']
if len(trades) == 0:
            raise InvalidExchangeRateResponse(self.name, 'trades not found')
totals = sum([i['Total'] for i in trades])
qtys = sum([i['Quantity'] for i in trades])
if totals <= 0 or qtys <= 0:
            raise InvalidExchangeRateResponse(self.name, 'quantities were not positive')
vwap = totals / qtys
return float(1.0 / vwap)
class LBRYioFeed(MarketFeed):
def __init__(self):
super().__init__(
"BTCLBC",
"lbry.com",
"https://api.lbry.com/lbc/exchange_rate",
{},
0.0,
)
def _handle_response(self, response):
json_response = json.loads(response)
if 'data' not in json_response:
raise InvalidExchangeRateResponse(self.name, 'result not found')
return 1.0 / json_response['data']['lbc_btc']
class LBRYioBTCFeed(MarketFeed):
def __init__(self):
super().__init__(
"USDBTC",
"lbry.com",
"https://api.lbry.com/lbc/exchange_rate",
{},
0.0,
)
def _handle_response(self, response):
try:
json_response = json.loads(response)
except ValueError:
raise InvalidExchangeRateResponse(self.name, "invalid rate response : %s" % response)
if 'data' not in json_response:
raise InvalidExchangeRateResponse(self.name, 'result not found')
return 1.0 / json_response['data']['btc_usd']
class CryptonatorBTCFeed(MarketFeed):
def __init__(self):
super().__init__(
"USDBTC",
"cryptonator.com",
"https://api.cryptonator.com/api/ticker/usd-btc",
{},
0.0,
)
def _handle_response(self, response):
try:
json_response = json.loads(response)
except ValueError:
raise InvalidExchangeRateResponse(self.name, "invalid rate response")
if 'ticker' not in json_response or len(json_response['ticker']) == 0 or \
'success' not in json_response or json_response['success'] is not True:
raise InvalidExchangeRateResponse(self.name, 'result not found')
return float(json_response['ticker']['price'])
class CryptonatorFeed(MarketFeed):
def __init__(self):
super().__init__(
"BTCLBC",
"cryptonator.com",
"https://api.cryptonator.com/api/ticker/btc-lbc",
{},
0.0,
)
def _handle_response(self, response):
try:
json_response = json.loads(response)
except ValueError:
raise InvalidExchangeRateResponse(self.name, "invalid rate response")
if 'ticker' not in json_response or len(json_response['ticker']) == 0 or \
'success' not in json_response or json_response['success'] is not True:
raise InvalidExchangeRateResponse(self.name, 'result not found')
return float(json_response['ticker']['price'])
class ExchangeRateManager:
def __init__(self):
self.market_feeds = [
LBRYioBTCFeed(),
LBRYioFeed(),
BittrexFeed(),
# CryptonatorBTCFeed(),
# CryptonatorFeed()
]
def start(self):
log.info("Starting exchange rate manager")
for feed in self.market_feeds:
feed.start()
def stop(self):
log.info("Stopping exchange rate manager")
for source in self.market_feeds:
source.stop()
def convert_currency(self, from_currency, to_currency, amount):
rates = [market.rate for market in self.market_feeds]
log.debug("Converting %f %s to %s, rates: %s" % (amount, from_currency, to_currency, rates))
if from_currency == to_currency:
return amount
for market in self.market_feeds:
if (market.rate_is_initialized() and market.is_online() and
market.rate.currency_pair == (from_currency, to_currency)):
return amount * Decimal(market.rate.spot)
for market in self.market_feeds:
if (market.rate_is_initialized() and market.is_online() and
market.rate.currency_pair[0] == from_currency):
return self.convert_currency(
market.rate.currency_pair[1], to_currency, amount * Decimal(market.rate.spot))
raise CurrencyConversionError(
f'Unable to convert {amount} from {from_currency} to {to_currency}')
def fee_dict(self):
return {market: market.rate.as_dict() for market in self.market_feeds}
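# Minimal usage sketch (must be called from a running asyncio event loop, since
# MarketFeed.start() schedules its update task with asyncio.create_task, and the
# feeds need a moment to fetch their first rates before converting):
#
#   manager = ExchangeRateManager()
#   manager.start()
#   ...
#   usd_to_lbc = manager.convert_currency("USD", "LBC", Decimal("1.00"))
#   manager.stop()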
| {
"content_hash": "f8a902a195897bcec94d8da8ae73b08a",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 100,
"avg_line_length": 34.12083333333333,
"alnum_prop": 0.5833435095860301,
"repo_name": "lbryio/lbry",
"id": "f15d1447868f7338b0fb376d087b97335308ce17",
"size": "8189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lbry/lbry/extras/daemon/exchange_rate_manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "3550"
},
{
"name": "HTML",
"bytes": "165150"
},
{
"name": "Makefile",
"bytes": "656"
},
{
"name": "Python",
"bytes": "2099913"
},
{
"name": "Shell",
"bytes": "1730"
}
],
"symlink_target": ""
} |
"""Tests for association.AcceptorRequestor."""
import logging
import time
import threading
import pytest
from pynetdicom import (
AE, VerificationPresentationContexts, PYNETDICOM_IMPLEMENTATION_UID,
PYNETDICOM_IMPLEMENTATION_VERSION, build_context, debug_logger
)
from pynetdicom.association import ServiceUser, Association
from pynetdicom.pdu_primitives import (
A_ASSOCIATE, MaximumLengthNotification, ImplementationClassUIDNotification,
ImplementationVersionNameNotification, SCP_SCU_RoleSelectionNegotiation,
UserIdentityNegotiation, SOPClassExtendedNegotiation,
SOPClassCommonExtendedNegotiation, AsynchronousOperationsWindowNegotiation
)
from pynetdicom.sop_class import VerificationSOPClass
from .dummy_c_scp import DummyVerificationSCP, DummyBaseSCP
#debug_logger()
class TestServiceUserAcceptor(object):
"""Tests for ServiceUser as acceptor."""
def setup(self):
self.assoc = Association(AE(), mode='requestor')
primitive = A_ASSOCIATE()
primitive.application_context_name = '1.2.840.10008.3.1.1.1'
primitive.calling_ae_title = b'LOCAL_AE_TITLE '
primitive.called_ae_title = b'REMOTE_AE_TITLE '
primitive.result = 0x00
primitive.result_source = 0x01
# Presentation Contexts
cx = build_context('1.2.840.10008.1.1')
cx.context_id = 1
primitive.presentation_context_definition_results_list = [cx]
# User Information items
item = MaximumLengthNotification()
item.maximum_length_received = 16383
primitive.user_information = [item]
item = ImplementationClassUIDNotification()
item.implementation_class_uid = '1.2.3'
primitive.user_information.append(item)
self.primitive_ac = primitive
primitive = A_ASSOCIATE()
primitive.result = 0x01
primitive.result_source = 0x01
primitive.diagnostic = 0x01
self.primitive_rj = primitive
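        # primitive_ac mimics a received A-ASSOCIATE (accept) primitive and
        # primitive_rj a received A-ASSOCIATE (reject); assigning either one to
        # ServiceUser.primitive in the tests below switches the user into its
        # non-writeable, post-negotiation state.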
def test_init(self):
"""Test new ServiceUser as acceptor."""
user = ServiceUser(self.assoc, mode='acceptor')
assert user.primitive is None
assert user.ae_title == b''
assert user.port is None
assert user.address == ''
assert user._contexts == []
assert user.mode == 'acceptor'
assert user.maximum_length == 16382
assert user.extended_negotiation == []
assert user.implementation_class_uid == PYNETDICOM_IMPLEMENTATION_UID
assert len(user.user_information) == 2
def test_bad_mode(self):
"""Test bad mode raises exception."""
with pytest.raises(ValueError, match=r"The 'mode' must be either"):
ServiceUser(None, 'something')
def test_no_implementation_class_uid(self):
"""Test correct return if no class UID."""
user = ServiceUser(self.assoc, mode='acceptor')
user._user_info = []
assert user.implementation_class_uid is None
def test_no_maximum_len(self):
"""Test correct reutrn if no maximum length."""
user = ServiceUser(self.assoc, mode='acceptor')
user._user_info = []
assert user.maximum_length is None
def test_accepted_common(self):
"""Test accepted_common_extended works correctly."""
user = ServiceUser(self.assoc, 'acceptor')
item = SOPClassCommonExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_uid = '2.3.4'
item.related_general_sop_class_identification = ['1.3.4']
user._common_ext = {item.sop_class_uid : item}
out = user.accepted_common_extended
assert out[item.sop_class_uid] == (
item.service_class_uid,
item.related_general_sop_class_identification
)
def test_assignment(self):
"""Test that assignment works OK,"""
user = ServiceUser(self.assoc, mode='acceptor')
assert user.primitive is None
assert user.ae_title == b''
assert user.port is None
assert user.address == ''
assert user._contexts == []
assert user.mode == 'acceptor'
assert user.maximum_length == 16382
assert user.extended_negotiation == []
assert user.implementation_class_uid == PYNETDICOM_IMPLEMENTATION_UID
user.ae_title = b'TEST_AE_TITLE'
user.port = 11112
user.address = '127.9.9.1'
user._contexts = [1]
user.maximum_length = 16383
assert user.ae_title == b'TEST_AE_TITLE'
assert user.port == 11112
assert user.address == '127.9.9.1'
assert user._contexts == [1]
assert user.maximum_length == 16383
def test_mode_assignment_raises(self):
"""Test that assigning mode after init raises exception."""
user = ServiceUser(self.assoc, mode='acceptor')
assert user.mode == 'acceptor'
with pytest.raises(AttributeError, match=r"can't set attribute"):
user.mode = 'requestor'
assert user.mode == 'acceptor'
def test_minimal_ac(self):
"""Test access with a miminal allowed A-ASSOCIATE (ac) primitive."""
user = ServiceUser(self.assoc, mode='acceptor')
user.primitive = self.primitive_ac
assert user.writeable is False
assert user.primitive == self.primitive_ac
assert user.mode == 'acceptor'
assert user.maximum_length == 16383
assert user.implementation_class_uid == '1.2.3'
assert user.implementation_version_name is None
assert user.asynchronous_operations == (1, 1)
assert user.role_selection == {}
assert user.sop_class_common_extended == {}
assert user.sop_class_extended == {}
assert user.user_identity is None
def test_minimal_rj(self):
"""Test access with a miminal allowed A-ASSOCIATE (rj) primitive."""
user = ServiceUser(self.assoc, mode='acceptor')
user.primitive = self.primitive_rj
assert user.writeable is False
assert user.primitive == self.primitive_rj
assert user.mode == 'acceptor'
assert user.maximum_length is None
assert user.implementation_class_uid is None
assert user.implementation_version_name is None
assert user.asynchronous_operations == (1, 1)
assert user.role_selection == {}
assert user.sop_class_common_extended == {}
assert user.sop_class_extended == {}
assert user.user_identity is None
def test_full(self):
"""Test access with a maximum allowed A-ASSOCIATE primitive."""
user = ServiceUser(self.assoc, mode='acceptor')
item = ImplementationVersionNameNotification()
item.implementation_version_name = 'VERSION_1'
self.primitive_ac.user_information.append(item)
for uid in ['1.2', '3.4']:
item = SCP_SCU_RoleSelectionNegotiation()
item.sop_class_uid = uid
item.scu_role = False
item.scp_role = True
self.primitive_ac.user_information.append(item)
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 2
item.maximum_number_operations_performed = 3
self.primitive_ac.user_information.append(item)
item = UserIdentityNegotiation()
item.user_identity_type = 0x01
item.positive_response_requested = True
item.primary_field = b'username'
self.primitive_ac.user_information.append(item)
item = SOPClassExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_application_information = b'SOME DATA'
self.primitive_ac.user_information.append(item)
user.primitive = self.primitive_ac
assert user.maximum_length == 16383
assert user.implementation_class_uid == '1.2.3'
assert user.implementation_version_name == b'VERSION_1'
assert user.asynchronous_operations == (2, 3)
roles = user.role_selection
assert len(roles) == 2
role = roles['1.2']
assert role.scu_role is False
assert role.scp_role is True
classes = user.sop_class_extended
assert len(classes) == 1
assert classes['1.2.3'] == b'SOME DATA'
assert user.sop_class_common_extended == {}
item = user.user_identity
assert item.user_identity_type == 0x01
assert item.primary_field == b'username'
def test_info(self):
"""Test the .info propoerty"""
user = ServiceUser(self.assoc, mode='acceptor')
info = user.info
assert info['port'] is None
assert info['mode'] == 'acceptor'
assert info['address'] == ''
assert info['ae_title'] == b''
with pytest.raises(KeyError):
info['pdv_size']
user.primitive = self.primitive_ac
assert user.info['pdv_size'] == 16383
def test_primitive_assignment_raises(self):
"""Test trying to set primitive parameters raises exception."""
user = ServiceUser(self.assoc, mode='acceptor')
user.primitive = self.primitive_ac
assert user.primitive == self.primitive_ac
assert user.mode == 'acceptor'
msg = r"Can't set the Maximum Length after negotiation has started"
with pytest.raises(RuntimeError, match=msg):
user.maximum_length = 16382
msg = (
r"Can't set the Implementation Class UID after negotiation "
r"has started"
)
with pytest.raises(RuntimeError, match=msg):
user.implementation_class_uid = '1.2.3'
msg = (
r"Can't set the Implementation Version Name after negotiation "
r"has started"
)
with pytest.raises(RuntimeError, match=msg):
user.implementation_version_name = '1.2.3'
with pytest.raises(AttributeError, match=r"can't set attribute"):
user.asynchronous_operations = (1, 1)
with pytest.raises(AttributeError, match=r"can't set attribute"):
user.role_selection = {}
with pytest.raises(AttributeError, match=r"can't set attribute"):
user.sop_class_common_extended = {}
with pytest.raises(AttributeError, match=r"can't set attribute"):
user.sop_class_extended = {}
with pytest.raises(AttributeError, match=r"can't set attribute"):
user.user_identity = 'test'
with pytest.raises(AttributeError, match=r"can't set attribute"):
user.extended_negotiation = []
def test_add_neg_pre(self):
"""Test adding negotiation items."""
user = ServiceUser(self.assoc, mode='acceptor')
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 2
item.maximum_number_operations_performed = 3
user.add_negotiation_item(item)
assert item in user.extended_negotiation
assert item in user._ext_neg[AsynchronousOperationsWindowNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
item = UserIdentityNegotiation()
item.user_identity_type = 0x01
item.positive_response_requested = True
item.primary_field = b'username'
user.add_negotiation_item(item)
assert item in user.extended_negotiation
assert item in user._ext_neg[UserIdentityNegotiation]
assert len(user.extended_negotiation) == 2
assert len(user.user_information) == 4
item = SCP_SCU_RoleSelectionNegotiation()
item.sop_class_uid = '1.2.3'
item.scu_role = True
item.scp_role = True
user.add_negotiation_item(item)
assert item in user.extended_negotiation
assert item in user._ext_neg[SCP_SCU_RoleSelectionNegotiation]
assert len(user.extended_negotiation) == 3
assert len(user.user_information) == 5
item = SOPClassExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_application_information = b'SOME DATA'
user.add_negotiation_item(item)
assert item in user.extended_negotiation
assert item in user._ext_neg[SOPClassExtendedNegotiation]
assert len(user.extended_negotiation) == 4
assert len(user.user_information) == 6
def test_add_neg_pre_raises(self):
"""Test that exception is raised if bad item added."""
user = ServiceUser(self.assoc, mode='acceptor')
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 2
item.maximum_number_operations_performed = 3
user.add_negotiation_item(item)
assert item in user.extended_negotiation
assert item in user._ext_neg[AsynchronousOperationsWindowNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
bad = MaximumLengthNotification()
bad.maximum_length_received = 12
msg = r"'item' is not a valid extended negotiation item"
with pytest.raises(TypeError, match=msg):
user.add_negotiation_item(bad)
assert item in user.extended_negotiation
assert item in user._ext_neg[AsynchronousOperationsWindowNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
def test_add_neg_post_raises(self):
"""Test adding items after negotiation."""
user = ServiceUser(self.assoc, mode='acceptor')
user.primitive = self.primitive_ac
assert user.writeable is False
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 2
item.maximum_number_operations_performed = 3
msg = r"Can't add extended negotiation items after negotiation "
with pytest.raises(RuntimeError, match=msg):
user.add_negotiation_item(item)
assert item not in user.extended_negotiation
assert item not in user._ext_neg[AsynchronousOperationsWindowNegotiation]
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
def test_async_ops_pre(self):
"""Test getting async ops item prior to negotiation."""
user = ServiceUser(self.assoc, mode='acceptor')
assert user.writeable is True
assert user.asynchronous_operations == (1, 1) # default
assert user.extended_negotiation == []
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 2
item.maximum_number_operations_performed = 3
user.add_negotiation_item(item)
assert user.asynchronous_operations == (2, 3)
assert item in user.extended_negotiation
assert item in user._ext_neg[AsynchronousOperationsWindowNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
def test_async_ops_post(self):
"""Test getting async ops item after negotiation."""
user = ServiceUser(self.assoc, mode='acceptor')
user.primitive = self.primitive_ac
assert user.writeable is False
assert user.asynchronous_operations == (1, 1) # default
assert user.extended_negotiation == []
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 2
item.maximum_number_operations_performed = 3
user.primitive.user_information.append(item)
assert user.asynchronous_operations == (2, 3)
assert user.extended_negotiation == [item]
assert item in user.extended_negotiation
assert item not in user._ext_neg[AsynchronousOperationsWindowNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
msg = r"Can't add extended negotiation items after negotiation"
with pytest.raises(RuntimeError, match=msg):
user.add_negotiation_item(item)
def test_ext_neg_pre(self):
"""Test extended_negotiation only returns negotiation items."""
user = ServiceUser(self.assoc, mode='acceptor')
assert user.writeable is True
assert user.extended_negotiation == []
assert len(user.user_information) == 2
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 2
item.maximum_number_operations_performed = 3
user.add_negotiation_item(item)
assert item in user.extended_negotiation
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
def test_ext_neg_post(self):
"""Test extended_negotiation only returns negotiation items."""
user = ServiceUser(self.assoc, mode='acceptor')
user.primitive = self.primitive_ac
assert user.writeable is False
assert user.extended_negotiation == []
assert len(user.user_information) == 2
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 2
item.maximum_number_operations_performed = 3
user.primitive.user_information.append(item)
assert item in user.extended_negotiation
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
def test_get_contexts_pre(self):
"""Test get_contexts prior to association."""
user = ServiceUser(self.assoc, mode='acceptor')
assert user.writeable is True
cxs = user.get_contexts('supported')
assert len(cxs) == 0
user.supported_contexts = [build_context('1.2.840.10008.1.1')]
cxs = user.get_contexts('supported')
assert len(cxs) == 1
assert cxs[0].abstract_syntax == '1.2.840.10008.1.1'
def test_get_contexts_pre_raises(self):
"""Test get_contexts prior to association raises if bad type."""
user = ServiceUser(self.assoc, mode='acceptor')
assert user.writeable is True
msg = r"Invalid 'cx_type', must be 'supported'"
with pytest.raises(ValueError, match=msg):
user.get_contexts('requested')
def test_get_contexts_post(self):
"""Test get_contexts after association."""
user = ServiceUser(self.assoc, mode='acceptor')
user.primitive = self.primitive_ac
assert user.writeable is False
cxs = user.get_contexts('supported')
assert len(cxs) == 0
cxs = user.get_contexts('pcdrl')
assert len(cxs) == 1
assert cxs[0].abstract_syntax == '1.2.840.10008.1.1'
def test_get_contexts_post_raises(self):
"""Test get_contexts after association raises if bad type."""
user = ServiceUser(self.assoc, mode='acceptor')
user.primitive = self.primitive_ac
assert user.writeable is False
msg = r"Invalid 'cx_type', must be 'supported' or 'pcdrl'"
with pytest.raises(ValueError, match=msg):
user.get_contexts('requested')
with pytest.raises(ValueError, match=msg):
user.get_contexts('pcdl')
def test_impl_class_pre(self):
"""Test implementation_class_uid prior to association."""
user = ServiceUser(self.assoc, mode='acceptor')
assert user.writeable is True
assert user.implementation_class_uid == PYNETDICOM_IMPLEMENTATION_UID
class_items = []
for item in user.user_information:
if isinstance(item, ImplementationClassUIDNotification):
assert item.implementation_class_uid == (
PYNETDICOM_IMPLEMENTATION_UID
)
class_items.append(item)
assert len(class_items) == 1
user.implementation_class_uid = '1.2.3'
assert user.implementation_class_uid == '1.2.3'
class_items = []
for item in user.user_information:
if isinstance(item, ImplementationClassUIDNotification):
assert item.implementation_class_uid == (
'1.2.3'
)
class_items.append(item)
assert len(class_items) == 1
def test_impl_class_post(self):
"""Test implementation_class_uid after association."""
user = ServiceUser(self.assoc, mode='acceptor')
user.primitive = self.primitive_ac
assert user.writeable is False
assert user.implementation_class_uid == '1.2.3'
msg = r"Can't set the Implementation Class UID after negotiation"
with pytest.raises(RuntimeError, match=msg):
user.implementation_class_uid = '1.2.3.4'
class_items = []
for item in user.user_information:
if isinstance(item, ImplementationClassUIDNotification):
assert item.implementation_class_uid == '1.2.3'
class_items.append(item)
assert len(class_items) == 1
def test_impl_version_pre(self):
"""Test implementation_version_name prior to association."""
user = ServiceUser(self.assoc, mode='acceptor')
assert user.writeable is True
assert user.implementation_version_name is None
class_items = []
for item in user.user_information:
if isinstance(item, ImplementationVersionNameNotification):
class_items.append(item)
assert len(class_items) == 0
ref = b'12345ABCDE123456'
user.implementation_version_name = ref
assert user.implementation_version_name == ref
class_items = []
user.implementation_version_name = ref
for item in user.user_information:
if isinstance(item, ImplementationVersionNameNotification):
class_items.append(item)
assert user.implementation_version_name == ref
assert len(class_items) == 1
def test_impl_version_post(self):
"""Test implementation_version_name after association."""
user = ServiceUser(self.assoc, mode='acceptor')
user.primitive = self.primitive_ac
assert user.writeable is False
assert user.implementation_version_name is None
ref = b'12345ABCDE123456'
msg = r"Can't set the Implementation Version Name after negotiation"
with pytest.raises(RuntimeError, match=msg):
user.implementation_version_name = ref
item = ImplementationVersionNameNotification()
item.implementation_version_name = ref
user.primitive.user_information.append(item)
class_items = []
for item in user.user_information:
if isinstance(item, ImplementationVersionNameNotification):
assert item.implementation_version_name == ref
class_items.append(item)
assert len(class_items) == 1
def test_is_acceptor(self):
"""Test is_acceptor"""
user = ServiceUser(self.assoc, mode='acceptor')
assert user.is_acceptor is True
def test_is_requestor(self):
"""Test is_requestor"""
user = ServiceUser(self.assoc, mode='acceptor')
assert user.is_requestor is False
def test_max_length_pre(self):
"""Test maximum_length prior to association."""
user = ServiceUser(self.assoc, mode='acceptor')
assert user.writeable is True
assert user.maximum_length == 16382
class_items = []
for item in user.user_information:
if isinstance(item, MaximumLengthNotification):
assert item.maximum_length_received == 16382
class_items.append(item)
assert len(class_items) == 1
user.maximum_length = 45
assert user.maximum_length == 45
class_items = []
for item in user.user_information:
if isinstance(item, MaximumLengthNotification):
assert item.maximum_length_received == 45
class_items.append(item)
assert len(class_items) == 1
def test_max_length_post(self):
"""Test maximum_length after association."""
user = ServiceUser(self.assoc, mode='acceptor')
user.primitive = self.primitive_ac
assert user.writeable is False
assert user.maximum_length == 16383
msg = r"Can't set the Maximum Length after negotiation"
with pytest.raises(RuntimeError, match=msg):
user.maximum_length = 45
class_items = []
for item in user.user_information:
if isinstance(item, MaximumLengthNotification):
assert item.maximum_length_received == 16383
class_items.append(item)
assert len(class_items) == 1
def test_requested_cx_pre(self):
"""Test requested_contexts prior to association."""
user = ServiceUser(self.assoc, mode='acceptor')
assert user.writeable is True
msg = r"Invalid 'cx_type', must be 'supported'"
with pytest.raises(ValueError, match=msg):
user.requested_contexts
msg = (
r"'requested_contexts' can only be set for the association "
r"requestor"
)
with pytest.raises(AttributeError, match=msg):
user.requested_contexts = [build_context('1.2.3')]
def test_requested_cx_post(self):
"""Test requested_contexts after association."""
user = ServiceUser(self.assoc, mode='acceptor')
user.primitive = self.primitive_ac
assert user.writeable is False
msg = r"Invalid 'cx_type', must be 'supported' or 'pcdrl'"
with pytest.raises(ValueError, match=msg):
user.requested_contexts
msg = r"Can't set the requested presentation contexts after"
with pytest.raises(RuntimeError, match=msg):
user.requested_contexts = [build_context('1.2.3')]
def test_rm_neg_pre(self):
"""Test removing negotiation items."""
user = ServiceUser(self.assoc, mode='acceptor')
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 2
item.maximum_number_operations_performed = 3
# Test removing non-existent item
user.remove_negotiation_item(item)
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
# Test removing existent item
user.add_negotiation_item(item)
assert item in user.extended_negotiation
assert item in user._ext_neg[AsynchronousOperationsWindowNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
user.remove_negotiation_item(item)
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
# Repeat for UserIdentity
item = UserIdentityNegotiation()
item.user_identity_type = 0x01
item.positive_response_requested = True
item.primary_field = b'username'
user.add_negotiation_item(item)
assert item in user.extended_negotiation
assert item in user._ext_neg[UserIdentityNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
user.remove_negotiation_item(item)
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
# Repeat for Role Selection
item = SCP_SCU_RoleSelectionNegotiation()
item.sop_class_uid = '1.2.3'
item.scu_role = True
item.scp_role = True
user.add_negotiation_item(item)
assert item in user.extended_negotiation
assert item in user._ext_neg[SCP_SCU_RoleSelectionNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
user.remove_negotiation_item(item)
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
# Repeat for SOP Class Extended
item = SOPClassExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_application_information = b'SOME DATA'
user.add_negotiation_item(item)
assert item in user.extended_negotiation
assert item in user._ext_neg[SOPClassExtendedNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
user.remove_negotiation_item(item)
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
# Try removing unknown type
msg = r"'item' is not a valid extended negotiation item"
with pytest.raises(TypeError, match=msg):
user.remove_negotiation_item(1234)
def test_rm_neg_post_raises(self):
"""Test adding items after negotiation."""
user = ServiceUser(self.assoc, mode='acceptor')
user.primitive = self.primitive_ac
assert user.writeable is False
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 2
item.maximum_number_operations_performed = 3
user.primitive.user_information.append(item)
msg = r"Can't remove extended negotiation items after negotiation "
with pytest.raises(RuntimeError, match=msg):
user.remove_negotiation_item(item)
assert item in user.extended_negotiation
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
def test_reset_neg_pre(self):
"""Test reset_negotiation_items prior to association."""
user = ServiceUser(self.assoc, mode='acceptor')
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
# Test with no items
user.reset_negotiation_items()
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 2
item.maximum_number_operations_performed = 3
user.add_negotiation_item(item)
item = SCP_SCU_RoleSelectionNegotiation()
item.sop_class_uid = '1.2.3'
item.scu_role = True
item.scp_role = True
user.add_negotiation_item(item)
item = UserIdentityNegotiation()
item.user_identity_type = 0x01
item.positive_response_requested = True
item.primary_field = b'username'
user.add_negotiation_item(item)
item = SOPClassExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_application_information = b'SOME DATA'
user.add_negotiation_item(item)
assert len(user.extended_negotiation) == 4
assert len(user.user_information) == 6
user.reset_negotiation_items()
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
assert len(user._ext_neg.keys()) == 4
def test_reset_neg_post_raises(self):
"""Test reset_negotiation_items after association raises."""
user = ServiceUser(self.assoc, mode='acceptor')
user.primitive = self.primitive_ac
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 2
item.maximum_number_operations_performed = 3
user.primitive.user_information.append(item)
item = SCP_SCU_RoleSelectionNegotiation()
item.sop_class_uid = '1.2.3'
item.scu_role = True
item.scp_role = True
user.primitive.user_information.append(item)
item = UserIdentityNegotiation()
item.user_identity_type = 0x01
item.positive_response_requested = True
item.primary_field = b'username'
user.primitive.user_information.append(item)
item = SOPClassExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_application_information = b'SOME DATA'
user.primitive.user_information.append(item)
item = SOPClassCommonExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_uid = '2.3.4'
item.related_general_sop_class_identification = ['1.3.4']
user.primitive.user_information.append(item)
assert len(user.extended_negotiation) == 4
assert len(user.user_information) == 7
msg = r"Can't reset the extended negotiation items after negotiation"
with pytest.raises(RuntimeError, match=msg):
user.reset_negotiation_items()
assert len(user.extended_negotiation) == 4
assert len(user.user_information) == 7
def test_role_pre(self):
"""Test role_selection prior to association."""
user = ServiceUser(self.assoc, mode='acceptor')
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
assert user.role_selection == {}
item = SCP_SCU_RoleSelectionNegotiation()
item.sop_class_uid = '1.2.3'
item.scu_role = True
item.scp_role = True
user.add_negotiation_item(item)
assert item in user.extended_negotiation
assert item in user._ext_neg[SCP_SCU_RoleSelectionNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
assert user.role_selection['1.2.3'] == item
def test_role_post(self):
"""Test role_selection prior to association."""
user = ServiceUser(self.assoc, mode='acceptor')
user.primitive = self.primitive_ac
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
assert user.role_selection == {}
item = SCP_SCU_RoleSelectionNegotiation()
item.sop_class_uid = '1.2.3'
item.scu_role = True
item.scp_role = True
user.primitive.user_information.append(item)
assert item in user.extended_negotiation
assert item not in user._ext_neg[SCP_SCU_RoleSelectionNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
assert user.role_selection['1.2.3'] == item
msg = r"Can't add extended negotiation items after negotiation"
with pytest.raises(RuntimeError, match=msg):
user.add_negotiation_item(item)
def test_sop_ext_pre(self):
"""Test sop_class_extended prior to association."""
user = ServiceUser(self.assoc, mode='acceptor')
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
assert user.sop_class_extended == {}
item = SOPClassExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_application_information = b'SOME DATA'
user.add_negotiation_item(item)
assert item in user.extended_negotiation
assert item in user._ext_neg[SOPClassExtendedNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
assert user.sop_class_extended['1.2.3'] == (
item.service_class_application_information
)
def test_sop_ext_post(self):
"""Test sop_class_extended prior to association."""
user = ServiceUser(self.assoc, mode='acceptor')
user.primitive = self.primitive_ac
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
assert user.sop_class_extended == {}
item = SOPClassExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_application_information = b'SOME DATA'
user.primitive.user_information.append(item)
assert item in user.extended_negotiation
assert item not in user._ext_neg[SOPClassExtendedNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
assert user.sop_class_extended['1.2.3'] == (
item.service_class_application_information
)
msg = r"Can't add extended negotiation items after negotiation"
with pytest.raises(RuntimeError, match=msg):
user.add_negotiation_item(item)
def test_sop_common_pre(self):
"""Test sop_class_common_extended prior to association."""
user = ServiceUser(self.assoc, mode='acceptor')
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
assert user.sop_class_common_extended == {}
item = SOPClassCommonExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_uid = '2.3.4'
item.related_general_sop_class_identification = ['1.3.4']
msg = r"'item' is not a valid extended negotiation item"
with pytest.raises(TypeError, match=msg):
user.add_negotiation_item(item)
assert item not in user.extended_negotiation
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
assert user.sop_class_common_extended == {}
def test_sop_common_post(self):
"""Test sop_class_common_extended prior to association."""
user = ServiceUser(self.assoc, mode='acceptor')
user.primitive = self.primitive_ac
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
assert user.sop_class_common_extended == {}
item = SOPClassCommonExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_uid = '2.3.4'
item.related_general_sop_class_identification = ['1.3.4']
user.primitive.user_information.append(item)
assert item not in user.extended_negotiation
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 3
assert user.sop_class_common_extended == {}
msg = r"Can't add extended negotiation items after negotiation"
with pytest.raises(RuntimeError, match=msg):
user.add_negotiation_item(item)
def test_supported_cx_pre(self):
"""Test supported_contexts prior to association."""
user = ServiceUser(self.assoc, mode='acceptor')
assert user.writeable is True
assert user.supported_contexts == []
cx_a = build_context('1.2.3')
cx_b = build_context('1.2.3.4')
user.supported_contexts = [cx_a, cx_b]
assert len(user.supported_contexts) == 2
assert cx_a in user.supported_contexts
assert cx_b in user.supported_contexts
def test_supported_cx_post(self):
"""Test supported_contexts after association."""
user = ServiceUser(self.assoc, mode='acceptor')
user.primitive = self.primitive_ac
assert user.writeable is False
assert user.supported_contexts == []
cx_a = build_context('1.2.3')
msg = r"Can't set the supported presentation contexts after"
with pytest.raises(RuntimeError, match=msg):
user.supported_contexts = [build_context('1.2.3')]
assert user.supported_contexts == []
def test_user_id_pre(self):
"""Test user_identity prior to association."""
user = ServiceUser(self.assoc, mode='acceptor')
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
assert user.user_identity is None
item = UserIdentityNegotiation()
item.user_identity_type = 0x01
item.positive_response_requested = True
item.primary_field = b'username'
user.add_negotiation_item(item)
assert item in user.extended_negotiation
assert item in user._ext_neg[UserIdentityNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
assert user.user_identity == item
def test_user_id_post(self):
"""Test user_identity prior to association."""
user = ServiceUser(self.assoc, mode='acceptor')
user.primitive = self.primitive_ac
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
assert user.user_identity is None
item = UserIdentityNegotiation()
item.user_identity_type = 0x01
item.positive_response_requested = True
item.primary_field = b'username'
user.primitive.user_information.append(item)
assert item in user.extended_negotiation
assert item not in user._ext_neg[UserIdentityNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
assert user.user_identity == item
msg = r"Can't add extended negotiation items after negotiation"
with pytest.raises(RuntimeError, match=msg):
user.add_negotiation_item(item)
def test_user_info_pre(self):
"""Test user_information prior to association."""
user = ServiceUser(self.assoc, mode='acceptor')
assert len(user.user_information) == 2
user.implementation_version_name = 'VERSION_1'
item = user.user_information[2]
assert isinstance(item, ImplementationVersionNameNotification)
assert item.implementation_version_name == b'VERSION_1'
assert len(user.user_information) == 3
for uid in ['1.2', '3.4']:
item = SCP_SCU_RoleSelectionNegotiation()
item.sop_class_uid = uid
item.scu_role = False
item.scp_role = True
user.add_negotiation_item(item)
assert item in user.user_information
assert len(user.user_information) == 5
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 2
item.maximum_number_operations_performed = 3
user.add_negotiation_item(item)
assert item in user.user_information
assert len(user.user_information) == 6
item = UserIdentityNegotiation()
item.user_identity_type = 0x01
item.positive_response_requested = True
item.primary_field = b'username'
user.add_negotiation_item(item)
assert item in user.user_information
assert len(user.user_information) == 7
item = SOPClassExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_application_information = b'SOME DATA'
user.add_negotiation_item(item)
assert item in user.user_information
assert len(user.user_information) == 8
item = SOPClassCommonExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_uid = '2.3.4'
item.related_general_sop_class_identification = ['1.3.4']
msg = r"'item' is not a valid extended negotiation item"
with pytest.raises(TypeError, match=msg):
user.add_negotiation_item(item)
assert item not in user.user_information
assert len(user.user_information) == 8
def test_user_info_post(self):
"""Test user_information prior to association."""
user = ServiceUser(self.assoc, mode='acceptor')
user.primitive = self.primitive_ac
assert len(user.user_information) == 2
item = ImplementationVersionNameNotification()
item.implementation_version_name = 'VERSION_1'
user.primitive.user_information.append(item)
assert item in user.user_information
assert len(user.user_information) == 3
for uid in ['1.2', '3.4']:
item = SCP_SCU_RoleSelectionNegotiation()
item.sop_class_uid = uid
item.scu_role = False
item.scp_role = True
user.primitive.user_information.append(item)
assert item in user.user_information
assert len(user.user_information) == 5
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 2
item.maximum_number_operations_performed = 3
user.primitive.user_information.append(item)
assert item in user.user_information
assert len(user.user_information) == 6
item = UserIdentityNegotiation()
item.user_identity_type = 0x01
item.positive_response_requested = True
item.primary_field = b'username'
user.primitive.user_information.append(item)
assert item in user.user_information
assert len(user.user_information) == 7
item = SOPClassExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_application_information = b'SOME DATA'
user.primitive.user_information.append(item)
assert item in user.user_information
assert len(user.user_information) == 8
item = SOPClassCommonExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_uid = '2.3.4'
item.related_general_sop_class_identification = ['1.3.4']
user.primitive.user_information.append(item)
assert item in user.user_information
assert len(user.user_information) == 9
def test_writeable(self):
"""Test writeable."""
user = ServiceUser(self.assoc, mode='acceptor')
assert user.writeable is True
user.primitive = self.primitive_ac
assert user.writeable is False
class TestServiceUserRequestor(object):
"""Tests for ServiceUser as requestor."""
def setup(self):
self.assoc = Association(AE(), mode='requestor')
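        # Minimal A-ASSOCIATE (request) primitive; the *_post tests assign it to
        # user.primitive to emulate a completed negotiation (writeable -> False)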
primitive = A_ASSOCIATE()
primitive.application_context_name = '1.2.840.10008.3.1.1.1'
primitive.calling_ae_title = b'LOCAL_AE_TITLE '
primitive.called_ae_title = b'REMOTE_AE_TITLE '
primitive.calling_presentation_address = ('127.0.0.1', 11112)
primitive.called_presentation_address = ('127.0.0.2', 11113)
# Presentation Contexts
cx = build_context('1.2.840.10008.1.1')
cx.context_id = 1
primitive.presentation_context_definition_list = [cx]
# User Information items
item = MaximumLengthNotification()
item.maximum_length_received = 16382
primitive.user_information = [item]
item = ImplementationClassUIDNotification()
item.implementation_class_uid = '1.2.3'
primitive.user_information.append(item)
self.primitive = primitive
def test_init(self):
"""Test new ServiceUser as requestor."""
user = ServiceUser(self.assoc, mode='requestor')
assert user.primitive is None
assert user.ae_title == b''
assert user.port is None
assert user.address == ''
assert user._contexts == []
assert user.mode == 'requestor'
assert user.maximum_length == 16382
assert user.implementation_class_uid == PYNETDICOM_IMPLEMENTATION_UID
assert user.extended_negotiation == []
assert len(user.user_information) == 2
def test_assignment(self):
"""Test that assignment works OK,"""
user = ServiceUser(self.assoc, mode='requestor')
assert user.primitive is None
assert user.ae_title == b''
assert user.port is None
assert user.address == ''
assert user._contexts == []
assert user.mode == 'requestor'
assert user.maximum_length == 16382
assert user.extended_negotiation == []
assert user.implementation_class_uid == PYNETDICOM_IMPLEMENTATION_UID
user.ae_title = b'TEST_AE_TITLE'
user.port = 11112
user.address = '127.9.9.1'
user._contexts = [1]
user.maximum_length = 16383
assert user.ae_title == b'TEST_AE_TITLE'
assert user.port == 11112
assert user.address == '127.9.9.1'
assert user._contexts == [1]
assert user.maximum_length == 16383
def test_mode_assignment_raises(self):
"""Test that assigning mode after init raises exception."""
user = ServiceUser(self.assoc, mode='requestor')
assert user.mode == 'requestor'
with pytest.raises(AttributeError, match=r"can't set attribute"):
user.mode = 'acceptor'
assert user.mode == 'requestor'
def test_minimal(self):
"""Test access with a miminal allowed A-ASSOCIATE primitive."""
user = ServiceUser(self.assoc, mode='requestor')
user.primitive = self.primitive
assert user.primitive == self.primitive
assert user.mode == 'requestor'
assert user.maximum_length == 16382
assert user.implementation_class_uid == '1.2.3'
assert user.implementation_version_name is None
assert user.asynchronous_operations == (1, 1)
assert user.role_selection == {}
assert user.sop_class_common_extended == {}
assert user.sop_class_extended == {}
assert user.user_identity is None
def test_full(self):
"""Test access with a maximum allowed A-ASSOCIATE primitive."""
user = ServiceUser(self.assoc, mode='requestor')
item = ImplementationVersionNameNotification()
item.implementation_version_name = 'VERSION_1'
self.primitive.user_information.append(item)
for uid in ['1.2', '3.4']:
item = SCP_SCU_RoleSelectionNegotiation()
item.sop_class_uid = uid
item.scu_role = False
item.scp_role = True
self.primitive.user_information.append(item)
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 2
item.maximum_number_operations_performed = 3
self.primitive.user_information.append(item)
item = UserIdentityNegotiation()
item.user_identity_type = 0x01
item.positive_response_requested = True
item.primary_field = b'username'
self.primitive.user_information.append(item)
item = SOPClassExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_application_information = b'SOME DATA'
self.primitive.user_information.append(item)
item = SOPClassCommonExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_uid = '2.3.4'
item.related_general_sop_class_identification = ['1.3.4']
self.primitive.user_information.append(item)
user.primitive = self.primitive
assert user.maximum_length == 16382
assert user.implementation_class_uid == '1.2.3'
assert user.implementation_version_name == b'VERSION_1'
assert user.asynchronous_operations == (2, 3)
roles = user.role_selection
assert len(roles) == 2
role = roles['1.2']
assert role.scu_role is False
assert role.scp_role is True
classes = user.sop_class_extended
assert len(classes) == 1
assert classes['1.2.3'] == b'SOME DATA'
classes = user.sop_class_common_extended
assert len(classes) == 1
assert classes['1.2.3'].service_class_uid == '2.3.4'
assert classes['1.2.3'].related_general_sop_class_identification == [
'1.3.4'
]
item = user.user_identity
assert item.user_identity_type == 0x01
assert item.primary_field == b'username'
def test_info(self):
"""Test the .info propoerty"""
user = ServiceUser(self.assoc, mode='requestor')
info = user.info
assert info['port'] is None
assert info['mode'] == 'requestor'
assert info['address'] == ''
assert info['ae_title'] == b''
with pytest.raises(KeyError):
info['pdv_size']
user.primitive = self.primitive
assert user.info['pdv_size'] == 16382
def test_primitive_assignment_raises(self):
"""Test trying to set primitive parameters raises exception."""
user = ServiceUser(self.assoc, mode='requestor')
user.primitive = self.primitive
assert user.primitive == self.primitive
assert user.mode == 'requestor'
msg = r"Can't set the Maximum Length after negotiation has started"
with pytest.raises(RuntimeError, match=msg):
user.maximum_length = 16382
msg = (
r"Can't set the Implementation Class UID after negotiation "
r"has started"
)
with pytest.raises(RuntimeError, match=msg):
user.implementation_class_uid = '1.2.3'
msg = (
r"Can't set the Implementation Version Name after negotiation "
r"has started"
)
with pytest.raises(RuntimeError, match=msg):
user.implementation_version_name = '1.2.3'
with pytest.raises(AttributeError, match=r"can't set attribute"):
user.asynchronous_operations = (1, 1)
with pytest.raises(AttributeError, match=r"can't set attribute"):
user.role_selection = {}
with pytest.raises(AttributeError, match=r"can't set attribute"):
user.sop_class_common_extended = {}
with pytest.raises(AttributeError, match=r"can't set attribute"):
user.sop_class_extended = {}
with pytest.raises(AttributeError, match=r"can't set attribute"):
user.user_identity = 'test'
with pytest.raises(AttributeError, match=r"can't set attribute"):
user.extended_negotiation = []
def test_accepted_common_raises(self):
"""Test trying to get the accepted common ext items raises."""
user = ServiceUser(self.assoc, mode='requestor')
msg = (
r"'accepted_common_extended' is only available for the "
r"'acceptor'"
)
with pytest.raises(RuntimeError, match=msg):
user.accepted_common_extended()
def test_add_neg_pre(self):
"""Test adding negotiation items."""
user = ServiceUser(self.assoc, mode='requestor')
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 2
item.maximum_number_operations_performed = 3
user.add_negotiation_item(item)
assert item in user.extended_negotiation
assert item in user._ext_neg[AsynchronousOperationsWindowNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
item = UserIdentityNegotiation()
item.user_identity_type = 0x01
item.positive_response_requested = True
item.primary_field = b'username'
user.add_negotiation_item(item)
assert item in user.extended_negotiation
assert item in user._ext_neg[UserIdentityNegotiation]
assert len(user.extended_negotiation) == 2
assert len(user.user_information) == 4
item = SCP_SCU_RoleSelectionNegotiation()
item.sop_class_uid = '1.2.3'
item.scu_role = True
item.scp_role = True
user.add_negotiation_item(item)
assert item in user.extended_negotiation
assert item in user._ext_neg[SCP_SCU_RoleSelectionNegotiation]
assert len(user.extended_negotiation) == 3
assert len(user.user_information) == 5
item = SOPClassExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_application_information = b'SOME DATA'
user.add_negotiation_item(item)
assert item in user.extended_negotiation
assert item in user._ext_neg[SOPClassExtendedNegotiation]
assert len(user.extended_negotiation) == 4
assert len(user.user_information) == 6
item = SOPClassCommonExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_uid = '2.3.4'
item.related_general_sop_class_identification = ['1.3.4']
user.add_negotiation_item(item)
assert item in user.extended_negotiation
assert item in user._ext_neg[SOPClassCommonExtendedNegotiation]
assert len(user.extended_negotiation) == 5
assert len(user.user_information) == 7
def test_add_neg_pre_raises(self):
"""Test that exception is raised if bad item added."""
user = ServiceUser(self.assoc, mode='requestor')
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 2
item.maximum_number_operations_performed = 3
user.add_negotiation_item(item)
assert item in user.extended_negotiation
assert item in user._ext_neg[AsynchronousOperationsWindowNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
bad = MaximumLengthNotification()
bad.maximum_length_received = 12
msg = r"'item' is not a valid extended negotiation item"
with pytest.raises(TypeError, match=msg):
user.add_negotiation_item(bad)
assert item in user.extended_negotiation
assert item in user._ext_neg[AsynchronousOperationsWindowNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
def test_add_neg_post_raises(self):
"""Test adding items after negotiation."""
user = ServiceUser(self.assoc, mode='requestor')
user.primitive = self.primitive
assert user.writeable is False
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 2
item.maximum_number_operations_performed = 3
msg = r"Can't add extended negotiation items after negotiation "
with pytest.raises(RuntimeError, match=msg):
user.add_negotiation_item(item)
assert item not in user.extended_negotiation
assert item not in user._ext_neg[AsynchronousOperationsWindowNegotiation]
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
def test_async_ops_pre(self):
"""Test getting async ops item prior to negotiation."""
user = ServiceUser(self.assoc, mode='requestor')
assert user.writeable is True
assert user.asynchronous_operations == (1, 1) # default
assert user.extended_negotiation == []
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 2
item.maximum_number_operations_performed = 3
user.add_negotiation_item(item)
assert user.asynchronous_operations == (2, 3)
assert item in user.extended_negotiation
assert item in user._ext_neg[AsynchronousOperationsWindowNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
def test_async_ops_post(self):
"""Test getting async ops item after negotiation."""
user = ServiceUser(self.assoc, mode='requestor')
user.primitive = self.primitive
assert user.writeable is False
assert user.asynchronous_operations == (1, 1) # default
assert user.extended_negotiation == []
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 2
item.maximum_number_operations_performed = 3
self.primitive.user_information.append(item)
assert user.asynchronous_operations == (2, 3)
assert user.extended_negotiation == [item]
assert item in user.extended_negotiation
assert item not in user._ext_neg[AsynchronousOperationsWindowNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
msg = r"Can't add extended negotiation items after negotiation"
with pytest.raises(RuntimeError, match=msg):
user.add_negotiation_item(item)
def test_ext_neg_pre(self):
"""Test extended_negotiation only returns negotiation items."""
user = ServiceUser(self.assoc, mode='requestor')
assert user.writeable is True
assert user.extended_negotiation == []
assert len(user.user_information) == 2
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 2
item.maximum_number_operations_performed = 3
user.add_negotiation_item(item)
assert item in user.extended_negotiation
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
def test_ext_neg_post(self):
"""Test extended_negotiation only returns negotiation items."""
user = ServiceUser(self.assoc, mode='requestor')
user.primitive = self.primitive
assert user.writeable is False
assert user.extended_negotiation == []
assert len(user.user_information) == 2
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 2
item.maximum_number_operations_performed = 3
self.primitive.user_information.append(item)
assert item in user.extended_negotiation
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
def test_get_contexts_pre(self):
"""Test get_contexts prior to association."""
user = ServiceUser(self.assoc, mode='requestor')
assert user.writeable is True
cxs = user.get_contexts('requested')
assert len(cxs) == 0
user.requested_contexts = [build_context('1.2.840.10008.1.1')]
cxs = user.get_contexts('requested')
assert len(cxs) == 1
assert cxs[0].abstract_syntax == '1.2.840.10008.1.1'
def test_get_contexts_pre_raises(self):
"""Test get_contexts prior to association raises if bad type."""
user = ServiceUser(self.assoc, mode='requestor')
assert user.writeable is True
msg = r"Invalid 'cx_type', must be 'requested'"
with pytest.raises(ValueError, match=msg):
user.get_contexts('supported')
def test_get_contexts_post(self):
"""Test get_contexts after association."""
user = ServiceUser(self.assoc, mode='requestor')
user.primitive = self.primitive
assert user.writeable is False
cxs = user.get_contexts('requested')
assert len(cxs) == 0
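        # 'pcdl' -> contexts from the primitive's Presentation Context Definition List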
cxs = user.get_contexts('pcdl')
assert len(cxs) == 1
assert cxs[0].abstract_syntax == '1.2.840.10008.1.1'
def test_get_contexts_post_raises(self):
"""Test get_contexts after association raises if bad type."""
user = ServiceUser(self.assoc, mode='requestor')
user.primitive = self.primitive
assert user.writeable is False
msg = r"Invalid 'cx_type', must be 'requested' or 'pcdl'"
with pytest.raises(ValueError, match=msg):
user.get_contexts('supported')
with pytest.raises(ValueError, match=msg):
user.get_contexts('pcdrl')
def test_impl_class_pre(self):
"""Test implementation_class_uid prior to association."""
user = ServiceUser(self.assoc, mode='requestor')
assert user.writeable is True
assert user.implementation_class_uid == PYNETDICOM_IMPLEMENTATION_UID
class_items = []
for item in user.user_information:
if isinstance(item, ImplementationClassUIDNotification):
assert item.implementation_class_uid == (
PYNETDICOM_IMPLEMENTATION_UID
)
class_items.append(item)
assert len(class_items) == 1
user.implementation_class_uid = '1.2.3'
assert user.implementation_class_uid == '1.2.3'
class_items = []
for item in user.user_information:
if isinstance(item, ImplementationClassUIDNotification):
assert item.implementation_class_uid == (
'1.2.3'
)
class_items.append(item)
assert len(class_items) == 1
def test_impl_class_post(self):
"""Test implementation_class_uid after association."""
user = ServiceUser(self.assoc, mode='requestor')
user.primitive = self.primitive
assert user.writeable is False
assert user.implementation_class_uid == '1.2.3'
msg = r"Can't set the Implementation Class UID after negotiation"
with pytest.raises(RuntimeError, match=msg):
user.implementation_class_uid = '1.2.3.4'
class_items = []
for item in user.user_information:
if isinstance(item, ImplementationClassUIDNotification):
assert item.implementation_class_uid == '1.2.3'
class_items.append(item)
assert len(class_items) == 1
def test_impl_version_pre(self):
"""Test implementation_version_name prior to association."""
user = ServiceUser(self.assoc, mode='requestor')
assert user.writeable is True
assert user.implementation_version_name is None
class_items = []
for item in user.user_information:
if isinstance(item, ImplementationVersionNameNotification):
class_items.append(item)
assert len(class_items) == 0
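        # 16 characters - the maximum length allowed for an Implementation Version Name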
ref = b'12345ABCDE123456'
user.implementation_version_name = ref
assert user.implementation_version_name == ref
class_items = []
user.implementation_version_name = ref
for item in user.user_information:
if isinstance(item, ImplementationVersionNameNotification):
class_items.append(item)
assert user.implementation_version_name == ref
assert len(class_items) == 1
def test_impl_version_post(self):
"""Test implementation_version_name after association."""
user = ServiceUser(self.assoc, mode='requestor')
user.primitive = self.primitive
assert user.writeable is False
assert user.implementation_version_name is None
ref = b'12345ABCDE123456'
msg = r"Can't set the Implementation Version Name after negotiation"
with pytest.raises(RuntimeError, match=msg):
user.implementation_version_name = ref
item = ImplementationVersionNameNotification()
item.implementation_version_name = ref
self.primitive.user_information.append(item)
class_items = []
for item in user.user_information:
if isinstance(item, ImplementationVersionNameNotification):
assert item.implementation_version_name == ref
class_items.append(item)
assert len(class_items) == 1
def test_is_acceptor(self):
"""Test is_acceptor"""
user = ServiceUser(self.assoc, mode='requestor')
assert user.is_acceptor is False
def test_is_requestor(self):
"""Test is_requestor"""
user = ServiceUser(self.assoc, mode='requestor')
assert user.is_requestor is True
def test_max_length_pre(self):
"""Test maximum_length prior to association."""
user = ServiceUser(self.assoc, mode='requestor')
assert user.writeable is True
assert user.maximum_length == 16382
class_items = []
for item in user.user_information:
if isinstance(item, MaximumLengthNotification):
assert item.maximum_length_received == 16382
class_items.append(item)
assert len(class_items) == 1
user.maximum_length = 45
assert user.maximum_length == 45
class_items = []
for item in user.user_information:
if isinstance(item, MaximumLengthNotification):
assert item.maximum_length_received == 45
class_items.append(item)
assert len(class_items) == 1
def test_max_length_post(self):
"""Test maximum_length after association."""
user = ServiceUser(self.assoc, mode='requestor')
user.primitive = self.primitive
assert user.writeable is False
assert user.maximum_length == 16382
msg = r"Can't set the Maximum Length after negotiation"
with pytest.raises(RuntimeError, match=msg):
user.maximum_length = 45
class_items = []
for item in user.user_information:
if isinstance(item, MaximumLengthNotification):
assert item.maximum_length_received == 16382
class_items.append(item)
assert len(class_items) == 1
def test_requested_cx_pre(self):
"""Test requested_contexts prior to association."""
user = ServiceUser(self.assoc, mode='requestor')
assert user.writeable is True
assert user.requested_contexts == []
cx_a = build_context('1.2.3')
cx_b = build_context('1.2.3.4')
user.requested_contexts = [cx_a, cx_b]
assert len(user.requested_contexts) == 2
assert cx_a in user.requested_contexts
assert cx_b in user.requested_contexts
def test_requested_cx_post(self):
"""Test requested_contexts after association."""
user = ServiceUser(self.assoc, mode='requestor')
user.primitive = self.primitive
assert user.writeable is False
assert user.requested_contexts == []
cx_a = build_context('1.2.3')
msg = r"Can't set the requested presentation contexts after"
with pytest.raises(RuntimeError, match=msg):
user.requested_contexts = [build_context('1.2.3')]
assert user.requested_contexts == []
def test_rm_neg_pre(self):
"""Test removing negotiation items."""
user = ServiceUser(self.assoc, mode='requestor')
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 2
item.maximum_number_operations_performed = 3
# Test removing non-existent item
user.remove_negotiation_item(item)
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
# Test removing existent item
user.add_negotiation_item(item)
assert item in user.extended_negotiation
assert item in user._ext_neg[AsynchronousOperationsWindowNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
user.remove_negotiation_item(item)
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
# Repeat for UserIdentity
item = UserIdentityNegotiation()
item.user_identity_type = 0x01
item.positive_response_requested = True
item.primary_field = b'username'
user.add_negotiation_item(item)
assert item in user.extended_negotiation
assert item in user._ext_neg[UserIdentityNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
user.remove_negotiation_item(item)
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
# Repeat for Role Selection
item = SCP_SCU_RoleSelectionNegotiation()
item.sop_class_uid = '1.2.3'
item.scu_role = True
item.scp_role = True
user.add_negotiation_item(item)
assert item in user.extended_negotiation
assert item in user._ext_neg[SCP_SCU_RoleSelectionNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
user.remove_negotiation_item(item)
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
# Repeat for SOP Class Extended
item = SOPClassExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_application_information = b'SOME DATA'
user.add_negotiation_item(item)
assert item in user.extended_negotiation
assert item in user._ext_neg[SOPClassExtendedNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
user.remove_negotiation_item(item)
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
# Repeat for SOP Class Common
item = SOPClassCommonExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_uid = '2.3.4'
item.related_general_sop_class_identification = ['1.3.4']
user.add_negotiation_item(item)
assert item in user.extended_negotiation
assert item in user._ext_neg[SOPClassCommonExtendedNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
user.remove_negotiation_item(item)
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
# Try removing unknown type
msg = r"'item' is not a valid extended negotiation item"
with pytest.raises(TypeError, match=msg):
user.remove_negotiation_item(1234)
def test_rm_neg_post_raises(self):
"""Test adding items after negotiation."""
user = ServiceUser(self.assoc, mode='requestor')
user.primitive = self.primitive
assert user.writeable is False
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 2
item.maximum_number_operations_performed = 3
self.primitive.user_information.append(item)
msg = r"Can't remove extended negotiation items after negotiation "
with pytest.raises(RuntimeError, match=msg):
user.remove_negotiation_item(item)
assert item in user.extended_negotiation
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
def test_reset_neg_pre(self):
"""Test reset_negotiation_items prior to association."""
user = ServiceUser(self.assoc, mode='requestor')
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
# Test with no items
user.reset_negotiation_items()
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 2
item.maximum_number_operations_performed = 3
user.add_negotiation_item(item)
item = SCP_SCU_RoleSelectionNegotiation()
item.sop_class_uid = '1.2.3'
item.scu_role = True
item.scp_role = True
user.add_negotiation_item(item)
item = UserIdentityNegotiation()
item.user_identity_type = 0x01
item.positive_response_requested = True
item.primary_field = b'username'
user.add_negotiation_item(item)
item = SOPClassExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_application_information = b'SOME DATA'
user.add_negotiation_item(item)
item = SOPClassCommonExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_uid = '2.3.4'
item.related_general_sop_class_identification = ['1.3.4']
user.add_negotiation_item(item)
assert len(user.extended_negotiation) == 5
assert len(user.user_information) == 7
user.reset_negotiation_items()
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
assert len(user._ext_neg.keys()) == 5
def test_reset_neg_post_raises(self):
"""Test reset_negotiation_items after association raises."""
user = ServiceUser(self.assoc, mode='requestor')
user.primitive = self.primitive
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 2
item.maximum_number_operations_performed = 3
user.primitive.user_information.append(item)
item = SCP_SCU_RoleSelectionNegotiation()
item.sop_class_uid = '1.2.3'
item.scu_role = True
item.scp_role = True
user.primitive.user_information.append(item)
item = UserIdentityNegotiation()
item.user_identity_type = 0x01
item.positive_response_requested = True
item.primary_field = b'username'
user.primitive.user_information.append(item)
item = SOPClassExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_application_information = b'SOME DATA'
user.primitive.user_information.append(item)
item = SOPClassCommonExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_uid = '2.3.4'
item.related_general_sop_class_identification = ['1.3.4']
user.primitive.user_information.append(item)
assert len(user.extended_negotiation) == 5
assert len(user.user_information) == 7
msg = r"Can't reset the extended negotiation items after negotiation"
with pytest.raises(RuntimeError, match=msg):
user.reset_negotiation_items()
assert len(user.extended_negotiation) == 5
assert len(user.user_information) == 7
def test_role_pre(self):
"""Test role_selection prior to association."""
user = ServiceUser(self.assoc, mode='requestor')
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
assert user.role_selection == {}
item = SCP_SCU_RoleSelectionNegotiation()
item.sop_class_uid = '1.2.3'
item.scu_role = True
item.scp_role = True
user.add_negotiation_item(item)
assert item in user.extended_negotiation
assert item in user._ext_neg[SCP_SCU_RoleSelectionNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
assert user.role_selection['1.2.3'] == item
def test_role_post(self):
"""Test role_selection prior to association."""
user = ServiceUser(self.assoc, mode='requestor')
user.primitive = self.primitive
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
assert user.role_selection == {}
item = SCP_SCU_RoleSelectionNegotiation()
item.sop_class_uid = '1.2.3'
item.scu_role = True
item.scp_role = True
user.primitive.user_information.append(item)
assert item in user.extended_negotiation
assert item not in user._ext_neg[SCP_SCU_RoleSelectionNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
assert user.role_selection['1.2.3'] == item
msg = r"Can't add extended negotiation items after negotiation"
with pytest.raises(RuntimeError, match=msg):
user.add_negotiation_item(item)
def test_sop_ext_pre(self):
"""Test sop_class_extended prior to association."""
user = ServiceUser(self.assoc, mode='requestor')
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
assert user.sop_class_extended == {}
item = SOPClassExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_application_information = b'SOME DATA'
user.add_negotiation_item(item)
assert item in user.extended_negotiation
assert item in user._ext_neg[SOPClassExtendedNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
assert user.sop_class_extended['1.2.3'] == (
item.service_class_application_information
)
def test_sop_ext_post(self):
"""Test sop_class_extended prior to association."""
user = ServiceUser(self.assoc, mode='requestor')
user.primitive = self.primitive
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
assert user.sop_class_extended == {}
item = SOPClassExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_application_information = b'SOME DATA'
user.primitive.user_information.append(item)
assert item in user.extended_negotiation
assert item not in user._ext_neg[SOPClassExtendedNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
assert user.sop_class_extended['1.2.3'] == (
item.service_class_application_information
)
msg = r"Can't add extended negotiation items after negotiation"
with pytest.raises(RuntimeError, match=msg):
user.add_negotiation_item(item)
def test_sop_common_pre(self):
"""Test sop_class_common_extended prior to association."""
user = ServiceUser(self.assoc, mode='requestor')
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
assert user.sop_class_common_extended == {}
item = SOPClassCommonExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_uid = '2.3.4'
item.related_general_sop_class_identification = ['1.3.4']
user.add_negotiation_item(item)
assert item in user.extended_negotiation
assert item in user._ext_neg[SOPClassCommonExtendedNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
assert user.sop_class_common_extended['1.2.3'] == item
def test_sop_common_post(self):
"""Test sop_class_common_extended prior to association."""
user = ServiceUser(self.assoc, mode='requestor')
user.primitive = self.primitive
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
assert user.sop_class_common_extended == {}
item = SOPClassCommonExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_uid = '2.3.4'
item.related_general_sop_class_identification = ['1.3.4']
user.primitive.user_information.append(item)
assert item in user.extended_negotiation
assert item not in user._ext_neg[SOPClassCommonExtendedNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
assert user.sop_class_common_extended['1.2.3'] == item
msg = r"Can't add extended negotiation items after negotiation"
with pytest.raises(RuntimeError, match=msg):
user.add_negotiation_item(item)
def test_supported_cx_pre(self):
"""Test supported_contexts prior to association."""
user = ServiceUser(self.assoc, mode='requestor')
assert user.writeable is True
msg = r"Invalid 'cx_type', must be 'requested'"
with pytest.raises(ValueError, match=msg):
assert user.supported_contexts == []
msg = (
r"'supported_contexts' can only be set for the association "
r"acceptor"
)
with pytest.raises(AttributeError, match=msg):
user.supported_contexts = 'bluh'
def test_supported_cx_post(self):
"""Test supported_contexts after association."""
user = ServiceUser(self.assoc, mode='requestor')
user.primitive = self.primitive
assert user.writeable is False
msg = r"Invalid 'cx_type', must be 'requested' or 'pcdl'"
with pytest.raises(ValueError, match=msg):
assert user.supported_contexts == []
def test_user_id_pre(self):
"""Test user_identity prior to association."""
user = ServiceUser(self.assoc, mode='requestor')
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
assert user.user_identity is None
item = UserIdentityNegotiation()
item.user_identity_type = 0x01
item.positive_response_requested = True
item.primary_field = b'username'
user.add_negotiation_item(item)
assert item in user.extended_negotiation
assert item in user._ext_neg[UserIdentityNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
assert user.user_identity == item
def test_user_id_post(self):
"""Test user_identity prior to association."""
user = ServiceUser(self.assoc, mode='requestor')
user.primitive = self.primitive
assert len(user.extended_negotiation) == 0
assert len(user.user_information) == 2
assert user.user_identity is None
item = UserIdentityNegotiation()
item.user_identity_type = 0x01
item.positive_response_requested = True
item.primary_field = b'username'
user.primitive.user_information.append(item)
assert item in user.extended_negotiation
assert item not in user._ext_neg[UserIdentityNegotiation]
assert len(user.extended_negotiation) == 1
assert len(user.user_information) == 3
assert user.user_identity == item
msg = r"Can't add extended negotiation items after negotiation"
with pytest.raises(RuntimeError, match=msg):
user.add_negotiation_item(item)
def test_user_info_pre(self):
"""Test user_information prior to association."""
user = ServiceUser(self.assoc, mode='requestor')
assert len(user.user_information) == 2
user.implementation_version_name = 'VERSION_1'
item = user.user_information[2]
assert isinstance(item, ImplementationVersionNameNotification)
assert item.implementation_version_name == b'VERSION_1'
assert len(user.user_information) == 3
for uid in ['1.2', '3.4']:
item = SCP_SCU_RoleSelectionNegotiation()
item.sop_class_uid = uid
item.scu_role = False
item.scp_role = True
user.add_negotiation_item(item)
assert item in user.user_information
assert len(user.user_information) == 5
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 2
item.maximum_number_operations_performed = 3
user.add_negotiation_item(item)
assert item in user.user_information
assert len(user.user_information) == 6
item = UserIdentityNegotiation()
item.user_identity_type = 0x01
item.positive_response_requested = True
item.primary_field = b'username'
user.add_negotiation_item(item)
assert item in user.user_information
assert len(user.user_information) == 7
item = SOPClassExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_application_information = b'SOME DATA'
user.add_negotiation_item(item)
assert item in user.user_information
assert len(user.user_information) == 8
item = SOPClassCommonExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_uid = '2.3.4'
item.related_general_sop_class_identification = ['1.3.4']
user.add_negotiation_item(item)
assert item in user.user_information
assert len(user.user_information) == 9
def test_user_info_post(self):
"""Test user_information prior to association."""
user = ServiceUser(self.assoc, mode='requestor')
user.primitive = self.primitive
assert len(user.user_information) == 2
item = ImplementationVersionNameNotification()
item.implementation_version_name = 'VERSION_1'
user.primitive.user_information.append(item)
assert item in user.user_information
assert len(user.user_information) == 3
for uid in ['1.2', '3.4']:
item = SCP_SCU_RoleSelectionNegotiation()
item.sop_class_uid = uid
item.scu_role = False
item.scp_role = True
user.primitive.user_information.append(item)
assert item in user.user_information
assert len(user.user_information) == 5
item = AsynchronousOperationsWindowNegotiation()
item.maximum_number_operations_invoked = 2
item.maximum_number_operations_performed = 3
user.primitive.user_information.append(item)
assert item in user.user_information
assert len(user.user_information) == 6
item = UserIdentityNegotiation()
item.user_identity_type = 0x01
item.positive_response_requested = True
item.primary_field = b'username'
user.primitive.user_information.append(item)
assert item in user.user_information
assert len(user.user_information) == 7
item = SOPClassExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_application_information = b'SOME DATA'
user.primitive.user_information.append(item)
assert item in user.user_information
assert len(user.user_information) == 8
item = SOPClassCommonExtendedNegotiation()
item.sop_class_uid = '1.2.3'
item.service_class_uid = '2.3.4'
item.related_general_sop_class_identification = ['1.3.4']
user.primitive.user_information.append(item)
assert item in user.user_information
assert len(user.user_information) == 9
def test_writeable(self):
"""Test writeable."""
user = ServiceUser(self.assoc, mode='requestor')
assert user.writeable is True
user.primitive = self.primitive
assert user.writeable is False
| {
"content_hash": "9995a35d488ef1a99cb2367b89a89b67",
"timestamp": "",
"source": "github",
"line_count": 2316,
"max_line_length": 81,
"avg_line_length": 38.96459412780656,
"alnum_prop": 0.6385164335896811,
"repo_name": "scaramallion/pynetdicom3",
"id": "066b03c8e67df0c5897e751ce76e222aef70c595",
"size": "90242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pynetdicom/tests/test_assoc_user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2135993"
},
{
"name": "Shell",
"bytes": "7504"
}
],
"symlink_target": ""
} |
from sklearn2sql_heroku.tests.classification import generic as class_gen
class_gen.test_model("SVC_sigmoid" , "digits" , "hive")
| {
"content_hash": "7942e6cd7b7da0be9b1f50bd3df3963d",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 72,
"avg_line_length": 32.75,
"alnum_prop": 0.7633587786259542,
"repo_name": "antoinecarme/sklearn2sql_heroku",
"id": "7c1cf430ca57681ef8f57f33671a5d420a4f1c7a",
"size": "131",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/classification/digits/ws_digits_SVC_sigmoid_hive_code_gen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "507043"
},
{
"name": "Procfile",
"bytes": "37"
},
{
"name": "Python",
"bytes": "1021137"
},
{
"name": "R",
"bytes": "2521"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author: yyg
@Create: 2017MMDD
@LastUpdate: 2017-02-14 HH:MM:SS
@Version: 0.0.0-Beta
"""
# Import
import sqlite3
from logging import getLogger
from os import walk, mkdir
from os.path import join, isdir
from shutil import rmtree
from params import (LOGGER_NAME, INSPECT_COLLECT_DIR, SQLITE_DB_DIR)
from utils import json_deserialize
# Basic info
__version__ = "0.0.0-Beta"
__all__ = []
__author__ = "yyg"
# Add logger
logger = getLogger(LOGGER_NAME)
# Exceptions
# main code
class Assembler(object):
"""
    Generate formatted inspect outcome
    - step1: reverse-serialize
    - step2: re-arrange data
    - step3: generate tables
        - collect table cols
        - table(disk_info) => disk_***
        - table(netwk_info) => netwk_**
tables = [u"basic_info", u"disk_info", u"netwk_info"]
table struct:
- disk
id|hostname/ip|disk_num|disk_1 | disk_2 |
1 |10.10.10.10|2 |/data1=100G+10%|/data2=200G+20%|
"""
def __init__(self):
self.db = "laserjet.db"
self.conn = None
self.data = list()
self.tables = {
# "xxx" : [[cols],sql_create_table, [data], [sql_insert_rows]]
"basic_info": [[], None, [], []],
"disk_info": [[], None, [], []],
"netwk_info": [[], None, [], []]
}
# steps
def start(self):
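        # Pipeline: create a fresh sqlite db, load the collected JSON reports,
        # build the three tables, then populate them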
self.create_db()
self.deserialize()
self.create_tables()
self.insert_rows()
def create_db(self):
if not isdir(SQLITE_DB_DIR):
mkdir(SQLITE_DB_DIR)
else:
rmtree(SQLITE_DB_DIR) # clean up existing laserjet.db
mkdir(SQLITE_DB_DIR)
self.conn = sqlite3.connect(join(SQLITE_DB_DIR, self.db))
def deserialize(self):
total_cols = set()
logger.info("Start deserialize")
for file in Assembler.__jfiles():
with open(file) as j_content:
j_content_dict = json_deserialize(j_content)
self.data.append(j_content_dict)
total_cols = total_cols | set(j_content_dict.keys())
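        # Partition the keys by prefix: disk_* -> disk_info, netwk_* -> netwk_info,
        # everything else -> basic_info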
tmp = self.__filter_cols(total_cols, "disk_")
self.tables["disk_info"][0] = tmp[1].append("hostname")
self.tables["disk_info"][1] = Assembler.sql_crt_tb("disk_info", tmp[1])
tmp = self.__filter_cols(tmp[0], "netwk_")
self.tables["netwk_info"][0] = tmp[1]
self.tables["netwk_info"][1] = Assembler.sql_crt_tb("netwk_info", tmp[1])
self.tables["basic_info"][0] = tmp[0]
self.tables["basic_info"][1] = Assembler.sql_crt_tb("basic_info", tmp[0])
logger.info("Table disk_info contains columns: %s" % self.tables["disk_info"][0])
logger.info("Table disk_info use sql: %s" % self.tables["disk_info"][1])
logger.info("Table netwk_info contains columns: %s" % self.tables["netwk_info"][0])
logger.info("Table netwk_info use sql: %s" % self.tables["netwk_info"][1])
logger.info("Table basic_info contains columns: %s" % self.tables["basic_info"][0])
logger.info("Table basic_info use sql: %s" % self.tables["basic_info"][1])
def create_tables(self):
for tb in self.tables.values():
# excute each sql to create corresponding tables
self.conn.execute(tb[1])
def categorize_data(self):
"""
self.tables["disk_info"][3].append({})
self.tables["netwk_info"][3].append({})
self.tables["basic_info"][3].append({})
"""
for element in self.data:
disk_info = dict()
netwk_info = dict()
basic_info = dict()
for k, v in element.iteritems():
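                # Route by key prefix; 'hostname' matches the first branch, so it
                # is stored with the disk_info row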
if k.startswith("disk_") or k == "hostname":
disk_info[k] = v
elif k.startswith("netwk_") or k == "hostname":
netwk_info[k] = v
else:
basic_info[k] = v
self.tables["disk_info"][2].append(disk_info)
self.tables["netwk_info"][2].append(netwk_info)
self.tables["basic_info"][2].append(basic_info)
def insert_rows(self):
self.categorize_data()
for k, v in self.tables.iteritems():
# k = "disk_info"
# v = [[cols],sql_create_table, [{data},{data}], [sql_insert_rows]]
for data in v[2]:
self.conn.execute(Assembler.sql_insert_rows(k, data))
self.conn.commit()
self.conn.close()
# private methods
@staticmethod
def sql_insert_rows(tb, data):
cols = []
values = []
for k, v in data.iteritems():
cols.append(k)
values.append(v)
cols = ",".join(cols)
values = map(Assembler.addquotation, values)
values = ",".join(values)
sql = "INSERT INTO {0} ({1}) VALUES ({2});".format(tb, cols, values)
logger.info("SQL = %s" % sql)
return sql
@staticmethod
def addquotation(a):
return "'" + str(a) + "'"
@staticmethod
def sql_crt_tb(tb, cols):
"""
:param tb: str
:param cols: list
:return: sql: str
"""
col_style = " VARCHAR(20)"
for col in cols:
# col col_style,
cols[cols.index(col)] = col + col_style
columns = ",".join(cols)
return "CREATE TABLE {0} ( {1} );".format(tb, columns)
@staticmethod
def __jfiles():
"""
: () => ["/**/.../**.json", "/**/.../**.json", ...]
"""
return [join(INSPECT_COLLECT_DIR, file) for file in walk(INSPECT_COLLECT_DIR).next()[2] if
file.endswith(".json")]
@staticmethod
def __filter_cols(data, label):
"""
: (list, str) => [[rest],[filtered]]
"""
return [[i for i in data if not i.startswith(label)], [i for i in data if i.startswith(label)]]
if __name__ == "__main__":
pass
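# Example usage (hypothetical; presumably invoked elsewhere in the package):
#   assembler = Assembler()
#   assembler.start()  # writes SQLITE_DB_DIR/laserjet.db from the collected JSON reports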
| {
"content_hash": "c1fdc3f0fe96eed9908faa5c238512fd",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 103,
"avg_line_length": 33.04210526315789,
"alnum_prop": 0.5082828926409685,
"repo_name": "hipnusleo/laserjet",
"id": "11eaf8621b58b9fc3cd3821b2ffa7668b469c894",
"size": "6278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/core/assemble.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3096"
},
{
"name": "Batchfile",
"bytes": "13184"
},
{
"name": "C",
"bytes": "672858"
},
{
"name": "C++",
"bytes": "9678"
},
{
"name": "Go",
"bytes": "6671"
},
{
"name": "HTML",
"bytes": "850945"
},
{
"name": "Java",
"bytes": "14456"
},
{
"name": "Makefile",
"bytes": "14373"
},
{
"name": "Python",
"bytes": "5156663"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UserProfile'
db.create_table(u'profiles_userprofile', (
('user', self.gf('django.db.models.fields.related.OneToOneField')(related_name='profile', unique=True, primary_key=True, to=orm['auth.User'])),
('due_reviews', self.gf('django.db.models.fields.IntegerField')(default=2)),
))
db.send_create_signal(u'profiles', ['UserProfile'])
def backwards(self, orm):
# Deleting model 'UserProfile'
db.delete_table(u'profiles_userprofile')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'profiles.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'due_reviews': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['auth.User']"})
}
}
complete_apps = ['profiles'] | {
"content_hash": "c78bb6c4f3cac149f6364ab2f512be61",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 187,
"avg_line_length": 62.985074626865675,
"alnum_prop": 0.566350710900474,
"repo_name": "team-stroller/critical_critiques",
"id": "b02cd860f840a7f8635f3d00d4309e603f959f93",
"size": "4244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "critical_critiques/profiles/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "44"
},
{
"name": "Python",
"bytes": "22973"
},
{
"name": "Shell",
"bytes": "5120"
}
],
"symlink_target": ""
} |
""" Handle motion-related events, e. g. beginning capture.
Should accept input from the IR sensor, and may interface with
OpenCV to detect motion in buffered video.
The video capture and image processing should probably be done in
parallel (separate threads / python equivalent)
"""
import cv2
# Determine if there is motion in the frames using a computer vision algorithm
def CV_detects_motion(frame):
# Accept some arguments, probably background and captured image
# Utilize one of OpenCV's vision algorithms, probably background subtraction
# Return result (bool or movement mask, not sure which one yet)
#Thanks to the user 'ravwojdyla' on StackOverflow for this Python
    #alternative to static function variables
#http://stackoverflow.com/questions/279561
try:
CV_detects_motion.background_reset_counter += 1
except AttributeError:
CV_detects_motion.background_reset_counter = 1
motion = CV_detects_motion.motion_detector.apply(frame)
contours, h = cv2.findContours(motion, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cv2.imshow("Motion", motion)
cv2.waitKey(1)
#Occasionally reset background model to account for noise
if 90 < CV_detects_motion.background_reset_counter:
CV_detects_motion.motion_detector = cv2.BackgroundSubtractorMOG()
CV_detects_motion.background_reset_counter = 1
threshold = 25
for c in contours:
if threshold**2 < cv2.contourArea(c):
            return True
return False
CV_detects_motion.motion_detector = cv2.BackgroundSubtractorMOG()
try:
import RPi.GPIO as gpio
io_pin = 7
gpio.setmode(gpio.BOARD)
gpio.setup(io_pin, gpio.IN, pull_up_down=gpio.PUD_DOWN)
# Determine if the IR sensor on the Pi is detecting motion
def IR_detects_motion():
if gpio.input(io_pin):
#print "Motion detected by IR!"
print IR_detects_motion.testvar
IR_detects_motion.testvar += 1
return True
#return bool(gpio.input(io_pin))
IR_detects_motion.testvar = 0
except ImportError:
print "This machine is not a Raspberry Pi, disabling GPIO support..."
if __name__ == "__main__":
camera = cv2.VideoCapture(0)
CV_detects_motion(camera.read()[1])
| {
"content_hash": "55a40e45b2094b5dba780dc1c672b712",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 86,
"avg_line_length": 34.37313432835821,
"alnum_prop": 0.6891011723838472,
"repo_name": "nat543207/spi-camera",
"id": "ff233552358a9ed3ec8e7fbd677d8a4eb7a09a16",
"size": "2303",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/motion.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4656"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import coursereg.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('coursereg', '0004_auto_20160731_1541'),
]
operations = [
migrations.RemoveField(
model_name='course',
name='credits',
),
migrations.RemoveField(
model_name='course',
name='term',
),
migrations.RemoveField(
model_name='course',
name='last_reg_date',
),
migrations.RemoveField(
model_name='course',
name='last_drop_date',
),
migrations.AlterField(
model_name='course',
name='new_term',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='coursereg.Term'),
),
migrations.RenameField(
model_name='course',
old_name='new_term',
new_name='term',
),
migrations.RenameField(
model_name='course',
old_name='new_credits',
new_name='credits',
),
migrations.RemoveField(
model_name='participant',
name='state',
),
migrations.RemoveField(
model_name='participant',
name='grade',
),
migrations.RenameField(
model_name='participant',
old_name='new_grade',
new_name='grade',
),
migrations.AlterField(
model_name='participant',
name='created_at',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='participant',
name='updated_at',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='department',
name='abbreviation',
field=models.CharField(max_length=100, unique=True),
),
migrations.AlterField(
model_name='notification',
name='origin',
field=models.IntegerField(choices=[(0, 'Adviser'), (1, 'Instructor'), (2, 'DCC'), (3, 'Other'), (4, 'Student')], default=2),
),
]
| {
"content_hash": "6a78788cf505ca1968f81c0e9152b226",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 136,
"avg_line_length": 28.9625,
"alnum_prop": 0.5217954251186879,
"repo_name": "s-gv/bheemboy",
"id": "4c0470ac449eeb6af19c6ccb6edaac8776072604",
"size": "2389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coursereg/migrations/0005_auto_20160731_1557.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4279"
},
{
"name": "HTML",
"bytes": "116930"
},
{
"name": "JavaScript",
"bytes": "6144"
},
{
"name": "Python",
"bytes": "167718"
}
],
"symlink_target": ""
} |
"""
module for creating frames of data that can be sent to a YubiKey
"""
# Copyright (c) 2010, Yubico AB
# See the file COPYING for licence statement.
__all__ = [
# constants
# functions
# classes
'YubiKeyFrame',
]
import struct
from . import yubico_util
from . import yubikey_defs
from . import yubico_exception
from .yubico_version import __version__
class YubiKeyFrame:
"""
    Class containing a YKFRAME (as defined in ykdef.h).
A frame is basically 64 bytes of data. When this is to be sent
to a YubiKey, it is put inside 10 USB HID feature reports. Each
feature report is 7 bytes of data plus 1 byte of sequencing and
flags.
"""
def __init__(self, command, payload=b''):
if not payload:
payload = b'\x00' * 64
if len(payload) != 64:
raise yubico_exception.InputError('payload must be empty or 64 bytes')
if not isinstance(payload, bytes):
raise yubico_exception.InputError('payload must be a bytestring')
self.payload = payload
self.command = command
self.crc = yubico_util.crc16(payload)
def __repr__(self):
return '<%s.%s instance at %s: %s>' % (
self.__class__.__module__,
self.__class__.__name__,
hex(id(self)),
self.command
)
def to_string(self):
"""
Return the frame as a 70 byte string.
"""
# From ykdef.h :
#
# // Frame structure
# #define SLOT_DATA_SIZE 64
# typedef struct {
# unsigned char payload[SLOT_DATA_SIZE];
# unsigned char slot;
# unsigned short crc;
# unsigned char filler[3];
# } YKFRAME;
filler = b''
return struct.pack('<64sBH3s',
self.payload, self.command, self.crc, filler)
def to_feature_reports(self, debug=False):
"""
Return the frame as an array of 8-byte parts, ready to be sent to a YubiKey.
"""
rest = self.to_string()
seq = 0
out = []
        # When sending a frame to the YubiKey, we can (should) remove any
        # 7-byte series that only consists of '\x00', besides the first
        # and last series.
while rest:
this, rest = rest[:7], rest[7:]
if seq > 0 and rest:
                # never skip first or last series
if this != b'\x00\x00\x00\x00\x00\x00\x00':
this += yubico_util.chr_byte(yubikey_defs.SLOT_WRITE_FLAG + seq)
out.append(self._debug_string(debug, this))
else:
this += yubico_util.chr_byte(yubikey_defs.SLOT_WRITE_FLAG + seq)
out.append(self._debug_string(debug, this))
seq += 1
return out
def _debug_string(self, debug, data):
"""
Annotate a frames data, if debug is True.
"""
if not debug:
return data
if self.command in [yubikey_defs.SLOT_CONFIG,
yubikey_defs.SLOT_CONFIG2,
yubikey_defs.SLOT_UPDATE1,
yubikey_defs.SLOT_UPDATE2,
yubikey_defs.SLOT_SWAP,
]:
# annotate according to config_st (see yubikey_defs.to_string())
if yubico_util.ord_byte(data[-1]) == 0x80:
return (data, "FFFFFFF")
if yubico_util.ord_byte(data[-1]) == 0x81:
return (data, "FFFFFFF")
if yubico_util.ord_byte(data[-1]) == 0x82:
return (data, "FFUUUUU")
if yubico_util.ord_byte(data[-1]) == 0x83:
return (data, "UKKKKKK")
if yubico_util.ord_byte(data[-1]) == 0x84:
return (data, "KKKKKKK")
if yubico_util.ord_byte(data[-1]) == 0x85:
return (data, "KKKAAAA")
if yubico_util.ord_byte(data[-1]) == 0x86:
return (data, "AAlETCr")
if yubico_util.ord_byte(data[-1]) == 0x87:
return (data, "rCR")
# after payload
if yubico_util.ord_byte(data[-1]) == 0x89:
return (data, " Scr")
else:
return (data, '')
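# Hedged usage sketch (not part of the original module): builds a frame with an
# all-zero payload for an arbitrary command byte and inspects the layout that the
# class docstring describes -- a 70-byte string split into 8-byte feature reports.
def _example_frame_layout(command=0x01):
    frame = YubiKeyFrame(command)
    raw = frame.to_string()                # 64-byte payload + command + CRC16 + 3 filler bytes = 70 bytes
    reports = frame.to_feature_reports()   # at most 10 reports; all-zero middle series are skipped
    return len(raw), len(reports)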
| {
"content_hash": "4a38b9d480aea0bff824f7a35b2955ef",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 84,
"avg_line_length": 34.62903225806452,
"alnum_prop": 0.5207265952491849,
"repo_name": "Frostman/python-yubico",
"id": "f095d20b4417f0247f295e327ef36df1a26ad132",
"size": "4294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yubico/yubikey_frame.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Groff",
"bytes": "3704"
},
{
"name": "Python",
"bytes": "121173"
}
],
"symlink_target": ""
} |
from twitter_feed.core.hashtag import Hashtag
def test_hashtag_full():
hashtag_dict = {
'text': 'bagr'
}
test_hashtag = Hashtag.from_dict(hashtag_dict)
assert test_hashtag.text == hashtag_dict['text'] | {
"content_hash": "3ff27c221207b819f9ce9fb583f848ac",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 52,
"avg_line_length": 20.727272727272727,
"alnum_prop": 0.6535087719298246,
"repo_name": "petrkotas/twitter-feed",
"id": "4ce13bd90727560e981c84cb033d713f39232743",
"size": "228",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/core/test_hashtag.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1056"
},
{
"name": "Python",
"bytes": "47316"
}
],
"symlink_target": ""
} |
print("Hello World!")
print("Hello Again!")
print("I like this tpying this.")
print("This is fun.")
print("Printing!")
print("I'd much rather you 'not'!")
print('I "said" do not touch this.')
| {
"content_hash": "06910ed0ab28d31d44d9f65e0f98115b",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 36,
"avg_line_length": 27.428571428571427,
"alnum_prop": 0.6666666666666666,
"repo_name": "Vayne-Lover/Effective",
"id": "874eb8eaf558aeca14be90ed5cb9f2c8a9b0c231",
"size": "215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/Learn Python The Hard Way/ex1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37043"
}
],
"symlink_target": ""
} |
import os
import tarfile
import unittest
import mxnet as mx
import numpy as np
import random
from mxnet import gluon
import platform
from common import setup_module, with_seed, teardown
from mxnet.gluon.data import DataLoader
import mxnet.ndarray as nd
from mxnet import context
from mxnet.gluon.data.dataset import Dataset
@with_seed()
def test_array_dataset():
X = np.random.uniform(size=(10, 20))
Y = np.random.uniform(size=(10,))
dataset = gluon.data.ArrayDataset(X, Y)
loader = gluon.data.DataLoader(dataset, 2)
for i, (x, y) in enumerate(loader):
assert mx.test_utils.almost_equal(x.asnumpy(), X[i*2:(i+1)*2])
assert mx.test_utils.almost_equal(y.asnumpy(), Y[i*2:(i+1)*2])
dataset = gluon.data.ArrayDataset(X)
loader = gluon.data.DataLoader(dataset, 2)
for i, x in enumerate(loader):
assert mx.test_utils.almost_equal(x.asnumpy(), X[i*2:(i+1)*2])
def prepare_record():
if not os.path.isdir("data/test_images"):
os.makedirs('data/test_images')
if not os.path.isdir("data/test_images/test_images"):
gluon.utils.download("http://data.mxnet.io/data/test_images.tar.gz", "data/test_images.tar.gz")
tarfile.open('data/test_images.tar.gz').extractall('data/test_images/')
if not os.path.exists('data/test.rec'):
imgs = os.listdir('data/test_images/test_images')
record = mx.recordio.MXIndexedRecordIO('data/test.idx', 'data/test.rec', 'w')
for i, img in enumerate(imgs):
str_img = open('data/test_images/test_images/'+img, 'rb').read()
s = mx.recordio.pack((0, i, i, 0), str_img)
record.write_idx(i, s)
return 'data/test.rec'
@with_seed()
def test_recordimage_dataset():
recfile = prepare_record()
fn = lambda x, y : (x, y)
dataset = gluon.data.vision.ImageRecordDataset(recfile).transform(fn)
loader = gluon.data.DataLoader(dataset, 1)
for i, (x, y) in enumerate(loader):
assert x.shape[0] == 1 and x.shape[3] == 3
assert y.asscalar() == i
def _dataset_transform_fn(x, y):
"""Named transform function since lambda function cannot be pickled."""
return x, y
@unittest.skip("Hangs in NVIDIA CI builds")
@with_seed()
def test_recordimage_dataset_with_data_loader_multiworker():
recfile = prepare_record()
dataset = gluon.data.vision.ImageRecordDataset(recfile)
loader = gluon.data.DataLoader(dataset, 1, num_workers=5)
for i, (x, y) in enumerate(loader):
assert x.shape[0] == 1 and x.shape[3] == 3
assert y.asscalar() == i
# with transform
dataset = gluon.data.vision.ImageRecordDataset(recfile).transform(_dataset_transform_fn)
loader = gluon.data.DataLoader(dataset, 1, num_workers=5)
for i, (x, y) in enumerate(loader):
assert x.shape[0] == 1 and x.shape[3] == 3
assert y.asscalar() == i
# try limit recursion depth
import sys
old_limit = sys.getrecursionlimit()
sys.setrecursionlimit(500) # this should be smaller than any default value used in python
dataset = gluon.data.vision.ImageRecordDataset(recfile).transform(_dataset_transform_fn)
loader = gluon.data.DataLoader(dataset, 1, num_workers=5)
for i, (x, y) in enumerate(loader):
assert x.shape[0] == 1 and x.shape[3] == 3
assert y.asscalar() == i
sys.setrecursionlimit(old_limit)
@with_seed()
def test_sampler():
seq_sampler = gluon.data.SequentialSampler(10)
assert list(seq_sampler) == list(range(10))
rand_sampler = gluon.data.RandomSampler(10)
assert sorted(list(rand_sampler)) == list(range(10))
seq_batch_keep = gluon.data.BatchSampler(seq_sampler, 3, 'keep')
assert sum(list(seq_batch_keep), []) == list(range(10))
seq_batch_discard = gluon.data.BatchSampler(seq_sampler, 3, 'discard')
assert sum(list(seq_batch_discard), []) == list(range(9))
rand_batch_keep = gluon.data.BatchSampler(rand_sampler, 3, 'keep')
assert sorted(sum(list(rand_batch_keep), [])) == list(range(10))
@with_seed()
def test_datasets():
assert len(gluon.data.vision.MNIST(root='data/mnist')) == 60000
assert len(gluon.data.vision.MNIST(root='data/mnist', train=False)) == 10000
assert len(gluon.data.vision.FashionMNIST(root='data/fashion-mnist')) == 60000
assert len(gluon.data.vision.FashionMNIST(root='data/fashion-mnist', train=False)) == 10000
assert len(gluon.data.vision.CIFAR10(root='data/cifar10')) == 50000
assert len(gluon.data.vision.CIFAR10(root='data/cifar10', train=False)) == 10000
assert len(gluon.data.vision.CIFAR100(root='data/cifar100')) == 50000
assert len(gluon.data.vision.CIFAR100(root='data/cifar100', fine_label=True)) == 50000
assert len(gluon.data.vision.CIFAR100(root='data/cifar100', train=False)) == 10000
@with_seed()
def test_image_folder_dataset():
prepare_record()
dataset = gluon.data.vision.ImageFolderDataset('data/test_images')
assert dataset.synsets == ['test_images']
assert len(dataset.items) == 16
@with_seed()
def test_list_dataset():
for num_worker in range(0, 3):
data = mx.gluon.data.DataLoader([([1,2], 0), ([3, 4], 1)], batch_size=1, num_workers=num_worker)
for d, l in data:
pass
class Dataset(gluon.data.Dataset):
def __len__(self):
return 100
def __getitem__(self, key):
return mx.nd.full((10,), key)
@with_seed()
def test_multi_worker():
data = Dataset()
loader = gluon.data.DataLoader(data, batch_size=1, num_workers=5)
for i, batch in enumerate(loader):
assert (batch.asnumpy() == i).all()
class _Dummy(Dataset):
"""Dummy dataset for randomized shape arrays."""
def __init__(self, random_shape):
self.random_shape = random_shape
def __getitem__(self, idx):
key = idx
if self.random_shape:
out = np.random.uniform(size=(random.randint(1000, 1100), 40))
labels = np.random.uniform(size=(random.randint(10, 15)))
else:
out = np.random.uniform(size=(1000, 40))
labels = np.random.uniform(size=(10))
return key, out, labels
def __len__(self):
return 50
def _batchify_list(data):
"""
return list of ndarray without stack/concat/pad
"""
if isinstance(data, (tuple, list)):
return list(data)
if isinstance(data, mx.nd.NDArray):
return [data]
return data
def _batchify(data):
"""
Collate data into batch. Use shared memory for stacking.
    :param data: a list of arrays, with layout 'NTC'.
    :return: x and x's unpadded lengths if labels are not supplied;
        otherwise x, x's unpadded lengths, y and y's unpadded lengths.
"""
# input layout is NTC
keys, inputs, labels = [item[0] for item in data], [item[1] for item in data], \
[item[2] for item in data]
if len(data) > 1:
max_data_len = max([seq.shape[0] for seq in inputs])
max_labels_len = 0 if not labels else max([seq.shape[0] for seq in labels])
else:
max_data_len = inputs[0].shape[0]
max_labels_len = 0 if not labels else labels[0].shape[0]
x_lens = [item.shape[0] for item in inputs]
y_lens = [item.shape[0] for item in labels]
for i, seq in enumerate(inputs):
pad_len = max_data_len - seq.shape[0]
inputs[i] = np.pad(seq, ((0, pad_len), (0, 0)), 'constant', constant_values=0)
labels[i] = np.pad(labels[i], (0, max_labels_len - labels[i].shape[0]),
'constant', constant_values=-1)
inputs = np.asarray(inputs, dtype=np.float32)
if labels is not None:
labels = np.asarray(labels, dtype=np.float32)
inputs = inputs.transpose((1, 0, 2))
labels = labels.transpose((1, 0))
return (nd.array(inputs, dtype=inputs.dtype, ctx=context.Context('cpu_shared', 0)),
nd.array(x_lens, ctx=context.Context('cpu_shared', 0))) \
if labels is None else (
nd.array(inputs, dtype=inputs.dtype, ctx=context.Context('cpu_shared', 0)),
nd.array(x_lens, ctx=context.Context('cpu_shared', 0)),
nd.array(labels, dtype=labels.dtype, ctx=context.Context('cpu_shared', 0)),
nd.array(y_lens, ctx=context.Context('cpu_shared', 0)))
@with_seed()
def test_multi_worker_forked_data_loader():
data = _Dummy(False)
loader = DataLoader(data, batch_size=40, batchify_fn=_batchify, num_workers=2)
for epoch in range(1):
for i, data in enumerate(loader):
pass
data = _Dummy(True)
loader = DataLoader(data, batch_size=40, batchify_fn=_batchify_list, num_workers=2)
for epoch in range(1):
for i, data in enumerate(loader):
pass
if __name__ == '__main__':
import nose
nose.runmodule()
| {
"content_hash": "a2f492b7db2b0b531c2727d2c35e4a07",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 104,
"avg_line_length": 37.63519313304721,
"alnum_prop": 0.6391834872847532,
"repo_name": "mlperf/training_results_v0.6",
"id": "d48937c7259cce3327d917cc7f719cf9b33adcaa",
"size": "9555",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Fujitsu/benchmarks/resnet/implementations/mxnet/tests/python/unittest/test_gluon_data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13941"
},
{
"name": "C",
"bytes": "208630"
},
{
"name": "C++",
"bytes": "10999411"
},
{
"name": "CMake",
"bytes": "129712"
},
{
"name": "CSS",
"bytes": "64767"
},
{
"name": "Clojure",
"bytes": "396764"
},
{
"name": "Cuda",
"bytes": "2272433"
},
{
"name": "Dockerfile",
"bytes": "67820"
},
{
"name": "Groovy",
"bytes": "62557"
},
{
"name": "HTML",
"bytes": "19753082"
},
{
"name": "Java",
"bytes": "166294"
},
{
"name": "JavaScript",
"bytes": "71846"
},
{
"name": "Julia",
"bytes": "408765"
},
{
"name": "Jupyter Notebook",
"bytes": "2713169"
},
{
"name": "Lua",
"bytes": "4430"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "115694"
},
{
"name": "Perl",
"bytes": "1535873"
},
{
"name": "Perl 6",
"bytes": "7280"
},
{
"name": "PowerShell",
"bytes": "6150"
},
{
"name": "Python",
"bytes": "24905683"
},
{
"name": "R",
"bytes": "351865"
},
{
"name": "Roff",
"bytes": "293052"
},
{
"name": "Scala",
"bytes": "1189019"
},
{
"name": "Shell",
"bytes": "794096"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "TypeScript",
"bytes": "361164"
}
],
"symlink_target": ""
} |
import os
import requests # pip install requests
# Please NOTE: In this sample we're assuming Cloud Api Server is hosted at "https://localhost".
# If it's not, then please replace this with your hosting URL.
# Base URL for PDF.co Web API requests
BASE_URL = "https://localhost"
# Direct URL of source PDF file.
SourceFileUrl = "https://bytescout-com.s3.amazonaws.com/files/demo-files/cloud-api/pdf-edit/sample.pdf"
#Comma-separated list of page indices (or ranges) to process. Leave empty for all pages. Example: '0,2-5,7-'.
Pages = ""
# PDF document password. Leave empty for unprotected documents.
Password = ""
# Destination PDF file name
DestinationFile = ".//result.pdf"
# Image params
Type = "image"
X = 400
Y = 20
Width = 119
Height = 32
ImageUrl = "https://bytescout-com.s3.amazonaws.com/files/demo-files/cloud-api/pdf-edit/logo.png"
def main(args = None):
addImageToExistingPdf(DestinationFile)
def addImageToExistingPdf(destinationFile):
"""Add image using PDF.co Web API"""
# Prepare URL for 'PDF Edit' API request
url = "{}/pdf/edit/add?name={}&password={}&pages={}&url={}&type={}&x={}&y={}&width={}&height={}&urlimage={}".format(
BASE_URL,
os.path.basename(destinationFile),
Password,
Pages,
SourceFileUrl,
Type,
X,
Y,
Width,
Height,
ImageUrl
)
# Execute request and get response as JSON
response = requests.get(url, headers={ "content-type": "application/octet-stream" })
if (response.status_code == 200):
json = response.json()
if json["error"] == False:
# Get URL of result file
resultFileUrl = json["url"]
# Download result file
r = requests.get(resultFileUrl, stream=True)
if (r.status_code == 200):
with open(destinationFile, 'wb') as file:
for chunk in r:
file.write(chunk)
print(f"Result file saved as \"{destinationFile}\" file.")
else:
print(f"Request error: {response.status_code} {response.reason}")
else:
# Show service reported error
print(json["message"])
else:
print(f"Request error: {response.status_code} {response.reason}")
if __name__ == '__main__':
main() | {
"content_hash": "5e0f4afa8d3943b8ddecefa102403cbc",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 120,
"avg_line_length": 31.17105263157895,
"alnum_prop": 0.6099620092866188,
"repo_name": "bytescout/ByteScout-SDK-SourceCode",
"id": "63bf4ddc1f9107ea110d0e8987db6f1e3f62165b",
"size": "2369",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Cloud API Server/Add Text And Images To PDF/Python/Add Images to Existing PDF/AddImagesToExistingPDF.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP.NET",
"bytes": "364116"
},
{
"name": "Apex",
"bytes": "243500"
},
{
"name": "Batchfile",
"bytes": "151832"
},
{
"name": "C",
"bytes": "224568"
},
{
"name": "C#",
"bytes": "12909855"
},
{
"name": "C++",
"bytes": "440474"
},
{
"name": "CSS",
"bytes": "56817"
},
{
"name": "Classic ASP",
"bytes": "46655"
},
{
"name": "Dockerfile",
"bytes": "776"
},
{
"name": "Gherkin",
"bytes": "3386"
},
{
"name": "HTML",
"bytes": "17276296"
},
{
"name": "Java",
"bytes": "1483408"
},
{
"name": "JavaScript",
"bytes": "3033610"
},
{
"name": "PHP",
"bytes": "838746"
},
{
"name": "Pascal",
"bytes": "398090"
},
{
"name": "PowerShell",
"bytes": "715204"
},
{
"name": "Python",
"bytes": "703542"
},
{
"name": "QMake",
"bytes": "880"
},
{
"name": "TSQL",
"bytes": "3080"
},
{
"name": "VBA",
"bytes": "383773"
},
{
"name": "VBScript",
"bytes": "1504410"
},
{
"name": "Visual Basic .NET",
"bytes": "9489450"
}
],
"symlink_target": ""
} |
import fileinput, string, sys
# This script takes a gnuplot data file and extracts only the data at a specified index from each block.
# The result is written to the standard output and can be used as an input to gnuplot.
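# Hypothetical example invocations (the data file name is made up):
#   python extract.py results.dat index=3
#   python extract.py results.dat x=0.5
#   python extract.py results.dat x=0.5 y=1.0 > single_point.dat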
try:
if(len(sys.argv) < 3) :
print "Usage: python extract.py <filename> index=<node index> for extracting indices"
print "or python extract.py <filename> x=<position> for extracting positions along y-axis"
print "or python extract.py <filename> y=<position> for extracting positions along x-axis"
print "or python extract.py <filename> x=<position> y=<position> for extracting single positions"
filename = sys.argv[1]
index = 0
useXPosition = False
useYPosition = False
for i in range(2,len(sys.argv)) :
#print "i=", i
str = sys.argv[i]
#print str
arg2 = str.split("=")
if arg2[0] == "index" :
index = arg2[1]
#print "Searching for index ", index
elif arg2[0] == "x" :
useXPosition = True
xPosition = float(arg2[1])
#print "Searching for x position ", xPosition
elif arg2[0] == "y" :
useYPosition = True
yPosition = float(arg2[1])
#print "Searching for y position ", yPosition
else :
print "Usage: python extract.py <filename> index=<node index> for extracting indices"
print "or python extract.py <filename> x=<position> for extracting positions along y-axis"
print "or python extract.py <filename> y=<position> for extracting positions along x-axis"
print "or python extract.py <filename> x=<position> y=<position> for extracting single positions"
exit
#print "useXPosition: ", useXPosition
#print "useYPosition: ", useYPosition
cols = ""
time = 0.0
nTimeSteps = 0
indexCounter = 0
currentXPosition = 0.0
currentYPosition = 0.0
lastXPosition = 0.0
lastYPosition = 0.0
foundLastY = False
f = open(filename, 'r')
for line in f :
if '#' in line :
indexCounter = 0
nTimeSteps += 1
splitted = line.split()
if(len(splitted) > 1) :
                time = splitted[2]
if(not(useXPosition and useYPosition)) :
if(nTimeSteps != 1) :
print "\n"
print line,
elif ((not useXPosition) and (not useYPosition)) and (line != "\n") :
if indexCounter == int(index) :
cols = [time]
cols.extend(line.split())
for col in cols :
print col,
print ""
indexCounter += 1
elif (useXPosition or useYPosition) and (line != "\n") :
# Initialize flags for successful search
if (useXPosition) :
foundXPosition = False
else :
foundXPosition = True
if (useYPosition) :
foundYPosition = False
else:
foundYPosition = True
lastXPosition = currentXPosition
currentXPosition = float(line.split()[0])
temp = lastYPosition
lastYPosition = currentYPosition
currentYPosition = float(line.split()[1])
# same y values in this and previous line => restore y value of the preceding block
if(currentYPosition == lastYPosition) :
lastYPosition = temp
if useXPosition :
# Reset x position when entering a new column
if(lastXPosition > currentXPosition) :
lastXPosition = 0.0;
#print lastXPosition, " | ", xPosition, " | ", currentXPosition
if (currentXPosition == xPosition) or (lastXPosition < xPosition and currentXPosition > xPosition):
foundXPosition = True
if useYPosition :
if (currentYPosition == yPosition) or (lastYPosition < yPosition and currentYPosition > yPosition):
foundYPosition = True
#print "lastYPosition = ", lastYPosition
#print "currentYPosition = ", currentYPosition
if (currentXPosition == 0 and foundLastY and not(useXPosition and useYPosition)) :
print ""
foundLastY = True
else :
foundLastY = False
            # Print lines at the desired positions
if(foundXPosition and foundYPosition) :
#cols = [time]
#cols.extend(line.split())
cols = line.split()
k = -1
# Print time instead of position
if (useXPosition and useYPosition) :
print time,
for col in cols :
k = k+1
if (useXPosition and k == 0) :
continue
if (useYPosition and k == 1) :
continue
print col,
print ""
indexCounter += 1
else :
if line == "\n" :
#print "empty line"
indexCounter -= 1
else :
print "Error!"
exit(1)
# Close input file
f.close()
except IOError as e:
    # Disabled because of annoying error messages when calling this script from gnuplot
#print "I/O error({0}): {1}".format(e.errno, e.strerror)
f.close()
| {
"content_hash": "4883f5533a0e1c9523aa3b6a4082adad",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 104,
"avg_line_length": 30.705479452054796,
"alnum_prop": 0.6625027883113986,
"repo_name": "pederpansen/dune-ax1",
"id": "c4d3b5b038f62f952111f8d32ba9ae281b03caad",
"size": "4502",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "results/extract.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "3871834"
},
{
"name": "Dockerfile",
"bytes": "628"
},
{
"name": "Gnuplot",
"bytes": "6741"
},
{
"name": "Jupyter Notebook",
"bytes": "43790"
},
{
"name": "M",
"bytes": "307"
},
{
"name": "M4",
"bytes": "3155"
},
{
"name": "Makefile",
"bytes": "98350"
},
{
"name": "Python",
"bytes": "8501"
},
{
"name": "Roff",
"bytes": "1673"
},
{
"name": "Shell",
"bytes": "16072"
}
],
"symlink_target": ""
} |
import unittest
import mock
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
@testing.parameterize(*testing.product({
'shape': [None, (2, 3), (2, 3, 4), (2, 3, 4, 5)],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestSoftmax(unittest.TestCase):
def setUp(self):
if self.shape is None:
# For checking numerical stability
value = -5 if self.dtype == numpy.float16 else -1000
self.x = numpy.array([[value, 1]], dtype=self.dtype)
else:
self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.gy = numpy.random.uniform(-1, 1, self.x.shape).astype(self.dtype)
self.check_forward_options = {}
self.check_backward_options = {'eps': 1e-2}
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-2}
self.check_backward_options = {
'eps': 2.0 ** -4, 'atol': 5e-3, 'rtol': 5e-2}
def check_forward(self, x_data, use_cudnn=True):
x = chainer.Variable(x_data)
y = functions.softmax(x, use_cudnn)
self.assertEqual(y.data.dtype, self.dtype)
y_expect = numpy.exp(self.x)
y_roll = numpy.rollaxis(y_expect, 1, y_expect.ndim)
for i in numpy.ndindex(y_roll.shape[:-1]):
y_roll[i] /= y_roll[i].sum()
gradient_check.assert_allclose(
y_expect, y.data, **self.check_forward_options)
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.cudnn
@condition.retry(3)
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
@attr.gpu
@condition.retry(3)
def test_forward_gpu_no_cudnn(self):
self.check_forward(cuda.to_gpu(self.x), False)
def check_backward(self, x_data, gy_data, use_cudnn=True):
gradient_check.check_backward(
functions.Softmax(use_cudnn), x_data, gy_data,
**self.check_backward_options)
@condition.retry(10)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.cudnn
@condition.retry(10)
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.gpu
@condition.retry(10)
def test_backward_gpu_no_cudnn(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy), False)
@testing.parameterize(*testing.product({
'use_cudnn': [True, False],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestSoftmaxCudnnCall(unittest.TestCase):
def setUp(self):
self.x = cuda.cupy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
self.gy = cuda.cupy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
self.expect = self.use_cudnn and (
cuda.cudnn.cudnn.getVersion() >= 3000 or
self.dtype != numpy.float16)
def forward(self):
x = chainer.Variable(self.x)
return functions.softmax(x, use_cudnn=self.use_cudnn)
def test_call_cudnn_forward(self):
with mock.patch('cupy.cudnn.cudnn.softmaxForward') as func:
self.forward()
self.assertEqual(func.called, self.expect)
def test_call_cudnn_backward(self):
y = self.forward()
y.grad = self.gy
with mock.patch('cupy.cudnn.cudnn.softmaxBackward') as func:
y.backward()
self.assertEqual(func.called, self.expect)
testing.run_module(__name__, __file__)
| {
"content_hash": "0c695c1f891767587b7aacd6d59c9778",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 79,
"avg_line_length": 32.208695652173915,
"alnum_prop": 0.6201403887688985,
"repo_name": "benob/chainer",
"id": "aa3a1fe4eb72a1528ea133d3d0aeb38152edf296",
"size": "3704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/chainer_tests/functions_tests/activation_tests/test_softmax.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "29678"
},
{
"name": "Cuda",
"bytes": "6634"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "1690503"
}
],
"symlink_target": ""
} |
'''
This file contains various heuristic classes, all meant to adjust the
AI's word choice in various ways, based on factors ranging from the
tiles selected to the board positions
'''
import board, player, tile, aistats
# BASE CLASS
class Heuristic(object):
def __init__(self):
self.stats = aistats.AIStats()
'''
    Given various data about the move being played, this gives an
    adjustment, in points, for how the play should be revalued
'''
def adjust(self, trayTiles = None, seedRatio = None, playTiles = None):
return 0
#===================================================
# HEURISTIC CLASSES
#===================================================
'''
--Tile Quantile Heuristic--
The motivation behind this heuristic is that some letters are
more useful than others. Useful letters should be given a penalty,
so that their value isn't wasted on low-scoring words. Similarly,
unuseful letters should be encouraged to be discarded at higher
rates since they represent a liability in the tray.
The way this heuristic works is that it uses previously collected
letterPlay data to determine the quantile difference (defaulting to the
.5 mass point) between the value of plays across all letters at that
quantile and the value of plays for that particular letter at the same quantile.
The adjustment is then the sum of all quantile differences multiplied
by the weight of the heuristic (again, defaulting to .5)
What the algorithm means:
-Low quantiles mean a more conservative adjustment (accepting a lot of luck)
-High quantiles mean a more aggressive adjustment (hoping to get lucky)
-The weight impacts the strength of this heuristic relative to the raw score
and/or other heuristics
'''
class tileQuantileHeuristic(Heuristic):
def __init__(self, quantile = .5, weight = .5):
Heuristic.__init__(self)
allLetters = self.stats.letterPlaysInvCDF(None, quantile)
self.totalAdjustment = 0
self.adjustmentsMade = 0
self.letterAdjust = {}
for code in range(ord('A'), ord('Z')+1):
char = chr(code)
self.letterAdjust[char] = (allLetters - self.stats.letterPlaysInvCDF(char, quantile)) * weight
char = '_'
self.letterAdjust[char] = (allLetters - self.stats.letterPlaysInvCDF(char, quantile)) * weight
def adjust(self, trayTiles = None, seedRatio = None, playTiles = None):
adjustment = super(tileQuantileHeuristic, self).adjust(trayTiles = trayTiles, seedRatio = seedRatio, playTiles = playTiles)
if playTiles != None:
for pos, tile in playTiles:
if tile.isBlank:
letter = '_'
else:
letter = tile.letter
adjustment += self.letterAdjust[letter]
return adjustment
seedRatio = None
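# Hedged usage sketch (not part of the original module): how the algorithm in the
# docstring above might be wired up by a caller; `tray` and `play` are hypothetical
# structures supplied by the game engine (a list of tiles and a list of
# (position, tile) pairs respectively).
def _example_quantile_adjustment(tray, play):
    h = tileQuantileHeuristic(quantile=.5, weight=.5)
    # The returned value is added to the raw word score before choosing a play.
    return h.adjust(trayTiles=tray, playTiles=play)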
'''
This takes another heuristic as an initializing parameter and applies its effect ONLY if
we're not in an end-game situation. This prevents the dynamic strategy from being aggressive
when at the end of the game and it no longer makes sense to play conservatively
'''
class notEndGameHeuristic(Heuristic):
def __init__(self, h):
Heuristic.__init__(self)
self.heuristic = h
def adjust(self, trayTiles = None, seedRatio = None, playTiles = None):
adjustment = super(notEndGameHeuristic, self).adjust(trayTiles = trayTiles, seedRatio = seedRatio, playTiles = playTiles)
if len(trayTiles) == player.Player.TRAY_SIZE:
adjustment += self.heuristic.adjust(trayTiles = trayTiles, seedRatio = seedRatio, playTiles = playTiles)
return adjustment
'''
This is the opposite of notEndGame, applying the heuristic ONLY when we have limited tiles
'''
class endGameHeuristic(Heuristic):
def __init__(self, h):
Heuristic.__init__(self)
self.heuristic = h
def adjust(self, trayTiles = None, seedRatio = None, playTiles = None):
adjustment = super(endGameHeuristic, self).adjust(trayTiles = trayTiles, seedRatio = seedRatio, playTiles = playTiles)
        if len(trayTiles) != player.Player.TRAY_SIZE:
adjustment += self.heuristic.adjust(trayTiles = trayTiles, seedRatio = seedRatio, playTiles = playTiles)
return adjustment
'''
This allows for multiple heuristics to be applied simultaneously, iterating through each
and summing their values
'''
class multiHeuristic(Heuristic):
def __init__(self, listOfHeuristics):
Heuristic.__init__(self)
self.heuristics = listOfHeuristics
def adjust(self, trayTiles = None, seedRatio = None, playTiles = None):
adjustment = super(multiHeuristic, self).adjust(trayTiles = trayTiles, seedRatio = seedRatio, playTiles = playTiles)
for h in self.heuristics:
adjustment += h.adjust(trayTiles = trayTiles, seedRatio = seedRatio, playTiles = playTiles)
return adjustment | {
"content_hash": "09f53774bb67a717e98674ae99879d86",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 125,
"avg_line_length": 35.83720930232558,
"alnum_prop": 0.7198788665368808,
"repo_name": "grokcore/dev.lexycross",
"id": "ec7dcd4eab8d9109f23f7404528a09ae3e14ed40",
"size": "4623",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wordsmithed/heuristic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "96255"
},
{
"name": "JavaScript",
"bytes": "2330612"
},
{
"name": "PHP",
"bytes": "247"
},
{
"name": "Python",
"bytes": "301153"
},
{
"name": "Shell",
"bytes": "5123"
}
],
"symlink_target": ""
} |
from collections import defaultdict, OrderedDict
class HookRegistry():
def __init__(self):
self.hooks = defaultdict(OrderedDict)
def register(self, name, func):
self.hooks[name][func] = True
def run_hook(self, name, *args, **kwargs):
return_val = None
for hook in self.hooks[name]:
result = hook(*args, **kwargs)
if result is not None:
return_val = result
return return_val
registry = HookRegistry()
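# Hedged usage sketch (not part of the original module): hooks run in
# registration order and the last non-None return value wins; the hook
# name used here is made up for the example.
def _example_hook_usage():
    reg = HookRegistry()
    reg.register('message_sent', lambda text: None)
    reg.register('message_sent', lambda text: text.upper())
    return reg.run_hook('message_sent', 'draft')   # -> 'DRAFT'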
| {
"content_hash": "381583b89b0db0b3b9e0e79847eae8cc",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 48,
"avg_line_length": 24.9,
"alnum_prop": 0.5943775100401606,
"repo_name": "stefanw/froide",
"id": "6b0d364e2ef16969236245ed87911c4703823005",
"size": "498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "froide/foirequest/hooks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17807"
},
{
"name": "HTML",
"bytes": "161162"
},
{
"name": "Java",
"bytes": "287939"
},
{
"name": "JavaScript",
"bytes": "1325034"
},
{
"name": "Makefile",
"bytes": "329"
},
{
"name": "Python",
"bytes": "1642783"
},
{
"name": "Shell",
"bytes": "1621"
}
],
"symlink_target": ""
} |
import datetime
import sys
from cStringIO import StringIO
from urlparse import urlparse
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.contrib.sessions.models import Session
from django.contrib.sessions.middleware import SessionWrapper
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import WSGIRequest
from django.core.signals import got_request_exception
from django.dispatch import dispatcher
from django.http import urlencode, SimpleCookie, HttpRequest
from django.test import signals
from django.utils.functional import curry
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
class ClientHandler(BaseHandler):
"""
A HTTP Handler that can be used for testing purposes.
Uses the WSGI interface to compose requests, but returns
the raw HttpResponse object
"""
def __call__(self, environ):
from django.conf import settings
from django.core import signals
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._request_middleware is None:
self.load_middleware()
dispatcher.send(signal=signals.request_started)
try:
request = WSGIRequest(environ)
response = self.get_response(request)
# Apply response middleware
for middleware_method in self._response_middleware:
response = middleware_method(request, response)
finally:
dispatcher.send(signal=signals.request_finished)
return response
def store_rendered_templates(store, signal, sender, template, context):
"A utility function for storing templates and contexts that are rendered"
store.setdefault('template',[]).append(template)
store.setdefault('context',[]).append(context)
def encode_multipart(boundary, data):
"""
A simple method for encoding multipart POST data from a dictionary of
form values.
The key will be used as the form data name; the value will be transmitted
as content. If the value is a file, the contents of the file will be sent
as an application/octet-stream; otherwise, str(value) will be sent.
"""
lines = []
for (key, value) in data.items():
if isinstance(value, file):
lines.extend([
'--' + boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
'--' + boundary,
'Content-Disposition: form-data; name="%s_file"; filename="%s"' % (key, value.name),
'Content-Type: application/octet-stream',
'',
value.read()
])
elif hasattr(value, '__iter__'):
for item in value:
lines.extend([
'--' + boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
str(item)
])
else:
lines.extend([
'--' + boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
str(value)
])
lines.extend([
'--' + boundary + '--',
'',
])
return '\r\n'.join(lines)
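# Hedged illustration (not part of the original module): what encode_multipart()
# produces for plain form values; list values become repeated parts with the
# same field name, and the body always ends with the closing boundary marker.
def _example_multipart_body():
    body = encode_multipart(BOUNDARY, {'name': 'Daffy', 'tags': ['duck', 'cartoon']})
    assert body.startswith('--' + BOUNDARY)
    assert body.rstrip('\r\n').endswith('--' + BOUNDARY + '--')
    return body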
class Client:
"""
A class that can act as a client for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
Client objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the Client instance.
This is not intended as a replacement for Twill/Selenium or
the like - it is here to allow testing against the
contexts and templates produced by a view, rather than the
HTML rendered to the end-user.
"""
def __init__(self, **defaults):
self.handler = ClientHandler()
self.defaults = defaults
self.cookies = SimpleCookie()
self.exc_info = None
def store_exc_info(self, *args, **kwargs):
"""
Utility method that can be used to store exceptions when they are
generated by a view.
"""
self.exc_info = sys.exc_info()
def _session(self):
"Obtain the current session variables"
if 'django.contrib.sessions' in settings.INSTALLED_APPS:
cookie = self.cookies.get(settings.SESSION_COOKIE_NAME, None)
if cookie:
return SessionWrapper(cookie.value)
return {}
session = property(_session)
def request(self, **request):
"""
The master request method. Composes the environment dictionary
and passes to the handler, returning the result of the handler.
Assumes defaults for the query environment, which can be overridden
using the arguments to the request.
"""
environ = {
'HTTP_COOKIE': self.cookies,
'PATH_INFO': '/',
'QUERY_STRING': '',
'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': None,
'SERVER_NAME': 'testserver',
'SERVER_PORT': 80,
'SERVER_PROTOCOL': 'HTTP/1.1',
}
environ.update(self.defaults)
environ.update(request)
# Curry a data dictionary into an instance of
# the template renderer callback function
data = {}
on_template_render = curry(store_rendered_templates, data)
dispatcher.connect(on_template_render, signal=signals.template_rendered)
# Capture exceptions created by the handler
dispatcher.connect(self.store_exc_info, signal=got_request_exception)
response = self.handler(environ)
# Add any rendered template detail to the response
# If there was only one template rendered (the most likely case),
# flatten the list to a single element
for detail in ('template', 'context'):
if data.get(detail):
if len(data[detail]) == 1:
setattr(response, detail, data[detail][0]);
else:
setattr(response, detail, data[detail])
else:
setattr(response, detail, None)
# Look for a signalled exception and reraise it
if self.exc_info:
raise self.exc_info[1], None, self.exc_info[2]
# Update persistent cookie data
if response.cookies:
self.cookies.update(response.cookies)
return response
def get(self, path, data={}, **extra):
"Request a response from the server using GET."
r = {
'CONTENT_LENGTH': None,
'CONTENT_TYPE': 'text/html; charset=utf-8',
'PATH_INFO': path,
'QUERY_STRING': urlencode(data),
'REQUEST_METHOD': 'GET',
}
r.update(extra)
return self.request(**r)
def post(self, path, data={}, content_type=MULTIPART_CONTENT, **extra):
"Request a response from the server using POST."
if content_type is MULTIPART_CONTENT:
post_data = encode_multipart(BOUNDARY, data)
else:
post_data = data
r = {
'CONTENT_LENGTH': len(post_data),
'CONTENT_TYPE': content_type,
'PATH_INFO': path,
'REQUEST_METHOD': 'POST',
'wsgi.input': StringIO(post_data),
}
r.update(extra)
return self.request(**r)
def login(self, **credentials):
"""Set the Client to appear as if it has sucessfully logged into a site.
Returns True if login is possible; False if the provided credentials
are incorrect, or if the Sessions framework is not available.
"""
user = authenticate(**credentials)
if user and 'django.contrib.sessions' in settings.INSTALLED_APPS:
obj = Session.objects.get_new_session_object()
# Create a fake request to store login details
request = HttpRequest()
request.session = SessionWrapper(obj.session_key)
login(request, user)
# Set the cookie to represent the session
self.cookies[settings.SESSION_COOKIE_NAME] = obj.session_key
self.cookies[settings.SESSION_COOKIE_NAME]['max-age'] = None
self.cookies[settings.SESSION_COOKIE_NAME]['path'] = '/'
self.cookies[settings.SESSION_COOKIE_NAME]['domain'] = settings.SESSION_COOKIE_DOMAIN
self.cookies[settings.SESSION_COOKIE_NAME]['secure'] = settings.SESSION_COOKIE_SECURE or None
self.cookies[settings.SESSION_COOKIE_NAME]['expires'] = None
# Set the session values
Session.objects.save(obj.session_key, request.session._session,
datetime.datetime.now() + datetime.timedelta(seconds=settings.SESSION_COOKIE_AGE))
return True
else:
return False
| {
"content_hash": "a81b93a52bf9110dcd93cf4fc87b24ee",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 106,
"avg_line_length": 36.66535433070866,
"alnum_prop": 0.5999140985718887,
"repo_name": "jonaustin/advisoryscan",
"id": "c3110f02eca163184ca08c4e45fa7482916596cd",
"size": "9313",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/django/test/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "63725"
},
{
"name": "JavaScript",
"bytes": "159708"
},
{
"name": "Perl",
"bytes": "89271"
},
{
"name": "Python",
"bytes": "2194026"
},
{
"name": "Shell",
"bytes": "3612"
}
],
"symlink_target": ""
} |
import collections
##__________________________________________________________________||
EventBuilderConfig = collections.namedtuple(
'EventBuilderConfig',
'base component'
)
# base is for roottree.EventBuilderConfig
##__________________________________________________________________||
| {
"content_hash": "0bdcaf36804f3ea83723569c265e2532",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 70,
"avg_line_length": 27.272727272727273,
"alnum_prop": 0.4066666666666667,
"repo_name": "TaiSakuma/AlphaTwirl",
"id": "3f114375473b8a164c0fc492aafef7d3f5431da9",
"size": "406",
"binary": false,
"copies": "1",
"ref": "refs/heads/v0.9.x",
"path": "alphatwirl/heppyresult/EventBuilderConfig.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "562011"
},
{
"name": "R",
"bytes": "1222"
}
],
"symlink_target": ""
} |
import unittest
from flask import url_for
from yapper import create_app, db
from yapper.blueprints.user.models import Role, User
from yapper.blueprints.blog.models import Post
class LoginTestCase(unittest.TestCase):
def setUp(self):
self.app = create_app('test')
self.app.config['CSRF_ENABLED'] = False
self.client = self.app.test_client()
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
Role.insert_roles()
role = Role.query.filter_by(name='user').first()
user = User(
name='Test User',
email='[email protected]',
password='testpass',
status=True,
role=role
)
db.session.add(user)
db.session.commit()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def login(self, e, p):
return self.client.post(url_for('user.login'), data=dict(
email=e,
password=p
), follow_redirects=True)
def logout(self):
return self.client.get(url_for('user.logout'), follow_redirects=True)
def test_empty_db(self):
p = Post.query.all()
        assert len(p) == 0
def test_can_login(self):
rv = self.login('[email protected]', 'testpass')
assert 'Test User' in rv.data
rv = self.logout()
assert 'You have been logged out' in rv.data
rv = self.login('[email protected]', 'po')
assert 'Invalid combination' in rv.data
def test_add_post(self):
self.login('[email protected]', 'testpass')
rv = self.client.post(url_for('blog.add'), data=dict(
title='Title',
description='Desc',
body='## hello there'
), follow_redirects=True)
assert '<h2>hello there</h2>' in rv.data
rv = self.client.get(
url_for(
'blog.get_post_by_slug',
slug='some-slug-that-is-not-there'))
assert rv.status_code == 404
| {
"content_hash": "5c686ec4245d42cfc448f29df59391c8",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 77,
"avg_line_length": 29.884057971014492,
"alnum_prop": 0.5645004849660524,
"repo_name": "brijeshb42/yapper",
"id": "dd38f2eb8399be0e6290bbf4e85d04e6694a1d10",
"size": "2062",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/AppTest/test_login.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17517"
},
{
"name": "HTML",
"bytes": "9569"
},
{
"name": "JavaScript",
"bytes": "11734"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "53647"
},
{
"name": "Shell",
"bytes": "269"
}
],
"symlink_target": ""
} |
from django.contrib import admin
admin.site.site_header = 'Sysgate Admin'
admin.site.site_title = 'Sysgate'
admin.site.index_title = 'Admin'
| {
"content_hash": "7bf60a5d0ed32f263c422fb0d7ae197f",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 40,
"avg_line_length": 28.4,
"alnum_prop": 0.7605633802816901,
"repo_name": "klashxx/PyConES2017",
"id": "bbf811ae10ef899dc3a3a3a8f7f0beb02c3babb7",
"size": "142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/sysgate/apps/core/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "21077"
},
{
"name": "HTML",
"bytes": "19432"
},
{
"name": "Python",
"bytes": "25071"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
# Code snippet from RTC-Tools, copyright Stichting Deltares, originally under the terms of the GPL
# version 3. Relicensed with permission.
class AliasRelation:
def __init__(self):
self._aliases = {}
self._canonical_variables_map = OrderedDict()
self._canonical_variables = set()
def add(self, a, b):
# Construct aliases (a set of equivalent variables)
aliases = self.aliases(a)
inverted_aliases = self.aliases(self.__toggle_sign(a))
aliases |= self.aliases(b)
inverted_aliases |= self.aliases(self.__toggle_sign(b))
for v in aliases:
self._aliases[self.__toggle_sign(v)] = inverted_aliases
self._aliases[v] = aliases
# Get the canonical names and signs
canonical_a, sign_a = self.canonical_signed(a)
canonical_b, _ = self.canonical_signed(b)
# Update _canonical_variables with new canonical var and remove old ones
self._canonical_variables.add(canonical_a)
self._canonical_variables.discard(canonical_b)
for v in aliases:
self._canonical_variables_map[v] = (canonical_a, sign_a)
self._canonical_variables_map[self.__toggle_sign(v)] = (canonical_a, -sign_a)
def __toggle_sign(self, v):
if self.__is_negative(v):
return v[1:]
else:
return '-' + v
@staticmethod
def __is_negative(v):
return True if v[0] == '-' else False
def aliases(self, a):
if a in self._aliases:
return self._aliases[a]
else:
return {a}
def canonical_signed(self, a):
if a in self._canonical_variables_map:
return self._canonical_variables_map[a]
else:
if self.__is_negative(a):
return a[1:], -1
else:
return a, 1
@property
def canonical_variables(self):
return self._canonical_variables
def __iter__(self):
# Note that order is not guaranteed, because we are looping over a set.
for canonical_variable in self._canonical_variables:
aliases = self.aliases(canonical_variable).copy()
aliases.discard(canonical_variable)
yield canonical_variable, aliases
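# Hedged usage sketch (not part of the original module): 'a', 'b' and '-c' end up
# in one alias set with 'a' (the first canonical name) as the canonical variable,
# so 'c' resolves to minus the canonical variable.
def _example_alias_relation():
    relation = AliasRelation()
    relation.add('a', 'b')
    relation.add('a', '-c')
    return relation.canonical_signed('c')   # -> ('a', -1)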
| {
"content_hash": "3723cf1b7237dce21c4ae357f809917c",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 98,
"avg_line_length": 33.17142857142857,
"alnum_prop": 0.5968992248062015,
"repo_name": "jgoppert/pymola",
"id": "7a05e4162a06915486873dc545216716ac566b4d",
"size": "2322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pymoca/backends/casadi/alias_relation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ANTLR",
"bytes": "16090"
},
{
"name": "Jupyter Notebook",
"bytes": "221882"
},
{
"name": "Modelica",
"bytes": "11854"
},
{
"name": "Python",
"bytes": "704971"
},
{
"name": "Shell",
"bytes": "789"
}
],
"symlink_target": ""
} |
from datetime import datetime, timedelta, timezone
import json
from django.test import TestCase, Client
from django.contrib.auth.models import User
from binder import history
from binder.history import Change, Changeset
from .testapp.models import Animal, Caretaker
class HistoryTest(TestCase):
def setUp(self):
super().setUp()
u = User(username='testuser', is_active=True, is_superuser=True)
u.set_password('test')
u.save()
self.client = Client()
r = self.client.login(username='testuser', password='test')
self.assertTrue(r)
# Zoo has no history, Animal does
def test_model_without_history_does_not_create_changes_on_creation(self):
model_data = {
'name': 'Artis',
}
response = self.client.post('/zoo/', data=json.dumps(model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(0, Changeset.objects.count())
self.assertEqual(0, Change.objects.count())
def test_model_with_history_creates_changes_on_creation(self):
model_data = {
'name': 'Daffy Duck',
}
response = self.client.post('/animal/', data=json.dumps(model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(1, Changeset.objects.count())
cs = Changeset.objects.get()
self.assertEqual('testuser', cs.user.username)
self.assertAlmostEqual(datetime.now(tz=timezone.utc), cs.date, delta=timedelta(seconds=1))
self.assertEqual(6, Change.objects.count())
self.assertEqual(1, Change.objects.filter(changeset=cs, model='Animal', field='name', before='null', after='"Daffy Duck"').count())
self.assertEqual(1, Change.objects.filter(changeset=cs, model='Animal', field='id', before='null', after=Animal.objects.get().id).count())
self.assertEqual(1, Change.objects.filter(changeset=cs, model='Animal', field='caretaker', before='null', after='null').count())
self.assertEqual(1, Change.objects.filter(changeset=cs, model='Animal', field='zoo', before='null', after='null').count())
self.assertEqual(1, Change.objects.filter(changeset=cs, model='Animal', field='zoo_of_birth', before='null', after='null').count())
self.assertEqual(1, Change.objects.filter(changeset=cs, model='Animal', field='deleted', before='null', after='false').count())
def test_model_with_history_creates_changes_on_update_but_only_for_changed_fields(self):
daffy = Animal(name='Daffy Duck')
daffy.save()
# Model changes outside the HTTP API aren't recorded (should they be?)
self.assertEqual(0, Changeset.objects.count())
self.assertEqual(0, Change.objects.count())
model_data = {
'name': 'Daffy Duck',
}
response = self.client.patch('/animal/%d/' % (daffy.pk,), data=json.dumps(model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
# No actual change was done
self.assertEqual(0, Changeset.objects.count())
self.assertEqual(0, Change.objects.count())
model_data = {
'name': 'Daffy THE Duck',
}
response = self.client.patch('/animal/%d/' % (daffy.pk,), data=json.dumps(model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(1, Changeset.objects.count())
cs = Changeset.objects.get()
self.assertEqual('testuser', cs.user.username)
self.assertAlmostEqual(datetime.now(tz=timezone.utc), cs.date, delta=timedelta(seconds=1))
self.assertEqual(1, Change.objects.count())
self.assertEqual(1, Change.objects.filter(changeset=cs, model='Animal', field='name', before='"Daffy Duck"', after='"Daffy THE Duck"').count())
def test_model_with_related_history_model_creates_changes_on_the_same_changeset(self):
mickey = Caretaker(name='Mickey')
mickey.save()
pluto = Animal(name='Pluto')
pluto.save()
# Model changes outside the HTTP API aren't recorded (should they be?)
self.assertEqual(0, Changeset.objects.count())
self.assertEqual(0, Change.objects.count())
model_data = {
'data': [{
'id': pluto.id,
'name': 'Pluto the dog',
}],
'with': {
'caretaker': [{
'id': mickey.id,
'name': 'Mickey Mouse',
}],
},
}
response = self.client.put('/animal/', data=json.dumps(model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(1, Changeset.objects.count())
cs = Changeset.objects.get()
self.assertEqual('testuser', cs.user.username)
self.assertAlmostEqual(datetime.now(tz=timezone.utc), cs.date, delta=timedelta(seconds=1))
self.assertEqual(2, Change.objects.count())
self.assertEqual(1, Change.objects.filter(changeset=cs, model='Animal', field='name', before='"Pluto"', after='"Pluto the dog"').count())
self.assertEqual(1, Change.objects.filter(changeset=cs, model='Caretaker', field='name', before='"Mickey"', after='"Mickey Mouse"').count())
def test_manual_history_direct_success(self):
history.start(source='tests')
# No history yet
self.assertEqual(0, Changeset.objects.count())
self.assertEqual(0, Change.objects.count())
mickey = Caretaker(name='Mickey')
mickey.save()
# Still no history
self.assertEqual(0, Changeset.objects.count())
self.assertEqual(0, Change.objects.count())
history.commit()
# Aww yeah
self.assertEqual(1, Changeset.objects.count())
self.assertEqual(5, Change.objects.count())
def test_manual_history_direct_abort(self):
history.start(source='tests')
# No history yet
self.assertEqual(0, Changeset.objects.count())
self.assertEqual(0, Change.objects.count())
mickey = Caretaker(name='Mickey')
mickey.save()
# Still no history
self.assertEqual(0, Changeset.objects.count())
self.assertEqual(0, Change.objects.count())
history.abort()
# Aborted, so still no history
self.assertEqual(0, Changeset.objects.count())
self.assertEqual(0, Change.objects.count())
def test_manual_history_contextmanager_success(self):
with history.atomic(source='tests'):
# No history yet
self.assertEqual(0, Changeset.objects.count())
self.assertEqual(0, Change.objects.count())
mickey = Caretaker(name='Mickey')
mickey.save()
# Still no history
self.assertEqual(0, Changeset.objects.count())
self.assertEqual(0, Change.objects.count())
# Aww yeah
self.assertEqual(1, Changeset.objects.count())
self.assertEqual(5, Change.objects.count())
def test_manual_history_contextmanager_abort(self):
class TestException(Exception):
pass
try:
with history.atomic(source='tests'):
# No history yet
self.assertEqual(0, Changeset.objects.count())
self.assertEqual(0, Change.objects.count())
mickey = Caretaker(name='Mickey')
mickey.save()
# Still no history
self.assertEqual(0, Changeset.objects.count())
self.assertEqual(0, Change.objects.count())
raise TestException('oeps')
except TestException:
pass
# Aborted, so still no history
self.assertEqual(0, Changeset.objects.count())
self.assertEqual(0, Change.objects.count())
| {
"content_hash": "ce0a8f12c0013229de9df4db62b604ff",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 145,
"avg_line_length": 32.21296296296296,
"alnum_prop": 0.7099741304972693,
"repo_name": "CodeYellowBV/django-binder",
"id": "6957ace3095434f13107d02fcb4f9ccc87a4638c",
"size": "6958",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_history.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "85"
},
{
"name": "Python",
"bytes": "578095"
}
],
"symlink_target": ""
} |
from heatclient.v1.stacks import Stack
from heatclient.v1.stacks import StackManager
from mock import MagicMock
import testscenarios
from testscenarios.scenarios import multiply_scenarios
import testtools
load_tests = testscenarios.load_tests_apply_scenarios
def mock_stack(manager, stack_name, stack_id):
return Stack(manager, {
"id": stack_id,
"stack_name": stack_name,
"links": [{
"href": "http://192.0.2.1:8004/v1/1234/stacks/%s/%s" % (
stack_name, stack_id),
"rel": "self"}],
"description": "No description",
"stack_status_reason": "Stack create completed successfully",
"creation_time": "2013-08-04T20:57:55Z",
"updated_time": "2013-08-04T20:57:55Z",
"stack_status": "CREATE_COMPLETE"
})
class StackStatusActionTest(testtools.TestCase):
scenarios = multiply_scenarios([
('CREATE', dict(action='CREATE')),
('DELETE', dict(action='DELETE')),
('UPDATE', dict(action='UPDATE')),
('ROLLBACK', dict(action='ROLLBACK')),
('SUSPEND', dict(action='SUSPEND')),
('RESUME', dict(action='RESUME'))
], [
('IN_PROGRESS', dict(status='IN_PROGRESS')),
('FAILED', dict(status='FAILED')),
('COMPLETE', dict(status='COMPLETE'))
])
def test_status_action(self):
stack_status = '%s_%s' % (self.action, self.status)
stack = mock_stack(None, 'stack_1', 'abcd1234')
stack.stack_status = stack_status
self.assertEqual(self.action, stack.action)
self.assertEqual(self.status, stack.status)
class StackIdentifierTest(testtools.TestCase):
def test_stack_identifier(self):
stack = mock_stack(None, 'the_stack', 'abcd1234')
self.assertEqual('the_stack/abcd1234', stack.identifier)
class StackOperationsTest(testtools.TestCase):
def test_delete_stack(self):
manager = MagicMock()
stack = mock_stack(manager, 'the_stack', 'abcd1234')
stack.delete()
manager.delete.assert_called_once_with('the_stack/abcd1234')
def test_get_stack(self):
manager = MagicMock()
stack = mock_stack(manager, 'the_stack', 'abcd1234')
stack.get()
manager.get.assert_called_once_with('the_stack/abcd1234')
def test_update_stack(self):
manager = MagicMock()
stack = mock_stack(manager, 'the_stack', 'abcd1234')
stack.update()
manager.update.assert_called_once_with('the_stack/abcd1234')
def test_create_stack(self):
manager = MagicMock()
stack = mock_stack(manager, 'the_stack', 'abcd1234')
stack = stack.create()
manager.create.assert_called_once_with('the_stack/abcd1234')
class StackManagerNoPaginationTest(testtools.TestCase):
scenarios = [
('total_0', dict(total=0)),
('total_1', dict(total=1)),
('total_9', dict(total=9)),
('total_10', dict(total=10)),
('total_11', dict(total=11)),
('total_19', dict(total=19)),
('total_20', dict(total=20)),
('total_21', dict(total=21)),
('total_49', dict(total=49)),
('total_50', dict(total=50)),
('total_51', dict(total=51)),
('total_95', dict(total=95)),
]
# absolute limit for results returned
limit = 50
def mock_manager(self):
manager = StackManager(None)
manager._list = MagicMock()
def mock_list(*args, **kwargs):
def results():
for i in range(0, self.total):
stack_name = 'stack_%s' % (i + 1)
stack_id = 'abcd1234-%s' % (i + 1)
yield mock_stack(manager, stack_name, stack_id)
return list(results())
manager._list.side_effect = mock_list
return manager
def test_stack_list_no_pagination(self):
manager = self.mock_manager()
results = list(manager.list(limit=self.limit))
manager._list.assert_called_once_with(
'/stacks?', 'stacks')
last_result = min(self.limit, self.total)
# paginate is not specified, so the total
# results (up to the limit) is always returned
self.assertEqual(last_result, len(results))
if last_result > 0:
self.assertEqual('stack_1', results[0].stack_name)
self.assertEqual('stack_%s' % last_result, results[-1].stack_name)
def test_stack_list_no_pagination_no_limit(self):
manager = self.mock_manager()
results = list(manager.list())
manager._list.assert_called_once_with(
'/stacks?', 'stacks')
# paginate is not specified, so the total
# results is always returned
self.assertEqual(self.total, len(results))
if self.total > 0:
self.assertEqual('stack_1', results[0].stack_name)
self.assertEqual('stack_%s' % self.total, results[-1].stack_name)
class StackManagerPaginationTest(testtools.TestCase):
scenarios = [
('0_offset_0', dict(
page_size=10,
offset=0,
total=0,
results=((0, 0),)
)),
('1_offset_0', dict(
page_size=10,
offset=0,
total=1,
results=((0, 1),)
)),
('9_offset_0', dict(
page_size=10,
offset=0,
total=9,
results=((0, 9),)
)),
('10_offset_0', dict(
page_size=10,
offset=0,
total=10,
results=((0, 10), (10, 10))
)),
('11_offset_0', dict(
page_size=10,
offset=0,
total=11,
results=((0, 10), (10, 11))
)),
('11_offset_10', dict(
page_size=10,
offset=10,
total=11,
results=((10, 11),)
)),
('19_offset_10', dict(
page_size=10,
offset=10,
total=19,
results=((10, 19),)
)),
('20_offset_10', dict(
page_size=10,
offset=10,
total=20,
results=((10, 20), (20, 20))
)),
('21_offset_10', dict(
page_size=10,
offset=10,
total=21,
results=((10, 20), (20, 21))
)),
('21_offset_0', dict(
page_size=10,
offset=0,
total=21,
results=((0, 10), (10, 20), (20, 21))
)),
('21_offset_20', dict(
page_size=10,
offset=20,
total=21,
results=((20, 21),)
)),
('95_offset_90', dict(
page_size=10,
offset=90,
total=95,
results=((90, 95),)
)),
]
# absolute limit for results returned
limit = 50
def mock_manager(self):
manager = StackManager(None)
manager._list = MagicMock()
def mock_list(arg_url, arg_response_key):
try:
result = self.results[self.result_index]
except IndexError:
return
self.result_index = self.result_index + 1
offset = result[0]
url = '/stacks?'
url += 'limit=%s' % self.page_size
if offset > 0:
url += '&marker=abcd1234-%s' % offset
self.assertEqual(url, arg_url)
def results():
for i in range(*result):
stack_name = 'stack_%s' % (i + 1)
stack_id = 'abcd1234-%s' % (i + 1)
yield mock_stack(manager, stack_name, stack_id)
return list(results())
manager._list.side_effect = mock_list
return manager
def test_stack_list_pagination(self):
manager = self.mock_manager()
list_params = {
'page_size': self.page_size,
'limit': self.limit
}
if self.offset > 0:
marker = 'abcd1234-%s' % self.offset
list_params['marker'] = marker
self.result_index = 0
results = list(manager.list(**list_params))
# assert that the list method has been called enough times
self.assertEqual(len(self.results), self.result_index)
last_result = min(self.limit, self.total - self.offset)
# one or more list calls have been recomposed into a single list
self.assertEqual(last_result, len(results))
if last_result > 0:
self.assertEqual('stack_%s' % (self.offset + 1),
results[0].stack_name)
self.assertEqual('stack_%s' % (self.offset + last_result),
results[-1].stack_name)
| {
"content_hash": "bff2bf28c95083145b0ad95be1a4cb47",
"timestamp": "",
"source": "github",
"line_count": 288,
"max_line_length": 78,
"avg_line_length": 30.46527777777778,
"alnum_prop": 0.5257579211306131,
"repo_name": "larsks/python-heatclient",
"id": "d1920742df36e4b4d54d9f2d43bad372eff9c65c",
"size": "9393",
"binary": false,
"copies": "1",
"ref": "refs/heads/bug/1258622",
"path": "heatclient/tests/test_stacks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "190099"
},
{
"name": "Shell",
"bytes": "1123"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from sentry.testutils.cases import RuleTestCase
from sentry.rules.conditions.tagged_event import TaggedEventCondition, MatchType
class TaggedEventConditionTest(RuleTestCase):
rule_cls = TaggedEventCondition
def get_event(self):
event = self.event
event.data["tags"] = (
("logger", "sentry.example"),
("logger", "foo.bar"),
("notlogger", "sentry.other.example"),
("notlogger", "bar.foo.baz"),
)
return event
def test_render_label(self):
rule = self.get_rule(data={"match": MatchType.EQUAL, "key": u"\xc3", "value": u"\xc4"})
assert rule.render_label() == u"The event's tags match \xc3 equals \xc4"
def test_equals(self):
event = self.get_event()
rule = self.get_rule(
data={"match": MatchType.EQUAL, "key": "LOGGER", "value": "sentry.example"}
)
self.assertPasses(rule, event)
rule = self.get_rule(
data={"match": MatchType.EQUAL, "key": "logger", "value": "sentry.other.example"}
)
self.assertDoesNotPass(rule, event)
def test_does_not_equal(self):
event = self.get_event()
rule = self.get_rule(
data={"match": MatchType.NOT_EQUAL, "key": "logger", "value": "sentry.example"}
)
self.assertDoesNotPass(rule, event)
rule = self.get_rule(
data={"match": MatchType.NOT_EQUAL, "key": "logger", "value": "sentry.other.example"}
)
self.assertPasses(rule, event)
def test_starts_with(self):
event = self.get_event()
rule = self.get_rule(
data={"match": MatchType.STARTS_WITH, "key": "logger", "value": "sentry."}
)
self.assertPasses(rule, event)
rule = self.get_rule(
data={"match": MatchType.STARTS_WITH, "key": "logger", "value": "bar."}
)
self.assertDoesNotPass(rule, event)
def test_ends_with(self):
event = self.get_event()
rule = self.get_rule(
data={"match": MatchType.ENDS_WITH, "key": "logger", "value": ".example"}
)
self.assertPasses(rule, event)
rule = self.get_rule(data={"match": MatchType.ENDS_WITH, "key": "logger", "value": ".foo"})
self.assertDoesNotPass(rule, event)
def test_contains(self):
event = self.get_event()
rule = self.get_rule(data={"match": MatchType.CONTAINS, "key": "logger", "value": "sentry"})
self.assertPasses(rule, event)
rule = self.get_rule(
data={"match": MatchType.CONTAINS, "key": "logger", "value": "bar.foo"}
)
self.assertDoesNotPass(rule, event)
def test_does_not_contain(self):
event = self.get_event()
rule = self.get_rule(
data={"match": MatchType.NOT_CONTAINS, "key": "logger", "value": "sentry"}
)
self.assertDoesNotPass(rule, event)
rule = self.get_rule(
data={"match": MatchType.NOT_CONTAINS, "key": "logger", "value": "bar.foo"}
)
self.assertPasses(rule, event)
def test_is_set(self):
event = self.get_event()
rule = self.get_rule(data={"match": MatchType.IS_SET, "key": "logger"})
self.assertPasses(rule, event)
rule = self.get_rule(data={"match": MatchType.IS_SET, "key": "missing"})
self.assertDoesNotPass(rule, event)
def test_is_not_set(self):
event = self.get_event()
rule = self.get_rule(data={"match": MatchType.NOT_SET, "key": "logger"})
self.assertDoesNotPass(rule, event)
rule = self.get_rule(data={"match": MatchType.NOT_SET, "key": "missing"})
self.assertPasses(rule, event)
| {
"content_hash": "066d3b4ef34148b838aaaea40d0e7430",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 100,
"avg_line_length": 35.09345794392523,
"alnum_prop": 0.5762982689747004,
"repo_name": "beeftornado/sentry",
"id": "f955ea21398e06fd65f04b3b5be48f3c0b859400",
"size": "3755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sentry/rules/conditions/test_tagged_event.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "157195"
},
{
"name": "HTML",
"bytes": "197026"
},
{
"name": "JavaScript",
"bytes": "380379"
},
{
"name": "Makefile",
"bytes": "2832"
},
{
"name": "Python",
"bytes": "6473603"
}
],
"symlink_target": ""
} |
import itertools
import codegen_common
# ==============================================================================
# The template used to generate the basic operator tests.
op_template = '''
##############################################################################
class %(funcname)s_operator_%(typecode)s(unittest.TestCase):
"""Test for basic operator function.
"""
########################################################
def setUp(self):
"""Initialise.
"""
self.TypeCode = '%(typecode)s'
self.TestData = [97%(zeropad)s, 97%(zeropad)s, 97%(zeropad)s, 98%(zeropad)s, 99%(zeropad)s, 101%(zeropad)s, 101%(zeropad)s, 102%(zeropad)s, 95%(zeropad)s, 103%(zeropad)s]
self.zerofill = 0%(zeropad)s
self.data = array.array(self.TypeCode, self.TestData)
self.dataout = array.array(self.TypeCode, itertools.repeat(self.zerofill, len(self.data)))
# These are the compare operators to use when testing the function.
self.opvals = {
'<' : operator.lt,
'<=' : operator.le,
'==' : operator.eq,
'!=' : operator.ne,
'>=' : operator.ge,
'>' : operator.gt
}
########################################################
def %(pyfuncname)s(self, op, data, param, maxlen=0):
"""Emulate the test function.
"""
# If the maxlen parameter is used, trim the source data accordingly.
if maxlen > 0:
testdata = data[:maxlen]
else:
testdata = data
# Get the type of compare operation we want, and convert it into a
# function we can use as a predicate.
opfunc = self.opvals[op]
opval = lambda x: opfunc(x, param)
		# Perform the operation.
result = list(%(pyfunccall)s(opval, testdata))
copiedlength = len(result)
# Pad out with the same fill used for the output array, and return
# the number of items copied.
trimmed = result + list(itertools.repeat(self.zerofill, len(data) - len(result)))
return trimmed, copiedlength
########################################################
def test_operator_eq_01(self):
"""Test eq - Array code %(typecode)s.
"""
param = 97%(zeropad)s
result = arrayfunc.%(funcname)s('==', self.data, self.dataout, param)
expected, explength = self.%(pyfuncname)s('==', self.data, param)
# Check the test to make sure it is working as intended.
self.assertTrue((result > 0) and (result < len(self.data)))
self.assertEqual(result, explength)
for dataoutitem, expecteditem in zip(list(self.dataout), expected):
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_operator_gt_01(self):
"""Test gt - Array code %(typecode)s.
"""
param = 96%(zeropad)s
result = arrayfunc.%(funcname)s('>', self.data, self.dataout, param)
expected, explength = self.%(pyfuncname)s('>', self.data, param)
# Check the test to make sure it is working as intended.
self.assertTrue((result > 0) and (result < len(self.data)))
self.assertEqual(result, explength)
for dataoutitem, expecteditem in zip(list(self.dataout), expected):
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_operator_gte_01(self):
"""Test gte - Array code %(typecode)s.
"""
param = 96%(zeropad)s
result = arrayfunc.%(funcname)s('>=', self.data, self.dataout, param)
expected, explength = self.%(pyfuncname)s('>=', self.data, param)
# Check the test to make sure it is working as intended.
self.assertTrue((result > 0) and (result < len(self.data)))
self.assertEqual(result, explength)
for dataoutitem, expecteditem in zip(list(self.dataout), expected):
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_operator_lt_01(self):
"""Test lt - Array code %(typecode)s.
"""
param = 98%(zeropad)s
result = arrayfunc.%(funcname)s('<', self.data, self.dataout, param)
expected, explength = self.%(pyfuncname)s('<', self.data, param)
# Check the test to make sure it is working as intended.
self.assertTrue((result > 0) and (result < len(self.data)))
self.assertEqual(result, explength)
for dataoutitem, expecteditem in zip(list(self.dataout), expected):
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_operator_lte_01(self):
"""Test lte - Array code %(typecode)s.
"""
param = 98%(zeropad)s
result = arrayfunc.%(funcname)s('<=', self.data, self.dataout, param)
expected, explength = self.%(pyfuncname)s('<=', self.data, param)
# Check the test to make sure it is working as intended.
self.assertTrue((result > 0) and (result < len(self.data)))
self.assertEqual(result, explength)
for dataoutitem, expecteditem in zip(list(self.dataout), expected):
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_operator_ne_01(self):
"""Test ne - Array code %(typecode)s.
"""
param = 98%(zeropad)s
result = arrayfunc.%(funcname)s('!=', self.data, self.dataout, param)
expected, explength = self.%(pyfuncname)s('!=', self.data, param)
# Check the test to make sure it is working as intended.
self.assertTrue((result > 0) and (result < len(self.data)))
self.assertEqual(result, explength)
for dataoutitem, expecteditem in zip(list(self.dataout), expected):
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_operator_lim_01(self):
"""Test array limits - Array code %(typecode)s.
"""
param = 97%(zeropad)s
result = arrayfunc.%(funcname)s('==', self.data, self.dataout, param, maxlen=len(self.data)//2)
expected, explength = self.%(pyfuncname)s('==', self.data, param, maxlen=len(self.data)//2)
# Check the test to make sure it is working as intended.
self.assertTrue((result > 0) and (result < len(self.data)))
self.assertEqual(result, explength)
for dataoutitem, expecteditem in zip(list(self.dataout), expected):
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_operator_lim_02(self):
"""Test array limits - Array code %(typecode)s.
"""
param = 97%(zeropad)s
result = arrayfunc.%(funcname)s('==', self.data, self.dataout, param, maxlen=-1)
expected, explength = self.%(pyfuncname)s('==', self.data, param, maxlen=-1)
# Check the test to make sure it is working as intended.
self.assertTrue((result > 0) and (result < len(self.data)))
self.assertEqual(result, explength)
for dataoutitem, expecteditem in zip(list(self.dataout), expected):
self.assertEqual(dataoutitem, expecteditem)
##############################################################################
'''
# ==============================================================================
# The template used to generate the parameter tests.
param_template = '''
##############################################################################
class %(funcname)s_params_%(typecode)s(unittest.TestCase):
"""Test for basic parameter function.
"""
########################################################
def setUp(self):
"""Initialise.
"""
self.TypeCode = '%(typecode)s'
self.TestData = [97%(zeropad)s, 97%(zeropad)s, 97%(zeropad)s, 98%(zeropad)s, 99%(zeropad)s, 101%(zeropad)s, 101%(zeropad)s, 102%(zeropad)s, 102%(zeropad)s, 103%(zeropad)s]
self.zerofill = 0%(zeropad)s
self.data = array.array(self.TypeCode, self.TestData)
self.dataout = array.array(self.TypeCode, itertools.repeat(self.zerofill, len(self.data)))
# This is used in testing parameters.
self.dataempty = array.array(self.TypeCode)
########################################################
def test_param_no_params(self):
"""Test exception when no parameters passed - Array code %(typecode)s.
"""
with self.assertRaises(TypeError):
result = arrayfunc.%(funcname)s()
# Check that the exception raised corresponds to the native Python behaviour.
with self.assertRaises(TypeError):
result = %(pyfunccall)s()
########################################################
def test_param_one_params(self):
"""Test exception when one parameter passed - Array code %(typecode)s.
"""
with self.assertRaises(TypeError):
result = arrayfunc.%(funcname)s('==')
# Check that the exception raised corresponds to the native Python behaviour.
with self.assertRaises(TypeError):
result = %(pyfunccall)s(lambda x: x < 1)
########################################################
def test_param_two_params(self):
"""Test exception when two parameters passed - Array code %(typecode)s.
"""
with self.assertRaises(TypeError):
result = arrayfunc.%(funcname)s('==', self.data)
# Check that the exception raised corresponds to the native Python behaviour.
with self.assertRaises(TypeError):
result = %(pyfunccall)s(lambda x: x < 1)
########################################################
def test_param_three_params(self):
"""Test exception when three parameters passed - Array code %(typecode)s.
"""
with self.assertRaises(TypeError):
result = arrayfunc.%(funcname)s('==', self.data, self.dataout)
# Check that the exception raised corresponds to the native Python behaviour.
with self.assertRaises(TypeError):
result = %(pyfunccall)s(lambda x: x < 1)
########################################################
def test_param_six_params(self):
"""Test exception when too many (six) parameters passed - Array code %(typecode)s.
"""
param = 101%(zeropad)s
with self.assertRaises(TypeError):
result = arrayfunc.%(funcname)s('==', self.data, self.dataout, param, 3, maxlen=2)
# Check that the exception raised corresponds to the native Python behaviour.
with self.assertRaises(TypeError):
result = %(pyfunccall)s(lambda x: x < 1, [1, 2, 3, 4], 3)
########################################################
def test_param_invalid_keyword_params(self):
"""Test exception with invalid keyword parameters passed - Array code %(typecode)s.
"""
with self.assertRaises(TypeError):
result = arrayfunc.%(funcname)s('==', self.data, self.dataout, 100%(zeropad)s, xx=2)
# Check that the exception raised corresponds to the native Python behaviour.
with self.assertRaises(TypeError):
result = %(pyfunccall)s(lambda x: x < 1, [0, 1, 2, 3, 4], xx=2)
########################################################
def test_param_invalid_keyword_param_type(self):
"""Test exception with invalid keyword parameter type passed - Array code %(typecode)s.
"""
with self.assertRaises(TypeError):
result = arrayfunc.%(funcname)s('==', self.data, self.dataout, 100%(zeropad)s, maxlen='x')
# Check that the exception raised corresponds to the native Python behaviour.
with self.assertRaises(TypeError):
result = %(pyfunccall)s(lambda x: x < 1, [0, 1, 2, 3, 4], maxlen='x')
########################################################
def test_param_invalid_opcode_param_value(self):
"""Test exception with invalid first parameter value - Array code %(typecode)s.
"""
with self.assertRaises(ValueError):
result = arrayfunc.%(funcname)s('!', self.data, self.dataout, 100%(zeropad)s)
########################################################
def test_param_invalid_opcode_param_type(self):
"""Test exception with invalid first parameter type - Array code %(typecode)s.
"""
with self.assertRaises(TypeError):
result = arrayfunc.%(funcname)s(62, self.data, self.dataout, 100%(zeropad)s)
# Check that the exception raised corresponds to the native Python behaviour.
with self.assertRaises(TypeError):
result = list(%(pyfunccall)s('a', [0, 1, 2, 3, 4]))
########################################################
def test_param_invalid_input_array_param_value(self):
"""Test exception with invalid array input parameter value - Array code %(typecode)s.
"""
with self.assertRaises(TypeError):
result = arrayfunc.%(funcname)s('==', 99, self.dataout, 100%(zeropad)s)
# Check that the exception raised corresponds to the native Python behaviour.
with self.assertRaises(TypeError):
result = %(pyfunccall)s(lambda x: x < 1, 99)
########################################################
def test_param_invalid_output_array_param_value(self):
"""Test exception with invalid array output parameter type - Array code %(typecode)s.
"""
with self.assertRaises(TypeError):
result = arrayfunc.%(funcname)s('==', self.data, 99, 100%(zeropad)s)
# Check that the exception raised corresponds to the native Python behaviour.
with self.assertRaises(TypeError):
result = %(pyfunccall)s(lambda x: x < 1, 99)
########################################################
def test_param_invalid_input_array_param_length(self):
"""Test exception with empty input array parameter type - Array code %(typecode)s.
"""
with self.assertRaises(IndexError):
result = arrayfunc.%(funcname)s('==', self.dataempty, self.dataout, 100%(zeropad)s)
########################################################
def test_param_invalid_output_array_param_length(self):
"""Test exception with empty output array parameter type - Array code %(typecode)s.
"""
with self.assertRaises(IndexError):
result = arrayfunc.%(funcname)s('==', self.data, self.dataempty, 100%(zeropad)s)
########################################################
def test_param_invalid_array_param_type_01(self):
"""Test exception with invalid compare parameter type - Array code %(typecode)s.
"""
with self.assertRaises(TypeError):
result = arrayfunc.%(funcname)s('==', self.data, self.dataout, 'e')
# Check that the exception raised corresponds to the native Python behaviour.
with self.assertRaises(TypeError):
result = %(pyfunccall)s(lambda x: x < 1, 99)
########################################################
def test_param_invalid_array_param_type_02(self):
"""Test exception with invalid compare parameter type - Array code %(typecode)s.
"""
with self.assertRaises(TypeError):
result = arrayfunc.%(funcname)s('==', self.data, self.dataout, 100%(invalidzeropad)s)
# Check that the exception raised corresponds to the native Python behaviour.
with self.assertRaises(TypeError):
result = %(pyfunccall)s(lambda x: x < 1, 99)
##############################################################################
'''
# ==============================================================================
# Overflow testing. This can't be used with all types.
overflow_template = '''
##############################################################################
class %(funcname)s_paramovfl_%(typecode)s(unittest.TestCase):
"""Test for testing parameter overflow.
"""
########################################################
def setUp(self):
"""Initialise.
"""
self.TypeCode = '%(typecode)s'
self.zerofill = 0%(zeropad)s
# These values are used for testing parameter overflows.
self.dataovfl = array.array(self.TypeCode, range(97, 107))
self.dataoutovfl = array.array(self.TypeCode, itertools.repeat(self.zerofill, len(self.dataovfl)))
self.MinVal = arrayfunc.arraylimits.%(typecode)s_min
self.Maxval = arrayfunc.arraylimits.%(typecode)s_max
########################################################
def test_overflow_min(self):
"""Test parameter overflow min - Array code %(typecode)s.
"""
with self.assertRaises(OverflowError):
result = arrayfunc.%(funcname)s('==', self.dataovfl, self.dataoutovfl, self.MinVal %(overflowdec)s)
########################################################
def test_overflow_max(self):
"""Test parameter overflow max - Array code %(typecode)s.
"""
with self.assertRaises(OverflowError):
result = arrayfunc.%(funcname)s('==', self.dataovfl, self.dataoutovfl, self.Maxval %(overflowinc)s)
########################################################
def test_overflow_ok(self):
"""Test no overflow. These should not overflow - Array code %(typecode)s.
"""
result = arrayfunc.%(funcname)s('==', self.dataovfl, self.dataoutovfl, self.MinVal)
result = arrayfunc.%(funcname)s('==', self.dataovfl, self.dataoutovfl, self.Maxval)
##############################################################################
'''
# ==============================================================================
# The template used to generate the tests for nan, inf, -inf.
nonfinite_template = '''
##############################################################################
class %(funcname)s_nonfinite_%(typecode)s(unittest.TestCase):
"""Test for nan, inf, -inf.
"""
########################################################
def setUp(self):
"""Initialise.
"""
self.data = array.array('%(typecode)s', [100.0] * 10)
self.dataout = array.array('%(typecode)s', itertools.repeat(0.0, len(self.data)))
self.zerofill = 0.0
# These are the compare operators to use when testing the function.
self.opvals = {
'<' : operator.lt,
'<=' : operator.le,
'==' : operator.eq,
'!=' : operator.ne,
'>=' : operator.ge,
'>' : operator.gt
}
########################################################
def %(pyfuncname)s(self, op, data, param, maxlen=0):
"""Emulate the test function.
"""
# If the maxlen parameter is used, trim the source data accordingly.
if maxlen > 0:
testdata = data[:maxlen]
else:
testdata = data
# Get the type of compare operation we want, and convert it into a
# function we can use as a predicate.
opfunc = self.opvals[op]
opval = lambda x: opfunc(x, param)
		# Perform the operation.
result = list(%(pyfunccall)s(opval, testdata))
copiedlength = len(result)
# Pad out with the same fill used for the output array, and return
# the number of items copied.
trimmed = result + list(itertools.repeat(self.zerofill, len(data) - len(result)))
return trimmed, copiedlength
########################################################
def test_nonfinite_01(self):
"""Test for param of nan - Array code %(typecode)s.
"""
param = math.nan
result = arrayfunc.%(funcname)s('==', self.data, self.dataout, param)
expected, explength = self.%(pyfuncname)s('==', self.data, param)
self.assertEqual(result, explength)
for dataoutitem, expecteditem in zip(list(self.dataout), expected):
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_nonfinite_02(self):
"""Test for param of inf - Array code %(typecode)s.
"""
param = math.inf
result = arrayfunc.%(funcname)s('==', self.data, self.dataout, param)
expected, explength = self.%(pyfuncname)s('==', self.data, param)
self.assertEqual(result, explength)
for dataoutitem, expecteditem in zip(list(self.dataout), expected):
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_nonfinite_03(self):
"""Test for param of -inf - Array code %(typecode)s.
"""
param = -math.inf
result = arrayfunc.%(funcname)s('==', self.data, self.dataout, param)
expected, explength = self.%(pyfuncname)s('==', self.data, param)
self.assertEqual(result, explength)
for dataoutitem, expecteditem in zip(list(self.dataout), expected):
self.assertEqual(dataoutitem, expecteditem)
########################################################
def test_nonfinite_04(self):
"""Test for maxlen of nan - Array code %(typecode)s.
"""
with self.assertRaises(TypeError):
result = arrayfunc.%(funcname)s('==', self.data, self.dataout, 100.0, maxlen=math.nan)
########################################################
def test_nonfinite_05(self):
"""Test for maxlen of inf - Array code %(typecode)s.
"""
with self.assertRaises(TypeError):
result = arrayfunc.%(funcname)s('==', self.data, self.dataout, 100.0, maxlen=math.inf)
########################################################
def test_nonfinite_06(self):
"""Test for maxlen of -inf - Array code %(typecode)s.
"""
with self.assertRaises(TypeError):
result = arrayfunc.%(funcname)s('==', self.data, self.dataout, 100.0, maxlen=-math.inf)
##############################################################################
'''
# ==============================================================================
# ==============================================================================
# The names of the functions which implement the equivalent Python operations
# for test purposes.
pyfuncname = {'afilter' : 'PyFilter',
'dropwhile' : 'DropWhile',
'takewhile' : 'TakeWhile'
}
# The Python native version function call.
pyfunccall = {'afilter' : 'filter',
'dropwhile' : 'itertools.dropwhile',
'takewhile' : 'itertools.takewhile'
}
# ==============================================================================
# This defines the module name.
modulename = 'arrayfunc'
# Import the array module for testing.
arrayimport = 'import array'
# The functions which are implemented by this program.
completefuncnames = ('afilter', 'dropwhile', 'takewhile')
# ==============================================================================
# Output the unit tests for each of the functions implemented by this program.
for funcname in completefuncnames:
filenamebase = 'test_' + funcname
filename = filenamebase + '.py'
headerdate = codegen_common.FormatHeaderData(filenamebase, '18-Jun-2014', funcname)
# Add additional header data.
headerdate['modulename'] = modulename
headerdate['arrayimport'] = arrayimport
with open(filename, 'w') as f:
# The copyright header.
f.write(codegen_common.HeaderTemplate % headerdate)
#####
# Basic tests.
# Check each array type.
for arraycode in codegen_common.arraycodes:
if arraycode in codegen_common.floatarrays:
zeropad = '.0'
invalidzeropad = ''
overflowinc = '* 1.1'
overflowdec = '* 1.1'
else:
zeropad = ''
invalidzeropad = '.5'
overflowinc = '+ 1'
overflowdec = '- 1'
opdata = {'funcname' : funcname,
'typecode' : arraycode,
'zeropad' : zeropad,
'invalidzeropad' : invalidzeropad,
'pyfuncname' : pyfuncname[funcname],
'overflowinc' : overflowinc,
'overflowdec' : overflowdec,
'pyfunccall' : pyfunccall[funcname]
}
f.write(op_template % opdata)
f.write(param_template % opdata)
# Parameter overflow tests do not work with all array types.
if arraycode not in ('L', 'Q', 'd'):
f.write(overflow_template % opdata)
		# Non-finite parameter tests for floating point arrays.
for arraycode in codegen_common.floatarrays:
opdata = {'funcname' : funcname,
'pyfuncname' : pyfuncname[funcname],
'typecode' : arraycode,
'pyfunccall' : pyfunccall[funcname]
}
f.write(nonfinite_template % opdata)
#####
# The code which initiates the unit test.
f.write(codegen_common.testendtemplate % {'funcname' : funcname, 'testprefix' : 'af'})
# ==============================================================================
| {
"content_hash": "82c433680df8e17a429cac42011bed88",
"timestamp": "",
"source": "github",
"line_count": 671,
"max_line_length": 173,
"avg_line_length": 34.32637853949329,
"alnum_prop": 0.582989623583554,
"repo_name": "m1griffin/arrayfunc",
"id": "711ae8765dead38e3e378a5cebdfa995168a15ff",
"size": "24159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codegen/droptakefilter_testgen.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3084"
},
{
"name": "C",
"bytes": "6063930"
},
{
"name": "Python",
"bytes": "42119174"
},
{
"name": "Shell",
"bytes": "4004"
}
],
"symlink_target": ""
} |
import re
from setuptools import setup, find_packages
# import multiprocessing to avoid this bug (http://bugs.python.org/issue15881#msg170215)
import multiprocessing
assert multiprocessing
def get_version():
"""
Extracts the version number from the version.py file.
"""
VERSION_FILE = 'entity/version.py'
mo = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', open(VERSION_FILE, 'rt').read(), re.M)
if mo:
return mo.group(1)
else:
raise RuntimeError('Unable to find version string in {0}.'.format(VERSION_FILE))
def get_lines(file_path):
return open(file_path, 'r').read().split('\n')
install_requires = get_lines('requirements/requirements.txt')
tests_require = get_lines('requirements/requirements-testing.txt')
setup(
name='django-entity',
version=get_version(),
description='Entity relationship management for Django',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
url='http://github.com/ambitioninc/django-entity/',
author='Wes Kendall',
author_email='[email protected]',
packages=find_packages(),
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Framework :: Django',
],
install_requires=install_requires,
tests_require=tests_require,
extras_require={'dev': tests_require},
test_suite='run_tests.run',
include_package_data=True,
zip_safe=False,
)
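# Note (an assumption about typical usage, not from the original file): since
# the 'dev' extra mirrors tests_require, a development install would usually
# look something like ``pip install -e .[dev]``.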
| {
"content_hash": "f70c7a4bcd5c4a68c6d63ae9aa48afb6",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 96,
"avg_line_length": 31.436363636363637,
"alnum_prop": 0.6541353383458647,
"repo_name": "ambitioninc/django-entity",
"id": "5a067ea3e719fe1a9e42f634289ad705f24f827c",
"size": "1729",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "170632"
}
],
"symlink_target": ""
} |
from . import db
class ActivitySkill(db.Model):
item_id = db.Column(db.Integer, db.ForeignKey('item.id'), primary_key=True)
activity = db.Column(db.Integer, primary_key=True, autoincrement=False)
skill_id = db.Column(db.Integer, db.ForeignKey('item.id'), primary_key=True)
level = db.Column(db.Integer, nullable=True)
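# Illustrative sketch (not part of the original module): with Flask-SQLAlchemy
# the requirements for one blueprint activity could be read through the
# standard ``Model.query`` attribute. The helper below is hypothetical and
# assumes an active application context.
def example_required_skills(item_id, activity):
    """Return (skill_id, level) pairs required for the given activity."""
    rows = ActivitySkill.query.filter_by(item_id=item_id, activity=activity)
    return [(row.skill_id, row.level) for row in rows]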
| {
"content_hash": "b20be2d7c399be82f563f0d6d61da8ed",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 80,
"avg_line_length": 37.44444444444444,
"alnum_prop": 0.7091988130563798,
"repo_name": "Kyria/LazyBlacksmith",
"id": "1d9f73c9232219ca453f6b73a542a2e37b588227",
"size": "363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lazyblacksmith/models/sde/activityskill.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "2005"
},
{
"name": "HTML",
"bytes": "219613"
},
{
"name": "JavaScript",
"bytes": "402713"
},
{
"name": "Mako",
"bytes": "557"
},
{
"name": "Python",
"bytes": "192854"
},
{
"name": "SCSS",
"bytes": "226990"
},
{
"name": "Shell",
"bytes": "1707"
}
],
"symlink_target": ""
} |
import rest_framework_jwt.views
from django.conf.urls import url, include
urlpatterns = [
# login for browsable API
url(r"^api-auth/", include("rest_framework.urls")),
# token auth
url(r"^token-auth/$", rest_framework_jwt.views.obtain_jwt_token),
url(r"^token-refresh/$", rest_framework_jwt.views.refresh_jwt_token),
url(r"^token-verify/$", rest_framework_jwt.views.verify_jwt_token),
]
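# Illustrative note (an assumption, not part of the original module): once the
# project urlconf includes these routes under some prefix, a JWT can be
# obtained and refreshed roughly like this (hypothetical host and credentials):
#
#   curl -X POST -d "username=alice&password=secret" https://example.com/token-auth/
#   curl -X POST -d "token=<jwt>" https://example.com/token-refresh/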
| {
"content_hash": "eb56a6f25838bfb7637e46335a0b08f2",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 73,
"avg_line_length": 37.45454545454545,
"alnum_prop": 0.6941747572815534,
"repo_name": "geometalab/osmaxx",
"id": "45f770b06b8bb7da1da3170cd501512e6e8d1b31",
"size": "412",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "osmaxx/rest_api/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "21768"
},
{
"name": "Dockerfile",
"bytes": "7740"
},
{
"name": "HTML",
"bytes": "280740"
},
{
"name": "JavaScript",
"bytes": "264630"
},
{
"name": "Jinja",
"bytes": "6869531"
},
{
"name": "Lua",
"bytes": "5473"
},
{
"name": "Makefile",
"bytes": "4873"
},
{
"name": "NSIS",
"bytes": "5370"
},
{
"name": "Python",
"bytes": "544979"
},
{
"name": "Roff",
"bytes": "1233"
},
{
"name": "Shell",
"bytes": "9501"
}
],
"symlink_target": ""
} |
"""ML-ENSEMBLE
author: Sebastian Flennerhag
:copyright: 2017-2018
:licence: MIT
"""
from __future__ import division
import os
import numpy as np
import sysconfig
import subprocess
from mlens import config
from mlens.utils import utils
from mlens.utils.exceptions import ParallelProcessingError
from time import sleep
try:
from time import perf_counter as time
except ImportError:
from time import time
try:
from contextlib import redirect_stdout, redirect_stderr
except ImportError:
from mlens.externals.fixes import redirect as redirect_stdout
redirect_stderr = redirect_stdout
try:
import psutil
except ImportError:
psutil = None
__version__ = sysconfig.get_python_version()
# An object to pickle
d = {'entry1': 'test', 'entry2': 'also_test'}
class Logger(object):
"""Temporary class redirect print messages to a python object."""
def __init__(self):
self.log = []
def write(self, msg):
"""Write a printed message to log"""
self.log.append(msg)
def test_print_time():
"""[Utils] print_time: messages looks as expected."""
logger = Logger()
# Initiate a time interval
t0 = time()
sleep(1.3)
    # Record the print_time message in the logger
utils.print_time(t0, message='test', file=logger)
assert logger.log[0][:15] == 'test | 00:00:01'
def test_safe_print():
"""[Utils] safe_print: prints correctly."""
l = Logger()
utils.safe_print('test', file=l)
assert l.log[0] == 'test'
def test_safe_print_string():
"""[Utils] safe_print: accepts flush and stream name as string."""
with open(os.devnull, 'w') as f, redirect_stdout(f):
utils.safe_print('test', flush=True, file="stdout")
def test_recorder():
"""[Utils] _recorder: test subprocess recording function."""
if psutil is not None and not __version__.startswith('2.'):
l = Logger()
pid = os.getpid()
with redirect_stdout(l):
utils._recorder(pid, 0.2, 0.1)
entries = ''.join(l.log).split('\n')
if entries[-1] == '':
entries = entries[:-1]
assert len(entries) == 2
assert len(entries[0].split(',')) == 3
def test_cm():
"""[Utils] CMLog: test logging."""
if psutil is not None and not __version__.startswith('2.') :
cm = utils.CMLog(verbose=True)
with open(os.devnull, 'w') as f, redirect_stdout(f):
cm.monitor(0.3)
while not hasattr(cm, 'cpu'):
sleep(0.3)
cm.collect()
assert len(cm.cpu) == 3
assert len(cm.rss) == 3
assert len(cm.vms) == 3
# Check that it overwrites
with open(os.devnull, 'w') as f, redirect_stdout(f):
cm.monitor(0.2)
while not hasattr(cm, 'cpu'):
sleep(0.2)
cm.collect()
assert len(cm.cpu) == 2
assert len(cm.rss) == 2
assert len(cm.vms) == 2
def test_cm_exception():
"""[Utils] CMLog: test collecting un-monitored returns None."""
if psutil is not None and not __version__.startswith('2.'):
cm = utils.CMLog(verbose=False)
with open(os.devnull, 'w') as f, redirect_stdout(f):
out = cm.collect()
assert out is None
def test_pickle():
"""[Utils] Check that pickling a standard object works."""
utils.pickle_save(d, 'd')
test = utils.pickle_load('d')
subprocess.check_call(['rm', 'd.pkl'])
assert isinstance(d, dict)
assert test['entry1'] == 'test'
assert test['entry2'] == 'also_test'
def test_load():
"""[Utils] Check that load handles exceptions gracefully"""
config.set_ivals(0.1, 0.1)
with open(os.devnull, 'w') as f, redirect_stderr(f):
np.testing.assert_raises(
ParallelProcessingError,
utils.load, os.path.join(os.getcwd(), 'nonexist'))
| {
"content_hash": "f51361593e299240c6e0b95c696a7cc8",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 70,
"avg_line_length": 25.175324675324674,
"alnum_prop": 0.6022697962342017,
"repo_name": "flennerhag/mlens",
"id": "b35d78a8ba25a9cd043a3b68df081e30f126762c",
"size": "3877",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mlens/utils/tests/test_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "933041"
},
{
"name": "Shell",
"bytes": "238"
}
],
"symlink_target": ""
} |
import argparse
import sys
from typing import Callable, List, Optional
import torch
from fairseq import utils
from fairseq.data.indexed_dataset import get_available_dataset_impl
def get_preprocessing_parser(default_task="translation"):
parser = get_parser("Preprocessing", default_task)
add_preprocess_args(parser)
return parser
def get_training_parser(default_task="translation"):
parser = get_parser("Trainer", default_task)
add_dataset_args(parser, train=True)
add_distributed_training_args(parser)
add_model_args(parser)
add_optimization_args(parser)
add_checkpoint_args(parser)
add_pruning_args(parser)
return parser
def get_generation_parser(interactive=False, default_task="translation"):
parser = get_parser("Generation", default_task)
add_dataset_args(parser, gen=True)
add_distributed_training_args(parser, default_world_size=1)
add_generation_args(parser)
if interactive:
add_interactive_args(parser)
return parser
def get_interactive_generation_parser(default_task="translation"):
return get_generation_parser(interactive=True, default_task=default_task)
def get_eval_lm_parser(default_task="language_modeling"):
parser = get_parser("Evaluate Language Model", default_task)
add_dataset_args(parser, gen=True)
add_distributed_training_args(parser, default_world_size=1)
add_eval_lm_args(parser)
return parser
def get_validation_parser(default_task=None):
parser = get_parser("Validation", default_task)
add_dataset_args(parser, train=True)
add_distributed_training_args(parser, default_world_size=1)
group = parser.add_argument_group("Evaluation")
add_common_eval_args(group)
return parser
def csv_str_list(x):
return x.split(',')
def eval_str_list(x, type=float):
if x is None:
return None
if isinstance(x, str):
x = eval(x)
try:
return list(map(type, x))
except TypeError:
return [type(x)]
def eval_str_dict(x, type=dict):
if x is None:
return None
if isinstance(x, str):
x = eval(x)
return x
def eval_bool(x, default=False):
if x is None:
return default
try:
return bool(eval(x))
except TypeError:
return default
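# Illustrative note (not part of the original module): the helpers above are
# meant to be used as argparse ``type=`` converters, e.g. (hypothetical flags):
#
#   parser.add_argument('--lr', type=eval_str_list, metavar='LR_1,LR_2,...,LR_N')
#   parser.add_argument('--langs', type=csv_str_list)
#
# so that ``--lr '[0.25, 0.1]'`` parses to [0.25, 0.1] and ``--langs de,en``
# parses to ['de', 'en'].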
def parse_args_and_arch(
parser: argparse.ArgumentParser,
input_args: List[str] = None,
parse_known: bool = False,
suppress_defaults: bool = False,
modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None,
):
"""
Args:
parser (ArgumentParser): the parser
input_args (List[str]): strings to parse, defaults to sys.argv
parse_known (bool): only parse known arguments, similar to
`ArgumentParser.parse_known_args`
suppress_defaults (bool): parse while ignoring all default values
modify_parser (Optional[Callable[[ArgumentParser], None]]):
function to modify the parser, e.g., to set default values
"""
if suppress_defaults:
# Parse args without any default values. This requires us to parse
# twice, once to identify all the necessary task/model args, and a second
# time with all defaults set to None.
args = parse_args_and_arch(
parser,
input_args=input_args,
parse_known=parse_known,
suppress_defaults=False,
)
suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser])
suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()})
args = suppressed_parser.parse_args(input_args)
return argparse.Namespace(
**{k: v for k, v in vars(args).items() if v is not None}
)
from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers, architectures, etc.
usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
usr_parser.add_argument("--user-dir", default=None)
usr_args, _ = usr_parser.parse_known_args(input_args)
utils.import_user_module(usr_args)
if modify_parser is not None:
modify_parser(parser)
# The parser doesn't know about model/criterion/optimizer-specific args, so
# we parse twice. First we parse the model/criterion/optimizer, then we
# parse a second time after adding the *-specific arguments.
# If input_args is given, we will parse those args instead of sys.argv.
args, _ = parser.parse_known_args(input_args)
# Add model-specific args to parser.
if hasattr(args, "arch"):
model_specific_group = parser.add_argument_group(
"Model-specific configuration",
# Only include attributes which are explicitly given as command-line
# arguments or which have default values.
argument_default=argparse.SUPPRESS,
)
ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)
# Add *-specific args to parser.
from fairseq.registry import REGISTRIES
for registry_name, REGISTRY in REGISTRIES.items():
choice = getattr(args, registry_name, None)
if choice is not None:
cls = REGISTRY["registry"][choice]
if hasattr(cls, "add_args"):
cls.add_args(parser)
if hasattr(args, "task"):
from fairseq.tasks import TASK_REGISTRY
TASK_REGISTRY[args.task].add_args(parser)
if getattr(args, "use_bmuf", False):
# hack to support extra args for block distributed data parallelism
from fairseq.optim.bmuf import FairseqBMUF
FairseqBMUF.add_args(parser)
# Modify the parser a second time, since defaults may have been reset
if modify_parser is not None:
modify_parser(parser)
# Parse a second time.
if parse_known:
args, extra = parser.parse_known_args(input_args)
else:
args = parser.parse_args(input_args)
extra = None
# Post-process args.
if hasattr(args, "max_sentences_valid") and args.max_sentences_valid is None:
args.max_sentences_valid = args.max_sentences
if hasattr(args, "max_tokens_valid") and args.max_tokens_valid is None:
args.max_tokens_valid = args.max_tokens
if getattr(args, "memory_efficient_fp16", False):
args.fp16 = True
if getattr(args, "memory_efficient_bf16", False):
args.bf16 = True
args.tpu = getattr(args, "tpu", False)
args.bf16 = getattr(args, "bf16", False)
if args.bf16:
args.tpu = True
if args.tpu and args.fp16:
raise ValueError("Cannot combine --fp16 and --tpu, use --bf16 on TPUs")
if getattr(args, "seed", None) is None:
args.seed = 1 # default seed for training
args.no_seed_provided = True
else:
args.no_seed_provided = False
# Apply architecture configuration.
if hasattr(args, "arch"):
ARCH_CONFIG_REGISTRY[args.arch](args)
if parse_known:
return args, extra
else:
return args
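# Illustrative usage sketch (not part of the original module; the argument
# values below are hypothetical):
#
#   parser = get_training_parser()
#   args = parse_args_and_arch(parser, input_args=[
#       'data-bin/example',        # positional data argument added by the task
#       '--task', 'translation',
#       '--arch', 'transformer',
#   ])
#
# The first parse_known_args() pass discovers --task/--arch, their add_args()
# hooks extend the parser, and the final parse returns the fully populated
# namespace with the architecture's defaults applied.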
def get_parser(desc, default_task="translation"):
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers, architectures, etc.
usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
usr_parser.add_argument("--user-dir", default=None)
usr_args, _ = usr_parser.parse_known_args()
utils.import_user_module(usr_args)
parser = argparse.ArgumentParser(allow_abbrev=False)
# fmt: off
parser.add_argument('--no-progress-bar', action='store_true', help='disable progress bar')
parser.add_argument('--log-interval', type=int, default=50, metavar='N',
help='log progress every N batches (when progress bar is disabled)')
parser.add_argument('--log-format', default=None, help='log format to use',
choices=['json', 'none', 'simple', 'tqdm'])
parser.add_argument('--tensorboard-logdir', metavar='DIR', default='',
help='path to save logs for tensorboard, should match --logdir '
'of running tensorboard (default: no tensorboard logging)')
parser.add_argument('--seed', default=None, type=int, metavar='N',
help='pseudo random number generator seed')
parser.add_argument('--cpu', action='store_true', help='use CPU instead of CUDA')
parser.add_argument('--tpu', action='store_true', help='use TPU instead of CUDA')
parser.add_argument('--bf16', action='store_true', help='use bfloat16; implies --tpu')
parser.add_argument('--fp16', action='store_true', help='use FP16')
parser.add_argument('--memory-efficient-bf16', action='store_true',
help='use a memory-efficient version of BF16 training; implies --bf16')
parser.add_argument('--memory-efficient-fp16', action='store_true',
help='use a memory-efficient version of FP16 training; implies --fp16')
parser.add_argument('--fp16-no-flatten-grads', action='store_true',
help='don\'t flatten FP16 grads tensor')
parser.add_argument('--fp16-init-scale', default=2 ** 7, type=int,
help='default FP16 loss scale')
parser.add_argument('--fp16-scale-window', type=int,
help='number of updates before increasing loss scale')
parser.add_argument('--fp16-scale-tolerance', default=0.0, type=float,
help='pct of updates that can overflow before decreasing the loss scale')
parser.add_argument('--min-loss-scale', default=1e-4, type=float, metavar='D',
help='minimum FP16 loss scale, after which training is stopped')
parser.add_argument('--threshold-loss-scale', type=float,
help='threshold FP16 loss scale from below')
parser.add_argument('--user-dir', default=None,
help='path to a python module containing custom extensions (tasks and/or architectures)')
parser.add_argument('--empty-cache-freq', default=0, type=int,
help='how often to clear the PyTorch CUDA cache (0 to disable)')
parser.add_argument('--all-gather-list-size', default=16384, type=int,
help='number of bytes reserved for gathering stats from workers')
parser.add_argument('--model-parallel-size', type=int, metavar='N',
default=1,
help='total number of GPUs to parallelize model over')
parser.add_argument('--checkpoint-suffix', default='',
help='suffix to add to the checkpoint file name')
parser.add_argument('--quantization-config-path', default=None,
help='path to quantization config file')
parser.add_argument('--profile', action='store_true', help='enable autograd profiler emit_nvtx')
from fairseq.registry import REGISTRIES
for registry_name, REGISTRY in REGISTRIES.items():
parser.add_argument(
'--' + registry_name.replace('_', '-'),
default=REGISTRY['default'],
choices=REGISTRY['registry'].keys(),
)
# Task definitions can be found under fairseq/tasks/
from fairseq.tasks import TASK_REGISTRY
parser.add_argument('--task', metavar='TASK', default=default_task,
choices=TASK_REGISTRY.keys(),
help='task')
# fmt: on
return parser
def add_preprocess_args(parser):
group = parser.add_argument_group("Preprocessing")
# fmt: off
group.add_argument("-s", "--source-lang", default=None, metavar="SRC",
help="source language")
group.add_argument("-t", "--target-lang", default=None, metavar="TARGET",
help="target language")
group.add_argument("--trainpref", metavar="FP", default=None,
help="train file prefix")
group.add_argument("--validpref", metavar="FP", default=None,
help="comma separated, valid file prefixes")
group.add_argument("--testpref", metavar="FP", default=None,
help="comma separated, test file prefixes")
group.add_argument("--align-suffix", metavar="FP", default=None,
help="alignment file suffix")
group.add_argument("--destdir", metavar="DIR", default="data-bin",
help="destination dir")
group.add_argument("--thresholdtgt", metavar="N", default=0, type=int,
help="map words appearing less than threshold times to unknown")
group.add_argument("--thresholdsrc", metavar="N", default=0, type=int,
help="map words appearing less than threshold times to unknown")
group.add_argument("--tgtdict", metavar="FP",
help="reuse given target dictionary")
group.add_argument("--srcdict", metavar="FP",
help="reuse given source dictionary")
group.add_argument("--nwordstgt", metavar="N", default=-1, type=int,
help="number of target words to retain")
group.add_argument("--nwordssrc", metavar="N", default=-1, type=int,
help="number of source words to retain")
group.add_argument("--alignfile", metavar="ALIGN", default=None,
help="an alignment file (optional)")
parser.add_argument('--dataset-impl', metavar='FORMAT', default='mmap',
choices=get_available_dataset_impl(),
help='output dataset implementation')
group.add_argument("--joined-dictionary", action="store_true",
help="Generate joined dictionary")
group.add_argument("--only-source", action="store_true",
help="Only process the source language")
group.add_argument("--padding-factor", metavar="N", default=8, type=int,
help="Pad dictionary size to be multiple of N")
group.add_argument("--workers", metavar="N", default=1, type=int,
help="number of parallel workers")
# fmt: on
return parser
def add_dataset_args(parser, train=False, gen=False):
group = parser.add_argument_group("Dataset and data loading")
# fmt: off
group.add_argument('--num-workers', default=1, type=int, metavar='N',
help='how many subprocesses to use for data loading')
group.add_argument('--skip-invalid-size-inputs-valid-test', action='store_true',
help='ignore too long or too short lines in valid and test set')
group.add_argument('--max-tokens', type=int, metavar='N',
help='maximum number of tokens in a batch')
group.add_argument('--max-sentences', '--batch-size', type=int, metavar='N',
help='maximum number of sentences in a batch')
group.add_argument('--required-batch-size-multiple', default=8, type=int, metavar='N',
help='batch size will either be less than this value, '
'or a multiple of this value')
parser.add_argument('--dataset-impl', metavar='FORMAT',
choices=get_available_dataset_impl(),
help='output dataset implementation')
group.add_argument('--data-buffer-size', default=10, type=int, metavar='N',
help='number of batches to preload')
if train:
group.add_argument('--train-subset', default='train', metavar='SPLIT',
help='data subset to use for training (e.g. train, valid, test)')
group.add_argument('--valid-subset', default='valid', metavar='SPLIT',
help='comma separated list of data subsets to use for validation'
' (e.g. train, valid, test)')
group.add_argument('--validate-interval', type=int, default=1, metavar='N',
help='validate every N epochs')
group.add_argument('--fixed-validation-seed', default=None, type=int, metavar='N',
help='specified random seed for validation')
group.add_argument('--disable-validation', action='store_true',
help='disable validation')
group.add_argument('--max-tokens-valid', type=int, metavar='N',
help='maximum number of tokens in a validation batch'
' (defaults to --max-tokens)')
group.add_argument('--max-sentences-valid', type=int, metavar='N',
help='maximum number of sentences in a validation batch'
' (defaults to --max-sentences)')
group.add_argument('--curriculum', default=0, type=int, metavar='N',
help='don\'t shuffle batches for first N epochs')
if gen:
group.add_argument('--gen-subset', default='test', metavar='SPLIT',
help='data subset to generate (train, valid, test)')
group.add_argument('--num-shards', default=1, type=int, metavar='N',
help='shard generation over N shards')
group.add_argument('--shard-id', default=0, type=int, metavar='ID',
help='id of the shard to generate (id < num_shards)')
# fmt: on
return group
def add_distributed_training_args(parser, default_world_size=None):
group = parser.add_argument_group("Distributed training")
# fmt: off
if default_world_size is None:
default_world_size = max(1, torch.cuda.device_count())
group.add_argument('--distributed-world-size', type=int, metavar='N',
default=default_world_size,
help='total number of GPUs across all nodes (default: all visible GPUs)')
group.add_argument('--distributed-rank', default=0, type=int,
help='rank of the current worker')
group.add_argument('--distributed-backend', default='nccl', type=str,
help='distributed backend')
group.add_argument('--distributed-init-method', default=None, type=str,
help='typically tcp://hostname:port that will be used to '
                            'establish initial connection')
group.add_argument('--distributed-port', default=-1, type=int,
help='port number (not required if using --distributed-init-method)')
group.add_argument('--device-id', '--local_rank', default=0, type=int,
help='which GPU to use (usually configured automatically)')
group.add_argument('--distributed-no-spawn', action='store_true',
help='do not spawn multiple processes even if multiple GPUs are visible')
# "c10d" is PyTorch's DDP implementation and provides the fastest
# training. "no_c10d" is a more robust, but slightly slower DDP
# implementation. Try this if you get warning messages about
# inconsistent gradients between workers, or if some of your model
# parameters are not always used.
group.add_argument('--ddp-backend', default='c10d', type=str,
choices=['c10d', 'no_c10d'],
help='DistributedDataParallel backend')
group.add_argument('--bucket-cap-mb', default=25, type=int, metavar='MB',
help='bucket size for reduction')
group.add_argument('--fix-batches-to-gpus', action='store_true',
help='don\'t shuffle batches between GPUs; this reduces overall '
'randomness and may affect precision but avoids the cost of '
're-reading the data')
group.add_argument('--find-unused-parameters', default=False, action='store_true',
                       help='enable unused parameter detection (not applicable to '
                            'no_c10d ddp-backend)')
group.add_argument('--fast-stat-sync', default=False, action='store_true',
help='[deprecated] this is now defined per Criterion')
group.add_argument('--broadcast-buffers', default=False, action='store_true',
help='Copy non-trainable parameters between GPUs, such as '
'batchnorm population statistics')
group.add_argument('--distributed-wrapper', default='DDP', type=str,
choices=['DDP', 'SlowMo'],
help='DistributedDataParallel backend')
# Add arguments for SlowMo - these will be used when SlowMo is enabled via above
group.add_argument('--slowmo-momentum', default=None, type=float,
help='SlowMo momentum term; by default use 0.0 for 16 GPUs, '
'0.2 for 32 GPUs; 0.5 for 64 GPUs, 0.6 for > 64 GPUs')
group.add_argument('--slowmo-algorithm', default='LocalSGD', choices=['LocalSGD', 'SGP'],
help='whether to use LocalSGD or SGP')
group.add_argument('--localsgd-frequency', default=3, type=int,
help='Local SGD allreduce frequency')
group.add_argument('--nprocs-per-node', type=int, metavar='N',
default=max(1, torch.cuda.device_count()),
help='number of GPUs in each node. An allreduce operation across GPUs in '
'a node is very fast. Hence, we do allreduce across GPUs in a node, '
'and gossip across different nodes')
# fmt: on
return group
def add_optimization_args(parser):
group = parser.add_argument_group("Optimization")
# fmt: off
group.add_argument('--max-epoch', '--me', default=0, type=int, metavar='N',
help='force stop training at specified epoch')
group.add_argument('--max-update', '--mu', default=0, type=int, metavar='N',
help='force stop training at specified update')
group.add_argument('--stop-time-hours', default=0, type=float, metavar='N',
help='force stop training after specified cumulative time (if >0)')
group.add_argument('--clip-norm', default=0.0, type=float, metavar='NORM',
help='clip threshold of gradients')
group.add_argument('--sentence-avg', action='store_true',
help='normalize gradients by the number of sentences in a batch'
' (default is to normalize by number of tokens)')
group.add_argument('--update-freq', default='1', metavar='N1,N2,...,N_K',
type=lambda uf: eval_str_list(uf, type=int),
help='update parameters every N_i batches, when in epoch i')
group.add_argument('--lr', '--learning-rate', default='0.25', type=eval_str_list,
metavar='LR_1,LR_2,...,LR_N',
help='learning rate for the first N epochs; all epochs >N using LR_N'
' (note: this may be interpreted differently depending on --lr-scheduler)')
group.add_argument('--min-lr', default=-1, type=float, metavar='LR',
help='stop training when the learning rate reaches this minimum')
group.add_argument('--use-bmuf', default=False, action='store_true',
help='specify global optimizer for syncing models on different GPUs/shards')
# fmt: on
return group
def add_checkpoint_args(parser):
group = parser.add_argument_group("Checkpointing")
# fmt: off
group.add_argument('--save-dir', metavar='DIR', default='checkpoints',
help='path to save checkpoints')
group.add_argument('--restore-file', default='checkpoint_last.pt',
help='filename from which to load checkpoint '
                            '(default: <save-dir>/checkpoint_last.pt)')
group.add_argument('--reset-dataloader', action='store_true',
help='if set, does not reload dataloader state from the checkpoint')
group.add_argument('--reset-lr-scheduler', action='store_true',
help='if set, does not load lr scheduler state from the checkpoint')
group.add_argument('--reset-meters', action='store_true',
help='if set, does not load meters from the checkpoint')
group.add_argument('--reset-optimizer', action='store_true',
help='if set, does not load optimizer state from the checkpoint')
group.add_argument('--optimizer-overrides', default="{}", type=str, metavar='DICT',
help='a dictionary used to override optimizer args when loading a checkpoint')
group.add_argument('--save-interval', type=int, default=1, metavar='N',
help='save a checkpoint every N epochs')
group.add_argument('--save-interval-updates', type=int, default=0, metavar='N',
help='save a checkpoint (and validate) every N updates')
group.add_argument('--keep-interval-updates', type=int, default=-1, metavar='N',
help='keep the last N checkpoints saved with --save-interval-updates')
group.add_argument('--keep-last-epochs', type=int, default=-1, metavar='N',
help='keep last N epoch checkpoints')
group.add_argument('--keep-best-checkpoints', type=int, default=-1, metavar='N',
help='keep best N checkpoints based on scores')
group.add_argument('--no-save', action='store_true',
help='don\'t save models or checkpoints')
group.add_argument('--no-epoch-checkpoints', action='store_true',
help='only store last and best checkpoints')
group.add_argument('--no-last-checkpoints', action='store_true',
help='don\'t store last checkpoints')
group.add_argument('--no-save-optimizer-state', action='store_true',
help='don\'t save optimizer-state as part of checkpoint')
group.add_argument('--best-checkpoint-metric', type=str, default='loss',
help='metric to use for saving "best" checkpoints')
group.add_argument('--maximize-best-checkpoint-metric', action='store_true',
help='select the largest metric value for saving "best" checkpoints')
group.add_argument('--patience', type=int, default=-1, metavar='N',
help=('early stop training if valid performance doesn\'t '
'improve for N consecutive validation runs; note '
'that this is influenced by --validate-interval'))
# fmt: on
return group
def add_common_eval_args(group):
# fmt: off
group.add_argument('--path', metavar='FILE',
help='path(s) to model file(s), colon separated')
group.add_argument('--remove-bpe', nargs='?', const='@@ ', default=None,
help='remove BPE tokens before scoring (can be set to sentencepiece)')
group.add_argument('--quiet', action='store_true',
help='only print final scores')
group.add_argument('--model-overrides', default="{}", type=str, metavar='DICT',
help='a dictionary used to override model args at generation '
'that were used during model training')
group.add_argument('--results-path', metavar='RESDIR', type=str, default=None,
help='path to save eval results (optional)"')
# fmt: on
def add_eval_lm_args(parser):
group = parser.add_argument_group("LM Evaluation")
add_common_eval_args(group)
# fmt: off
group.add_argument('--output-word-probs', action='store_true',
help='if set, outputs words and their predicted log probabilities to standard output')
group.add_argument('--output-word-stats', action='store_true',
help='if set, outputs word statistics such as word count, average probability, etc')
group.add_argument('--context-window', default=0, type=int, metavar='N',
help='ensures that every evaluated token has access to a context of at least this size,'
' if possible')
group.add_argument('--softmax-batch', default=sys.maxsize, type=int, metavar='N',
help='if BxT is more than this, will batch the softmax over vocab to this amount of tokens'
' in order to fit into GPU memory')
# fmt: on
def add_generation_args(parser):
group = parser.add_argument_group("Generation")
add_common_eval_args(group)
# fmt: off
group.add_argument('--beam', default=5, type=int, metavar='N',
help='beam size')
group.add_argument('--nbest', default=1, type=int, metavar='N',
help='number of hypotheses to output')
group.add_argument('--max-len-a', default=0, type=float, metavar='N',
help=('generate sequences of maximum length ax + b, '
'where x is the source length'))
group.add_argument('--max-len-b', default=200, type=int, metavar='N',
help=('generate sequences of maximum length ax + b, '
'where x is the source length'))
group.add_argument('--min-len', default=1, type=float, metavar='N',
help=('minimum generation length'))
group.add_argument('--match-source-len', default=False, action='store_true',
help=('generations should match the source length'))
group.add_argument('--no-early-stop', action='store_true',
help='deprecated')
group.add_argument('--unnormalized', action='store_true',
help='compare unnormalized hypothesis scores')
group.add_argument('--no-beamable-mm', action='store_true',
help='don\'t use BeamableMM in attention layers')
group.add_argument('--lenpen', default=1, type=float,
help='length penalty: <1.0 favors shorter, >1.0 favors longer sentences')
group.add_argument('--unkpen', default=0, type=float,
help='unknown word penalty: <0 produces more unks, >0 produces fewer')
group.add_argument('--replace-unk', nargs='?', const=True, default=None,
help='perform unknown replacement (optionally with alignment dictionary)')
group.add_argument('--sacrebleu', action='store_true',
help='score with sacrebleu')
group.add_argument('--score-reference', action='store_true',
help='just score the reference translation')
group.add_argument('--prefix-size', default=0, type=int, metavar='PS',
help='initialize generation by target prefix of given length')
group.add_argument('--no-repeat-ngram-size', default=0, type=int, metavar='N',
help='ngram blocking such that this size ngram cannot be repeated in the generation')
group.add_argument('--sampling', action='store_true',
help='sample hypotheses instead of using beam search')
group.add_argument('--sampling-topk', default=-1, type=int, metavar='PS',
help='sample from top K likely next words instead of all words')
group.add_argument('--sampling-topp', default=-1.0, type=float, metavar='PS',
help='sample from the smallest set whose cumulative probability mass exceeds p for next words')
group.add_argument('--temperature', default=1., type=float, metavar='N',
help='temperature for generation')
group.add_argument('--diverse-beam-groups', default=-1, type=int, metavar='N',
help='number of groups for Diverse Beam Search')
group.add_argument('--diverse-beam-strength', default=0.5, type=float, metavar='N',
help='strength of diversity penalty for Diverse Beam Search')
group.add_argument('--diversity-rate', default=-1.0, type=float, metavar='N',
help='strength of diversity penalty for Diverse Siblings Search')
group.add_argument('--print-alignment', action='store_true',
help='if set, uses attention feedback to compute and print alignment to source tokens')
group.add_argument('--print-step', action='store_true')
# arguments for iterative refinement generator
group.add_argument('--iter-decode-eos-penalty', default=0.0, type=float, metavar='N',
                       help='if > 0.0, penalizes early stopping in decoding.')
group.add_argument('--iter-decode-max-iter', default=10, type=int, metavar='N',
help='maximum iterations for iterative refinement.')
group.add_argument('--iter-decode-force-max-iter', action='store_true',
help='if set, run exact the maximum number of iterations without early stop')
group.add_argument('--iter-decode-with-beam', default=1, type=int, metavar='N',
                       help='if > 1, the model will generate translations varying by length.')
group.add_argument('--iter-decode-with-external-reranker', action='store_true',
                       help='if set, the last checkpoint is assumed to be a reranker to rescore the translations')
group.add_argument('--retain-iter-history', action='store_true',
help='if set, decoding returns the whole history of iterative refinement')
group.add_argument('--retain-dropout', action='store_true',
help='Use dropout at inference time')
group.add_argument('--retain-dropout-modules', default=None, nargs='+', type=str,
help='if set, only retain dropout for the specified modules; '
'if not set, then dropout will be retained for all modules')
# special decoding format for advanced decoding.
group.add_argument('--decoding-format', default=None, type=str, choices=['unigram', 'ensemble', 'vote', 'dp', 'bs'])
# fmt: on
return group
def add_interactive_args(parser):
group = parser.add_argument_group("Interactive")
# fmt: off
group.add_argument('--buffer-size', default=0, type=int, metavar='N',
help='read this many sentences into a buffer before processing them')
group.add_argument('--input', default='-', type=str, metavar='FILE',
help='file to read from; use - for stdin')
# fmt: on
def add_model_args(parser):
group = parser.add_argument_group("Model configuration")
# fmt: off
# Model definitions can be found under fairseq/models/
#
# The model architecture can be specified in several ways.
# In increasing order of priority:
# 1) model defaults (lowest priority)
# 2) --arch argument
# 3) --encoder/decoder-* arguments (highest priority)
from fairseq.models import ARCH_MODEL_REGISTRY
group.add_argument('--arch', '-a', default='fconv', metavar='ARCH',
choices=ARCH_MODEL_REGISTRY.keys(),
help='Model Architecture')
# fmt: on
return group
def add_pruning_args(parser):
group = parser.add_argument_group("Pruning configuration")
group.add_argument('--target_sparsity', type=float, help='Target sparsity')
group.add_argument('--pruning_interval', type=int, help='Pruning interval')
group.add_argument('--num_pruning_steps', type=int, help='Number of pruning steps')
group.add_argument('--prune_start_step', type=int, help='Training step to start pruning')
group.add_argument('--prune_type', choices=["random", "magnitude"], help='Type of pruning')
    # NOTE: argparse's `type=bool` treats any non-empty string (even "False")
    # as True, so expose this option as a plain boolean flag instead.
    group.add_argument('--prune_embedding', default=False, action='store_true',
                       help='Include embedding for pruning')
    return group
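# Illustrative sketch only: a hypothetical helper showing how the argument
# groups defined above could be composed into a full training parser, assuming
# the same call pattern as fairseq's training entry point. It only reuses
# functions defined in this module and is never invoked here.
def _example_build_training_parser(default_task='translation'):
    parser = get_parser('Trainer', default_task)
    add_dataset_args(parser, train=True)
    add_distributed_training_args(parser)
    add_model_args(parser)
    add_optimization_args(parser)
    add_checkpoint_args(parser)
    add_pruning_args(parser)
    return parser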
| {
"content_hash": "45acdbf05a203061c3ee81c6d3cf3131",
"timestamp": "",
"source": "github",
"line_count": 673,
"max_line_length": 120,
"avg_line_length": 53.49034175334324,
"alnum_prop": 0.6140170560293341,
"repo_name": "hfp/libxsmm",
"id": "23bb005dc47d37d041b3ee40f3b30c305dd806f8",
"size": "36177",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/deeplearning/sparse_training/fairseq/fairseq/options.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3115"
},
{
"name": "C",
"bytes": "8335143"
},
{
"name": "C++",
"bytes": "84416"
},
{
"name": "CSS",
"bytes": "242"
},
{
"name": "Fortran",
"bytes": "102021"
},
{
"name": "HTML",
"bytes": "390"
},
{
"name": "JavaScript",
"bytes": "1062"
},
{
"name": "Makefile",
"bytes": "158870"
},
{
"name": "Python",
"bytes": "36612"
},
{
"name": "Shell",
"bytes": "84205"
},
{
"name": "Starlark",
"bytes": "882"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
# Forms
from itertools import izip
from gaebusiness.business import CommandSequential, CommandParallel
from gaebusiness.gaeutil import SaveCommand, ModelSearchCommand
from gaeforms.ndb.form import ModelForm
from gaegraph.business_base import DeleteArcs, DeleteNode, NodeSearch, UpdateNode, CreateSingleOriginArc, \
SingleOriginSearch
from livro_app.livro_model import Livro, AutorArco
class LivroForm(ModelForm):
_model_class = Livro
_include = [Livro.titulo, Livro.preco, Livro.lancamento]
# Commands
class ApagarAutorArcoCmd(DeleteArcs):
arc_class = AutorArco
def __init__(self, livro):
super(ApagarAutorArcoCmd, self).__init__(destination=livro)
class _ApagarLIvroCmd(DeleteNode):
_model_class = Livro
class ApagarLivroCmd(CommandParallel):
def __init__(self, livro):
delete_cmd = _ApagarLIvroCmd(livro)
apagar_autor_arco_cmd = ApagarAutorArcoCmd(livro)
super(ApagarLivroCmd, self).__init__(delete_cmd, apagar_autor_arco_cmd)
class BuscarLivroPorIdCmd(NodeSearch):
_model_class = Livro
class SalvarLivroCmd(SaveCommand):
_model_form_class = LivroForm
class AtualizarLivroCmd(UpdateNode):
_model_form_class = LivroForm
class SalvarLivroComAutor(CreateSingleOriginArc):
arc_class = AutorArco
class ListarLivrosOrdenadosPorTituloCmd(ModelSearchCommand):
def __init__(self):
super(ListarLivrosOrdenadosPorTituloCmd, self).__init__(Livro.query_listar_livros_ordenados_por_titulo())
class BuscarAutor(SingleOriginSearch):
arc_class = AutorArco
class BuscarAutoresCmd(CommandParallel):
def __init__(self, *livros):
autores_cmds = [BuscarAutor(livro) for livro in livros]
super(BuscarAutoresCmd, self).__init__(*autores_cmds)
def handle_previous(self, command):
autores_cmds = [BuscarAutor(livro) for livro in command.result]
self.extend(autores_cmds)
def do_business(self):
super(BuscarAutoresCmd, self).do_business()
self.result = [cmd.result for cmd in self]
class ListarLivrosPorTituloComAutor(CommandSequential):
def __init__(self):
self.__listar_livros_cmd = ListarLivrosOrdenadosPorTituloCmd()
self.__buscar_autores_cmd = BuscarAutoresCmd()
super(ListarLivrosPorTituloComAutor, self).__init__(self.__listar_livros_cmd,
self.__buscar_autores_cmd)
def do_business(self):
super(ListarLivrosPorTituloComAutor, self).do_business()
        # Iterate over the books and attach their authors
for livro, autor in izip(self.__listar_livros_cmd.result, self.__buscar_autores_cmd.result):
livro.autor = autor
self.result = self.__listar_livros_cmd.result | {
"content_hash": "f702fcc448508508d3708614aa0411d5",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 113,
"avg_line_length": 31.96590909090909,
"alnum_prop": 0.7095627444009954,
"repo_name": "renzon/appengineepython",
"id": "cc25d6224e450ee9ffcc78369ce75dceed5213f5",
"size": "2838",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/apps/livro_app/livro_commands.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "41532"
},
{
"name": "JavaScript",
"bytes": "8800"
},
{
"name": "Python",
"bytes": "114281"
},
{
"name": "Shell",
"bytes": "2039"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('profiles', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='profile',
options={'ordering': ['user__username']},
),
]
| {
"content_hash": "0a89536ac1376e95d7968d9c2c7b488e",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 53,
"avg_line_length": 20.352941176470587,
"alnum_prop": 0.5895953757225434,
"repo_name": "jonsimington/app",
"id": "5dbf9d6f3aff973a11a61153f00c6b418a01241a",
"size": "370",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "project/profiles/migrations/0002_auto_20150907_1943.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1930"
},
{
"name": "HTML",
"bytes": "19763"
},
{
"name": "Makefile",
"bytes": "571"
},
{
"name": "Python",
"bytes": "41446"
}
],
"symlink_target": ""
} |
"""adding_syntax_highlighter_choice
Revision ID: 13234475ad5
Revises: 12ca4cba34c
Create Date: 2015-03-12 10:28:43.604768
"""
# revision identifiers, used by Alembic.
revision = '13234475ad5'
down_revision = '12ca4cba34c'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
from migrations.utils import drop_column_sqlite
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('blog_syntax_highlighter_css', sa.Enum('autumn.css', 'borland.css', 'bw.css', 'colorful.css', 'default.css', 'emacs.css', 'friendly.css', 'fruity.css', 'github.css', 'manni.css', 'monokai.css', 'murphy.css', 'native.css', 'pastie.css', 'perldoc.css', 'tango.css', 'trac.css', 'vim.css', 'vs.css', 'zenburn.css'), nullable=True))
op.execute(table('user', column('blog_syntax_highlighter_css')).update().values(
{'blog_syntax_highlighter_css': "monokai.css"})
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
drop_column_sqlite('user', 'blog_syntax_highlighter_css')
### end Alembic commands ###
| {
"content_hash": "ec3b07ab44486603e47302041dd56ce0",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 364,
"avg_line_length": 37.61290322580645,
"alnum_prop": 0.6895368782161235,
"repo_name": "Depado/MarkDownBlog",
"id": "bba8231e9c9edfe6a95908b08398bf5c57b8e8be",
"size": "1166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/13234475ad5_adding_syntax_highlighter_choice.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "113779"
},
{
"name": "Dockerfile",
"bytes": "359"
},
{
"name": "HTML",
"bytes": "40180"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "66380"
}
],
"symlink_target": ""
} |
import logging
import datetime
import sys
#logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
#logging.basicConfig(level=logging.DEBUG)
#logging.info('Wetransfer started')
from .wetransfer import WeTransfer
| {
"content_hash": "73c2ca2a2a0bb7d9f764013172fe3bd8",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 60,
"avg_line_length": 21.9,
"alnum_prop": 0.817351598173516,
"repo_name": "predat/wetransferpy",
"id": "9317721e1f4e53a8525d602a988f7a58e59f825c",
"size": "219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wetransferpy/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16637"
}
],
"symlink_target": ""
} |
"""Loss functions."""
import enum
from typing import Tuple, Mapping, Optional, Union
from flax.training import common_utils
import jax
import jax.numpy as jnp
import numpy as np
@jax.custom_vjp
def cross_entropy_with_logits(logits: jnp.ndarray, targets: jnp.ndarray,
z_loss: float) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Computes cross entropy loss with stable custom gradient.
Computes a stabilized-gradient version of:
-jnp.sum(targets * nn.log_softmax(logits), axis=-1)
If z_loss > 0, then an auxiliary loss equal to z_loss*log(z)^2
will be added to the cross entropy loss (z = softmax normalization constant).
The two uses of z_loss are:
1. To keep the logits from drifting too far from zero, which can cause
unacceptable roundoff errors in bfloat16.
2. To encourage the logits to be normalized log-probabilities.
Args:
logits: [batch, length, num_classes] float array.
targets: categorical one-hot targets [batch, length, num_classes] float
array.
    z_loss: coefficient for the auxiliary z-loss term.
Returns:
tuple with the total loss and the z_loss, both
float arrays with shape [batch, length].
"""
logits_sum = jax.scipy.special.logsumexp(logits, axis=-1, keepdims=True)
log_softmax = logits - logits_sum
loss = -jnp.sum(targets * log_softmax, axis=-1)
  # Add auxiliary z-loss term.
log_z = jnp.squeeze(logits_sum, axis=-1)
total_z_loss = z_loss * jax.lax.square(log_z)
loss += total_z_loss
return loss, total_z_loss
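# Illustrative sketch only: a hypothetical smoke test for
# cross_entropy_with_logits; the shapes and values below are made up purely
# for illustration, and this helper is never called.
def _example_cross_entropy_with_logits():
  logits = jnp.array([[[2.0, 0.5, -1.0], [0.1, 0.2, 0.3]]])  # [1, 2, 3]
  targets = jax.nn.one_hot(jnp.array([[0, 2]]), 3)           # one-hot targets
  loss, z_loss = cross_entropy_with_logits(logits, targets, z_loss=1e-4)
  return loss.shape, z_loss.shape  # both (1, 2)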
def _cross_entropy_with_logits_fwd(
logits: jnp.ndarray,
targets: jnp.ndarray,
z_loss: float = 0.0
) -> Tuple[Tuple[jnp.ndarray, jnp.ndarray],
Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray,
jnp.ndarray, jnp.ndarray, jnp.ndarray]]:
"""Forward-mode of `cross_entropy_with_logits`."""
max_logit = logits.max(axis=-1, keepdims=True)
shifted = logits - max_logit
exp_shifted = jnp.exp(shifted)
sum_exp = jnp.sum(exp_shifted, axis=-1, keepdims=True)
log_softmax = shifted - jnp.log(sum_exp)
loss = -jnp.sum(targets * log_softmax, axis=-1)
  # Add auxiliary z-loss term.
log_z = jnp.squeeze(jnp.log(sum_exp) + max_logit, axis=-1)
total_z_loss = z_loss * jax.lax.square(log_z)
loss += total_z_loss
return (loss, total_z_loss), (logits, targets, z_loss, exp_shifted, sum_exp,
log_softmax, log_z)
def _cross_entropy_with_logits_bwd(
res: Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray,
jnp.ndarray, jnp.ndarray], g: Tuple[jnp.ndarray, jnp.ndarray]
) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray]:
"""Backward-mode of `cross_entropy_with_logits`."""
g = g[0] # Ignore z_loss component as that is only used for logging.
logits, targets, z_loss, exp_shifted, sum_exp, log_softmax, log_z = res
# z-loss term adds the (2 * z_loss * log_z) factor.
deriv = (
jnp.expand_dims(1 + 2 * z_loss * log_z, -1) * exp_shifted / sum_exp -
targets)
g_logits = jnp.expand_dims(g, axis=-1) * deriv
g_targets = -jnp.expand_dims(g, axis=-1) * log_softmax
return (jnp.asarray(g_logits,
logits.dtype), jnp.asarray(g_targets, targets.dtype),
jnp.array(0.0)) # sets z-loss coeff gradient to 0
cross_entropy_with_logits.defvjp(_cross_entropy_with_logits_fwd,
_cross_entropy_with_logits_bwd)
def compute_weighted_cross_entropy(
logits: jnp.ndarray,
targets: jnp.ndarray,
weights: Optional[jnp.ndarray] = None,
label_smoothing: float = 0.0,
z_loss: float = 0.0,
loss_normalizing_factor: Optional[float] = None
) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray]:
"""Compute weighted cross entropy and entropy for log probs and targets.
Args:
logits: [batch, length, num_classes] float array.
targets: categorical targets [batch, length] int array.
weights: None or array of shape [batch, length].
label_smoothing: label smoothing constant, used to determine the on and off
values.
    z_loss: coefficient for the auxiliary z-loss term.
loss_normalizing_factor: Constant to divide loss by. If not specified, loss
will not be normalized. Intended for backward compatibility with T5-MTF
training. Should not normally be used.
Returns:
Tuple of scalar loss, z_loss, and weight sum.
"""
if logits.ndim != targets.ndim + 1:
raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %
(str(logits.shape), str(targets.shape)))
vocab_size = logits.shape[-1]
confidence = 1.0 - label_smoothing
low_confidence = (1.0 - confidence) / (vocab_size - 1)
normalizing_constant = -(
confidence * jnp.log(confidence) +
(vocab_size - 1) * low_confidence * jnp.log(low_confidence + 1e-20))
soft_targets = common_utils.onehot(
targets, vocab_size, on_value=confidence, off_value=low_confidence)
total_loss, total_z_loss = cross_entropy_with_logits(
logits, soft_targets, z_loss=z_loss)
total_loss = total_loss - normalizing_constant
weight_sum = np.prod(targets.shape)
if weights is not None:
total_loss = total_loss * weights
total_z_loss = total_z_loss * weights
weight_sum = jnp.sum(weights)
# By default, we do not normalize loss based on anything.
# We don't normalize based on batch size because the optimizers we use are
# pretty much scale invariant, so this simplifies things.
# We don't normalize based on number of non-padding tokens in order to treat
# each token as equally important regardless of sequence length.
if loss_normalizing_factor is not None:
total_loss /= loss_normalizing_factor
total_z_loss /= loss_normalizing_factor
return jnp.sum(total_loss), jnp.sum(total_z_loss), weight_sum
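# Illustrative sketch only: a hypothetical usage of
# compute_weighted_cross_entropy on a tiny made-up batch where token id 0 is
# padding and is masked out via `weights`; never called.
def _example_compute_weighted_cross_entropy():
  logits = jnp.zeros((1, 3, 4))                    # [batch, length, vocab]
  targets = jnp.array([[1, 2, 0]])                 # 0 is a padding token
  weights = jnp.asarray(targets > 0, jnp.float32)
  loss, z_loss, weight_sum = compute_weighted_cross_entropy(
      logits, targets, weights=weights, label_smoothing=0.1, z_loss=1e-4)
  return loss, z_loss, weight_sum                  # scalars; weight_sum == 2.0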
@enum.unique
class SpecialLossNormalizingFactor(enum.Enum):
"""Specially calculated loss_normalizing_factors, that are not a constant.
Attributes:
NUM_REAL_TARGET_TOKENS: Whether to divide the loss by the number of real
(non-padding) tokens in the current target batch. If
'decoder_loss_weights' are specified, it will be the sum of the weights.
Otherwise it will be the number of non-zero 'decoder_target_tokens'.
NUM_TOTAL_TARGET_TOKENS: Whether to divide the loss by the total number of
target tokens, i.e., batch_size * target_seq_length (including padding).
AVERAGE_PER_SEQUENCE: This will first compute the per-sequence loss
(averaged over the number of real target tokens in the sequence), and then
compute the average of that over the sequences. This can be preferable to
NUM_REAL_TARGET_TOKENS for finetuning, because it will weigh all examples
equally, regardless of sequence length (which can be especially important
for multi-task finetuning).
"""
NUM_REAL_TARGET_TOKENS = 1
NUM_TOTAL_TARGET_TOKENS = 2
AVERAGE_PER_SEQUENCE = 3
def convert_special_loss_normalizing_factor_to_enum(
x: str) -> SpecialLossNormalizingFactor:
"""Converts stringified version of LNF to an enum.
This is useful because gin dynamic registration does not (currently)
have support for enum.
Args:
x: stringified version of SpecialLossNormalizingFactor enum.
Returns:
SpecialLossNormalizingFactor enum instance.
"""
x = x.upper()
if x == 'NUM_REAL_TARGET_TOKENS':
return SpecialLossNormalizingFactor.NUM_REAL_TARGET_TOKENS
if x == 'NUM_TOTAL_TARGET_TOKENS':
return SpecialLossNormalizingFactor.NUM_TOTAL_TARGET_TOKENS
if x == 'AVERAGE_PER_SEQUENCE':
return SpecialLossNormalizingFactor.AVERAGE_PER_SEQUENCE
raise ValueError(
'Could not convert string \"%s\" to SpecialLossNormalizingFactor' % x)
@jax.vmap
def _sum_weights_per_segment(positions: jnp.ndarray, segment_ids: jnp.ndarray,
weights: jnp.ndarray) -> jnp.ndarray:
"""Sums weights per packed segment to produce a normalizing vector."""
# NB: Assumes padding only occurs at the end of a sequence.
def _repeat_last_nonnegative(xs, reverse=False):
def fn(prev, x):
y = jnp.where(x == 0, prev, x)
return y, y
return jax.lax.scan(fn, jnp.zeros_like(xs[0]), xs, reverse=reverse)[1]
# Compute final positions per sequence.
start_positions = positions == 0
final_positions = jnp.concatenate([start_positions[1:], jnp.ones(1)])
# Clear padded positions.
final_positions *= segment_ids != 0
# Compute cumulative weights, clearing all but final position per sequence.
final_cumulative_weights = final_positions * jnp.cumsum(weights)
# Subtract sequences' final weights from cumulative weights of following ones.
final_total_weights = jnp.concatenate([
final_cumulative_weights[0:1],
jnp.diff(_repeat_last_nonnegative(final_cumulative_weights))
])
# Copy final sequence weight to all positions in sequence.
normalizer = _repeat_last_nonnegative(final_total_weights, reverse=True)
return normalizer
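# Illustrative sketch only: a hypothetical example of the per-segment
# normalizer for one packed row holding two sequences (lengths 2 and 3)
# followed by a single padding position; never called.
def _example_sum_weights_per_segment():
  positions = jnp.array([[0, 1, 0, 1, 2, 0]])
  segment_ids = jnp.array([[1, 1, 2, 2, 2, 0]])
  weights = jnp.array([[1., 1., 1., 1., 1., 0.]])
  # Expected normalizer: [[2., 2., 3., 3., 3., 0.]]
  return _sum_weights_per_segment(positions, segment_ids, weights)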
def get_loss_normalizing_factor_and_weights(
loss_normalizing_factor: Optional[Union[float, int, str,
SpecialLossNormalizingFactor]],
batch: Mapping[str, jnp.ndarray]):
"""Get the float loss_normalizing_factor and loss weights.
If loss_normalizing_factor is float or None, this will simply return the
input loss_normalizing_factor and batch.
If loss_normalizing_factor is a SpecialLossNormalizingFactor, it will
return a float loss_normalizing_factor and loss weights corresponding to
the special LNF. See SpecialLossNormalizingFactor for more details.
Args:
loss_normalizing_factor: The input LNF, which may be a float, None, or
SpecialLossNormalizingFactor (or a stringified SLNF).
batch: Input data batch.
Returns:
Tuple of (output_loss_normalizing_factor, loss_weights).
'output_loss_normalizing_factor' is a scalar float (Python float
or jnp float).
'loss_weights' is the per token loss weight JNP array.
"""
loss_weights = batch.get('decoder_loss_weights', None)
if (loss_normalizing_factor is None or
not isinstance(loss_normalizing_factor,
(str, SpecialLossNormalizingFactor))):
return (loss_normalizing_factor, loss_weights)
if isinstance(loss_normalizing_factor, str):
loss_normalizing_factor = convert_special_loss_normalizing_factor_to_enum(
loss_normalizing_factor)
# If `loss_weights` are not provided, we assume that the padding id is 0 and
# that non-padding tokens in the decoder all correspond to the positions
# where loss should be taken. If more fine-grained behavior (e.g., taking
# loss on subset of 'decoder_target_tokens') is desired, provide
# `loss_weights` that account for this.
if loss_weights is None:
loss_weights = jnp.asarray(batch['decoder_target_tokens'] > 0, jnp.float32)
output_normalizing_factor = None
if (loss_normalizing_factor ==
SpecialLossNormalizingFactor.NUM_REAL_TARGET_TOKENS):
output_normalizing_factor = jnp.sum(loss_weights)
elif (loss_normalizing_factor ==
SpecialLossNormalizingFactor.NUM_TOTAL_TARGET_TOKENS):
output_normalizing_factor = np.prod(batch['decoder_target_tokens'].shape)
elif (loss_normalizing_factor ==
SpecialLossNormalizingFactor.AVERAGE_PER_SEQUENCE):
if 'decoder_segment_ids' in batch: # is packed
norm_vec = _sum_weights_per_segment(batch['decoder_positions'],
batch['decoder_segment_ids'],
loss_weights)
else:
norm_vec = jnp.sum(loss_weights, axis=-1, keepdims=True)
# Handle divide-by-zero.
loss_weights = jnp.nan_to_num(
loss_weights / norm_vec, nan=0, posinf=0, neginf=0)
output_normalizing_factor = jnp.sum(loss_weights)
else:
raise ValueError('Unsupported value of loss_normalizing_factor: %s' %
str(loss_normalizing_factor))
return (output_normalizing_factor, loss_weights)
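# Illustrative sketch only: a hypothetical example of resolving a special
# loss-normalizing factor for a small fake batch; as in the code above,
# token id 0 is treated as padding. Never called.
def _example_get_loss_normalizing_factor():
  batch = {'decoder_target_tokens': jnp.array([[3, 7, 0], [5, 0, 0]])}
  factor, weights = get_loss_normalizing_factor_and_weights(
      SpecialLossNormalizingFactor.NUM_REAL_TARGET_TOKENS, batch)
  return factor, weights  # factor == 3.0; weights mark the non-padding tokens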
| {
"content_hash": "c585cbd71deaf66a642239fa76847ea3",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 80,
"avg_line_length": 41.28865979381443,
"alnum_prop": 0.6930503537245111,
"repo_name": "google-research/t5x",
"id": "e45579d92b9793a3cc26da06077bff136c8c84fe",
"size": "12596",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "t5x/losses.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "35051"
},
{
"name": "Python",
"bytes": "1379643"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class ShowticksuffixValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name="showticksuffix",
parent_name="contourcarpet.colorbar",
**kwargs
):
super(ShowticksuffixValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["all", "first", "last", "none"]),
**kwargs
)
| {
"content_hash": "b4772bbc8a4a606c2714c3a851d66c9b",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 80,
"avg_line_length": 33.333333333333336,
"alnum_prop": 0.5833333333333334,
"repo_name": "plotly/python-api",
"id": "138105a17ed15de7005148d6aee3d99e09000170",
"size": "600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/contourcarpet/colorbar/_showticksuffix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
"""
WSGI config for portality project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "portality.settings")
application = get_wsgi_application()
| {
"content_hash": "67df333b91a548415913c632a1350c5b",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.6875,
"alnum_prop": 0.7721518987341772,
"repo_name": "genonfire/portality",
"id": "0b76ad950d3e05771dbb914dc32ba32365156413",
"size": "395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "portality/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13116"
},
{
"name": "HTML",
"bytes": "87543"
},
{
"name": "JavaScript",
"bytes": "7937"
},
{
"name": "Python",
"bytes": "94478"
},
{
"name": "Shell",
"bytes": "68"
}
],
"symlink_target": ""
} |
from six.moves import http_client as httplib
from xml.etree import ElementTree
import six
class ResponseError(Exception):
"""An error received from the Recurly API in response to an HTTP
request."""
def __init__(self, response_xml):
self.response_xml = response_xml
@property
def response_doc(self):
"""The XML document received from the service."""
try:
return self.__dict__['response_doc']
except KeyError:
self.__dict__['response_doc'] = ElementTree.fromstring(self.response_xml)
return self.__dict__['response_doc']
@property
def symbol(self):
"""The machine-readable identifier for the error."""
el = self.response_doc.find('symbol')
if el is not None:
return el.text
@property
def message(self):
"""The human-readable description of the error."""
el = self.response_doc.find('description')
if el is not None:
return el.text
@property
def details(self):
"""A further human-readable elaboration on the error."""
el = self.response_doc.find('details')
if el is not None:
return el.text
@property
def error(self):
"""A fall-back error message in the event no more specific
error is given."""
el = self.response_doc.find('error')
if el is not None:
return el.text
def __str__(self):
return self.__unicode__()
def __unicode__(self):
symbol = self.symbol
if symbol is None:
return self.error
details = self.details
if details is not None:
return six.u('%s: %s %s') % (symbol, self.message, details)
return six.u('%s: %s') % (symbol, self.message)
class ClientError(ResponseError):
"""An error resulting from a problem in the client's request (that
is, an error with an HTTP ``4xx`` status code)."""
pass
class BadRequestError(ClientError):
"""An error showing the request was invalid or could not be
understood by the server.
The error was returned as a ``400 Bad Request`` response.
Resubmitting the request will likely result in the same error.
"""
pass
class ConfigurationError(Exception):
"""An error related to a bad configuration"""
pass
class UnauthorizedError(ClientError):
"""An error for a missing or invalid API key (HTTP ``401 Unauthorized``)."""
def __init__(self, response_xml):
self.response_text = response_xml
def __unicode__(self):
return six.text_type(self.response_text)
class PaymentRequiredError(ClientError):
"""An error indicating your Recurly account is in production mode
but is not in good standing (HTTP ``402 Payment Required``)."""
pass
class ForbiddenError(ClientError):
"""An error showing the request represented an action the client
does not have privileges to access.
This error was returned as a ``403 Forbidden`` response. Verify
your login credentials are for the appropriate account.
"""
pass
class NotFoundError(ClientError):
"""An error for when the resource was not found with the given
identifier (HTTP ``404 Not Found``)."""
pass
class NotAcceptableError(ClientError):
"""An error for when the client's request could not be accepted by
the remote service (HTTP ``406 Not Acceptable``)."""
pass
class PreconditionFailedError(ClientError):
"""An error for a request that was unsuccessful because a condition
was not met.
For example, this error may arise if you attempt to cancel a
subscription for an account that has no subscription. This error
corresponds to the HTTP ``412 Precondition Failed`` status code.
"""
pass
class UnsupportedMediaTypeError(ClientError):
"""An error resulting from the submission as an unsupported media
type (HTTP ``415 Unsupported Media Type``)."""
pass
class TransactionError(object):
"""Represents a transaction error. See
https://recurly.readme.io/v2.0/page/transaction-errors for more details"""
def __init__(self, response_doc):
self.response_doc = response_doc
@property
def error_code(self):
"""The machine-readable identifier for the error."""
el = self.response_doc.find('error_code')
if el is not None:
return el.text
@property
def error_category(self):
"""The machine-readable identifier for the error category."""
el = self.response_doc.find('error_category')
if el is not None:
return el.text
@property
def customer_message(self):
"""Recommended message for the customer"""
el = self.response_doc.find('customer_message')
if el is not None:
return el.text
@property
def merchant_message(self):
"""Recommended message for the merchant"""
el = self.response_doc.find('merchant_message')
if el is not None:
return el.text
@property
def gateway_error_code(self):
"""Error code from the gateway"""
el = self.response_doc.find('gateway_error_code')
if el is not None:
return el.text
class ValidationError(ClientError):
"""An error indicating some values in the submitted request body
were not valid."""
@property
def transaction_error(self):
"""The transaction error object."""
error = self.response_doc.find('transaction_error')
if error is not None:
return TransactionError(error)
@property
def transaction_error_code(self):
"""The machine-readable error code for a transaction error."""
error = self.response_doc.find('transaction_error')
if error is not None:
code = error.find('error_code')
if code is not None:
return code.text
class Suberror(object):
"""An error describing the invalidity of a single invalid
field."""
def __init__(self, field, symbol, message):
self.field = field
self.symbol = symbol
self.message = message
def __unicode__(self):
return six.u('%s: %s %s') % (self.symbol, self.field, self.message)
@property
def errors(self):
"""A dictionary of error objects, keyed on the name of the
request field that was invalid.
Each error value has `field`, `symbol`, and `message`
attributes describing the particular invalidity of that field.
"""
try:
return self.__dict__['errors']
except KeyError:
pass
suberrors = dict()
for err in self.response_doc.findall('error'):
field = err.attrib['field']
symbol = err.attrib['symbol']
message = err.text
suberrors[field] = self.Suberror(field, symbol, message)
self.__dict__['errors'] = suberrors
return suberrors
def __unicode__(self):
return six.u('; ').join(six.text_type(error) for error in six.itervalues(self.errors))
class ServerError(ResponseError):
"""An error resulting from a problem creating the server's response
to the request (that is, an error with an HTTP ``5xx`` status code)."""
pass
class InternalServerError(ServerError):
"""An unexpected general server error (HTTP ``500 Internal Server
Error``)."""
pass
class BadGatewayError(ServerError):
"""An error resulting when the load balancer or web server has
trouble connecting to the Recurly app.
This error is returned as an HTTP ``502 Bad Gateway`` response.
Try the request again.
"""
pass
class ServiceUnavailableError(ServerError):
"""An error indicating the service is temporarily unavailable.
This error results from an HTTP ``503 Service Unavailable``
response. Try the request again.
"""
pass
class UnexpectedStatusError(ResponseError):
"""An error resulting from an unexpected status code returned by
the remote service."""
def __init__(self, status, response_xml):
super(UnexpectedStatusError, self).__init__(response_xml)
self.status = status
def __unicode__(self):
return six.text_type(self.status)
error_classes = {
400: BadRequestError,
401: UnauthorizedError,
402: PaymentRequiredError,
403: ForbiddenError,
404: NotFoundError,
406: NotAcceptableError,
412: PreconditionFailedError,
415: UnsupportedMediaTypeError,
422: ValidationError,
500: InternalServerError,
502: BadGatewayError,
503: ServiceUnavailableError,
}
def error_class_for_http_status(status):
"""Return the appropriate `ResponseError` subclass for the given
HTTP status code."""
try:
return error_classes[status]
except KeyError:
def new_status_error(xml_response):
return UnexpectedStatusError(status, xml_response)
return new_status_error
other_errors = [ConfigurationError]
__all__ = [x.__name__ for x in list(error_classes.values()) + other_errors]
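# Illustrative sketch only: a hypothetical helper showing how a response
# handler might use error_class_for_http_status — look up the class matching
# the HTTP status and raise it with the XML body received from the service.
def _example_raise_for_response(status, response_xml):
    raise error_class_for_http_status(status)(response_xml)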
| {
"content_hash": "8a56b64e40988abf23759a5d7924c4b0",
"timestamp": "",
"source": "github",
"line_count": 320,
"max_line_length": 94,
"avg_line_length": 28.675,
"alnum_prop": 0.6378596338273758,
"repo_name": "cgerrior/recurly-client-python",
"id": "036adc1b85e817e69b3e5a0e4bdf95b1c1d77737",
"size": "9176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recurly/errors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "141961"
}
],
"symlink_target": ""
} |
from django import forms
from restaurants.models import Restaurant
class RestaurantCreateFormEasy(forms.ModelForm):
class Meta:
model = Restaurant
fields = ['name', 'location', 'category']
    def clean_name(self):
        '''
        Clean the name field of the Restaurant model.
        Raise forms.ValidationError here if the value fails validation.
        :return: the cleaned name
        '''
        name = self.cleaned_data.get('name')
        return name | {
"content_hash": "75890bd343857790eb6ead26a8ebf28b",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 58,
"avg_line_length": 25.11111111111111,
"alnum_prop": 0.6305309734513275,
"repo_name": "matija94/show-me-the-code",
"id": "d508192195b3b4303df0687fa7471f969424d410",
"size": "452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trydjango/restaurants/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "360945"
},
{
"name": "Batchfile",
"bytes": "5439"
},
{
"name": "CSS",
"bytes": "1535"
},
{
"name": "Clojure",
"bytes": "26019"
},
{
"name": "Dockerfile",
"bytes": "120"
},
{
"name": "HTML",
"bytes": "60877"
},
{
"name": "Hack",
"bytes": "1680"
},
{
"name": "Java",
"bytes": "1094411"
},
{
"name": "JavaScript",
"bytes": "21619"
},
{
"name": "Jupyter Notebook",
"bytes": "1339056"
},
{
"name": "Kotlin",
"bytes": "3918"
},
{
"name": "Pascal",
"bytes": "1125"
},
{
"name": "Python",
"bytes": "291744"
},
{
"name": "Scala",
"bytes": "161887"
},
{
"name": "Scilab",
"bytes": "129306"
},
{
"name": "Shell",
"bytes": "8449"
},
{
"name": "XSLT",
"bytes": "3508"
}
],
"symlink_target": ""
} |
import pytest
from mitmproxy.proxy.commands import CloseConnection, OpenConnection, SendData
from mitmproxy.proxy.events import ConnectionClosed, DataReceived
from mitmproxy.proxy.layers import tcp
from mitmproxy.proxy.layers.tcp import TcpMessageInjected
from mitmproxy.tcp import TCPFlow, TCPMessage
from ..tutils import Placeholder, Playbook, reply
def test_open_connection(tctx):
"""
If there is no server connection yet, establish one,
because the server may send data first.
"""
assert (
Playbook(tcp.TCPLayer(tctx, True))
<< OpenConnection(tctx.server)
)
tctx.server.timestamp_start = 1624544785
assert (
Playbook(tcp.TCPLayer(tctx, True))
<< None
)
def test_open_connection_err(tctx):
f = Placeholder(TCPFlow)
assert (
Playbook(tcp.TCPLayer(tctx))
<< tcp.TcpStartHook(f)
>> reply()
<< OpenConnection(tctx.server)
>> reply("Connect call failed")
<< tcp.TcpErrorHook(f)
>> reply()
<< CloseConnection(tctx.client)
)
def test_simple(tctx):
"""open connection, receive data, send it to peer"""
f = Placeholder(TCPFlow)
assert (
Playbook(tcp.TCPLayer(tctx))
<< tcp.TcpStartHook(f)
>> reply()
<< OpenConnection(tctx.server)
>> reply(None)
>> DataReceived(tctx.client, b"hello!")
<< tcp.TcpMessageHook(f)
>> reply()
<< SendData(tctx.server, b"hello!")
>> DataReceived(tctx.server, b"hi")
<< tcp.TcpMessageHook(f)
>> reply()
<< SendData(tctx.client, b"hi")
>> ConnectionClosed(tctx.server)
<< CloseConnection(tctx.client, half_close=True)
>> ConnectionClosed(tctx.client)
<< CloseConnection(tctx.server)
<< tcp.TcpEndHook(f)
>> reply()
>> ConnectionClosed(tctx.client)
<< None
)
assert len(f().messages) == 2
def test_receive_data_before_server_connected(tctx):
"""
assert that data received before a server connection is established
will still be forwarded.
"""
assert (
Playbook(tcp.TCPLayer(tctx), hooks=False)
<< OpenConnection(tctx.server)
>> DataReceived(tctx.client, b"hello!")
>> reply(None, to=-2)
<< SendData(tctx.server, b"hello!")
)
def test_receive_data_after_half_close(tctx):
"""
data received after the other connection has been half-closed should still be forwarded.
"""
assert (
Playbook(tcp.TCPLayer(tctx), hooks=False)
<< OpenConnection(tctx.server)
>> reply(None)
>> DataReceived(tctx.client, b"eof-delimited-request")
<< SendData(tctx.server, b"eof-delimited-request")
>> ConnectionClosed(tctx.client)
<< CloseConnection(tctx.server, half_close=True)
>> DataReceived(tctx.server, b"i'm late")
<< SendData(tctx.client, b"i'm late")
>> ConnectionClosed(tctx.server)
<< CloseConnection(tctx.client)
)
@pytest.mark.parametrize("ignore", [True, False])
def test_ignore(tctx, ignore):
"""
no flow hooks when we set ignore.
"""
def no_flow_hooks():
assert (
Playbook(tcp.TCPLayer(tctx, ignore=ignore), hooks=True)
<< OpenConnection(tctx.server)
>> reply(None)
>> DataReceived(tctx.client, b"hello!")
<< SendData(tctx.server, b"hello!")
)
if ignore:
no_flow_hooks()
else:
with pytest.raises(AssertionError):
no_flow_hooks()
def test_inject(tctx):
"""inject data into an open connection."""
f = Placeholder(TCPFlow)
assert (
Playbook(tcp.TCPLayer(tctx))
<< tcp.TcpStartHook(f)
>> TcpMessageInjected(f, TCPMessage(True, b"hello!"))
>> reply(to=-2)
<< OpenConnection(tctx.server)
>> reply(None)
<< tcp.TcpMessageHook(f)
>> reply()
<< SendData(tctx.server, b"hello!")
# and the other way...
>> TcpMessageInjected(f, TCPMessage(False, b"I have already done the greeting for you."))
<< tcp.TcpMessageHook(f)
>> reply()
<< SendData(tctx.client, b"I have already done the greeting for you.")
<< None
)
assert len(f().messages) == 2
| {
"content_hash": "c80d875ffd8c509853b45227c13e42d7",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 101,
"avg_line_length": 31.114864864864863,
"alnum_prop": 0.5637350705754615,
"repo_name": "Kriechi/mitmproxy",
"id": "299aa99939df9bf97f01a59a59083a8ca555faf7",
"size": "4605",
"binary": false,
"copies": "1",
"ref": "refs/heads/dns-addon",
"path": "test/mitmproxy/proxy/layers/test_tcp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20958"
},
{
"name": "Dockerfile",
"bytes": "1031"
},
{
"name": "HTML",
"bytes": "14827"
},
{
"name": "JavaScript",
"bytes": "277250"
},
{
"name": "PowerShell",
"bytes": "495"
},
{
"name": "Python",
"bytes": "1880661"
},
{
"name": "Shell",
"bytes": "4711"
}
],
"symlink_target": ""
} |
cases = int(input(""))
for a in range(cases):
string = raw_input("")
if not "." in string:
print(1)
else:
decimal = string.split(".")[-1]
decimal = 10**len(decimal)/int(decimal)
print(int(decimal))
| {
"content_hash": "69647b43fa93eca935da1eb5821a5a7a",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 47,
"avg_line_length": 27,
"alnum_prop": 0.5308641975308642,
"repo_name": "geekpradd/Sphere-Online-Judge-Solutions",
"id": "ca2f99b8bd31f2a943599927ecfeff734b55ef30",
"size": "243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "games.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "13018"
},
{
"name": "C++",
"bytes": "20615"
},
{
"name": "Java",
"bytes": "1398"
},
{
"name": "Makefile",
"bytes": "62"
},
{
"name": "Python",
"bytes": "24243"
}
],
"symlink_target": ""
} |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 30, transform = "Logit", sigma = 0.0, exog_count = 0, ar_order = 12); | {
"content_hash": "7ea422bd9c42e4713e4711d13174990a",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 161,
"avg_line_length": 37.285714285714285,
"alnum_prop": 0.7011494252873564,
"repo_name": "antoinecarme/pyaf",
"id": "d8c97542c8eb12a691b61606223acf0e3f9c94ce",
"size": "261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Logit/trend_Lag1Trend/cycle_30/ar_12/test_artificial_128_Logit_Lag1Trend_30_12_0.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
import pytest
from neo4j._conf import PoolConfig
from neo4j._sync.io._bolt4 import Bolt4x0
from ...._async_compat import mark_sync_test
@pytest.mark.parametrize("set_stale", (True, False))
def test_conn_is_stale(fake_socket, set_stale):
address = ("127.0.0.1", 7687)
max_connection_lifetime = 0
connection = Bolt4x0(address, fake_socket(address), max_connection_lifetime)
if set_stale:
connection.set_stale()
assert connection.stale() is True
@pytest.mark.parametrize("set_stale", (True, False))
def test_conn_is_not_stale_if_not_enabled(fake_socket, set_stale):
address = ("127.0.0.1", 7687)
max_connection_lifetime = -1
connection = Bolt4x0(address, fake_socket(address), max_connection_lifetime)
if set_stale:
connection.set_stale()
assert connection.stale() is set_stale
@pytest.mark.parametrize("set_stale", (True, False))
def test_conn_is_not_stale(fake_socket, set_stale):
address = ("127.0.0.1", 7687)
max_connection_lifetime = 999999999
connection = Bolt4x0(address, fake_socket(address), max_connection_lifetime)
if set_stale:
connection.set_stale()
assert connection.stale() is set_stale
@mark_sync_test
def test_db_extra_in_begin(fake_socket):
address = ("127.0.0.1", 7687)
socket = fake_socket(address, Bolt4x0.UNPACKER_CLS)
connection = Bolt4x0(address, socket, PoolConfig.max_connection_lifetime)
connection.begin(db="something")
connection.send_all()
tag, fields = socket.pop_message()
assert tag == b"\x11"
assert len(fields) == 1
assert fields[0] == {"db": "something"}
@mark_sync_test
def test_db_extra_in_run(fake_socket):
address = ("127.0.0.1", 7687)
socket = fake_socket(address, Bolt4x0.UNPACKER_CLS)
connection = Bolt4x0(address, socket, PoolConfig.max_connection_lifetime)
connection.run("", {}, db="something")
connection.send_all()
tag, fields = socket.pop_message()
assert tag == b"\x10"
assert len(fields) == 3
assert fields[0] == ""
assert fields[1] == {}
assert fields[2] == {"db": "something"}
@mark_sync_test
def test_n_extra_in_discard(fake_socket):
address = ("127.0.0.1", 7687)
socket = fake_socket(address, Bolt4x0.UNPACKER_CLS)
connection = Bolt4x0(address, socket, PoolConfig.max_connection_lifetime)
connection.discard(n=666)
connection.send_all()
tag, fields = socket.pop_message()
assert tag == b"\x2F"
assert len(fields) == 1
assert fields[0] == {"n": 666}
@pytest.mark.parametrize(
"test_input, expected",
[
(666, {"n": -1, "qid": 666}),
(-1, {"n": -1}),
]
)
@mark_sync_test
def test_qid_extra_in_discard(fake_socket, test_input, expected):
address = ("127.0.0.1", 7687)
socket = fake_socket(address, Bolt4x0.UNPACKER_CLS)
connection = Bolt4x0(address, socket, PoolConfig.max_connection_lifetime)
connection.discard(qid=test_input)
connection.send_all()
tag, fields = socket.pop_message()
assert tag == b"\x2F"
assert len(fields) == 1
assert fields[0] == expected
@pytest.mark.parametrize(
"test_input, expected",
[
(777, {"n": 666, "qid": 777}),
(-1, {"n": 666}),
]
)
@mark_sync_test
def test_n_and_qid_extras_in_discard(fake_socket, test_input, expected):
address = ("127.0.0.1", 7687)
socket = fake_socket(address, Bolt4x0.UNPACKER_CLS)
connection = Bolt4x0(address, socket, PoolConfig.max_connection_lifetime)
connection.discard(n=666, qid=test_input)
connection.send_all()
tag, fields = socket.pop_message()
assert tag == b"\x2F"
assert len(fields) == 1
assert fields[0] == expected
@pytest.mark.parametrize(
"test_input, expected",
[
(666, {"n": 666}),
(-1, {"n": -1}),
]
)
@mark_sync_test
def test_n_extra_in_pull(fake_socket, test_input, expected):
address = ("127.0.0.1", 7687)
socket = fake_socket(address, Bolt4x0.UNPACKER_CLS)
connection = Bolt4x0(address, socket, PoolConfig.max_connection_lifetime)
connection.pull(n=test_input)
connection.send_all()
tag, fields = socket.pop_message()
assert tag == b"\x3F"
assert len(fields) == 1
assert fields[0] == expected
@pytest.mark.parametrize(
"test_input, expected",
[
(777, {"n": -1, "qid": 777}),
(-1, {"n": -1}),
]
)
@mark_sync_test
def test_qid_extra_in_pull(fake_socket, test_input, expected):
address = ("127.0.0.1", 7687)
socket = fake_socket(address, Bolt4x0.UNPACKER_CLS)
connection = Bolt4x0(address, socket, PoolConfig.max_connection_lifetime)
connection.pull(qid=test_input)
connection.send_all()
tag, fields = socket.pop_message()
assert tag == b"\x3F"
assert len(fields) == 1
assert fields[0] == expected
@mark_sync_test
def test_n_and_qid_extras_in_pull(fake_socket):
address = ("127.0.0.1", 7687)
socket = fake_socket(address, Bolt4x0.UNPACKER_CLS)
connection = Bolt4x0(address, socket, PoolConfig.max_connection_lifetime)
connection.pull(n=666, qid=777)
connection.send_all()
tag, fields = socket.pop_message()
assert tag == b"\x3F"
assert len(fields) == 1
assert fields[0] == {"n": 666, "qid": 777}
@pytest.mark.parametrize("recv_timeout", (1, -1))
@mark_sync_test
def test_hint_recv_timeout_seconds_gets_ignored(
fake_socket_pair, recv_timeout, mocker
):
address = ("127.0.0.1", 7687)
sockets = fake_socket_pair(address,
packer_cls=Bolt4x0.PACKER_CLS,
unpacker_cls=Bolt4x0.UNPACKER_CLS)
sockets.client.settimeout = mocker.MagicMock()
sockets.server.send_message(b"\x70", {
"server": "Neo4j/4.0.0",
"hints": {"connection.recv_timeout_seconds": recv_timeout},
})
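    # b"\x70" is the SUCCESS response to HELLO; Bolt 4.0 does not support the
    # connection.recv_timeout_seconds hint, so it must not touch the timeout.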
connection = Bolt4x0(
address, sockets.client, PoolConfig.max_connection_lifetime
)
connection.hello()
sockets.client.settimeout.assert_not_called()
| {
"content_hash": "996808b439d370016c076aa955c3dd77",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 80,
"avg_line_length": 31.287958115183248,
"alnum_prop": 0.6442436412315931,
"repo_name": "neo4j/neo4j-python-driver",
"id": "88f549936acfc62658552e057a2264d249143e3c",
"size": "6619",
"binary": false,
"copies": "1",
"ref": "refs/heads/5.0",
"path": "tests/unit/sync/io/test_class_bolt4x0.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2068"
},
{
"name": "Python",
"bytes": "1654566"
},
{
"name": "Shell",
"bytes": "4165"
}
],
"symlink_target": ""
} |
"""Tests for LUCluster*
"""
import OpenSSL
import copy
import unittest
import operator
import re
import shutil
import os
from ganeti.cmdlib import cluster
from ganeti.cmdlib.cluster import verify
from ganeti import constants
from ganeti import errors
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import utils
from ganeti import pathutils
from ganeti import query
from ganeti.hypervisor import hv_xen
from testsupport import *
import testutils
class TestClusterVerifySsh(unittest.TestCase):
def testMultipleGroups(self):
fn = verify.LUClusterVerifyGroup._SelectSshCheckNodes
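    # _SelectSshCheckNodes returns the online nodes of the requested group and,
    # for each of them, a small selection of nodes from the other groups to
    # SSH-check; offline nodes (and the all-offline group) are skipped.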
mygroupnodes = [
objects.Node(name="node20", group="my", offline=False,
master_candidate=True),
objects.Node(name="node21", group="my", offline=False,
master_candidate=True),
objects.Node(name="node22", group="my", offline=False,
master_candidate=False),
objects.Node(name="node23", group="my", offline=False,
master_candidate=True),
objects.Node(name="node24", group="my", offline=False,
master_candidate=True),
objects.Node(name="node25", group="my", offline=False,
master_candidate=False),
objects.Node(name="node26", group="my", offline=True,
master_candidate=True),
]
nodes = [
objects.Node(name="node1", group="g1", offline=True,
master_candidate=True),
objects.Node(name="node2", group="g1", offline=False,
master_candidate=False),
objects.Node(name="node3", group="g1", offline=False,
master_candidate=True),
objects.Node(name="node4", group="g1", offline=True,
master_candidate=True),
objects.Node(name="node5", group="g1", offline=False,
master_candidate=True),
objects.Node(name="node10", group="xyz", offline=False,
master_candidate=True),
objects.Node(name="node11", group="xyz", offline=False,
master_candidate=True),
objects.Node(name="node40", group="alloff", offline=True,
master_candidate=True),
objects.Node(name="node41", group="alloff", offline=True,
master_candidate=True),
objects.Node(name="node50", group="aaa", offline=False,
master_candidate=True),
] + mygroupnodes
assert not utils.FindDuplicates(map(operator.attrgetter("name"), nodes))
(online, perhost, _) = fn(mygroupnodes, "my", nodes)
self.assertEqual(online, ["node%s" % i for i in range(20, 26)])
self.assertEqual(set(perhost.keys()), set(online))
self.assertEqual(perhost, {
"node20": ["node10", "node2", "node50"],
"node21": ["node11", "node3", "node50"],
"node22": ["node10", "node5", "node50"],
"node23": ["node11", "node2", "node50"],
"node24": ["node10", "node3", "node50"],
"node25": ["node11", "node5", "node50"],
})
def testSingleGroup(self):
fn = verify.LUClusterVerifyGroup._SelectSshCheckNodes
nodes = [
objects.Node(name="node1", group="default", offline=True,
master_candidate=True),
objects.Node(name="node2", group="default", offline=False,
master_candidate=True),
objects.Node(name="node3", group="default", offline=False,
master_candidate=True),
objects.Node(name="node4", group="default", offline=True,
master_candidate=True),
]
assert not utils.FindDuplicates(map(operator.attrgetter("name"), nodes))
(online, perhost, _) = fn(nodes, "default", nodes)
self.assertEqual(online, ["node2", "node3"])
self.assertEqual(set(perhost.keys()), set(online))
self.assertEqual(perhost, {
"node2": [],
"node3": [],
})
class TestLUClusterActivateMasterIp(CmdlibTestCase):
def testSuccess(self):
op = opcodes.OpClusterActivateMasterIp()
self.rpc.call_node_activate_master_ip.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(self.master)
self.ExecOpCode(op)
self.rpc.call_node_activate_master_ip.assert_called_once_with(
self.master_uuid, self.cfg.GetMasterNetworkParameters(), False)
def testFailure(self):
op = opcodes.OpClusterActivateMasterIp()
self.rpc.call_node_activate_master_ip.return_value = \
self.RpcResultsBuilder() \
        .CreateFailedNodeResult(self.master)
self.ExecOpCodeExpectOpExecError(op)
class TestLUClusterDeactivateMasterIp(CmdlibTestCase):
def testSuccess(self):
op = opcodes.OpClusterDeactivateMasterIp()
self.rpc.call_node_deactivate_master_ip.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(self.master)
self.ExecOpCode(op)
self.rpc.call_node_deactivate_master_ip.assert_called_once_with(
self.master_uuid, self.cfg.GetMasterNetworkParameters(), False)
def testFailure(self):
op = opcodes.OpClusterDeactivateMasterIp()
self.rpc.call_node_deactivate_master_ip.return_value = \
self.RpcResultsBuilder() \
        .CreateFailedNodeResult(self.master)
self.ExecOpCodeExpectOpExecError(op)
class TestLUClusterConfigQuery(CmdlibTestCase):
def testInvalidField(self):
op = opcodes.OpClusterConfigQuery(output_fields=["pinky_bunny"])
self.ExecOpCodeExpectOpPrereqError(op, "pinky_bunny")
def testAllFields(self):
op = opcodes.OpClusterConfigQuery(output_fields=query.CLUSTER_FIELDS.keys())
self.rpc.call_get_watcher_pause.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(self.master, -1)
ret = self.ExecOpCode(op)
self.assertEqual(1, self.rpc.call_get_watcher_pause.call_count)
self.assertEqual(len(ret), len(query.CLUSTER_FIELDS))
  def testEmptyFields(self):
op = opcodes.OpClusterConfigQuery(output_fields=[])
self.ExecOpCode(op)
self.assertFalse(self.rpc.call_get_watcher_pause.called)
class TestLUClusterDestroy(CmdlibTestCase):
def testExistingNodes(self):
op = opcodes.OpClusterDestroy()
self.cfg.AddNewNode()
self.cfg.AddNewNode()
self.ExecOpCodeExpectOpPrereqError(op, "still 2 node\(s\)")
def testExistingInstances(self):
op = opcodes.OpClusterDestroy()
self.cfg.AddNewInstance()
self.cfg.AddNewInstance()
self.ExecOpCodeExpectOpPrereqError(op, "still 2 instance\(s\)")
def testEmptyCluster(self):
op = opcodes.OpClusterDestroy()
self.ExecOpCode(op)
self.assertSingleHooksCall([self.master.name],
"cluster-destroy",
constants.HOOKS_PHASE_POST)
class TestLUClusterPostInit(CmdlibTestCase):
@testutils.patch_object(cluster, "_UpdateMasterClientCert")
def testExecution(self, update_client_cert_mock):
# mock the client certificate creation as it is tested separately
update_client_cert_mock.return_value = None
# For the purpose of this test, return the same certificate digest for all
# nodes
self.rpc.call_node_crypto_tokens = \
lambda node_uuid, _: self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(node_uuid,
[(constants.CRYPTO_TYPE_SSL_DIGEST, "IA:MA:FA:KE:DI:GE:ST")])
op = opcodes.OpClusterPostInit()
self.ExecOpCode(op)
self.assertSingleHooksCall([self.master.uuid],
"cluster-init",
constants.HOOKS_PHASE_POST)
class TestLUClusterQuery(CmdlibTestCase):
def testSimpleInvocation(self):
op = opcodes.OpClusterQuery()
self.ExecOpCode(op)
def testIPv6Cluster(self):
op = opcodes.OpClusterQuery()
self.cluster.primary_ip_family = netutils.IP6Address.family
self.ExecOpCode(op)
class TestLUClusterRedistConf(CmdlibTestCase):
def testSimpleInvocation(self):
op = opcodes.OpClusterRedistConf()
self.ExecOpCode(op)
class TestLUClusterRename(CmdlibTestCase):
NEW_NAME = "new-name.example.com"
NEW_IP = "203.0.113.100"
def testNoChanges(self):
op = opcodes.OpClusterRename(name=self.cfg.GetClusterName())
self.ExecOpCodeExpectOpPrereqError(op, "name nor the IP address")
def testReachableIp(self):
op = opcodes.OpClusterRename(name=self.NEW_NAME)
self.netutils_mod.GetHostname.return_value = \
HostnameMock(self.NEW_NAME, self.NEW_IP)
self.netutils_mod.TcpPing.return_value = True
self.ExecOpCodeExpectOpPrereqError(op, "is reachable on the network")
def testValidRename(self):
op = opcodes.OpClusterRename(name=self.NEW_NAME)
self.netutils_mod.GetHostname.return_value = \
HostnameMock(self.NEW_NAME, self.NEW_IP)
self.ExecOpCode(op)
self.assertEqual(1, self.ssh_mod.WriteKnownHostsFile.call_count)
self.rpc.call_node_deactivate_master_ip.assert_called_once_with(
self.master_uuid, self.cfg.GetMasterNetworkParameters(), False)
self.rpc.call_node_activate_master_ip.assert_called_once_with(
self.master_uuid, self.cfg.GetMasterNetworkParameters(), False)
def testRenameOfflineMaster(self):
op = opcodes.OpClusterRename(name=self.NEW_NAME)
self.master.offline = True
self.netutils_mod.GetHostname.return_value = \
HostnameMock(self.NEW_NAME, self.NEW_IP)
self.ExecOpCode(op)
class TestLUClusterRepairDiskSizes(CmdlibTestCase):
def testNoInstances(self):
op = opcodes.OpClusterRepairDiskSizes()
self.ExecOpCode(op)
def _SetUpInstanceSingleDisk(self, dev_type=constants.DT_PLAIN):
pnode = self.master
snode = self.cfg.AddNewNode()
disk = self.cfg.CreateDisk(dev_type=dev_type,
primary_node=pnode,
secondary_node=snode)
inst = self.cfg.AddNewInstance(disks=[disk])
return (inst, disk)
def testSingleInstanceOnFailingNode(self):
(inst, _) = self._SetUpInstanceSingleDisk()
op = opcodes.OpClusterRepairDiskSizes(instances=[inst.name])
self.rpc.call_blockdev_getdimensions.return_value = \
self.RpcResultsBuilder() \
.CreateFailedNodeResult(self.master)
self.ExecOpCode(op)
self.mcpu.assertLogContainsRegex("Failure in blockdev_getdimensions")
def _ExecOpClusterRepairDiskSizes(self, node_data):
# not specifying instances repairs all
op = opcodes.OpClusterRepairDiskSizes()
self.rpc.call_blockdev_getdimensions.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(self.master, node_data)
return self.ExecOpCode(op)
def testInvalidResultData(self):
for data in [[], [None], ["invalid"], [("still", "invalid")]]:
self.ResetMocks()
self._SetUpInstanceSingleDisk()
self._ExecOpClusterRepairDiskSizes(data)
self.mcpu.assertLogContainsRegex("ignoring")
def testCorrectSize(self):
self._SetUpInstanceSingleDisk()
changed = self._ExecOpClusterRepairDiskSizes([(1024 * 1024 * 1024, None)])
self.mcpu.assertLogIsEmpty()
self.assertEqual(0, len(changed))
def testWrongSize(self):
self._SetUpInstanceSingleDisk()
changed = self._ExecOpClusterRepairDiskSizes([(512 * 1024 * 1024, None)])
self.assertEqual(1, len(changed))
def testCorrectDRBD(self):
self._SetUpInstanceSingleDisk(dev_type=constants.DT_DRBD8)
changed = self._ExecOpClusterRepairDiskSizes([(1024 * 1024 * 1024, None)])
self.mcpu.assertLogIsEmpty()
self.assertEqual(0, len(changed))
def testWrongDRBDChild(self):
(_, disk) = self._SetUpInstanceSingleDisk(dev_type=constants.DT_DRBD8)
disk.children[0].size = 512
changed = self._ExecOpClusterRepairDiskSizes([(1024 * 1024 * 1024, None)])
self.assertEqual(1, len(changed))
def testExclusiveStorageInvalidResultData(self):
self._SetUpInstanceSingleDisk()
self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
self._ExecOpClusterRepairDiskSizes([(1024 * 1024 * 1024, None)])
self.mcpu.assertLogContainsRegex(
"did not return valid spindles information")
def testExclusiveStorageCorrectSpindles(self):
(_, disk) = self._SetUpInstanceSingleDisk()
disk.spindles = 1
self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
changed = self._ExecOpClusterRepairDiskSizes([(1024 * 1024 * 1024, 1)])
self.assertEqual(0, len(changed))
def testExclusiveStorageWrongSpindles(self):
self._SetUpInstanceSingleDisk()
self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
changed = self._ExecOpClusterRepairDiskSizes([(1024 * 1024 * 1024, 1)])
self.assertEqual(1, len(changed))
class TestLUClusterSetParams(CmdlibTestCase):
UID_POOL = [(10, 1000)]
def testUidPool(self):
op = opcodes.OpClusterSetParams(uid_pool=self.UID_POOL)
self.ExecOpCode(op)
self.assertEqual(self.UID_POOL, self.cluster.uid_pool)
def testAddUids(self):
old_pool = [(1, 9)]
self.cluster.uid_pool = list(old_pool)
op = opcodes.OpClusterSetParams(add_uids=self.UID_POOL)
self.ExecOpCode(op)
self.assertEqual(set(self.UID_POOL + old_pool),
set(self.cluster.uid_pool))
def testRemoveUids(self):
additional_pool = [(1, 9)]
self.cluster.uid_pool = self.UID_POOL + additional_pool
op = opcodes.OpClusterSetParams(remove_uids=self.UID_POOL)
self.ExecOpCode(op)
self.assertEqual(additional_pool, self.cluster.uid_pool)
def testMacPrefix(self):
mac_prefix = "aa:01:02"
op = opcodes.OpClusterSetParams(mac_prefix=mac_prefix)
self.ExecOpCode(op)
self.assertEqual(mac_prefix, self.cluster.mac_prefix)
def testEmptyMacPrefix(self):
mac_prefix = ""
op = opcodes.OpClusterSetParams(mac_prefix=mac_prefix)
self.ExecOpCodeExpectOpPrereqError(
op, "Parameter 'OP_CLUSTER_SET_PARAMS.mac_prefix' fails validation")
def testInvalidMacPrefix(self):
mac_prefix = "az:00:00"
op = opcodes.OpClusterSetParams(mac_prefix=mac_prefix)
self.ExecOpCodeExpectOpPrereqError(op, "Invalid MAC address prefix")
def testMasterNetmask(self):
op = opcodes.OpClusterSetParams(master_netmask=26)
self.ExecOpCode(op)
self.assertEqual(26, self.cluster.master_netmask)
def testInvalidDiskparams(self):
for diskparams in [{constants.DT_DISKLESS: {constants.LV_STRIPES: 0}},
{constants.DT_DRBD8: {constants.RBD_POOL: "pool"}},
{constants.DT_DRBD8: {constants.RBD_ACCESS: "bunny"}}]:
self.ResetMocks()
op = opcodes.OpClusterSetParams(diskparams=diskparams)
self.ExecOpCodeExpectOpPrereqError(op, "verify diskparams")
def testValidDiskparams(self):
diskparams = {constants.DT_RBD: {constants.RBD_POOL: "mock_pool",
constants.RBD_ACCESS: "kernelspace"}}
op = opcodes.OpClusterSetParams(diskparams=diskparams)
self.ExecOpCode(op)
self.assertEqual(diskparams[constants.DT_RBD],
self.cluster.diskparams[constants.DT_RBD])
def testMinimalDiskparams(self):
diskparams = {constants.DT_RBD: {constants.RBD_POOL: "mock_pool"}}
self.cluster.diskparams = {}
op = opcodes.OpClusterSetParams(diskparams=diskparams)
self.ExecOpCode(op)
self.assertEqual(diskparams, self.cluster.diskparams)
def testValidDiskparamsAccess(self):
for value in constants.DISK_VALID_ACCESS_MODES:
self.ResetMocks()
op = opcodes.OpClusterSetParams(diskparams={
constants.DT_RBD: {constants.RBD_ACCESS: value}
})
self.ExecOpCode(op)
got = self.cluster.diskparams[constants.DT_RBD][constants.RBD_ACCESS]
self.assertEqual(value, got)
def testInvalidDiskparamsAccess(self):
for value in ["default", "pinky_bunny"]:
self.ResetMocks()
op = opcodes.OpClusterSetParams(diskparams={
constants.DT_RBD: {constants.RBD_ACCESS: value}
})
self.ExecOpCodeExpectOpPrereqError(op, "Invalid value of 'rbd:access'")
def testUnsetDrbdHelperWithDrbdDisks(self):
self.cfg.AddNewInstance(disks=[
self.cfg.CreateDisk(dev_type=constants.DT_DRBD8, create_nodes=True)])
op = opcodes.OpClusterSetParams(drbd_helper="")
self.ExecOpCodeExpectOpPrereqError(op, "Cannot disable drbd helper")
def testFileStorageDir(self):
op = opcodes.OpClusterSetParams(file_storage_dir="/random/path")
self.ExecOpCode(op)
def testSetFileStorageDirToCurrentValue(self):
op = opcodes.OpClusterSetParams(
file_storage_dir=self.cluster.file_storage_dir)
self.ExecOpCode(op)
self.mcpu.assertLogContainsRegex("file storage dir already set to value")
def testUnsetFileStorageDirFileStorageEnabled(self):
self.cfg.SetEnabledDiskTemplates([constants.DT_FILE])
op = opcodes.OpClusterSetParams(file_storage_dir='')
self.ExecOpCodeExpectOpPrereqError(op, "Unsetting the 'file' storage")
def testUnsetFileStorageDirFileStorageDisabled(self):
self.cfg.SetEnabledDiskTemplates([constants.DT_PLAIN])
op = opcodes.OpClusterSetParams(file_storage_dir='')
self.ExecOpCode(op)
def testSetFileStorageDirFileStorageDisabled(self):
self.cfg.SetEnabledDiskTemplates([constants.DT_PLAIN])
op = opcodes.OpClusterSetParams(file_storage_dir='/some/path/')
self.ExecOpCode(op)
self.mcpu.assertLogContainsRegex("although file storage is not enabled")
def testValidDrbdHelper(self):
node1 = self.cfg.AddNewNode()
node1.offline = True
self.rpc.call_drbd_helper.return_value = \
self.RpcResultsBuilder() \
.AddSuccessfulNode(self.master, "/bin/true") \
.AddOfflineNode(node1) \
.Build()
op = opcodes.OpClusterSetParams(drbd_helper="/bin/true")
self.ExecOpCode(op)
self.mcpu.assertLogContainsRegex("Not checking drbd helper on offline node")
def testDrbdHelperFailingNode(self):
self.rpc.call_drbd_helper.return_value = \
self.RpcResultsBuilder() \
.AddFailedNode(self.master) \
.Build()
op = opcodes.OpClusterSetParams(drbd_helper="/bin/true")
self.ExecOpCodeExpectOpPrereqError(op, "Error checking drbd helper")
def testInvalidDrbdHelper(self):
self.rpc.call_drbd_helper.return_value = \
self.RpcResultsBuilder() \
.AddSuccessfulNode(self.master, "/bin/false") \
.Build()
op = opcodes.OpClusterSetParams(drbd_helper="/bin/true")
self.ExecOpCodeExpectOpPrereqError(op, "drbd helper is /bin/false")
def testDrbdHelperWithoutDrbdDiskTemplate(self):
drbd_helper = "/bin/random_helper"
self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
self.rpc.call_drbd_helper.return_value = \
self.RpcResultsBuilder() \
.AddSuccessfulNode(self.master, drbd_helper) \
.Build()
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper)
self.ExecOpCode(op)
self.mcpu.assertLogContainsRegex("but did not enable")
def testResetDrbdHelperDrbdDisabled(self):
drbd_helper = ""
self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper)
self.ExecOpCode(op)
self.assertEqual(None, self.cluster.drbd_usermode_helper)
def testResetDrbdHelperDrbdEnabled(self):
drbd_helper = ""
self.cluster.enabled_disk_templates = [constants.DT_DRBD8]
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper)
self.ExecOpCodeExpectOpPrereqError(
op, "Cannot disable drbd helper while DRBD is enabled.")
def testEnableDrbdNoHelper(self):
self.cluster.enabled_disk_templates = [constants.DT_DISKLESS]
self.cluster.drbd_usermode_helper = None
enabled_disk_templates = [constants.DT_DRBD8]
op = opcodes.OpClusterSetParams(
enabled_disk_templates=enabled_disk_templates)
self.ExecOpCodeExpectOpPrereqError(
op, "Cannot enable DRBD without a DRBD usermode helper set")
def testEnableDrbdHelperSet(self):
drbd_helper = "/bin/random_helper"
self.rpc.call_drbd_helper.return_value = \
self.RpcResultsBuilder() \
.AddSuccessfulNode(self.master, drbd_helper) \
.Build()
self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
self.cluster.drbd_usermode_helper = drbd_helper
enabled_disk_templates = [constants.DT_DRBD8]
op = opcodes.OpClusterSetParams(
enabled_disk_templates=enabled_disk_templates,
ipolicy={constants.IPOLICY_DTS: enabled_disk_templates})
self.ExecOpCode(op)
self.assertEqual(drbd_helper, self.cluster.drbd_usermode_helper)
def testDrbdHelperAlreadySet(self):
drbd_helper = "/bin/true"
self.rpc.call_drbd_helper.return_value = \
self.RpcResultsBuilder() \
.AddSuccessfulNode(self.master, "/bin/true") \
.Build()
self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper)
self.ExecOpCode(op)
self.assertEqual(drbd_helper, self.cluster.drbd_usermode_helper)
self.mcpu.assertLogContainsRegex("DRBD helper already in desired state")
def testSetDrbdHelper(self):
drbd_helper = "/bin/true"
self.rpc.call_drbd_helper.return_value = \
self.RpcResultsBuilder() \
.AddSuccessfulNode(self.master, "/bin/true") \
.Build()
self.cluster.drbd_usermode_helper = "/bin/false"
self.cfg.SetEnabledDiskTemplates([constants.DT_DRBD8])
op = opcodes.OpClusterSetParams(drbd_helper=drbd_helper)
self.ExecOpCode(op)
self.assertEqual(drbd_helper, self.cluster.drbd_usermode_helper)
def testBeparams(self):
beparams = {constants.BE_VCPUS: 32}
op = opcodes.OpClusterSetParams(beparams=beparams)
self.ExecOpCode(op)
self.assertEqual(32, self.cluster
.beparams[constants.PP_DEFAULT][constants.BE_VCPUS])
def testNdparams(self):
ndparams = {constants.ND_EXCLUSIVE_STORAGE: True}
op = opcodes.OpClusterSetParams(ndparams=ndparams)
self.ExecOpCode(op)
self.assertEqual(True, self.cluster
.ndparams[constants.ND_EXCLUSIVE_STORAGE])
def testNdparamsResetOobProgram(self):
ndparams = {constants.ND_OOB_PROGRAM: ""}
op = opcodes.OpClusterSetParams(ndparams=ndparams)
self.ExecOpCode(op)
self.assertEqual(constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM],
self.cluster.ndparams[constants.ND_OOB_PROGRAM])
def testHvState(self):
hv_state = {constants.HT_FAKE: {constants.HVST_CPU_TOTAL: 8}}
op = opcodes.OpClusterSetParams(hv_state=hv_state)
self.ExecOpCode(op)
self.assertEqual(8, self.cluster.hv_state_static
[constants.HT_FAKE][constants.HVST_CPU_TOTAL])
def testDiskState(self):
disk_state = {
constants.DT_PLAIN: {
"mock_vg": {constants.DS_DISK_TOTAL: 10}
}
}
op = opcodes.OpClusterSetParams(disk_state=disk_state)
self.ExecOpCode(op)
self.assertEqual(10, self.cluster
.disk_state_static[constants.DT_PLAIN]["mock_vg"]
[constants.DS_DISK_TOTAL])
def testDefaultIPolicy(self):
ipolicy = constants.IPOLICY_DEFAULTS
op = opcodes.OpClusterSetParams(ipolicy=ipolicy)
self.ExecOpCode(op)
def testIPolicyNewViolation(self):
import ganeti.constants as C
ipolicy = C.IPOLICY_DEFAULTS
ipolicy[C.ISPECS_MINMAX][0][C.ISPECS_MIN][C.ISPEC_MEM_SIZE] = 128
ipolicy[C.ISPECS_MINMAX][0][C.ISPECS_MAX][C.ISPEC_MEM_SIZE] = 128
self.cfg.AddNewInstance(beparams={C.BE_MINMEM: 512, C.BE_MAXMEM: 512})
op = opcodes.OpClusterSetParams(ipolicy=ipolicy)
self.ExecOpCode(op)
self.mcpu.assertLogContainsRegex("instances violate them")
def testNicparamsNoInstance(self):
nicparams = {
constants.NIC_LINK: "mock_bridge"
}
op = opcodes.OpClusterSetParams(nicparams=nicparams)
self.ExecOpCode(op)
self.assertEqual("mock_bridge",
self.cluster.nicparams
[constants.PP_DEFAULT][constants.NIC_LINK])
def testNicparamsInvalidConf(self):
nicparams = {
constants.NIC_MODE: constants.NIC_MODE_BRIDGED,
constants.NIC_LINK: ""
}
op = opcodes.OpClusterSetParams(nicparams=nicparams)
self.ExecOpCodeExpectException(op, errors.ConfigurationError, "NIC link")
def testNicparamsInvalidInstanceConf(self):
nicparams = {
constants.NIC_MODE: constants.NIC_MODE_BRIDGED,
constants.NIC_LINK: "mock_bridge"
}
self.cfg.AddNewInstance(nics=[
self.cfg.CreateNic(nicparams={constants.NIC_LINK: None})])
op = opcodes.OpClusterSetParams(nicparams=nicparams)
self.ExecOpCodeExpectOpPrereqError(op, "Missing bridged NIC link")
def testNicparamsMissingIp(self):
nicparams = {
constants.NIC_MODE: constants.NIC_MODE_ROUTED
}
self.cfg.AddNewInstance()
op = opcodes.OpClusterSetParams(nicparams=nicparams)
self.ExecOpCodeExpectOpPrereqError(op, "routed NIC with no ip address")
def testNicparamsWithInstance(self):
nicparams = {
constants.NIC_LINK: "mock_bridge"
}
self.cfg.AddNewInstance()
op = opcodes.OpClusterSetParams(nicparams=nicparams)
self.ExecOpCode(op)
def testDefaultHvparams(self):
hvparams = constants.HVC_DEFAULTS
op = opcodes.OpClusterSetParams(hvparams=hvparams)
self.ExecOpCode(op)
self.assertEqual(hvparams, self.cluster.hvparams)
def testMinimalHvparams(self):
hvparams = {
constants.HT_FAKE: {
constants.HV_MIGRATION_MODE: constants.HT_MIGRATION_NONLIVE
}
}
self.cluster.hvparams = {}
op = opcodes.OpClusterSetParams(hvparams=hvparams)
self.ExecOpCode(op)
self.assertEqual(hvparams, self.cluster.hvparams)
def testOsHvp(self):
os_hvp = {
"mocked_os": {
constants.HT_FAKE: {
constants.HV_MIGRATION_MODE: constants.HT_MIGRATION_NONLIVE
}
},
"other_os": constants.HVC_DEFAULTS
}
op = opcodes.OpClusterSetParams(os_hvp=os_hvp)
self.ExecOpCode(op)
self.assertEqual(constants.HT_MIGRATION_NONLIVE,
self.cluster.os_hvp["mocked_os"][constants.HT_FAKE]
[constants.HV_MIGRATION_MODE])
self.assertEqual(constants.HVC_DEFAULTS, self.cluster.os_hvp["other_os"])
def testRemoveOsHvp(self):
os_hvp = {"mocked_os": {constants.HT_FAKE: None}}
op = opcodes.OpClusterSetParams(os_hvp=os_hvp)
self.ExecOpCode(op)
assert constants.HT_FAKE not in self.cluster.os_hvp["mocked_os"]
def testDefaultOsHvp(self):
os_hvp = {"mocked_os": constants.HVC_DEFAULTS.copy()}
self.cluster.os_hvp = {"mocked_os": {}}
op = opcodes.OpClusterSetParams(os_hvp=os_hvp)
self.ExecOpCode(op)
self.assertEqual(os_hvp, self.cluster.os_hvp)
def testOsparams(self):
osparams = {
"mocked_os": {
"param1": "value1",
"param2": None
},
"other_os": {
"param1": None
}
}
self.cluster.osparams = {"other_os": {"param1": "value1"}}
self.cluster.osparams_private_cluster = {}
op = opcodes.OpClusterSetParams(osparams=osparams)
self.ExecOpCode(op)
self.assertEqual({"mocked_os": {"param1": "value1"}}, self.cluster.osparams)
def testEnabledHypervisors(self):
enabled_hypervisors = [constants.HT_XEN_HVM, constants.HT_XEN_PVM]
op = opcodes.OpClusterSetParams(enabled_hypervisors=enabled_hypervisors)
self.ExecOpCode(op)
self.assertEqual(enabled_hypervisors, self.cluster.enabled_hypervisors)
def testEnabledHypervisorsWithoutHypervisorParams(self):
enabled_hypervisors = [constants.HT_FAKE]
self.cluster.hvparams = {}
op = opcodes.OpClusterSetParams(enabled_hypervisors=enabled_hypervisors)
self.ExecOpCode(op)
self.assertEqual(enabled_hypervisors, self.cluster.enabled_hypervisors)
self.assertEqual(constants.HVC_DEFAULTS[constants.HT_FAKE],
self.cluster.hvparams[constants.HT_FAKE])
@testutils.patch_object(utils, "FindFile")
def testValidDefaultIallocator(self, find_file_mock):
find_file_mock.return_value = "/random/path"
default_iallocator = "/random/path"
op = opcodes.OpClusterSetParams(default_iallocator=default_iallocator)
self.ExecOpCode(op)
self.assertEqual(default_iallocator, self.cluster.default_iallocator)
@testutils.patch_object(utils, "FindFile")
def testInvalidDefaultIallocator(self, find_file_mock):
find_file_mock.return_value = None
default_iallocator = "/random/path"
op = opcodes.OpClusterSetParams(default_iallocator=default_iallocator)
self.ExecOpCodeExpectOpPrereqError(op, "Invalid default iallocator script")
def testEnabledDiskTemplates(self):
enabled_disk_templates = [constants.DT_DISKLESS, constants.DT_PLAIN]
op = opcodes.OpClusterSetParams(
enabled_disk_templates=enabled_disk_templates,
ipolicy={constants.IPOLICY_DTS: enabled_disk_templates})
self.ExecOpCode(op)
self.assertEqual(enabled_disk_templates,
self.cluster.enabled_disk_templates)
def testEnabledDiskTemplatesVsIpolicy(self):
enabled_disk_templates = [constants.DT_DISKLESS, constants.DT_PLAIN]
op = opcodes.OpClusterSetParams(
enabled_disk_templates=enabled_disk_templates,
ipolicy={constants.IPOLICY_DTS: [constants.DT_FILE]})
self.ExecOpCodeExpectOpPrereqError(op, "but not enabled on the cluster")
def testDisablingDiskTemplatesOfInstances(self):
old_disk_templates = [constants.DT_DISKLESS, constants.DT_PLAIN]
self.cfg.SetEnabledDiskTemplates(old_disk_templates)
self.cfg.AddNewInstance(
disks=[self.cfg.CreateDisk(dev_type=constants.DT_PLAIN)])
new_disk_templates = [constants.DT_DISKLESS, constants.DT_DRBD8]
op = opcodes.OpClusterSetParams(
enabled_disk_templates=new_disk_templates,
ipolicy={constants.IPOLICY_DTS: new_disk_templates})
self.ExecOpCodeExpectOpPrereqError(op, "least one disk using it")
def testEnabledDiskTemplatesWithoutVgName(self):
enabled_disk_templates = [constants.DT_PLAIN]
self.cluster.volume_group_name = None
op = opcodes.OpClusterSetParams(
enabled_disk_templates=enabled_disk_templates)
self.ExecOpCodeExpectOpPrereqError(op, "specify a volume group")
def testDisableDiskTemplateWithExistingInstance(self):
enabled_disk_templates = [constants.DT_DISKLESS]
self.cfg.AddNewInstance(
disks=[self.cfg.CreateDisk(dev_type=constants.DT_PLAIN)])
op = opcodes.OpClusterSetParams(
enabled_disk_templates=enabled_disk_templates,
ipolicy={constants.IPOLICY_DTS: enabled_disk_templates})
self.ExecOpCodeExpectOpPrereqError(op, "Cannot disable disk template")
def testDisableDiskTemplateWithExistingInstanceDiskless(self):
self.cfg.AddNewInstance(disks=[])
enabled_disk_templates = [constants.DT_PLAIN]
op = opcodes.OpClusterSetParams(
enabled_disk_templates=enabled_disk_templates,
ipolicy={constants.IPOLICY_DTS: enabled_disk_templates})
self.ExecOpCodeExpectOpPrereqError(op, "Cannot disable disk template")
def testVgNameNoLvmDiskTemplateEnabled(self):
vg_name = "test_vg"
self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
op = opcodes.OpClusterSetParams(vg_name=vg_name)
self.ExecOpCode(op)
self.assertEqual(vg_name, self.cluster.volume_group_name)
self.mcpu.assertLogIsEmpty()
def testUnsetVgNameWithLvmDiskTemplateEnabled(self):
vg_name = ""
self.cluster.enabled_disk_templates = [constants.DT_PLAIN]
op = opcodes.OpClusterSetParams(vg_name=vg_name)
self.ExecOpCodeExpectOpPrereqError(op, "Cannot unset volume group")
def testUnsetVgNameWithLvmInstance(self):
vg_name = ""
self.cfg.AddNewInstance(
disks=[self.cfg.CreateDisk(dev_type=constants.DT_PLAIN)])
op = opcodes.OpClusterSetParams(vg_name=vg_name)
self.ExecOpCodeExpectOpPrereqError(op, "Cannot unset volume group")
def testUnsetVgNameWithNoLvmDiskTemplateEnabled(self):
vg_name = ""
self.cfg.SetEnabledDiskTemplates([constants.DT_DISKLESS])
op = opcodes.OpClusterSetParams(vg_name=vg_name)
self.ExecOpCode(op)
self.assertEqual(None, self.cluster.volume_group_name)
def testVgNameToOldName(self):
vg_name = self.cluster.volume_group_name
op = opcodes.OpClusterSetParams(vg_name=vg_name)
self.ExecOpCode(op)
self.mcpu.assertLogContainsRegex("already in desired state")
def testVgNameWithFailingNode(self):
vg_name = "test_vg"
op = opcodes.OpClusterSetParams(vg_name=vg_name)
self.rpc.call_vg_list.return_value = \
self.RpcResultsBuilder() \
.AddFailedNode(self.master) \
.Build()
self.ExecOpCode(op)
self.mcpu.assertLogContainsRegex("Error while gathering data on node")
def testVgNameWithValidNode(self):
vg_name = "test_vg"
op = opcodes.OpClusterSetParams(vg_name=vg_name)
self.rpc.call_vg_list.return_value = \
self.RpcResultsBuilder() \
.AddSuccessfulNode(self.master, {vg_name: 1024 * 1024}) \
.Build()
self.ExecOpCode(op)
def testVgNameWithTooSmallNode(self):
vg_name = "test_vg"
op = opcodes.OpClusterSetParams(vg_name=vg_name)
self.rpc.call_vg_list.return_value = \
self.RpcResultsBuilder() \
.AddSuccessfulNode(self.master, {vg_name: 1}) \
.Build()
self.ExecOpCodeExpectOpPrereqError(op, "too small")
def testMiscParameters(self):
op = opcodes.OpClusterSetParams(candidate_pool_size=123,
maintain_node_health=True,
modify_etc_hosts=True,
prealloc_wipe_disks=True,
reserved_lvs=["/dev/mock_lv"],
use_external_mip_script=True)
self.ExecOpCode(op)
self.mcpu.assertLogIsEmpty()
self.assertEqual(123, self.cluster.candidate_pool_size)
self.assertEqual(True, self.cluster.maintain_node_health)
self.assertEqual(True, self.cluster.modify_etc_hosts)
self.assertEqual(True, self.cluster.prealloc_wipe_disks)
self.assertEqual(["/dev/mock_lv"], self.cluster.reserved_lvs)
self.assertEqual(True, self.cluster.use_external_mip_script)
def testAddHiddenOs(self):
self.cluster.hidden_os = ["hidden1", "hidden2"]
op = opcodes.OpClusterSetParams(hidden_os=[(constants.DDM_ADD, "hidden2"),
(constants.DDM_ADD, "hidden3")])
self.ExecOpCode(op)
self.assertEqual(["hidden1", "hidden2", "hidden3"], self.cluster.hidden_os)
self.mcpu.assertLogContainsRegex("OS hidden2 already")
def testRemoveBlacklistedOs(self):
self.cluster.blacklisted_os = ["blisted1", "blisted2"]
op = opcodes.OpClusterSetParams(blacklisted_os=[
(constants.DDM_REMOVE, "blisted2"),
(constants.DDM_REMOVE, "blisted3")])
self.ExecOpCode(op)
self.assertEqual(["blisted1"], self.cluster.blacklisted_os)
self.mcpu.assertLogContainsRegex("OS blisted3 not found")
def testMasterNetdev(self):
master_netdev = "test_dev"
op = opcodes.OpClusterSetParams(master_netdev=master_netdev)
self.ExecOpCode(op)
self.assertEqual(master_netdev, self.cluster.master_netdev)
def testMasterNetdevFailNoForce(self):
master_netdev = "test_dev"
op = opcodes.OpClusterSetParams(master_netdev=master_netdev)
self.rpc.call_node_deactivate_master_ip.return_value = \
self.RpcResultsBuilder() \
.CreateFailedNodeResult(self.master)
self.ExecOpCodeExpectOpExecError(op, "Could not disable the master ip")
def testMasterNetdevFailForce(self):
master_netdev = "test_dev"
op = opcodes.OpClusterSetParams(master_netdev=master_netdev,
force=True)
self.rpc.call_node_deactivate_master_ip.return_value = \
self.RpcResultsBuilder() \
.CreateFailedNodeResult(self.master)
self.ExecOpCode(op)
self.mcpu.assertLogContainsRegex("Could not disable the master ip")
def testCompressionToolSuccess(self):
compression_tools = ["certainly_not_a_default", "gzip"]
op = opcodes.OpClusterSetParams(compression_tools=compression_tools)
self.ExecOpCode(op)
self.assertEqual(compression_tools, self.cluster.compression_tools)
def testCompressionToolCompatibility(self):
compression_tools = ["not_gzip", "not_not_not_gzip"]
op = opcodes.OpClusterSetParams(compression_tools=compression_tools)
self.ExecOpCodeExpectOpPrereqError(op, ".*the gzip utility must be.*")
def testCompressionToolForbiddenValues(self):
for value in ["none", "\"rm -rf all.all\"", "ls$IFS-la"]:
compression_tools = [value, "gzip"]
op = opcodes.OpClusterSetParams(compression_tools=compression_tools)
self.ExecOpCodeExpectOpPrereqError(op, re.escape(value))
class TestLUClusterVerify(CmdlibTestCase):
def testVerifyAllGroups(self):
op = opcodes.OpClusterVerify()
result = self.ExecOpCode(op)
self.assertEqual(2, len(result["jobs"]))
def testVerifyDefaultGroups(self):
op = opcodes.OpClusterVerify(group_name="default")
result = self.ExecOpCode(op)
self.assertEqual(1, len(result["jobs"]))
class TestLUClusterVerifyConfig(CmdlibTestCase):
def setUp(self):
super(TestLUClusterVerifyConfig, self).setUp()
self._load_cert_patcher = testutils \
.patch_object(OpenSSL.crypto, "load_certificate")
self._load_cert_mock = self._load_cert_patcher.start()
self._verify_cert_patcher = testutils \
.patch_object(utils, "VerifyCertificate")
self._verify_cert_mock = self._verify_cert_patcher.start()
self._read_file_patcher = testutils.patch_object(utils, "ReadFile")
self._read_file_mock = self._read_file_patcher.start()
self._can_read_patcher = testutils.patch_object(utils, "CanRead")
self._can_read_mock = self._can_read_patcher.start()
self._can_read_mock.return_value = True
self._read_file_mock.return_value = True
self._verify_cert_mock.return_value = (None, "")
self._load_cert_mock.return_value = True
def tearDown(self):
super(TestLUClusterVerifyConfig, self).tearDown()
self._can_read_patcher.stop()
self._read_file_patcher.stop()
self._verify_cert_patcher.stop()
self._load_cert_patcher.stop()
def testSuccessfulRun(self):
self.cfg.AddNewInstance()
op = opcodes.OpClusterVerifyConfig()
result = self.ExecOpCode(op)
self.assertTrue(result)
def testDanglingNode(self):
node = self.cfg.AddNewNode()
self.cfg.AddNewInstance(primary_node=node)
node.group = "invalid"
op = opcodes.OpClusterVerifyConfig()
result = self.ExecOpCode(op)
self.mcpu.assertLogContainsRegex(
"following nodes \(and their instances\) belong to a non existing group")
self.assertFalse(result)
def testDanglingInstance(self):
inst = self.cfg.AddNewInstance()
inst.primary_node = "invalid"
op = opcodes.OpClusterVerifyConfig()
result = self.ExecOpCode(op)
self.mcpu.assertLogContainsRegex(
"following instances have a non-existing primary-node")
self.assertFalse(result)
def testDanglingDisk(self):
self.cfg.AddOrphanDisk()
op = opcodes.OpClusterVerifyConfig()
result = self.ExecOpCode(op)
self.assertTrue(result)
class TestLUClusterVerifyGroup(CmdlibTestCase):
def testEmptyNodeGroup(self):
group = self.cfg.AddNewNodeGroup()
op = opcodes.OpClusterVerifyGroup(group_name=group.name, verbose=True)
result = self.ExecOpCode(op)
self.assertTrue(result)
self.mcpu.assertLogContainsRegex("Empty node group, skipping verification")
def testSimpleInvocation(self):
op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)
self.ExecOpCode(op)
def testSimpleInvocationWithInstance(self):
self.cfg.AddNewInstance(disks=[])
op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)
self.ExecOpCode(op)
def testGhostNode(self):
group = self.cfg.AddNewNodeGroup()
node = self.cfg.AddNewNode(group=group.uuid, offline=True)
self.master.offline = True
self.cfg.AddNewInstance(disk_template=constants.DT_DRBD8,
primary_node=self.master,
secondary_node=node)
self.rpc.call_blockdev_getmirrorstatus_multi.return_value = \
RpcResultsBuilder() \
.AddOfflineNode(self.master) \
.Build()
op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)
self.ExecOpCode(op)
def testValidRpcResult(self):
self.cfg.AddNewInstance(disks=[])
self.rpc.call_node_verify.return_value = \
RpcResultsBuilder() \
.AddSuccessfulNode(self.master, {}) \
.Build()
op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)
self.ExecOpCode(op)
def testVerifyNodeDrbdSuccess(self):
ninfo = self.cfg.AddNewNode()
disk = self.cfg.CreateDisk(dev_type=constants.DT_DRBD8,
primary_node=self.master,
secondary_node=ninfo)
instance = self.cfg.AddNewInstance(disks=[disk])
instanceinfo = self.cfg.GetAllInstancesInfo()
disks_info = self.cfg.GetAllDisksInfo()
drbd_map = {ninfo.uuid: {0: disk.uuid}}
minors = verify.LUClusterVerifyGroup._ComputeDrbdMinors(
ninfo, instanceinfo, disks_info, drbd_map, lambda *args: None)
    self.assertEqual(minors, {0: (disk.uuid, instance.uuid, False)})
class TestLUClusterVerifyClientCerts(CmdlibTestCase):
def _AddNormalNode(self):
self.normalnode = copy.deepcopy(self.master)
self.normalnode.master_candidate = False
self.normalnode.uuid = "normal-node-uuid"
self.cfg.AddNode(self.normalnode, None)
def testVerifyMasterCandidate(self):
client_cert = "client-cert-digest"
self.cluster.candidate_certs = {self.master.uuid: client_cert}
self.rpc.call_node_verify.return_value = \
RpcResultsBuilder() \
.AddSuccessfulNode(self.master,
{constants.NV_CLIENT_CERT: (None, client_cert)}) \
.Build()
op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)
self.ExecOpCode(op)
def testVerifyMasterCandidateInvalid(self):
client_cert = "client-cert-digest"
self.cluster.candidate_certs = {self.master.uuid: client_cert}
self.rpc.call_node_verify.return_value = \
RpcResultsBuilder() \
.AddSuccessfulNode(self.master,
{constants.NV_CLIENT_CERT: (666, "Invalid Certificate")}) \
.Build()
op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)
self.ExecOpCode(op)
self.mcpu.assertLogContainsRegex("Client certificate")
self.mcpu.assertLogContainsRegex("failed validation")
def testVerifyNoMasterCandidateMap(self):
client_cert = "client-cert-digest"
self.cluster.candidate_certs = {}
self.rpc.call_node_verify.return_value = \
RpcResultsBuilder() \
.AddSuccessfulNode(self.master,
{constants.NV_CLIENT_CERT: (None, client_cert)}) \
.Build()
op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)
self.ExecOpCode(op)
self.mcpu.assertLogContainsRegex(
"list of master candidate certificates is empty")
def testVerifyNoSharingMasterCandidates(self):
client_cert = "client-cert-digest"
self.cluster.candidate_certs = {
self.master.uuid: client_cert,
"some-other-master-candidate-uuid": client_cert}
self.rpc.call_node_verify.return_value = \
RpcResultsBuilder() \
.AddSuccessfulNode(self.master,
{constants.NV_CLIENT_CERT: (None, client_cert)}) \
.Build()
op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)
self.ExecOpCode(op)
self.mcpu.assertLogContainsRegex(
"two master candidates configured to use the same")
def testVerifyMasterCandidateCertMismatch(self):
client_cert = "client-cert-digest"
self.cluster.candidate_certs = {self.master.uuid: "different-cert-digest"}
self.rpc.call_node_verify.return_value = \
RpcResultsBuilder() \
.AddSuccessfulNode(self.master,
{constants.NV_CLIENT_CERT: (None, client_cert)}) \
.Build()
op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)
self.ExecOpCode(op)
self.mcpu.assertLogContainsRegex("does not match its entry")
def testVerifyMasterCandidateUnregistered(self):
client_cert = "client-cert-digest"
self.cluster.candidate_certs = {"other-node-uuid": "different-cert-digest"}
self.rpc.call_node_verify.return_value = \
RpcResultsBuilder() \
.AddSuccessfulNode(self.master,
{constants.NV_CLIENT_CERT: (None, client_cert)}) \
.Build()
op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)
self.ExecOpCode(op)
self.mcpu.assertLogContainsRegex("does not have an entry")
def testVerifyMasterCandidateOtherNodesCert(self):
client_cert = "client-cert-digest"
self.cluster.candidate_certs = {"other-node-uuid": client_cert}
self.rpc.call_node_verify.return_value = \
RpcResultsBuilder() \
.AddSuccessfulNode(self.master,
{constants.NV_CLIENT_CERT: (None, client_cert)}) \
.Build()
op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)
self.ExecOpCode(op)
self.mcpu.assertLogContainsRegex("using a certificate of another node")
def testNormalNodeStillInList(self):
self._AddNormalNode()
client_cert_master = "client-cert-digest-master"
client_cert_normal = "client-cert-digest-normal"
self.cluster.candidate_certs = {
self.normalnode.uuid: client_cert_normal,
self.master.uuid: client_cert_master}
self.rpc.call_node_verify.return_value = \
RpcResultsBuilder() \
.AddSuccessfulNode(self.normalnode,
{constants.NV_CLIENT_CERT: (None, client_cert_normal)}) \
.AddSuccessfulNode(self.master,
{constants.NV_CLIENT_CERT: (None, client_cert_master)}) \
.Build()
op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)
self.ExecOpCode(op)
self.mcpu.assertLogContainsRegex("not a master candidate")
self.mcpu.assertLogContainsRegex("still listed")
def testNormalNodeStealingMasterCandidateCert(self):
self._AddNormalNode()
client_cert_master = "client-cert-digest-master"
self.cluster.candidate_certs = {
self.master.uuid: client_cert_master}
self.rpc.call_node_verify.return_value = \
RpcResultsBuilder() \
.AddSuccessfulNode(self.normalnode,
{constants.NV_CLIENT_CERT: (None, client_cert_master)}) \
.AddSuccessfulNode(self.master,
{constants.NV_CLIENT_CERT: (None, client_cert_master)}) \
.Build()
op = opcodes.OpClusterVerifyGroup(group_name="default", verbose=True)
self.ExecOpCode(op)
self.mcpu.assertLogContainsRegex("not a master candidate")
self.mcpu.assertLogContainsRegex(
"certificate of another node which is master candidate")
class TestLUClusterVerifyGroupMethods(CmdlibTestCase):
"""Base class for testing individual methods in LUClusterVerifyGroup.
"""
def setUp(self):
super(TestLUClusterVerifyGroupMethods, self).setUp()
self.op = opcodes.OpClusterVerifyGroup(group_name="default")
def PrepareLU(self, lu):
lu._exclusive_storage = False
lu.master_node = self.master_uuid
lu.group_info = self.group
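    # expose the nodes of this test's mocked configuration to the LU under test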
verify.LUClusterVerifyGroup.all_node_info = \
property(fget=lambda _: self.cfg.GetAllNodesInfo())
class TestLUClusterVerifyGroupVerifyNode(TestLUClusterVerifyGroupMethods):
@withLockedLU
def testInvalidNodeResult(self, lu):
self.assertFalse(lu._VerifyNode(self.master, None))
self.assertFalse(lu._VerifyNode(self.master, ""))
@withLockedLU
def testInvalidVersion(self, lu):
self.assertFalse(lu._VerifyNode(self.master, {"version": None}))
self.assertFalse(lu._VerifyNode(self.master, {"version": ""}))
self.assertFalse(lu._VerifyNode(self.master, {
"version": (constants.PROTOCOL_VERSION - 1, constants.RELEASE_VERSION)
}))
self.mcpu.ClearLogMessages()
self.assertTrue(lu._VerifyNode(self.master, {
"version": (constants.PROTOCOL_VERSION, constants.RELEASE_VERSION + "x")
}))
self.mcpu.assertLogContainsRegex("software version mismatch")
def _GetValidNodeResult(self, additional_fields):
ret = {
"version": (constants.PROTOCOL_VERSION, constants.RELEASE_VERSION),
constants.NV_NODESETUP: []
}
ret.update(additional_fields)
return ret
@withLockedLU
def testHypervisor(self, lu):
lu._VerifyNode(self.master, self._GetValidNodeResult({
constants.NV_HYPERVISOR: {
constants.HT_XEN_PVM: None,
constants.HT_XEN_HVM: "mock error"
}
}))
self.mcpu.assertLogContainsRegex(constants.HT_XEN_HVM)
self.mcpu.assertLogContainsRegex("mock error")
@withLockedLU
def testHvParams(self, lu):
lu._VerifyNode(self.master, self._GetValidNodeResult({
constants.NV_HVPARAMS: [("mock item", constants.HT_XEN_HVM, "mock error")]
}))
self.mcpu.assertLogContainsRegex(constants.HT_XEN_HVM)
self.mcpu.assertLogContainsRegex("mock item")
self.mcpu.assertLogContainsRegex("mock error")
@withLockedLU
def testSuccessfulResult(self, lu):
self.assertTrue(lu._VerifyNode(self.master, self._GetValidNodeResult({})))
self.mcpu.assertLogIsEmpty()
class TestLUClusterVerifyGroupVerifyNodeTime(TestLUClusterVerifyGroupMethods):
@withLockedLU
def testInvalidNodeResult(self, lu):
for ndata in [{}, {constants.NV_TIME: "invalid"}]:
self.mcpu.ClearLogMessages()
lu._VerifyNodeTime(self.master, ndata, None, None)
self.mcpu.assertLogContainsRegex("Node returned invalid time")
@withLockedLU
def testNodeDiverges(self, lu):
for ntime in [(0, 0), (2000, 0)]:
self.mcpu.ClearLogMessages()
lu._VerifyNodeTime(self.master, {constants.NV_TIME: ntime}, 1000, 1005)
self.mcpu.assertLogContainsRegex("Node time diverges")
@withLockedLU
def testSuccessfulResult(self, lu):
lu._VerifyNodeTime(self.master, {constants.NV_TIME: (0, 0)}, 0, 5)
self.mcpu.assertLogIsEmpty()
class TestLUClusterVerifyGroupUpdateVerifyNodeLVM(
TestLUClusterVerifyGroupMethods):
def setUp(self):
super(TestLUClusterVerifyGroupUpdateVerifyNodeLVM, self).setUp()
self.VALID_NRESULT = {
constants.NV_VGLIST: {"mock_vg": 30000},
constants.NV_PVLIST: [
{
"name": "mock_pv",
"vg_name": "mock_vg",
"size": 5000,
"free": 2500,
"attributes": [],
"lv_list": []
}
]
}
@withLockedLU
def testNoVgName(self, lu):
lu._UpdateVerifyNodeLVM(self.master, {}, None, None)
self.mcpu.assertLogIsEmpty()
@withLockedLU
def testEmptyNodeResult(self, lu):
lu._UpdateVerifyNodeLVM(self.master, {}, "mock_vg", None)
self.mcpu.assertLogContainsRegex("unable to check volume groups")
self.mcpu.assertLogContainsRegex("Can't get PV list from node")
@withLockedLU
def testValidNodeResult(self, lu):
lu._UpdateVerifyNodeLVM(self.master, self.VALID_NRESULT, "mock_vg", None)
self.mcpu.assertLogIsEmpty()
@withLockedLU
def testValidNodeResultExclusiveStorage(self, lu):
lu._exclusive_storage = True
lu._UpdateVerifyNodeLVM(self.master, self.VALID_NRESULT, "mock_vg",
verify.LUClusterVerifyGroup.NodeImage())
self.mcpu.assertLogIsEmpty()
class TestLUClusterVerifyGroupVerifyGroupDRBDVersion(
TestLUClusterVerifyGroupMethods):
@withLockedLU
def testEmptyNodeResult(self, lu):
lu._VerifyGroupDRBDVersion({})
self.mcpu.assertLogIsEmpty()
@withLockedLU
def testValidNodeResult(self, lu):
lu._VerifyGroupDRBDVersion(
RpcResultsBuilder()
.AddSuccessfulNode(self.master, {
constants.NV_DRBDVERSION: "8.3.0"
})
.Build())
self.mcpu.assertLogIsEmpty()
@withLockedLU
def testDifferentVersions(self, lu):
node1 = self.cfg.AddNewNode()
lu._VerifyGroupDRBDVersion(
RpcResultsBuilder()
.AddSuccessfulNode(self.master, {
constants.NV_DRBDVERSION: "8.3.0"
})
.AddSuccessfulNode(node1, {
constants.NV_DRBDVERSION: "8.4.0"
})
.Build())
self.mcpu.assertLogContainsRegex("DRBD version mismatch: 8.3.0")
self.mcpu.assertLogContainsRegex("DRBD version mismatch: 8.4.0")
class TestLUClusterVerifyGroupVerifyGroupLVM(TestLUClusterVerifyGroupMethods):
@withLockedLU
def testNoVgName(self, lu):
lu._VerifyGroupLVM(None, None)
self.mcpu.assertLogIsEmpty()
@withLockedLU
def testNoExclusiveStorage(self, lu):
lu._VerifyGroupLVM(None, "mock_vg")
self.mcpu.assertLogIsEmpty()
@withLockedLU
def testNoPvInfo(self, lu):
lu._exclusive_storage = True
nimg = verify.LUClusterVerifyGroup.NodeImage()
lu._VerifyGroupLVM({self.master.uuid: nimg}, "mock_vg")
self.mcpu.assertLogIsEmpty()
@withLockedLU
def testValidPvInfos(self, lu):
lu._exclusive_storage = True
node2 = self.cfg.AddNewNode()
nimg1 = verify.LUClusterVerifyGroup.NodeImage(uuid=self.master.uuid)
nimg1.pv_min = 10000
nimg1.pv_max = 10010
nimg2 = verify.LUClusterVerifyGroup.NodeImage(uuid=node2.uuid)
nimg2.pv_min = 9998
nimg2.pv_max = 10005
lu._VerifyGroupLVM({self.master.uuid: nimg1, node2.uuid: nimg2}, "mock_vg")
self.mcpu.assertLogIsEmpty()
class TestLUClusterVerifyGroupVerifyNodeBridges(
TestLUClusterVerifyGroupMethods):
@withLockedLU
def testNoBridges(self, lu):
lu._VerifyNodeBridges(None, None, None)
self.mcpu.assertLogIsEmpty()
@withLockedLU
def testInvalidBridges(self, lu):
for ndata in [{}, {constants.NV_BRIDGES: ""}]:
self.mcpu.ClearLogMessages()
lu._VerifyNodeBridges(self.master, ndata, ["mock_bridge"])
self.mcpu.assertLogContainsRegex("not return valid bridge information")
self.mcpu.ClearLogMessages()
lu._VerifyNodeBridges(self.master, {constants.NV_BRIDGES: ["mock_bridge"]},
["mock_bridge"])
self.mcpu.assertLogContainsRegex("missing bridge")
class TestLUClusterVerifyGroupVerifyNodeUserScripts(
TestLUClusterVerifyGroupMethods):
@withLockedLU
def testNoUserScripts(self, lu):
lu._VerifyNodeUserScripts(self.master, {})
self.mcpu.assertLogContainsRegex("did not return user scripts information")
@withLockedLU
def testBrokenUserScripts(self, lu):
lu._VerifyNodeUserScripts(self.master,
{constants.NV_USERSCRIPTS: ["script"]})
self.mcpu.assertLogContainsRegex("scripts not present or not executable")
class TestLUClusterVerifyGroupVerifyNodeNetwork(
TestLUClusterVerifyGroupMethods):
def setUp(self):
super(TestLUClusterVerifyGroupVerifyNodeNetwork, self).setUp()
self.VALID_NRESULT = {
constants.NV_NODELIST: {},
constants.NV_NODENETTEST: {},
constants.NV_MASTERIP: True
}
@withLockedLU
def testEmptyNodeResult(self, lu):
lu._VerifyNodeNetwork(self.master, {})
self.mcpu.assertLogContainsRegex(
"node hasn't returned node ssh connectivity data")
self.mcpu.assertLogContainsRegex(
"node hasn't returned node tcp connectivity data")
self.mcpu.assertLogContainsRegex(
"node hasn't returned node master IP reachability data")
@withLockedLU
def testValidResult(self, lu):
lu._VerifyNodeNetwork(self.master, self.VALID_NRESULT)
self.mcpu.assertLogIsEmpty()
@withLockedLU
def testSshProblem(self, lu):
self.VALID_NRESULT.update({
constants.NV_NODELIST: {
"mock_node": "mock_error"
}
})
lu._VerifyNodeNetwork(self.master, self.VALID_NRESULT)
self.mcpu.assertLogContainsRegex("ssh communication with node 'mock_node'")
@withLockedLU
def testTcpProblem(self, lu):
self.VALID_NRESULT.update({
constants.NV_NODENETTEST: {
"mock_node": "mock_error"
}
})
lu._VerifyNodeNetwork(self.master, self.VALID_NRESULT)
self.mcpu.assertLogContainsRegex("tcp communication with node 'mock_node'")
@withLockedLU
def testMasterIpNotReachable(self, lu):
self.VALID_NRESULT.update({
constants.NV_MASTERIP: False
})
node1 = self.cfg.AddNewNode()
lu._VerifyNodeNetwork(self.master, self.VALID_NRESULT)
self.mcpu.assertLogContainsRegex(
"the master node cannot reach the master IP")
self.mcpu.ClearLogMessages()
lu._VerifyNodeNetwork(node1, self.VALID_NRESULT)
self.mcpu.assertLogContainsRegex("cannot reach the master IP")
class TestLUClusterVerifyGroupVerifyInstance(TestLUClusterVerifyGroupMethods):
def setUp(self):
super(TestLUClusterVerifyGroupVerifyInstance, self).setUp()
self.node1 = self.cfg.AddNewNode()
self.drbd_inst = self.cfg.AddNewInstance(
disks=[self.cfg.CreateDisk(dev_type=constants.DT_DRBD8,
primary_node=self.master,
secondary_node=self.node1)])
self.running_inst = self.cfg.AddNewInstance(
admin_state=constants.ADMINST_UP, disks_active=True)
self.diskless_inst = self.cfg.AddNewInstance(disks=[])
self.master_img = \
verify.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
self.master_img.volumes = ["/".join(disk.logical_id)
for inst in [self.running_inst,
self.diskless_inst]
for disk in
self.cfg.GetInstanceDisks(inst.uuid)]
drbd_inst_disks = self.cfg.GetInstanceDisks(self.drbd_inst.uuid)
self.master_img.volumes.extend(
["/".join(disk.logical_id) for disk in drbd_inst_disks[0].children])
self.master_img.instances = [self.running_inst.uuid]
self.node1_img = \
verify.LUClusterVerifyGroup.NodeImage(uuid=self.node1.uuid)
self.node1_img.volumes = \
["/".join(disk.logical_id) for disk in drbd_inst_disks[0].children]
self.node_imgs = {
self.master_uuid: self.master_img,
self.node1.uuid: self.node1_img
}
running_inst_disks = self.cfg.GetInstanceDisks(self.running_inst.uuid)
self.diskstatus = {
self.master_uuid: [
(True, objects.BlockDevStatus(ldisk_status=constants.LDS_OKAY))
for _ in running_inst_disks
]
}
@withLockedLU
def testDisklessInst(self, lu):
lu._VerifyInstance(self.diskless_inst, self.node_imgs, {})
self.mcpu.assertLogIsEmpty()
@withLockedLU
def testOfflineNode(self, lu):
self.master_img.offline = True
lu._VerifyInstance(self.drbd_inst, self.node_imgs, {})
self.mcpu.assertLogIsEmpty()
@withLockedLU
def testRunningOnOfflineNode(self, lu):
self.master_img.offline = True
lu._VerifyInstance(self.running_inst, self.node_imgs, {})
self.mcpu.assertLogContainsRegex(
"instance is marked as running and lives on offline node")
@withLockedLU
def testMissingVolume(self, lu):
self.master_img.volumes = []
lu._VerifyInstance(self.running_inst, self.node_imgs, {})
self.mcpu.assertLogContainsRegex("volume .* missing")
@withLockedLU
def testRunningInstanceOnWrongNode(self, lu):
self.master_img.instances = []
self.diskless_inst.admin_state = constants.ADMINST_UP
lu._VerifyInstance(self.running_inst, self.node_imgs, {})
self.mcpu.assertLogContainsRegex("instance not running on its primary node")
@withLockedLU
def testRunningInstanceOnRightNode(self, lu):
self.master_img.instances = [self.running_inst.uuid]
lu._VerifyInstance(self.running_inst, self.node_imgs, {})
self.mcpu.assertLogIsEmpty()
@withLockedLU
def testValidDiskStatus(self, lu):
lu._VerifyInstance(self.running_inst, self.node_imgs, self.diskstatus)
self.mcpu.assertLogIsEmpty()
@withLockedLU
def testDegradedDiskStatus(self, lu):
self.diskstatus[self.master_uuid][0][1].is_degraded = True
lu._VerifyInstance(self.running_inst, self.node_imgs, self.diskstatus)
self.mcpu.assertLogContainsRegex("instance .* is degraded")
@withLockedLU
def testNotOkayDiskStatus(self, lu):
self.diskstatus[self.master_uuid][0][1].is_degraded = True
self.diskstatus[self.master_uuid][0][1].ldisk_status = constants.LDS_FAULTY
lu._VerifyInstance(self.running_inst, self.node_imgs, self.diskstatus)
self.mcpu.assertLogContainsRegex("instance .* state is 'faulty'")
@withLockedLU
def testExclusiveStorageWithInvalidInstance(self, lu):
self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
lu._VerifyInstance(self.drbd_inst, self.node_imgs, self.diskstatus)
self.mcpu.assertLogContainsRegex(
"disk types? drbd, which are not supported")
@withLockedLU
def testExclusiveStorageWithValidInstance(self, lu):
self.master.ndparams[constants.ND_EXCLUSIVE_STORAGE] = True
running_inst_disks = self.cfg.GetInstanceDisks(self.running_inst.uuid)
running_inst_disks[0].spindles = 1
feedback_fn = lambda _: None
self.cfg.Update(running_inst_disks[0], feedback_fn)
lu._VerifyInstance(self.running_inst, self.node_imgs, self.diskstatus)
self.mcpu.assertLogIsEmpty()
@withLockedLU
def testDrbdInTwoGroups(self, lu):
group = self.cfg.AddNewNodeGroup()
self.node1.group = group.uuid
lu._VerifyInstance(self.drbd_inst, self.node_imgs, self.diskstatus)
self.mcpu.assertLogContainsRegex(
"instance has primary and secondary nodes in different groups")
@withLockedLU
def testOfflineSecondary(self, lu):
self.node1_img.offline = True
lu._VerifyInstance(self.drbd_inst, self.node_imgs, self.diskstatus)
self.mcpu.assertLogContainsRegex("instance has offline secondary node\(s\)")
class TestLUClusterVerifyGroupVerifyOrphanVolumes(
TestLUClusterVerifyGroupMethods):
@withLockedLU
def testOrphanedVolume(self, lu):
master_img = verify.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
master_img.volumes = ["mock_vg/disk_0", "mock_vg/disk_1", "mock_vg/disk_2"]
node_imgs = {
self.master_uuid: master_img
}
node_vol_should = {
self.master_uuid: ["mock_vg/disk_0"]
}
lu._VerifyOrphanVolumes(node_vol_should, node_imgs,
utils.FieldSet("mock_vg/disk_2"))
self.mcpu.assertLogContainsRegex("volume mock_vg/disk_1 is unknown")
self.mcpu.assertLogDoesNotContainRegex("volume mock_vg/disk_0 is unknown")
self.mcpu.assertLogDoesNotContainRegex("volume mock_vg/disk_2 is unknown")
class TestLUClusterVerifyGroupVerifyNPlusOneMemory(
TestLUClusterVerifyGroupMethods):
@withLockedLU
def testN1Failure(self, lu):
group1 = self.cfg.AddNewNodeGroup()
node1 = self.cfg.AddNewNode()
node2 = self.cfg.AddNewNode(group=group1)
node3 = self.cfg.AddNewNode()
inst1 = self.cfg.AddNewInstance()
inst2 = self.cfg.AddNewInstance()
inst3 = self.cfg.AddNewInstance()
node1_img = verify.LUClusterVerifyGroup.NodeImage(uuid=node1.uuid)
node1_img.sbp = {
self.master_uuid: [inst1.uuid, inst2.uuid, inst3.uuid]
}
node2_img = verify.LUClusterVerifyGroup.NodeImage(uuid=node2.uuid)
node3_img = verify.LUClusterVerifyGroup.NodeImage(uuid=node3.uuid)
node3_img.offline = True
node_imgs = {
node1.uuid: node1_img,
node2.uuid: node2_img,
node3.uuid: node3_img
}
lu._VerifyNPlusOneMemory(node_imgs, self.cfg.GetAllInstancesInfo())
self.mcpu.assertLogContainsRegex(
"not enough memory to accomodate instance failovers")
self.mcpu.ClearLogMessages()
node1_img.mfree = 1000
lu._VerifyNPlusOneMemory(node_imgs, self.cfg.GetAllInstancesInfo())
self.mcpu.assertLogIsEmpty()
class TestLUClusterVerifyGroupVerifyFiles(TestLUClusterVerifyGroupMethods):
@withLockedLU
def test(self, lu):
node1 = self.cfg.AddNewNode(master_candidate=False, offline=False,
vm_capable=True)
node2 = self.cfg.AddNewNode(master_candidate=True, vm_capable=False)
node3 = self.cfg.AddNewNode(master_candidate=False, offline=False,
vm_capable=True)
node4 = self.cfg.AddNewNode(master_candidate=False, offline=False,
vm_capable=True)
node5 = self.cfg.AddNewNode(master_candidate=False, offline=True)
nodeinfo = [self.master, node1, node2, node3, node4, node5]
files_all = set([
pathutils.CLUSTER_DOMAIN_SECRET_FILE,
pathutils.RAPI_CERT_FILE,
pathutils.RAPI_USERS_FILE,
])
files_opt = set([
pathutils.RAPI_USERS_FILE,
hv_xen.XL_CONFIG_FILE,
pathutils.VNC_PASSWORD_FILE,
])
files_mc = set([
pathutils.CLUSTER_CONF_FILE,
])
files_vm = set([
hv_xen.XEND_CONFIG_FILE,
hv_xen.XL_CONFIG_FILE,
pathutils.VNC_PASSWORD_FILE,
])
nvinfo = RpcResultsBuilder() \
.AddSuccessfulNode(self.master, {
constants.NV_FILELIST: {
pathutils.CLUSTER_CONF_FILE: "82314f897f38b35f9dab2f7c6b1593e0",
pathutils.RAPI_CERT_FILE: "babbce8f387bc082228e544a2146fee4",
pathutils.CLUSTER_DOMAIN_SECRET_FILE: "cds-47b5b3f19202936bb4",
hv_xen.XEND_CONFIG_FILE: "b4a8a824ab3cac3d88839a9adeadf310",
hv_xen.XL_CONFIG_FILE: "77935cee92afd26d162f9e525e3d49b9"
}}) \
.AddSuccessfulNode(node1, {
constants.NV_FILELIST: {
pathutils.RAPI_CERT_FILE: "97f0356500e866387f4b84233848cc4a",
hv_xen.XEND_CONFIG_FILE: "b4a8a824ab3cac3d88839a9adeadf310",
}
}) \
.AddSuccessfulNode(node2, {
constants.NV_FILELIST: {
pathutils.RAPI_CERT_FILE: "97f0356500e866387f4b84233848cc4a",
pathutils.CLUSTER_DOMAIN_SECRET_FILE: "cds-47b5b3f19202936bb4",
}
}) \
.AddSuccessfulNode(node3, {
constants.NV_FILELIST: {
pathutils.RAPI_CERT_FILE: "97f0356500e866387f4b84233848cc4a",
pathutils.CLUSTER_CONF_FILE: "conf-a6d4b13e407867f7a7b4f0f232a8f527",
pathutils.CLUSTER_DOMAIN_SECRET_FILE: "cds-47b5b3f19202936bb4",
pathutils.RAPI_USERS_FILE: "rapiusers-ea3271e8d810ef3",
hv_xen.XL_CONFIG_FILE: "77935cee92afd26d162f9e525e3d49b9"
}
}) \
.AddSuccessfulNode(node4, {}) \
.AddOfflineNode(node5) \
.Build()
assert set(nvinfo.keys()) == set(map(operator.attrgetter("uuid"), nodeinfo))
lu._VerifyFiles(nodeinfo, self.master_uuid, nvinfo,
(files_all, files_opt, files_mc, files_vm))
expected_msgs = [
"File %s found with 2 different checksums (variant 1 on"
" %s, %s, %s; variant 2 on %s)" %
(pathutils.RAPI_CERT_FILE, node1.name, node2.name, node3.name,
self.master.name),
"File %s is missing from node(s) %s" %
(pathutils.CLUSTER_DOMAIN_SECRET_FILE, node1.name),
"File %s should not exist on node(s) %s" %
(pathutils.CLUSTER_CONF_FILE, node3.name),
"File %s is missing from node(s) %s" %
(hv_xen.XEND_CONFIG_FILE, node3.name),
"File %s is missing from node(s) %s" %
(pathutils.CLUSTER_CONF_FILE, node2.name),
"File %s found with 2 different checksums (variant 1 on"
" %s; variant 2 on %s)" %
(pathutils.CLUSTER_CONF_FILE, self.master.name, node3.name),
"File %s is optional, but it must exist on all or no nodes (not"
" found on %s, %s, %s)" %
(pathutils.RAPI_USERS_FILE, self.master.name, node1.name, node2.name),
"File %s is optional, but it must exist on all or no nodes (not"
" found on %s)" % (hv_xen.XL_CONFIG_FILE, node1.name),
"Node did not return file checksum data",
]
self.assertEqual(len(self.mcpu.GetLogMessages()), len(expected_msgs))
for expected_msg in expected_msgs:
self.mcpu.assertLogContainsInLine(expected_msg)
class TestLUClusterVerifyGroupVerifyNodeOs(TestLUClusterVerifyGroupMethods):
@withLockedLU
def testUpdateNodeOsInvalidNodeResult(self, lu):
for ndata in [{}, {constants.NV_OSLIST: ""}, {constants.NV_OSLIST: [""]},
{constants.NV_OSLIST: [["1", "2"]]}]:
self.mcpu.ClearLogMessages()
nimage = verify.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
lu._UpdateNodeOS(self.master, ndata, nimage)
self.mcpu.assertLogContainsRegex("node hasn't returned valid OS data")
@withLockedLU
def testUpdateNodeOsValidNodeResult(self, lu):
ndata = {
constants.NV_OSLIST: [
["mock_OS", "/mocked/path", True, "", ["default"], [],
[constants.OS_API_V20], True],
["Another_Mock", "/random", True, "", ["var1", "var2"],
[{"param1": "val1"}, {"param2": "val2"}], constants.OS_API_VERSIONS,
True]
]
}
nimage = verify.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
lu._UpdateNodeOS(self.master, ndata, nimage)
self.mcpu.assertLogIsEmpty()
@withLockedLU
def testVerifyNodeOs(self, lu):
node = self.cfg.AddNewNode()
nimg_root = verify.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
nimg = verify.LUClusterVerifyGroup.NodeImage(uuid=node.uuid)
nimg_root.os_fail = False
nimg_root.oslist = {
"mock_os": [("/mocked/path", True, "", set(["default"]), set(),
set([constants.OS_API_V20]), True)],
"broken_base_os": [("/broken", False, "", set(), set(),
set([constants.OS_API_V20]), True)],
"only_on_root": [("/random", True, "", set(), set(), set(), True)],
"diffing_os": [("/pinky", True, "", set(["var1", "var2"]),
set([("param1", "val1"), ("param2", "val2")]),
set([constants.OS_API_V20]), True)],
"trust_os": [("/trust/mismatch", True, "", set(), set(), set(), True)],
}
nimg.os_fail = False
nimg.oslist = {
"mock_os": [("/mocked/path", True, "", set(["default"]), set(),
set([constants.OS_API_V20]), True)],
"only_on_test": [("/random", True, "", set(), set(), set(), True)],
"diffing_os": [("/bunny", True, "", set(["var1", "var3"]),
set([("param1", "val1"), ("param3", "val3")]),
set([constants.OS_API_V15]), True)],
"broken_os": [("/broken", False, "", set(), set(),
set([constants.OS_API_V20]), True)],
"multi_entries": [
("/multi1", True, "", set(), set(), set([constants.OS_API_V20]), True),
("/multi2", True, "", set(), set(), set([constants.OS_API_V20]), True)],
"trust_os": [("/trust/mismatch", True, "", set(), set(), set(), False)],
}
lu._VerifyNodeOS(node, nimg, nimg_root)
expected_msgs = [
"Extra OS only_on_test not present on reference node",
"OSes present on reference node .* but missing on this node:" +
" only_on_root",
"OS API version for diffing_os differs",
"OS variants list for diffing_os differs",
"OS parameters for diffing_os differs",
"Invalid OS broken_os",
"Extra OS broken_os not present on reference node",
"OS 'multi_entries' has multiple entries",
"Extra OS multi_entries not present on reference node",
"OS trusted for trust_os differs from reference node "
]
self.assertEqual(len(expected_msgs), len(self.mcpu.GetLogMessages()))
for expected_msg in expected_msgs:
self.mcpu.assertLogContainsRegex(expected_msg)
class TestLUClusterVerifyGroupVerifyAcceptedFileStoragePaths(
TestLUClusterVerifyGroupMethods):
@withLockedLU
def testNotMaster(self, lu):
lu._VerifyAcceptedFileStoragePaths(self.master, {}, False)
self.mcpu.assertLogIsEmpty()
@withLockedLU
def testNotMasterButReturnedValue(self, lu):
lu._VerifyAcceptedFileStoragePaths(
self.master, {constants.NV_ACCEPTED_STORAGE_PATHS: []}, False)
self.mcpu.assertLogContainsRegex(
"Node should not have returned forbidden file storage paths")
@withLockedLU
def testMasterInvalidNodeResult(self, lu):
lu._VerifyAcceptedFileStoragePaths(self.master, {}, True)
self.mcpu.assertLogContainsRegex(
"Node did not return forbidden file storage paths")
@withLockedLU
def testMasterForbiddenPaths(self, lu):
lu._VerifyAcceptedFileStoragePaths(
self.master, {constants.NV_ACCEPTED_STORAGE_PATHS: ["/forbidden"]}, True)
self.mcpu.assertLogContainsRegex("Found forbidden file storage paths")
@withLockedLU
def testMasterSuccess(self, lu):
lu._VerifyAcceptedFileStoragePaths(
self.master, {constants.NV_ACCEPTED_STORAGE_PATHS: []}, True)
self.mcpu.assertLogIsEmpty()
class TestLUClusterVerifyGroupVerifyStoragePaths(
TestLUClusterVerifyGroupMethods):
@withLockedLU
def testVerifyFileStoragePathsSuccess(self, lu):
lu._VerifyFileStoragePaths(self.master, {})
self.mcpu.assertLogIsEmpty()
@withLockedLU
def testVerifyFileStoragePathsFailure(self, lu):
lu._VerifyFileStoragePaths(self.master,
{constants.NV_FILE_STORAGE_PATH: "/fail/path"})
self.mcpu.assertLogContainsRegex(
"The configured file storage path is unusable")
@withLockedLU
def testVerifySharedFileStoragePathsSuccess(self, lu):
lu._VerifySharedFileStoragePaths(self.master, {})
self.mcpu.assertLogIsEmpty()
@withLockedLU
def testVerifySharedFileStoragePathsFailure(self, lu):
lu._VerifySharedFileStoragePaths(
self.master, {constants.NV_SHARED_FILE_STORAGE_PATH: "/fail/path"})
self.mcpu.assertLogContainsRegex(
"The configured sharedfile storage path is unusable")
class TestLUClusterVerifyGroupVerifyOob(TestLUClusterVerifyGroupMethods):
@withLockedLU
def testEmptyResult(self, lu):
lu._VerifyOob(self.master, {})
self.mcpu.assertLogIsEmpty()
@withLockedLU
def testErrorResults(self, lu):
lu._VerifyOob(self.master, {constants.NV_OOB_PATHS: ["path1", "path2"]})
self.mcpu.assertLogContainsRegex("path1")
self.mcpu.assertLogContainsRegex("path2")
class TestLUClusterVerifyGroupUpdateNodeVolumes(
TestLUClusterVerifyGroupMethods):
def setUp(self):
super(TestLUClusterVerifyGroupUpdateNodeVolumes, self).setUp()
self.nimg = verify.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
@withLockedLU
def testNoVgName(self, lu):
lu._UpdateNodeVolumes(self.master, {}, self.nimg, None)
self.mcpu.assertLogIsEmpty()
self.assertTrue(self.nimg.lvm_fail)
@withLockedLU
def testErrorMessage(self, lu):
lu._UpdateNodeVolumes(self.master, {constants.NV_LVLIST: "mock error"},
self.nimg, "mock_vg")
self.mcpu.assertLogContainsRegex("LVM problem on node: mock error")
self.assertTrue(self.nimg.lvm_fail)
@withLockedLU
def testInvalidNodeResult(self, lu):
lu._UpdateNodeVolumes(self.master, {constants.NV_LVLIST: [1, 2, 3]},
self.nimg, "mock_vg")
self.mcpu.assertLogContainsRegex("rpc call to node failed")
self.assertTrue(self.nimg.lvm_fail)
@withLockedLU
def testValidNodeResult(self, lu):
lu._UpdateNodeVolumes(self.master, {constants.NV_LVLIST: {}},
self.nimg, "mock_vg")
self.mcpu.assertLogIsEmpty()
self.assertFalse(self.nimg.lvm_fail)
class TestLUClusterVerifyGroupUpdateNodeInstances(
TestLUClusterVerifyGroupMethods):
def setUp(self):
super(TestLUClusterVerifyGroupUpdateNodeInstances, self).setUp()
self.nimg = verify.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
@withLockedLU
def testInvalidNodeResult(self, lu):
lu._UpdateNodeInstances(self.master, {}, self.nimg)
self.mcpu.assertLogContainsRegex("rpc call to node failed")
@withLockedLU
def testValidNodeResult(self, lu):
inst = self.cfg.AddNewInstance()
lu._UpdateNodeInstances(self.master,
{constants.NV_INSTANCELIST: [inst.name]},
self.nimg)
self.mcpu.assertLogIsEmpty()
class TestLUClusterVerifyGroupUpdateNodeInfo(TestLUClusterVerifyGroupMethods):
def setUp(self):
super(TestLUClusterVerifyGroupUpdateNodeInfo, self).setUp()
self.nimg = verify.LUClusterVerifyGroup.NodeImage(uuid=self.master_uuid)
self.valid_hvresult = {constants.NV_HVINFO: {"memory_free": 1024}}
@withLockedLU
def testInvalidHvNodeResult(self, lu):
for ndata in [{}, {constants.NV_HVINFO: ""}]:
self.mcpu.ClearLogMessages()
lu._UpdateNodeInfo(self.master, ndata, self.nimg, None)
self.mcpu.assertLogContainsRegex("rpc call to node failed")
@withLockedLU
def testInvalidMemoryFreeHvNodeResult(self, lu):
lu._UpdateNodeInfo(self.master,
{constants.NV_HVINFO: {"memory_free": "abc"}},
self.nimg, None)
self.mcpu.assertLogContainsRegex(
"node returned invalid nodeinfo, check hypervisor")
@withLockedLU
def testValidHvNodeResult(self, lu):
lu._UpdateNodeInfo(self.master, self.valid_hvresult, self.nimg, None)
self.mcpu.assertLogIsEmpty()
@withLockedLU
def testInvalidVgNodeResult(self, lu):
for vgdata in [[], ""]:
self.mcpu.ClearLogMessages()
ndata = {constants.NV_VGLIST: vgdata}
ndata.update(self.valid_hvresult)
lu._UpdateNodeInfo(self.master, ndata, self.nimg, "mock_vg")
self.mcpu.assertLogContainsRegex(
"node didn't return data for the volume group 'mock_vg'")
@withLockedLU
def testInvalidDiskFreeVgNodeResult(self, lu):
self.valid_hvresult.update({
constants.NV_VGLIST: {"mock_vg": "abc"}
})
lu._UpdateNodeInfo(self.master, self.valid_hvresult, self.nimg, "mock_vg")
self.mcpu.assertLogContainsRegex(
"node returned invalid LVM info, check LVM status")
@withLockedLU
def testValidVgNodeResult(self, lu):
self.valid_hvresult.update({
constants.NV_VGLIST: {"mock_vg": 10000}
})
lu._UpdateNodeInfo(self.master, self.valid_hvresult, self.nimg, "mock_vg")
self.mcpu.assertLogIsEmpty()
class TestLUClusterVerifyGroupCollectDiskInfo(TestLUClusterVerifyGroupMethods):
def setUp(self):
super(TestLUClusterVerifyGroupCollectDiskInfo, self).setUp()
self.node1 = self.cfg.AddNewNode()
self.node2 = self.cfg.AddNewNode()
self.node3 = self.cfg.AddNewNode()
self.diskless_inst = \
self.cfg.AddNewInstance(primary_node=self.node1,
disk_template=constants.DT_DISKLESS)
self.plain_inst = \
self.cfg.AddNewInstance(primary_node=self.node2,
disk_template=constants.DT_PLAIN)
self.drbd_inst = \
self.cfg.AddNewInstance(primary_node=self.node3,
secondary_node=self.node2,
disk_template=constants.DT_DRBD8)
self.node1_img = verify.LUClusterVerifyGroup.NodeImage(
uuid=self.node1.uuid)
self.node1_img.pinst = [self.diskless_inst.uuid]
self.node1_img.sinst = []
self.node2_img = verify.LUClusterVerifyGroup.NodeImage(
uuid=self.node2.uuid)
self.node2_img.pinst = [self.plain_inst.uuid]
self.node2_img.sinst = [self.drbd_inst.uuid]
self.node3_img = verify.LUClusterVerifyGroup.NodeImage(
uuid=self.node3.uuid)
self.node3_img.pinst = [self.drbd_inst.uuid]
self.node3_img.sinst = []
self.node_images = {
self.node1.uuid: self.node1_img,
self.node2.uuid: self.node2_img,
self.node3.uuid: self.node3_img
}
self.node_uuids = [self.node1.uuid, self.node2.uuid, self.node3.uuid]
@withLockedLU
def testSuccessfulRun(self, lu):
self.rpc.call_blockdev_getmirrorstatus_multi.return_value = \
RpcResultsBuilder() \
.AddSuccessfulNode(self.node2, [(True, ""), (True, "")]) \
.AddSuccessfulNode(self.node3, [(True, "")]) \
.Build()
lu._CollectDiskInfo(self.node_uuids, self.node_images,
self.cfg.GetAllInstancesInfo())
self.mcpu.assertLogIsEmpty()
@withLockedLU
def testOfflineAndFailingNodes(self, lu):
self.rpc.call_blockdev_getmirrorstatus_multi.return_value = \
RpcResultsBuilder() \
.AddOfflineNode(self.node2) \
.AddFailedNode(self.node3) \
.Build()
lu._CollectDiskInfo(self.node_uuids, self.node_images,
self.cfg.GetAllInstancesInfo())
self.mcpu.assertLogContainsRegex("while getting disk information")
@withLockedLU
def testInvalidNodeResult(self, lu):
self.rpc.call_blockdev_getmirrorstatus_multi.return_value = \
RpcResultsBuilder() \
.AddSuccessfulNode(self.node2, [(True,), (False,)]) \
.AddSuccessfulNode(self.node3, [""]) \
.Build()
lu._CollectDiskInfo(self.node_uuids, self.node_images,
self.cfg.GetAllInstancesInfo())
# logging is not performed through mcpu
self.mcpu.assertLogIsEmpty()
class TestLUClusterVerifyGroupHooksCallBack(TestLUClusterVerifyGroupMethods):
def setUp(self):
super(TestLUClusterVerifyGroupHooksCallBack, self).setUp()
self.feedback_fn = lambda _: None
def PrepareLU(self, lu):
super(TestLUClusterVerifyGroupHooksCallBack, self).PrepareLU(lu)
lu.my_node_uuids = list(self.cfg.GetAllNodesInfo().keys())
@withLockedLU
def testEmptyGroup(self, lu):
lu.my_node_uuids = []
lu.HooksCallBack(constants.HOOKS_PHASE_POST, None, self.feedback_fn, None)
@withLockedLU
def testFailedResult(self, lu):
lu.HooksCallBack(constants.HOOKS_PHASE_POST,
RpcResultsBuilder(use_node_names=True)
.AddFailedNode(self.master).Build(),
self.feedback_fn,
None)
self.mcpu.assertLogContainsRegex("Communication failure in hooks execution")
@withLockedLU
def testOfflineNode(self, lu):
lu.HooksCallBack(constants.HOOKS_PHASE_POST,
RpcResultsBuilder(use_node_names=True)
.AddOfflineNode(self.master).Build(),
self.feedback_fn,
None)
@withLockedLU
def testValidResult(self, lu):
lu.HooksCallBack(constants.HOOKS_PHASE_POST,
RpcResultsBuilder(use_node_names=True)
.AddSuccessfulNode(self.master,
[("mock_script",
constants.HKR_SUCCESS,
"mock output")])
.Build(),
self.feedback_fn,
None)
@withLockedLU
def testFailedScriptResult(self, lu):
lu.HooksCallBack(constants.HOOKS_PHASE_POST,
RpcResultsBuilder(use_node_names=True)
.AddSuccessfulNode(self.master,
[("mock_script",
constants.HKR_FAIL,
"mock output")])
.Build(),
self.feedback_fn,
None)
self.mcpu.assertLogContainsRegex("Script mock_script failed")
class TestLUClusterVerifyDisks(CmdlibTestCase):
def testVerifyDisks(self):
op = opcodes.OpClusterVerifyDisks()
result = self.ExecOpCode(op)
self.assertEqual(1, len(result["jobs"]))
class TestLUClusterRenewCrypto(CmdlibTestCase):
def setUp(self):
super(TestLUClusterRenewCrypto, self).setUp()
self._node_cert = self._CreateTempFile()
shutil.copy(testutils.TestDataFilename("cert1.pem"), self._node_cert)
self._client_node_cert = self._CreateTempFile()
shutil.copy(testutils.TestDataFilename("cert2.pem"), self._client_node_cert)
self._client_node_cert_tmp = self._CreateTempFile()
def tearDown(self):
super(TestLUClusterRenewCrypto, self).tearDown()
def _GetFakeDigest(self, uuid):
"""Creates a fake SSL digest depending on the UUID of a node.
@type uuid: string
@param uuid: node UUID
@returns: a string impersonating a SSL digest
"""
return "FA:KE:%s:%s:%s:%s" % (uuid[0:2], uuid[2:4], uuid[4:6], uuid[6:8])
def _InitPathutils(self, pathutils):
"""Patch pathutils to point to temporary files.
"""
pathutils.NODED_CERT_FILE = self._node_cert
pathutils.NODED_CLIENT_CERT_FILE = self._client_node_cert
pathutils.NODED_CLIENT_CERT_FILE_TMP = \
self._client_node_cert_tmp
def _AssertCertFiles(self, pathutils):
"""Check if the correct certificates exist and don't exist on the master.
"""
self.assertTrue(os.path.exists(pathutils.NODED_CERT_FILE))
self.assertTrue(os.path.exists(pathutils.NODED_CLIENT_CERT_FILE))
self.assertFalse(os.path.exists(pathutils.NODED_CLIENT_CERT_FILE_TMP))
def _CompletelySuccessfulRpc(self, node_uuid, _):
"""Fake RPC call which always returns successfully.
"""
return self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(node_uuid,
[(constants.CRYPTO_TYPE_SSL_DIGEST,
self._GetFakeDigest(node_uuid))])
@patchPathutils("cluster")
def testSuccessfulCase(self, pathutils):
self._InitPathutils(pathutils)
# create a few non-master, online nodes
num_nodes = 3
for _ in range(num_nodes):
self.cfg.AddNewNode()
self.rpc.call_node_crypto_tokens = self._CompletelySuccessfulRpc
op = opcodes.OpClusterRenewCrypto(node_certificates=True)
self.ExecOpCode(op)
self._AssertCertFiles(pathutils)
# Check if we have the correct digests in the configuration
cluster = self.cfg.GetClusterInfo()
self.assertEqual(num_nodes + 1, len(cluster.candidate_certs))
nodes = self.cfg.GetAllNodesInfo()
for (node_uuid, _) in nodes.items():
expected_digest = self._GetFakeDigest(node_uuid)
self.assertEqual(expected_digest, cluster.candidate_certs[node_uuid])
@patchPathutils("cluster")
def testMasterFails(self, pathutils):
self._InitPathutils(pathutils)
# make sure the RPC calls are failing for all nodes
master_uuid = self.cfg.GetMasterNode()
self.rpc.call_node_crypto_tokens.return_value = self.RpcResultsBuilder() \
.CreateFailedNodeResult(master_uuid)
op = opcodes.OpClusterRenewCrypto(node_certificates=True)
self.ExecOpCode(op)
self._AssertCertFiles(pathutils)
# Check if we correctly have no candidate certificates
cluster = self.cfg.GetClusterInfo()
self.assertFalse(cluster.candidate_certs)
def _partiallyFailingRpc(self, node_uuid, _):
if node_uuid == self._failed_node:
return self.RpcResultsBuilder() \
.CreateFailedNodeResult(node_uuid)
else:
return self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(node_uuid,
[(constants.CRYPTO_TYPE_SSL_DIGEST, self._GetFakeDigest(node_uuid))])
@patchPathutils("cluster")
def testNonMasterFails(self, pathutils):
self._InitPathutils(pathutils)
# create a few non-master, online nodes
num_nodes = 3
for _ in range(num_nodes):
self.cfg.AddNewNode()
nodes = self.cfg.GetAllNodesInfo()
# pick one node as the failing one
master_uuid = self.cfg.GetMasterNode()
self._failed_node = [node_uuid for node_uuid in nodes
if node_uuid != master_uuid][1]
self.rpc.call_node_crypto_tokens = self._partiallyFailingRpc
op = opcodes.OpClusterRenewCrypto(node_certificates=True)
self.ExecOpCode(op)
self._AssertCertFiles(pathutils)
# Check if we have the correct digests in the configuration
cluster = self.cfg.GetClusterInfo()
# There should be one digest missing.
self.assertEqual(num_nodes, len(cluster.candidate_certs))
nodes = self.cfg.GetAllNodesInfo()
for (node_uuid, _) in nodes.items():
if node_uuid == self._failed_node:
self.assertTrue(node_uuid not in cluster.candidate_certs)
else:
expected_digest = self._GetFakeDigest(node_uuid)
self.assertEqual(expected_digest, cluster.candidate_certs[node_uuid])
@patchPathutils("cluster")
def testOfflineNodes(self, pathutils):
self._InitPathutils(pathutils)
# create a few non-master, online nodes
num_nodes = 3
offline_index = 1
for i in range(num_nodes):
# Pick one node to be offline.
self.cfg.AddNewNode(offline=(i == offline_index))
self.rpc.call_node_crypto_tokens = self._CompletelySuccessfulRpc
op = opcodes.OpClusterRenewCrypto(node_certificates=True)
self.ExecOpCode(op)
self._AssertCertFiles(pathutils)
# Check if we have the correct digests in the configuration
cluster = self.cfg.GetClusterInfo()
# There should be one digest missing.
self.assertEqual(num_nodes, len(cluster.candidate_certs))
nodes = self.cfg.GetAllNodesInfo()
for (node_uuid, node_info) in nodes.items():
if node_info.offline:
self.assertTrue(node_uuid not in cluster.candidate_certs)
else:
expected_digest = self._GetFakeDigest(node_uuid)
self.assertEqual(expected_digest, cluster.candidate_certs[node_uuid])
def _RpcSuccessfulAfterRetries(self, node_uuid, _):
if self._retries < self._max_retries:
self._retries += 1
return self.RpcResultsBuilder() \
.CreateFailedNodeResult(node_uuid)
else:
return self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(node_uuid,
[(constants.CRYPTO_TYPE_SSL_DIGEST, self._GetFakeDigest(node_uuid))])
@patchPathutils("cluster")
def testMasterRetriesSuccess(self, pathutils):
self._InitPathutils(pathutils)
self._max_retries = 2
self._retries = 0
self.rpc.call_node_crypto_tokens = self._RpcSuccessfulAfterRetries
op = opcodes.OpClusterRenewCrypto(node_certificates=True)
self.ExecOpCode(op)
self._AssertCertFiles(pathutils)
cluster = self.cfg.GetClusterInfo()
master_uuid = self.cfg.GetMasterNode()
self.assertTrue(self._GetFakeDigest(master_uuid)
in cluster.candidate_certs.values())
@patchPathutils("cluster")
def testMasterRetriesFail(self, pathutils):
self._InitPathutils(pathutils)
self._max_retries = 5
self._retries = 0
self.rpc.call_node_crypto_tokens = self._RpcSuccessfulAfterRetries
op = opcodes.OpClusterRenewCrypto(node_certificates=True)
self.ExecOpCode(op)
self._AssertCertFiles(pathutils)
cluster = self.cfg.GetClusterInfo()
self.assertFalse(cluster.candidate_certs)
def _RpcSuccessfulAfterRetriesNonMaster(self, node_uuid, _):
if self._retries < self._max_retries and node_uuid != self._master_uuid:
self._retries += 1
return self.RpcResultsBuilder() \
.CreateFailedNodeResult(node_uuid)
else:
return self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(node_uuid,
[(constants.CRYPTO_TYPE_SSL_DIGEST, self._GetFakeDigest(node_uuid))])
def _NonMasterRetries(self, pathutils, max_retries):
self._InitPathutils(pathutils)
self._master_uuid = self.cfg.GetMasterNode()
self._max_retries = max_retries
self._retries = 0
self.rpc.call_node_crypto_tokens = self._RpcSuccessfulAfterRetriesNonMaster
# Add one non-master node
self.cfg.AddNewNode()
op = opcodes.OpClusterRenewCrypto(node_certificates=True)
self.ExecOpCode(op)
self._AssertCertFiles(pathutils)
return self.cfg.GetClusterInfo()
@patchPathutils("cluster")
def testNonMasterRetriesSuccess(self, pathutils):
cluster = self._NonMasterRetries(pathutils, 2)
self.assertEqual(2, len(cluster.candidate_certs.values()))
@patchPathutils("cluster")
def testNonMasterRetriesFail(self, pathutils):
cluster = self._NonMasterRetries(pathutils, 5)
# Only the master digest should be in the cert list
self.assertEqual(1, len(cluster.candidate_certs.values()))
self.assertTrue(self._master_uuid in cluster.candidate_certs)
if __name__ == "__main__":
testutils.GanetiTestProgram()
| {
"content_hash": "05f4f45f908fcd5e7c5357e12e8a2abc",
"timestamp": "",
"source": "github",
"line_count": 2524,
"max_line_length": 80,
"avg_line_length": 36.48058637083994,
"alnum_prop": 0.6833193957231447,
"repo_name": "dimara/ganeti",
"id": "3e44d62c7bf5de5d629bdfa7997e3eee67745faf",
"size": "93453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/py/cmdlib/cluster_unittest.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Haskell",
"bytes": "2409763"
},
{
"name": "Python",
"bytes": "5842471"
},
{
"name": "Shell",
"bytes": "110549"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from datetime import datetime
from django.core.urlresolvers import reverse
from sentry.models import Release, ReleaseCommit, ReleaseProject
from sentry.testutils import APITestCase
class ProjectReleaseListTest(APITestCase):
def test_simple(self):
self.login_as(user=self.user)
team = self.create_team()
project1 = self.create_project(team=team, name='foo')
project2 = self.create_project(team=team, name='bar')
release1 = Release.objects.create(
organization_id=project1.organization_id,
version='1',
date_added=datetime(2013, 8, 13, 3, 8, 24, 880386),
)
release1.add_project(project1)
ReleaseProject.objects.filter(project=project1, release=release1).update(new_groups=5)
release2 = Release.objects.create(
organization_id=project1.organization_id,
version='2',
date_added=datetime(2013, 8, 14, 3, 8, 24, 880386),
)
release2.add_project(project1)
release3 = Release.objects.create(
organization_id=project1.organization_id,
version='3',
date_added=datetime(2013, 8, 12, 3, 8, 24, 880386),
date_released=datetime(2013, 8, 15, 3, 8, 24, 880386),
)
release3.add_project(project1)
release4 = Release.objects.create(
organization_id=project2.organization_id,
version='4',
)
release4.add_project(project2)
url = reverse(
'sentry-api-0-project-releases',
kwargs={
'organization_slug': project1.organization.slug,
'project_slug': project1.slug,
}
)
response = self.client.get(url, format='json')
assert response.status_code == 200, response.content
assert len(response.data) == 3
assert response.data[0]['version'] == release3.version
assert response.data[1]['version'] == release2.version
assert response.data[2]['version'] == release1.version
assert response.data[2]['newGroups'] == 5
def test_query_filter(self):
self.login_as(user=self.user)
team = self.create_team()
project = self.create_project(team=team, name='foo')
release = Release.objects.create(
organization_id=project.organization_id,
version='foobar',
date_added=datetime(2013, 8, 13, 3, 8, 24, 880386),
)
release.add_project(project)
url = reverse(
'sentry-api-0-project-releases',
kwargs={
'organization_slug': project.organization.slug,
'project_slug': project.slug,
}
)
response = self.client.get(url + '?query=foo', format='json')
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]['version'] == release.version
response = self.client.get(url + '?query=bar', format='json')
assert response.status_code == 200, response.content
assert len(response.data) == 0
class ProjectReleaseCreateTest(APITestCase):
def test_minimal(self):
self.login_as(user=self.user)
project = self.create_project(name='foo')
url = reverse(
'sentry-api-0-project-releases',
kwargs={
'organization_slug': project.organization.slug,
'project_slug': project.slug,
}
)
response = self.client.post(
url, data={
'version': '1.2.1',
}
)
assert response.status_code == 201, response.content
assert response.data['version']
release = Release.objects.get(
version=response.data['version'],
)
assert not release.owner
assert release.organization == project.organization
assert release.projects.first() == project
def test_ios_release(self):
self.login_as(user=self.user)
project = self.create_project(name='foo')
url = reverse(
'sentry-api-0-project-releases',
kwargs={
'organization_slug': project.organization.slug,
'project_slug': project.slug,
}
)
response = self.client.post(
url, data={
'version': '1.2.1 (123)',
}
)
assert response.status_code == 201, response.content
assert response.data['version']
release = Release.objects.get(
version=response.data['version'],
)
assert not release.owner
assert release.organization == project.organization
assert release.projects.first() == project
def test_duplicate(self):
self.login_as(user=self.user)
project = self.create_project(name='foo')
release = Release.objects.create(version='1.2.1', organization_id=project.organization_id)
release.add_project(project)
url = reverse(
'sentry-api-0-project-releases',
kwargs={
'organization_slug': project.organization.slug,
'project_slug': project.slug,
}
)
response = self.client.post(
url, data={
'version': '1.2.1',
}
)
assert response.status_code == 208, response.content
def test_duplicate_across_org(self):
self.login_as(user=self.user)
project = self.create_project(name='foo')
release = Release.objects.create(version='1.2.1', organization_id=project.organization_id)
release.add_project(project)
project2 = self.create_project(name='bar', organization=project.organization)
url = reverse(
'sentry-api-0-project-releases',
kwargs={
'organization_slug': project2.organization.slug,
'project_slug': project2.slug,
}
)
response = self.client.post(
url, data={
'version': '1.2.1',
}
)
# since project2 was added, should be 201
assert response.status_code == 201, response.content
assert Release.objects.filter(
version='1.2.1', organization_id=project.organization_id
).count() == 1
assert ReleaseProject.objects.get(release=release, project=project)
assert ReleaseProject.objects.get(release=release, project=project2)
def test_version_whitespace(self):
self.login_as(user=self.user)
project = self.create_project(name='foo')
url = reverse(
'sentry-api-0-project-releases',
kwargs={
'organization_slug': project.organization.slug,
'project_slug': project.slug,
}
)
response = self.client.post(
url, data={
'version': '1.2.3\n',
}
)
assert response.status_code == 400, response.content
response = self.client.post(
url, data={
'version': '\n1.2.3',
}
)
assert response.status_code == 400, response.content
response = self.client.post(
url, data={
'version': '1.\n2.3',
}
)
assert response.status_code == 400, response.content
response = self.client.post(
url, data={
'version': '1.2.3\f',
}
)
assert response.status_code == 400, response.content
response = self.client.post(
url, data={
'version': '1.2.3\t',
}
)
assert response.status_code == 400, response.content
response = self.client.post(
url, data={
'version': '1.2.3',
}
)
assert response.status_code == 201, response.content
assert response.data['version'] == '1.2.3'
release = Release.objects.get(
organization_id=project.organization_id,
version=response.data['version'],
)
assert not release.owner
def test_features(self):
self.login_as(user=self.user)
project = self.create_project(name='foo')
url = reverse(
'sentry-api-0-project-releases',
kwargs={
'organization_slug': project.organization.slug,
'project_slug': project.slug,
}
)
response = self.client.post(
url, data={
'version': '1.2.1',
'owner': self.user.email,
}
)
assert response.status_code == 201, response.content
assert response.data['version']
release = Release.objects.get(
organization_id=project.organization_id,
version=response.data['version'],
)
assert release.owner == self.user
def test_commits(self):
self.login_as(user=self.user)
project = self.create_project(name='foo')
url = reverse(
'sentry-api-0-project-releases',
kwargs={
'organization_slug': project.organization.slug,
'project_slug': project.slug,
}
)
response = self.client.post(
url, data={'version': '1.2.1',
'commits': [
{
'id': 'a' * 40
},
{
'id': 'b' * 40
},
]}
)
assert response.status_code == 201, (response.status_code, response.content)
assert response.data['version']
release = Release.objects.get(
organization_id=project.organization_id,
version=response.data['version'],
)
rc_list = list(
ReleaseCommit.objects.filter(
release=release,
).select_related('commit', 'commit__author').order_by('order')
)
assert len(rc_list) == 2
for rc in rc_list:
assert rc.organization_id
def test_fails_with_refs(self):
self.login_as(user=self.user)
project = self.create_project(name='foo')
url = reverse('sentry-api-0-project-releases', kwargs={
'organization_slug': project.organization.slug,
'project_slug': project.slug,
})
response = self.client.post(url, data={
'version': '1.2.1',
'refs': [{
'repository': 'getsentry/sentry',
'commit': 'a' * 40,
}],
})
assert response.status_code == 400
| {
"content_hash": "fd3e8c7d319ee7654e2bd14749d6d707",
"timestamp": "",
"source": "github",
"line_count": 357,
"max_line_length": 98,
"avg_line_length": 30.641456582633054,
"alnum_prop": 0.5362464576286681,
"repo_name": "jean/sentry",
"id": "3520ce4c211ee1215d258283da5a5ef956f4ef7d",
"size": "10939",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sentry/api/endpoints/test_project_releases.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "296112"
},
{
"name": "HTML",
"bytes": "314273"
},
{
"name": "JavaScript",
"bytes": "1293918"
},
{
"name": "Lua",
"bytes": "57158"
},
{
"name": "Makefile",
"bytes": "6632"
},
{
"name": "Python",
"bytes": "24515298"
},
{
"name": "Ruby",
"bytes": "4410"
},
{
"name": "Shell",
"bytes": "2942"
}
],
"symlink_target": ""
} |
"""
Handling of block device information and mapping.
This module contains helper methods for interpreting the block
device information and determining the suitable mapping to
guest devices and libvirt XML.
Throughout these methods there are a number of standard
variables / types used
* 'mapping': a dict that contains the storage device mapping.
For the default disk types it will contain the following
keys & values:
'disk' -> disk_info
'disk.rescue' -> disk_info
'disk.local' -> disk_info
'disk.swap' -> disk_info
'disk.config' -> disk_info
If any of the default disks are overridden by the block
device info mappings, the hash value will be None
For any ephemeral device there will also be a dict entry
'disk.eph$NUM' -> disk_info
For any volume device there will also be a dict entry:
$path -> disk_info
Finally a special key will refer to the root device:
'root' -> disk_info
* 'disk_info': a tuple specifying disk configuration
It contains the following 3 fields
(disk bus, disk dev, device type)
* 'disk_bus': the guest bus type ('ide', 'virtio', 'scsi', etc)
* 'disk_dev': the device name 'vda', 'hdc', 'sdf', 'xvde' etc
* 'device_type': type of device eg 'disk', 'cdrom', 'floppy'
"""
from oslo.config import cfg
from nova import block_device
from nova.compute import flavors
from nova import exception
from nova.virt import configdrive
from nova.virt import driver
CONF = cfg.CONF
def has_disk_dev(mapping, disk_dev):
"""Determine if a disk device name has already been used.
Looks at all the keys in mapping to see if any
corresponding disk_info tuple has a device name
matching disk_dev
Returns True if the disk_dev is in use.
"""
for disk in mapping:
info = mapping[disk]
if info['dev'] == disk_dev:
return True
return False
def get_dev_prefix_for_disk_bus(disk_bus):
"""Determine the dev prefix for a disk bus.
Determine the dev prefix to be combined
with a disk number to form a disk_dev.
e.g. the 'hd' prefix for the 'ide' bus can
be used to form the disk dev 'hda'
Returns the dev prefix or raises an
exception if the disk bus is unknown.
"""
if CONF.libvirt_disk_prefix:
return CONF.libvirt_disk_prefix
if disk_bus == "ide":
return "hd"
elif disk_bus == "virtio":
return "vd"
elif disk_bus == "xen":
# Two possible mappings for Xen, xvda or sda
# which are interchangeable, so we pick sda
return "sd"
elif disk_bus == "scsi":
return "sd"
elif disk_bus == "usb":
return "sd"
elif disk_bus == "uml":
return "ubd"
elif disk_bus == "lxc":
return None
else:
raise exception.NovaException(
_("Unable to determine disk prefix for %s") %
disk_bus)
def get_dev_count_for_disk_bus(disk_bus):
"""Determine the number disks supported.
Determine how many disks can be supported in
a single VM for a particular disk bus.
Returns the number of disks supported.
"""
if disk_bus == "ide":
return 4
else:
return 26
def find_disk_dev_for_disk_bus(mapping, bus, last_device=False):
"""Identify a free disk dev name for a bus.
Determines the possible disk dev names for
the bus, and then checks them in order until
it identifies one that is not yet used in the
disk mapping. If 'last_device' is set, it will
only consider the last available disk dev name.
Returns the chosen disk_dev name, or raises an
exception if none is available.
"""
dev_prefix = get_dev_prefix_for_disk_bus(bus)
if dev_prefix is None:
return None
max_dev = get_dev_count_for_disk_bus(bus)
if last_device:
devs = [max_dev - 1]
else:
devs = range(max_dev)
for idx in devs:
disk_dev = dev_prefix + chr(ord('a') + idx)
if not has_disk_dev(mapping, disk_dev):
return disk_dev
raise exception.NovaException(
_("No free disk device names for prefix '%s'"),
dev_prefix)
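# Illustrative sketch (not part of the original module): shows how
# find_disk_dev_for_disk_bus skips device names that the mapping already
# uses. The mapping contents below are assumptions for demonstration only.
def _example_next_virtio_dev():
    example_mapping = {
        'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
        'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
    }
    # 'vda' and 'vdb' are taken, so the next free virtio name is 'vdc'.
    return find_disk_dev_for_disk_bus(example_mapping, 'virtio')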
def is_disk_bus_valid_for_virt(virt_type, disk_bus):
valid_bus = {
'qemu': ['virtio', 'scsi', 'ide', 'usb'],
'kvm': ['virtio', 'scsi', 'ide', 'usb'],
'xen': ['xen', 'ide'],
'uml': ['uml'],
'lxc': ['lxc'],
}
if virt_type not in valid_bus:
raise exception.UnsupportedVirtType(virt=virt_type)
return disk_bus in valid_bus[virt_type]
def get_disk_bus_for_device_type(virt_type,
image_meta=None,
device_type="disk"):
"""Determine the best disk bus to use for a device type.
Considering the currently configured virtualization
type, return the optimal disk_bus to use for a given
device type. For example, for a disk on KVM it will
return 'virtio', while for a CDROM it will return 'ide'
Returns the disk_bus, or returns None if the device
type is not supported for this virtualization
"""
# Prefer a disk bus set against the image first of all
if image_meta:
key = "hw_" + device_type + "_bus"
disk_bus = image_meta.get('properties', {}).get(key)
if disk_bus is not None:
if not is_disk_bus_valid_for_virt(virt_type, disk_bus):
raise exception.UnsupportedHardware(model=disk_bus,
virt=virt_type)
return disk_bus
# Otherwise pick a hypervisor default disk bus
if virt_type == "uml":
if device_type == "disk":
return "uml"
elif virt_type == "lxc":
return "lxc"
elif virt_type == "xen":
if device_type == "cdrom":
return "ide"
elif device_type == "disk":
return "xen"
elif virt_type in ("qemu", "kvm"):
if device_type == "cdrom":
return "ide"
elif device_type == "disk":
return "virtio"
return None
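# Illustrative sketch (not part of the original module): the hypervisor
# defaults chosen by get_disk_bus_for_device_type for the qemu/kvm case,
# with no image metadata override.
def _example_kvm_default_buses():
    # Expected: ("virtio", "ide"), following the branches above.
    return (get_disk_bus_for_device_type("kvm", device_type="disk"),
            get_disk_bus_for_device_type("kvm", device_type="cdrom"))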
def get_disk_bus_for_disk_dev(virt_type, disk_dev):
"""Determine the disk bus for a disk dev.
Given a disk dev like 'hda', 'sdf', 'xvdb', etc
guess what the most appropriate disk bus is for
the currently configured virtualization technology
Returns the disk bus, or raises an Exception if
the disk dev prefix is unknown.
"""
if disk_dev[:2] == 'hd':
return "ide"
elif disk_dev[:2] == 'sd':
# Reverse mapping 'sd' is not reliable
# there are many possible mappings. So
# this picks the most likely mappings
if virt_type == "xen":
return "xen"
else:
return "scsi"
elif disk_dev[:2] == 'vd':
return "virtio"
elif disk_dev[:3] == 'xvd':
return "xen"
elif disk_dev[:3] == 'ubd':
return "uml"
else:
raise exception.NovaException(
_("Unable to determine disk bus for '%s'") %
disk_dev[:1])
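# Illustrative sketch (not part of the original module): reverse lookups as
# implemented above. The device names are examples only.
def _example_reverse_bus_lookup():
    # 'hdc' -> 'ide', 'vdb' -> 'virtio', 'xvda' -> 'xen'
    return (get_disk_bus_for_disk_dev("kvm", "hdc"),
            get_disk_bus_for_disk_dev("kvm", "vdb"),
            get_disk_bus_for_disk_dev("kvm", "xvda"))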
def get_next_disk_info(mapping, disk_bus,
device_type='disk',
last_device=False):
"""Determine the disk info for the next device on disk_bus.
Considering the disks already listed in the disk mapping,
determine the next available disk dev that can be assigned
for the disk bus.
Returns the disk_info for the next available disk.
"""
disk_dev = find_disk_dev_for_disk_bus(mapping,
disk_bus,
last_device)
return {'bus': disk_bus,
'dev': disk_dev,
'type': device_type}
def get_eph_disk(ephemeral):
return 'disk.eph' + str(ephemeral['num'])
def get_disk_mapping(virt_type, instance,
disk_bus, cdrom_bus,
block_device_info=None,
image_meta=None, rescue=False):
"""Determine how to map default disks to the virtual machine.
This is about figuring out whether the default 'disk',
'disk.local', 'disk.swap' and 'disk.config' images have
been overridden by the block device mapping.
Returns the guest disk mapping for the devices.
"""
inst_type = flavors.extract_flavor(instance)
mapping = {}
if virt_type == "lxc":
# NOTE(zul): This information is not used by the libvirt driver
# however we need to populate mapping so the image can be
# created when the instance is started. This can
# be removed when we convert LXC to use block devices.
root_disk_bus = disk_bus
root_device_type = 'disk'
root_info = get_next_disk_info(mapping,
root_disk_bus,
root_device_type)
mapping['root'] = root_info
mapping['disk'] = root_info
return mapping
if rescue:
rescue_info = get_next_disk_info(mapping,
disk_bus)
mapping['disk.rescue'] = rescue_info
mapping['root'] = rescue_info
os_info = get_next_disk_info(mapping,
disk_bus)
mapping['disk'] = os_info
return mapping
if image_meta and image_meta.get('disk_format') == 'iso':
root_disk_bus = cdrom_bus
root_device_type = 'cdrom'
else:
root_disk_bus = disk_bus
root_device_type = 'disk'
root_device_name = driver.block_device_info_get_root(block_device_info)
if root_device_name is not None:
root_device = block_device.strip_dev(root_device_name)
root_info = {'bus': get_disk_bus_for_disk_dev(virt_type,
root_device),
'dev': root_device,
'type': root_device_type}
else:
root_info = get_next_disk_info(mapping,
root_disk_bus,
root_device_type)
mapping['root'] = root_info
if not block_device.volume_in_mapping(root_info['dev'],
block_device_info):
mapping['disk'] = root_info
eph_info = get_next_disk_info(mapping,
disk_bus)
ephemeral_device = False
if not (block_device.volume_in_mapping(eph_info['dev'],
block_device_info) or
0 in [eph['num'] for eph in
driver.block_device_info_get_ephemerals(
block_device_info)]):
if instance['ephemeral_gb'] > 0:
ephemeral_device = True
if ephemeral_device:
mapping['disk.local'] = eph_info
for eph in driver.block_device_info_get_ephemerals(
block_device_info):
disk_dev = block_device.strip_dev(eph['device_name'])
disk_bus = get_disk_bus_for_disk_dev(virt_type, disk_dev)
mapping[get_eph_disk(eph)] = {'bus': disk_bus,
'dev': disk_dev,
'type': 'disk'}
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
disk_dev = block_device.strip_dev(swap['device_name'])
disk_bus = get_disk_bus_for_disk_dev(virt_type, disk_dev)
mapping['disk.swap'] = {'bus': disk_bus,
'dev': disk_dev,
'type': 'disk'}
elif inst_type['swap'] > 0:
swap_info = get_next_disk_info(mapping,
disk_bus)
if not block_device.volume_in_mapping(swap_info['dev'],
block_device_info):
mapping['disk.swap'] = swap_info
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
disk_dev = vol['mount_device'].rpartition("/")[2]
disk_bus = get_disk_bus_for_disk_dev(virt_type, disk_dev)
mapping[vol['mount_device']] = {'bus': disk_bus,
'dev': disk_dev,
'type': 'disk'}
if configdrive.enabled_for(instance):
config_info = get_next_disk_info(mapping,
disk_bus,
last_device=True)
mapping['disk.config'] = config_info
return mapping
def get_disk_info(virt_type, instance, block_device_info=None,
image_meta=None, rescue=False):
"""Determine guest disk mapping info.
This is a wrapper around get_disk_mapping, which
also returns the chosen disk_bus and cdrom_bus.
The returned data is in a dict
- disk_bus: the bus for harddisks
- cdrom_bus: the bus for CDROMs
- mapping: the disk mapping
Returns the disk info dict described above.
"""
disk_bus = get_disk_bus_for_device_type(virt_type, image_meta, "disk")
cdrom_bus = get_disk_bus_for_device_type(virt_type, image_meta, "cdrom")
mapping = get_disk_mapping(virt_type, instance,
disk_bus, cdrom_bus,
block_device_info,
image_meta, rescue)
return {'disk_bus': disk_bus,
'cdrom_bus': cdrom_bus,
'mapping': mapping}
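# Illustrative usage sketch (not part of the original module): how a caller
# such as the libvirt driver would typically obtain and use the mapping.
# 'instance', 'block_device_info' and 'image_meta' are assumed to come from
# the compute manager and are not defined here; CONF.libvirt_type is assumed
# to hold the configured virt type.
#
#   disk_info = get_disk_info(CONF.libvirt_type, instance,
#                             block_device_info=block_device_info,
#                             image_meta=image_meta)
#   root_dev = disk_info['mapping']['root']['dev']   # e.g. 'vda'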
| {
"content_hash": "b7dc48dcdb8c0bda7750b385a656b14d",
"timestamp": "",
"source": "github",
"line_count": 431,
"max_line_length": 76,
"avg_line_length": 31.62877030162413,
"alnum_prop": 0.5585387323943662,
"repo_name": "DirectXMan12/nova-hacking",
"id": "aabcef964a9f2ca964dfdbf116fa5d9725363fac",
"size": "14292",
"binary": false,
"copies": "1",
"ref": "refs/heads/feature_novnc_krb",
"path": "nova/virt/libvirt/blockinfo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "10361785"
},
{
"name": "Shell",
"bytes": "17485"
}
],
"symlink_target": ""
} |
import os
from ivf.io_util.dict_data import saveDict, loadDict
_root = __file__
## Dataset root directories.
def datasetRootDirs():
setting_file = os.path.abspath(os.path.join(_root, "../setting.json"))
setting_data = loadDict(setting_file)
dataset_root_dirs = setting_data["dataset_roots"]
return dataset_root_dirs
## Dataset root directory.
def datasetRootDir():
dataset_root_dirs = datasetRootDirs()
for dataset_root_dir in dataset_root_dirs:
if os.path.exists(dataset_root_dir):
return dataset_root_dir
## Dataset directory.
def datasetDir(dataset_name):
return os.path.join(datasetRootDir(), dataset_name)
## Dataset file names.
def datasetFileNames(dataset_dir, file_filter=None):
file_names = os.listdir(dataset_dir)
if file_filter is not None:
file_names = [file_name for file_name in file_names if file_filter in file_name]
return file_names
## Dataset files.
def datasetFiles(dataset_dir, file_filter=None):
file_names = datasetFileNames(dataset_dir, file_filter=file_filter)
files = [os.path.join(dataset_dir, file_name) for file_name in file_names]
files = [file for file in files if os.path.isfile(file)]
return files
## Dataset sub directories.
def datasetSubDirectories(dataset_dir):
dir_names = os.listdir(dataset_dir)
dir_paths = [os.path.join(dataset_dir, dir_name) for dir_name in dir_names]
dir_paths = [dir_path for dir_path in dir_paths if os.path.isdir(dir_path)]
return dir_paths
## Sub directory.
def subDirectory(target_dir, dir_name, make_dir=True):
sub_dir = os.path.join(target_dir, dir_name)
if make_dir and not os.path.exists(sub_dir):
os.makedirs(sub_dir)
return sub_dir | {
"content_hash": "325d9603a76bd6a2fc8a5137ed93440c",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 88,
"avg_line_length": 28.55,
"alnum_prop": 0.6981903093987157,
"repo_name": "tody411/ImageViewerFramework",
"id": "e8d47bb1442004c627c271176dab887126726ec6",
"size": "1866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ivf/datasets/datasets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "8089"
},
{
"name": "Python",
"bytes": "337507"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
__author__ = 'marcos'
| {
"content_hash": "fa9250159bfbe9e65483f95a6ae13895",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 56,
"avg_line_length": 26.666666666666668,
"alnum_prop": 0.7125,
"repo_name": "MarcosVn/turinginformatica",
"id": "0f69ed0cd5c16ad2d1e6f110d58f3649ecb60b9d",
"size": "104",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/appengine/routes/updown/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1129"
},
{
"name": "CSS",
"bytes": "143251"
},
{
"name": "HTML",
"bytes": "100159"
},
{
"name": "JavaScript",
"bytes": "21533"
},
{
"name": "Python",
"bytes": "94120"
},
{
"name": "Shell",
"bytes": "888"
}
],
"symlink_target": ""
} |
import unittest
import tempfile
import atomic_file
import os
import time
from os import path
class TestAtomicFile(unittest.TestCase):
def setUp(self):
self.directory = path.dirname(__file__)
self.name = tempfile.mktemp()
def tearDown(self):
try:
os.remove(self.name)
except OSError:
pass
def testCreate(self):
af = atomic_file.AtomicFile(name=self.name, dir=self.directory)
self.assert_(not path.exists(self.name))
self.assert_(path.exists(path.join(self.directory,
af.tempfile.name)))
af.swap()
self.assert_(path.exists(self.name))
self.assert_(not path.exists(path.join(self.directory,
af.tempfile.name)))
def testClose(self):
af = atomic_file.AtomicFile(name=self.name, dir=self.directory)
self.assert_(not path.exists(self.name))
self.assert_(path.exists(path.join(self.directory,
af.tempfile.name)))
af.close()
self.assert_(path.exists(self.name))
self.assert_(not path.exists(path.join(self.directory,
af.tempfile.name)))
def testContext(self):
with atomic_file.AtomicFile(name=self.name, dir=self.directory) as af:
self.assert_(not path.exists(self.name))
self.assert_(path.exists(path.join(self.directory, af.tempfile.name)))
self.assert_(path.exists(self.name))
self.assert_(not path.exists(path.join(self.directory,
af.tempfile.name)))
def testWrite(self):
with atomic_file.AtomicFile(name=self.name, dir=self.directory) as af:
text = 'THE TEXT\n'
af.write(text)
self.assertEqual(file(self.name).read(), text)
def testMoreWrite(self):
with atomic_file.AtomicFile(name=self.name, dir=self.directory) as af:
lines = ['THE TEXT', 'MORE TEXT', 'AGAIN!']
for line in lines:
print >> af, line
self.assertEqual(file(self.name).read(), '\n'.join(lines) + '\n')
def hasExplosion(self):
with atomic_file.AtomicFile(name=self.name, dir=self.directory) as af:
raise RuntimeError()
self.assert_(not path.exists(self.name))
self.assert_(not path.exists(path.join(self.directory,
af.tempfile.name)))
def testBoom(self):
self.assertRaises(RuntimeError, self.hasExplosion)
def testDifferentDirectory(self):
tmpdir = tempfile.gettempdir()
af = atomic_file.AtomicFile(name=self.name, dir=tmpdir)
self.assert_(not path.exists(self.name))
self.assert_(path.exists(path.join(tmpdir,
af.tempfile.name)))
af.swap()
self.assert_(path.exists(self.name))
self.assert_(not path.exists(path.join(tmpdir,
af.tempfile.name)))
def testDifferentDirectory2(self):
self.name = path.join('..', 'here' + str(time.time()))
af = atomic_file.AtomicFile(name=self.name, dir=self.directory)
self.assert_(not path.exists(self.name))
self.assert_(path.exists(path.join(self.directory,
af.tempfile.name)))
af.swap()
self.assert_(path.exists(self.name))
self.assert_(not path.exists(path.join(self.directory,
af.tempfile.name)))
def testAppend(self):
lines = ['line1', 'line2', 'line3']
open(self.name, 'w').write(lines[0])
with atomic_file.AtomicFile(self.name, mode='a', dir=self.directory) as af:
for line in lines[1:]:
af.write(line + '\n')
self.assertEqual(open(self.name).read(),
'\n'.join(lines) + '\n')
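# Illustrative usage of the interface exercised by the tests above (a sketch based
# only on what the tests assume; the real atomic_file module defines the actual
# behaviour): data written inside the block only becomes visible under `name`
# once the file is swapped/closed.
#
#   with atomic_file.AtomicFile(name='out.txt', dir='.') as af:
#       af.write('all-or-nothing contents\n')
#   # 'out.txt' now exists; an exception inside the block would leave no file behind.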
| {
"content_hash": "eeb097896b6712e97bb481a280d69551",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 128,
"avg_line_length": 40.118811881188115,
"alnum_prop": 0.5518262586377097,
"repo_name": "rik0/rk-exempla",
"id": "a64811fabdb914a151564b7ee226bcbc5c9ee139",
"size": "4052",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "filesystem/python/test_atomic_file.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "16861"
},
{
"name": "Clojure",
"bytes": "2540"
},
{
"name": "Common Lisp",
"bytes": "5076"
},
{
"name": "Erlang",
"bytes": "2571"
},
{
"name": "Java",
"bytes": "4951"
},
{
"name": "Perl",
"bytes": "4321"
},
{
"name": "Python",
"bytes": "152481"
},
{
"name": "Racket",
"bytes": "9970"
},
{
"name": "Ruby",
"bytes": "283"
},
{
"name": "Scheme",
"bytes": "7945"
},
{
"name": "Shell",
"bytes": "230"
}
],
"symlink_target": ""
} |
"""Converter construction support.
This module contains a base class for all converters, as well as supporting
structures. These structures are referred to as contexts.
The class hierarchy is as follows:
  <your converter>
    [extends] converter.Base
      [extends] transformer.Base
        [extends] gast.NodeTransformer
      [uses] transformer.SourceInfo
    [uses] converter.EntityContext
      [uses] converter.ProgramContext
      [uses] transformer.SourceInfo
converter.Base is a specialization of transformer.Base for AutoGraph. It's a
very lightweight subclass that adds a `ctx` attribute holding the corresponding
EntityContext object (see below). Note that converters are not reusable, and
`visit` will raise an error if called more than once.
converter.EntityContext contains mutable state associated with an entity that
the converter processes.
converter.ProgramContext contains mutable state across related entities. For
example, when converting several functions that call one another, the
ProgramContext should be shared across these entities.
Below is the overall flow at conversion:
  program_ctx = ProgramContext(<entities to convert>, <global settings>, ...)
  while <program_ctx has more entities to convert>:
    entity, source_info = <get next entity from program_ctx>
    entity_ctx = EntityContext(program_ctx, source_info)
    for <each ConverterClass>:
      converter = ConverterClass(entity_ctx)
      # May update entity_ctx and program_ctx
      entity = converter.visit(entity)
    <add entity's dependencies to program_ctx>
Note that pyct contains a small number of transformers used for static analysis.
These implement transformer.Base, rather than converter.Base, to avoid a
dependency on AutoGraph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import weakref
import enum
from tensorflow.python.autograph.core import config
from tensorflow.python.autograph.core import naming
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct import inspect_utils
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis import live_values
from tensorflow.python.autograph.pyct.static_analysis import liveness
from tensorflow.python.autograph.pyct.static_analysis import reaching_definitions
from tensorflow.python.autograph.pyct.static_analysis import type_info
from tensorflow.python.eager import function
from tensorflow.python.util.tf_export import tf_export
# TODO(mdan): These contexts can be refactored into first class objects.
# For example, we could define Program and Entity abstractions that hold on
# to the actual entity and have conversion methods.
# TODO(mdan): Add a test specific to this converter.
@tf_export('autograph.experimental.Verbosity')
class Verbosity(enum.IntEnum):
"""Represents conversion verbosity levels.
Attributes:
BRIEF: No logging, minimal error messages.
VERBOSE: Detailed logging of generated code, detailed error messages.
"""
BRIEF = 0
VERBOSE = 1
@tf_export('autograph.experimental.Feature')
class Feature(enum.Enum):
"""Represents conversion options that can be toggled on or off.
Attributes:
ALL: Enable all features.
    AUTO_CONTROL_DEPS: Insertion of control dependencies in the generated code.
DECORATORS: Allow decorators in local functions. Note that special
decorators, like `tf.function`, are allowed regardless of this toggle.
ERROR_REWRITING: Rewrite errors that occur in the generated code to
indicate the source code to which the failing code corresponds.
LISTS: Convert list idioms, like initializers, slices, append, etc.
NAME_SCOPES: Insert name scopes that name ops according to context, like the
function they were defined in.
"""
ALL = 'ALL'
AUTO_CONTROL_DEPS = 'AUTO_CONTROL_DEPS'
DECORATORS = 'DECORATORS'
ERROR_REWRITING = 'ERROR_REWRITING'
LISTS = 'LISTS'
NAME_SCOPES = 'NAME_SCOPES'
class ConversionOptions(object):
"""Immutable container for global conversion flags.
Attributes:
recursive: bool, whether to recursively convert any user functions or
classes that the converted function may use.
verbose: Verbosity, the level of verbosity to use.
    strip_decorators: Tuple[Callable], contains decorators that should be
      excluded from the compiled output. By default, when converting a function
      before the decorators are applied, the compiled output will include those
      decorators.
    force_conversion: bool, whether to force converting the target entity. When
force_conversion is turned off, the converter may decide to return the
function as-is.
optional_features: Union[Feature, Set[Feature]], controls the use of
optional features in the conversion process. See Feature for available
options.
"""
def __init__(self,
recursive=False,
verbose=Verbosity.VERBOSE,
strip_decorators=None,
force_conversion=False,
internal_convert_user_code=True,
optional_features=Feature.ALL):
self.recursive = recursive
self.verbose = verbose
self._strip_decorators = strip_decorators or ()
self.force_conversion = force_conversion
# TODO(mdan): Rename to conversion_recursion_depth?
self.internal_convert_user_code = internal_convert_user_code
if optional_features is None:
optional_features = ()
elif isinstance(optional_features, Feature):
optional_features = (optional_features,)
optional_features = frozenset(optional_features)
self.optional_features = optional_features
@property
def strip_decorators(self):
# A few decorators are included by default.
# TODO(mdan): Revert if function.defun becomes a public symbol.
return self._strip_decorators + (function.defun,)
def should_strip(self, decorator):
for blacklisted in self.strip_decorators:
if blacklisted is decorator:
return True
if isinstance(blacklisted, weakref.ref):
blacklisted_deref = blacklisted()
if (blacklisted_deref is not None and blacklisted_deref is decorator):
return True
return False
def uses(self, feature):
return (Feature.ALL in self.optional_features or
feature in self.optional_features)
def to_ast(self, ctx, internal_convert_user_code=None):
"""Returns a representation of this object as an AST node.
The AST node encodes a constructor that would create an object with the
same contents.
Args:
ctx: EntityContext, the entity with which this AST needs to be consistent.
      internal_convert_user_code: Optional[bool], allows overriding the
corresponding value.
Returns:
ast.Node
"""
template = """
constructor_name(
recursive=recursive_val,
verbose=verbose_val,
strip_decorators=strip_decorators_val,
force_conversion=force_conversion_val,
optional_features=optional_features_val,
internal_convert_user_code=internal_convert_user_code_val)
"""
def as_qualified_name(o):
name = inspect_utils.getqualifiedname(ctx.info.namespace, o, max_depth=1)
if not name:
# TODO(mdan): This needs to account for the symbols defined locally.
name = ctx.namer.new_symbol(o.__name__, ())
ctx.program.add_symbol(name, weakref.ref(o))
return name
def list_of_names(values):
return parser.parse_expression('({})'.format(', '.join(
tuple(as_qualified_name(v) for v in values))))
def list_of_features(values):
return parser.parse_expression('({})'.format(', '.join(
'ag__.Feature.{}'.format(v)
for v in Feature.__members__
if v in values)))
    if internal_convert_user_code is None:
internal_convert_user_code = self.internal_convert_user_code
expr_ast = templates.replace(
template,
constructor_name=parser.parse_expression(
as_qualified_name(ConversionOptions)),
recursive_val=parser.parse_expression(str(self.recursive)),
verbose_val=parser.parse_expression(str(int(self.verbose))),
strip_decorators_val=list_of_names(self._strip_decorators),
force_conversion_val=parser.parse_expression(
str(self.force_conversion)),
internal_convert_user_code_val=parser.parse_expression(
str(internal_convert_user_code)),
optional_features_val=list_of_features(self.optional_features))
return expr_ast[0].value
class ProgramContext(object):
"""ProgramContext keeps track of converting function hierarchies.
This object is mutable, and is updated during conversion. Not thread safe.
Attributes:
options: ConversionOptions
dependency_cache: Dict[Any, ast.AST], the original entities mapped to their
converted AST
additional_imports: Set[Any], additional entities which for any reason
cannot be attached after loading and need to be explicitly imported in the
generated code
name_map: Dict[str, str], map of original entity name to the name of their
converted counterparts
autograph_module: Module, a reference to the autograph module. This needs to
be specified by the caller to avoid circular dependencies.
uncompiled_modules: Set[Tuple[str, ...]], with each tuple representing the
fully qualified name of a package containing functions that will not be
compiled.
required_imports: str, containing an import statement on each line. These
are all the imports necessary for the compiled code to run, in addition to
the closures of each entity, which are attached dynamically.
"""
def __init__(
self,
options,
partial_types,
autograph_module,
uncompiled_modules,
):
self.options = options
self.partial_types = partial_types if partial_types else ()
self.autograph_module = autograph_module
self.uncompiled_modules = uncompiled_modules
self.conversion_order = []
self.dependency_cache = {}
self.additional_imports = set()
self.name_map = {}
self.additional_symbols = {}
@property
def required_imports(self):
"""Returns a block containing all imports required by the converted code."""
# TODO(mdan): Check that these don't clobber one another.
return '\n'.join(config.COMPILED_IMPORT_STATEMENTS +
tuple(self.additional_imports))
def new_namer(self, namespace):
return naming.Namer(namespace, self.options.recursive, self.name_map,
self.partial_types)
def update_name_map(self, namer):
"""Updates renamed_calls based on the recent activity from the namer.
Whenever we convert a new entity, any references to other entities are being
renamed to match their soon-to-be-converted counterparts. The namer keeps
track of these renames. When conversion is complete, we copy those renames
so that when those referenced entities are being converted, their new name
matches.
Args:
namer: naming.Namer
Raises:
ValueError: when an entity was renamed twice and to different names.
"""
# TODO(mdan): Have call_trees do this directly.
# This is done so indirectly, via the namer, for historic reasons. But
# now we can have the converter that does the rename record the new name
# as well and skip this step altogether.
for o, name in namer.renamed_calls.items():
if o in self.name_map:
if self.name_map[o] != name:
raise ValueError(
'Calls to %s were converted using multiple names (%s). This is '
'possible when an entity with one of these names already '
'existed. To fix, avoid using any of these names.' %
(o, (name, self.name_map[o])))
else:
self.name_map[o] = name
def add_symbol(self, name, value):
if name in self.additional_symbols:
assert self.additional_symbols[name] is value
self.additional_symbols[name] = value
def add_to_cache(self, original_entity, converted_ast):
self.conversion_order.append(original_entity)
self.dependency_cache[original_entity] = converted_ast
class EntityContext(object):
"""Tracks the conversion of a single entity.
This object is mutable, and is updated during conversion. Not thread safe.
Attributes:
namer: Namer
info: transformer.EntityInfo
program: ProgramContext
"""
def __init__(self, namer, entity_info, program_ctx):
self.namer = namer
self.info = entity_info
self.program = program_ctx
class Base(transformer.Base):
"""All converters should inherit from this class.
Attributes:
ctx: EntityContext
"""
def __init__(self, ctx):
super(Base, self).__init__(ctx.info)
self.ctx = ctx # Keeping this short because it's used frequently.
self._used = False
self._ast_depth = 0
def get_definition_directive(self, node, directive, arg, default):
"""Returns the unique directive argument for a symbol.
See lang/directives.py for details on directives.
Example:
# Given a directive in the code:
ag.foo_directive(bar, baz=1)
# One can write for an AST node Name(id='bar'):
get_definition_directive(node, ag.foo_directive, 'baz')
Args:
node: ast.AST, the node representing the symbol for which the directive
argument is needed.
directive: Callable[..., Any], the directive to search.
arg: str, the directive argument to return.
default: Any
Raises:
ValueError: if conflicting annotations have been found
"""
defs = anno.getanno(node, anno.Static.ORIG_DEFINITIONS, ())
if not defs:
return default
arg_values_found = []
for def_ in defs:
if (directive in def_.directives and arg in def_.directives[directive]):
arg_values_found.append(def_.directives[directive][arg])
if not arg_values_found:
return default
if len(arg_values_found) == 1:
return arg_values_found[0]
# If multiple annotations reach the symbol, they must all match. If they do,
# return any of them.
first_value = arg_values_found[0]
for other_value in arg_values_found[1:]:
if not ast_util.matches(first_value, other_value):
qn = anno.getanno(node, anno.Basic.QN)
raise ValueError('%s has ambiguous annotations for %s(%s): %s, %s' %
(qn, directive.__name__, arg,
compiler.ast_to_source(other_value).strip(),
compiler.ast_to_source(first_value).strip()))
return first_value
def visit(self, node):
if not self._ast_depth:
if self._used:
raise ValueError('converter objects cannot be reused')
self._used = True
self._ast_depth += 1
try:
return super(Base, self).visit(node)
finally:
self._ast_depth -= 1
class AnnotatedDef(reaching_definitions.Definition):
def __init__(self):
super(AnnotatedDef, self).__init__()
self.directives = {}
class AgAnno(enum.Enum):
"""Annotation labels specific to AutoGraph. See anno.py."""
DIRECTIVES = 'User directives associated with the annotated statement.'
def __repr__(self):
return self.name
def standard_analysis(node, context, is_initial=False):
"""Performs a complete static analysis of the given code.
Args:
node: ast.AST
context: converter.EntityContext
is_initial: bool, whether this is the initial analysis done on the input
source code
Returns:
ast.AST, same as node, with the static analysis annotations added
"""
# TODO(mdan): Clear static analysis here.
# TODO(mdan): Consider not running all analyses every time.
# TODO(mdan): Don't return a node because it's modified by reference.
graphs = cfg.build(node)
node = qual_names.resolve(node)
node = activity.resolve(node, context.info, None)
node = reaching_definitions.resolve(node, context.info, graphs, AnnotatedDef)
node = liveness.resolve(node, context.info, graphs)
node = live_values.resolve(node, context.info, config.PYTHON_LITERALS)
node = type_info.resolve(node, context.info)
# This second call allows resolving first-order class attributes.
node = live_values.resolve(node, context.info, config.PYTHON_LITERALS)
if is_initial:
anno.dup(
node,
{
anno.Static.DEFINITIONS: anno.Static.ORIG_DEFINITIONS,
},
)
return node
def apply_(node, context, converter_module):
"""Applies a converter to an AST.
Args:
node: ast.AST
context: converter.EntityContext
converter_module: converter.Base
Returns:
ast.AST, the result of applying converter to node
"""
node = standard_analysis(node, context)
node = converter_module.transform(node, context)
return node
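# Minimal usage sketch (assumed wiring, not part of this module): a converter module
# defined elsewhere is typically applied through the helpers above. The names
# `namespace`, `entity_info` and `my_converter_module` are placeholders.
#
#   program_ctx = ProgramContext(options, None, autograph_module, uncompiled_modules)
#   entity_ctx = EntityContext(program_ctx.new_namer(namespace), entity_info, program_ctx)
#   node = standard_analysis(node, entity_ctx, is_initial=True)  # annotate original definitions
#   node = apply_(node, entity_ctx, my_converter_module)         # re-analyses, then transforms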
| {
"content_hash": "7a29c926c5904614f8ca70facfce5a8c",
"timestamp": "",
"source": "github",
"line_count": 489,
"max_line_length": 81,
"avg_line_length": 35.59918200408998,
"alnum_prop": 0.6997357536764706,
"repo_name": "asimshankar/tensorflow",
"id": "4543b113983f56e8a987a4dbce3bba9db47da517",
"size": "18097",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/autograph/core/converter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4882"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "490070"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "52677142"
},
{
"name": "CMake",
"bytes": "207176"
},
{
"name": "Dockerfile",
"bytes": "39454"
},
{
"name": "Go",
"bytes": "1290930"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "890529"
},
{
"name": "Jupyter Notebook",
"bytes": "2618412"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "68402"
},
{
"name": "Objective-C",
"bytes": "16140"
},
{
"name": "Objective-C++",
"bytes": "102518"
},
{
"name": "PHP",
"bytes": "5172"
},
{
"name": "Pascal",
"bytes": "221"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "43038983"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "497659"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
import time
import types
import unittest
import threading
import multiprocessing
import mcl.messages.messages
from mcl.network.network import RawListener
from mcl.network.network import RawBroadcaster
from mcl.network.network import QueuedListener
from mcl.network.network import MessageListener
from mcl.network.network import MessageBroadcaster
from mcl.network.abstract import Connection as AbstractConnection
from mcl.network.abstract import RawListener as AbstractRawListener
from mcl.network.abstract import RawBroadcaster as AbstractRawBroadcaster
# Note: The delay is used to 'synchronise' threaded events so that race
# conditions do not occur.
DELAY = 0.1
TIMEOUT = 5.0
# Topics used for testing.
TOPIC = 'test topic'
TOPICS = ['topic A', 'topic B']
# -----------------------------------------------------------------------------
# Common tools for unit-testing.
# -----------------------------------------------------------------------------
def attr_exists(dct, attrs):
"""Check object contains mandatory attributes."""
for attr in attrs:
if attr not in dct:
msg = "The attribute '%s' is required." % str(attr)
raise TypeError(msg)
def attr_issubclass(dct, key, obj, msg):
"""Check object attribute is a sub-class of a specific object."""
if not issubclass(dct[key], obj):
raise TypeError(msg)
def attr_isinstance(dct, key, obj, msg):
"""Check object attribute is an instance of a specific object."""
if not isinstance(dct[key], obj):
raise TypeError(msg)
def compile_docstring(base, name):
    """Rename docstring of test-methods in base object."""
# Iterate through items in the base-object.
dct = dict()
for item in dir(base):
# Skip special attributes.
if item.startswith('__'):
continue
# Inspect callable objects.
if callable(getattr(base, item)):
func = getattr(base, item)
dct[item] = types.FunctionType(func.func_code,
func.func_globals,
item,
func.func_defaults,
func.func_closure)
# Rename the doc-string of test methods in the base-object.
if item.startswith('test_'):
dct[item].__doc__ = dct[item].__doc__ % name
return dct
# -----------------------------------------------------------------------------
# Raw/Message Broadcaster()
# -----------------------------------------------------------------------------
class _BroadcasterTestsMeta(type):
def __new__(cls, name, bases, dct):
        # Do not look for mandatory fields in the base class.
if (name == 'BroadcasterTests') and (bases == (object,)):
return super(_BroadcasterTestsMeta, cls).__new__(cls,
name,
bases,
dct)
# Ensure mandatory attributes are present.
attr_exists(dct, ['broadcaster', 'connection'])
# Ensure 'broadcaster' is a RawBroadcaster().
attr_issubclass(dct, 'broadcaster', AbstractRawBroadcaster,
"The attribute 'broadcaster' must be a sub-class " +
"of abstract.RawBroadcaster().")
# Ensure 'connection' is a Connection().
        attr_isinstance(dct, 'connection', AbstractConnection,
                        "The attribute 'connection' must be an instance of " +
                        "an abstract.Connection() sub-class.")
        # Ensure 'bad_connection' is a Connection().
        attr_isinstance(dct, 'bad_connection', AbstractConnection,
                        "The attribute 'bad_connection' must be an instance " +
                        "of an abstract.Connection() sub-class that will " +
                        "fail to connect.")
# Create name from module origin and object name.
module_name = '%s' % dct['broadcaster'].__module__.split('.')[-1]
# Rename docstrings of unit-tests and copy into new sub-class.
method_dct = compile_docstring(bases[0], module_name)
dct.update(method_dct)
return super(_BroadcasterTestsMeta, cls).__new__(cls,
name,
(unittest.TestCase,),
dct)
class BroadcasterTests(object):
"""Standard unit tests for sub-classes of the RawBroadcaster() class.
This object defines standard unit-tests for sub-classes of the
RawBroadcaster() class. Sub-classes of this unit-test must define the
attributes ``broadcaster`` and ``connection`` where:
- ``broadcaster`` is the RawBroadcaster() sub-class to be tested
- ``connection`` is the Connection() object associated with the
broadcaster
Example usage::
class ConcreteRawBroadcaster(BroadcasterTests):
broadcaster = ConcreteRawBroadcaster
connection = ConcreteConnection
"""
__metaclass__ = _BroadcasterTestsMeta
def setUp(self):
"""Create some messages for testing."""
# WARNING: this should not be deployed in production code. It is an
# abuse that has been used for the purposes of unit-testing.
mcl.messages.messages._MESSAGES = list()
class UnitTestMessage(mcl.messages.messages.Message):
mandatory = ('A', 'B',)
connection = self.connection
class UnitTestBadMessage(mcl.messages.messages.Message):
mandatory = ('A', 'B',)
connection = self.bad_connection
self.Message = UnitTestMessage
self.BadMessage = UnitTestBadMessage
def tearDown(self):
"""Clear known messages after testing."""
# WARNING: this should not be deployed in production code. It is an
# abuse that has been used for the purposes of unit-testing.
mcl.messages.messages._MESSAGES = list()
def test_init(self):
"""Test %s RawBroadcaster() can be initialised and closed."""
# Create an instance of RawBroadcaster() with the default topic.
broadcaster = self.broadcaster(self.connection)
self.assertEqual(broadcaster.topic, None)
# Ensure broadcaster has established a connection.
self.assertTrue(broadcaster.is_open)
# Close broadcaster.
result = broadcaster.close()
self.assertTrue(result)
self.assertFalse(broadcaster.is_open)
# Close a closed connection.
result = broadcaster.close()
self.assertFalse(result)
def test_bad_init(self):
"""Test %s RawBroadcaster() catches bad initialisation inputs."""
        # Test instantiation fails if 'connection' is a class and not an
        # instance.
with self.assertRaises(TypeError):
self.broadcaster(type(self.connection))
# Test instantiation fails if 'topic' is not a string.
with self.assertRaises(TypeError):
self.broadcaster(self.connection, topic=100)
# Test instantiation fails if 'topic' is an array of strings.
with self.assertRaises(TypeError):
self.broadcaster(self.connection, topic=TOPICS)
# Test instantiation fails if the broadcaster cannot connect.
with self.assertRaises(IOError):
self.broadcaster(self.bad_connection)
def test_init_topic(self):
"""Test %s RawBroadcaster() 'topic' parameter at initialisation."""
# Create an instance of RawBroadcaster() with a specific topic.
broadcaster = self.broadcaster(self.connection, topic=TOPIC)
# Ensure topic was set at initialisation.
self.assertEqual(broadcaster.topic, TOPIC)
# Ensure broadcaster has established a connection.
self.assertTrue(broadcaster.is_open)
broadcaster.close()
def test_publish(self):
"""Test %s RawBroadcaster() can publish data."""
# Create an instance of RawBroadcaster().
broadcaster = self.broadcaster(self.connection)
# Test publish succeeds if the input is a string.
broadcaster.publish('test')
# Test publish succeeds if the input is a serialisable non-string.
broadcaster.publish(42)
# Test setting topic at publish.
self.assertEqual(broadcaster.topic, None)
broadcaster.publish('test', topic='topic')
with self.assertRaises(TypeError):
            broadcaster.publish('fail', topic=5)
# Ensure attempts to publish on a closed connection raised an
# exception.
broadcaster.close()
with self.assertRaises(IOError):
broadcaster.publish('test')
def test_factory(self):
"""Test %s RawBroadcaster() from connection."""
# Manufacture an instance of RawBroadcaster() from the connection
# object.
broadcaster = RawBroadcaster(self.connection)
broadcaster.close()
# Ensure errors are propagated.
with self.assertRaises(Exception):
RawBroadcaster(self.bad_connection)
# Test instantiation fails if input is not a 'connection' object.
with self.assertRaises(TypeError):
RawBroadcaster('connection')
def test_message_init(self):
"""Test %s MessageBroadcaster() initialisation."""
# Ensure non-Message() inputs are caught.
with self.assertRaises(TypeError):
MessageBroadcaster(None)
# Ensure errors are propagated.
with self.assertRaises(Exception):
MessageBroadcaster(self.BadMessage)
# Create an instance of MessageBroadcaster() with defaults.
broadcaster = MessageBroadcaster(self.Message)
self.assertEqual(broadcaster.topic, None)
self.assertTrue(broadcaster.is_open)
broadcaster.close()
# Create an instance of MessageBroadcaster() with a specific topic.
broadcaster = MessageBroadcaster(self.Message, topic=TOPIC)
self.assertEqual(broadcaster.topic, TOPIC)
self.assertTrue(broadcaster.is_open)
broadcaster.close()
# Ensure non-string topics are caught.
with self.assertRaises(TypeError):
MessageBroadcaster(self.Message, topic=False)
def test_message_publish(self):
"""Test %s MessageBroadcaster() publish."""
# Test publish fails if the input is not a Message().
with self.assertRaises(TypeError):
MessageBroadcaster(dict)
with self.assertRaises(TypeError):
MessageBroadcaster(0)
# Create an instance of MessageBroadcaster().
message = self.Message()
broadcaster = MessageBroadcaster(self.Message)
# Test publish succeeds with default topic.
broadcaster.publish(message)
# Ensure attempts to publish a non-message type raises an exception.
with self.assertRaises(TypeError):
broadcaster.publish(False)
# Test setting topic at publish.
self.assertEqual(broadcaster.topic, None)
broadcaster.publish(message, topic='topic')
with self.assertRaises(TypeError):
            broadcaster.publish(message, topic=5)
# Ensure attempts to publish on a closed connection raises an
# exception.
broadcaster.close()
with self.assertRaises(IOError):
broadcaster.publish(message)
# -----------------------------------------------------------------------------
# Raw/Message/Queued Listener()
# -----------------------------------------------------------------------------
class _ListenerTestsMeta(type):
def __new__(cls, name, bases, dct):
        # Do not look for mandatory fields in the base class.
if (name == 'ListenerTests') and (bases == (object,)):
return super(_ListenerTestsMeta, cls).__new__(cls,
name,
bases,
dct)
# Ensure mandatory attributes are present.
attr_exists(dct, ['listener', 'connection'])
# Ensure 'listener' is a RawListener().
attr_issubclass(dct, 'listener', AbstractRawListener,
"The attribute 'listener' must be a sub-class " +
"of abstract.RawListener().")
# Ensure 'connection' is a Connection().
        attr_isinstance(dct, 'connection', AbstractConnection,
                        "The attribute 'connection' must be an instance of " +
                        "an abstract.Connection() sub-class.")
        # Ensure 'bad_connection' is a Connection().
        attr_isinstance(dct, 'bad_connection', AbstractConnection,
                        "The attribute 'bad_connection' must be an instance " +
                        "of an abstract.Connection() sub-class that will " +
                        "fail to connect.")
# Create name from module origin and object name.
module_name = '%s' % dct['listener'].__module__.split('.')[-1]
# Rename docstrings of unit-tests and copy into new sub-class.
method_dct = compile_docstring(bases[0], module_name)
dct.update(method_dct)
return super(_ListenerTestsMeta, cls).__new__(cls,
name,
(unittest.TestCase,),
dct)
class ListenerTests(object):
"""Standard unit tests for sub-classes of the RawListener() class.
This object defines standard unit-tests for sub-classes of the
RawListener() class. Sub-classes of this unit-test must define the
attributes ``listener`` and ``connection`` where:
- ``listener`` is the RawListener() sub-class to be tested
- ``connection`` is the Connection() object associated with the
listener
Example usage::
class ConcreteRawListener(ListenerTests):
listener = ConcreteRawListener
connection = ConcreteConnection
"""
__metaclass__ = _ListenerTestsMeta
def setUp(self):
"""Create some messages for testing."""
# WARNING: this should not be deployed in production code. It is an
# abuse that has been used for the purposes of unit-testing.
mcl.messages.messages._MESSAGES = list()
class UnitTestMessage(mcl.messages.messages.Message):
mandatory = ('A', 'B',)
connection = self.connection
class UnitTestBadMessage(mcl.messages.messages.Message):
mandatory = ('A', 'B',)
connection = self.bad_connection
self.Message = UnitTestMessage
self.BadMessage = UnitTestBadMessage
def tearDown(self):
"""Clear known messages after testing."""
# WARNING: this should not be deployed in production code. It is an
# abuse that has been used for the purposes of unit-testing.
mcl.messages.messages._MESSAGES = list()
def test_init(self):
"""Test %s RawListener() can be initialised and closed."""
# Create an instance of RawListener() with the default topic.
listener = self.listener(self.connection)
self.assertEqual(listener.topics, None)
# Ensure listener has established a connection.
self.assertTrue(listener.is_open)
self.assertFalse(listener._open())
# Close listener.
result = listener.close()
self.assertTrue(result)
self.assertFalse(listener.is_open)
# Close a closed connection.
result = listener.close()
self.assertFalse(result)
def test_bad_init(self):
"""Test %s RawListener() catches bad initialisation inputs."""
        # Test instantiation fails if 'connection' is a class and not an
        # instance.
with self.assertRaises(TypeError):
self.listener(type(self.connection))
# Test instantiation fails if 'topics' is not an array of strings.
with self.assertRaises(TypeError):
self.listener(self.connection, topics=100)
# Test instantiation fails if 'topics' is not an array of strings.
with self.assertRaises(TypeError):
self.listener(self.connection, topics=['topic', 10])
# Test instantiation fails if the listener cannot connect.
with self.assertRaises(IOError):
self.listener(self.bad_connection)
def test_init_topics(self):
"""Test %s RawListener() 'topics' parameter at initialisation."""
        # Create an instance of RawListener() with a SINGLE topic.
listener = self.listener(self.connection, topics=TOPIC)
self.assertEqual(listener.topics, TOPIC)
# Create an instance of RawListener() with MULTIPLE topics.
listener = self.listener(self.connection, topics=TOPICS)
self.assertEqual(listener.topics, TOPICS)
def test_subscriptions(self):
"""Test %s RawListener() can subscribe and unsubscribe callbacks."""
# NOTE: This testing is theoretically redundant. Unit test code on the
        #       parent class Event() should pick up any errors. To be paranoid
# and ensure inheritance has been implemented properly, do some
# basic checking here.
callback = lambda data: True
listener = self.listener(self.connection)
# Subscribe callback.
self.assertTrue(listener.subscribe(callback))
self.assertTrue(listener.is_subscribed(callback))
self.assertEqual(listener.num_subscriptions(), 1)
# Unsubscribe callback.
self.assertTrue(listener.unsubscribe(callback))
self.assertFalse(listener.is_subscribed(callback))
self.assertEqual(listener.num_subscriptions(), 0)
def test_factory(self):
"""Test %s RawListener() from connection."""
# Manufacture an instance of RawListener() from the connection object.
listener = RawListener(self.connection)
listener.close()
# Ensure errors are propagated.
with self.assertRaises(Exception):
RawListener(self.bad_connection)
# Test instantiation fails if input is not a 'connection' object.
with self.assertRaises(TypeError):
RawListener('connection')
def test_message_init(self):
"""Test %s MessageListener() initialisation."""
# Ensure non-Message() inputs are caught.
with self.assertRaises(TypeError):
MessageListener(dict)
with self.assertRaises(TypeError):
MessageListener(0)
# Ensure errors are propagated.
with self.assertRaises(Exception):
MessageListener(self.BadMessage)
# Create an instance of MessageListener() with defaults.
listener = MessageListener(self.Message)
self.assertEqual(listener.topics, None)
self.assertTrue(listener.is_open)
listener.close()
# Create an instance of MessageListener() with a specific topic.
listener = MessageListener(self.Message, topics=TOPIC)
self.assertEqual(listener.topics, TOPIC)
self.assertTrue(listener.is_open)
listener.close()
# Ensure non-string topics are caught.
with self.assertRaises(TypeError):
MessageListener(self.Message, topics=False)
# --------------------------------------------------------------------------
# QueuedListener()
# --------------------------------------------------------------------------
def test_queuedlistener_init(self):
"""Test %s QueuedListener() initialisation."""
# Instantiate QueuedListener() using connection object.
for obj in [self.Message.connection, self.Message]:
listener = QueuedListener(obj)
self.assertTrue(listener.is_open())
self.assertFalse(listener.open())
self.assertTrue(listener.close())
self.assertFalse(listener.is_open())
self.assertFalse(listener.close())
# Instantiate QueuedListener(), delay opening connection.
for obj in [self.Message.connection, self.Message]:
listener = QueuedListener(obj, open_init=False)
self.assertFalse(listener.is_open())
self.assertTrue(listener.open())
self.assertTrue(listener.close())
self.assertFalse(listener.is_open())
self.assertFalse(listener.close())
# Ensure errors are propagated.
with self.assertRaises(Exception):
QueuedListener(self.BadMessage)
# Ensure instantiation fails if the input is not a MCL connection.
# object.
with self.assertRaises(TypeError):
QueuedListener('connection')
with self.assertRaises(TypeError):
QueuedListener(dict)
# Ensure instantiation fails if the topic input is not a string or list
# of strings.
with self.assertRaises(TypeError):
QueuedListener(self.Message, topics=5)
def test_queuedlistener_enqueue(self):
"""Test %s QueuedListener() multiprocess enqueue functionality."""
# NOTE: QueuedListener is designed to run on a separate
# process. The code run on that process is contained within the
# class. Exceptions encountered in that will not be caught or
# unit tested unless the code is tested directly in this process.
# However, the code has been made 'private' as it should not be
# called directly and it maintains a clean API. To properly
# unit-test this code, the 'private' mangling of the code will be
# dodged.
# Create broadcaster.
broadcaster = RawBroadcaster(self.Message.connection)
# Abuse intention of 'private' mangling to get queuing function.
fcn = QueuedListener._QueuedListener__enqueue
queue = multiprocessing.Queue()
# The '__enqueue' method does not reference 'self' so it can be tested
# on this thread. However, it does block so multi-threading must be
# used to terminate its operation.
run_event = threading.Event()
run_event.set()
# Launch '__enqueue' method on a new thread.
thread = threading.Thread(target=fcn,
args=(QueuedListener(self.Message.connection),
run_event,
self.Message.connection,
None,
queue))
thread.daemon = True
thread.start()
time.sleep(DELAY)
# Publish data via broadcaster.
test_data = 'test'
broadcaster.publish(test_data)
time.sleep(DELAY)
# Wait for thread to close.
run_event.clear()
thread.join(TIMEOUT)
# Ensure data was processed.
self.assertEqual(queue.get()['payload'], test_data)
    def queued_send_receive(self, listener, broadcaster, test_data):
"""Method for testing QueuedListener send-receive facility"""
# Catch messages.
data_buffer = list()
listener.subscribe(lambda data: data_buffer.append(data))
# Send message.
broadcaster.publish(test_data)
time.sleep(DELAY)
# Ensure the message was received.
self.assertEqual(len(data_buffer), 1)
self.assertEqual(data_buffer[0]['payload'], test_data)
# Stop listener and broadcaster.
listener.close()
broadcaster.close()
def test_raw_receive(self):
"""Test %s QueuedListener() raw-data send-receive functionality."""
listener = QueuedListener(self.Message.connection)
broadcaster = RawBroadcaster(self.Message.connection)
data = 'test'
self.queued_send_receive(listener, broadcaster, data)
def test_message_receive(self):
"""Test %s QueuedListener() message send-receive functionality."""
listener = QueuedListener(self.Message)
broadcaster = MessageBroadcaster(self.Message)
data = self.Message(A=1, B=2)
self.queued_send_receive(listener, broadcaster, data)
# -----------------------------------------------------------------------------
# Publish-Subscribe
# -----------------------------------------------------------------------------
class _PublishSubscribeTestsMeta(type):
def __new__(cls, name, bases, dct):
        # Do not look for mandatory fields in the base class.
if (name == 'PublishSubscribeTests') and (bases == (object,)):
return super(_PublishSubscribeTestsMeta, cls).__new__(cls,
name,
bases,
dct)
# Ensure mandatory attributes are present.
attr_exists(dct, ['broadcaster', 'listener', 'connection'])
# Ensure 'broadcaster' is a RawBroadcaster().
attr_issubclass(dct, 'broadcaster', AbstractRawBroadcaster,
"The attribute 'broadcaster' must be a sub-class " +
"of abstract.RawBroadcaster().")
# Ensure 'listener' is a RawListener().
attr_issubclass(dct, 'listener', AbstractRawListener,
"The attribute 'listener' must be a sub-class " +
"of abstract.RawListener().")
# Ensure 'connection' is a Connection().
        attr_isinstance(dct, 'connection', AbstractConnection,
                        "The attribute 'connection' must be an instance of " +
                        "an abstract.Connection() sub-class.")
# Create name from module origin and object name.
module_name = '%s send/receive' % \
dct['broadcaster'].__module__.split('.')[-1]
# Rename docstrings of unit-tests and copy into new sub-class.
method_dct = compile_docstring(bases[0], module_name)
dct.update(method_dct)
return super(_PublishSubscribeTestsMeta, cls).__new__(cls,
name,
(unittest.TestCase,),
dct)
class PublishSubscribeTests(object):
"""Standard unit tests for testing publish-subscribe functionality.
This object defines standard unit-tests for testing network
publish-subscribe functionality where:
- ``broadcaster`` is the RawBroadcaster() sub-class to be tested
- ``listener`` is the RawListener() sub-class to be tested
- ``connection`` is the Connection() object associated with the
broadcaster and listener
Example usage::
class ConcretePublishSubscribeTests(PublishSubscribeTests):
broadcaster = ConcreteRawBroadcaster
listener = ConcreteRawListener
connection = ConcreteConnection
"""
__metaclass__ = _PublishSubscribeTestsMeta
def setUp(self):
"""Create some messages for testing."""
# WARNING: this should not be deployed in production code. It is an
# abuse that has been used for the purposes of unit-testing.
mcl.messages.messages._MESSAGES = list()
class UnitTestMessage(mcl.messages.messages.Message):
mandatory = ('text',)
connection = self.connection
self.Message = UnitTestMessage
def tearDown(self):
"""Clear known messages after testing."""
# WARNING: this should not be deployed in production code. It is an
# abuse that has been used for the purposes of unit-testing.
mcl.messages.messages._MESSAGES = list()
def publish(self, broadcaster, listener, message, topic=None,
received_buffer=None, send_attempts=5, timeout=1.0):
# Store received messages in a list.
if received_buffer is None:
received_buffer = list()
# Catch received messages in a list.
catch_data = lambda data: received_buffer.append(data)
listener.subscribe(catch_data)
# Attempt to publish message several times.
length = len(received_buffer)
for j in range(send_attempts):
# Publish message.
start_time = time.time()
if topic is not None:
broadcaster.publish(message, topic=topic)
else:
broadcaster.publish(message)
# Block until message is received or until wait has timed out.
while len(received_buffer) == length:
time.sleep(0.05)
if (time.time() - start_time) > timeout:
break
# Received message(s), do not resend.
if len(received_buffer) > length:
break
# Stop catching received messages.
listener.unsubscribe(catch_data)
return received_buffer
def test_send_receive(self):
"""Test %s data with default initialisation."""
# Create unique send string based on time.
send_string = 'send/receive test: %1.8f' % time.time()
# Create broadcaster and listener.
broadcaster = self.broadcaster(self.connection)
listener = self.listener(self.connection)
# Test publish-subscribe functionality.
received_buffer = self.publish(broadcaster,
listener,
send_string)
# Close connections.
broadcaster.close()
listener.close()
# Ensure the correct number of messages was received.
self.assertEqual(len(received_buffer), 1)
# Only ONE message was published, ensure the data was received.
self.assertEqual(received_buffer[0]['topic'], None)
self.assertEqual(received_buffer[0]['payload'], send_string)
def test_topic_at_init(self):
"""Test %s with broadcast topic set at initialisation."""
# Create broadcaster and listener.
initial_topic = 'topic A'
broadcaster = self.broadcaster(self.connection, topic=initial_topic)
listener = self.listener(self.connection)
# Create unique send string based on time.
send_string = 'send/receive test: %1.8f' % time.time()
# Publish message with topic from initialisation.
send_string = 'send/receive test: %1.8f' % time.time()
received_buffer = self.publish(broadcaster,
listener,
send_string)
# Close connections.
broadcaster.close()
listener.close()
# Ensure message was transmitted with a topic.
self.assertEqual(len(received_buffer), 1)
self.assertEqual(received_buffer[0]['topic'], initial_topic)
self.assertEqual(received_buffer[0]['payload'], send_string)
def test_topic_at_publish(self):
"""Test %s with broadcast topic set at publish."""
# Create broadcaster and listener.
broadcaster = self.broadcaster(self.connection)
listener = self.listener(self.connection)
self.assertEqual(broadcaster.topic, None)
# Create unique send string based on time.
send_string = 'send/receive test: %1.8f' % time.time()
# Publish message with topic from initialisation.
publish_topic = 'topic A'
send_string = 'send/receive test: %1.8f' % time.time()
received_buffer = self.publish(broadcaster,
listener,
send_string,
topic=publish_topic)
# Close connections.
broadcaster.close()
listener.close()
# Ensure message was transmitted with a topic.
self.assertEqual(len(received_buffer), 1)
self.assertEqual(received_buffer[0]['topic'], publish_topic)
self.assertEqual(received_buffer[0]['payload'], send_string)
def test_listen_single_topic(self):
"""Test %s by listening for a single topic from many."""
# Send multiple topics, receive ONE topic.
send_topics = ['topic A', 'topic B', 'topic C', 'topic D', 'topic E']
listen_topic = 'topic C'
# Create broadcaster.
broadcaster = self.broadcaster(self.connection)
# Catch messages with a specific topic.
topic_buffer = list()
listener_topic = self.listener(self.connection, topics=listen_topic)
listener_topic.subscribe(lambda data: topic_buffer.append(data))
# Catch all messages. This ensures the unit-test does not time out
# waiting for messages that are filtered out by topic.
message_buffer = list()
listener_message = self.listener(self.connection)
# Publish messages with different topics.
send_strings = list()
for (i, topic) in enumerate(send_topics):
send_strings.append('send/receive test: %1.8f' % time.time())
# Perform test.
message_buffer = self.publish(broadcaster,
listener_message,
send_strings[-1],
topic=topic,
received_buffer=message_buffer)
# Close connections.
broadcaster.close()
listener_topic.close()
listener_message.close()
# Ensure ONE specific topic was received.
send_string = send_strings[send_topics.index(listen_topic)]
self.assertEqual(len(topic_buffer), 1)
self.assertEqual(topic_buffer[0]['topic'], listen_topic)
self.assertEqual(topic_buffer[0]['payload'], send_string)
def test_listen_multiple_topics(self):
"""Test %s by listening for multiple topics from many."""
# Send multiple topics, receive SOME topics.
send_topics = ['topic A', 'topic B', 'topic C', 'topic D', 'topic E']
listen_topics = ['topic A', 'topic C', 'topic E']
# Create broadcaster.
broadcaster = self.broadcaster(self.connection)
# Catch messages with a specific topic.
topic_buffer = list()
listener_topic = self.listener(self.connection, topics=listen_topics)
listener_topic.subscribe(lambda data: topic_buffer.append(data))
# Catch all messages. This ensures the unit-test does not time out
# waiting for messages that are filtered out by topic.
message_buffer = list()
listener_message = self.listener(self.connection)
# Publish messages with different topics.
send_strings = list()
for (i, topic) in enumerate(send_topics):
send_strings.append('send/receive test: %1.8f' % time.time())
# Perform test.
message_buffer = self.publish(broadcaster,
listener_message,
send_strings[-1],
topic=topic,
received_buffer=message_buffer)
# Close connections.
broadcaster.close()
listener_topic.close()
listener_message.close()
# Ensure all topics were received.
self.assertEqual(len(topic_buffer), len(listen_topics))
for i, topic in enumerate(listen_topics):
send_string = send_strings[send_topics.index(topic)]
self.assertEqual(topic_buffer[i]['topic'], topic)
self.assertEqual(topic_buffer[i]['payload'], send_string)
def test_message_send_receive(self):
"""Test %s with MessageBroadcaster/Listener() objects."""
# NOTE: this test listens for multiple topics from many. Rather than
# sending raw data (a previous test), Message() objects are
# sent. This tests all the functionality of the
# MessageBroadcaster() and MessageListener() objects.
# Send multiple topics, receive SOME topics.
send_topics = ['topic A', 'topic B', 'topic C', 'topic D', 'topic E']
listen_topics = ['topic A', 'topic C', 'topic E']
# Create broadcaster.
broadcaster = MessageBroadcaster(self.Message)
# Catch messages with a specific topic.
topic_buffer = list()
listener_topic = MessageListener(self.Message, topics=listen_topics)
# Subscribe callback.
def callback(data): topic_buffer.append(data)
self.assertTrue(listener_topic.subscribe(callback))
self.assertTrue(listener_topic.is_subscribed(callback))
self.assertEqual(listener_topic.num_subscriptions(), 1)
# Catch all messages. This ensures the unit-test does not time out
# waiting for messages that are filtered out by topic.
message_buffer = list()
listener_message = MessageListener(self.Message)
# Ensure network objects are open.
self.assertTrue(broadcaster.is_open)
self.assertTrue(listener_topic.is_open)
self.assertTrue(listener_message.is_open)
# Publish messages with different topics.
messages = list()
for (i, topic) in enumerate(send_topics):
messages.append(self.Message())
messages[-1]['text'] = '%s: %1.8f' % (topic, time.time())
# Perform test.
message_buffer = self.publish(broadcaster,
listener_message,
messages[-1],
topic=topic,
received_buffer=message_buffer)
# Close connections.
broadcaster.close()
listener_topic.close()
listener_message.close()
self.assertFalse(broadcaster.is_open)
self.assertFalse(listener_topic.is_open)
self.assertFalse(listener_message.is_open)
# Ensure all topics were received.
self.assertEqual(len(topic_buffer), len(listen_topics))
for i, topic in enumerate(listen_topics):
self.assertEqual(topic_buffer[i]['payload'],
messages[send_topics.index(topic)])
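# Usage sketch (hypothetical transport names): a transport-specific test module binds
# these bases to its own objects, mirroring the docstring examples and the attributes
# required by the metaclasses above.
#
#   class TestUdpBroadcaster(BroadcasterTests):
#       broadcaster = UdpBroadcaster          # RawBroadcaster() sub-class under test
#       connection = UdpConnection(...)       # Connection() that can be opened
#       bad_connection = UdpConnection(...)   # Connection() configured to fail to connect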
| {
"content_hash": "b6c92f120c042c47d2dbc4e8d8df1ab5",
"timestamp": "",
"source": "github",
"line_count": 1006,
"max_line_length": 83,
"avg_line_length": 38.806163021868784,
"alnum_prop": 0.5851840467225082,
"repo_name": "asherbender/mcl",
"id": "8796182352c301e6b74aa520e71efbedf15b3666",
"size": "39039",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mcl/network/test/common.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "731"
},
{
"name": "Python",
"bytes": "367250"
}
],
"symlink_target": ""
} |
from random import choice
COLORS = ('white', 'yellow', 'purple', 'red')
class Ghost(object):
def __init__(self):
self.color = choice(COLORS)
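# Usage sketch: the colour is picked at random, so only membership can be asserted.
#
#   ghost = Ghost()
#   assert ghost.color in COLORS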
| {
"content_hash": "ac86eefe265a8259838c10498381f84f",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 45,
"avg_line_length": 19.5,
"alnum_prop": 0.6153846153846154,
"repo_name": "the-zebulan/CodeWars",
"id": "d1955ac2064ece34ba001bc319c7d6f7a93d6bbf",
"size": "156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "katas/kyu_8/color_ghost.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1203000"
}
],
"symlink_target": ""
} |
import atexit
import inspect
import logging
import os
from collections import defaultdict
def GetChildPids(processes, pid):
"""Returns all child processes of |pid| from the given |processes| list.
Args:
processes: A tuple of (pid, ppid, state) as generated by ps.
pid: The pid for which to get children.
Returns:
A list of child pids.
"""
child_dict = defaultdict(list)
for curr_pid, curr_ppid, state in processes:
if 'Z' in state:
continue # Ignore zombie processes
child_dict[int(curr_ppid)].append(int(curr_pid))
queue = [pid]
child_ids = []
while queue:
parent = queue.pop()
if parent in child_dict:
children = child_dict[parent]
queue.extend(children)
child_ids.extend(children)
return child_ids
def GetPsOutputWithPlatformBackend(platform_backend, columns, pid):
"""Returns output of the 'ps' command as a list of lines.
Args:
platform_backend: The platform backend (LinuxBasedPlatformBackend or
PosixPlatformBackend).
columns: A list of require columns, e.g., ['pid', 'pss'].
pid: If not None, returns only the information of the process with the pid.
"""
args = ['ps']
args.extend(['-p', str(pid)] if pid != None else ['-e'])
for c in columns:
args.extend(['-o', c + '='])
return platform_backend.RunCommand(args).splitlines()
def EnableListingStrayProcessesUponExitHook():
def _ListAllSubprocesses():
try:
import psutil
except ImportError:
logging.error(
'psutil is not installed on the system. Not listing possible '
'leaked processes. To install psutil, see: '
'https://pypi.python.org/pypi/psutil')
return
telemetry_pid = os.getpid()
parent = psutil.Process(telemetry_pid)
if hasattr(parent, 'children'):
children = parent.children(recursive=True)
    else:  # Some old versions of psutil use get_children instead of children.
children = parent.get_children()
if children:
leak_processes_info = []
for p in children:
if inspect.ismethod(p.name):
name = p.name()
else: # Process.name is a property in old versions of psutil.
name = p.name
process_info = '%s (%s)' % (name, p.pid)
try:
if inspect.ismethod(p.cmdline):
cmdline = p.cmdline()
else:
cmdline = p.cmdline
process_info += ' - %s' % cmdline
except Exception as e:
logging.warning(str(e))
leak_processes_info.append(process_info)
logging.error('Telemetry leaks these processes: %s',
', '.join(leak_processes_info))
atexit.register(_ListAllSubprocesses)
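# Worked example (inputs are made up for illustration): GetChildPids() walks
# (pid, ppid, state) tuples such as those produced from 'ps' output, ignoring
# zombie entries.
#
#   processes = [(1, 0, 'S'), (10, 1, 'S'), (11, 10, 'S'), (12, 10, 'Z')]
#   GetChildPids(processes, 1)  # -> [10, 11]; pid 12 is skipped as a zombie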
| {
"content_hash": "b1c4670069a0c556b1c25e6cf6909b82",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 79,
"avg_line_length": 31.011494252873565,
"alnum_prop": 0.6386212008895478,
"repo_name": "SummerLW/Perf-Insight-Report",
"id": "d8ef2c7a706ab56e0912e2355953f016cb94a0ff",
"size": "2861",
"binary": false,
"copies": "1",
"ref": "refs/heads/test",
"path": "telemetry/telemetry/internal/util/ps_util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3598"
},
{
"name": "C++",
"bytes": "6411"
},
{
"name": "CSS",
"bytes": "14952"
},
{
"name": "HTML",
"bytes": "27508823"
},
{
"name": "JavaScript",
"bytes": "75587"
},
{
"name": "Python",
"bytes": "4638631"
},
{
"name": "Shell",
"bytes": "2124"
}
],
"symlink_target": ""
} |
import pytest
import pandas as pd
def test_code_changes(metrics):
#repo_id
assert metrics.code_changes(10, 25430, period='year').isin([pd.Timestamp('2019-01-01T00:00:00+00:00'), 2]).any().any()
# repo_group_id
assert metrics.code_changes(10, period='year').isin([pd.Timestamp('2019-01-01T00:00:00+00:00'), 21350, 2]).any().any()
#begin_date & end_date
assert metrics.code_changes(10, 25430, begin_date='2019',
end_date='2019-05').isin([pd.Timestamp('2019-03-01'), 2]).any().any()
assert metrics.code_changes(10, begin_date='2019',
end_date='2019-05').isin([pd.Timestamp('2019-03-06'), 21410, 4]).any().any()
def test_code_changes_lines(metrics):
#repo_id
assert metrics.code_changes_lines(10, 25430, period='year').isin([pd.Timestamp('2019-01-01T00:00:00+00:00'), 27190, 3163]).any().any()
#repo_group_id
assert metrics.code_changes_lines(10, period='year').isin([pd.Timestamp('2019-01-01T00:00:00+00:00'), 21410, 31, 3]).any().any()
#begin_date & end_date
assert metrics.code_changes_lines(10, 25430, period='month', begin_date='2019',
end_date='2019-05').isin([pd.Timestamp('2019-02-01T00:00:00+00:00'), 196, 108]).any().any()
assert metrics.code_changes_lines(10, period='month', begin_date='2019-05',
end_date='2019-08-15').isin([pd.Timestamp('2019-06-01T00:00:00+00:00'), 25430, 70, 20]).any().any()
def test_sub_projects(metrics):
# repo group
assert metrics.sub_projects(10).iloc[0]['sub_project_count'] > 0
# repo id
assert metrics.sub_projects(
10, repo_id=25430).iloc[0]['sub_project_count'] > 0
def test_lines_changed_by_author(metrics):
assert metrics.lines_changed_by_author(10).iloc[0].additions > 0
assert metrics.lines_changed_by_author(10, 25430).iloc[0].additions > 0
def test_cii_best_practices_badge(metrics):
# repo
assert int(metrics.cii_best_practices_badge(10, 25430).iloc[0]['tiered_percentage']) >= 85
def test_languages(metrics):
# TODO
pass
def test_annual_lines_of_code_count_ranked_by_repo_in_repo_group(metrics):
pass
# these tests break in 2020
# assert metrics.annual_lines_of_code_count_ranked_by_repo_in_repo_group(20).iloc[0].net > 0
# assert metrics.annual_lines_of_code_count_ranked_by_repo_in_repo_group(10, timeframe = 'year').iloc[0].net > 0
# assert metrics.annual_lines_of_code_count_ranked_by_repo_in_repo_group(10, 25430).iloc[0].net > 0
# assert metrics.annual_lines_of_code_count_ranked_by_repo_in_repo_group(10, 25430,timeframe = 'year').iloc[0].net > 0
def test_annual_lines_of_code_count_ranked_by_new_repo_in_repo_group(metrics):
pass
# assert metrics.annual_lines_of_code_count_ranked_by_new_repo_in_repo_group(20).iloc[0].net > 0
# assert metrics.annual_lines_of_code_count_ranked_by_new_repo_in_repo_group(10, 25430).iloc[0].net > 0
def test_aggregate_summary(metrics):
assert metrics.aggregate_summary(10).iloc[0]['commit_count'] > 0
assert metrics.aggregate_summary(10, 25430,begin_date='2018-1-1 00:00:00',
end_date='2019-12-31 23:59:59').iloc[0]['commit_count'] > 0
# def test_license_declared(metrics):
# assert metrics.license_declared(21).iloc[0]['name']
# assert metrics.license_declared(10, 21116).iloc[0]['name']
# def test_license_count(metrics):
# assert metrics.license_count(21).iloc[0]['number_of_license'] >= 1
# assert metrics.license_count(10, 21116).iloc[0]['number_of_license'] >= 1
# def test_license_coverage(metrics):
# assert metrics.license_coverage(21).iloc[0]['total_files'] >= 1
# assert metrics.license_coverage(10, 21116).iloc[0]['total_files'] >= 1
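# Note: `metrics` is assumed to be a pytest fixture, defined elsewhere in the test
# suite (e.g. in a conftest.py), exposing the Augur metrics API used above.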
| {
"content_hash": "90ee2cbc1d05ca6ef109287b7695f44a",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 138,
"avg_line_length": 47.06172839506173,
"alnum_prop": 0.6508394543546695,
"repo_name": "OSSHealth/ghdata",
"id": "105189062f0085dc7a365a64c2b2af22a70f53a3",
"size": "3843",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_metrics/test_repo_meta_metrics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "150650"
},
{
"name": "HTML",
"bytes": "1184"
},
{
"name": "JavaScript",
"bytes": "11003327"
},
{
"name": "Makefile",
"bytes": "5280"
},
{
"name": "Python",
"bytes": "124038"
},
{
"name": "Vue",
"bytes": "35686"
}
],
"symlink_target": ""
} |
"""
This special file is for keeping track of `models.Manager` classes.
TODO:
If this file grows too large to maintain,
create a `managers` folder and split out
files based on class being managed.
"""
from core.query import only_current_instances
from django.db import models
from django.utils import timezone
class InstanceActionsManager(models.Manager):
def get_queryset(self):
# Ignores: Terminate, Imaging
instance_actions = [
'Start', 'Stop',
'Resume', 'Suspend',
'Shelve', 'Shelve Offload', 'Unshelve',
'Reboot', 'Hard Reboot',
'Resize', 'Redeploy'
]
query = models.Q(name__in=instance_actions)
return super(InstanceActionsManager, self).get_queryset().filter(query)
class ActiveInstancesManager(models.Manager):
def get_queryset(self):
now_time = timezone.now()
        return super(ActiveInstancesManager, self).get_queryset().filter(
            only_current_instances(now_time))
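# Illustrative sketch (the model and attribute names are hypothetical): a manager such as
# ActiveInstancesManager is attached to the model it filters, e.g.
#
#   class Instance(models.Model):
#       objects = models.Manager()
#       active = ActiveInstancesManager()
#
#   Instance.active.all()   # only instances that only_current_instances() considers current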
| {
"content_hash": "ccdb96c8a7a84a30e195ad11dc721cc6",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 79,
"avg_line_length": 30.294117647058822,
"alnum_prop": 0.6485436893203883,
"repo_name": "CCI-MOC/GUI-Backend",
"id": "12c43396183494c0380decb5a8fdecec9a7b563b",
"size": "1030",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/models/managers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "11571"
},
{
"name": "Python",
"bytes": "2565922"
},
{
"name": "Ruby",
"bytes": "1345"
},
{
"name": "Shell",
"bytes": "42018"
}
],
"symlink_target": ""
} |
class Solution(object):
@staticmethod
def get_row_col(idx, row_offset_list, col_offset_list):
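        # The zigzag pattern repeats every (2 * numRows - 2) characters. Within one cycle,
        # row_offset_list gives the row and col_offset_list the column offset of each position;
        # col_size (numRows - 1) is the width of one cycle's block of columns, so the absolute
        # column is cycle_index * col_size + offset. convert() then sorts characters by (row, col).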
col_size = col_offset_list[-1] + 1
col = col_size * (idx / len(col_offset_list)) + col_offset_list[idx % len(col_offset_list)]
row = row_offset_list[idx % len(col_offset_list)]
return row, col
def convert(self, s, numRows):
"""
:type s: str
:type numRows: int
:rtype: str
"""
if numRows == 1:
return s
else:
col_offset_list = tuple(0 if i < numRows else i - numRows + 1 for i in range(numRows * 2 - 2))
row_offset_list = tuple(i if i < numRows else 2 * (numRows - 1) - i for i in range(numRows * 2 - 2))
return ''.join(map(lambda my_ele: my_ele[1],
sorted(map(lambda ele: (
ele[0], ele[1], Solution.get_row_col(ele[0], row_offset_list, col_offset_list)),
enumerate(s)), key=lambda x: x[2])))
if __name__ == '__main__':
print Solution().convert('PAYPALISHIRING', 4)
print 'PINALSIGYAHRPI'
| {
"content_hash": "fc1747b0d6998d64bc0a298ba8de87d7",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 115,
"avg_line_length": 41.035714285714285,
"alnum_prop": 0.5126196692776327,
"repo_name": "AlgorithmLover/OJCodes",
"id": "c27bbfc18d28e8da7f06f711a593527b14478bc0",
"size": "1149",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "leetcode/answers/6_zigzag_conversion.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "19271"
},
{
"name": "C++",
"bytes": "105318"
},
{
"name": "CMake",
"bytes": "18525"
},
{
"name": "CSS",
"bytes": "1215"
},
{
"name": "HTML",
"bytes": "11640"
},
{
"name": "Java",
"bytes": "160067"
},
{
"name": "JavaScript",
"bytes": "4932"
},
{
"name": "Makefile",
"bytes": "22061"
},
{
"name": "Matlab",
"bytes": "160"
},
{
"name": "PHP",
"bytes": "829"
},
{
"name": "Python",
"bytes": "169671"
},
{
"name": "Ruby",
"bytes": "1148"
},
{
"name": "Scheme",
"bytes": "6355"
},
{
"name": "Shell",
"bytes": "1093"
},
{
"name": "Thrift",
"bytes": "316"
}
],
"symlink_target": ""
} |
"""Tests exceptions and DB-API exception wrapping."""
from sqlalchemy import exc as sa_exceptions
from sqlalchemy import util
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import eq_
if util.py2k:
from exceptions import StandardError, KeyboardInterrupt, SystemExit
else:
Exception = BaseException
class Error(Exception):
"""This class will be old-style on <= 2.4 and new-style on >=
2.5."""
class DatabaseError(Error):
pass
class OperationalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
def __str__(self):
return '<%s>' % self.bogus
class OutOfSpec(DatabaseError):
pass
class WrapTest(fixtures.TestBase):
def test_db_error_normal(self):
try:
raise sa_exceptions.DBAPIError.instance('', [],
OperationalError(), DatabaseError)
except sa_exceptions.DBAPIError:
self.assert_(True)
def test_tostring(self):
try:
raise sa_exceptions.DBAPIError.instance('this is a message'
, None, OperationalError(), DatabaseError)
except sa_exceptions.DBAPIError as exc:
assert str(exc) \
== "(OperationalError) 'this is a message' None"
def test_tostring_large_dict(self):
try:
raise sa_exceptions.DBAPIError.instance('this is a message'
,
{'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6, 'g': 7, 'h':
8, 'i': 9, 'j': 10, 'k': 11,
}, OperationalError(), DatabaseError)
except sa_exceptions.DBAPIError as exc:
assert str(exc).startswith("(OperationalError) 'this is a "
"message' {")
def test_tostring_large_list(self):
try:
raise sa_exceptions.DBAPIError.instance('this is a message',
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,],
OperationalError(), DatabaseError)
except sa_exceptions.DBAPIError as exc:
assert str(exc).startswith("(OperationalError) 'this is a "
"message' [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]")
def test_tostring_large_executemany(self):
try:
raise sa_exceptions.DBAPIError.instance('this is a message',
[{1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1},
{1: 1}, {1:1}, {1: 1}, {1: 1},],
OperationalError(), DatabaseError)
except sa_exceptions.DBAPIError as exc:
eq_(str(exc) ,
"(OperationalError) 'this is a message' [{1: 1}, "\
"{1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: "\
"1}, {1: 1}, {1: 1}]")
try:
raise sa_exceptions.DBAPIError.instance('this is a message', [
{1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1},
{1:1}, {1: 1}, {1: 1}, {1: 1},
], OperationalError(), DatabaseError)
except sa_exceptions.DBAPIError as exc:
eq_(str(exc) ,
"(OperationalError) 'this is a message' [{1: 1}, "
"{1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1}, "
"{1: 1}, {1: 1} ... displaying 10 of 11 total "
"bound parameter sets ... {1: 1}, {1: 1}]"
)
try:
raise sa_exceptions.DBAPIError.instance('this is a message',
[
(1, ), (1, ), (1, ), (1, ), (1, ), (1, ), (1, ), (1, ), (1, ),
(1, ),
], OperationalError(), DatabaseError)
except sa_exceptions.DBAPIError as exc:
eq_(str(exc),
"(OperationalError) 'this is a message' [(1,), "\
"(1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,)]")
try:
raise sa_exceptions.DBAPIError.instance('this is a message', [
(1, ), (1, ), (1, ), (1, ), (1, ), (1, ), (1, ), (1, ), (1, ),
(1, ), (1, ),
], OperationalError(), DatabaseError)
except sa_exceptions.DBAPIError as exc:
eq_(str(exc),
"(OperationalError) 'this is a message' [(1,), "
"(1,), (1,), (1,), (1,), (1,), (1,), (1,) "
"... displaying 10 of 11 total bound "
"parameter sets ... (1,), (1,)]"
)
def test_db_error_busted_dbapi(self):
try:
raise sa_exceptions.DBAPIError.instance('', [],
ProgrammingError(), DatabaseError)
except sa_exceptions.DBAPIError as e:
self.assert_(True)
self.assert_('Error in str() of DB-API' in e.args[0])
def test_db_error_noncompliant_dbapi(self):
try:
raise sa_exceptions.DBAPIError.instance(
'', [], OutOfSpec(),
DatabaseError)
except sa_exceptions.DBAPIError as e:
# OutOfSpec subclasses DatabaseError
self.assert_(e.__class__ is sa_exceptions.DatabaseError)
except OutOfSpec:
self.assert_(False)
try:
raise sa_exceptions.DBAPIError.instance(
'', [],
sa_exceptions.ArgumentError(), DatabaseError)
except sa_exceptions.DBAPIError as e:
self.assert_(e.__class__ is sa_exceptions.DBAPIError)
except sa_exceptions.ArgumentError:
self.assert_(False)
def test_db_error_keyboard_interrupt(self):
try:
raise sa_exceptions.DBAPIError.instance('', [],
KeyboardInterrupt(), DatabaseError)
except sa_exceptions.DBAPIError:
self.assert_(False)
except KeyboardInterrupt:
self.assert_(True)
def test_db_error_system_exit(self):
try:
raise sa_exceptions.DBAPIError.instance('', [],
SystemExit(), DatabaseError)
except sa_exceptions.DBAPIError:
self.assert_(False)
except SystemExit:
self.assert_(True)
| {
"content_hash": "2b0421be9478c5009f9170aea8438586",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 78,
"avg_line_length": 36.76829268292683,
"alnum_prop": 0.5058043117744611,
"repo_name": "Abi1ity/uniclust2.0",
"id": "a6238272575cf6040cb0738082ff94a5ccf68900",
"size": "6030",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SQLAlchemy-0.9.9/test/base/test_except.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "52411"
},
{
"name": "CSS",
"bytes": "69780"
},
{
"name": "Groff",
"bytes": "28"
},
{
"name": "HTML",
"bytes": "12680251"
},
{
"name": "JavaScript",
"bytes": "161113"
},
{
"name": "Makefile",
"bytes": "12078"
},
{
"name": "Python",
"bytes": "22767232"
},
{
"name": "Shell",
"bytes": "8093"
},
{
"name": "TeX",
"bytes": "17033"
}
],
"symlink_target": ""
} |
"""Pipes, pipe segments and piplines, for flowing data from sources to partitions.
"""
class PipelineError(Exception):
    def __init__(self, pipe, *args, **kwargs):
        from rowgenerators.util import qualified_class_name
        super(PipelineError, self).__init__(*args, **kwargs)
        self.pipe = pipe
        self.exc_name = qualified_class_name(self)
        self.extra = ''
        self.extra_section = ''
        # Python 3 exceptions have no .message attribute; keep one for __str__() and details().
        self.message = ' '.join(str(a) for a in args)
        assert isinstance(pipe, Pipe), "Got a type: " + str(type(pipe))
def __str__(self):
return "Pipeline error: {}; {}".format(self.exc_name, self.message)
def details(self):
from rowgenerators.util import qualified_class_name
return """
======================================
Pipeline Exception: {exc_name}
Message: {message}
Pipeline: {pipeline_name}
Pipe: {pipe_class}
Source: {source_name}, {source_id}
Segment Headers: {headers}
{extra}
-------------------------------------
{extra_section}
Pipeline:
{pipeline}
""".format(message=self.message, pipeline_name=self.pipe.pipeline.name, pipeline=str(self.pipe.pipeline),
pipe_class=qualified_class_name(self.pipe), source_name=self.pipe.source.name,
source_id=self.pipe.source.vid,
headers=self.pipe.headers, exc_name=self.exc_name, extra=self.extra,
extra_section=self.extra_section)
class BadRowError(PipelineError):
def __init__(self, pipe, row, *args, **kwargs):
super(BadRowError, self).__init__(pipe, *args, **kwargs)
self.row = row
def __str__(self):
self.extra = 'Last Row : {}'.format(self.row)
return super(BadRowError, self).__str__()
class MissingHeaderError(PipelineError):
def __init__(self, pipe, table_headers, header, table, *args, **kwargs):
super(MissingHeaderError, self).__init__(pipe, *args, **kwargs)
self.table_headers = table_headers
self.header = header
self.table = table
def __str__(self):
self.extra = \
"""
Missing Header: {header}
Table headers : {table_headers}
""".format(header=self.header, table_headers=self.table_headers)
self.extra_section = \
"""
{table_columns}
-------------------------------------
""".format(table_columns=str(self.table))
return super(MissingHeaderError, self).__str__()
class BadSourceTable(PipelineError):
def __init__(self, pipe, source_table, *args, **kwargs):
super(BadSourceTable, self).__init__(pipe, *args, **kwargs)
self.source_table = source_table
def __str__(self):
self.extra = \
"""
Bad/Missing Table: {source_table}
""".format(source_table=self.source_table)
return super(BadSourceTable, self).__str__()
class StopPipe(Exception):
pass
class Pipe(object):
"""A step in the pipeline"""
_source_pipe = None
_source = None
bundle = None
partition = None # Set in the Pipeline
segment = None # Set to the name of the segment
pipeline = None # Set to the name of the segment
headers = None
limit = None
indent = ' ' # For __str__ formatting
scratch = {} # Data area for the casters and derived values to use.
@property
def source(self):
return self._source
@source.setter
def source(self, source_pipe):
        raise NotImplementedError("Use set_source_pipe instead")
@property
def source_pipe(self):
assert bool(self._source_pipe)
return self._source_pipe
def set_source_pipe(self, source_pipe):
self._source_pipe = source_pipe
self._source = source_pipe.source if source_pipe and hasattr(source_pipe, 'source') else None
return self
def process_header(self, headers):
"""Called to process the first row, the header. Must return the header,
possibly modified. The returned header will be sent upstream"""
return headers
def process_body(self, row):
"""Called to process each row in the body. Must return a row to be sent upstream"""
return row
def finish(self):
"""Called after the last row has been processed"""
pass
def __iter__(self):
rg = iter(self._source_pipe)
self.row_n = 0
self.headers = self.process_header(next(rg))
yield self.headers
header_len = len(self.headers)
try:
for row in rg:
row = self.process_body(row)
if row: # Check that the rows have the same length as the header
self.row_n += 1
if len(row) != header_len:
m = 'Header width mismatch in row {}. Row width = {}, header width = {}'.format(
self.row_n, len(row), header_len)
self.bundle.error(m)
raise BadRowError(self, row, m)
yield row
except StopIteration:
raise
except Exception as e:
if self.bundle:
pass
# self.bundle.error("Exception during pipeline processing, in pipe {}: {} "
# .format(qualified_class_name(self), e))
raise
self.finish()
def log(self, m):
if self.bundle:
self.bundle.logger.info(m)
def error(self, m):
if self.bundle:
self.bundle.logger.error(m)
def print_header(self):
from rowgenerators.util import qualified_class_name
return qualified_class_name(self)
def __str__(self):
return self.print_header()
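# Illustrative sketch (the class name is hypothetical): a minimal Pipe subclass usually only
# needs to override process_header() and/or process_body(), e.g.
#
#   class UpperCase(Pipe):
#       def process_body(self, row):
#           return [v.upper() if isinstance(v, str) else v for v in row]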
class DatafileSourcePipe(Pipe):
"""A Source pipe that generates rows from an MPR file. """
def __init__(self, bundle, source):
self.bundle = bundle
if isinstance(source, str):
source = bundle.source(source)
self._source = source
self._datafile = source.datafile
# file_name is for the pipeline logger, to generate a file
self.file_name = source.name
self.path = self._datafile.path
def __iter__(self):
self.start()
if self.limit:
raise NotImplementedError()
with self._datafile.reader as r:
if r.n_rows == 0:
return
# Gets the headers from the source table, which assumes that the
# source table was created in ingestion.
self.headers = self._source.headers
# No? then get the headers from the datafile
if not self.headers:
self.headers = r.headers
yield self.headers
for row in r.rows:
yield row
self.finish()
def start(self):
pass
def finish(self):
pass
def __str__(self):
from ..util import qualified_class_name
return '{}; {} {}'.format(qualified_class_name(self), type(self.source), self.path)
class SourceFileSourcePipe(Pipe):
"""A source pipe that read from the original data file, but skips rows according to the sources's
row spec"""
def __init__(self, bundle, source_rec, source_file):
"""
:param bundle:
:param source_rec:
:param source_file:
:return:
"""
self.bundle = bundle
self._source = source_rec
self._file = source_file
try:
self.path = self._file.path
except AttributeError:
self.path = self._source.name
self.file_name = self._source.name
def __iter__(self):
self.start()
self.headers = self._source.headers
# No? then get the headers from the datafile
if not self.headers:
self.headers = self._file.headers
itr = iter(self._file)
start_line = self._source.start_line or 0
# Throw away data before the data start line,
for i in range(start_line):
next(itr)
yield self.headers
if self.limit:
if self._source.end_line:
for i in range(start_line, self._source.end_line):
if i > self.limit:
break
yield next(itr)
else:
for i, row in enumerate(itr):
if i > self.limit:
break
yield row
else:
if self._source.end_line:
for i in range(start_line, self._source.end_line):
yield next(itr)
else:
for row in itr:
yield row
self.finish()
def start(self):
pass
def finish(self):
pass
def __str__(self):
from ..util import qualified_class_name
return '{}; {} {}'.format(qualified_class_name(self), type(self.source), self.path)
class RowGenerator(object):
"""Base class for generator objects"""
def __init__(self, bundle, source=None):
self._bundle = bundle
self._source = source
class PartitionSourcePipe(Pipe):
"""Base class for a source pipe that implements it own iterator """
def __init__(self, bundle, source, partition):
self.bundle = bundle
self._source = source
self._partition = partition
self._partition.localize()
# file_name is for the pipeline logger, to generate a file
self.file_name = self._source.name
def __iter__(self):
self.start()
if self.limit:
raise NotImplementedError()
yield [c.name for c in self._partition.table.columns]
for row in iter(self._partition):
yield row
self.finish()
def start(self):
pass
def finish(self):
pass
def __str__(self):
from ..util import qualified_class_name
return 'Partition {}'.format(qualified_class_name(self))
class Sink(Pipe):
""" A final stage pipe, which consumes its input and produces no output rows. """
def __init__(self, count=None, callback=None, callback_freq=1000):
self._count = count
self._callback = callback
self._callback_freq = callback_freq
self.i = 0
self._start_time = None
def run(self, count=None, *args, **kwargs):
from time import time
self._start_time = time()
count = count if count else self._count
cb_count = self._callback_freq
for i, row in enumerate(self._source_pipe):
self.i = i
if count and i == count:
break
if cb_count == 0:
cb_count = self._callback_freq
self._callback(self, i)
cb_count -= 1
def report_progress(self):
"""
This function can be called from a higher level to report progress. It is usually called from an alarm
signal handler which is installed just before starting an operation:
        :return: Tuple: (#records processed so far, records-per-second rate)
"""
from time import time
# rows, rate = pl.sink.report_progress()
return (self.i, round(float(self.i) / float(time() - self._start_time), 2))
class IterSource(Pipe):
"""Creates a source from an Iterator"""
def __init__(self, iterator, header=None):
self.iterator = iterator
self.header = header
def __iter__(self):
itr = iter(self.iterator)
if self.header:
yield self.header
else:
# Create a header from the datatypes
first = next(itr)
yield [type(e).__name__ for e in first]
yield first
for row in itr:
yield row
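# Illustrative sketch (values are hypothetical): wrapping a plain iterator as a pipeline source.
#
#   src = IterSource(iter([(1, 'a'), (2, 'b')]), header=['id', 'letter'])
#   list(src)   # -> [['id', 'letter'], (1, 'a'), (2, 'b')]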
class OnlySource(Pipe):
"""Only allow iteration on a named source. """
def __init__(self, sources):
if not isinstance(sources, (list, tuple)):
sources = [sources]
else:
sources = list(sources)
self.sources = sources
def process_header(self, row):
if self.source.name not in self.sources:
raise StopPipe
self.headers = row
return row
class Nullify(Pipe):
"""Turn all column values that don't represent a real value, such as SPACE, empty string, or None,
into a real None value"""
def __init__(self):
"""
Construct with one or more 2-element tuple or a string, in a similar format to what
__getitem__ accepts
>>> s = Slice((2,3), (6,8))
>>> s = Slice("2:3,6:8")
:param args: One or more slice objects
:return:
"""
self.row_processor = None
def process_header(self, row):
code = ','.join(['nullify(row[{}])'.format(i) for i, _ in enumerate(row)])
self.row_processor = eval('lambda row: [{}]'.format(code), self.bundle.exec_context())
return row
def process_body(self, row):
return self.row_processor(row)
def __str__(self):
from ..util import qualified_class_name
return '{} '.format(qualified_class_name(self))
class Slice(Pipe):
"""Select a slice of the table, using a set of tuples to represent the start and end positions of each
part of the slice."""
def __init__(self, *args):
"""
Construct with one or more 2-element tuple or a string, in a similar format to what
__getitem__ accepts
>>> s = Slice((2,3), (6,8))
>>> s = Slice("2:3,6:8")
:param args: One or more slice objects
:return:
"""
self._args = args
self.code = None
@staticmethod
def parse(v):
"""
Parse a slice string, of the same form as used by __getitem__
>>> Slice.parse("2:3,7,10:12")
:param v: Input string
:return: A list of tuples, one for each element of the slice string
"""
parts = v.split(',')
slices = []
for part in parts:
p = part.split(':')
if len(p) == 1:
slices.append(int(p[0]))
elif len(p) == 2:
slices.append(tuple(p))
else:
raise ValueError("Too many ':': {}".format(part))
return slices
@staticmethod
def make_slicer(*args):
if len(args) == 1 and isinstance(args[0], str):
args = Slice.parse(args[0])
parts = []
for slice in args:
parts.append('tuple(row[{}:{}])'.format(slice[0], slice[1])
if isinstance(slice, (tuple, list)) else '(row[{}],)'.format(slice))
code = 'lambda row: {}'.format('+'.join(parts))
func = eval(code)
return func, code
def process_header(self, row):
args = self._args
if not args:
args = self.source.segment
try:
self.slicer, self.code = Slice.make_slicer(args)
except Exception as e:
raise PipelineError(self, 'Failed to eval slicer for parts: {} for source {} '
.format(args, self.source.name))
try:
self.headers = self.slicer(row)
return self.headers
except Exception as e:
raise PipelineError(self, "Failed to run slicer: '{}' : {}".format(self.code, e))
def process_body(self, row):
return self.slicer(row)
def __str__(self):
from ..util import qualified_class_name
return '{}; Slice Args = {}'.format(qualified_class_name(self), self.code)
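# Illustrative sketch: Slice.make_slicer('0:2,5') compiles, via eval(), a function equivalent
# to ``lambda row: tuple(row[0:2]) + (row[5],)``. Slice.parse returns ranges as string pairs
# and single columns as ints, which is fine because the values are only interpolated into the
# generated code.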
class Head(Pipe):
""" Pass-through only the first N rows
"""
def __init__(self, N=20):
self.N = N
self.i = 0
def process_body(self, row):
if self.i >= self.N:
raise StopIteration
self.i += 1
return row
def __str__(self):
return '{}; N={}; i={}'.format(super(Head, self).__str__(), self.N, self.i)
class Sample(Pipe):
""" Take a sample of rows, skipping rows exponentially to end at the est_length input row, with
count output rows.
"""
def __init__(self, count=20, skip=5, est_length=10000):
from math import log, exp
self.skip = float(skip)
self.skip_factor = exp(log(est_length / self.skip) / (count - 1))
self.count = count
self.i = 0
def process_body(self, row):
if self.count == 0:
raise StopIteration
if self.i % int(self.skip) == 0:
self.count -= 1
self.skip = self.skip * self.skip_factor
else:
row = None
self.i += 1
return row
class Ticker(Pipe):
""" Ticks out 'H' and 'B' for header and rows.
"""
def __init__(self, name=None):
self._name = name
def process_body(self, row):
print(self._name if self._name else 'B')
return row
def process_header(self, row):
print('== {} {} =='.format(self.source.name, self._name if self._name else ''))
return row
class SelectRows(Pipe):
""" Pass-through only rows that satisfy a predicate. The predicate may be
    specified as a callable, or a string, which will be evaluated with eval(). The predicate has the signature f(source, row)
where row is a RowProxy object.
"""
def __init__(self, pred):
"""
>>> Select(' row.id == 10 or source.grain == 20 ')
:param pred: Callable or string. If a string, it must be just an expression which can take arguments source and row
:return:
"""
if isinstance(pred, str):
self.pred_str = pred
self.pred = eval('lambda source, row: {}'.format(pred))
else:
self.pred = pred
self.pred_str = str(pred)
self._row_proxy = None
def process_body(self, row):
if self.pred(self.source, self._row_proxy.set_row(row)):
return row
else:
return None
def process_header(self, row):
from rowgenerators.rowproxy import RowProxy
self._row_proxy = RowProxy(row)
return row
def __str__(self):
from rowgenerators.util import qualified_class_name
return qualified_class_name(self) + ': pred = {} '.format(self.pred_str)
class MatchPredicate(Pipe):
"""Store rows that match a predicate. THe predicate is a function that takes the row as its
sole parameter and returns true or false.
Unlike the Select pipe, MatchPredicate passes all of the rows through and only stores the
ones that match
The matches can be retrieved from the pipeline via the ``matches`` property
"""
def __init__(self, pred):
self._pred = pred
self.i = 0
self.matches = []
def process_body(self, row):
if self._pred(row):
self.matches.append(row)
return row
class AddHeader(Pipe):
"""Adds a header to a row file that doesn't have one. If no header is specified in the
constructor, use the source table. """
def __init__(self, headers=None):
self._added_headers = headers
def __iter__(self):
if not self._added_headers:
self._added_headers = [c.name for c in self.source.source_table.columns]
yield self._added_headers
for row in self._source_pipe:
yield row
class AddDestHeader(Pipe):
"""Adds a header to a row file that doesn't have one. If no header is specified in the constructor,
use the destination table, excluding the first ( id ) column."""
def __init__(self, headers=None):
self._added_headers = headers
def __iter__(self):
rg = iter(self._source_pipe)
if not self._added_headers:
self._added_headers = [c.name for c in self.source.dest_table.columns][1:]
self.headers = self._added_headers
yield self._added_headers
for row in rg:
yield row
class AddSourceHeader(Pipe):
"""Uses the source table header for the header row"""
def __init__(self):
pass
def __iter__(self):
rg = iter(self._source_pipe)
yield [c.name for c in self.source.source_table.columns]
for row in rg:
yield row
class ReplaceWithDestHeader(Pipe):
"""Replace the incomming header with the destination header, excluding the destination tables
first column, which should be the id"""
def __init__(self):
pass
def process_header(self, headers):
"""Ignore the incomming header and replace it with the destination header"""
return [c.name for c in self.source.dest_table.columns][1:]
class MapHeader(Pipe):
"""Alter the header using a map"""
def __init__(self, header_map):
self._header_map = header_map
def __iter__(self):
rg = iter(self._source_pipe)
self.headers = [self._header_map.get(c, c) for c in next(rg)]
yield self.headers
for row in rg:
yield row
class CastSourceColumns(Pipe):
"""Cast a row from the source to the types described in the source """
def __init__(self, error_on_fail=False):
self.processor = None
def process_header(self, headers):
st = self.source.source_table
def cast_maybe(type_, v):
try:
return type_(v)
except:
return v
from dateutil import parser
def date(v):
return parser.parse(v).date()
def datetime(v):
return parser.parse(v)
def time(v):
return parser.parse(v).time()
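        # Build one cast_maybe(<datatype>, row[i]) expression per source-table column and
        # compile them into a single row-mapping lambda with eval().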
inner_code = ','.join(['cast_maybe({},row[{}])'
.format(c.datatype, i)
for i, c in enumerate(st.columns)])
self.processor = eval('lambda row: [{}]'.format(inner_code), locals())
return headers
def process_body(self, row):
return self.processor(row)
def __str__(self):
from rowgenerators.util import qualified_class_name
return qualified_class_name(self)
class MapSourceHeaders(Pipe):
"""Alter the header using the source_header and dest_header in the source table. The primary
purpose of this pipe is to normalize multiple sources to one header structure, for instance,
there are multiple year releases of a file that have column name changes from year to year. """
def __init__(self, error_on_fail=False):
self.error_on_fail = error_on_fail
self.map = {}
def process_header(self, headers):
is_generator = False
is_partition = isinstance(self._source_pipe, PartitionSourcePipe)
if len(list(self.source.source_table.columns)) == 0:
if is_generator or is_partition:
# Generators or relations are assumed to return a valid, consistent header, so
# if the table is missing, carry on.
assert headers
return headers
else:
raise PipelineError(
self,
"Source table {} has no columns, can't map header".format(self.source.source_table.name))
else:
dest_headers = [c.dest_header for c in self.source.source_table.columns]
if len(headers) != len(dest_headers):
raise PipelineError(self, ('Source headers not same length as source table for source {}.\n'
'Table : {} headers: {}\n'
'Source: {} headers: {}\n')
.format(self.source.name, len(dest_headers), dest_headers,
len(headers), headers))
return dest_headers
def process_body(self, row):
return super(MapSourceHeaders, self).process_body(row)
def __str__(self):
from rowgenerators.util import qualified_class_name
return qualified_class_name(self) + ': map = {} '.format(self.map)
class NoOp(Pipe):
"""Do Nothing. Mostly for replacing other pipes to remove them from the pipeline"""
class MangleHeader(Pipe):
""""Alter the header so the values are well-formed, converting to alphanumerics and underscores"""
def mangle_column_name(self, i, n):
"""
Override this method to change the way that column names from the source are altered to
become column names in the schema. This method is called from :py:meth:`mangle_header` for each column in the
header, and :py:meth:`mangle_header` is called from the RowGenerator, so it will alter the row both when the
schema is being generated and when data are being inserted into the partition.
        Implement it in your bundle class to change how columns are converted from the source into database-friendly
        names
:param i: Column number
:param n: Original column name
:return: A new column name
"""
raise NotImplementedError
def mangle_header(self, header):
return [self.mangle_column_name(i, n) for i, n in enumerate(header)]
def __iter__(self):
itr = iter(self.source_pipe)
headers = next(itr)
self.headers = self.mangle_header(headers)
yield self.headers
while True:
yield next(itr)
class MergeHeader(Pipe):
"""Strips out the header comments and combines multiple header lines to emit a
single header line"""
footer = None
data_start_line = 1
data_end_line = None
header_lines = [0]
header_comment_lines = []
header_mangler = None
headers = None
header_comments = None
footers = None
initialized = False
def init(self):
"""Deferred initialization b/c the object con be constructed without a valid source"""
from itertools import chain
def maybe_int(v):
try:
return int(v)
except ValueError:
return None
if not self.initialized:
self.data_start_line = 1
self.data_end_line = None
self.header_lines = [0]
if self.source.start_line:
self.data_start_line = self.source.start_line
if self.source.end_line:
self.data_end_line = self.source.end_line
if self.source.header_lines:
self.header_lines = list(map(maybe_int, self.source.header_lines))
if self.source.comment_lines:
self.header_comment_lines = list(map(maybe_int, self.source.comment_lines))
max_header_line = max(chain(self.header_comment_lines, self.header_lines))
if self.data_start_line <= max_header_line:
self.data_start_line = max_header_line + 1
if not self.header_comment_lines:
min_header_line = min(chain(self.header_lines))
if min_header_line:
self.header_comment_lines = list(range(0, min_header_line))
self.headers = []
self.header_comments = []
self.footers = []
self.initialized = True
self.i = 0
def coalesce_headers(self):
self.init()
if len(self.headers) > 1:
# If there are gaps in the values in the first header line, extend them forward
hl1 = []
last = None
for x in self.headers[0]:
if not x:
x = last
else:
last = x
hl1.append(x)
self.headers[0] = hl1
header = [' '.join(col_val.strip() if col_val else '' for col_val in col_set)
for col_set in zip(*self.headers)]
header = [h.strip() for h in header]
return header
elif len(self.headers) > 0:
return self.headers[0]
else:
return []
def __iter__(self):
self.init()
if len(self.header_lines) == 1 and self.header_lines[0] == 0:
# This is the normal case, with the header on line 0, so skip all of the
# checks
            # NOTE: we're also skipping the check on the data end line, which may sometimes be wrong.
for row in self._source_pipe:
yield row
else:
max_header_line = max(self.header_lines)
for row in self._source_pipe:
if self.i < self.data_start_line:
if self.i in self.header_lines:
self.headers.append(
[_to_ascii(x) for x in row])
if self.i in self.header_comment_lines:
self.header_comments.append(
[_to_ascii(x) for x in row])
if self.i == max_header_line:
yield self.coalesce_headers()
elif not self.data_end_line or self.i <= self.data_end_line:
yield row
elif self.data_end_line and self.i >= self.data_end_line:
self.footers.append(row)
self.i += 1
def __str__(self):
from rowgenerators.util import qualified_class_name
return qualified_class_name(self) + ': header = {} ' \
.format(','.join(str(e) for e in self.header_lines))
class AddDeleteExpand(Pipe):
"""Edit rows as they pass through
The constructor can take four types of functions:
add: a list of headers, or a dict of functions, each of which will add a new column to the table
delete: A list of headers of columns to remove from the table
    edit: A dict of functions to edit the values in a row
expand: Like add, but adds multiple values.
Many of the arguments take a dict, with each key being the name of a header and the value being a function
to produce a value for the row. In all cases, the function signature is:
f(pipe, row, value)
However, the value is only set for edit entries
>>> pl = b.pipeline('source','dimensions')
>>> pl.last.append(AddDeleteExpand(
>>> delete = ['time','county','state'],
>>> add={ "a": lambda e,r: r[4], "b": lambda e,r: r[1]},
    >>> edit = {'stusab': lambda e,r,v: v.lower(), 'county_name' : lambda e,r,v: v.upper() },
>>> expand = { ('x','y') : lambda e, r: [ parse(r[1]).hour, parse(r[1]).minute ] } ))
The ``add`` argument may also take a list, which is the names of the headers to add. The column value will be None.
"""
def __init__(self, add=[], delete=[], edit={}, expand={}, as_dict=False):
"""
:param add: List of blank columns to add, by header name, or dict of
headers and functions to create the column value
:param delete: List of headers names of columns to delete
:param edit: Dict of header names and functions to alter the value.
:return:
"""
from collections import OrderedDict
self.add = add
self.delete = delete
self.edit = edit
self.expand = expand
self.as_dict = as_dict
if isinstance(self.add, (list, tuple)):
            # Convert the list of headers into a set of functions that
            # just produce None
self.add = OrderedDict((k, lambda e, r, v: None) for k in self.add)
self.edit_header = None
self.edit_header_code = None
self.edit_row = None
self.edit_row_code = None
self.expand_row = None
self.expand_row_code = None
self.edit_functions = None # Turn dict lookup into list lookup
self._row_proxy = None
def process_header(self, row):
from rowgenerators.rowproxy import RowProxy
self.edit_functions = [None] * len(row)
header_parts = []
row_parts = []
for i, h in enumerate(row):
if h in self.delete:
pass
elif h in self.edit:
self.edit_functions[i] = self.edit[h]
row_parts.append('self.edit_functions[{i}](self,r, r[{i}])'.format(i=i))
header_parts.append('r[{}]'.format(i))
else:
row_parts.append('r[{}]'.format(i))
header_parts.append('r[{}]'.format(i))
for f in self.add.values():
self.edit_functions.append(f)
i = len(self.edit_functions) - 1
assert self.edit_functions[i] == f
row_parts.append('self.edit_functions[{i}](self,r, None)'.format(i=i))
# The expansions get tacked onto the end, after the adds.
header_expansions = []
row_expanders = [] # The outputs of the expanders are combined, outputs must have same length as header_expansions
self.expand_row = lambda e: [] # Null output
for k, f in self.expand.items():
self.edit_functions.append(f)
i = len(self.edit_functions) - 1
assert self.edit_functions[i] == f
header_expansions += list(k) # k must be a list or tuple or other iterable.
row_expanders.append('self.edit_functions[{i}](self,r, None)'.format(i=i))
if header_expansions:
self.expand_row_code = "lambda r,self=self: ({})".format('+'.join(row_expanders))
self.expand_row = eval(self.expand_row_code)
        # Maybe lookups in tuples are faster than in lists.
self.edit_functions = tuple(self.edit_functions)
header_extra = ["'{}'".format(e) for e in (list(self.add.keys()) + header_expansions)]
# Build the single function to edit the header or row all at once
self.edit_header_code = "lambda r: [{}]".format(','.join(header_parts + header_extra))
self.edit_header = eval(self.edit_header_code)
# FIXME: Should probably use itemgetter() instead of eval
self.edit_row_code = "lambda r,self=self: [{}]".format(','.join(row_parts))
self.edit_row = eval(self.edit_row_code)
# Run it!
headers = self.edit_header(row)
self._row_proxy = RowProxy(headers)
return headers
def process_body(self, row):
rp = self._row_proxy.set_row(row)
try:
r1 = self.edit_row(rp)
except:
# Todo, put this into the exception
print('EDIT ROW CODE', self.edit_row_code)
raise
try:
r2 = self.expand_row(rp)
except:
# FIXME: put this into the exception
print('EXPAND ROW CODE: ', self.expand_row_code)
raise
return r1 + r2
def __str__(self):
from ..util import qualified_class_name
return (qualified_class_name(self) + '\n' +
self.indent + 'H:' + str(self.edit_header_code) + '\n' +
self.indent + 'B:' + str(self.edit_row_code))
class Add(AddDeleteExpand):
"""Add fields to a row"""
def __init__(self, add):
"""Add fields using a dict of lambdas. THe new field is appended to the end of the row.
>>> pl = Pipeline()
>>> pl.last = Add({'source_id': lambda pipe,row: pipe.source.sequence_id })
"""
super(Add, self).__init__(add=add)
class Expand(AddDeleteExpand):
"""Add columns to the header"""
def __init__(self, expand, as_dict=False):
super(Expand, self).__init__(expand=expand, as_dict=as_dict)
class Delete(AddDeleteExpand):
"""Delete columns. """
def __init__(self, delete):
super(Delete, self).__init__(delete=delete)
def __str__(self):
from rowgenerators.util import qualified_class_name
return qualified_class_name(self) + 'delete = ' + ', '.join(self.delete)
class SelectColumns(AddDeleteExpand):
"""Pass through only the sepcified columns, deleting all others. """
def __init__(self, keep):
super(SelectColumns, self).__init__()
self.keep = keep
def process_header(self, row):
        # Materialize as a list so repeated membership tests and joins work under Python 3.
        self.delete = [e for e in row if e not in self.keep]
return super(SelectColumns, self).process_header(row)
def __str__(self):
from rowgenerators.util import qualified_class_name
return qualified_class_name(self) + ' keep = ' + ', '.join(self.keep)
class Edit(AddDeleteExpand):
def __init__(self, edit, as_dict=False):
super(Edit, self).__init__(edit=edit, as_dict=as_dict)
class PassOnlyDestColumns(Delete):
"""Delete any columns that are not in the destination table"""
def __init__(self):
super(PassOnlyDestColumns, self).__init__(delete=[])
def process_header(self, row):
dest_cols = [c.name for c in self.source.dest_table.columns]
self.delete = [h for h in row if h not in dest_cols]
return super(Delete, self).process_header(row)
class CastColumns(Pipe):
"""Composes functions to map from the source table, to the destination table, with potentially
complex transformations for each column.
    The CastColumns pipe uses the transformation values in the destination schema (datatype, nullify,
    initialize, typecast, transform and exception) to transform the source rows into destination
    rows. The output rows have the lengths and column types specified in the destination schema.
"""
def __init__(self):
super(CastColumns, self).__init__()
self.row_processors = []
self.orig_headers = None
self.new_headers = None
self.row_proxy_1 = None # Row proxy with source headers
self.row_proxy_2 = None # Row proxy with dest headers
self.accumulator = {}
self.errors = None
self.row_n = 0
def process_header(self, headers):
from rowgenerators.rowproxy import RowProxy
self.orig_headers = headers
self.row_proxy_1 = RowProxy(self.orig_headers)
if len(self.source.dest_table.columns) <= 1:
raise PipelineError(self, "Destination table {} has no columns, Did you run the schema phase?"
.format(self.source.dest_table.name))
# Return the table header, rather than the original row header.
self.new_headers = [c.name for c in self.source.dest_table.columns]
self.row_proxy_2 = RowProxy(self.new_headers)
self.row_processors = self.bundle.build_caster_code(self.source, headers, pipe=self)
self.errors = {}
for h in self.orig_headers + self.new_headers:
self.errors[h] = set()
return self.new_headers
def process_body(self, row):
from rowgenerators.rowpipe.exceptions import CastingError, TooManyCastingErrors
scratch = {}
errors = {}
# Start off the first processing with the source's source headers.
rp = self.row_proxy_1
try:
for proc in self.row_processors:
row = proc(rp.set_row(row), self.row_n, self.errors, scratch, self.accumulator,
self, self.bundle, self.source)
                # After the first round, the row has the destination headers.
rp = self.row_proxy_2
except CastingError as e:
raise PipelineError(self, "Failed to cast column in table {}, row {}: {}"
.format(self.source.dest_table.name, self.row_n, e))
except TooManyCastingErrors:
self.report_errors()
return row
def report_errors(self):
from rowgenerators.rowpipe.exceptions import TooManyCastingErrors
if sum(len(e) for e in self.errors.values()) > 0:
for c, errors in self.errors.items():
for e in errors:
self.bundle.error(u'Casting Error: {}'.format(e))
raise TooManyCastingErrors()
def finish(self):
super(CastColumns, self).finish()
self.report_errors()
def __str__(self):
from rowgenerators.util import qualified_class_name
o = qualified_class_name(self) + '{} pipelines\n'.format(len(self.row_processors))
return o
class Modify(Pipe):
"""Base class to modify a whole row, as a dict. Does not modify the header. Uses a slower method
than other editing pipes. """
def __iter__(self):
from collections import OrderedDict
rg = iter(self._source_pipe)
self.headers = self.process_header(next(rg))
yield self.headers
for row in rg:
row = self.process_body(OrderedDict(list(zip(self.headers, row))))
if row:
yield list(row.values())
class RemoveBlankColumns(Pipe):
"""Remove columns that don't have a header"""
def __init__(self):
self.editor = None
def process_header(self, row):
header_parts = []
for i, h in enumerate(row):
if h.strip():
header_parts.append('r[{}]'.format(i))
if header_parts:
# FIXME: Should probably use itemgetter() instead of eval
self.editor = eval("lambda r: [{}]".format(','.join(header_parts)))
return self.editor(row)
else:
# If there are no header parts, replace the process_body() method with a passthrough.
            self.process_body = lambda row: row
return row
def process_body(self, row):
return self.editor(row)
class Skip(Pipe):
"""Skip rows of a table that match a predicate """
def __init__(self, pred, table=None):
"""
:param pred:
:param table:
:return:
"""
self.pred = pred
try:
self.table = table.name
except AttributeError:
self.table = table
self._check = False
self.skipped = 0
self.passed = 0
self.ignored = 0
self.env = None
self.code = None
def process_header(self, headers):
from .codegen import calling_code
from rowgenerators.rowproxy import RowProxy
self.env = self.bundle.exec_context(source=self.source, pipe=self)
if self.pred in self.env:
self.code = 'lambda pipe, bundle, source, row: {}'.format(calling_code(self.env[self.pred], self.pred))
self.pred = eval(self.code, self.env)
elif not callable(self.pred):
self.code = 'lambda pipe, bundle, source, row: {}'.format(self.pred)
self.pred = eval(self.code, self.env)
else:
self.code = self.pred
pass # The predicate is a callable but not in the environment.
# If there is no table specified, always run the predicate, but if the table
# is specified, only run the predicate for that table.
if self.table is None:
self._check = True
else:
self._check = self.table == self.source.dest_table.name
self.row_proxy = RowProxy(headers)
return headers
def __str__(self):
return 'Skip. {} skipped, {} passed, {} ignored'.format(self.skipped, self.passed, self.ignored)
def process_body(self, row):
try:
if not self._check:
self.ignored += 1
return row
elif self.pred(self, self.bundle, self.source, self.row_proxy.set_row(row)):
self.skipped += 1
return None
else:
self.passed += 1
return row
except Exception as e:
self.bundle.error("Failed to process predicate in Skip pipe: '{}' ".format(self.code))
raise
class Collect(Pipe):
"""Collect rows so they can be viewed or processed after the run. """
def __init__(self):
self.rows = []
def process_body(self, row):
self.rows.append(row)
return row
def process_header(self, row):
return row
class LogRate(Pipe):
def __init__(self, output_f, N, message=None):
raise NotImplementedError()
#self.lr = init_log_rate(output_f, N, message)
def process_body(self, row):
self.lr()
return row
class PrintRows(Pipe):
"""A Pipe that collects rows that pass through and displays them as a table when the pipeline is printed. """
def __init__(self, count=10, columns=None, offset=None, print_at=None):
self.columns = columns
self.offset = offset
self.count_inc = count
self.count = count
self.rows = []
self.i = 1
try:
self.print_at_row = int(print_at)
self.print_at_end = False
except:
self.print_at_row = None
self.print_at_end = bool(print_at)
def process_body(self, row):
orig_row = list(row)
if self.i < self.count:
append_row = list(row)
self.rows.append(append_row[self.offset:self.columns])
if self.i == self.print_at_row:
print(str(self))
self.i += 1
return orig_row
def finish(self):
if self.print_at_end:
print(str(self))
# For multi-run pipes, the count is the number of rows per source.
self.count += self.count_inc
def process_header(self, row):
return row
def __str__(self):
from tabulate import tabulate
from rowgenerators.util import qualified_class_name
if self.rows:
aug_header = ['0'] + ['#' + str(j) + ' ' + str(c) for j, c in enumerate(self.headers)]
return (qualified_class_name(self) +
' {} rows total\n'.format(self.i) +
tabulate([[i] + row for i, row in enumerate(self.rows)],
headers=aug_header[self.offset:self.columns], tablefmt='pipe'))
else:
return qualified_class_name(self) + ' 0 rows'
class PrintEvery(Pipe):
"""Print a row every N rows. Always prints the header. """
def __init__(self, N=1):
self.N = N
self.i = 0
def process_header(self, row):
print('Print Header: ', row)
return row
def process_body(self, row):
if self.i % self.N == 0:
print('Print Row :', row)
self.i += 1
return row
class Reduce(Pipe):
"""Like works like reduce() on the body rows, using the function f(accumulator,row) """
def __init__(self, f, initializer=None):
self._f = f
self._initializer = initializer
self.accumulator = None
def __iter__(self):
it = iter(self._source_pipe)
# Yield the header
self.headers = next(it)
yield self.headers
if self._initializer is None:
try:
self.accumulator = self._f(None, next(it))
except StopIteration:
raise TypeError('reduce() of empty sequence with no initial value')
else:
self.accumulator = self._initializer
for row in it:
self.accumulator = self._f(self.accumulator, row)
yield row
def make_table_map(table, headers):
"""Create a function to map from rows with the structure of the headers to the structure of the table."""
header_parts = {}
for i, h in enumerate(headers):
header_parts[h] = 'row[{}]'.format(i)
body_code = 'lambda row: [{}]'.format(','.join(header_parts.get(c.name, 'None') for c in table.columns))
header_code = 'lambda row: [{}]'.format(
','.join(header_parts.get(c.name, "'{}'".format(c.name)) for c in table.columns))
return eval(header_code), eval(body_code)
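# Illustrative sketch (the ``table`` object is hypothetical): for a table whose columns are
# named ('id', 'name') and an incoming header ['name', 'extra'], make_table_map returns a
# (header_func, body_func) pair where header_func(['name', 'extra']) -> ['id', 'name'] and
# body_func(body_row) -> [None, body_row[0]].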
class PipelineSegment(list):
def __init__(self, pipeline, name, *args):
list.__init__(self)
self.pipeline = pipeline
self.name = name
for p in args:
assert not isinstance(p, (list, tuple))
self.append(p)
def __getitem__(self, k):
import inspect
# Index by class
if inspect.isclass(k):
matches = [e for e in self if isinstance(e, k)]
if not matches:
raise IndexError('No entry for class: {}'.format(k))
k = self.index(matches[0]) # Only return first index
return super(PipelineSegment, self).__getitem__(k)
def append(self, x):
self.insert(len(self), x)
return self
def prepend(self, x):
self.insert(0, x)
return self
def insert(self, i, x):
import inspect
assert not isinstance(x, (list, tuple))
if inspect.isclass(x):
x = x()
if isinstance(x, Pipe):
x.segment = self
x.pipeline = self.pipeline
assert not inspect.isclass(x)
super(PipelineSegment, self).insert(i, x)
@property
def source(self):
return self[0].source
from collections import OrderedDict
class Pipeline(OrderedDict):
"""Hold a defined collection of PipelineGroups, and when called, coalesce them into a single pipeline """
bundle = None
name = None
phase = None
dest_table = None
source_table = None
source_name = None
final = None
sink = None
_group_names = ['source', 'source_map', 'first', 'map', 'cast', 'body',
'last', 'select_partition', 'write', 'final']
def __init__(self, bundle=None, *args, **kwargs):
super(Pipeline, self).__init__()
super(Pipeline, self).__setattr__('bundle', bundle)
super(Pipeline, self).__setattr__('name', None)
super(Pipeline, self).__setattr__('phase', None)
super(Pipeline, self).__setattr__('source_table', None)
super(Pipeline, self).__setattr__('dest_table', None)
super(Pipeline, self).__setattr__('source_name', None)
super(Pipeline, self).__setattr__('final', [])
super(Pipeline, self).__setattr__('stopped', False)
super(Pipeline, self).__setattr__('sink', None)
for k, v in kwargs.items():
if k not in self._group_names:
raise IndexError('{} is not a valid pipeline section name'.format(k))
for group_name in self._group_names:
gs = kwargs.get(group_name, [])
if not isinstance(gs, (list, tuple)):
gs = [gs]
self.__setitem__(group_name, PipelineSegment(self, group_name, *gs))
if args:
self.__setitem__('body', PipelineSegment(self, 'body', *args))
def _subset(self, subset):
"""Return a new pipeline with a subset of the sections"""
pl = Pipeline(bundle=self.bundle)
for group_name, pl_segment in self.items():
if group_name not in subset:
continue
pl[group_name] = pl_segment
return pl
def configure(self, pipe_config):
"""Configure from a dict"""
# Create a context for evaluating the code for each pipeline. This removes the need
# to qualify the class names with the module
# ambry.build comes from ambry.bundle.files.PythonSourceFile#import_bundle
eval_locals = dict()
replacements = {}
def eval_pipe(pipe):
if isinstance(pipe, str):
try:
return eval(pipe, {}, eval_locals)
except SyntaxError as e:
raise SyntaxError("SyntaxError while parsing pipe '{}' from metadata: {}"
.format(pipe, e))
else:
return pipe
def pipe_location(pipe):
"""Return a location prefix from a pipe, or None if there isn't one """
if not isinstance(pipe, str):
return None
elif pipe[0] in '+-$!':
return pipe[0]
else:
return None
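        # Location prefixes on string pipes: '+' appends the pipe to the named segment,
        # '-' prepends it, and '!' replaces an existing pipe of the same class. If any
        # pipe in a section carries a prefix, all of them must.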
for segment_name, pipes in list(pipe_config.items()):
if segment_name == 'final':
                # The 'final' segment is actually a list of names of Bundle methods to call after the pipeline
# completes
super(Pipeline, self).__setattr__('final', pipes)
elif segment_name == 'replace':
for frm, to in pipes.items():
self.replace(eval_pipe(frm), eval_pipe(to))
else:
# Check if any of the pipes have a location command. If not, the pipe
# is cleared and the set of pipes replaces the ones that are there.
if not any(bool(pipe_location(pipe)) for pipe in pipes):
# Nope, they are all clean
self[segment_name] = [eval_pipe(pipe) for pipe in pipes]
else:
for i, pipe in enumerate(pipes):
if pipe_location(pipe): # The pipe is prefixed with a location command
location = pipe_location(pipe)
pipe = pipe[1:]
else:
raise PipelineError(
                            'If any pipes in a section have a location command, they all must have one.'
' Segment: {} pipes: {}'.format(segment_name, pipes))
ep = eval_pipe(pipe)
if location == '+': # append to the segment
self[segment_name].append(ep)
elif location == '-': # Prepend to the segment
self[segment_name].prepend(ep)
elif location == '!': # Replace a pipe of the same class
if isinstance(ep, type):
repl_class = ep
else:
repl_class = ep.__class__
self.replace(repl_class, ep, segment_name)
def replace(self, repl_class, replacement, target_segment_name=None):
"""Replace a pipe segment, specified by its class, with another segment"""
for segment_name, pipes in self.items():
if target_segment_name and segment_name != target_segment_name:
raise Exception()
repl_pipes = []
found = False
for pipe in pipes:
if isinstance(pipe, repl_class):
pipe = replacement
found = True
repl_pipes.append(pipe)
if found:
found = False
self[segment_name] = repl_pipes
@property
def file_name(self):
try:
return self.source[0].file_name
except Exception:
raise
def __setitem__(self, k, v):
        # If the caller tries to set a pipeline segment with a pipe, translate
# the call to an append on the segment.
if isinstance(v, (list, tuple)):
v = list(filter(bool, v))
empty_ps = PipelineSegment(self, k)
if isinstance(v, Pipe) or (isinstance(v, type) and issubclass(v, Pipe)):
# Assignment from a pipe is appending
self[k].append(v)
elif v is None:
# Assignment from None
super(Pipeline, self).__setitem__(k, empty_ps)
elif isinstance(v, (list, tuple)) and not v:
# Assignment from empty list
super(Pipeline, self).__setitem__(k, empty_ps)
elif isinstance(v, PipelineSegment):
super(Pipeline, self).__setitem__(k, v)
elif isinstance(v, (list, tuple)):
# Assignment from a list
super(Pipeline, self).__setitem__(k, PipelineSegment(self, k, *v))
else:
# This maybe should be an error?
super(Pipeline, self).__setitem__(k, v)
assert isinstance(self[k], PipelineSegment), 'Unexpected type: {} for {}'.format(type(self[k]), k)
def __getitem__(self, k):
# Index by class. Looks through all of the segments for the first pipe with the given class
import inspect
if inspect.isclass(k):
chain, last = self._collect()
matches = [e for e in chain if isinstance(e, k)]
if not matches:
raise IndexError("No entry for class: {} in {}".format(k, chain))
return matches[0]
else:
return super(Pipeline, self).__getitem__(k)
def __getattr__(self, k):
if not (k.startswith('__') or k.startswith('_OrderedDict__')):
return self[k]
else:
return super(Pipeline, self).__getattr__(k)
def __setattr__(self, k, v):
if k.startswith('_OrderedDict__') or k in (
'name', 'phase', 'sink', 'dest_table', 'source_name', 'source_table', 'final'):
return super(Pipeline, self).__setattr__(k, v)
self.__setitem__(k, v)
def _collect(self):
import inspect
chain = []
# This is supposed to be an OrderedDict, but it doesn't seem to want to
# retain the ordering, so we force it on output.
for group_name in self._group_names:
assert isinstance(self[group_name], PipelineSegment)
for p in self[group_name]:
chain.append(p)
if len(chain):
last = chain[0]
for p in chain[1:]:
assert not inspect.isclass(p)
try:
p.set_source_pipe(last)
last = p
except:
print(p)
raise
else:
last = None
for p in chain:
p.bundle = self.bundle
return chain, last
def run(self, count=None, source_pipes=None, callback=None, limit = None):
try:
self.sink = Sink(count=count, callback=callback)
if source_pipes:
for source_pipe in source_pipes:
if self.bundle:
self.bundle.logger.info(
'Running source {} in a multi-source run'.format(source_pipe.source.name))
self['source'] = [source_pipe] # Setting as a scalar appends, as a list will replace.
chain, last = self._collect()
self.sink.set_source_pipe(last)
self.sink.run(limit=limit)
else:
chain, last = self._collect()
self.sink.set_source_pipe(last)
self.sink.run(limit=limit)
except StopPipe:
super(Pipeline, self).__setattr__('stopped', True)
return self
def iter(self):
chain, last = self._collect()
# Iterate over the last pipe, which will pull from all those before it.
for row in last:
yield row
def __str__(self):
out = []
chain, last = self._collect()
for pipe in chain:
segment_name = pipe.segment.name if hasattr(pipe, 'segment') else '?'
out.append('{}: {}'.format(segment_name, pipe))
out.append('final: ' + str(self.final))
return 'Pipeline {}\n'.format(self.name if self.name else '') + '\n'.join(out)
def headers_report(self):
from tabulate import tabulate
from rowgenerators.util import qualified_class_name
out = []
chain, last = self._collect()
for pipe in chain:
seg_name = pipe.segment.name if hasattr(pipe, 'segment') else '?'
if not hasattr(pipe, 'headers') or not pipe.headers:
out.append([seg_name, qualified_class_name(pipe)])
else:
try:
v = [seg_name, qualified_class_name(pipe),
len(pipe.headers)] + [str(e)[:10] for e in pipe.headers if e]
out.append(v)
except AttributeError:
pass
if not out:
return None
# Make all lines the same length
ll = max(len(e) for e in out)
for i in range(len(out)):
if len(out[i]) < ll:
out[i] += [''] * (ll - len(out[i]))
return tabulate(out)
def augment_pipeline(pl, head_pipe=None, tail_pipe=None):
"""
Augment the pipeline by adding a new pipe section to each stage that has one or more pipes. Can be used for debugging
:param pl:
    :param head_pipe: Pipe to insert at the start of each non-empty segment (except 'source')
    :param tail_pipe: Pipe to append to the end of each non-empty segment
:return:
"""
for k, v in pl.items():
if v and len(v) > 0:
if head_pipe and k != 'source': # Can't put anything before the source.
v.insert(0, head_pipe)
if tail_pipe:
v.append(tail_pipe)
def _to_ascii(s):
""" Converts given string to ascii ignoring non ascii.
Args:
s (text or binary):
Returns:
str:
"""
# TODO: Always use unicode within ambry.
if isinstance(s, str):
ascii_ = s.encode('ascii', 'ignore')
elif isinstance(s, bytes):
ascii_ = s.decode('utf-8').encode('ascii', 'ignore')
else:
raise Exception('Unknown text type - {}'.format(type(s)))
return ascii_
| {
"content_hash": "e9451be23c1a9d689d059110d4fefa9d",
"timestamp": "",
"source": "github",
"line_count": 2116,
"max_line_length": 123,
"avg_line_length": 28.95982986767486,
"alnum_prop": 0.5559163824474942,
"repo_name": "CivicKnowledge/rowgenerators",
"id": "5e39250bc022081442c723ac5d0264ece93f129f",
"size": "61423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rowgenerators/rowpipe/pipeline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "823"
},
{
"name": "Python",
"bytes": "109674"
}
],
"symlink_target": ""
} |
from django import forms
class GenericRepeaterForm(forms.Form):
url = forms.URLField(
required=True,
label='URL to forward to',
help_text='Please enter the full url, like http://www.example.com/forwarding/',
widget=forms.TextInput(attrs={"class": "url"})
)
class FormRepeaterForm(GenericRepeaterForm):
exclude_device_reports = forms.BooleanField(
required=False,
label='Exclude device reports',
initial=True
) | {
"content_hash": "fe92cf7c5ab0099ae657ee3a0ef42fd7",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 87,
"avg_line_length": 26.944444444444443,
"alnum_prop": 0.6597938144329897,
"repo_name": "SEL-Columbia/commcare-hq",
"id": "4fb09b8b442bdae96112617cefdb36d5a246ca88",
"size": "485",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/receiverwrapper/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "768322"
},
{
"name": "JavaScript",
"bytes": "2647080"
},
{
"name": "Python",
"bytes": "7806659"
},
{
"name": "Shell",
"bytes": "28569"
}
],
"symlink_target": ""
} |
import os
from flask import (Flask, jsonify, render_template, request, make_response,
send_from_directory)
from flask_cors import CORS
from db import get_full_name
app = Flask(__name__)
cors = CORS(app)
@app.route("/")
def index():
content_type = request.headers.get('Content-Type', '')
browser = request.headers.get('User-Agent', '').lower()
if request_wants_json():
return jsonify(name=get_full_name())
if browser[:4] in ('curl', 'wget') and content_type in ('text/plain', ''):
return make_response((u"{0}\n".format(get_full_name()), 200,
{'Content-Type': 'text/plain'}))
else:
return render_template('index.html', name=get_full_name())
@app.route('/humans.txt')
def static_from_root():
return send_from_directory(app.static_folder, request.path[1:])
def request_wants_json():
accepted = request.accept_mimetypes
best = accepted.best_match(['application/json', 'text/html'])
return (best == 'application/json' and
accepted[best] > accepted['text/html'])
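# Illustrative content negotiation (comments only, not part of the original app):
# `curl -H "Accept: application/json" http://localhost:5000/` receives the JSON
# response, while a plain `curl http://localhost:5000/` falls through to the
# text/plain branch in index() above.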
if __name__ == "__main__":
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
| {
"content_hash": "cde8b97bd0e0dd3907758040c1d07b04",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 78,
"avg_line_length": 30.71794871794872,
"alnum_prop": 0.6168614357262103,
"repo_name": "treyhunner/pseudorandom.name",
"id": "928ab78ba6fc7ec84bd5a03249fae803dea8c01b",
"size": "1220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pseudorandom.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1508"
},
{
"name": "Python",
"bytes": "3389"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from .zmqredis import ZMQRedis
def factory(commclass, config):
if commclass == 'ZMQRedis':
return ZMQRedis(config)
return ZMQRedis(config)
def cleanup(commclass, config):
return ZMQRedis.cleanup(config)
| {
"content_hash": "d220365fe0674c717316bebd2a0eaab9",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 38,
"avg_line_length": 19.142857142857142,
"alnum_prop": 0.7164179104477612,
"repo_name": "PaloAltoNetworks/minemeld-core",
"id": "f9c6f9a039a6f49d1e545fc2974f0e8d35984b83",
"size": "268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "minemeld/comm/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "764"
},
{
"name": "C",
"bytes": "3998"
},
{
"name": "Python",
"bytes": "1312719"
},
{
"name": "Shell",
"bytes": "1827"
}
],
"symlink_target": ""
} |
"""Utility functions for metric evaluation in Pax."""
import numbers
import typing
from typing import Any, Dict, Mapping, Optional, Sequence, Union
from absl import logging
import clu.values as clu_values
import jax
from jax import numpy as jnp
from jax.experimental import global_device_array
import numpy as np
from paxml import summary_utils
from praxis import py_utils
from praxis import pytypes
import seqio
from tensorflow.compat.v2 import summary as tf_summary
# internal runtime import
Metrics = pytypes.Metrics
WeightedScalar = pytypes.WeightedScalar
WeightedScalars = pytypes.WeightedScalars
WeightedScalarsList = pytypes.WeightedScalarsList
NestedMap = py_utils.NestedMap
SummaryValueTypes = Union[clu_values.Scalar, clu_values.Image, clu_values.Text]
_VALUES_TO_SUMMARY_TYPE = {
clu_values.Scalar: summary_utils.SummaryType.SCALAR,
clu_values.Text: summary_utils.SummaryType.TEXT,
clu_values.Image: summary_utils.SummaryType.IMAGE,
}
def _get_summary_type(
metric_value: SummaryValueTypes) -> summary_utils.SummaryType:
"""Infers metric summary type from the metric value type."""
if type(metric_value) not in _VALUES_TO_SUMMARY_TYPE:
raise ValueError(f'Unknown metric value type: {type(metric_value)}.')
return _VALUES_TO_SUMMARY_TYPE[type(metric_value)]
def compute_metric_values(metrics: Metrics) -> Dict[str, SummaryValueTypes]:
"""Given a dict of clu_metrics.Metric objects, returns their values.
Args:
    metrics: A Dict[str, clu_metrics.Metric] whose values implement a
      compute_value() function returning either a clu_values.Value, a
      Dict[str, clu_values.Value], a Dict[str, List[clu_values.Value]],
      or a List[clu_values.Value].
Returns:
    metric_values: A flattened Dict[str, clu_values.Value].
"""
logging.info('Computing metric values.')
metric_values = {}
for metric_name, metric in metrics.items():
logging.info('Computing metric %s', metric_name)
metric_value = metric.compute_value()
  # compute_value() can return either a scalar clu_values.Value, a
  # Dict[str, clu_values.Value], a Dict[str, List[clu_values.Value]],
  # or a List[clu_values.Value].
if isinstance(metric_value, dict):
for key, value in metric_value.items():
summary_key = f'{metric_name}/{key}'
if isinstance(value, (list, tuple)):
for i, subval in enumerate(value):
summary_key_i = f'{summary_key}_{i}'
metric_values[summary_key_i] = subval
else:
metric_values[summary_key] = value
elif isinstance(metric_value, (list, tuple)):
for i, value in enumerate(metric_value):
metric_values[f'{metric_name}/{metric_name}_{i}'] = value
elif isinstance(
metric_value, (clu_values.Scalar, clu_values.Image, clu_values.Text)):
metric_values[f'{metric_name}'] = metric_value
else:
raise ValueError(
'Unrecognized compute_value() output format for metric '
f'{metric_name}: {type(metric_value)}.')
return metric_values
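# Illustrative flattening (a sketch; `AccuracyMetric` is a hypothetical
# clu_metrics.Metric whose compute_value() returns a dict of clu_values):
#
#     values = compute_metric_values({'accuracy': AccuracyMetric(...)})
#     # e.g. {'accuracy/top1': clu_values.Scalar(0.91),
#     #       'accuracy/top5': clu_values.Scalar(0.97)}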
def write_clu_metric_summaries(
metric_values: Dict[str, SummaryValueTypes],
step_i: int) -> None:
"""Given a dict of metric values, writes them out as tensorboard summaries.
This is expected to be called under a summary context.
Args:
    metric_values: A Dict[str, Any] of metric values. These values
      are one of the various clu_values.Value subtypes.
step_i: An int representing the current step of decoding.
"""
if not metric_values:
return
logging.info('Summarizing metrics.')
for metric_name, metric_value in metric_values.items():
logging.info('Summarizing metric %s', metric_name)
summary_type = _get_summary_type(metric_value)
summary_utils.write_summary_tensor(
step_i, metric_name, metric_value.value, summary_type)
def write_seqio_metric_summaries(seqio_metrics: Sequence[Mapping[str, Union[
seqio.metrics.MetricValue, float]]], metric_name_prefix: str,
step: int) -> None:
"""Write seqio metric as tensorboard summaries.
Args:
    seqio_metrics: A sequence of dicts mapping str to a seqio metric value or float.
    metric_name_prefix: A prefix added to each metric name.
    step: An int representing the current step.
"""
for m_dict in seqio_metrics:
for k, v in m_dict.items():
metric_name = f'{metric_name_prefix}/{k}'
if isinstance(v, seqio.metrics.Text):
metric_str = (
v.textdata.decode()
if isinstance(v.textdata, bytes) else v.textdata)
logging.info('Writing summary of %s with string value %s.', metric_name,
metric_str)
tf_summary.text(metric_name, metric_str, step=step)
continue
if isinstance(v, seqio.metrics.Audio):
logging.info('Writing summary of %s with audio.', metric_name)
tf_summary.audio(
metric_name,
v.audiodata,
v.sample_rate,
step=step,
max_outputs=v.max_outputs)
continue
if isinstance(v, seqio.metrics.Generic):
tf_summary.write(metric_name, v.tensor, metadata=v.metadata, step=step)
continue
if isinstance(v, seqio.metrics.Scalar):
v = float(v.value)
else:
v = float(v)
logging.info('Writing summary of %s with value %.4f.', metric_name, v)
summary_utils.write_summary_tensor(step, metric_name, v,
summary_utils.SummaryType.SCALAR)
def is_scalar(v: Any) -> bool:
"""Returns True if input is a scalar."""
  return isinstance(
      v, (numbers.Number, np.ndarray, jnp.ndarray,
          global_device_array.GlobalDeviceArray, jax.Array))
def is_weighted_scalar(v: Any) -> bool:
"""Returns True if input is a weighted scalar."""
return (isinstance(v, tuple) and len(v) == 2 and is_scalar(v[0]) and
is_scalar(v[1]))
def is_float_convertible(metric_value: Union[numbers.Number, clu_values.Value,
seqio.metrics.MetricValue]):
"""Returns True if a metricv value is float convertible."""
return (isinstance(metric_value, numbers.Number) or
isinstance(metric_value, clu_values.Scalar) or
isinstance(metric_value, seqio.metrics.Scalar) or
is_weighted_scalar(metric_value) or
(isinstance(metric_value, list) and
all(is_weighted_scalar(v) for v in metric_value)))
def as_float(
metric_value: Union[numbers.Number, clu_values.Scalar, seqio.metrics.Scalar,
WeightedScalar, Sequence[WeightedScalar]]
) -> float:
"""Returns the aggregated float value from heterogeneous metric value."""
if is_weighted_scalar(metric_value):
metric_value = [metric_value]
if isinstance(metric_value, list):
assert all(is_weighted_scalar(v) for v in metric_value), metric_value
values = np.stack([x[0] for x in metric_value])
weights = np.stack([x[1] for x in metric_value])
return np.sum(values * weights) / np.sum(weights)
if isinstance(metric_value, (clu_values.Scalar, seqio.metrics.Scalar)):
return metric_value.value
assert isinstance(metric_value, numbers.Number), metric_value
return float(typing.cast(Any, metric_value))
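# Example aggregation: a list of (value, weight) pairs is reduced to the
# weighted mean, e.g. as_float([(1.0, 2.0), (3.0, 2.0)]) == 2.0.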
def as_float_dict(
metric_output: Union[
Dict[str, Union[SummaryValueTypes]],
WeightedScalars,
WeightedScalarsList,
Mapping[str, Union[seqio.metrics.MetricValue, float]]],
raise_on_non_float_convertible: bool = False) -> Dict[str, float]:
"""Returns a float dict from heterogeneous metric output."""
results = {}
for k, v in metric_output.items():
if not is_float_convertible(v):
if raise_on_non_float_convertible:
raise ValueError(f'Summary value cannot be converted to float: {v}.')
continue
results[k] = as_float(v)
return results
def update_float_dict(target: Dict[str, float],
source: Dict[str, float],
prefix: Optional[str] = None) -> Dict[str, float]:
"""Inserts items from source dict to target dict with an optional prefix."""
if prefix is None:
target.update(source)
else:
for k, v in source.items():
target[f'{prefix}/{k}'] = v
return target
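# Example prefixing behavior:
#     update_float_dict({'train/loss': 0.3}, {'loss': 0.5}, prefix='eval')
#     # -> {'train/loss': 0.3, 'eval/loss': 0.5}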
| {
"content_hash": "d6a4cf3c23ea7a7cee286f6953d0037a",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 115,
"avg_line_length": 37.25112107623318,
"alnum_prop": 0.670278078728783,
"repo_name": "google/paxml",
"id": "f8851071f67f885bbb559868952102b7363bfe33",
"size": "8898",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "paxml/metric_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7552"
},
{
"name": "Python",
"bytes": "916641"
},
{
"name": "Shell",
"bytes": "11373"
},
{
"name": "Starlark",
"bytes": "42602"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_stormtrooper_bombardier_m.iff"
result.attribute_template_id = 9
result.stfName("npc_name","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "03c07239e8e251f8da721468af02bc18",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 79,
"avg_line_length": 22.923076923076923,
"alnum_prop": 0.6946308724832215,
"repo_name": "anhstudios/swganh",
"id": "b3ec6038e6f3d93fd4b54f99f2dc1a4bdca750eb",
"size": "443",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/mobile/shared_dressed_stormtrooper_bombardier_m.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
from actstream.models import Action
from django.test import TestCase
from cyidentity.cyfullcontact.tests.util import create_sample_contact_info
class FullContactActivityStreamTestCase(TestCase):
def test_contact_create(self):
contact_info = create_sample_contact_info()
action = Action.objects.actor(contact_info).latest('timestamp')
self.assertEqual(action.verb, 'FullContact information was created')
| {
"content_hash": "f5c912c1a9edeb4a6bb89547390e098b",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 76,
"avg_line_length": 43.3,
"alnum_prop": 0.7736720554272517,
"repo_name": "shawnhermans/cyborgcrm",
"id": "286cc0c6dd5ebf47c641901da335c874abae6f46",
"size": "433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cyidentity/cyfullcontact/tests/test_activity_stream.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "26682"
},
{
"name": "DIGITAL Command Language",
"bytes": "667"
},
{
"name": "HTML",
"bytes": "405415"
},
{
"name": "JavaScript",
"bytes": "735"
},
{
"name": "Python",
"bytes": "100893"
},
{
"name": "Shell",
"bytes": "725"
}
],
"symlink_target": ""
} |
import _thread as thread
import atexit
from collections.abc import Mapping
from datetime import timedelta
from enum import IntEnum
import functools
import glob
import json
import logging
import numbers
import os
import re
import sys
import threading
import time
import traceback
from types import TracebackType
from typing import (
Any,
Callable,
Dict,
List,
NamedTuple,
Optional,
Sequence,
TextIO,
Tuple,
Type,
Union,
)
from typing import TYPE_CHECKING
import requests
import wandb
from wandb import errors
from wandb import trigger
from wandb._globals import _datatypes_set_callback
from wandb.apis import internal, public
from wandb.apis.internal import Api
from wandb.apis.public import Api as PublicApi
from wandb.proto.wandb_internal_pb2 import (
MetricRecord,
PollExitResponse,
RunRecord,
)
from wandb.util import (
_is_artifact_object,
_is_artifact_string,
_is_artifact_version_weave_dict,
_is_py_path,
add_import_hook,
artifact_to_json,
parse_artifact_string,
sentry_set_scope,
to_forward_slash_path,
)
from wandb.viz import (
custom_chart,
CustomChart,
Visualize,
)
from . import wandb_artifacts
from . import wandb_config
from . import wandb_metric
from . import wandb_summary
from .data_types._dtypes import TypeRegistry
from .interface.artifacts import Artifact as ArtifactInterface
from .interface.interface import GlobStr, InterfaceBase
from .interface.summary_record import SummaryRecord
from .lib import (
config_util,
deprecate,
filenames,
filesystem,
ipython,
module,
proto_util,
redirect,
telemetry,
)
from .lib.exit_hooks import ExitHooks
from .lib.filenames import DIFF_FNAME
from .lib.git import GitRepo
from .lib.printer import get_printer
from .lib.reporting import Reporter
from .lib.wburls import wburls
from .wandb_artifacts import Artifact
from .wandb_settings import Settings, SettingsConsole
from .wandb_setup import _WandbSetup
if TYPE_CHECKING:
if sys.version_info >= (3, 8):
from typing import TypedDict
else:
from typing_extensions import TypedDict
from .data_types.base_types.wb_value import WBValue
from .wandb_alerts import AlertLevel
from .interface.artifacts import (
ArtifactEntry,
ArtifactManifest,
)
from .interface.interface import FilesDict, PolicyName
from .lib.printer import PrinterTerm, PrinterJupyter
from wandb.proto.wandb_internal_pb2 import (
CheckVersionResponse,
GetSummaryResponse,
SampledHistoryResponse,
)
class GitSourceDict(TypedDict):
remote: str
commit: str
entrypoint: List[str]
class ArtifactSourceDict(TypedDict):
artifact: str
entrypoint: List[str]
class ImageSourceDict(TypedDict):
image: str
class JobSourceDict(TypedDict, total=False):
_version: str
source_type: str
source: Union[GitSourceDict, ArtifactSourceDict, ImageSourceDict]
input_types: Dict[str, Any]
output_types: Dict[str, Any]
runtime: Optional[str]
logger = logging.getLogger("wandb")
EXIT_TIMEOUT = 60
RE_LABEL = re.compile(r"[a-zA-Z0-9_-]+$")
class TeardownStage(IntEnum):
EARLY = 1
LATE = 2
class TeardownHook(NamedTuple):
call: Callable[[], None]
stage: TeardownStage
class RunStatusChecker:
"""Periodically polls the background process for relevant updates.
For now, we just use this to figure out if the user has requested a stop.
"""
def __init__(
self,
interface: InterfaceBase,
stop_polling_interval: int = 15,
retry_polling_interval: int = 5,
) -> None:
self._interface = interface
self._stop_polling_interval = stop_polling_interval
self._retry_polling_interval = retry_polling_interval
self._join_event = threading.Event()
self._stop_thread = threading.Thread(target=self.check_status)
self._stop_thread.name = "ChkStopThr"
self._stop_thread.daemon = True
self._stop_thread.start()
self._retry_thread = threading.Thread(target=self.check_network_status)
self._retry_thread.name = "NetStatThr"
self._retry_thread.daemon = True
self._retry_thread.start()
def check_network_status(self) -> None:
join_requested = False
while not join_requested:
status_response = self._interface.communicate_network_status()
if status_response and status_response.network_responses:
for hr in status_response.network_responses:
if (
hr.http_status_code == 200 or hr.http_status_code == 0
): # we use 0 for non-http errors (eg wandb errors)
wandb.termlog(f"{hr.http_response_text}")
else:
wandb.termlog(
"{} encountered ({}), retrying request".format(
hr.http_status_code, hr.http_response_text.rstrip()
)
)
join_requested = self._join_event.wait(self._retry_polling_interval)
def check_status(self) -> None:
join_requested = False
while not join_requested:
status_response = self._interface.communicate_stop_status()
if status_response and status_response.run_should_stop:
# TODO(frz): This check is required
# until WB-3606 is resolved on server side.
if not wandb.agents.pyagent.is_running():
thread.interrupt_main()
return
join_requested = self._join_event.wait(self._stop_polling_interval)
def stop(self) -> None:
self._join_event.set()
def join(self) -> None:
self.stop()
self._stop_thread.join()
self._retry_thread.join()
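# Illustrative lifecycle (a sketch; `backend.interface` stands for any object
# implementing the InterfaceBase polling calls used above):
#
#     checker = RunStatusChecker(backend.interface)  # starts both polling threads
#     ...                                            # run the experiment
#     checker.join()                                 # signal shutdown and wait for the threads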
class _run_decorator: # noqa: N801
_is_attaching: str = ""
class Dummy:
...
@classmethod
def _attach(cls, func: Callable) -> Callable:
@functools.wraps(func)
def wrapper(self: Type["Run"], *args: Any, **kwargs: Any) -> Any:
# * `_attach_id` is only assigned in service hence for all non-service cases
# it will be a passthrough.
# * `_attach_pid` is only assigned in _init (using _attach_pid guarantees single attach):
# - for non-fork case the object is shared through pickling so will be None.
            #   - for fork case the new process shares mem space hence the value would be that of the parent process.
if (
getattr(self, "_attach_id", None)
and getattr(self, "_attach_pid", None) != os.getpid()
):
if cls._is_attaching:
message = f"Trying to attach `{func.__name__}` while in the middle of attaching `{cls._is_attaching}`"
raise RuntimeError(message)
cls._is_attaching = func.__name__
try:
wandb._attach(run=self)
except Exception as e:
# In case the attach fails we will raise the exception that caused the issue.
# This exception should be caught and fail the execution of the program.
cls._is_attaching = ""
raise e
cls._is_attaching = ""
return func(self, *args, **kwargs)
return wrapper
@classmethod
def _noop(cls, func: Callable) -> Callable:
@functools.wraps(func)
def wrapper(self: Type["Run"], *args: Any, **kwargs: Any) -> Any:
# `_attach_id` is only assigned in service hence for all service cases
# it will be a passthrough. We don't pickle non-service so again a way to see that we are in non-service case
if getattr(self, "_attach_id", None) is None:
# `_init_pid` is only assigned in __init__ (this will be constant check for mp):
# - for non-fork case the object is shared through pickling and we don't pickle non-service so will be None
                #   - for fork case the new process shares mem space hence the value would be that of the parent process.
_init_pid = getattr(self, "_init_pid", None)
if _init_pid != os.getpid():
message = "`{}` ignored (called from pid={}, `init` called from pid={}). See: {}".format(
func.__name__,
os.getpid(),
_init_pid,
wburls.get("multiprocess"),
)
# - if this process was pickled in non-service case, we ignore the attributes (since pickle is not supported)
# - for fork case will use the settings of the parent process
# - only point of inconsistent behavior from forked and non-forked cases
settings = getattr(self, "_settings", None)
if settings and settings["strict"]:
wandb.termerror(message, repeat=False)
raise errors.MultiprocessError(
f"`{func.__name__}` does not support multiprocessing"
)
wandb.termwarn(message, repeat=False)
return cls.Dummy()
return func(self, *args, **kwargs)
return wrapper
class Run:
"""A unit of computation logged by wandb. Typically, this is an ML experiment.
Create a run with `wandb.init()`:
<!--yeadoc-test:run-object-basic-->
```python
import wandb
run = wandb.init()
```
There is only ever at most one active `wandb.Run` in any process,
and it is accessible as `wandb.run`:
<!--yeadoc-test:global-run-object-->
```python
import wandb
assert wandb.run is None
wandb.init()
assert wandb.run is not None
```
anything you log with `wandb.log` will be sent to that run.
If you want to start more runs in the same script or notebook, you'll need to
finish the run that is in-flight. Runs can be finished with `wandb.finish` or
by using them in a `with` block:
<!--yeadoc-test:run-context-manager-->
```python
import wandb
wandb.init()
wandb.finish()
assert wandb.run is None
with wandb.init() as run:
pass # log data here
assert wandb.run is None
```
See the documentation for `wandb.init` for more on creating runs, or check out
[our guide to `wandb.init`](https://docs.wandb.ai/guides/track/launch).
In distributed training, you can either create a single run in the rank 0 process
and then log information only from that process, or you can create a run in each process,
logging from each separately, and group the results together with the `group` argument
to `wandb.init`. For more details on distributed training with W&B, check out
[our guide](https://docs.wandb.ai/guides/track/advanced/distributed-training).
Currently, there is a parallel `Run` object in the `wandb.Api`. Eventually these
two objects will be merged.
Attributes:
summary: (Summary) Single values set for each `wandb.log()` key. By
default, summary is set to the last value logged. You can manually
set summary to the best value, like max accuracy, instead of the
final value.
"""
_telemetry_obj: telemetry.TelemetryRecord
_telemetry_obj_active: bool
_telemetry_obj_dirty: bool
_telemetry_obj_flushed: bytes
_teardown_hooks: List[TeardownHook]
_tags: Optional[Tuple[Any, ...]]
_entity: Optional[str]
_project: Optional[str]
_group: Optional[str]
_job_type: Optional[str]
_name: Optional[str]
_notes: Optional[str]
_run_obj: Optional[RunRecord]
_run_obj_offline: Optional[RunRecord]
# Use string literal annotation because of type reference loop
_backend: Optional["wandb.sdk.backend.backend.Backend"]
_internal_run_interface: Optional[
Union[
"wandb.sdk.interface.interface_queue.InterfaceQueue",
"wandb.sdk.interface.interface_grpc.InterfaceGrpc",
]
]
_wl: Optional[_WandbSetup]
_out_redir: Optional[redirect.RedirectBase]
_err_redir: Optional[redirect.RedirectBase]
_redirect_cb: Optional[Callable[[str, str], None]]
_redirect_raw_cb: Optional[Callable[[str, str], None]]
_output_writer: Optional["filesystem.CRDedupedFile"]
_quiet: Optional[bool]
_atexit_cleanup_called: bool
_hooks: Optional[ExitHooks]
_exit_code: Optional[int]
_run_status_checker: Optional[RunStatusChecker]
_check_version: Optional["CheckVersionResponse"]
_sampled_history: Optional["SampledHistoryResponse"]
_final_summary: Optional["GetSummaryResponse"]
_poll_exit_response: Optional[PollExitResponse]
_stdout_slave_fd: Optional[int]
_stderr_slave_fd: Optional[int]
_artifact_slots: List[str]
_init_pid: int
_attach_pid: int
_iface_pid: Optional[int]
_iface_port: Optional[int]
_attach_id: Optional[str]
_is_attached: bool
_settings: Settings
_launch_artifacts: Optional[Dict[str, Any]]
def __init__(
self,
settings: Settings,
config: Optional[Dict[str, Any]] = None,
sweep_config: Optional[Dict[str, Any]] = None,
launch_config: Optional[Dict[str, Any]] = None,
) -> None:
# pid is set, so we know if this run object was initialized by this process
self._init_pid = os.getpid()
self._init(
settings=settings,
config=config,
sweep_config=sweep_config,
launch_config=launch_config,
)
def _init(
self,
settings: Settings,
config: Optional[Dict[str, Any]] = None,
sweep_config: Optional[Dict[str, Any]] = None,
launch_config: Optional[Dict[str, Any]] = None,
) -> None:
self._settings = settings
self._config = wandb_config.Config()
self._config._set_callback(self._config_callback)
self._config._set_artifact_callback(self._config_artifact_callback)
self._config._set_settings(self._settings)
self._backend = None
self._internal_run_interface = None
self.summary = wandb_summary.Summary(
self._summary_get_current_summary_callback,
)
self.summary._set_update_callback(self._summary_update_callback)
self._step = 0
self._torch_history: Optional["wandb.wandb_torch.TorchHistory"] = None
# todo: eventually would be nice to make this configurable using self._settings._start_time
# need to test (jhr): if you set start time to 2 days ago and run a test for 15 minutes,
# does the total time get calculated right (not as 2 days and 15 minutes)?
self._start_time = time.time()
_datatypes_set_callback(self._datatypes_callback)
self._printer = get_printer(self._settings._jupyter)
self._wl = None
self._reporter: Optional[Reporter] = None
self._entity = None
self._project = None
self._group = None
self._job_type = None
self._run_id = self._settings.run_id
self._starting_step = 0
self._name = None
self._notes = None
self._tags = None
self._remote_url = None
self._commit = None
self._hooks = None
self._teardown_hooks = []
self._out_redir = None
self._err_redir = None
self._stdout_slave_fd = None
self._stderr_slave_fd = None
self._exit_code = None
self._exit_result = None
self._quiet = self._settings.quiet
self._code_artifact_info: Optional[Dict[str, str]] = None
self._output_writer = None
self._used_artifact_slots: Dict[str, str] = {}
# Returned from backend request_run(), set from wandb_init?
self._run_obj = None
self._run_obj_offline = None
# Created when the run "starts".
self._run_status_checker = None
self._check_version = None
self._sampled_history = None
self._final_summary = None
self._poll_exit_response = None
# Initialize telemetry object
self._telemetry_obj = telemetry.TelemetryRecord()
self._telemetry_obj_active = False
self._telemetry_obj_flushed = b""
self._telemetry_obj_dirty = False
self._atexit_cleanup_called = False
# Pull info from settings
self._init_from_settings(self._settings)
# Initial scope setup for sentry. This might get changed when the
# actual run comes back.
sentry_set_scope(
settings_dict=self._settings,
process_context="user",
)
# Populate config
config = config or dict()
wandb_key = "_wandb"
config.setdefault(wandb_key, dict())
self._launch_artifact_mapping: Dict[str, Any] = {}
self._unique_launch_artifact_sequence_names: Dict[str, Any] = {}
if self._settings.save_code and self._settings.program_relpath:
config[wandb_key]["code_path"] = to_forward_slash_path(
os.path.join("code", self._settings.program_relpath)
)
if sweep_config:
self._config.update_locked(
sweep_config, user="sweep", _allow_val_change=True
)
if launch_config:
self._config.update_locked(
launch_config, user="launch", _allow_val_change=True
)
self._config._update(config, ignore_locked=True)
# interface pid and port configured when backend is configured (See _hack_set_run)
        # TODO: using pid isn't the best for windows as pid reuse can happen more often than on unix
self._iface_pid = None
self._iface_port = None
self._attach_id = None
self._is_attached = False
self._attach_pid = os.getpid()
# for now, use runid as attach id, this could/should be versioned in the future
if self._settings._require_service:
self._attach_id = self._settings.run_id
def _set_iface_pid(self, iface_pid: int) -> None:
self._iface_pid = iface_pid
def _set_iface_port(self, iface_port: int) -> None:
self._iface_port = iface_port
def _handle_launch_artifact_overrides(self) -> None:
if self._settings.launch and (os.environ.get("WANDB_ARTIFACTS") is not None):
try:
artifacts: Dict[str, Any] = json.loads(
os.environ.get("WANDB_ARTIFACTS", "{}")
)
except (ValueError, SyntaxError):
wandb.termwarn("Malformed WANDB_ARTIFACTS, using original artifacts")
else:
self._initialize_launch_artifact_maps(artifacts)
elif (
self._settings.launch
and self._settings.launch_config_path
and os.path.exists(self._settings.launch_config_path)
):
self._save(self._settings.launch_config_path)
with open(self._settings.launch_config_path) as fp:
launch_config = json.loads(fp.read())
if launch_config.get("overrides", {}).get("artifacts") is not None:
artifacts = launch_config.get("overrides").get("artifacts")
self._initialize_launch_artifact_maps(artifacts)
def _initialize_launch_artifact_maps(self, artifacts: Dict[str, Any]) -> None:
for key, item in artifacts.items():
self._launch_artifact_mapping[key] = item
artifact_sequence_tuple_or_slot = key.split(":")
if len(artifact_sequence_tuple_or_slot) == 2:
sequence_name = artifact_sequence_tuple_or_slot[0].split("/")[-1]
if self._unique_launch_artifact_sequence_names.get(sequence_name):
self._unique_launch_artifact_sequence_names.pop(sequence_name)
else:
self._unique_launch_artifact_sequence_names[sequence_name] = item
def _telemetry_callback(self, telem_obj: telemetry.TelemetryRecord) -> None:
self._telemetry_obj.MergeFrom(telem_obj)
self._telemetry_obj_dirty = True
self._telemetry_flush()
def _telemetry_flush(self) -> None:
if not self._telemetry_obj_active:
return
if not self._telemetry_obj_dirty:
return
if self._backend and self._backend.interface:
serialized = self._telemetry_obj.SerializeToString()
if serialized == self._telemetry_obj_flushed:
return
self._backend.interface._publish_telemetry(self._telemetry_obj)
self._telemetry_obj_flushed = serialized
self._telemetry_obj_dirty = False
def _freeze(self) -> None:
self._frozen = True
def __setattr__(self, attr: str, value: object) -> None:
if getattr(self, "_frozen", None) and not hasattr(self, attr):
raise Exception(f"Attribute {attr} is not supported on Run object.")
super().__setattr__(attr, value)
@staticmethod
def _telemetry_imports(imp: telemetry.TelemetryImports) -> None:
telem_map = dict(
pytorch_ignite="ignite",
transformers_huggingface="transformers",
)
# calculate mod_map, a mapping from module_name to telem_name
mod_map = dict()
for desc in imp.DESCRIPTOR.fields:
if desc.type != desc.TYPE_BOOL:
continue
telem_name = desc.name
mod_name = telem_map.get(telem_name, telem_name)
mod_map[mod_name] = telem_name
# set telemetry field for every module loaded that we track
mods_set = set(sys.modules)
for mod in mods_set.intersection(mod_map):
setattr(imp, mod_map[mod], True)
def _update_settings(self, settings: Settings) -> None:
self._settings = settings
self._init_from_settings(settings)
def _init_from_settings(self, settings: Settings) -> None:
if settings.entity is not None:
self._entity = settings.entity
if settings.project is not None:
self._project = settings.project
if settings.run_group is not None:
self._group = settings.run_group
if settings.run_job_type is not None:
self._job_type = settings.run_job_type
if settings.run_name is not None:
self._name = settings.run_name
if settings.run_notes is not None:
self._notes = settings.run_notes
if settings.run_tags is not None:
self._tags = settings.run_tags
def _make_proto_run(self, run: RunRecord) -> None:
"""Populate protocol buffer RunData for interface/interface."""
if self._entity is not None:
run.entity = self._entity
if self._project is not None:
run.project = self._project
if self._group is not None:
run.run_group = self._group
if self._job_type is not None:
run.job_type = self._job_type
if self._run_id is not None:
run.run_id = self._run_id
if self._name is not None:
run.display_name = self._name
if self._notes is not None:
run.notes = self._notes
if self._tags is not None:
for tag in self._tags:
run.tags.append(tag)
if self._start_time is not None:
run.start_time.FromMicroseconds(int(self._start_time * 1e6))
if self._remote_url is not None:
run.git.remote_url = self._remote_url
if self._commit is not None:
run.git.commit = self._commit
# Note: run.config is set in interface/interface:_make_run()
def _populate_git_info(self) -> None:
# Use user provided git info if available otherwise resolve it from the environment
try:
repo = GitRepo(
root=self._settings.git_root,
remote=self._settings.git_remote,
remote_url=self._settings.git_remote_url,
commit=self._settings.git_commit,
lazy=False,
)
self._remote_url, self._commit = repo.remote_url, repo.last_commit
except Exception:
wandb.termwarn("Cannot find valid git repo associated with this directory.")
def __getstate__(self) -> Any:
"""Custom pickler."""
# We only pickle in service mode
if not self._settings or not self._settings._require_service:
return
_attach_id = self._attach_id
if not _attach_id:
return
return dict(_attach_id=self._attach_id, _init_pid=self._init_pid)
def __setstate__(self, state: Any) -> None:
"""Custom unpickler."""
if not state:
return
_attach_id = state.get("_attach_id")
if not _attach_id:
return
if state["_init_pid"] == os.getpid():
raise RuntimeError("attach in the same process is not supported currently")
self.__dict__.update(state)
@property
def _torch(self) -> "wandb.wandb_torch.TorchHistory":
if self._torch_history is None:
self._torch_history = wandb.wandb_torch.TorchHistory()
return self._torch_history
@property # type: ignore
@_run_decorator._attach
def settings(self) -> Settings:
"""Returns a frozen copy of run's Settings object."""
cp = self._settings.copy()
cp.freeze()
return cp
@property # type: ignore
@_run_decorator._attach
def dir(self) -> str:
"""Returns the directory where files associated with the run are saved."""
return self._settings.files_dir
@property # type: ignore
@_run_decorator._attach
def config(self) -> wandb_config.Config:
"""Returns the config object associated with this run."""
return self._config
@property # type: ignore
@_run_decorator._attach
def config_static(self) -> wandb_config.ConfigStatic:
return wandb_config.ConfigStatic(self._config)
@property # type: ignore
@_run_decorator._attach
def name(self) -> Optional[str]:
"""Returns the display name of the run.
Display names are not guaranteed to be unique and may be descriptive.
By default, they are randomly generated.
"""
if self._name:
return self._name
if not self._run_obj:
return None
return self._run_obj.display_name
@name.setter
def name(self, name: str) -> None:
with telemetry.context(run=self) as tel:
tel.feature.set_run_name = True
self._name = name
if self._backend and self._backend.interface:
self._backend.interface.publish_run(self)
@property # type: ignore
@_run_decorator._attach
def notes(self) -> Optional[str]:
"""Returns the notes associated with the run, if there are any.
Notes can be a multiline string and can also use markdown and latex equations
inside `$$`, like `$x + 3$`.
"""
if self._notes:
return self._notes
if not self._run_obj:
return None
return self._run_obj.notes
@notes.setter
def notes(self, notes: str) -> None:
self._notes = notes
if self._backend and self._backend.interface:
self._backend.interface.publish_run(self)
@property # type: ignore
@_run_decorator._attach
def tags(self) -> Optional[Tuple]:
"""Returns the tags associated with the run, if there are any."""
if self._tags:
return self._tags
run_obj = self._run_obj or self._run_obj_offline
if run_obj:
return tuple(run_obj.tags)
return None
@tags.setter
def tags(self, tags: Sequence) -> None:
with telemetry.context(run=self) as tel:
tel.feature.set_run_tags = True
self._tags = tuple(tags)
if self._backend and self._backend.interface:
self._backend.interface.publish_run(self)
@property # type: ignore
@_run_decorator._attach
def id(self) -> str:
"""Returns the identifier for this run."""
if TYPE_CHECKING:
assert self._run_id is not None
return self._run_id
@property # type: ignore
@_run_decorator._attach
def sweep_id(self) -> Optional[str]:
"""Returns the ID of the sweep associated with the run, if there is one."""
if not self._run_obj:
return None
return self._run_obj.sweep_id or None
def _get_path(self) -> str:
parts = [
e for e in [self._entity, self._project, self._run_id] if e is not None
]
return "/".join(parts)
@property # type: ignore
@_run_decorator._attach
def path(self) -> str:
"""Returns the path to the run.
Run paths include entity, project, and run ID, in the format
`entity/project/run_id`.
"""
return self._get_path()
def _get_start_time(self) -> float:
return (
self._start_time
if not self._run_obj
else (self._run_obj.start_time.ToMicroseconds() / 1e6)
)
@property # type: ignore
@_run_decorator._attach
def start_time(self) -> float:
"""Returns the unix time stamp, in seconds, when the run started."""
return self._get_start_time()
def _get_starting_step(self) -> int:
return self._starting_step if not self._run_obj else self._run_obj.starting_step
@property # type: ignore
@_run_decorator._attach
def starting_step(self) -> int:
"""Returns the first step of the run."""
return self._get_starting_step()
@property # type: ignore
@_run_decorator._attach
def resumed(self) -> bool:
"""Returns True if the run was resumed, False otherwise."""
return self._run_obj.resumed if self._run_obj else False
@property # type: ignore
@_run_decorator._attach
def step(self) -> int:
"""Returns the current value of the step.
This counter is incremented by `wandb.log`.
"""
return self._step
def project_name(self) -> str:
run_obj = self._run_obj or self._run_obj_offline
return run_obj.project if run_obj else ""
@property # type: ignore
@_run_decorator._attach
def mode(self) -> str:
"""For compatibility with `0.9.x` and earlier, deprecate eventually."""
deprecate.deprecate(
field_name=deprecate.Deprecated.run__mode,
warning_message=(
"The mode property of wandb.run is deprecated "
"and will be removed in a future release."
),
)
return "dryrun" if self._settings._offline else "run"
@property # type: ignore
@_run_decorator._attach
def offline(self) -> bool:
return self._settings._offline
@property # type: ignore
@_run_decorator._attach
def disabled(self) -> bool:
return self._settings._noop
def _get_group(self) -> str:
run_obj = self._run_obj or self._run_obj_offline
return run_obj.run_group if run_obj else ""
@property # type: ignore
@_run_decorator._attach
def group(self) -> str:
"""Returns the name of the group associated with the run.
Setting a group helps the W&B UI organize runs in a sensible way.
        If you are doing distributed training, you should give all of the
        runs in the training the same group.
        If you are doing cross-validation, you should give all the
        cross-validation folds the same group.
"""
return self._get_group()
@property # type: ignore
@_run_decorator._attach
def job_type(self) -> str:
run_obj = self._run_obj or self._run_obj_offline
return run_obj.job_type if run_obj else ""
@property # type: ignore
@_run_decorator._attach
def project(self) -> str:
"""Returns the name of the W&B project associated with the run."""
return self.project_name()
@_run_decorator._attach
def log_code(
self,
root: str = ".",
name: str = None,
include_fn: Callable[[str], bool] = _is_py_path,
exclude_fn: Callable[[str], bool] = filenames.exclude_wandb_fn,
) -> Optional[Artifact]:
"""Saves the current state of your code to a W&B Artifact.
By default, it walks the current directory and logs all files that end with `.py`.
Arguments:
root: The relative (to `os.getcwd()`) or absolute path to recursively find code from.
name: (str, optional) The name of our code artifact. By default, we'll name
the artifact `source-$PROJECT_ID-$ENTRYPOINT_RELPATH`. There may be scenarios where you want
many runs to share the same artifact. Specifying name allows you to achieve that.
include_fn: A callable that accepts a file path and
returns True when it should be included and False otherwise. This
defaults to: `lambda path: path.endswith(".py")`
exclude_fn: A callable that accepts a file path and returns `True` when it should be
excluded and `False` otherwise. This defaults to: `lambda path: False`
Examples:
Basic usage
```python
run.log_code()
```
Advanced usage
```python
run.log_code("../", include_fn=lambda path: path.endswith(".py") or path.endswith(".ipynb"))
```
Returns:
An `Artifact` object if code was logged
"""
if name is None:
name_string = wandb.util.make_artifact_name_safe(
f"{self._project}-{self._settings.program_relpath}"
)
name = f"source-{name_string}"
art = wandb.Artifact(name, "code")
files_added = False
if root is not None:
root = os.path.abspath(root)
for file_path in filenames.filtered_dir(root, include_fn, exclude_fn):
files_added = True
save_name = os.path.relpath(file_path, root)
art.add_file(file_path, name=save_name)
        # Add any manually staged files such as ipynb notebooks
for dirpath, _, files in os.walk(self._settings._tmp_code_dir):
for fname in files:
file_path = os.path.join(dirpath, fname)
save_name = os.path.relpath(file_path, self._settings._tmp_code_dir)
files_added = True
art.add_file(file_path, name=save_name)
if not files_added:
return None
self._code_artifact_info = {"name": name, "client_id": art._client_id}
return self._log_artifact(art)
def get_url(self) -> Optional[str]:
"""Returns the url for the W&B run, if there is one.
Offline runs will not have a url.
"""
if self._settings._offline:
wandb.termwarn("URL not available in offline run")
return None
return self._settings.run_url
def get_project_url(self) -> Optional[str]:
"""Returns the url for the W&B project associated with the run, if there is one.
Offline runs will not have a project url.
"""
if self._settings._offline:
wandb.termwarn("URL not available in offline run")
return None
return self._settings.project_url
def get_sweep_url(self) -> Optional[str]:
"""Returns the url for the sweep associated with the run, if there is one."""
if self._settings._offline:
wandb.termwarn("URL not available in offline run")
return None
return self._settings.sweep_url
@property # type: ignore
@_run_decorator._attach
def url(self) -> Optional[str]:
"""Returns the W&B url associated with the run."""
return self.get_url()
@property # type: ignore
@_run_decorator._attach
def entity(self) -> str:
"""Returns the name of the W&B entity associated with the run.
Entity can be a user name or the name of a team or organization.
"""
return self._entity or ""
def _label_internal(
self, code: str = None, repo: str = None, code_version: str = None
) -> None:
with telemetry.context(run=self) as tel:
if code and RE_LABEL.match(code):
tel.label.code_string = code
if repo and RE_LABEL.match(repo):
tel.label.repo_string = repo
if code_version and RE_LABEL.match(code_version):
tel.label.code_version = code_version
def _label(
self,
code: str = None,
repo: str = None,
code_version: str = None,
**kwargs: str,
) -> None:
if self._settings.label_disable:
return
for k, v in (("code", code), ("repo", repo), ("code_version", code_version)):
if v and not RE_LABEL.match(v):
wandb.termwarn(
"Label added for '{}' with invalid identifier '{}' (ignored).".format(
k, v
),
repeat=False,
)
for v in kwargs:
wandb.termwarn(
f"Label added for unsupported key '{v}' (ignored).",
repeat=False,
)
self._label_internal(code=code, repo=repo, code_version=code_version)
# update telemetry in the backend immediately for _label() callers
self._telemetry_flush()
def _label_probe_lines(self, lines: List[str]) -> None:
if not lines:
return
parsed = telemetry._parse_label_lines(lines)
if not parsed:
return
label_dict = {}
code = parsed.get("code") or parsed.get("c")
if code:
label_dict["code"] = code
repo = parsed.get("repo") or parsed.get("r")
if repo:
label_dict["repo"] = repo
code_ver = parsed.get("version") or parsed.get("v")
if code_ver:
label_dict["code_version"] = code_ver
self._label_internal(**label_dict)
def _label_probe_main(self) -> None:
m = sys.modules.get("__main__")
if not m:
return
doc = getattr(m, "__doc__", None)
if not doc:
return
doclines = doc.splitlines()
self._label_probe_lines(doclines)
# TODO: annotate jupyter Notebook class
def _label_probe_notebook(self, notebook: Any) -> None:
logger.info("probe notebook")
lines = None
try:
data = notebook.probe_ipynb()
cell0 = data.get("cells", [])[0]
lines = cell0.get("source")
# kaggle returns a string instead of a list
if isinstance(lines, str):
lines = lines.split()
except Exception as e:
logger.info(f"Unable to probe notebook: {e}")
return
if lines:
self._label_probe_lines(lines)
@_run_decorator._attach
def display(self, height: int = 420, hidden: bool = False) -> bool:
"""Displays this run in jupyter."""
if self._settings._jupyter and ipython.in_jupyter():
ipython.display_html(self.to_html(height, hidden))
return True
else:
wandb.termwarn(".display() only works in jupyter environments")
return False
@_run_decorator._attach
def to_html(self, height: int = 420, hidden: bool = False) -> str:
"""Generates HTML containing an iframe displaying the current run."""
url = self._settings.run_url + "?jupyter=true"
style = f"border:none;width:100%;height:{height}px;"
prefix = ""
if hidden:
style += "display:none;"
prefix = ipython.toggle_button()
return prefix + f'<iframe src="{url}" style="{style}"></iframe>'
def _repr_mimebundle_(
self, include: Any = None, exclude: Any = None
) -> Dict[str, str]:
return {"text/html": self.to_html(hidden=True)}
def _config_callback(
self,
key: Union[Tuple[str, ...], str] = None,
val: Any = None,
data: Dict[str, object] = None,
) -> None:
logger.info(f"config_cb {key} {val} {data}")
if self._backend and self._backend.interface:
self._backend.interface.publish_config(key=key, val=val, data=data)
def _config_artifact_callback(
self, key: str, val: Union[str, Artifact, dict]
) -> Union[Artifact, public.Artifact]:
# artifacts can look like dicts as they are passed into the run config
# since the run config stores them on the backend as a dict with fields shown
# in wandb.util.artifact_to_json
if _is_artifact_version_weave_dict(val):
assert isinstance(val, dict)
public_api = self._public_api()
artifact = public.Artifact.from_id(val["id"], public_api.client)
return self.use_artifact(artifact, use_as=key)
elif _is_artifact_string(val):
# this will never fail, but is required to make mypy happy
assert isinstance(val, str)
artifact_string, base_url, is_id = parse_artifact_string(val)
overrides = {}
if base_url is not None:
overrides = {"base_url": base_url}
public_api = public.Api(overrides)
else:
public_api = self._public_api()
if is_id:
artifact = public.Artifact.from_id(artifact_string, public_api._client)
else:
artifact = public_api.artifact(name=artifact_string)
# in the future we'll need to support using artifacts from
# different instances of wandb. simplest way to do that is
# likely to convert the retrieved public.Artifact to a wandb.Artifact
return self.use_artifact(artifact, use_as=key)
elif _is_artifact_object(val):
return self.use_artifact(val, use_as=key)
else:
raise ValueError(
f"Cannot call _config_artifact_callback on type {type(val)}"
)
def _set_config_wandb(self, key: str, val: Any) -> None:
self._config_callback(key=("_wandb", key), val=val)
def _summary_update_callback(self, summary_record: SummaryRecord) -> None:
if self._backend and self._backend.interface:
self._backend.interface.publish_summary(summary_record)
def _summary_get_current_summary_callback(self) -> Dict[str, Any]:
if not self._backend or not self._backend.interface:
return {}
ret = self._backend.interface.communicate_get_summary()
if not ret:
return {}
return proto_util.dict_from_proto_list(ret.item)
def _metric_callback(self, metric_record: MetricRecord) -> None:
if self._backend and self._backend.interface:
self._backend.interface._publish_metric(metric_record)
def _datatypes_callback(self, fname: str) -> None:
if not self._backend or not self._backend.interface:
return
files: "FilesDict" = dict(files=[(GlobStr(glob.escape(fname)), "now")])
self._backend.interface.publish_files(files)
def _visualization_hack(self, row: Dict[str, Any]) -> Dict[str, Any]:
# TODO(jhr): move visualize hack somewhere else
chart_keys = set()
for k in row:
if isinstance(row[k], Visualize):
key = row[k].get_config_key(k)
value = row[k].get_config_value(k)
row[k] = row[k]._data
self._config_callback(val=value, key=key)
elif isinstance(row[k], CustomChart):
chart_keys.add(k)
key = row[k].get_config_key(k)
value = row[k].get_config_value(
"Vega2", row[k].user_query(f"{k}_table")
)
row[k] = row[k]._data
self._config_callback(val=value, key=key)
for k in chart_keys:
# remove the chart key from the row
# TODO: is this really the right move? what if the user logs
# a non-custom chart to this key?
row[f"{k}_table"] = row.pop(k)
return row
def _partial_history_callback(
self,
row: Dict[str, Any],
step: Optional[int] = None,
commit: Optional[bool] = None,
) -> None:
if row:
row = self._visualization_hack(row)
now = time.time()
row["_timestamp"] = row.get("_timestamp", now)
row["_runtime"] = row.get("_runtime", now - self._get_start_time())
if self._backend and self._backend.interface:
not_using_tensorboard = len(wandb.patched["tensorboard"]) == 0
self._backend.interface.publish_partial_history(
row,
user_step=self._step,
step=step,
flush=commit,
publish_step=not_using_tensorboard,
)
def _console_callback(self, name: str, data: str) -> None:
# logger.info("console callback: %s, %s", name, data)
if self._backend and self._backend.interface:
self._backend.interface.publish_output(name, data)
def _console_raw_callback(self, name: str, data: str) -> None:
# logger.info("console callback: %s, %s", name, data)
if self._backend and self._backend.interface:
self._backend.interface.publish_output_raw(name, data)
def _tensorboard_callback(
self, logdir: str, save: bool = True, root_logdir: str = ""
) -> None:
logger.info("tensorboard callback: %s, %s", logdir, save)
if self._backend and self._backend.interface:
self._backend.interface.publish_tbdata(logdir, save, root_logdir)
def _set_library(self, library: _WandbSetup) -> None:
self._wl = library
def _set_backend(self, backend: "wandb.sdk.backend.backend.Backend") -> None:
self._backend = backend
def _set_internal_run_interface(
self,
interface: Union[
"wandb.sdk.interface.interface_queue.InterfaceQueue",
"wandb.sdk.interface.interface_grpc.InterfaceGrpc",
],
) -> None:
self._internal_run_interface = interface
def _set_reporter(self, reporter: Reporter) -> None:
self._reporter = reporter
def _set_teardown_hooks(self, hooks: List[TeardownHook]) -> None:
self._teardown_hooks = hooks
def _set_run_obj(self, run_obj: RunRecord) -> None:
self._run_obj = run_obj
self._entity = run_obj.entity
self._project = run_obj.project
# Grab the config from resuming
if run_obj.config:
c_dict = config_util.dict_no_value_from_proto_list(run_obj.config.update)
# TODO: Windows throws a wild error when this is set...
if "_wandb" in c_dict:
del c_dict["_wandb"]
# We update the config object here without triggering the callback
self._config._update(c_dict, allow_val_change=True, ignore_locked=True)
# Update the summary, this will trigger an un-needed graphql request :(
if run_obj.summary:
summary_dict = {}
for orig in run_obj.summary.update:
summary_dict[orig.key] = json.loads(orig.value_json)
self.summary.update(summary_dict)
self._step = self._get_starting_step()
# TODO: It feels weird to call this twice..
sentry_set_scope(
process_context="user",
settings_dict=self._settings,
)
def _set_run_obj_offline(self, run_obj: RunRecord) -> None:
self._run_obj_offline = run_obj
def _add_singleton(
self, data_type: str, key: str, value: Dict[Union[int, str], str]
) -> None:
"""Stores a singleton item to wandb config.
A singleton in this context is a piece of data that is continually
logged with the same value in each history step, but represented
as a single item in the config.
        We do this to avoid filling up history with a lot of repeated, unnecessary data.
Add singleton can be called many times in one run and it will only be
updated when the value changes. The last value logged will be the one
persisted to the server.
"""
value_extra = {"type": data_type, "key": key, "value": value}
if data_type not in self._config["_wandb"]:
self._config["_wandb"][data_type] = {}
if data_type in self._config["_wandb"][data_type]:
old_value = self._config["_wandb"][data_type][key]
else:
old_value = None
if value_extra != old_value:
self._config["_wandb"][data_type][key] = value_extra
self._config.persist()
def _log(
self,
data: Dict[str, Any],
step: Optional[int] = None,
commit: Optional[bool] = None,
) -> None:
if not isinstance(data, Mapping):
raise ValueError("wandb.log must be passed a dictionary")
if any(not isinstance(key, str) for key in data.keys()):
raise ValueError("Key values passed to `wandb.log` must be strings.")
self._partial_history_callback(data, step, commit)
if step is not None:
if os.getpid() != self._init_pid or self._is_attached:
wandb.termwarn(
"Note that setting step in multiprocessing can result in data loss. Please log your step values as a metric such as 'global_step'",
repeat=False,
)
# if step is passed in when tensorboard_sync is used we honor the step passed
# to make decisions about how to close out the history record, but will strip
# this history later on in publish_history()
if len(wandb.patched["tensorboard"]) > 0:
wandb.termwarn(
"Step cannot be set when using syncing with tensorboard. Please log your step values as a metric such as 'global_step'",
repeat=False,
)
if step > self._step:
self._step = step
if (step is None and commit is None) or commit:
self._step += 1
@_run_decorator._noop
@_run_decorator._attach
def log(
self,
data: Dict[str, Any],
step: Optional[int] = None,
commit: Optional[bool] = None,
sync: Optional[bool] = None,
) -> None:
"""Logs a dictonary of data to the current run's history.
Use `wandb.log` to log data from runs, such as scalars, images, video,
histograms, plots, and tables.
See our [guides to logging](https://docs.wandb.ai/guides/track/log) for
live examples, code snippets, best practices, and more.
The most basic usage is `wandb.log({"train-loss": 0.5, "accuracy": 0.9})`.
This will save the loss and accuracy to the run's history and update
the summary values for these metrics.
Visualize logged data in the workspace at [wandb.ai](https://wandb.ai),
or locally on a [self-hosted instance](https://docs.wandb.ai/self-hosted)
of the W&B app, or export data to visualize and explore locally, e.g. in
Jupyter notebooks, with [our API](https://docs.wandb.ai/guides/track/public-api-guide).
In the UI, summary values show up in the run table to compare single values across runs.
Summary values can also be set directly with `wandb.run.summary["key"] = value`.
Logged values don't have to be scalars. Logging any wandb object is supported.
For example `wandb.log({"example": wandb.Image("myimage.jpg")})` will log an
example image which will be displayed nicely in the W&B UI.
See the [reference documentation](https://docs.wandb.com/library/reference/data_types)
for all of the different supported types or check out our
[guides to logging](https://docs.wandb.ai/guides/track/log) for examples,
from 3D molecular structures and segmentation masks to PR curves and histograms.
        `wandb.Table`s can be used to log structured data. See our
[guide to logging tables](https://docs.wandb.ai/guides/data-vis/log-tables)
for details.
Logging nested metrics is encouraged and is supported in the W&B UI.
If you log with a nested dictionary like `wandb.log({"train":
{"acc": 0.9}, "val": {"acc": 0.8}})`, the metrics will be organized into
`train` and `val` sections in the W&B UI.
wandb keeps track of a global step, which by default increments with each
call to `wandb.log`, so logging related metrics together is encouraged.
        If it's inconvenient to log related metrics together,
calling `wandb.log({"train-loss": 0.5}, commit=False)` and then
`wandb.log({"accuracy": 0.9})` is equivalent to calling
`wandb.log({"train-loss": 0.5, "accuracy": 0.9})`.
`wandb.log` is not intended to be called more than a few times per second.
If you want to log more frequently than that it's better to aggregate
the data on the client side or you may get degraded performance.
Arguments:
data: (dict, optional) A dict of serializable python objects, i.e. `str`,
`int`, `float`, `Tensor`, `dict`, or any of the `wandb.data_types`.
commit: (boolean, optional) Save the metrics dict to the wandb server
and increment the step. If false, `wandb.log` only updates the current
metrics dict with the data argument and metrics won't be saved until
`wandb.log` is called with `commit=True`.
step: (integer, optional) The global step in processing. This persists
any non-committed earlier steps but defaults to not committing the
specified step.
sync: (boolean, True) This argument is deprecated and currently doesn't
change the behaviour of `wandb.log`.
Examples:
For additional and more detailed examples, see
[our guides to logging](https://docs.wandb.com/guides/track/log).
### Basic usage
<!--yeadoc-test:init-and-log-basic-->
```python
import wandb
wandb.init()
wandb.log({'accuracy': 0.9, 'epoch': 5})
```
### Incremental logging
<!--yeadoc-test:init-and-log-incremental-->
```python
import wandb
wandb.init()
wandb.log({'loss': 0.2}, commit=False)
# Somewhere else when I'm ready to report this step:
wandb.log({'accuracy': 0.8})
```
### Histogram
<!--yeadoc-test:init-and-log-histogram-->
```python
import numpy as np
import wandb
# sample gradients at random from normal distribution
gradients = np.random.randn(100, 100)
wandb.init()
wandb.log({"gradients": wandb.Histogram(gradients)})
```
### Image from numpy
<!--yeadoc-test:init-and-log-image-numpy-->
```python
import numpy as np
import wandb
wandb.init()
examples = []
for i in range(3):
pixels = np.random.randint(low=0, high=256, size=(100, 100, 3))
image = wandb.Image(pixels, caption=f"random field {i}")
examples.append(image)
wandb.log({"examples": examples})
```
### Image from PIL
<!--yeadoc-test:init-and-log-image-pillow-->
```python
import numpy as np
from PIL import Image as PILImage
import wandb
wandb.init()
examples = []
for i in range(3):
pixels = np.random.randint(low=0, high=256, size=(100, 100, 3), dtype=np.uint8)
pil_image = PILImage.fromarray(pixels, mode="RGB")
image = wandb.Image(pil_image, caption=f"random field {i}")
examples.append(image)
wandb.log({"examples": examples})
```
### Video from numpy
<!--yeadoc-test:init-and-log-video-numpy-->
```python
import numpy as np
import wandb
wandb.init()
# axes are (time, channel, height, width)
frames = np.random.randint(low=0, high=256, size=(10, 3, 100, 100), dtype=np.uint8)
wandb.log({"video": wandb.Video(frames, fps=4)})
```
### Matplotlib Plot
<!--yeadoc-test:init-and-log-matplotlib-->
```python
from matplotlib import pyplot as plt
import numpy as np
import wandb
wandb.init()
fig, ax = plt.subplots()
x = np.linspace(0, 10)
y = x * x
ax.plot(x, y) # plot y = x^2
wandb.log({"chart": fig})
```
### PR Curve
```python
wandb.log({'pr': wandb.plots.precision_recall(y_test, y_probas, labels)})
```
### 3D Object
```python
wandb.log({"generated_samples":
[wandb.Object3D(open("sample.obj")),
wandb.Object3D(open("sample.gltf")),
wandb.Object3D(open("sample.glb"))]})
```
Raises:
wandb.Error: if called before `wandb.init`
ValueError: if invalid data is passed
"""
if sync is not None:
deprecate.deprecate(
field_name=deprecate.Deprecated.run__log_sync,
warning_message=(
"`sync` argument is deprecated and does not affect the behaviour of `wandb.log`"
),
)
self._log(data=data, step=step, commit=commit)
@_run_decorator._attach
def save(
self,
glob_str: Optional[str] = None,
base_path: Optional[str] = None,
policy: "PolicyName" = "live",
) -> Union[bool, List[str]]:
"""Ensure all files matching `glob_str` are synced to wandb with the policy specified.
Arguments:
glob_str: (string) a relative or absolute path to a unix glob or regular
path. If this isn't specified the method is a no-op.
base_path: (string) the base path to run the glob relative to
policy: (string) one of `live`, `now`, or `end`
- live: upload the file as it changes, overwriting the previous version
- now: upload the file once now
- end: only upload file when the run ends
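Example:
A minimal, illustrative sketch; the checkpoint path below is a placeholder:
```python
import wandb
run = wandb.init()
# upload the file once now, keeping the "checkpoints" folder structure
run.save("checkpoints/model.h5", base_path="checkpoints", policy="now")
```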
"""
if glob_str is None:
# noop for historical reasons, run.save() may be called in legacy code
deprecate.deprecate(
field_name=deprecate.Deprecated.run__save_no_args,
warning_message=(
    "Calling wandb.run.save without any arguments is deprecated. "
    "Changes to attributes are automatically persisted."
),
)
return True
return self._save(glob_str, base_path, policy)
def _save(
self,
glob_str: Optional[str] = None,
base_path: Optional[str] = None,
policy: "PolicyName" = "live",
) -> Union[bool, List[str]]:
if policy not in ("live", "end", "now"):
raise ValueError(
'Only "live", "end" and "now" policies are currently supported.'
)
if isinstance(glob_str, bytes):
glob_str = glob_str.decode("utf-8")
if not isinstance(glob_str, str):
raise ValueError("Must call wandb.save(glob_str) with glob_str a str")
if base_path is None:
if os.path.isabs(glob_str):
base_path = os.path.dirname(glob_str)
wandb.termwarn(
    "Saving files without folders. If you want to preserve "
    "subdirectories, pass base_path to wandb.save, i.e. "
    'wandb.save("/mnt/folder/file.h5", base_path="/mnt")'
)
else:
base_path = "."
wandb_glob_str = GlobStr(os.path.relpath(glob_str, base_path))
if ".." + os.sep in wandb_glob_str:
raise ValueError("globs can't walk above base_path")
with telemetry.context(run=self) as tel:
tel.feature.save = True
if glob_str.startswith("gs://") or glob_str.startswith("s3://"):
wandb.termlog(
"%s is a cloud storage url, can't save file to wandb." % glob_str
)
return []
files = glob.glob(os.path.join(self._settings.files_dir, wandb_glob_str))
warn = False
if len(files) == 0 and "*" in wandb_glob_str:
warn = True
for path in glob.glob(glob_str):
file_name = os.path.relpath(path, base_path)
abs_path = os.path.abspath(path)
wandb_path = os.path.join(self._settings.files_dir, file_name)
wandb.util.mkdir_exists_ok(os.path.dirname(wandb_path))
# We overwrite symlinks because namespaces can change in Tensorboard
if os.path.islink(wandb_path) and abs_path != os.readlink(wandb_path):
os.remove(wandb_path)
os.symlink(abs_path, wandb_path)
elif not os.path.exists(wandb_path):
os.symlink(abs_path, wandb_path)
files.append(wandb_path)
if warn:
file_str = "%i file" % len(files)
if len(files) > 1:
file_str += "s"
wandb.termwarn(
(
"Symlinked %s into the W&B run directory, "
"call wandb.save again to sync new files."
)
% file_str
)
files_dict: "FilesDict" = dict(files=[(wandb_glob_str, policy)])
if self._backend and self._backend.interface:
self._backend.interface.publish_files(files_dict)
return files
@_run_decorator._attach
def restore(
self,
name: str,
run_path: Optional[str] = None,
replace: bool = False,
root: Optional[str] = None,
) -> Union[None, TextIO]:
return restore(
name,
run_path or self._get_path(),
replace,
root or self._settings.files_dir,
)
@_run_decorator._noop
@_run_decorator._attach
def finish(self, exit_code: int = None, quiet: Optional[bool] = None) -> None:
"""Marks a run as finished, and finishes uploading all data.
This is used when creating multiple runs in the same process. We automatically
call this method when your script exits or if you use the run context manager.
Arguments:
exit_code: Set to something other than 0 to mark a run as failed
quiet: Set to true to minimize log output
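Example:
A minimal sketch of creating and finishing several runs in one process:
```python
import wandb
for i in range(2):
    run = wandb.init(reinit=True)
    run.log({"run_index": i})
    run.finish()
```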
"""
return self._finish(exit_code, quiet)
def _finish(self, exit_code: int = None, quiet: Optional[bool] = None) -> None:
if quiet is not None:
self._quiet = quiet
with telemetry.context(run=self) as tel:
tel.feature.finish = True
logger.info(f"finishing run {self._get_path()}")
# detach jupyter hooks / others that need to happen before backend shutdown
for hook in self._teardown_hooks:
if hook.stage == TeardownStage.EARLY:
hook.call()
self._atexit_cleanup(exit_code=exit_code)
if self._wl and len(self._wl._global_run_stack) > 0:
self._wl._global_run_stack.pop()
# detach logger / others meant to be run after we've shut down the backend
for hook in self._teardown_hooks:
if hook.stage == TeardownStage.LATE:
hook.call()
self._teardown_hooks = []
module.unset_globals()
# inform manager this run is finished
manager = self._wl and self._wl._get_manager()
if manager:
manager._inform_finish(run_id=self._run_id)
@_run_decorator._noop
@_run_decorator._attach
def join(self, exit_code: int = None) -> None:
"""Deprecated alias for `finish()` - please use finish."""
deprecate.deprecate(
field_name=deprecate.Deprecated.run__join,
warning_message=(
"wandb.run.join() is deprecated, please use wandb.run.finish()."
),
)
self._finish(exit_code=exit_code)
@staticmethod
def plot_table(
vega_spec_name: str,
data_table: "wandb.Table",
fields: Dict[str, Any],
string_fields: Optional[Dict[str, Any]] = None,
) -> CustomChart:
"""Creates a custom plot on a table.
Arguments:
vega_spec_name: the name of the spec for the plot
data_table: a wandb.Table object containing the data to
be used on the visualization
fields: a dict mapping from table keys to fields that the custom
visualization needs
string_fields: a dict that provides values for any string constants
the custom visualization needs
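Example:
An illustrative sketch; the spec name and field keys below follow the
built-in `wandb/line/v0` spec used by `wandb.plot.line` and should be
treated as assumptions:
```python
import wandb
run = wandb.init()
table = wandb.Table(data=[[1, 1], [2, 4], [3, 9]], columns=["x", "y"])
chart = wandb.plot_table(
    "wandb/line/v0",
    table,
    {"x": "x", "y": "y"},
    {"title": "y = x squared"},
)
run.log({"my_chart": chart})
```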
"""
return custom_chart(vega_spec_name, data_table, fields, string_fields or {})
def _add_panel(
self, visualize_key: str, panel_type: str, panel_config: dict
) -> None:
config = {
"panel_type": panel_type,
"panel_config": panel_config,
}
self._config_callback(val=config, key=("_wandb", "visualize", visualize_key))
def _set_globals(self) -> None:
module.set_global(
run=self,
config=self.config,
log=self.log,
summary=self.summary,
save=self.save,
use_artifact=self.use_artifact,
log_artifact=self.log_artifact,
define_metric=self.define_metric,
plot_table=self.plot_table,
alert=self.alert,
mark_preempting=self.mark_preempting,
)
def _redirect(
self,
stdout_slave_fd: Optional[int],
stderr_slave_fd: Optional[int],
console: SettingsConsole = None,
) -> None:
if console is None:
console = self._settings._console
# only use raw for service to minimize potential changes
if console == SettingsConsole.WRAP:
if self._settings._require_service:
console = SettingsConsole.WRAP_RAW
else:
console = SettingsConsole.WRAP_EMU
logger.info("redirect: %s", console)
out_redir: redirect.RedirectBase
err_redir: redirect.RedirectBase
# raw output handles the output_log writing in the internal process
if console in {SettingsConsole.REDIRECT, SettingsConsole.WRAP_EMU}:
output_log_path = os.path.join(
self._settings.files_dir, filenames.OUTPUT_FNAME
)
# output writer might have been setup, see wrap_fallback case
if not self._output_writer:
self._output_writer = filesystem.CRDedupedFile(
open(output_log_path, "wb")
)
if console == SettingsConsole.REDIRECT:
logger.info("Redirecting console.")
out_redir = redirect.Redirect(
src="stdout",
cbs=[
lambda data: self._console_callback("stdout", data),
self._output_writer.write, # type: ignore
],
)
err_redir = redirect.Redirect(
src="stderr",
cbs=[
lambda data: self._console_callback("stderr", data),
self._output_writer.write, # type: ignore
],
)
if os.name == "nt":
def wrap_fallback() -> None:
if self._out_redir:
self._out_redir.uninstall()
if self._err_redir:
self._err_redir.uninstall()
msg = (
"Tensorflow detected. Stream redirection is not supported "
"on Windows when tensorflow is imported. Falling back to "
"wrapping stdout/err."
)
wandb.termlog(msg)
self._redirect(None, None, console=SettingsConsole.WRAP)
add_import_hook("tensorflow", wrap_fallback)
elif console == SettingsConsole.WRAP_EMU:
logger.info("Wrapping output streams.")
out_redir = redirect.StreamWrapper(
src="stdout",
cbs=[
lambda data: self._console_callback("stdout", data),
self._output_writer.write, # type: ignore
],
)
err_redir = redirect.StreamWrapper(
src="stderr",
cbs=[
lambda data: self._console_callback("stderr", data),
self._output_writer.write, # type: ignore
],
)
elif console == SettingsConsole.WRAP_RAW:
logger.info("Wrapping output streams.")
out_redir = redirect.StreamRawWrapper(
src="stdout",
cbs=[
lambda data: self._console_raw_callback("stdout", data),
],
)
err_redir = redirect.StreamRawWrapper(
src="stderr",
cbs=[
lambda data: self._console_raw_callback("stderr", data),
],
)
elif console == SettingsConsole.OFF:
return
else:
raise ValueError("unhandled console")
try:
out_redir.install()
err_redir.install()
self._out_redir = out_redir
self._err_redir = err_redir
logger.info("Redirects installed.")
except Exception as e:
print(e)
logger.error("Failed to redirect.", exc_info=e)
return
def _restore(self) -> None:
logger.info("restore")
# TODO(jhr): drain and shutdown all threads
if self._out_redir:
self._out_redir.uninstall()
if self._err_redir:
self._err_redir.uninstall()
logger.info("restore done")
def _atexit_cleanup(self, exit_code: int = None) -> None:
if self._backend is None:
logger.warning("process exited without backend configured")
return
if self._atexit_cleanup_called:
return
self._atexit_cleanup_called = True
exit_code = exit_code or (self._hooks.exit_code if self._hooks else 0)
logger.info(f"got exitcode: {exit_code}")
if exit_code == 0:
# Cleanup our resume file on a clean exit
if os.path.exists(self._settings.resume_fname):
os.remove(self._settings.resume_fname)
self._exit_code = exit_code
report_failure = False
try:
self._on_finish()
except KeyboardInterrupt as ki:
if wandb.wandb_agent._is_running():
raise ki
wandb.termerror("Control-C detected -- Run data was not synced")
if not self._settings._jupyter:
os._exit(-1)
except Exception as e:
if not self._settings._jupyter:
report_failure = True
self._console_stop()
self._backend.cleanup()
logger.error("Problem finishing run", exc_info=e)
wandb.termerror("Problem finishing run")
traceback.print_exception(*sys.exc_info())
else:
self._on_final()
finally:
if report_failure:
os._exit(-1)
def _console_start(self) -> None:
logger.info("atexit reg")
self._hooks = ExitHooks()
manager = self._wl and self._wl._get_manager()
if not manager:
self._hooks.hook()
# NB: manager will perform atexit hook like behavior for outstanding runs
atexit.register(lambda: self._atexit_cleanup())
self._redirect(self._stdout_slave_fd, self._stderr_slave_fd)
def _console_stop(self) -> None:
self._restore()
if self._output_writer:
self._output_writer.close()
self._output_writer = None
def _on_init(self) -> None:
if self._backend and self._backend.interface:
logger.info("communicating current version")
self._check_version = self._backend.interface.communicate_check_version(
current_version=wandb.__version__
)
logger.info(f"got version response {self._check_version}")
def _on_start(self) -> None:
# would like to move _set_global to _on_ready to unify _on_start and _on_attach (we want to do the set globals after attach)
# TODO(console) However _console_start calls Redirect that uses `wandb.run` hence breaks
# TODO(jupyter) However _header calls _header_run_info that uses wandb.jupyter that uses `wandb.run` and hence breaks
self._set_globals()
self._header(
self._check_version, settings=self._settings, printer=self._printer
)
if self._settings.save_code and self._settings.code_dir is not None:
self.log_code(self._settings.code_dir)
# TODO(wandb-service) RunStatusChecker not supported yet (WB-7352)
if self._backend and self._backend.interface and not self._settings._offline:
self._run_status_checker = RunStatusChecker(self._backend.interface)
self._console_start()
self._on_ready()
def _on_attach(self) -> None:
"""Event triggered when run is attached to another run."""
with telemetry.context(run=self) as tel:
tel.feature.attach = True
self._set_globals()
self._is_attached = True
self._on_ready()
def _on_ready(self) -> None:
"""Event triggered when run is ready for the user."""
# start reporting any telemetry changes
self._telemetry_obj_active = True
self._telemetry_flush()
# object is about to be returned to the user, don't let them modify it
self._freeze()
def _log_job(self) -> None:
artifact = None
input_types = TypeRegistry.type_of(self.config.as_dict()).to_json()
output_types = TypeRegistry.type_of(self.summary._as_dict()).to_json()
import pkg_resources
installed_packages_list = sorted(
f"{d.key}=={d.version}" for d in iter(pkg_resources.working_set)
)
for job_creation_function in [
self._create_repo_job,
self._create_artifact_job,
self._create_image_job,
]:
artifact = job_creation_function(
input_types, output_types, installed_packages_list
)
if artifact:
break
else:
logger.info(
f"Failed to create job using {job_creation_function.__name__}"
)
def _construct_job_artifact(
self,
name: str,
source_dict: "JobSourceDict",
installed_packages_list: List[str],
patch_path: Optional[os.PathLike] = None,
) -> "Artifact":
job_artifact = wandb.Artifact(name, type="job")
if patch_path and os.path.exists(patch_path):
job_artifact.add_file(patch_path, "diff.patch")
with job_artifact.new_file("requirements.frozen.txt") as f:
f.write("\n".join(installed_packages_list))
with job_artifact.new_file("source_info.json") as f:
f.write(json.dumps(source_dict))
default_config = {}
for k, v in self.config.as_dict().items():
if _is_artifact_object(v):
default_config[k] = artifact_to_json(v)
else:
default_config[k] = v
job_artifact.metadata["config_defaults"] = default_config
return job_artifact
def _create_repo_job(
self,
input_types: Dict[str, Any],
output_types: Dict[str, Any],
installed_packages_list: List[str],
) -> "Optional[Artifact]":
"""Create a job version artifact from a repo."""
has_repo = self._remote_url is not None and self._commit is not None
program_relpath = self._settings.program_relpath
if not has_repo or program_relpath is None:
return None
assert self._remote_url is not None
assert self._commit is not None
name = wandb.util.make_artifact_name_safe(
f"job-{self._remote_url}_{program_relpath}"
)
patch_path = os.path.join(self._settings.files_dir, DIFF_FNAME)
source_info: JobSourceDict = {
"_version": "v0",
"source_type": "repo",
"source": {
"git": {
"remote": self._remote_url,
"commit": self._commit,
},
"entrypoint": [
sys.executable.split("/")[-1],
program_relpath,
],
},
"input_types": input_types,
"output_types": output_types,
"runtime": self._settings._python,
}
job_artifact = self._construct_job_artifact(
name, source_info, installed_packages_list, patch_path
)
artifact = self.log_artifact(job_artifact)
return artifact
def _create_artifact_job(
self,
input_types: Dict[str, Any],
output_types: Dict[str, Any],
installed_packages_list: List[str],
) -> "Optional[Artifact]":
if (
self._code_artifact_info is None
or self._run_obj is None
or self._settings.program_relpath is None
):
return None
artifact_client_id = self._code_artifact_info.get("client_id")
name = f"job-{self._code_artifact_info['name']}"
source_info: JobSourceDict = {
"_version": "v0",
"source_type": "artifact",
"source": {
"artifact": f"wandb-artifact://_id/{artifact_client_id}",
"entrypoint": [
sys.executable.split("/")[-1],
self._settings.program_relpath,
],
},
"input_types": input_types,
"output_types": output_types,
"runtime": self._settings._python,
}
job_artifact = self._construct_job_artifact(
name, source_info, installed_packages_list
)
artifact = self.log_artifact(job_artifact)
return artifact
def _create_image_job(
self,
input_types: Dict[str, Any],
output_types: Dict[str, Any],
installed_packages_list: List[str],
) -> "Optional[Artifact]":
docker_image_name = os.getenv("WANDB_DOCKER")
if docker_image_name is None:
return None
name = wandb.util.make_artifact_name_safe(f"job-{docker_image_name}")
source_info: JobSourceDict = {
"_version": "v0",
"source_type": "image",
"source": {"image": docker_image_name},
"input_types": input_types,
"output_types": output_types,
"runtime": self._settings._python,
}
job_artifact = self._construct_job_artifact(
name, source_info, installed_packages_list
)
artifact = self.log_artifact(job_artifact)
return artifact
def _on_finish(self) -> None:
trigger.call("on_finished")
# populate final import telemetry
with telemetry.context(run=self) as tel:
self._telemetry_imports(tel.imports_finish)
if self._run_status_checker:
self._run_status_checker.stop()
if not self._settings._offline and self._settings.enable_job_creation:
self._log_job()
self._console_stop() # TODO: there's a race here with jupyter console logging
if self._backend and self._backend.interface:
# telemetry could have changed, publish final data
self._telemetry_flush()
# TODO: we need to handle catastrophic failure better
# some tests were timing out on sending exit for reasons not clear to me
self._backend.interface.publish_exit(self._exit_code)
self._footer_exit_status_info(
self._exit_code, settings=self._settings, printer=self._printer
)
while not (self._poll_exit_response and self._poll_exit_response.done):
if self._backend and self._backend.interface:
self._poll_exit_response = (
self._backend.interface.communicate_poll_exit()
)
logger.info(f"got exit ret: {self._poll_exit_response}")
self._footer_file_pusher_status_info(
self._poll_exit_response,
printer=self._printer,
)
time.sleep(0.1)
if self._backend and self._backend.interface:
self._sampled_history = (
self._backend.interface.communicate_sampled_history()
)
self._final_summary = self._backend.interface.communicate_get_summary()
if self._backend:
self._backend.cleanup()
if self._run_status_checker:
self._run_status_checker.join()
def _on_final(self) -> None:
self._footer(
self._sampled_history,
self._final_summary,
self._poll_exit_response,
self._check_version,
self._reporter,
self._quiet,
settings=self._settings,
printer=self._printer,
)
@_run_decorator._attach
def define_metric(
self,
name: str,
step_metric: Union[str, wandb_metric.Metric, None] = None,
step_sync: bool = None,
hidden: bool = None,
summary: str = None,
goal: str = None,
overwrite: bool = None,
**kwargs: Any,
) -> wandb_metric.Metric:
"""Define metric properties which will later be logged with `wandb.log()`.
Arguments:
name: Name of the metric.
step_metric: Independent variable associated with the metric.
step_sync: Automatically add `step_metric` to history if needed.
Defaults to True if step_metric is specified.
hidden: Hide this metric from automatic plots.
summary: Specify aggregate metrics added to summary.
Supported aggregations: "min", "max", "mean", "best", "last", "copy", "none".
Default aggregation is `copy`.
Aggregation `best` defaults to `goal`==`minimize`.
goal: Specify direction for optimizing the metric.
Supported directions: "minimize", "maximize".
Returns:
A metric object is returned that can be further specified.
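Example:
A minimal sketch that uses `epoch` as the x-axis for a validation metric
and keeps the minimum loss in the run summary:
```python
import wandb
run = wandb.init()
run.define_metric("epoch")
run.define_metric("val/loss", step_metric="epoch", summary="min")
for epoch in range(3):
    run.log({"epoch": epoch, "val/loss": 1.0 / (epoch + 1)})
```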
"""
return self._define_metric(
name, step_metric, step_sync, hidden, summary, goal, overwrite, **kwargs
)
def _define_metric(
self,
name: str,
step_metric: Union[str, wandb_metric.Metric, None] = None,
step_sync: bool = None,
hidden: bool = None,
summary: str = None,
goal: str = None,
overwrite: bool = None,
**kwargs: Any,
) -> wandb_metric.Metric:
if not name:
raise wandb.Error("define_metric() requires non-empty name argument")
for k in kwargs:
wandb.termwarn(f"Unhandled define_metric() arg: {k}")
if isinstance(step_metric, wandb_metric.Metric):
step_metric = step_metric.name
for arg_name, arg_val, exp_type in (
("name", name, str),
("step_metric", step_metric, str),
("step_sync", step_sync, bool),
("hidden", hidden, bool),
("summary", summary, str),
("goal", goal, str),
("overwrite", overwrite, bool),
):
# NOTE: type checking is broken for isinstance and str
if arg_val is not None and not isinstance(arg_val, exp_type):
arg_type = type(arg_val).__name__
raise wandb.Error(
"Unhandled define_metric() arg: {} type: {}".format(
arg_name, arg_type
)
)
stripped = name[:-1] if name.endswith("*") else name
if "*" in stripped:
raise wandb.Error(
"Unhandled define_metric() arg: name (glob suffixes only): {}".format(
name
)
)
summary_ops: Optional[Sequence[str]] = None
if summary:
summary_items = [s.lower() for s in summary.split(",")]
summary_ops = []
valid = {"min", "max", "mean", "best", "last", "copy", "none"}
for i in summary_items:
if i not in valid:
raise wandb.Error(f"Unhandled define_metric() arg: summary op: {i}")
summary_ops.append(i)
goal_cleaned: Optional[str] = None
if goal is not None:
goal_cleaned = goal[:3].lower()
valid_goal = {"min", "max"}
if goal_cleaned not in valid_goal:
raise wandb.Error(f"Unhandled define_metric() arg: goal: {goal}")
m = wandb_metric.Metric(
name=name,
step_metric=step_metric,
step_sync=step_sync,
summary=summary_ops,
hidden=hidden,
goal=goal_cleaned,
overwrite=overwrite,
)
m._set_callback(self._metric_callback)
m._commit()
with telemetry.context(run=self) as tel:
tel.feature.metric = True
return m
# TODO(jhr): annotate this
@_run_decorator._attach
def watch(self, models, criterion=None, log="gradients", log_freq=100, idx=None, log_graph=False) -> None: # type: ignore
wandb.watch(models, criterion, log, log_freq, idx, log_graph)
# TODO(jhr): annotate this
@_run_decorator._attach
def unwatch(self, models=None) -> None: # type: ignore
wandb.unwatch(models=models)
def _swap_artifact_name(self, artifact_name: str, use_as: Optional[str]) -> str:
artifact_key_string = use_as or artifact_name
replacement_artifact_info = self._launch_artifact_mapping.get(
artifact_key_string
)
if replacement_artifact_info is not None:
new_name = replacement_artifact_info.get("name")
entity = replacement_artifact_info.get("entity")
project = replacement_artifact_info.get("project")
if new_name is None or entity is None or project is None:
raise ValueError(
"Misconfigured artifact in launch config. Must include name, project and entity keys."
)
return f"{entity}/{project}/{new_name}"
elif replacement_artifact_info is None and use_as is None:
wandb.termwarn(
f"Could not find {artifact_name} in launch artifact mapping. Searching for unique artifacts with sequence name: {artifact_name}"
)
sequence_name = artifact_name.split(":")[0].split("/")[-1]
unique_artifact_replacement_info = (
self._unique_launch_artifact_sequence_names.get(sequence_name)
)
if unique_artifact_replacement_info is not None:
new_name = unique_artifact_replacement_info.get("name")
entity = unique_artifact_replacement_info.get("entity")
project = unique_artifact_replacement_info.get("project")
if new_name is None or entity is None or project is None:
raise ValueError(
"Misconfigured artifact in launch config. Must include name, project and entity keys."
)
return f"{entity}/{project}/{new_name}"
else:
wandb.termwarn(
f"Could not find swappable artifact at key: {use_as}. Using {artifact_name}"
)
return artifact_name
wandb.termwarn(
f"Could not find {artifact_key_string} in launch artifact mapping. Using {artifact_name}"
)
return artifact_name
def _detach(self) -> None:
pass
@_run_decorator._attach
def link_artifact(
self,
artifact: Union[public.Artifact, Artifact],
target_path: str,
aliases: List[str],
) -> None:
"""Links the given artifact to a portfolio (a promoted collection of artifacts).
The linked artifact will be visible in the UI for the specified portfolio.
Arguments:
artifact: the (public or local) artifact which will be linked
target_path: `str` - takes the following forms: {portfolio}, {project}/{portfolio},
or {entity}/{project}/{portfolio}
aliases: `List[str]` - optional alias(es) that will only be applied on this linked artifact inside the portfolio.
The alias "latest" will always be applied to the latest version of an artifact that is linked.
Returns:
None
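Example:
An illustrative sketch; the artifact name and portfolio path are placeholders:
```python
import wandb
run = wandb.init()
artifact = run.use_artifact("my-dataset:latest")  # placeholder artifact name
run.link_artifact(artifact, "my-portfolio", aliases=["candidate"])
```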
"""
portfolio, project, entity = wandb.util._parse_entity_project_item(target_path)
if self._backend and self._backend.interface:
if not self._settings._offline:
self._backend.interface.publish_link_artifact(
self,
artifact,
portfolio,
aliases,
entity,
project,
)
else:
# TODO: implement offline mode + sync
raise NotImplementedError
@_run_decorator._attach
def use_artifact(
self,
artifact_or_name: Union[str, public.Artifact, Artifact],
type: Optional[str] = None,
aliases: Optional[List[str]] = None,
use_as: Optional[str] = None,
) -> Union[public.Artifact, Artifact]:
"""Declare an artifact as an input to a run.
Call `download` or `file` on the returned object to get the contents locally.
Arguments:
artifact_or_name: (str or Artifact) An artifact name.
May be prefixed with entity/project/. Valid names
can be in the following forms:
- name:version
- name:alias
- digest
You can also pass an Artifact object created by calling `wandb.Artifact`
type: (str, optional) The type of artifact to use.
aliases: (list, optional) Aliases to apply to this artifact
use_as: (string, optional) Optional string indicating the purpose for which
the artifact was used. Will be shown in the UI.
Returns:
An `Artifact` object.
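Example:
A minimal sketch; the artifact name and type are placeholders for an
artifact that exists in your project:
```python
import wandb
run = wandb.init()
artifact = run.use_artifact("bike-dataset:latest", type="dataset")
local_dir = artifact.download()  # fetch the artifact contents locally
```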
"""
if self._settings._offline:
raise TypeError("Cannot use artifact when in offline mode.")
r = self._run_obj
assert r is not None
api = internal.Api(default_settings={"entity": r.entity, "project": r.project})
api.set_current_run_id(self._run_id)
if isinstance(artifact_or_name, str):
if self._launch_artifact_mapping:
name = self._swap_artifact_name(artifact_or_name, use_as)
else:
name = artifact_or_name
public_api = self._public_api()
artifact = public_api.artifact(type=type, name=name)
if type is not None and type != artifact.type:
raise ValueError(
"Supplied type {} does not match type {} of artifact {}".format(
type, artifact.type, artifact.name
)
)
artifact._use_as = use_as or artifact_or_name
if use_as:
if (
use_as in self._used_artifact_slots.keys()
and self._used_artifact_slots[use_as] != artifact.id
):
raise ValueError(
"Cannot call use_artifact with the same use_as argument more than once"
)
elif ":" in use_as or "/" in use_as:
raise ValueError(
"use_as cannot contain special characters ':' or '/'"
)
self._used_artifact_slots[use_as] = artifact.id
api.use_artifact(
artifact.id,
use_as=use_as or artifact_or_name,
)
return artifact
else:
artifact = artifact_or_name
if aliases is None:
aliases = []
elif isinstance(aliases, str):
aliases = [aliases]
if isinstance(artifact_or_name, wandb.Artifact):
if use_as is not None:
wandb.termwarn(
    "Indicating use_as is not supported when passing an instance of `wandb.Artifact`"
)
self._log_artifact(
artifact,
aliases=aliases,
is_user_created=True,
use_after_commit=True,
)
artifact.wait()
artifact._use_as = use_as or artifact.name
return artifact
elif isinstance(artifact, public.Artifact):
if (
self._launch_artifact_mapping
and artifact.name in self._launch_artifact_mapping.keys()
):
wandb.termwarn(
"Swapping artifacts is not supported when using an instance of `public.Artifact`. "
f"Using {artifact.name}."
)
artifact._use_as = use_as or artifact.name
api.use_artifact(
artifact.id, use_as=use_as or artifact._use_as or artifact.name
)
return artifact
else:
raise ValueError(
'You must pass an artifact name (e.g. "pedestrian-dataset:v1"), '
"an instance of `wandb.Artifact`, or `wandb.Api().artifact()` to `use_artifact`" # noqa: E501
)
@_run_decorator._attach
def log_artifact(
self,
artifact_or_path: Union[wandb_artifacts.Artifact, str],
name: Optional[str] = None,
type: Optional[str] = None,
aliases: Optional[List[str]] = None,
) -> wandb_artifacts.Artifact:
"""Declare an artifact as an output of a run.
Arguments:
artifact_or_path: (str or Artifact) A path to the contents of this artifact,
can be in the following forms:
- `/local/directory`
- `/local/directory/file.txt`
- `s3://bucket/path`
You can also pass an Artifact object created by calling
`wandb.Artifact`.
name: (str, optional) An artifact name. May be prefixed with entity/project.
Valid names can be in the following forms:
- name:version
- name:alias
- digest
This will default to the basename of the path prepended with the current
run id if not specified.
type: (str) The type of artifact to log, examples include `dataset`, `model`
aliases: (list, optional) Aliases to apply to this artifact,
defaults to `["latest"]`
Returns:
An `Artifact` object.
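Example:
A minimal sketch; the local file path is a placeholder:
```python
import wandb
run = wandb.init()
artifact = wandb.Artifact("trained-model", type="model")
artifact.add_file("model.pt")  # placeholder path to a local file
run.log_artifact(artifact, aliases=["latest", "best"])
```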
"""
return self._log_artifact(
artifact_or_path, name=name, type=type, aliases=aliases
)
@_run_decorator._attach
def upsert_artifact(
self,
artifact_or_path: Union[wandb_artifacts.Artifact, str],
name: Optional[str] = None,
type: Optional[str] = None,
aliases: Optional[List[str]] = None,
distributed_id: Optional[str] = None,
) -> wandb_artifacts.Artifact:
"""Declare (or append to) a non-finalized artifact as output of a run.
Note that you must call run.finish_artifact() to finalize the artifact.
This is useful when distributed jobs need to all contribute to the same artifact.
Arguments:
artifact_or_path: (str or Artifact) A path to the contents of this artifact,
can be in the following forms:
- `/local/directory`
- `/local/directory/file.txt`
- `s3://bucket/path`
You can also pass an Artifact object created by calling
`wandb.Artifact`.
name: (str, optional) An artifact name. May be prefixed with entity/project.
Valid names can be in the following forms:
- name:version
- name:alias
- digest
This will default to the basename of the path prepended with the current
run id if not specified.
type: (str) The type of artifact to log, examples include `dataset`, `model`
aliases: (list, optional) Aliases to apply to this artifact,
defaults to `["latest"]`
distributed_id: (string, optional) Unique string that all distributed jobs share. If None,
defaults to the run's group name.
Returns:
An `Artifact` object.
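Example:
An illustrative sketch of one worker in a distributed job; the group name,
artifact name, and shard path are placeholders:
```python
import wandb
run = wandb.init(group="dist-example")  # workers share this group
artifact = wandb.Artifact("predictions", type="dataset")
artifact.add_file("shard-0.txt")  # shard produced by this worker
run.upsert_artifact(artifact)
# once every worker has upserted, some process calls run.finish_artifact()
# with the same artifact name to finalize the version
run.finish()
```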
"""
if self._get_group() == "" and distributed_id is None:
raise TypeError(
"Cannot upsert artifact unless run is in a group or distributed_id is provided"
)
if distributed_id is None:
distributed_id = self._get_group()
return self._log_artifact(
artifact_or_path,
name=name,
type=type,
aliases=aliases,
distributed_id=distributed_id,
finalize=False,
)
@_run_decorator._attach
def finish_artifact(
self,
artifact_or_path: Union[wandb_artifacts.Artifact, str],
name: Optional[str] = None,
type: Optional[str] = None,
aliases: Optional[List[str]] = None,
distributed_id: Optional[str] = None,
) -> wandb_artifacts.Artifact:
"""Finishes a non-finalized artifact as output of a run.
Subsequent "upserts" with the same distributed ID will result in a new version.
Arguments:
artifact_or_path: (str or Artifact) A path to the contents of this artifact,
can be in the following forms:
- `/local/directory`
- `/local/directory/file.txt`
- `s3://bucket/path`
You can also pass an Artifact object created by calling
`wandb.Artifact`.
name: (str, optional) An artifact name. May be prefixed with entity/project.
Valid names can be in the following forms:
- name:version
- name:alias
- digest
This will default to the basename of the path prepended with the current
run id if not specified.
type: (str) The type of artifact to log, examples include `dataset`, `model`
aliases: (list, optional) Aliases to apply to this artifact,
defaults to `["latest"]`
distributed_id: (string, optional) Unique string that all distributed jobs share. If None,
defaults to the run's group name.
Returns:
An `Artifact` object.
"""
if self._get_group() == "" and distributed_id is None:
raise TypeError(
"Cannot finish artifact unless run is in a group or distributed_id is provided"
)
if distributed_id is None:
distributed_id = self._get_group()
return self._log_artifact(
artifact_or_path,
name,
type,
aliases,
distributed_id=distributed_id,
finalize=True,
)
def _log_artifact(
self,
artifact_or_path: Union[wandb_artifacts.Artifact, str],
name: Optional[str] = None,
type: Optional[str] = None,
aliases: Optional[List[str]] = None,
distributed_id: Optional[str] = None,
finalize: bool = True,
is_user_created: bool = False,
use_after_commit: bool = False,
) -> wandb_artifacts.Artifact:
api = internal.Api()
if api.settings().get("anonymous") == "true":
wandb.termwarn(
"Artifacts logged anonymously cannot be claimed and expire after 7 days."
)
if not finalize and distributed_id is None:
raise TypeError("Must provide distributed_id if artifact is not finalized")
if aliases is not None:
if any(invalid in alias for alias in aliases for invalid in ["/", ":"]):
raise ValueError(
"Aliases must not contain any of the following characters: /, :"
)
artifact, aliases = self._prepare_artifact(
artifact_or_path, name, type, aliases
)
artifact.distributed_id = distributed_id
self._assert_can_log_artifact(artifact)
if self._backend and self._backend.interface:
if not self._settings._offline:
future = self._backend.interface.communicate_artifact(
self,
artifact,
aliases,
self.step,
finalize=finalize,
is_user_created=is_user_created,
use_after_commit=use_after_commit,
)
artifact._logged_artifact = _LazyArtifact(self._public_api(), future)
else:
self._backend.interface.publish_artifact(
self,
artifact,
aliases,
finalize=finalize,
is_user_created=is_user_created,
use_after_commit=use_after_commit,
)
elif self._internal_run_interface:
self._internal_run_interface.publish_artifact(
self,
artifact,
aliases,
finalize=finalize,
is_user_created=is_user_created,
use_after_commit=use_after_commit,
)
return artifact
def _public_api(self, overrides: Optional[Dict[str, str]] = None) -> PublicApi:
overrides = {"run": self._run_id}
run_obj = self._run_obj
if run_obj is not None:
overrides["entity"] = run_obj.entity
overrides["project"] = run_obj.project
return public.Api(overrides)
# TODO(jhr): annotate this
def _assert_can_log_artifact(self, artifact) -> None: # type: ignore
if not self._settings._offline:
try:
public_api = self._public_api()
expected_type = public.Artifact.expected_type(
public_api.client,
artifact.name,
public_api.settings["entity"],
public_api.settings["project"],
)
except requests.exceptions.RequestException:
# Just return early if there is a network error. This is
# ok, as this function is intended to help catch an invalid
# type early, but not a hard requirement for valid operation.
return
if expected_type is not None and artifact.type != expected_type:
raise ValueError(
"Expected artifact type {}, got {}".format(
expected_type, artifact.type
)
)
def _prepare_artifact(
self,
artifact_or_path: Union[wandb_artifacts.Artifact, str],
name: Optional[str] = None,
type: Optional[str] = None,
aliases: Optional[List[str]] = None,
) -> Tuple[wandb_artifacts.Artifact, List[str]]:
aliases = aliases or ["latest"]
if isinstance(artifact_or_path, str):
if name is None:
name = f"run-{self._run_id}-{os.path.basename(artifact_or_path)}"
artifact = wandb.Artifact(name, type)
if os.path.isfile(artifact_or_path):
artifact.add_file(artifact_or_path)
elif os.path.isdir(artifact_or_path):
artifact.add_dir(artifact_or_path)
elif "://" in artifact_or_path:
artifact.add_reference(artifact_or_path)
else:
raise ValueError(
    "path must be a file, directory or external "
    "reference like s3://bucket/path"
)
else:
artifact = artifact_or_path
if not isinstance(artifact, wandb.Artifact):
raise ValueError(
"You must pass an instance of wandb.Artifact or a "
"valid file path to log_artifact"
)
if isinstance(aliases, str):
aliases = [aliases]
artifact.finalize()
return artifact, aliases
@_run_decorator._attach
def alert(
self,
title: str,
text: str,
level: Union[str, "AlertLevel"] = None,
wait_duration: Union[int, float, timedelta, None] = None,
) -> None:
"""Launch an alert with the given title and text.
Arguments:
title: (str) The title of the alert, must be less than 64 characters long.
text: (str) The text body of the alert.
level: (str or wandb.AlertLevel, optional) The alert level to use, either: `INFO`, `WARN`, or `ERROR`.
wait_duration: (int, float, or timedelta, optional) The time to wait (in seconds) before sending another
alert with this title.
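Example:
A minimal sketch; the accuracy value and threshold are stand-ins for illustration:
```python
import wandb
from datetime import timedelta
run = wandb.init()
accuracy = 0.4
if accuracy < 0.5:
    run.alert(
        title="Low accuracy",
        text=f"Accuracy {accuracy} is below the 0.5 threshold",
        level=wandb.AlertLevel.WARN,
        wait_duration=timedelta(minutes=5),
    )
```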
"""
level = level or wandb.AlertLevel.INFO
level_str: str = level.value if isinstance(level, wandb.AlertLevel) else level
if level_str not in {lev.value for lev in wandb.AlertLevel}:
raise ValueError("level must be one of 'INFO', 'WARN', or 'ERROR'")
wait_duration = wait_duration or timedelta(minutes=1)
if isinstance(wait_duration, int) or isinstance(wait_duration, float):
wait_duration = timedelta(seconds=wait_duration)
elif not callable(getattr(wait_duration, "total_seconds", None)):
raise ValueError(
"wait_duration must be an int, float, or datetime.timedelta"
)
wait_duration = int(wait_duration.total_seconds() * 1000)
if self._backend and self._backend.interface:
self._backend.interface.publish_alert(title, text, level_str, wait_duration)
def __enter__(self) -> "Run":
return self
def __exit__(
self,
exc_type: Type[BaseException],
exc_val: BaseException,
exc_tb: TracebackType,
) -> bool:
exit_code = 0 if exc_type is None else 1
self._finish(exit_code)
return exc_type is None
@_run_decorator._attach
def mark_preempting(self) -> None:
"""Marks this run as preempting.
Also tells the internal process to immediately report this to server.
"""
if self._backend and self._backend.interface:
self._backend.interface.publish_preempting()
# ------------------------------------------------------------------------------
# HEADER
# ------------------------------------------------------------------------------
# Note: All the header methods are static methods since we want to share the printing logic
# with the service execution path that doesn't have access to the run instance
@staticmethod
def _header(
check_version: Optional["CheckVersionResponse"] = None,
*,
settings: "Settings",
printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
# printer = printer or get_printer(settings._jupyter)
Run._header_version_check_info(
check_version, settings=settings, printer=printer
)
Run._header_wandb_version_info(settings=settings, printer=printer)
Run._header_sync_info(settings=settings, printer=printer)
Run._header_run_info(settings=settings, printer=printer)
@staticmethod
def _header_version_check_info(
check_version: Optional["CheckVersionResponse"] = None,
*,
settings: "Settings",
printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
if not check_version or settings._offline:
return
# printer = printer or get_printer(settings._jupyter)
if check_version.delete_message:
printer.display(check_version.delete_message, level="error")
elif check_version.yank_message:
printer.display(check_version.yank_message, level="warn")
printer.display(
check_version.upgrade_message, off=not check_version.upgrade_message
)
@staticmethod
def _header_wandb_version_info(
*,
settings: "Settings",
printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
if settings.quiet or settings.silent:
return
# printer = printer or get_printer(settings._jupyter)
printer.display(f"Tracking run with wandb version {wandb.__version__}")
@staticmethod
def _header_sync_info(
*,
settings: "Settings",
printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
# printer = printer or get_printer(settings._jupyter)
if settings._offline:
printer.display(
[
f"W&B syncing is set to {printer.code('`offline`')} in this directory. ",
f"Run {printer.code('`wandb online`')} or set {printer.code('WANDB_MODE=online')} "
"to enable cloud syncing.",
]
)
else:
info = [f"Run data is saved locally in {printer.files(settings.sync_dir)}"]
if not printer._html:
info.append(
f"Run {printer.code('`wandb offline`')} to turn off syncing."
)
printer.display(info, off=settings.quiet or settings.silent)
@staticmethod
def _header_run_info(
*,
settings: "Settings",
printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
if settings._offline or settings.silent:
return
run_url = settings.run_url
project_url = settings.project_url
sweep_url = settings.sweep_url
run_state_str = "Resuming run" if settings.resumed else "Syncing run"
run_name = settings.run_name
# printer = printer or get_printer(settings._jupyter)
if printer._html:
if not wandb.jupyter.maybe_display():
run_line = f"<strong>{printer.link(run_url, run_name)}</strong>"
project_line, sweep_line = "", ""
# TODO(settings): make settings the source of truth
if not wandb.jupyter.quiet():
doc_html = printer.link(wburls.get("doc_run"), "docs")
project_html = printer.link(project_url, "Weights & Biases")
project_line = f"to {project_html} ({doc_html})"
if sweep_url:
sweep_line = (
f"Sweep page: {printer.link(sweep_url, sweep_url)}"
)
printer.display(
[f"{run_state_str} {run_line} {project_line}", sweep_line]
)
else:
printer.display(f"{run_state_str} {printer.name(run_name)}")
if not settings.quiet:
printer.display(
f'{printer.emoji("star")} View project at {printer.link(project_url)}'
)
if sweep_url:
printer.display(
f'{printer.emoji("broom")} View sweep at {printer.link(sweep_url)}'
)
printer.display(
f'{printer.emoji("rocket")} View run at {printer.link(run_url)}'
)
# TODO(settings) use `wandb_settings` (if self.settings.anonymous == "true":)
if Api().api.settings().get("anonymous") == "true":
printer.display(
"Do NOT share these links with anyone. They can be used to claim your runs.",
level="warn",
)
# ------------------------------------------------------------------------------
# FOOTER
# ------------------------------------------------------------------------------
# Note: All the footer methods are static methods since we want to share the printing logic
# with the service execution path that doesn't have access to the run instance
@staticmethod
def _footer(
sampled_history: Optional["SampledHistoryResponse"] = None,
final_summary: Optional["GetSummaryResponse"] = None,
poll_exit_response: Optional[PollExitResponse] = None,
check_version: Optional["CheckVersionResponse"] = None,
reporter: Optional[Reporter] = None,
quiet: Optional[bool] = None,
*,
settings: "Settings",
printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
Run._footer_history_summary_info(
history=sampled_history,
summary=final_summary,
quiet=quiet,
settings=settings,
printer=printer,
)
Run._footer_sync_info(
pool_exit_response=poll_exit_response,
quiet=quiet,
settings=settings,
printer=printer,
)
Run._footer_log_dir_info(quiet=quiet, settings=settings, printer=printer)
Run._footer_version_check_info(
check_version=check_version, quiet=quiet, settings=settings, printer=printer
)
Run._footer_local_warn(
poll_exit_response=poll_exit_response,
quiet=quiet,
settings=settings,
printer=printer,
)
Run._footer_reporter_warn_err(
reporter=reporter, quiet=quiet, settings=settings, printer=printer
)
Run._footer_server_messages(
poll_exit_response=poll_exit_response,
quiet=quiet,
settings=settings,
printer=printer,
)
@staticmethod
def _footer_exit_status_info(
exit_code: Optional[int],
*,
settings: "Settings",
printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
if settings.silent:
return
status = "(success)." if not exit_code else f"(failed {exit_code})."
info = [
f"Waiting for W&B process to finish... {printer.status(status, bool(exit_code))}"
]
if not settings._offline and exit_code:
info.append(f"Press {printer.abort()} to abort syncing.")
printer.display(f'{" ".join(info)}')
# fixme: Temporary hack until we move to rich which allows multiple spinners
@staticmethod
def _footer_file_pusher_status_info(
poll_exit_responses: Optional[
Union[PollExitResponse, Dict[str, Optional[PollExitResponse]]]
] = None,
*,
printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
if not poll_exit_responses:
return
if isinstance(poll_exit_responses, PollExitResponse):
Run._footer_single_run_file_pusher_status_info(
poll_exit_responses, printer=printer
)
elif isinstance(poll_exit_responses, dict):
poll_exit_responses_list = list(poll_exit_responses.values())
assert all(
response is None or isinstance(response, PollExitResponse)
for response in poll_exit_responses_list
)
if len(poll_exit_responses_list) == 0:
return
elif len(poll_exit_responses_list) == 1:
Run._footer_single_run_file_pusher_status_info(
poll_exit_responses_list[0], printer=printer
)
else:
Run._footer_multiple_runs_file_pusher_status_info(
poll_exit_responses_list, printer=printer
)
else:
raise ValueError(
f"Got the type `{type(poll_exit_responses)}` for `poll_exit_responses`. "
"Expected either None, PollExitResponse or a Dict[str, Union[PollExitResponse, None]]"
)
@staticmethod
def _footer_single_run_file_pusher_status_info(
poll_exit_response: Optional[PollExitResponse] = None,
*,
printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
# todo: is this same as settings._offline?
if not poll_exit_response:
return
progress = poll_exit_response.pusher_stats
done = poll_exit_response.done
megabyte = wandb.util.POW_2_BYTES[2][1]
line = (
f"{progress.uploaded_bytes / megabyte :.3f} MB of {progress.total_bytes / megabyte:.3f} MB uploaded "
f"({progress.deduped_bytes / megabyte:.3f} MB deduped)\r"
)
percent_done = (
1.0
if progress.total_bytes == 0
else progress.uploaded_bytes / progress.total_bytes
)
printer.progress_update(line, percent_done)
if done:
printer.progress_close()
dedupe_fraction = (
progress.deduped_bytes / float(progress.total_bytes)
if progress.total_bytes > 0
else 0
)
if dedupe_fraction > 0.01:
printer.display(
f"W&B sync reduced upload amount by {dedupe_fraction * 100:.1f}% "
)
@staticmethod
def _footer_multiple_runs_file_pusher_status_info(
poll_exit_responses: List[Optional[PollExitResponse]],
*,
printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
# todo: is this same as settings._offline?
if not all(poll_exit_responses):
return
megabyte = wandb.util.POW_2_BYTES[2][1]
total_files = sum(
sum(
[
response.file_counts.wandb_count,
response.file_counts.media_count,
response.file_counts.artifact_count,
response.file_counts.other_count,
]
)
for response in poll_exit_responses
if response and response.file_counts
)
uploaded = sum(
response.pusher_stats.uploaded_bytes
for response in poll_exit_responses
if response and response.pusher_stats
)
total = sum(
response.pusher_stats.total_bytes
for response in poll_exit_responses
if response and response.pusher_stats
)
line = f"Processing {len(poll_exit_responses)} runs with {total_files} files ({uploaded/megabyte :.2f} MB/{total/megabyte :.2f} MB)\r"
# line = "{}{:<{max_len}}\r".format(line, " ", max_len=(80 - len(line)))
printer.progress_update(line) # type: ignore [call-arg]
done = all(
[
poll_exit_response.done
for poll_exit_response in poll_exit_responses
if poll_exit_response
]
)
if done:
printer.progress_close()
@staticmethod
def _footer_sync_info(
pool_exit_response: Optional[PollExitResponse] = None,
quiet: Optional[bool] = None,
*,
settings: "Settings",
printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
if settings.silent:
return
# printer = printer or get_printer(settings._jupyter)
if settings._offline:
printer.display(
[
"You can sync this run to the cloud by running:",
printer.code(f"wandb sync {settings.sync_dir}"),
],
off=(quiet or settings.quiet),
)
else:
info = []
if settings.run_name and settings.run_url:
info = [
f"Synced {printer.name(settings.run_name)}: {printer.link(settings.run_url)}"
]
if pool_exit_response and pool_exit_response.file_counts:
logger.info("logging synced files")
file_counts = pool_exit_response.file_counts
info.append(
f"Synced {file_counts.wandb_count} W&B file(s), {file_counts.media_count} media file(s), "
f"{file_counts.artifact_count} artifact file(s) and {file_counts.other_count} other file(s)",
)
printer.display(info)
@staticmethod
def _footer_log_dir_info(
quiet: Optional[bool] = None,
*,
settings: "Settings",
printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
if (quiet or settings.quiet) or settings.silent:
return
log_dir = settings.log_user or settings.log_internal
if log_dir:
log_dir = os.path.dirname(log_dir.replace(os.getcwd(), "."))
printer.display(
f"Find logs at: {printer.files(log_dir)}",
)
@staticmethod
def _footer_history_summary_info(
history: Optional["SampledHistoryResponse"] = None,
summary: Optional["GetSummaryResponse"] = None,
quiet: Optional[bool] = None,
*,
settings: "Settings",
printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
if (quiet or settings.quiet) or settings.silent:
return
# printer = printer or get_printer(settings._jupyter)
panel = []
# Render history if available
if history:
logger.info("rendering history")
sampled_history = {
item.key: wandb.util.downsample(
item.values_float or item.values_int, 40
)
for item in history.item
if not item.key.startswith("_")
}
history_rows = []
for key, values in sorted(sampled_history.items()):
if any(not isinstance(value, numbers.Number) for value in values):
continue
sparkline = printer.sparklines(values)
if sparkline:
history_rows.append([key, sparkline])
if history_rows:
history_grid = printer.grid(
history_rows,
"Run history:",
)
panel.append(history_grid)
# Render summary if available
if summary:
final_summary = {
item.key: json.loads(item.value_json)
for item in summary.item
if not item.key.startswith("_")
}
logger.info("rendering summary")
summary_rows = []
for key, value in sorted(final_summary.items()):
# arrays etc. might be too large. for now, we just don't print them
if isinstance(value, str):
value = value[:20] + "..." * (len(value) >= 20)
summary_rows.append([key, value])
elif isinstance(value, numbers.Number):
value = round(value, 5) if isinstance(value, float) else value
summary_rows.append([key, str(value)])
else:
continue
if summary_rows:
summary_grid = printer.grid(
summary_rows,
"Run summary:",
)
panel.append(summary_grid)
if panel:
printer.display(printer.panel(panel))
@staticmethod
def _footer_local_warn(
poll_exit_response: Optional[PollExitResponse] = None,
quiet: Optional[bool] = None,
*,
settings: "Settings",
printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
if (quiet or settings.quiet) or settings.silent:
return
if settings._offline:
return
if not poll_exit_response or not poll_exit_response.local_info:
return
if settings.is_local:
local_info = poll_exit_response.local_info
latest_version, out_of_date = local_info.version, local_info.out_of_date
if out_of_date:
# printer = printer or get_printer(settings._jupyter)
printer.display(
f"Upgrade to the {latest_version} version of W&B Local to get the latest features. "
f"Learn more: {printer.link(wburls.get('upgrade_local'))}",
level="warn",
)
@staticmethod
def _footer_server_messages(
poll_exit_response: Optional[PollExitResponse] = None,
quiet: Optional[bool] = None,
*,
settings: "Settings",
printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
if (quiet or settings.quiet) or settings.silent:
return
if settings.disable_hints:
return
if poll_exit_response and poll_exit_response.server_messages:
for message in poll_exit_response.server_messages.item:
printer.display(
message.html_text if printer._html else message.utf_text,
default_text=message.plain_text,
level=message.level,
off=message.type.lower() != "footer",
)
@staticmethod
def _footer_version_check_info(
check_version: Optional["CheckVersionResponse"] = None,
quiet: Optional[bool] = None,
*,
settings: "Settings",
printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
if not check_version:
return
if settings._offline:
return
if (quiet or settings.quiet) or settings.silent:
return
# printer = printer or get_printer(settings._jupyter)
if check_version.delete_message:
printer.display(check_version.delete_message, level="error")
elif check_version.yank_message:
printer.display(check_version.yank_message, level="warn")
# only display upgrade message if packages are bad
package_problem = check_version.delete_message or check_version.yank_message
if package_problem and check_version.upgrade_message:
printer.display(check_version.upgrade_message)
@staticmethod
def _footer_reporter_warn_err(
reporter: Optional[Reporter] = None,
quiet: Optional[bool] = None,
*,
settings: "Settings",
printer: Union["PrinterTerm", "PrinterJupyter"],
) -> None:
if (quiet or settings.quiet) or settings.silent:
return
if not reporter:
return
# printer = printer or get_printer(settings._jupyter)
warning_lines = reporter.warning_lines
if warning_lines:
warnings = ["Warnings:"] + [f"{line}" for line in warning_lines]
if len(warning_lines) < reporter.warning_count:
warnings.append("More warnings...")
printer.display(warnings)
error_lines = reporter.error_lines
if error_lines:
errors = ["Errors:"] + [f"{line}" for line in error_lines]
if len(error_lines) < reporter.error_count:
errors.append("More errors...")
printer.display(errors)
# We define this outside of the run context to support restoring before init
def restore(
name: str,
run_path: Optional[str] = None,
replace: bool = False,
root: Optional[str] = None,
) -> Union[None, TextIO]:
"""Downloads the specified file from cloud storage.
The file is placed into the current directory or the run directory.
By default, the file is only downloaded if it doesn't already exist.
Arguments:
name: the name of the file
run_path: optional path to a run to pull files from, i.e. `username/project_name/run_id`.
If wandb.init has not been called, this is required.
replace: whether to download the file even if it already exists locally
root: the directory to download the file to. Defaults to the current
directory or the run directory if wandb.init was called.
Returns:
None if it can't find the file, otherwise a file object open for reading
Raises:
wandb.CommError: if we can't connect to the wandb backend
ValueError: if the file is not found or can't find run_path
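Example:
A minimal sketch; the file name and run path are placeholders:
```python
import wandb
weights_file = wandb.restore("model.h5", run_path="username/project/run_id")
if weights_file is not None:
    print(weights_file.name)  # local path of the downloaded file
```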
"""
is_disabled = wandb.run is not None and wandb.run.disabled
run = None if is_disabled else wandb.run
if run_path is None:
if run is not None:
run_path = run.path
else:
raise ValueError(
"run_path required when calling wandb.restore before wandb.init"
)
if root is None:
if run is not None:
root = run.dir
api = public.Api()
api_run = api.run(run_path)
if root is None:
root = os.getcwd()
path = os.path.join(root, name)
if os.path.exists(path) and replace is False:
return open(path)
if is_disabled:
return None
files = api_run.files([name])
if len(files) == 0:
return None
# if the file does not exist, the file has an md5 of 0
if files[0].md5 == "0":
raise ValueError(f"File {name} not found in {run_path or root}.")
return files[0].download(root=root, replace=True)
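# Illustrative usage sketch (not part of the original module; the run path and
# file name below are hypothetical placeholders):
#
#   import wandb
#   fobj = wandb.restore("model-best.h5", run_path="my-entity/my-project/abc123")
#   if fobj is not None:
#       print("restored file at", fobj.name)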
# propagate our doc string to the runs restore method
try:
Run.restore.__doc__ = restore.__doc__
except AttributeError:
pass
def finish(exit_code: int = None, quiet: bool = None) -> None:
"""Marks a run as finished, and finishes uploading all data.
This is used when creating multiple runs in the same process.
We automatically call this method when your script exits.
Arguments:
exit_code: Set to something other than 0 to mark a run as failed
quiet: Set to true to minimize log output
"""
if wandb.run:
wandb.run.finish(exit_code=exit_code, quiet=quiet)
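# Illustrative usage sketch (assumption, not from the original module): finishing
# several runs created in one process.
#
#   for seed in (0, 1, 2):
#       wandb.init(reinit=True)
#       wandb.log({"seed": seed})
#       wandb.finish()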
class _LazyArtifact(ArtifactInterface):
_api: PublicApi
_instance: Optional[ArtifactInterface] = None
_future: Any
def __init__(self, api: PublicApi, future: Any):
self._api = api
self._future = future
def _assert_instance(self) -> ArtifactInterface:
if not self._instance:
raise ValueError(
"Must call wait() before accessing logged artifact properties"
)
return self._instance
def __getattr__(self, item: str) -> Any:
self._assert_instance()
return getattr(self._instance, item)
def wait(self) -> ArtifactInterface:
if not self._instance:
resp = self._future.get().response.log_artifact_response
if resp.error_message:
raise ValueError(resp.error_message)
self._instance = public.Artifact.from_id(resp.artifact_id, self._api.client)
assert isinstance(
self._instance, ArtifactInterface
), "Insufficient permissions to fetch Artifact with id {} from {}".format(
resp.artifact_id, self._api.client.app_url
)
return self._instance
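    # Illustrative note based on the code above (not from the original comments):
    # `wait()` blocks on the pending log-artifact response, resolves it into a
    # public `Artifact`, and caches it so the delegating properties below work.
    # A hypothetical caller:
    #
    #   lazy = _LazyArtifact(api, future)  # `api` and `future` are placeholders
    #   lazy.wait()
    #   print(lazy.id, lazy.version)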
@property
def id(self) -> Optional[str]:
return self._assert_instance().id
@property
def version(self) -> str:
return self._assert_instance().version
@property
def name(self) -> str:
return self._assert_instance().name
@property
def type(self) -> str:
return self._assert_instance().type
@property
def entity(self) -> str:
return self._assert_instance().entity
@property
def project(self) -> str:
return self._assert_instance().project
@property
def manifest(self) -> "ArtifactManifest":
return self._assert_instance().manifest
@property
def digest(self) -> str:
return self._assert_instance().digest
@property
def state(self) -> str:
return self._assert_instance().state
@property
def size(self) -> int:
return self._assert_instance().size
@property
def commit_hash(self) -> str:
return self._assert_instance().commit_hash
@property
def description(self) -> Optional[str]:
return self._assert_instance().description
@description.setter
def description(self, desc: Optional[str]) -> None:
self._assert_instance().description = desc
@property
def metadata(self) -> dict:
return self._assert_instance().metadata
@metadata.setter
def metadata(self, metadata: dict) -> None:
self._assert_instance().metadata = metadata
@property
def aliases(self) -> List[str]:
return self._assert_instance().aliases
@aliases.setter
def aliases(self, aliases: List[str]) -> None:
self._assert_instance().aliases = aliases
def used_by(self) -> List["wandb.apis.public.Run"]:
return self._assert_instance().used_by()
def logged_by(self) -> "wandb.apis.public.Run":
return self._assert_instance().logged_by()
# Commenting this block out since this code is unreachable since LocalArtifact
# overrides them and therefore untestable.
# Leaving behind as we may want to support these in the future.
# def new_file(self, name: str, mode: str = "w") -> Any: # TODO: Refine Type
# return self._assert_instance().new_file(name, mode)
# def add_file(
# self,
# local_path: str,
# name: Optional[str] = None,
# is_tmp: Optional[bool] = False,
# ) -> Any: # TODO: Refine Type
# return self._assert_instance().add_file(local_path, name, is_tmp)
# def add_dir(self, local_path: str, name: Optional[str] = None) -> None:
# return self._assert_instance().add_dir(local_path, name)
# def add_reference(
# self,
# uri: Union["ArtifactEntry", str],
# name: Optional[str] = None,
# checksum: bool = True,
# max_objects: Optional[int] = None,
# ) -> Any: # TODO: Refine Type
# return self._assert_instance().add_reference(uri, name, checksum, max_objects)
# def add(self, obj: "WBValue", name: str) -> Any: # TODO: Refine Type
# return self._assert_instance().add(obj, name)
def get_path(self, name: str) -> "ArtifactEntry":
return self._assert_instance().get_path(name)
def get(self, name: str) -> "WBValue":
return self._assert_instance().get(name)
def download(self, root: Optional[str] = None, recursive: bool = False) -> str:
return self._assert_instance().download(root, recursive)
def checkout(self, root: Optional[str] = None) -> str:
return self._assert_instance().checkout(root)
def verify(self, root: Optional[str] = None) -> Any:
return self._assert_instance().verify(root)
def save(self) -> None:
return self._assert_instance().save()
def delete(self) -> None:
return self._assert_instance().delete()
| {
"content_hash": "dc2ce3a8141fc6dd39be67c6bb4600b4",
"timestamp": "",
"source": "github",
"line_count": 3694,
"max_line_length": 151,
"avg_line_length": 37.36897671900379,
"alnum_prop": 0.5647525010685231,
"repo_name": "wandb/client",
"id": "c2678ffc9678a88e5f793d1194ce0393258f5d66",
"size": "138041",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wandb/sdk/wandb_run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4902"
},
{
"name": "Dockerfile",
"bytes": "3491"
},
{
"name": "Jupyter Notebook",
"bytes": "7751"
},
{
"name": "Makefile",
"bytes": "1863"
},
{
"name": "Objective-C",
"bytes": "80764"
},
{
"name": "Python",
"bytes": "3634228"
},
{
"name": "Shell",
"bytes": "4662"
}
],
"symlink_target": ""
} |
import argparse
from util.analyser import Analyser
def parse_args():
parser = argparse.ArgumentParser(description="Analyse your Benchmark log")
parser.add_argument(
"path_to_log",
help="Path to File containing the logs of the benchmark."
)
return vars(parser.parse_args())
def main():
""" Main function """
args = parse_args()
path_to_log = args["path_to_log"]
analyser = Analyser(path_to_log)
analyser.print_stats()
if __name__ == "__main__":
    main()
| {
"content_hash": "28769a087d0fb9dc0f353b87d5aa9568",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 78,
"avg_line_length": 25.210526315789473,
"alnum_prop": 0.6576200417536534,
"repo_name": "Osslack/HANA_SSBM",
"id": "f86e56615ab69dee13516f083e62622dea3e8b90",
"size": "502",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/analyser/analyse.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "2306"
},
{
"name": "Jupyter Notebook",
"bytes": "1178283"
},
{
"name": "Python",
"bytes": "13679"
},
{
"name": "Shell",
"bytes": "10891"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from chatterbox import events, registry
class UserSavesSomemodelEvent(events.ChatterboxMailEvent):
originator = 'demoapp'
event = 'User saves somemodel'
template_subject = 'demoapp/email_subject.jinja'
template_body = 'demoapp/email_body.jinja'
mail_to = '[email protected]'
languages = ['en', 'de']
token_fields = (
'actor.username',
'obj.title',
'obj.body',
'obj.aprop'
)
# TODO(sthzg) Support metadata fields.
# metadata_fields = (
# 'actor.pk',
# 'obj.pk',
# )
registry.register(UserSavesSomemodelEvent)
| {
"content_hash": "e72e35aaa34244e408ed01d3831a38c9",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 58,
"avg_line_length": 26.36,
"alnum_prop": 0.6342943854324734,
"repo_name": "sthzg/django-chatterbox",
"id": "07fc5cec2971e5f0095dd9a8a6a7ac7c255395de",
"size": "683",
"binary": false,
"copies": "1",
"ref": "refs/heads/active_development",
"path": "django/djangoapp/demoapp/chatter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "702"
},
{
"name": "Python",
"bytes": "57339"
}
],
"symlink_target": ""
} |
"""Sets environment variables needed to run a chromium unit test."""
import os
import subprocess
import sys
# This is hardcoded to be src/ relative to this script.
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def fix_python_path(cmd):
"""Returns the fixed command line to call the right python executable."""
out = cmd[:]
if out[0] == 'python':
out[0] = sys.executable
elif out[0].endswith('.py'):
out.insert(0, sys.executable)
return out
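# Illustrative examples (not in the original file):
#   fix_python_path(['python', 'foo.py'])    -> [sys.executable, 'foo.py']
#   fix_python_path(['run_tests.py', '-v'])  -> [sys.executable, 'run_tests.py', '-v']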
def run_executable(cmd, env):
"""Runs an executable with:
- environment variable CR_SOURCE_ROOT set to the root directory.
- environment variable LANGUAGE to en_US.UTF-8.
- Reuses sys.executable automatically.
"""
  # Many tests assume an English interface...
env['LANGUAGE'] = 'en_US.UTF-8'
# Used by base/base_paths_linux.cc
env['CR_SOURCE_ROOT'] = os.path.abspath(ROOT_DIR).encode('utf-8')
# Ensure paths are correctly separated on windows.
cmd[0] = cmd[0].replace('/', os.path.sep)
cmd = fix_python_path(cmd)
try:
return subprocess.call(cmd, env=env)
except OSError:
print >> sys.stderr, 'Failed to start %s' % cmd
raise
def main():
return run_executable(sys.argv[1:], os.environ.copy())
if __name__ == "__main__":
sys.exit(main())
| {
"content_hash": "fa7bff14e60457d7c8b42b43728be547",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 75,
"avg_line_length": 27.73913043478261,
"alnum_prop": 0.6700626959247649,
"repo_name": "wubenqi/zutils",
"id": "bbbcac529d1d7594213faa25304fefde9771adb2",
"size": "1465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zutils/testing/test_env.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "581505"
},
{
"name": "C++",
"bytes": "10104356"
},
{
"name": "CMake",
"bytes": "44889"
},
{
"name": "CSS",
"bytes": "1905"
},
{
"name": "Groff",
"bytes": "5275"
},
{
"name": "HTML",
"bytes": "344166"
},
{
"name": "Lua",
"bytes": "3503"
},
{
"name": "Makefile",
"bytes": "17872"
},
{
"name": "Objective-C",
"bytes": "47417"
},
{
"name": "Objective-C++",
"bytes": "203586"
},
{
"name": "Python",
"bytes": "536742"
},
{
"name": "Shell",
"bytes": "26191"
},
{
"name": "nesC",
"bytes": "15206"
}
],
"symlink_target": ""
} |
"""Exception superclass for all HipPy exceptions."""
class Error(Exception):
"""Exception superclass for all HipPy exceptions."""
pass
| {
"content_hash": "966ed597182c1635364033c3a90c68bc",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 56,
"avg_line_length": 18.375,
"alnum_prop": 0.7006802721088435,
"repo_name": "Sean1708/HipPy",
"id": "a7f5b72a8a950be27d67f8cba48dcf926a3373f1",
"size": "147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hippy/error.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34794"
}
],
"symlink_target": ""
} |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tryit.settings.production")
application = get_wsgi_application()
| {
"content_hash": "19e1e3c9614222e4df1386c3db06da87",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 76,
"avg_line_length": 25.285714285714285,
"alnum_prop": 0.7966101694915254,
"repo_name": "lexotero/try-it",
"id": "749585fee5eaabfc4c984d8d92d61f07ed7e97a0",
"size": "177",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tryit/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18485"
}
],
"symlink_target": ""
} |
from pyvisdk.base.managed_object_types import ManagedObjectTypes
from pyvisdk.base.base_entity import BaseEntity
import logging
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
class HostHealthStatusSystem(BaseEntity):
'''This managed object manages the health state of the host.'''
def __init__(self, core, name=None, ref=None, type=ManagedObjectTypes.HostHealthStatusSystem):
super(HostHealthStatusSystem, self).__init__(core, name=name, ref=ref, type=type)
@property
def runtime(self):
''''''
return self.update('runtime')
def RefreshHealthStatusSystem(self):
'''Refresh the available runtime hardware health information.
'''
return self.delegate("RefreshHealthStatusSystem")()
def ResetSystemHealthInfo(self):
'''Resets the state of the sensors of the IPMI subsystem. On certain types of
hardware IPMI sensor states latch onto unhealthy states and will stay in an
        unhealthy state until the sensor state is reset. This method will explicitly
        reset the sensors' state.
'''
return self.delegate("ResetSystemHealthInfo")() | {
"content_hash": "a33b287d020de7b7c8f3b9c0c8e71e63",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 98,
"avg_line_length": 32.325,
"alnum_prop": 0.6403712296983759,
"repo_name": "xuru/pyvisdk",
"id": "f06edad0ab19abf286a5b626d54dbb3aff0de1c8",
"size": "1294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyvisdk/mo/host_health_status_system.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "369"
},
{
"name": "Python",
"bytes": "3037849"
},
{
"name": "Shell",
"bytes": "4517"
}
],
"symlink_target": ""
} |
import os
from setuptools import find_packages, setup
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-maslow',
version='0.6.2',
packages=find_packages(),
include_package_data=True,
license='MIT License', # example license
description='A simple Django base app.',
long_description=README,
url='http://www.obitec.co.za/',
author='JR Minnaar',
author_email='[email protected]',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.9', # replace "X.Y" as appropriate
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License', # example license
'Operating System :: OS Independent',
'Programming Language :: Python',
# Replace these appropriately if you are stuck on Python 2.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
| {
"content_hash": "fdc886e3c114219045f875a947e6cb9d",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 78,
"avg_line_length": 35.189189189189186,
"alnum_prop": 0.6221198156682027,
"repo_name": "obitec/django-maslow",
"id": "8366e84057824589f2bb6e4f1bd805240e8e05fc",
"size": "1302",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "95"
},
{
"name": "Python",
"bytes": "23864"
}
],
"symlink_target": ""
} |
from enum import IntEnum
import json
from multiprocessing import Pool
import pathlib
import numpy as np
import pandas as pd
BRANCH_ARTIFACTS_DIR = (
pathlib.Path(__file__).parent.resolve()
/ "googleapiclient"
/ "discovery_cache"
/ "documents"
)
MAIN_ARTIFACTS_DIR = (
pathlib.Path(__file__).parent.resolve()
/ ".."
/ "main"
/ "googleapiclient"
/ "discovery_cache"
/ "documents"
)
MULTIPROCESSING_NUM_PER_BATCH = 5
MULTIPROCESSING_NUM_AGENTS = 10
class ChangeType(IntEnum):
UNKNOWN = 0
DELETED = 1
ADDED = 2
CHANGED = 3
class DirectoryDoesNotExist(ValueError):
"""Raised when the specified directory does not exist."""
pass
class ChangeSummary:
"""Represents the change summary between 2 directories containing \
artifacts.
"""
def __init__(self, new_artifacts_dir, current_artifacts_dir, temp_dir, file_list):
"""Initializes an instance of a ChangeSummary.
Args:
new_artifacts_dir (str): The relative path to the directory with the
new discovery artifacts.
current_artifacts_dir (str): The relative path to the directory with
the current discovery artifacts.
temp_dir (str): The relative path to the directory used for
temporary storage where intermediate files will be stored.
file_list (list): A list of strings containing files to analyze.
"""
self._file_list = file_list
self._new_artifacts_dir = pathlib.Path(new_artifacts_dir)
self._current_artifacts_dir = pathlib.Path(current_artifacts_dir)
self._temp_dir = pathlib.Path(temp_dir)
# Sanity checks to ensure directories exist
self._raise_if_directory_not_found(self._new_artifacts_dir)
self._raise_if_directory_not_found(self._current_artifacts_dir)
self._raise_if_directory_not_found(self._temp_dir)
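    # Illustrative usage sketch (the temp directory and file name are hypothetical,
    # not from the original script):
    #
    #   summary = ChangeSummary(BRANCH_ARTIFACTS_DIR, MAIN_ARTIFACTS_DIR,
    #                           "temp", ["drive.v3.json"])
    #   summary.detect_discovery_changes()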
def _raise_if_directory_not_found(self, directory):
"""Raises if the `directory` doesn't exist
args:
directory (str): The relative path to the `directory`
"""
if not pathlib.Path(directory).exists():
raise DirectoryDoesNotExist(
"Directory does not exist : {0}".format(directory)
)
def _load_json_to_dataframe(self, file_path):
"""Returns a pandas dataframe from the json file provided.
args:
file_path (str): The relative path to the discovery artifact to
parse.
"""
# Create an empty dataframe as we will need to return it if the file
# doesn't exist
dataframe_doc = pd.DataFrame()
if pathlib.Path(file_path).is_file():
with open(file_path, "r") as f:
# Now load the json file into a pandas dataframe as a flat table
dataframe_doc = pd.json_normalize(json.load(f))
return dataframe_doc
def _get_discovery_differences(self, filename):
"""Returns a pandas dataframe which contains the differences with the
current and new discovery artifact directories, corresponding to the
file name provided.
args:
filename (str): The name of the discovery artifact to parse.
"""
# The paths of the 2 discovery artifacts to compare
current_artifact_path = self._current_artifacts_dir / filename
new_artifact_path = self._new_artifacts_dir / filename
# Use a helper functions to load the discovery artifacts into pandas
# dataframes
current_doc = self._load_json_to_dataframe(current_artifact_path)
new_doc = self._load_json_to_dataframe(new_artifact_path)
# Concatenate the 2 dataframes, transpose them, and create
# a new dataframe called combined_docs with columns
# `Key`, `CurrentValue`, `NewValue`.
combined_docs = (
pd.concat([current_doc, new_doc], keys=["CurrentValue", "NewValue"])
# Drop the index column
.reset_index(drop=True, level=1)
# Transpose the DataFrame, Resulting Columns should be
# ["Key", "CurrentValue", "New Value"]
.rename_axis(["Key"], axis=1).transpose()
# Drop the index column
.reset_index()
)
# When discovery documents are added, the column `CurrentValue` will
# not exist. In that case, we'll just populate with `np.nan`.
if "CurrentValue" not in combined_docs.columns:
combined_docs["CurrentValue"] = np.nan
# When discovery documents are deleted, the column `NewValue` will
# not exist. In that case, we'll just populate with `np.nan`.
if "NewValue" not in combined_docs.columns:
combined_docs["NewValue"] = np.nan
# Split the Key into 2 columns for `Parent` and `Child` in order
# to group keys with the same parents together to summarize the changes
# by parent.
parent_child_df = combined_docs["Key"].str.rsplit(".", 1, expand=True)
# Rename the columns and join them with the combined_docs dataframe.
# If we only have a `Parent` column, it means that the Key doesn't have
# any children.
if len(parent_child_df.columns) == 1:
parent_child_df.columns = ["Parent"]
else:
parent_child_df.columns = ["Parent", "Child"]
combined_docs = combined_docs.join(parent_child_df)
# Create a new column `Added` to identify rows which have new keys.
combined_docs["Added"] = np.where(
combined_docs["CurrentValue"].isnull(), True, False
)
# Create a new column `Deleted` to identify rows which have deleted keys.
combined_docs["Deleted"] = np.where(
combined_docs["NewValue"].isnull(), True, False
)
# Aggregate the keys added by grouping keys with the same parents
# together to summarize the changes by parent rather than by key.
parent_added_agg = (
combined_docs.groupby("Parent")
.Added.value_counts(normalize=True)
.reset_index(name="Proportion")
)
        # Add a column NumLevels to indicate the number of levels in the tree
# which will allow us to sort the parents in hierarchical order.
parent_added_agg["NumLevels"] = (
parent_added_agg["Parent"].str.split(".").apply(lambda x: len(x))
)
# Aggregate the keys deleted by grouping keys with the same parents
# together to summarize the changes by parent rather than by key.
parent_deleted_agg = (
combined_docs.groupby("Parent")
.Deleted.value_counts(normalize=True)
.reset_index(name="Proportion")
)
        # Add a column NumLevels to indicate the number of levels in the tree
# which will allow us to sort the parents in hierarchical order.
parent_deleted_agg["NumLevels"] = (
parent_added_agg["Parent"].str.split(".").apply(lambda x: len(x))
)
# Create a list of all parents that have been added in hierarchical
# order. When `Proportion` is 1, it means that the parent is new as all
# children keys have been added.
all_added = (
parent_added_agg[
(parent_added_agg["Proportion"] == 1)
& (parent_added_agg["Added"] == True)
][["Parent", "NumLevels"]]
.sort_values("NumLevels", ascending=True)
.Parent.to_list()
)
# Create a list of all parents that have been deleted in hierarchical
        # order. When `Proportion` is 1, it means that the parent is deleted as all
        # children keys have been deleted.
all_deleted = (
parent_deleted_agg[
(parent_deleted_agg["Proportion"] == 1)
& (parent_deleted_agg["Deleted"] == True)
][["Parent", "NumLevels"]]
.sort_values("NumLevels", ascending=True)
.Parent.to_list()
)
# Go through the list of parents that have been added. If we find any
# keys with parents which are a substring of the parent in this list,
# then it means that the entire parent is new. We don't need verbose
# information about the children, so we replace the parent.
for i in range(0, len(all_added)):
word = all_added[i]
combined_docs.Parent = np.where(
combined_docs["Parent"].str.startswith(word), word, combined_docs.Parent
)
# Go through the list of parents that have been deleted. If we find any
# keys with parents which are a substring of the parent in this list,
# then it means that the entire parent is deleted. We don't need verbose
# information about the children, so we replace the parent.
for i in range(0, len(all_deleted)):
word = all_deleted[i]
combined_docs.Parent = np.where(
combined_docs["Parent"].str.startswith(word), word, combined_docs.Parent
)
# Create a new dataframe with only the keys which have changed
docs_diff = combined_docs[
combined_docs["CurrentValue"] != combined_docs["NewValue"]
].copy(deep=False)
# Get the API and Version from the file name but exclude the extension.
api_version_string = filename.split(".")[:-1]
# Create columns `Name` and `Version` using the version string
docs_diff["Name"] = api_version_string[0]
docs_diff["Version"] = ".".join(api_version_string[1:])
# These conditions are used as arguments in the `np.where` function
# below.
deleted_condition = docs_diff["NewValue"].isnull()
added_condition = docs_diff["CurrentValue"].isnull()
# Create a new `ChangeType` column. The `np.where()` function is like a
        # ternary operator. When the `deleted_condition` is `True`, the
# `ChangeType` will be `ChangeType.Deleted`. If the added_condition is
# `True` the `ChangeType` will be `ChangeType.Added`, otherwise the
# `ChangeType` will be `ChangeType.Changed`.
docs_diff["ChangeType"] = np.where(
deleted_condition,
ChangeType.DELETED,
np.where(added_condition, ChangeType.ADDED, ChangeType.CHANGED),
)
# Filter out keys which rarely affect functionality. For example:
# {"description", "documentation", "enum", "etag", "revision", "title",
# "url", "rootUrl"}
docs_diff = docs_diff[
~docs_diff["Key"].str.contains(
"|".join(self._get_keys_to_ignore()), case=False
)
]
# Group keys with similar parents together and create a new column
# called 'Count' which indicates the number of keys that have been
# grouped together. The reason for the count column is that when keys
# have the same parent, we group them together to improve readability.
docs_diff_with_count = (
docs_diff.groupby(
["Parent", "Added", "Deleted", "Name", "Version", "ChangeType"]
)
.size()
.reset_index(name="Count")
)
# Add counts column
docs_diff = docs_diff.merge(docs_diff_with_count)
# When the count is greater than 1, update the key with the name of the
# parent since we are consolidating keys with the same parent.
docs_diff.loc[docs_diff["Count"] > 1, "Key"] = docs_diff["Parent"]
return docs_diff[
["Key", "Added", "Deleted", "Name", "Version", "ChangeType", "Count"]
].drop_duplicates()
def _build_summary_message(self, api_name, is_feature):
"""Returns a string containing the summary for a given api. The string
        returned will be in the format `fix(<api_name>): update the api`
        when `is_feature=False` and `feat(<api_name>): update the api`
        when `is_feature=True`.
args:
api_name (str): The name of the api to include in the summary.
is_feature (bool): If True, include the prefix `feat` otherwise use
`fix`
"""
# Build the conventional commit string based on the arguments provided
commit_type = "feat" if is_feature else "fix"
return "{0}({1}): update the api".format(commit_type, api_name)
def _get_keys_to_ignore(self):
"""Returns a list of strings with keys to ignore because they rarely
affect functionality.
args: None
"""
keys_to_ignore = [
"description",
"documentation",
"enum",
"etag",
"revision",
"title",
"url",
"rootUrl",
]
return keys_to_ignore
def _get_stable_versions(self, versions):
"""Returns a pandas series `pd.Series()` of boolean values,
corresponding to the given series, indicating whether the version is
considered stable or not.
args:
versions (object): a pandas series containing version
information for all discovery artifacts.
"""
# Use a regex on the version to find versions with the pattern
# <v>.<0-9>.<0-9>.<0-9> . Any api that matches this pattern will be
        # labeled as stable. In other words, v1, v1.4 and v1.4.5 are stable
        # but v1b1, v1alpha and v1beta1 are not stable.
return versions.str.extract(r"(v\d?\.?\d?\.?\d+$)").notnull()
def _get_summary_and_write_to_disk(self, dataframe, directory):
"""Writes summary information to file about changes made to discovery
artifacts based on the provided dataframe and returns a dataframe
        with the same. The file `'allapis.dataframe'` is saved to the provided
        `directory`.
args:
dataframe (object): a pandas dataframe containing summary change
information for all discovery artifacts
directory (str): path where the summary file should be saved
"""
dataframe["IsStable"] = self._get_stable_versions(dataframe["Version"])
# Create a filter for features, which contains only rows which have keys
# that have been deleted or added, that will be used as an argument in
# the `np.where()` call below.
filter_features = (dataframe["ChangeType"] == ChangeType.DELETED) | (
dataframe["ChangeType"] == ChangeType.ADDED
)
# Create a new column `IsFeature` to indicate which rows should be
# considered as features.
dataframe["IsFeature"] = np.where(filter_features, True, np.nan)
# Create a new column `IsFeatureAggregate` which will be used to
# summarize the api changes. We can either have feature or fix but not
# both.
dataframe["IsFeatureAggregate"] = dataframe.groupby("Name").IsFeature.transform(
lambda x: x.any()
)
# Create a new column `Summary`, which will contain a string with the
# conventional commit message.
dataframe["Summary"] = np.vectorize(self._build_summary_message)(
dataframe["Name"], dataframe["IsFeatureAggregate"]
)
# Write the final dataframe to disk as it will be used in the
# buildprbody.py script
dataframe.to_csv(directory / "allapis.dataframe")
return dataframe
def _write_verbose_changes_to_disk(self, dataframe, directory, summary_df):
"""Writes verbose information to file about changes made to discovery
artifacts based on the provided dataframe. A separate file is saved
        for each api in the provided `directory`. The extension of the
files will be `'.verbose'`.
args:
dataframe (object): a pandas dataframe containing verbose change
information for all discovery artifacts
directory (str): path where the summary file should be saved
summary_df (object): A dataframe containing a summary of the changes
"""
# Array of strings which will contains verbose change information for
# each api
verbose_changes = []
# Sort the dataframe to minimize file operations below.
dataframe.sort_values(
by=["Name", "Version", "ChangeType"], ascending=True, inplace=True
)
# Select only the relevant columns. We need to create verbose output
# by Api Name, Version and ChangeType so we need to group by these
# columns.
change_type_groups = dataframe[
["Name", "Version", "ChangeType", "Key", "Count"]
].groupby(["Name", "Version", "ChangeType"])
lastApi = ""
lastVersion = ""
lastType = ChangeType.UNKNOWN
f = None
for name, group in change_type_groups:
currentApi = name[0]
currentVersion = name[1]
currentType = name[2]
            # We need to handle file opening and closing when processing an API
            # which is different from the previous one
if lastApi != currentApi:
# If we are processing a new api, close the file used for
# processing the previous API
if f is not None:
f.writelines(verbose_changes)
f.close()
f = None
# Clear the array of strings with information from the previous
# api and reset the last version
verbose_changes = []
lastVersion = ""
# Create a file which contains verbose changes for the current
# API being processed
filename = "{0}.verbose".format(currentApi)
f = open(pathlib.Path(directory / filename), "a")
lastApi = currentApi
# Create a filter with only the rows for the current API
current_api_filter = summary_df["Name"] == currentApi
# Get the string in the `Summary` column for the current api and
# append it to `verbose_changes`. The `Summary` column contains
# the conventional commit message. Use pandas.Series.iloc[0] to
                # retrieve only the first element, since all the values in the
# summary column are the same for a given API.
verbose_changes.append(summary_df[current_api_filter].Summary.iloc[0])
            # If the version has changed, we need to append a new heading
# in the verbose summary which contains the api and version.
if lastVersion != currentVersion:
# Append a header string with the API and version
verbose_changes.append(
"\n\n#### {0}:{1}\n\n".format(currentApi, currentVersion)
)
lastVersion = currentVersion
lastType = ChangeType.UNKNOWN
# Whenever the change type is different, we need to create a new
# heading for the group of keys with the same change type.
if currentType != lastType:
if currentType == ChangeType.DELETED:
verbose_changes.append("\nThe following keys were deleted:\n")
elif currentType == ChangeType.ADDED:
verbose_changes.append("\nThe following keys were added:\n")
else:
verbose_changes.append("\nThe following keys were changed:\n")
lastType = currentType
# Append the keys, and corresponding count, in the same change
# type group.
verbose_changes.extend(
[
"- {0} (Total Keys: {1})\n".format(row["Key"], row["Count"])
for index, row in group[["Key", "Count"]].iterrows()
]
)
# Make sure to close the last file and write the changes.
if f is not None:
f.writelines(verbose_changes)
f.close()
f = None
def detect_discovery_changes(self):
"""Writes a summary of the changes to the discovery artifacts to disk
at the path specified in `temp_dir`.
args: None
"""
result = pd.DataFrame()
# Process files in parallel to improve performance
with Pool(processes=MULTIPROCESSING_NUM_AGENTS) as pool:
if len(self._file_list):
result = pd.concat(
pool.map(
self._get_discovery_differences,
self._file_list,
MULTIPROCESSING_NUM_PER_BATCH,
)
)
if len(result):
# Sort the resulting dataframe by `Name`, `Version`, `ChangeType`
# and `Key`
sort_columns = ["Name", "Version", "ChangeType", "Key"]
result.sort_values(by=sort_columns, ascending=True, inplace=True)
# Create a folder which be used by the `createcommits.sh` and
# `buildprbody.py` scripts.
pathlib.Path(self._temp_dir).mkdir(exist_ok=True)
# Create a summary which contains a conventional commit message
# for each API and write it to disk.
summary_df = self._get_summary_and_write_to_disk(result, self._temp_dir)
# Create verbose change information for each API which contains
# a list of changes by key and write it to disk.
self._write_verbose_changes_to_disk(result, self._temp_dir, summary_df)
| {
"content_hash": "32edad068db6e17b43f1128f298b70e6",
"timestamp": "",
"source": "github",
"line_count": 524,
"max_line_length": 88,
"avg_line_length": 41.711832061068705,
"alnum_prop": 0.5945921215171341,
"repo_name": "googleapis/google-api-python-client",
"id": "2742b00b2e2484c2eb6fedf5a6387c7b2677e590",
"size": "22430",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scripts/changesummary.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1276"
},
{
"name": "Python",
"bytes": "482401"
},
{
"name": "Shell",
"bytes": "32576"
}
],
"symlink_target": ""
} |
"""
Installs and configures nova
"""
import os
import uuid
import logging
from packstack.installer import validators
import packstack.installer.common_utils as utils
from packstack.installer.exceptions import ScriptRuntimeError
from packstack.modules.ospluginutils import NovaConfig, getManifestTemplate, appendManifestFile, manifestfiles
# Controller object will be initialized from main flow
controller = None
PLUGIN_NAME = "OS-NOVA"
logging.debug("plugin %s loaded", __name__)
def initConfig(controllerObject):
global controller
controller = controllerObject
paramsList = [
{"CMD_OPTION" : "novaapi-host",
"USAGE" : "The IP address of the server on which to install the Nova API service",
"PROMPT" : "Enter the IP address of the Nova API service",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_ip, validators.validate_ssh],
"DEFAULT_VALUE" : utils.getLocalhostIP(),
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_NOVA_API_HOST",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "novacert-host",
"USAGE" : "The IP address of the server on which to install the Nova Cert service",
"PROMPT" : "Enter the IP address of the Nova Cert service",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_ssh],
"DEFAULT_VALUE" : utils.getLocalhostIP(),
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_NOVA_CERT_HOST",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "novavncproxy-hosts",
"USAGE" : "The IP address of the server on which to install the Nova VNC proxy",
"PROMPT" : "Enter the IP address of the Nova VNC proxy",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_ssh],
"DEFAULT_VALUE" : utils.getLocalhostIP(),
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_NOVA_VNCPROXY_HOST",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "novacompute-hosts",
"USAGE" : "A comma separated list of IP addresses on which to install the Nova Compute services",
"PROMPT" : "Enter a comma separated list of IP addresses on which to install the Nova Compute services",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_multi_ssh],
"DEFAULT_VALUE" : utils.getLocalhostIP(),
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_NOVA_COMPUTE_HOSTS",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "novacompute-privif",
"USAGE" : "Private interface for Flat DHCP on the Nova compute servers",
"PROMPT" : "Enter the Private interface for Flat DHCP on the Nova compute servers",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_not_empty],
"DEFAULT_VALUE" : "eth1",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_NOVA_COMPUTE_PRIVIF",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "novanetwork-host",
"USAGE" : "The IP address of the server on which to install the Nova Network service",
"PROMPT" : "Enter the IP address of the Nova Network service",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_ip, validators.validate_ssh],
"DEFAULT_VALUE" : utils.getLocalhostIP(),
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_NOVA_NETWORK_HOST",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "novaconductor-host",
"USAGE" : "The IP address of the server on which to install the Nova Conductor service",
"PROMPT" : "Enter the IP address of the Nova Conductor service",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_ip, validators.validate_ssh],
"DEFAULT_VALUE" : utils.getLocalhostIP(),
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_NOVA_CONDUCTOR_HOST",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "nova-db-passwd",
"USAGE" : "The password to use for the Nova to access DB",
"PROMPT" : "Enter the password for the Nova DB access",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_not_empty],
"DEFAULT_VALUE" : uuid.uuid4().hex[:16],
"MASK_INPUT" : True,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_NOVA_DB_PW",
"USE_DEFAULT" : True,
"NEED_CONFIRM" : True,
"CONDITION" : False },
{"CMD_OPTION" : "nova-ks-passwd",
"USAGE" : "The password to use for the Nova to authenticate with Keystone",
"PROMPT" : "Enter the password for the Nova Keystone access",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_not_empty],
"DEFAULT_VALUE" : uuid.uuid4().hex[:16],
"MASK_INPUT" : True,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_NOVA_KS_PW",
"USE_DEFAULT" : True,
"NEED_CONFIRM" : True,
"CONDITION" : False },
{"CMD_OPTION" : "novanetwork-pubif",
"USAGE" : "Public interface on the Nova network server",
"PROMPT" : "Enter the Public interface on the Nova network server",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_not_empty],
"DEFAULT_VALUE" : "eth0",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_NOVA_NETWORK_PUBIF",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "novanetwork-privif",
"USAGE" : "Private interface for Flat DHCP on the Nova network server",
"PROMPT" : "Enter the Private interface for Flat DHCP on the Nova network server",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_not_empty],
"DEFAULT_VALUE" : "eth1",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_NOVA_NETWORK_PRIVIF",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "novanetwork-fixed-range",
"USAGE" : "IP Range for Flat DHCP",
"PROMPT" : "Enter the IP Range for Flat DHCP",
"OPTION_LIST" : ["^([\d]{1,3}\.){3}[\d]{1,3}/\d\d?$"],
"VALIDATORS" : [validators.validate_regexp],
"DEFAULT_VALUE" : "192.168.32.0/22",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_NOVA_NETWORK_FIXEDRANGE",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "novanetwork-floating-range",
"USAGE" : "IP Range for Floating IP's",
"PROMPT" : "Enter the IP Range for Floating IP's",
"OPTION_LIST" : ["^([\d]{1,3}\.){3}[\d]{1,3}/\d\d?$"],
"VALIDATORS" : [validators.validate_regexp],
"DEFAULT_VALUE" : "10.3.4.0/22",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_NOVA_NETWORK_FLOATRANGE",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "novasched-host",
"USAGE" : "The IP address of the server on which to install the Nova Scheduler service",
"PROMPT" : "Enter the IP address of the Nova Scheduler service",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_ssh],
"DEFAULT_VALUE" : utils.getLocalhostIP(),
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_NOVA_SCHED_HOST",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "novasched-cpu-allocation-ratio",
"USAGE" : "The overcommitment ratio for virtual to physical CPUs. "
"Set to 1.0 to disable CPU overcommitment",
"PROMPT" : "Enter the CPU overcommitment ratio. "
"Set to 1.0 to disable CPU overcommitment",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_float],
"DEFAULT_VALUE" : 16.0,
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_NOVA_SCHED_CPU_ALLOC_RATIO",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "novasched-ram-allocation-ratio",
"USAGE" : "The overcommitment ratio for virtual to physical RAM. "
"Set to 1.0 to disable RAM overcommitment",
"PROMPT" : "Enter the RAM overcommitment ratio. "
"Set to 1.0 to disable RAM overcommitment",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_float],
"DEFAULT_VALUE" : 1.5,
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_NOVA_SCHED_RAM_ALLOC_RATIO",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
]
groupDict = { "GROUP_NAME" : "NOVA",
"DESCRIPTION" : "Nova Options",
"PRE_CONDITION" : "CONFIG_NOVA_INSTALL",
"PRE_CONDITION_MATCH" : "y",
"POST_CONDITION" : False,
"POST_CONDITION_MATCH" : True}
controller.addGroup(groupDict, paramsList)
def initSequences(controller):
if controller.CONF['CONFIG_NOVA_INSTALL'] != 'y':
return
novaapisteps = [
{'title': 'Adding Nova API manifest entries', 'functions':[createapimanifest]},
{'title': 'Adding Nova Keystone manifest entries', 'functions':[createkeystonemanifest]},
{'title': 'Adding Nova Cert manifest entries', 'functions':[createcertmanifest]},
{'title': 'Adding Nova Conductor manifest entries', 'functions':[createconductormanifest]},
{'title': 'Adding Nova Compute manifest entries', 'functions':[createcomputemanifest]},
{'title': 'Adding Nova Network manifest entries', 'functions':[createnetworkmanifest]},
{'title': 'Adding Nova Scheduler manifest entries', 'functions':[createschedmanifest]},
{'title': 'Adding Nova VNC Proxy manifest entries', 'functions':[createvncproxymanifest]},
{'title': 'Adding Nova Common manifest entries', 'functions':[createcommonmanifest]},
]
controller.addSequence("Installing OpenStack Nova API", [], [], novaapisteps)
def createapimanifest():
manifestfile = "%s_api_nova.pp"%controller.CONF['CONFIG_NOVA_API_HOST']
manifestdata = getManifestTemplate("nova_api.pp")
appendManifestFile(manifestfile, manifestdata, 'novaapi')
def createkeystonemanifest():
manifestfile = "%s_keystone.pp"%controller.CONF['CONFIG_KEYSTONE_HOST']
manifestdata = getManifestTemplate("keystone_nova.pp")
appendManifestFile(manifestfile, manifestdata)
def createcertmanifest():
manifestfile = "%s_nova.pp"%controller.CONF['CONFIG_NOVA_CERT_HOST']
manifestdata = getManifestTemplate("nova_cert.pp")
appendManifestFile(manifestfile, manifestdata)
def createconductormanifest():
manifestfile = "%s_nova.pp"%controller.CONF['CONFIG_NOVA_CONDUCTOR_HOST']
manifestdata = getManifestTemplate("nova_conductor.pp")
appendManifestFile(manifestfile, manifestdata)
def check_ifcfg(host, device):
"""
    Raises ScriptRuntimeError if the given host does not have the given device.
"""
server = utils.ScriptRunner(host)
cmd = "ip addr show dev %s || ( echo Device %s does not exist && exit 1 )"
server.append(cmd % (device, device))
server.execute()
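# Illustrative call (host and device are hypothetical): check_ifcfg('10.0.0.5', 'eth1')
# runs `ip addr show dev eth1` on that host and raises ScriptRuntimeError if the
# device is not present.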
def bring_up_ifcfg(host, device):
"""
Brings given device up if it's down. Raises ScriptRuntimeError in case
of failure.
"""
server = utils.ScriptRunner(host)
server.append('ip link show up | grep "%s"' % device)
try:
server.execute()
except ScriptRuntimeError:
server.clear()
cmd = 'ip link set dev %s up'
server.append(cmd % device)
try:
server.execute()
except ScriptRuntimeError:
msg = ('Failed to bring up network interface %s on host %s.'
' Interface should be up so Openstack can work'
' properly.' % (device, host))
raise ScriptRuntimeError(msg)
def createcomputemanifest():
for host in controller.CONF["CONFIG_NOVA_COMPUTE_HOSTS"].split(","):
controller.CONF["CONFIG_NOVA_COMPUTE_HOST"] = host
manifestdata = getManifestTemplate("nova_compute.pp")
manifestfile = "%s_nova.pp"%host
nova_config_options = NovaConfig()
if host != controller.CONF["CONFIG_NOVA_NETWORK_HOST"]:
nova_config_options.addOption("DEFAULT/flat_interface", controller.CONF['CONFIG_NOVA_COMPUTE_PRIVIF'])
check_ifcfg(host, controller.CONF['CONFIG_NOVA_COMPUTE_PRIVIF'])
try:
bring_up_ifcfg(host, controller.CONF['CONFIG_NOVA_COMPUTE_PRIVIF'])
except ScriptRuntimeError, ex:
                # just warn the user so they can bring the interface up manually
                controller.MESSAGES.append(str(ex))
appendManifestFile(manifestfile, manifestdata + "\n" + nova_config_options.getManifestEntry())
def createnetworkmanifest():
host = controller.CONF['CONFIG_NOVA_NETWORK_HOST']
for i in ('CONFIG_NOVA_NETWORK_PRIVIF', 'CONFIG_NOVA_NETWORK_PUBIF'):
check_ifcfg(host, controller.CONF[i])
try:
bring_up_ifcfg(host, controller.CONF[i])
except ScriptRuntimeError, ex:
            # just warn the user so they can bring the interface up manually
            controller.MESSAGES.append(str(ex))
manifestfile = "%s_nova.pp" % host
manifestdata = getManifestTemplate("nova_network.pp")
appendManifestFile(manifestfile, manifestdata)
def createschedmanifest():
manifestfile = "%s_nova.pp"%controller.CONF['CONFIG_NOVA_SCHED_HOST']
manifestdata = getManifestTemplate("nova_sched.pp")
appendManifestFile(manifestfile, manifestdata)
def createvncproxymanifest():
manifestfile = "%s_nova.pp"%controller.CONF['CONFIG_NOVA_VNCPROXY_HOST']
manifestdata = getManifestTemplate("nova_vncproxy.pp")
appendManifestFile(manifestfile, manifestdata)
def createcommonmanifest():
for manifestfile, marker in manifestfiles.getFiles():
if manifestfile.endswith("_nova.pp"):
data = getManifestTemplate("nova_common.pp")
appendManifestFile(os.path.split(manifestfile)[1], data)
| {
"content_hash": "ed2b2ac694b8bc2fd33b1c2f35f92412",
"timestamp": "",
"source": "github",
"line_count": 357,
"max_line_length": 132,
"avg_line_length": 50.94677871148459,
"alnum_prop": 0.5037937101385529,
"repo_name": "skottler/packstack",
"id": "0102c2442f1e7b602628d7ccea56ac3adef793a0",
"size": "18188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packstack/plugins/nova_300.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import logging
import os
__author__ = 'Tim Schneider <[email protected]>'
__copyright__ = "Copyright 2015, Northbridge Development Konrad & Schneider GbR"
__credits__ = ["Tim Schneider", ]
__maintainer__ = "Tim Schneider"
__email__ = "[email protected]"
__status__ = "Development"
logger = logging.getLogger(__name__)
import pandoc
pandoc.core.PANDOC_PATH = '/usr/local/bin/pandoc'
doc = pandoc.Document()
doc.markdown = open('README.md').read()
f = open('README.rst','w+')
f.write(doc.rst)
f.close()
os.system("python setup.py register") | {
"content_hash": "3885107054102c1d04cb4655eea17025",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 80,
"avg_line_length": 25.26086956521739,
"alnum_prop": 0.7039586919104991,
"repo_name": "NB-Dev/django-splitdate",
"id": "5c43decff65c84426570b55b817068458121f413",
"size": "605",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "register.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22334"
}
],
"symlink_target": ""
} |
"""
Component Classes for Genomic Jobs
Components are assembled by the JobController for a particular Genomic Job
"""
import csv
import json
import logging
import re
import pytz
from collections import deque, namedtuple
from copy import deepcopy
from dateutil.parser import parse
import sqlalchemy
from werkzeug.exceptions import NotFound
from rdr_service import clock, config
from rdr_service.dao.code_dao import CodeDao
from rdr_service.dao.participant_dao import ParticipantDao
from rdr_service.genomic_enums import ResultsModuleType, ResultsWorkflowState
from rdr_service.genomic.genomic_data import GenomicQueryClass
from rdr_service.genomic.genomic_state_handler import GenomicStateHandler
from rdr_service.model.biobank_stored_sample import BiobankStoredSample
from rdr_service.model.code import Code
from rdr_service.model.participant_summary import ParticipantRaceAnswers, ParticipantSummary
from rdr_service.model.config_utils import get_biobank_id_prefix
from rdr_service.resource.generators.genomics import genomic_user_event_metrics_batch_update
from rdr_service.api_util import (
open_cloud_file,
copy_cloud_file,
delete_cloud_file,
list_blobs,
get_blob)
from rdr_service.model.genomics import (
GenomicSet,
GenomicSetMember,
GenomicGCValidationMetrics,
GenomicSampleContamination)
from rdr_service.participant_enums import (
WithdrawalStatus,
QuestionnaireStatus,
SampleStatus,
Race,
SuspensionStatus,
ParticipantCohort)
from rdr_service.genomic_enums import GenomicSetStatus, GenomicSetMemberStatus, GenomicValidationFlag, GenomicJob, \
GenomicWorkflowState, GenomicSubProcessStatus, GenomicSubProcessResult, GenomicManifestTypes, \
GenomicContaminationCategory, GenomicQcStatus, GenomicIncidentCode
from rdr_service.dao.genomics_dao import (
GenomicGCValidationMetricsDao,
GenomicSetMemberDao,
GenomicFileProcessedDao,
GenomicSetDao,
GenomicJobRunDao,
GenomicManifestFeedbackDao,
GenomicManifestFileDao,
GenomicAW1RawDao,
GenomicAW2RawDao,
GenomicGcDataFileDao,
GenomicGcDataFileMissingDao,
GenomicIncidentDao,
UserEventMetricsDao,
GenomicQueriesDao,
GenomicCVLAnalysisDao, GenomicResultWorkflowStateDao, GenomicCVLSecondSampleDao, GenomicAppointmentEventMetricsDao)
from rdr_service.dao.biobank_stored_sample_dao import BiobankStoredSampleDao
from rdr_service.dao.site_dao import SiteDao
from rdr_service.dao.participant_summary_dao import ParticipantSummaryDao
from rdr_service.genomic.genomic_biobank_manifest_handler import (
create_and_upload_genomic_biobank_manifest_file,
)
from rdr_service.genomic.validation import (
GENOMIC_VALID_AGE,
)
from rdr_service.offline.sql_exporter import SqlExporter
from rdr_service.config import (
getSetting,
GENOMIC_CVL_RECONCILIATION_REPORT_SUBFOLDER,
GENOMIC_GEM_A1_MANIFEST_SUBFOLDER,
GENOMIC_GEM_A3_MANIFEST_SUBFOLDER,
GENOME_TYPE_ARRAY,
GENOME_TYPE_ARRAY_INVESTIGATION,
GENOME_TYPE_WGS,
GENOME_TYPE_WGS_INVESTIGATION,
GENOMIC_AW3_ARRAY_SUBFOLDER,
GENOMIC_AW3_WGS_SUBFOLDER,
BIOBANK_AW2F_SUBFOLDER,
GENOMIC_INVESTIGATION_GENOME_TYPES,
CVL_W1IL_HDR_MANIFEST_SUBFOLDER,
CVL_W1IL_PGX_MANIFEST_SUBFOLDER,
CVL_W2W_MANIFEST_SUBFOLDER,
CVL_W3SR_MANIFEST_SUBFOLDER
)
from rdr_service.code_constants import COHORT_1_REVIEW_CONSENT_YES_CODE
from rdr_service.genomic.genomic_mappings import wgs_file_types_attributes, array_file_types_attributes, \
genome_center_datafile_prefix_map
from sqlalchemy.orm import aliased
class GenomicFileIngester:
"""
This class ingests a file from a source GC bucket into the destination table
"""
def __init__(self, job_id=None,
job_run_id=None,
bucket=None,
archive_folder=None,
sub_folder=None,
_controller=None,
target_file=None):
self.controller = _controller
self.job_id = job_id
self.job_run_id = job_run_id
self.file_obj = None
self.file_queue = deque()
self.target_file = target_file
self.bucket_name = bucket
self.archive_folder_name = archive_folder
self.sub_folder_name = sub_folder
self.investigation_set_id = None
self.participant_dao = None
# Sub Components
self.file_validator = GenomicFileValidator(
job_id=self.job_id,
controller=self.controller
)
self.file_mover = GenomicFileMover(archive_folder=self.archive_folder_name)
self.metrics_dao = GenomicGCValidationMetricsDao()
self.file_processed_dao = GenomicFileProcessedDao()
self.member_dao = GenomicSetMemberDao()
self.job_run_dao = GenomicJobRunDao()
self.sample_dao = BiobankStoredSampleDao()
self.feedback_dao = GenomicManifestFeedbackDao()
self.manifest_dao = GenomicManifestFileDao()
self.incident_dao = GenomicIncidentDao()
self.user_metrics_dao = UserEventMetricsDao()
self.cvl_analysis_dao = GenomicCVLAnalysisDao()
self.results_workflow_dao = GenomicResultWorkflowStateDao()
self.analysis_cols = self.cvl_analysis_dao.model_type.__table__.columns.keys()
self.set_dao = None
self.cvl_second_sample_dao = None
def generate_file_processing_queue(self):
"""
Creates the list of files to be ingested in this run.
        Ordering is currently arbitrary.
"""
# Check Target file is set.
# It will not be set in cron job, but will be set by tool when run manually
_manifest_file_id = None
try:
_manifest_file_id = self.controller.task_data.manifest_file.id
except AttributeError:
pass
if self.target_file is not None:
if self.controller.storage_provider is not None:
_blob = self.controller.storage_provider.get_blob(self.bucket_name, self.target_file)
else:
_blob = get_blob(self.bucket_name, self.target_file)
files = [(self.target_file, _blob.updated)]
else:
files = self._get_new_file_names_and_upload_dates_from_bucket()
if files == GenomicSubProcessResult.NO_FILES:
return files
else:
for file_data in files:
new_file_record = self.file_processed_dao.insert_file_record(
self.job_run_id,
f'{self.bucket_name}/{file_data[0]}',
self.bucket_name,
file_data[0].split('/')[-1],
upload_date=file_data[1],
manifest_file_id=_manifest_file_id)
self.file_queue.append(new_file_record)
def _get_new_file_names_and_upload_dates_from_bucket(self):
"""
Searches the bucket for un-processed files.
:return: list of (filenames, upload_date) or NO_FILES result code
"""
# Setup date
timezone = pytz.timezone('Etc/Greenwich')
date_limit_obj = timezone.localize(self.controller.last_run_time)
# Look for new files with valid filenames
bucket = '/' + self.bucket_name
files = list_blobs(bucket, prefix=self.sub_folder_name)
files = [(s.name, s.updated) for s in files
if s.updated > date_limit_obj
and self.file_validator.validate_filename(s.name)]
if not files:
logging.info('No files in cloud bucket {}'.format(self.bucket_name))
return GenomicSubProcessResult.NO_FILES
return files
def generate_file_queue_and_do_ingestion(self):
"""
Main method of the ingestor component,
generates a queue and processes each file
:return: result code
"""
file_queue_result = self.generate_file_processing_queue()
if file_queue_result == GenomicSubProcessResult.NO_FILES:
logging.info('No files to process.')
return file_queue_result
else:
logging.info('Processing files in queue.')
results = []
current_file = None
while len(self.file_queue):
try:
current_file = self.file_queue[0]
ingestion_result = self._ingest_genomic_file(current_file)
file_ingested = self.file_queue.popleft()
results.append(ingestion_result == GenomicSubProcessResult.SUCCESS)
if ingestion_result:
ingestion_message = f'Ingestion attempt for {file_ingested.fileName}: {ingestion_result}'
if 'invalid' in ingestion_result.name.lower():
logging.warning(ingestion_message)
else:
logging.info(ingestion_message)
self.file_processed_dao.update_file_record(
file_ingested.id,
GenomicSubProcessStatus.COMPLETED,
ingestion_result
)
# pylint: disable=broad-except
except Exception as e:
                    logging.error(f'Exception occurred when ingesting manifest {current_file.filePath}: {e}')
self.file_queue.popleft()
except IndexError:
logging.info('No files left in file queue.')
return GenomicSubProcessResult.SUCCESS if all(results) \
else GenomicSubProcessResult.ERROR
@staticmethod
def _clean_row_keys(val):
def str_clean(str_val):
return str_val.lower() \
.replace(' ', '') \
.replace('_', '')
if type(val) is str or 'quoted_name' in val.__class__.__name__.lower():
return str_clean(val)
elif 'dict' in val.__class__.__name__.lower():
return dict(zip([str_clean(key)
for key in val], val.values()))
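    # Illustrative examples (keys are hypothetical):
    #   _clean_row_keys("Biobank_ID")       -> "biobankid"
    #   _clean_row_keys({"Sample ID": "1"}) -> {"sampleid": "1"}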
@staticmethod
def _clean_alpha_values(value):
return value[1:] if value[0].isalpha() else value
def _ingest_genomic_file(self, file_obj):
"""
Reads a file object from bucket and inserts into DB
:param: file_obj: A genomic file object
:return: A GenomicSubProcessResultCode
"""
self.file_obj = file_obj
data_to_ingest = self._retrieve_data_from_path(self.file_obj.filePath)
if data_to_ingest == GenomicSubProcessResult.ERROR:
return GenomicSubProcessResult.ERROR
elif data_to_ingest:
logging.info(f'Ingesting data from {self.file_obj.fileName}')
logging.info("Validating file.")
ingestion_map = {
GenomicJob.AW1_MANIFEST: self._ingest_aw1_manifest,
GenomicJob.AW1F_MANIFEST: self._ingest_aw1_manifest,
GenomicJob.METRICS_INGESTION: self._process_gc_metrics_data_for_insert,
GenomicJob.GEM_A2_MANIFEST: self._ingest_gem_a2_manifest,
GenomicJob.GEM_METRICS_INGEST: self._ingest_gem_metrics_manifest,
GenomicJob.AW4_ARRAY_WORKFLOW: self._ingest_aw4_manifest,
GenomicJob.AW4_WGS_WORKFLOW: self._ingest_aw4_manifest,
GenomicJob.AW1C_INGEST: self._ingest_aw1c_manifest,
GenomicJob.AW1CF_INGEST: self._ingest_aw1c_manifest,
GenomicJob.AW5_ARRAY_MANIFEST: self._ingest_aw5_manifest,
GenomicJob.AW5_WGS_MANIFEST: self._ingest_aw5_manifest,
GenomicJob.CVL_W2SC_WORKFLOW: self._ingest_cvl_w2sc_manifest,
GenomicJob.CVL_W3NS_WORKFLOW: self._ingest_cvl_w3ns_manifest,
GenomicJob.CVL_W3SS_WORKFLOW: self._ingest_cvl_w3ss_manifest,
GenomicJob.CVL_W3SC_WORKFLOW: self._ingest_cvl_w3sc_manifest,
GenomicJob.CVL_W4WR_WORKFLOW: self._ingest_cvl_w4wr_manifest,
GenomicJob.CVL_W5NF_WORKFLOW: self._ingest_cvl_w5nf_manifest
}
self.file_validator.valid_schema = None
validation_result = self.file_validator.validate_ingestion_file(
filename=self.file_obj.fileName,
data_to_validate=data_to_ingest
)
if validation_result != GenomicSubProcessResult.SUCCESS:
# delete raw records
if self.job_id == GenomicJob.AW1_MANIFEST:
raw_dao = GenomicAW1RawDao()
raw_dao.delete_from_filepath(file_obj.filePath)
if self.job_id == GenomicJob.METRICS_INGESTION:
raw_dao = GenomicAW2RawDao()
raw_dao.delete_from_filepath(file_obj.filePath)
return validation_result
try:
ingestion_type = ingestion_map[self.job_id]
ingestions = self._set_data_ingest_iterations(data_to_ingest['rows'])
for row in ingestions:
ingestion_type(row)
self._set_manifest_file_resolved()
return GenomicSubProcessResult.SUCCESS
except RuntimeError:
return GenomicSubProcessResult.ERROR
else:
logging.info("No data to ingest.")
return GenomicSubProcessResult.NO_FILES
def _set_data_ingest_iterations(self, data_rows):
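        # Splits data_rows into batches of at most controller.max_num rows when a limit is set,
        # e.g. 250 rows with max_num=100 -> batches of 100, 100, and 50.
        # With no limit configured, the full row list is returned as a single batch.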
all_ingestions = []
if self.controller.max_num and len(data_rows) > self.controller.max_num:
current_rows = []
for row in data_rows:
current_rows.append(row)
if len(current_rows) == self.controller.max_num:
all_ingestions.append(current_rows.copy())
current_rows.clear()
if current_rows:
all_ingestions.append(current_rows)
else:
all_ingestions.append(data_rows)
return all_ingestions
def _set_manifest_file_resolved(self):
if not self.file_obj:
return
has_failed_validation = self.incident_dao.get_open_incident_by_file_name(self.file_obj.fileName)
if not has_failed_validation:
return
self.incident_dao.batch_update_incident_fields(
[obj.id for obj in has_failed_validation],
_type='resolved'
)
@staticmethod
def get_aw1_manifest_column_mappings():
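        # Maps GenomicSetMember attribute names to the cleaned AW1 column names
        # produced by _clean_row_keys (lower case, spaces and underscores removed).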
return {
'packageId': 'packageid',
'sampleId': 'sampleid',
'gcManifestBoxStorageUnitId': 'boxstorageunitid',
'gcManifestBoxPlateId': 'boxid/plateid',
'gcManifestWellPosition': 'wellposition',
'gcManifestParentSampleId': 'parentsampleid',
'collectionTubeId': 'collectiontubeid',
'gcManifestMatrixId': 'matrixid',
'gcManifestTreatments': 'treatments',
'gcManifestQuantity_ul': 'quantity(ul)',
'gcManifestTotalConcentration_ng_per_ul': 'totalconcentration(ng/ul)',
'gcManifestTotalDNA_ng': 'totaldna(ng)',
'gcManifestVisitDescription': 'visitdescription',
'gcManifestSampleSource': 'samplesource',
'gcManifestStudy': 'study',
'gcManifestTrackingNumber': 'trackingnumber',
'gcManifestContact': 'contact',
'gcManifestEmail': 'email',
'gcManifestStudyPI': 'studypi',
'gcManifestTestName': 'genometype',
'gcManifestFailureMode': 'failuremode',
'gcManifestFailureDescription': 'failuremodedesc',
}
def _ingest_aw1_manifest(self, rows):
"""
AW1 ingestion method: Updates the GenomicSetMember with AW1 data
If the row is determined to be a control sample,
insert a new GenomicSetMember with AW1 data
:param rows:
:return: result code
"""
_states = [GenomicWorkflowState.AW0, GenomicWorkflowState.EXTRACT_REQUESTED]
_site = self._get_site_from_aw1()
for row in rows:
row_copy = self._clean_row_keys(row)
row_copy['site_id'] = _site
# Skip rows if biobank_id is an empty string (row is empty well)
if row_copy['biobankid'] == "":
continue
# Check if this sample has a control sample parent tube
control_sample_parent = self.member_dao.get_control_sample_parent(
row_copy['genometype'],
int(row_copy['parentsampleid'])
)
# Create new set member record if the sample
# has the investigation genome type
if row_copy['genometype'] in GENOMIC_INVESTIGATION_GENOME_TYPES:
self.create_investigation_member_record_from_aw1(row_copy)
# Move to next row in file
continue
if control_sample_parent:
logging.warning(f"Control sample found: {row_copy['parentsampleid']}")
# Check if the control sample member exists for this GC, BID, collection tube, and sample ID
# Since the Biobank is reusing the sample and collection tube IDs (which are supposed to be unique)
cntrl_sample_member = self.member_dao.get_control_sample_for_gc_and_genome_type(
_site,
row_copy['genometype'],
row_copy['biobankid'],
row_copy['collectiontubeid'],
row_copy['sampleid']
)
if not cntrl_sample_member:
# Insert new GenomicSetMember record if none exists
# for this control sample, genome type, and gc site
self.create_new_member_from_aw1_control_sample(row_copy)
# Skip rest of iteration and go to next row
continue
# Find the existing GenomicSetMember
if self.job_id == GenomicJob.AW1F_MANIFEST:
                # Get the member based on collection tube ID (AW1F: member should already be in AW1 state)
member = self.member_dao.get_member_from_collection_tube(
row_copy['collectiontubeid'],
row_copy['genometype'],
state=GenomicWorkflowState.AW1
)
else:
                # Get the member based on collection tube ID with a null sample ID
member = self.member_dao.get_member_from_collection_tube_with_null_sample_id(
row_copy['collectiontubeid'],
row_copy['genometype'])
            # If no member was found and this is not a control sample,
            # check whether the collection tube ID was swapped by the Biobank
if not member:
bid = row_copy['biobankid']
# Strip biobank prefix if it's there
if bid[0] in [get_biobank_id_prefix(), 'T']:
bid = bid[1:]
member = self.member_dao.get_member_from_biobank_id_in_state(
bid,
row_copy['genometype'],
_states
)
# If member found, validate new collection tube ID, set collection tube ID
if member:
if self._validate_collection_tube_id(row_copy['collectiontubeid'], bid):
if member.genomeType in [GENOME_TYPE_ARRAY, GENOME_TYPE_WGS]:
if member.collectionTubeId:
with self.member_dao.session() as session:
self._record_sample_as_contaminated(session, member.collectionTubeId)
member.collectionTubeId = row_copy['collectiontubeid']
else:
# Couldn't find genomic set member based on either biobank ID or collection tube
_message = f"{self.job_id.name}: Cannot find genomic set member: " \
f"collection_tube_id: {row_copy['collectiontubeid']}, " \
f"biobank id: {bid}, " \
f"genome type: {row_copy['genometype']}"
self.controller.create_incident(source_job_run_id=self.job_run_id,
source_file_processed_id=self.file_obj.id,
code=GenomicIncidentCode.UNABLE_TO_FIND_MEMBER.name,
message=_message,
biobank_id=bid,
collection_tube_id=row_copy['collectiontubeid'],
sample_id=row_copy['sampleid'],
)
# Skip rest of iteration and continue processing file
continue
# Check for diversion pouch site
div_pouch_site_id = self.sample_dao.get_diversion_pouch_site_id(row_copy['collectiontubeid'])
if div_pouch_site_id:
member.diversionPouchSiteFlag = 1
# Process the attribute data
member_changed, member = self._process_aw1_attribute_data(row_copy, member)
if member_changed:
self.member_dao.update(member)
return GenomicSubProcessResult.SUCCESS
def create_investigation_member_record_from_aw1(self, aw1_data):
# Create genomic_set
if not self.investigation_set_id:
new_set = self.create_new_genomic_set()
self.investigation_set_id = new_set.id
self.participant_dao = ParticipantDao()
# Get IDs
biobank_id = aw1_data['biobankid']
# Strip biobank prefix if it's there
if biobank_id[0] in [get_biobank_id_prefix(), 'T']:
biobank_id = biobank_id[1:]
participant = self.participant_dao.get_by_biobank_id(biobank_id)
# Create new genomic_set_member
new_member = GenomicSetMember(
genomicSetId=self.investigation_set_id,
biobankId=biobank_id,
participantId=participant.participantId,
reconcileGCManifestJobRunId=self.job_run_id,
genomeType=aw1_data['genometype'],
sexAtBirth=aw1_data['sexatbirth'],
blockResearch=1,
blockResearchReason="Created from AW1 with investigation genome type.",
blockResults=1,
blockResultsReason="Created from AW1 with investigation genome type.",
genomicWorkflowState=GenomicWorkflowState.AW1,
genomicWorkflowStateStr=GenomicWorkflowState.AW1.name,
)
_, member = self._process_aw1_attribute_data(aw1_data, new_member)
self.member_dao.insert(member)
def create_new_genomic_set(self):
new_set = GenomicSet(
genomicSetName=f"investigation_{self.job_run_id}",
genomicSetCriteria="investigation genome type",
genomicSetVersion=1,
)
self.set_dao = GenomicSetDao()
with self.set_dao.session() as session:
session.add(new_set)
return new_set
def load_raw_awn_file(self, raw_dao, **kwargs):
"""
        Loads raw models with raw data from manifest files
Ex: genomic_aw1_raw => aw1_manifest
:param raw_dao: Model Dao Class
:return:
"""
dao = raw_dao()
# look up if any rows exist already for the file
records = dao.get_from_filepath(self.target_file)
if records:
logging.warning(f'File already exists in raw table: {self.target_file}')
return GenomicSubProcessResult.SUCCESS
file_data = self._retrieve_data_from_path(self.target_file)
# Return the error status if there is an error in file_data
if not isinstance(file_data, dict):
return file_data
model_columns = dao.model_type.__table__.columns.keys()
# Processing raw data in batches
batch_size = 100
item_count = 0
batch = list()
for row in file_data['rows']:
row_obj = self._set_raw_awn_attributes(row, model_columns)
if kwargs.get('cvl_site_id'):
row_obj['cvl_site_id'] = kwargs.get('cvl_site_id')
row_obj = dao.get_model_obj_from_items(row_obj.items())
batch.append(row_obj)
item_count += 1
if item_count == batch_size:
# Insert batch into DB
with dao.session() as session:
session.bulk_save_objects(batch)
# Reset batch
item_count = 0
batch = list()
if item_count:
# insert last batch if needed
with dao.session() as session:
session.bulk_save_objects(batch)
return GenomicSubProcessResult.SUCCESS
def ingest_single_aw1_row_for_member(self, member):
# Open file and pull row based on member.biobankId
with self.controller.storage_provider.open(self.target_file, 'r') as aw1_file:
reader = csv.DictReader(aw1_file, delimiter=',')
row = [r for r in reader if r['BIOBANK_ID'][1:] == str(member.biobankId)][0]
# Alter field names to remove spaces and change to lower case
row = self._clean_row_keys(row)
ingested_before = member.reconcileGCManifestJobRunId is not None
# Write AW1 data to genomic_set_member table
gc_manifest_column_mappings = self.get_aw1_manifest_column_mappings()
# Set attributes from file
for key in gc_manifest_column_mappings.keys():
try:
member.__setattr__(key, row[gc_manifest_column_mappings[key]])
except KeyError:
member.__setattr__(key, None)
# Set other fields not in AW1 file
member.reconcileGCManifestJobRunId = self.job_run_id
member.aw1FileProcessedId = self.file_obj.id
member.gcSite = self._get_site_from_aw1()
# Only update the member's genomicWorkflowState if it was AW0
if member.genomicWorkflowState == GenomicWorkflowState.AW0:
member.genomicWorkflowState = GenomicWorkflowState.AW1
member.genomicWorkflowStateStr = GenomicWorkflowState.AW1.name
member.genomicWorkflowStateModifiedTime = clock.CLOCK.now()
# Update member in DB
self.member_dao.update(member)
# Update AW1 manifest record count
if not ingested_before and not self.controller.bypass_record_count:
self.increment_manifest_file_record_count_from_id()
return GenomicSubProcessResult.SUCCESS
def ingest_single_aw2_row_for_member(self, member: GenomicSetMember) -> GenomicSubProcessResult:
# Open file and pull row based on member.biobankId
        with self.controller.storage_provider.open(self.target_file, 'r') as aw2_file:
            reader = csv.DictReader(aw2_file, delimiter=',')
row = [r for r in reader if r['Biobank ID'] == str(member.biobankId)][0]
# Alter field names to remove spaces and change to lower case
row = self._clean_row_keys(row)
        # Begin prepping the AW2 row
if row['genometype'] in (GENOME_TYPE_WGS, GENOME_TYPE_WGS_INVESTIGATION):
row = self._set_metrics_wgs_data_file_paths(row)
elif row['genometype'] in (GENOME_TYPE_ARRAY, GENOME_TYPE_ARRAY_INVESTIGATION):
row = self._set_metrics_array_data_file_paths(row)
row = self.prep_aw2_row_attributes(row, member)
if row == GenomicSubProcessResult.ERROR:
return GenomicSubProcessResult.ERROR
# check whether metrics object exists for that member
existing_metrics_obj = self.metrics_dao.get_metrics_by_member_id(member.id)
if existing_metrics_obj is not None:
metric_id = existing_metrics_obj.id
else:
metric_id = None
self.metrics_dao.upsert_gc_validation_metrics_from_dict(row, metric_id)
self.update_member_for_aw2(member)
# Update member in DB
self.member_dao.update(member)
self._update_member_state_after_aw2(member)
# Update AW1 manifest feedback record count
if existing_metrics_obj is None and not self.controller.bypass_record_count:
# For feedback manifest loop
# Get the genomic_manifest_file
manifest_file = self.file_processed_dao.get(member.aw1FileProcessedId)
if manifest_file is not None:
self.feedback_dao.increment_feedback_count(manifest_file.genomicManifestFileId)
return GenomicSubProcessResult.SUCCESS
def increment_manifest_file_record_count_from_id(self):
"""
Increments the manifest record count by 1
"""
manifest_file = self.manifest_dao.get(self.file_obj.genomicManifestFileId)
manifest_file.recordCount += 1
with self.manifest_dao.session() as s:
s.merge(manifest_file)
def prep_aw2_row_attributes(self, row: dict, member: GenomicSetMember):
"""
Set contamination, contamination category,
call rate, member_id, and file_id on AW2 row dictionary
:param member:
:param row:
:return: row dictionary or ERROR code
"""
row['member_id'] = member.id
row['file_id'] = self.file_obj.id
# handle mapped reads in case they are longer than field length
if 'mappedreadspct' in row.keys():
if len(row['mappedreadspct']) > 10:
row['mappedreadspct'] = row['mappedreadspct'][0:10]
        # Set default values in case the GC uploads an empty contamination value with a processing status of "fail"
row['contamination_category'] = GenomicContaminationCategory.UNSET
row['contamination_category_str'] = "UNSET"
# Truncate call rate
try:
row['callrate'] = row['callrate'][:10]
except KeyError:
pass
# Convert blank alignedq30bases to none
try:
if row['alignedq30bases'] == '':
row['alignedq30bases'] = None
except KeyError:
pass
# Validate and clean contamination data
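        # A non-numeric contamination value is tolerated when the processing status is not 'pass';
        # for 'pass' rows it raises a DATA_VALIDATION_FAILED incident and the row is treated as an error.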
try:
row['contamination'] = float(row['contamination'])
# Percentages shouldn't be less than 0
if row['contamination'] < 0:
row['contamination'] = 0
except ValueError:
if row['processingstatus'].lower() != 'pass':
return row
_message = f'{self.job_id.name}: Contamination must be a number for sample_id: {row["sampleid"]}'
self.controller.create_incident(source_job_run_id=self.job_run_id,
source_file_processed_id=self.file_obj.id,
code=GenomicIncidentCode.DATA_VALIDATION_FAILED.name,
message=_message,
biobank_id=member.biobankId,
sample_id=row['sampleid'],
)
return GenomicSubProcessResult.ERROR
# Calculate contamination_category
contamination_value = float(row['contamination'])
category = self.calculate_contamination_category(
member.collectionTubeId,
contamination_value,
member
)
row['contamination_category'] = category
row['contamination_category_str'] = category.name
return row
def update_member_for_aw2(self, member: GenomicSetMember):
"""
Updates the aw2FileProcessedId and possibly the genomicWorkflowState
of a GenomicSetMember after AW2 data has been ingested
:param member:
"""
member.aw2FileProcessedId = self.file_obj.id
# Only update the state if it was AW1
if member.genomicWorkflowState == GenomicWorkflowState.AW1:
member.genomicWorkflowState = GenomicWorkflowState.AW2
member.genomicWorkflowStateStr = GenomicWorkflowState.AW2.name
member.genomicWorkflowStateModifiedTime = clock.CLOCK.now()
self.member_dao.update(member)
def _ingest_gem_a2_manifest(self, rows):
"""
Processes the GEM A2 manifest file data
Updates GenomicSetMember object with gem_pass field.
:param rows:
:return: Result Code
"""
try:
for row in rows:
sample_id = row['sample_id']
member = self.member_dao.get_member_from_sample_id_with_state(sample_id,
GENOME_TYPE_ARRAY,
GenomicWorkflowState.A1)
if member is None:
logging.warning(f'Invalid sample ID: {sample_id}')
continue
member.gemPass = row['success']
member.gemA2ManifestJobRunId = self.job_run_id
member.gemDateOfImport = parse(row['date_of_import'])
_signal = 'a2-gem-pass' if member.gemPass.lower() == 'y' else 'a2-gem-fail'
                # update state and state modified time only if changed
if member.genomicWorkflowState != GenomicStateHandler.get_new_state(
member.genomicWorkflowState, signal=_signal):
member.genomicWorkflowState = GenomicStateHandler.get_new_state(
member.genomicWorkflowState,
signal=_signal)
member.genomicWorkflowStateStr = member.genomicWorkflowState.name
member.genomicWorkflowStateModifiedTime = clock.CLOCK.now()
self.member_dao.update(member)
return GenomicSubProcessResult.SUCCESS
except (RuntimeError, KeyError):
return GenomicSubProcessResult.ERROR
def _ingest_gem_metrics_manifest(self, rows):
"""
Processes the GEM Metrics manifest file data
Updates GenomicSetMember object with metrics fields.
:param rows:
:return: Result Code
"""
try:
for row in rows:
sample_id = row['sample_id']
member = self.member_dao.get_member_from_sample_id_with_state(sample_id,
GENOME_TYPE_ARRAY,
GenomicWorkflowState.GEM_RPT_READY)
if member is None:
logging.warning(f'Invalid sample ID: {sample_id}')
continue
member.gemMetricsAncestryLoopResponse = row['ancestry_loop_response']
member.gemMetricsAvailableResults = row['available_results']
member.gemMetricsResultsReleasedAt = row['results_released_at']
member.colorMetricsJobRunID = self.job_run_id
self.member_dao.update(member)
return GenomicSubProcessResult.SUCCESS
except (RuntimeError, KeyError):
return GenomicSubProcessResult.ERROR
def _ingest_aw4_manifest(self, rows):
"""
Processes the AW4 manifest file data
:param rows:
:return:
"""
try:
for row in rows:
row_copy = self._clean_row_keys(row)
sample_id = row_copy['sampleid']
member = self.member_dao.get_member_from_aw3_sample(sample_id)
if member is None:
logging.warning(f'Invalid sample ID: {sample_id}')
continue
member.aw4ManifestJobRunID = self.job_run_id
member.qcStatus = self._get_qc_status_from_value(row_copy['qcstatus'])
member.qcStatusStr = member.qcStatus.name
metrics = self.metrics_dao.get_metrics_by_member_id(member.id)
if metrics:
metrics.drcSexConcordance = row_copy['drcsexconcordance']
if self.job_id == GenomicJob.AW4_ARRAY_WORKFLOW:
metrics.drcCallRate = row_copy['drccallrate']
elif self.job_id == GenomicJob.AW4_WGS_WORKFLOW:
metrics.drcContamination = row_copy['drccontamination']
metrics.drcMeanCoverage = row_copy['drcmeancoverage']
metrics.drcFpConcordance = row_copy['drcfpconcordance']
self.metrics_dao.upsert(metrics)
self.member_dao.update(member)
return GenomicSubProcessResult.SUCCESS
except (RuntimeError, KeyError):
return GenomicSubProcessResult.ERROR
def ingest_metrics_file_from_filepath(self, metric_type, file_path):
metric_map = {
'user_events': self.user_metrics_dao
}
file_data = self._retrieve_data_from_path(file_path)
if not isinstance(file_data, dict):
return file_data
batch_size, item_count, batch = 100, 0, []
try:
metric_dao = metric_map[metric_type]
except KeyError:
logging.warning(f'Metric type {metric_type} is invalid for this method')
return GenomicSubProcessResult.ERROR
for row in file_data['rows']:
if row.get('participant_id') and 'P' in row.get('participant_id'):
participant_id = row['participant_id'].split('P')[-1]
row['participant_id'] = int(participant_id)
row['file_path'] = file_path
row['created'] = clock.CLOCK.now()
row['modified'] = clock.CLOCK.now()
row['run_id'] = self.controller.job_run.id
row_insert_obj = metric_dao.get_model_obj_from_items(row.items())
batch.append(row_insert_obj)
item_count += 1
if item_count == batch_size:
with metric_dao.session() as session:
# Use session add_all() so we can get the newly created primary key id values back.
session.add_all(batch)
session.commit()
# Batch update PDR resource records.
genomic_user_event_metrics_batch_update([r.id for r in batch])
item_count = 0
batch.clear()
if item_count:
with metric_dao.session() as session:
# Use session add_all() so we can get the newly created primary key id values back.
session.add_all(batch)
session.commit()
# Batch update PDR resource records.
genomic_user_event_metrics_batch_update([r.id for r in batch])
return GenomicSubProcessResult.SUCCESS
@staticmethod
def ingest_appointment_metrics(file_path):
try:
with open_cloud_file(file_path) as json_file:
json_appointment_data = json.load(json_file)
if not json_appointment_data:
logging.warning(f'Appointment metric file {file_path} is empty')
return GenomicSubProcessResult.NO_RESULTS
batch_size, item_count, batch = 100, 0, []
appointment_metric_dao = GenomicAppointmentEventMetricsDao()
for event in json_appointment_data:
event_obj = {}
message_body = event.get('messageBody')
if event.get('participantId'):
participant_id = event.get('participantId')
if 'P' in participant_id:
participant_id = participant_id.split('P')[-1]
event_obj['participant_id'] = int(participant_id)
event_obj['event_authored_time'] = event.get('eventAuthoredTime')
event_obj['event_type'] = event.get('event')
event_obj['module_type'] = message_body.get('module_type')
event_obj['appointment_event'] = json.dumps(event)
event_obj['file_path'] = file_path
event_obj['created'] = clock.CLOCK.now()
event_obj['modified'] = clock.CLOCK.now()
batch.append(event_obj)
item_count += 1
if item_count == batch_size:
appointment_metric_dao.insert_bulk(batch)
item_count = 0
batch.clear()
if item_count:
appointment_metric_dao.insert_bulk(batch)
except ValueError:
logging.warning('Appointment metric file must be valid json')
return GenomicSubProcessResult.ERROR
return GenomicSubProcessResult.SUCCESS
def _retrieve_data_from_path(self, path):
"""
        Retrieves a genomic data file from a bucket path
:param path: The source file to ingest
:return: CSV data as a dictionary
"""
try:
filename = path.split('/')[1]
logging.info(
'Opening CSV file from queue {}: {}.'
.format(path.split('/')[1], filename)
)
if self.controller.storage_provider:
with self.controller.storage_provider.open(path, 'r') as csv_file:
return self._read_data_to_ingest(csv_file)
else:
with open_cloud_file(path) as csv_file:
return self._read_data_to_ingest(csv_file)
except FileNotFoundError:
logging.error(f"File path '{path}' not found")
return GenomicSubProcessResult.ERROR
@staticmethod
def _read_data_to_ingest(csv_file):
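        # Returns {'fieldnames': [...], 'rows': [<dict per CSV row>, ...]};
        # empty-string keys (e.g. from trailing delimiters) are dropped from each row.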
data_to_ingest = {'rows': []}
csv_reader = csv.DictReader(csv_file, delimiter=",")
data_to_ingest['fieldnames'] = csv_reader.fieldnames
for row in csv_reader:
for key in row.copy():
if not key:
del row[key]
data_to_ingest['rows'].append(row)
return data_to_ingest
def _process_aw1_attribute_data(self, aw1_data, member):
"""
Checks a GenomicSetMember object for changes provided by AW1 data
        and mutates the GenomicSetMember object if necessary
:param aw1_data: dict
:param member: GenomicSetMember
:return: (boolean, GenomicSetMember)
"""
# Check if the member needs updating
if self._test_aw1_data_for_member_updates(aw1_data, member):
member = self._set_member_attributes_from_aw1(aw1_data, member)
member = self._set_rdr_member_attributes_for_aw1(aw1_data, member)
return True, member
return False, member
def _test_aw1_data_for_member_updates(self, aw1_data, member):
"""
Checks each attribute provided by Biobank
        for changes to the GenomicSetMember object
:param aw1_data: dict
:param member: GenomicSetMember
:return: boolean (true if member requires updating)
"""
gc_manifest_column_mappings = self.get_aw1_manifest_column_mappings()
member_needs_updating = False
# Iterate each value and test whether the strings for each field correspond
for key in gc_manifest_column_mappings.keys():
if str(member.__getattribute__(key)) != str(aw1_data.get(gc_manifest_column_mappings[key])):
member_needs_updating = True
return member_needs_updating
def _set_member_attributes_from_aw1(self, aw1_data, member):
"""
Mutates the GenomicSetMember attributes provided by the Biobank
:param aw1_data: dict
:param member: GenomicSetMember
:return: GenomicSetMember
"""
gc_manifest_column_mappings = self.get_aw1_manifest_column_mappings()
for key in gc_manifest_column_mappings.keys():
member.__setattr__(key, aw1_data.get(gc_manifest_column_mappings[key]))
return member
def _set_rdr_member_attributes_for_aw1(self, aw1_data, member):
"""
Mutates the GenomicSetMember RDR attributes not provided by the Biobank
:param aw1_data: dict
:param member: GenomicSetMember
:return: GenomicSetMember
"""
# Set job run and file processed IDs
member.reconcileGCManifestJobRunId = self.job_run_id
# Don't overwrite aw1_file_processed_id when ingesting an AW1F
if self.job_id == GenomicJob.AW1_MANIFEST:
member.aw1FileProcessedId = self.file_obj.id
# Set the GC site ID (sourced from file-name)
member.gcSiteId = aw1_data['site_id']
# Only update the state if it was AW0 or AW1 (if in failure manifest workflow)
# We do not want to regress a state for reingested data
states_to_update = [GenomicWorkflowState.AW0, GenomicWorkflowState.EXTRACT_REQUESTED]
if self.controller.job_id == GenomicJob.AW1F_MANIFEST:
states_to_update = [GenomicWorkflowState.AW1]
if member.genomicWorkflowState in states_to_update:
_signal = "aw1-reconciled"
# Set the signal for a failed sample
if aw1_data['failuremode'] is not None and aw1_data['failuremode'] != '':
_signal = 'aw1-failed'
member.genomicWorkflowState = GenomicStateHandler.get_new_state(
member.genomicWorkflowState,
signal=_signal)
member.genomicWorkflowStateStr = member.genomicWorkflowState.name
member.genomicWorkflowStateModifiedTime = clock.CLOCK.now()
return member
def _set_raw_awn_attributes(self, row_data, model_columns):
"""
Builds dict from row_data and model_columns
:param row_data: dict
:param model_columns: Current obj model attribute keys
:return: dict object
"""
row_obj = {}
row = self._clean_row_keys(row_data)
if self.controller.job_id in [
GenomicJob.LOAD_AW1_TO_RAW_TABLE,
GenomicJob.LOAD_CVL_W3SS_TO_RAW_TABLE
]:
            # Adjust for Biobank field names: strip parenthesized units, then slashes
            row = dict(zip([re.sub(r'\([^)]*\)', '', key) for key in row], row.values()))
row = dict(zip([key.replace('/', '') for key in row], row.values()))
genome_type = row.get('genometype', "")
if not genome_type and row.get('sampleid'):
member = self.member_dao.get_member_from_sample_id(row.get('sampleid'))
genome_type = member.genomeType if member else ""
row_obj['genome_type'] = genome_type
row_obj['test_name'] = genome_type
for column in model_columns:
clean_column = self._clean_row_keys(column)
row_value = row.get(clean_column)
if row_value or row_value == "":
row_obj[column] = row_value[0:512]
row_obj['file_path'] = self.target_file
row_obj['created'] = clock.CLOCK.now()
row_obj['modified'] = clock.CLOCK.now()
return row_obj
def _process_gc_metrics_data_for_insert(self, rows):
""" Since input files vary in column names,
this standardizes the field-names before passing to the bulk inserter
:param rows:
:return result code
"""
members_to_update = []
for row in rows:
# change all key names to lower
row_copy = self._clean_row_keys(row)
if row_copy['genometype'] in (GENOME_TYPE_ARRAY, GENOME_TYPE_ARRAY_INVESTIGATION):
row_copy = self._set_metrics_array_data_file_paths(row_copy)
elif row_copy['genometype'] in (GENOME_TYPE_WGS, GENOME_TYPE_WGS_INVESTIGATION):
row_copy = self._set_metrics_wgs_data_file_paths(row_copy)
member = self.member_dao.get_member_from_sample_id(
int(row_copy['sampleid']),
)
if not member:
bid = row_copy['biobankid']
if bid[0] in [get_biobank_id_prefix(), 'T']:
bid = bid[1:]
# Couldn't find genomic set member based on either biobank ID or sample ID
_message = f"{self.job_id.name}: Cannot find genomic set member for bid, sample_id: " \
f"{row_copy['biobankid']}, {row_copy['sampleid']}"
self.controller.create_incident(source_job_run_id=self.job_run_id,
source_file_processed_id=self.file_obj.id,
code=GenomicIncidentCode.UNABLE_TO_FIND_MEMBER.name,
message=_message,
biobank_id=bid,
sample_id=row_copy['sampleid'],
)
continue
row_copy = self.prep_aw2_row_attributes(row_copy, member)
if row_copy == GenomicSubProcessResult.ERROR:
continue
# check whether metrics object exists for that member
existing_metrics_obj = self.metrics_dao.get_metrics_by_member_id(member.id)
metric_id = None
if existing_metrics_obj:
if self.controller.skip_updates:
# when running tool, updates can be skipped
continue
else:
metric_id = existing_metrics_obj.id
else:
if member.genomeType in [GENOME_TYPE_ARRAY, GENOME_TYPE_WGS]:
if row_copy['contamination_category'] in [GenomicContaminationCategory.EXTRACT_WGS,
GenomicContaminationCategory.EXTRACT_BOTH]:
# Insert a new member
self.insert_member_for_replating(member, row_copy['contamination_category'])
# For feedback manifest loop
# Get the genomic_manifest_file
manifest_file = self.file_processed_dao.get(member.aw1FileProcessedId)
if manifest_file is not None and existing_metrics_obj is None:
self.feedback_dao.increment_feedback_count(manifest_file.genomicManifestFileId)
self.update_member_for_aw2(member)
# set lists of members to update workflow state
member_dict = {
'id': member.id
}
if row_copy['genometype'] == GENOME_TYPE_ARRAY:
member_dict['genomicWorkflowState'] = int(GenomicWorkflowState.GEM_READY)
member_dict['genomicWorkflowStateStr'] = str(GenomicWorkflowState.GEM_READY)
member_dict['genomicWorkflowStateModifiedTime'] = clock.CLOCK.now()
elif row_copy['genometype'] == GENOME_TYPE_WGS:
member_dict['genomicWorkflowState'] = int(GenomicWorkflowState.CVL_READY)
member_dict['genomicWorkflowStateStr'] = str(GenomicWorkflowState.CVL_READY)
member_dict['genomicWorkflowStateModifiedTime'] = clock.CLOCK.now()
members_to_update.append(member_dict)
# upsert metrics record via cloud task
row_copy['contamination_category'] = int(row_copy['contamination_category'])
self.controller.execute_cloud_task({
'metric_id': metric_id,
'payload_dict': row_copy,
}, 'genomic_gc_metrics_upsert')
if members_to_update:
self.member_dao.bulk_update(members_to_update)
return GenomicSubProcessResult.SUCCESS
def copy_member_for_replating(
self,
member,
genome_type=None,
set_id=None,
block_research_reason=None,
block_results_reason=None
):
"""
        Inserts a copy of an existing member record for replating.
:param member: GenomicSetMember
:param genome_type:
:param set_id:
:param block_research_reason:
:param block_results_reason:
:return:
"""
new_member = GenomicSetMember(
biobankId=member.biobankId,
genomicSetId=set_id if set_id else member.genomicSetId,
participantId=member.participantId,
nyFlag=member.nyFlag,
sexAtBirth=member.sexAtBirth,
validationStatus=member.validationStatus,
validationFlags=member.validationFlags,
ai_an=member.ai_an,
genomeType=genome_type if genome_type else member.genomeType,
collectionTubeId=f'replated_{member.id}',
genomicWorkflowState=GenomicWorkflowState.EXTRACT_REQUESTED,
replatedMemberId=member.id,
participantOrigin=member.participantOrigin,
blockResearch=1 if block_research_reason else 0,
blockResearchReason=block_research_reason if block_research_reason else None,
blockResults=1 if block_results_reason else 0,
blockResultsReason=block_results_reason if block_results_reason else None
)
self.member_dao.insert(new_member)
def insert_member_for_replating(self, member, category):
"""
Inserts a new member record for replating.
:param member: GenomicSetMember
:param category: GenomicContaminationCategory
:return:
"""
new_member_wgs = GenomicSetMember(
biobankId=member.biobankId,
genomicSetId=member.genomicSetId,
participantId=member.participantId,
nyFlag=member.nyFlag,
sexAtBirth=member.sexAtBirth,
validationStatus=member.validationStatus,
validationFlags=member.validationFlags,
collectionTubeId=f'replated_{member.id}',
ai_an=member.ai_an,
genomeType=GENOME_TYPE_WGS,
genomicWorkflowState=GenomicWorkflowState.EXTRACT_REQUESTED,
genomicWorkflowStateStr=GenomicWorkflowState.EXTRACT_REQUESTED.name,
participantOrigin=member.participantOrigin,
created=clock.CLOCK.now(),
modified=clock.CLOCK.now(),
replatedMemberId=member.id,
)
if category == GenomicContaminationCategory.EXTRACT_BOTH:
new_member_array = deepcopy(new_member_wgs)
new_member_array.genomeType = GENOME_TYPE_ARRAY
self.member_dao.insert(new_member_array)
self.member_dao.insert(new_member_wgs)
@staticmethod
def get_result_module(module_str):
results_attr_mapping = {
'hdrv1': ResultsModuleType.HDRV1,
'pgxv1': ResultsModuleType.PGXV1,
}
return results_attr_mapping.get(module_str)
def _base_cvl_ingestion(self, **kwargs):
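        # Shared CVL ingestion step: returns (row_copy, member); member is None when the row
        # is missing biobank/sample IDs or when no matching genomic set member record is found.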
row_copy = self._clean_row_keys(kwargs.get('row'))
biobank_id = row_copy.get('biobankid')
sample_id = row_copy.get('sampleid')
if not (biobank_id and sample_id):
return row_copy, None
biobank_id = self._clean_alpha_values(biobank_id)
member = self.member_dao.get_member_from_biobank_id_and_sample_id(
biobank_id,
sample_id
)
if not member:
logging.warning(f'Can not find genomic member record for biobank_id: '
f'{biobank_id} and sample_id: {sample_id}, skipping...')
return row_copy, None
setattr(member, kwargs.get('run_attr'), self.job_run_id)
self.member_dao.update(member)
# result workflow state
if kwargs.get('result_state') and kwargs.get('module_type'):
self.results_workflow_dao.insert_new_result_record(
member_id=member.id,
module_type=kwargs.get('module_type'),
state=kwargs.get('result_state')
)
return row_copy, member
def _base_cvl_analysis_ingestion(self, row_copy, member):
# cvl analysis
analysis_cols_mapping = {}
for column in self.analysis_cols:
col_matched = row_copy.get(self._clean_row_keys(column))
if col_matched:
analysis_cols_mapping[column] = self._clean_row_keys(column)
analysis_obj = self.cvl_analysis_dao.model_type()
setattr(analysis_obj, 'genomic_set_member_id', member.id)
for key, val in analysis_cols_mapping.items():
setattr(analysis_obj, key, row_copy[val])
self.cvl_analysis_dao.insert(analysis_obj)
def _ingest_cvl_w2sc_manifest(self, rows):
"""
Processes the CVL W2SC manifest file data
:param rows:
:return: Result Code
"""
try:
for row in rows:
self._base_cvl_ingestion(
row=row,
run_attr='cvlW2scManifestJobRunID',
result_state=ResultsWorkflowState.CVL_W2SC,
module_type=ResultsModuleType.HDRV1
)
return GenomicSubProcessResult.SUCCESS
except (RuntimeError, KeyError):
return GenomicSubProcessResult.ERROR
def _ingest_cvl_w3ns_manifest(self, rows):
"""
Processes the CVL W3NS manifest file data
:param rows:
:return: Result Code
"""
try:
for row in rows:
self._base_cvl_ingestion(
row=row,
run_attr='cvlW3nsManifestJobRunID',
result_state=ResultsWorkflowState.CVL_W3NS,
module_type=ResultsModuleType.HDRV1
)
return GenomicSubProcessResult.SUCCESS
except (RuntimeError, KeyError):
return GenomicSubProcessResult.ERROR
def _ingest_cvl_w3sc_manifest(self, rows):
"""
Processes the CVL W3SC manifest file data
:param rows:
:return: Result Code
"""
try:
for row in rows:
row_copy, member = self._base_cvl_ingestion(
row=row,
run_attr='cvlW3scManifestJobRunID',
result_state=ResultsWorkflowState.CVL_W3SC,
module_type=ResultsModuleType.HDRV1
)
if not (row_copy and member):
continue
member.cvlSecondaryConfFailure = row_copy['cvlsecondaryconffailure']
# allows for sample to be resent in subsequent W3SR
# https://docs.google.com/presentation/d/1QqXCzwz6MGLMhNwuXlV6ieoMLaJYuYai8csxagF_2-E/edit#slide=id.g10f369a487f_0_0
member.cvlW3srManifestJobRunID = None
self.member_dao.update(member)
return GenomicSubProcessResult.SUCCESS
except (RuntimeError, KeyError):
return GenomicSubProcessResult.ERROR
def _ingest_cvl_w3ss_manifest(self, rows):
"""
Processes the CVL W3SS manifest file data
:param rows:
:return: Result Code
"""
self.cvl_second_sample_dao = GenomicCVLSecondSampleDao()
sample_cols = self.cvl_second_sample_dao.model_type.__table__.columns.keys()
try:
for row in rows:
row_copy, member = self._base_cvl_ingestion(
row=row,
run_attr='cvlW3ssManifestJobRunID',
result_state=ResultsWorkflowState.CVL_W3SS,
module_type=ResultsModuleType.HDRV1
)
if not (row_copy and member):
continue
row_copy = dict(zip([key.replace('/', '').split('(')[0] for key in row_copy],
row_copy.values()))
# cvl second sample
second_sample_obj = self.cvl_second_sample_dao.model_type()
setattr(second_sample_obj, 'genomic_set_member_id', member.id)
for col in sample_cols:
cleaned_col = self._clean_row_keys(col)
col_value = row_copy.get(cleaned_col)
if col_value:
setattr(second_sample_obj, col, col_value)
self.cvl_second_sample_dao.insert(second_sample_obj)
return GenomicSubProcessResult.SUCCESS
except (RuntimeError, KeyError):
return GenomicSubProcessResult.ERROR
def _ingest_cvl_w4wr_manifest(self, rows):
"""
Processes the CVL W4WR manifest file data
:param rows:
:return: Result Code
"""
run_attr_mapping = {
'hdrv1': 'cvlW4wrHdrManifestJobRunID',
'pgxv1': 'cvlW4wrPgxManifestJobRunID'
}
run_id, module = None, None
for result_key in run_attr_mapping.keys():
if result_key in self.file_obj.fileName.lower():
run_id = run_attr_mapping[result_key]
module = self.get_result_module(result_key)
break
try:
for row in rows:
row_copy, member = self._base_cvl_ingestion(
row=row,
run_attr=run_id,
result_state=ResultsWorkflowState.CVL_W4WR,
module_type=module
)
if not (row_copy and member):
continue
self._base_cvl_analysis_ingestion(row_copy, member)
return GenomicSubProcessResult.SUCCESS
except (RuntimeError, KeyError):
return GenomicSubProcessResult.ERROR
def _ingest_cvl_w5nf_manifest(self, rows):
run_attr_mapping = {
'hdrv1': 'cvlW5nfHdrManifestJobRunID',
'pgxv1': 'cvlW5nfPgxManifestJobRunID'
}
run_id, module = None, None
for result_key in run_attr_mapping.keys():
if result_key in self.file_obj.fileName.lower():
run_id = run_attr_mapping[result_key]
module = self.get_result_module(result_key)
break
try:
for row in rows:
row_copy, member = self._base_cvl_ingestion(
row=row,
run_attr=run_id,
result_state=ResultsWorkflowState.CVL_W5NF,
module_type=module,
)
if not (row_copy and member):
continue
current_analysis = self.cvl_analysis_dao.get_passed_analysis_member_module(
member.id,
module
)
                # There should already be an initial passed analysis record for this member and module
if current_analysis:
current_analysis.failed = 1
current_analysis.failed_request_reason = row_copy['requestreason']
current_analysis.failed_request_reason_free = row_copy['requestreasonfree'][0:512]
self.cvl_analysis_dao.update(current_analysis)
return GenomicSubProcessResult.SUCCESS
except (RuntimeError, KeyError):
return GenomicSubProcessResult.ERROR
def _ingest_aw5_manifest(self, rows):
try:
for row in rows:
row_copy = self._clean_row_keys(row)
biobank_id = row_copy['biobankid']
biobank_id = self._clean_alpha_values(biobank_id)
sample_id = row_copy['sampleid']
member = self.member_dao.get_member_from_biobank_id_and_sample_id(biobank_id, sample_id)
if not member:
logging.warning(f'Can not find genomic member record for biobank_id: '
f'{biobank_id} and sample_id: {sample_id}, skipping...')
continue
existing_metrics_obj = self.metrics_dao.get_metrics_by_member_id(member.id)
if existing_metrics_obj is not None:
metric_id = existing_metrics_obj.id
else:
logging.warning(f'Can not find metrics record for member id: '
f'{member.id}, skipping...')
continue
self.metrics_dao.update_gc_validation_metrics_deleted_flags_from_dict(row_copy, metric_id)
return GenomicSubProcessResult.SUCCESS
except (RuntimeError, KeyError):
return GenomicSubProcessResult.ERROR
def _ingest_aw1c_manifest(self, rows):
"""
Processes the CVL AW1C manifest file data
:param rows:
:return: Result Code
"""
try:
for row in rows:
row_copy = self._clean_row_keys(row)
collection_tube_id = row_copy['collectiontubeid']
member = self.member_dao.get_member_from_collection_tube(collection_tube_id, GENOME_TYPE_WGS)
if member is None:
# Currently ignoring invalid cases
logging.warning(f'Invalid collection tube ID: {collection_tube_id}')
continue
# Update the AW1C job run ID and genome_type
member.cvlAW1CManifestJobRunID = self.job_run_id
member.genomeType = row_copy['genometype']
# Handle genomic state
_signal = "aw1c-reconciled"
if row_copy['failuremode'] not in (None, ''):
member.gcManifestFailureMode = row_copy['failuremode']
member.gcManifestFailureDescription = row_copy['failuremodedesc']
_signal = 'aw1c-failed'
                # update state and state modified time only if changed
if member.genomicWorkflowState != GenomicStateHandler.get_new_state(
member.genomicWorkflowState, signal=_signal):
member.genomicWorkflowState = GenomicStateHandler.get_new_state(
member.genomicWorkflowState,
signal=_signal)
member.genomicWorkflowStateStr = member.genomicWorkflowState.name
member.genomicWorkflowStateModifiedTime = clock.CLOCK.now()
self.member_dao.update(member)
return GenomicSubProcessResult.SUCCESS
except (RuntimeError, KeyError):
return GenomicSubProcessResult.ERROR
def _get_site_from_aw1(self):
"""
Returns the Genomic Center's site ID from the AW1 filename
:return: GC site ID string
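        Illustrative example (assumed filename shape): 'JH_AoU_SEQ_PKG-2004-12345.csv' -> 'jh'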
"""
return self.file_obj.fileName.split('/')[-1].split("_")[0].lower()
def _validate_collection_tube_id(self, collection_tube_id, bid):
"""
        Returns True if the biobank_id is associated with the biobank_stored_sample_id
(collection_tube_id)
:param collection_tube_id:
:param bid:
:return: boolean
"""
sample = self.sample_dao.get(collection_tube_id)
if sample:
return int(sample.biobankId) == int(bid)
return False
@staticmethod
def _get_qc_status_from_value(aw4_value):
"""
        Returns the GenomicQcStatus enum value for the given AW4 value
:param aw4_value: string from AW4 file (PASS/FAIL)
:return: GenomicQcStatus
"""
if aw4_value.strip().lower() == 'pass':
return GenomicQcStatus.PASS
elif aw4_value.strip().lower() == 'fail':
return GenomicQcStatus.FAIL
else:
logging.warning(f'Value from AW4 "{aw4_value}" is not PASS/FAIL.')
return GenomicQcStatus.UNSET
def create_new_member_from_aw1_control_sample(self, aw1_data: dict) -> GenomicSetMember:
"""
Creates a new control sample GenomicSetMember in RDR based on AW1 data
These will look like regular GenomicSetMember samples
:param aw1_data: dict from aw1 row
:return: GenomicSetMember
"""
# Writing new genomic_set_member based on AW1 data
max_set_id = self.member_dao.get_collection_tube_max_set_id()[0]
# Insert new member with biobank_id and collection tube ID from AW1
new_member_obj = GenomicSetMember(
genomicSetId=max_set_id,
participantId=0,
biobankId=aw1_data['biobankid'],
collectionTubeId=aw1_data['collectiontubeid'],
validationStatus=GenomicSetMemberStatus.VALID,
genomeType=aw1_data['genometype'],
genomicWorkflowState=GenomicWorkflowState.AW1,
genomicWorkflowStateStr=GenomicWorkflowState.AW1.name
)
        # Set member attributes from AW1
new_member_obj = self._set_member_attributes_from_aw1(aw1_data, new_member_obj)
new_member_obj = self._set_rdr_member_attributes_for_aw1(aw1_data, new_member_obj)
return self.member_dao.insert(new_member_obj)
@staticmethod
def _participant_has_potentially_clean_samples(session, biobank_id):
"""Check for any stored sample for the participant that is not contaminated
and is a 1ED04, 1ED10, or 1SAL2 test"""
query = session.query(BiobankStoredSample).filter(
BiobankStoredSample.biobankId == biobank_id,
BiobankStoredSample.status < SampleStatus.SAMPLE_NOT_RECEIVED
).outerjoin(GenomicSampleContamination).filter(
GenomicSampleContamination.id.is_(None),
BiobankStoredSample.test.in_(['1ED04', '1ED10', '1SAL2'])
)
exists_query = session.query(query.exists())
return exists_query.scalar()
def _record_sample_as_contaminated(self, session, sample_id):
session.add(GenomicSampleContamination(
sampleId=sample_id,
failedInJob=self.job_id
))
def calculate_contamination_category(self, sample_id, raw_contamination, member: GenomicSetMember):
"""
Takes contamination value from AW2 and calculates GenomicContaminationCategory
:param sample_id:
:param raw_contamination:
:param member:
:return: GenomicContaminationCategory
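        Category summary (mirrors the logic below):
          contamination < 1%                   -> NO_EXTRACT
          1%-3% inclusive with GROR consent    -> EXTRACT_WGS
          1%-3% inclusive without GROR consent -> NO_EXTRACT
          contamination > 3%                   -> EXTRACT_BOTH
        An extract category becomes TERMINAL_NO_EXTRACT when the participant has no
        remaining uncontaminated 1ED04/1ED10/1SAL2 sample.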
"""
ps_dao = ParticipantSummaryDao()
ps = ps_dao.get(member.participantId)
contamination_category = GenomicContaminationCategory.UNSET
# No Extract if contamination <1%
if raw_contamination < 0.01:
contamination_category = GenomicContaminationCategory.NO_EXTRACT
# Only extract WGS if contamination between 1 and 3 % inclusive AND ROR
elif (0.01 <= raw_contamination <= 0.03) and ps.consentForGenomicsROR == QuestionnaireStatus.SUBMITTED:
contamination_category = GenomicContaminationCategory.EXTRACT_WGS
# No Extract if contamination between 1 and 3 % inclusive and GROR is not Yes
elif (0.01 <= raw_contamination <= 0.03) and ps.consentForGenomicsROR != QuestionnaireStatus.SUBMITTED:
contamination_category = GenomicContaminationCategory.NO_EXTRACT
# Extract Both if contamination > 3%
elif raw_contamination > 0.03:
contamination_category = GenomicContaminationCategory.EXTRACT_BOTH
with ps_dao.session() as session:
if raw_contamination >= 0.01:
# Record in the contamination table, regardless of GROR consent
self._record_sample_as_contaminated(session, sample_id)
if contamination_category != GenomicContaminationCategory.NO_EXTRACT and \
not self._participant_has_potentially_clean_samples(session, member.biobankId):
contamination_category = GenomicContaminationCategory.TERMINAL_NO_EXTRACT
return contamination_category
def _set_metrics_array_data_file_paths(self, row: dict) -> dict:
gc_site_bucket_map = config.getSettingJson(config.GENOMIC_GC_SITE_BUCKET_MAP, {})
site_id = self.file_obj.fileName.split('_')[0].lower()
gc_bucket_name = gc_site_bucket_map.get(site_id)
gc_bucket = config.getSetting(gc_bucket_name, None)
if not gc_bucket:
return row
for file_def in array_file_types_attributes:
if file_def['required']:
if 'idat' in file_def["file_type"]:
file_path = f'gs://{gc_bucket}/Genotyping_sample_raw_data/{row["chipwellbarcode"]}' + \
f'_{file_def["file_type"]}'
else:
file_path = f'gs://{gc_bucket}/Genotyping_sample_raw_data/{row["chipwellbarcode"]}.' + \
f'{file_def["file_type"]}'
row[file_def['file_path_attribute']] = file_path
return row
def _set_metrics_wgs_data_file_paths(self, row: dict) -> dict:
gc_site_bucket_map = config.getSettingJson(config.GENOMIC_GC_SITE_BUCKET_MAP, {})
site_id = self.file_obj.fileName.split('_')[0].lower()
gc_bucket_name = gc_site_bucket_map.get(site_id)
gc_bucket = config.getSetting(gc_bucket_name, None)
if not gc_bucket:
return row
for file_def in wgs_file_types_attributes:
if file_def['required']:
file_path = f'gs://{gc_bucket}/{genome_center_datafile_prefix_map[site_id][file_def["file_type"]]}/' + \
f'{site_id.upper()}_{row["biobankid"]}_{row["sampleid"]}_{row["limsid"]}_1.' + \
f'{file_def["file_type"]}'
row[file_def['file_path_attribute']] = file_path
return row
def _update_member_state_after_aw2(self, member: GenomicSetMember):
if member.genomeType == 'aou_array':
ready_signal = 'gem-ready'
elif member.genomeType == 'aou_wgs':
ready_signal = 'cvl-ready'
else:
# Don't update state for investigation genome types
return
next_state = GenomicStateHandler.get_new_state(member.genomicWorkflowState, signal=ready_signal)
if next_state and next_state != member.genomicWorkflowState:
self.member_dao.update_member_workflow_state(member, next_state)
class GenomicFileValidator:
"""
    This class validates files submitted by the Genomic Centers
"""
GENOME_TYPE_MAPPINGS = {
'gen': GENOME_TYPE_ARRAY,
'seq': GENOME_TYPE_WGS,
}
def __init__(self, filename=None, data=None, schema=None, job_id=None, controller=None):
self.filename = filename
self.data_to_validate = data
self.valid_schema = schema
self.job_id = job_id
self.genome_type = None
self.controller = controller
self.gc_site_id = None
self.GC_METRICS_SCHEMAS = {
GENOME_TYPE_WGS: (
"biobankid",
"sampleid",
"biobankidsampleid",
"limsid",
"meancoverage",
"genomecoverage",
"aouhdrcoverage",
"contamination",
'samplesource',
'mappedreadspct',
"sexconcordance",
"sexploidy",
"alignedq30bases",
"arrayconcordance",
"processingstatus",
"notes",
"genometype"
),
GENOME_TYPE_ARRAY: (
"biobankid",
"sampleid",
"biobankidsampleid",
"limsid",
"chipwellbarcode",
"callrate",
"sexconcordance",
"contamination",
'samplesource',
"processingstatus",
"notes",
"pipelineid",
"genometype"
),
}
self.VALID_CVL_FACILITIES = ('rdr', 'co', 'uw', 'bcm')
self.CVL_ANALYSIS_TYPES = ('hdrv1', 'pgxv1')
self.VALID_GENOME_CENTERS = ('uw', 'bam', 'bcm', 'bi', 'jh', 'rdr')
self.DRC_BROAD = 'drc_broad'
self.AW1_MANIFEST_SCHEMA = (
"packageid",
"biobankidsampleid",
"boxstorageunitid",
"boxid/plateid",
"wellposition",
"sampleid",
"parentsampleid",
"collectiontubeid",
"matrixid",
"collectiondate",
"biobankid",
"sexatbirth",
"age",
"nystate(y/n)",
"sampletype",
"treatments",
"quantity(ul)",
"totalconcentration(ng/ul)",
"totaldna(ng)",
"visitdescription",
"samplesource",
"study",
"trackingnumber",
"contact",
"email",
"studypi",
"sitename",
"genometype",
"failuremode",
"failuremodedesc"
)
self.GEM_A2_SCHEMA = (
"biobankid",
"sampleid",
"success",
"dateofimport",
)
self.GEM_METRICS_SCHEMA = (
"biobankid",
"sampleid",
"ancestryloopresponse",
"availableresults",
"resultsreleasedat",
)
self.CVL_W2_SCHEMA = (
"genomicsetname",
"biobankid",
"sexatbirth",
"nyflag",
"siteid",
"secondaryvalidation",
"datesubmitted",
"testname",
)
self.CVL_W2SC_SCHEMA = (
"biobankid",
"sampleid",
)
self.CVL_W3NS_SCHEMA = (
"biobankid",
"sampleid",
"unavailablereason"
)
self.CVL_W3SC_SCHEMA = (
"biobankid",
"sampleid",
"cvlsecondaryconffailure"
)
self.CVL_W3SS_SCHEMA = (
"biobankid",
"sampleid",
"packageid",
"version",
"boxstorageunitid",
"boxid/plateid",
"wellposition",
"cvlsampleid",
"parentsampleid",
"collectiontubeid",
"matrixid",
"collectiondate",
"sexatbirth",
"age",
"nystate(y/n)",
"sampletype",
"treatments",
"quantity(ul)",
"totalconcentration(ng/ul)",
"totaldna(ng)",
"visitdescription",
"samplesource",
"study",
"trackingnumber",
"contact",
"email",
"studypi",
"sitename",
"genometype",
"failuremode",
"failuremodedesc"
)
self.CVL_W4WR_SCHEMA = (
"biobankid",
"sampleid",
"healthrelateddatafilename",
"clinicalanalysistype"
)
self.CVL_W5NF_SCHEMA = (
"biobankid",
"sampleid",
"requestreason",
"requestreasonfree",
"healthrelateddatafilename",
"clinicalanalysistype"
)
self.AW4_ARRAY_SCHEMA = (
"biobankid",
"sampleid",
"sexatbirth",
"siteid",
"redidatpath",
"redidatmd5path",
"greenidatpath",
"greenidatmd5path",
"vcfpath",
"vcfindexpath",
"researchid",
"qcstatus",
"drcsexconcordance",
"drccallrate",
"passtoresearchpipeline"
)
self.AW4_WGS_SCHEMA = (
"biobankid",
"sampleid",
"sexatbirth",
"siteid",
"vcfhfpath",
"vcfhfmd5path",
"vcfhfindexpath",
"vcfrawpath",
"vcfrawmd5path",
"vcfrawindexpath",
"crampath",
"crammd5path",
"craipath",
"gvcfpath",
"gvcfmd5path",
"researchid",
"qcstatus",
"drcsexconcordance",
"drccontamination",
"drcmeancoverage",
"drcfpconcordance",
"passtoresearchpipeline"
)
self.AW5_WGS_SCHEMA = {
"biobankid",
"sampleid",
"biobankidsampleid",
"sexatbirth",
"siteid",
"vcfhf",
"vcfhfindex",
"vcfhfmd5",
"vcfhfbasename",
"vcfhfmd5hash",
"vcfraw",
"vcfrawindex",
"vcfrawmd5",
"vcfrawbasename",
"vcfrawmd5hash",
"cram",
"crammd5",
"crai",
"crambasename",
"crammd5hash",
"gvcf",
"gvcfmd5",
"gvcfbasename",
"gvcfmd5hash",
}
self.AW5_ARRAY_SCHEMA = {
"biobankid",
"sampleid",
"biobankidsampleid",
"sexatbirth",
"siteid",
"redidat",
"redidatmd5",
"redidatbasename",
"redidatmd5hash",
"greenidat",
"greenidatbasename",
"greenidatmd5hash",
"greenidatmd5",
"vcf",
"vcfindex",
"vcfmd5",
"vcfbasename",
"vcfmd5hash",
}
self.values_for_validation = {
GenomicJob.METRICS_INGESTION: {
GENOME_TYPE_ARRAY: {
'pipelineid': ['cidr_egt_1', 'original_egt']
},
},
}
def set_genome_type(self):
if self.job_id in [GenomicJob.METRICS_INGESTION] and self.filename:
file_type = self.filename.lower().split("_")[2]
self.genome_type = self.GENOME_TYPE_MAPPINGS[file_type]
def set_gc_site_id(self, fn_component):
if fn_component and \
fn_component.lower() in self.VALID_GENOME_CENTERS and \
self.job_id in [
GenomicJob.METRICS_INGESTION,
GenomicJob.AW1_MANIFEST,
GenomicJob.AW1C_INGEST,
GenomicJob.AW1CF_INGEST,
GenomicJob.AW1F_MANIFEST
]:
self.gc_site_id = fn_component
elif self.job_id in [
GenomicJob.AW4_ARRAY_WORKFLOW,
GenomicJob.AW4_WGS_WORKFLOW,
GenomicJob.AW5_ARRAY_MANIFEST,
GenomicJob.AW5_WGS_MANIFEST
]:
self.gc_site_id = self.DRC_BROAD
def validate_ingestion_file(self, *, filename, data_to_validate):
"""
Procedure to validate an ingestion file
:param filename:
:param data_to_validate:
:return: result code
"""
self.filename = filename
self.set_genome_type()
file_processed = self.controller. \
file_processed_dao.get_record_from_filename(filename)
# validates filenames for each job
validated_filename = self.validate_filename(filename)
if not validated_filename:
self.controller.create_incident(
source_job_run_id=self.controller.job_run.id,
source_file_processed_id=file_processed.id,
code=GenomicIncidentCode.FILE_VALIDATION_INVALID_FILE_NAME.name,
message=f"{self.job_id.name}: File name {filename.split('/')[1]} has failed validation due to an"
f"incorrect file name.",
slack=True,
submitted_gc_site_id=self.gc_site_id,
manifest_file_name=self.filename
)
return GenomicSubProcessResult.INVALID_FILE_NAME
# validates values in fields if specified for job
values_validation_failed, message = self.validate_values(data_to_validate)
if values_validation_failed:
self.controller.create_incident(
source_job_run_id=self.controller.job_run.id,
source_file_processed_id=file_processed.id,
code=GenomicIncidentCode.FILE_VALIDATION_FAILED_VALUES.name,
message=message,
slack=True,
submitted_gc_site_id=self.gc_site_id,
manifest_file_name=self.filename
)
return GenomicSubProcessResult.ERROR
# validates file structure rules
struct_valid_result, missing_fields, extra_fields, expected = self._check_file_structure_valid(
data_to_validate['fieldnames'])
if not struct_valid_result:
slack = True
invalid_message = f"{self.job_id.name}: File structure of {filename} is not valid."
if extra_fields:
invalid_message += f" Extra fields: {', '.join(extra_fields)}"
if missing_fields:
invalid_message += f" Missing fields: {', '.join(missing_fields)}"
if len(missing_fields) == len(expected):
slack = False
self.controller.create_incident(
source_job_run_id=self.controller.job_run.id,
source_file_processed_id=file_processed.id,
code=GenomicIncidentCode.FILE_VALIDATION_FAILED_STRUCTURE.name,
message=invalid_message,
slack=slack,
submitted_gc_site_id=self.gc_site_id,
manifest_file_name=self.filename
)
return GenomicSubProcessResult.INVALID_FILE_STRUCTURE
return GenomicSubProcessResult.SUCCESS
def validate_filename(self, filename):
"""
Applies a naming rule to an arbitrary filename
Naming rules are defined as local functions and
        mapped to a Genomic Job ID in the ingestion_name_rules dict.
:param filename: passed to each name rule as 'fn'
:return: boolean
"""
filename_components = [x.lower() for x in filename.split('/')[-1].split("_")]
self.set_gc_site_id(filename_components[0])
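        # Illustrative example (assumed filename shape):
        # 'bucket/JH_AoU_SEQ_DataManifest_2020-10-12.csv' -> ['jh', 'aou', 'seq', 'datamanifest', '2020-10-12.csv']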
# Naming Rule Definitions
def gc_validation_metrics_name_rule():
"""GC metrics file name rule"""
return (
filename_components[0] in self.VALID_GENOME_CENTERS and
filename_components[1] == 'aou' and
filename_components[2] in ('seq', 'gen') and
filename.lower().endswith('csv')
)
def bb_to_gc_manifest_name_rule():
"""Biobank to GCs manifest name rule"""
return (
filename_components[0] in self.VALID_GENOME_CENTERS and
filename_components[1] == 'aou' and
filename_components[2] in ('seq', 'gen') and
filename.lower().endswith('csv')
)
def aw1f_manifest_name_rule():
"""Biobank to GCs Failure (AW1F) manifest name rule"""
return (
len(filename_components) == 5 and
filename_components[0] in self.VALID_GENOME_CENTERS and
filename_components[1] == 'aou' and
filename_components[2] in ('seq', 'gen') and
re.search(r"pkg-[0-9]{4}-[0-9]{5,}$",
filename_components[3]) is not None and
filename_components[4] == 'failure.csv' and
filename.lower().endswith('csv')
)
def cvl_w2sc_manifest_name_rule():
"""
CVL W2SC (secondary confirmation) manifest name rule
"""
return (
len(filename_components) == 5 and
filename_components[0] in self.VALID_CVL_FACILITIES and
filename_components[1] == 'aou' and
filename_components[2] == 'cvl' and
filename_components[3] == 'w2sc' and
filename.lower().endswith('csv')
)
def cvl_w3ns_manifest_name_rule():
"""
CVL W3NS manifest name rule
"""
return (
len(filename_components) == 5 and
filename_components[0] in self.VALID_CVL_FACILITIES and
filename_components[1] == 'aou' and
filename_components[2] == 'cvl' and
filename_components[3] == 'w3ns' and
filename.lower().endswith('csv')
)
def cvl_w3sc_manifest_name_rule():
"""
CVL W3SC manifest name rule
"""
return (
len(filename_components) == 5 and
filename_components[0] in self.VALID_CVL_FACILITIES and
filename_components[1] == 'aou' and
filename_components[2] == 'cvl' and
filename_components[3] == 'w3sc' and
filename.lower().endswith('csv')
)
def cvl_w3ss_manifest_name_rule():
"""
CVL W3SS manifest name rule
"""
return (
len(filename_components) == 4 and
filename_components[0] in self.VALID_CVL_FACILITIES and
filename_components[1] == 'aou' and
filename_components[2] == 'cvl' and
'pkg' in filename_components[3] and
filename.lower().endswith('csv')
)
def cvl_w4wr_manifest_name_rule():
"""
CVL W4WR manifest name rule
"""
return (
len(filename_components) == 6 and
filename_components[0] in self.VALID_CVL_FACILITIES and
filename_components[1] == 'aou' and
filename_components[2] == 'cvl' and
filename_components[3] == 'w4wr' and
filename_components[4] in
[k.lower() for k in ResultsModuleType.to_dict().keys()]
and filename.lower().endswith('csv')
)
def cvl_w5nf_manifest_name_rule():
"""
CVL W5NF manifest name rule
"""
return (
len(filename_components) == 7 and
filename_components[0] in self.VALID_CVL_FACILITIES and
filename_components[1] == 'aou' and
filename_components[2] == 'cvl' and
filename_components[3] == 'w5nf' and
                filename_components[4] in [k.lower() for k in ResultsModuleType.to_dict().keys()] and
                filename.lower().endswith('csv')
)
def gem_a2_manifest_name_rule():
"""GEM A2 manifest name rule: i.e. AoU_GEM_A2_manifest_2020-07-11-00-00-00.csv"""
return (
len(filename_components) == 5 and
filename_components[0] == 'aou' and
filename_components[1] == 'gem' and
filename_components[2] == 'a2' and
filename.lower().endswith('csv')
)
def gem_metrics_name_rule():
"""GEM Metrics name rule: i.e. AoU_GEM_metrics_aggregate_2020-07-11-00-00-00.csv"""
return (
filename_components[0] == 'aou' and
filename_components[1] == 'gem' and
filename_components[2] == 'metrics' and
filename.lower().endswith('csv')
)
def aw4_arr_manifest_name_rule():
"""DRC Broad AW4 Array manifest name rule: i.e. AoU_DRCB_GEN_2020-07-11-00-00-00.csv"""
return (
filename_components[0] == 'aou' and
filename_components[1] == 'drcb' and
filename_components[2] == 'gen' and
filename.lower().endswith('csv')
)
def aw4_wgs_manifest_name_rule():
"""DRC Broad AW4 WGS manifest name rule: i.e. AoU_DRCB_SEQ_2020-07-11-00-00-00.csv"""
return (
filename_components[0] == 'aou' and
filename_components[1] == 'drcb' and
filename_components[2] == 'seq' and
filename.lower().endswith('csv')
)
def aw5_wgs_manifest_name_rule():
            # No naming convention is defined yet; add one here if one is established in the future.
return filename.lower().endswith('csv')
def aw5_array_manifest_name_rule():
            # No naming convention is defined yet; add one here if one is established in the future.
return filename.lower().endswith('csv')
ingestion_name_rules = {
GenomicJob.METRICS_INGESTION: gc_validation_metrics_name_rule,
GenomicJob.AW1_MANIFEST: bb_to_gc_manifest_name_rule,
GenomicJob.AW1F_MANIFEST: aw1f_manifest_name_rule,
GenomicJob.GEM_A2_MANIFEST: gem_a2_manifest_name_rule,
GenomicJob.AW4_ARRAY_WORKFLOW: aw4_arr_manifest_name_rule,
GenomicJob.AW4_WGS_WORKFLOW: aw4_wgs_manifest_name_rule,
GenomicJob.GEM_METRICS_INGEST: gem_metrics_name_rule,
GenomicJob.AW5_WGS_MANIFEST: aw5_wgs_manifest_name_rule,
GenomicJob.AW5_ARRAY_MANIFEST: aw5_array_manifest_name_rule,
GenomicJob.CVL_W2SC_WORKFLOW: cvl_w2sc_manifest_name_rule,
GenomicJob.CVL_W3NS_WORKFLOW: cvl_w3ns_manifest_name_rule,
GenomicJob.CVL_W3SC_WORKFLOW: cvl_w3sc_manifest_name_rule,
GenomicJob.CVL_W3SS_WORKFLOW: cvl_w3ss_manifest_name_rule,
GenomicJob.CVL_W4WR_WORKFLOW: cvl_w4wr_manifest_name_rule,
GenomicJob.CVL_W5NF_WORKFLOW: cvl_w5nf_manifest_name_rule
}
try:
is_valid_filename = ingestion_name_rules[self.job_id]()
return is_valid_filename
except KeyError:
return GenomicSubProcessResult.ERROR
def validate_values(self, data):
is_invalid, message = False, None
cleaned_fieldnames = [self._clean_field_name(fieldname) for fieldname in data['fieldnames']]
try:
if self.genome_type:
values_to_check = self.values_for_validation[self.job_id][self.genome_type]
else:
values_to_check = self.values_for_validation[self.job_id]
except KeyError:
return is_invalid, message
for field_name, field_values in values_to_check.items():
if field_name not in cleaned_fieldnames:
continue
pos = cleaned_fieldnames.index(field_name)
for row in data['rows']:
value_check = list(row.values())[pos]
if value_check not in field_values:
message = f"{self.job_id.name}: Value for {data['fieldnames'][pos]} is invalid: {value_check}"
is_invalid = True
return is_invalid, message
return is_invalid, message
@staticmethod
def _clean_field_name(fieldname):
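        # e.g. '\ufeffSample_ID ' -> 'sampleid' (lowercases, then strips the BOM, spaces and underscores)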
return fieldname.lower().replace('\ufeff', '').replace(' ', '').replace('_', '')
def _check_file_structure_valid(self, fields):
"""
Validates the structure of the CSV against a defined set of columns.
        :param fields: the field names from the CSV file.
        :return: tuple of (structure_is_valid, missing_fields, extra_fields, expected_schema).
"""
missing_fields, extra_fields = None, None
if not self.valid_schema:
self.valid_schema = self._set_schema()
cases = tuple([self._clean_field_name(field) for field in fields])
all_file_columns_valid = all([c in self.valid_schema for c in cases])
all_expected_columns_in_file = all([c in cases for c in self.valid_schema])
if not all_file_columns_valid:
extra_fields = list(set(cases) - set(self.valid_schema))
if not all_expected_columns_in_file:
missing_fields = list(set(self.valid_schema) - set(cases))
return \
all([all_file_columns_valid, all_expected_columns_in_file]), \
missing_fields, \
extra_fields, \
self.valid_schema
def _set_schema(self):
"""
Sets schema via the job id
:return: schema_to_validate,
(tuple from the CSV_SCHEMA or result code of INVALID_FILE_NAME).
"""
try:
if self.job_id == GenomicJob.METRICS_INGESTION:
return self.GC_METRICS_SCHEMAS[self.genome_type]
if self.job_id == GenomicJob.AW1_MANIFEST:
return self.AW1_MANIFEST_SCHEMA
if self.job_id == GenomicJob.GEM_A2_MANIFEST:
return self.GEM_A2_SCHEMA
if self.job_id == GenomicJob.AW1F_MANIFEST:
return self.AW1_MANIFEST_SCHEMA # AW1F and AW1 use same schema
if self.job_id == GenomicJob.GEM_METRICS_INGEST:
return self.GEM_METRICS_SCHEMA
if self.job_id == GenomicJob.AW4_ARRAY_WORKFLOW:
return self.AW4_ARRAY_SCHEMA
if self.job_id == GenomicJob.AW4_WGS_WORKFLOW:
return self.AW4_WGS_SCHEMA
if self.job_id in (GenomicJob.AW1C_INGEST, GenomicJob.AW1CF_INGEST):
return self.AW1_MANIFEST_SCHEMA
if self.job_id == GenomicJob.AW5_WGS_MANIFEST:
self.genome_type = self.GENOME_TYPE_MAPPINGS['seq']
return self.AW5_WGS_SCHEMA
if self.job_id == GenomicJob.AW5_ARRAY_MANIFEST:
self.genome_type = self.GENOME_TYPE_MAPPINGS['gen']
return self.AW5_ARRAY_SCHEMA
if self.job_id == GenomicJob.CVL_W2SC_WORKFLOW:
return self.CVL_W2SC_SCHEMA
if self.job_id == GenomicJob.CVL_W3NS_WORKFLOW:
return self.CVL_W3NS_SCHEMA
if self.job_id == GenomicJob.CVL_W3SC_WORKFLOW:
return self.CVL_W3SC_SCHEMA
if self.job_id == GenomicJob.CVL_W3SS_WORKFLOW:
return self.CVL_W3SS_SCHEMA
if self.job_id == GenomicJob.CVL_W4WR_WORKFLOW:
return self.CVL_W4WR_SCHEMA
if self.job_id == GenomicJob.CVL_W5NF_WORKFLOW:
return self.CVL_W5NF_SCHEMA
except (IndexError, KeyError):
return GenomicSubProcessResult.ERROR
class GenomicFileMover:
"""
This utility class moves files in the bucket by copying into an archive folder
and deleting the old instance.
"""
def __init__(self, archive_folder=None):
self.archive_folder = archive_folder
def archive_file(self, file_obj=None, file_path=None):
"""
This method moves a file to an archive
by copy and delete
        :param file_obj: a genomic_file_processed object to move
        :param file_path: path of the file to move when no file_obj is provided
        :return:
"""
source_path = file_obj.filePath if file_obj else file_path
file_name = source_path.split('/')[-1]
archive_path = source_path.replace(file_name,
f"{self.archive_folder}/"
f"{file_name}")
try:
copy_cloud_file(source_path, archive_path)
delete_cloud_file(source_path)
except FileNotFoundError:
logging.error(f"No file found at '{file_obj.filePath}'")
class GenomicReconciler:
""" This component handles reconciliation between genomic datasets """
def __init__(self, run_id, job_id, archive_folder=None, file_mover=None,
bucket_name=None, storage_provider=None, controller=None):
self.run_id = run_id
self.job_id = job_id
self.bucket_name = bucket_name
self.archive_folder = archive_folder
self.cvl_file_name = None
self.file_list = None
self.ready_signal = None
# Dao components
self.member_dao = GenomicSetMemberDao()
self.metrics_dao = GenomicGCValidationMetricsDao()
self.file_dao = GenomicFileProcessedDao()
self.data_file_dao = GenomicGcDataFileDao()
self.data_file_missing_dao = GenomicGcDataFileMissingDao()
# Other components
self.file_mover = file_mover
self.storage_provider = storage_provider
self.controller = controller
def process_missing_data(self, metric, missing_data_files, genome_type):
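        # File types listed under GENOMIC_SKIP_MISSING_FILETYPES for this genome type are
        # filtered out before an incident is created for any remaining missing data files.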
missing_files_config = config.getSettingJson(config.GENOMIC_SKIP_MISSING_FILETYPES, {})
missing_files_config = missing_files_config.get(genome_type)
if missing_files_config:
            if not isinstance(missing_files_config, list):
                missing_files_config = list(missing_files_config)
missing_data_files = [
x for x in list(missing_data_files) if x not in missing_files_config
]
if missing_data_files:
file = self.file_dao.get(metric.genomicFileProcessedId)
member = self.member_dao.get(metric.genomicSetMemberId)
description = f"{self.job_id.name}: The following AW2 manifests are missing data files."
description += f"\nGenomic Job Run ID: {self.run_id}"
file_list = '\n'.join([mf for mf in missing_data_files])
description += f"\nManifest File: {file.fileName}"
description += "\nMissing Data File(s):"
description += f"\n{file_list}"
self.controller.create_incident(
source_job_run_id=self.run_id,
source_file_processed_id=file.id,
code=GenomicIncidentCode.MISSING_FILES.name,
message=description,
genomic_set_member_id=member.id,
biobank_id=member.biobankId,
sample_id=member.sampleId if member.sampleId else "",
collection_tube_id=member.collectionTubeId if member.collectionTubeId else "",
slack=True
)
def generate_cvl_reconciliation_report(self):
"""
The main method for the CVL Reconciliation report,
outputs report file to the cvl subfolder and updates
genomic_set_member
:return: result code
"""
members = self.member_dao.get_members_for_cvl_reconciliation()
if members:
cvl_subfolder = getSetting(GENOMIC_CVL_RECONCILIATION_REPORT_SUBFOLDER)
self.cvl_file_name = f"{cvl_subfolder}/cvl_report_{self.run_id}.csv"
self._write_cvl_report_to_file(members)
self.controller.execute_cloud_task({
'member_ids': [m.id for m in members],
'field': 'reconcileCvlJobRunId',
'value': self.run_id,
'is_job_run': True,
}, 'genomic_set_member_update_task')
return GenomicSubProcessResult.SUCCESS
return GenomicSubProcessResult.NO_FILES
def update_report_states_for_consent_removal(self, workflow_states):
"""
Updates report states if gror or primary consent is not yes
:param workflow_states: list of GenomicWorkflowStates
"""
# Get unconsented members to update
unconsented_gror_members = self.member_dao.get_unconsented_gror_or_primary(workflow_states)
# update each member with the new state and withdrawal time
for member in unconsented_gror_members:
new_state = GenomicStateHandler.get_new_state(member.genomicWorkflowState,
signal='unconsented')
            if new_state is not None and new_state != member.genomicWorkflowState:
self.member_dao.update_member_workflow_state(member, new_state)
# Handle withdrawal (gror/primary consent) for reportConsentRemovalDate
removal_date = self.member_dao.get_consent_removal_date(member)
if removal_date:
self.member_dao.update_report_consent_removal_date(member, removal_date)
def update_report_state_for_reconsent(self, last_run_time):
"""
This code is not currently executed, the reconsent has been deferred.
:param last_run_time:
:return:
"""
# Get reconsented members to update (consent > last run time of job_id)
reconsented_gror_members = self.member_dao.get_reconsented_gror_since_date(last_run_time)
# update each member with the new state
for member in reconsented_gror_members:
new_state = GenomicStateHandler.get_new_state(member.genomicWorkflowState,
signal='reconsented')
            if new_state is not None and new_state != member.genomicWorkflowState:
self.member_dao.update_member_workflow_state(member, new_state)
self.member_dao.update_report_consent_removal_date(member, None)
def _write_cvl_report_to_file(self, members):
"""
writes data to csv file in bucket
:param members:
:return: result code
"""
try:
# extract only columns we need
cvl_columns = ('biobank_id', 'sample_id', 'member_id')
report_data = ((m.biobankId, m.sampleId, m.id) for m in members)
# Use SQL exporter
exporter = SqlExporter(self.bucket_name)
with exporter.open_cloud_writer(self.cvl_file_name) as writer:
writer.write_header(cvl_columns)
writer.write_rows(report_data)
return GenomicSubProcessResult.SUCCESS
except RuntimeError:
return GenomicSubProcessResult.ERROR
class GenomicBiobankSamplesCoupler:
"""This component creates the source data for Cohot 3:
new genomic set and members from the biobank samples pipeline.
Class uses the manifest handler to create and upload a manifest"""
_SEX_AT_BIRTH_CODES = {
'male': 'M',
'female': 'F',
'none_intersex': 'NA'
}
_VALIDATION_FLAGS = (GenomicValidationFlag.INVALID_WITHDRAW_STATUS,
GenomicValidationFlag.INVALID_SUSPENSION_STATUS,
GenomicValidationFlag.INVALID_CONSENT,
GenomicValidationFlag.INVALID_AGE,
GenomicValidationFlag.INVALID_SEX_AT_BIRTH)
_ARRAY_GENOME_TYPE = "aou_array"
_WGS_GENOME_TYPE = "aou_wgs"
_LR_GENOME_TYPE = "long_read"
COHORT_1_ID = "C1"
COHORT_2_ID = "C2"
COHORT_3_ID = "C3"
GenomicSampleMeta = namedtuple("GenomicSampleMeta", ["bids",
"pids",
"order_ids",
"site_ids",
"state_ids",
"sample_ids",
"valid_withdrawal_status",
"valid_suspension_status",
"gen_consents",
"valid_ages",
"sabs",
"gror",
"is_ai_an",
"origins"])
def __init__(self, run_id, controller=None):
self.samples_dao = BiobankStoredSampleDao()
self.set_dao = GenomicSetDao()
self.member_dao = GenomicSetMemberDao()
self.site_dao = SiteDao()
self.ps_dao = ParticipantSummaryDao()
self.code_dao = CodeDao()
self.run_id = run_id
self.controller = controller
self.query = GenomicQueryClass()
def create_new_genomic_participants(self, from_date):
"""
This method determines which samples to enter into the genomic system
from Cohort 3 (New Participants).
Validation is handled in the query that retrieves the newly consented
participants' samples to process.
:param: from_date : the date from which to lookup new biobank_ids
:return: result
"""
samples = self._get_new_biobank_samples(from_date)
if samples:
samples_meta = self.GenomicSampleMeta(*samples)
return self.process_samples_into_manifest(samples_meta, cohort=self.COHORT_3_ID)
else:
logging.info(f'New Participant Workflow: No new samples to process.')
return GenomicSubProcessResult.NO_FILES
def create_saliva_genomic_participants(self, local=False, _config=None):
"""
This method determines which samples to enter into
the genomic system that are saliva only, via the
config obj passed in the argument.
:param: config : options for ror consent type and denoting if sample was generated in-home or in-clinic
:return: result
"""
participants = self._get_remaining_saliva_participants(_config)
if len(participants) > 0:
return self.create_matrix_and_process_samples(participants, cohort=None, local=local, saliva=True)
else:
logging.info(
f'Saliva Participant Workflow: No participants to process.')
return GenomicSubProcessResult.NO_FILES
def create_c2_genomic_participants(self, local=False):
"""
Creates Cohort 2 Participants in the genomic system.
Validation is handled in the query.
Refactored to first pull valid participants, then pull their samples,
applying the new business logic of prioritizing
collection date & blood over saliva.
:return: result
"""
samples = self._get_remaining_c2_samples()
if len(samples) > 0:
samples_meta = self.GenomicSampleMeta(*samples)
return self.process_samples_into_manifest(samples_meta, cohort=self.COHORT_2_ID, local=local)
else:
logging.info(f'Cohort 2 Participant Workflow: No participants to process.')
return GenomicSubProcessResult.NO_FILES
def create_c1_genomic_participants(self):
"""
Creates Cohort 1 Participants in the genomic system using reconsent.
Validation is handled in the query that retrieves the newly consented
participants. Only valid participants are currently sent.
:param: from_date : the date from which to lookup new participants
:return: result
"""
samples = self._get_remaining_c1_samples()
if len(samples) > 0:
samples_meta = self.GenomicSampleMeta(*samples)
return self.process_samples_into_manifest(samples_meta, cohort=self.COHORT_1_ID)
else:
logging.info(f'Cohort 1 Participant Workflow: No participants to process.')
return GenomicSubProcessResult.NO_FILES
def create_long_read_genomic_participants(self, limit=None):
"""
Create long_read participants that are already in the genomic system,
based on downstream filters.
:return:
"""
participants = self._get_long_read_participants(limit)
if len(participants) > 0:
return self.process_genomic_members_into_manifest(
participants=participants,
genome_type=self._LR_GENOME_TYPE
)
logging.info(f'Long Read Participant Workflow: No participants to process.')
return GenomicSubProcessResult.NO_FILES
def process_genomic_members_into_manifest(self, *, participants, genome_type):
"""
Compiles AW0 Manifest from already submitted genomic members.
:param participants:
:param genome_type
:return:
"""
new_genomic_set = self._create_new_genomic_set()
processed_members = []
count = 0
# duplicate genomic set members
with self.member_dao.session() as session:
for i, participant in enumerate(participants):
dup_member_obj = GenomicSetMember(
biobankId=participant.biobankId,
genomicSetId=new_genomic_set.id,
participantId=participant.participantId,
nyFlag=participant.nyFlag,
sexAtBirth=participant.sexAtBirth,
collectionTubeId=participant.collectionTubeId,
validationStatus=participant.validationStatus,
validationFlags=participant.validationFlags,
ai_an=participant.ai_an,
genomeType=genome_type,
genomicWorkflowState=GenomicWorkflowState.LR_PENDING,
genomicWorkflowStateStr=GenomicWorkflowState.LR_PENDING.name,
participantOrigin=participant.participantOrigin,
created=clock.CLOCK.now(),
modified=clock.CLOCK.now(),
)
processed_members.append(dup_member_obj)
count = i + 1
if count % 100 == 0:
self.genomic_members_insert(
members=processed_members,
session=session,
)
processed_members.clear()
if count and processed_members:
self.genomic_members_insert(
members=processed_members,
session=session,
)
return new_genomic_set.id
def process_samples_into_manifest(self, samples_meta, cohort, saliva=False, local=False):
"""
Compiles AW0 Manifest from samples list.
:param samples_meta:
:param cohort:
:param saliva:
:param local: overrides automatic push to bucket
:return: job result code
"""
logging.info(f'{self.__class__.__name__}: Processing new biobank_ids {samples_meta.bids}')
new_genomic_set = self._create_new_genomic_set()
processed_array_wgs = []
count = 0
bids = []
# Create genomic set members
with self.member_dao.session() as session:
for i, bid in enumerate(samples_meta.bids):
# Don't write participant to table if no sample
if samples_meta.sample_ids[i] == 0:
continue
logging.info(f'Validating sample: {samples_meta.sample_ids[i]}')
validation_criteria = (
samples_meta.valid_withdrawal_status[i],
samples_meta.valid_suspension_status[i],
samples_meta.gen_consents[i],
samples_meta.valid_ages[i],
samples_meta.sabs[i] in self._SEX_AT_BIRTH_CODES.values()
)
valid_flags = self._calculate_validation_flags(validation_criteria)
logging.info(f'Creating genomic set members for PID: {samples_meta.pids[i]}')
# Get NY flag for collected-site
if samples_meta.site_ids[i]:
_ny_flag = self._get_new_york_flag_from_site(samples_meta.site_ids[i])
# Get NY flag for mail-kit
elif samples_meta.state_ids[i]:
_ny_flag = self._get_new_york_flag_from_state_id(samples_meta.state_ids[i])
# default ny flag if no state id
elif not samples_meta.state_ids[i]:
_ny_flag = 0
else:
logging.warning(f'No collection site or mail kit state. Skipping biobank_id: {bid}')
continue
new_array_member_obj = GenomicSetMember(
biobankId=bid,
genomicSetId=new_genomic_set.id,
participantId=samples_meta.pids[i],
nyFlag=_ny_flag,
sexAtBirth=samples_meta.sabs[i],
collectionTubeId=samples_meta.sample_ids[i],
validationStatus=(GenomicSetMemberStatus.INVALID if len(valid_flags) > 0
else GenomicSetMemberStatus.VALID),
validationFlags=valid_flags,
ai_an='Y' if samples_meta.is_ai_an[i] else 'N',
genomeType=self._ARRAY_GENOME_TYPE,
genomicWorkflowState=GenomicWorkflowState.AW0_READY,
genomicWorkflowStateStr=GenomicWorkflowState.AW0_READY.name,
participantOrigin=samples_meta.origins[i],
created=clock.CLOCK.now(),
modified=clock.CLOCK.now(),
)
# Also create a WGS member
new_wgs_member_obj = deepcopy(new_array_member_obj)
new_wgs_member_obj.genomeType = self._WGS_GENOME_TYPE
bids.append(bid)
processed_array_wgs.extend([new_array_member_obj, new_wgs_member_obj])
count = i + 1
if count % 1000 == 0:
self.genomic_members_insert(
members=processed_array_wgs,
session=session
)
processed_array_wgs.clear()
bids.clear()
if count and processed_array_wgs:
self.genomic_members_insert(
members=processed_array_wgs,
session=session
)
# Create & transfer the Biobank Manifest based on the new genomic set
try:
if local:
return new_genomic_set.id
else:
create_and_upload_genomic_biobank_manifest_file(new_genomic_set.id,
cohort_id=cohort,
saliva=saliva)
# Handle Genomic States for manifests
for member in self.member_dao.get_members_from_set_id(new_genomic_set.id):
new_state = GenomicStateHandler.get_new_state(member.genomicWorkflowState,
signal='manifest-generated')
                if new_state is not None and new_state != member.genomicWorkflowState:
self.member_dao.update_member_workflow_state(member, new_state)
logging.info(f'{self.__class__.__name__}: Genomic set members created ')
return GenomicSubProcessResult.SUCCESS
except RuntimeError:
return GenomicSubProcessResult.ERROR
def create_matrix_and_process_samples(self, participants, cohort, local, saliva=False):
"""
Wrapper method for processing participants for C1 and C2 manifests
:param cohort:
:param participants:
:param local:
:param saliva:
:return:
"""
participant_matrix = self.GenomicSampleMeta(*participants)
for i, _bid in enumerate(participant_matrix.bids):
            logging.info(f'Retrieving samples for PID: {participant_matrix.pids[i]}')
blood_sample_data = None
if not saliva:
blood_sample_data = self._get_usable_blood_sample(pid=participant_matrix.pids[i],
bid=_bid)
saliva_sample_data = self._get_usable_saliva_sample(pid=participant_matrix.pids[i],
bid=_bid)
# Determine which sample ID to use
sample_data = self._determine_best_sample(blood_sample_data, saliva_sample_data)
# update the sample id, collected site, and biobank order
if sample_data is not None:
participant_matrix.sample_ids[i] = sample_data[0]
participant_matrix.site_ids[i] = sample_data[1]
participant_matrix.order_ids[i] = sample_data[2]
else:
logging.info(f'No valid samples for pid {participant_matrix.pids[i]}.')
# insert new members and make the manifest
return self.process_samples_into_manifest(
participant_matrix,
cohort=cohort,
saliva=saliva,
local=local
)
@staticmethod
def genomic_members_insert(*, members, session):
"""
Bulk save of member for genomic_set_member
batch updating of members
:param: members
:param: session
"""
try:
session.bulk_save_objects(members)
session.commit()
except Exception as e:
raise Exception("Error occurred on genomic member insert: {0}".format(e))
def _get_new_biobank_samples(self, from_date):
"""
Retrieves BiobankStoredSample objects with `rdr_created`
after the last run of the new participant workflow job.
The query filters out participants that do not match the
genomic validation requirements.
:param: from_date
:return: list of tuples (bid, pid, biobank_identifier.value, collected_site_id)
"""
_new_samples_sql = self.query.new_biobank_samples()
params = {
"sample_status_param": SampleStatus.RECEIVED.__int__(),
"dob_param": GENOMIC_VALID_AGE,
"general_consent_param": QuestionnaireStatus.SUBMITTED.__int__(),
"ai_param": Race.AMERICAN_INDIAN_OR_ALASKA_NATIVE.__int__(),
"from_date_param": from_date.strftime("%Y-%m-%d"),
"withdrawal_param": WithdrawalStatus.NOT_WITHDRAWN.__int__(),
"suspension_param": SuspensionStatus.NOT_SUSPENDED.__int__(),
"cohort_3_param": ParticipantCohort.COHORT_3.__int__(),
"ignore_param": GenomicWorkflowState.IGNORE.__int__(),
}
with self.samples_dao.session() as session:
result = session.execute(_new_samples_sql, params).fetchall()
result = self._prioritize_samples_by_participant(result)
return list(zip(*result))[:-2] # Slicing to remove the last two columns retrieved for prioritization
def _prioritize_samples_by_participant(self, sample_results):
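        # Keeps a single preferred sample per participant_id, chosen via _determine_best_sample.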
preferred_samples = {}
for sample in sample_results:
preferred_sample = sample
previously_found_sample = preferred_samples.get(sample.participant_id, None)
if previously_found_sample is not None:
preferred_sample = self._determine_best_sample(previously_found_sample, sample)
preferred_samples[sample.participant_id] = preferred_sample
return [x for x in preferred_samples.values() if x is not None]
@staticmethod
def _determine_best_sample(sample_one, sample_two):
if sample_one is None:
return sample_two
if sample_two is None:
return sample_one
# Return the usable sample (status less than NOT_RECEIVED) if one is usable and the other isn't
if sample_one.status < int(SampleStatus.SAMPLE_NOT_RECEIVED) <= sample_two.status:
return sample_one
        elif sample_two.status < int(SampleStatus.SAMPLE_NOT_RECEIVED) <= sample_one.status:
return sample_two
elif sample_one.status >= int(SampleStatus.SAMPLE_NOT_RECEIVED) \
and sample_two.status >= int(SampleStatus.SAMPLE_NOT_RECEIVED):
return None
# Both are usable
# Return the sample by the priority of the code: 1ED04, then 1ED10, and 1SAL2 last
test_codes_by_preference = ['1ED04', '1ED10', '1SAL2'] # most desirable first
samples_by_code = {}
for sample in [sample_one, sample_two]:
samples_by_code[sample.test] = sample
for test_code in test_codes_by_preference:
if samples_by_code.get(test_code):
return samples_by_code[test_code]
logging.error(f'Should have been able to select between '
f'{sample_one.biobank_stored_sample_id} and {sample_two.biobank_stored_sample_id}')
def _get_remaining_c2_samples(self):
_c2_participant_sql = self.query.remaining_c2_participants()
params = {
"sample_status_param": SampleStatus.RECEIVED.__int__(),
"dob_param": GENOMIC_VALID_AGE,
"general_consent_param": QuestionnaireStatus.SUBMITTED.__int__(),
"withdrawal_param": WithdrawalStatus.NOT_WITHDRAWN.__int__(),
"suspension_param": SuspensionStatus.NOT_SUSPENDED.__int__(),
"cohort_param": ParticipantCohort.COHORT_2.__int__(),
"ignore_param": GenomicWorkflowState.IGNORE.__int__(),
}
with self.samples_dao.session() as session:
result = session.execute(_c2_participant_sql, params).fetchall()
result2 = self._prioritize_samples_by_participant(result)
return list(zip(*result2))[:-2]
def _get_remaining_c1_samples(self):
"""
Retrieves C1 participants and validation data.
"""
_c1_participant_sql = self.query.remaining_c1_samples()
params = {
"sample_status_param": SampleStatus.RECEIVED.__int__(),
"dob_param": GENOMIC_VALID_AGE,
"general_consent_param": QuestionnaireStatus.SUBMITTED.__int__(),
"withdrawal_param": WithdrawalStatus.NOT_WITHDRAWN.__int__(),
"suspension_param": SuspensionStatus.NOT_SUSPENDED.__int__(),
"cohort_param": ParticipantCohort.COHORT_1.__int__(),
"c1_reconsent_param": COHORT_1_REVIEW_CONSENT_YES_CODE,
"ignore_param": GenomicWorkflowState.IGNORE.__int__(),
}
with self.samples_dao.session() as session:
result = session.execute(_c1_participant_sql, params).fetchall()
result = self._prioritize_samples_by_participant(result)
return list(zip(*result))[:-2]
def _get_long_read_participants(self, limit=None):
"""
Retrieves participants based on filters that have
been denoted to use in the long read pilot program
"""
with self.member_dao.session() as session:
gsm_alias = aliased(GenomicSetMember)
result = session.query(GenomicSetMember).join(
ParticipantSummary,
GenomicSetMember.participantId == ParticipantSummary.participantId,
).join(
ParticipantRaceAnswers,
ParticipantRaceAnswers.participantId == ParticipantSummary.participantId,
).join(
Code,
ParticipantRaceAnswers.codeId == Code.codeId,
).join(
GenomicGCValidationMetrics,
GenomicSetMember.id == GenomicGCValidationMetrics.genomicSetMemberId,
).outerjoin(
gsm_alias,
sqlalchemy.and_(
gsm_alias.participantId == ParticipantSummary.participantId,
gsm_alias.genomeType == 'long_read'
)
).filter(
Code.value == 'WhatRaceEthnicity_Black',
GenomicSetMember.genomeType.in_(['aou_wgs']),
GenomicSetMember.genomicWorkflowState != GenomicWorkflowState.IGNORE,
GenomicGCValidationMetrics.ignoreFlag == 0,
GenomicGCValidationMetrics.contamination <= 0.01,
ParticipantSummary.participantOrigin == 'vibrent',
ParticipantSummary.ehrUpdateTime.isnot(None),
gsm_alias.id.is_(None),
).distinct(gsm_alias.biobankId)
if limit:
result = result.limit(limit)
return result.all()
def _get_usable_blood_sample(self, pid, bid):
"""
Select 1ED04 or 1ED10 based on max collected date
:param pid: participant_id
:param bid: biobank_id
:return: tuple(blood_collected date, blood sample, blood site, blood order)
"""
_samples_sql = self.query.usable_blood_sample()
params = {
"pid_param": pid,
"bid_param": bid,
}
with self.samples_dao.session() as session:
result = session.execute(_samples_sql, params).first()
return result
def _get_usable_saliva_sample(self, pid, bid):
"""
Select 1SAL2 based on max collected date
:param pid: participant_id
:param bid: biobank_id
:return: tuple(saliva date, saliva sample, saliva site, saliva order)
"""
_samples_sql = self.query.usable_saliva_sample()
params = {
"pid_param": pid,
"bid_param": bid,
}
with self.samples_dao.session() as session:
result = session.execute(_samples_sql, params).first()
return result
def _get_remaining_saliva_participants(self, _config):
_saliva_sql = self.query.remaining_saliva_participants(_config)
params = {
"sample_status_param": SampleStatus.RECEIVED.__int__(),
"dob_param": GENOMIC_VALID_AGE,
"general_consent_param": QuestionnaireStatus.SUBMITTED.__int__(),
"ai_param": Race.AMERICAN_INDIAN_OR_ALASKA_NATIVE.__int__(),
"withdrawal_param": WithdrawalStatus.NOT_WITHDRAWN.__int__(),
"suspension_param": SuspensionStatus.NOT_SUSPENDED.__int__(),
"ignore_param": GenomicWorkflowState.IGNORE.__int__(),
}
with self.samples_dao.session() as session:
result = session.execute(_saliva_sql, params).fetchall()
return list([list(r) for r in zip(*result)])
def _create_new_genomic_set(self):
"""Inserts a new genomic set for this run"""
attributes = {
'genomicSetName': f'new_participant_workflow_{self.run_id}',
'genomicSetCriteria': '.',
'genomicSetVersion': 1,
'genomicSetStatus': GenomicSetStatus.VALID,
}
new_set_obj = GenomicSet(**attributes)
inserted_set = self.set_dao.insert(new_set_obj)
return inserted_set
def _create_new_set_member(self, **kwargs):
"""Inserts new GenomicSetMember object"""
new_member_obj = GenomicSetMember(**kwargs)
return self.member_dao.insert(new_member_obj)
def _get_new_york_flag_from_site(self, collected_site_id):
"""
Looks up whether a collected site's state is NY
:param collected_site_id: the id of the site
:return: int (1 or 0 for NY or Not)
"""
return int(self.site_dao.get(collected_site_id).state == 'NY')
def _get_new_york_flag_from_state_id(self, state_id):
"""
Looks up whether a collected site's state is NY
:param state_id: the code ID for the state
:return: int (1 or 0 for NY or Not)
"""
return int(self.code_dao.get(state_id).value.split('_')[1] == 'NY')
def _calculate_validation_flags(self, validation_criteria):
"""
Determines validation and flags for genomic sample
:param validation_criteria:
:return: list of validation flags
"""
# Process validation flags for inserting into genomic_set_member
flags = [flag for (passing, flag) in
zip(validation_criteria, self._VALIDATION_FLAGS)
if not passing]
return flags
class ManifestDefinitionProvider:
"""
Helper class to produce the definitions for each manifest
"""
# Metadata for the various manifests
ManifestDef = namedtuple('ManifestDef',
["job_run_field",
"source_data",
"destination_bucket",
"output_filename",
"columns",
"signal",
"query",
"params"])
def __init__(
self,
job_run_id=None,
bucket_name=None,
genome_type=None,
cvl_site_id='rdr',
**kwargs
):
# Attributes
self.job_run_id = job_run_id
self.bucket_name = bucket_name
self.cvl_site_id = cvl_site_id
self.kwargs = kwargs
self.genome_type = genome_type
self.query = GenomicQueryClass(
input_manifest=self.kwargs['kwargs'].get('input_manifest'),
genome_type=self.genome_type
)
self.query_dao = GenomicQueriesDao()
self.manifest_columns_config = {
GenomicManifestTypes.GEM_A1: (
'biobank_id',
'sample_id',
"sex_at_birth",
"consent_for_ror",
"date_of_consent_for_ror",
"chipwellbarcode",
"genome_center",
),
GenomicManifestTypes.GEM_A3: (
'biobank_id',
'sample_id',
'date_of_consent_removal',
),
GenomicManifestTypes.CVL_W1IL_PGX: (
'biobank_id',
'sample_id',
'vcf_raw_path',
'vcf_raw_index_path',
'vcf_raw_md5_path',
'gvcf_path',
'gvcf_md5_path',
'cram_name',
'sex_at_birth',
'ny_flag',
'genome_center',
'consent_for_gror',
'genome_type',
'informing_loop_pgx',
'aou_hdr_coverage',
'contamination',
'sex_ploidy'
),
GenomicManifestTypes.CVL_W1IL_HDR: (
'biobank_id',
'sample_id',
'vcf_raw_path',
'vcf_raw_index_path',
'vcf_raw_md5_path',
'gvcf_path',
'gvcf_md5_path',
'cram_name',
'sex_at_birth',
'ny_flag',
'genome_center',
'consent_for_gror',
'genome_type',
'informing_loop_hdr',
'aou_hdr_coverage',
'contamination',
'sex_ploidy'
),
GenomicManifestTypes.CVL_W2W: (
'biobank_id',
'sample_id',
'date_of_consent_removal'
),
GenomicManifestTypes.CVL_W3SR: (
"biobank_id",
"sample_id",
"parent_sample_id",
"collection_tubeid",
"sex_at_birth",
"ny_flag",
"genome_type",
"site_name",
"ai_an"
),
GenomicManifestTypes.AW3_ARRAY: (
"chipwellbarcode",
"biobank_id",
"sample_id",
"biobankidsampleid",
"sex_at_birth",
"site_id",
"red_idat_path",
"red_idat_md5_path",
"green_idat_path",
"green_idat_md5_path",
"vcf_path",
"vcf_index_path",
"vcf_md5_path",
"callrate",
"sex_concordance",
"contamination",
"processing_status",
"research_id",
"sample_source",
"pipeline_id",
"ai_an",
"blocklisted",
"blocklisted_reason"
),
GenomicManifestTypes.AW3_WGS: (
"biobank_id",
"sample_id",
"biobankidsampleid",
"sex_at_birth",
"site_id",
"vcf_hf_path",
"vcf_hf_index_path",
"vcf_hf_md5_path",
"cram_path",
"cram_md5_path",
"crai_path",
"gvcf_path",
"gvcf_md5_path",
"contamination",
"sex_concordance",
"processing_status",
"mean_coverage",
"research_id",
"sample_source",
"mapped_reads_pct",
"sex_ploidy",
"ai_an",
"blocklisted",
"blocklisted_reason"
),
GenomicManifestTypes.AW2F: (
"PACKAGE_ID",
"BIOBANKID_SAMPLEID",
"BOX_STORAGEUNIT_ID",
"BOX_ID/PLATE_ID",
"WELL_POSITION",
"SAMPLE_ID",
"PARENT_SAMPLE_ID",
"COLLECTION_TUBE_ID",
"MATRIX_ID",
"COLLECTION_DATE",
"BIOBANK_ID",
"SEX_AT_BIRTH",
"AGE",
"NY_STATE_(Y/N)",
"SAMPLE_TYPE",
"TREATMENTS",
"QUANTITY_(uL)",
"TOTAL_CONCENTRATION_(ng/uL)",
"TOTAL_DNA(ng)",
"VISIT_DESCRIPTION",
"SAMPLE_SOURCE",
"STUDY",
"TRACKING_NUMBER",
"CONTACT",
"EMAIL",
"STUDY_PI",
"TEST_NAME",
"FAILURE_MODE",
"FAILURE_MODE_DESC",
"PROCESSING_STATUS",
"CONTAMINATION",
"CONTAMINATION_CATEGORY",
"CONSENT_FOR_ROR",
),
}
def _get_source_data_query(self, manifest_type):
"""
Returns the query to use for manifest's source data
:param manifest_type:
:return: query object
"""
return self.query.genomic_data_config.get(manifest_type)
def get_def(self, manifest_type):
"""
Returns the manifest definition based on manifest_type
:param manifest_type:
:return: ManifestDef()
"""
now_formatted = clock.CLOCK.now().strftime("%Y-%m-%d-%H-%M-%S")
def_config = {
GenomicManifestTypes.GEM_A1: {
'job_run_field': 'gemA1ManifestJobRunId',
'output_filename': f'{GENOMIC_GEM_A1_MANIFEST_SUBFOLDER}/AoU_GEM_A1_manifest_{now_formatted}.csv',
'signal': 'manifest-generated'
},
GenomicManifestTypes.GEM_A3: {
'job_run_field': 'gemA3ManifestJobRunId',
'output_filename': f'{GENOMIC_GEM_A3_MANIFEST_SUBFOLDER}/AoU_GEM_A3_manifest_{now_formatted}.csv',
'signal': 'manifest-generated'
},
GenomicManifestTypes.CVL_W1IL_PGX: {
'job_run_field': 'cvlW1ilPgxJobRunId',
'output_filename':
f'{CVL_W1IL_PGX_MANIFEST_SUBFOLDER}/{self.cvl_site_id.upper()}_AoU_CVL_W1IL_'
f'{ResultsModuleType.PGXV1.name}_{now_formatted}.csv',
'signal': 'manifest-generated',
'query': self.query_dao.get_data_ready_for_w1il_manifest,
'params': {
'module': 'pgx',
'cvl_id': self.cvl_site_id
}
},
GenomicManifestTypes.CVL_W1IL_HDR: {
'job_run_field': 'cvlW1ilHdrJobRunId',
'output_filename':
f'{CVL_W1IL_HDR_MANIFEST_SUBFOLDER}/{self.cvl_site_id.upper()}_AoU_CVL_W1IL_'
f'{ResultsModuleType.HDRV1.name}_{now_formatted}.csv',
'query': self.query_dao.get_data_ready_for_w1il_manifest,
'params': {
'module': 'hdr',
'cvl_id': self.cvl_site_id
}
},
GenomicManifestTypes.CVL_W2W: {
'job_run_field': 'cvlW2wJobRunId',
'output_filename':
f'{CVL_W2W_MANIFEST_SUBFOLDER}/{self.cvl_site_id.upper()}_AoU_CVL_W2W_{now_formatted}.csv',
'query': self.query_dao.get_data_ready_for_w2w_manifest,
'params': {
'cvl_id': self.cvl_site_id
}
},
GenomicManifestTypes.CVL_W3SR: {
'job_run_field': 'cvlW3srManifestJobRunID',
'output_filename': f'{CVL_W3SR_MANIFEST_SUBFOLDER}/{self.cvl_site_id.upper()}_AoU_CVL_W3SR'
f'_{now_formatted}.csv',
'query': self.query_dao.get_w3sr_records,
'params': {
'site_id': self.cvl_site_id
}
},
GenomicManifestTypes.AW3_ARRAY: {
'job_run_field': 'aw3ManifestJobRunID',
'output_filename': f'{GENOMIC_AW3_ARRAY_SUBFOLDER}/AoU_DRCV_GEN_{now_formatted}.csv',
'signal': 'bypass',
'query': self.query_dao.get_aw3_array_records,
'params': {
'genome_type': self.genome_type
}
},
GenomicManifestTypes.AW3_WGS: {
'job_run_field': 'aw3ManifestJobRunID',
'output_filename': f'{GENOMIC_AW3_WGS_SUBFOLDER}/AoU_DRCV_SEQ_{now_formatted}.csv',
'signal': 'bypass',
'query': self.query_dao.get_aw3_wgs_records,
'params': {
'genome_type': self.genome_type
}
},
GenomicManifestTypes.AW2F: {
'job_run_field': 'aw2fManifestJobRunID',
'output_filename': f'{BIOBANK_AW2F_SUBFOLDER}/GC_AoU_DataType_PKG-YYMM-xxxxxx_contamination.csv',
'signal': 'bypass'
}
}
def_config = def_config[manifest_type]
return self.ManifestDef(
job_run_field=def_config.get('job_run_field'),
source_data=self._get_source_data_query(manifest_type),
destination_bucket=f'{self.bucket_name}',
output_filename=def_config.get('output_filename'),
columns=self.manifest_columns_config[manifest_type],
signal=def_config.get('signal'),
query=def_config.get('query'),
params=def_config.get('params')
)
class ManifestCompiler:
"""
This component compiles Genomic manifests
based on definitions provided by ManifestDefinitionProvider
"""
def __init__(
self,
run_id=None,
bucket_name=None,
max_num=None,
controller=None
):
self.run_id = run_id
self.bucket_name = bucket_name
self.max_num = max_num
self.controller = controller
self.output_file_name = None
self.manifest_def = None
self.def_provider = None
# Dao components
self.member_dao = GenomicSetMemberDao()
self.metrics_dao = GenomicGCValidationMetricsDao()
self.results_workflow_dao = GenomicResultWorkflowStateDao()
def generate_and_transfer_manifest(self, manifest_type, genome_type, version=None, **kwargs):
"""
Main execution method for ManifestCompiler
:return: result dict:
"code": (i.e. SUCCESS)
"feedback_file": None or feedback file record to update,
"record_count": integer
"""
self.def_provider = ManifestDefinitionProvider(
job_run_id=self.run_id,
bucket_name=self.bucket_name,
genome_type=genome_type,
cvl_site_id=self.controller.cvl_site_id,
kwargs=kwargs
)
self.manifest_def = self.def_provider.get_def(manifest_type)
source_data = self.pull_source_data()
if not source_data:
logging.info(f'No records found for manifest type: {manifest_type}.')
return {
"code": GenomicSubProcessResult.NO_FILES,
"record_count": 0,
}
validation_failed, message = self._validate_source_data(source_data, manifest_type)
if validation_failed:
message = f'{self.controller.job_id.name}: {message}'
self.controller.create_incident(
source_job_run_id=self.run_id,
code=GenomicIncidentCode.MANIFEST_GENERATE_DATA_VALIDATION_FAILED.name,
slack=True,
message=message
)
raise RuntimeError
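        # When max_num is set and the source data exceeds it, the manifest is written in
        # numbered chunks (<name>_1.csv, <name>_2.csv, ...), each uploaded separately.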
if self.max_num and len(source_data) > self.max_num:
current_list, count = [], 0
for obj in source_data:
current_list.append(obj)
if len(current_list) == self.max_num:
count += 1
self.output_file_name = self.manifest_def.output_filename
self.output_file_name = f'{self.output_file_name.split(".csv")[0]}_{count}.csv'
file_path = f'{self.manifest_def.destination_bucket}/{self.output_file_name}'
logging.info(
f'Preparing manifest of type {manifest_type}...'
f'{file_path}'
)
self._write_and_upload_manifest(current_list)
self.controller.manifests_generated.append({
'file_path': file_path,
'record_count': len(current_list)
})
current_list.clear()
if current_list:
count += 1
self.output_file_name = self.manifest_def.output_filename
self.output_file_name = f'{self.output_file_name.split(".csv")[0]}_{count}.csv'
file_path = f'{self.manifest_def.destination_bucket}/{self.output_file_name}'
logging.info(
f'Preparing manifest of type {manifest_type}...'
f'{file_path}'
)
self._write_and_upload_manifest(current_list)
self.controller.manifests_generated.append({
'file_path': file_path,
'record_count': len(current_list)
})
else:
self.output_file_name = self.manifest_def.output_filename
# If the new manifest is a feedback manifest,
# it will have an input manifest
if "input_manifest" in kwargs.keys():
                # AW2F manifest file name is based off of the AW1 file name
if manifest_type == GenomicManifestTypes.AW2F:
new_name = kwargs['input_manifest'].filePath.split('/')[-1]
new_name = new_name.replace('.csv', f'_contamination_{version}.csv')
self.output_file_name = self.manifest_def.output_filename.replace(
"GC_AoU_DataType_PKG-YYMM-xxxxxx_contamination.csv",
f"{new_name}"
)
file_path = f'{self.manifest_def.destination_bucket}/{self.output_file_name}'
logging.info(
f'Preparing manifest of type {manifest_type}...'
f'{file_path}'
)
self._write_and_upload_manifest(source_data)
self.controller.manifests_generated.append({
'file_path': file_path,
'record_count': len(source_data)
})
for row in source_data:
sample_id = row.sampleId if hasattr(row, 'sampleId') else row.sample_id
member = self.member_dao.get_member_from_sample_id(sample_id, genome_type)
if not member:
raise NotFound(f"Cannot find genomic set member with sample ID {sample_id}")
if self.manifest_def.job_run_field:
self.controller.member_ids_for_update.append(member.id)
# Handle Genomic States for manifests
if self.manifest_def.signal != "bypass":
# genomic workflow state
new_wf_state = GenomicStateHandler.get_new_state(
member.genomicWorkflowState,
signal=self.manifest_def.signal
)
                if new_wf_state and new_wf_state != member.genomicWorkflowState:
self.member_dao.update_member_workflow_state(member, new_wf_state)
# result workflow state
cvl_manifest_data = CVLManifestData(manifest_type)
if cvl_manifest_data.is_cvl_manifest:
self.results_workflow_dao.insert_new_result_record(
member_id=member.id,
module_type=cvl_manifest_data.module_type,
state=cvl_manifest_data.result_state
)
# Updates job run field on set member
if self.controller.member_ids_for_update:
self.controller.execute_cloud_task({
'member_ids': list(set(self.controller.member_ids_for_update)),
'field': self.manifest_def.job_run_field,
'value': self.run_id,
'is_job_run': True
}, 'genomic_set_member_update_task')
return {
"code": GenomicSubProcessResult.SUCCESS,
}
def pull_source_data(self):
"""
Runs the source data query
:return: result set
"""
if self.manifest_def.query:
params = self.manifest_def.params or {}
return self.manifest_def.query(**params)
with self.member_dao.session() as session:
return session.execute(self.manifest_def.source_data).fetchall()
def _validate_source_data(self, data, manifest_type):
invalid = False
message = None
if manifest_type in [
GenomicManifestTypes.AW3_ARRAY,
GenomicManifestTypes.AW3_WGS
]:
prefix = get_biobank_id_prefix()
path_positions = []
biobank_ids, sample_ids, sex_at_birth = [], [], []
for i, col in enumerate(self.manifest_def.columns):
if 'sample_id' in col:
sample_ids = [row[i] for row in data]
if 'biobank_id' in col:
biobank_ids = [row[i] for row in data]
if 'sex_at_birth' in col:
sex_at_birth = [row[i] for row in data]
if '_path' in col:
path_positions.append(i)
needs_prefixes = any(bid for bid in biobank_ids if prefix not in bid)
if needs_prefixes:
message = 'Biobank IDs are missing correct prefix'
invalid = True
return invalid, message
biobank_ids.clear()
dup_sample_ids = {sample_id for sample_id in sample_ids if sample_ids.count(sample_id) > 1}
if dup_sample_ids:
message = f'Sample IDs {list(dup_sample_ids)} are not distinct'
invalid = True
return invalid, message
sample_ids.clear()
invalid_sex_values = any(val for val in sex_at_birth if val not in ['M', 'F', 'NA'])
if invalid_sex_values:
message = 'Invalid Sex at Birth values'
invalid = True
return invalid, message
sex_at_birth.clear()
for row in data:
for i, val in enumerate(row):
if i in path_positions and val:
if not val.startswith('gs://') \
or (val.startswith('gs://')
and len(val.split('gs://')[1].split('/')) < 3):
message = f'Path {val} is invalid formatting'
invalid = True
return invalid, message
return invalid, message
def _write_and_upload_manifest(self, source_data):
"""
writes data to csv file in bucket
:return: result code
"""
try:
# Use SQL exporter
exporter = SqlExporter(self.bucket_name)
with exporter.open_cloud_writer(self.output_file_name) as writer:
writer.write_header(self.manifest_def.columns)
writer.write_rows(source_data)
return GenomicSubProcessResult.SUCCESS
except RuntimeError:
return GenomicSubProcessResult.ERROR
class CVLManifestData:
result_state = None
module_type = ResultsModuleType.HDRV1
is_cvl_manifest = True
def __init__(self, manifest_type: GenomicManifestTypes):
self.manifest_type = manifest_type
self.get_is_cvl_manifest()
def get_is_cvl_manifest(self):
if 'cvl' not in self.manifest_type.name.lower():
self.is_cvl_manifest = False
return
self.get_module_type()
self.get_result_state()
def get_module_type(self) -> ResultsModuleType:
if 'pgx' in self.manifest_type.name.lower():
self.module_type = ResultsModuleType.PGXV1
return self.module_type
def get_result_state(self) -> ResultsWorkflowState:
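        # e.g. a manifest type named "CVL_W1IL_PGX" is looked up as workflow state "CVL_W1IL".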
manifest_name = self.manifest_type.name.rsplit('_', 1)[0] \
if self.manifest_type.name.count('_') > 1 else \
self.manifest_type.name
self.result_state = ResultsWorkflowState.lookup_by_name(manifest_name)
return self.result_state
| {
"content_hash": "0a370baa2a878ae025d3345016f18b71",
"timestamp": "",
"source": "github",
"line_count": 3886,
"max_line_length": 132,
"avg_line_length": 39.630468347915595,
"alnum_prop": 0.5572387730188826,
"repo_name": "all-of-us/raw-data-repository",
"id": "6c27289da3b7038d86ee170ad1ff74395572287e",
"size": "154004",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "rdr_service/genomic/genomic_job_components.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1866"
},
{
"name": "Mako",
"bytes": "1715"
},
{
"name": "Python",
"bytes": "17040924"
},
{
"name": "R",
"bytes": "2212"
},
{
"name": "Shell",
"bytes": "92213"
}
],
"symlink_target": ""
} |
import os
import urllib.request
from tqdm import tqdm
class TqdmUpTo(tqdm):
"""Provides `update_to(n)` which uses `tqdm.update(delta_n)`.
https://pypi.python.org/pypi/tqdm
"""
def update_to(self, b=1, bsize=1, tsize=None):
"""
b : int, optional
Number of blocks transferred so far [default: 1].
bsize : int, optional
Size of each block (in tqdm units) [default: 1].
tsize : int, optional
Total size (in tqdm units). If [default: None] remains unchanged.
"""
if tsize is not None:
self.total = tsize
self.update(b * bsize - self.n) # will also set self.n = b * bsize
def download_file(url, filename):
""" file downloader with progress bar """
with TqdmUpTo(unit='B', unit_scale=True, miniters=1, desc=filename) as t:
        urllib.request.urlretrieve(url, filename=filename, reporthook=t.update_to, data=None)
| {
"content_hash": "cceeb187259d3b7e33fec38b31970ecb",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 85,
"avg_line_length": 38.458333333333336,
"alnum_prop": 0.6056338028169014,
"repo_name": "jsqihui/reimplement-paper",
"id": "96dd1d2397486e86d29b2bec7c230c81e20adef4",
"size": "923",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "facenet/tensorflow/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3379"
}
],
"symlink_target": ""
} |
from djpj.decorator import pjax_block, pjax_template
| {
"content_hash": "8a27ed2dc258d5a00cba4398b37392c3",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 52,
"avg_line_length": 53,
"alnum_prop": 0.8301886792452831,
"repo_name": "AlexHill/djpj",
"id": "81f7c546cf3f57c8a4df36d62f41068505778768",
"size": "53",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djpj/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "70"
},
{
"name": "Python",
"bytes": "37422"
}
],
"symlink_target": ""
} |
"""
--- Part Two ---
As the door slides open, you are presented with a second door that uses a slightly more inspired security mechanism. Clearly unimpressed by the last version (in what movie is the password decrypted in order?!), the Easter Bunny engineers have worked out a better solution.
Instead of simply filling in the password from left to right, the hash now also indicates the position within the password to fill. You still look for hashes that begin with five zeroes; however, now, the sixth character represents the position (0-7), and the seventh character is the character to put in that position.
A hash result of 000001f means that f is the second character in the password. Use only the first result for each position, and ignore invalid positions.
For example, if the Door ID is abc:
The first interesting hash is from abc3231929, which produces 0000015...; so, 5 goes in position 1: _5______.
In the previous method, 5017308 produced an interesting hash; however, it is ignored, because it specifies an invalid position (8).
The second interesting hash is at index 5357525, which produces 000004e...; so, e goes in position 4: _5__e___.
You almost choke on your popcorn as the final character falls into place, producing the password 05ace8e3.
Given the actual Door ID and this new method, what is the password? Be extra proud of your solution if it uses a cinematic "decrypting" animation.
Your puzzle input is still wtnhxymk.
"""
import argparse
import hashlib
def calc_password(door_id):
password = ["_"] * 8
used_positions = ""
extra_char = 0
print("".join(password), end = "\r")
while "_" in password:
md5 = hashlib.new("md5")
md5.update((door_id + str(extra_char)).encode("utf-8"))
door_id_md5 = md5.hexdigest()
try:
if door_id_md5[:5] == "00000" and door_id_md5[5] not in used_positions:
password[int(door_id_md5[5])] = door_id_md5[6]
used_positions += door_id_md5[5]
print("".join(password), end="\r")
except Exception:
pass
extra_char += 1
return "".join(password)
parser = argparse.ArgumentParser(description='Advent of code.')
parser.add_argument('inputfile', type=argparse.FileType('r'), help='Path to input file')
args = parser.parse_args()
lines = "wtnhxymk"
print(calc_password(lines))
| {
"content_hash": "39ccf7a2a8025bc77dfabffb4e4fdc0e",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 319,
"avg_line_length": 47.56,
"alnum_prop": 0.6997476871320437,
"repo_name": "shaggy245/adventofcode",
"id": "bc56a41ebd7e8e8badd39190b4bc4f0f7c52048d",
"size": "2378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "day05/day5-2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "77773"
},
{
"name": "Ruby",
"bytes": "7736"
},
{
"name": "Tcl",
"bytes": "13290"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import easy_thumbnails.fields
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='avatar',
field=easy_thumbnails.fields.ThumbnailerImageField(null=True, upload_to='users/users/images/%Y/%m/%d', blank=True),
),
]
| {
"content_hash": "47d25e4a7579eb0c066494c192fc8aef",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 127,
"avg_line_length": 24.63157894736842,
"alnum_prop": 0.6260683760683761,
"repo_name": "chedv13/taskist",
"id": "7c4b5e8b4a0ce9bed52b3913080e7c5faa8c91a7",
"size": "492",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taskist/users/migrations/0002_user_avatar.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2052031"
},
{
"name": "HTML",
"bytes": "163908"
},
{
"name": "JavaScript",
"bytes": "2609685"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "51129"
},
{
"name": "Shell",
"bytes": "4479"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('saas', '0001_initial'),
('users', '0002_auto_20150708_1621'),
]
operations = [
migrations.AddField(
model_name='usergroup',
name='customer',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, default=None, blank=True, to='saas.Customer', null=True, verbose_name='Customer'),
),
migrations.AddField(
model_name='userprofile',
name='customer',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, default=None, blank=True, to='saas.Customer', null=True, verbose_name='Customer'),
),
migrations.AlterField(
model_name='usergroup',
name='manager',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, default=None, blank=True, to='users.UserProfile', null=True, verbose_name='Group Manager'),
),
migrations.AlterField(
model_name='usergroup',
name='parent',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, default=None, blank=True, to='users.UserGroup', null=True, verbose_name='Parent Group'),
),
migrations.AlterField(
model_name='userprofile',
name='manager',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, default=None, to='users.UserProfile', blank=True, help_text="Select this user's manager", null=True, verbose_name='Manager'),
),
]
| {
"content_hash": "009c17b297dd6e720b341276c5e90d8a",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 206,
"avg_line_length": 42.475,
"alnum_prop": 0.6327251324308417,
"repo_name": "moshthepitt/answers",
"id": "3709c91e547fb15cbfbe67cd318ecd1e6ba6c6c0",
"size": "1723",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "users/migrations/0003_auto_20151209_1015.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "21826"
},
{
"name": "HTML",
"bytes": "78587"
},
{
"name": "JavaScript",
"bytes": "8141"
},
{
"name": "Python",
"bytes": "157546"
}
],
"symlink_target": ""
} |
import serial
import sys
import os
from serial.tools.list_ports import comports as list_serial_ports
default = {
'ARDUINO':"VID:PID=2341:0043", # Works for Uno R3, untested on anything else
'MICRO:BIT':"VID:PID=0D28:0204",'MICROBIT':"VID:PID=0D28:0204" # Now can use both 'micro:bit' and 'microbit'
}
default = str(default)
filename = os.path.join(os.path.dirname(__file__),'data.txt')
def export(devices=None):
if devices == None:
return
text = '{'
for name,vid in devices.items():
text += '"{}":"{}",'.format(name,vid)
text = text[0:len(text)-1]
text += '}'
file = open(filename,mode='w')
file.write(text)
file.close()
def getPort(device=None,deviceVID=None):
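    # Resolves a serial port either from a friendly device name stored in data.txt
    # (e.g. 'ARDUINO') or from an explicit VID:PID string; returns the port name or None.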
devices = setup()
if device != None:
device = device.upper()
try:
name = devices[device]
except:
if deviceVID != None:
devices[device] = deviceVID.upper()
else:
return None
elif deviceVID == None:
return None
if deviceVID != None:
        name = deviceVID.upper()
export(devices)
ports = list_serial_ports()
for port in ports:
if name in port[2].upper():
return port[0]
return None
def connect(device=None,deviceVID=None,**kwargs):
s = serial.Serial(getPort(device,deviceVID),**kwargs)
s.close()
return s
def reset():
file = open(filename,mode='w')
file.write(default)
file.close()
def get_names():
file = open(filename)
text = file.read().splitlines()[0]
file.close()
devices = eval(text)
return devices
def openPort(port=None,**kwargs):
try:
kwargs['timeout']
except:
kwargs['timeout'] = 1
s = serial.Serial(port,**kwargs)
s.close()
return s
def setup():
while True:
try:
devices = get_names()
except:
reset()
continue
break
return devices
| {
"content_hash": "4f1ad1ad95dc5c1e371ae66bcb5bb61e",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 112,
"avg_line_length": 23,
"alnum_prop": 0.5340264650283554,
"repo_name": "ed-raspberrypi/connect",
"id": "1ada9df38c388c98c24d9d0b524dab8fbb8b4b8c",
"size": "2116",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "connect/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3713"
}
],
"symlink_target": ""
} |
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
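# Illustrative example: the input line "1.2.3.4:8333" produces an entry of the form
#   {{0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0xff,0x01,0x02,0x03,0x04}, 8333}
# in the generated pnSeed6_* array (IPv4 addresses are wrapped as IPv6-mapped addresses).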
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
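# Illustrative examples of the mapping above (the sample addresses are
# arbitrary; the byte layouts follow from pchIPv4/pchOnionCat defined above):
#   name_to_ipv6('1.2.3.4')    -> pchIPv4 + [0x01, 0x02, 0x03, 0x04]   (::ffff:1.2.3.4 as 16 bytes)
#   name_to_ipv6('::1')        -> 15 zero bytes followed by 0x01
#   name_to_ipv6('0x0100007f') -> pchIPv4 + [0x7f, 0x00, 0x00, 0x01]   (127.0.0.1, legacy little-endian form)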
def parse_spec(s, defaultport):
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
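# For illustration (hypothetical input, not shipped with the repo): a
# nodes_main.txt line of '1.2.3.4:8333' would be written by process_nodes as
#     {{0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0xff,0x01,0x02,0x03,0x04}, 8333}
# i.e. the 16-byte IPv6-mapped form of the IPv4 address plus the port.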
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef H_CHAINPARAMSSEEDS\n')
g.write('#define H_CHAINPARAMSSEEDS\n')
g.write('// List of fixed seed nodes for the bitcoin network\n')
g.write('// AUTOGENERATED by contrib/devtools/generate-seeds.py\n\n')
g.write('// Each line contains a 16-byte IPv6 address and a port.\n')
g.write('// IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 51225)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 25714)
g.write('#endif\n')
if __name__ == '__main__':
main()
| {
"content_hash": "dd6bb36104b78530db644de7a80414ae",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 98,
"avg_line_length": 31.362204724409448,
"alnum_prop": 0.5699221692191815,
"repo_name": "FoundationAwesome/coinawesome-qt",
"id": "1a6fa21cfe01d68be3080364d3e075b33c8a8ea4",
"size": "4187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "share/seeds/generate-seeds.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "51312"
},
{
"name": "C",
"bytes": "32951"
},
{
"name": "C++",
"bytes": "2530762"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "12684"
},
{
"name": "HTML",
"bytes": "50620"
},
{
"name": "Makefile",
"bytes": "12659"
},
{
"name": "NSIS",
"bytes": "6077"
},
{
"name": "Objective-C",
"bytes": "858"
},
{
"name": "Objective-C++",
"bytes": "3517"
},
{
"name": "Python",
"bytes": "54355"
},
{
"name": "QMake",
"bytes": "13694"
},
{
"name": "Shell",
"bytes": "9083"
}
],
"symlink_target": ""
} |
import re
import urllib
import logging
import urlparse
import furl
from flask import request, url_for
from website import settings as website_settings
from api.base import settings as api_settings
# Keep me: Makes rubeus importable from website.util
from . import rubeus # noqa
logger = logging.getLogger(__name__)
guid_url_node_pattern = re.compile('^/project/[a-zA-Z0-9]{5,}/node(?=/[a-zA-Z0-9]{5,})')
guid_url_project_pattern = re.compile('^/project(?=/[a-zA-Z0-9]{5,})')
guid_url_profile_pattern = re.compile('^/profile(?=/[a-zA-Z0-9]{5,})')
waterbutler_action_map = {
'upload': 'file',
'delete': 'file',
'download': 'file',
'metadata': 'data',
'create_folder': 'file',
}
def conjunct(words, conj='and'):
words = list(words)
num_words = len(words)
if num_words == 0:
return ''
elif num_words == 1:
return words[0]
elif num_words == 2:
return ' {0} '.format(conj).join(words)
elif num_words > 2:
return ', '.join(words[:-1]) + ', {0} {1}'.format(conj, words[-1])
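# A few worked examples of conjunct() (illustrative only):
#   conjunct(['alpha'])                  -> 'alpha'
#   conjunct(['alpha', 'beta'])          -> 'alpha and beta'
#   conjunct(['alpha', 'beta', 'gamma']) -> 'alpha, beta, and gamma'
#   conjunct(['alpha', 'beta'], 'or')    -> 'alpha or beta'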
def _get_guid_url_for(url):
"""URL Post-processor transforms specific `/project/<pid>` or `/project/<pid>/node/<nid>`
urls into guid urls. Ex: `<nid>/wiki/home`.
"""
guid_url = guid_url_node_pattern.sub('', url, count=1)
guid_url = guid_url_project_pattern.sub('', guid_url, count=1)
guid_url = guid_url_profile_pattern.sub('', guid_url, count=1)
return guid_url
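# Examples of the transformation (made-up five-character guids):
#   _get_guid_url_for('/project/abc12/node/def34/wiki/home') -> '/def34/wiki/home'
#   _get_guid_url_for('/project/abc12/settings/')            -> '/abc12/settings/'
#   _get_guid_url_for('/profile/xyz89/')                     -> '/xyz89/'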
def api_url_for(view_name, _absolute=False, _xml=False, *args, **kwargs):
"""Reverse URL lookup for API routes (that use the JSONRenderer or XMLRenderer).
Takes the same arguments as Flask's url_for, with the addition of
`_absolute`, which will make an absolute URL with the correct HTTP scheme
based on whether the app is in debug mode. The _xml flag sets the renderer to use.
"""
renderer = 'XMLRenderer' if _xml else 'JSONRenderer'
url = url_for('{0}__{1}'.format(renderer, view_name), *args, **kwargs)
if _absolute:
# We do NOT use the url_for's _external kwarg because app.config['SERVER_NAME'] alters
# behavior in an unknown way (currently breaks tests). /sloria /jspies
return urlparse.urljoin(website_settings.DOMAIN, url)
return url
def api_v2_url(path_str,
params=None,
base_route=website_settings.API_DOMAIN,
base_prefix=api_settings.API_BASE,
**kwargs):
"""
Convenience function for APIv2 usage: Concatenates parts of the absolute API url based on arguments provided
For example: given path_str = '/nodes/abcd3/contributors/' and params {'filter[fullname]': 'bob'},
this function would return the following on the local staging environment:
'http://localhost:8000/nodes/abcd3/contributors/?filter%5Bfullname%5D=bob'
This is NOT a full lookup function. It does not verify that a route actually exists to match the path_str given.
"""
params = params or {} # Optional params dict for special-character param names, eg filter[fullname]
base_url = furl.furl(base_route + base_prefix)
sub_url = furl.furl(path_str)
base_url.path.add(sub_url.path.segments)
base_url.args.update(params)
base_url.args.update(kwargs)
return str(base_url)
def web_url_for(view_name, _absolute=False, _guid=False, *args, **kwargs):
"""Reverse URL lookup for web routes (those that use the OsfWebRenderer).
Takes the same arguments as Flask's url_for, with the addition of
`_absolute`, which will make an absolute URL with the correct HTTP scheme
based on whether the app is in debug mode.
"""
url = url_for('OsfWebRenderer__{0}'.format(view_name), *args, **kwargs)
if _guid:
url = _get_guid_url_for(url)
if _absolute:
# We do NOT use the url_for's _external kwarg because app.config['SERVER_NAME'] alters
# behavior in an unknown way (currently breaks tests). /sloria /jspies
return urlparse.urljoin(website_settings.DOMAIN, url)
return url
def is_json_request():
"""Return True if the current request is a JSON/AJAX request."""
content_type = request.content_type
return content_type and ('application/json' in content_type)
def waterbutler_url_for(route, provider, path, node, user=None, **kwargs):
"""DEPRECATED Use waterbutler_api_url_for
Reverse URL lookup for WaterButler routes
    :param str route: The action to perform: upload, download, delete...
    :param str provider: The name of the requested provider
    :param str path: The path of the requested file or folder
    :param Node node: The node being accessed
    :param User user: The user whose cookie will be used, or None
    :param dict **kwargs: Additional query parameters to be appended
"""
url = furl.furl(website_settings.WATERBUTLER_URL)
url.path.segments.append(waterbutler_action_map[route])
url.args.update({
'path': path,
'nid': node._id,
'provider': provider,
})
if user:
url.args['cookie'] = user.get_or_create_cookie()
elif website_settings.COOKIE_NAME in request.cookies:
url.args['cookie'] = request.cookies[website_settings.COOKIE_NAME]
    if 'view_only' in kwargs:
        view_only = kwargs.get('view_only')
    else:
        view_only = request.args.get('view_only')
    url.args['view_only'] = view_only
url.args.update(kwargs)
return url.url
def waterbutler_api_url_for(node_id, provider, path='/', **kwargs):
assert path.startswith('/'), 'Path must always start with /'
url = furl.furl(website_settings.WATERBUTLER_URL)
segments = ['v1', 'resources', node_id, 'providers', provider] + path.split('/')[1:]
url.path.segments.extend([urllib.quote(x.encode('utf-8')) for x in segments])
url.args.update(kwargs)
return url.url
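# Usage sketch (illustrative; the node id, provider name and waterbutler host
# below are assumptions, not values from settings):
#   waterbutler_api_url_for('abcde', 'osfstorage', '/folder/file.txt', cookie='s3kr3t')
# would, for WATERBUTLER_URL == 'http://localhost:7777', return
#   'http://localhost:7777/v1/resources/abcde/providers/osfstorage/folder/file.txt?cookie=s3kr3t'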
| {
"content_hash": "70009b7c2726b486b5ca2493e054eaf9",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 116,
"avg_line_length": 36.84905660377358,
"alnum_prop": 0.6613756613756614,
"repo_name": "arpitar/osf.io",
"id": "954c81afc74b0daea45a64bdae91045706370f77",
"size": "5884",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "website/util/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "117068"
},
{
"name": "HTML",
"bytes": "31536"
},
{
"name": "JavaScript",
"bytes": "1167909"
},
{
"name": "Mako",
"bytes": "523363"
},
{
"name": "Python",
"bytes": "3481265"
},
{
"name": "Shell",
"bytes": "1679"
}
],
"symlink_target": ""
} |
from gppylib.mainUtils import *
from optparse import Option, OptionGroup, OptionParser, OptionValueError, SUPPRESS_USAGE
import os, sys, getopt, socket, StringIO, signal
import datetime
from gppylib import gparray, gplog, pgconf, userinput, utils
from gppylib.commands import base, gp, pg, unix
from gppylib.db import catalog, dbconn
from gppylib.gpparseopts import OptParser, OptChecker
from gppylib.operations.startSegments import *
from gppylib.operations.buildMirrorSegments import *
from gppylib.programs import programIoUtils
from gppylib.system import configurationInterface as configInterface
from gppylib.system.environment import GpMasterEnvironment
from gppylib.testold.testUtils import *
from gppylib.utils import toNonNoneString, checkNotNone, readAllLinesFromFile, writeLinesToFile, TableLogger
logger = gplog.get_default_logger()
class FieldDefinition:
"""
Represent a field of our data. Note that we could infer columnName from name, but we would like
for columnName to be more stable than "name"
"""
def __init__(self, name, columnName, columnType, shortLabel=None):
self.__name = name
self.__columnName = columnName
self.__columnType = columnType
self.__shortLabel = shortLabel if shortLabel is not None else name
def getName(self): return self.__name
def getColumnName(self): return self.__columnName
def getColumnType(self): return self.__columnType
def getShortLabel(self): return self.__shortLabel
#
    #  __str__ needs to return name -- we use this for output in some cases right now
#
def __str__(self): return self.__name
CATEGORY__SEGMENT_INFO = "Segment Info"
VALUE__HOSTNAME = FieldDefinition("Hostname", "hostname", "text")
VALUE__ADDRESS = FieldDefinition("Address", "address", "text")
VALUE__DATADIR = FieldDefinition("Datadir", "datadir", "text")
VALUE__PORT = FieldDefinition("Port", "port", "int")
CATEGORY__MIRRORING_INFO = "Mirroring Info"
VALUE__CURRENT_ROLE = FieldDefinition("Current role", "role", "text") # can't use current_role as name -- it's a reserved word
VALUE__PREFERRED_ROLE = FieldDefinition("Preferred role", "preferred_role", "text")
VALUE__MIRROR_STATUS = FieldDefinition("Mirror status", "mirror_status", "text")
CATEGORY__ERROR_GETTING_SEGMENT_STATUS = "Error Getting Segment Status"
VALUE__ERROR_GETTING_SEGMENT_STATUS = FieldDefinition("Error Getting Segment Status", "error_getting_status", "text")
CATEGORY__CHANGE_TRACKING_INFO = "Change Tracking Info"
VALUE__CHANGE_TRACKING_DATA_SIZE = FieldDefinition("Change tracking data size", "change_tracking_data_size", "text", "Change tracking size")
CATEGORY__RESYNCHRONIZATION_INFO = "Resynchronization Info"
VALUE__RESYNC_MODE = FieldDefinition("Resynchronization mode", "resync_mode", "text", "Resync mode")
VALUE__RESYNC_DATA_SYNCHRONIZED = FieldDefinition("Data synchronized", "data_synced_str", "text", "Data synced")
VALUE__RESYNC_EST_TOTAL_DATA = FieldDefinition("Estimated total data to synchronize", "est_total_bytes_to_sync_str", "text", "Est. total to sync")
VALUE__RESYNC_EST_PROGRESS_WITH_MIRROR = FieldDefinition("Estimated resync progress with mirror", "est_resync_progress_str", "text", "Est. resync progress")
VALUE__TOTAL_RESYNC_OBJECT_COUNT = FieldDefinition("Total resync objects", "totalResyncObjectCount", "text", "Total resync objects")
VALUE__RESYNC_OBJECT_COUNT = FieldDefinition("Objects to resync", "curResyncObjectCount", "text", "Objects to resync")
VALUE__RESYNC_EST_COMPLETION_TIME = FieldDefinition("Estimated resync end time", "est_resync_end_time_str", "text", "Est. resync end time")
CATEGORY__STATUS = "Status"
VALUE__MASTER_REPORTS_STATUS = FieldDefinition("Configuration reports status as", "status_in_config", "text", "Config status")
VALUE__MIRROR_SEGMENT_STATUS = FieldDefinition("Segment status", "segment_status", "text") # must not be same name as VALUE__SEGMENT_STATUS
VALUE__NONMIRROR_DATABASE_STATUS = FieldDefinition("Database status", "database_status", "text")
VALUE__ACTIVE_PID = FieldDefinition("PID", "active_pid", "text") # int would be better, but we print error messages here sometimes
# these are not in a category, used for other logging
VALUE__SEGMENT_STATUS = FieldDefinition("Instance status", "instance_status", "text", "Status")
VALUE__DBID = FieldDefinition("dbid", "dbid", "int")
VALUE__CONTENTID = FieldDefinition("contentid", "contentid", "int")
VALUE__RESYNC_DATA_SYNCHRONIZED_BYTES = FieldDefinition("Data synchronized (bytes)", "bytes_synced", "int8")
VALUE__RESYNC_EST_TOTAL_DATA_BYTES = FieldDefinition("Estimated total data to synchronize (bytes)", "est_total_bytes_to_sync", "int8")
VALUE__RESYNC_EST_PROGRESS_WITH_MIRROR_NUMERIC = FieldDefinition("Estimated resync progress with mirror (numeric)", "est_resync_progress_pct", "float")
VALUE__TOTAL_RESYNC_OBJECT_COUNT_INT = FieldDefinition("Total Resync Object Count (int)", "totalResyncObjCount", "int")
VALUE__RESYNC_OBJECT_COUNT_INT = FieldDefinition("Resync Object Count (int)", "curResyncObjCount", "int")
VALUE__RESYNC_EST_COMPLETION_TIME_TIMESTAMP = FieldDefinition("Estimated resync end time (timestamp)", "est_resync_end_time", "timestamp")
VALUE__HAS_DATABASE_STATUS_WARNING = FieldDefinition("Has database status warning", "has_status_warning", "bool")
VALUE__VERSION_STRING = FieldDefinition("Version", "version", "text")
VALUE__POSTMASTER_PID_FILE_EXISTS = FieldDefinition("File postmaster.pid (boolean)", "postmaster_pid_file_exists", "bool")
VALUE__POSTMASTER_PID_VALUE_INT = FieldDefinition("PID from postmaster.pid file (int)", "postmaster_pid", "int", "pid file PID")
VALUE__LOCK_FILES_EXIST = FieldDefinition("Lock files in /tmp (boolean)", "lock_files_exist", "bool", "local files exist")
VALUE__ACTIVE_PID_INT = FieldDefinition("Active PID (int)", "active_pid", "int")
VALUE__CHANGE_TRACKING_DATA_SIZE_BYTES = FieldDefinition("Change tracking data size (bytes)", "change_tracking_bytes", "int8")
VALUE__POSTMASTER_PID_FILE = FieldDefinition("File postmaster.pid", "postmaster_pid_file_exists", "text", "pid file exists") # boolean would be nice
VALUE__POSTMASTER_PID_VALUE = FieldDefinition("PID from postmaster.pid file", "postmaster_pid", "text", "pid file PID") # int would be better, but we print error messages here sometimes
VALUE__LOCK_FILES= FieldDefinition("Lock files in /tmp", "lock_files_exist", "text", "local files exist") # boolean would be nice
class GpStateData:
"""
Store key-value pairs of unpacked data for each segment in the cluster
Also provides categories on top of this
To add new values:
1) add CATEGORY_* and VALUE* constants as appropriate
2) update self.__categories and self.__entriesByCategories below
        3) call .addValue from the code that loads the values (search for it down below); an illustrative sketch follows this docstring
"""
def __init__(self ):
self.__segmentData = []
self.__segmentDbIdToSegmentData = {}
self.__dbIdIsProbablyDown = {}
self.__contentsWithUpSegments = {}
self.__currentSegmentData = None
self.__categories = [
CATEGORY__SEGMENT_INFO,
CATEGORY__MIRRORING_INFO,
CATEGORY__ERROR_GETTING_SEGMENT_STATUS,
CATEGORY__CHANGE_TRACKING_INFO,
CATEGORY__RESYNCHRONIZATION_INFO,
CATEGORY__STATUS]
self.__entriesByCategory = {}
self.__entriesByCategory[CATEGORY__SEGMENT_INFO] = \
[VALUE__HOSTNAME,
VALUE__ADDRESS,
VALUE__DATADIR,
VALUE__PORT]
self.__entriesByCategory[CATEGORY__MIRRORING_INFO] = \
[VALUE__CURRENT_ROLE,
VALUE__PREFERRED_ROLE,
VALUE__MIRROR_STATUS]
self.__entriesByCategory[CATEGORY__ERROR_GETTING_SEGMENT_STATUS] = \
[VALUE__ERROR_GETTING_SEGMENT_STATUS]
self.__entriesByCategory[CATEGORY__CHANGE_TRACKING_INFO] = \
[VALUE__CHANGE_TRACKING_DATA_SIZE]
self.__entriesByCategory[CATEGORY__RESYNCHRONIZATION_INFO] = \
[VALUE__RESYNC_MODE,
VALUE__RESYNC_DATA_SYNCHRONIZED,
VALUE__RESYNC_EST_TOTAL_DATA,
VALUE__RESYNC_EST_PROGRESS_WITH_MIRROR,
VALUE__TOTAL_RESYNC_OBJECT_COUNT,
VALUE__RESYNC_OBJECT_COUNT,
VALUE__RESYNC_EST_COMPLETION_TIME]
self.__entriesByCategory[CATEGORY__STATUS] = \
[VALUE__ACTIVE_PID,
VALUE__MASTER_REPORTS_STATUS,
VALUE__MIRROR_SEGMENT_STATUS,
VALUE__NONMIRROR_DATABASE_STATUS]
self.__allValues = {}
for k in [VALUE__SEGMENT_STATUS, VALUE__DBID, VALUE__CONTENTID,
VALUE__RESYNC_DATA_SYNCHRONIZED_BYTES, VALUE__RESYNC_EST_TOTAL_DATA_BYTES,
VALUE__RESYNC_EST_PROGRESS_WITH_MIRROR_NUMERIC, VALUE__TOTAL_RESYNC_OBJECT_COUNT_INT,
VALUE__RESYNC_OBJECT_COUNT_INT, VALUE__RESYNC_EST_COMPLETION_TIME_TIMESTAMP, VALUE__HAS_DATABASE_STATUS_WARNING,
VALUE__VERSION_STRING, VALUE__POSTMASTER_PID_FILE_EXISTS, VALUE__LOCK_FILES_EXIST,
VALUE__ACTIVE_PID_INT, VALUE__POSTMASTER_PID_VALUE_INT,
VALUE__CHANGE_TRACKING_DATA_SIZE_BYTES,
VALUE__POSTMASTER_PID_FILE, VALUE__POSTMASTER_PID_VALUE, VALUE__LOCK_FILES
]:
self.__allValues[k] = True
for values in self.__entriesByCategory.values():
for v in values:
self.__allValues[v] = True
def beginSegment(self, segment):
self.__currentSegmentData = {}
self.__currentSegmentData["values"] = {}
self.__currentSegmentData["isWarning"] = {}
self.__segmentData.append(self.__currentSegmentData)
self.__segmentDbIdToSegmentData[segment.getSegmentDbId()] = self.__currentSegmentData
def addValue(self, key, value, isWarning=False):
self.__currentSegmentData["values"][key] = value
self.__currentSegmentData["isWarning"][key] = isWarning
        assert key in self.__allValues
def isClusterProbablyDown(self, gpArray):
"""
        Approximate whether or not the cluster has a problem and needs review.
        We could beef this up -- for example, the mirror is only useful
        if we are in resync mode.
"""
for seg in gpArray.getSegDbList():
if seg.getSegmentContentId() not in self.__contentsWithUpSegments:
return True
return False
def setSegmentProbablyDown(self, seg, peerPrimary, isThisSegmentProbablyDown):
"""
Mark whether this segment is probably down (based on isThisSegmentProbablyDown)
@param peerPrimary: if this is a mirror in file replication mode, this will be its primary
"""
if isThisSegmentProbablyDown:
self.__dbIdIsProbablyDown[seg.getSegmentDbId()] = True
else:
#
# a segment is "good to use" for the cluster only if it's a primary, or a mirror whose
# primary says that they are in sync (primary not in changetracking or resync)
#
isGoodToUse = seg.isSegmentPrimary(current_role=True) or peerPrimary.isSegmentModeSynchronized()
if isGoodToUse:
self.__contentsWithUpSegments[seg.getSegmentContentId()] = True
def isSegmentProbablyDown(self, seg):
return seg.getSegmentDbId() in self.__dbIdIsProbablyDown
def addSegmentToTableLogger(self, tabLog, segment, suppressCategories={}):
"""
@param suppressCategories map from [categoryName-> true value] for category names that should be suppressed
"""
for cat in self.__categories:
if not suppressCategories.get(cat):
keys = self.__entriesByCategory[cat]
self.addSectionToTableLogger(tabLog, segment, cat, keys)
def getStrValue(self, segment, key, defaultIfNone=""):
data = self.__segmentDbIdToSegmentData[segment.getSegmentDbId()]
valuesMap = data["values"]
val = valuesMap.get(key)
if val is None:
val = defaultIfNone
else:
val = str(val)
return val
def addSectionToTableLogger(self, tabLog, segment, sectionHeader, keys, categoryIndent="", indent=" "):
data = self.__segmentDbIdToSegmentData[segment.getSegmentDbId()]
valuesMap = data["values"]
isWarningMap = data["isWarning"]
hasValue = False
for k in keys:
if k in valuesMap:
hasValue = True
break
if not hasValue:
#
# skip sections for which we have no values!
#
return
tabLog.info([categoryIndent + sectionHeader])
for k in keys:
if k in valuesMap:
val = valuesMap[k]
if val is None:
val = ""
else:
val = str(val)
tabLog.infoOrWarn(isWarningMap[k], ["%s%s" %(indent, k), "= %s" % val])
#-------------------------------------------------------------------------
class GpSystemStateProgram:
#
# Constructor:
#
# @param options the options as returned by the options parser
#
def __init__(self, options):
self.__options = options
self.__pool = None
def __appendSegmentTripletToArray(self, segment, line):
"""
        Append the segment's address/datadir/port triplet to line, then return line.
        @param segment the segment whose values are appended
        @param line the list to which to append the triplet of address/datadir/port
"""
line.append(segment.getSegmentAddress())
line.append(segment.getSegmentDataDirectory())
line.append(str(segment.getSegmentPort()))
return line
def __getMirrorType(self, gpArray):
if gpArray.hasMirrors:
if gpArray.guessIsSpreadMirror():
return "Spread"
else:
return "Group"
else:
return "No Mirror"
def __showClusterConfig(self, gpEnv, gpArray):
"""
Returns the exitCode
"""
if gpArray.hasMirrors:
logger.info("-------------------------------------------------------------" )
logger.info("-Current GPDB mirror list and status" )
logger.info("-Type = %s" % self.__getMirrorType(gpArray) )
logger.info("-------------------------------------------------------------" )
primarySegments = [ seg for seg in gpArray.getSegDbList() if seg.isSegmentPrimary(False) ]
mirrorSegments = [ seg for seg in gpArray.getSegDbList() if seg.isSegmentMirror(False) ]
contentIdToMirror = GpArray.getSegmentsByContentId(mirrorSegments)
tabLog = TableLogger().setWarnWithArrows(True)
tabLog.info(["Status", "Data State", "Primary", "Datadir", "Port", "Mirror", "Datadir", "Port"])
numInChangeTracking = 0
numMirrorsActingAsPrimaries = 0
for primary in primarySegments:
mirror = contentIdToMirror[primary.getSegmentContentId()][0]
doWarn = False
status = ""
if primary.isSegmentMirror(True):
actingPrimary = mirror
actingMirror = primary
actMirrorStatus = "Available" if actingMirror.isSegmentUp() else "Failed"
status = "Mirror Active, Primary %s" % (actMirrorStatus)
numMirrorsActingAsPrimaries += 1
else:
actingPrimary = primary
actingMirror = mirror
actMirrorStatus = "Available" if actingMirror.isSegmentUp() else "Failed"
status = "Primary Active, Mirror %s" % (actMirrorStatus)
if actingPrimary.isSegmentModeInChangeLogging():
doWarn = True
numInChangeTracking += 1
dataStatus = gparray.getDataModeLabel(actingPrimary.getSegmentMode())
line = [status, dataStatus]
self.__appendSegmentTripletToArray(primary, line)
self.__appendSegmentTripletToArray(mirror, line)
tabLog.infoOrWarn(doWarn, line)
tabLog.outputTable()
logger.info("-------------------------------------------------------------" )
if numMirrorsActingAsPrimaries > 0:
logger.warn( "%s segment(s) configured as mirror(s) are acting as primaries" % numMirrorsActingAsPrimaries )
if numInChangeTracking > 0:
logger.warn( "%s segment(s) are in change tracking" % numInChangeTracking)
else:
logger.info("-------------------------------------------------------------" )
logger.info("-Primary list [Mirror not used]")
logger.info("-------------------------------------------------------------" )
tabLog = TableLogger().setWarnWithArrows(True)
tabLog.info(["Primary", "Datadir", "Port"])
for seg in [ seg for seg in gpArray.getSegDbList()]:
tabLog.info(self.__appendSegmentTripletToArray(seg, []))
tabLog.outputTable()
logger.info("-------------------------------------------------------------" )
return 0
def _showMirrorList(self,gpEnv, gpArray):
"""
Returns the exitCode
"""
exitCode = 0
if gpArray.hasMirrors:
tabLog = TableLogger().setWarnWithArrows(True)
tabLog.info(["Mirror","Datadir", "Port", "Status", "Data Status", ""])
# based off the bash version of -m "mirror list" option,
# the mirror list prints information about defined mirrors only
mirrorSegments = [ seg for seg in gpArray.getSegDbList() if seg.isSegmentMirror(False) ]
numMirrorsActingAsPrimaries = 0
numFailedMirrors = 0
numChangeTrackingMirrors = 0
for seg in mirrorSegments:
doWarn = False
status = ""
dataStatus = gparray.getDataModeLabel(seg.getSegmentMode())
if seg.isSegmentPrimary(True):
status = "Acting as Primary"
if seg.isSegmentModeInChangeLogging():
numChangeTrackingMirrors += 1
numMirrorsActingAsPrimaries += 1
elif seg.isSegmentUp():
status = "Passive"
else:
status = "Failed"
dataStatus = ""
doWarn = True
numFailedMirrors += 1
if doWarn:
exitCode = 1
line = self.__appendSegmentTripletToArray(seg, [])
line.extend([status, dataStatus])
tabLog.infoOrWarn(doWarn, line)
logger.info("-------------------------------------------------------------" )
logger.info("-Current GPDB mirror list and status" )
logger.info("-Type = %s" % self.__getMirrorType(gpArray) )
logger.info("-------------------------------------------------------------" )
tabLog.outputTable()
logger.info("-------------------------------------------------------------" )
if numMirrorsActingAsPrimaries > 0:
logger.warn( "%s segment(s) configured as mirror(s) are acting as primaries" % numMirrorsActingAsPrimaries )
if numFailedMirrors > 0:
logger.warn( "%s segment(s) configured as mirror(s) have failed" % numFailedMirrors )
if numChangeTrackingMirrors > 0:
logger.warn( "%s mirror segment(s) acting as primaries are in change tracking" % numChangeTrackingMirrors)
else:
logger.warn("-------------------------------------------------------------" )
logger.warn( "Mirror not used")
logger.warn("-------------------------------------------------------------" )
return exitCode
def __appendStandbySummary(self, hostNameToResults, standby, tabLog):
"""
Log information about the configured standby and its current status
"""
if standby is None:
tabLog.info(["Master standby", "= No master standby configured"])
else:
tabLog.info(["Master standby", "= %s" % standby.getSegmentHostName()])
(standbyStatusFetchWarning, outputFromStandbyCmd) = hostNameToResults[standby.getSegmentHostName()]
standbyData = outputFromStandbyCmd[standby.getSegmentDbId()] if standbyStatusFetchWarning is None else None
if standbyStatusFetchWarning is not None:
tabLog.warn(["Standby master state", "= Status could not be determined: %s" % standbyStatusFetchWarning])
elif standbyData[gp.SEGMENT_STATUS__HAS_POSTMASTER_PID_FILE] and \
standbyData[gp.SEGMENT_STATUS__GET_PID]['pid'] > 0 and \
standbyData[gp.SEGMENT_STATUS__GET_PID]['error'] is None:
tabLog.info(["Standby master state", "= Standby host passive"])
else:
tabLog.warn(["Standby master state", "= Standby host DOWN"])
def __showStatusStatistics(self, gpEnv, gpArray):
"""
Print high-level numeric stats about the cluster
returns the exit code
"""
hostNameToResults = self.__fetchAllSegmentData(gpArray)
logger.info("Greenplum instance status summary")
# master summary info
tabLog = TableLogger().setWarnWithArrows(True)
tabLog.addSeparator()
tabLog.info(["Master instance", "= Active"])
self.__appendStandbySummary(hostNameToResults, gpArray.standbyMaster, tabLog)
tabLog.info(["Total segment instance count from metadata", "= %s" % len(gpArray.getSegDbList())])
tabLog.addSeparator()
# primary and mirror segment info
for whichType in ["Primary", "Mirror"]:
tabLog.info(["%s Segment Status" % whichType])
tabLog.addSeparator()
if whichType == "Primary":
segs = [seg for seg in gpArray.getSegDbList() if seg.isSegmentPrimary(current_role=False)]
else:
segs = [seg for seg in gpArray.getSegDbList() if seg.isSegmentMirror(current_role=False)]
if not segs:
tabLog.info(["Mirrors not configured on this array"])
tabLog.addSeparator()
continue
numPostmasterPidFilesMissing = 0
numPostmasterProcessesMissing = 0
numLockFilesMissing = 0
numPostmasterPidsMissing = 0
for seg in segs:
(statusFetchWarning, outputFromCmd) = hostNameToResults[seg.getSegmentHostName()]
if statusFetchWarning is not None:
# I guess if we can't contact the segment that we can do this?
# or should add a new error row instead to account for this?
numPostmasterPidFilesMissing += 1
numLockFilesMissing += 1
numPostmasterProcessesMissing += 1
numPostmasterPidsMissing += 1
else:
segmentData = outputFromCmd[seg.getSegmentDbId()]
if not segmentData[gp.SEGMENT_STATUS__HAS_LOCKFILE]:
numLockFilesMissing += 1
if not segmentData[gp.SEGMENT_STATUS__HAS_POSTMASTER_PID_FILE]:
numPostmasterPidFilesMissing += 1
# note: this (which I think matches old behavior fairly closely)
# doesn't seem entirely correct -- we are checking whether netstat is
# there, but not really checking that the process is running on that port?
if segmentData[gp.SEGMENT_STATUS__GET_PID] is None or \
segmentData[gp.SEGMENT_STATUS__GET_PID]['pid'] == 0:
numPostmasterPidsMissing += 1
numPostmasterProcessesMissing += 1
elif segmentData[gp.SEGMENT_STATUS__GET_PID]['error'] is not None:
numPostmasterProcessesMissing += 1
numSegments = len(segs)
numValidAtMaster = len([seg for seg in segs if seg.isSegmentUp()])
numFailuresAtMaster = len([seg for seg in segs if seg.isSegmentDown()])
numPostmasterPidFilesFound = numSegments - numPostmasterPidFilesMissing
numLockFilesFound = numSegments - numLockFilesMissing
numPostmasterPidsFound = numSegments - numPostmasterPidsMissing
numPostmasterProcessesFound = numSegments - numPostmasterProcessesMissing
# print stuff
tabLog.info(["Total %s segments" % whichType.lower(), "= %d" % numSegments])
tabLog.info(["Total %s segment valid (at master)" % whichType.lower(), "= %d" % numValidAtMaster])
tabLog.infoOrWarn(numFailuresAtMaster > 0,
["Total %s segment failures (at master)" % whichType.lower(), "= %d" % numFailuresAtMaster])
tabLog.infoOrWarn(numPostmasterPidFilesMissing > 0,
["Total number of postmaster.pid files missing", "= %d" % numPostmasterPidFilesMissing])
tabLog.info( ["Total number of postmaster.pid files found", "= %d" % numPostmasterPidFilesFound])
tabLog.infoOrWarn(numPostmasterPidsMissing > 0,
["Total number of postmaster.pid PIDs missing", "= %d" % numPostmasterPidsMissing])
tabLog.info( ["Total number of postmaster.pid PIDs found", "= %d" % numPostmasterPidsFound])
tabLog.infoOrWarn(numLockFilesMissing > 0,
["Total number of /tmp lock files missing", "= %d" % numLockFilesMissing])
tabLog.info( ["Total number of /tmp lock files found", "= %d" % numLockFilesFound])
tabLog.infoOrWarn(numPostmasterProcessesMissing > 0,
["Total number postmaster processes missing", "= %d" % numPostmasterProcessesMissing])
tabLog.info( ["Total number postmaster processes found", "= %d" % numPostmasterProcessesFound])
if whichType == "Mirror":
numMirrorsActive = len([seg for seg in segs if seg.isSegmentPrimary(current_role=True)])
numMirrorsPassive = numSegments - numMirrorsActive
tabLog.infoOrWarn(numMirrorsActive > 0,
["Total number mirror segments acting as primary segments", "= %d" % numMirrorsActive])
tabLog.info( ["Total number mirror segments acting as mirror segments", "= %d" % numMirrorsPassive])
tabLog.addSeparator()
tabLog.outputTable()
def __fetchAllSegmentData(self, gpArray):
"""
        returns a dict mapping hostName to the GpGetSegmentStatusValues decoded result
"""
logger.info("Gathering data from segments...")
segmentsByHost = GpArray.getSegmentsByHostName(gpArray.getDbList())
segmentData = {}
dispatchCount = 0
hostNameToCmd = {}
for hostName, segments in segmentsByHost.iteritems():
cmd = gp.GpGetSegmentStatusValues("get segment version status", segments,
[gp.SEGMENT_STATUS__GET_VERSION,
gp.SEGMENT_STATUS__GET_PID,
gp.SEGMENT_STATUS__HAS_LOCKFILE,
gp.SEGMENT_STATUS__HAS_POSTMASTER_PID_FILE,
gp.SEGMENT_STATUS__GET_MIRROR_STATUS
],
verbose=logging_is_verbose(),
ctxt=base.REMOTE,
remoteHost=segments[0].getSegmentAddress())
hostNameToCmd[hostName] = cmd
self.__pool.addCommand(cmd)
dispatchCount+=1
self.__poolWait(dispatchCount)
hostNameToResults = {}
for hostName, cmd in hostNameToCmd.iteritems():
hostNameToResults[hostName] = cmd.decodeResults()
return hostNameToResults
def __showSummaryOfSegmentsWhichRequireAttention(self, gpEnv, gpArray):
"""
        Prints a summary of the segments that require attention.
@param gpEnv the GpMasterEnvironment object
@param gpArray the array to display
returns the exit code
"""
exitCode = 0
if not gpArray.hasMirrors:
logger.info("Physical mirroring is not configured")
return 1
primarySegments = [ seg for seg in gpArray.getSegDbList() if seg.isSegmentPrimary(current_role=True) ]
mirrorSegments = [ seg for seg in gpArray.getSegDbList() if seg.isSegmentMirror(current_role=True) ]
contentIdToMirror = GpArray.getSegmentsByContentId(mirrorSegments)
hasWarnings = False
hostNameToResults = self.__fetchAllSegmentData(gpArray)
data = self.__buildGpStateData(gpArray, hostNameToResults)
def logSegments(segments, logAsPairs, additionalFieldsToLog=[]):
"""
helper function for logging a list of primaries, with their mirrors
@param logAsPairs if True, then segments should be primaries only, and we will log corresponding mirror datadir/port
@param additionalFieldsToLog should be a list of FieldDefinition objects
"""
tabLog = TableLogger().setWarnWithArrows(True)
for segment in segments:
if tabLog.getNumLines() == 0:
header = ["Current Primary" if logAsPairs else "Segment", "Port"]
header.extend([f.getShortLabel() for f in additionalFieldsToLog])
if logAsPairs:
header.extend(["Mirror", "Port"])
tabLog.info(header)
line = []
line.extend([segment.getSegmentAddress(), str(segment.getSegmentPort())])
for key in additionalFieldsToLog:
line.append(data.getStrValue(segment, key))
if logAsPairs:
mirror = contentIdToMirror[segment.getSegmentContentId()][0]
line.extend([mirror.getSegmentAddress(), str(mirror.getSegmentPort())])
tabLog.info(line)
tabLog.outputTable()
logger.info("----------------------------------------------------")
logger.info("Segment Mirroring Status Report")
# segment pairs that are in wrong roles
primariesInWrongRole = [s for s in gpArray.getSegDbList() if s.isSegmentPrimary(current_role=True) and \
not s.isSegmentPrimary(current_role=False)]
if primariesInWrongRole:
logger.info("----------------------------------------------------")
logger.info("Segments with Primary and Mirror Roles Switched")
logSegments(primariesInWrongRole, logAsPairs=True)
exitCode = 1
else:
pass # logger.info( "No segment pairs with switched roles")
# segment pairs that are in changetracking
primariesInChangeTracking = [s for s in gpArray.getSegDbList() if s.isSegmentPrimary(current_role=True) and \
s.isSegmentModeInChangeLogging()]
if primariesInChangeTracking:
logger.info("----------------------------------------------------")
logger.info("Primaries in Change Tracking")
logSegments(primariesInChangeTracking, logAsPairs=True, additionalFieldsToLog=[VALUE__CHANGE_TRACKING_DATA_SIZE])
exitCode = 1
else:
pass # logger.info( "No segment pairs are in change tracking")
# segments that are in resync
primariesInResync = [s for s in gpArray.getSegDbList() if s.isSegmentPrimary(current_role=True) and \
s.isSegmentModeInResynchronization()]
if primariesInResync:
logger.info("----------------------------------------------------")
logger.info("Segment Pairs in Resynchronization")
logSegments(primariesInResync, logAsPairs=True, additionalFieldsToLog=[VALUE__RESYNC_MODE, \
VALUE__RESYNC_EST_PROGRESS_WITH_MIRROR, VALUE__TOTAL_RESYNC_OBJECT_COUNT, VALUE__RESYNC_OBJECT_COUNT, VALUE__RESYNC_DATA_SYNCHRONIZED, \
VALUE__RESYNC_EST_TOTAL_DATA, VALUE__RESYNC_EST_COMPLETION_TIME, VALUE__CHANGE_TRACKING_DATA_SIZE])
exitCode = 1
else:
pass # logger.info( "No segment pairs are in resynchronization")
# segments that are down (excluding those that are part of changetracking)
changeTrackingMirrors = [contentIdToMirror[s.getSegmentContentId()][0] for s in primariesInChangeTracking]
changeTrackingMirrorsByDbId = GpArray.getSegmentsGroupedByValue(changeTrackingMirrors, gparray.Segment.getSegmentDbId)
segmentsThatAreDown = [s for s in gpArray.getSegDbList() if \
not s.getSegmentDbId() in changeTrackingMirrorsByDbId and \
data.isSegmentProbablyDown(s)]
if segmentsThatAreDown:
logger.info("----------------------------------------------------")
logger.info("Downed Segments (this excludes mirrors whose primaries are in change tracking" )
logger.info(" -- these, if any, are reported separately above")
logger.info(" also, this may include segments where status could not be retrieved)")
logSegments(segmentsThatAreDown, False, [VALUE__MASTER_REPORTS_STATUS, VALUE__SEGMENT_STATUS])
exitCode = 1
else:
pass # logger.info( "No segments are down")
self.__addClusterDownWarning(gpArray, data)
# final output -- no errors, then log this message
if exitCode == 0:
logger.info("----------------------------------------------------")
logger.info("All segments are running normally")
return exitCode
def __addClusterDownWarning(self, gpArray, gpStateData):
if gpStateData.isClusterProbablyDown(gpArray):
logger.warn("*****************************************************" )
logger.warn("DATABASE IS PROBABLY UNAVAILABLE" )
logger.warn("Review Instance Status in log file or screen output for more information" )
logger.warn("*****************************************************" )
def __getSegmentStatusColumns(self):
return [
VALUE__DBID,
VALUE__CONTENTID,
VALUE__HOSTNAME,
VALUE__ADDRESS,
VALUE__DATADIR,
VALUE__PORT,
VALUE__CURRENT_ROLE,
VALUE__PREFERRED_ROLE,
VALUE__MIRROR_STATUS,
VALUE__MASTER_REPORTS_STATUS,
VALUE__SEGMENT_STATUS,
VALUE__HAS_DATABASE_STATUS_WARNING,
VALUE__ERROR_GETTING_SEGMENT_STATUS,
VALUE__CHANGE_TRACKING_DATA_SIZE_BYTES,
VALUE__RESYNC_MODE,
VALUE__RESYNC_DATA_SYNCHRONIZED_BYTES,
VALUE__RESYNC_EST_TOTAL_DATA_BYTES,
VALUE__RESYNC_EST_PROGRESS_WITH_MIRROR_NUMERIC,
VALUE__RESYNC_EST_COMPLETION_TIME_TIMESTAMP,
VALUE__POSTMASTER_PID_FILE_EXISTS,
VALUE__POSTMASTER_PID_VALUE_INT,
VALUE__LOCK_FILES_EXIST,
VALUE__ACTIVE_PID_INT,
VALUE__RESYNC_EST_TOTAL_DATA,
VALUE__VERSION_STRING
]
def __segmentStatusPipeSeparatedForTableUse(self, gpEnv, gpArray):
"""
        Print out the current status of the cluster (not including master+standby) as a pipe-separated list
@param gpEnv the GpMasterEnvironment object
@param gpArray the array to display
returns the exit code
"""
hostNameToResults = self.__fetchAllSegmentData(gpArray)
data = self.__buildGpStateData(gpArray, hostNameToResults)
fields = self.__getSegmentStatusColumns()
rows = [] # [[f.getName() for f in fields]]
for seg in gpArray.getSegDbList():
row = []
for key in fields:
row.append(data.getStrValue(seg, key, ""))
rows.append(row)
# output rows and fieldNames!
self.__writePipeSeparated(rows, printToLogger=False)
return 0
def __printSampleExternalTableSqlForSegmentStatus(self, gpEnv):
scriptName = "%s/gpstate --segmentStatusPipeSeparatedForTableUse -q -d %s" % \
(sys.path[0], gpEnv.getMasterDataDir()) # todo: ideally, would escape here
columns = ["%s %s" % (f.getColumnName(), f.getColumnType()) for f in self.__getSegmentStatusColumns()]
sql = "\nDROP EXTERNAL TABLE IF EXISTS gpstate_segment_status;\n\n\nCREATE EXTERNAL WEB TABLE gpstate_segment_status\n" \
"(%s)\nEXECUTE '%s' ON MASTER\nFORMAT 'TEXT' (DELIMITER '|' NULL AS '');\n" % \
(", ".join(columns), scriptName )
print sql
return 0
    def __writePipeSeparated(self, rows, printToLogger=True):
        for row in rows:
            escapedRow = [s.replace("|", "_") for s in row] # todo: can we escape it better?
            line = "|".join(escapedRow)
            if printToLogger:
                logger.info(line)
            else:
                print line
def __showStatus(self, gpEnv, gpArray):
"""
Prints out the current status of the cluster.
@param gpEnv the GpMasterEnvironment object
@param gpArray the array to display
returns the exit code
"""
hasWarnings = False
hostNameToResults = self.__fetchAllSegmentData(gpArray)
#
# fetch data about master
#
master = gpArray.master
dbUrl = dbconn.DbURL(port=gpEnv.getMasterPort(), dbname='template1' )
conn = dbconn.connect(dbUrl, utility=True)
initDbVersion = dbconn.execSQLForSingletonRow(conn, "select productversion from gp_version_at_initdb limit 1;")[0]
pgVersion = dbconn.execSQLForSingletonRow(conn, "show server_version;")[0]
conn.close()
try:
            # note: this is how old gpstate did this, but ... can we do it without requiring a non-utility-mode
            #       connection? non-utility-mode connections can take a long time to quit out if there
            #       are segment failures and you need to wait for the prober (and this would print
            #       role as "utility" even though it's really a failed dispatcher).
            #
            # for now, we use verbose=True so we don't try any statements on the connection during connect
conn = dbconn.connect(dbUrl, utility=False, verbose=True)
conn.close()
qdRole = "dispatch"
except Exception:
qdRole = "utility" # unable to connect in non-utility, but we've been able to connect in utility so...
#
# print output about master
#
(statusFetchWarning, outputFromMasterCmd) = hostNameToResults[master.getSegmentHostName()]
masterData = outputFromMasterCmd[master.getSegmentDbId()] if statusFetchWarning is None else None
data = self.__buildGpStateData(gpArray, hostNameToResults)
logger.info( "----------------------------------------------------" )
logger.info("-Master Configuration & Status")
logger.info( "----------------------------------------------------" )
self.__addClusterDownWarning(gpArray, data)
tabLog = TableLogger().setWarnWithArrows(True)
tabLog.info(["Master host", "= %s" % master.getSegmentHostName()])
if statusFetchWarning is None:
pidData = masterData[gp.SEGMENT_STATUS__GET_PID]
tabLog.info(["Master postgres process ID", "= %s" % pidData['pid']])
else:
tabLog.warn(["Master port", "= Error fetching data: %s" % statusFetchWarning])
tabLog.info(["Master data directory", "= %s" % master.getSegmentDataDirectory()])
tabLog.info(["Master port", "= %d" % master.getSegmentPort()])
tabLog.info(["Master current role", "= %s" % qdRole])
tabLog.info(["Greenplum initsystem version", "= %s" % initDbVersion])
if statusFetchWarning is None:
if masterData[gp.SEGMENT_STATUS__GET_VERSION] is None:
tabLog.warn(["Greenplum current version", "= Unknown"])
else:
tabLog.info(["Greenplum current version", "= %s" % masterData[gp.SEGMENT_STATUS__GET_VERSION]])
else:
tabLog.warn(["Greenplum current version", "= Error fetching data: %s" % statusFetchWarning])
tabLog.info(["Postgres version", "= %s" % pgVersion])
self.__appendStandbySummary(hostNameToResults, gpArray.standbyMaster, tabLog)
tabLog.outputTable()
hasWarnings = hasWarnings or tabLog.hasWarnings()
#
# Output about segments
#
logger.info("----------------------------------------------------")
logger.info("Segment Instance Status Report")
tabLog = TableLogger().setWarnWithArrows(True)
categoriesToIgnoreOnMirror = {CATEGORY__CHANGE_TRACKING_INFO:True, CATEGORY__RESYNCHRONIZATION_INFO:True}
categoriesToIgnoreWithoutMirroring = {CATEGORY__CHANGE_TRACKING_INFO:True, CATEGORY__MIRRORING_INFO:True,
CATEGORY__RESYNCHRONIZATION_INFO:True}
for seg in gpArray.getSegDbList():
tabLog.addSeparator()
if gpArray.hasMirrors:
toSuppress = categoriesToIgnoreOnMirror if seg.isSegmentMirror(current_role=True) else {}
else: toSuppress = categoriesToIgnoreWithoutMirroring
data.addSegmentToTableLogger(tabLog, seg, toSuppress)
tabLog.outputTable()
hasWarnings = hasWarnings or tabLog.hasWarnings()
self.__addClusterDownWarning(gpArray, data)
if hasWarnings:
logger.warn("*****************************************************" )
logger.warn("Warnings have been generated during status processing" )
logger.warn("Check log file or review screen output" )
logger.warn("*****************************************************" )
return 1 if hasWarnings else 0
def __addResyncProgressFields(self, data, primary, primarySegmentData, isMirror):
"""
Add progress fields to the current segment in data, using the primary information provided.
@param isMirror True if the current segment is a mirror, False otherwise. Not all fields from the primary
data should be inserted (for example, change tracking size is not
considered to apply to the pair but only to the primary so it will not be
inserted for the mirror)
"""
mirrorData = primarySegmentData[gp.SEGMENT_STATUS__GET_MIRROR_STATUS]
#
# populate change tracking fields
#
if not isMirror: # we don't populate CHANGE_TRACKING values for the mirror
if primary.getSegmentMode() == gparray.MODE_RESYNCHRONIZATION or \
primary.getSegmentMode() == gparray.MODE_CHANGELOGGING:
if mirrorData is None or mirrorData["changeTrackingBytesUsed"] < 0:
# server returns <0 if there was an error calculating size
data.addValue(VALUE__CHANGE_TRACKING_DATA_SIZE, "unable to retrieve data size", isWarning=True)
data.addValue(VALUE__CHANGE_TRACKING_DATA_SIZE_BYTES, "", isWarning=True)
else:
data.addValue(VALUE__CHANGE_TRACKING_DATA_SIZE,
self.__abbreviateBytes(mirrorData["changeTrackingBytesUsed"]))
data.addValue(VALUE__CHANGE_TRACKING_DATA_SIZE_BYTES, mirrorData["changeTrackingBytesUsed"])
if mirrorData is None:
# MPP-14054
pass
#
# populate resync modes on primary and mirror
#
if primary.getSegmentMode() == gparray.MODE_RESYNCHRONIZATION:
if mirrorData is None:
data.addValue(VALUE__RESYNC_EST_PROGRESS_WITH_MIRROR, "unable to retrieve progress", isWarning=True)
else:
totalResyncObjectCount = mirrorData['totalResyncObjectCount']
if totalResyncObjectCount == -1:
totalResyncObjectCountStr = "Not Available"
else:
totalResyncObjectCountStr = str(totalResyncObjectCount)
resyncObjectCount = mirrorData['curResyncObjectCount']
if resyncObjectCount == -1:
resyncObjectCountStr = "Not Available"
else:
resyncObjectCountStr = str(resyncObjectCount)
dataSynchronizedBytes = mirrorData["resyncNumCompleted"] * 32L * 1024
dataSynchronizedStr = self.__abbreviateBytes( dataSynchronizedBytes )
resyncDataBytes = None
resyncProgressNumeric = None
totalDataToSynchronizeBytes = None
estimatedEndTimeTimestamp = None
if mirrorData["dataState"] == "InSync":
totalDataToSynchronizeStr = "Sync complete; awaiting config change"
resyncProgressNumeric = 1
resyncProgressStr = "100%"
estimatedEndTimeStr = ""
elif mirrorData["estimatedCompletionTimeSecondsSinceEpoch"] == 0:
totalDataToSynchronizeStr = "Not Available"
resyncProgressStr = "Not Available"
estimatedEndTimeStr = "Not Available"
else:
if mirrorData["resyncTotalToComplete"] == 0:
resyncProgressStr = "Not Available"
else:
resyncProgressNumeric = mirrorData["resyncNumCompleted"] / float(mirrorData["resyncTotalToComplete"])
percentComplete = 100 * resyncProgressNumeric
resyncProgressStr = "%.2f%%" % percentComplete
totalDataToSynchronizeBytes = mirrorData["resyncTotalToComplete"] * 32L * 1024
totalDataToSynchronizeStr = self.__abbreviateBytes( totalDataToSynchronizeBytes )
endTime = datetime.datetime.fromtimestamp(mirrorData["estimatedCompletionTimeSecondsSinceEpoch"])
estimatedEndTimeStr = str(endTime)
estimatedEndTimeTimestamp = endTime.isoformat()
data.addValue(VALUE__RESYNC_MODE, "Full" if mirrorData['isFullResync'] else "Incremental")
data.addValue(VALUE__RESYNC_DATA_SYNCHRONIZED, dataSynchronizedStr)
data.addValue(VALUE__RESYNC_DATA_SYNCHRONIZED_BYTES, dataSynchronizedBytes)
data.addValue(VALUE__RESYNC_EST_TOTAL_DATA, totalDataToSynchronizeStr)
data.addValue(VALUE__RESYNC_EST_TOTAL_DATA_BYTES, totalDataToSynchronizeBytes)
data.addValue(VALUE__RESYNC_EST_PROGRESS_WITH_MIRROR, resyncProgressStr)
data.addValue(VALUE__RESYNC_EST_PROGRESS_WITH_MIRROR_NUMERIC, resyncProgressNumeric)
data.addValue(VALUE__TOTAL_RESYNC_OBJECT_COUNT, totalResyncObjectCountStr)
data.addValue(VALUE__TOTAL_RESYNC_OBJECT_COUNT_INT, totalResyncObjectCount)
data.addValue(VALUE__RESYNC_OBJECT_COUNT, resyncObjectCountStr)
data.addValue(VALUE__RESYNC_OBJECT_COUNT_INT, resyncObjectCount)
data.addValue(VALUE__RESYNC_EST_COMPLETION_TIME, estimatedEndTimeStr)
data.addValue(VALUE__RESYNC_EST_COMPLETION_TIME_TIMESTAMP, estimatedEndTimeTimestamp)
def __buildGpStateData(self, gpArray, hostNameToResults):
data = GpStateData()
primaryByContentId = GpArray.getSegmentsByContentId(\
[s for s in gpArray.getSegDbList() if s.isSegmentPrimary(current_role=True)])
for seg in gpArray.getSegDbList():
(statusFetchWarning, outputFromCmd) = hostNameToResults[seg.getSegmentHostName()]
data.beginSegment(seg)
data.addValue(VALUE__DBID, seg.getSegmentDbId())
data.addValue(VALUE__CONTENTID, seg.getSegmentContentId())
data.addValue(VALUE__HOSTNAME, seg.getSegmentHostName())
data.addValue(VALUE__ADDRESS, seg.getSegmentAddress())
data.addValue(VALUE__DATADIR, seg.getSegmentDataDirectory())
data.addValue(VALUE__PORT, seg.getSegmentPort())
peerPrimary = None
data.addValue(VALUE__CURRENT_ROLE, "Primary" if seg.isSegmentPrimary(current_role=True) else "Mirror")
data.addValue(VALUE__PREFERRED_ROLE, "Primary" if seg.isSegmentPrimary(current_role=False) else "Mirror")
if gpArray.hasMirrors:
if seg.isSegmentPrimary(current_role=True):
data.addValue(VALUE__MIRROR_STATUS, gparray.getDataModeLabel(seg.getSegmentMode()))
else:
peerPrimary = primaryByContentId[seg.getSegmentContentId()][0]
if peerPrimary.isSegmentModeInChangeLogging():
data.addValue(VALUE__MIRROR_STATUS, "Out of Sync", isWarning=True)
else:
data.addValue(VALUE__MIRROR_STATUS, gparray.getDataModeLabel(seg.getSegmentMode()))
else:
data.addValue(VALUE__MIRROR_STATUS, "Physical replication not configured")
if statusFetchWarning is not None:
segmentData = None
data.addValue(VALUE__ERROR_GETTING_SEGMENT_STATUS, statusFetchWarning)
else:
segmentData = outputFromCmd[seg.getSegmentDbId()]
#
# Able to fetch from that segment, proceed
#
#
# mirror info
#
if gpArray.hasMirrors:
# print out mirroring state from the segment itself
if seg.isSegmentPrimary(current_role=True):
self.__addResyncProgressFields(data, seg, segmentData, False)
else:
(primaryStatusFetchWarning, primaryOutputFromCmd) = hostNameToResults[peerPrimary.getSegmentHostName()]
if primaryStatusFetchWarning is not None:
data.addValue(VALUE__ERROR_GETTING_SEGMENT_STATUS, "Primary resync status error:" + str(primaryStatusFetchWarning))
else:
self.__addResyncProgressFields(data, peerPrimary, primaryOutputFromCmd[peerPrimary.getSegmentDbId()], True)
#
# Now PID status
#
pidData = segmentData[gp.SEGMENT_STATUS__GET_PID]
found = segmentData[gp.SEGMENT_STATUS__HAS_POSTMASTER_PID_FILE]
data.addValue(VALUE__POSTMASTER_PID_FILE, "Found" if found else "Missing", isWarning=not found)
data.addValue(VALUE__POSTMASTER_PID_FILE_EXISTS, "t" if found else "f", isWarning=not found)
# PID from postmaster.pid
pidValueForSql = "" if pidData["pid"] == 0 else str(pidData["pid"])
data.addValue(VALUE__POSTMASTER_PID_VALUE, pidData["pid"], pidData['pid'] == 0)
data.addValue(VALUE__POSTMASTER_PID_VALUE_INT, pidValueForSql, pidData['pid'] == 0)
# has lock file
found = segmentData[gp.SEGMENT_STATUS__HAS_LOCKFILE]
data.addValue(VALUE__LOCK_FILES, "Found" if found else "Missing", isWarning=not found)
data.addValue(VALUE__LOCK_FILES_EXIST, "t" if found else "f", isWarning=not found)
if pidData['error'] is None:
data.addValue(VALUE__ACTIVE_PID, pidData["pid"])
data.addValue(VALUE__ACTIVE_PID_INT, pidValueForSql)
else:
data.addValue(VALUE__ACTIVE_PID, "Not found", True)
data.addValue(VALUE__ACTIVE_PID_INT, "", True)
data.addValue(VALUE__VERSION_STRING, segmentData[gp.SEGMENT_STATUS__GET_VERSION])
data.addValue(VALUE__MASTER_REPORTS_STATUS, "Up" if seg.isSegmentUp() else "Down", seg.isSegmentDown())
databaseStatus = None
databaseStatusIsWarning = False
if seg.isSegmentDown():
databaseStatus = "Down in configuration"
databaseStatusIsWarning = True
elif segmentData is None:
databaseStatus = "Unknown -- unable to load segment status"
databaseStatusIsWarning = True
elif segmentData[gp.SEGMENT_STATUS__GET_PID]['error'] is not None:
databaseStatus = "Process error -- database process may be down"
databaseStatusIsWarning = True
elif segmentData[gp.SEGMENT_STATUS__GET_MIRROR_STATUS] is None:
databaseStatus = "Unknown -- unable to load segment status"
databaseStatusIsWarning = True
else:
databaseStatus = segmentData[gp.SEGMENT_STATUS__GET_MIRROR_STATUS]["databaseStatus"]
databaseStatusIsWarning = databaseStatus == "Uninitialized" or databaseStatus == "Down"
if seg.isSegmentMirror(current_role=True):
data.addValue(VALUE__MIRROR_SEGMENT_STATUS, databaseStatus, databaseStatusIsWarning)
else:
data.addValue(VALUE__NONMIRROR_DATABASE_STATUS, databaseStatus, databaseStatusIsWarning)
data.addValue(VALUE__SEGMENT_STATUS, databaseStatus, databaseStatusIsWarning)
data.addValue(VALUE__HAS_DATABASE_STATUS_WARNING, "t" if databaseStatusIsWarning else "f", databaseStatusIsWarning)
data.setSegmentProbablyDown(seg, peerPrimary, databaseStatusIsWarning)
return data
def __abbreviateBytes(self, numBytes):
"""
        Abbreviate bytes with 3 significant digits of precision (so 1.45GB but also 12.3GB), except for numBytes < 1024
SAMPLE TEST:
def testAbbreviateBytes(bytes, expected=""):
# logger.info(" %s abbreviates to %s" % (bytes, self.__abbreviateBytes(bytes)))
if expected != self.__abbreviateBytes(bytes):
raise Exception("Invalid abbreviation for %s : %s" % (bytes, self.__abbreviateBytes(bytes)))
testAbbreviateBytes(0, "0 bytes")
testAbbreviateBytes(1, "1 byte")
testAbbreviateBytes(2, "2 bytes")
testAbbreviateBytes(13, "13 bytes")
testAbbreviateBytes(656, "656 bytes")
testAbbreviateBytes(999, "999 bytes")
testAbbreviateBytes(1000, "1000 bytes")
testAbbreviateBytes(1001, "1001 bytes")
testAbbreviateBytes(1024, "1.00 kB")
testAbbreviateBytes(1301, "1.27 kB")
testAbbreviateBytes(13501, "13.2 kB")
testAbbreviateBytes(135401, "132 kB")
testAbbreviateBytes(1354015, "1.29 MB")
testAbbreviateBytes(13544015, "12.9 MB")
testAbbreviateBytes(135440154, "129 MB")
testAbbreviateBytes(1354401574, "1.26 GB")
testAbbreviateBytes(13544015776, "12.6 GB")
testAbbreviateBytes(135440157769, "126 GB")
testAbbreviateBytes(1354401577609, "1.23 TB")
testAbbreviateBytes(13544015776094, "12.3 TB")
testAbbreviateBytes(135440157760944, "123 TB")
testAbbreviateBytes(1754401577609464, "1.56 PB")
testAbbreviateBytes(17544015776094646, "15.6 PB")
testAbbreviateBytes(175440157760946475, "156 PB")
testAbbreviateBytes(175440157760945555564, "155822 PB")
"""
abbreviations = [
(1024L*1024*1024*1024*1024, "PB"),
(1024L*1024*1024*1024, "TB"),
(1024L*1024*1024, "GB"),
(1024L*1024, "MB"),
(1024L, "kB"),
(1, "bytes")]
if numBytes == 1:
return "1 byte"
for factor, suffix in abbreviations:
if numBytes >= factor:
break
precision = 3
precisionForDisplay = precision - len('%d' % int(numBytes/factor))
if precisionForDisplay < 0 or numBytes < 1024:
precisionForDisplay = 0
return '%.*f %s' % (precisionForDisplay, float(numBytes) / factor, suffix)
def __showQuickStatus(self, gpEnv, gpArray):
exitCode = 0
logger.info("-Quick Greenplum database status from Master instance only")
logger.info( "----------------------------------------------------------")
segments = [seg for seg in gpArray.getDbList() if seg.isSegmentQE()]
upSegments = [seg for seg in segments if seg.isSegmentUp()]
downSegments = [seg for seg in segments if seg.isSegmentDown()]
logger.info("# of up segments, from configuration table = %s" % (len(upSegments)))
if len(downSegments) > 0:
exitCode = 1
logger.info("# of down segments, from configuration table = %s" % (len(downSegments)))
tabLog = TableLogger().setWarnWithArrows(True)
tabLog.info(["Down Segment", "Datadir", "Port"])
for seg in downSegments:
tabLog.info(self.__appendSegmentTripletToArray(seg, []))
tabLog.outputTable()
logger.info( "----------------------------------------------------------")
return exitCode
def __showPortInfo(self, gpEnv, gpArray):
logger.info("-Master segment instance %s port = %d" % (gpEnv.getMasterDataDir(), gpEnv.getMasterPort()))
logger.info("-Segment instance port assignments")
logger.info("----------------------------------")
tabLog = TableLogger().setWarnWithArrows(True)
tabLog.info([ "Host", "Datadir", "Port"])
for seg in gpArray.getSegDbList():
tabLog.info(self.__appendSegmentTripletToArray(seg, []))
tabLog.outputTable()
def __showStandbyMasterInformation(self, gpEnv, gpArray):
standby = gpArray.standbyMaster
#
# print standby configuration/status
#
if standby is None:
logger.info("Standby master instance not configured")
else:
cmd = gp.GpGetSegmentStatusValues("get standby segment version status", [standby],
[gp.SEGMENT_STATUS__GET_PID], verbose=logging_is_verbose(), ctxt=base.REMOTE,
remoteHost=standby.getSegmentAddress())
cmd.run()
# fetch standby pid
(standbyPidFetchWarning, outputFromCmd) = cmd.decodeResults()
if standbyPidFetchWarning is None:
pidData = outputFromCmd[standby.getSegmentDbId()][gp.SEGMENT_STATUS__GET_PID]
else:
pidData = {}
pidData['pid'] = 0
pidData['error'] = None
# Print output!
logger.info("Standby master details" )
logger.info("----------------------" )
tabLog = TableLogger().setWarnWithArrows(True)
tabLog.info(["Standby address", "= %s" % standby.getSegmentAddress()])
tabLog.info(["Standby data directory", "= %s" % standby.getSegmentDataDirectory()])
tabLog.info(["Standby port", "= %s" % standby.getSegmentPort()])
if standbyPidFetchWarning is not None:
tabLog.warn(["Standby PID", "= %s" % standbyPidFetchWarning ])
tabLog.warn(["Standby status", "= Status could not be determined"])
elif pidData['pid'] == 0:
tabLog.warn(["Standby PID", "= 0"])
tabLog.warn(["Standby status", "= Standby process not running"])
else:
if pidData['error'] is not None:
#
# we got a pid value but had some kind of error -- so possibly the PID
# is not actually active on its port. Print the error
#
tabLog.warn(["Standby PID", "= %s" % pidData['pid'], "%s" % pidData['error']])
tabLog.warn(["Standby status", "= Status could not be determined" ])
else:
tabLog.info(["Standby PID", "= %s" % pidData['pid']])
tabLog.info(["Standby status", "= Standby host passive" ])
tabLog.outputTable()
#
# now print pg_stat_replication
#
logger.info("-------------------------------------------------------------" )
logger.info("-pg_stat_replication" )
logger.info("-------------------------------------------------------------" )
dbUrl = dbconn.DbURL(port=gpEnv.getMasterPort(), dbname='template1')
conn = dbconn.connect(dbUrl, utility=True)
sql = "SELECT state, sync_state, sent_location, flush_location, replay_location FROM pg_stat_replication"
cur = dbconn.execSQL(conn, sql)
if cur.rowcount == 1:
row = cur.fetchall()[0]
logger.info("-WAL Sender State: %s" % row[0])
logger.info("-Sync state: %s" % row[1])
logger.info("-Sent Location: %s" % row[2])
logger.info("-Flush Location: %s" % row[3])
logger.info("-Replay Location: %s" % row[4])
elif cur.rowcount > 1:
logger.warning("pg_stat_replication shows more than 1 row.")
else:
logger.info("No entries found.")
logger.info("-------------------------------------------------------------" )
# done printing pg_stat_replication table
def __poolWait(self, dispatchCount):
self.__pool.wait_and_printdots(dispatchCount, self.__options.quiet)
def __showVersionInfo(self, gpEnv, gpArray):
exitCode = 0
logger.info("Loading version information")
segmentsAndMaster = [seg for seg in gpArray.getDbList()]
upSegmentsAndMaster = [seg for seg in segmentsAndMaster if seg.isSegmentUp()]
# fetch from hosts
segmentsByHost = GpArray.getSegmentsByHostName(upSegmentsAndMaster)
dispatchCount = 0
for hostName, segments in segmentsByHost.iteritems():
cmd = gp.GpGetSegmentStatusValues("get segment version status", segments,
[gp.SEGMENT_STATUS__GET_VERSION],
verbose=logging_is_verbose(),
ctxt=base.REMOTE,
remoteHost=segments[0].getSegmentAddress())
self.__pool.addCommand(cmd)
dispatchCount+=1
self.__poolWait(dispatchCount)
# group output
dbIdToVersion = {}
uniqueVersions = {}
for cmd in self.__pool.getCompletedItems():
(warning, outputFromCmd) = cmd.decodeResults()
if warning is None:
for seg in cmd.dblist:
version = outputFromCmd[seg.getSegmentDbId()][gp.SEGMENT_STATUS__GET_VERSION]
if version is not None:
dbIdToVersion[seg.getSegmentDbId()] = version
uniqueVersions[version] = True
else:
logger.warn(warning)
# print the list of all segments and warnings about trouble
tabLog = TableLogger().setWarnWithArrows(True)
tabLog.info(["Host","Datadir", "Port", "Version", ""])
for seg in segmentsAndMaster:
line = self.__appendSegmentTripletToArray(seg, [])
version = dbIdToVersion.get(seg.getSegmentDbId())
if version is None:
line.append("unable to retrieve version")
tabLog.warn(line)
else:
line.append(version)
tabLog.info(line)
tabLog.outputTable()
if len(uniqueVersions) > 1:
logger.warn("Versions for some segments do not match. Review table above for details.")
hadFailures = len(dbIdToVersion) != len(segmentsAndMaster)
if hadFailures:
logger.warn("Unable to retrieve version data from all segments. Review table above for details.")
if len(uniqueVersions) == 1 and not hadFailures:
# if we got data from all segments then we are confident they are all the same version
logger.info("All segments are running the same software version")
self.__pool.empty_completed_items()
return exitCode
def run(self):
# check that only one option is set
numSet = (1 if self.__options.showMirrorList else 0) + \
(1 if self.__options.showClusterConfig else 0) + \
(1 if self.__options.showQuickStatus else 0) + \
(1 if self.__options.showStatus else 0) + \
(1 if self.__options.showStatusStatistics else 0) + \
(1 if self.__options.segmentStatusPipeSeparatedForTableUse else 0) + \
(1 if self.__options.printSampleExternalTableSqlForSegmentStatus else 0) + \
(1 if self.__options.showPortInformation else 0) + \
(1 if self.__options.showStandbyMasterInformation else 0) + \
(1 if self.__options.showSummaryOfSegmentsWhichRequireAttention else 0) + \
(1 if self.__options.showVersionInfo else 0)
if numSet > 1:
raise ProgramArgumentValidationException("Too many output options specified")
if self.__options.parallelDegree < 1 or self.__options.parallelDegree > 64:
raise ProgramArgumentValidationException("Invalid parallelDegree provided with -B argument: %d" % self.__options.parallelDegree)
self.__pool = base.WorkerPool(self.__options.parallelDegree)
# load config
gpEnv = GpMasterEnvironment(self.__options.masterDataDirectory, True, self.__options.timeout, self.__options.retries)
confProvider = configInterface.getConfigurationProvider().initializeProvider(gpEnv.getMasterPort())
gpArray = confProvider.loadSystemConfig(useUtilityMode=True)
# do it!
if self.__options.showMirrorList:
exitCode = self._showMirrorList(gpEnv, gpArray)
elif self.__options.showClusterConfig:
exitCode = self.__showClusterConfig(gpEnv, gpArray)
elif self.__options.showQuickStatus:
exitCode = self.__showQuickStatus(gpEnv, gpArray)
elif self.__options.showStatus:
exitCode = self.__showStatus(gpEnv, gpArray)
elif self.__options.showVersionInfo:
exitCode = self.__showVersionInfo(gpEnv, gpArray)
elif self.__options.showSummaryOfSegmentsWhichRequireAttention:
exitCode = self.__showSummaryOfSegmentsWhichRequireAttention(gpEnv, gpArray)
elif self.__options.printSampleExternalTableSqlForSegmentStatus:
exitCode = self.__printSampleExternalTableSqlForSegmentStatus(gpEnv)
elif self.__options.showStandbyMasterInformation:
exitCode = self.__showStandbyMasterInformation(gpEnv, gpArray)
elif self.__options.showPortInformation:
exitCode = self.__showPortInfo(gpEnv, gpArray)
elif self.__options.segmentStatusPipeSeparatedForTableUse:
exitCode = self.__segmentStatusPipeSeparatedForTableUse(gpEnv, gpArray)
else:
# self.__options.showStatusStatistics OR default:
exitCode = self.__showStatusStatistics(gpEnv, gpArray)
return exitCode
def cleanup(self):
if self.__pool:
self.__pool.haltWork()
#-------------------------------------------------------------------------
@staticmethod
def createParser():
description = ("Display system state")
help = [""]
parser = OptParser(option_class=OptChecker,
description=' '.join(description.split()),
version='%prog version $Revision$')
parser.setHelp(help)
addStandardLoggingAndHelpOptions(parser, True)
addTo = OptionGroup(parser, "Connection Options")
parser.add_option_group(addTo)
addMasterDirectoryOptionForSingleClusterProgram(addTo)
addTo = OptionGroup(parser, "Output Options")
parser.add_option_group(addTo)
addTo.add_option('-m', None, default=False, action='store_true',
dest="showMirrorList",
metavar="<showMirrorList>",
help="Show mirror list from configuration")
addTo.add_option('-c', None, default=False, action='store_true',
dest="showClusterConfig",
metavar="<showClusterConfig>",
help="Show cluster configuration")
addTo.add_option("-Q", None, default=False, action="store_true",
dest="showQuickStatus",
metavar="<showQuickStatus>",
help="Show quick status")
addTo.add_option("-s", None, default=False, action="store_true",
dest="showStatus",
metavar="<showStatus>",
help="Show status")
addTo.add_option("-i", None, default=False, action="store_true",
dest="showVersionInfo",
metavar="<showVersionInfo>",
help="Show version information")
addTo.add_option("-p", None, default=False, action="store_true",
dest="showPortInformation",
metavar="<showPortInformation>",
help="Show port information")
addTo.add_option("-f", None, default=False, action="store_true",
dest="showStandbyMasterInformation",
metavar="<showStandbyMasterInformation>",
help="Show standby master information")
addTo.add_option("-b", None, default=False, action="store_true",
dest="showStatusStatistics",
metavar="<showStatusStatistics>",
help="Show status statistics")
addTo.add_option("-e", None, default=False, action="store_true",
dest="showSummaryOfSegmentsWhichRequireAttention",
metavar="<showSummaryOfSegmentsWhichRequireAttention>",
help="Show summary of segments needing attention")
#
# two experimental options for exposing segment status as a queryable web table
#
addTo.add_option("--segmentStatusPipeSeparatedForTableUse", None, default=False, action="store_true",
dest="segmentStatusPipeSeparatedForTableUse",
metavar="<segmentStatusPipeSeparatedForTableUse>",
help="Show status as pipe separated output")
addTo.add_option("--printSampleExternalTableSql", None, default=False, action="store_true",
dest="printSampleExternalTableSqlForSegmentStatus",
metavar="<printSampleExternalTableSqlForSegmentStatus>",
help="Print sample sql that can be run to create an external table on stop of gpstate --segmentStatusPipeSeparatedForTableUse")
addTo = OptionGroup(parser, "Other Options")
parser.add_option_group(addTo)
addTo.add_option("-B", None, type="int", default=16,
dest="parallelDegree",
metavar="<parallelDegree>",
help="Max # of workers to use querying segments for status. [default: %default]")
addTo.add_option("--timeout", None, type="int", default=None,
dest="timeout",
metavar="<timeout>",
help="Database connection timeout. [default: %default]")
addTo.add_option("--retries", None, type="int", default=None,
dest="retries",
metavar="<retries>",
help="Database connection retries. [default: %default]")
parser.set_defaults()
return parser
@staticmethod
def createProgram(options, args):
if len(args) > 0 :
raise ProgramArgumentValidationException(\
"too many arguments: only options may be specified", True)
return GpSystemStateProgram(options)
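# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of how the createParser/createProgram factories above
# could be driven by an entry-point script; the real gpstate wrapper routes
# these through gppylib's main helpers, so treat this only as a sketch:
#
#   parser = GpSystemStateProgram.createParser()
#   (options, args) = parser.parse_args()
#   program = GpSystemStateProgram.createProgram(options, args)
#   try:
#       exitCode = program.run()
#   finally:
#       program.cleanup()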
| {
"content_hash": "556a3957ce850b7be6f278eaa03400a2",
"timestamp": "",
"source": "github",
"line_count": 1511,
"max_line_length": 185,
"avg_line_length": 48.632693580410326,
"alnum_prop": 0.5887131892656905,
"repo_name": "Chibin/gpdb",
"id": "39784a7a7030002870799da25078f94455297637",
"size": "73652",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "gpMgmt/bin/gppylib/programs/clsSystemState.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3737"
},
{
"name": "Batchfile",
"bytes": "11369"
},
{
"name": "C",
"bytes": "33469761"
},
{
"name": "C++",
"bytes": "2705055"
},
{
"name": "CSS",
"bytes": "7407"
},
{
"name": "Csound Score",
"bytes": "164"
},
{
"name": "DTrace",
"bytes": "3746"
},
{
"name": "Fortran",
"bytes": "14777"
},
{
"name": "GDB",
"bytes": "576"
},
{
"name": "Gherkin",
"bytes": "440687"
},
{
"name": "HTML",
"bytes": "355087"
},
{
"name": "Java",
"bytes": "186576"
},
{
"name": "JavaScript",
"bytes": "23969"
},
{
"name": "Lex",
"bytes": "195903"
},
{
"name": "M4",
"bytes": "97129"
},
{
"name": "Makefile",
"bytes": "422102"
},
{
"name": "Objective-C",
"bytes": "42255"
},
{
"name": "PLSQL",
"bytes": "218011"
},
{
"name": "PLpgSQL",
"bytes": "4947989"
},
{
"name": "Perl",
"bytes": "3906788"
},
{
"name": "Perl 6",
"bytes": "8302"
},
{
"name": "Python",
"bytes": "6267140"
},
{
"name": "Roff",
"bytes": "32274"
},
{
"name": "Ruby",
"bytes": "26862"
},
{
"name": "SQLPL",
"bytes": "642650"
},
{
"name": "Shell",
"bytes": "558642"
},
{
"name": "XS",
"bytes": "8405"
},
{
"name": "XSLT",
"bytes": "5779"
},
{
"name": "Yacc",
"bytes": "516996"
}
],
"symlink_target": ""
} |
from . import domainresource
class RiskAssessment(domainresource.DomainResource):
""" Potential outcomes for a subject with likelihood.
An assessment of the likely outcome(s) for a patient or other subject as
well as the likelihood of each outcome.
"""
resource_type = "RiskAssessment"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.basedOn = None
""" Request fulfilled by this assessment.
Type `FHIRReference` referencing `Resource` (represented as `dict` in JSON). """
self.basis = None
""" Information used in assessment.
List of `FHIRReference` items referencing `Resource` (represented as `dict` in JSON). """
self.code = None
""" Type of assessment.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.comment = None
""" Comments on the risk assessment.
Type `str`. """
self.condition = None
""" Condition assessed.
Type `FHIRReference` referencing `Condition` (represented as `dict` in JSON). """
self.context = None
""" Where was assessment performed?.
Type `FHIRReference` referencing `Encounter, EpisodeOfCare` (represented as `dict` in JSON). """
self.identifier = None
""" Unique identifier for the assessment.
Type `Identifier` (represented as `dict` in JSON). """
self.method = None
""" Evaluation mechanism.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.mitigation = None
""" How to reduce risk.
Type `str`. """
self.occurrenceDateTime = None
""" When was assessment made?.
Type `FHIRDate` (represented as `str` in JSON). """
self.occurrencePeriod = None
""" When was assessment made?.
Type `Period` (represented as `dict` in JSON). """
self.parent = None
""" Part of this occurrence.
Type `FHIRReference` referencing `Resource` (represented as `dict` in JSON). """
self.performer = None
""" Who did assessment?.
Type `FHIRReference` referencing `Practitioner, Device` (represented as `dict` in JSON). """
self.prediction = None
""" Outcome predicted.
List of `RiskAssessmentPrediction` items (represented as `dict` in JSON). """
self.reasonCodeableConcept = None
""" Why the assessment was necessary?.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.reasonReference = None
""" Why the assessment was necessary?.
Type `FHIRReference` referencing `Resource` (represented as `dict` in JSON). """
self.status = None
""" registered | preliminary | final | amended +.
Type `str`. """
self.subject = None
""" Who/what does assessment apply to?.
Type `FHIRReference` referencing `Patient, Group` (represented as `dict` in JSON). """
super(RiskAssessment, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(RiskAssessment, self).elementProperties()
js.extend([
("basedOn", "basedOn", fhirreference.FHIRReference, False, None, False),
("basis", "basis", fhirreference.FHIRReference, True, None, False),
("code", "code", codeableconcept.CodeableConcept, False, None, False),
("comment", "comment", str, False, None, False),
("condition", "condition", fhirreference.FHIRReference, False, None, False),
("context", "context", fhirreference.FHIRReference, False, None, False),
("identifier", "identifier", identifier.Identifier, False, None, False),
("method", "method", codeableconcept.CodeableConcept, False, None, False),
("mitigation", "mitigation", str, False, None, False),
("occurrenceDateTime", "occurrenceDateTime", fhirdate.FHIRDate, False, "occurrence", False),
("occurrencePeriod", "occurrencePeriod", period.Period, False, "occurrence", False),
("parent", "parent", fhirreference.FHIRReference, False, None, False),
("performer", "performer", fhirreference.FHIRReference, False, None, False),
("prediction", "prediction", RiskAssessmentPrediction, True, None, False),
("reasonCodeableConcept", "reasonCodeableConcept", codeableconcept.CodeableConcept, False, "reason", False),
("reasonReference", "reasonReference", fhirreference.FHIRReference, False, "reason", False),
("status", "status", str, False, None, True),
("subject", "subject", fhirreference.FHIRReference, False, None, False),
])
return js
from . import backboneelement
class RiskAssessmentPrediction(backboneelement.BackboneElement):
""" Outcome predicted.
Describes the expected outcome for the subject.
"""
resource_type = "RiskAssessmentPrediction"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.outcome = None
""" Possible outcome for the subject.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.probabilityDecimal = None
""" Likelihood of specified outcome.
Type `float`. """
self.probabilityRange = None
""" Likelihood of specified outcome.
Type `Range` (represented as `dict` in JSON). """
self.qualitativeRisk = None
""" Likelihood of specified outcome as a qualitative value.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.rationale = None
""" Explanation of prediction.
Type `str`. """
self.relativeRisk = None
""" Relative likelihood.
Type `float`. """
self.whenPeriod = None
""" Timeframe or age range.
Type `Period` (represented as `dict` in JSON). """
self.whenRange = None
""" Timeframe or age range.
Type `Range` (represented as `dict` in JSON). """
super(RiskAssessmentPrediction, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(RiskAssessmentPrediction, self).elementProperties()
js.extend([
("outcome", "outcome", codeableconcept.CodeableConcept, False, None, True),
("probabilityDecimal", "probabilityDecimal", float, False, "probability", False),
("probabilityRange", "probabilityRange", range.Range, False, "probability", False),
("qualitativeRisk", "qualitativeRisk", codeableconcept.CodeableConcept, False, None, False),
("rationale", "rationale", str, False, None, False),
("relativeRisk", "relativeRisk", float, False, None, False),
("whenPeriod", "whenPeriod", period.Period, False, "when", False),
("whenRange", "whenRange", range.Range, False, "when", False),
])
return js
import sys
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
try:
from . import range
except ImportError:
range = sys.modules[__package__ + '.range']
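# --- Illustrative usage sketch (not part of the generated model) ---
# A hedged example of constructing the resource from a JSON dictionary; the
# values are invented and only 'status' is required. It assumes the usual
# fhirclient as_json() round-trip helper inherited from the base classes:
#
#   ra = RiskAssessment({"resourceType": "RiskAssessment", "status": "final",
#                        "comment": "illustrative data only"})
#   print(ra.status)     # "final"
#   print(ra.as_json())  # serialize back to a JSON-ready dict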
| {
"content_hash": "e675494e4d6dcd5ca4f006b8f7807189",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 120,
"avg_line_length": 40.58571428571429,
"alnum_prop": 0.6091751730611287,
"repo_name": "all-of-us/raw-data-repository",
"id": "9398aa0c42845e2a2ba3a72a4ce54e5d1fb3abf6",
"size": "8706",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "rdr_service/lib_fhir/fhirclient_3_0_0/models/riskassessment.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1866"
},
{
"name": "Mako",
"bytes": "1715"
},
{
"name": "Python",
"bytes": "17040924"
},
{
"name": "R",
"bytes": "2212"
},
{
"name": "Shell",
"bytes": "92213"
}
],
"symlink_target": ""
} |
"""Logging functions.
This module has functions for logging errors and warnings.
"""
import sys
_num_errors = 0
_num_warnings = 0
def Error(msg):
"""Prints an error."""
global _num_errors
_num_errors += 1
print >> sys.stderr, ('ERROR: %s' % msg)
def Warning(msg):
"""Prints an warning."""
global _num_warnings
_num_warnings += 1
print >> sys.stderr, ('WARNING: %s' % msg)
def Info(msg):
"""Prints Info."""
print msg
def SourceError(source, msg):
"""Prints an error with source info"""
Error('%s:%d %s' % (source.file.source, source.line, msg))
def SourceWarning(source, msg):
"""Prints an warning with source info"""
Warning ('%s:%d %s' % (source.file.source, source.line, msg))
def FailIfHaveErrors():
"""Print status and exit if there were errors."""
global _num_errors
global _num_warnings
if _num_errors > 0 or _num_warnings > 0:
print >> sys.stderr, 'Num Errors:', _num_errors
print >> sys.stderr, 'Num Warnings:', _num_warnings
if _num_errors > 0:
sys.exit(1)
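# --- Illustrative usage sketch (not part of the original module) ---
# Exercises the helpers above; because an Error() is logged, the final
# FailIfHaveErrors() call prints the counts and exits with status 1.
if __name__ == '__main__':
  Info('starting build')
  Warning('something looks suspicious')
  Error('something actually failed')
  FailIfHaveErrors()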
| {
"content_hash": "e905cfbdd22845949d168122973e7898",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 63,
"avg_line_length": 21.254901960784313,
"alnum_prop": 0.6070110701107011,
"repo_name": "AlexSc/HelloWorld",
"id": "1af21464957aec233571e3e5a3c824973e9c584a",
"size": "1698",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nixysa/log.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "34982"
},
{
"name": "Python",
"bytes": "750719"
},
{
"name": "Shell",
"bytes": "810"
}
],
"symlink_target": ""
} |
import json
import requests
BASE_URL = "http://musicbrainz.org/ws/2/"
ARTIST_URL = BASE_URL + "artist/"
query_type = { "simple": {},
"atr": {"inc": "aliases+tags+ratings"},
"aliases": {"inc": "aliases"},
"releases": {"inc": "releases"}}
def query_site(url, params, uid="", fmt="json"):
params["fmt"] = fmt
r = requests.get(url + uid, params=params)
print "requesting", r.url
if r.status_code == requests.codes.ok:
return r.json()
else:
r.raise_for_status()
def query_by_name(url, params, name):
params["query"] = "artist:" + name
return query_site(url, params)
def pretty_print(data, indent=4):
if type(data) == dict:
print json.dumps(data, indent=indent, sort_keys=True)
else:
print data
def main():
results = query_by_name(ARTIST_URL, query_type["simple"], "Nirvana")
pretty_print(results)
artist_id = results["artists"][1]["id"]
print "\nARTIST:"
pretty_print(results["artists"][1])
artist_data = query_site(ARTIST_URL, query_type["releases"], artist_id)
releases = artist_data["releases"]
print "\nONE RELEASE:"
pretty_print(releases[0], indent=2)
release_titles = [r["title"] for r in releases]
print "\nALL TITLES:"
for t in release_titles:
print t
if __name__ == '__main__':
main()
| {
"content_hash": "2a577ebbee0d3960c2e578166c3e9480",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 75,
"avg_line_length": 24.263157894736842,
"alnum_prop": 0.586406362979031,
"repo_name": "tuanvu216/udacity-course",
"id": "13b27ffa77e3e337b169180036327a8491e76dbf",
"size": "1590",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "data_wrangling_with_mongodb/Lesson_1_Data_Extraction_Fundamentals/14-JSON_Playground/musicbrainz.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3736"
},
{
"name": "HTML",
"bytes": "143388"
},
{
"name": "JavaScript",
"bytes": "169689"
},
{
"name": "Jupyter Notebook",
"bytes": "3237655"
},
{
"name": "Python",
"bytes": "400129"
},
{
"name": "Ruby",
"bytes": "448"
},
{
"name": "Shell",
"bytes": "538"
}
],
"symlink_target": ""
} |
import numpy as np
import os.path
import astropy.units as u
def waypoint(comps, intTimes, duration, mpath, tofile):
"""Generates waypoint dictionary for MissionSim
Args:
comps (array):
An array of completeness values for all stars
intTimes (array):
An array of predicted integration times for all stars
duration (int):
            The length of time allowed for the waypoint calculation in days, defaults to 365
mpath (string):
The path to the directory to save a plot in.
tofile (string):
            Name of the file to save a plot of total completeness over mission
            time to; if None, no plot is created (the default behavior of the
            calling genWaypoint routine)
Returns:
dict:
Output dictionary containing the number of stars visited, the total
completness achieved, and the amount of time spent integrating.
"""
CbT = comps / intTimes
sInds_sorted = np.argsort(CbT)[::-1]
# run through sorted sInds until end of duration
intTime_sum = 0 * u.d
comp_sum = 0
num_stars = 0
comp_sums = []
intTime_sums = []
for sInd in sInds_sorted:
if intTime_sum + intTimes[sInd] > duration * u.d:
break
intTime_sum += intTimes[sInd]
comp_sum += comps[sInd]
num_stars += 1
comp_sums.append(comp_sum)
intTime_sums.append(intTime_sum.value)
# if a filename is specified, create a plot.
if tofile is not None: # pragma: no cover
import matplotlib.pyplot as plt
plt.scatter(intTime_sums, comp_sums, s=4, color="0.25")
plt.ylabel("Total Completeness")
plt.xlabel("Time (d)")
plt.title("Total Completeness Over {} Star Visits".format(num_stars))
plt.grid(True)
plt.savefig(os.path.join(mpath, tofile))
return {
"numStars": num_stars,
"Total Completeness": comp_sum,
"Total intTime": intTime_sum,
}
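if __name__ == "__main__":  # pragma: no cover
    # Illustrative usage sketch (not part of the original module): synthetic
    # completeness and integration-time values, a 365-day time budget, and
    # tofile=None so no plot is written. All numbers here are made up.
    rng = np.random.default_rng(seed=0)
    demo_comps = rng.uniform(0.01, 0.3, size=100)
    demo_intTimes = rng.uniform(0.5, 5.0, size=100) * u.d
    result = waypoint(demo_comps, demo_intTimes, duration=365, mpath=".", tofile=None)
    print(result)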
| {
"content_hash": "df9a9f729281cdc68b6bb3a4f780ab03",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 87,
"avg_line_length": 30.8125,
"alnum_prop": 0.6146044624746451,
"repo_name": "dsavransky/EXOSIMS",
"id": "e4a40cde14b7eaa2a72023a909093da43a9e4376",
"size": "1972",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "EXOSIMS/util/waypoint.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "8087"
},
{
"name": "Cython",
"bytes": "2459"
},
{
"name": "Python",
"bytes": "2936469"
}
],
"symlink_target": ""
} |
"""Float class.
Represents an unbounded float using a widget.
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from traitlets import (
Instance, Unicode, CFloat, Bool, CaselessStrEnum, Tuple, TraitError, validate, default
)
from .widget_description import DescriptionWidget
from .trait_types import InstanceDict, NumberFormat
from .valuewidget import ValueWidget
from .widget import register, widget_serialization
from .widget_core import CoreWidget
from .widget_int import ProgressStyle, SliderStyle
class _Float(DescriptionWidget, ValueWidget, CoreWidget):
value = CFloat(0.0, help="Float value").tag(sync=True)
def __init__(self, value=None, **kwargs):
if value is not None:
kwargs['value'] = value
super(_Float, self).__init__(**kwargs)
class _BoundedFloat(_Float):
max = CFloat(100.0, help="Max value").tag(sync=True)
min = CFloat(0.0, help="Min value").tag(sync=True)
@validate('value')
def _validate_value(self, proposal):
"""Cap and floor value"""
value = proposal['value']
if self.min > value or self.max < value:
value = min(max(value, self.min), self.max)
return value
@validate('min')
def _validate_min(self, proposal):
"""Enforce min <= value <= max"""
min = proposal['value']
if min > self.max:
raise TraitError('Setting min > max')
if min > self.value:
self.value = min
return min
@validate('max')
def _validate_max(self, proposal):
"""Enforce min <= value <= max"""
max = proposal['value']
if max < self.min:
raise TraitError('setting max < min')
if max < self.value:
self.value = max
return max
@register
class FloatText(_Float):
""" Displays a float value within a textbox. For a textbox in
which the value must be within a specific range, use BoundedFloatText.
Parameters
----------
value : float
value displayed
step : float
step of the increment (if None, any step is allowed)
description : str
description displayed next to the text box
"""
_view_name = Unicode('FloatTextView').tag(sync=True)
_model_name = Unicode('FloatTextModel').tag(sync=True)
disabled = Bool(False, help="Enable or disable user changes").tag(sync=True)
continuous_update = Bool(False, help="Update the value as the user types. If False, update on submission, e.g., pressing Enter or navigating away.").tag(sync=True)
step = CFloat(None, allow_none=True, help="Minimum step to increment the value").tag(sync=True)
@register
class BoundedFloatText(_BoundedFloat):
""" Displays a float value within a textbox. Value must be within the range specified.
For a textbox in which the value doesn't need to be within a specific range, use FloatText.
Parameters
----------
value : float
value displayed
min : float
minimal value of the range of possible values displayed
max : float
maximal value of the range of possible values displayed
step : float
step of the increment (if None, any step is allowed)
description : str
description displayed next to the textbox
"""
_view_name = Unicode('FloatTextView').tag(sync=True)
_model_name = Unicode('BoundedFloatTextModel').tag(sync=True)
disabled = Bool(False, help="Enable or disable user changes").tag(sync=True)
continuous_update = Bool(False, help="Update the value as the user types. If False, update on submission, e.g., pressing Enter or navigating away.").tag(sync=True)
step = CFloat(None, allow_none=True, help="Minimum step to increment the value").tag(sync=True)
@register
class FloatSlider(_BoundedFloat):
""" Slider/trackbar of floating values with the specified range.
Parameters
----------
value : float
position of the slider
min : float
minimal position of the slider
max : float
maximal position of the slider
step : float
step of the trackbar
description : str
name of the slider
orientation : {'horizontal', 'vertical'}
default is 'horizontal', orientation of the slider
readout : {True, False}
default is True, display the current value of the slider next to it
readout_format : str
default is '.2f', specifier for the format function used to represent
slider value for human consumption, modeled after Python 3's format
specification mini-language (PEP 3101).
"""
_view_name = Unicode('FloatSliderView').tag(sync=True)
_model_name = Unicode('FloatSliderModel').tag(sync=True)
step = CFloat(0.1, help="Minimum step to increment the value").tag(sync=True)
orientation = CaselessStrEnum(values=['horizontal', 'vertical'],
default_value='horizontal', help="Vertical or horizontal.").tag(sync=True)
readout = Bool(True, help="Display the current value of the slider next to it.").tag(sync=True)
readout_format = NumberFormat(
'.2f', help="Format for the readout").tag(sync=True)
continuous_update = Bool(True, help="Update the value of the widget as the user is holding the slider.").tag(sync=True)
disabled = Bool(False, help="Enable or disable user changes").tag(sync=True)
style = InstanceDict(SliderStyle).tag(sync=True, **widget_serialization)
@register
class FloatProgress(_BoundedFloat):
""" Displays a progress bar.
Parameters
-----------
value : float
position within the range of the progress bar
min : float
minimal position of the slider
max : float
maximal position of the slider
description : str
name of the progress bar
orientation : {'horizontal', 'vertical'}
default is 'horizontal', orientation of the progress bar
bar_style: {'success', 'info', 'warning', 'danger', ''}
color of the progress bar, default is '' (blue)
colors are: 'success'-green, 'info'-light blue, 'warning'-orange, 'danger'-red
"""
_view_name = Unicode('ProgressView').tag(sync=True)
_model_name = Unicode('FloatProgressModel').tag(sync=True)
orientation = CaselessStrEnum(values=['horizontal', 'vertical'],
default_value='horizontal', help="Vertical or horizontal.").tag(sync=True)
bar_style = CaselessStrEnum(
values=['success', 'info', 'warning', 'danger', ''],
default_value='', allow_none=True,
help="Use a predefined styling for the progess bar.").tag(sync=True)
style = InstanceDict(ProgressStyle).tag(sync=True, **widget_serialization)
class _FloatRange(_Float):
value = Tuple(CFloat(), CFloat(), default_value=(0.0, 1.0),
help="Tuple of (lower, upper) bounds").tag(sync=True)
@property
def lower(self):
return self.value[0]
@lower.setter
def lower(self, lower):
self.value = (lower, self.value[1])
@property
def upper(self):
return self.value[1]
@upper.setter
def upper(self, upper):
self.value = (self.value[0], upper)
@validate('value')
def _validate_value(self, proposal):
lower, upper = proposal['value']
if upper < lower:
raise TraitError('setting lower > upper')
return lower, upper
class _BoundedFloatRange(_FloatRange):
step = CFloat(1.0, help="Minimum step that the value can take (ignored by some views)").tag(sync=True)
max = CFloat(100.0, help="Max value").tag(sync=True)
min = CFloat(0.0, help="Min value").tag(sync=True)
def __init__(self, *args, **kwargs):
min, max = kwargs.get('min', 0.0), kwargs.get('max', 100.0)
if not kwargs.get('value', None):
kwargs['value'] = (0.75 * min + 0.25 * max,
0.25 * min + 0.75 * max)
super(_BoundedFloatRange, self).__init__(*args, **kwargs)
@validate('min', 'max')
def _validate_bounds(self, proposal):
trait = proposal['trait']
new = proposal['value']
if trait.name == 'min' and new > self.max:
raise TraitError('setting min > max')
if trait.name == 'max' and new < self.min:
raise TraitError('setting max < min')
if trait.name == 'min':
self.value = (max(new, self.value[0]), max(new, self.value[1]))
if trait.name == 'max':
self.value = (min(new, self.value[0]), min(new, self.value[1]))
return new
@validate('value')
def _validate_value(self, proposal):
lower, upper = super(_BoundedFloatRange, self)._validate_value(proposal)
lower, upper = min(lower, self.max), min(upper, self.max)
lower, upper = max(lower, self.min), max(upper, self.min)
return lower, upper
@register
class FloatRangeSlider(_BoundedFloatRange):
""" Slider/trackbar that represents a pair of floats bounded by minimum and maximum value.
Parameters
----------
value : float tuple
range of the slider displayed
min : float
minimal position of the slider
max : float
maximal position of the slider
step : float
step of the trackbar
description : str
name of the slider
orientation : {'horizontal', 'vertical'}
default is 'horizontal'
readout : {True, False}
default is True, display the current value of the slider next to it
readout_format : str
default is '.2f', specifier for the format function used to represent
slider value for human consumption, modeled after Python 3's format
specification mini-language (PEP 3101).
"""
_view_name = Unicode('FloatRangeSliderView').tag(sync=True)
_model_name = Unicode('FloatRangeSliderModel').tag(sync=True)
step = CFloat(0.1, help="Minimum step to increment the value").tag(sync=True)
orientation = CaselessStrEnum(values=['horizontal', 'vertical'],
default_value='horizontal', help="Vertical or horizontal.").tag(sync=True)
readout = Bool(True, help="Display the current value of the slider next to it.").tag(sync=True)
readout_format = NumberFormat(
'.2f', help="Format for the readout").tag(sync=True)
continuous_update = Bool(True, help="Update the value of the widget as the user is sliding the slider.").tag(sync=True)
disabled = Bool(False, help="Enable or disable user changes").tag(sync=True)
style = InstanceDict(SliderStyle).tag(sync=True, **widget_serialization)
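# --- Illustrative usage sketch (not part of the original module) ---
# Typical notebook usage of the FloatSlider defined above; displaying the
# widget requires a running IPython kernel, so this is only a sketch:
#
#   from ipywidgets import FloatSlider
#   gain = FloatSlider(value=2.5, min=0.0, max=10.0, step=0.1,
#                      description='Gain:', readout_format='.1f')
#   gain.observe(lambda change: print(change['new']), names='value')
#   display(gain)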
| {
"content_hash": "123dd28979da2a39532eb6f97745f874",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 167,
"avg_line_length": 37.92831541218638,
"alnum_prop": 0.6457191457191457,
"repo_name": "nitin-cherian/LifeLongLearning",
"id": "98d9ea8f43dc3fa5e16a0f34e94b67d22d6f3eb6",
"size": "10582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/PythonProgrammingLanguage/Encapsulation/encap_env/lib/python3.5/site-packages/ipywidgets/widgets/widget_float.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "32365"
},
{
"name": "CSS",
"bytes": "10259"
},
{
"name": "HTML",
"bytes": "55977"
},
{
"name": "JavaScript",
"bytes": "7368910"
},
{
"name": "Jupyter Notebook",
"bytes": "768879"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "17502534"
},
{
"name": "Shell",
"bytes": "7751"
},
{
"name": "Smarty",
"bytes": "30663"
}
],
"symlink_target": ""
} |
__author__ = 'leifos'
from ifind.seeker.common_helpers import file_exists
from ifind.seeker.common_helpers import AutoVivification
class TopicDocumentFileHandler(object):
def __init__(self, filename=None):
self.data = AutoVivification()
if filename:
self.read_file(filename)
def _put_in_line(self, line):
# handles the specific format of the line (assumes 3 columns: topic document value )
parts = line.partition(' ')
topic = parts[0]
parts = parts[2].partition(' ')
doc = parts[0]
value = parts[2].strip()
if topic and doc:
            self.data[topic][doc] = int(value)
def _get_out_line(self, topic, doc):
# outputs the topic document and value in a specific way.
return "%s %s %d\n" % (topic, doc, self.data[topic][doc])
def read_file(self, filename):
if file_exists(filename):
            infile = open(filename, "r")
while infile:
line = infile.readline()
if not line:
infile.close()
break
else:
self._put_in_line(line)
else:
raise IOError("The topic/document judgement file '" + filename + "' was not found.")
def save_file(self, filename, append=False):
if append:
outfile = open(filename, "a")
else:
outfile = open(filename, "w")
for t in self.get_topic_list():
for d in self.get_doc_list(t):
out_line = self._get_out_line(t,d)
outfile.write (out_line)
outfile.close()
def get_value(self, topic, doc):
if topic not in self.data:
return 0
if doc in self.data[topic]:
return int(self.data[topic][doc])
else:
return 0
# if self.data[topic][doc]:
# return int(self.data[topic][doc])
# else:
# return 0
def get_value_if_exists(self, topic, doc):
if topic not in self.data:
return None
if doc in self.data[topic]:
return int(self.data[topic][doc])
else:
return None
# if self.data[topic][doc]:
# return int(self.data[topic][doc])
# else:
# return None
def get_doc_list(self, topic):
if self.data[topic]:
return self.data[topic]
else:
return []
def get_topic_list(self):
tl = []
if self.data:
for topic in self.data.keys():
tl.append(topic)
return tl
def get_topic_doc_dict(self):
return self.data
def add_topic_doc(self,topic, doc, value):
self.data[topic][doc] = value
def inc_topic_doc(self,topic, doc, value=1):
if self.data[topic][doc]:
self.data[topic][doc] = self.data[topic][doc] + value
else:
self.data[topic][doc] = value
def __str__(self):
return 'TOPICS READ IN: ' + str(len(self.data)) | {
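# --- Illustrative usage sketch (not part of the original module) ---
# Builds a small in-memory set of topic/document judgements and writes it
# out using the three-column "topic document value" format handled above.
# The topic, document, and file names are made up for demonstration.
if __name__ == '__main__':
    handler = TopicDocumentFileHandler()
    handler.add_topic_doc('T1', 'doc-001', 1)
    handler.inc_topic_doc('T1', 'doc-001')      # value becomes 2
    handler.add_topic_doc('T2', 'doc-042', 1)
    print(handler.get_value('T1', 'doc-001'))   # 2
    handler.save_file('example_judgements.txt')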
"content_hash": "ec8af163917aaca8cde22f0ee0d40805",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 96,
"avg_line_length": 28.117117117117118,
"alnum_prop": 0.5200256328099968,
"repo_name": "leifos/ifind",
"id": "4166a60b17662e660687d4c2515d9e9b9c907900",
"size": "3121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ifind/seeker/topic_document_file_handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "240161"
},
{
"name": "Shell",
"bytes": "392"
}
],
"symlink_target": ""
} |