code | repo_name | path | language | license | size
---|---|---|---|---|---|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from dockpulp import Pulp, cli
import pytest
import os
import json
import logging
from flexmock import flexmock
# wrapper classes
class testbOpts(object):
def __init__(self, server="testserv", config_file="testconf",
debug=False, cert=True, key=True):
self.server = server
self.config_file = config_file
self.debug = debug
self.cert = cert
self.key = key
class testPulp(object):
def __init__(self):
self.certificate = None
self.key = None
self.AUTH_CER_FILE = ''
self.AUTH_KEY_FILE = ''
def set_certs(self, cert, key):
return
    def setDebug(self):
        return
    def isRedirect(self):
        return
def createRepo(self, arg1, arg2, desc=None, title=None, protected=None, productline=None,
library=None, distribution=None, prefix_with=None, rel_url=None,
download=None):
return
def getAncestors(self, arg):
return arg
def getPrefix(self):
return
def getSigstore(self):
return 'SIGSTORE'
def associate(self, arg1, arg2):
return {'id': 0}
def copy(self, arg1, arg2):
return
def listRepos(self, repos=None, content=None, history=None, labels=None):
return
def updateRepo(self, arg1, arg2):
return
def deleteRepo(self, arg1, arg2):
return
def emptyRepo(self, arg1):
return
# tests
class TestCLI(object):
# Tests of methods from CLI
@pytest.mark.parametrize('debug', [True, False])
@pytest.mark.parametrize('error', [True, False])
@pytest.mark.parametrize('cert, key',
[(None, True), (True, None),
(True, True), (None, None)])
def test_pulp_login(self, debug, cert, key, error):
bopts = testbOpts("testserv", "testconf", debug, cert, key)
p = testPulp()
(flexmock(Pulp)
.new_instances(p)
.with_args(Pulp, env=bopts.server, config_file=bopts.config_file))
if debug:
flexmock(testPulp)
(testPulp
.should_receive('setDebug')
.once()
.and_return(None))
flexmock(os.path)
if error and not (key or cert):
(os.path
.should_receive('exists')
.once()
.and_return(False))
with pytest.raises(SystemExit):
cli.pulp_login(bopts)
return
elif not cert or not key:
(os.path
.should_receive('exists')
.twice()
.and_return(True))
assert cli.pulp_login(bopts) is p
@pytest.mark.parametrize('bargs', ['1', '1 2'])
def test_do_ancestry(self, bargs):
bargs = bargs.split(" ")
bopts = testbOpts()
p = testPulp()
(flexmock(Pulp)
.new_instances(p)
.with_args(Pulp, env=bopts.server, config_file=bopts.config_file))
if len(bargs) != 1:
with pytest.raises(SystemExit):
cli.do_ancestry(bopts, bargs)
else:
assert cli.do_ancestry(bopts, bargs) is None
@pytest.mark.parametrize('bargs', ['1', '1 2'])
def test_do_associate(self, bargs):
bargs = bargs.split(" ")
bopts = testbOpts()
p = testPulp()
(flexmock(Pulp)
.new_instances(p)
.with_args(Pulp, env=bopts.server, config_file=bopts.config_file))
if len(bargs) != 2:
with pytest.raises(SystemExit):
cli.do_associate(bopts, bargs)
else:
assert cli.do_associate(bopts, bargs) is None
@pytest.mark.parametrize('lib', [True, False])
@pytest.mark.parametrize('img', [True, False])
@pytest.mark.parametrize('manifest', [True, False])
@pytest.mark.parametrize('noprefix', [True, False])
@pytest.mark.parametrize('args', ['1 2 3', '1 2', '1'])
def test_do_clone(self, args, lib, img, manifest, noprefix):
bopts = testbOpts()
p = testPulp()
if img:
images = {'1': '1'}
else:
images = {}
if manifest:
manifests = {'2': '2'}
else:
manifests = {}
oldinfo = [{'redirect': None, 'description': None, 'title': None,
'protected': "False", "images": images, "manifests": manifests}]
(flexmock(Pulp)
.new_instances(p)
.once()
.with_args(Pulp, env=bopts.server, config_file=bopts.config_file))
args = args.split(" ")
bargs = args[:]
if lib:
bargs.append('-l')
if noprefix:
bargs.append('--noprefix')
if lib and len(args) != 2:
with pytest.raises(SystemExit):
cli.do_clone(bopts, bargs)
elif not lib and len(args) != 3:
with pytest.raises(SystemExit):
cli.do_clone(bopts, bargs)
else:
if lib:
if noprefix:
repoid = '%s' % args[1]
else:
repoid = 'redhat-%s' % args[1]
productid = None
else:
if noprefix:
repoid = '%s-%s' % (args[1], args[2])
else:
repoid = 'redhat-%s-%s' % (args[1], args[2])
productid = args[1]
if noprefix:
prefix_with = ''
else:
prefix_with = 'redhat-'
tags = {'tag': '1:1'}
flexmock(testPulp)
if not noprefix:
(testPulp
.should_receive('getPrefix')
.once()
.and_return('redhat-'))
(testPulp
.should_receive('listRepos')
.once()
.with_args(args[0], content=True)
.and_return(oldinfo))
(testPulp
.should_receive('createRepo')
.once()
.with_args(repoid, None, desc=None, title=None, protected=False,
productline=productid, distribution=None, prefix_with=prefix_with)
.and_return(None))
if img:
(testPulp
.should_receive('copy')
.once()
.with_args(repoid, '1')
.and_return(None))
(testPulp
.should_receive('updateRepo')
.once()
.with_args(repoid, tags)
.and_return(None))
if manifest:
(testPulp
.should_receive('copy')
.once()
.with_args(repoid, '2')
.and_return(None))
assert cli.do_clone(bopts, bargs) is None
@pytest.mark.parametrize('lib', [True, False])
@pytest.mark.parametrize('noprefix', [True, False])
@pytest.mark.parametrize('download', ["true", "False"])
@pytest.mark.parametrize('args', ['1 2 3', '1 2',
'test /content/test', 'foo bar /content/foo/bar'])
def test_do_create(self, args, lib, noprefix, download):
bopts = testbOpts()
p = testPulp()
(flexmock(Pulp)
.new_instances(p)
.once()
.with_args(Pulp, env=bopts.server, config_file=bopts.config_file))
args = args.split(" ")
bargs = args[:]
if lib:
bargs.append('-l')
if noprefix:
bargs.append('--noprefix')
bargs.append('--download')
bargs.append(download)
flexmock(testPulp)
if not noprefix:
(testPulp
.should_receive('getPrefix')
.once()
.and_return('redhat-'))
(testPulp
.should_receive('isRedirect')
.and_return(True))
if not lib and noprefix and download == "true" and args[-1].startswith('/content') \
and args[0] == 'foo':
(testPulp
.should_receive('createRepo')
.with_args('foo-bar', '/content/foo/bar', library=lib, protected=False, title=None,
productline='foo', distribution=None, desc="No description",
prefix_with='', rel_url='content/foo/bar', download=True)
.and_return(None))
else:
(testPulp
.should_receive('createRepo')
.and_return(None))
if lib and len(args) != 2:
with pytest.raises(SystemExit):
cli.do_create(bopts, bargs)
elif not lib and len(args) != 3:
with pytest.raises(SystemExit):
cli.do_create(bopts, bargs)
elif not args[-1].startswith('/content'):
with pytest.raises(SystemExit):
cli.do_create(bopts, bargs)
else:
assert cli.do_create(bopts, bargs) is None
@pytest.mark.parametrize('bargs', ['1', None])
def test_do_delete(self, bargs):
if bargs is not None:
bargs = bargs.split(" ")
bopts = testbOpts()
p = testPulp()
(flexmock(Pulp)
.new_instances(p)
.with_args(Pulp, env=bopts.server, config_file=bopts.config_file))
if bargs is None:
with pytest.raises(SystemExit):
cli.do_delete(bopts, bargs)
else:
(flexmock(testPulp)
.should_receive('listRepos')
.with_args(bargs[0], content=True)
.once()
.and_return([{'images': {}, 'manifests': {}}]))
assert cli.do_delete(bopts, bargs) is None
@pytest.mark.parametrize('bargs', ['1', None])
def test_do_empty(self, bargs):
if bargs is not None:
bargs = bargs.split(" ")
bopts = testbOpts()
p = testPulp()
(flexmock(Pulp)
.new_instances(p)
.with_args(Pulp, env=bopts.server, config_file=bopts.config_file))
if bargs is None:
with pytest.raises(SystemExit):
cli.do_empty(bopts, bargs)
else:
assert cli.do_empty(bopts, bargs) is None
@pytest.mark.parametrize('silent', [True, False])
def test_do_list(self, caplog, silent):
bopts = testbOpts()
bargs = ['test-repo', '--content', '--details', '--labels', '--lists']
if silent:
bargs.append('--silent')
p = testPulp()
(flexmock(Pulp)
.new_instances(p)
.with_args(Pulp, env=bopts.server, config_file=bopts.config_file))
repos = [{'id': 'test-repo', 'detail': 'foobar',
'images': {'testimage': ['testtag']},
'v1_labels': {'testimage': {'testkey': 'testval'}},
'manifests': {'testmanifest': {'layers': ['testlayer1'], 'tag': 'testtag',
'config': 'testconfig', 'schema_version': 'testsv',
'v1id': 'testv1id', 'v1parent': 'testv1parent',
'v1labels': 'testv1labels'}},
'manifest_lists': {'testmanifestlist': {'mdigests': ['testmanifest'],
'tags': ['testtag']}},
'tags': {'testtag': 'testmanifest'},
'include_in_download_service': "True"}]
(flexmock(testPulp)
.should_receive('listRepos')
.with_args(repos=[bargs[0]], content=True, history=True, labels=True)
.and_return(repos))
caplog.setLevel(logging.INFO, logger="dockpulp")
response = cli.do_list(bopts, bargs)
if silent:
output = caplog.text()
jsontext = output[output.find('['):]
assert json.loads(jsontext) == repos
else:
assert response is None
def test_print_manifest_metadata(self):
manifest = 'testmanifest'
tag = 'testtag'
output = {manifest:
{'tag': tag,
'active': ' (active)',
'config': 'testconfig',
'schema_version': 'testsv'}}
assert cli._print_manifest_metadata(output, manifest, True) == tag
@pytest.mark.parametrize('bargs', ['test-repo -r /contentdist --download True', None])
def test_do_update(self, bargs):
if bargs is not None:
bargs = bargs.split(" ")
bopts = testbOpts()
p = testPulp()
(flexmock(Pulp)
.new_instances(p)
.with_args(Pulp, env=bopts.server, config_file=bopts.config_file))
if bargs is None:
with pytest.raises(SystemExit):
cli.do_update(bopts, bargs)
else:
assert cli.do_update(bopts, bargs) is None
| breillyr/dockpulp | tests/test_cli.py | Python | gpl-3.0 | 13,085 |
''' -- imports from python libraries -- '''
# from datetime import datetime
import datetime
import json
''' -- imports from installed packages -- '''
from django.http import HttpResponseRedirect # , HttpResponse uncomment when to use
from django.http import HttpResponse
from django.http import Http404
from django.shortcuts import render_to_response # , render uncomment when to use
from django.template import RequestContext
from django.template import TemplateDoesNotExist
from django.template.loader import render_to_string
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.sites.models import Site
from mongokit import IS
try:
from bson import ObjectId
except ImportError: # old pymongo
from pymongo.objectid import ObjectId
''' -- imports from application folders/files -- '''
from gnowsys_ndf.settings import GAPPS, MEDIA_ROOT, GSTUDIO_TASK_TYPES
from gnowsys_ndf.ndf.models import NodeJSONEncoder
from gnowsys_ndf.settings import GSTUDIO_SITE_NAME
from gnowsys_ndf.ndf.models import Node, AttributeType, RelationType
from gnowsys_ndf.ndf.models import node_collection, triple_collection
from gnowsys_ndf.ndf.views.file import save_file
from gnowsys_ndf.ndf.templatetags.ndf_tags import edit_drawer_widget
from gnowsys_ndf.ndf.views.methods import get_node_common_fields, parse_template_data, get_execution_time, delete_node
from gnowsys_ndf.ndf.views.notify import set_notif_val
from gnowsys_ndf.ndf.views.methods import get_property_order_with_value
from gnowsys_ndf.ndf.views.methods import create_gattribute, create_grelation, create_task
from gnowsys_ndf.notification import models as notification
GST_COURSE = node_collection.one({'_type': "GSystemType", 'name': "Course"})
GST_ACOURSE = node_collection.one({'_type': "GSystemType", 'name': "Announced Course"})
app = GST_COURSE
@get_execution_time
def course(request, group_id, course_id=None):
"""
* Renders a list of all 'courses' available within the database.
"""
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False:
group_ins = node_collection.find_one({'_type': "Group", "name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else:
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth:
group_id = str(auth._id)
else:
pass
app_id = None
app_id = app._id
course_coll = None
all_course_coll = None
ann_course_coll = None
enrolled_course_coll = []
course_enrollment_status = None
app_set_id = None
if course_id is None:
course_ins = node_collection.find_one({'_type': "GSystemType", "name": "Course"})
if course_ins:
course_id = str(course_ins._id)
app_set = node_collection.one({'_type': "GSystemType", 'name': "Announced Course"})
app_set_id = app_set._id
# Course search view
title = GST_COURSE.name
if request.user.id:
course_coll = node_collection.find({'member_of': GST_COURSE._id,'group_set': ObjectId(group_id),'status':u"DRAFT"})
all_course_coll = node_collection.find({'member_of': {'$in': [GST_COURSE._id,GST_ACOURSE._id]},
'group_set': ObjectId(group_id),'status':{'$in':[u"PUBLISHED",u"DRAFT"]}})
auth_node = node_collection.one({'_type': "Author", 'created_by': int(request.user.id)})
'''
if auth_node.attribute_set:
for each in auth_node.attribute_set:
if each and "course_enrollment_status" in each:
course_enrollment_dict = each["course_enrollment_status"]
course_enrollment_status = [ObjectId(each) for each in course_enrollment_dict]
enrolled_course_coll = node_collection.find({'_id': {'$in': course_enrollment_status}})
'''
ann_course_coll = node_collection.find({'member_of': GST_ACOURSE._id, 'group_set': ObjectId(group_id),'status':u"PUBLISHED"})
return render_to_response("ndf/course.html",
{'title': title,
'app_id': app_id, 'course_gst': GST_COURSE,
'app_set_id': app_set_id,
'searching': True, 'course_coll': course_coll,
'groupid': group_id, 'group_id': group_id,
'all_course_coll': all_course_coll,
'enrolled_course_coll': enrolled_course_coll,
'ann_course_coll': ann_course_coll
},
context_instance=RequestContext(request)
)
@login_required
@get_execution_time
def create_edit(request, group_id, node_id=None):
"""Creates/Modifies details about the given quiz-item.
"""
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False:
group_ins = node_collection.find_one({'_type': "Group","name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else:
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth:
group_id = str(auth._id)
else:
pass
at_course_type = node_collection.one({'_type': 'AttributeType', 'name': 'nussd_course_type'})
context_variables = {'title': GST_COURSE.name,
'group_id': group_id,
'groupid': group_id
}
if node_id:
course_node = node_collection.one({'_type': u'GSystem', '_id': ObjectId(node_id)})
else:
course_node = node_collection.collection.GSystem()
available_nodes = node_collection.find({'_type': u'GSystem', 'member_of': ObjectId(GST_COURSE._id),'group_set': ObjectId(group_id),'status':{"$in":[u"DRAFT",u"PUBLISHED"]}})
nodes_list = []
for each in available_nodes:
nodes_list.append(str((each.name).strip().lower()))
if request.method == "POST":
# get_node_common_fields(request, course_node, group_id, GST_COURSE)
course_node.save(is_changed=get_node_common_fields(request, course_node, group_id, GST_COURSE))
create_gattribute(course_node._id, at_course_type, u"General")
return HttpResponseRedirect(reverse('course', kwargs={'group_id': group_id}))
else:
if node_id:
context_variables['node'] = course_node
context_variables['groupid'] = group_id
context_variables['group_id'] = group_id
context_variables['app_id'] = app._id
context_variables['nodes_list'] = json.dumps(nodes_list)
return render_to_response("ndf/course_create_edit.html",
context_variables,
context_instance=RequestContext(request)
)
# @login_required
@get_execution_time
def course_detail(request, group_id, _id):
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False:
group_ins = node_collection.find_one({'_type': "Group", "name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else:
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth:
group_id = str(auth._id)
else:
pass
course_structure_exists = False
enrolled_status = False
check_enroll_status = False
title = GST_COURSE.name
course_node = node_collection.one({"_id": ObjectId(_id)})
if course_node.collection_set:
course_structure_exists = True
gs_name = course_node.member_of_names_list[0]
context_variables = {'groupid': group_id,
'group_id': group_id,
'app_id': app._id,
'title': title,
'node': course_node,
'node_type': gs_name
}
if gs_name == "Course":
context_variables["course_structure_exists"] = course_structure_exists
if course_node.relation_set:
for rel in course_node.relation_set:
if "announced_as" in rel:
cnode = node_collection.one({'_id': ObjectId(rel["announced_as"][0])},{'_id':1})
context_variables["acnode"] = str(cnode['_id'])
check_enroll_status = True
break
else:
if course_node.relation_set:
for rel in course_node.relation_set:
if "announced_for" in rel:
cnode = node_collection.one({'_id': ObjectId(rel["announced_for"][0])})
context_variables["cnode"] = cnode
check_enroll_status = True
break
if request.user.id:
if check_enroll_status:
usr_id = int(request.user.id)
auth_node = node_collection.one({'_type': "Author", 'created_by': usr_id})
course_enrollment_status = {}
if auth_node.attribute_set:
for each in auth_node.attribute_set:
if each and "course_enrollment_status" in each:
course_enrollment_status = each["course_enrollment_status"]
if "acnode" in context_variables:
str_course_id = str(context_variables["acnode"])
else:
str_course_id = str(course_node._id)
if course_enrollment_status:
if str_course_id in course_enrollment_status:
enrolled_status = True
context_variables['enrolled_status'] = enrolled_status
return render_to_response("ndf/course_detail.html",
context_variables,
context_instance=RequestContext(request)
)
@login_required
@get_execution_time
def course_create_edit(request, group_id, app_id, app_set_id=None, app_set_instance_id=None, app_name=None):
"""
Creates/Modifies document of given sub-types of Course(s).
"""
auth = None
tiss_site = False
if ObjectId.is_valid(group_id) is False:
group_ins = node_collection.one({'_type': "Group", "name": group_id})
auth = node_collection.one({
'_type': 'Author', 'name': unicode(request.user.username)
})
if group_ins:
group_id = str(group_ins._id)
else:
auth = node_collection.one({
'_type': 'Author', 'name': unicode(request.user.username)
})
if auth:
group_id = str(auth._id)
else:
pass
if GSTUDIO_SITE_NAME is "TISS":
tiss_site = True
app = None
if app_id is None:
app = node_collection.one({'_type': "GSystemType", 'name': app_name})
if app:
app_id = str(app._id)
else:
app = node_collection.one({'_id': ObjectId(app_id)})
# app_set = ""
app_collection_set = []
title = ""
course_gst = None
course_gs = None
hide_mis_meta_content = True
mis_admin = None
property_order_list = []
template = ""
template_prefix = "mis"
if request.user:
if auth is None:
auth = node_collection.one({
'_type': 'Author', 'name': unicode(request.user.username)
})
agency_type = auth.agency_type
agency_type_node = node_collection.one({
'_type': "GSystemType", 'name': agency_type
}, {
'collection_set': 1
})
if agency_type_node:
for eachset in agency_type_node.collection_set:
app_collection_set.append(
node_collection.one({
"_id": eachset
}, {
'_id': 1, 'name': 1, 'type_of': 1
})
)
if app_set_id:
course_gst = node_collection.one({
'_type': "GSystemType", '_id': ObjectId(app_set_id)
}, {
'name': 1, 'type_of': 1
})
template = "ndf/" + course_gst.name.strip().lower().replace(' ', '_') \
+ "_create_edit.html"
title = course_gst.name
if app_set_instance_id:
course_gs = node_collection.one({
'_type': "GSystem", '_id': ObjectId(app_set_instance_id)
})
else:
course_gs = node_collection.collection.GSystem()
course_gs.member_of.append(course_gst._id)
property_order_list = get_property_order_with_value(course_gs)
if request.method == "POST":
# [A] Save course-node's base-field(s)
start_time = ""
if "start_time" in request.POST:
start_time = request.POST.get("start_time", "")
start_time = datetime.datetime.strptime(start_time, "%m/%Y")
end_time = ""
if "end_time" in request.POST:
end_time = request.POST.get("end_time", "")
end_time = datetime.datetime.strptime(end_time, "%m/%Y")
nussd_course_type = ""
if "nussd_course_type" in request.POST:
nussd_course_type = request.POST.get("nussd_course_type", "")
nussd_course_type = unicode(nussd_course_type)
unset_ac_options = []
if "unset-ac-options" in request.POST:
unset_ac_options = request.POST.getlist("unset-ac-options")
else:
# Just to execute loop at least once for Course Sub-Types
# other than 'Announced Course'
unset_ac_options = ["dummy"]
if course_gst.name == u"Announced Course":
announce_to_colg_list = request.POST.get(
"announce_to_colg_list", ""
)
                announce_to_colg_list = announce_to_colg_list.split(",")
colg_ids = []
# Parsing ObjectId -- from string format to ObjectId
for each in announce_to_colg_list:
if each and ObjectId.is_valid(each):
colg_ids.append(ObjectId(each))
# Fetching college(s)
colg_list_cur = node_collection.find({
'_id': {'$in': colg_ids}
}, {
'name': 1, 'attribute_set.enrollment_code': 1
})
if "_id" in course_gs:
# It means we are in editing mode of given Announced Course GSystem
unset_ac_options = [course_gs._id]
ac_nc_code_list = []
# Prepare a list
# 0th index (ac_node): Announced Course node,
# 1st index (nc_id): NUSSD Course node's ObjectId,
# 2nd index (nc_course_code): NUSSD Course's code
for cid in unset_ac_options:
ac_node = None
nc_id = None
nc_course_code = ""
# Here course_gst is Announced Course GSytemType's node
ac_node = node_collection.one({
'_id': ObjectId(cid), 'member_of': course_gst._id
})
# If ac_node found, means
# (1) we are dealing with creating Announced Course
# else,
# (2) we are in editing phase of Announced Course
                    course_node = None
                    course_node_ids = None
if not ac_node:
# In this case, cid is of NUSSD Course GSystem
# So fetch that to extract course_code
# Set to nc_id
ac_node = None
course_node = node_collection.one({
'_id': ObjectId(cid)
})
else:
# In this case, fetch NUSSD Course from
# Announced Course GSystem's announced_for relationship
for rel in ac_node.relation_set:
if "announced_for" in rel:
course_node_ids = rel["announced_for"]
break
# Fetch NUSSD Course GSystem
if course_node_ids:
course_node = node_collection.find_one({
"_id": {"$in": course_node_ids}
})
# If course_code doesn't exists then
# set NUSSD Course GSystem's name as course_code
if course_node:
nc_id = course_node._id
for attr in course_node.attribute_set:
if "course_code" in attr:
nc_course_code = attr["course_code"]
break
if not nc_course_code:
nc_course_code = course_node.name.replace(" ", "-")
# Append to ac_nc_code_list
ac_nc_code_list.append([ac_node, nc_id, nc_course_code])
# For each selected college
# Create Announced Course GSystem
for college_node in colg_list_cur:
# Fetch Enrollment code from "enrollment_code" (Attribute)
college_enrollment_code = ""
if college_node:
for attr in college_node.attribute_set:
if attr and "enrollment_code" in attr:
college_enrollment_code = attr["enrollment_code"]
break
ann_course_id_list = []
# For each selected course to Announce
for ac_nc_code in ac_nc_code_list:
course_gs = ac_nc_code[0]
nc_id = ac_nc_code[1]
cnode_for_content = node_collection.one({'_id': ObjectId(nc_id)})
nc_course_code = ac_nc_code[2]
if not course_gs:
# Create new Announced Course GSystem
course_gs = node_collection.collection.GSystem()
course_gs.member_of.append(course_gst._id)
if tiss_site:
# Prepare name for Announced Course GSystem
c_name = unicode(
nc_course_code + " - " + college_enrollment_code + " - "
+ start_time.strftime("%b %Y") + " - "
+ end_time.strftime("%b %Y")
)
else:
# Prepare name for Announced Course GSystem
c_name = unicode(
nc_course_code + " - "+ start_time.strftime("%b %Y") + " - "
+ end_time.strftime("%b %Y")
)
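                        # Illustrative only (hypothetical values): with nc_course_code "NC_ES101",
                        # college_enrollment_code "MUM01", start 06/2017 and end 12/2017, the TISS
                        # branch above yields "NC_ES101 - MUM01 - Jun 2017 - Dec 2017", while the
                        # non-TISS branch omits the enrollment code.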
request.POST["name"] = c_name
is_changed = get_node_common_fields(
request, course_gs, group_id, course_gst
)
if is_changed:
# Remove this when publish button is setup on interface
course_gs.status = u"PUBLISHED"
course_gs.content_org = cnode_for_content.content_org
course_gs.content = cnode_for_content.html_content
course_gs.save(is_changed=is_changed)
# [B] Store AT and/or RT field(s) of given course-node
for tab_details in property_order_list:
for field_set in tab_details[1]:
# Fetch only Attribute field(s) / Relation field(s)
if '_id' in field_set:
field_instance = node_collection.one({
'_id': field_set['_id']
})
field_instance_type = type(field_instance)
if (field_instance_type in
[AttributeType, RelationType]):
field_data_type = field_set['data_type']
# Fetch field's value depending upon AT/RT
# and Parse fetched-value depending upon
# that field's data-type
if field_instance_type == AttributeType:
if "File" in field_instance["validators"]:
                                                # Special case: AttributeTypes that require a file instance as their value, in which case the file document's ObjectId is used
if field_instance["name"] in request.FILES:
field_value = request.FILES[field_instance["name"]]
else:
field_value = ""
# Below 0th index is used because that function returns tuple(ObjectId, bool-value)
if field_value != '' and field_value != u'':
file_name = course_gs.name + " -- " + field_instance["altnames"]
content_org = ""
tags = ""
field_value = save_file(field_value, file_name, request.user.id, group_id, content_org, tags, oid=True)[0]
else:
# Other AttributeTypes
field_value = request.POST.get(field_instance["name"], "")
if field_instance["name"] in ["start_time", "end_time"]:
# Course Duration
field_value = parse_template_data(field_data_type, field_value, date_format_string="%m/%Y")
else:
field_value = parse_template_data(field_data_type, field_value, date_format_string="%d/%m/%Y %H:%M")
course_gs_triple_instance = create_gattribute(course_gs._id, node_collection.collection.AttributeType(field_instance), field_value)
else:
# i.e if field_instance_type == RelationType
if field_instance["name"] == "announced_for":
field_value = ObjectId(nc_id)
# Pass ObjectId of selected Course
elif field_instance["name"] == "acourse_for_college":
field_value = college_node._id
# Pass ObjectId of selected College
course_gs_triple_instance = create_grelation(course_gs._id, node_collection.collection.RelationType(field_instance), field_value)
ann_course_id_list.append(course_gs._id)
#commented email notifications to all registered user after announcement
# if not tiss_site:
# site = Site.objects.get(pk=1)
# site = site.name.__str__()
# ann_course_url_link = "http://" + site + "/home/course/course_detail/" + \
# str(course_gs._id)
# user_obj = User.objects.all()
# # Sending email to all registered users on site NROER
# render_label = render_to_string(
# "notification/label.html",
# {"sender": "NROER eCourses",
# "activity": "Course Announcement",
# "conjunction": "-"
# })
# if user_obj:
# notification.create_notice_type(render_label," New eCourse '"\
# + str(course_gs.name) +"' has been announced."\
# +" Visit this link to enroll into this ecourse : " \
# + ann_course_url_link, "notification")
# notification.send(user_obj, render_label, {"from_user": "NROER eCourses"})
else:
is_changed = get_node_common_fields(request, course_gs, group_id, course_gst)
if is_changed:
# Remove this when publish button is setup on interface
course_gs.status = u"PUBLISHED"
course_gs.save(is_changed=is_changed)
# [B] Store AT and/or RT field(s) of given course-node
for tab_details in property_order_list:
for field_set in tab_details[1]:
# Fetch only Attribute field(s) / Relation field(s)
if '_id' in field_set:
field_instance = node_collection.one({'_id': field_set['_id']})
field_instance_type = type(field_instance)
if field_instance_type in [AttributeType, RelationType]:
field_data_type = field_set['data_type']
# Fetch field's value depending upon AT/RT
# and Parse fetched-value depending upon
# that field's data-type
if field_instance_type == AttributeType:
if "File" in field_instance["validators"]:
                                    # Special case: AttributeTypes that require a file instance as their value, in which case the file document's ObjectId is used
if field_instance["name"] in request.FILES:
field_value = request.FILES[field_instance["name"]]
else:
field_value = ""
# Below 0th index is used because that function returns tuple(ObjectId, bool-value)
if field_value != '' and field_value != u'':
file_name = course_gs.name + " -- " + field_instance["altnames"]
content_org = ""
tags = ""
field_value = save_file(field_value, file_name, request.user.id, group_id, content_org, tags, oid=True)[0]
else:
# Other AttributeTypes
field_value = request.POST.get(field_instance["name"], "")
# if field_instance["name"] in ["start_time","end_time"]:
# field_value = parse_template_data(field_data_type, field_value, date_format_string="%m/%Y")
# elif field_instance["name"] in ["start_enroll", "end_enroll"]: #Student Enrollment DUration
# field_value = parse_template_data(field_data_type, field_value, date_format_string="%d/%m/%Y")
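                                    # Illustrative only: the teacher-qualification fields arrive as one
                                    # comma-separated string alternating a mandatory flag and a text value,
                                    # e.g. "true, Graduate, false, Post Graduate" (hypothetical input),
                                    # which the parsing below turns into
                                    # [{"mandatory": True, "text": u"Graduate"},
                                    #  {"mandatory": False, "text": u"Post Graduate"}].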
if field_instance["name"] in ["mast_tr_qualifications", "voln_tr_qualifications"]:
                                        # Needs special kind of parsing
field_value = []
tr_qualifications = request.POST.get(field_instance["name"], '')
if tr_qualifications:
qualifications_dict = {}
tr_qualifications = [qual.strip() for qual in tr_qualifications.split(",")]
for i, qual in enumerate(tr_qualifications):
if (i % 2) == 0:
if qual == "true":
qualifications_dict["mandatory"] = True
elif qual == "false":
qualifications_dict["mandatory"] = False
else:
qualifications_dict["text"] = unicode(qual)
field_value.append(qualifications_dict)
qualifications_dict = {}
elif field_instance["name"] in ["max_marks", "min_marks"]:
# Needed because both these fields' values are dependent upon evaluation_type field's value
evaluation_type = request.POST.get("evaluation_type", "")
if evaluation_type == u"Continuous":
field_value = None
field_value = parse_template_data(field_data_type, field_value, date_format_string="%d/%m/%Y %H:%M")
else:
field_value = parse_template_data(field_data_type, field_value, date_format_string="%d/%m/%Y %H:%M")
course_gs_triple_instance = create_gattribute(
course_gs._id,
node_collection.collection.AttributeType(field_instance),
field_value
)
else:
#i.e if field_instance_type == RelationType
if field_instance["name"] == "announced_for":
field_value = ObjectId(cid)
#Pass ObjectId of selected Course
elif field_instance["name"] == "acourse_for_college":
field_value = college_node._id
#Pass ObjectId of selected College
course_gs_triple_instance = create_grelation(
course_gs._id,
node_collection.collection.RelationType(field_instance),
field_value
)
if tiss_site:
return HttpResponseRedirect(
reverse(
app_name.lower() + ":" + template_prefix + '_app_detail',
kwargs={
'group_id': group_id, "app_id": app_id,
"app_set_id": app_set_id
}
)
)
else:
return HttpResponseRedirect(
reverse(
"course",
kwargs={
'group_id': group_id
}
)
)
univ = node_collection.one({
'_type': "GSystemType", 'name': "University"
}, {
'_id': 1
})
university_cur = None
if not mis_admin:
mis_admin = node_collection.one(
{'_type': "Group", 'name': "MIS_admin"},
{'_id': 1, 'name': 1, 'group_admin': 1}
)
if tiss_site:
hide_mis_meta_content = False
if univ and mis_admin:
university_cur = node_collection.find(
{'member_of': univ._id, 'group_set': mis_admin._id},
{'name': 1}
).sort('name', 1)
default_template = "ndf/course_create_edit.html"
context_variables = {
'groupid': group_id, 'group_id': group_id,
'app_id': app_id, 'app_name': app_name,
'app_collection_set': app_collection_set,
'app_set_id': app_set_id,
'title': title,
'hide_mis_meta_content':hide_mis_meta_content,
'tiss_site': tiss_site,
'university_cur': university_cur,
'property_order_list': property_order_list
}
if app_set_instance_id:
course_gs.get_neighbourhood(course_gs.member_of)
context_variables['node'] = course_gs
if "Announced Course" in course_gs.member_of_names_list:
for attr in course_gs.attribute_set:
if attr:
for eachk, eachv in attr.items():
context_variables[eachk] = eachv
for rel in course_gs.relation_set:
if rel:
for eachk, eachv in rel.items():
if eachv:
get_node_name = node_collection.one({'_id': eachv[0]})
context_variables[eachk] = get_node_name.name
try:
return render_to_response(
[template, default_template],
context_variables, context_instance=RequestContext(request)
)
except TemplateDoesNotExist as tde:
error_message = "\n CourseCreateEditViewError: This html template (" \
+ str(tde) + ") does not exists !!!\n"
raise Http404(error_message)
except Exception as e:
error_message = "\n CourseCreateEditViewError: " + str(e) + " !!!\n"
raise Exception(error_message)
@login_required
@get_execution_time
def mis_course_detail(request, group_id, app_id=None, app_set_id=None, app_set_instance_id=None, app_name=None):
"""
Detail view of NUSSD Course/ Announced Course
"""
# print "\n Found course_detail n gone inn this...\n\n"
auth = None
if ObjectId.is_valid(group_id) is False:
group_ins = node_collection.one({'_type': "Group", "name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else:
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth:
group_id = str(auth._id)
else:
pass
app = None
if app_id is None:
app = node_collection.one({'_type': "GSystemType", 'name': app_name})
if app:
app_id = str(app._id)
else:
app = node_collection.one({'_id': ObjectId(app_id)})
app_name = app.name
# app_name = "mis"
app_set = ""
app_collection_set = []
title = ""
course_gst = None
course_gs = None
node = None
property_order_list = []
property_order_list_ac = []
    is_link_needed = True  # This is required to show the Link button on the interface that links a Student's/VoluntaryTeacher's node with its corresponding Author node
template_prefix = "mis"
response_dict = {'success': False}
context_variables = {}
#Course structure collection _dict
course_collection_dict = {}
course_collection_list = []
course_structure_exists = False
if request.user:
if auth is None:
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username)})
if auth:
agency_type = auth.agency_type
agency_type_node = node_collection.one({'_type': "GSystemType", 'name': agency_type}, {'collection_set': 1})
if agency_type_node:
for eachset in agency_type_node.collection_set:
app_collection_set.append(node_collection.one({"_id": eachset}, {'_id': 1, 'name': 1, 'type_of': 1}))
if app_set_id:
course_gst = node_collection.one({'_type': "GSystemType", '_id': ObjectId(app_set_id)}, {'name': 1, 'type_of': 1})
title = course_gst.name
template = "ndf/course_list.html"
query = {}
college = {}
course = {}
ac_data_set = []
records_list = []
if course_gst.name == "Announced Course":
query = {
"member_of": course_gst._id,
"group_set": ObjectId(group_id),
"status": "PUBLISHED",
"attribute_set.ann_course_closure": u"Open",
}
res = node_collection.collection.aggregate([
{
'$match': query
}, {
'$project': {
'_id': 0,
'ac_id': "$_id",
'name': '$name',
'course': '$relation_set.announced_for',
'college': '$relation_set.acourse_for_college',
'nussd_course_type': '$attribute_set.nussd_course_type',
'created_at': "$created_at"
}
},
{
'$sort': {'created_at': 1}
}
])
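            # Illustrative shape of one projected record (hypothetical values); relation_set
            # fields project as a list of lists, which is why the double indexing
            # (e.g. each["college"][0][0]) is used below:
            #   {"ac_id": ObjectId("..."), "name": "NC_ES101 - Jun 2017 - Dec 2017",
            #    "course": [[ObjectId("...")]], "college": [[ObjectId("...")]],
            #    "nussd_course_type": ["Foundation Course"], "created_at": datetime(...)}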
records_list = res["result"]
if records_list:
for each in res["result"]:
if each["college"]:
colg_id = each["college"][0][0]
if colg_id not in college:
c = node_collection.one({"_id": colg_id}, {"name": 1, "relation_set.college_affiliated_to": 1})
each["college"] = c.name
each["college_id"] = c._id
college[colg_id] = {}
college[colg_id]["name"] = each["college"]
for rel in c.relation_set:
if rel and "college_affiliated_to" in rel:
univ_id = rel["college_affiliated_to"][0]
u = node_collection.one({"_id": univ_id}, {"name": 1})
each.update({"university": u.name})
college[colg_id]["university"] = each["university"]
college[colg_id]["university_id"] = u._id
each["university_id"] = u._id
else:
each["college"] = college[colg_id]["name"]
each["college_id"] = colg_id
each.update({"university": college[colg_id]["university"]})
each.update({"university_id": college[colg_id]["university_id"]})
if each["course"]:
course_id = each["course"][0][0]
if course_id not in course:
each["course"] = node_collection.one({"_id": course_id}).name
course[course_id] = each["course"]
else:
each["course"] = course[course_id]
ac_data_set.append(each)
column_headers = [
("name", "Announced Course Name"),
("course", "Course Name"),
("nussd_course_type", "Course Type"),
("college", "College"),
("university", "University")
]
else:
query = {
"member_of": course_gst._id,
"group_set": ObjectId(group_id),
}
res = node_collection.collection.aggregate([
{
'$match': query
}, {
'$project': {
'_id': 0,
'ac_id': "$_id",
'name': '$name',
'nussd_course_type': '$attribute_set.nussd_course_type',
'created_at': "$created_at"
}
},
{
'$sort': {'created_at': 1}
}
])
records_list = res["result"]
if records_list:
for each in res["result"]:
ac_data_set.append(each)
column_headers = [
("ac_id", "Edit"),
("name", "Course Name"),
("nussd_course_type", "Course Type"),
]
response_dict["column_headers"] = column_headers
response_dict["success"] = True
response_dict["students_data_set"] = ac_data_set
response_dict["groupid"] = group_id
response_dict["app_id"] = app_id
response_dict["app_set_id"] = app_set_id
if app_set_instance_id:
template = "ndf/course_details.html"
node = node_collection.one({'_type': "GSystem", '_id': ObjectId(app_set_instance_id)})
property_order_list = get_property_order_with_value(node)
node.get_neighbourhood(node.member_of)
if title == u"Announced Course":
property_order_list_ac = node.attribute_set
# Course structure as list of dicts
if node.collection_set:
course_structure_exists = True
context_variables = { 'groupid': group_id, 'group_id': group_id,
'app_id': app_id, 'app_name': app_name, 'app_collection_set': app_collection_set,
'app_set_id': app_set_id,
'course_gst_name': course_gst.name,
'title': title,
'course_structure_exists': course_structure_exists,
'node': node,
'property_order_list': property_order_list,
'property_order_list_ac': property_order_list_ac,
'is_link_needed': is_link_needed,
'response_dict':json.dumps(response_dict, cls=NodeJSONEncoder)
}
try:
# print "\n template-list: ", [template, default_template]
# template = "ndf/fgh.html"
# default_template = "ndf/dsfjhk.html"
# return render_to_response([template, default_template],
return render_to_response(template,
context_variables,
context_instance = RequestContext(request)
)
except TemplateDoesNotExist as tde:
error_message = "\n CourseDetailListViewError: This html template (" + str(tde) + ") does not exists !!!\n"
raise Http404(error_message)
except Exception as e:
error_message = "\n CourseDetailListViewError: " + str(e) + " !!!\n"
raise Exception(error_message)
# Ajax views for setting up Course Structure
@login_required
@get_execution_time
def create_course_struct(request, group_id, node_id):
"""
This view is to create the structure of the Course.
A Course holds CourseSection, which further holds CourseSubSection
in their respective collection_set.
A tree depiction to this is as follows:
Course Name:
1. CourseSection1
1.1. CourseSubSection1
1.2. CourseSubSection2
2. CourseSection2
2.1. CourseSubSection3
"""
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False:
group_ins = node_collection.find_one({'_type': "Group","name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else:
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth:
group_id = str(auth._id)
else:
pass
app_id = None
app_set_id = None
tiss_site = False
property_order_list_cs = []
property_order_list_css = []
course_structure_exists = False
title = "Course Authoring"
if GSTUDIO_SITE_NAME is "TISS":
tiss_site = True
course_node = node_collection.one({"_id": ObjectId(node_id)})
cs_gst = node_collection.one({'_type': "GSystemType", 'name': "CourseSection"})
cs_gs = node_collection.collection.GSystem()
cs_gs.member_of.append(cs_gst._id)
property_order_list_cs = get_property_order_with_value(cs_gs)
css_gst = node_collection.one({'_type': "GSystemType", 'name': "CourseSubSection"})
css_gs = node_collection.collection.GSystem()
css_gs.member_of.append(css_gst._id)
property_order_list_css = get_property_order_with_value(css_gs)
course_collection_list = course_node.collection_set
if course_collection_list:
course_structure_exists = True
# for attr in course_node.attribute_set:
# if attr.has_key("evaluation_type"):
# eval_type = attr["evaluation_type"]
#If evaluation_type flag is True, it is Final. If False, it is Continous
# if(eval_type==u"Final"):
# eval_type_flag = True
# else:
# eval_type_flag = False
if request.method == "GET":
app_id = request.GET.get("app_id", "")
app_set_id = request.GET.get("app_set_id", "")
return render_to_response("ndf/create_course_structure.html",
{'cnode': course_node,
'groupid': group_id,
'group_id': group_id,
'title': title,
'tiss_site':tiss_site,
'app_id': app_id, 'app_set_id': app_set_id,
'property_order_list': property_order_list_cs,
'property_order_list_css': property_order_list_css
},
context_instance=RequestContext(request)
)
@login_required
def save_course_section(request, group_id):
'''
Accepts:
* NUSSD Course/Course node _id
* CourseSection name
Actions:
* Creates CourseSection GSystem with name received.
* Appends this new CourseSection node id into
NUSSD Course/Course collection_set
Returns:
* success (i.e True/False)
* ObjectId of CourseSection node
'''
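    # Illustrative client-side call (the URL is an assumption, not taken from urls.py; the
    # field names and response keys match the code below):
    #   POST data {"cs_name": "Week 1", "course_node_id": "<24-char ObjectId string>"}
    #   -> {"success": true, "cs_new_id": "<ObjectId of the new CourseSection>"}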
response_dict = {"success": False}
if request.is_ajax() and request.method == "POST":
cs_node_name = request.POST.get("cs_name", '')
course_node_id = request.POST.get("course_node_id", '')
cs_gst = node_collection.one({'_type': "GSystemType", 'name': "CourseSection"})
cs_new = node_collection.collection.GSystem()
cs_new.member_of.append(cs_gst._id)
cs_new.name = cs_node_name
cs_new.modified_by = int(request.user.id)
cs_new.created_by = int(request.user.id)
cs_new.contributors.append(int(request.user.id))
course_node = node_collection.one({"_id": ObjectId(course_node_id)})
cs_new.prior_node.append(ObjectId(course_node._id))
cs_new.save()
node_collection.collection.update({'_id': course_node._id}, {'$push': {'collection_set': cs_new._id }}, upsert=False, multi=False)
response_dict["success"] = True
response_dict["cs_new_id"] = str(cs_new._id)
return HttpResponse(json.dumps(response_dict))
@login_required
def save_course_sub_section(request, group_id):
'''
Accepts:
* CourseSection node _id
* CourseSubSection name
Actions:
* Creates CourseSubSection GSystem with name received.
* Appends this new CourseSubSection node id into
CourseSection collection_set
Returns:
* success (i.e True/False)
* ObjectId of CourseSubSection node
'''
response_dict = {"success": False}
if request.is_ajax() and request.method == "POST":
css_node_name = request.POST.get("css_name", '')
cs_node_id = request.POST.get("cs_node_id", '')
css_gst = node_collection.one({'_type': "GSystemType", 'name': "CourseSubSection"})
css_new = node_collection.collection.GSystem()
css_new.member_of.append(css_gst._id)
# set name
css_new.name = css_node_name
css_new.modified_by = int(request.user.id)
css_new.created_by = int(request.user.id)
css_new.contributors.append(int(request.user.id))
cs_node = node_collection.one({"_id": ObjectId(cs_node_id)})
css_new.prior_node.append(cs_node._id)
css_new.save()
node_collection.collection.update({'_id': cs_node._id}, {'$push': {'collection_set': css_new._id }}, upsert=False, multi=False)
response_dict["success"] = True
response_dict["css_new_id"] = str(css_new._id)
return HttpResponse(json.dumps(response_dict))
@login_required
def change_node_name(request, group_id):
'''
Accepts:
* CourseSection/ CourseSubSection node _id
* New name for CourseSection node
Actions:
* Updates received node's name
'''
response_dict = {"success": False}
if request.is_ajax() and request.method == "POST":
node_id = request.POST.get("node_id", '')
new_name = request.POST.get("new_name", '')
node = node_collection.one({"_id": ObjectId(node_id)})
node.name = new_name.strip()
node.save()
response_dict["success"] = True
return HttpResponse(json.dumps(response_dict))
@login_required
def change_order(request, group_id):
'''
Accepts:
* 2 node ids.
Basically, either of CourseSection or CourseSubSection
* Parent node id
Either a NUSSD Course/Course or CourseSection
Actions:
* Swaps the 2 node ids in the collection set of received
parent node
'''
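    # Illustrative swap (hypothetical ids): with parent_node.collection_set == [A, B, C],
    # node_id_up == C and node_id_down == B, the two index positions are exchanged and the
    # list is saved back as [A, C, B].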
response_dict = {"success": False}
collection_set_list = []
if request.is_ajax() and request.method == "POST":
node_id_up = request.POST.get("node_id_up", '')
node_id_down = request.POST.get("node_id_down", '')
parent_node_id = request.POST.get("parent_node", '')
parent_node = node_collection.one({"_id": ObjectId(parent_node_id)})
collection_set_list = parent_node.collection_set
a, b = collection_set_list.index(ObjectId(node_id_up)), collection_set_list.index(ObjectId(node_id_down))
collection_set_list[b], collection_set_list[a] = collection_set_list[a], collection_set_list[b]
node_collection.collection.update({'_id': parent_node._id}, {'$set': {'collection_set': collection_set_list }}, upsert=False, multi=False)
parent_node.reload()
response_dict["success"] = True
return HttpResponse(json.dumps(response_dict))
@login_required
def course_sub_section_prop(request, group_id):
'''
Accepts:
* CourseSubSection node _id
* Properties dict
Actions:
* Creates GAttributes with the values of received dict
for the respective CourseSubSection node
Returns:
* success (i.e True/False)
* If request.method is POST, all GAttributes in a dict structure,
'''
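    # Illustrative POST payload (values hypothetical; keys match the handlers below):
    #   css_node_id    = "<CourseSubSection ObjectId string>"
    #   assessment_chk = "true"
    #   prop_dict      = '{"course_structure_minutes": "90",
    #                      "course_structure_assignment": "Reading + quiz",
    #                      "min_marks": "10", "max_marks": "50"}'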
response_dict = {"success": False}
if request.is_ajax():
if request.method == "POST":
assessment_flag = False
css_node_id = request.POST.get("css_node_id", '')
prop_dict = request.POST.get("prop_dict", '')
assessment_chk = json.loads(request.POST.get("assessment_chk", ''))
prop_dict = json.loads(prop_dict)
css_node = node_collection.one({"_id": ObjectId(css_node_id)})
at_cs_hours = node_collection.one({'_type': 'AttributeType', 'name': 'course_structure_minutes'})
at_cs_assessment = node_collection.one({'_type': 'AttributeType', 'name': 'course_structure_assessment'})
at_cs_assignment = node_collection.one({'_type': 'AttributeType', 'name': 'course_structure_assignment'})
at_cs_min_marks = node_collection.one({'_type': 'AttributeType', 'name': 'min_marks'})
at_cs_max_marks = node_collection.one({'_type': 'AttributeType', 'name': 'max_marks'})
if assessment_chk is True:
create_gattribute(css_node._id, at_cs_assessment, True)
assessment_flag = True
for propk, propv in prop_dict.items():
# add attributes to css gs
if(propk == "course_structure_minutes"):
create_gattribute(css_node._id, at_cs_hours, int(propv))
elif(propk == "course_structure_assignment"):
create_gattribute(css_node._id, at_cs_assignment, propv)
if assessment_flag:
if(propk == "min_marks"):
create_gattribute(css_node._id, at_cs_min_marks, int(propv))
if(propk == "max_marks"):
create_gattribute(css_node._id, at_cs_max_marks, int(propv))
css_node.reload()
response_dict["success"] = True
else:
css_node_id = request.GET.get("css_node_id", '')
css_node = node_collection.one({"_id": ObjectId(css_node_id)})
if css_node.attribute_set:
for each in css_node.attribute_set:
for k, v in each.items():
response_dict[k] = v
response_dict["success"] = True
else:
response_dict["success"] = False
return HttpResponse(json.dumps(response_dict))
@login_required
def add_units(request, group_id):
'''
Accepts:
* CourseSubSection node _id
* NUSSD Course/Course node _id
Actions:
* Redirects to course_units.html
'''
variable = None
unit_node = None
css_node_id = request.GET.get('css_node_id', '')
unit_node_id = request.GET.get('unit_node_id', '')
course_node_id = request.GET.get('course_node', '')
app_id = request.GET.get('app_id', '')
app_set_id = request.GET.get('app_set_id', '')
css_node = node_collection.one({"_id": ObjectId(css_node_id)})
course_node = node_collection.one({"_id": ObjectId(course_node_id)})
title = "Course Units"
try:
unit_node = node_collection.one({"_id": ObjectId(unit_node_id)})
except:
unit_node = None
variable = RequestContext(request, {
'group_id': group_id, 'groupid': group_id,
'css_node': css_node,
'title': title,
'app_set_id': app_set_id,
'app_id': app_id,
'unit_node': unit_node,
'course_node': course_node,
})
template = "ndf/course_units.html"
return render_to_response(template, variable)
@login_required
def get_resources(request, group_id):
'''
Accepts:
* Name of GSystemType (Page, File, etc.)
* CourseSubSection node _id
* widget_for
Actions:
* Fetches all GSystems of selected GSystemType as resources
Returns:
* Returns Drawer with resources
'''
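    # Illustrative POST payload (values hypothetical): {"resource_type": "Page",
    # "css_node_id": "<ObjectId string>", "unit_node_id": "<ObjectId string or empty>",
    # "widget_for": "resources"}; note that "Pandora" is mapped to the "Pandora_video"
    # GSystemType below before the lookup.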
response_dict = {'success': False, 'message': ""}
try:
if request.is_ajax() and request.method == "POST":
css_node_id = request.POST.get('css_node_id', "")
unit_node_id = request.POST.get('unit_node_id', "")
widget_for = request.POST.get('widget_for', "")
resource_type = request.POST.get('resource_type', "")
resource_type = resource_type.strip()
list_resources = []
css_node = node_collection.one({"_id": ObjectId(css_node_id)})
try:
unit_node = node_collection.one({"_id": ObjectId(unit_node_id)})
except:
unit_node = None
if resource_type:
if resource_type == "Pandora":
resource_type = "Pandora_video"
resource_gst = node_collection.one({'_type': "GSystemType", 'name': resource_type})
res = node_collection.find(
{
'member_of': resource_gst._id,
'group_set': ObjectId(group_id),
'status': u"PUBLISHED"
}
)
for each in res:
list_resources.append(each)
drawer_template_context = edit_drawer_widget("CourseUnits", group_id, unit_node, None, checked="collection_set", left_drawer_content=list_resources)
drawer_template_context["widget_for"] = widget_for
drawer_widget = render_to_string(
'ndf/drawer_widget.html',
drawer_template_context,
context_instance=RequestContext(request)
)
return HttpResponse(drawer_widget)
else:
error_message = "Resource Drawer: Either not an ajax call or not a POST request!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
except Exception as e:
error_message = "Resource Drawer: " + str(e) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
@login_required
def save_resources(request, group_id):
'''
Accepts:
* List of resources (i.e GSystem of Page, File, etc.)
* CourseSubSection node _id
Actions:
* Sets the received resources in respective node's collection_set
'''
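    # Illustrative POST payload (ids and names hypothetical):
    #   {"list_of_res": '["<res id 1>", "<res id 2>"]', "css_node": "<CourseSubSection id>",
    #    "unit_name": "Unit 1", "unit_node_id": ""}
    # a successful call answers {"success": true, "create_new_unit": true, "cu_new_id": "<CourseUnit id>"}.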
response_dict = {"success": False,"create_new_unit": True}
if request.is_ajax() and request.method == "POST":
list_of_res = json.loads(request.POST.get('list_of_res', ""))
css_node_id = request.POST.get('css_node', "")
unit_name = request.POST.get('unit_name', "")
unit_name = unit_name.strip()
unit_node_id = request.POST.get('unit_node_id', "")
css_node = node_collection.one({"_id": ObjectId(css_node_id)})
list_of_res_ids = [ObjectId(each_res) for each_res in list_of_res]
try:
cu_new = node_collection.one({'_id': ObjectId(unit_node_id)})
except:
cu_new = None
if not cu_new:
cu_gst = node_collection.one({'_type': "GSystemType", 'name': "CourseUnit"})
cu_new = node_collection.collection.GSystem()
cu_new.member_of.append(cu_gst._id)
# set name
cu_new.name = unit_name.strip()
cu_new.modified_by = int(request.user.id)
cu_new.created_by = int(request.user.id)
cu_new.contributors.append(int(request.user.id))
cu_new.prior_node.append(css_node._id)
cu_new.save()
response_dict["create_new_unit"] = True
node_collection.collection.update({'_id': cu_new._id}, {'$set': {'name': unit_name }}, upsert=False, multi=False)
if cu_new._id not in css_node.collection_set:
node_collection.collection.update({'_id': css_node._id}, {'$push': {'collection_set': cu_new._id }}, upsert=False, multi=False)
node_collection.collection.update({'_id': cu_new._id}, {'$set': {'collection_set':list_of_res_ids}},upsert=False,multi=False)
cu_new.reload()
response_dict["success"] = True
response_dict["cu_new_id"] = str(cu_new._id)
return HttpResponse(json.dumps(response_dict))
@login_required
def create_edit_unit(request, group_id):
'''
Accepts:
* ObjectId of unit node if exists
* ObjectId of CourseSubSection node
Actions:
* Creates/Updates Unit node
Returns:
* success (i.e True/False)
'''
response_dict = {"success": False}
if request.is_ajax() and request.method == "POST":
css_node_id = request.POST.get("css_node_id", '')
unit_node_id = request.POST.get("unit_node_id", '')
unit_name = request.POST.get("unit_name", '')
css_node = node_collection.one({"_id": ObjectId(css_node_id)})
try:
cu_node = node_collection.one({'_id': ObjectId(unit_node_id)})
except:
cu_node = None
if cu_node is None:
cu_gst = node_collection.one({'_type': "GSystemType", 'name': "CourseUnit"})
cu_node = node_collection.collection.GSystem()
cu_node.member_of.append(cu_gst._id)
# set name
cu_node.name = unit_name.strip()
cu_node.modified_by = int(request.user.id)
cu_node.created_by = int(request.user.id)
cu_node.contributors.append(int(request.user.id))
cu_node.prior_node.append(css_node._id)
cu_node.save()
response_dict["unit_node_id"] = str(cu_node._id)
node_collection.collection.update({'_id': cu_node._id}, {'$set': {'name': unit_name}}, upsert=False, multi=False)
if cu_node._id not in css_node.collection_set:
node_collection.collection.update({'_id': css_node._id}, {'$push': {'collection_set': cu_node._id}}, upsert=False, multi=False)
return HttpResponse(json.dumps(response_dict))
@login_required
def delete_course(request, group_id, node_id):
del_stat = delete_item(node_id)
if del_stat:
return HttpResponseRedirect(reverse('course', kwargs={'group_id': ObjectId(group_id)}))
@login_required
def delete_from_course_structure(request, group_id):
'''
Accepts:
* ObjectId of node that is to be deleted.
It can be CourseSection/CourseSubSection/CourseUnit
Actions:
* Deletes the received node
Returns:
* success (i.e True/False)
'''
response_dict = {"success": False}
del_stat = False
if request.is_ajax() and request.method == "POST":
oid = request.POST.get("oid", '')
del_stat = delete_item(oid)
if del_stat:
response_dict["success"] = True
return HttpResponse(json.dumps(response_dict))
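# Recursive helper shared by delete_course and delete_from_course_structure: it walks the
# collection_set of CourseSection/CourseSubSection nodes and deletes the children first, but
# deliberately does not recurse into a CourseUnit's collection_set (the resources it references
# are left untouched); the node itself is then removed via delete_node().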
def delete_item(item):
node_item = node_collection.one({'_id': ObjectId(item)})
if u"CourseUnit" not in node_item.member_of_names_list and node_item.collection_set:
for each in node_item.collection_set:
d_st = delete_item(each)
del_status, del_status_msg = delete_node(
node_id=node_item._id,
deletion_type=0
)
return del_status
@login_required
def enroll_generic(request, group_id):
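    # Enrolls the requesting user into the given course: the course's ObjectId (as a string)
    # is recorded with the value u"Approved" in the "course_enrollment_status" dict stored as
    # a GAttribute on the user's Author node.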
response_dict = {"success": False}
if request.is_ajax() and request.method == "POST":
course_enrollment_status_at = node_collection.one({
"_type": "AttributeType", "name": "course_enrollment_status"
})
node_id = request.POST.get('node_id', '')
usr_id = request.POST.get('usr_id', '')
usr_id = int(usr_id)
auth_node = node_collection.one({'_type': "Author", 'created_by': usr_id})
course_node = node_collection.one({'_id': ObjectId(node_id)})
course_enrollment_status = {}
if auth_node.attribute_set:
for each in auth_node.attribute_set:
if each and "course_enrollment_status" in each:
course_enrollment_status = each["course_enrollment_status"]
str_course_id = str(course_node._id)
if course_enrollment_status is not None:
if str_course_id not in course_enrollment_status:
course_enrollment_status.update({str_course_id: u"Approved"})
at_node = create_gattribute(auth_node["_id"], course_enrollment_status_at, course_enrollment_status)
response_dict['success'] = True
return HttpResponse(json.dumps(response_dict))
else:
return HttpResponse(json.dumps(response_dict))
@login_required
def remove_resource_from_unit(request, group_id):
'''
Accepts:
* ObjectId of node to be removed from collection_set.
* ObjectId of unit_node.
Actions:
* Removed res_id from unit_node's collection_set
Returns:
* success (i.e True/False)
'''
response_dict = {"success": False}
if request.is_ajax() and request.method == "POST":
unit_node_id = request.POST.get("unit_node_id", '')
res_id = request.POST.get("res_id", '')
unit_node = node_collection.one({'_id': ObjectId(unit_node_id)})
if unit_node.collection_set and res_id:
node_collection.collection.update({'_id': unit_node._id}, {'$pull': {'collection_set': ObjectId(res_id)}}, upsert=False, multi=False)
response_dict["success"] = True
return HttpResponse(json.dumps(response_dict))
| olympian94/gstudio | gnowsys-ndf/gnowsys_ndf/ndf/views/course.py | Python | agpl-3.0 | 64,077 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from math import sqrt
import unittest
from numpy import array, random, exp, abs, tile
from pyspark.mllib.linalg import Vector, SparseVector, DenseVector, VectorUDT, Vectors
from pyspark.mllib.linalg.distributed import RowMatrix
from pyspark.mllib.feature import HashingTF, IDF, StandardScaler, ElementwiseProduct, Word2Vec
from pyspark.testing.mllibutils import MLlibTestCase
class FeatureTest(MLlibTestCase):
def test_idf_model(self):
data = [
Vectors.dense([1, 2, 6, 0, 2, 3, 1, 1, 0, 0, 3]),
Vectors.dense([1, 3, 0, 1, 3, 0, 0, 2, 0, 0, 1]),
Vectors.dense([1, 4, 1, 0, 0, 4, 9, 0, 1, 2, 0]),
Vectors.dense([2, 1, 0, 3, 0, 0, 5, 0, 2, 3, 9])
]
model = IDF().fit(self.sc.parallelize(data, 2))
idf = model.idf()
self.assertEqual(len(idf), 11)
class Word2VecTests(MLlibTestCase):
def test_word2vec_setters(self):
model = Word2Vec() \
.setVectorSize(2) \
.setLearningRate(0.01) \
.setNumPartitions(2) \
.setNumIterations(10) \
.setSeed(1024) \
.setMinCount(3) \
.setWindowSize(6)
self.assertEqual(model.vectorSize, 2)
self.assertTrue(model.learningRate < 0.02)
self.assertEqual(model.numPartitions, 2)
self.assertEqual(model.numIterations, 10)
self.assertEqual(model.seed, 1024)
self.assertEqual(model.minCount, 3)
self.assertEqual(model.windowSize, 6)
def test_word2vec_get_vectors(self):
data = [
["a", "b", "c", "d", "e", "f", "g"],
["a", "b", "c", "d", "e", "f"],
["a", "b", "c", "d", "e"],
["a", "b", "c", "d"],
["a", "b", "c"],
["a", "b"],
["a"]
]
model = Word2Vec().fit(self.sc.parallelize(data))
self.assertEqual(len(model.getVectors()), 3)
class StandardScalerTests(MLlibTestCase):
def test_model_setters(self):
data = [
[1.0, 2.0, 3.0],
[2.0, 3.0, 4.0],
[3.0, 4.0, 5.0]
]
model = StandardScaler().fit(self.sc.parallelize(data))
self.assertIsNotNone(model.setWithMean(True))
self.assertIsNotNone(model.setWithStd(True))
self.assertEqual(model.transform([1.0, 2.0, 3.0]), DenseVector([-1.0, -1.0, -1.0]))
def test_model_transform(self):
data = [
[1.0, 2.0, 3.0],
[2.0, 3.0, 4.0],
[3.0, 4.0, 5.0]
]
model = StandardScaler().fit(self.sc.parallelize(data))
self.assertEqual(model.transform([1.0, 2.0, 3.0]), DenseVector([1.0, 2.0, 3.0]))
class ElementwiseProductTests(MLlibTestCase):
def test_model_transform(self):
weight = Vectors.dense([3, 2, 1])
densevec = Vectors.dense([4, 5, 6])
sparsevec = Vectors.sparse(3, [0], [1])
eprod = ElementwiseProduct(weight)
self.assertEqual(eprod.transform(densevec), DenseVector([12, 10, 6]))
self.assertEqual(
eprod.transform(sparsevec), SparseVector(3, [0], [3]))
class HashingTFTest(MLlibTestCase):
def test_binary_term_freqs(self):
hashingTF = HashingTF(100).setBinary(True)
doc = "a a b c c c".split(" ")
n = hashingTF.numFeatures
output = hashingTF.transform(doc).toArray()
expected = Vectors.sparse(n, {hashingTF.indexOf("a"): 1.0,
hashingTF.indexOf("b"): 1.0,
hashingTF.indexOf("c"): 1.0}).toArray()
for i in range(0, n):
self.assertAlmostEqual(output[i], expected[i], 14, "Error at " + str(i) +
": expected " + str(expected[i]) + ", got " + str(output[i]))
class DimensionalityReductionTests(MLlibTestCase):
denseData = [
Vectors.dense([0.0, 1.0, 2.0]),
Vectors.dense([3.0, 4.0, 5.0]),
Vectors.dense([6.0, 7.0, 8.0]),
Vectors.dense([9.0, 0.0, 1.0])
]
sparseData = [
Vectors.sparse(3, [(1, 1.0), (2, 2.0)]),
Vectors.sparse(3, [(0, 3.0), (1, 4.0), (2, 5.0)]),
Vectors.sparse(3, [(0, 6.0), (1, 7.0), (2, 8.0)]),
Vectors.sparse(3, [(0, 9.0), (2, 1.0)])
]
def assertEqualUpToSign(self, vecA, vecB):
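        # Singular vectors and principal components are only determined up to
        # a sign flip, so either vecA - vecB or vecA + vecB should be ~zero.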
eq1 = vecA - vecB
eq2 = vecA + vecB
self.assertTrue(sum(abs(eq1)) < 1e-6 or sum(abs(eq2)) < 1e-6)
def test_svd(self):
denseMat = RowMatrix(self.sc.parallelize(self.denseData))
sparseMat = RowMatrix(self.sc.parallelize(self.sparseData))
m = 4
n = 3
for mat in [denseMat, sparseMat]:
for k in range(1, 4):
rm = mat.computeSVD(k, computeU=True)
self.assertEqual(rm.s.size, k)
self.assertEqual(rm.U.numRows(), m)
self.assertEqual(rm.U.numCols(), k)
self.assertEqual(rm.V.numRows, n)
self.assertEqual(rm.V.numCols, k)
# Test that U returned is None if computeU is set to False.
self.assertEqual(mat.computeSVD(1).U, None)
# Test that low rank matrices cannot have number of singular values
# greater than a limit.
rm = RowMatrix(self.sc.parallelize(tile([1, 2, 3], (3, 1))))
self.assertEqual(rm.computeSVD(3, False, 1e-6).s.size, 1)
def test_pca(self):
expected_pcs = array([
[0.0, 1.0, 0.0],
[sqrt(2.0) / 2.0, 0.0, sqrt(2.0) / 2.0],
[sqrt(2.0) / 2.0, 0.0, -sqrt(2.0) / 2.0]
])
n = 3
denseMat = RowMatrix(self.sc.parallelize(self.denseData))
sparseMat = RowMatrix(self.sc.parallelize(self.sparseData))
for mat in [denseMat, sparseMat]:
for k in range(1, 4):
pcs = mat.computePrincipalComponents(k)
self.assertEqual(pcs.numRows, n)
self.assertEqual(pcs.numCols, k)
# We can just test the updated principal component for equality.
self.assertEqualUpToSign(pcs.toArray()[:, k - 1], expected_pcs[:, k - 1])
if __name__ == "__main__":
from pyspark.mllib.tests.test_feature import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| bdrillard/spark | python/pyspark/mllib/tests/test_feature.py | Python | apache-2.0 | 7,210 |
#!/usr/bin/env python2
##
# PyChat
# https://github.com/leosartaj/PyChat.git
#
# Copyright (c) 2014 Sartaj Singh
# Licensed under the MIT license.
##
import unittest
from PyChat.client.gui.helper.helperFunc import validate_host
class TestValidateHost(unittest.TestCase):
"""
tests the validate_host function in helperFunc module
"""
def _checkList(self, func, list_str, val=True):
"""
Iterates a list
and checks if when func is called
it returns val or not
"""
for item in list_str:
self.assertEqual(func(item), val)
def test_correct_host(self):
correct = '127.0.0.1'
self.assertEqual(validate_host(correct), True)
def test_incorrect_len_host(self):
inlen = ['127', '127.0', '127.0.', '127.0.0', '127.0.0.1.', '127.0.0.1.1']
self._checkList(validate_host, inlen, False)
def test_incorrect_format_host(self):
informat = ['-127.0.0.1', '127.0.0.256', '127.-1.-1.1']
self._checkList(validate_host, informat, False)
| leosartaj/PyChat | PyChat/client/gui/helper/tests/tests_helperFunc/test_validate_host.py | Python | mit | 1,055 |
from sdf import *
IMAGE = 'examples/butterfly.png'
w, h = measure_image(IMAGE)
f = rounded_box((w * 1.1, h * 1.1, 0.1), 0.05)
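# Union (|) the rounded box with the extruded image intersected (&) with a
# thin z-slab, using the sdf library's boolean operators.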
f |= image(IMAGE).extrude(1) & slab(z0=0, z1=0.075)
f.save('image.stl')
| fogleman/sdf | examples/image.py | Python | mit | 202 |
from django.forms.formsets import BaseFormSet, formset_factory
from django.forms.models import BaseModelFormSet
from django.forms.models import modelformset_factory
from django import forms
from models import PlanillaHistoricas, ConceptosFolios, Folios, Tomos
class PlanillaHistoricasForm(forms.Form):
codi_empl_per = forms.CharField(max_length=50, widget=forms.TextInput(attrs={'class': 'nombre', 'placeholder': 'Apellidos y Nombres'}))
desc_plan_stp = forms.CharField(max_length=200, widget=forms.Textarea(attrs={'rows': 1}))
def __init__(self, concepto, *args, **kwargs):
super(PlanillaHistoricasForm, self).__init__(*args, **kwargs)
campos = dict()
egr = 'border-color: #e9322d; -webkit-box-shadow: 0 0 6px #f8b9b7; -moz-box-shadow: 0 0 6px #f8b9b7; box-shadow: 0 0 6px #f8b9b7;';
ing = 'border-color: #2D78E9; -webkit-box-shadow: 0 0 6px #2D78E9; -moz-box-shadow: 0 0 6px #2D78E9; box-shadow: 0 0 6px #2D78E9;';
total = 'border-color: rgb(70, 136, 71); -webkit-box-shadow: 0 0 6px rgb(70, 136, 71); -moz-box-shadow: 0 0 6px rgb(70, 136, 71); box-shadow: 0 0 6px rgb(70, 136, 71);';
for conc in concepto:
codigo = conc.codi_conc_tco.codi_conc_tco
descripcion = conc.codi_conc_tco.desc_cort_tco
tipo = conc.codi_conc_tco.tipo_conc_tco
clase = 'remuneraciones' if codigo == 'C373' else 'descuentos' if codigo == 'C374' else 'total' if codigo == 'C12' else 'monto'
attrs = {
'class': clase + ' error',
'data-title': descripcion,
'data-tipo': tipo,
'style': 'width:auto;font-size:15px;' + (ing if tipo == '1' else egr if tipo == '2' else total if codigo in ('C373', 'C12', 'C374') else ''),
'maxlength': 35,
'placeholder': descripcion
}
if codigo in campos:
campos[codigo] += 1
else:
campos[codigo] = 1
index = campos[codigo]
flag = '_%s' % index
self.fields['%s%s' % (codigo, flag)] = forms.CharField(widget=forms.TextInput(attrs=attrs))
self.fields['codigos'] = forms.CharField(max_length=700, widget=forms.HiddenInput())
class BasePlanillaHistoricasFormSet(BaseFormSet):
def __init__(self, *args, **kwargs):
self.concepto = kwargs['concepto']
del kwargs['concepto']
super(BasePlanillaHistoricasFormSet, self).__init__(*args, **kwargs)
def _construct_form(self, i, **kwargs):
kwargs['concepto'] = self.concepto
return super(BasePlanillaHistoricasFormSet, self)._construct_form(i, **kwargs)
def add_fields(self, form, index):
super(BasePlanillaHistoricasFormSet, self).add_fields(form, index)
PlanillaHistoricasFormSet = formset_factory(#form=PlanillaHistoricasForm,
form=PlanillaHistoricasForm,
formset=BasePlanillaHistoricasFormSet,
extra=0, can_delete=False) #exclude=('id', )) | heraldmatias/django-payroll | src/inei/planilla/forms.py | Python | gpl-3.0 | 3,110 |
# Copyright (c) 2012-2022, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
from . import AWSObject, AWSProperty, PropsDictType
from .validators import boolean
class S3(AWSProperty):
"""
`S3 <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codestar-githubrepository-s3.html>`__
"""
props: PropsDictType = {
"Bucket": (str, True),
"Key": (str, True),
"ObjectVersion": (str, False),
}
class Code(AWSProperty):
"""
`Code <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codestar-githubrepository-code.html>`__
"""
props: PropsDictType = {
"S3": (S3, True),
}
class GitHubRepository(AWSObject):
"""
`GitHubRepository <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codestar-githubrepository.html>`__
"""
resource_type = "AWS::CodeStar::GitHubRepository"
props: PropsDictType = {
"Code": (Code, False),
"ConnectionArn": (str, False),
"EnableIssues": (boolean, False),
"IsPrivate": (boolean, False),
"RepositoryAccessToken": (str, False),
"RepositoryDescription": (str, False),
"RepositoryName": (str, True),
"RepositoryOwner": (str, True),
}
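# Illustrative usage sketch (an assumption, not part of the autogenerated
# module): seeding a repository from an S3-hosted bundle; the bucket, key and
# names below are placeholders.
#
#     repo = GitHubRepository(
#         "SeededRepo",
#         RepositoryName="my-repo",
#         RepositoryOwner="my-org",
#         Code=Code(S3=S3(Bucket="my-bucket", Key="seed.zip")),
#     )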
| cloudtools/troposphere | troposphere/codestar.py | Python | bsd-2-clause | 1,389 |
#
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license
#
from __future__ import print_function
import errno
import logging
import os
import pickle
import shutil
import sys
import tempfile
import textwrap
import six
from ipalib.install import certmonger, sysrestore
from ipapython import ipautil
from ipapython.ipautil import (
format_netloc, ipa_generate_password, run, user_input)
from ipapython.admintool import ScriptError
from ipaplatform import services
from ipaplatform.paths import paths
from ipaplatform.tasks import tasks
from ipalib import api, errors, x509
from ipalib.constants import DOMAIN_LEVEL_0
from ipalib.util import (
validate_domain_name,
no_matching_interface_for_ip_address_warning,
)
import ipaclient.install.ntpconf
from ipaserver.install import (
adtrust, bindinstance, ca, dns, dsinstance,
httpinstance, installutils, kra, krbinstance,
ntpinstance, otpdinstance, custodiainstance, replication, service,
sysupgrade)
from ipaserver.install.installutils import (
IPA_MODULES, BadHostError, get_fqdn, get_server_ip_address,
is_ipa_configured, load_pkcs12, read_password, verify_fqdn,
update_hosts_file)
if six.PY3:
unicode = str
try:
from ipaserver.install import adtrustinstance
_server_trust_ad_installed = True
except ImportError:
_server_trust_ad_installed = False
NoneType = type(None)
logger = logging.getLogger(__name__)
SYSRESTORE_DIR_PATH = paths.SYSRESTORE
def validate_dm_password(password):
if len(password) < 8:
raise ValueError("Password must be at least 8 characters long")
if any(ord(c) < 0x20 for c in password):
raise ValueError("Password must not contain control characters")
if any(ord(c) >= 0x7F for c in password):
raise ValueError("Password must only contain ASCII characters")
# Disallow characters that pkisilent doesn't process properly:
bad_characters = '\\'
if any(c in bad_characters for c in password):
raise ValueError('Password must not contain these characters: %s' %
', '.join('"%s"' % c for c in bad_characters))
# TODO: Check https://fedorahosted.org/389/ticket/47849
# Actual behavior of setup-ds.pl is that it does not accept white
# space characters in password when called interactively but does when
# provided such password in INF file. But it ignores leading and trailing
# white spaces in INF file.
    # Disallow leading/trailing whitespace
if password.strip() != password:
raise ValueError('Password must not start or end with whitespace.')
def validate_admin_password(password):
if len(password) < 8:
raise ValueError("Password must be at least 8 characters long")
if any(ord(c) < 0x20 for c in password):
raise ValueError("Password must not contain control characters")
if any(ord(c) >= 0x7F for c in password):
raise ValueError("Password must only contain ASCII characters")
# Disallow characters that pkisilent doesn't process properly:
bad_characters = '\\'
if any(c in bad_characters for c in password):
raise ValueError('Password must not contain these characters: %s' %
', '.join('"%s"' % c for c in bad_characters))
def read_cache(dm_password):
"""
Returns a dict of cached answers or empty dict if no cache file exists.
"""
if not os.path.isfile(paths.ROOT_IPA_CACHE):
return {}
top_dir = tempfile.mkdtemp("ipa")
fname = "%s/cache" % top_dir
try:
installutils.decrypt_file(paths.ROOT_IPA_CACHE,
fname,
dm_password,
top_dir)
except Exception as e:
shutil.rmtree(top_dir)
raise Exception("Decryption of answer cache in %s failed, please "
"check your password." % paths.ROOT_IPA_CACHE)
try:
with open(fname, 'rb') as f:
try:
optdict = pickle.load(f)
except Exception as e:
raise Exception("Parse error in %s: %s" %
(paths.ROOT_IPA_CACHE, str(e)))
except IOError as e:
raise Exception("Read error in %s: %s" %
(paths.ROOT_IPA_CACHE, str(e)))
finally:
shutil.rmtree(top_dir)
# These are the only ones that may be overridden
try:
del optdict['external_cert_files']
except KeyError:
pass
return optdict
def write_cache(options):
"""
Takes a dict as input and writes a cached file of answers
"""
top_dir = tempfile.mkdtemp("ipa")
fname = "%s/cache" % top_dir
try:
with open(fname, 'wb') as f:
pickle.dump(options, f)
installutils.encrypt_file(fname,
paths.ROOT_IPA_CACHE,
options['dm_password'],
top_dir)
except IOError as e:
raise Exception("Unable to cache command-line options %s" % str(e))
finally:
shutil.rmtree(top_dir)
def read_host_name(host_default, no_host_dns=False):
print("Enter the fully qualified domain name of the computer")
print("on which you're setting up server software. Using the form")
print("<hostname>.<domainname>")
print("Example: master.example.com.")
print("")
print("")
if host_default == "":
host_default = "master.example.com"
host_name = user_input("Server host name", host_default, allow_empty=False)
print("")
verify_fqdn(host_name, no_host_dns)
return host_name
def read_domain_name(domain_name, unattended):
print("The domain name has been determined based on the host name.")
print("")
if not unattended:
domain_name = str(user_input("Please confirm the domain name",
domain_name))
print("")
return domain_name
def read_realm_name(domain_name, unattended):
print("The kerberos protocol requires a Realm name to be defined.")
print("This is typically the domain name converted to uppercase.")
print("")
if unattended:
return domain_name.upper()
realm_name = str(user_input("Please provide a realm name",
domain_name.upper()))
upper_dom = realm_name.upper()
if upper_dom != realm_name:
print("An upper-case realm name is required.")
if not user_input("Do you want to use " + upper_dom +
" as realm name?", True):
raise ScriptError(
"An upper-case realm name is required. Unable to continue.")
else:
realm_name = upper_dom
print("")
return realm_name
def read_dm_password():
print("Certain directory server operations require an administrative user.")
print("This user is referred to as the Directory Manager and has full "
"access")
print("to the Directory for system management tasks and will be added to "
"the")
print("instance of directory server created for IPA.")
print("The password must be at least 8 characters long.")
print("")
# TODO: provide the option of generating a random password
dm_password = read_password("Directory Manager",
validator=validate_dm_password)
return dm_password
def read_admin_password():
print("The IPA server requires an administrative user, named 'admin'.")
print("This user is a regular system account used for IPA server "
"administration.")
print("")
# TODO: provide the option of generating a random password
admin_password = read_password("IPA admin",
validator=validate_admin_password)
return admin_password
def check_dirsrv(unattended):
(ds_unsecure, ds_secure) = dsinstance.check_ports()
if not ds_unsecure or not ds_secure:
msg = ("IPA requires ports 389 and 636 for the Directory Server.\n"
"These are currently in use:\n")
if not ds_unsecure:
msg += "\t389\n"
if not ds_secure:
msg += "\t636\n"
raise ScriptError(msg)
def common_cleanup(func):
def decorated(installer):
success = False
try:
func(installer)
success = True
except KeyboardInterrupt:
ds = installer._ds
print("\nCleaning up...")
if ds:
print("Removing configuration for %s instance" % ds.serverid)
ds.stop()
if ds.serverid:
try:
dsinstance.remove_ds_instance(ds.serverid)
except ipautil.CalledProcessError:
logger.error("Failed to remove DS instance. You "
"may need to remove instance data "
"manually")
raise ScriptError()
finally:
if not success and installer._installation_cleanup:
# Do a cautious clean up as we don't know what failed and
# what is the state of the environment
try:
installer._fstore.restore_file(paths.HOSTS)
except Exception:
pass
return decorated
def remove_master_from_managed_topology(api_instance, options):
try:
# we may force the removal
server_del_options = dict(
force=True,
ignore_topology_disconnect=options.ignore_topology_disconnect,
ignore_last_of_role=options.ignore_last_of_role
)
replication.run_server_del_as_cli(
api_instance, api_instance.env.host, **server_del_options)
except errors.ServerRemovalError as e:
raise ScriptError(str(e))
except Exception as e:
# if the master was already deleted we will just get a warning
logger.warning("Failed to delete master: %s", e)
@common_cleanup
def install_check(installer):
options = installer
dirsrv_pkcs12_file = installer._dirsrv_pkcs12_file
http_pkcs12_file = installer._http_pkcs12_file
pkinit_pkcs12_file = installer._pkinit_pkcs12_file
dirsrv_pkcs12_info = installer._dirsrv_pkcs12_info
http_pkcs12_info = installer._http_pkcs12_info
pkinit_pkcs12_info = installer._pkinit_pkcs12_info
external_cert_file = installer._external_cert_file
external_ca_file = installer._external_ca_file
http_ca_cert = installer._ca_cert
tasks.check_ipv6_stack_enabled()
tasks.check_selinux_status()
if options.master_password:
msg = ("WARNING:\noption '-P/--master-password' is deprecated. "
"KDC master password of sufficient strength is autogenerated "
"during IPA server installation and should not be set "
"manually.")
print(textwrap.fill(msg, width=79, replace_whitespace=False))
installer._installation_cleanup = True
print("\nThe log file for this installation can be found in "
"/var/log/ipaserver-install.log")
if (not options.external_ca and not options.external_cert_files and
is_ipa_configured()):
installer._installation_cleanup = False
raise ScriptError(
"IPA server is already configured on this system.\n"
"If you want to reinstall the IPA server, please uninstall "
"it first using 'ipa-server-install --uninstall'.")
client_fstore = sysrestore.FileStore(paths.IPA_CLIENT_SYSRESTORE)
if client_fstore.has_files():
installer._installation_cleanup = False
raise ScriptError(
"IPA client is already configured on this system.\n"
"Please uninstall it before configuring the IPA server, "
"using 'ipa-client-install --uninstall'")
fstore = sysrestore.FileStore(SYSRESTORE_DIR_PATH)
sstore = sysrestore.StateFile(SYSRESTORE_DIR_PATH)
# This will override any settings passed in on the cmdline
if os.path.isfile(paths.ROOT_IPA_CACHE):
if options.dm_password is not None:
dm_password = options.dm_password
else:
dm_password = read_password("Directory Manager", confirm=False)
if dm_password is None:
raise ScriptError("Directory Manager password required")
try:
cache_vars = read_cache(dm_password)
options.__dict__.update(cache_vars)
if cache_vars.get('external_ca', False):
options.external_ca = False
options.interactive = False
except Exception as e:
raise ScriptError("Cannot process the cache file: %s" % str(e))
# We only set up the CA if the PKCS#12 options are not given.
if options.dirsrv_cert_files:
setup_ca = False
else:
setup_ca = True
options.setup_ca = setup_ca
if not setup_ca and options.ca_subject:
raise ScriptError(
"--ca-subject cannot be used with CA-less installation")
if not setup_ca and options.subject_base:
raise ScriptError(
"--subject-base cannot be used with CA-less installation")
if not setup_ca and options.setup_kra:
raise ScriptError(
"--setup-kra cannot be used with CA-less installation")
print("======================================="
"=======================================")
print("This program will set up the FreeIPA Server.")
print("")
print("This includes:")
if setup_ca:
print(" * Configure a stand-alone CA (dogtag) for certificate "
"management")
if not options.no_ntp:
print(" * Configure the Network Time Daemon (ntpd)")
print(" * Create and configure an instance of Directory Server")
print(" * Create and configure a Kerberos Key Distribution Center (KDC)")
print(" * Configure Apache (httpd)")
if options.setup_kra:
print(" * Configure KRA (dogtag) for secret management")
if options.setup_dns:
print(" * Configure DNS (bind)")
if options.setup_adtrust:
print(" * Configure Samba (smb) and winbind for managing AD trusts")
if not options.no_pkinit:
print(" * Configure the KDC to enable PKINIT")
if options.no_ntp:
print("")
print("Excluded by options:")
print(" * Configure the Network Time Daemon (ntpd)")
if installer.interactive:
print("")
print("To accept the default shown in brackets, press the Enter key.")
print("")
if not options.external_cert_files:
# Make sure the 389-ds ports are available
check_dirsrv(not installer.interactive)
if not options.no_ntp:
try:
ipaclient.install.ntpconf.check_timedate_services()
except ipaclient.install.ntpconf.NTPConflictingService as e:
print(("WARNING: conflicting time&date synchronization service '%s'"
" will be disabled" % e.conflicting_service))
print("in favor of ntpd")
print("")
except ipaclient.install.ntpconf.NTPConfigurationError:
pass
# Check to see if httpd is already configured to listen on 443
if httpinstance.httpd_443_configured():
raise ScriptError("Aborting installation")
if not options.setup_dns and installer.interactive:
if ipautil.user_input("Do you want to configure integrated DNS "
"(BIND)?", False):
options.setup_dns = True
print("")
# check bind packages are installed
if options.setup_dns:
# Don't require an external DNS to say who we are if we are
# setting up a local DNS server.
options.no_host_dns = True
    # Check that the hostname is correctly configured; it must be, as the kldap
    # utilities just use the hostname as returned by getaddrinfo to set up some
    # of the standard entries.
if options.host_name:
host_default = options.host_name
else:
host_default = get_fqdn()
try:
if not installer.interactive or options.host_name:
verify_fqdn(host_default, options.no_host_dns)
host_name = host_default
else:
host_name = read_host_name(host_default, options.no_host_dns)
except BadHostError as e:
raise ScriptError(e)
host_name = host_name.lower()
logger.debug("will use host_name: %s\n", host_name)
if not options.domain_name:
domain_name = read_domain_name(host_name[host_name.find(".")+1:],
not installer.interactive)
logger.debug("read domain_name: %s\n", domain_name)
try:
validate_domain_name(domain_name)
except ValueError as e:
raise ScriptError("Invalid domain name: %s" % unicode(e))
else:
domain_name = options.domain_name
domain_name = domain_name.lower()
if not options.realm_name:
realm_name = read_realm_name(domain_name, not installer.interactive)
logger.debug("read realm_name: %s\n", realm_name)
try:
validate_domain_name(realm_name, entity="realm")
except ValueError as e:
raise ScriptError("Invalid realm name: {}".format(unicode(e)))
else:
realm_name = options.realm_name.upper()
if not options.subject_base:
options.subject_base = installutils.default_subject_base(realm_name)
if not options.ca_subject:
options.ca_subject = \
installutils.default_ca_subject_dn(options.subject_base)
if options.http_cert_files:
if options.http_pin is None:
options.http_pin = installutils.read_password(
"Enter Apache Server private key unlock",
confirm=False, validate=False, retry=False)
if options.http_pin is None:
raise ScriptError(
"Apache Server private key unlock password required")
http_pkcs12_file, http_pin, http_ca_cert = load_pkcs12(
cert_files=options.http_cert_files,
key_password=options.http_pin,
key_nickname=options.http_cert_name,
ca_cert_files=options.ca_cert_files,
host_name=host_name)
http_pkcs12_info = (http_pkcs12_file.name, http_pin)
if options.dirsrv_cert_files:
if options.dirsrv_pin is None:
options.dirsrv_pin = read_password(
"Enter Directory Server private key unlock",
confirm=False, validate=False, retry=False)
if options.dirsrv_pin is None:
raise ScriptError(
"Directory Server private key unlock password required")
dirsrv_pkcs12_file, dirsrv_pin, dirsrv_ca_cert = load_pkcs12(
cert_files=options.dirsrv_cert_files,
key_password=options.dirsrv_pin,
key_nickname=options.dirsrv_cert_name,
ca_cert_files=options.ca_cert_files,
host_name=host_name)
dirsrv_pkcs12_info = (dirsrv_pkcs12_file.name, dirsrv_pin)
if options.pkinit_cert_files:
if options.pkinit_pin is None:
options.pkinit_pin = read_password(
"Enter Kerberos KDC private key unlock",
confirm=False, validate=False, retry=False)
if options.pkinit_pin is None:
raise ScriptError(
"Kerberos KDC private key unlock password required")
pkinit_pkcs12_file, pkinit_pin, pkinit_ca_cert = load_pkcs12(
cert_files=options.pkinit_cert_files,
key_password=options.pkinit_pin,
key_nickname=options.pkinit_cert_name,
ca_cert_files=options.ca_cert_files,
realm_name=realm_name)
pkinit_pkcs12_info = (pkinit_pkcs12_file.name, pkinit_pin)
if (options.http_cert_files and options.dirsrv_cert_files and
http_ca_cert != dirsrv_ca_cert):
raise ScriptError(
"Apache Server SSL certificate and Directory Server SSL "
"certificate are not signed by the same CA certificate")
if (options.http_cert_files and
options.pkinit_cert_files and
http_ca_cert != pkinit_ca_cert):
raise ScriptError(
"Apache Server SSL certificate and PKINIT KDC "
"certificate are not signed by the same CA certificate")
if not options.dm_password:
dm_password = read_dm_password()
if dm_password is None:
raise ScriptError("Directory Manager password required")
else:
dm_password = options.dm_password
if not options.master_password:
master_password = ipa_generate_password()
else:
master_password = options.master_password
if not options.admin_password:
admin_password = read_admin_password()
if admin_password is None:
raise ScriptError("IPA admin password required")
else:
admin_password = options.admin_password
# Configuration for ipalib, we will bootstrap and finalize later, after
# we are sure we have the configuration file ready.
cfg = dict(
context='installer',
confdir=paths.ETC_IPA,
in_server=True,
# make sure host name specified by user is used instead of default
host=host_name,
)
if setup_ca:
# we have an IPA-integrated CA
cfg['ca_host'] = host_name
# Create the management framework config file and finalize api
target_fname = paths.IPA_DEFAULT_CONF
fd = open(target_fname, "w")
fd.write("[global]\n")
fd.write("host=%s\n" % host_name)
fd.write("basedn=%s\n" % ipautil.realm_to_suffix(realm_name))
fd.write("realm=%s\n" % realm_name)
fd.write("domain=%s\n" % domain_name)
fd.write("xmlrpc_uri=https://%s/ipa/xml\n" % format_netloc(host_name))
fd.write("ldap_uri=ldapi://%%2fvar%%2frun%%2fslapd-%s.socket\n" %
installutils.realm_to_serverid(realm_name))
if setup_ca:
fd.write("enable_ra=True\n")
fd.write("ra_plugin=dogtag\n")
fd.write("dogtag_version=10\n")
else:
fd.write("enable_ra=False\n")
fd.write("ra_plugin=none\n")
fd.write("mode=production\n")
fd.close()
# Must be readable for everyone
os.chmod(target_fname, 0o644)
api.bootstrap(**cfg)
api.finalize()
if setup_ca:
ca.install_check(False, None, options)
if options.setup_kra:
kra.install_check(api, None, options)
if options.setup_dns:
dns.install_check(False, api, False, options, host_name)
ip_addresses = dns.ip_addresses
else:
ip_addresses = get_server_ip_address(host_name,
not installer.interactive, False,
options.ip_addresses)
# check addresses here, dns module is doing own check
no_matching_interface_for_ip_address_warning(ip_addresses)
instance_name = "-".join(realm_name.split("."))
dirsrv = services.knownservices.dirsrv
if (options.external_cert_files
and dirsrv.is_installed(instance_name)
and not dirsrv.is_running(instance_name)):
logger.debug('Starting Directory Server')
services.knownservices.dirsrv.start(instance_name)
if options.setup_adtrust:
adtrust.install_check(False, options, api)
    # The installer needs to update the hosts file when the DNS subsystem is
    # being installed or custom addresses are used.
if options.ip_addresses or options.setup_dns:
installer._update_hosts_file = True
print()
print("The IPA Master Server will be configured with:")
print("Hostname: %s" % host_name)
print("IP address(es): %s" % ", ".join(str(ip) for ip in ip_addresses))
print("Domain name: %s" % domain_name)
print("Realm name: %s" % realm_name)
print()
if setup_ca:
ca.print_ca_configuration(options)
print()
if options.setup_dns:
print("BIND DNS server will be configured to serve IPA domain with:")
print("Forwarders: %s" % (
"No forwarders" if not options.forwarders
else ", ".join([str(ip) for ip in options.forwarders])
))
print('Forward policy: %s' % options.forward_policy)
print("Reverse zone(s): %s" % (
"No reverse zone" if options.no_reverse or not dns.reverse_zones
else ", ".join(str(rz) for rz in dns.reverse_zones)
))
print()
if not options.setup_adtrust:
# If domain name and realm does not match, IPA server will not be able
# to establish trust with Active Directory. Print big fat warning.
realm_not_matching_domain = (domain_name.upper() != realm_name)
if realm_not_matching_domain:
print("WARNING: Realm name does not match the domain name.\n"
"You will not be able to establish trusts with Active "
"Directory unless\nthe realm name of the IPA server matches "
"its domain name.\n\n")
if installer.interactive and not user_input(
"Continue to configure the system with these values?", False):
raise ScriptError("Installation aborted")
options.realm_name = realm_name
options.domain_name = domain_name
options.dm_password = dm_password
options.master_password = master_password
options.admin_password = admin_password
options._host_name_overridden = bool(options.host_name)
options.host_name = host_name
options.ip_addresses = ip_addresses
installer._fstore = fstore
installer._sstore = sstore
installer._dirsrv_pkcs12_file = dirsrv_pkcs12_file
installer._http_pkcs12_file = http_pkcs12_file
installer._pkinit_pkcs12_file = pkinit_pkcs12_file
installer._dirsrv_pkcs12_info = dirsrv_pkcs12_info
installer._http_pkcs12_info = http_pkcs12_info
installer._pkinit_pkcs12_info = pkinit_pkcs12_info
installer._external_cert_file = external_cert_file
installer._external_ca_file = external_ca_file
installer._ca_cert = http_ca_cert
@common_cleanup
def install(installer):
options = installer
fstore = installer._fstore
sstore = installer._sstore
dirsrv_pkcs12_info = installer._dirsrv_pkcs12_info
http_pkcs12_info = installer._http_pkcs12_info
pkinit_pkcs12_info = installer._pkinit_pkcs12_info
http_ca_cert = installer._ca_cert
realm_name = options.realm_name
domain_name = options.domain_name
dm_password = options.dm_password
master_password = options.master_password
admin_password = options.admin_password
host_name = options.host_name
ip_addresses = options.ip_addresses
setup_ca = options.setup_ca
    # Installation has started. In case of failure, IPA sysrestore items are
    # not restored, so that the root cause can be investigated.
installer._installation_cleanup = False
if installer.interactive:
print("")
print("The following operations may take some minutes to complete.")
print("Please wait until the prompt is returned.")
print("")
# set hostname (transient and static) if user instructed us to do so
if options._host_name_overridden:
tasks.backup_hostname(fstore, sstore)
tasks.set_hostname(host_name)
if installer._update_hosts_file:
update_hosts_file(ip_addresses, host_name, fstore)
# Create a directory server instance
if not options.external_cert_files:
# Configure ntpd
if not options.no_ntp:
ipaclient.install.ntpconf.force_ntpd(sstore)
ntp = ntpinstance.NTPInstance(fstore)
if not ntp.is_configured():
ntp.create_instance()
if options.dirsrv_cert_files:
ds = dsinstance.DsInstance(fstore=fstore,
domainlevel=options.domainlevel,
config_ldif=options.dirsrv_config_file)
installer._ds = ds
ds.create_instance(realm_name, host_name, domain_name,
dm_password, dirsrv_pkcs12_info,
idstart=options.idstart, idmax=options.idmax,
subject_base=options.subject_base,
ca_subject=options.ca_subject,
hbac_allow=not options.no_hbac_allow,
setup_pkinit=not options.no_pkinit)
else:
ds = dsinstance.DsInstance(fstore=fstore,
domainlevel=options.domainlevel,
config_ldif=options.dirsrv_config_file)
installer._ds = ds
ds.create_instance(realm_name, host_name, domain_name,
dm_password,
idstart=options.idstart, idmax=options.idmax,
subject_base=options.subject_base,
ca_subject=options.ca_subject,
hbac_allow=not options.no_hbac_allow,
setup_pkinit=not options.no_pkinit)
ntpinstance.ntp_ldap_enable(host_name, ds.suffix, realm_name)
else:
api.Backend.ldap2.connect()
ds = dsinstance.DsInstance(fstore=fstore,
domainlevel=options.domainlevel)
installer._ds = ds
ds.init_info(
realm_name, host_name, domain_name, dm_password,
options.subject_base, options.ca_subject, 1101, 1100, None,
setup_pkinit=not options.no_pkinit)
krb = krbinstance.KrbInstance(fstore)
if not options.external_cert_files:
krb.create_instance(realm_name, host_name, domain_name,
dm_password, master_password,
setup_pkinit=not options.no_pkinit,
pkcs12_info=pkinit_pkcs12_info,
subject_base=options.subject_base)
else:
krb.init_info(realm_name, host_name,
setup_pkinit=not options.no_pkinit,
subject_base=options.subject_base)
if setup_ca:
if not options.external_cert_files and options.external_ca:
# stage 1 of external CA installation
options.realm_name = realm_name
options.domain_name = domain_name
options.master_password = master_password
options.dm_password = dm_password
options.admin_password = admin_password
options.host_name = host_name
options.reverse_zones = dns.reverse_zones
cache_vars = {n: options.__dict__[n] for o, n in installer.knobs()
if n in options.__dict__}
write_cache(cache_vars)
ca.install_step_0(False, None, options)
else:
# Put the CA cert where other instances expect it
x509.write_certificate(http_ca_cert, paths.IPA_CA_CRT)
os.chmod(paths.IPA_CA_CRT, 0o444)
if not options.no_pkinit:
x509.write_certificate(http_ca_cert, paths.KDC_CA_BUNDLE_PEM)
else:
with open(paths.KDC_CA_BUNDLE_PEM, 'w'):
pass
os.chmod(paths.KDC_CA_BUNDLE_PEM, 0o444)
x509.write_certificate(http_ca_cert, paths.CA_BUNDLE_PEM)
os.chmod(paths.CA_BUNDLE_PEM, 0o444)
# we now need to enable ssl on the ds
ds.enable_ssl()
if setup_ca:
ca.install_step_1(False, None, options)
otpd = otpdinstance.OtpdInstance()
otpd.create_instance('OTPD', host_name,
ipautil.realm_to_suffix(realm_name))
custodia = custodiainstance.CustodiaInstance(host_name, realm_name)
custodia.create_instance()
# Create a HTTP instance
http = httpinstance.HTTPInstance(fstore)
if options.http_cert_files:
http.create_instance(
realm_name, host_name, domain_name, dm_password,
pkcs12_info=http_pkcs12_info, subject_base=options.subject_base,
auto_redirect=not options.no_ui_redirect,
ca_is_configured=setup_ca)
else:
http.create_instance(
realm_name, host_name, domain_name, dm_password,
subject_base=options.subject_base,
auto_redirect=not options.no_ui_redirect,
ca_is_configured=setup_ca)
tasks.restore_context(paths.CACHE_IPA_SESSIONS)
ca.set_subject_base_in_config(options.subject_base)
# configure PKINIT now that all required services are in place
krb.enable_ssl()
# Apply any LDAP updates. Needs to be done after the configuration file
# is created. DS is restarted in the process.
service.print_msg("Applying LDAP updates")
ds.apply_updates()
# Restart krb after configurations have been changed
service.print_msg("Restarting the KDC")
krb.restart()
if options.setup_kra:
kra.install(api, None, options)
if options.setup_dns:
dns.install(False, False, options)
else:
# Create a BIND instance
bind = bindinstance.BindInstance(fstore)
bind.setup(host_name, ip_addresses, realm_name,
domain_name, (), 'first', (),
zonemgr=options.zonemgr,
no_dnssec_validation=options.no_dnssec_validation)
bind.create_file_with_system_records()
if options.setup_adtrust:
adtrust.install(False, options, fstore, api)
# Set the admin user kerberos password
ds.change_admin_password(admin_password)
# Call client install script
service.print_msg("Configuring client side components")
try:
args = [paths.IPA_CLIENT_INSTALL, "--on-master", "--unattended",
"--domain", domain_name, "--server", host_name,
"--realm", realm_name, "--hostname", host_name]
if options.no_dns_sshfp:
args.append("--no-dns-sshfp")
if options.ssh_trust_dns:
args.append("--ssh-trust-dns")
if options.no_ssh:
args.append("--no-ssh")
if options.no_sshd:
args.append("--no-sshd")
if options.mkhomedir:
args.append("--mkhomedir")
run(args, redirect_output=True)
print()
except Exception:
raise ScriptError("Configuration of client side components failed!")
# Everything installed properly, activate ipa service.
services.knownservices.ipa.enable()
print("======================================="
"=======================================")
print("Setup complete")
print("")
print("Next steps:")
print("\t1. You must make sure these network ports are open:")
print("\t\tTCP Ports:")
print("\t\t * 80, 443: HTTP/HTTPS")
print("\t\t * 389, 636: LDAP/LDAPS")
print("\t\t * 88, 464: kerberos")
if options.setup_dns:
print("\t\t * 53: bind")
print("\t\tUDP Ports:")
print("\t\t * 88, 464: kerberos")
if options.setup_dns:
print("\t\t * 53: bind")
if not options.no_ntp:
print("\t\t * 123: ntp")
print("")
print("\t2. You can now obtain a kerberos ticket using the command: "
"'kinit admin'")
print("\t This ticket will allow you to use the IPA tools (e.g., ipa "
"user-add)")
print("\t and the web user interface.")
if not services.knownservices.ntpd.is_running():
print("\t3. Kerberos requires time synchronization between clients")
print("\t and servers for correct operation. You should consider "
"enabling ntpd.")
print("")
if setup_ca:
print(("Be sure to back up the CA certificates stored in " +
paths.CACERT_P12))
print("These files are required to create replicas. The password for "
"these")
print("files is the Directory Manager password")
if os.path.isfile(paths.ROOT_IPA_CACHE):
os.remove(paths.ROOT_IPA_CACHE)
@common_cleanup
def uninstall_check(installer):
options = installer
tasks.check_selinux_status()
installer._installation_cleanup = False
if not is_ipa_configured():
print("WARNING:\nIPA server is not configured on this system. "
"If you want to install the\nIPA server, please install "
"it using 'ipa-server-install'.")
fstore = sysrestore.FileStore(SYSRESTORE_DIR_PATH)
sstore = sysrestore.StateFile(SYSRESTORE_DIR_PATH)
# Configuration for ipalib, we will bootstrap and finalize later, after
# we are sure we have the configuration file ready.
cfg = dict(
context='installer',
confdir=paths.ETC_IPA,
in_server=True,
)
# We will need at least api.env, finalize api now. This system is
# already installed, so the configuration file is there.
api.bootstrap(**cfg)
api.finalize()
if installer.interactive:
print("\nThis is a NON REVERSIBLE operation and will delete all data "
"and configuration!\nIt is highly recommended to take a backup of "
"existing data and configuration using ipa-backup utility "
"before proceeding.\n")
if not user_input("Are you sure you want to continue with the "
"uninstall procedure?", False):
raise ScriptError("Aborting uninstall operation.")
try:
api.Backend.ldap2.connect(autobind=True)
domain_level = dsinstance.get_domain_level(api)
except Exception:
msg = ("\nWARNING: Failed to connect to Directory Server to find "
"information about replication agreements. Uninstallation "
"will continue despite the possible existing replication "
"agreements.\n\n"
"If this server is the last instance of CA, KRA, or DNSSEC "
"master, uninstallation may result in data loss.\n\n"
)
print(textwrap.fill(msg, width=80, replace_whitespace=False))
if (installer.interactive and not user_input(
"Are you sure you want to continue with the uninstall "
"procedure?", False)):
raise ScriptError("Aborting uninstall operation.")
else:
dns.uninstall_check(options)
if domain_level == DOMAIN_LEVEL_0:
rm = replication.ReplicationManager(
realm=api.env.realm,
hostname=api.env.host,
dirman_passwd=None,
conn=api.Backend.ldap2
)
agreements = rm.find_ipa_replication_agreements()
if agreements:
other_masters = [a.get('cn')[0][4:] for a in agreements]
msg = (
"\nReplication agreements with the following IPA masters "
"found: %s. Removing any replication agreements before "
"uninstalling the server is strongly recommended. You can "
"remove replication agreements by running the following "
"command on any other IPA master:\n" % ", ".join(
other_masters)
)
cmd = "$ ipa-replica-manage del %s\n" % api.env.host
print(textwrap.fill(msg, width=80, replace_whitespace=False))
print(cmd)
if (installer.interactive and
not user_input("Are you sure you want to continue with"
" the uninstall procedure?", False)):
raise ScriptError("Aborting uninstall operation.")
else:
remove_master_from_managed_topology(api, options)
api.Backend.ldap2.disconnect()
installer._fstore = fstore
installer._sstore = sstore
@common_cleanup
def uninstall(installer):
fstore = installer._fstore
sstore = installer._sstore
rv = 0
print("Shutting down all IPA services")
try:
services.knownservices.ipa.stop()
except Exception:
# Fallback to direct ipactl stop only if system command fails
try:
run([paths.IPACTL, "stop"], raiseonerr=False)
except Exception:
pass
ntpinstance.NTPInstance(fstore).uninstall()
kra.uninstall()
ca.uninstall()
dns.uninstall()
httpinstance.HTTPInstance(fstore).uninstall()
krbinstance.KrbInstance(fstore).uninstall()
dsinstance.DsInstance(fstore=fstore).uninstall()
if _server_trust_ad_installed:
adtrustinstance.ADTRUSTInstance(fstore).uninstall()
custodiainstance.CustodiaInstance().uninstall()
otpdinstance.OtpdInstance().uninstall()
tasks.restore_hostname(fstore, sstore)
fstore.restore_all_files()
try:
os.remove(paths.ROOT_IPA_CACHE)
except Exception:
pass
try:
os.remove(paths.ROOT_IPA_CSR)
except Exception:
pass
# ipa-client-install removes /etc/ipa/default.conf
sstore._load()
ipaclient.install.ntpconf.restore_forced_ntpd(sstore)
# Clean up group_exists (unused since IPA 2.2, not being set since 4.1)
sstore.restore_state("install", "group_exists")
services.knownservices.ipa.disable()
# remove upgrade state file
sysupgrade.remove_upgrade_file()
if fstore.has_files():
logger.error('Some files have not been restored, see '
'%s/sysrestore.index', SYSRESTORE_DIR_PATH)
has_state = False
for module in IPA_MODULES: # from installutils
if sstore.has_state(module):
logger.error('Some installation state for %s has not been '
'restored, see %s/sysrestore.state',
module, SYSRESTORE_DIR_PATH)
has_state = True
rv = 1
if has_state:
logger.error('Some installation state has not been restored.\n'
'This may cause re-installation to fail.\n'
'It should be safe to remove %s/sysrestore.state '
'but it may\n'
                     'mean your system hasn\'t been restored to its '
'pre-installation state.', SYSRESTORE_DIR_PATH)
# Note that this name will be wrong after the first uninstall.
dirname = dsinstance.config_dirname(
installutils.realm_to_serverid(api.env.realm))
dirs = [dirname, paths.PKI_TOMCAT_ALIAS_DIR, paths.HTTPD_ALIAS_DIR]
ids = certmonger.check_state(dirs)
if ids:
logger.error('Some certificates may still be tracked by '
'certmonger.\n'
'This will cause re-installation to fail.\n'
'Start the certmonger service and list the '
'certificates being tracked\n'
' # getcert list\n'
'These may be untracked by executing\n'
' # getcert stop-tracking -i <request_id>\n'
'for each id in: %s', ', '.join(ids))
# Remove the cert renewal lock file
try:
os.remove(paths.IPA_RENEWAL_LOCK)
except OSError as e:
if e.errno != errno.ENOENT:
logger.warning("Failed to remove file %s: %s",
paths.IPA_RENEWAL_LOCK, e)
print("Removing IPA client configuration")
try:
result = run([paths.IPA_CLIENT_INSTALL, "--on-master",
"--unattended", "--uninstall"],
raiseonerr=False, redirect_output=True)
if result.returncode not in [0, 2]:
raise RuntimeError("Failed to configure the client")
except Exception:
rv = 1
print("Uninstall of client side components failed!")
sys.exit(rv)
def init(installer):
installer.unattended = not installer.interactive
installer.domainlevel = installer.domain_level
installer._installation_cleanup = True
installer._ds = None
installer._dirsrv_pkcs12_file = None
installer._http_pkcs12_file = None
installer._pkinit_pkcs12_file = None
installer._dirsrv_pkcs12_info = None
installer._http_pkcs12_info = None
installer._pkinit_pkcs12_info = None
installer._external_cert_file = None
installer._external_ca_file = None
installer._ca_cert = None
installer._update_hosts_file = False
| apophys/freeipa | ipaserver/install/server/install.py | Python | gpl-3.0 | 44,536 |
from django.core.management.base import BaseCommand
from tracking.utils_salt import salt_load_computers
class Command(BaseCommand):
help = 'Loads data from Salt (minions/grains).'
def handle(self, *args, **options):
salt_load_computers()
| rockychen-dpaw/oim-cms | tracking/management/commands/salt_load_data.py | Python | apache-2.0 | 257 |
"""
Using names.txt (right click and 'Save Link/Target As...'), a 46K text file containing over five-thousand first names,
begin by sorting it into alphabetical order. Then working out the alphabetical value for each name,
multiply this value by its alphabetical position in the list to obtain a name score.
For example, when the list is sorted into alphabetical order, COLIN, which is worth 3 + 15 + 12 + 9 + 14 = 53,
is the 938th name in the list. So, COLIN would obtain a score of 938 x 53 = 49714.
What is the total of all the name scores in the file?
"""
namesFile = open('names.txt')
allNames = namesFile.readline()
namesFile.close()
allNames = allNames.replace('"', '')
names = sorted(allNames.split(','))
sumOfScoreOfNames = 0
valueOfA = ord('A')
i = 0
for name in names:
i += 1
sumOfCharsInName = 0
for c in name:
sumOfCharsInName += (ord(c) - valueOfA + 1)
scoreOfName = sumOfCharsInName * i
sumOfScoreOfNames += scoreOfName
print str(sumOfScoreOfNames) | pgrm/project-euler | 0001-0050/22-Names_scores.py | Python | apache-2.0 | 999 |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared testing utilities."""
class _Monkey(object):
    # Context manager for replacing module attributes within the scope of a test.
def __init__(self, module, **kw):
self.module = module
if len(kw) == 0: # pragma: NO COVER
raise ValueError('_Monkey was used with nothing to monkey-patch')
self.to_restore = dict([(key, getattr(module, key)) for key in kw])
for key, value in kw.items():
setattr(module, key, value)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for key, value in self.to_restore.items():
setattr(self.module, key, value)
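# Illustrative usage sketch (an assumption, not part of the original module):
# patch an attribute on a module for the duration of a test; the original
# value is restored on exit. ``some_module`` and ``CONNECTION`` are
# hypothetical names.
#
#     with _Monkey(some_module, CONNECTION=_FakeConnection()):
#         ...  # code under test sees the fake connection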
class _NamedTemporaryFile(object):
def __init__(self, suffix=''):
import os
import tempfile
filehandle, self.name = tempfile.mkstemp(suffix=suffix)
os.close(filehandle)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
import os
os.remove(self.name)
class _GAXPageIterator(object):
def __init__(self, items, page_token):
self._items = items
self.page_token = page_token
def next(self):
items, self._items = self._items, None
return items
class _GAXBundlingEvent(object):
result = None
def __init__(self, result):
self._result = result
def is_set(self):
return self.result is not None
def wait(self, *_):
self.result = self._result
| VitalLabs/gcloud-python | gcloud/_testing.py | Python | apache-2.0 | 2,090 |
from django.core.management.base import BaseCommand
from firecares.firestation.models import FireDepartment
from firecares.tasks.update import calculate_department_census_geom
class Command(BaseCommand):
help = """Calculates and caches owned department boundaries based on census tracts that had a incidents responded to by the given department."""
def add_arguments(self, parser):
parser.add_argument('--ids', nargs='+', type=int, help='list of fire department ids to process')
def handle(self, *args, **options):
ids = options.get('ids')
if ids is None:
ids = FireDepartment.objects.all().values_list('id', flat=True)
for i in ids:
calculate_department_census_geom.delay(i)
self.stdout.write(self.style.MIGRATE_SUCCESS('Queued {} departments for census tract updates'.format(len(ids))))
| FireCARES/firecares | firecares/firestation/management/commands/calc-department-owned-tracts.py | Python | mit | 872 |
# ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# =============enthought library imports=======================
from __future__ import absolute_import
from __future__ import print_function
from threading import Thread
from traits.api import Instance, Enum, DelegatesTo, Property, Button, Any, Float
from traitsui.api import View, Item, HGroup, spring, ListEditor, VGroup, UItem
# =============standard library imports ========================
# =============local library imports ==========================
from pychron.managers.manager import Manager
from pychron.hardware.motion_controller import MotionController
from pychron.paths import paths
from pychron.core.helpers.filetools import parse_file
import six
class MotionControllerManager(Manager):
""" """
motion_controller = Instance(MotionController)
_axes = DelegatesTo("motion_controller", prefix="axes")
axes = Property
apply_button = Button("Apply")
read_button = Button("Read")
load_button = Button("Load")
# print_param_table = Button('Print')
motion_group = DelegatesTo("motion_controller", prefix="groupobj")
view_style = Enum("simple_view", "full_view")
selected = Any
xmove_to_button = Button("Move X")
ymove_to_button = Button("Move Y")
xtarget_position = Float
ytarget_position = Float
def kill(self, **kw):
super(MotionControllerManager, self).kill(**kw)
self.motion_controller.save_axes_parameters()
def _get_axis_by_id(self, aid):
return next((a for a in six.itervalues(self._axes) if a.id == int(aid)), None)
def _get_axes(self):
keys = list(self._axes.keys())
keys.sort()
axs = [self._axes[k] for k in keys]
if self.motion_group:
axs += [self.motion_group]
return axs
def _get_selected(self):
ax = self.selected
if ax is None:
ax = self.axes[0]
return ax
# handlers
def _xmove_to_button_fired(self):
self._move_to("x", self.xtarget_position)
def _ymove_to_button_fired(self):
self._move_to("y", self.ytarget_position)
def _move_to(self, k, v):
def func():
self.motion_controller.start_timer()
ax = self.motion_controller.axes[k]
self.motion_controller.destroy_group()
self.motion_controller._axis_move("{}PA{}".format(ax.id, v), block=k)
self.motion_controller.update_axes()
t = Thread(target=func)
t.start()
def _read_button_fired(self):
ax = self._get_selected()
ax._read_parameters_fired()
def _apply_button_fired(self):
ax = self._get_selected()
print(ax, ax.id)
if ax is not None:
ax.upload_parameters_to_device()
self.motion_controller.save_axes_parameters(axis=ax)
def _load_button_fired(self):
path = self.open_file_dialog(default_directory=paths.device_dir)
# path = os.path.join(root_dir, 'zobs', 'NewStage-Axis-1.txt')
if path is not None:
# sniff the file to get the axis
lines = parse_file(path)
aid = lines[0][0]
try:
ax = self._get_axis_by_id(aid)
func = ax.load_parameters_from_file
except ValueError:
# this is a txt file not a cfg
ax = self._get_selected()
if ax is not None:
func = ax.load
if ax is not None:
func(path)
# ax.load_parameters_from_file(path)
# ax.load_parameters_from_file(path)
def traits_view(self):
""" """
cgrp = VGroup(
Item(
"axes",
style="custom",
show_label=False,
editor=ListEditor(
use_notebook=True,
dock_style="tab",
page_name=".name",
selected="selected",
view="full_view",
),
),
HGroup(
spring,
Item("load_button"),
Item("read_button"),
Item("apply_button"),
show_labels=False,
),
)
tgrp = VGroup(
HGroup(UItem("xmove_to_button"), UItem("xtarget_position")),
HGroup(UItem("ymove_to_button"), UItem("ytarget_position")),
)
view = View(
VGroup(tgrp, cgrp),
resizable=True,
handler=self.handler_klass, # MotionControllerManagerHandler,
title="Configure Motion Controller",
)
return view
def configure_view(self):
v = View(
Item(
"axes",
style="custom",
show_label=False,
editor=ListEditor(
use_notebook=True,
dock_style="tab",
page_name=".name",
view=self.view_style,
selected="selected",
),
),
HGroup(
spring,
Item("load_button"),
Item("read_button"),
Item("apply_button"),
show_labels=False,
),
)
return v
# print [self._axes[k] for k in keys] + [self.motion_group]
# return [self._axes[k] for k in keys] + [self.motion_group]
# def _restore_fired(self):
# '''
# '''
# self.motion_controller.axes_factory()
# self.trait_property_changed('axes', None)
# for a in self.axes:
# a.load_
# def _apply_all_fired(self):
# '''
# '''
# # for a in self.axes:
# # a.upload_parameters_to_device()
# if sele
# # self.motion_controller.save()
# def _print_param_table_fired(self):
# table = []
# for a in self.axes:
# attrs, codes, params = a.load_parameters()
# table.append(params)
#
# try:
# p = '/Users/ross/Sandbox/unidex_dump.txt'
# with open(p, 'w') as f:
# for attr, code, ri in zip(attrs, codes, zip(*table)):
# l = ''.join(map('{:<20s}'.format, map(str, ri)))
# l = '{:<20s} {} - {}'.format(attr, code, l)
# f.write(l + '\n')
# print l
# except Exception, e:
# print 'exception', e
| USGSDenverPychron/pychron | pychron/managers/motion_controller_managers/motion_controller_manager.py | Python | apache-2.0 | 7,482 |
import csv
import random
import sys
import numpy as np
from src.gen_prog import GP
def rename(new_name):
def decorator(f):
f.__name__ = new_name
return f
return decorator
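# The decorated operators carry a format-string __name__ such as "({0} + {1})",
# presumably so the GP tree can render an expression by formatting its child
# subtrees into the operator's name (an inference from usage, not stated here).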
@rename("({0} + {1})")
def add(x, y):
return x + y
@rename("({0} - {1})")
def sub(x, y):
return x - y
@rename("({0} * {1})")
def mult(x, y):
return x * y
@rename("({0} / {1})")
def div(x, y):
try:
return x / y
except ZeroDivisionError:
return sys.maxsize
if __name__ == "__main__":
# only used when working with numbers
target_number = 5649163
# number based fitness - does not work with equations
def fitness(element):
generated = element.eval()
return abs(target_number - generated)
# number based fitness - does not work with equations
def length_fitness(element):
return fitness(element) + len(element.serialize())
def fitness2(element):
def func(x):
return 5 * x * x + 10 * x + 26
error = 0.
ec = func
for n in np.arange(-10., 11, 1):
error += abs(ec(n) - element.eval({'x': n}))
return error + float(len(element.serialize()))
# function set
functions = [add, sub, mult, div]
# fitness function to use
the_fitness = length_fitness
# equations
# terminals = random.sample(list(np.arange(-10, 11, 1)), 10) + ['x'] * 10
# numbers
terminals = random.sample(range(100), 10)
population = 100
depth = 5
crossover_rate = 0.9
mutation_rate = 0.01
iterations = 100
min_fitness = 0
gp = GP(terminal_set=terminals, function_set=functions, fitness=the_fitness, pop_size=population, depth=depth,
crossover_rate=crossover_rate, mutation_rate=mutation_rate, iterations=iterations, min_fitness=min_fitness)
fitness_evolution, average_fitness_evolution, best = gp.run()
print("best tree is: {}".format(str(best)))
print("fitness {}".format(the_fitness(best)))
print(fitness_evolution)
result_dir = "Results/"
res = "1"
with open(result_dir + "gp_out_{}.csv".format(res), 'w', newline="\n") as o:
out = csv.writer(o)
out.writerow(['generation', 'best_fitness', 'avg_value'])
for i, fit in enumerate(fitness_evolution):
out.writerow([i, fit, average_fitness_evolution[i]])
with open(result_dir + "gp_out_{}_info.txt".format(res), 'w', newline="\n") as o:
o.write("target number: {}\n".format(target_number))
o.write("best tree: {}\n".format(str(best)))
o.write("terminals: {}\n".format(terminals))
o.write("population: {}\n".format(population))
o.write("initial depth: {}\n".format(depth))
o.write("crossover rate: {}\n".format(crossover_rate))
o.write("mutation rate: {}\n".format(mutation_rate))
o.write("max iterations: {}\n".format(iterations))
o.write("min fitness: {}\n".format(min_fitness))
| juanpablos/CC5114-Projects | Tarea 4/Main.py | Python | mit | 2,973 |
"""Generic plugin support so we can find XBlocks.
This code is in the Runtime layer.
"""
import functools
import itertools
import logging
import pkg_resources
from xblock.internal import class_lazy
log = logging.getLogger(__name__)
PLUGIN_CACHE = {}
class PluginMissingError(Exception):
"""Raised when trying to load a plugin from an entry_point that cannot be found."""
pass
class AmbiguousPluginError(Exception):
"""Raised when a class name produces more than one entry_point."""
def __init__(self, all_entry_points):
classes = (entpt.load() for entpt in all_entry_points)
desc = ", ".join("{0.__module__}.{0.__name__}".format(cls) for cls in classes)
msg = "Ambiguous entry points for {}: {}".format(all_entry_points[0].name, desc)
super(AmbiguousPluginError, self).__init__(msg)
def default_select(identifier, all_entry_points):
"""
Raise an exception when we have ambiguous entry points.
"""
if len(all_entry_points) == 0:
raise PluginMissingError(identifier)
elif len(all_entry_points) == 1:
return all_entry_points[0]
elif len(all_entry_points) > 1:
raise AmbiguousPluginError(all_entry_points)
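# Callers of Plugin.load_class (below) may pass their own `select` policy for
# resolving ambiguous identifiers. Minimal sketch of such a callable; the
# distribution name 'my-dist' is a made-up example, not defined here:
#
#     def prefer_my_dist(identifier, all_entry_points):
#         for entry_point in all_entry_points:
#             if entry_point.dist.key == 'my-dist':
#                 return entry_point
#         return default_select(identifier, all_entry_points)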
class Plugin(object):
"""Base class for a system that uses entry_points to load plugins.
Implementing classes are expected to have the following attributes:
`entry_point`: The name of the entry point to load plugins from.
"""
entry_point = None # Should be overwritten by children classes
@class_lazy
def extra_entry_points(cls): # pylint: disable=no-self-argument
"""
Temporary entry points, for register_temp_plugin. A list of pairs,
(identifier, entry_point):
[('test1', test1_entrypoint), ('test2', test2_entrypoint), ...]
"""
return []
@classmethod
def _load_class_entry_point(cls, entry_point):
"""
Load `entry_point`, and set the `entry_point.name` as the
attribute `plugin_name` on the loaded object
"""
class_ = entry_point.load()
setattr(class_, 'plugin_name', entry_point.name)
return class_
@classmethod
def load_class(cls, identifier, default=None, select=None):
"""Load a single class specified by identifier.
If `identifier` specifies more than a single class, and `select` is not None,
then call `select` on the list of entry_points. Otherwise, choose
the first one and log a warning.
If `default` is provided, return it if no entry_point matching
`identifier` is found. Otherwise, will raise a PluginMissingError
If `select` is provided, it should be a callable of the form::
def select(identifier, all_entry_points):
# ...
return an_entry_point
The `all_entry_points` argument will be a list of all entry_points matching `identifier`
that were found, and `select` should return one of those entry_points to be
loaded. `select` should raise `PluginMissingError` if no plugin is found, or `AmbiguousPluginError`
if too many plugins are found
"""
identifier = identifier.lower()
key = (cls.entry_point, identifier)
if key not in PLUGIN_CACHE:
if select is None:
select = default_select
all_entry_points = list(pkg_resources.iter_entry_points(cls.entry_point, name=identifier))
for extra_identifier, extra_entry_point in cls.extra_entry_points:
if identifier == extra_identifier:
all_entry_points.append(extra_entry_point)
try:
selected_entry_point = select(identifier, all_entry_points)
except PluginMissingError:
if default is not None:
return default
raise
PLUGIN_CACHE[key] = cls._load_class_entry_point(selected_entry_point)
return PLUGIN_CACHE[key]
@classmethod
def load_classes(cls):
"""Load all the classes for a plugin.
Produces a sequence containing the identifiers and their corresponding
classes for all of the available instances of this plugin.
"""
all_classes = itertools.chain(
pkg_resources.iter_entry_points(cls.entry_point),
(entry_point for identifier, entry_point in cls.extra_entry_points),
)
for class_ in all_classes:
try:
yield (class_.name, cls._load_class_entry_point(class_))
except Exception: # pylint: disable=broad-except
log.warning('Unable to load %s %r', cls.__name__, class_.name, exc_info=True)
@classmethod
def register_temp_plugin(cls, class_, identifier=None, dist='xblock'):
"""Decorate a function to run with a temporary plugin available.
Use it like this in tests::
@register_temp_plugin(MyXBlockClass):
def test_the_thing():
# Here I can load MyXBlockClass by name.
"""
from mock import Mock
if identifier is None:
identifier = class_.__name__.lower()
entry_point = Mock(
dist=Mock(key=dist),
load=Mock(return_value=class_),
)
entry_point.name = identifier
def _decorator(func): # pylint: disable=C0111
@functools.wraps(func)
def _inner(*args, **kwargs): # pylint: disable=C0111
old = list(cls.extra_entry_points)
cls.extra_entry_points.append((identifier, entry_point))
try:
return func(*args, **kwargs)
finally:
cls.extra_entry_points = old
return _inner
return _decorator
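# Minimal usage sketch (the entry-point group and identifier below are
# illustrative assumptions, not defined in this module):
#
#     class MyPluginFamily(Plugin):
#         entry_point = 'my_project.plugins'
#
#     cls = MyPluginFamily.load_class('html', default=None)
#     for name, loaded in MyPluginFamily.load_classes():
#         log.info('found plugin %s: %r', name, loaded)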
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/xblock/plugin.py | Python | agpl-3.0 | 5,882 |
from flask import Flask, request, Response
import prometheus_client
from prometheus_client import start_http_server, Counter
REQUEST_COUNT = Counter('request_count', 'App Request Count',
['app_name', 'method', 'endpoint', 'http_status'])
app = Flask(__name__)
CONTENT_TYPE_LATEST = str('text/plain; version=0.0.4; charset=utf-8')
@app.after_request
def increment_request_count(response):
REQUEST_COUNT.labels('test_app', request.method, request.path,
response.status_code).inc()
return response
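# Each handled request increments one labelled sample; the /metrics endpoint
# below renders the counter in the Prometheus text exposition format, roughly
# like the line shown here (values are made up, and newer prometheus_client
# releases suffix counter samples with `_total`):
#
#     request_count{app_name="test_app",endpoint="/test",http_status="200",method="GET"} 3.0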
# Expose a metrics endpoint to return
# prometheus metrics
@app.route('/metrics')
def metrics():
return Response(prometheus_client.generate_latest(),
mimetype=CONTENT_TYPE_LATEST)
@app.route('/test')
def test():
return 'rest'
if __name__ == '__main__':
app.run(debug=True)
| amitsaha/python-web-app-recipes | telemetry/prometheus/flask/app1.py | Python | mit | 827 |
"OrderPortal: Load all CouchDB database design documents."
from __future__ import print_function, absolute_import
import os
import sys
import couchdb
from orderportal import constants
from orderportal import utils
def load_designs(db, root=None, verbose=False):
"Load all CouchDB database design documents."
if root is None:
root = os.path.join(constants.ROOT, 'designs')
for design in os.listdir(root):
views = dict()
path = os.path.join(root, design)
if not os.path.isdir(path): continue
path = os.path.join(root, design, 'views')
for filename in os.listdir(path):
name, ext = os.path.splitext(filename)
if ext != '.js': continue
with open(os.path.join(path, filename)) as codefile:
code = codefile.read()
if name.startswith('map_'):
name = name[len('map_'):]
key = 'map'
elif name.startswith('reduce_'):
name = name[len('reduce_'):]
key = 'reduce'
else:
key = 'map'
views.setdefault(name, dict())[key] = code
id = "_design/%s" % design
try:
doc = db[id]
except couchdb.http.ResourceNotFound:
if verbose: print('loading', id, file=sys.stderr)
db.save(dict(_id=id, views=views))
else:
if doc['views'] != views:
doc['views'] = views
if verbose: print('updating', id, file=sys.stderr)
db.save(doc)
elif verbose:
print('no change', id, file=sys.stderr)
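# Expected on-disk layout, inferred from the loop above (design and view
# names are illustrative; only *.js files are read):
#
#     designs/
#         orders/
#             views/
#                 map_owner.js       -> views['owner']['map']
#                 reduce_owner.js    -> views['owner']['reduce']
#                 status.js          -> views['status']['map']
#
# The resulting dict is saved as the design document '_design/orders'.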
def regenerate_views(db, root=None, verbose=False):
"Trigger CouchDB to regenerate views by accessing them."
if root is None:
root = os.path.join(constants.ROOT, 'designs')
viewnames = []
for design in os.listdir(root):
path = os.path.join(root, design)
if not os.path.isdir(path): continue
path = os.path.join(root, design, 'views')
for filename in os.listdir(path):
name, ext = os.path.splitext(filename)
if ext != '.js': continue
if name.startswith('map_'):
name = name[len('map_'):]
elif name.startswith('reduce_'):
name = name[len('reduce_'):]
viewname = design + '/' + name
if viewname not in viewnames:
viewnames.append(viewname)
for viewname in viewnames:
if verbose:
print('regenerating view', viewname)
view = db.view(viewname)
for row in view:
break
def get_args():
parser = utils.get_command_line_parser(description=
'Reload all CouchDB design documents.')
return parser.parse_args()
if __name__ == '__main__':
(options, args) = get_args()
utils.load_settings(filepath=options.settings,
verbose=options.verbose)
load_designs(utils.get_db(),
verbose=options.verbose)
regenerate_views(utils.get_db(),
verbose=options.verbose)
| ewels/OrderPortal | orderportal/scripts/load_designs.py | Python | mit | 3,124 |
from roompi.modules import RoomPiModule
class SomeModule(RoomPiModule):
module_name = 'SomeModule'
requires_thread = True | corvis/heats-roompi | testpackage/somemodule/__init__.py | Python | gpl-2.0 | 131 |
from gourmet.plugin import PrefsPlugin
from gourmet.prefs import get_prefs
import gtk
from gettext import gettext as _
from constants import PREF_NOTEBOOK, PREF_DEVTOKEN, DEFAULT_NOTEBOOK
class SL2EvernotePrefs (PrefsPlugin):
"""
A preference pane for the evernote shopping lists plugin.
    It is where the user enters their Evernote developer token and
the name of the notebook where shopping lists should be
saved.
"""
label = _("Evernote Info")
def __init__ (self, *args, **kwargs):
"""
Sets up the Evernote preference pane.
"""
# Create main widget
self.widget = gtk.VBox()
self.prefs = get_prefs()
# developer token label and help
dtHBox = gtk.HBox()
devTokenLabel = gtk.Label(_('Developer Token'))
devTokenLabel.set_alignment(xalign=0, yalign=0.5)
dtHBox.pack_start(devTokenLabel, expand=False, fill=False, padding=5)
dtEBox = gtk.EventBox()
dtEBox.connect("button_press_event", self.press_help, 'devkey')
img = gtk.Image()
img.set_from_stock(gtk.STOCK_ABOUT, gtk.ICON_SIZE_MENU)
dtEBox.add(img)
dtEBox.set_visible_window(False)
dtEBox.modify_bg(gtk.STATE_NORMAL, dtEBox.get_colormap().alloc_color("white"))
dtHBox.pack_start(dtEBox, expand=False, fill=False, padding=5)
self.widget.pack_start(dtHBox, expand=False, fill=False)
# developer token entry
self.devTokenEntry = gtk.Entry(max=0)
self.devTokenEntry.set_text(self.prefs.get(PREF_DEVTOKEN, ''))
self.devTokenEntry.connect('changed', self.save_change, PREF_DEVTOKEN)
self.widget.pack_start(self.devTokenEntry, expand=False, fill=False)
# Notebook label
nbHBox = gtk.HBox()
notebookLabel = gtk.Label(_('Shopping List Notebook'))
notebookLabel.set_alignment(xalign=0, yalign=0.5)
nbHBox.pack_start(notebookLabel, expand=False, fill=False, padding=5)
nbEBox = gtk.EventBox()
nbEBox.connect("button_press_event", self.press_help, 'notebook')
img = gtk.Image()
img.set_from_stock(gtk.STOCK_ABOUT, gtk.ICON_SIZE_MENU)
nbEBox.add(img)
nbEBox.set_visible_window(False)
nbHBox.pack_start(nbEBox, expand=False, fill=False, padding=5)
self.widget.pack_start(nbHBox, expand=False, fill=False)
# Notebook entry
self.notebookEntry = gtk.Entry(max=0)
self.notebookEntry.set_text(self.prefs.get(PREF_NOTEBOOK, DEFAULT_NOTEBOOK))
self.notebookEntry.connect('changed', self.save_change, PREF_NOTEBOOK)
self.widget.pack_start(self.notebookEntry, expand=False, fill=False)
# Show
self.widget.set_border_width(12)
self.widget.set_spacing(6)
self.widget.show_all()
def save_change(self, entry, key):
"""
        Updates the prefs with a changed value and saves them to disk.
Triggered when the user changes any of the entries.
@param entry: gtk.Entry -- the entry that was changed.
        @param key: str -- The key used to store the value in the
prefs.
"""
self.prefs[key] = entry.get_text()
self.prefs.save()
def press_help(self, widget, event, key):
"""
Displays a Help MessageDialog corresponding to the help icon
pressed.
@param widget: gtk.EventBox -- The event box that was clicked.
Not used.
@param event: gtk.gdk.Event -- The event generated. Not used.
@param key: str -- User defined key set by connect()
"""
        print 'pressed %s' % key
if key == 'devkey':
messageStr = "To access Evernote from an application that is "\
"not a webservice requires a misnamed 'Developer "\
"Token.' You can get a Developer Token from " \
"\nhttps://www.evernote.com/api/DeveloperToken.action"
else:
messageStr = 'This is the name of the notebook where the '\
"shopping lists will be posted. If it does not "\
"exist, it will be created."
message = gtk.MessageDialog(type=gtk.MESSAGE_INFO, buttons=gtk.BUTTONS_OK)
message.set_markup(messageStr)
message.connect("response", self.close_dialog)
message.run()
@staticmethod
def close_dialog(dialog, _):
"""
Closes a dialog box.
@param dialog: gtk.MessageDialog -- the dialog to be closed.
"""
dialog.destroy()
| nephlm/evernoteShopping | gourmet/plugins/sl2evernote/sl2evernotePrefs.py | Python | gpl-2.0 | 4,606 |
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
version_info = (4, 0, 6)
__version__ = '.'.join(map(str, version_info))
| bdh1011/wau | venv/lib/python2.7/site-packages/jupyter_core/version.py | Python | mit | 175 |
##
# Copyright (c) 2012-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Tests for L{txdav.common.datastore.sql}.
"""
from twext.enterprise.dal.syntax import Select, Insert, Delete
from twisted.internet.defer import inlineCallbacks
from twisted.trial.unittest import TestCase
from txdav.common.datastore.sql_tables import schema
from txdav.common.datastore.test.util import CommonCommonTests
class OracleSpecificSQLStoreTests(CommonCommonTests, TestCase):
"""
Tests for shared functionality in L{txdav.common.datastore.sql}.
"""
@inlineCallbacks
def setUp(self):
"""
Set up two stores to migrate between.
"""
yield super(OracleSpecificSQLStoreTests, self).setUp()
yield self.buildStoreAndDirectory()
@inlineCallbacks
def test_logging(self):
"""
txn.execSQL works with all logging options on.
"""
# Patch config to turn on logging then rebuild the store
self.patch(self.store, "logLabels", True)
self.patch(self.store, "logStats", True)
self.patch(self.store, "logSQL", True)
txn = self.transactionUnderTest()
cs = schema.CALENDARSERVER
version = (yield Select(
[cs.VALUE],
From=cs,
Where=cs.NAME == "VERSION",
).on(txn))
self.assertNotEqual(version, None)
self.assertEqual(len(version), 1)
self.assertEqual(len(version[0]), 1)
@inlineCallbacks
def test_delete_returning(self):
"""
        Delete with a Return clause gives back the deleted column values.
"""
txn = self.transactionUnderTest()
cs = schema.CALENDARSERVER
yield Insert(
{cs.NAME: "TEST", cs.VALUE: "Value"},
).on(txn)
yield self.commit()
txn = self.transactionUnderTest()
value = yield Delete(
From=cs,
Where=(cs.NAME == "TEST"),
Return=cs.VALUE,
).on(txn)
self.assertEqual(list(value), [["Value"]])
txn = self.transactionUnderTest()
value = yield Delete(
From=cs,
Where=(cs.NAME == "TEST"),
Return=cs.VALUE,
).on(txn)
self.assertEqual(list(value), [])
| red-hood/calendarserver | txdav/common/datastore/test/test_oracle_sql.py | Python | apache-2.0 | 2,777 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def ShowResults(urls,array_ip,target, option):
newlist=[]
ip = ""
contador = 0
try:
if option == 1:
print "\n Domains and subdomains of "+ str(target) + " are:"
#Read the list to print the value in a line
for i in urls:
ip = array_ip[contador]
print "\n"
print "\t- " + i+ " ["+ip+"]"
contador += 1
if option == 2:
print "\nDomains contained in the IP "+ str(target) + " are:"
#print "\nDomains contained in the IP {} {} are:".format(target,target)
#Read the list to print the value in a line
for i in urls:
if i not in newlist:
newlist.append(i)
print "\n"
print "\t- " + i
except Exception as e:
print e
pass | n4xh4ck5/N4xD0rk | modules/showresults/showresults.py | Python | gpl-3.0 | 753 |
from werkzeug.security import (generate_password_hash, check_password_hash)
from sqlalchemy import text
from models import User, Playlist
import json
def user_exists(engine, user):
''' Checks to see if a username already exists '''
sql = text('SELECT COUNT(*) FROM users WHERE username=:user')
with engine.connect() as con:
res = con.execute(sql, user=user).fetchone()
return res[0] != 0
def create_user(engine, user, password, email):
''' Creates a user with the given information; saves a hashed password '''
sql = text('INSERT INTO users (username, password_hash, email)'
'VALUES (:user, :pw_hash, :email)', autocommit=True)
with engine.connect() as con:
pw_hash = generate_password_hash(password)
con.execute(sql, user=user, pw_hash=pw_hash, email=email)
def user_login(engine, user, password):
'''
Validates a password with a user. Checks that the hashed passwords match
'''
sql = text('SELECT password_hash, id '
'FROM users '
'WHERE username=:user')
with engine.connect() as con:
res = con.execute(sql, user=user).fetchone()
if res and len(res) > 0 and check_password_hash(res[0], password):
return get_user_by_id(engine, res[1])
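# Minimal usage sketch of the account helpers above (the engine URL is a
# placeholder, not part of this module):
#
#     from sqlalchemy import create_engine
#     engine = create_engine('mysql://user:pass@localhost/dropmuse')
#     if not user_exists(engine, 'alice'):
#         create_user(engine, 'alice', 's3cret', 'alice@example.com')
#     user = user_login(engine, 'alice', 's3cret')  # User instance or None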
def get_user_by_id(engine, user_id):
'''
Creates a User object for user with id of user_id
'''
sql = text('SELECT id, username FROM users WHERE id=:id')
with engine.connect() as con:
res = con.execute(sql, id=user_id).fetchone()
if res:
return User(user_id, username=res[1])
def create_playlist(engine, user_id, title):
'''
Creates a playlist in the given user's account
'''
sql = text('INSERT INTO playlists (title, user_id) '
'SELECT :title, :user_id FROM DUAL WHERE '
'NOT EXISTS (SELECT 1 FROM playlists WHERE title=:title '
' AND user_id=:user_id)', autocommit=True)
with engine.connect() as con:
con.execute(sql, user_id=user_id, title=title)
def search_songs(engine, query, limit=100, offset=0):
'''
Performs search query on songs
'''
query = "%{}%".format(query.lower())
sql = text("SELECT * "
"FROM songs "
"WHERE LOWER(title) LIKE :query "
" OR LOWER(artist) LIKE :query "
" OR id IN (SELECT song_id "
" FROM keywords "
" WHERE word LIKE :query)"
"LIMIT :limit OFFSET :offset")
sql_count = text("SELECT COUNT(*) "
"FROM songs "
"WHERE LOWER(title) LIKE :query "
" OR LOWER(artist) LIKE :query ")
with engine.connect() as con:
results = con.execute(sql,
query=query,
offset=offset,
limit=limit)
results_count = con.execute(sql_count, query=query).fetchone()[0]
return results_count, list(results)
def create_follower(engine, follower, user_followed):
sql = text('INSERT INTO following(user_id, following_id) '
'VALUES (:follower, :user_followed) '
'ON DUPLICATE KEY UPDATE user_id=user_id',
autocommit=True)
with engine.connect() as con:
con.execute(sql,
follower=follower,
user_followed=user_followed)
def get_user_followers(engine, user_id):
sql = text('SELECT * '
'FROM users '
'LEFT JOIN following '
'ON users.id=following.following_id '
'WHERE following.user_id=:user_id')
with engine.connect() as con:
return con.execute(sql, user_id=user_id)
def get_user_followings(engine, user_id):
sql = text('SELECT * '
'FROM users '
'LEFT JOIN following '
'ON users.id=following.user_id '
'WHERE following.following_id=:user_id')
with engine.connect() as con:
return con.execute(sql, user_id=user_id)
def search_users(engine, query):
query = '%{}%'.format(query.lower())
sql = text('SELECT * '
'FROM users '
'WHERE LOWER(username) LIKE :query;')
with engine.connect() as con:
return list(con.execute(sql, query=query).fetchall())
def user_playlists(engine, username):
''' Returns the playlists of a user '''
# ADVANCED
profile_plists = text('SELECT playlists.*, COUNT(songs.id), SUM(duration) '
'FROM playlists '
'LEFT JOIN playlist_entry '
'ON playlists.id=playlist_entry.playlist_id '
'LEFT JOIN songs '
'ON playlist_entry.song_id=songs.id '
'WHERE playlists.user_id=(SELECT users.id '
' FROM users '
' WHERE username=:user) '
'GROUP BY playlists.id')
with engine.connect() as con:
playlists = []
for p in con.execute(profile_plists, user=username):
playlists.append(Playlist(p.id,
title=p.title,
count=p['COUNT(songs.id)'],
duration=p['SUM(duration)']))
return playlists
def add_song_to_playlist(engine, song_id, playlist_id):
''' Adds song to the given playlist '''
sql = text('INSERT INTO playlist_entry (song_id, playlist_id, position) '
'SELECT :song_id, :playlist_id, IFNULL(MAX(position), -1) + 1 '
'FROM playlist_entry '
'WHERE playlist_id=:playlist_id', autocommit=True)
with engine.connect() as con:
con.execute(sql, song_id=song_id, playlist_id=playlist_id)
def create_song(engine, title, artist, album, duration, preview_url,
spotify_id):
''' Creates a song with the given information'''
sql = text('INSERT INTO songs (title, artist, album, duration, '
' preview_url, spotify_id) '
'SELECT :title, :artist, :album, :duration, :preview_url, '
' :spotify_id '
'FROM DUAL '
'WHERE NOT EXISTS (SELECT 1 FROM songs WHERE title=:title '
' AND artist=:artist '
' AND album=:album '
' AND spotify_id=:spotify_id)',
autocommit=True)
with engine.connect() as con:
con.execute(sql, title=title, artist=artist, album=album,
duration=duration, preview_url=preview_url,
spotify_id=spotify_id)
def remove_song_from_playlist(engine, position, playlist_id):
''' Removes song to the given playlist '''
sql = text('DELETE FROM playlist_entry '
'WHERE playlist_entry.position=:position '
'AND playlist_entry.playlist_id=:playlist_id', autocommit=True)
sql2 = text('UPDATE playlist_entry '
'SET position = position - 1 '
'WHERE position>:position '
'AND playlist_id=:playlist_id', autocommit=True)
with engine.connect() as con:
con.execute(sql, position=position, playlist_id=playlist_id)
con.execute(sql2, position=position, playlist_id=playlist_id)
def move_song_in_playlist(engine, old_position, new_position, playlist_id):
# No action needed
if new_position == old_position:
return
# Need to shift entries back
elif new_position > old_position:
sql = text('UPDATE playlist_entry '
'SET position=position-1 '
'WHERE position>:old_pos AND position<=:new_pos '
'AND playlist_id=:playlist_id '
'ORDER BY position', autocommit=True)
# Need to shift entries forward
else:
sql = text('UPDATE playlist_entry '
'SET position=position+1 '
'WHERE position<:old_pos AND position>=:new_pos '
'AND playlist_id=:playlist_id '
'ORDER BY position DESC', autocommit=True)
remove = text('UPDATE playlist_entry '
'SET position=-1 '
'WHERE position=:old_pos '
'AND playlist_id=:playlist_id', autocommit=True)
replace = text('UPDATE playlist_entry '
'SET position=:new_pos '
'WHERE position=-1 '
'AND playlist_id=:playlist_id', autocommit=True)
with engine.connect() as con:
# Swap moving entry to -1
con.execute(remove, old_pos=old_position, playlist_id=playlist_id)
# Move all in-between entries
con.execute(sql,
old_pos=old_position,
new_pos=new_position,
playlist_id=playlist_id)
# Swap moving entry to correct position
con.execute(replace, new_pos=new_position, playlist_id=playlist_id)
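# Example of the three-step shuffle above, assuming 0-based positions as
# produced by add_song_to_playlist: moving the entry at position 1 to
# position 3 in [A, B, C, D]
#   1. B is parked at position -1
#   2. C and D shift back to positions 1 and 2
#   3. B is placed at position 3, giving [A, C, D, B]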
def remove_playlist_from_user(engine, user_id, playlist_id):
''' Removes playlist based on user_id '''
sql = text('DELETE FROM playlists '
'WHERE playlists.user_id=:user_id '
'AND playlists.id=:playlist_id', autocommit=True)
sql2 = text('DELETE FROM playlist_entry '
'WHERE playlist_entry.playlist_id=:playlist_id',
autocommit=True)
with engine.connect() as con:
con.execute(sql2, playlist_id=playlist_id)
con.execute(sql, user_id=user_id, playlist_id=playlist_id)
def playlist_songs(engine, playlist_id):
# ADVANCED
sql = text('SELECT *, votes.position AS vposition FROM songs '
'JOIN playlist_entry '
'ON songs.id=playlist_entry.song_id '
'LEFT JOIN votes '
'ON playlist_entry.position=votes.position '
'AND playlist_entry.playlist_id=votes.playlist_id '
'WHERE playlist_entry.playlist_id=:playlist_id '
'ORDER BY playlist_entry.position')
with engine.connect() as con:
return con.execute(sql, playlist_id=playlist_id)
def get_playlist_id(engine, playlist_name, uid):
sql = text('SELECT id '
'FROM playlists '
'WHERE title=:playlist_name AND user_id=:uid')
with engine.connect() as con:
query = con.execute(sql, playlist_name=playlist_name, uid=uid)
return query.fetchone()
def user_from_username(engine, username):
''' Checks to see if a username already exists '''
sql = text('SELECT * FROM users WHERE username=:username')
with engine.connect() as con:
res = con.execute(sql, username=username)
return res.fetchone()
def get_playlist_name(engine, playlist_id):
sql = text('SELECT title '
'FROM playlists '
'WHERE id=:playlist_id')
with engine.connect() as con:
query = con.execute(sql, playlist_id=playlist_id)
return query.fetchone()
def get_song_id(engine, trackname, trackartist):
sql = text('SELECT id '
'FROM songs '
'WHERE title=:trackname AND artist=:trackartist')
with engine.connect() as con:
query = con.execute(sql, trackname=trackname, trackartist=trackartist)
return query.fetchone()
def playlist_details(engine, playlist_id):
sql = text('SELECT * '
'FROM playlists '
'WHERE id=:playlist_id')
with engine.connect() as con:
return con.execute(sql, playlist_id=playlist_id).fetchone()
def playlist_update(engine, playlist_id, title):
sql = text('UPDATE playlists '
'SET title=:title '
'WHERE id=:playlist_id', autocommit=True)
with engine.connect() as con:
con.execute(sql, playlist_id=playlist_id, title=title)
def song_by_id(engine, song_id):
sql = text('SELECT * '
'FROM songs '
'WHERE id=:song_id')
with engine.connect() as con:
return con.execute(sql, song_id=song_id).fetchone()
def spotify_credentials_upsert(engine, user_id, token_info):
sql = text('INSERT INTO spotify_credentials(user_id, token_info) '
'VALUES (:user_id, :token_info) '
'ON DUPLICATE KEY UPDATE token_info=:token_info;',
autocommit=True)
with engine.connect() as con:
con.execute(sql, user_id=user_id, token_info=json.dumps(token_info))
def spotify_creds_for_user(engine, user_id):
sql = text('SELECT token_info '
'FROM spotify_credentials '
'WHERE user_id=:user_id;')
with engine.connect() as con:
res = con.execute(sql, user_id=user_id).first()
if res:
return json.loads(res[0])
def spotify_creds_delete(engine, user_id):
sql = text('DELETE FROM spotify_credentials '
'WHERE user_id=:user_id;',
autocommit=True)
with engine.connect() as con:
con.execute(sql, user_id=user_id)
def song_max_id(engine):
sql = text('SELECT MAX(id) '
'FROM songs;')
with engine.connect() as con:
return con.execute(sql).fetchone()[0]
def playlist_max_id(engine):
sql = text('SELECT MAX(id) '
'FROM playlists;')
with engine.connect() as con:
return con.execute(sql).fetchone()[0]
def get_playlist_interactions(engine):
'''
Returns playlist entries joined with their votes
'''
sql = text('SELECT playlist_entry.playlist_id AS playlist_id, '
' votes.position AS vote, '
' playlist_entry.song_id as song_id '
'FROM playlist_entry '
'LEFT JOIN votes '
'ON votes.playlist_id=playlist_entry.playlist_id '
'AND votes.position=playlist_entry.position;')
with engine.connect() as con:
return con.execute(sql)
def create_vote(engine, playlist_id, position):
sql = text('INSERT IGNORE INTO votes(playlist_id, position) '
'VALUES (:playlist_id, :position)', autocommit=True)
with engine.connect() as con:
con.execute(sql, playlist_id=playlist_id, position=position)
def delete_vote(engine, playlist_id, position):
sql = text('DELETE FROM votes '
'WHERE playlist_id=:playlist_id '
'AND position=:position', autocommit=True)
with engine.connect() as con:
con.execute(sql, playlist_id=playlist_id, position=position)
def song_sentiments(engine):
sql = text('SELECT id, pos, neg, neu '
'FROM songs;')
with engine.connect() as con:
return con.execute(sql).fetchall()
def song_artists(engine):
sql = text('SELECT id, artist '
'FROM songs;')
with engine.connect() as con:
return con.execute(sql).fetchall()
def song_details_many(engine, song_ids):
sql = text('SELECT * '
'FROM songs '
'WHERE id IN :song_ids')
with engine.connect() as con:
results = list(con.execute(sql, song_ids=song_ids).fetchall())
ordered = []
for i in song_ids:
try:
ordered.append(next(res for res in results if res.id == i))
except StopIteration:
print("Couldn't find song {}'s details".format(i))
return ordered
def song_lyrics(engine):
sql = text('SELECT id, lyrics '
'FROM songs;')
with engine.connect() as con:
return con.execute(sql).fetchall()
def delete_song_keywords(engine, song_id):
sql = text('DELETE FROM keywords WHERE song_id=:song_id;', autocommit=True)
with engine.connect() as con:
con.execute(sql, song_id=song_id)
def all_keywords(engine):
sql = text('SELECT * '
'FROM keywords;')
with engine.connect() as con:
return con.execute(sql).fetchall()
def song_keywords(engine, song_id):
sql = text('SELECT * '
'FROM keywords '
'WHERE song_id=:song_id;')
with engine.connect() as con:
return con.execute(sql, song_id=song_id).fetchall()
def add_song_keyword(engine, song_id, keyword, weight):
'''
keyword_tupes:
in form (keyword, weight)
'''
sql = text('INSERT INTO keywords(song_id, word, weight)'
'VALUES (:song_id, :keyword, :weight) '
'ON DUPLICATE KEY UPDATE weight=:weight;',
autocommit=True)
with engine.connect() as con:
con.execute(sql, song_id=song_id, keyword=keyword, weight=weight)
def get_wave_info(engine, song_id):
sql = text('SELECT wave_info '
'FROM songs '
'WHERE id=:song_id;')
with engine.connect() as con:
return con.execute(sql, song_id=song_id).fetchall()
def song_audio_features(engine):
sql = text('SELECT id, (tempo - min_tempo) / (max_tempo - min_tempo) t, '
' (pitch - min_pitch) / (max_pitch - min_pitch) pi, '
' (harmonic - min_harm) / (max_harm - min_harm) h, '
' (percussive - min_perc) / (max_perc - min_perc) pe '
'FROM songs, ('
' SELECT MIN(tempo) min_tempo, MAX(tempo) max_tempo, '
' MIN(pitch) min_pitch, MAX(pitch) max_pitch, '
' MIN(harmonic) min_harm, MAX(harmonic) max_harm, '
' MIN(percussive) min_perc, MAX(percussive) max_perc '
' FROM songs '
' WHERE tempo IS NOT NULL AND pitch IS NOT NULL AND '
' percussive IS NOT NULL AND harmonic IS NOT NULL '
' ) min_max')
with engine.connect() as con:
return con.execute(sql).fetchall()
| DropMuse/DropMuse | app/db_utils.py | Python | mit | 18,390 |
from typing import Callable, Iterable, TYPE_CHECKING, overload, Any, cast, Mapping, Optional
import gi
from blueman.main.indicators.IndicatorInterface import IndicatorInterface
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
from blueman.Functions import create_menuitem
if TYPE_CHECKING:
from typing_extensions import Protocol
from blueman.plugins.applet.Menu import MenuItemDict, SubmenuItemDict
class MenuItemActivator(Protocol):
@overload
def __call__(self, idx: int) -> None:
...
@overload
def __call__(self, idx: int, subid: int) -> None:
...
@overload
def build_menu(items: Iterable["MenuItemDict"], activate: "MenuItemActivator") -> Gtk.Menu:
...
@overload
def build_menu(items: Iterable["SubmenuItemDict"], activate: Callable[[int], None]) -> Gtk.Menu:
...
def build_menu(items: Iterable[Mapping[str, Any]], activate: Callable[..., None]) -> Gtk.Menu:
menu = Gtk.Menu()
for index, item in enumerate(items):
if 'text' in item and 'icon_name' in item:
gtk_item: Gtk.MenuItem = create_menuitem(item['text'], item['icon_name'])
label = gtk_item.get_child()
assert isinstance(label, Gtk.Label)
if item['markup']:
label.set_markup_with_mnemonic(item['text'])
else:
label.set_text_with_mnemonic(item['text'])
gtk_item.connect('activate', cast(Callable[[Gtk.MenuItem], None], lambda _, idx=index: activate(idx)))
if 'submenu' in item:
gtk_item.set_submenu(build_menu(item['submenu'], cast(Callable[[int], None],
lambda subid, idx=index: activate(idx, subid))))
if 'tooltip' in item:
gtk_item.props.tooltip_text = item['tooltip']
gtk_item.props.sensitive = item['sensitive']
else:
gtk_item = Gtk.SeparatorMenuItem()
gtk_item.show()
menu.append(gtk_item)
return menu
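# Shape of the `items` sequence consumed above, inferred from the key checks
# (a sketch only -- the authoritative definition is MenuItemDict in
# blueman.plugins.applet.Menu; icon names and texts are made up):
#
#     items = [
#         {"text": "Devices", "icon_name": "blueman", "markup": False,
#          "sensitive": True, "tooltip": "Known devices",
#          "submenu": [{"text": "Phone", "icon_name": "phone",
#                       "markup": False, "sensitive": True}]},
#         {},  # a dict without 'text'/'icon_name' becomes a separator
#     ]
#     menu = build_menu(items, lambda idx, subid=None: None)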
class GtkStatusIcon(IndicatorInterface):
def __init__(self, icon_name: str, on_activate_menu_item: "MenuItemActivator",
on_activate_status_icon: Callable[[], None]) -> None:
self._on_activate = on_activate_menu_item
self.indicator = Gtk.StatusIcon(icon_name=icon_name)
self.indicator.set_title('blueman')
self.indicator.connect('popup-menu', self.on_popup_menu)
self.indicator.connect('activate', lambda _status_icon: on_activate_status_icon())
self._tooltip_title = ""
self._tooltip_text = ""
self._menu: Optional[Gtk.Menu] = None
def on_popup_menu(self, _status_icon: Gtk.StatusIcon, _button: int, _activate_time: int) -> None:
if self._menu:
self._menu.popup_at_pointer(None)
def set_icon(self, icon_name: str) -> None:
self.indicator.props.icon_name = icon_name
def set_tooltip_title(self, title: str) -> None:
self._tooltip_title = title
self._update_tooltip()
def set_tooltip_text(self, text: str) -> None:
self._tooltip_text = text
self._update_tooltip()
def _update_tooltip(self) -> None:
text = self._tooltip_title
if self._tooltip_text:
text += "\n" + self._tooltip_text
self.indicator.props.tooltip_markup = text
def set_visibility(self, visible: bool) -> None:
self.indicator.props.visible = visible
def set_menu(self, menu: Iterable["MenuItemDict"]) -> None:
self._menu = build_menu(menu, self._on_activate)
| blueman-project/blueman | blueman/main/indicators/GtkStatusIcon.py | Python | gpl-3.0 | 3,623 |
# -*- encoding: utf-8 -*-
from supriya.tools import osctools
from supriya.tools.requesttools.Request import Request
class StatusRequest(Request):
r'''A /status request.
::
>>> from supriya.tools import requesttools
>>> request = requesttools.StatusRequest()
>>> request
StatusRequest()
::
>>> message = request.to_osc_message()
>>> message
OscMessage(2)
::
>>> message.address == requesttools.RequestId.STATUS
True
'''
### CLASS VARIABLES ###
__slots__ = ()
### INITIALIZER ###
def __init__(
self,
):
Request.__init__(self)
### PUBLIC METHODS ###
def to_osc_message(self):
request_id = int(self.request_id)
message = osctools.OscMessage(
request_id,
)
return message
### PUBLIC PROPERTIES ###
@property
def response_specification(self):
return None
@property
def request_id(self):
from supriya.tools import requesttools
return requesttools.RequestId.STATUS | andrewyoung1991/supriya | supriya/tools/requesttools/StatusRequest.py | Python | mit | 1,106 |
__author__ = 'alisonbnt'
from flask import g, jsonify
from flask_restful import Resource
from app import db
from conf.auth import auth
from app.resources import parser
from app.models.UserModel import User
class UsersResource(Resource):
@staticmethod
@auth.login_required
def get():
return jsonify({
'id': g.user.id,
'name': g.user.name,
'email': g.user.email,
'username': g.user.username
})
@staticmethod
@auth.login_required
def post():
parser.add_argument('email', type=str)
parser.add_argument('password', type=str)
parser.add_argument('name', type=str)
parser.add_argument('username', type=str)
args = parser.parse_args()
if args['username'] is not None :
g.user.username = args['username']
if args['password'] is not None :
g.user.password = args['password']
if args['name'] is not None :
g.user.name = args['name']
if args['email'] is not None:
g.user.email = args['email']
db.session.add(g.user)
db.session.commit()
return jsonify({'operation_status': 'SUCCESS'})
@staticmethod
def put():
# parser.add_argument('uid', type=int)
parser.add_argument('email', type=str)
parser.add_argument('password', type=str)
parser.add_argument('name', type=str)
parser.add_argument('username', type=str)
args = parser.parse_args()
if args['username'] is None or args['password'] is None or args['name'] is None or args['email'] is None:
return jsonify({'required_fields': ['username', 'password', 'name', 'email']})
teste_cliente = User(args['name'], args['email'], args['username'])
teste_cliente.hash_password(args['password'])
db.session.add(teste_cliente)
db.session.commit()
return {'user': teste_cliente.id}
@staticmethod
@auth.login_required
def delete():
parser.add_argument('username', type=str)
args = parser.parse_args()
print(args)
if args['username'] == g.user.username:
db.session.delete(g.user)
db.session.commit()
else:
return jsonify({'operation_status': 'FAILURE', 'reason': 'Confirmation failure'})
return jsonify({'operation_status': 'SUCCESS'}) | processos-2015-1/api | app/resources/user_resource.py | Python | mit | 2,417 |
# -*- coding: utf-8 -*-
"""
This is the common settings file, intended to set sane defaults. If you have a
piece of configuration that's dependent on a set of feature flags being set,
then create a function that returns the calculated value based on the value of
FEATURES[...]. Modules that extend this one can change the feature
configuration in an environment specific config file and re-calculate those
values.
We should make a method that calls all these config methods so that you just
make one call at the end of your site-specific dev file to reset all the
dependent variables (like INSTALLED_APPS) for you.
Longer TODO:
1. Right now our treatment of static content in general and in particular
course-specific static content is haphazard.
2. We should have a more disciplined approach to feature flagging, even if it
just means that we stick them in a dict called FEATURES.
3. We need to handle configuration for multiple courses. This could be as
multiple sites, but we do need a way to map their data assets.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-import, unused-wildcard-import
# Pylint gets confused by path.py instances, which report themselves as class
# objects. As a result, pylint applies the wrong regex in validating names,
# and throws spurious errors. Therefore, we disable invalid-name checking.
# pylint: disable=invalid-name
import imp
import sys
import os
from path import Path as path
from warnings import simplefilter
from django.utils.translation import ugettext_lazy as _
from .discussionsettings import *
from xmodule.modulestore.modulestore_settings import update_module_store_settings
from xmodule.modulestore.edit_info import EditInfoMixin
from openedx.core.lib.license import LicenseMixin
from lms.djangoapps.lms_xblock.mixin import LmsBlockMixin
################################### FEATURES ###################################
# The display name of the platform to be used in templates/emails/etc.
PLATFORM_NAME = "Your Platform Name Here"
CC_MERCHANT_NAME = PLATFORM_NAME
PLATFORM_FACEBOOK_ACCOUNT = "http://www.facebook.com/YourPlatformFacebookAccount"
PLATFORM_TWITTER_ACCOUNT = "@YourPlatformTwitterAccount"
ENABLE_JASMINE = False
DISCUSSION_SETTINGS = {
'MAX_COMMENT_DEPTH': 2,
}
LMS_ROOT_URL = "http://localhost:8000"
# Features
FEATURES = {
'DISPLAY_DEBUG_INFO_TO_STAFF': True,
'DISPLAY_HISTOGRAMS_TO_STAFF': False, # For large courses this slows down courseware access for staff.
'REROUTE_ACTIVATION_EMAIL': False, # nonempty string = address for all activation emails
'DEBUG_LEVEL': 0, # 0 = lowest level, least verbose, 255 = max level, most verbose
## DO NOT SET TO True IN THIS FILE
## Doing so will cause all courses to be released on production
'DISABLE_START_DATES': False, # When True, all courses will be active, regardless of start date
# for consistency in user-experience, keep the value of the following 3 settings
# in sync with the corresponding ones in cms/envs/common.py
'ENABLE_DISCUSSION_SERVICE': True,
'ENABLE_TEXTBOOK': True,
'ENABLE_STUDENT_NOTES': True, # enables the student notes API and UI.
# discussion home panel, which includes a subscription on/off setting for discussion digest emails.
# this should remain off in production until digest notifications are online.
'ENABLE_DISCUSSION_HOME_PANEL': False,
# Set this to True if you want the discussion digest emails enabled automatically for new users.
# This will be set on all new account registrations.
# It is not recommended to enable this feature if ENABLE_DISCUSSION_HOME_PANEL is not enabled, since
# subscribers who receive digests in that case will only be able to unsubscribe via links embedded
# in their emails, and they will have no way to resubscribe.
'ENABLE_DISCUSSION_EMAIL_DIGEST': False,
'ENABLE_DJANGO_ADMIN_SITE': True, # set true to enable django's admin site, even on prod (e.g. for course ops)
'ENABLE_SQL_TRACKING_LOGS': False,
'ENABLE_LMS_MIGRATION': False,
'ENABLE_MANUAL_GIT_RELOAD': False,
'ENABLE_MASQUERADE': True, # allow course staff to change to student view of courseware
'ENABLE_SYSADMIN_DASHBOARD': False, # sysadmin dashboard, to see what courses are loaded, to delete & load courses
'DISABLE_LOGIN_BUTTON': False, # used in systems where login is automatic, eg MIT SSL
    # external access methods
'AUTH_USE_OPENID': False,
'AUTH_USE_CERTIFICATES': False,
'AUTH_USE_OPENID_PROVIDER': False,
# Even though external_auth is in common, shib assumes the LMS views / urls, so it should only be enabled
# in LMS
'AUTH_USE_SHIB': False,
'AUTH_USE_CAS': False,
# This flag disables the requirement of having to agree to the TOS for users registering
# with Shib. Feature was requested by Stanford's office of general counsel
'SHIB_DISABLE_TOS': False,
# Toggles OAuth2 authentication provider
'ENABLE_OAUTH2_PROVIDER': False,
# Allows to enable an API endpoint to serve XBlock view, used for example by external applications.
    # See jquery-xblock: https://github.com/edx-solutions/jquery-xblock
'ENABLE_XBLOCK_VIEW_ENDPOINT': False,
# Allows to configure the LMS to provide CORS headers to serve requests from other domains
'ENABLE_CORS_HEADERS': False,
# Can be turned off if course lists need to be hidden. Effects views and templates.
'COURSES_ARE_BROWSABLE': True,
# Enables ability to restrict enrollment in specific courses by the user account login method
'RESTRICT_ENROLL_BY_REG_METHOD': False,
# enable analytics server.
# WARNING: THIS SHOULD ALWAYS BE SET TO FALSE UNDER NORMAL
# LMS OPERATION. See analytics.py for details about what
# this does.
'RUN_AS_ANALYTICS_SERVER_ENABLED': False,
# Flip to True when the YouTube iframe API breaks (again)
'USE_YOUTUBE_OBJECT_API': False,
# Give a UI to show a student's submission history in a problem by the
# Staff Debug tool.
'ENABLE_STUDENT_HISTORY_VIEW': True,
# Provide a UI to allow users to submit feedback from the LMS (left-hand help modal)
'ENABLE_FEEDBACK_SUBMISSION': False,
# Turn on a page that lets staff enter Python code to be run in the
# sandbox, for testing whether it's enabled properly.
'ENABLE_DEBUG_RUN_PYTHON': False,
    # Enable URL that shows information about the status of various services
'ENABLE_SERVICE_STATUS': False,
# Don't autoplay videos for students
'AUTOPLAY_VIDEOS': False,
# Enable instructor dash to submit background tasks
'ENABLE_INSTRUCTOR_BACKGROUND_TASKS': True,
# Enable instructor to assign individual due dates
# Note: In order for this feature to work, you must also add
# 'courseware.student_field_overrides.IndividualStudentOverrideProvider' to
# the setting FIELD_OVERRIDE_PROVIDERS, in addition to setting this flag to
# True.
'INDIVIDUAL_DUE_DATES': False,
# Enable Custom Courses for EdX
'CUSTOM_COURSES_EDX': False,
# Toggle to enable certificates of courses on dashboard
'ENABLE_VERIFIED_CERTIFICATES': False,
# for load testing
'AUTOMATIC_AUTH_FOR_TESTING': False,
# Toggle the availability of the shopping cart page
'ENABLE_SHOPPING_CART': False,
# Toggle storing detailed billing information
'STORE_BILLING_INFO': False,
# Enable flow for payments for course registration (DIFFERENT from verified student flow)
'ENABLE_PAID_COURSE_REGISTRATION': False,
# Enable the display of cosmetic course price display (set in course advanced settings)
'ENABLE_COSMETIC_DISPLAY_PRICE': False,
# Automatically approve student identity verification attempts
'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': False,
# Disable instructor dash buttons for downloading course data
# when enrollment exceeds this number
'MAX_ENROLLMENT_INSTR_BUTTONS': 200,
# Grade calculation started from the instructor dashboard will write grades
# CSV files to the configured storage backend and give links for downloads.
'ENABLE_GRADE_DOWNLOADS': False,
# whether to use password policy enforcement or not
'ENFORCE_PASSWORD_POLICY': True,
# Give course staff unrestricted access to grade downloads (if set to False,
# only edX superusers can perform the downloads)
'ALLOW_COURSE_STAFF_GRADE_DOWNLOADS': False,
'ENABLED_PAYMENT_REPORTS': [
"refund_report",
"itemized_purchase_report",
"university_revenue_share",
"certificate_status"
],
# Turn off account locking if failed login attempts exceeds a limit
'ENABLE_MAX_FAILED_LOGIN_ATTEMPTS': True,
# Hide any Personally Identifiable Information from application logs
'SQUELCH_PII_IN_LOGS': True,
# Toggles the embargo functionality, which blocks users from
# the site or courses based on their location.
'EMBARGO': False,
# Whether the Wiki subsystem should be accessible via the direct /wiki/ paths. Setting this to True means
# that people can submit content and modify the Wiki in any arbitrary manner. We're leaving this as True in the
# defaults, so that we maintain current behavior
'ALLOW_WIKI_ROOT_ACCESS': True,
# Turn on/off Microsites feature
'USE_MICROSITES': False,
# Turn on third-party auth. Disabled for now because full implementations are not yet available. Remember to syncdb
# if you enable this; we don't create tables by default.
'ENABLE_THIRD_PARTY_AUTH': False,
# Toggle to enable alternate urls for marketing links
'ENABLE_MKTG_SITE': False,
# Prevent concurrent logins per user
'PREVENT_CONCURRENT_LOGINS': True,
# Turn on Advanced Security by default
'ADVANCED_SECURITY': True,
# When a logged in user goes to the homepage ('/') should the user be
# redirected to the dashboard - this is default Open edX behavior. Set to
# False to not redirect the user
'ALWAYS_REDIRECT_HOMEPAGE_TO_DASHBOARD_FOR_AUTHENTICATED_USER': True,
# When a user goes to the homepage ('/') the user sees the
# courses listed in the announcement dates order - this is default Open edX behavior.
# Set to True to change the course sorting behavior by their start dates, latest first.
'ENABLE_COURSE_SORTING_BY_START_DATE': True,
# Expose Mobile REST API. Note that if you use this, you must also set
# ENABLE_OAUTH2_PROVIDER to True
'ENABLE_MOBILE_REST_API': False,
# Enable the combined login/registration form
'ENABLE_COMBINED_LOGIN_REGISTRATION': False,
'ENABLE_COMBINED_LOGIN_REGISTRATION_FOOTER': False,
# Enable organizational email opt-in
'ENABLE_MKTG_EMAIL_OPT_IN': False,
# Show a section in the membership tab of the instructor dashboard
# to allow an upload of a CSV file that contains a list of new accounts to create
# and register for course.
'ALLOW_AUTOMATED_SIGNUPS': False,
# Enable display of enrollment counts in instructor dash, analytics section
'DISPLAY_ANALYTICS_ENROLLMENTS': True,
# Show the mobile app links in the footer
'ENABLE_FOOTER_MOBILE_APP_LINKS': False,
# Let students save and manage their annotations
'ENABLE_EDXNOTES': False,
# Milestones application flag
'MILESTONES_APP': False,
# Organizations application flag
'ORGANIZATIONS_APP': False,
# Prerequisite courses feature flag
'ENABLE_PREREQUISITE_COURSES': False,
# For easily adding modes to courses during acceptance testing
'MODE_CREATION_FOR_TESTING': False,
# For caching programs in contexts where the LMS can only
# be reached over HTTP.
'EXPOSE_CACHE_PROGRAMS_ENDPOINT': False,
# Courseware search feature
'ENABLE_COURSEWARE_SEARCH': False,
# Dashboard search feature
'ENABLE_DASHBOARD_SEARCH': False,
# log all information from cybersource callbacks
'LOG_POSTPAY_CALLBACKS': True,
# enable beacons for video timing statistics
'ENABLE_VIDEO_BEACON': False,
# enable beacons for lms onload event statistics
'ENABLE_ONLOAD_BEACON': False,
# Toggle platform-wide course licensing
'LICENSING': False,
# Certificates Web/HTML Views
'CERTIFICATES_HTML_VIEW': False,
# Batch-Generated Certificates from Instructor Dashboard
'CERTIFICATES_INSTRUCTOR_GENERATION': False,
# Course discovery feature
'ENABLE_COURSE_DISCOVERY': False,
# Setting for overriding default filtering facets for Course discovery
# COURSE_DISCOVERY_FILTERS = ["org", "language", "modes"]
# Software secure fake page feature flag
'ENABLE_SOFTWARE_SECURE_FAKE': False,
# Teams feature
'ENABLE_TEAMS': True,
# Show video bumper in LMS
'ENABLE_VIDEO_BUMPER': False,
# How many seconds to show the bumper again, default is 7 days:
'SHOW_BUMPER_PERIODICITY': 7 * 24 * 3600,
# Special Exams, aka Timed and Proctored Exams
'ENABLE_SPECIAL_EXAMS': False,
# Enable OpenBadge support. See the BADGR_* settings later in this file.
'ENABLE_OPENBADGES': False,
# Enable LTI Provider feature.
'ENABLE_LTI_PROVIDER': False,
# Show the language selector in the header
'SHOW_HEADER_LANGUAGE_SELECTOR': False,
# Show the language selector in the footer
'SHOW_FOOTER_LANGUAGE_SELECTOR': False,
# Write new CSM history to the extended table.
# This will eventually default to True and may be
# removed since all installs should have the separate
# extended history table.
'ENABLE_CSMH_EXTENDED': False,
# Read from both the CSMH and CSMHE history tables.
# This is the default, but can be disabled if all history
# lives in the Extended table, saving the frontend from
# making multiple queries.
'ENABLE_READING_FROM_MULTIPLE_HISTORY_TABLES': True,
# Display the 'Analytics' tab in the instructor dashboard for CCX courses.
# Note: This has no effect unless ANALYTICS_DASHBOARD_URL is already set,
# because without that setting, the tab does not show up for any courses.
'ENABLE_CCX_ANALYTICS_DASHBOARD_URL': False,
# Set this to False to facilitate cleaning up invalid xml from your modulestore.
'ENABLE_XBLOCK_XML_VALIDATION': True,
# Allow public account creation
'ALLOW_PUBLIC_ACCOUNT_CREATION': True,
# Enable footer banner for cookie consent.
# See https://cookieconsent.insites.com/ for more.
'ENABLE_COOKIE_CONSENT': False,
# Whether or not the dynamic EnrollmentTrackUserPartition should be registered.
'ENABLE_ENROLLMENT_TRACK_USER_PARTITION': True,
# Enable one click program purchase
# See LEARNER-493
'ENABLE_ONE_CLICK_PROGRAM_PURCHASE': False,
# Whether to display account activation notification on dashboard.
'DISPLAY_ACCOUNT_ACTIVATION_MESSAGE_ON_SIDEBAR': False,
# Allow users to change their email address.
'ALLOW_EMAIL_ADDRESS_CHANGE': True,
# Whether to check the "Notify users by email" checkbox in the batch enrollment form
# in the instructor dashboard.
'BATCH_ENROLLMENT_NOTIFY_USERS_DEFAULT': True,
}
# Settings for the course reviews tool template and identification key, set either to None to disable course reviews
COURSE_REVIEWS_TOOL_PROVIDER_FRAGMENT_NAME = 'coursetalk-reviews-fragment.html'
COURSE_REVIEWS_TOOL_PROVIDER_PLATFORM_KEY = 'edx'
# Ignore static asset files on import which match this pattern
ASSET_IGNORE_REGEX = r"(^\._.*$)|(^\.DS_Store$)|(^.*~$)"
# Used for A/B testing
DEFAULT_GROUPS = []
# If this is true, random scores will be generated for the purpose of debugging the profile graphs
GENERATE_PROFILE_SCORES = False
# Used with XQueue
XQUEUE_WAITTIME_BETWEEN_REQUESTS = 5 # seconds
# Used with Email sending
RETRY_ACTIVATION_EMAIL_MAX_ATTEMPTS = 5
RETRY_ACTIVATION_EMAIL_TIMEOUT = 0.5
############################# SET PATH INFORMATION #############################
PROJECT_ROOT = path(__file__).abspath().dirname().dirname() # /edx-platform/lms
REPO_ROOT = PROJECT_ROOT.dirname()
COMMON_ROOT = REPO_ROOT / "common"
OPENEDX_ROOT = REPO_ROOT / "openedx"
ENV_ROOT = REPO_ROOT.dirname() # virtualenv dir /edx-platform is in
COURSES_ROOT = ENV_ROOT / "data"
NODE_MODULES_ROOT = REPO_ROOT / "node_modules"
DATA_DIR = COURSES_ROOT
# TODO: Remove the rest of the sys.path modification here and in cms/envs/common.py
sys.path.append(REPO_ROOT)
sys.path.append(PROJECT_ROOT / 'djangoapps')
sys.path.append(COMMON_ROOT / 'djangoapps')
# For Node.js
system_node_path = os.environ.get("NODE_PATH", NODE_MODULES_ROOT)
node_paths = [
COMMON_ROOT / "static/js/vendor",
COMMON_ROOT / "static/coffee/src",
system_node_path,
]
NODE_PATH = ':'.join(node_paths)
# For geolocation ip database
GEOIP_PATH = REPO_ROOT / "common/static/data/geoip/GeoIP.dat"
GEOIPV6_PATH = REPO_ROOT / "common/static/data/geoip/GeoIPv6.dat"
# Where to look for a status message
STATUS_MESSAGE_PATH = ENV_ROOT / "status_message.json"
############################ Global Database Configuration #####################
DATABASE_ROUTERS = [
'openedx.core.lib.django_courseware_routers.StudentModuleHistoryExtendedRouter',
]
############################ OpenID Provider ##################################
OPENID_PROVIDER_TRUSTED_ROOTS = ['cs50.net', '*.cs50.net']
############################ OAUTH2 Provider ###################################
# OpenID Connect issuer ID. Normally the URL of the authentication endpoint.
OAUTH_OIDC_ISSUER = 'https://example.com/oauth2'
# OpenID Connect claim handlers
OAUTH_OIDC_ID_TOKEN_HANDLERS = (
'edx_oauth2_provider.oidc.handlers.BasicIDTokenHandler',
'edx_oauth2_provider.oidc.handlers.ProfileHandler',
'edx_oauth2_provider.oidc.handlers.EmailHandler',
'oauth2_handler.IDTokenHandler'
)
OAUTH_OIDC_USERINFO_HANDLERS = (
'edx_oauth2_provider.oidc.handlers.BasicUserInfoHandler',
'edx_oauth2_provider.oidc.handlers.ProfileHandler',
'edx_oauth2_provider.oidc.handlers.EmailHandler',
'oauth2_handler.UserInfoHandler'
)
OAUTH_EXPIRE_CONFIDENTIAL_CLIENT_DAYS = 365
OAUTH_EXPIRE_PUBLIC_CLIENT_DAYS = 30
################################## DJANGO OAUTH TOOLKIT #######################################
OAUTH2_PROVIDER = {
'OAUTH2_VALIDATOR_CLASS': 'openedx.core.djangoapps.oauth_dispatch.dot_overrides.EdxOAuth2Validator',
'SCOPES': {
'read': 'Read scope',
'write': 'Write scope',
'email': 'Email scope',
# conform profile scope message that is presented to end-user
# to lms/templates/provider/authorize.html. This may be revised later.
'profile': 'Read your user profile',
},
}
# This is required for the migrations in oauth_dispatch.models
# otherwise it fails saying this attribute is not present in Settings
OAUTH2_PROVIDER_APPLICATION_MODEL = 'oauth2_provider.Application'
################################## TEMPLATE CONFIGURATION #####################################
# Mako templating
# TODO: Move the Mako templating into a different engine in TEMPLATES below.
import tempfile
MAKO_MODULE_DIR = os.path.join(tempfile.gettempdir(), 'mako_lms')
MAKO_TEMPLATES = {}
MAKO_TEMPLATES['main'] = [
PROJECT_ROOT / 'templates',
COMMON_ROOT / 'templates',
COMMON_ROOT / 'lib' / 'capa' / 'capa' / 'templates',
COMMON_ROOT / 'djangoapps' / 'pipeline_mako' / 'templates',
OPENEDX_ROOT / 'core' / 'djangoapps' / 'cors_csrf' / 'templates',
OPENEDX_ROOT / 'core' / 'djangoapps' / 'dark_lang' / 'templates',
OPENEDX_ROOT / 'core' / 'lib' / 'license' / 'templates',
]
# Django templating
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# Don't look for template source files inside installed applications.
'APP_DIRS': False,
# Instead, look for template source files in these dirs.
'DIRS': [
PROJECT_ROOT / "templates",
COMMON_ROOT / 'templates',
COMMON_ROOT / 'lib' / 'capa' / 'capa' / 'templates',
COMMON_ROOT / 'djangoapps' / 'pipeline_mako' / 'templates',
COMMON_ROOT / 'static', # required to statically include common Underscore templates
],
# Options specific to this backend.
'OPTIONS': {
'loaders': [
# We have to use mako-aware template loaders to be able to include
# mako templates inside django templates (such as main_django.html).
'openedx.core.djangoapps.theming.template_loaders.ThemeTemplateLoader',
'edxmako.makoloader.MakoFilesystemLoader',
'edxmako.makoloader.MakoAppDirectoriesLoader',
],
'context_processors': [
'django.template.context_processors.request',
'django.template.context_processors.static',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.i18n',
'django.contrib.auth.context_processors.auth', # this is required for admin
'django.template.context_processors.csrf',
# Added for django-wiki
'django.template.context_processors.media',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'sekizai.context_processors.sekizai',
# Hack to get required link URLs to password reset templates
'edxmako.shortcuts.marketing_link_context_processor',
# Shoppingcart processor (detects if request.user has a cart)
'shoppingcart.context_processor.user_has_cart_context_processor',
# Timezone processor (sends language and time_zone preference)
'courseware.context_processor.user_timezone_locale_prefs',
                # Allows the Open edX footer to be leveraged in Django templates.
'edxmako.shortcuts.footer_context_processor',
# Online contextual help
'help_tokens.context_processor',
'openedx.core.djangoapps.site_configuration.context_processors.configuration_context'
],
# Change 'debug' in your environment settings files - not here.
'debug': False
}
}
]
DEFAULT_TEMPLATE_ENGINE = TEMPLATES[0]
# The template used to render a web fragment as a standalone page
STANDALONE_FRAGMENT_VIEW_TEMPLATE = 'fragment-view-chromeless.html'
###############################################################################################
# use the ratelimit backend to prevent brute force attacks
AUTHENTICATION_BACKENDS = (
'ratelimitbackend.backends.RateLimitModelBackend',
)
STUDENT_FILEUPLOAD_MAX_SIZE = 4 * 1000 * 1000 # 4 MB
MAX_FILEUPLOADS_PER_INPUT = 20
# Dev machines shouldn't need the book
# BOOK_URL = '/static/book/'
BOOK_URL = 'https://mitxstatic.s3.amazonaws.com/book_images/' # For AWS deploys
RSS_TIMEOUT = 600
# Configuration option for when we want to grab server error pages
STATIC_GRAB = False
DEV_CONTENT = True
EDX_ROOT_URL = ''
LOGIN_REDIRECT_URL = EDX_ROOT_URL + '/login'
LOGIN_URL = EDX_ROOT_URL + '/login'
COURSE_NAME = "6.002_Spring_2012"
COURSE_NUMBER = "6.002x"
COURSE_TITLE = "Circuits and Electronics"
### Dark code. Should be enabled in local settings for devel.
ENABLE_MULTICOURSE = False # set to False to disable multicourse display (see lib.util.views.edXhome)
WIKI_ENABLED = False
###
COURSE_DEFAULT = '6.002x_Fall_2012'
COURSE_SETTINGS = {
'6.002x_Fall_2012': {
'number': '6.002x',
'title': 'Circuits and Electronics',
'xmlpath': '6002x/',
'location': 'i4x://edx/6002xs12/course/6.002x_Fall_2012',
}
}
COURSE_MODE_DEFAULTS = {
'bulk_sku': None,
'currency': 'usd',
'description': None,
'expiration_datetime': None,
'min_price': 0,
'name': _('Audit'),
'sku': None,
'slug': 'audit',
'suggested_prices': '',
}
# IP addresses that are allowed to reload the course, etc.
# TODO (vshnayder): Will probably need to change as we get real access control in.
LMS_MIGRATION_ALLOWED_IPS = []
# These are standard regexes for pulling out info like course_ids, usage_ids, etc.
# They are used so that URLs with deprecated-format strings still work.
# Note: these intentionally greedily grab all chars up to the next slash including any pluses
# DHM: I really wanted to ensure the separators were the same (+ or /) but all patterns I tried had
# too many inadvertent side effects :-(
COURSE_KEY_PATTERN = r'(?P<course_key_string>[^/+]+(/|\+)[^/+]+(/|\+)[^/?]+)'
COURSE_ID_PATTERN = COURSE_KEY_PATTERN.replace('course_key_string', 'course_id')
COURSE_KEY_REGEX = COURSE_KEY_PATTERN.replace('P<course_key_string>', ':')
USAGE_KEY_PATTERN = r'(?P<usage_key_string>(?:i4x://?[^/]+/[^/]+/[^/]+/[^@]+(?:@[^/]+)?)|(?:[^/]+))'
ASSET_KEY_PATTERN = r'(?P<asset_key_string>(?:/?c4x(:/)?/[^/]+/[^/]+/[^/]+/[^@]+(?:@[^/]+)?)|(?:[^/]+))'
USAGE_ID_PATTERN = r'(?P<usage_id>(?:i4x://?[^/]+/[^/]+/[^/]+/[^@]+(?:@[^/]+)?)|(?:[^/]+))'
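# Illustrative (not exhaustive) examples of strings these patterns accept:
# COURSE_KEY_PATTERN matches both old-style ids such as "edX/DemoX/Demo_Course"
# and new-style ids such as "course-v1:edX+DemoX+Demo_Course", since "+" and "/"
# satisfy the same separator alternation.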
# The space is required for space-dependent languages like Arabic and Farsi.
# However, backward compatibility with Ficus and older releases is still maintained (space is still not valid)
# in the AccountCreationForm and the user_api through the ENABLE_UNICODE_USERNAME feature flag.
USERNAME_REGEX_PARTIAL = r'[\w .@_+-]+'
USERNAME_PATTERN = r'(?P<username>{regex})'.format(regex=USERNAME_REGEX_PARTIAL)
############################## EVENT TRACKING #################################
LMS_SEGMENT_KEY = None
# FIXME: Should we be doing this truncation?
TRACK_MAX_EVENT = 50000
DEBUG_TRACK_LOG = False
TRACKING_BACKENDS = {
'logger': {
'ENGINE': 'track.backends.logger.LoggerBackend',
'OPTIONS': {
'name': 'tracking'
}
}
}
# We're already logging events, and we don't want to capture user
# names/passwords. Heartbeat events are likely not interesting.
TRACKING_IGNORE_URL_PATTERNS = [r'^/event', r'^/login', r'^/heartbeat', r'^/segmentio/event', r'^/performance']
EVENT_TRACKING_ENABLED = True
EVENT_TRACKING_BACKENDS = {
'tracking_logs': {
'ENGINE': 'eventtracking.backends.routing.RoutingBackend',
'OPTIONS': {
'backends': {
'logger': {
'ENGINE': 'eventtracking.backends.logger.LoggerBackend',
'OPTIONS': {
'name': 'tracking',
'max_event_size': TRACK_MAX_EVENT,
}
}
},
'processors': [
{'ENGINE': 'track.shim.LegacyFieldMappingProcessor'},
{'ENGINE': 'track.shim.PrefixedEventProcessor'}
]
}
},
'segmentio': {
'ENGINE': 'eventtracking.backends.routing.RoutingBackend',
'OPTIONS': {
'backends': {
'segment': {'ENGINE': 'eventtracking.backends.segment.SegmentBackend'}
},
'processors': [
{
'ENGINE': 'eventtracking.processors.whitelist.NameWhitelistProcessor',
'OPTIONS': {
'whitelist': []
}
},
{
'ENGINE': 'track.shim.GoogleAnalyticsProcessor'
}
]
}
}
}
EVENT_TRACKING_PROCESSORS = []
# Backwards compatibility with ENABLE_SQL_TRACKING_LOGS feature flag.
# In the future, adding the backend to TRACKING_BACKENDS should be enough.
if FEATURES.get('ENABLE_SQL_TRACKING_LOGS'):
TRACKING_BACKENDS.update({
'sql': {
'ENGINE': 'track.backends.django.DjangoBackend'
}
})
EVENT_TRACKING_BACKENDS.update({
'sql': {
'ENGINE': 'track.backends.django.DjangoBackend'
}
})
TRACKING_SEGMENTIO_WEBHOOK_SECRET = None
TRACKING_SEGMENTIO_ALLOWED_TYPES = ['track']
TRACKING_SEGMENTIO_DISALLOWED_SUBSTRING_NAMES = ['.bi.']
TRACKING_SEGMENTIO_SOURCE_MAP = {
'analytics-android': 'mobile',
'analytics-ios': 'mobile',
}
######################## GOOGLE ANALYTICS ###########################
GOOGLE_ANALYTICS_ACCOUNT = None
GOOGLE_ANALYTICS_LINKEDIN = 'GOOGLE_ANALYTICS_LINKEDIN_DUMMY'
######################## OPTIMIZELY ###########################
OPTIMIZELY_PROJECT_ID = None
######################## subdomain specific settings ###########################
COURSE_LISTINGS = {}
VIRTUAL_UNIVERSITIES = []
############# XBlock Configuration ##########
# Import after sys.path fixup
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.modulestore import prefer_xmodules
from xmodule.x_module import XModuleMixin
# These are the Mixins that should be added to every XBlock.
# This should be moved into an XBlock Runtime/Application object
# once the responsibility of XBlock creation is moved out of modulestore - cpennington
XBLOCK_MIXINS = (LmsBlockMixin, InheritanceMixin, XModuleMixin, EditInfoMixin)
# Allow any XBlock in the LMS
XBLOCK_SELECT_FUNCTION = prefer_xmodules
# Paths to wrapper methods which should be applied to every XBlock's FieldData.
XBLOCK_FIELD_DATA_WRAPPERS = ()
############# ModuleStore Configuration ##########
MODULESTORE_BRANCH = 'published-only'
CONTENTSTORE = None
DOC_STORE_CONFIG = {
'host': 'localhost',
'db': 'xmodule',
'collection': 'modulestore',
    # If 'asset_collection' is defined, it will be used
# as the collection name for asset metadata.
# Otherwise, a default collection name will be used.
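    # An illustrative override (the collection name here is hypothetical,
    # not the built-in default):
    # 'asset_collection': 'assetstore',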
}
MODULESTORE = {
'default': {
'ENGINE': 'xmodule.modulestore.mixed.MixedModuleStore',
'OPTIONS': {
'mappings': {},
'stores': [
{
'NAME': 'split',
'ENGINE': 'xmodule.modulestore.split_mongo.split_draft.DraftVersioningModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': {
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
'fs_root': DATA_DIR,
'render_template': 'edxmako.shortcuts.render_to_string',
}
},
{
'NAME': 'draft',
'ENGINE': 'xmodule.modulestore.mongo.DraftMongoModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': {
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
'fs_root': DATA_DIR,
'render_template': 'edxmako.shortcuts.render_to_string',
}
}
]
}
}
}
#################### Python sandbox ############################################
CODE_JAIL = {
# Path to a sandboxed Python executable. None means don't bother.
'python_bin': None,
# User to run as in the sandbox.
'user': 'sandbox',
# Configurable limits.
'limits': {
# How many CPU seconds can jailed code use?
'CPU': 1,
},
}
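# A production-style override might look like the following sketch (the
# python_bin path is illustrative; point it at whatever sandboxed interpreter
# you actually provision):
#
# CODE_JAIL = {
#     'python_bin': '/path/to/sandbox/venv/bin/python',
#     'user': 'sandbox',
#     'limits': {'CPU': 1},
# }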
# Some courses are allowed to run unsafe code. This is a list of regexes; at least
# one of them must match the course id for that course to run unsafe code.
#
# For example:
#
# COURSES_WITH_UNSAFE_CODE = [
# r"Harvard/XY123.1/.*"
# ]
COURSES_WITH_UNSAFE_CODE = []
############################### DJANGO BUILT-INS ###############################
# Change DEBUG in your environment settings files, not here
DEBUG = False
USE_TZ = True
SESSION_COOKIE_SECURE = False
SESSION_SAVE_EVERY_REQUEST = False
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
# CMS base
CMS_BASE = 'localhost:8001'
# Site info
SITE_NAME = "example.com"
HTTPS = 'on'
ROOT_URLCONF = 'lms.urls'
# NOTE: Please set ALLOWED_HOSTS to some sane value, as we do not allow the default '*'
# Platform Email
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DEFAULT_FROM_EMAIL = '[email protected]'
DEFAULT_FEEDBACK_EMAIL = '[email protected]'
SERVER_EMAIL = '[email protected]'
TECH_SUPPORT_EMAIL = '[email protected]'
CONTACT_EMAIL = '[email protected]'
BUGS_EMAIL = '[email protected]'
UNIVERSITY_EMAIL = '[email protected]'
PRESS_EMAIL = '[email protected]'
FINANCE_EMAIL = ''
# Platform mailing address
CONTACT_MAILING_ADDRESS = ''
# Account activation email sender address
ACTIVATION_EMAIL_FROM_ADDRESS = ''
ADMINS = ()
MANAGERS = ADMINS
# Static content
STATIC_URL = '/static/'
STATIC_ROOT = ENV_ROOT / "staticfiles"
STATICFILES_DIRS = [
COMMON_ROOT / "static",
PROJECT_ROOT / "static",
]
FAVICON_PATH = 'images/favicon.ico'
DEFAULT_COURSE_ABOUT_IMAGE_URL = 'images/pencils.jpg'
# User-uploaded content
MEDIA_ROOT = '/edx/var/edxapp/media/'
MEDIA_URL = '/media/'
# Locale/Internationalization
TIME_ZONE = 'America/New_York' # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
LANGUAGE_CODE = 'en' # http://www.i18nguy.com/unicode/language-identifiers.html
# these languages display right to left
LANGUAGES_BIDI = ("he", "ar", "fa", "ur", "fa-ir", "rtl")
LANGUAGE_COOKIE = "openedx-language-preference"
# Sourced from http://www.localeplanet.com/icu/ and wikipedia
LANGUAGES = (
('en', u'English'),
('rtl', u'Right-to-Left Test Language'),
    ('eo', u'Dummy Language (Esperanto)'), # Dummy language used for testing
('fake2', u'Fake translations'), # Another dummy language for testing (not pushed to prod)
('am', u'አማርኛ'), # Amharic
('ar', u'العربية'), # Arabic
('az', u'azərbaycanca'), # Azerbaijani
('bg-bg', u'български (България)'), # Bulgarian (Bulgaria)
('bn-bd', u'বাংলা (বাংলাদেশ)'), # Bengali (Bangladesh)
('bn-in', u'বাংলা (ভারত)'), # Bengali (India)
('bs', u'bosanski'), # Bosnian
('ca', u'Català'), # Catalan
('ca@valencia', u'Català (València)'), # Catalan (Valencia)
('cs', u'Čeština'), # Czech
('cy', u'Cymraeg'), # Welsh
('da', u'dansk'), # Danish
('de-de', u'Deutsch (Deutschland)'), # German (Germany)
('el', u'Ελληνικά'), # Greek
('en-uk', u'English (United Kingdom)'), # English (United Kingdom)
('en@lolcat', u'LOLCAT English'), # LOLCAT English
('en@pirate', u'Pirate English'), # Pirate English
('es-419', u'Español (Latinoamérica)'), # Spanish (Latin America)
('es-ar', u'Español (Argentina)'), # Spanish (Argentina)
('es-ec', u'Español (Ecuador)'), # Spanish (Ecuador)
('es-es', u'Español (España)'), # Spanish (Spain)
('es-mx', u'Español (México)'), # Spanish (Mexico)
('es-pe', u'Español (Perú)'), # Spanish (Peru)
('et-ee', u'Eesti (Eesti)'), # Estonian (Estonia)
('eu-es', u'euskara (Espainia)'), # Basque (Spain)
('fa', u'فارسی'), # Persian
('fa-ir', u'فارسی (ایران)'), # Persian (Iran)
('fi-fi', u'Suomi (Suomi)'), # Finnish (Finland)
('fil', u'Filipino'), # Filipino
('fr', u'Français'), # French
('gl', u'Galego'), # Galician
('gu', u'ગુજરાતી'), # Gujarati
('he', u'עברית'), # Hebrew
('hi', u'हिन्दी'), # Hindi
('hr', u'hrvatski'), # Croatian
('hu', u'magyar'), # Hungarian
('hy-am', u'Հայերեն (Հայաստան)'), # Armenian (Armenia)
('id', u'Bahasa Indonesia'), # Indonesian
('it-it', u'Italiano (Italia)'), # Italian (Italy)
('ja-jp', u'日本語 (日本)'), # Japanese (Japan)
('kk-kz', u'қазақ тілі (Қазақстан)'), # Kazakh (Kazakhstan)
('km-kh', u'ភាសាខ្មែរ (កម្ពុជា)'), # Khmer (Cambodia)
('kn', u'ಕನ್ನಡ'), # Kannada
('ko-kr', u'한국어 (대한민국)'), # Korean (Korea)
('lt-lt', u'Lietuvių (Lietuva)'), # Lithuanian (Lithuania)
('ml', u'മലയാളം'), # Malayalam
('mn', u'Монгол хэл'), # Mongolian
('mr', u'मराठी'), # Marathi
('ms', u'Bahasa Melayu'), # Malay
('nb', u'Norsk bokmål'), # Norwegian Bokmål
('ne', u'नेपाली'), # Nepali
('nl-nl', u'Nederlands (Nederland)'), # Dutch (Netherlands)
('or', u'ଓଡ଼ିଆ'), # Oriya
('pl', u'Polski'), # Polish
('pt-br', u'Português (Brasil)'), # Portuguese (Brazil)
('pt-pt', u'Português (Portugal)'), # Portuguese (Portugal)
('ro', u'română'), # Romanian
('ru', u'Русский'), # Russian
('si', u'සිංහල'), # Sinhala
('sk', u'Slovenčina'), # Slovak
('sl', u'Slovenščina'), # Slovenian
('sq', u'shqip'), # Albanian
('sr', u'Српски'), # Serbian
('sv', u'svenska'), # Swedish
('sw', u'Kiswahili'), # Swahili
('ta', u'தமிழ்'), # Tamil
('te', u'తెలుగు'), # Telugu
('th', u'ไทย'), # Thai
('tr-tr', u'Türkçe (Türkiye)'), # Turkish (Turkey)
    ('uk', u'Українська'), # Ukrainian
('ur', u'اردو'), # Urdu
('vi', u'Tiếng Việt'), # Vietnamese
('uz', u'Ўзбек'), # Uzbek
('zh-cn', u'中文 (简体)'), # Chinese (China)
('zh-hk', u'中文 (香港)'), # Chinese (Hong Kong)
('zh-tw', u'中文 (台灣)'), # Chinese (Taiwan)
)
LANGUAGE_DICT = dict(LANGUAGES)
USE_I18N = True
USE_L10N = True
STATICI18N_ROOT = PROJECT_ROOT / "static"
STATICI18N_OUTPUT_DIR = "js/i18n"
# Localization strings (e.g. django.po) are under this directory
LOCALE_PATHS = (REPO_ROOT + '/conf/locale',) # edx-platform/conf/locale/
# Messages
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
# Guidelines for translators
TRANSLATORS_GUIDE = 'http://edx.readthedocs.org/projects/edx-developer-guide/en/latest/conventions/internationalization/i18n_translators_guide.html' # pylint: disable=line-too-long
#################################### GITHUB #######################################
# gitreload is used in LMS-workflow to pull content from github
# gitreload requests are only allowed from these IP addresses, which are
# the advertised public IPs of the github WebHook servers.
# These are listed, e.g., at https://github.com/edx/edx-platform/admin/hooks
ALLOWED_GITRELOAD_IPS = ['207.97.227.253', '50.57.128.197', '108.171.174.178']
#################################### AWS #######################################
# S3BotoStorage insists on a timeout for uploaded assets. We should make it
# permanent instead, but rather than trying to figure out exactly where that
# setting is, I'm just bumping the expiration time to something absurd (10
# years). This is only used if DEFAULT_FILE_STORAGE is overridden to use S3
# in the global settings.py.
AWS_QUERYSTRING_EXPIRE = 10 * 365 * 24 * 60 * 60 # 10 years
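# An illustrative override for S3-backed uploads (assumes django-storages is
# installed and configured; the bucket name below is hypothetical):
#
# DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
# AWS_STORAGE_BUCKET_NAME = 'my-edxapp-uploads'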
################################# SIMPLEWIKI ###################################
SIMPLE_WIKI_REQUIRE_LOGIN_EDIT = True
SIMPLE_WIKI_REQUIRE_LOGIN_VIEW = False
################################# WIKI ###################################
from course_wiki import settings as course_wiki_settings
WIKI_ACCOUNT_HANDLING = False
WIKI_EDITOR = 'course_wiki.editors.CodeMirror'
WIKI_SHOW_MAX_CHILDREN = 0 # We don't use the little menu that shows children of an article in the breadcrumb
WIKI_ANONYMOUS = False # Don't allow anonymous access until the styling is figured out
WIKI_CAN_DELETE = course_wiki_settings.CAN_DELETE
WIKI_CAN_MODERATE = course_wiki_settings.CAN_MODERATE
WIKI_CAN_CHANGE_PERMISSIONS = course_wiki_settings.CAN_CHANGE_PERMISSIONS
WIKI_CAN_ASSIGN = course_wiki_settings.CAN_ASSIGN
WIKI_USE_BOOTSTRAP_SELECT_WIDGET = False
WIKI_LINK_LIVE_LOOKUPS = False
WIKI_LINK_DEFAULT_LEVEL = 2
##### Feedback submission mechanism #####
FEEDBACK_SUBMISSION_EMAIL = None
##### Zendesk #####
ZENDESK_URL = None
ZENDESK_USER = None
ZENDESK_API_KEY = None
ZENDESK_CUSTOM_FIELDS = {}
##### EMBARGO #####
EMBARGO_SITE_REDIRECT_URL = None
##### shoppingcart Payment #####
PAYMENT_SUPPORT_EMAIL = '[email protected]'
##### Using cybersource by default #####
CC_PROCESSOR_NAME = 'CyberSource'
CC_PROCESSOR = {
'CyberSource': {
'SHARED_SECRET': '',
'MERCHANT_ID': '',
'SERIAL_NUMBER': '',
'ORDERPAGE_VERSION': '7',
'PURCHASE_ENDPOINT': '',
},
'CyberSource2': {
"PURCHASE_ENDPOINT": '',
"SECRET_KEY": '',
"ACCESS_KEY": '',
"PROFILE_ID": '',
}
}
# Setting for PAID_COURSE_REGISTRATION, DOES NOT AFFECT VERIFIED STUDENTS
PAID_COURSE_REGISTRATION_CURRENCY = ['usd', '$']
# Members of this group are allowed to generate payment reports
PAYMENT_REPORT_GENERATOR_GROUP = 'shoppingcart_report_access'
################################# EdxNotes config #########################
# Configure the LMS to use our stub EdxNotes implementation
EDXNOTES_PUBLIC_API = 'http://localhost:8120/api/v1'
EDXNOTES_INTERNAL_API = 'http://localhost:8120/api/v1'
EDXNOTES_CONNECT_TIMEOUT = 0.5 # time in seconds
EDXNOTES_READ_TIMEOUT = 1.5 # time in seconds
########################## Parental controls config #######################
# The age at which a learner no longer requires parental consent, or None
# if parental consent is never required.
PARENTAL_CONSENT_AGE_LIMIT = 13
################################# Jasmine ##################################
JASMINE_TEST_DIRECTORY = PROJECT_ROOT + '/static/coffee'
######################### Branded Footer ###################################
# Constants for the footer used on the site and shared with other sites
# (such as marketing and the blog) via the branding API.
# URL for OpenEdX displayed in the footer
FOOTER_OPENEDX_URL = "http://open.edx.org"
# URL for the OpenEdX logo image
# We use logo images served from files.edx.org so we can (roughly) track
# how many OpenEdX installations are running.
# Site operators can choose from these logo options:
# * https://files.edx.org/openedx-logos/edx-openedx-logo-tag.png
# * https://files.edx.org/openedx-logos/edx-openedx-logo-tag-light.png
# * https://files.edx.org/openedx-logos/edx-openedx-logo-tag-dark.png
FOOTER_OPENEDX_LOGO_IMAGE = "https://files.edx.org/openedx-logos/edx-openedx-logo-tag.png"
# This is just a placeholder image.
# Site operators can customize this with their organization's image.
FOOTER_ORGANIZATION_IMAGE = "images/logo.png"
# These are referred to both by the Django asset pipeline
# AND by the branding footer API, which needs to decide which
# version of the CSS to serve.
FOOTER_CSS = {
"openedx": {
"ltr": "style-lms-footer",
"rtl": "style-lms-footer-rtl",
},
"edx": {
"ltr": "style-lms-footer-edx",
"rtl": "style-lms-footer-edx-rtl",
},
}
# Cache expiration for the version of the footer served
# by the branding API.
FOOTER_CACHE_TIMEOUT = 30 * 60
# Max age cache control header for the footer (controls browser caching).
FOOTER_BROWSER_CACHE_MAX_AGE = 5 * 60
# Credit api notification cache timeout
CREDIT_NOTIFICATION_CACHE_TIMEOUT = 5 * 60 * 60
################################# Deprecation warnings #####################
# Ignore deprecation warnings (so we don't clutter Jenkins builds/production)
simplefilter('ignore')
################################# Middleware ###################################
MIDDLEWARE_CLASSES = (
'crum.CurrentRequestUserMiddleware',
'request_cache.middleware.RequestCache',
'openedx.core.djangoapps.monitoring_utils.middleware.MonitoringCustomMetrics',
'mobile_api.middleware.AppVersionUpgrade',
'openedx.core.djangoapps.header_control.middleware.HeaderControlMiddleware',
'microsite_configuration.middleware.MicrositeMiddleware',
'django_comment_client.middleware.AjaxExceptionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sites.middleware.CurrentSiteMiddleware',
# Allows us to define redirects via Django admin
'django_sites_extensions.middleware.RedirectMiddleware',
# Instead of SessionMiddleware, we use a more secure version
# 'django.contrib.sessions.middleware.SessionMiddleware',
'openedx.core.djangoapps.safe_sessions.middleware.SafeSessionMiddleware',
# Instead of AuthenticationMiddleware, we use a cached backed version
#'django.contrib.auth.middleware.AuthenticationMiddleware',
'openedx.core.djangoapps.cache_toolbox.middleware.CacheBackedAuthenticationMiddleware',
# Enable SessionAuthenticationMiddleware in order to invalidate
# user sessions after a password change.
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'student.middleware.UserStandingMiddleware',
'openedx.core.djangoapps.contentserver.middleware.StaticContentServer',
# Adds user tags to tracking events
# Must go before TrackMiddleware, to get the context set up
'openedx.core.djangoapps.user_api.middleware.UserTagsEventContextMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'track.middleware.TrackMiddleware',
# CORS and CSRF
'corsheaders.middleware.CorsMiddleware',
'openedx.core.djangoapps.cors_csrf.middleware.CorsCSRFMiddleware',
'openedx.core.djangoapps.cors_csrf.middleware.CsrfCrossDomainCookieMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'splash.middleware.SplashMiddleware',
'openedx.core.djangoapps.geoinfo.middleware.CountryMiddleware',
'openedx.core.djangoapps.embargo.middleware.EmbargoMiddleware',
# Allows us to set user preferences
'openedx.core.djangoapps.lang_pref.middleware.LanguagePreferenceMiddleware',
# Allows us to dark-launch particular languages.
# Must be after LangPrefMiddleware, so ?preview-lang query params can override
# user's language preference. ?clear-lang resets to user's language preference.
'openedx.core.djangoapps.dark_lang.middleware.DarkLangMiddleware',
# Detects user-requested locale from 'accept-language' header in http request.
# Must be after DarkLangMiddleware.
'django.middleware.locale.LocaleMiddleware',
# 'debug_toolbar.middleware.DebugToolbarMiddleware',
'django_comment_client.utils.ViewNameMiddleware',
'codejail.django_integration.ConfigureCodeJailMiddleware',
# catches any uncaught RateLimitExceptions and returns a 403 instead of a 500
'ratelimitbackend.middleware.RateLimitMiddleware',
# for expiring inactive sessions
'openedx.core.djangoapps.session_inactivity_timeout.middleware.SessionInactivityTimeout',
# use Django built in clickjacking protection
'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # to redirect unenrolled students to the course info page
'courseware.middleware.RedirectMiddleware',
'course_wiki.middleware.WikiAccessMiddleware',
'openedx.core.djangoapps.theming.middleware.CurrentSiteThemeMiddleware',
'waffle.middleware.WaffleMiddleware',
# This must be last
'openedx.core.djangoapps.site_configuration.middleware.SessionCookieDomainOverrideMiddleware',
)
# Clickjacking protection can be enabled by setting this to 'DENY'
X_FRAME_OPTIONS = 'ALLOW'
# Platform for Privacy Preferences header
P3P_HEADER = 'CP="Open EdX does not have a P3P policy."'
############################### PIPELINE #######################################
PIPELINE_ENABLED = True
STATICFILES_STORAGE = 'openedx.core.storage.ProductionStorage'
# List of finder classes that know how to find static files in various locations.
# Note: the pipeline finder is included to be able to discover optimized files
STATICFILES_FINDERS = [
'openedx.core.djangoapps.theming.finders.ThemeFilesFinder',
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'openedx.core.lib.xblock_pipeline.finder.XBlockPipelineFinder',
'pipeline.finders.PipelineFinder',
]
PIPELINE_CSS_COMPRESSOR = None
PIPELINE_JS_COMPRESSOR = 'pipeline.compressors.uglifyjs.UglifyJSCompressor'
# Don't wrap JavaScript as there is code that depends upon updating the global namespace
PIPELINE_DISABLE_WRAPPER = True
# Specify the UglifyJS binary to use
PIPELINE_UGLIFYJS_BINARY = 'node_modules/.bin/uglifyjs'
from openedx.core.lib.rooted_paths import rooted_glob
courseware_js = (
[
'coffee/src/' + pth + '.js'
for pth in ['courseware', 'histogram', 'navigation']
] +
['js/' + pth + '.js' for pth in ['ajax-error']] +
sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/modules/**/*.js'))
)
proctoring_js = (
[
'proctoring/js/models/proctored_exam_allowance_model.js',
'proctoring/js/models/proctored_exam_attempt_model.js',
'proctoring/js/models/proctored_exam_model.js'
] +
[
'proctoring/js/collections/proctored_exam_allowance_collection.js',
'proctoring/js/collections/proctored_exam_attempt_collection.js',
'proctoring/js/collections/proctored_exam_collection.js'
] +
[
'proctoring/js/views/Backbone.ModalDialog.js',
'proctoring/js/views/proctored_exam_add_allowance_view.js',
'proctoring/js/views/proctored_exam_allowance_view.js',
'proctoring/js/views/proctored_exam_suspicious_monitor.js',
'proctoring/js/views/proctored_exam_attempt_view.js',
'proctoring/js/views/proctored_exam_view.js'
] +
[
'proctoring/js/proctored_app.js'
]
)
# Before a student accesses courseware, we do not
# need many of the JS dependencies. This includes
# only the dependencies used everywhere in the LMS
# (including the dashboard/account/profile pages)
# Currently, this partially duplicates the "main vendor"
# JavaScript file, so only one of the two should be included
# on a page at any time.
# In the future, we will likely refactor this to use
# RequireJS and an optimizer.
base_vendor_js = [
'common/js/vendor/jquery.js',
'common/js/vendor/jquery-migrate.js',
'js/vendor/jquery.cookie.js',
'js/vendor/url.min.js',
'common/js/vendor/underscore.js',
'common/js/vendor/underscore.string.js',
'common/js/vendor/picturefill.js',
# Make some edX UI Toolkit utilities available in the global "edx" namespace
'edx-ui-toolkit/js/utils/global-loader.js',
'edx-ui-toolkit/js/utils/string-utils.js',
'edx-ui-toolkit/js/utils/html-utils.js',
# Finally load RequireJS and dependent vendor libraries
'common/js/vendor/require.js',
'js/RequireJS-namespace-undefine.js',
'js/vendor/URI.min.js',
'common/js/vendor/backbone.js'
]
main_vendor_js = base_vendor_js + [
'js/vendor/json2.js',
'js/vendor/jquery-ui.min.js',
'js/vendor/timepicker/jquery.timepicker.min.js',
'js/vendor/jquery.qtip.min.js',
'js/vendor/jquery.ba-bbq.min.js',
]
# Common files used by both RequireJS code and non-RequireJS code
base_application_js = [
'js/src/utility.js',
'js/src/logger.js',
'js/user_dropdown_v1.js', # Custom dropdown keyboard handling for legacy pages
'js/dialog_tab_controls.js',
'js/src/string_utils.js',
'js/form.ext.js',
'js/src/ie_shim.js',
'js/src/accessibility_tools.js',
'js/toggle_login_modal.js',
'js/src/lang_edx.js',
]
dashboard_js = (
sorted(rooted_glob(PROJECT_ROOT / 'static', 'js/dashboard/**/*.js'))
)
discussion_js = (
rooted_glob(COMMON_ROOT / 'static', 'common/js/discussion/mathjax_include.js') +
rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/customwmd.js') +
rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/mathjax_accessible.js') +
rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/mathjax_delay_renderer.js') +
sorted(rooted_glob(COMMON_ROOT / 'static', 'common/js/discussion/**/*.js'))
)
discussion_vendor_js = [
'js/Markdown.Converter.js',
'js/Markdown.Sanitizer.js',
'js/Markdown.Editor.js',
'js/vendor/jquery.timeago.js',
'js/src/jquery.timeago.locale.js',
'js/vendor/jquery.truncate.js',
'js/jquery.ajaxfileupload.js',
'js/split.js'
]
notes_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/notes/**/*.js'))
instructor_dash_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'js/instructor_dashboard/**/*.js'))
verify_student_js = [
'js/sticky_filter.js',
'js/query-params.js',
'js/verify_student/models/verification_model.js',
'js/verify_student/views/error_view.js',
'js/verify_student/views/image_input_view.js',
'js/verify_student/views/webcam_photo_view.js',
'js/verify_student/views/step_view.js',
'js/verify_student/views/intro_step_view.js',
'js/verify_student/views/make_payment_step_view.js',
'js/verify_student/views/payment_confirmation_step_view.js',
'js/verify_student/views/face_photo_step_view.js',
'js/verify_student/views/id_photo_step_view.js',
'js/verify_student/views/review_photos_step_view.js',
'js/verify_student/views/enrollment_confirmation_step_view.js',
'js/verify_student/views/pay_and_verify_view.js',
'js/verify_student/pay_and_verify.js',
]
reverify_js = [
'js/verify_student/views/error_view.js',
'js/verify_student/views/image_input_view.js',
'js/verify_student/views/webcam_photo_view.js',
'js/verify_student/views/step_view.js',
'js/verify_student/views/face_photo_step_view.js',
'js/verify_student/views/id_photo_step_view.js',
'js/verify_student/views/review_photos_step_view.js',
'js/verify_student/views/reverify_success_step_view.js',
'js/verify_student/models/verification_model.js',
'js/verify_student/views/reverify_view.js',
'js/verify_student/reverify.js',
]
incourse_reverify_js = [
'js/verify_student/views/error_view.js',
'js/verify_student/views/image_input_view.js',
'js/verify_student/views/webcam_photo_view.js',
'js/verify_student/models/verification_model.js',
'js/verify_student/views/incourse_reverify_view.js',
'js/verify_student/incourse_reverify.js',
]
ccx_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'js/ccx/**/*.js'))
certificates_web_view_js = [
'common/js/vendor/jquery.js',
'common/js/vendor/jquery-migrate.js',
'js/vendor/jquery.cookie.js',
'js/src/logger.js',
'js/utils/facebook.js',
]
credit_web_view_js = [
'common/js/vendor/jquery.js',
'common/js/vendor/jquery-migrate.js',
'js/vendor/jquery.cookie.js',
'js/src/logger.js',
]
PIPELINE_CSS = {
'style-vendor': {
'source_filenames': [
'css/vendor/font-awesome.css',
'css/vendor/jquery.qtip.min.css',
],
'output_filename': 'css/lms-style-vendor.css',
},
'style-vendor-tinymce-content': {
'source_filenames': [
'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/content.min.css'
],
'output_filename': 'css/lms-style-vendor-tinymce-content.css',
},
'style-vendor-tinymce-skin': {
'source_filenames': [
'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/skin.min.css'
],
'output_filename': 'css/lms-style-vendor-tinymce-skin.css',
},
'style-main-v1': {
'source_filenames': [
'css/lms-main-v1.css',
],
'output_filename': 'css/lms-main-v1.css',
},
'style-main-v1-rtl': {
'source_filenames': [
'css/lms-main-v1-rtl.css',
],
'output_filename': 'css/lms-main-v1-rtl.css',
},
'style-main-v2': {
'source_filenames': [
'css/lms-main-v2.css',
],
'output_filename': 'css/lms-main-v2.css',
},
'style-main-v2-rtl': {
'source_filenames': [
'css/lms-main-v2-rtl.css',
],
'output_filename': 'css/lms-main-v2-rtl.css',
},
'style-course-vendor': {
'source_filenames': [
'js/vendor/CodeMirror/codemirror.css',
'css/vendor/jquery.treeview.css',
'css/vendor/ui-lightness/jquery-ui-1.8.22.custom.css',
'js/vendor/timepicker/jquery.timepicker.css',
],
'output_filename': 'css/lms-style-course-vendor.css',
},
'style-course': {
'source_filenames': [
'css/lms-course.css',
],
'output_filename': 'css/lms-course.css',
},
'style-course-rtl': {
'source_filenames': [
'css/lms-course-rtl.css',
],
'output_filename': 'css/lms-course-rtl.css',
},
'style-student-notes': {
'source_filenames': [
'css/vendor/edxnotes/annotator.min.css',
],
'output_filename': 'css/lms-style-student-notes.css',
},
'style-discussion-main': {
'source_filenames': [
'css/discussion/lms-discussion-main.css',
],
'output_filename': 'css/discussion/lms-discussion-main.css',
},
'style-discussion-main-rtl': {
'source_filenames': [
'css/discussion/lms-discussion-main-rtl.css',
],
'output_filename': 'css/discussion/lms-discussion-main-rtl.css',
},
'style-inline-discussion': {
'source_filenames': [
'css/discussion/inline-discussion.css',
],
'output_filename': 'css/discussion/inline-discussion.css',
},
'style-inline-discussion-rtl': {
'source_filenames': [
'css/discussion/inline-discussion-rtl.css',
],
'output_filename': 'css/discussion/inline-discussion-rtl.css',
},
'style-xmodule-annotations': {
'source_filenames': [
'css/vendor/ova/annotator.css',
'css/vendor/ova/edx-annotator.css',
'css/vendor/ova/video-js.min.css',
'css/vendor/ova/rangeslider.css',
'css/vendor/ova/share-annotator.css',
'css/vendor/ova/richText-annotator.css',
'css/vendor/ova/tags-annotator.css',
'css/vendor/ova/flagging-annotator.css',
'css/vendor/ova/diacritic-annotator.css',
'css/vendor/ova/grouping-annotator.css',
'css/vendor/ova/ova.css',
'js/vendor/ova/catch/css/main.css'
],
'output_filename': 'css/lms-style-xmodule-annotations.css',
},
FOOTER_CSS['openedx']['ltr']: {
'source_filenames': [
'css/lms-footer.css',
],
'output_filename': 'css/lms-footer.css',
},
FOOTER_CSS['openedx']['rtl']: {
'source_filenames': [
'css/lms-footer-rtl.css',
],
'output_filename': 'css/lms-footer-rtl.css'
},
FOOTER_CSS['edx']['ltr']: {
'source_filenames': [
'css/lms-footer-edx.css',
],
'output_filename': 'css/lms-footer-edx.css'
},
FOOTER_CSS['edx']['rtl']: {
'source_filenames': [
'css/lms-footer-edx-rtl.css',
],
'output_filename': 'css/lms-footer-edx-rtl.css'
},
'style-certificates': {
'source_filenames': [
'certificates/css/main-ltr.css',
'css/vendor/font-awesome.css',
],
'output_filename': 'css/certificates-style.css'
},
'style-certificates-rtl': {
'source_filenames': [
'certificates/css/main-rtl.css',
'css/vendor/font-awesome.css',
],
'output_filename': 'css/certificates-style-rtl.css'
},
'style-learner-dashboard': {
'source_filenames': [
'css/lms-learner-dashboard.css',
],
'output_filename': 'css/lms-learner-dashboard.css',
},
'style-learner-dashboard-rtl': {
'source_filenames': [
'css/lms-learner-dashboard-rtl.css',
],
'output_filename': 'css/lms-learner-dashboard-rtl.css',
},
}
separately_bundled_js = set(courseware_js + discussion_js + notes_js + instructor_dash_js)
common_js = sorted(set(rooted_glob(COMMON_ROOT / 'static', 'coffee/src/**/*.js')) - separately_bundled_js)
xblock_runtime_js = [
'common/js/xblock/core.js',
'common/js/xblock/runtime.v1.js',
'lms/js/xblock/lms.runtime.v1.js',
]
lms_application_js = sorted(set(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/**/*.js')) - separately_bundled_js)
PIPELINE_JS = {
'base_application': {
'source_filenames': base_application_js,
'output_filename': 'js/lms-base-application.js',
},
'application': {
'source_filenames': (
common_js + xblock_runtime_js + base_application_js + lms_application_js +
[
'js/sticky_filter.js',
'js/query-params.js',
'common/js/vendor/moment-with-locales.js',
'common/js/vendor/moment-timezone-with-data.js',
]
),
'output_filename': 'js/lms-application.js',
},
'proctoring': {
'source_filenames': proctoring_js,
'output_filename': 'js/lms-proctoring.js',
},
'courseware': {
'source_filenames': courseware_js,
'output_filename': 'js/lms-courseware.js',
},
'base_vendor': {
'source_filenames': base_vendor_js,
'output_filename': 'js/lms-base-vendor.js',
},
'main_vendor': {
'source_filenames': main_vendor_js,
'output_filename': 'js/lms-main_vendor.js',
},
'lms_bootstrap': {
'source_filenames': [
'common/js/vendor/tether.js',
'common/js/vendor/bootstrap.js',
],
'output_filename': 'js/lms-bootstrap.js',
},
'module-descriptor-js': {
'source_filenames': rooted_glob(COMMON_ROOT / 'static/', 'xmodule/descriptors/js/*.js'),
'output_filename': 'js/lms-module-descriptors.js',
},
'module-js': {
'source_filenames': rooted_glob(COMMON_ROOT / 'static', 'xmodule/modules/js/*.js'),
'output_filename': 'js/lms-modules.js',
},
'discussion': {
'source_filenames': discussion_js,
'output_filename': 'js/discussion.js',
},
'discussion_vendor': {
'source_filenames': discussion_vendor_js,
'output_filename': 'js/discussion_vendor.js',
},
'notes': {
'source_filenames': notes_js,
'output_filename': 'js/notes.js',
},
'instructor_dash': {
'source_filenames': instructor_dash_js,
'output_filename': 'js/instructor_dash.js',
},
'dashboard': {
'source_filenames': dashboard_js,
'output_filename': 'js/dashboard.js'
},
'verify_student': {
'source_filenames': verify_student_js,
'output_filename': 'js/verify_student.js'
},
'reverify': {
'source_filenames': reverify_js,
'output_filename': 'js/reverify.js'
},
'incourse_reverify': {
'source_filenames': incourse_reverify_js,
'output_filename': 'js/incourse_reverify.js'
},
'ccx': {
'source_filenames': ccx_js,
'output_filename': 'js/ccx.js'
},
'footer_edx': {
'source_filenames': ['js/footer-edx.js'],
'output_filename': 'js/footer-edx.js'
},
'certificates_wv': {
'source_filenames': certificates_web_view_js,
'output_filename': 'js/certificates/web_view.js'
},
'credit_wv': {
'source_filenames': credit_web_view_js,
'output_filename': 'js/credit/web_view.js'
}
}
STATICFILES_IGNORE_PATTERNS = (
"*.py",
"*.pyc",
# It would be nice if we could do, for example, "**/*.scss",
# but these strings get passed down to the `fnmatch` module,
# which doesn't support that. :(
# http://docs.python.org/2/library/fnmatch.html
"sass/*.scss",
"sass/*/*.scss",
"sass/*/*/*.scss",
"sass/*/*/*/*.scss",
"coffee/*.coffee",
"coffee/*/*.coffee",
"coffee/*/*/*.coffee",
"coffee/*/*/*/*.coffee",
# Ignore tests
"spec",
"spec_helpers",
# Symlinks used by js-test-tool
"xmodule_js",
)
################################# DJANGO-REQUIRE ###############################
# The baseUrl to pass to the r.js optimizer, relative to STATIC_ROOT.
REQUIRE_BASE_URL = "./"
# The name of a build profile to use for your project, relative to REQUIRE_BASE_URL.
# A sensible value would be 'app.build.js'. Leave blank to use the built-in default build profile.
# Set to False to disable running the default profile (e.g. if only using it to build Standalone
# Modules)
REQUIRE_BUILD_PROFILE = "lms/js/build.js"
# The name of the require.js script used by your project, relative to REQUIRE_BASE_URL.
REQUIRE_JS = "common/js/vendor/require.js"
# Whether to run django-require in debug mode.
REQUIRE_DEBUG = False
# In production, the Django pipeline appends a file hash to JavaScript file names.
# This makes it difficult for RequireJS to load its requirements, since module names
# specified in JavaScript code do not include the hash.
# For this reason, we calculate the actual path including the hash on the server
# when rendering the page. We then override the default paths provided to RequireJS
# so it can resolve the module name to the correct URL.
#
# If you want to load JavaScript dependencies using RequireJS
# but you don't want to include those dependencies in the JS bundle for the page,
# then you need to add their URLs to this dict.
REQUIRE_JS_PATH_OVERRIDES = {
'course_bookmarks/js/views/bookmark_button': 'course_bookmarks/js/views/bookmark_button.js',
'js/views/message_banner': 'js/views/message_banner.js',
'moment': 'common/js/vendor/moment-with-locales.js',
'moment-timezone': 'common/js/vendor/moment-timezone-with-data.js',
'js/courseware/course_home_events': 'js/courseware/course_home_events.js',
'js/courseware/accordion_events': 'js/courseware/accordion_events.js',
'js/dateutil_factory': 'js/dateutil_factory.js',
'js/courseware/link_clicked_events': 'js/courseware/link_clicked_events.js',
'js/courseware/toggle_element_visibility': 'js/courseware/toggle_element_visibility.js',
'js/student_account/logistration_factory': 'js/student_account/logistration_factory.js',
'js/student_profile/views/learner_profile_factory': 'js/student_profile/views/learner_profile_factory.js',
'js/courseware/courseware_factory': 'js/courseware/courseware_factory.js',
'js/groups/views/cohorts_dashboard_factory': 'js/groups/views/cohorts_dashboard_factory.js',
'js/groups/discussions_management/discussions_dashboard_factory': 'js/discussions_management/views/discussions_dashboard_factory.js',
'draggabilly': 'js/vendor/draggabilly.js',
'hls': 'common/js/vendor/hls.js'
}
########################## DJANGO WEBPACK LOADER ##############################
WEBPACK_LOADER = {
'DEFAULT': {
'BUNDLE_DIR_NAME': 'bundles/',
'STATS_FILE': os.path.join(STATIC_ROOT, 'webpack-stats.json')
}
}
########################## DJANGO DEBUG TOOLBAR ###############################
# We don't enable Django Debug Toolbar universally, but whenever we do, we want
# to avoid patching settings. Patched settings can cause circular import
# problems: http://django-debug-toolbar.readthedocs.org/en/1.0/installation.html#explicit-setup
DEBUG_TOOLBAR_PATCH_SETTINGS = False
################################# CELERY ######################################
# Celery's task autodiscovery won't find tasks nested in a tasks package.
# Tasks are only registered when the module they are defined in is imported.
CELERY_IMPORTS = (
'openedx.core.djangoapps.programs.tasks.v1.tasks',
)
# Message configuration
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_MESSAGE_COMPRESSION = 'gzip'
# Results configuration
CELERY_IGNORE_RESULT = False
CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True
# Events configuration
CELERY_TRACK_STARTED = True
CELERY_SEND_EVENTS = True
CELERY_SEND_TASK_SENT_EVENT = True
# Exchange configuration
CELERY_DEFAULT_EXCHANGE = 'edx.core'
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
# Queues configuration
HIGH_PRIORITY_QUEUE = 'edx.core.high'
DEFAULT_PRIORITY_QUEUE = 'edx.core.default'
LOW_PRIORITY_QUEUE = 'edx.core.low'
HIGH_MEM_QUEUE = 'edx.core.high_mem'
CELERY_QUEUE_HA_POLICY = 'all'
CELERY_CREATE_MISSING_QUEUES = True
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = {
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
DEFAULT_PRIORITY_QUEUE: {},
HIGH_MEM_QUEUE: {},
}
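# To pin a specific task to one of these queues, a route like the following
# could be added (the task path is hypothetical; see the Celery routing docs):
#
# CELERY_ROUTES = {
#     'myapp.tasks.heavy_report': {'queue': HIGH_MEM_QUEUE},
# }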
# let logging work as configured:
CELERYD_HIJACK_ROOT_LOGGER = False
################################ Block Structures ###################################
BLOCK_STRUCTURES_SETTINGS = dict(
# Delay, in seconds, after a new edit of a course is published
# before updating the block structures cache. This is needed
# for a better chance at getting the latest changes when there
    # are secondary reads in sharded MongoDB clusters. See TNL-5041
# for more info.
COURSE_PUBLISH_TASK_DELAY=30,
# Delay, in seconds, between retry attempts if a task fails.
TASK_DEFAULT_RETRY_DELAY=30,
# Maximum number of retries per task.
TASK_MAX_RETRIES=5,
# Backend storage
# STORAGE_CLASS='storages.backends.s3boto.S3BotoStorage',
# STORAGE_KWARGS=dict(bucket='nim-beryl-test'),
# DIRECTORY_PREFIX='/modeltest/',
)
################################ Bulk Email ###################################
# Suffix used to construct 'from' email address for bulk emails.
# A course-specific identifier is prepended.
BULK_EMAIL_DEFAULT_FROM_EMAIL = '[email protected]'
# Parameters for breaking down course enrollment into subtasks.
BULK_EMAIL_EMAILS_PER_TASK = 100
# Initial delay used for retrying tasks. Additional retries use
# longer delays. Value is in seconds.
BULK_EMAIL_DEFAULT_RETRY_DELAY = 30
# Maximum number of retries per task for errors that are not related
# to throttling.
BULK_EMAIL_MAX_RETRIES = 5
# Maximum number of retries per task for errors that are related to
# throttling. If this is not set, then there is no cap on such retries.
BULK_EMAIL_INFINITE_RETRY_CAP = 1000
# We want Bulk Email running on the high-priority queue, so we define the
# routing key that points to it. At the moment, the name is the same.
BULK_EMAIL_ROUTING_KEY = HIGH_PRIORITY_QUEUE
# We also define a queue for smaller jobs so that large courses don't block
# smaller emails (see BULK_EMAIL_JOB_SIZE_THRESHOLD setting)
BULK_EMAIL_ROUTING_KEY_SMALL_JOBS = LOW_PRIORITY_QUEUE
# For emails with fewer than this number of recipients, send them through
# a different queue so that large courses don't block emails that are meant to be
# sent to self and staff.
BULK_EMAIL_JOB_SIZE_THRESHOLD = 100
# Flag to indicate whether individual email addresses should be logged as each
# bulk email message is sent to them.
BULK_EMAIL_LOG_SENT_EMAILS = False
# Delay in seconds to sleep between individual mail messages being sent,
# when a bulk email task is retried for rate-related reasons. Choose this
# value depending on the number of workers that might be sending email in
# parallel, and what the SES rate is.
BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS = 0.02
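# For example, with 10 workers each sleeping 0.02 seconds between sends, the
# aggregate rate is at most 10 / 0.02 = 500 messages per second; choose a delay
# that keeps that figure below your provider's (e.g. SES) sending limit.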
############################# Persistent Grades ####################################
# Queue to use for updating persistent grades
RECALCULATE_GRADES_ROUTING_KEY = LOW_PRIORITY_QUEUE
############################# Email Opt In ####################################
# Minimum age for organization-wide email opt in
EMAIL_OPTIN_MINIMUM_AGE = PARENTAL_CONSENT_AGE_LIMIT
############################## Video ##########################################
YOUTUBE = {
# YouTube JavaScript API
'API': 'https://www.youtube.com/iframe_api',
'TEST_TIMEOUT': 1500,
# URL to get YouTube metadata
'METADATA_URL': 'https://www.googleapis.com/youtube/v3/videos/',
# Current youtube api for requesting transcripts.
# For example: http://video.google.com/timedtext?lang=en&v=j_jEn79vS3g.
'TEXT_API': {
'url': 'video.google.com/timedtext',
'params': {
'lang': 'en',
'v': 'set_youtube_id_of_11_symbols_here',
},
},
'IMAGE_API': 'http://img.youtube.com/vi/{youtube_id}/0.jpg', # /maxresdefault.jpg for 1920*1080
}
YOUTUBE_API_KEY = None
################################### APPS ######################################
INSTALLED_APPS = (
# Standard ones that are always installed...
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.humanize',
'django.contrib.messages',
'django.contrib.redirects',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'djcelery',
# Common views
'openedx.core.djangoapps.common_views',
# History tables
'simple_history',
# Database-backed configuration
'config_models',
'waffle',
# Monitor the status of services
'openedx.core.djangoapps.service_status',
# Display status message to students
'status',
# For asset pipelining
'edxmako',
'pipeline',
'static_replace',
'webpack_loader',
# For user interface plugins
'web_fragments',
'openedx.core.djangoapps.plugin_api',
# For content serving
'openedx.core.djangoapps.contentserver',
# Theming
'openedx.core.djangoapps.theming',
# Site configuration for theming and behavioral modification
'openedx.core.djangoapps.site_configuration',
# Our courseware
'courseware',
'student',
'static_template_view',
'staticbook',
'track',
'eventtracking.django.apps.EventTrackingConfig',
'util',
'certificates',
'dashboard',
'lms.djangoapps.instructor',
'lms.djangoapps.instructor_task',
'openedx.core.djangoapps.course_groups',
'bulk_email',
'branding',
'lms.djangoapps.grades.apps.GradesConfig',
# Student support tools
'support',
# External auth (OpenID, shib)
'openedx.core.djangoapps.external_auth',
'django_openid_auth',
# django-oauth2-provider (deprecated)
'provider',
'provider.oauth2',
'edx_oauth2_provider',
# django-oauth-toolkit
'oauth2_provider',
'openedx.core.djangoapps.oauth_dispatch.apps.OAuthDispatchAppConfig',
'third_party_auth',
# We don't use this directly (since we use OAuth2), but we need to install it anyway.
# When a user is deleted, Django queries all tables with a FK to the auth_user table,
# and since django-rest-framework-oauth imports this, it will try to access tables
# defined by oauth_provider. If those tables don't exist, an error can occur.
'oauth_provider',
'openedx.core.djangoapps.auth_exchange',
# For the wiki
'wiki', # The new django-wiki from benjaoming
'django_notify',
'course_wiki', # Our customizations
'mptt',
'sekizai',
#'wiki.plugins.attachments',
'wiki.plugins.links',
# Notifications were enabled, but only 11 people used it in three years. It
# got tangled up during the Django 1.8 migration, so we are disabling it.
# See TNL-3783 for details.
#'wiki.plugins.notifications',
'course_wiki.plugins.markdownedx',
# For testing
'django.contrib.admin', # only used in DEBUG mode
'django_nose',
'debug',
'django_extensions',
# Discussion forums
'django_comment_client',
'django_comment_common',
'discussion_api',
'lms.djangoapps.discussion',
# Notes
'notes',
'edxnotes',
# Splash screen
'splash',
# Monitoring
'openedx.core.djangoapps.datadog',
# User API
'rest_framework',
'openedx.core.djangoapps.user_api',
# Shopping cart
'shoppingcart',
# Notification preferences setting
'notification_prefs',
'notifier_api',
# Different Course Modes
'course_modes',
# Enrollment API
'enrollment',
# Student Identity Verification
'lms.djangoapps.verify_student',
# Dark-launching languages
'openedx.core.djangoapps.dark_lang',
# Microsite configuration
'microsite_configuration',
# RSS Proxy
'rss_proxy',
# Country embargo support
'openedx.core.djangoapps.embargo',
# Monitoring functionality
'openedx.core.djangoapps.monitoring',
# Course action state
'course_action_state',
# Additional problem types
'edx_jsme', # Molecular Structure
# Country list
'django_countries',
# edX Mobile API
'mobile_api',
'social_django',
# Surveys
'survey',
'lms.djangoapps.lms_xblock',
# Course data caching
'openedx.core.djangoapps.content.course_overviews',
'openedx.core.djangoapps.content.course_structures.apps.CourseStructuresConfig',
'openedx.core.djangoapps.content.block_structure.apps.BlockStructureConfig',
'lms.djangoapps.course_blocks',
# Coursegraph
'openedx.core.djangoapps.coursegraph.apps.CoursegraphConfig',
# Old course structure API
'course_structure_api',
# Mailchimp Syncing
'mailing',
# CORS and cross-domain CSRF
'corsheaders',
'openedx.core.djangoapps.cors_csrf',
'commerce',
# Credit courses
'openedx.core.djangoapps.credit',
# Course teams
'lms.djangoapps.teams',
'xblock_django',
# Bookmarks
'openedx.core.djangoapps.bookmarks',
# programs support
'openedx.core.djangoapps.programs',
# Catalog integration
'openedx.core.djangoapps.catalog',
# Self-paced course configuration
'openedx.core.djangoapps.self_paced',
'sorl.thumbnail',
# Credentials support
'openedx.core.djangoapps.credentials',
# edx-milestones service
'milestones',
# Gating of course content
'gating.apps.GatingConfig',
# Static i18n support
'statici18n',
# API access administration
'openedx.core.djangoapps.api_admin',
# Verified Track Content Cohorting (Beta feature that will hopefully be removed)
'openedx.core.djangoapps.verified_track_content',
# Video module configs (This will be moved to Video once it becomes an XBlock)
'openedx.core.djangoapps.video_config',
# Learner's dashboard
'learner_dashboard',
# Needed whether or not enabled, due to migrations
'badges.apps.BadgesConfig',
# Enables default site and redirects
'django_sites_extensions',
# Email marketing integration
'email_marketing',
# additional release utilities to ease automation
'release_util',
# Customized celery tasks, including persisting failed tasks so they can
# be retried
'celery_utils',
# Ability to detect and special-case crawler behavior
'openedx.core.djangoapps.crawlers',
# Unusual migrations
'database_fixups',
# Waffle related utilities
'openedx.core.djangoapps.waffle_utils',
# Features
'openedx.features.course_bookmarks',
'openedx.features.course_experience',
'openedx.features.course_search',
'openedx.features.enterprise_support',
'experiments',
)
######################### CSRF #########################################
# Forwards-compatibility with Django 1.7
CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52
# It is highly recommended that you override this in any environment accessed by
# end users
CSRF_COOKIE_SECURE = False
######################### Django Rest Framework ########################
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'openedx.core.lib.api.paginators.DefaultPagination',
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
),
'PAGE_SIZE': 10,
'URL_FORMAT_OVERRIDE': None,
'DEFAULT_THROTTLE_RATES': {
'user': '60/minute',
'service_user': '120/minute',
},
}
######################### MARKETING SITE ###############################
EDXMKTG_LOGGED_IN_COOKIE_NAME = 'edxloggedin'
EDXMKTG_USER_INFO_COOKIE_NAME = 'edx-user-info'
EDXMKTG_USER_INFO_COOKIE_VERSION = 1
MKTG_URLS = {}
MKTG_URL_LINK_MAP = {
'ABOUT': 'about',
'CONTACT': 'contact',
'FAQ': 'help',
'COURSES': 'courses',
'ROOT': 'root',
'TOS': 'tos',
'HONOR': 'honor', # If your site does not have an honor code, simply delete this line.
'PRIVACY': 'privacy',
'PRESS': 'press',
'BLOG': 'blog',
'DONATE': 'donate',
'SITEMAP.XML': 'sitemap_xml',
# Verified Certificates
'WHAT_IS_VERIFIED_CERT': 'verified-certificate',
}
STATIC_TEMPLATE_VIEW_DEFAULT_FILE_EXTENSION = 'html'
SUPPORT_SITE_LINK = ''
PASSWORD_RESET_SUPPORT_LINK = ''
ACTIVATION_EMAIL_SUPPORT_LINK = ''
############################# SOCIAL MEDIA SHARING #############################
# Social Media Sharing on Student Dashboard
SOCIAL_SHARING_SETTINGS = {
# Note: Ensure 'CUSTOM_COURSE_URLS' has a matching value in cms/envs/common.py
'CUSTOM_COURSE_URLS': False,
'DASHBOARD_FACEBOOK': False,
'CERTIFICATE_FACEBOOK': False,
'CERTIFICATE_FACEBOOK_TEXT': None,
'CERTIFICATE_TWITTER': False,
'CERTIFICATE_TWITTER_TEXT': None,
'DASHBOARD_TWITTER': False,
'DASHBOARD_TWITTER_TEXT': None
}
################# Social Media Footer Links #######################
# The names list controls the order of social media
# links in the footer.
SOCIAL_MEDIA_FOOTER_NAMES = [
"facebook",
"twitter",
"youtube",
"linkedin",
"google_plus",
"reddit",
]
# JWT Settings
JWT_AUTH = {
# TODO Set JWT_SECRET_KEY to a secure value. By default, SECRET_KEY will be used.
# 'JWT_SECRET_KEY': '',
'JWT_ALGORITHM': 'HS256',
'JWT_VERIFY_EXPIRATION': True,
# TODO Set JWT_ISSUER and JWT_AUDIENCE to values specific to your service/organization.
'JWT_ISSUER': 'change-me',
'JWT_AUDIENCE': None,
'JWT_PAYLOAD_GET_USERNAME_HANDLER': lambda d: d.get('username'),
'JWT_LEEWAY': 1,
'JWT_DECODE_HANDLER': 'edx_rest_framework_extensions.utils.jwt_decode_handler',
# Number of seconds before JWT tokens expire
'JWT_EXPIRATION': 30,
}
# The footer URLs dictionary maps social footer names
# to URLs defined in configuration.
SOCIAL_MEDIA_FOOTER_URLS = {}
# The display dictionary defines the title
# and icon class for each social media link.
SOCIAL_MEDIA_FOOTER_DISPLAY = {
"facebook": {
# Translators: This is the website name of www.facebook.com. Please
# translate this the way that Facebook advertises in your language.
"title": _("Facebook"),
"icon": "fa-facebook-square",
"action": _("Like {platform_name} on Facebook")
},
"twitter": {
# Translators: This is the website name of www.twitter.com. Please
# translate this the way that Twitter advertises in your language.
"title": _("Twitter"),
"icon": "fa-twitter",
"action": _("Follow {platform_name} on Twitter")
},
"linkedin": {
# Translators: This is the website name of www.linkedin.com. Please
# translate this the way that LinkedIn advertises in your language.
"title": _("LinkedIn"),
"icon": "fa-linkedin-square",
"action": _("Follow {platform_name} on LinkedIn")
},
"google_plus": {
# Translators: This is the website name of plus.google.com. Please
# translate this the way that Google+ advertises in your language.
"title": _("Google+"),
"icon": "fa-google-plus-square",
"action": _("Follow {platform_name} on Google+")
},
"tumblr": {
# Translators: This is the website name of www.tumblr.com. Please
# translate this the way that Tumblr advertises in your language.
"title": _("Tumblr"),
"icon": "fa-tumblr"
},
"meetup": {
# Translators: This is the website name of www.meetup.com. Please
# translate this the way that MeetUp advertises in your language.
"title": _("Meetup"),
"icon": "fa-calendar"
},
"reddit": {
# Translators: This is the website name of www.reddit.com. Please
# translate this the way that Reddit advertises in your language.
"title": _("Reddit"),
"icon": "fa-reddit",
"action": _("Subscribe to the {platform_name} subreddit"),
},
"vk": {
# Translators: This is the website name of https://vk.com. Please
# translate this the way that VK advertises in your language.
"title": _("VK"),
"icon": "fa-vk"
},
"weibo": {
# Translators: This is the website name of http://www.weibo.com. Please
# translate this the way that Weibo advertises in your language.
"title": _("Weibo"),
"icon": "fa-weibo"
},
"youtube": {
# Translators: This is the website name of www.youtube.com. Please
# translate this the way that YouTube advertises in your language.
"title": _("Youtube"),
"icon": "fa-youtube",
"action": _("Subscribe to the {platform_name} YouTube channel")
}
}
################# Mobile URLS ##########################
# These are URLs to the app store for mobile.
MOBILE_STORE_URLS = {
'apple': '#',
'google': '#'
}
################# Student Verification #################
VERIFY_STUDENT = {
"DAYS_GOOD_FOR": 365, # How many days is a verficiation good for?
# The variable represents the window within which a verification is considered to be "expiring soon."
"EXPIRING_SOON_WINDOW": 28,
}
DISABLE_ACCOUNT_ACTIVATION_REQUIREMENT_SWITCH = "verify_student_disable_account_activation_requirement"
### This enables the Metrics tab for the Instructor dashboard ###########
FEATURES['CLASS_DASHBOARD'] = False
if FEATURES.get('CLASS_DASHBOARD'):
INSTALLED_APPS += ('class_dashboard',)
################ Enable credit eligibility feature ####################
ENABLE_CREDIT_ELIGIBILITY = True
FEATURES['ENABLE_CREDIT_ELIGIBILITY'] = ENABLE_CREDIT_ELIGIBILITY
######################## CAS authentication ###########################
if FEATURES.get('AUTH_USE_CAS'):
CAS_SERVER_URL = 'https://provide_your_cas_url_here'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'django_cas.backends.CASBackend',
)
INSTALLED_APPS += ('django_cas',)
MIDDLEWARE_CLASSES += ('django_cas.middleware.CASMiddleware',)
############# Cross-domain requests #################
if FEATURES.get('ENABLE_CORS_HEADERS'):
CORS_ALLOW_CREDENTIALS = True
CORS_ORIGIN_WHITELIST = ()
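    # Example (hypothetical domain): CORS_ORIGIN_WHITELIST = ('studio.example.com',)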
CORS_ORIGIN_ALLOW_ALL = False
# Default cache expiration for the cross-domain proxy HTML page.
# This is a static page that can be iframed into an external page
# to simulate cross-domain requests.
XDOMAIN_PROXY_CACHE_TIMEOUT = 60 * 15
###################### Registration ##################################
# For each of the fields, give one of the following values:
# - 'required': to display the field, and make it mandatory
# - 'optional': to display the field, and make it non-mandatory
# - 'hidden': to not display the field
REGISTRATION_EXTRA_FIELDS = {
'confirm_email': 'hidden',
'level_of_education': 'optional',
'gender': 'optional',
'year_of_birth': 'optional',
'mailing_address': 'optional',
'goals': 'optional',
'honor_code': 'required',
'terms_of_service': 'hidden',
'city': 'hidden',
'country': 'hidden',
}
REGISTRATION_FIELD_ORDER = [
"name",
"first_name",
"last_name",
"username",
"email",
"confirm_email",
"password",
"city",
"state",
"country",
"gender",
"year_of_birth",
"level_of_education",
"company",
"title",
"mailing_address",
"goals",
"honor_code",
"terms_of_service",
]
# Optional setting to restrict registration / account creation to only emails
# that match a regex in this list. Set to None to allow any email (default).
REGISTRATION_EMAIL_PATTERNS_ALLOWED = None
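# Example (hypothetical domain): REGISTRATION_EMAIL_PATTERNS_ALLOWED = [r'^.*@example\.com$']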
########################## CERTIFICATE NAME ########################
CERT_NAME_SHORT = "Certificate"
CERT_NAME_LONG = "Certificate of Achievement"
#################### OpenBadges Settings #######################
BADGING_BACKEND = 'badges.backends.badgr.BadgrBackend'
# Be sure to set up images for course modes using the BadgeImageConfiguration model in the certificates app.
BADGR_API_TOKEN = None
# Do not add the trailing slash here.
BADGR_BASE_URL = "http://localhost:8005"
BADGR_ISSUER_SLUG = "example-issuer"
# Number of seconds to wait on the badging server when contacting it before giving up.
BADGR_TIMEOUT = 10
###################### Grade Downloads ######################
# These keys are used for all of our asynchronous downloadable files, including
# the ones that contain information other than grades.
GRADES_DOWNLOAD_ROUTING_KEY = HIGH_MEM_QUEUE
GRADES_DOWNLOAD = {
'STORAGE_TYPE': 'localfs',
'BUCKET': 'edx-grades',
'ROOT_PATH': '/tmp/edx-s3/grades',
}
FINANCIAL_REPORTS = {
'STORAGE_TYPE': 'localfs',
'BUCKET': 'edx-financial-reports',
'CUSTOM_DOMAIN': 'edx-financial-reports.s3.amazonaws.com',
'ROOT_PATH': '/tmp/edx-s3/financial_reports',
}
#### PASSWORD POLICY SETTINGS #####
PASSWORD_MIN_LENGTH = 8
PASSWORD_MAX_LENGTH = None
PASSWORD_COMPLEXITY = {"UPPER": 1, "LOWER": 1, "DIGITS": 1}
PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = None
PASSWORD_DICTIONARY = []
############################ ORA 2 ############################################
# By default, don't use a file prefix
ORA2_FILE_PREFIX = None
# Default File Upload Storage bucket and prefix. Used by the FileUpload Service.
FILE_UPLOAD_STORAGE_BUCKET_NAME = 'edxuploads'
FILE_UPLOAD_STORAGE_PREFIX = 'submissions_attachments'
##### ACCOUNT LOCKOUT DEFAULT PARAMETERS #####
MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = 5
MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = 15 * 60
##### LMS DEADLINE DISPLAY TIME_ZONE #######
TIME_ZONE_DISPLAYED_FOR_DEADLINES = 'UTC'
# Source:
# http://loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt according to http://en.wikipedia.org/wiki/ISO_639-1
# Note that this is used as the set of choices to the `code` field of the
# `LanguageProficiency` model.
ALL_LANGUAGES = (
[u"aa", u"Afar"],
[u"ab", u"Abkhazian"],
[u"af", u"Afrikaans"],
[u"ak", u"Akan"],
[u"sq", u"Albanian"],
[u"am", u"Amharic"],
[u"ar", u"Arabic"],
[u"an", u"Aragonese"],
[u"hy", u"Armenian"],
[u"as", u"Assamese"],
[u"av", u"Avaric"],
[u"ae", u"Avestan"],
[u"ay", u"Aymara"],
[u"az", u"Azerbaijani"],
[u"ba", u"Bashkir"],
[u"bm", u"Bambara"],
[u"eu", u"Basque"],
[u"be", u"Belarusian"],
[u"bn", u"Bengali"],
[u"bh", u"Bihari languages"],
[u"bi", u"Bislama"],
[u"bs", u"Bosnian"],
[u"br", u"Breton"],
[u"bg", u"Bulgarian"],
[u"my", u"Burmese"],
[u"ca", u"Catalan"],
[u"ch", u"Chamorro"],
[u"ce", u"Chechen"],
[u"zh", u"Chinese"],
[u"zh_HANS", u"Simplified Chinese"],
[u"zh_HANT", u"Traditional Chinese"],
[u"cu", u"Church Slavic"],
[u"cv", u"Chuvash"],
[u"kw", u"Cornish"],
[u"co", u"Corsican"],
[u"cr", u"Cree"],
[u"cs", u"Czech"],
[u"da", u"Danish"],
[u"dv", u"Divehi"],
[u"nl", u"Dutch"],
[u"dz", u"Dzongkha"],
[u"en", u"English"],
[u"eo", u"Esperanto"],
[u"et", u"Estonian"],
[u"ee", u"Ewe"],
[u"fo", u"Faroese"],
[u"fj", u"Fijian"],
[u"fi", u"Finnish"],
[u"fr", u"French"],
[u"fy", u"Western Frisian"],
[u"ff", u"Fulah"],
[u"ka", u"Georgian"],
[u"de", u"German"],
[u"gd", u"Gaelic"],
[u"ga", u"Irish"],
[u"gl", u"Galician"],
[u"gv", u"Manx"],
[u"el", u"Greek"],
[u"gn", u"Guarani"],
[u"gu", u"Gujarati"],
[u"ht", u"Haitian"],
[u"ha", u"Hausa"],
[u"he", u"Hebrew"],
[u"hz", u"Herero"],
[u"hi", u"Hindi"],
[u"ho", u"Hiri Motu"],
[u"hr", u"Croatian"],
[u"hu", u"Hungarian"],
[u"ig", u"Igbo"],
[u"is", u"Icelandic"],
[u"io", u"Ido"],
[u"ii", u"Sichuan Yi"],
[u"iu", u"Inuktitut"],
[u"ie", u"Interlingue"],
[u"ia", u"Interlingua"],
[u"id", u"Indonesian"],
[u"ik", u"Inupiaq"],
[u"it", u"Italian"],
[u"jv", u"Javanese"],
[u"ja", u"Japanese"],
[u"kl", u"Kalaallisut"],
[u"kn", u"Kannada"],
[u"ks", u"Kashmiri"],
[u"kr", u"Kanuri"],
[u"kk", u"Kazakh"],
[u"km", u"Central Khmer"],
[u"ki", u"Kikuyu"],
[u"rw", u"Kinyarwanda"],
[u"ky", u"Kirghiz"],
[u"kv", u"Komi"],
[u"kg", u"Kongo"],
[u"ko", u"Korean"],
[u"kj", u"Kuanyama"],
[u"ku", u"Kurdish"],
[u"lo", u"Lao"],
[u"la", u"Latin"],
[u"lv", u"Latvian"],
[u"li", u"Limburgan"],
[u"ln", u"Lingala"],
[u"lt", u"Lithuanian"],
[u"lb", u"Luxembourgish"],
[u"lu", u"Luba-Katanga"],
[u"lg", u"Ganda"],
[u"mk", u"Macedonian"],
[u"mh", u"Marshallese"],
[u"ml", u"Malayalam"],
[u"mi", u"Maori"],
[u"mr", u"Marathi"],
[u"ms", u"Malay"],
[u"mg", u"Malagasy"],
[u"mt", u"Maltese"],
[u"mn", u"Mongolian"],
[u"na", u"Nauru"],
[u"nv", u"Navajo"],
[u"nr", u"Ndebele, South"],
[u"nd", u"Ndebele, North"],
[u"ng", u"Ndonga"],
[u"ne", u"Nepali"],
[u"nn", u"Norwegian Nynorsk"],
[u"nb", u"Bokmål, Norwegian"],
[u"no", u"Norwegian"],
[u"ny", u"Chichewa"],
[u"oc", u"Occitan"],
[u"oj", u"Ojibwa"],
[u"or", u"Oriya"],
[u"om", u"Oromo"],
[u"os", u"Ossetian"],
[u"pa", u"Panjabi"],
[u"fa", u"Persian"],
[u"pi", u"Pali"],
[u"pl", u"Polish"],
[u"pt", u"Portuguese"],
[u"ps", u"Pushto"],
[u"qu", u"Quechua"],
[u"rm", u"Romansh"],
[u"ro", u"Romanian"],
[u"rn", u"Rundi"],
[u"ru", u"Russian"],
[u"sg", u"Sango"],
[u"sa", u"Sanskrit"],
[u"si", u"Sinhala"],
[u"sk", u"Slovak"],
[u"sl", u"Slovenian"],
[u"se", u"Northern Sami"],
[u"sm", u"Samoan"],
[u"sn", u"Shona"],
[u"sd", u"Sindhi"],
[u"so", u"Somali"],
[u"st", u"Sotho, Southern"],
[u"es", u"Spanish"],
[u"sc", u"Sardinian"],
[u"sr", u"Serbian"],
[u"ss", u"Swati"],
[u"su", u"Sundanese"],
[u"sw", u"Swahili"],
[u"sv", u"Swedish"],
[u"ty", u"Tahitian"],
[u"ta", u"Tamil"],
[u"tt", u"Tatar"],
[u"te", u"Telugu"],
[u"tg", u"Tajik"],
[u"tl", u"Tagalog"],
[u"th", u"Thai"],
[u"bo", u"Tibetan"],
[u"ti", u"Tigrinya"],
[u"to", u"Tonga (Tonga Islands)"],
[u"tn", u"Tswana"],
[u"ts", u"Tsonga"],
[u"tk", u"Turkmen"],
[u"tr", u"Turkish"],
[u"tw", u"Twi"],
[u"ug", u"Uighur"],
[u"uk", u"Ukrainian"],
[u"ur", u"Urdu"],
[u"uz", u"Uzbek"],
[u"ve", u"Venda"],
[u"vi", u"Vietnamese"],
[u"vo", u"Volapük"],
[u"cy", u"Welsh"],
[u"wa", u"Walloon"],
[u"wo", u"Wolof"],
[u"xh", u"Xhosa"],
[u"yi", u"Yiddish"],
[u"yo", u"Yoruba"],
[u"za", u"Zhuang"],
[u"zu", u"Zulu"]
)
### Apps only installed in some instances
OPTIONAL_APPS = (
'mentoring',
'problem_builder',
'edx_sga',
# edx-ora2
'submissions',
'openassessment',
'openassessment.assessment',
'openassessment.fileupload',
'openassessment.workflow',
'openassessment.xblock',
# edxval
'edxval',
# edX Proctoring
'edx_proctoring',
# Organizations App (http://github.com/edx/edx-organizations)
'organizations',
# Enterprise Apps (http://github.com/edx/edx-enterprise)
'enterprise',
'integrated_channels.integrated_channel',
'integrated_channels.sap_success_factors',
# Required by the Enterprise App
'django_object_actions', # https://github.com/crccheck/django-object-actions
)
for app_name in OPTIONAL_APPS:
# First attempt to only find the module rather than actually importing it,
# to avoid circular references - only try to import if it can't be found
# by find_module, which doesn't work with import hooks
try:
imp.find_module(app_name)
except ImportError:
try:
__import__(app_name)
except ImportError:
continue
INSTALLED_APPS += (app_name,)
### ADVANCED_SECURITY_CONFIG
# Empty by default
ADVANCED_SECURITY_CONFIG = {}
### External auth usage -- prefixes for ENROLLMENT_DOMAIN
SHIBBOLETH_DOMAIN_PREFIX = 'shib:'
OPENID_DOMAIN_PREFIX = 'openid:'
### Analytics Data API + Dashboard (Insights) settings
ANALYTICS_DATA_URL = ""
ANALYTICS_DATA_TOKEN = ""
ANALYTICS_DASHBOARD_URL = ""
ANALYTICS_DASHBOARD_NAME = PLATFORM_NAME + " Insights"
# REGISTRATION CODES DISPLAY INFORMATION SUBSTITUTIONS IN THE INVOICE ATTACHMENT
INVOICE_CORP_ADDRESS = "Please place your corporate address\nin this configuration"
INVOICE_PAYMENT_INSTRUCTIONS = "This is where you can\nput directions on how people\ncan buy registration codes"
# Country code overrides
# Used by django-countries
COUNTRIES_OVERRIDE = {
# Taiwan is specifically not translated to avoid it being translated as "Taiwan (Province of China)"
"TW": "Taiwan",
'XK': _('Kosovo'),
}
# which access.py permission name to check in order to determine if a course is visible in
# the course catalog. We default this to the legacy permission 'see_exists'.
COURSE_CATALOG_VISIBILITY_PERMISSION = 'see_exists'
# which access.py permission name to check in order to determine if a course about page is
# visible. We default this to the legacy permission 'see_exists'.
COURSE_ABOUT_VISIBILITY_PERMISSION = 'see_exists'
# Enrollment API Cache Timeout
ENROLLMENT_COURSE_DETAILS_CACHE_TIMEOUT = 60
OAUTH_ID_TOKEN_EXPIRATION = 60 * 60
# These tabs are currently disabled
NOTES_DISABLED_TABS = ['course_structure', 'tags']
# Configuration used for generating PDF Receipts/Invoices
PDF_RECEIPT_TAX_ID = 'add here'
PDF_RECEIPT_FOOTER_TEXT = 'add your own specific footer text here'
PDF_RECEIPT_DISCLAIMER_TEXT = 'add your own specific disclaimer text here'
PDF_RECEIPT_BILLING_ADDRESS = 'add your own billing address here with appropriate line feed characters'
PDF_RECEIPT_TERMS_AND_CONDITIONS = 'add your own terms and conditions'
PDF_RECEIPT_TAX_ID_LABEL = 'Tax ID'
PDF_RECEIPT_LOGO_PATH = PROJECT_ROOT + '/static/images/openedx-logo-tag.png'
# Height of the Logo in mm
PDF_RECEIPT_LOGO_HEIGHT_MM = 12
PDF_RECEIPT_COBRAND_LOGO_PATH = PROJECT_ROOT + '/static/images/logo.png'
# Height of the Co-brand Logo in mm
PDF_RECEIPT_COBRAND_LOGO_HEIGHT_MM = 12
# Use None for the default search engine
SEARCH_ENGINE = None
# Use LMS specific search initializer
SEARCH_INITIALIZER = "lms.lib.courseware_search.lms_search_initializer.LmsSearchInitializer"
# Use the LMS specific result processor
SEARCH_RESULT_PROCESSOR = "lms.lib.courseware_search.lms_result_processor.LmsSearchResultProcessor"
# Use the LMS specific filter generator
SEARCH_FILTER_GENERATOR = "lms.lib.courseware_search.lms_filter_generator.LmsSearchFilterGenerator"
# Override to skip enrollment start date filtering in course search
SEARCH_SKIP_ENROLLMENT_START_DATE_FILTERING = False
### PERFORMANCE EXPERIMENT SETTINGS ###
# CDN experiment/monitoring flags
CDN_VIDEO_URLS = {}
# Page onload event sampling rate (min 0.0, max 1.0)
ONLOAD_BEACON_SAMPLE_RATE = 0.0
# The configuration visibility of account fields.
ACCOUNT_VISIBILITY_CONFIGURATION = {
# Default visibility level for accounts without a specified value
# The value is one of: 'all_users', 'private'
"default_visibility": "all_users",
# The list of all fields that can be shared with other users
"shareable_fields": [
'username',
'profile_image',
'country',
'time_zone',
'language_proficiencies',
'bio',
'account_privacy',
# Not an actual field, but used to signal whether badges should be public.
'accomplishments_shared',
],
# The list of account fields that are always public
"public_fields": [
'username',
'profile_image',
'account_privacy',
],
# The list of account fields that are visible only to staff and users viewing their own profiles
"admin_fields": [
"username",
"email",
"date_joined",
"is_active",
"bio",
"country",
"profile_image",
"language_proficiencies",
"name",
"gender",
"goals",
"year_of_birth",
"level_of_education",
"mailing_address",
"requires_parental_consent",
"account_privacy",
"accomplishments_shared",
]
}
# E-Commerce API Configuration
ECOMMERCE_PUBLIC_URL_ROOT = None
ECOMMERCE_API_URL = None
ECOMMERCE_API_TIMEOUT = 5
ECOMMERCE_SERVICE_WORKER_USERNAME = 'ecommerce_worker'
COURSE_CATALOG_API_URL = None
CREDENTIALS_INTERNAL_SERVICE_URL = None
CREDENTIALS_PUBLIC_SERVICE_URL = None
# Reverification checkpoint name pattern
CHECKPOINT_PATTERN = r'(?P<checkpoint_name>[^/]+)'
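# The pattern captures one URL path segment (any run of characters without a slash)
# and exposes it to the view as the `checkpoint_name` keyword argument.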
# For the fields override feature
# If using FEATURES['INDIVIDUAL_DUE_DATES'], you should add
# 'courseware.student_field_overrides.IndividualStudentOverrideProvider' to
# this setting.
FIELD_OVERRIDE_PROVIDERS = ()
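# Example (only when FEATURES['INDIVIDUAL_DUE_DATES'] is enabled):
# FIELD_OVERRIDE_PROVIDERS = (
#     'courseware.student_field_overrides.IndividualStudentOverrideProvider',
# )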
# Modulestore-level field override providers. These field override providers don't
# require student context.
MODULESTORE_FIELD_OVERRIDE_PROVIDERS = ()
# PROFILE IMAGE CONFIG
# WARNING: Certain django storage backends do not support atomic
# file overwrites (including the default, OverwriteStorage) - instead
# there are separate calls to delete and then write a new file in the
# storage backend. This introduces the risk of a race condition
# occurring when a user uploads a new profile image to replace an
# earlier one (the file will temporarily be deleted).
PROFILE_IMAGE_BACKEND = {
'class': 'storages.backends.overwrite.OverwriteStorage',
'options': {
'location': os.path.join(MEDIA_ROOT, 'profile-images/'),
'base_url': os.path.join(MEDIA_URL, 'profile-images/'),
},
}
PROFILE_IMAGE_DEFAULT_FILENAME = 'images/profiles/default'
PROFILE_IMAGE_DEFAULT_FILE_EXTENSION = 'png'
# This secret key is used in generating unguessable URLs to users'
# profile images. Once it has been set, changing it will make the
# platform unaware of current image URLs, resulting in reverting all
# users' profile images to the default placeholder image.
PROFILE_IMAGE_SECRET_KEY = 'placeholder secret key'
PROFILE_IMAGE_MAX_BYTES = 1024 * 1024
PROFILE_IMAGE_MIN_BYTES = 100
# Sets the maximum number of courses listed on the homepage
# If set to None, all courses will be listed on the homepage
HOMEPAGE_COURSE_MAX = None
################################ Settings for Credit Courses ################################
# Initial delay used for retrying tasks.
# Additional retries use longer delays.
# Value is in seconds.
CREDIT_TASK_DEFAULT_RETRY_DELAY = 30
# Maximum number of retries per task for errors that are not related
# to throttling.
CREDIT_TASK_MAX_RETRIES = 5
# Dummy secret key for dev/test
SECRET_KEY = 'dev key'
# Secret keys shared with credit providers.
# Used to digitally sign credit requests (us --> provider)
# and validate responses (provider --> us).
# Each key in the dictionary is a credit provider ID, and
# the value is the 32-character key.
CREDIT_PROVIDER_SECRET_KEYS = {}
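# Example shape (placeholder provider ID and key):
# CREDIT_PROVIDER_SECRET_KEYS = {"example-provider": "replace-with-a-32-character-key0"}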
# Maximum age in seconds of timestamps we will accept
# when a credit provider notifies us that a student has been approved
# or denied for credit.
CREDIT_PROVIDER_TIMESTAMP_EXPIRATION = 15 * 60
# The Help link to the FAQ page about the credit
CREDIT_HELP_LINK_URL = "#"
# Default domain for the e-mail address associated with users who are created
# via the LTI Provider feature. Note that the generated e-mail addresses are
# not expected to be active; this setting simply allows administrators to
# route any messages intended for LTI users to a common domain.
LTI_USER_EMAIL_DOMAIN = 'lti.example.com'
# An aggregate score is one derived from multiple problems (such as the
# cumulative score for a vertical element containing many problems). Sending
# aggregate scores immediately introduces two issues: one is a race condition
# between the view method and the Celery task where the updated score may not
# yet be visible to the database if the view has not yet returned (and committed
# its transaction). The other is that the student is likely to receive a stream
# of notifications as the score is updated with every problem. Waiting a
# reasonable period of time allows the view transaction to end, and allows us to
# collapse multiple score updates into a single message.
# The time value is in seconds.
LTI_AGGREGATE_SCORE_PASSBACK_DELAY = 15 * 60
# For help generating a key pair, import and run `openedx.core.lib.rsa_key_utils.generate_rsa_key_pair()`
JWT_PRIVATE_SIGNING_KEY = None
JWT_EXPIRED_PRIVATE_SIGNING_KEYS = []
# Credit notifications settings
NOTIFICATION_EMAIL_CSS = "templates/credit_notifications/credit_notification.css"
NOTIFICATION_EMAIL_EDX_LOGO = "templates/credit_notifications/edx-logo-header.png"
################################ Settings for Microsites ################################
### Select an implementation for the microsite backend
# Possible choices for MICROSITE_BACKEND are:
# 1. microsite_configuration.backends.filebased.FilebasedMicrositeBackend
# 2. microsite_configuration.backends.database.DatabaseMicrositeBackend
MICROSITE_BACKEND = 'microsite_configuration.backends.filebased.FilebasedMicrositeBackend'
# Possible choices for MICROSITE_TEMPLATE_BACKEND are:
# 1. microsite_configuration.backends.filebased.FilebasedMicrositeTemplateBackend
# 2. microsite_configuration.backends.database.DatabaseMicrositeTemplateBackend
MICROSITE_TEMPLATE_BACKEND = 'microsite_configuration.backends.filebased.FilebasedMicrositeTemplateBackend'
# TTL for microsite database template cache
MICROSITE_DATABASE_TEMPLATE_CACHE_TTL = 5 * 60
################################ Settings for rss_proxy ################################
RSS_PROXY_CACHE_TIMEOUT = 3600 # The length of time we cache RSS retrieved from remote URLs in seconds
#### PROCTORING CONFIGURATION DEFAULTS
PROCTORING_BACKEND_PROVIDER = {
'class': 'edx_proctoring.backends.null.NullBackendProvider',
'options': {},
}
PROCTORING_BACKEND_PROVIDERS = {
'default': {
'class': 'edx_proctoring.backends.null.NullBackendProvider',
'options': {},
}
}
PROCTORING_SETTINGS = {}
#### Custom Courses for EDX (CCX) configuration
# This is an arbitrary hard limit.
# The reason we introduced this number is that we do not want the CCX
# to compete with the MOOC.
CCX_MAX_STUDENTS_ALLOWED = 200
# Financial assistance settings
# Maximum and minimum length of answers, in characters, for the
# financial assistance form
FINANCIAL_ASSISTANCE_MIN_LENGTH = 800
FINANCIAL_ASSISTANCE_MAX_LENGTH = 2500
# Course Content Bookmarks Settings
MAX_BOOKMARKS_PER_COURSE = 100
#### Registration form extension. ####
# Only used if combined login/registration is enabled.
# This can be used to add fields to the registration page.
# It must be a path to a valid form, in dot-separated syntax.
# e.g.: custom_form_app.forms.RegistrationExtensionForm
# Note: If you want to use a model to store the results of the form, you will
# need to add the model's app to the ADDL_INSTALLED_APPS array in your
# lms.env.json file.
REGISTRATION_EXTENSION_FORM = None
# Identifier included in the User Agent from open edX mobile apps.
MOBILE_APP_USER_AGENT_REGEXES = [
r'edX/org.edx.mobile',
]
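# The regex above matches requests whose User-Agent contains "edX/org.edx.mobile",
# the identifier sent by the Open edX mobile apps.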
# cache timeout in seconds for Mobile App Version Upgrade
APP_UPGRADE_CACHE_TIMEOUT = 3600
# Offset for courseware.StudentModuleHistoryExtended which is used to
# calculate the starting primary key for the underlying table. This gap
# should be large enough that you do not generate more than N courseware.StudentModuleHistory
# records before you have deployed the app to write to coursewarehistoryextended.StudentModuleHistoryExtended
# if you want to avoid an overlap in ids while searching for history across the two tables.
STUDENTMODULEHISTORYEXTENDED_OFFSET = 10000
# Cutoff date for granting audit certificates
AUDIT_CERT_CUTOFF_DATE = None
################################ Settings for Credentials Service ################################
CREDENTIALS_SERVICE_USERNAME = 'credentials_service_user'
CREDENTIALS_GENERATION_ROUTING_KEY = HIGH_PRIORITY_QUEUE
WIKI_REQUEST_CACHE_MIDDLEWARE_CLASS = "request_cache.middleware.RequestCache"
# Settings for Comprehensive Theming app
# See https://github.com/edx/edx-django-sites-extensions for more info
# Default site to use if site matching request headers does not exist
SITE_ID = 1
# dir containing all themes
COMPREHENSIVE_THEME_DIRS = [REPO_ROOT / "themes"]
# Theme directory locale paths
COMPREHENSIVE_THEME_LOCALE_PATHS = []
# Theme to use when no site or site theme is defined,
# set to None if you want to use openedx theme
DEFAULT_SITE_THEME = None
ENABLE_COMPREHENSIVE_THEMING = True
# API access management
API_ACCESS_MANAGER_EMAIL = '[email protected]'
API_ACCESS_FROM_EMAIL = '[email protected]'
API_DOCUMENTATION_URL = 'http://course-catalog-api-guide.readthedocs.io/en/latest/'
AUTH_DOCUMENTATION_URL = 'http://course-catalog-api-guide.readthedocs.io/en/latest/authentication/index.html'
# Affiliate cookie tracking
AFFILIATE_COOKIE_NAME = 'affiliate_id'
############## Settings for RedirectMiddleware ###############
# Setting this to None causes Redirect data to never expire
# The cache is cleared when Redirect models are saved/deleted
REDIRECT_CACHE_TIMEOUT = None # The length of time we cache Redirect model data
REDIRECT_CACHE_KEY_PREFIX = 'redirects'
############## Settings for LMS Context Sensitive Help ##############
HELP_TOKENS_INI_FILE = REPO_ROOT / "lms" / "envs" / "help_tokens.ini"
HELP_TOKENS_BOOKS = {
'learner': 'http://edx.readthedocs.io/projects/open-edx-learner-guide',
'course_author': 'http://edx.readthedocs.io/projects/open-edx-building-and-running-a-course',
}
############## OPEN EDX ENTERPRISE SERVICE CONFIGURATION ######################
# The Open edX Enterprise service is currently hosted via the LMS container/process.
# However, for all intents and purposes this service is treated as a standalone IDA.
# These configuration settings are specific to the Enterprise service and you should
# not find references to them within the edx-platform project.
ENTERPRISE_ENROLLMENT_API_URL = LMS_ROOT_URL + "/api/enrollment/v1/"
ENTERPRISE_PUBLIC_ENROLLMENT_API_URL = ENTERPRISE_ENROLLMENT_API_URL
ENTERPRISE_COURSE_ENROLLMENT_AUDIT_MODES = ['audit', 'honor']
############## ENTERPRISE SERVICE API CLIENT CONFIGURATION ######################
# The LMS communicates with the Enterprise service via the EdxRestApiClient class
# These default settings are utilized by the LMS when interacting with the service,
# and are overridden by the configuration parameter accessors defined in aws.py
ENTERPRISE_API_URL = LMS_ROOT_URL + '/enterprise/api/v1/'
ENTERPRISE_SERVICE_WORKER_USERNAME = 'enterprise_worker'
ENTERPRISE_API_CACHE_TIMEOUT = 3600 # Value is in seconds
ENTERPRISE_CUSTOMER_LOGO_IMAGE_SIZE = 512 # Enterprise logo image size limit in KB's
############## ENTERPRISE SERVICE LMS CONFIGURATION ##################################
# The LMS has some features embedded that are related to the Enterprise service, but
# which are not provided by the Enterprise service. These settings provide base values
# for those features.
ENTERPRISE_PLATFORM_WELCOME_TEMPLATE = _(u'Welcome to {platform_name}.')
ENTERPRISE_SPECIFIC_BRANDED_WELCOME_TEMPLATE = _(
u'{start_bold}{enterprise_name}{end_bold} has partnered with {start_bold}'
'{platform_name}{end_bold} to offer you high-quality learning opportunities '
'from the world\'s best universities.'
)
ENTERPRISE_EXCLUDED_REGISTRATION_FIELDS = {
'age',
'level_of_education',
'gender',
'goals',
'year_of_birth',
'mailing_address',
}
ENTERPRISE_CUSTOMER_COOKIE_NAME = 'enterprise_customer_uuid'
############## Settings for Course Enrollment Modes ######################
COURSE_ENROLLMENT_MODES = {
"audit": 1,
"verified": 2,
"professional": 3,
"no-id-professional": 4,
"credit": 5,
"honor": 6,
}
############## Settings for the Discovery App ######################
COURSES_API_CACHE_TIMEOUT = 3600 # Value is in seconds
############## Settings for CourseGraph ############################
COURSEGRAPH_JOB_QUEUE = LOW_PRIORITY_QUEUE
EDX_GRADING_TYPE = "sequential"
| miptliot/edx-platform | lms/envs/common.py | Python | agpl-3.0 | 112,268 |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : Catalog on the fly
 Description          : Automatically adds images from the catalog layer that intersect the map area.
Date : April, 2015
copyright : (C) 2015 by Luiz Motta
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os.path
from PyQt4.QtGui import ( QAction, QIcon )
from PyQt4.QtCore import ( Qt, QSettings, QTranslator, QCoreApplication, qVersion, pyqtSlot )
from qgis.core import ( QgsProject, QgsMapLayerRegistry )
from catalogotf import ( ProjectDockWidgetCatalogOTF, DockWidgetCatalogOTF )
def classFactory(iface):
return CatalogOTFPlugin( iface )
class CatalogOTFPlugin:
def __init__(self, iface):
def translate():
#
            # To create the 'qm' file:
            # 1) Define the files that need translation: catalogotf.pro
            # 2) Create 'ts': pylupdate4 -verbose catalogotf.pro
            # 3) Edit your translation: QtLinguist
# 4) Create 'qm': lrelease catalogotf_pt_BR.ts
#
dirname = os.path.dirname( os.path.abspath(__file__) )
locale = QSettings().value("locale/userLocale")
localePath = os.path.join( dirname, "i18n", "%s_%s.qm" % ( name_src, locale ) )
if os.path.exists(localePath):
self.translator = QTranslator()
self.translator.load(localePath)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
self.iface = iface
self.projOTF = ProjectDockWidgetCatalogOTF( iface )
self.name = u"&Catalog OTF"
self.dock = None
name_src = "catalogotf"
translate()
def _connect(self, isConnect = True):
signal_slot = (
{ 'signal': QgsProject.instance().readProject, 'slot': self.projOTF.onReadProject },
{ 'signal': QgsProject.instance().writeProject, 'slot': self.projOTF.onWriteProject }
)
if isConnect:
for item in signal_slot:
item['signal'].connect( item['slot'] )
else:
for item in signal_slot:
item['signal'].disconnect( item['slot'] )
def initGui(self):
msgtrans = QCoreApplication.translate("CatalogOTF", "Catalog on the fly")
icon = QIcon( os.path.join( os.path.dirname(__file__), 'catalogotf.svg' ) )
self.action = QAction( icon, msgtrans, self.iface.mainWindow() )
self.action.setObjectName("CatalogOTF")
self.action.setWhatsThis( msgtrans )
self.action.setStatusTip( msgtrans )
self.action.triggered.connect( self.run )
self.iface.addToolBarIcon( self.action )
self.iface.addPluginToRasterMenu( self.name, self.action )
self._connect()
def unload(self):
self.iface.removePluginMenu( self.name, self.action )
self.iface.removeToolBarIcon( self.action )
del self.action
del self.dock
self._connect( False )
@pyqtSlot()
def run(self):
self.dock = DockWidgetCatalogOTF( self.iface )
self.iface.addDockWidget( Qt.LeftDockWidgetArea , self.dock )
self.action.setEnabled( False )
| lmotta/catalog-on-the-fly | __init__.py | Python | gpl-2.0 | 3,754 |
################################################################################
# copyright 2010 Lucas Baudin <[email protected]> #
# #
# This file is part of Ultimate Smash Friends. #
# #
# Ultimate Smash Friends is free software: you can redistribute it and/or #
# modify it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# Ultimate Smash Friends is distributed in the hope that it will be useful, but#
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or#
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along with #
# Ultimate Smash Friends. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
#standards imports
import pygame
import logging
from os.path import join
#our modules
from usf.widgets.widget import optimize_size
from usf.widgets.imagebutton import ImageButton
from usf.widgets.label import Label
from usf.widgets.box import HBox
from usf import CONFIG
class Spinner(HBox):
"""
A spinner widget. (which could be called "select" too)
It looks like this:
<- text ->
It can be used to select an option (like a character, a game
mode, etc...).
"""
def __init__(self, values, width=100):
"""
values is an array of string. Each string is an option.
"""
super(Spinner, self).__init__()
self.focusable = False
self.parentpos = (0, 0)
self.extend = False
self.values = values
self.orientation = True
self.center_width = width
self.surface = pygame.Surface((self.width, self.height))
self.widgets = []
self.left_arrow = ImageButton(
join("gui", CONFIG.general.THEME, "spinner_left.png"),
join("gui", CONFIG.general.THEME, "spinner_left_hover.png"))
self.left_arrow.set_size((37, 45))
self.add(self.left_arrow, margin = 0)
self.center = Label('',
background=join(
"gui",
CONFIG.general.THEME,
"spinner_center.png"),
align="center",
width=100,
height=35)
self.add(self.center, margin = 0, size=(self.center_width, 45))
self.right_arrow = ImageButton(
join("gui", CONFIG.general.THEME, "spinner_right.png"),
join("gui", CONFIG.general.THEME, "spinner_right_hover.png"))
self.right_arrow.set_size((37, 45))
self.add(self.right_arrow, margin = 0)
self.update_pos()
self.update_size()
self.text = values[0]
self.state = False
self.height = optimize_size((250, 30))[1]
self.width = (
optimize_size((25, 30))[0] * 2 +
optimize_size((self.center_width, 30))[0])
self.set_index(0)
def handle_mouse(self, event):
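        # Convert the absolute mouse position into widget-local coordinates, then
        # hit-test the arrows; a click on an arrow steps the index with wrap-around.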
if self.state:
event.dict['pos'] = (
event.dict['pos'][0] - self.parentpos[0] - self.x,
event.dict['pos'][1] - self.parentpos[1] - self.y)
x = event.dict['pos'][0]
y = event.dict['pos'][1]
self.left_arrow.state = False
self.right_arrow.state = False
if (
0 < event.dict['pos'][0] < self.width and
0 < event.dict['pos'][1] < self.height):
self.state = True
for widget in self.widgets:
if (
widget.x < x < widget.x + widget.width and
widget.y < y < widget.y+widget.height):
if widget == self.left_arrow or widget == self.right_arrow:
widget.state = True
if event.type == pygame.MOUSEBUTTONUP:
if widget == self.right_arrow:
self.index += 1
if self.index > len(self.values)-1:
self.index = 0
else:
self.index -= 1
if self.index < 0:
self.index = len(self.values)-1
self.text = self.values[self.index]
self.center.set_text(self.text)
return (self, False)
return False, self
self.state = False
return (False, False)
def get_value(self):
"""
Get the current value of the spinner.
"""
return self.values[self.index]
def get_index(self):
"""
Get the index (the range of the current value).
"""
return self.index
def set_index(self, index):
"""
Set the index (the range of the current value) of the spinner.
"""
try:
self.index = index
self.text = self.values[self.index]
self.center.set_text(self.text)
except IndexError:
logging.warning("Not enough value in the spinner: " + str(index))
def set_value(self, value):
"""
Set the value of the spinner. The value must be in the array that was
passed in the __init__ function.
"""
try:
self.set_index(self.values.index(value))
except ValueError:
logging.warning("No entry named: " + str(value))
def handle_keys(self, event):
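        # Up/Down give the spinner keyboard focus (highlighting both arrows);
        # Left/Right step the current index without wrapping.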
if (
event.dict["key"] == pygame.K_DOWN or
event.dict["key"] == pygame.K_UP) and not self.state:
self.state = True
self.right_arrow.state = True
self.left_arrow.state = True
return False, self
if event.dict["key"] == pygame.K_RIGHT:
if self.get_index() + 1 < len(self.values):
self.set_index(self.get_index() + 1)
return self, self
if event.dict["key"] == pygame.K_LEFT:
if self.get_index() > 0:
self.set_index(self.get_index() - 1)
return self, self
self.right_arrow.state = False
self.left_arrow.state = False
self.state = False
return False, False
| tshirtman/ultimate-smash-friends | usf/widgets/spinner.py | Python | gpl-3.0 | 6,964 |
import html
import os
import jinja2
from PyQt5 import QtCore, QtGui, QtWidgets
import util
from fa import maps
class GameView(QtCore.QObject):
"""
Helps with displaying games in the game widget. Forwards
interaction with the view.
"""
game_double_clicked = QtCore.pyqtSignal(object)
def __init__(self, model, view, delegate):
QtCore.QObject.__init__(self)
self._model = model
self._view = view
self._delegate = delegate
self._view.setModel(self._model)
self._view.setItemDelegate(self._delegate)
self._view.doubleClicked.connect(self._game_double_clicked)
self._view.viewport().installEventFilter(self._delegate.tooltip_filter)
# TODO make it a utility function?
def _model_items(self):
model = self._model
for i in range(model.rowCount(QtCore.QModelIndex())):
yield model.index(i, 0)
def _game_double_clicked(self, idx):
self.game_double_clicked.emit(idx.data().game)
class GameItemDelegate(QtWidgets.QStyledItemDelegate):
ICON_RECT = 100
ICON_CLIP_TOP_LEFT = 3
ICON_CLIP_BOTTOM_RIGHT = -7
ICON_SHADOW_OFFSET = 8
SHADOW_COLOR = QtGui.QColor("#202020")
FRAME_THICKNESS = 1
FRAME_COLOR = QtGui.QColor("#303030")
TEXT_OFFSET = 10
TEXT_RIGHT_MARGIN = 5
TEXT_WIDTH = 250
ICON_SIZE = 110
PADDING = 10
def __init__(self, formatter):
QtWidgets.QStyledItemDelegate.__init__(self)
self._formatter = formatter
self.tooltip_filter = GameTooltipFilter(self._formatter)
def paint(self, painter, option, index):
painter.save()
data = index.data()
text = self._formatter.text(data)
icon = self._formatter.icon(data)
self._draw_clear_option(painter, option)
self._draw_icon_shadow(painter, option)
self._draw_icon(painter, option, icon)
self._draw_frame(painter, option)
self._draw_text(painter, option, text)
painter.restore()
def _draw_clear_option(self, painter, option):
option.icon = QtGui.QIcon()
option.text = ""
option.widget.style().drawControl(
QtWidgets.QStyle.CE_ItemViewItem, option, painter, option.widget,
)
def _draw_icon_shadow(self, painter, option):
painter.fillRect(
option.rect.left() + self.ICON_SHADOW_OFFSET,
option.rect.top() + self.ICON_SHADOW_OFFSET,
self.ICON_RECT,
self.ICON_RECT,
self.SHADOW_COLOR,
)
def _draw_icon(self, painter, option, icon):
rect = option.rect.adjusted(
self.ICON_CLIP_TOP_LEFT,
self.ICON_CLIP_TOP_LEFT,
self.ICON_CLIP_BOTTOM_RIGHT,
self.ICON_CLIP_BOTTOM_RIGHT,
)
icon.paint(painter, rect, QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)
def _draw_frame(self, painter, option):
pen = QtGui.QPen()
pen.setWidth(self.FRAME_THICKNESS)
pen.setBrush(self.FRAME_COLOR)
pen.setCapStyle(QtCore.Qt.RoundCap)
painter.setPen(pen)
painter.drawRect(
option.rect.left() + self.ICON_CLIP_TOP_LEFT,
option.rect.top() + self.ICON_CLIP_TOP_LEFT,
self.ICON_RECT,
self.ICON_RECT,
)
def _draw_text(self, painter, option, text):
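        # Render the HTML-formatted text to the right of the icon, clipped to the
        # remaining width and height of the item rectangle.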
left_off = self.ICON_RECT + self.TEXT_OFFSET
top_off = self.TEXT_OFFSET
right_off = self.TEXT_RIGHT_MARGIN
bottom_off = 0
painter.translate(
option.rect.left() + left_off,
option.rect.top() + top_off,
)
clip = QtCore.QRectF(
0,
0,
option.rect.width() - left_off - right_off,
option.rect.height() - top_off - bottom_off,
)
        doc = QtGui.QTextDocument()
        doc.setHtml(text)
        doc.drawContents(painter, clip)
def sizeHint(self, option, index):
return QtCore.QSize(
self.ICON_SIZE + self.TEXT_WIDTH + self.PADDING,
self.ICON_SIZE,
)
class GameTooltipFilter(QtCore.QObject):
def __init__(self, formatter):
QtCore.QObject.__init__(self)
self._formatter = formatter
def eventFilter(self, obj, event):
if event.type() == QtCore.QEvent.ToolTip:
return self._handle_tooltip(obj, event)
else:
return super().eventFilter(obj, event)
def _handle_tooltip(self, widget, event):
view = widget.parent()
idx = view.indexAt(event.pos())
if not idx.isValid():
return False
tooltip_text = self._formatter.tooltip(idx.data())
QtWidgets.QToolTip.showText(event.globalPos(), tooltip_text, widget)
return True
class GameItemFormatter:
FORMATTER_FAF = str(util.THEME.readfile("games/formatters/faf.qthtml"))
FORMATTER_MOD = str(util.THEME.readfile("games/formatters/mod.qthtml"))
def __init__(self, playercolors, me):
self._colors = playercolors
self._me = me
self._tooltip_formatter = GameTooltipFormatter(self._me)
def _featured_mod(self, game):
return game.featured_mod in ["faf", "coop"]
def _host_color(self, game):
hostid = game.host_player.id if game.host_player is not None else -1
return self._colors.get_user_color(hostid)
def text(self, data):
game = data.game
players = game.num_players - len(game.observers)
formatting = {
"color": self._host_color(game),
"mapslots": game.max_players,
"mapdisplayname": html.escape(game.mapdisplayname),
"title": html.escape(game.title),
"host": html.escape(game.host),
"players": players,
"playerstring": "player" if players == 1 else "players",
"avgrating": int(game.average_rating),
}
if self._featured_mod(game):
return self.FORMATTER_FAF.format(**formatting)
else:
formatting["mod"] = html.escape(game.featured_mod)
return self.FORMATTER_MOD.format(**formatting)
def icon(self, data):
game = data.game
name = game.mapname.lower()
if game.password_protected:
return util.THEME.icon("games/private_game.png")
icon = maps.preview(name)
if icon is not None:
return icon
return util.THEME.icon("games/unknown_map.png")
def needed_map_preview(self, data):
game = data.game
name = game.mapname.lower()
if game.password_protected or maps.preview(name) is not None:
return None
return name
def _game_teams(self, game):
teams = {
index: [
game.to_player(name)
if game.is_connected(name)
else name
for name in team
]
for index, team in game.playing_teams.items()
}
# Sort teams into a list
# TODO - I believe there's a convention where team 1 is 'no team'
teamlist = sorted([indexed_team for indexed_team in teams.items()])
teamlist = [team for index, team in teamlist]
return teamlist
def _game_observers(self, game):
return [
game.to_player(name)
for name in game.observers
if game.is_connected(name)
]
def tooltip(self, data):
game = data.game
teams = self._game_teams(game)
observers = self._game_observers(game)
title = game.title
title = title.replace("<", "<")
title = title.replace(">", ">")
return self._tooltip_formatter.format(
title, teams, observers, game.sim_mods,
)
class GameTooltipFormatter:
def __init__(self, me):
self._me = me
template_abs_path = os.path.join(
util.COMMON_DIR, "games", "gameitem.qthtml",
)
with open(template_abs_path, "r") as templatefile:
self._template = jinja2.Template(templatefile.read())
def format(self, title, teams, observers, mods):
icon_path = os.path.join("chat", "countries/")
icon_abs_path = os.path.join(util.COMMON_DIR, icon_path)
return self._template.render(
title=title, teams=teams,
mods=mods.values(), observers=observers,
me=self._me.player,
iconpath=icon_abs_path,
)
class GameViewBuilder:
def __init__(self, me, player_colors):
self._me = me
self._player_colors = player_colors
def __call__(self, model, view):
game_formatter = GameItemFormatter(self._player_colors, self._me)
game_delegate = GameItemDelegate(game_formatter)
gameview = GameView(model, view, game_delegate)
return gameview
| FAForever/client | src/games/gameitem.py | Python | gpl-3.0 | 8,871 |
import pytest
"""
Order is important currently:
self._get_packages()
self._create_package()
self._get_package()
self._find_package()
self._copy_package()
self._rename_package()
self._remove_package()
"""
@pytest.fixture()
def create_package(remote, token):
"""
Adds a "testpackage0" for a test.
    :param remote: See the corresponding fixture.
    :param token: See the corresponding fixture.
"""
package = remote.new_package(token)
remote.modify_package(package, "name", "testpackage0", token)
remote.save_package(package, token)
@pytest.fixture()
def remove_package(remote, token):
"""
Removes a "testpackage0" for a test.
    :param remote: See the corresponding fixture.
    :param token: See the corresponding fixture.
"""
yield
remote.remove_package("testpackage0", token)
@pytest.mark.usefixtures("cobbler_xmlrpc_base")
class TestPackage:
@pytest.mark.usefixtures("remove_package")
def test_create_package(self, remote, token):
"""
Test: create/edit a package object
"""
packages = remote.get_packages(token)
package = remote.new_package(token)
assert remote.modify_package(package, "name", "testpackage0", token)
assert remote.save_package(package, token)
new_packages = remote.get_packages(token)
assert len(new_packages) == len(packages) + 1
@pytest.mark.usefixtures("create_package", "remove_package")
def test_get_packages(self, remote, token):
"""
Test: Get packages
"""
package = remote.get_packages()
@pytest.mark.usefixtures("create_package", "remove_package")
def test_get_package(self, remote):
"""
Test: Get a package object
"""
package = remote.get_package("testpackage0")
@pytest.mark.usefixtures("create_package", "remove_package")
def test_find_package(self, remote, token):
"""
Test: find a package object
"""
result = remote.find_package({"name": "testpackage0"}, token)
assert result
@pytest.mark.usefixtures("create_package", "remove_package")
def test_copy_package(self, remote, token):
"""
Test: copy a package object
"""
# Arrange --> Done in fixture
# Act
package = remote.get_item_handle("package", "testpackage0", token)
result = remote.copy_package(package, "testpackagecopy", token)
# Assert
assert result
# Cleanup
remote.remove_package("testpackagecopy", token)
@pytest.mark.usefixtures("create_package", "remove_package")
def test_rename_package(self, remote, token):
"""
Test: rename a package object
"""
package = remote.get_item_handle("package", "testpackage0", token)
assert remote.rename_package(package, "testpackage1", token)
package = remote.get_item_handle("package", "testpackage1", token)
assert remote.rename_package(package, "testpackage0", token)
@pytest.mark.usefixtures("create_package")
def test_remove_package(self, remote, token):
"""
Test: remove a package object
"""
assert remote.remove_package("testpackage0", token)
| jmaas/cobbler | tests/xmlrpcapi/package_test.py | Python | gpl-2.0 | 3,261 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Sorna documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 28 20:12:45 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
sys.path.insert(0, '..')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Sorna'
copyright = '2015, Lablup Inc.'
author = 'Lablup Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Sornadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Sorna.tex', 'Sorna Documentation',
'Lablup Inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sorna', 'Sorna Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Sorna', 'Sorna Documentation',
author, 'Sorna', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| lablup/sorna-manager | docs/conf.py | Python | lgpl-3.0 | 9,232 |
# -*- coding: utf-8 -*-
################################################################################
# Copyright (C) 2012 Travis Shirk <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
################################################################################
'''
Here lies Apple frames, all of which are non-standard. All of these would have
been standard user text frames by anyone not being a bastard, on purpose.
'''
from .frames import Frame, TextFrame
class PCST(Frame):
'''Indicates a podcast. The 4 bytes of data is undefined, and is typically
all 0.'''
def __init__(self, id="PCST"):
super(PCST, self).__init__("PCST")
def render(self):
self.data = b"\x00" * 4
return super(PCST, self).render()
class TKWD(TextFrame):
'''Podcast keywords.'''
def __init__(self, id="TKWD"):
super(TKWD, self).__init__("TKWD")
class TDES(TextFrame):
'''Podcast description. One encoding byte followed by text per encoding.'''
def __init__(self, id="TDES"):
super(TDES, self).__init__("TDES")
class TGID(TextFrame):
'''Podcast URL of the audio file. This should be a W frame!'''
def __init__(self, id="TGID"):
super(TGID, self).__init__("TGID")
class WFED(TextFrame):
'''Another podcast URL, the feed URL it is said.'''
def __init__(self, id="WFED", url=""):
super(WFED, self).__init__("WFED", unicode(url))
| daltonsena/eyed3 | src/eyed3/id3/apple.py | Python | gpl-2.0 | 2,111 |
from django.conf.urls import *
urlpatterns = patterns('basic.messages.views',
url(r'(?P<mailbox>inbox|trash|sent)/$',
view='message_list',
name='messages'),
url(r'compose(?:/(?P<content_type_id>\w+):(?P<object_id>\w+))?/$',
view='message_create',
name='create'),
url(r'remove/(?P<object_id>\d+)/$',
view='message_remove',
name='remove'),
url(r'(?P<object_id>\d+)/reply/$',
view='message_reply',
name='reply'),
url(r'(?:(?P<mailbox>inbox|trash|sent)/)?(?P<object_id>\d+)/$',
view='message_detail',
name='message'),
url(r'',
view='message_list',
name='messages'),
)
| hittu123/ruhive | src/basic/messages/urls.py | Python | mit | 691 |
#coding: utf-8
import csv
import threading
import lucene
from lucene import getVMEnv
print "预处理"
INDEX_DIR = '../index/'
nt = 100000
WRITE_DIR = "../data/mult/"
lucene.initVM()
directory = lucene.SimpleFSDirectory(lucene.File(INDEX_DIR))
analyzer = lucene.StandardAnalyzer(lucene.Version.LUCENE_CURRENT)
class sub_thread(threading.Thread):
"""
"""
def __init__(self, cont,lab,i):
"""
Arguments:
- `train`:
"""
threading.Thread.__init__(self)
self.content = cont
self.label = lab
self.i = i
print "len label",len(self.label)
def run(self):
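        # For every content line assigned to this worker, run a Lucene query
        # against the index and write "label,tag-of-best-hit" rows to CSV.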
owf = "%sresult%s.csv"%(WRITE_DIR,self.i)
print owf
t = open(owf,"w")
getVMEnv().attachCurrentThread()
searcher = lucene.IndexSearcher(directory,True)
a = 0
for line in self.content:
query = lucene.QueryParser(lucene.Version.LUCENE_CURRENT,
'content',analyzer).parse(line)
results = searcher.search(query,None,1)
score_docs = results.scoreDocs
b = 0
for score_doc in score_docs:
doc = searcher.doc(score_doc.doc)
b += 1
result = doc['tag']
t.write("%s,\"%s\"\n"%(self.label[a],result.strip()))
a += 1
if a % 10 == 0:
                print "thread %s: processed %s queries, fraction %s done" % (self.i, a, 1.0*a/len(self.content))
def div(n,length):
"""
"""
result = []
for i in range(length+1):
if i % n == 0 or i == length:
result.append(i)
    return result
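# Illustrative example of the chunking above (with nt = 100000):
#   div(100000, 250000) -> [0, 100000, 200000, 250000]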
def main():
"""
"""
print "读文件"
f = open("../data/test.csv")
reader = csv.reader(f)
content = []
for row in reader:
content.append(row[0]+" "+row[1])
    print "number of test rows", len(content)
turn = div(nt,len(content))
print turn
f.close()
    print "Reading labels"
g = open("../data/label.txt")
label = g.readlines()
label = [word.strip() for word in label]
label = label[1:]
for i in range(len(turn)-1):
sub_cont = content[turn[i] : turn[i+1] ]
sub_label = label[turn[i] : turn[i+1]][:]
mthread = sub_thread(sub_cont,sub_label,i)
mthread.start()
if __name__ == '__main__':
print "hello world"
main()
| lavizhao/keyword | lucene/mult_search.py | Python | apache-2.0 | 2,456 |
"""
A Printer for generating executable code.
The most important function here is srepr that returns a string so that the
relation eval(srepr(expr))=expr holds in an appropriate environment.
"""
from printer import Printer
from sympy.core import Basic
import sympy.mpmath.libmp as mlib
from sympy.mpmath.libmp import prec_to_dps, repr_dps
class ReprPrinter(Printer):
printmethod = "_sympyrepr"
def reprify(self, args, sep):
return sep.join([self.doprint(item) for item in args])
def emptyPrinter(self, expr):
if isinstance(expr, str):
return expr
elif hasattr(expr, "__srepr__"):
return expr.__srepr__()
elif hasattr(expr, "args") and hasattr(expr.args, "__iter__"):
l = []
for o in expr.args:
l.append(self._print(o))
return expr.__class__.__name__ + '(%s)'%', '.join(l)
elif hasattr(expr, "__module__") and hasattr(expr, "__name__"):
return "<'%s.%s'>"%(expr.__module__, expr.__name__)
else:
return str(expr)
def _print_Add(self, expr):
args = list(expr.args)
args.sort(Basic._compare_pretty)
args = map(self._print, args)
return "Add(%s)"%", ".join(args)
def _print_Function(self, expr):
r = '%s(%r)' % (expr.func.__base__.__name__, expr.func.__name__)
r+= '(%s)' % ', '.join([self._print(a) for a in expr.args])
return r
def _print_FunctionClass(self, expr):
return 'Function(%r)'%(expr.__name__)
def _print_GeometryEntity(self, expr):
# GeometryEntity is special -- its base is tuple
return repr(expr)
def _print_Infinity(self, expr):
return 'Infinity'
def _print_Integer(self, expr):
return '%s(%s)' % (expr.__class__.__name__, self._print(expr.p))
def _print_list(self, expr):
return "[%s]"%self.reprify(expr, ", ")
def _print_Matrix(self, expr):
l = []
for i in range(expr.rows):
l.append([])
for j in range(expr.cols):
l[-1].append(expr[i,j])
return '%s(%s)' % (expr.__class__.__name__, self._print(l))
def _print_NaN(self, expr):
return "nan"
def _print_NegativeInfinity(self, expr):
return "NegativeInfinity"
def _print_NegativeOne(self, expr):
return "NegativeOne"
def _print_One(self, expr):
return "One"
def _print_Rational(self, expr):
return '%s(%s, %s)' % (expr.__class__.__name__, self._print(expr.p), self._print(expr.q))
def _print_Fraction(self, expr):
return '%s(%s, %s)' % (expr.__class__.__name__, self._print(expr.numerator), self._print(expr.denominator))
def _print_Real(self, expr):
dps = prec_to_dps(expr._prec)
r = mlib.to_str(expr._mpf_, repr_dps(expr._prec))
return "%s('%s', prec=%i)" % (expr.__class__.__name__, r, dps)
def _print_Sum2(self, expr):
return "Sum2(%s, (%s, %s, %s))" % (self._print(expr.f), self._print(expr.i),
self._print(expr.a), self._print(expr.b))
def _print_Symbol(self, expr):
return "%s(%s)" % (expr.__class__.__name__, self._print(expr.name))
def _print_Predicate(self, expr):
return "%s(%s)" % (expr.__class__.__name__, self._print(expr.name))
def _print_str(self, expr):
return repr(expr)
def _print_tuple(self, expr):
if len(expr)==1:
return "(%s,)"%self._print(expr[0])
else:
return "(%s)"%self.reprify(expr, ", ")
def _print_WildFunction(self, expr):
return "%s('%s')" % (expr.__class__.__name__, expr.name)
def _print_Zero(self, expr):
return "Zero"
def _print_AlgebraicNumber(self, expr):
return "%s(%s, %s)" % (self.__class__.__name__,
self._print(self.coeffs()), self._print(expr.root))
def srepr(expr, **settings):
"""return expr in repr form"""
return ReprPrinter(settings).doprint(expr)
| tarballs-are-good/sympy | sympy/printing/repr.py | Python | bsd-3-clause | 4,037 |
from datetime import datetime
from http import client
from unittest import skipIf
import uuid
from django.urls import reverse
from rest_framework.test import APITestCase
from common.models import (
CassandraFamilyMember,
CassandraThing,
CassandraThingMultiplePK,
)
from common.serializers import CassandraFamilyMemberSerializer
from django_cassandra_engine.test import TestCase as CassandraTestCase
class TestModelViewSet(APITestCase):
url = "/common/thing-modelviewset/"
def setUp(self):
self.data2a = {
"id": "a9be910b-3338-4340-b773-f7ec2bc1ce1a",
"data_abstract": "TeXt",
}
self.data2b = {
"id": "a9be910b-3338-4340-b773-f7ec2bc1ce1b",
"data_abstract": "TeXt",
}
def test_create_thing2a(self):
response = self.client.post(self.url, self.data2a, format="json")
self.assertEqual(response.status_code, client.CREATED)
self.assertEqual(CassandraThing.objects.count(), 1)
self.assertEqual(CassandraThing.objects.get().id, uuid.UUID(self.data2a["id"]))
get_url = "{}{}/".format(self.url, self.data2a["id"])
response = self.client.get(get_url, format="json")
self.assertDictEqual(response.json(), self.data2a)
self.assertEqual(response.status_code, client.OK)
def test_create_thing2b(self):
response = self.client.post(self.url, self.data2b, format="json")
self.assertEqual(response.status_code, client.CREATED)
self.assertEqual(CassandraThing.objects.count(), 1)
self.assertEqual(
CassandraThing.objects.get(id=self.data2b["id"]).id,
uuid.UUID(self.data2b["id"]),
)
get_url = "{}{}/".format(self.url, self.data2b["id"])
response = self.client.get(get_url, format="json")
self.assertDictEqual(response.json(), self.data2b)
self.assertEqual(response.status_code, client.OK)
class TestListCreateAPIViewWithMultiplePK(APITestCase):
def setUp(self):
self.data = {
"id": "a9be910b-3338-4340-b773-f7ec2bc1ce1a",
"another_id": "a9be910b-3338-4340-b773-f7ec2bc1ce1b",
"data_abstract": "TeXt",
"created_on": "2016-11-12T23:12",
}
def test_create_multiple_pk_thing(self):
url = reverse("thing_listcreate_api")
response = self.client.post(url, self.data)
expected_json = {
"another_id": "a9be910b-3338-4340-b773-f7ec2bc1ce1b",
"created_on": "2016-11-12T23:12:00Z",
"data_abstract": "TeXt",
"id": "a9be910b-3338-4340-b773-f7ec2bc1ce1a",
}
self.assertEqual(response.status_code, client.CREATED)
self.assertDictEqual(response.json(), expected_json)
model = CassandraThingMultiplePK.objects.get(id=self.data["id"])
self.assertEqual(str(model.id), self.data["id"])
self.assertEqual(str(model.another_id), self.data["another_id"])
self.assertEqual(model.data_abstract, self.data["data_abstract"])
class TestSerializer(APITestCase, CassandraTestCase):
def test_serialize_creates(self):
now = datetime.now()
data = {
"id": str(uuid.uuid4()),
"first_name": "Homer",
"last_name": "Simpson",
"is_real": True,
"favourite_number": 10,
"favourite_float_number": float(10.10),
"created_on": now,
}
serializer = CassandraFamilyMemberSerializer(data=data)
serializer.is_valid()
self.assertEqual(serializer.errors, {})
self.assertEqual(serializer.is_valid(), True)
serializer.save()
self.assertEqual(CassandraFamilyMember.objects.all().count(), 1)
model = CassandraFamilyMember.objects.all()[0]
self.assertEqual(model.first_name, "Homer")
self.assertEqual(model.last_name, "Simpson")
self.assertEqual(model.is_real, True)
self.assertEqual(model.favourite_number, 10)
self.assertEqual(model.id, uuid.UUID(data["id"]))
| r4fek/django-cassandra-engine | testproject/common/tests/test_drf.py | Python | bsd-2-clause | 4,079 |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/isis-neighbor-attribute/neighbors/neighbor/undefined-subtlvs/undefined-subtlv/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State parameters of the undefined sub-TLV.
"""
__slots__ = ("_path_helper", "_extmethods", "__type", "__length", "__value")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
self.__length = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="length",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
self.__value = YANGDynClass(
base=bitarray,
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="binary",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"isis-neighbor-attribute",
"neighbors",
"neighbor",
"undefined-subtlvs",
"undefined-subtlv",
"state",
]
def _get_type(self):
"""
Getter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor/undefined_subtlvs/undefined_subtlv/state/type (uint8)
YANG Description: TLV Type.
"""
return self.__type
def _set_type(self, v, load=False):
"""
Setter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor/undefined_subtlvs/undefined_subtlv/state/type (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_type() directly.
YANG Description: TLV Type.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """type must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__type = t
if hasattr(self, "_set"):
self._set()
def _unset_type(self):
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
def _get_length(self):
"""
Getter method for length, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor/undefined_subtlvs/undefined_subtlv/state/length (uint8)
YANG Description: TLV length.
"""
return self.__length
def _set_length(self, v, load=False):
"""
Setter method for length, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor/undefined_subtlvs/undefined_subtlv/state/length (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_length is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_length() directly.
YANG Description: TLV length.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="length",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """length must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="length", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__length = t
if hasattr(self, "_set"):
self._set()
def _unset_length(self):
self.__length = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="length",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
def _get_value(self):
"""
Getter method for value, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor/undefined_subtlvs/undefined_subtlv/state/value (binary)
YANG Description: TLV value.
"""
return self.__value
def _set_value(self, v, load=False):
"""
Setter method for value, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor/undefined_subtlvs/undefined_subtlv/state/value (binary)
If this variable is read-only (config: false) in the
source YANG file, then _set_value is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_value() directly.
YANG Description: TLV value.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=bitarray,
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="binary",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """value must be of a type compatible with binary""",
"defined-type": "binary",
"generated-type": """YANGDynClass(base=bitarray, is_leaf=True, yang_name="value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='binary', is_config=False)""",
}
)
self.__value = t
if hasattr(self, "_set"):
self._set()
def _unset_value(self):
self.__value = YANGDynClass(
base=bitarray,
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="binary",
is_config=False,
)
type = __builtin__.property(_get_type)
length = __builtin__.property(_get_length)
value = __builtin__.property(_get_value)
_pyangbind_elements = OrderedDict(
[("type", type), ("length", length), ("value", value)]
)
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/isis-neighbor-attribute/neighbors/neighbor/undefined-subtlvs/undefined-subtlv/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State parameters of the undefined sub-TLV.
"""
__slots__ = ("_path_helper", "_extmethods", "__type", "__length", "__value")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
self.__length = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="length",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
self.__value = YANGDynClass(
base=bitarray,
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="binary",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"isis-neighbor-attribute",
"neighbors",
"neighbor",
"undefined-subtlvs",
"undefined-subtlv",
"state",
]
def _get_type(self):
"""
Getter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor/undefined_subtlvs/undefined_subtlv/state/type (uint8)
YANG Description: TLV Type.
"""
return self.__type
def _set_type(self, v, load=False):
"""
Setter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor/undefined_subtlvs/undefined_subtlv/state/type (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_type() directly.
YANG Description: TLV Type.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """type must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__type = t
if hasattr(self, "_set"):
self._set()
def _unset_type(self):
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
def _get_length(self):
"""
Getter method for length, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor/undefined_subtlvs/undefined_subtlv/state/length (uint8)
YANG Description: TLV length.
"""
return self.__length
def _set_length(self, v, load=False):
"""
Setter method for length, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor/undefined_subtlvs/undefined_subtlv/state/length (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_length is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_length() directly.
YANG Description: TLV length.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="length",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """length must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="length", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__length = t
if hasattr(self, "_set"):
self._set()
def _unset_length(self):
self.__length = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="length",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
def _get_value(self):
"""
Getter method for value, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor/undefined_subtlvs/undefined_subtlv/state/value (binary)
YANG Description: TLV value.
"""
return self.__value
def _set_value(self, v, load=False):
"""
Setter method for value, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor/undefined_subtlvs/undefined_subtlv/state/value (binary)
If this variable is read-only (config: false) in the
source YANG file, then _set_value is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_value() directly.
YANG Description: TLV value.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=bitarray,
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="binary",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """value must be of a type compatible with binary""",
"defined-type": "binary",
"generated-type": """YANGDynClass(base=bitarray, is_leaf=True, yang_name="value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='binary', is_config=False)""",
}
)
self.__value = t
if hasattr(self, "_set"):
self._set()
def _unset_value(self):
self.__value = YANGDynClass(
base=bitarray,
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="binary",
is_config=False,
)
type = __builtin__.property(_get_type)
length = __builtin__.property(_get_length)
value = __builtin__.property(_get_value)
_pyangbind_elements = OrderedDict(
[("type", type), ("length", length), ("value", value)]
)
| napalm-automation/napalm-yang | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor/undefined_subtlvs/undefined_subtlv/state/__init__.py | Python | apache-2.0 | 26,641 |
# -*- coding: utf-8 -*-
# Copyright 2014, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import os
import subprocess
import warnings
from functools import wraps
# for setup.py
try:
from .exceptions import IncompatibleVersionException, InvalidVersionStringException
except Exception:
pass
VERSION = (0, 8, 0, 'dev', 0)
def get_version(version):
"""
Returns a PEP 440-compliant version number from VERSION.
Created by modifying django.utils.version.get_version
"""
# Now build the two parts of the version number:
# major = X.Y[.Z]
# sub = .devN - for development releases
# | {a|b|rc}N - for alpha, beta and rc releases
# | .postN - for post-release releases
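    # Illustrative examples derived from the rules above:
    #   (1, 2, 0, 'final', 0) -> "1.2.0"
    #   (1, 2, 0, 'rc', 1)    -> "1.2.0rc1"
    #   (1, 2, 1, 'post', 2)  -> "1.2.1.post2"
    #   (0, 8, 0, 'dev', 0)   -> "0.8.0.dev<git timestamp>"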
assert len(version) == 5
version_parts = version[:3]
# Build the first part of the version
major = '.'.join(str(x) for x in version_parts)
# Just return it if this is a final release version
if version[3] == 'final':
return major
# Add the rest
sub = ''.join(str(x) for x in version[3:5])
if version[3] == 'dev':
# Override the sub part. Add in a timestamp
timestamp = get_git_changeset()
sub = 'dev%s' % (timestamp if timestamp else '')
return '%s.%s' % (major, sub)
if version[3] == 'post':
# We need a dot for post
return '%s.%s' % (major, sub)
elif version[3] in ('a', 'b', 'rc'):
# No dot for these
return '%s%s' % (major, sub)
else:
raise ValueError('Invalid version: %s' % str(version))
# Borrowed directly from django
def get_git_changeset():
"""Returns a numeric identifier of the latest git changeset.
The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
This value isn't guaranteed to be unique, but collisions are very unlikely,
so it's sufficient for generating the development version numbers.
"""
repo_dir = os.path.dirname(os.path.abspath(__file__))
git_log = subprocess.Popen('git log --pretty=format:%ct --quiet -1 HEAD',
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True, cwd=repo_dir, universal_newlines=True)
timestamp = git_log.communicate()[0]
try:
timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
return timestamp.strftime('%Y%m%d%H%M%S')
except ValueError:
return None
__version__ = get_version(VERSION)
def deprecated(func):
"""
This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.
"""
@wraps(func)
def wrapper(*args, **kwargs):
warnings.warn("Call to deprecated function {}.".format(func.__name__),
category=DeprecationWarning)
return func(*args, **kwargs)
return wrapper
| stackdio/stackdio-python-client | stackdio/client/version.py | Python | apache-2.0 | 3,425 |
##########################################################################
#
#
# Copyright 2010 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios),
# its affiliates and/or its licensors.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
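# Review fragment: the function below reads like a method body (note the
# ``self`` parameter). It builds a crowd SkeletonPrimitive from the Maya
# skeleton located via ``skeletonPrimitiveIO.autoSearchMayaMainJoint()`` and
# saves it to <commonDir>/skeleton/<charName>.skel.cob; ``skeletonPrimitiveIO``
# and ``path`` (os.path) are assumed to be provided by the enclosing module.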
def exportSekelton(self):
self.skeleton = skeletonPrimitiveIO.generateCrowdSkeletonFromMayaSkeleton(skeletonPrimitiveIO.autoSearchMayaMainJoint())
skeletonPrimitiveIO.saveSkeletonPrimitive(self.skeleton, path.join(self.commonDir, "skeleton", self.charName + ".skel.cob"))
#TODO: would be nice if this was a proper FromMayaConverter (that would probably take the root joint as an argument)
#
# | lento/cortex | contrib/for_review/SkeletonPrimitive_DrD/py_SkeletonPrimitiveIO/exportSkeletonFromMaya.py | Python | bsd-3-clause | 2,235 |
class Program(object):
pass
| Neppord/py2py | py2py_lib/ast/program.py | Python | mit | 30 |
#!/usr/bin/python
import sys
import os
pin = int(os.environ['UMLFRI_PIN'])
pout = int(os.environ['UMLFRI_POUT'])
if os.name == 'nt':
import msvcrt
pin = msvcrt.open_osfhandle(pin, os.O_RDONLY)
pout = msvcrt.open_osfhandle(pout, os.O_APPEND)
sys.path.insert(0, os.environ['UMLFRI_PATH'])
sys.path.append(os.environ['UMLFRI_LIB'])
from org.umlfri.api.implementation import Server, FileChannel, MIMChannel
from org.umlfri.api.base import Application
fin = os.fdopen(pin, 'rb')
fout = os.fdopen(pout, 'wb')
channel = FileChannel(fin, fout)
if 'UMLFRI_PLUGIN_DEBUG' in os.environ:
channel = MIMChannel(channel)
server = Server(channel)
import plugin
if hasattr(plugin, 'get_main_loop'):
main_loop = plugin.get_main_loop()
else:
main_loop = None
def main(args):
server.start(main_loop)
adapter=Application(server, 'app')
plugin.plugin_main(adapter)
try:
server.main_loop()
except KeyboardInterrupt:
pass
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| umlfri/umlfri2 | addons/python_starter/starter/python_runner.py | Python | gpl-3.0 | 1,044 |
# coding=utf-8
# Copyright 2021 The Deadunits Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Implements common architectures in a generic way using tf.keras.Model.
Each generic model inherits from `tf.keras.Model`.
You can use following generic_models for now:
- GenericConvnet: sequential models include Conv2D's + Dense's.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from deadunits.layers import MaskedLayer
from deadunits.layers import MeanReplacer
from deadunits.layers import TaylorScorer
import gin
from six.moves import range
import tensorflow.compat.v2 as tf
_default_generic_convnet_arch = [['C', 16, 5, {}], ['MP', 2, 2],
['C', 32, 5, {}], ['MP', 2, 2], ['F'],
['D', 256], ['O', 10]]
@gin.configurable
class GenericConvnet(tf.keras.Model):
"""Creates a tf.keras.Model from according to the flags and arch provided.
"""
_allowed_layers = {
'C': 'conv',
'D': 'dense',
'MP': 'maxpool',
'DO': 'dropout',
'O': 'output',
'F': 'flatten',
'GA': 'gap',
}
# Each layer should have the following form in the arch definition.
# 'C': Conv2D layer in the form ['C', n_units, filter_shape, **kwargs]
# 'MP': MaxPool2D layer in the form ['MP', pool_size, strides, **kwargs]
# 'D': Dense layer in the form ['D', n_units]
# 'DO': Dropout layer in the form ['DO', rate]
# 'F': Flatten layer in the form ['F']
# 'GA': Global average pooling 2D in the form ['GA']
# 'O': Dense layer with no activation in the form ['O', n_units]
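  # Illustrative custom arch (an assumption, mirroring the rules above):
  #   [['C', 8, 3, {}], ['GA'], ['O', 10]]
  # i.e. an 8-filter 3x3 conv, global average pooling and a 10-way output.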
def __init__(self,
model_arch=None,
name='GenericCifarConvnet',
f_activation=tf.keras.activations.relu,
use_batchnorm=False,
bn_is_affine=False,
use_dropout=False,
dropout_rate=0.5,
use_mean_replacer=False,
use_taylor_scorer=False,
use_masked_layers=False):
"""Initializes GenericConvnet instance with correct layers.
Args:
model_arch: list, consists of lists defining the cascaded network. refer
to `GenericConvnet._allowed_layers`.
name: str, name of the model.
f_activation: function, from tf.keras.activations
use_batchnorm: bool, if True BatchNormalization layer is used.
bn_is_affine: bool, if True BatchNormalization performs affine
transformation after the normalization.
use_dropout: bool, if True Dropout layer is used.
dropout_rate: float, dropout fraction for the Dropout layer.
use_mean_replacer: bool, if True MeanReplacer layer is used after each
layer.
use_taylor_scorer: bool, if True TaylorScorer layer is used after each
layer.
use_masked_layers: bool, if True each layer is wrapped with MaskedLayer.
Raises:
AssertionError: when the provided `model_arch` is not valid.
"""
if model_arch is None:
model_arch = _default_generic_convnet_arch
self._check_arch(model_arch)
super(GenericConvnet, self).__init__(name=name)
    # Initial configuration is saved to be able to clone the model.
self.init_config = dict([('model_arch', model_arch), ('name', name),
('f_activation', f_activation),
('use_batchnorm', use_batchnorm),
('bn_is_affine', bn_is_affine),
('use_dropout', use_dropout),
('dropout_rate', dropout_rate),
('use_mean_replacer', use_mean_replacer),
('use_taylor_scorer', use_taylor_scorer),
('use_masked_layers', use_masked_layers)])
# Wrap the layers if asked.
wrapper = lambda l: MaskedLayer(l) if use_masked_layers else l
    # Forward chain holds the attribute names in order and is used to
    # orchestrate the forward pass.
forward_chain = []
for t in model_arch:
# The order is:
# Layer + bn + Activation + taylorScorer + meanReplacer + Dropout
l_type = t[0]
l_name = self._get_layer_name(l_type)
forward_chain.append(l_name)
      # For F/GA/MP/O/DO layers no extra layers (bn, activation, dropout) are added.
if l_type == 'F':
setattr(self, l_name, tf.keras.layers.Flatten())
elif l_type == 'GA':
setattr(self, l_name, tf.keras.layers.GlobalAvgPool2D())
elif l_type == 'MP':
setattr(self, l_name, tf.keras.layers.MaxPool2D(t[1], t[2]))
elif l_type == 'O':
setattr(self, l_name, tf.keras.layers.Dense(t[1], activation=None))
elif l_type == 'DO':
setattr(self, l_name, tf.keras.layers.Dropout(t[1]))
else:
if l_type == 'C':
setattr(
self, l_name,
wrapper(
tf.keras.layers.Conv2D(t[1], t[2], activation=None, **t[3])))
elif l_type == 'D':
setattr(self, l_name,
wrapper(tf.keras.layers.Dense(t[1], activation=None)))
if use_batchnorm:
c_name = l_name + '_bn'
setattr(
self, c_name,
tf.keras.layers.BatchNormalization(
center=bn_is_affine, scale=bn_is_affine))
forward_chain.append(c_name)
# Add activation
c_name = l_name + '_a'
setattr(self, c_name, f_activation)
forward_chain.append(c_name)
if use_taylor_scorer:
c_name = l_name + '_ts'
setattr(self, c_name, TaylorScorer())
forward_chain.append(c_name)
if use_mean_replacer:
c_name = l_name + '_mr'
setattr(self, c_name, MeanReplacer())
forward_chain.append(c_name)
if use_dropout:
c_name = l_name + '_dr'
setattr(self, c_name, tf.keras.layers.Dropout(dropout_rate))
forward_chain.append(c_name)
self.forward_chain = forward_chain
def call(self,
inputs,
training=False,
compute_mean_replacement_saliency=False,
compute_removal_saliency=False,
is_abs=True,
aggregate_values=False,
is_replacing=False,
return_nodes=None):
    # We need to save the first_input for initializing our clone (see .clone()).
if not hasattr(self, 'first_input'):
self.first_input = inputs
x = inputs
return_dict = {}
for l_name in self.forward_chain:
node = getattr(self, l_name)
if isinstance(node, MeanReplacer):
x = node(x, is_replacing=is_replacing)
elif isinstance(node, TaylorScorer):
x = node(
x,
compute_mean_replacement_saliency=compute_mean_replacement_saliency,
compute_removal_saliency=compute_removal_saliency,
is_abs=is_abs,
aggregate_values=aggregate_values)
elif isinstance(
node, (tf.keras.layers.BatchNormalization, tf.keras.layers.Dropout)):
x = node(x, training=training)
else:
x = node(x)
if return_nodes and l_name in return_nodes:
return_dict[l_name] = x
if return_nodes:
return x, return_dict
else:
return x
def propagate_bias(self, l_name, input_tensor):
"""Propagates the given input to the bias of the next unit.
    We expect `input_tensor` to have constant values at `input_tensor[...,i]` for
    every unit `i`. However this is not checked and if it is not constant, the
    mean of all values is used to update the bias.
    The `input_tensor` is cast into the same type as the parameters of `l_name`.
Args:
l_name: str, name of a MaskedLayer such that `hasattr(self, l_name)` is
True.
input_tensor: Tensor, same shape as the output shape of the l_name. It
should also be a float type. i.e. tf.float16/32/64.
Raises:
ValueError: when the l_name is not in the `self.forward_chain` or if
there is no parameterized layer exists after `l_name`.
AssertionError: when the input_tensor is not float type.
"""
assert (input_tensor.dtype in [tf.float16, tf.float32, tf.float64])
current_i = self.forward_chain.index(l_name) + 1
if current_i == len(self.forward_chain):
raise ValueError('Output layer cannot propagate bias')
next_layer = getattr(self, self.forward_chain[current_i])
forward_tensor = input_tensor
# Including `tf.keras.layers.Dense`, too; since the output layer(Dense)
# is not wrapped with `MaskedLayer`.
parametered_layers = (MaskedLayer, tf.keras.layers.Dense,
tf.keras.layers.Conv2D)
while not isinstance(next_layer, parametered_layers):
forward_tensor = next_layer(forward_tensor)
current_i += 1
if current_i == len(self.forward_chain):
raise ValueError('No appropriate layer exists after'
'%s to propagate bias.' % l_name)
next_layer = getattr(self, self.forward_chain[current_i])
    # So now we have the propagated bias + the current bias. This should be our
    # new bias.
forward_tensor = next_layer(forward_tensor)
# During Mean Replacement, forward_tensor[...,i] should be a constant
# tensor, but it is not verified.
bias2add = tf.reduce_mean(
forward_tensor, axis=list(range(forward_tensor.shape.ndims - 1)))
if isinstance(next_layer, MaskedLayer):
next_layer.layer.weights[1].assign(bias2add)
else:
next_layer.weights[1].assign(bias2add)
def get_allowed_layer_keys(self):
return list(self._allowed_layers.keys())
def get_layer_keys(self, layer_type, name_filter=lambda _: True):
"""Returns a list of layer_names matching the type and passing the filter.
`self.forward_chain` is filtered by type and layer_name.
Args:
layer_type: layer class to be matched.
name_filter: function, returning bool given a layer_name.
"""
res = []
for l_name in self.forward_chain:
if name_filter(l_name) and isinstance(getattr(self, l_name), layer_type):
res.append(l_name)
return res
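  # e.g. with the default arch and use_masked_layers=True,
  # get_layer_keys(MaskedLayer) returns ['conv_1', 'conv_2', 'dense_1'].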
def _get_layer_name(self, l_type):
"""Returns names for different layers by incrementing the counter.
Args:
l_type: str from self._allowed_layers.keys()
Returns:
attr_name: str unique attr name for the layer
"""
if not hasattr(self, 'layer_name_counter'):
self.layer_name_counter = {k: 1 for k in self._allowed_layers.keys()}
i = self.layer_name_counter[l_type]
self.layer_name_counter[l_type] += 1
return '%s_%d' % (self._allowed_layers[l_type], i)
def clone(self):
new_model = GenericConvnet(**self.init_config)
# Initilize the new_model params.
new_model(self.first_input)
new_model.set_weights(self.get_weights())
return new_model
def _check_arch(self, arch):
"""Checks the arch provided has the right form.
For some reason tensorflow wraps every list/dict to make it checkpointable.
For that reason we are using the super classes from collections module.
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/training/checkpointable/data_structures.py
Args:
arch: architecture list.
Raises:
AssertionError: If the architecture list is not in the right format.
"""
assert arch is not None
assert len(arch) >= 1
for t in arch:
assert isinstance(t, collections.MutableSequence)
assert len(t) >= 1
assert t[0] in self.get_allowed_layer_keys()
if t[0] == 'C':
assert len(t) == 4
assert isinstance(t[1], int)
# t[2] can be an int or list of two integers.
assert (isinstance(t[2], int) or
(isinstance(t[2], collections.MutableSequence) and
len(t[2]) == 2) and all(isinstance(x, int) for x in t[2]))
assert isinstance(t[3], collections.MutableMapping)
if t[0] == 'MP':
assert len(t) == 3
assert (isinstance(t[1], int) or
(isinstance(t[1], collections.MutableSequence) and
len(t[1]) == 2) and all(isinstance(x, int) for x in t[1]))
assert (isinstance(t[2], int) or
(isinstance(t[2], collections.MutableSequence) and
len(t[2]) == 2) and all(isinstance(x, int) for x in t[2]))
if t[0] in ('F', 'GA'):
assert len(t) == 1
if t[0] in ('D', 'O'):
assert len(t) == 2
assert isinstance(t[1], int)
if t[0] == 'DO':
assert len(t) == 2
assert isinstance(t[1], float) and 0 < t[1] and t[1] < 1
| google-research/deadunits | deadunits/generic_convnet.py | Python | apache-2.0 | 13,192 |
#!/usr/bin/env python2
import sys
import os.path
import argparse
import json
import shutil
from urllib2 import build_opener, HTTPCookieProcessor, Request, HTTPHandler
from urllib import urlencode
from cookielib import CookieJar
from tempfile import mkstemp
from contextlib import closing
from subprocess import Popen, PIPE
from lxml import html
URL = 'http://www.accuradio.com'
cj = CookieJar()
handler = HTTPHandler(debuglevel=0)
opener = build_opener(handler, HTTPCookieProcessor(cj))
def fetch_channels(genre):
resp = opener.open('{}/finder/2013/channels/{}/?s=2013'.format(URL, genre))
content = resp.read()
root = html.fromstring(content)
return {r.attrib['data-name']: r.attrib['data-id']
for r in root.xpath('//a[@data-id and @data-name]')}
def fetch_channel_meta(channel, cid):
token = None
for c in cj:
if c.name == 'csrftoken':
token = c.value
assert token
data = {
'name': channel,
'o': cid,
'getando': '1',
'getts': '1',
'csrfmiddlewaretoken': token
}
req = Request('{}/c/m/json/channel/'.format(URL), urlencode(data))
req.add_header('X-CSRFToken', token)
# req.add_header('X-Requested-With', 'XMLHttpRequest')
resp = opener.open(req)
return json.load(resp)
def fetch_playlist(cid, ando, schedule):
url = '{}/playlist/json/{}/?ando={}&intro=true&spotschedule={}&fa=null'.format(
URL, cid, ando, schedule)
resp = opener.open(url)
return json.load(resp)
def set_tags(fname, info):
opts = []
ai = info['album']
if 'title' in ai:
opts.extend(('-A', ai['title']))
if 'year' in ai:
opts.extend(('-y', ai['year']))
ai = info['artist']
if 'artistdisplay' in ai:
opts.extend(('-R', ai['artistdisplay']))
opts.extend(('-a', info['track_artist']))
opts.extend(('-s', info['title']))
opts = [r.encode('utf-8') for r in opts]
Popen(['mp4tags'] + opts + [fname]).poll()
def fetch(channel, cid):
meta = fetch_channel_meta(channel, cid)
ando = meta['ando']
schedule = meta['spotschedule']
playlist = fetch_playlist(cid, ando, schedule)
for song in playlist:
if 'primary' not in song:
continue
fname = os.path.basename(song['fn']) + '.m4a'
if os.path.exists(fname):
continue
if fname.startswith('index'):
print song
if fname.startswith('protocol'):
print song
url = song['primary'] + song['fn'] + '.m4a'
try:
resp = opener.open(url)
except:
continue
print url
fd, tmpfname = mkstemp()
with closing(os.fdopen(fd, 'w')) as tmpfile:
shutil.copyfileobj(resp, tmpfile)
shutil.move(tmpfname, fname)
set_tags(fname, song)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='fetch music from accuradio.com')
parser.add_argument('genre', help='something like jazz or adultalternative')
parser.add_argument('channel', help='Groove Jazz or Latin Jazz', nargs='?', default=None)
args = parser.parse_args()
channels = fetch_channels(args.genre)
if args.channel and args.channel in channels:
fetch(args.channel, channels[args.channel])
else:
print '\n'.join(sorted(channels))
| baverman/accuradio | accuradio.py | Python | mit | 3,370 |
from nose.tools import make_decorator
from utils import clear_users as clear_users_func
def clear_users(func):
'''
Calls cheddar's delete all users method no matter the test result
'''
def new(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception:
clear_users_func()
raise
clear_users_func()
new = make_decorator(func)(new)
return new
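# Usage sketch (hypothetical test function); Cheddar is left without leftover
# users whether the test passes or fails:
# @clear_users
# def test_create_customer():
#     ...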
| smartfile/sharpy | tests/testing_tools/decorators.py | Python | bsd-3-clause | 433 |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Connector for turbobit
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
# ------------------------------------------------------------
import re
from core import logger
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
return video_urls
# Finds this server's videos in the given text
def find_videos(data):
encontrados = set()
devuelve = []
# http://turbobit.net/scz8lxrrgllr.html
# http://www.turbobit.net/uzo3gcyfmt4b.html
# http://turbobit.net/eaz9ha3gop65/deadliest.catch.s08e09-killers.mp4.html
patronvideos = '(turbobit.net/[0-9a-z]+)'
logger.info("#" + patronvideos + "#")
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for match in matches:
titulo = "[turbobit]"
url = "http://" + match + ".html"
if url not in encontrados:
logger.info(" url=" + url)
devuelve.append([titulo, url, 'turbobit'])
encontrados.add(url)
else:
logger.info(" url duplicada=" + url)
return devuelve
| neno1978/pelisalacarta | python/main-classic/servers/turbobit.py | Python | gpl-3.0 | 1,263 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import accounting_assert_test
| Aravinthu/odoo | addons/account_test/models/__init__.py | Python | agpl-3.0 | 137 |
"""
beam_block.core.beam_block_json
===============================
Calculates partial beam block(PBB) and cumulative beam block(CBB)
by using wradlib's beamblock and geotiff functions. PBB and CBB
are then used to created flags when a certain beam block fraction
is passed.
This code is adapted from code written by Kai Muehlbauer:
https://github.com/wradlib/wradlib/blob/master/notebooks/beamblockage/
wradlib_beamblock.ipynb
Note: The json format is based on X-SAPR variable format. Other radar formats
may be added in the future.
.. autosummary::
    :toctree: generated/
:template: dev_template.rst
json_beam_block
"""
import json
import numpy as np
import wradlib as wrl
def json_beam_block(json_data, tif_file,
beam_width=1.0):
"""
Beam Block Json Calculation
Parameters
----------
json_data : Json
Json object used.
    tif_file : string
        Name of the geotiff file to use for the
        calculation.
Other Parameters
----------------
beam_width : float
Radar's beam width for calculation.
Default value is 1.0.
Returns
-------
pbb_all : array
Array of partial beam block fractions for each
gate in all sweeps.
cbb_all : array
Array of cumulative beam block fractions for
each gate in all sweeps.
References
----------
Bech, J., B. Codina, J. Lorente, and D. Bebbington,
2003: The sensitivity of single polarization weather
radar beam blockage correction to variability in the
vertical refractivity gradient. J. Atmos. Oceanic
Technol., 20, 845–855
Heistermann, M., Jacobi, S., and Pfaff, T., 2013:
Technical Note: An open source library for processing
weather radar data (wradlib), Hydrol. Earth Syst.
Sci., 17, 863-871, doi:10.5194/hess-17-863-2013
Helmus, J.J. & Collis, S.M., (2016). The Python ARM
Radar Toolkit (Py-ART), a Library for Working with
Weather Radar Data in the Python Programming Language.
Journal of Open Research Software. 4(1), p.e25.
DOI: http://doi.org/10.5334/jors.119
"""
variables = json_data['variables']
# Opening the tif file and getting the values ready to be
# converted into polar values.
rasterfile = tif_file
data_raster = wrl.io.open_raster(rasterfile)
rastervalues, rastercoords, proj = wrl.georef.extract_raster_dataset(
data_raster, nodata=None)
sitecoords = (np.float(variables['longitude']['data']),
np.float(variables['latitude']['data']),
np.float(variables['altitude']['data']))
pbb_arrays = []
cbb_arrays = []
_range = np.array(json.loads(variables['range']['data']))
# Cycling through all sweeps in the radar object.
beamradius = wrl.util.half_power_radius(_range, beam_width)
for i in range(
len(np.array(
json.loads(variables['sweep_start_ray_index']['data'])))):
index_start = np.array(
json.loads(variables['sweep_start_ray_index']['data']))[i]
index_end = np.array(
json.loads(variables['sweep_end_ray_index']['data']))[i] + 1
elevs = np.array(
json.loads(
variables['elevation']['data']))[index_start:index_end]
azimuths = np.array(
json.loads(
variables['azimuth']['data']))[index_start:index_end]
rg, azg = np.meshgrid(_range, azimuths)
rg, eleg = np.meshgrid(_range, elevs)
lon, lat, alt = wrl.georef.polar2lonlatalt_n(
rg, azg, eleg, sitecoords)
x_pol, y_pol = wrl.georef.reproject(
lon, lat, projection_target=proj)
polcoords = np.dstack((x_pol, y_pol))
rlimits = (x_pol.min(), y_pol.min(), x_pol.max(), y_pol.max())
ind = wrl.util.find_bbox_indices(rastercoords, rlimits)
rastercoords = rastercoords[0:ind[3], ind[0]:ind[2], ...]
rastervalues = rastervalues[0:ind[3], ind[0]:ind[2]]
# Map rastervalues to polar grid points.
polarvalues = wrl.ipol.cart2irregular_spline(
rastercoords, rastervalues, polcoords)
# Calculate partial beam blockage using wradlib.
pbb = wrl.qual.beam_block_frac(polarvalues, alt, beamradius)
pbb = np.ma.masked_invalid(pbb)
pbb_arrays.append(pbb)
# Calculate cumulative beam blockage using wradlib.
cbb = wrl.qual.cum_beam_block_frac(pbb)
cbb_arrays.append(cbb)
# Stacks all sweeps blockage data.
pbb_all = np.ma.concatenate(pbb_arrays)
cbb_all = np.ma.concatenate(cbb_arrays)
return pbb_all, cbb_all
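# Illustrative usage sketch (the file names below are assumptions, not part of
# this module):
#     with open('xsapr_radar.json') as fp:
#         json_data = json.load(fp)
#     pbb, cbb = json_beam_block(json_data, 'terrain.tif', beam_width=1.0)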
| zssherman/beam_block | beam_block/core/beam_block_json.py | Python | bsd-2-clause | 4,656 |
from __future__ import print_function
import sys
import os
import os.path
import fnmatch
import json
import shlex
try:
import fcntl
def flock(fp):
fcntl.flock(fp.fileno(), fcntl.LOCK_EX)
def funlock(fp):
fcntl.flock(fp.fileno(), fcntl.LOCK_UN)
except:
import msvcrt
def flock(fp):
fp.seek(0)
msvcrt.locking(fp.fileno(), msvcrt.LK_LOCK, 1)
def funlock(fp):
fp.seek(0)
msvcrt.locking(fp.fileno(), msvcrt.LK_UNLCK, 1)
if len(sys.argv) < 5:
print("usage: clangdb.py ROOT_DIR BUILD_DIR COMPILER COMPILER_ARGS...",
file=sys.stderr)
exit(1)
root_dir = os.path.abspath(sys.argv[1])
build_dir = os.path.abspath(sys.argv[2])
compiler = sys.argv[3]
compiler_args = sys.argv[4:]
source_file = None
for arg in compiler_args:
for pattern in ['*.c', '*.cpp']:
if fnmatch.fnmatch(arg, pattern):
source_file = os.path.join(root_dir, arg)
break
if source_file:
break
if source_file:
db_path = os.path.join(build_dir, "compile_commands.json")
cmd = {
"command": "%s %s" % (compiler, ' '.join(compiler_args)),
"directory": root_dir,
"file": source_file,
}
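    # Resulting compile_commands.json entry (illustrative values):
    #   {"command": "gcc -O2 -c src/foo.c", "directory": "/path/to/root",
    #    "file": "/path/to/root/src/foo.c"}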
try:
with open(db_path, 'a+') as fp:
try:
flock(fp)
try:
fp.seek(0)
db = json.loads(fp.read())
db[:]
except:
db = []
for index, item in enumerate(db):
if item["file"] == source_file:
db[index] = cmd
break
else:
db.append(cmd)
fp.seek(0)
fp.truncate()
fp.write(json.dumps(db, indent=2))
finally:
funlock(fp)
except:
e = sys.exc_info()[1]
print("error: unable to write clangdb to %s" % db_path, file=sys.stderr)
print(str(e), file=sys.stderr)
cmd = shlex.split(compiler) + compiler_args
os.execvp(cmd[0], cmd)
| roc-project/roc | scripts/wrappers/clangdb.py | Python | mpl-2.0 | 2,108 |
import networkx as nx
import copy
from nose.tools import assert_equals
from regraph import (print_graph,
NXGraph)
# from regraph.utils import assert_nx_graph_eq
from regraph.category_utils import (pullback,
pushout,
pullback_complement,
get_unique_map_to_pullback_complement)
def assert_edges_undir(edges1, edges2):
edgeset1 = set(edges1)
edgeset2 = set(edges2)
for edge in edgeset1:
if edge not in edgeset2 and (edge[1], edge[0]) not in edgeset2:
assert False
class TestCategoryUtils:
def __init__(self):
D = NXGraph()
D.add_node('square')
D.add_node('circle')
D.add_node('dark_square')
D.add_node('dark_circle')
D.add_edge('square', 'circle')
D.add_edge('circle', 'dark_circle')
D.add_edge('circle', 'dark_square')
D.add_edge('circle', 'circle')
self.D = D
A = NXGraph()
A.add_node(2)
A.add_node(3)
A.add_edge(2, 3)
self.A = A
B = NXGraph()
B.add_node(1)
B.add_node(2)
B.add_node(3)
B.add_edge(1, 2)
B.add_edge(2, 3)
self.B = B
C = NXGraph()
C.add_node(2)
C.add_node(3)
C.add_node('dark_square')
C.add_edge(2, 3)
C.add_edge(2, 'dark_square')
C.add_edge(2, 2)
self.C = C
self.homAB = {
2: 2,
3: 3
}
self.homAC = {
2: 2,
3: 3
}
self.homBD = {
1: 'square',
2: 'circle',
3: 'dark_circle'
}
self.homCD = {
2: 'circle',
3: 'dark_circle',
'dark_square': 'dark_square'
}
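    # In the fixtures above, A is (up to node renaming) the pullback of
    # B -> D <- C, and D has the shape of the pushout of B <- A -> C; the tests
    # below exercise both constructions, their in-place variants, and the
    # unique map into a pullback complement.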
def test_pullback(self):
A, homAB, homAC = pullback(
self.B, self.C, self.D, self.homBD, self.homCD,
)
assert_equals(type(A), NXGraph)
assert_equals(set(A.nodes()), set(self.A.nodes()))
assert_edges_undir(A.edges(), self.A.edges())
assert_equals(homAB, self.homAB)
assert_equals(homAC, self.homAC)
def test_pullback_complement(self):
C, homAC, homCD = pullback_complement(
self.A, self.B, self.D, self.homAB, self.homBD
)
assert_equals(type(C), NXGraph)
test_graph = self.C.get_relabeled_graph(
{2: "circle", 3: "dark_circle", "dark_square": "dark_square"}
)
# assert_nx_graph_eq(test_graph, C)
assert(test_graph == C)
assert(id(self.D) != id(C))
    def test_pullback_complement_inplace(self):
D_copy = copy.deepcopy(self.D)
C, homAC, homCD = pullback_complement(
self.A, self.B, D_copy, self.homAB, self.homBD, inplace=True
)
assert_equals(type(C), NXGraph)
test_graph = self.C.get_relabeled_graph(
{2: "circle", 3: "dark_circle", "dark_square": "dark_square"}
)
assert(test_graph == C)
assert(id(D_copy) == id(C))
def test_pushout(self):
D, homBD, homCD = pushout(
self.A, self.B, self.C, self.homAB, self.homAC
)
assert_equals(type(D), NXGraph)
assert_equals(len(D.nodes()),
len(self.D.nodes()))
assert_equals(len(D.edges()),
len(self.D.edges()))
assert(id(self.B) != id(D))
def test_pushout_inplace(self):
B_copy = copy.deepcopy(self.B)
D, homBD, homCD = pushout(
self.A, B_copy, self.C, self.homAB, self.homAC, inplace=True
)
assert_equals(type(D), NXGraph)
assert_equals(len(D.nodes()),
len(self.D.nodes()))
assert_equals(len(D.edges()),
len(self.D.edges()))
assert(id(B_copy) == id(D))
def test_pushout_symmetry_directed(self):
A = NXGraph()
A.add_nodes_from(["a", "b"])
A.add_edges_from([("a", "b")])
B = NXGraph()
B.add_nodes_from([1, 2, 3])
B.add_edges_from([(2, 3), (3, 2), (1, 3)])
C = NXGraph()
C.add_nodes_from(["x", "y"])
C.add_edges_from([("x", "x"), ("x", "y")])
homAB = {"a": 2, "b": 3}
homAC = {"a": "x", "b": "x"}
D, homBD, homCD = pushout(
A, B, C, homAB, homAC
)
D_inv, homCD_inv, homBD_inv = pushout(
A, C, B, homAC, homAB
)
assert_equals(len(D.nodes()), len(D_inv.nodes()))
assert_equals(len(D.edges()), len(D_inv.edges()))
def test_get_unique_map_to_pullback_complement(self):
# a_b = {
# "circle1": "circle",
# "circle2": "circle"
# }
# b_c = {
# "circle": "circle",
# "triangle": "triangle",
# }
a_p = {
"circle1": "c1",
"circle2": "c2"
}
p_c = {
"c1": "circle",
"c2": "circle",
"square": "square"
}
a_prime_a = {
"circle1": "circle1",
"circle2": "circle2"
}
a_prime_z = {
"circle1": "circle1",
"circle2": "circle2"
}
z_c = {
"circle1": "circle",
"circle2": "circle",
"square": "square"
}
# z_p = get_unique_map_to_pullback_complement(
# a_b, b_c, a_p, p_c, a_z, z_c)
z_p = get_unique_map_to_pullback_complement(
a_p, p_c,
a_prime_a, a_prime_z,
z_c)
assert(z_p == {'circle1': 'c1', 'circle2': 'c2', 'square': 'square'})
| Kappa-Dev/ReGraph | tests/test_category_utils.py | Python | mit | 5,768 |
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This application demonstrates how to do batch operations from a csv file
# using Cloud Spanner.
#
# For more information, see the README.md.
import argparse
import csv
from google.cloud import spanner
def is_bool_null(file):
    # This function converts the boolean values in the dataset from strings to
    # boolean data types. It also converts empty strings to None, indicating an
    # empty cell.
data = list(csv.reader(file))
# Reads each line in the csv file.
for line in range(len(data)):
for cell in range(len(data[line])):
# Changes the string to boolean.
if data[line][cell] == 'true':
                data[line][cell] = True
# Changes blank string to Python readable None type.
if data[line][cell] == '':
data[line][cell] = None
return (data)
def divide_chunks(lst, n):
# This function divides the csv file into chunks so that the mutation will
# commit every 500 rows.
for i in range(0, len(lst), n):
yield lst[i:i + n]
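# Illustrative example (not executed): divide_chunks([1, 2, 3, 4, 5], 2) yields
# [1, 2], [3, 4] and then [5], so the final chunk may be shorter than n.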
def insert_data(database, filepath, table_name, column_names):
# This function iterates over the list of files belonging to the dataset
# and writes each line into Cloud Spanner using the batch mutation
# function.
with open(filepath) as file:
data = is_bool_null(file)
data = tuple(data)
l_group = list(divide_chunks(data, 500))
# Inserts each chunk of data into database
for current_inserts in (l_group):
if current_inserts is not None:
with database.batch() as batch:
batch.insert(
table=table_name, columns=column_names, values=current_inserts)
def main(instance_id, database_id):
# Instantiates a Cloud Spanner client.
spanner_client = spanner.Client()
instance = spanner_client.instance(instance_id)
# Create the database.
    with open('schema.ddl', 'r') as ddl_file:
        ddl_statements = ddl_file.read().split(';')
database = instance.database(database_id, ddl_statements=ddl_statements)
create_op = database.create()
create_op.result() # Wait for operation to complete.
# File paths.
comments_file = 'hnewscomments.csv'
stories_file = 'hnewsstories.csv'
# Sets the Column names.
s_columnnames = (
'id',
'by',
'author',
'dead',
'deleted',
'descendants',
'score',
'text',
'time',
'time_ts',
'title',
'url',
)
c_columnnames = (
'id',
'by',
'author',
'dead',
'deleted',
'parent',
'ranking',
'text',
'time',
'time_ts',
)
# Insert data.
insert_data(database, stories_file, 'stories', s_columnnames)
insert_data(database, comments_file, 'comments', c_columnnames)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('instance_id', help='Your Cloud Spanner instance ID.')
parser.add_argument('database_id', help='Your Cloud Spanner database ID.')
args = parser.parse_args()
main(args.instance_id, args.database_id)
| cloudspannerecosystem/sampledb | batch_import.py | Python | apache-2.0 | 3,617 |
# -*- coding: utf-8 -*-
import io
import os
import pytest
import six
from anymarkup_core import *
from test import *
class TestSerialize(object):
"""Note: testing serialization is a bit tricky, since serializing dicts can result
in different order of values in serialized string in different runs.
That means that we can't just test whether the serialized string equals to expected
string. To solve this, we rather parse the serialized string back and make sure
that it equals the original structure.
"""
fixtures = os.path.join(os.path.dirname(__file__), 'fixtures')
def _read_decode(self, file):
if isinstance(file, six.string_types):
file = open(file, 'rb')
else:
file.seek(0)
return file.read().decode('utf-8')
@pytest.mark.parametrize(('struct', 'format'), [
(example_as_dict, 'ini'),
(example_as_dict, 'json'),
(example_as_dict, 'json5'),
(toml_example_as_dict, 'toml'),
(example_as_ordered_dict, 'xml'),
(example_as_dict, 'yaml'),
(example_as_ordered_dict, 'yaml'),
])
def test_serialize_basic(self, struct, format):
serialized = serialize(struct, format)
parsed_back = parse(serialized, format)
assert parsed_back == struct
assert type(parsed_back) == type(struct)
def test_serialize_works_with_wb_opened_file(self, tmpdir):
f = os.path.join(str(tmpdir), 'foo.xml')
fhandle = open(f, 'wb+')
serialize(example_as_ordered_dict, 'xml', fhandle)
assert self._read_decode(fhandle) == example_xml
def test_serialize_raises_with_unicode_opened_file(self, tmpdir):
# on Python 2, this can only be simulated with io.open
f = os.path.join(str(tmpdir), 'foo.json')
fhandle = io.open(f, 'w+', encoding='utf-8')
with pytest.raises(AnyMarkupError):
serialize(example_as_dict, 'json', fhandle)
@pytest.mark.parametrize(('struct', 'fmt', 'fname'), [
(example_as_dict, None, 'example.ini'),
(example_as_dict, None, 'example.json'),
(example_as_dict, 'json5', 'example.json5'),
(toml_example_as_dict, 'toml', 'example.toml'),
(example_as_ordered_dict, None, 'example.xml'),
(example_as_dict, None, 'example.yaml'),
(example_as_ordered_dict, None, 'example_ordered.yaml'),
])
def test_serialize_file_basic(self, struct, fmt, fname, tmpdir):
f = os.path.join(str(tmpdir), fname)
serialize_file(struct, f)
parsed_back = parse(self._read_decode(f), fmt)
assert parsed_back == struct
assert type(parsed_back) == type(struct)
def test_serialize_file_format_overrides_extension(self, tmpdir):
f = os.path.join(str(tmpdir), 'foo.ini')
serialize_file(example_as_dict, f, 'json')
assert parse(self._read_decode(f)) == example_as_dict
def test_parse_and_serialize_yaml_multiline_string(self):
# https://github.com/bkabrda/anymarkup-core/issues/1
inp = b'foo: |-\n line1\n line2\n line3\n'
assert serialize(parse(inp), 'yaml') == inp
| bkabrda/anymarkup-core | test/test_serialize.py | Python | bsd-3-clause | 3,156 |
from __future__ import absolute_import
import unittest
from dynamodb_mapper.model import DynamoDBModel
from dynamodb_mapper.migration import Migration, VersionError
# test case: field rename (mail -> email)
class UserMigration(Migration):
def check_1(self, raw_data):
field_count = 0
field_count += u"id" in raw_data and isinstance(raw_data[u"id"], unicode)
field_count += u"energy" in raw_data and isinstance(raw_data[u"energy"], int)
field_count += u"mail" in raw_data and isinstance(raw_data[u"mail"], unicode)
return field_count == len(raw_data)
    # No migrator for version 1, of course!
def check_2(self, raw_data):
# Stub. This is to check checker sorting only
return False
def migrate_to_2(self, raw_data):
# Stub. This is to check migrator sorting only
return raw_data
def check_11(self, raw_data):
field_count = 0
field_count += u"id" in raw_data and isinstance(raw_data[u"id"], unicode)
field_count += u"energy" in raw_data and isinstance(raw_data[u"energy"], int)
field_count += u"email" in raw_data and isinstance(raw_data[u"email"], unicode)
return field_count == len(raw_data)
def migrate_to_11(self, raw_data):
raw_data[u"email"] = raw_data[u"mail"]
del raw_data[u"mail"]
return raw_data
class User(DynamoDBModel):
__table__ = "user"
__hash_key__ = "id"
__migrator__ = UserMigration
__schema__ = {
"id": unicode,
"energy": int,
"email": unicode
}
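# The tests below assert that Migration discovers check_N/migrate_to_N methods
# by name, listing detectors from the newest schema version down (11, 2, 1) and
# migrators from the oldest version up (2, then 11).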
class TestMigration(unittest.TestCase):
def test_init(self):
# check migrator list + natural order sort
m = UserMigration(User)
self.assertEquals(m._detectors, ['check_11', 'check_2', 'check_1'])
self.assertEquals(m._migrators, ['migrate_to_2', 'migrate_to_11'])
    def test_version_detection_error(self):  # TODO: test exception
raw_data_version_error_type = {
u"id": u"Jackson",
u"energy": 6742348,
u"mail": "[email protected]",
}
raw_data_version_error_field = {
u"id": u"Jackson",
u"energy": 6742348,
u"e-mail": u"[email protected]",
}
m = UserMigration(User)
self.assertRaises(
VersionError,
m._detect_version,
raw_data_version_error_type,
)
    def test_version_detection(self):  # TODO: test exception
raw_data_version_1_regular = {
u"id": u"Jackson",
u"energy": 6742348,
u"mail": u"[email protected]",
}
# Version 1 with no mail is detected as 11 as the migration is on mail field
raw_data_version_1_no_mail = {
u"id": u"Jackson",
u"energy": 6742348,
}
m = UserMigration(User)
self.assertEquals(m._detect_version(raw_data_version_1_regular), 1)
self.assertEquals(m._detect_version(raw_data_version_1_no_mail), 11)
def test_migration(self):
raw_data_version_1 = {
u"id": u"Jackson",
u"energy": 6742348,
u"mail": u"[email protected]",
}
raw_data_version_11 = {
u"id": u"Jackson",
u"energy": 6742348,
u"email": u"[email protected]",
}
m = UserMigration(User)
self.assertEquals(m._do_migration(1, raw_data_version_1), raw_data_version_11)
self.assertEquals(m._detect_version(raw_data_version_11), 11)
def test_auto_migration(self):
raw_data_version_1 = {
u"id": u"Jackson",
u"energy": 6742348,
u"mail": u"[email protected]",
}
raw_data_version_11 = {
u"id": u"Jackson",
u"energy": 6742348,
u"email": u"[email protected]",
}
m = UserMigration(User)
self.assertEquals(m(raw_data_version_1), raw_data_version_11)
# more a functional test than a unit test...
def test_real_model_migration(self):
raw_data_version_1 = {
u"id": u"Jackson",
u"energy": 6742348,
u"mail": u"[email protected]",
}
raw_data_version_11 = {
u"id": u"Jackson",
u"energy": 6742348,
u"email": u"[email protected]",
}
user = User._from_db_dict(raw_data_version_1)
# Raw_data still original => needed for ROC
self.assertEquals(user._raw_data, raw_data_version_1)
# Data is at latest revision => consistency
self.assertEquals(user._to_db_dict(), raw_data_version_11)
# check the migrator engine is persisted (cache)
assert isinstance(User.__migrator__, UserMigration)
| sxnol/dynamodb2-mapper | dynamodb2_mapper/tests/test_migrations.py | Python | lgpl-3.0 | 4,804 |
# ./_iso639a2.py
# -*- coding: utf-8 -*-
# PyXB bindings for NM:49097258e5c38f24397bbfa3fe9b068fcbb13543
# Generated 2015-07-06 15:53:56.868489 by PyXB version 1.2.4 using Python 2.7.6.final.0
# Namespace http://ddex.net/xml/20120719/iso639a2 [xmlns:iso639a2]
from __future__ import unicode_literals
import pyxb
import pyxb.binding
import pyxb.binding.saxer
import io
import pyxb.utils.utility
import pyxb.utils.domutils
import sys
import pyxb.utils.six as _six
# Unique identifier for bindings created at the same time
_GenerationUID = pyxb.utils.utility.UniqueIdentifier('urn:uuid:277bca86-23f7-11e5-a69e-080027960975')
# Version of PyXB used to generate the bindings
_PyXBVersion = '1.2.4'
# Generated bindings are not compatible across PyXB versions
if pyxb.__version__ != _PyXBVersion:
raise pyxb.PyXBVersionError(_PyXBVersion)
# Import bindings for namespaces imported into schema
import pyxb.binding.datatypes
# NOTE: All namespace declarations are reserved within the binding
Namespace = pyxb.namespace.NamespaceForURI('http://ddex.net/xml/20120719/iso639a2', create_if_missing=True)
Namespace.configureCategories(['typeBinding', 'elementBinding'])
def CreateFromDocument (xml_text, default_namespace=None, location_base=None):
"""Parse the given XML and use the document element to create a
Python instance.
@param xml_text An XML document. This should be data (Python 2
str or Python 3 bytes), or a text (Python 2 unicode or Python 3
str) in the L{pyxb._InputEncoding} encoding.
@keyword default_namespace The L{pyxb.Namespace} instance to use as the
default namespace where there is no default namespace in scope.
If unspecified or C{None}, the namespace of the module containing
this function will be used.
@keyword location_base: An object to be recorded as the base of all
L{pyxb.utils.utility.Location} instances associated with events and
objects handled by the parser. You might pass the URI from which
the document was obtained.
"""
if pyxb.XMLStyle_saxer != pyxb._XMLStyle:
dom = pyxb.utils.domutils.StringToDOM(xml_text)
return CreateFromDOM(dom.documentElement, default_namespace=default_namespace)
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
saxer = pyxb.binding.saxer.make_parser(fallback_namespace=default_namespace, location_base=location_base)
handler = saxer.getContentHandler()
xmld = xml_text
if isinstance(xmld, _six.text_type):
xmld = xmld.encode(pyxb._InputEncoding)
saxer.parse(io.BytesIO(xmld))
instance = handler.rootObject()
return instance
def CreateFromDOM (node, default_namespace=None):
"""Create a Python instance from the given DOM node.
The node tag must correspond to an element declaration in this module.
@deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}."""
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)
# Atomic simple type: {http://ddex.net/xml/20120719/iso639a2}LanguageCode
class LanguageCode (pyxb.binding.datatypes.string, pyxb.binding.basis.enumeration_mixin):
"""An ISO639-1 two-letter code representing a ddex:Language."""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'LanguageCode')
_XSDLocation = pyxb.utils.utility.Location('http://ddex.net/xml/20120719/iso639a2.xsd', 10, 3)
_Documentation = 'An ISO639-1 two-letter code representing a ddex:Language.'
LanguageCode._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=LanguageCode, enum_prefix=None)
LanguageCode.aa = LanguageCode._CF_enumeration.addEnumeration(unicode_value='aa', tag='aa')
LanguageCode.ab = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ab', tag='ab')
LanguageCode.ae = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ae', tag='ae')
LanguageCode.af = LanguageCode._CF_enumeration.addEnumeration(unicode_value='af', tag='af')
LanguageCode.ak = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ak', tag='ak')
LanguageCode.am = LanguageCode._CF_enumeration.addEnumeration(unicode_value='am', tag='am')
LanguageCode.an = LanguageCode._CF_enumeration.addEnumeration(unicode_value='an', tag='an')
LanguageCode.ar = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ar', tag='ar')
LanguageCode.as_ = LanguageCode._CF_enumeration.addEnumeration(unicode_value='as', tag='as_')
LanguageCode.av = LanguageCode._CF_enumeration.addEnumeration(unicode_value='av', tag='av')
LanguageCode.ay = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ay', tag='ay')
LanguageCode.az = LanguageCode._CF_enumeration.addEnumeration(unicode_value='az', tag='az')
LanguageCode.ba = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ba', tag='ba')
LanguageCode.be = LanguageCode._CF_enumeration.addEnumeration(unicode_value='be', tag='be')
LanguageCode.bg = LanguageCode._CF_enumeration.addEnumeration(unicode_value='bg', tag='bg')
LanguageCode.bh = LanguageCode._CF_enumeration.addEnumeration(unicode_value='bh', tag='bh')
LanguageCode.bi = LanguageCode._CF_enumeration.addEnumeration(unicode_value='bi', tag='bi')
LanguageCode.bm = LanguageCode._CF_enumeration.addEnumeration(unicode_value='bm', tag='bm')
LanguageCode.bn = LanguageCode._CF_enumeration.addEnumeration(unicode_value='bn', tag='bn')
LanguageCode.bo = LanguageCode._CF_enumeration.addEnumeration(unicode_value='bo', tag='bo')
LanguageCode.br = LanguageCode._CF_enumeration.addEnumeration(unicode_value='br', tag='br')
LanguageCode.bs = LanguageCode._CF_enumeration.addEnumeration(unicode_value='bs', tag='bs')
LanguageCode.ca = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ca', tag='ca')
LanguageCode.ce = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ce', tag='ce')
LanguageCode.ch = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ch', tag='ch')
LanguageCode.co = LanguageCode._CF_enumeration.addEnumeration(unicode_value='co', tag='co')
LanguageCode.cr = LanguageCode._CF_enumeration.addEnumeration(unicode_value='cr', tag='cr')
LanguageCode.cs = LanguageCode._CF_enumeration.addEnumeration(unicode_value='cs', tag='cs')
LanguageCode.cu = LanguageCode._CF_enumeration.addEnumeration(unicode_value='cu', tag='cu')
LanguageCode.cv = LanguageCode._CF_enumeration.addEnumeration(unicode_value='cv', tag='cv')
LanguageCode.cy = LanguageCode._CF_enumeration.addEnumeration(unicode_value='cy', tag='cy')
LanguageCode.da = LanguageCode._CF_enumeration.addEnumeration(unicode_value='da', tag='da')
LanguageCode.de = LanguageCode._CF_enumeration.addEnumeration(unicode_value='de', tag='de')
LanguageCode.dv = LanguageCode._CF_enumeration.addEnumeration(unicode_value='dv', tag='dv')
LanguageCode.dz = LanguageCode._CF_enumeration.addEnumeration(unicode_value='dz', tag='dz')
LanguageCode.ee = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ee', tag='ee')
LanguageCode.el = LanguageCode._CF_enumeration.addEnumeration(unicode_value='el', tag='el')
LanguageCode.en = LanguageCode._CF_enumeration.addEnumeration(unicode_value='en', tag='en')
LanguageCode.eo = LanguageCode._CF_enumeration.addEnumeration(unicode_value='eo', tag='eo')
LanguageCode.es = LanguageCode._CF_enumeration.addEnumeration(unicode_value='es', tag='es')
LanguageCode.et = LanguageCode._CF_enumeration.addEnumeration(unicode_value='et', tag='et')
LanguageCode.eu = LanguageCode._CF_enumeration.addEnumeration(unicode_value='eu', tag='eu')
LanguageCode.fa = LanguageCode._CF_enumeration.addEnumeration(unicode_value='fa', tag='fa')
LanguageCode.ff = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ff', tag='ff')
LanguageCode.fi = LanguageCode._CF_enumeration.addEnumeration(unicode_value='fi', tag='fi')
LanguageCode.fj = LanguageCode._CF_enumeration.addEnumeration(unicode_value='fj', tag='fj')
LanguageCode.fo = LanguageCode._CF_enumeration.addEnumeration(unicode_value='fo', tag='fo')
LanguageCode.fr = LanguageCode._CF_enumeration.addEnumeration(unicode_value='fr', tag='fr')
LanguageCode.fy = LanguageCode._CF_enumeration.addEnumeration(unicode_value='fy', tag='fy')
LanguageCode.ga = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ga', tag='ga')
LanguageCode.gd = LanguageCode._CF_enumeration.addEnumeration(unicode_value='gd', tag='gd')
LanguageCode.gl = LanguageCode._CF_enumeration.addEnumeration(unicode_value='gl', tag='gl')
LanguageCode.gn = LanguageCode._CF_enumeration.addEnumeration(unicode_value='gn', tag='gn')
LanguageCode.gu = LanguageCode._CF_enumeration.addEnumeration(unicode_value='gu', tag='gu')
LanguageCode.gv = LanguageCode._CF_enumeration.addEnumeration(unicode_value='gv', tag='gv')
LanguageCode.ha = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ha', tag='ha')
LanguageCode.he = LanguageCode._CF_enumeration.addEnumeration(unicode_value='he', tag='he')
LanguageCode.hi = LanguageCode._CF_enumeration.addEnumeration(unicode_value='hi', tag='hi')
LanguageCode.ho = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ho', tag='ho')
LanguageCode.hr = LanguageCode._CF_enumeration.addEnumeration(unicode_value='hr', tag='hr')
LanguageCode.ht = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ht', tag='ht')
LanguageCode.hu = LanguageCode._CF_enumeration.addEnumeration(unicode_value='hu', tag='hu')
LanguageCode.hy = LanguageCode._CF_enumeration.addEnumeration(unicode_value='hy', tag='hy')
LanguageCode.hz = LanguageCode._CF_enumeration.addEnumeration(unicode_value='hz', tag='hz')
LanguageCode.ia = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ia', tag='ia')
LanguageCode.id = LanguageCode._CF_enumeration.addEnumeration(unicode_value='id', tag='id')
LanguageCode.ie = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ie', tag='ie')
LanguageCode.ig = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ig', tag='ig')
LanguageCode.ii = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ii', tag='ii')
LanguageCode.ik = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ik', tag='ik')
LanguageCode.io = LanguageCode._CF_enumeration.addEnumeration(unicode_value='io', tag='io')
LanguageCode.is_ = LanguageCode._CF_enumeration.addEnumeration(unicode_value='is', tag='is_')
LanguageCode.it = LanguageCode._CF_enumeration.addEnumeration(unicode_value='it', tag='it')
LanguageCode.iu = LanguageCode._CF_enumeration.addEnumeration(unicode_value='iu', tag='iu')
LanguageCode.ja = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ja', tag='ja')
LanguageCode.jv = LanguageCode._CF_enumeration.addEnumeration(unicode_value='jv', tag='jv')
LanguageCode.ka = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ka', tag='ka')
LanguageCode.kg = LanguageCode._CF_enumeration.addEnumeration(unicode_value='kg', tag='kg')
LanguageCode.ki = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ki', tag='ki')
LanguageCode.kj = LanguageCode._CF_enumeration.addEnumeration(unicode_value='kj', tag='kj')
LanguageCode.kk = LanguageCode._CF_enumeration.addEnumeration(unicode_value='kk', tag='kk')
LanguageCode.kl = LanguageCode._CF_enumeration.addEnumeration(unicode_value='kl', tag='kl')
LanguageCode.km = LanguageCode._CF_enumeration.addEnumeration(unicode_value='km', tag='km')
LanguageCode.kn = LanguageCode._CF_enumeration.addEnumeration(unicode_value='kn', tag='kn')
LanguageCode.ko = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ko', tag='ko')
LanguageCode.kr = LanguageCode._CF_enumeration.addEnumeration(unicode_value='kr', tag='kr')
LanguageCode.ks = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ks', tag='ks')
LanguageCode.ku = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ku', tag='ku')
LanguageCode.kv = LanguageCode._CF_enumeration.addEnumeration(unicode_value='kv', tag='kv')
LanguageCode.kw = LanguageCode._CF_enumeration.addEnumeration(unicode_value='kw', tag='kw')
LanguageCode.ky = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ky', tag='ky')
LanguageCode.la = LanguageCode._CF_enumeration.addEnumeration(unicode_value='la', tag='la')
LanguageCode.lb = LanguageCode._CF_enumeration.addEnumeration(unicode_value='lb', tag='lb')
LanguageCode.lg = LanguageCode._CF_enumeration.addEnumeration(unicode_value='lg', tag='lg')
LanguageCode.li = LanguageCode._CF_enumeration.addEnumeration(unicode_value='li', tag='li')
LanguageCode.ln = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ln', tag='ln')
LanguageCode.lo = LanguageCode._CF_enumeration.addEnumeration(unicode_value='lo', tag='lo')
LanguageCode.lt = LanguageCode._CF_enumeration.addEnumeration(unicode_value='lt', tag='lt')
LanguageCode.lu = LanguageCode._CF_enumeration.addEnumeration(unicode_value='lu', tag='lu')
LanguageCode.lv = LanguageCode._CF_enumeration.addEnumeration(unicode_value='lv', tag='lv')
LanguageCode.mg = LanguageCode._CF_enumeration.addEnumeration(unicode_value='mg', tag='mg')
LanguageCode.mh = LanguageCode._CF_enumeration.addEnumeration(unicode_value='mh', tag='mh')
LanguageCode.mi = LanguageCode._CF_enumeration.addEnumeration(unicode_value='mi', tag='mi')
LanguageCode.mk = LanguageCode._CF_enumeration.addEnumeration(unicode_value='mk', tag='mk')
LanguageCode.ml = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ml', tag='ml')
LanguageCode.mn = LanguageCode._CF_enumeration.addEnumeration(unicode_value='mn', tag='mn')
LanguageCode.mo = LanguageCode._CF_enumeration.addEnumeration(unicode_value='mo', tag='mo')
LanguageCode.mr = LanguageCode._CF_enumeration.addEnumeration(unicode_value='mr', tag='mr')
LanguageCode.ms = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ms', tag='ms')
LanguageCode.mt = LanguageCode._CF_enumeration.addEnumeration(unicode_value='mt', tag='mt')
LanguageCode.my = LanguageCode._CF_enumeration.addEnumeration(unicode_value='my', tag='my')
LanguageCode.na = LanguageCode._CF_enumeration.addEnumeration(unicode_value='na', tag='na')
LanguageCode.nb = LanguageCode._CF_enumeration.addEnumeration(unicode_value='nb', tag='nb')
LanguageCode.nd = LanguageCode._CF_enumeration.addEnumeration(unicode_value='nd', tag='nd')
LanguageCode.ne = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ne', tag='ne')
LanguageCode.ng = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ng', tag='ng')
LanguageCode.nl = LanguageCode._CF_enumeration.addEnumeration(unicode_value='nl', tag='nl')
LanguageCode.nn = LanguageCode._CF_enumeration.addEnumeration(unicode_value='nn', tag='nn')
LanguageCode.no = LanguageCode._CF_enumeration.addEnumeration(unicode_value='no', tag='no')
LanguageCode.nr = LanguageCode._CF_enumeration.addEnumeration(unicode_value='nr', tag='nr')
LanguageCode.nv = LanguageCode._CF_enumeration.addEnumeration(unicode_value='nv', tag='nv')
LanguageCode.ny = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ny', tag='ny')
LanguageCode.oc = LanguageCode._CF_enumeration.addEnumeration(unicode_value='oc', tag='oc')
LanguageCode.oj = LanguageCode._CF_enumeration.addEnumeration(unicode_value='oj', tag='oj')
LanguageCode.om = LanguageCode._CF_enumeration.addEnumeration(unicode_value='om', tag='om')
LanguageCode.or_ = LanguageCode._CF_enumeration.addEnumeration(unicode_value='or', tag='or_')
LanguageCode.os = LanguageCode._CF_enumeration.addEnumeration(unicode_value='os', tag='os')
LanguageCode.pa = LanguageCode._CF_enumeration.addEnumeration(unicode_value='pa', tag='pa')
LanguageCode.pi = LanguageCode._CF_enumeration.addEnumeration(unicode_value='pi', tag='pi')
LanguageCode.pl = LanguageCode._CF_enumeration.addEnumeration(unicode_value='pl', tag='pl')
LanguageCode.ps = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ps', tag='ps')
LanguageCode.pt = LanguageCode._CF_enumeration.addEnumeration(unicode_value='pt', tag='pt')
LanguageCode.qu = LanguageCode._CF_enumeration.addEnumeration(unicode_value='qu', tag='qu')
LanguageCode.rm = LanguageCode._CF_enumeration.addEnumeration(unicode_value='rm', tag='rm')
LanguageCode.rn = LanguageCode._CF_enumeration.addEnumeration(unicode_value='rn', tag='rn')
LanguageCode.ro = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ro', tag='ro')
LanguageCode.ru = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ru', tag='ru')
LanguageCode.rw = LanguageCode._CF_enumeration.addEnumeration(unicode_value='rw', tag='rw')
LanguageCode.sa = LanguageCode._CF_enumeration.addEnumeration(unicode_value='sa', tag='sa')
LanguageCode.sc = LanguageCode._CF_enumeration.addEnumeration(unicode_value='sc', tag='sc')
LanguageCode.sd = LanguageCode._CF_enumeration.addEnumeration(unicode_value='sd', tag='sd')
LanguageCode.se = LanguageCode._CF_enumeration.addEnumeration(unicode_value='se', tag='se')
LanguageCode.sg = LanguageCode._CF_enumeration.addEnumeration(unicode_value='sg', tag='sg')
LanguageCode.si = LanguageCode._CF_enumeration.addEnumeration(unicode_value='si', tag='si')
LanguageCode.sk = LanguageCode._CF_enumeration.addEnumeration(unicode_value='sk', tag='sk')
LanguageCode.sl = LanguageCode._CF_enumeration.addEnumeration(unicode_value='sl', tag='sl')
LanguageCode.sm = LanguageCode._CF_enumeration.addEnumeration(unicode_value='sm', tag='sm')
LanguageCode.sn = LanguageCode._CF_enumeration.addEnumeration(unicode_value='sn', tag='sn')
LanguageCode.so = LanguageCode._CF_enumeration.addEnumeration(unicode_value='so', tag='so')
LanguageCode.sq = LanguageCode._CF_enumeration.addEnumeration(unicode_value='sq', tag='sq')
LanguageCode.sr = LanguageCode._CF_enumeration.addEnumeration(unicode_value='sr', tag='sr')
LanguageCode.ss = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ss', tag='ss')
LanguageCode.st = LanguageCode._CF_enumeration.addEnumeration(unicode_value='st', tag='st')
LanguageCode.su = LanguageCode._CF_enumeration.addEnumeration(unicode_value='su', tag='su')
LanguageCode.sv = LanguageCode._CF_enumeration.addEnumeration(unicode_value='sv', tag='sv')
LanguageCode.sw = LanguageCode._CF_enumeration.addEnumeration(unicode_value='sw', tag='sw')
LanguageCode.ta = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ta', tag='ta')
LanguageCode.te = LanguageCode._CF_enumeration.addEnumeration(unicode_value='te', tag='te')
LanguageCode.tg = LanguageCode._CF_enumeration.addEnumeration(unicode_value='tg', tag='tg')
LanguageCode.th = LanguageCode._CF_enumeration.addEnumeration(unicode_value='th', tag='th')
LanguageCode.ti = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ti', tag='ti')
LanguageCode.tk = LanguageCode._CF_enumeration.addEnumeration(unicode_value='tk', tag='tk')
LanguageCode.tl = LanguageCode._CF_enumeration.addEnumeration(unicode_value='tl', tag='tl')
LanguageCode.tn = LanguageCode._CF_enumeration.addEnumeration(unicode_value='tn', tag='tn')
LanguageCode.to = LanguageCode._CF_enumeration.addEnumeration(unicode_value='to', tag='to')
LanguageCode.tr = LanguageCode._CF_enumeration.addEnumeration(unicode_value='tr', tag='tr')
LanguageCode.ts = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ts', tag='ts')
LanguageCode.tt = LanguageCode._CF_enumeration.addEnumeration(unicode_value='tt', tag='tt')
LanguageCode.tw = LanguageCode._CF_enumeration.addEnumeration(unicode_value='tw', tag='tw')
LanguageCode.ty = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ty', tag='ty')
LanguageCode.ug = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ug', tag='ug')
LanguageCode.uk = LanguageCode._CF_enumeration.addEnumeration(unicode_value='uk', tag='uk')
LanguageCode.ur = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ur', tag='ur')
LanguageCode.uz = LanguageCode._CF_enumeration.addEnumeration(unicode_value='uz', tag='uz')
LanguageCode.ve = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ve', tag='ve')
LanguageCode.vi = LanguageCode._CF_enumeration.addEnumeration(unicode_value='vi', tag='vi')
LanguageCode.vo = LanguageCode._CF_enumeration.addEnumeration(unicode_value='vo', tag='vo')
LanguageCode.wa = LanguageCode._CF_enumeration.addEnumeration(unicode_value='wa', tag='wa')
LanguageCode.wo = LanguageCode._CF_enumeration.addEnumeration(unicode_value='wo', tag='wo')
LanguageCode.xh = LanguageCode._CF_enumeration.addEnumeration(unicode_value='xh', tag='xh')
LanguageCode.yi = LanguageCode._CF_enumeration.addEnumeration(unicode_value='yi', tag='yi')
LanguageCode.yo = LanguageCode._CF_enumeration.addEnumeration(unicode_value='yo', tag='yo')
LanguageCode.za = LanguageCode._CF_enumeration.addEnumeration(unicode_value='za', tag='za')
LanguageCode.zh = LanguageCode._CF_enumeration.addEnumeration(unicode_value='zh', tag='zh')
LanguageCode.zu = LanguageCode._CF_enumeration.addEnumeration(unicode_value='zu', tag='zu')
LanguageCode._InitializeFacetMap(LanguageCode._CF_enumeration)
Namespace.addCategoryObject('typeBinding', 'LanguageCode', LanguageCode)
| Trax-air/ddexreader | ddexreader/ern35/_iso639a2.py | Python | mit | 20,867 |
"""Database for simulation data in a relational database
"""
import threading
import os
from functools import total_ordering
import numpy as np
from sqlalchemy import create_engine, and_, or_
from sqlalchemy.orm import sessionmaker, undefer
from sqlalchemy import Column, Integer, Float, PickleType, String
from sqlalchemy import ForeignKey
from sqlalchemy.orm import relationship, deferred
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.schema import Index
from nestedbasinsampling.utils import Signal
__all__ = ["Minimum", "Replica", "Run", "TransitionState", "Database"]
_schema_version = 2
verbose=False
Base = declarative_base()
@total_ordering
class Minimum(Base):
"""
The Minimum class represents a minimum in the database.
Parameters
----------
energy : float
coords : numpy array
coordinates
Attributes
----------
energy :
the energy of the minimum
coords :
the coordinates of the minimum. This is stored as a pickled numpy
array which SQL interprets as a BLOB.
fvib :
the log product of the squared normal mode frequencies. This is used in
        the free energy calculations
pgorder :
point group order
invalid :
a flag that can be used to indicate a problem with the minimum. E.g. if
the Hessian has more zero eigenvalues than expected.
user_data :
Space to store anything that the user wants. This is stored in SQL
as a BLOB, so you can put anything here you want as long as it's serializable.
Usually a dictionary works best.
Notes
-----
To avoid any double entries of minima and be able to compare them,
only use `Database.addMinimum()` to create a minimum object.
See Also
--------
Database, TransitionState
"""
__tablename__ = 'tbl_minima'
_id = Column(Integer, primary_key=True)
energy = Column(Float)
# deferred means the object is loaded on demand, that saves some time / memory for huge graphs
coords = deferred(Column(PickleType))
'''coordinates of the minimum'''
fvib = Column(Float)
"""log product of the squared normal mode frequencies"""
pgorder = Column(Integer)
"""point group order"""
invalid = Column(Integer)
"""flag indicating if the minimum is invalid"""
_hash = Column(Integer)
"""store hash for quick comparison"""
user_data = deferred(Column(PickleType))
"""this can be used to store information about the minimum"""
def __init__(self, energy, coords):
self.energy = energy
self.coords = np.copy(coords)
self.invalid = False
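        # cheap fingerprint combining energy and summed coordinates; __eq__ and
        # __hash__ fall back on it when the minimum has no database id yet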
self._hash = hash(self.energy + self.coords.sum())
def id(self):
"""return the sql id of the object"""
return self._id
def __eq__(self, m):
"""m can be integer or Minima object"""
if self.id() is None:
return hash(self) == hash(m)
elif isinstance(m, type(self)):
assert m.id() is not None
return self.id() == m.id()
elif hasattr(m, 'id'):
return False
else:
return self.id() == m
def __gt__(self, m):
return self.energy > m.energy
def __hash__(self):
#_id = self.id()
#assert id is not None
#_id = self.energy ## needed to differentiate from Replica
return self._hash
def __deepcopy__(self, memo):
return self.__class__(self.energy, self.coords)
@total_ordering
class Replica(Base):
"""
The Replica class represents a replica in the database.
Parameters
----------
energy : float
coords : numpy array
coordinates
Attributes
----------
energy :
the energy of the replica
coords :
the coordinates of the replica. This is stored as a pickled numpy
array which SQL interprets as a BLOB.
invalid :
        a flag that can be used to indicate a problem with the replica. E.g. if
the Hessian has more zero eigenvalues than expected.
user_data :
Space to store anything that the user wants. This is stored in SQL
as a BLOB, so you can put anything here you want as long as it's serializable.
Usually a dictionary works best.
Notes
-----
To avoid any double entries of minima and be able to compare them,
only use `Database.addMinimum()` to create a minimum object.
See Also
--------
Database, Minimum, Run
"""
__tablename__ = 'tbl_replicas'
_id = Column(Integer, primary_key=True)
energy = Column(Float)
stepsize = Column(Float)
# deferred means the object is loaded on demand, that saves some time /
# memory for huge graphs
coords = deferred(Column(PickleType))
'''coordinates of the minimum'''
invalid = Column(Integer)
"""flag indicating if the replica is invalid"""
_hash = Column(Integer)
"""store hash for quick comparison"""
user_data = deferred(Column(PickleType))
"""this can be used to store information about the replica"""
def __init__(self, energy, coords, stepsize=None):
self.energy = energy
self.coords = np.copy(coords) if coords is not None else coords
self.stepsize = stepsize
csum = 0. if self.coords is None else self.coords.sum()
self._hash = hash(self.energy + csum)
self.invalid = False
def id(self):
"""return the sql id of the object"""
return self._id
def __eq__(self, replica):
"""m can be integer or Replica object"""
if self.id() is None:
return hash(self) == hash(replica)
elif isinstance(replica, type(self)):
assert replica.id() is not None
return self.id() == replica.id()
elif hasattr(replica, 'id'):
return False
else:
return self.id() == replica
def __gt__(self, rep):
return self.energy > rep.energy
def __hash__(self):
return self._hash
def __deepcopy__(self, memo):
return self.__class__(self.energy, self.coords, stepsize=self.stepsize)
class Run(Base):
"""
    The Run class represents a nested sampling run
It begins at the parent replica and finishes at the child replica
Parameters
----------
Emax : numpy array
nlive : numpy int array
parent : Replica
child : Replica
volume : float, optional
stored : numpy array, optional
configs : numpy array, optional
stepsizes : numpy array, optional
Attributes
----------
Emax : numpy array
List of energies generated by nested sampling
nlive : numpy int array
List of live points present during each nested sampling step
parent : Replica
The Replica that the nested sampling started at
child : Replica
The Replica that the nested sampling run finished at
volume : float, optional
The volume enclosed by the contour defined by the parent replica
stored : numpy array, optional
List of the indexes that have been saved in configs and stepsizes
configs : numpy array, optional
        List of the coordinates of the states visited by nested sampling
stepsizes : numpy array, optional
        List of the stepsizes used to generate these states.
"""
__tablename__ = 'tbl_runs'
_id = Column(Integer, primary_key=True)
volume = Column(Float)
# deferred means the object is loaded on demand, that saves some time /
# memory for huge graphs
Emax = deferred(Column(PickleType))
nlive = deferred(Column(PickleType))
stored = deferred(Column(PickleType))
configs = deferred(Column(PickleType))
stepsizes = deferred(Column(PickleType))
_parent_id = Column(Integer, ForeignKey('tbl_replicas._id'))
parent = relationship("Replica",
primaryjoin="Replica._id==Run._parent_id")
'''The replica associated with the start of the nested sampling run'''
_childReplica_id = Column(Integer, ForeignKey('tbl_replicas._id'))
childReplica = relationship("Replica",
primaryjoin="Replica._id==Run._childReplica_id")
"""The replica associated with the end of the path"""
_childMinimum_id = Column(Integer, ForeignKey('tbl_minima._id'))
childMinimum = relationship("Minimum",
primaryjoin="Minimum._id==Run._childMinimum_id")
"""The minimum associated with the end of the path"""
#_child_id = Column(Integer, ForeignKey('tbl_replicas._id'))
#child = relationship("Replica",
# primaryjoin="Replica._id==Run._child_id")
'''The replica associated with the end of the nested sampling run'''
invalid = Column(Integer)
"""flag indicating if the run is invalid"""
_hash = Column(Integer)
"""store hash for quick comparison"""
user_data = deferred(Column(PickleType))
"""this can be used to store information about the nested sampling run"""
def __init__(self, Emax, nlive, parent=None, child=None,
volume=1., stored=None, configs=None, stepsizes=None):
self.Emax = np.array(Emax)
self.nlive = np.array(nlive, dtype=int)
self.volume = volume
if parent is not None:
self._parent_id = parent.id()
self.parent = parent
if child is not None:
self._child_id = child.id()
self.child = child
self.configs = np.array([]) if configs is None else np.array(configs)
self.stepsizes = np.array([]) if stepsizes is None else np.array(stepsizes)
if stored is not None:
self.stored = np.array(stored)
elif len(self.configs) == len(self.Emax):
if len(self.stepsizes):
assert len(self.configs) == len(self.stepsizes)
self.stored = np.arange(len(self.Emax))
else:
self.stored = np.array([], dtype=int)
self._hash = hash(self.Emax.sum())
self.invalid = False
@property
def child(self):
if self.childMinimum is not None:
return self.childMinimum
else:
return self.childReplica
@child.setter
def child(self, child):
if isinstance(child, Minimum):
self._childMinimum_id = child.id()
self.childMinimum = child
elif isinstance(child, Replica):
self._childReplica_id = child.id()
self.childReplica = child
self.childE = child.energy
def id(self):
"""return the sql id of the object"""
return self._id
def __eq__(self, run):
"""m can be integer or Minima object"""
assert self.id() is not None
if isinstance(run, Run):
assert run.id() is not None
return self.id() == run.id()
else:
return self.id() == run
def __hash__(self):
return self._hash
def calcWeights(self):
"""
"""
return calcRunWeights(self)
def calcAverageValue(self, func, std=True, weights=None):
"""
"""
weights = self.calcWeights() if weights is None else weights
return calcRunAverageValue(weights, func, std=True)
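    # The properties below give cumulative log phase-space fractions: each
    # nested sampling step with n live points compresses the enclosed volume by
    # roughly n/(n+1) on average (n/(n+2) for the second moment), so the
    # cumulative sums track the expected remaining fraction and its spread.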
@property
def log_frac(self):
return (np.log(self.nlive) - np.log(self.nlive + 1)).cumsum()
@property
def log_frac2(self):
return (np.log(self.nlive) - np.log(self.nlive + 2)).cumsum()
@property
def log_frac_up(self):
return (np.log(self.nlive) - np.log(self.nlive - 1))[::-1].cumsum()[::-1]
@property
def log_frac2_up(self):
return (np.log(self.nlive) - np.log(self.nlive - 2))[::-1].cumsum()[::-1]
@property
def log_rel_error(self):
logX = self.log_frac
logX2 = self.log_frac2
return np.log1p(np.sqrt(np.exp(logX2 - 2 * logX) - 1))
def frac_index(self, frac, log=False):
"""
"""
logfrac = frac if log else np.log(frac)
logX = self.log_frac
return (logX.size - 1) - logX[::-1].searchsorted(logfrac, side='left')
def frac_energy(self, frac, log=False):
return self.Emax[self.frac_index(frac, log=log)]
def split(self, r1=None, r2=None):
""" Splits the run to go between replica r1 and r2
if r1 or r2 is None then does not split run at that point
"""
configs = self.configs
nlive = self.nlive
stepsizes = self.stepsizes
Emax = self.Emax
stored = self.stored
volume = self.volume
if not isinstance(r1, Replica):
if r1 is not None:
r1 = Replica(r1, None)
else:
r1 = self.parent
if not isinstance(r2, Replica):
if r2 is not None:
r2 = Replica(r2, None)
else:
r2 = self.child
Estart = r1.energy
Efinish = r2.energy
istart = Emax.size - Emax[::-1].searchsorted(
Estart, side='left')
iend = Emax.size - Emax[::-1].searchsorted(
Efinish, side='left')
jstart, jend = stored.searchsorted([istart, iend], side='left')
newEmax = Emax[istart:iend]
newnlive = nlive[istart:iend]
newStored, newStepsizes, newConfigs = None, None, None
if stored.size:
newStored = stored[jstart:jend] - istart
if stepsizes.size:
newStepsizes = stepsizes[jstart:jend]
if configs.size:
newConfigs = configs[jstart:jend]
return type(self)(newEmax, newnlive, r1, r2, volume=volume,
stored=newStored, configs=newConfigs, stepsizes=newStepsizes)
class Path(Base):
"""
The Path class represents a path between a replica and
another replica or minimum
Parameters
----------
energy : float
parent : Replica
child : Replica or Minimum
stored : numpy array
energies : numpy array, optional
configs : numpy array, optional
stepsizes : numpy array, optional
Attributes
----------
energy : float
The maximum energy visited by the path
parent : Replica
The Replica that the nested sampling started at
childReplica : Replica
The Replica that the nested sampling run finished at
childMinimum : Minimum
The Minimum the minimisation finishes at
energies : numpy array, optional
List of energies visted by the path
configs : numpy array, optional
        List of the coordinates of the states visited by nested sampling
"""
__tablename__ = 'tbl_path'
_id = Column(Integer, primary_key=True)
energy = Column(Float)
childE = Column(Float)
# deferred means the object is loaded on demand, that saves some time / memory for huge graphs
energies = deferred(Column(PickleType))
stored = deferred(Column(PickleType))
configs = deferred(Column(PickleType))
_parent_id = Column(Integer, ForeignKey('tbl_replicas._id'))
parent = relationship("Replica",
primaryjoin="Replica._id==Path._parent_id")
"""The replica associated with the start of the path"""
_childReplica_id = Column(Integer, ForeignKey('tbl_replicas._id'))
childReplica = relationship("Replica",
primaryjoin="Replica._id==Path._childReplica_id")
"""The replica associated with the end of the path"""
_childMinimum_id = Column(Integer, ForeignKey('tbl_minima._id'))
childMinimum = relationship("Minimum",
primaryjoin="Minimum._id==Path._childMinimum_id")
"""The minimum associated with the end of the path"""
quench = Column(Integer)
minimum = Column(Integer)
ascent = Column(Integer)
invalid = Column(Integer)
"""flag indicating if the path is invalid"""
_hash = Column(Integer)
"""store hash for quick comparison"""
user_data = deferred(Column(PickleType))
"""this can be used to store information about the nested sampling run"""
def __init__(self, energy, parent, child, energies=None, stored=None,
configs=None, quench=False, minimum=False,
**user_data):
self.energy = np.array(energy)
self.parent = parent
self.child = child
self.energies = np.array([]) if energies is None else np.array(energies)
self.configs = np.array([]) if configs is None else np.array(configs)
if stored is not None:
self.stored = np.array(stored)
elif len(self.configs) == len(self.energies):
self.stored = np.arange(len(self.energies))
else:
self.stored = np.array([], dtype=int)
if user_data:
self.user_data = user_data
self.quench = quench
self.minimum = minimum
self._hash = hash(self.child) ^ hash(self.parent)
self.invalid = False
@property
def child(self):
if self.childMinimum is not None:
return self.childMinimum
else:
return self.childReplica
@child.setter
def child(self, child):
if isinstance(child, Minimum):
self._childMinimum_id = child.id()
self.childMinimum = child
elif isinstance(child, Replica):
self._childReplica_id = child.id()
self.childReplica = child
self.childE = child.energy
def id(self):
"""return the sql id of the object"""
return self._id
def __eq__(self, m):
"""m can be integer or Minima object"""
assert self.id() is not None
if isinstance(m, Minimum):
assert m.id() is not None
return self.id() == m.id()
else:
return self.id() == m
def __hash__(self):
return self._hash
class TransitionState(Base):
"""Transition state object
The TransitionState class represents a saddle point in the database.
Parameters
----------
energy : float
coords : numpy array
min1 : Minimum object
first minimum
min2 : Minimum object
first minimum
eigenval : float, optional
lowest (single negative) eigenvalue of the saddle point
eigenvec : numpy array, optional
eigenvector which corresponds to the negative eigenvalue
fvib : float
log product of squared frequencies for free energy calculation
pgorder : integer
point group order
Attributes
----------
energy :
The energy of the transition state
coords :
The coordinates of the transition state. This is stored as a pickled numpy
array which SQL interprets as a BLOB.
fvib :
The log product of the squared normal mode frequencies. This is used in
        the free energy calculations
pgorder :
The point group order
invalid :
A flag that is used to indicate a problem with the transition state. E.g. if
        the Hessian has more than one negative eigenvalue then it is a higher-order saddle.
user_data :
Space to store anything that the user wants. This is stored in SQL
as a BLOB, so you can put anything here you want as long as it's serializable.
Usually a dictionary works best.
minimum1, minimum2 :
These returns the minima on either side of the transition state
eigenvec :
The vector which points along the direction crossing the transition state.
This is the eigenvector of the lowest non-zero eigenvalue.
eigenval :
The eigenvalue corresponding to `eigenvec`. A.k.a. the curvature
along the direction given by `eigenvec`
Notes
-----
To avoid any double entries and be able to compare them, only use
    Database.addTransitionState to create a TransitionState object.
programming note: The functions in the database require that
ts.minimum1._id < ts.minimum2._id. This will be handled automatically
by the database, but we must remember not to screw it up
See Also
--------
Database, Minimum
"""
__tablename__ = "tbl_transition_states"
_id = Column(Integer, primary_key=True)
energy = Column(Float)
'''energy of transition state'''
coords = deferred(Column(PickleType))
'''coordinates of transition state'''
_minimum1_id = Column(Integer, ForeignKey('tbl_minima._id'))
minimum1 = relationship("Minimum",
primaryjoin="Minimum._id==TransitionState._minimum1_id")
'''first minimum which connects to transition state'''
_minimum2_id = Column(Integer, ForeignKey('tbl_minima._id'))
minimum2 = relationship("Minimum",
primaryjoin="Minimum._id==TransitionState._minimum2_id")
'''second minimum which connects to transition state'''
eigenval = Column(Float)
'''coordinates of transition state'''
eigenvec = deferred(Column(PickleType))
'''coordinates of transition state'''
fvib = Column(Float)
"""log product of the squared normal mode frequencies"""
pgorder = Column(Integer)
"""point group order"""
invalid = Column(Integer)
"""flag indicating if the transition state is invalid"""
user_data = deferred(Column(PickleType))
"""this can be used to store information about the transition state """
def __init__(self, energy, coords, min1, min2, eigenval=None, eigenvec=None):
assert min1.id() is not None
assert min2.id() is not None
self.energy = energy
self.coords = np.copy(coords)
if min1.id() < min2.id():
self.minimum1 = min1
self.minimum2 = min2
else:
self.minimum1 = min2
self.minimum2 = min1
if eigenvec is not None:
self.eigenvec = np.copy(eigenvec)
self.eigenval = eigenval
self.invalid = False
def id(self):
"""return the sql id of the object"""
return self._id
class SystemProperty(Base):
"""table to hold system properties like potential parameters and number of atoms
The properties can be stored as integers, floats, strings, or a pickled object.
Only one of the property value types should be set for each property.
"""
__tablename__ = "tbl_system_property"
_id = Column(Integer, primary_key=True)
property_name = Column(String)
int_value = Column(Integer)
float_value = Column(Float)
string_value = Column(String)
pickle_value = deferred(Column(PickleType))
def __init__(self, property_name):
self.property_name = property_name
@property
def name(self):
return self.property_name
def _values(self):
"""return a dictionary of the values that are not None"""
values = dict(int_value=self.int_value, float_value=self.float_value,
string_value=self.string_value, pickle_value=self.pickle_value)
values = dict([(k,v) for k,v in values.iteritems() if v is not None])
return values
@property
def value(self):
"""return the property value"""
actual_values = [v for v in self._values().values() if v is not None]
if len(actual_values) == 1:
return actual_values[0]
elif len(actual_values) == 0:
return None
elif len(actual_values) > 1:
print "SystemProperty: multiple property values are set"
return actual_values
return None
@value.setter
def value(self, value):
if isinstance(value, int):
dtype = "int"
elif isinstance(value, float):
dtype = "float"
elif isinstance(value, basestring):
dtype = "string"
else:
dtype = "pickle"
if dtype == "string":
self.string_value = value
elif dtype == "int":
self.int_value = value
elif dtype == "float":
self.float_value = value
elif dtype == "pickle":
self.pickle_value = value
else:
raise ValueError('dtype must be one of "int", "float", "string", "pickle", or None')
def item(self):
"""return a tuple of (name, value)"""
return self.name, self.value
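# A SystemProperty stores its value in exactly one of the typed columns
# (int/float/string/pickle): the value setter above picks the column from the
# Python type, and the value getter returns whichever single column is set.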
#Index('idx_runs', Run.__table__.c._parent_id, Run.__table__.c._child_id)
Index('idx_transition_states', TransitionState.__table__.c._minimum1_id,
TransitionState.__table__.c._minimum2_id)
#Index('idx_replica_energy', Replica.__table__.c.energy)
Index('idx_minimum_energy', Minimum.__table__.c.energy)
Index('idx_transition_state_energy', Minimum.__table__.c.energy)
class IntervalCommit(object):
""" This class manages adding data to the database
Parameters
----------
db : database object
func : callable
The function that adds the data to the database
"""
def __init__(self, func, db, commit_interval=10):
self.db = db
self.func = func
self.commit_interval = commit_interval
self.count = 0
def __call__(self, *args, **kwargs):
kwargs['commit'] = self.count % self.commit_interval == 0
self.count += 1
return self.func(*args, **kwargs)
def __del__(self):
if self.commit_interval != 1:
self.commit()
def commit(self):
self.db.session.commit()
class MinimumAdder(object):
"""This class manages adding minima to the database
Parameters
----------
db : database object
Ecut: float, optional
energy cutoff, don't add minima which are higher in energy
max_n_minima : int, optional
keep only the max_n_minima with the lowest energies. If E is greater
than the minimum with the highest energy in the database, then don't add
this minimum and return None. Else add this minimum and delete the minimum
with the highest energy. if max_n_minima < 0 then it is ignored.
commit_interval : int, optional
Commit the database changes to the hard drive every `commit_interval` steps.
Committing too frequently can be slow, this is used to speed things up.
"""
def __init__(self, db, Ecut=None, max_n_minima=None, commit_interval=1):
self.db = db
self.Ecut = Ecut
self.max_n_minima = max_n_minima
self.commit_interval = commit_interval
self.count = 0
def __call__(self, E, coords):
"""this is called to add a minimum to the database"""
if self.Ecut is not None:
if E > self.Ecut:
return None
commit = self.count % self.commit_interval == 0
self.count += 1
return self.db.addMinimum(E, coords, max_n_minima=self.max_n_minima,
commit=commit)
def __del__(self):
"""ensure that all the changes to the database are committed to the hard drive
"""
if self.commit_interval != 1:
self.db.session.commit()
def commit(self):
self.db.session.commit()
def _compare_properties(prop, v2):
v1 = prop.value
try:
return bool(v1 == v2)
except Exception:
pass
try:
# see if they are numpy arrays
return np.all(v1 == v2)
except:
pass
print "warning, could not compare value", v2, "with", v1
return False
class Database(object):
"""Database storage class
The Database class handles the connection to the database. It has functions to create new Minima and
TransitionState objects. The objects are persistent in the database and exist as
soon as the Database class in connected to the database. If any value in the objects is changed,
the changes are automatically persistent in the database (TODO: be careful, check commit transactions, ...)
Database uses SQLAlchemy to connect to the database. Check the web page for available connectors. Unless
you know better, the standard sqlite should be used. The database can be generated in memory (default) or
written to a file if db is specified when creating the class.
Parameters
----------
db : string, optional
filename of new or existing database to connect to. default creates
new database in memory.
accuracy : float, optional
energy tolerance to count minima as equal
connect_string : string, optional
connection string, default is sqlite database
compareMinima : callable, `bool = compareMinima(min1, min2)`, optional
called to determine if two minima are identical. Only called
if the energies are within `accuracy` of each other.
createdb : boolean, optional
create database if not exists, default is true
Attributes
----------
engine : sqlalchemy database engine
session : sqlalchemy session
accuracy : float
on_minimum_removed : signal
called when a minimum is removed from the database
on_minimum_added : signal
called when a new, unique, minimum is added to the database
on_ts_removed : signal
called when a transition_state is removed from the database
on_ts_added : signal
called when a new, unique, transition state is added to the database
compareMinima
Examples
--------
>>> from pele.storage import Database
>>> db = Database(db="test.db")
>>> for energy in np.random.random(10):
    >>>     db.addMinimum(energy, np.random.random(10))
>>>
    >>> for minimum in db.minima():
>>> print minimum.energy
See Also
--------
Minimum
TransitionState
"""
engine = None
session = None
connection = None
accuracy = 1e-3
compareMinima=None
def __init__(self, db=":memory:", accuracy=1e-3, connect_string='sqlite:///%s',
compareMinima=None, createdb=True, commit_interval=10):
self.accuracy=accuracy
self.compareMinima = compareMinima
self.commit_interval = commit_interval
if not os.path.isfile(db) or db == ":memory:":
newfile = True
if not createdb:
raise IOError("createdb is False, but database does not exist (%s)" % db)
else:
newfile = False
# set up the engine which will manage the backend connection to the database
self.engine = create_engine(connect_string % db, echo=verbose)
if not newfile and not self._is_nbs_database():
raise IOError("existing file (%s) is not a nbs database." % db)
# set up the tables and check the schema version
if newfile:
self._set_schema_version()
self._check_schema_version()
self._update_schema()
# self._check_schema_version_and_create_tables(newfile)
# set up the session which will manage the frontend connection to the database
Session = sessionmaker(bind=self.engine)
self.session = Session()
# these functions will be called when a minimum or transition state is
# added or removed
self.on_minimum_added = Signal()
self.on_minimum_removed = Signal()
self.on_ts_added = Signal()
self.on_ts_removed = Signal()
self.on_replica_added = Signal()
self.on_replica_removed = Signal()
self.on_run_added = Signal()
self.on_run_removed = Signal()
self.on_path_added = Signal()
self.on_path_removed = Signal()
self.lock = threading.Lock()
self.connection = self.engine.connect()
def _is_nbs_database(self):
conn = self.engine.connect()
result = True
if not all([self.engine.has_table("tbl_minima"),
self.engine.has_table("tbl_replicas"),
self.engine.has_table("tbl_runs"),
self.engine.has_table("tbl_transition_states")]):
result = False
conn.close()
return result
def _set_schema_version(self):
conn = self.engine.connect()
conn.execute("PRAGMA user_version = %d;"%_schema_version)
conn.close()
def _update_schema(self):
conn = self.engine.connect()
Base.metadata.create_all(bind=self.engine)
conn.close()
def _check_schema_version(self):
conn = self.engine.connect()
result=conn.execute("PRAGMA user_version;")
schema = result.fetchone()[0]
result.close()
conn.close()
if _schema_version != schema:
raise IOError("database schema outdated, current (newest) version: "
"%d (%d). Please use migrate_db.py in pele/scripts to update database"%(schema, _schema_version))
def paths(self):
return self.session.query(Path).all()
def addPath(self, energy, parent, child, quench=False, minimum=False,
energies=None, stored=None, configs=None, commit=True):
self.lock.acquire()
configs = None if configs is None else np.asanyarray(configs)
new = Path(energy, parent, child, quench=quench, minimum=minimum,
stored=stored, energies=energies, configs=configs)
self.session.add(new)
if commit:
self.session.commit()
self.lock.release()
self.on_path_added(new)
return new
def get_path(self, pathid):
""" returns nested sampling run corresponding to that id """
return self.session.query(Path).get(pathid)
def update_path(self, path, energy, parent, child,
energies=None, stored=None, configs=None, commit=True):
"""
Parameters
----------
        path: Path or id
            Path object or id to update
configs : list of numpy.array, optional
list of configurations of dead points
commit : bool, optional
commit changes to database
"""
pathid = path.id() if isinstance(path, Path) else path
self.lock.acquire()
path = self.get_path(pathid)
path.energy = energy
path.parent = parent
path.child = child
path.energies = energies
path.configs = None if configs is None else np.asanyarray(configs)
if commit:
self.session.commit()
self.lock.release()
        return path
def path_adder(self, interval=None):
interval = self.commit_interval if interval is None else interval
return IntervalCommit(self.addPath, self, commit_interval=interval)
def runs(self):
return self.session.query(Run).all()
def addRun(self, Emax, Nlive, parent, child, volume=1.,
stored=None, configs=None, stepsizes=None, commit=True):
"""add a new minimum to database
Parameters
----------
Emax : list of floats
energy of dead points
Nlive : list of integers
number of live points
Nremove : list of integers
number of replicas removed
configs : list of numpy.array, optional
configurations of dead points
commit : bool, optional
commit changes to database
Returns
-------
run : NestedSamplingRun
Nested sampling run which was added
"""
self.lock.acquire()
configs = None if configs is None else np.asanyarray(configs)
new = Run(Emax, Nlive, parent, child, volume=volume,
stored=stored, configs=configs, stepsizes=stepsizes)
self.session.add(new)
if commit:
self.session.commit()
self.lock.release()
self.on_run_added(new)
return new
def get_run(self, runid):
""" returns nested sampling run corresponding to that id """
return self.session.query(Run).get(runid)
def update_run(self, run, Emax, Nlive, parent, child,
volume=1., configs=None, commit=True):
"""
Parameters
----------
run: NestedSamplingRun or id
NestedSamplingRun object or id to update
Emax: list of floats
list of energies of dead points
Nlive : list of integers
list of live points
Nremove : list of integers
list of number of replicas removed
configs : list of numpy.array, optional
list of configurations of dead points
commit : bool, optional
commit changes to database
"""
runid = run.id() if isinstance(run, Run) else run
self.lock.acquire()
run = self.get_run(runid)
run.Emax = np.asanyarray(Emax)
run.Nlive = np.asanyarray(Nlive)
run.parent = parent
run.child = child
run.volume = volume
run.configs = None if configs is None else np.asanyarray(configs)
if commit:
self.session.commit()
self.lock.release()
return run
def removeRun(self, run, commit=True):
self.session.delete(run)
if commit:
self.session.commit()
self.on_run_removed(run)
def run_adder(self, interval=None):
interval = self.commit_interval if interval is None else interval
return IntervalCommit(self.addRun, self, commit_interval=interval)
def _highest_energy_minimum(self):
"""return the minimum with the highest energy"""
candidates = self.session.query(Minimum).order_by(Minimum.energy.desc()).\
limit(1).all()
return candidates[0]
def get_lowest_energy_minimum(self):
"""return the minimum with the lowest energy"""
candidates = self.session.query(Minimum).order_by(Minimum.energy).\
limit(1).all()
return candidates[0]
def findMinimum(self, E, coords):
candidates = self.session.query(Minimum).\
options(undefer("coords")).\
filter(Minimum.energy > E-self.accuracy).\
filter(Minimum.energy < E+self.accuracy)
new = Minimum(E, coords)
for m in candidates:
if self.compareMinima:
if not self.compareMinima(new, m):
continue
return m
return None
def updateMinimumData(self, m, user_data, commit=True):
"""
Adds user_data to minimum
"""
self.lock.acquire()
m.user_data = user_data
self.session.add(m)
if commit:
self.session.commit()
self.lock.release()
self.on_minimum_added(m)
return m
def addMinimum(self, E, coords, commit=True, max_n_minima=-1, pgorder=None, fvib=None):
"""add a new minimum to database
Parameters
----------
E : float
coords : numpy.array
coordinates of the minimum
commit : bool, optional
commit changes to database
max_n_minima : int, optional
keep only the max_n_minima with the lowest energies. If E is greater
than the minimum with the highest energy in the database, then don't add
this minimum and return None. Else add this minimum and delete the minimum
with the highest energy. if max_n_minima < 0 then it is ignored.
Returns
-------
minimum : Minimum
minimum which was added (not necessarily a new minimum)
"""
self.lock.acquire()
# undefer coords because it is likely to be used by compareMinima and
# it is slow to load them individually by accessing the database repetitively.
candidates = self.session.query(Minimum).\
options(undefer("coords")).\
filter(Minimum.energy.between(E-self.accuracy, E+self.accuracy))
new = Minimum(E, coords)
for m in candidates:
if self.compareMinima:
if not self.compareMinima(new, m):
continue
self.lock.release()
return m
if max_n_minima is not None and max_n_minima > 0:
if self.number_of_minima() >= max_n_minima:
mmax = self._highest_energy_minimum()
if E >= mmax.energy:
# don't add the minimum
self.lock.release()
return None
else:
# remove the minimum with the highest energy and continue
self.removeMinimum(mmax, commit=commit)
if fvib is not None:
new.fvib = fvib
if pgorder is not None:
new.pgorder = pgorder
self.session.add(new)
if commit:
self.session.commit()
self.lock.release()
self.on_minimum_added(new)
return new
def getMinimum(self, mid):
"""return the minimum with a given id"""
return self.session.query(Minimum).get(mid)
def addReplica(self, energy, coords, commit=True, stepsize=None):
self.lock.acquire()
        replica = Replica(energy, coords, stepsize=stepsize)
self.session.add(replica)
if commit:
self.session.commit()
self.lock.release()
self.on_replica_added(replica)
return replica
def replica_adder(self, interval=None):
interval = self.commit_interval if interval is None else interval
return IntervalCommit(self.addReplica, self, commit_interval=interval)
def replicas(self):
return self.session.query(Replica).all()
def on_replica_added(self, replica):
pass
def addTransitionState(self, energy, coords, min1, min2, commit=True,
eigenval=None, eigenvec=None, pgorder=None, fvib=None):
"""Add transition state object
Parameters
----------
energy : float
energy of transition state
coords : numpy array
coordinates of transition state
min1, min2 : Minimum
minima on either side of the transition states
eigenval : float
the eigenvalue (curvature) across the transition state
eigenvec : numpy array
the eigenvector associated with eigenval
commit : bool
commit changes to sql database
Returns
-------
ts : TransitionState
the transition state object (not necessarily new)
"""
m1, m2 = min1, min2
if m1.id() > m2.id():
m1, m2 = m2, m1
candidates = self.session.query(TransitionState).\
options(undefer("coords")).\
filter(or_(
and_(TransitionState.minimum1==m1,
TransitionState.minimum2==m2),
and_(TransitionState.minimum1==m2,
TransitionState.minimum2==m1),
)).\
filter(TransitionState.energy.between(energy-self.accuracy, energy+self.accuracy))
for m in candidates:
return m
new = TransitionState(energy, coords, m1, m2, eigenval=eigenval, eigenvec=eigenvec)
if fvib is not None:
new.fvib = fvib
if pgorder is not None:
new.pgorder = pgorder
self.session.add(new)
if commit:
self.session.commit()
self.on_ts_added(new)
return new
def getTransitionState(self, min1, min2):
"""return the TransitionState between two minima
Returns
-------
ts : None or TransitionState
"""
m1, m2 = min1, min2
candidates = self.session.query(TransitionState).\
filter(or_(
and_(TransitionState.minimum1==m1,
TransitionState.minimum2==m2),
and_(TransitionState.minimum1==m2,
TransitionState.minimum2==m1),
))
for m in candidates:
return m
return None
def getTransitionStatesMinimum(self, min1):
"""return all transition states connected to a minimum
Returns
-------
ts : None or TransitionState
"""
candidates = self.session.query(TransitionState).\
filter(or_(TransitionState.minimum1==min1,
TransitionState.minimum2==min1))
return candidates.all()
def getTransitionStateFromID(self, id_):
"""return the transition state with id id_"""
return self.session.query(TransitionState).get(id_)
def minima(self, order_energy=True):
"""return an iterator over all minima in database
Parameters
----------
order_energy : bool
order the minima by energy
Notes
-----
Minimum.coords is deferred in database queries. If you need to access
coords for multiple minima it is *much* faster to `undefer` before
executing the query by, e.g.
`session.query(Minimum).options(undefer("coords"))`
"""
if order_energy:
return self.session.query(Minimum).order_by(Minimum.energy).all()
else:
return self.session.query(Minimum).all()
def transition_states(self, order_energy=False):
"""return an iterator over all transition states in database
"""
if order_energy:
return self.session.query(TransitionState).order_by(TransitionState.energy).all()
else:
return self.session.query(TransitionState).all()
def minimum_adder(self, Ecut=None, max_n_minima=None, commit_interval=1):
"""wrapper class to add minima
Since pickle cannot handle pointer to member functions, this class wraps the call to
add minimum.
Parameters
----------
Ecut: float, optional
energy cutoff, don't add minima which are higher in energy
max_n_minima : int, optional
keep only the max_n_minima with the lowest energies. If E is greater
than the minimum with the highest energy in the database, then don't add
this minimum and return None. Else add this minimum and delete the minimum
with the highest energy. if max_n_minima < 0 then it is ignored.
Returns
-------
handler: minimum_adder class
minimum handler to add minima
"""
return MinimumAdder(self, Ecut=Ecut, max_n_minima=max_n_minima,
commit_interval=commit_interval)
def removeMinimum(self, m, commit=True):
"""remove a minimum from the database
Remove a minimum and any objects (TransitionState)
pointing to that minimum.
"""
        # delete any transition state objects pointing to this minimum
candidates = self.session.query(TransitionState).\
filter(or_(TransitionState.minimum1 == m,
TransitionState.minimum2 == m))
candidates = list(candidates)
for ts in candidates:
self.on_ts_removed(ts)
self.session.delete(ts)
self.on_minimum_removed(m)
# delete the minimum
self.session.delete(m)
if commit:
self.session.commit()
def mergeMinima(self, min1, min2):
"""merge two minima in the database
min2 will be deleted and everything that
points to min2 will point to min1 instead.
"""
# find all transition states for which ts.minimum1 is min2
candidates = self.session.query(TransitionState).\
filter(TransitionState.minimum1 == min2)
for ts in candidates:
# should we check if this will duplicate an existing transition state?
ts.minimum1 = min1
if ts.minimum1.id() > ts.minimum2.id():
ts.minimum1, ts.minimum2 = ts.minimum2, ts.minimum1
# find all transition states for which ts.minimum2 is min2
candidates = self.session.query(TransitionState).\
filter(TransitionState.minimum2 == min2)
for ts in candidates:
# should we check if this will duplicate an existing transition state?
ts.minimum2 = min1
if ts.minimum1.id() > ts.minimum2.id():
ts.minimum1, ts.minimum2 = ts.minimum2, ts.minimum1
candidates = self.session.query(Path).\
filter(Path.childMinimum == min2)
for path in candidates:
path.childMinimum = min1
self.session.delete(min2)
self.session.commit()
def remove_transition_state(self, ts, commit=True):
"""remove a transition states from the database
"""
self.on_ts_removed(ts)
self.session.delete(ts)
if commit:
self.session.commit()
def number_of_minima(self):
"""return the number of minima in the database
Notes
-----
        This is much faster than len(database.minima()), but it is not instantaneous.
It takes a longer time for larger databases. The first call to number_of_minima()
can be much faster than subsequent calls.
"""
return self.session.query(Minimum).count()
def number_of_transition_states(self):
"""return the number of transition states in the database
Notes
-----
see notes for number_of_minima()
See Also
--------
number_of_minima
"""
return self.session.query(TransitionState).count()
def get_property(self, property_name):
"""return the minimum with a given name"""
candidates = self.session.query(SystemProperty).\
filter(SystemProperty.property_name == property_name)
return candidates.first()
def properties(self, as_dict=False):
query = self.session.query(SystemProperty)
if as_dict:
return dict([p.item() for p in query])
else:
return query.all()
def add_property(self, name, value, dtype=None, commit=True, overwrite=True):
"""add a system property to the database
Parameters
----------
name : string
the name of the property
value : object
the value of the property
dtype : string
the datatype of the property. This can be "int", "float",
"string", "pickle", or None. If None, the datatype will be
automatically determined.
        This could be anything, such as a potential parameter, the number of atoms, or the
list of frozen atoms. The properties can be stored as integers, floats,
strings, or a pickled object. Only one of the property value types
should be set for each property.
For a value of type "pickle", pass the object you want pickled, not
the pickled object. We will do the pickling and unpickling for you.
"""
new = self.get_property(name)
if new is None:
new = SystemProperty(name)
else:
# the database already has a property with this name, Try to determine if they are the same
same = _compare_properties(new, value)
if not same:
if not overwrite:
raise RuntimeError("property %s already exists and the value %s does not compare equal to the new value." % (new.item(), value))
print "warning: overwriting old property", new.item()
new.value = value
# if dtype is None:
# # try to determine type of the value
# if isinstance(value, int):
# dtype = "int"
# elif isinstance(value, float):
# dtype = "float"
# elif isinstance(value, basestring):
# dtype = "string"
# else:
# dtype = "pickle"
#
# if dtype == "string":
# new.string_value = value
# elif dtype == "int":
# new.int_value = value
# elif dtype == "float":
# new.float_value = value
# elif dtype == "pickle":
# new.pickle_value = value
# else:
# raise ValueError('dtype must be one of "int", "float", "string", "pickle", or None')
self.session.add(new)
if commit:
self.session.commit()
return new
def add_properties(self, properties, overwrite=True):
"""add multiple properties from a dictionary
properties : dict
a dictionary of (name, value) pairs. The data type of the value
will be determined automatically
"""
for name, value in properties.iteritems():
self.add_property(name, value, commit=True, overwrite=overwrite)
def test_fast_insert(): # pragma: no cover
"""bulk inserts are *really* slow, we should add something along the lines of this
answer to speed things up where needed
http://stackoverflow.com/questions/11769366/why-is-sqlalchemy-insert-with-sqlite-25-times-slower-than-using-sqlite3-directly
"""
db = Database()
print Minimum.__table__.insert()
db.engine.execute(
Minimum.__table__.insert(),
[dict(energy=.01, coords=np.array([0.,1.]), invalid=False),
dict(energy=.02, coords=np.array([0.,2.]), invalid=False),
]
)
m1, m2 = db.minima()[:2]
db.engine.execute(TransitionState.__table__.insert(),
[dict(energy=1., coords=np.array([1,1.]), _minimum1_id=m1.id(),
_minimum2_id=m2.id())
]
)
for m in db.minima():
print m.id()
print m.energy
print m.coords
print m.invalid, bool(m.invalid)
ts = db.transition_states()[0]
print ts.minimum1.energy
print ts.minimum2.energy
print ts.id()
from ..nestedsampling.integration import (
calcRunWeights, calcRunAverageValue)
if __name__ == "__main__":
db = Database('tmp.sql')
r1 = db.addReplica(1., np.random.random((100,31*3)))
r2 = db.addReplica(0., np.random.random((100,31*3)))
m = db.addMinimum(-1., np.random.random((100,31*3)))
run = db.addRun([0.,1.],[1,1],r1,r2)
path1 = db.addPath(1., r1, r2)
path2 = db.addPath(0., r2, m)
print m
if __name__ == "__main__":
test_fast_insert()
if __name__ == "__main__":
db = Database('test.sql')
Es = np.random.random(100)
Coords = np.random.random((100,31,3))
for E, coords in zip(Es, Coords): db.addMinimum(E, coords)
adder = db.minimum_adder(commit_interval=100)
for i in xrange(100): adder(np.random.random(), np.random.random((31,3)))
for E, coords in zip(Es, Coords): adder(E, coords)
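# A minimal usage sketch of the classes above; the property names, energies
# and coordinates below are invented for illustration and the database lives
# only in memory.
if __name__ == "__main__":
    db = Database()
    db.add_property("natoms", 31)
    db.add_property("potential_params", {"sigma": 1.0, "eps": 1.0})
    print db.get_property("natoms").value
    print db.properties(as_dict=True)
    # MinimumAdder rejects candidates above Ecut and commits on every call by default
    adder = db.minimum_adder(Ecut=0.5)
    for E in np.random.random(20):
        adder(E, np.random.random(31 * 3))
    print "minima stored below Ecut:", db.number_of_minima()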
| matthewghgriffiths/nestedbasinsampling | nestedbasinsampling/storage/database.py | Python | gpl-3.0 | 54,976 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reaching definition analysis.
This analysis attaches a set of a Definition objects to each symbol, one
for each distinct definition that may reach it. The Definition objects are
mutable and may be used by subsequent analyses to further annotate data like
static type and value information.
The analysis also attaches the set of the symbols defined at the entry of
control flow statements.
Requires activity analysis.
"""
import weakref
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import transformer
class Definition(object):
"""Definition objects describe a unique definition of a variable.
Subclasses of this may be used by passing an appropriate factory function to
resolve.
Attributes:
param_of: Optional[ast.AST]
directives: Dict, optional definition annotations
"""
def __init__(self):
self.param_of = None
self.directives = {}
def __repr__(self):
return '%s[%d]' % (self.__class__.__name__, id(self))
class _NodeState(object):
"""Abstraction for the state of the CFG walk for reaching definition analysis.
This is a value type. Only implements the strictly necessary operators.
Attributes:
value: Dict[qual_names.QN, Set[Definition, ...]], the defined symbols and
their possible definitions
"""
def __init__(self, init_from=None):
if init_from:
if isinstance(init_from, _NodeState):
self.value = {
s: set(other_infos) for s, other_infos in init_from.value.items()
}
elif isinstance(init_from, dict):
self.value = {s: set((init_from[s],)) for s in init_from}
else:
assert False, init_from
else:
self.value = {}
def __eq__(self, other):
if frozenset(self.value.keys()) != frozenset(other.value.keys()):
return False
ret = all(self.value[s] == other.value[s] for s in self.value)
return ret
def __ne__(self, other):
return not self.__eq__(other)
def __or__(self, other):
assert isinstance(other, _NodeState)
result = _NodeState(self)
for s, other_infos in other.value.items():
if s in result.value:
result.value[s].update(other_infos)
else:
result.value[s] = set(other_infos)
return result
def __sub__(self, other):
assert isinstance(other, set)
result = _NodeState(self)
for s in other:
result.value.pop(s, None)
return result
def __repr__(self):
return 'NodeState[%s]=%s' % (id(self), repr(self.value))
class Analyzer(cfg.GraphVisitor):
"""CFG visitor that determines reaching definitions at statement level."""
def __init__(self, graph, definition_factory):
self._definition_factory = definition_factory
super(Analyzer, self).__init__(graph)
self.gen_map = {}
def init_state(self, _):
return _NodeState()
def visit_node(self, node):
prev_defs_out = self.out[node]
defs_in = _NodeState()
for n in node.prev:
defs_in |= self.out[n]
if anno.hasanno(node.ast_node, anno.Static.SCOPE):
node_scope = anno.getanno(node.ast_node, anno.Static.SCOPE)
# The definition objects created by each node must be singletons because
# their ids are used in equality checks.
if node not in self.gen_map:
node_symbols = {}
# Every binding operation (assign, nonlocal, global, etc.) counts as a
# definition, with the exception of del, which only deletes without
# creating a new variable.
newly_defined = ((node_scope.bound | node_scope.globals) -
node_scope.deleted)
for s in newly_defined:
def_ = self._definition_factory()
node_symbols[s] = def_
# Every param receives a definition. Params are not necessarily
# considered as "modified".
for s, p in node_scope.params.items():
def_ = self._definition_factory()
def_.param_of = weakref.ref(p)
node_symbols[s] = def_
self.gen_map[node] = _NodeState(node_symbols)
      gen = self.gen_map[node]
      kill = node_scope.modified | node_scope.deleted
      defs_out = gen | (defs_in - kill)
else:
assert self.can_ignore(node), (node.ast_node, node)
defs_out = defs_in
self.in_[node] = defs_in
self.out[node] = defs_out
return prev_defs_out != defs_out
class TreeAnnotator(transformer.Base):
"""AST visitor that annotates each symbol name with its reaching definitions.
Simultaneously, the visitor runs the dataflow analysis on each function node,
accounting for the effect of closures. For example:
def foo():
bar = 1
def baz():
# bar = 1 reaches here
"""
def __init__(self, source_info, graphs, definition_factory):
super(TreeAnnotator, self).__init__(source_info)
self.allow_skips = False
self.definition_factory = definition_factory
self.graphs = graphs
self.current_analyzer = None
self.current_cfg_node = None
def visit_FunctionDef(self, node):
parent_analyzer = self.current_analyzer
subgraph = self.graphs[node]
analyzer = Analyzer(subgraph, self.definition_factory)
analyzer.visit_forward()
# Recursively process any remaining subfunctions.
self.current_analyzer = analyzer
node.args = self.visit(node.args)
node.body = self.visit_block(node.body)
self.current_analyzer = parent_analyzer
return node
def visit_Name(self, node):
if self.current_analyzer is None:
# Names may appear outside function defs - for example in class
# definitions.
return node
analyzer = self.current_analyzer
cfg_node = self.current_cfg_node
assert cfg_node is not None, ('name node, %s, outside of any statement?'
% node.id)
qn = anno.getanno(node, anno.Basic.QN)
if isinstance(node.ctx, gast.Load):
anno.setanno(node, anno.Static.DEFINITIONS,
tuple(analyzer.in_[cfg_node].value.get(qn, ())))
else:
anno.setanno(node, anno.Static.DEFINITIONS,
tuple(analyzer.out[cfg_node].value.get(qn, ())))
return node
def _aggregate_predecessors_defined_in(self, node):
preds = self.current_analyzer.graph.stmt_prev[node]
node_defined_in = set()
for p in preds:
node_defined_in |= set(self.current_analyzer.out[p].value.keys())
anno.setanno(node, anno.Static.DEFINED_VARS_IN, frozenset(node_defined_in))
def visit_If(self, node):
self._aggregate_predecessors_defined_in(node)
return self.generic_visit(node)
def visit_For(self, node):
self._aggregate_predecessors_defined_in(node)
# Manually accounting for the shortcoming described in
# cfg.AstToCfg.visit_For.
parent = self.current_cfg_node
self.current_cfg_node = self.current_analyzer.graph.index[node.iter]
node.target = self.visit(node.target)
self.current_cfg_node = parent
node.iter = self.visit(node.iter)
node.body = self.visit_block(node.body)
node.orelse = self.visit_block(node.orelse)
return node
def visit_While(self, node):
self._aggregate_predecessors_defined_in(node)
return self.generic_visit(node)
def visit_Try(self, node):
self._aggregate_predecessors_defined_in(node)
return self.generic_visit(node)
def visit_ExceptHandler(self, node):
self._aggregate_predecessors_defined_in(node)
# TODO(mdan): Also track the exception type / name symbols.
node.body = self.visit_block(node.body)
return node
def visit(self, node):
parent = self.current_cfg_node
if (self.current_analyzer is not None and
node in self.current_analyzer.graph.index):
self.current_cfg_node = self.current_analyzer.graph.index[node]
node = super(TreeAnnotator, self).visit(node)
self.current_cfg_node = parent
return node
def resolve(node, source_info, graphs, definition_factory=Definition):
"""Resolves reaching definitions for each symbol.
Args:
node: ast.AST
source_info: transformer.SourceInfo
graphs: Dict[ast.FunctionDef, cfg.Graph]
definition_factory: Callable[[], Definition]
Returns:
ast.AST
"""
visitor = TreeAnnotator(source_info, graphs, definition_factory)
node = visitor.visit(node)
return node
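# A dependency-free toy illustration of the transfer function applied in
# Analyzer.visit_node, defs_out = gen | (defs_in - kill), using plain dicts of
# sets in place of _NodeState; the symbols and definition labels are invented.
if __name__ == '__main__':
  defs_in = {'x': {'def_1'}, 'y': {'def_2'}}  # symbol -> definitions that reach the node
  gen = {'x': {'def_3'}}  # this node re-binds x, generating a new definition
  kill = {'x'}  # symbols modified at the node; their old definitions are killed
  defs_out = {s: set(d) for s, d in gen.items()}
  for s, defs in defs_in.items():
    if s not in kill:
      defs_out.setdefault(s, set()).update(defs)
  print(defs_out)  # x reaches onward only through def_3; y still through def_2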
| tensorflow/tensorflow | tensorflow/python/autograph/pyct/static_analysis/reaching_definitions.py | Python | apache-2.0 | 9,095 |
# coding=utf-8
import random
import time
import threading
import unittest
from lru_cache import LruCache
class TestLruCache(unittest.TestCase):
def test_cache_normal(self):
a = []
@LruCache(maxsize=2, timeout=1)
def foo(num):
a.append(num)
return num
foo(1)
foo(1)
self.assertEqual(a, [1])
def test_cache_none(self):
a = []
@LruCache(maxsize=2, timeout=1)
def foo(num):
a.append(num)
return None
foo(1)
foo(1)
self.assertEqual(a, [1])
def test_cache_when_timeout(self):
a = []
@LruCache(maxsize=2, timeout=1)
def foo(num):
a.append(num)
return num
foo(2)
time.sleep(2)
foo(2)
self.assertEqual(a, [2, 2])
def test_cache_when_cache_is_full(self):
a = []
@LruCache(maxsize=2, timeout=1)
def foo(num):
a.append(num)
return num
foo(1)
foo(2)
foo(3)
foo(1)
self.assertEqual(a, [1, 2, 3, 1])
def test_cache_with_multi_thread(self):
a = []
@LruCache(maxsize=10, timeout=1)
def foo(num):
a.append(num)
return num
for i in xrange(10):
threading.Thread(target=foo, args=(i, )).start()
main_thread = threading.currentThread()
for t in threading.enumerate():
if t is not main_thread:
t.join()
foo(random.randint(0, 9))
self.assertEqual(set(a), set(range(10)))
def test_cache_with_multi_thread_two_func(self):
a = []
@LruCache(maxsize=10, timeout=1)
def foo(num):
a.append(num)
return num
b = []
@LruCache(maxsize=10, timeout=1)
def bar(num):
b.append(num)
return num + 1
for i in xrange(10):
threading.Thread(target=foo, args=(i, )).start()
threading.Thread(target=bar, args=(i, )).start()
main_thread = threading.currentThread()
for t in threading.enumerate():
if t is not main_thread:
t.join()
feed = random.randint(0, 9)
self.assertEqual(foo(feed), feed)
self.assertEqual(bar(feed), feed + 1)
self.assertEqual(set(a), set(range(10)))
self.assertEqual(set(b), set(range(10)))
def test_cache_when_timeout_and_maxsize_is_none(self):
a = []
@LruCache()
def foo(num):
a.append(num)
return num
foo(1)
foo(1)
self.assertEqual(a, [1])
def test_cache_when_timeout_is_none(self):
a = []
@LruCache(maxsize=10)
def foo(num):
a.append(num)
return num
foo(1)
foo(1)
self.assertEqual(a, [1])
def test_cache_when_only_maxsize_is_none_normal(self):
a = []
@LruCache(timeout=2)
def foo(num):
a.append(num)
return num
foo(1)
foo(1)
self.assertEqual(a, [1])
def test_cache_when_only_maxsize_is_none_timeout(self):
a = []
@LruCache(timeout=1)
def foo(num):
a.append(num)
return num
foo(1)
time.sleep(2)
foo(1)
self.assertEqual(a, [1, 1])
def test_cache_when_only_maxsize_is_none_normal_method(self):
a = []
class Func(object):
@LruCache(timeout=2)
def foo(self, num):
a.append(num)
return num
fun = Func()
fun.foo(1)
fun.foo(1)
self.assertEqual(a, [1])
def test_cache_when_only_maxsize_is_none_normal_method_timeout(self):
a = []
class Func(object):
@LruCache(timeout=1)
def foo(self, num):
a.append(num)
return num
fun = Func()
fun.foo(1)
time.sleep(2)
fun.foo(1)
self.assertEqual(a, [1, 1])
def test_invalidate(self):
a = []
@LruCache()
def foo(num):
a.append(num)
return num
foo(1)
foo(1)
self.assertEqual(a, [1])
foo.invalidate(1)
foo(1)
self.assertEqual(a, [1, 1])
if __name__ == "__main__":
unittest.main()
| Backflipz/plugin.video.excubed | resources/lib/cache/tests.py | Python | gpl-2.0 | 4,445 |
#!/usr/bin/env python2.6
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Software License Agreement (GPLv2 License)
#
# Copyright (c) 2012 TheCorpora SL
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
# Authors: Miguel Angel Julian <[email protected]>;
# Daniel Cuadrado <[email protected]>;
# Arturo Bajuelos <[email protected]>;
# Sergio Merino <[email protected]>;
import cherrypy
import os
import gen_grammar
import subprocess
from mako.template import Template
from tabsClass import TabClass
import simplejson
from subprocess import Popen, PIPE, STDOUT
import roslib
import signal
roslib.load_manifest('qbo_webi');
import rospy
import time
from uuid import getnode as get_mac
from poster.encode import multipart_encode
from poster.streaminghttp import register_openers
import urllib2
class VoiceRecognitionManager(TabClass):
def __init__(self,language):
self.ipWavServer = "audio.openqbo.org"
self.portWavServer="8588"
self.language = language
self.juliusPath=roslib.packages.get_pkg_dir("qbo_listen")
self.juliusAMPath="/usr/share/qbo-julius-model/"
self.htmlTemplate = Template(filename='voiceRecognition/templates/voiceRecognitionTemplate.html')
self.jsTemplate = Template(filename='voiceRecognition/templates/voiceRecognitionTemplate.js')
self.tmpdir="/tmp/"
self.LMPaths="/config/LM/"
self.LMFileName="/sentences.conf"
self.PhonemsFileName="/phonems"
self.TiedlistFileName="/tiedlist"
self.languages_names={'en':'English','es':'Spanish','pt':'Português','de':'Deutsch','fr':'Français','it':'Italiano'}
self.path = roslib.packages.get_pkg_dir("qbo_webi")+"/src/voiceRecognition/"
self.lan = self.language["current_language"]
self.mac = get_mac()
self.p = None
@cherrypy.expose
def voiceRecognitionJs(self, parameters=None):
self.lan = self.language["current_language"]
return self.jsTemplate.render(language=self.language)
def getLanguages(self):
try:
dirList=os.listdir(self.juliusPath+self.LMPaths)
dirList.sort()
except:
dirList=-1
return dirList
def isQboListenInstalled(self):
if self.getLanguages()==-1:
return False
else:
return True
def getLanguageModels(self,language):
try:
dirList=os.listdir(self.juliusPath+self.LMPaths+language)
dirList.sort()
except:
dirList=-1
return dirList
def getLMSentences(self,language,model):
try:
f = open(self.juliusPath+self.LMPaths+language+"/"+model+self.LMFileName,'r')
return f.read()
except:
sentences=""
return sentences
@cherrypy.expose
def getModels(self,lang):
modelList=""
try:
dirList=os.listdir(self.juliusPath+self.LMPaths+lang)
dirList.sort()
for model in dirList:
modelList=modelList+model+"::"
modelList=modelList[:-2]
except:
modelList=-1
return modelList
@cherrypy.expose
def test1(self,lang,text):
text=text.encode("utf-8")
f = open(self.tmpdir+'LModel', 'w')
f.write(text)
f.close()
words=gen_grammar.verrors(self.tmpdir+'LModel',self.juliusAMPath+lang+"/"+self.PhonemsFileName)
if words==0:
return ""
else:
wordsList=""
for word in words:
wordsList=wordsList+word+"::"
wordsList=wordsList[:-2]
return wordsList
@cherrypy.expose
def test2(self,lang,text):
errorlist=""
text=text.encode("utf-8")
print text
wordlist=text.split()
print wordlist
for word in wordlist:
if word[0]!="[" and word[0]!="<":
print word
f = open(self.tmpdir+'word', 'w')
f.write("[sentence]\n")
f.write(word)
f.close()
gen_grammar.createvoca(self.tmpdir+'word', self.juliusAMPath+lang+"/"+self.PhonemsFileName, self.tmpdir+'word')
print self.tmpdir+'word'
print self.juliusAMPath+lang+"/"+self.TiedlistFileName
if gen_grammar.perrors(self.tmpdir+'word.voca',self.juliusAMPath+lang+"/"+self.TiedlistFileName)!=0:
errorlist=errorlist+word+"::"
errorlist=errorlist[:-2]
return errorlist.upper()
@cherrypy.expose
def saveToFile(self,lang,text,model):
try:
#print self.juliusPath+self.LMPaths+language+"/"+model+self.LMFileName
text=text.encode("utf-8")
f = open(self.juliusPath+self.LMPaths+lang+"/"+model+self.LMFileName,'w')
f.write(text)
f.close()
gen_grammar.compilegrammar(model,lang)
subprocess.Popen("roslaunch qbo_listen voice_recognizer.launch".split())
except:
return "ERROR: Cant write the file"
return ""
@cherrypy.expose
def getFile(self,lang="",model=""):
if lang=="" or model=="":
return "ERROR: lang:"+lang+"; model:"+model
else:
#print self.getLMSentences(lang,model)
return self.getLMSentences(lang,model)
@cherrypy.expose
def index(self):
tmp=""
if self.isQboListenInstalled():
for lang in self.getLanguages():
for LM in self.getLanguageModels(lang):
text= self.getLMSentences(lang,LM)
break
break
return self.htmlTemplate.render(language=self.language,lannames=self.languages_names,alllanguage=self.getLanguages())
else:
return "Qbo listen not installed"
# return self.htmlTemplate.render(language=self.language)
@cherrypy.expose
def rec(self):
# n = self.getLenght("Arturo","sp")
# print "***** "+n
        # Remove the previous recording, if there was one
try:
cmd="rm "+self.path+"tmp/*"
self.p = Popen(cmd.split())
except ValueError:
print "Nada que borrar"
'''
try:
cmd="rm "+self.path+"/*_en"
self.p = Popen(cmd.split())
except ValueError:
print "Nada que borrar"
try:
cmd="rm "+path+"/*sp"
print cmd
self.p = Popen(cmd.split())
except ValueError:
print "Nada que borrar"
'''
self.filename = str(self.mac)+"_"+self.lan
#filename = filename.replace("\"","")
# filename = "tmp.wav"
print "FILENAME == "+self.filename
print "grabnando!!!! "+self.path+"tmp/"+self.filename
cmd="arecord -f S16_LE -r 44100 -c 1 "+self.path+"tmp/"+self.filename
self.p = Popen(cmd.split())
name="oleole"
return name
@cherrypy.expose
def stop(self):
        if self.p is None:
            print "P IS NULL!!??"
        else:
            print "killing recording"
self.p.send_signal(signal.SIGINT)
cmd="python "+self.path+"sendWav2Server.py "+self.path+"tmp/"+self.filename+" "+self.ipWavServer+" "+self.portWavServer
print cmd
out = runCmd(cmd)
print out[0]
if out[1] != "":
print "Error"
return "error"
return unicode(out[0],'utf8')
@cherrypy.expose
def play(self):
print "play sound"
os.system('aplay '+self.path+"tmp/"+self.filename)
return "ok"
@cherrypy.expose
def save(self,transcripcion):
print "SAVE! transcripcion="+transcripcion
cmd="python "+self.path+"sendTranscription2Server.py "+str(self.mac)+" \""+transcripcion+"\" "+self.lan+" "+self.ipWavServer+" "+self.portWavServer
print cmd
out = runCmd(cmd)
if out[1] != "":
print "Error "+out[1]
return "error"
return out[0]
# return "ok"
def runCmd(cmd, timeout=None):
'''
Will execute a command, read the output and return it back.
@param cmd: command to execute
@param timeout: process timeout in seconds
@return: a tuple of three: first stdout, then stderr, then exit code
@raise OSError: on missing command or if a timeout was reached
'''
ph_out = None # process output
ph_err = None # stderr
ph_ret = None # return code
p = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# if timeout is not set wait for process to complete
if not timeout:
ph_ret = p.wait()
else:
fin_time = time.time() + timeout
while p.poll() == None and fin_time > time.time():
time.sleep(1)
# if timeout reached, raise an exception
if fin_time < time.time():
# starting 2.6 subprocess has a kill() method which is preferable
# p.kill()
os.kill(p.pid, signal.SIGKILL)
raise OSError("Process timeout has been reached")
ph_ret = p.returncode
ph_out, ph_err = p.communicate()
return (ph_out, ph_err, ph_ret)
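# A minimal sketch of how runCmd is intended to be called; the echo command
# below is only an illustration, not one of the module's real commands.
if __name__ == "__main__":
    stdout, stderr, ret = runCmd("echo hello", timeout=5)
    print "stdout: " + stdout
    print "stderr: " + stderr
    print "exit code: " + str(ret)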
| HailStorm32/Q.bo_stacks | qbo_webi/src/voiceRecognition/voiceRecognition.py | Python | lgpl-2.1 | 10,125 |
import numpy as np
import os
import sys
sys.path.append(os.path.abspath("../../python/KMeansRex"))
import KMeansRex
def make_data(n_examples_per_cluster=20, seed=0):
prng = np.random.RandomState(int(seed))
x1 = -5 + 0.1 * prng.randn(n_examples_per_cluster, 2)
x2 = 0 + 0.1 * prng.randn(n_examples_per_cluster, 2)
x3 = +5 + 0.1 * prng.randn(n_examples_per_cluster, 2)
return np.vstack([x1, x2, x3])
if __name__ == '__main__':
x_ND = make_data()
seed = 42
m_KD = KMeansRex.SampleRowsPlusPlus(x_ND, 3, seed=seed)
print 'INIT mu (KxD):'
print m_KD
m_KD, z_N = KMeansRex.RunKMeans(
x_ND, 3, Niter=25, initname='plusplus', seed=seed)
print
print "FINAL mu (KxD):"
print m_KD
| michaelchughes/KMeansRex | examples/python/simple_demo.py | Python | bsd-3-clause | 738 |
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Get environment information useful for debugging.
Intended usage is to create a file for bug reports, e.g.::
python -m pyglet.info > info.txt
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
_first_heading = True
def _heading(heading):
global _first_heading
if not _first_heading:
print()
else:
_first_heading = False
print(heading)
print('-' * 78)
def dump_python():
'''Dump Python version and environment to stdout.'''
import os
import sys
print('sys.version:', sys.version)
print('sys.platform:', sys.platform)
print('sys.maxint:', sys.maxsize)
if sys.platform == 'darwin':
try:
from objc import __version__ as pyobjc_version
print('objc.__version__:', pyobjc_version)
except:
print('PyObjC not available')
print('os.getcwd():', os.getcwd())
for key, value in list(os.environ.items()):
if key.startswith('PYGLET_'):
print("os.environ['%s']: %s" % (key, value))
def dump_pyglet():
'''Dump pyglet version and options.'''
import pyglet
print('pyglet.version:', pyglet.version)
print('pyglet.compat_platform:', pyglet.compat_platform)
print('pyglet.__file__:', pyglet.__file__)
for key, value in list(pyglet.options.items()):
print("pyglet.options['%s'] = %r" % (key, value))
def dump_window():
'''Dump display, window, screen and default config info.'''
import pyglet.window
platform = pyglet.window.get_platform()
print('platform:', repr(platform))
display = platform.get_default_display()
print('display:', repr(display))
screens = display.get_screens()
for i, screen in enumerate(screens):
print('screens[%d]: %r' % (i, screen))
window = pyglet.window.Window(visible=False)
for key, value in window.config.get_gl_attributes():
print("config['%s'] = %r" % (key, value))
print('context:', repr(window.context))
_heading('window.context._info')
dump_gl(window.context)
window.close()
def dump_gl(context=None):
'''Dump GL info.'''
if context is not None:
info = context.get_info()
else:
from pyglet.gl import gl_info as info
print('gl_info.get_version():', info.get_version())
print('gl_info.get_vendor():', info.get_vendor())
print('gl_info.get_renderer():', info.get_renderer())
print('gl_info.get_extensions():')
extensions = list(info.get_extensions())
extensions.sort()
for name in extensions:
print(' ', name)
def dump_glu():
'''Dump GLU info.'''
from pyglet.gl import glu_info
print('glu_info.get_version():', glu_info.get_version())
print('glu_info.get_extensions():')
extensions = list(glu_info.get_extensions())
extensions.sort()
for name in extensions:
print(' ', name)
def dump_glx():
'''Dump GLX info.'''
try:
from pyglet.gl import glx_info
except:
print('GLX not available.')
return
import pyglet
window = pyglet.window.Window(visible=False)
print('context.is_direct():', window.context.is_direct())
window.close()
if not glx_info.have_version(1, 1):
print('Version: < 1.1')
else:
print('glx_info.get_server_vendor():', glx_info.get_server_vendor())
print('glx_info.get_server_version():', glx_info.get_server_version())
print('glx_info.get_server_extensions():')
for name in glx_info.get_server_extensions():
print(' ', name)
print('glx_info.get_client_vendor():', glx_info.get_client_vendor())
print('glx_info.get_client_version():', glx_info.get_client_version())
print('glx_info.get_client_extensions():')
for name in glx_info.get_client_extensions():
print(' ', name)
print('glx_info.get_extensions():')
for name in glx_info.get_extensions():
print(' ', name)
def dump_media():
'''Dump pyglet.media info.'''
import pyglet.media
print('audio driver:', pyglet.media.get_audio_driver())
def dump_avbin():
'''Dump AVbin info.'''
try:
import pyglet.media.avbin
print('Library:', pyglet.media.avbin.av)
print('AVbin version:', pyglet.media.avbin.av.avbin_get_version())
print('FFmpeg revision:', \
pyglet.media.avbin.av.avbin_get_ffmpeg_revision())
except:
print('AVbin not available.')
def dump_al():
'''Dump OpenAL info.'''
try:
from pyglet.media.drivers import openal
except:
print('OpenAL not available.')
return
print('Library:', openal.al._lib)
driver = openal.create_audio_driver()
print('Version:', driver.get_version())
print('Extensions:')
for extension in driver.get_extensions():
print(' ', extension)
def dump_wintab():
'''Dump WinTab info.'''
try:
from pyglet.input import wintab
except:
print('WinTab not available.')
return
interface_name = wintab.get_interface_name()
impl_version = wintab.get_implementation_version()
spec_version = wintab.get_spec_version()
print('WinTab: %s %d.%d (Spec %d.%d)' % (interface_name,
impl_version >> 8, impl_version & 0xff,
spec_version >> 8, spec_version & 0xff))
def _try_dump(heading, func):
_heading(heading)
try:
func()
except:
import traceback
traceback.print_exc()
def dump():
'''Dump all information to stdout.'''
_try_dump('Python', dump_python)
_try_dump('pyglet', dump_pyglet)
_try_dump('pyglet.window', dump_window)
_try_dump('pyglet.gl.glu_info', dump_glu)
_try_dump('pyglet.gl.glx_info', dump_glx)
_try_dump('pyglet.media', dump_media)
_try_dump('pyglet.media.avbin', dump_avbin)
_try_dump('pyglet.media.drivers.openal', dump_al)
_try_dump('pyglet.input.wintab', dump_wintab)
if __name__ == '__main__':
dump()
| AustinRoy7/Pomodoro-timer | venv/Lib/site-packages/pyglet/info.py | Python | mit | 7,680 |
class AppConsumer():
"""
The interface for common application consumers (e.g. sequence
number based and timestamp based)
:param face: the face to consume data with
:type face: Face
:param keyChain: the keyChain to verify received data with
:type keyChain: KeyChain
:param doVerify: flag for whether the consumer should skip verification
:type doVerify: bool
"""
def __init__(self, face, keyChain, doVerify):
self._face = face
self._keyChain = keyChain
self._doVerify = doVerify
return
def consume(self, name, onData, onVerifyFailed, onTimeout):
"""
Consume one piece of data, or consume continuously, depending on
child class's implementation
:param name: name / prefix to consume data under
:type name: Name
:param onData: onData(data) gets called after received data's onVerifyFailed
:type onData: function object
:param onVerifyFailed: onVerifyFailed(data) gets called if received data
cannot be verified
:type onVerifyFailed: function object
:param onTimeout: onTimeout(interest) gets called if a consumer interest times out
:type onTimeout: function object
"""
return
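# A hypothetical minimal subclass, sketched to show how the interface is meant
# to be filled in: fetch one data packet under `name` and route it through the
# verification flag. It assumes the pyndn API (Face.expressInterest with
# onData(interest, data) / onTimeout(interest) callbacks and
# KeyChain.verifyData(data, onVerified, onVerifyFailed)); the class name and
# interest lifetime are made up for the example.
from pyndn import Name, Interest
class SingleShotConsumer(AppConsumer):
    def consume(self, name, onData, onVerifyFailed, onTimeout):
        interest = Interest(Name(name))
        interest.setInterestLifetimeMilliseconds(4000)
        def onDataReceived(interest, data):
            if self._doVerify:
                self._keyChain.verifyData(data, onData, onVerifyFailed)
            else:
                onData(data)
        self._face.expressInterest(interest, onDataReceived, onTimeout)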
| remap/ndn-flow | framework/ndn_iot_python/python/ndn_iot_python/consumer/app_consumer.py | Python | lgpl-3.0 | 1,272 |
from __future__ import with_statement
import logging
import struct
import threading
import time
from hazelcast.config import PROPERTY_HEARTBEAT_INTERVAL, PROPERTY_HEARTBEAT_TIMEOUT
from hazelcast.core import CLIENT_TYPE
from hazelcast.exception import AuthenticationError
from hazelcast.future import ImmediateFuture
from hazelcast.protocol.client_message import BEGIN_END_FLAG, ClientMessage
from hazelcast.protocol.codec import client_authentication_codec, client_ping_codec
from hazelcast.serialization import INT_SIZE_IN_BYTES, FMT_LE_INT
from hazelcast.util import AtomicInteger
BUFFER_SIZE = 8192
PROTOCOL_VERSION = 1
DEFAULT_HEARTBEAT_INTERVAL = 5000
DEFAULT_HEARTBEAT_TIMEOUT = 60000
class ConnectionManager(object):
logger = logging.getLogger("ConnectionManager")
def __init__(self, client, new_connection_func):
self._new_connection_mutex = threading.RLock()
self._io_thread = None
self._client = client
self.connections = {}
self._pending_connections = {}
self._socket_map = {}
self._new_connection_func = new_connection_func
self._connection_listeners = []
def add_listener(self, on_connection_opened=None, on_connection_closed=None):
self._connection_listeners.append((on_connection_opened, on_connection_closed))
def get_connection(self, address):
try:
return self.connections[address]
except KeyError:
return None
def _cluster_authenticator(self, connection):
uuid = self._client.cluster.uuid
owner_uuid = self._client.cluster.owner_uuid
request = client_authentication_codec.encode_request(
username=self._client.config.group_config.name,
password=self._client.config.group_config.password,
uuid=uuid,
owner_uuid=owner_uuid,
is_owner_connection=False,
client_type=CLIENT_TYPE,
serialization_version=1)
def callback(f):
parameters = client_authentication_codec.decode_response(f.result())
if parameters["status"] != 0:
raise AuthenticationError("Authentication failed.")
connection.endpoint = parameters["address"]
self.owner_uuid = parameters["owner_uuid"]
self.uuid = parameters["uuid"]
return connection
return self._client.invoker.invoke_on_connection(request, connection).continue_with(callback)
def get_or_connect(self, address, authenticator=None):
if address in self.connections:
return ImmediateFuture(self.connections[address])
else:
with self._new_connection_mutex:
if address in self._pending_connections:
return self._pending_connections[address]
else:
authenticator = authenticator or self._cluster_authenticator
connection = self._new_connection_func(address,
connection_closed_callback=self._connection_closed,
message_callback=self._client.invoker._handle_client_message)
def on_auth(f):
if f.is_success():
self.logger.info("Authenticated with %s", f.result())
with self._new_connection_mutex:
self.connections[connection.endpoint] = f.result()
self._pending_connections.pop(address)
for on_connection_opened, _ in self._connection_listeners:
if on_connection_opened:
                                    on_connection_opened(f.result())
return f.result()
else:
self.logger.debug("Error opening %s", connection)
with self._new_connection_mutex:
try:
self._pending_connections.pop(address)
except KeyError:
pass
raise f.exception(), None, f.traceback()
future = authenticator(connection).continue_with(on_auth)
if not future.done():
self._pending_connections[address] = future
return future
def _connection_closed(self, connection, cause):
# if connection was authenticated, fire event
if connection.endpoint:
self.connections.pop(connection.endpoint)
for _, on_connection_closed in self._connection_listeners:
if on_connection_closed:
on_connection_closed(connection, cause)
else:
# clean-up unauthenticated connection
self._client.invoker.cleanup_connection(connection, cause)
def close_connection(self, address, cause):
try:
connection = self.connections[address]
connection.close(cause)
except KeyError:
logging.warn("No connection with %s was found to close.", address)
return False
class Heartbeat(object):
logger = logging.getLogger("ConnectionManager")
_heartbeat_timer = None
def __init__(self, client):
self._client = client
self._listeners = []
self._heartbeat_timeout = client.config.get_property_or_default(PROPERTY_HEARTBEAT_TIMEOUT,
DEFAULT_HEARTBEAT_TIMEOUT) / 1000
self._heartbeat_interval = client.config.get_property_or_default(PROPERTY_HEARTBEAT_INTERVAL,
DEFAULT_HEARTBEAT_INTERVAL) / 1000
def start(self):
def _heartbeat():
self._heartbeat()
self._heartbeat_timer = self._client.reactor.add_timer(self._heartbeat_interval, _heartbeat)
self._heartbeat_timer = self._client.reactor.add_timer(self._heartbeat_interval, _heartbeat)
def shutdown(self):
        if self._heartbeat_timer:
self._heartbeat_timer.cancel()
def add_listener(self, on_heartbeat_restored=None, on_heartbeat_stopped=None):
self._listeners.append((on_heartbeat_restored, on_heartbeat_stopped))
def _heartbeat(self):
now = time.time()
for connection in self._client.connection_manager.connections.values():
time_since_last_read = now - connection.last_read
if time_since_last_read > self._heartbeat_timeout:
if connection.heartbeating:
self.logger.warn(
"Heartbeat: Did not hear back after %ss from %s" % (time_since_last_read, connection))
self._on_heartbeat_stopped(connection)
if time_since_last_read > self._heartbeat_interval:
request = client_ping_codec.encode_request()
self._client.invoker.invoke_on_connection(request, connection, ignore_heartbeat=True)
else:
if not connection.heartbeating:
self._on_heartbeat_restored(connection)
def _on_heartbeat_restored(self, connection):
self.logger.info("Heartbeat: Heartbeat restored for connection %s" % connection)
connection.heartbeating = True
for callback, _ in self._listeners:
if callback:
callback(connection)
def _on_heartbeat_stopped(self, connection):
connection.heartbeating = False
for _, callback in self._listeners:
if callback:
callback(connection)
class Connection(object):
_closed = False
endpoint = None
heartbeating = True
is_owner = False
counter = AtomicInteger()
def __init__(self, address, connection_closed_callback, message_callback):
self._address = (address.host, address.port)
self.id = self.counter.get_and_increment()
self.logger = logging.getLogger("Connection[%s](%s:%d)" % (self.id, address.host, address.port))
self._connection_closed_callback = connection_closed_callback
self._message_callback = message_callback
self._read_buffer = ""
self.last_read = 0
def live(self):
return not self._closed
def send_message(self, message):
if not self.live():
raise IOError("Connection is not live.")
message.add_flag(BEGIN_END_FLAG)
self.write(message.buffer)
def receive_message(self):
self.last_read = time.time()
# split frames
while len(self._read_buffer) >= INT_SIZE_IN_BYTES:
frame_length = struct.unpack_from(FMT_LE_INT, self._read_buffer, 0)[0]
if frame_length > len(self._read_buffer):
return
message = ClientMessage(buffer(self._read_buffer, 0, frame_length))
self._read_buffer = self._read_buffer[frame_length:]
self._message_callback(message, self)
def write(self, data):
# must be implemented by subclass
pass
def close(self, cause):
pass
def __repr__(self):
return "Connection(address=%s, id=%s)" % (self._address, self.id)
| cangencer/hazelcast-python-client | hazelcast/connection.py | Python | apache-2.0 | 9,386 |
"""
This plugin captures logging statements issued during test execution. When an
error or failure occurs, the captured log messages are attached to the running
test in the test.capturedLogging attribute, and displayed with the error or failure
output. It is enabled by default but can be turned off with the option
``--nologcapture``.
You can filter captured logging statements with the ``--logging-filter`` option.
If set, it specifies which logger(s) will be captured; loggers that do not match
will be passed. Example: specifying ``--logging-filter=sqlalchemy,myapp``
will ensure that only statements logged via sqlalchemy.engine, myapp
or myapp.foo.bar logger will be logged.
You can remove other installed logging handlers with the
``--logging-clear-handlers`` option.
"""
import logging
from logging import Handler
import threading
from nose.plugins.base import Plugin
from nose.util import anyp, ln, safe_str
from io import StringIO
log = logging.getLogger(__name__)
class FilterSet(object):
def __init__(self, filter_components):
self.inclusive, self.exclusive = self._partition(filter_components)
# @staticmethod
def _partition(components):
inclusive, exclusive = [], []
for component in components:
if component.startswith('-'):
exclusive.append(component[1:])
else:
inclusive.append(component)
return inclusive, exclusive
_partition = staticmethod(_partition)
def allow(self, record):
"""returns whether this record should be printed"""
if not self:
# nothing to filter
return True
return self._allow(record) and not self._deny(record)
# @staticmethod
def _any_match(matchers, record):
"""return the bool of whether `record` starts with
any item in `matchers`"""
def record_matches_key(key):
return record == key or record.startswith(key + '.')
return anyp(bool, list(map(record_matches_key, matchers)))
_any_match = staticmethod(_any_match)
def _allow(self, record):
if not self.inclusive:
return True
return self._any_match(self.inclusive, record)
def _deny(self, record):
if not self.exclusive:
return False
return self._any_match(self.exclusive, record)
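# --- Editor's illustrative note (not part of the original plugin) ---
# FilterSet matches logger names by prefix; a leading '-' excludes a logger.
# For example, FilterSet(['myapp', '-myapp.noisy']) behaves like this:
#   fs = FilterSet(['myapp', '-myapp.noisy'])
#   fs.allow('myapp.db')            # True  (matches the 'myapp' prefix)
#   fs.allow('myapp.noisy.worker')  # False (excluded by '-myapp.noisy')
#   fs.allow('other')               # False (an inclusive list was given and nothing matches)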
class MyMemoryHandler(Handler):
def __init__(self, logformat, logdatefmt, filters):
Handler.__init__(self)
fmt = logging.Formatter(logformat, logdatefmt)
self.setFormatter(fmt)
self.filterset = FilterSet(filters)
self.buffer = []
def emit(self, record):
self.buffer.append(self.format(record))
def flush(self):
pass # do nothing
def truncate(self):
self.buffer = []
def filter(self, record):
if self.filterset.allow(record.name):
return Handler.filter(self, record)
def __getstate__(self):
state = self.__dict__.copy()
del state['lock']
return state
def __setstate__(self, state):
self.__dict__.update(state)
self.lock = threading.RLock()
class LogCapture(Plugin):
"""
Log capture plugin. Enabled by default. Disable with --nologcapture.
This plugin captures logging statements issued during test execution,
appending any output captured to the error or failure output,
should the test fail or raise an error.
"""
enabled = True
env_opt = 'NOSE_NOLOGCAPTURE'
name = 'logcapture'
score = 500
logformat = '%(name)s: %(levelname)s: %(message)s'
logdatefmt = None
clear = False
filters = ['-nose']
def options(self, parser, env):
"""Register commandline options.
"""
parser.add_option(
"--nologcapture", action="store_false",
default=not env.get(self.env_opt), dest="logcapture",
help="Disable logging capture plugin. "
"Logging configuration will be left intact."
" [NOSE_NOLOGCAPTURE]")
parser.add_option(
"--logging-format", action="store", dest="logcapture_format",
default=env.get('NOSE_LOGFORMAT') or self.logformat,
metavar="FORMAT",
help="Specify custom format to print statements. "
"Uses the same format as used by standard logging handlers."
" [NOSE_LOGFORMAT]")
parser.add_option(
"--logging-datefmt", action="store", dest="logcapture_datefmt",
default=env.get('NOSE_LOGDATEFMT') or self.logdatefmt,
metavar="FORMAT",
help="Specify custom date/time format to print statements. "
"Uses the same format as used by standard logging handlers."
" [NOSE_LOGDATEFMT]")
parser.add_option(
"--logging-filter", action="store", dest="logcapture_filters",
default=env.get('NOSE_LOGFILTER'),
metavar="FILTER",
help="Specify which statements to filter in/out. "
"By default, everything is captured. If the output is too"
" verbose,\nuse this option to filter out needless output.\n"
"Example: filter=foo will capture statements issued ONLY to\n"
" foo or foo.what.ever.sub but not foobar or other logger.\n"
"Specify multiple loggers with comma: filter=foo,bar,baz.\n"
"If any logger name is prefixed with a minus, eg filter=-foo,\n"
"it will be excluded rather than included. Default: "
"exclude logging messages from nose itself (-nose)."
" [NOSE_LOGFILTER]\n")
parser.add_option(
"--logging-clear-handlers", action="store_true",
default=False, dest="logcapture_clear",
help="Clear all other logging handlers")
parser.add_option(
"--logging-level", action="store",
default='NOTSET', dest="logcapture_level",
help="Set the log level to capture")
def configure(self, options, conf):
"""Configure plugin.
"""
self.conf = conf
# Disable if explicitly disabled, or if logging is
# configured via logging config file
if not options.logcapture or conf.loggingConfig:
self.enabled = False
self.logformat = options.logcapture_format
self.logdatefmt = options.logcapture_datefmt
self.clear = options.logcapture_clear
self.loglevel = options.logcapture_level
if options.logcapture_filters:
self.filters = options.logcapture_filters.split(',')
def setupLoghandler(self):
# setup our handler with root logger
root_logger = logging.getLogger()
if self.clear:
if hasattr(root_logger, "handlers"):
                for handler in root_logger.handlers[:]:
root_logger.removeHandler(handler)
for logger in list(logging.Logger.manager.loggerDict.values()):
if hasattr(logger, "handlers"):
                    for handler in logger.handlers[:]:
logger.removeHandler(handler)
# make sure there isn't one already
# you can't simply use "if self.handler not in root_logger.handlers"
# since at least in unit tests this doesn't work --
# LogCapture() is instantiated for each test case while root_logger
# is module global
# so we always add new MyMemoryHandler instance
for handler in root_logger.handlers[:]:
if isinstance(handler, MyMemoryHandler):
root_logger.handlers.remove(handler)
root_logger.addHandler(self.handler)
# to make sure everything gets captured
loglevel = getattr(self, "loglevel", "NOTSET")
root_logger.setLevel(getattr(logging, loglevel))
def begin(self):
"""Set up logging handler before test run begins.
"""
self.start()
def start(self):
self.handler = MyMemoryHandler(self.logformat, self.logdatefmt,
self.filters)
self.setupLoghandler()
def end(self):
pass
def beforeTest(self, test):
"""Clear buffers and handlers before test.
"""
self.setupLoghandler()
def afterTest(self, test):
"""Clear buffers after test.
"""
self.handler.truncate()
def formatFailure(self, test, err):
"""Add captured log messages to failure output.
"""
return self.formatError(test, err)
def formatError(self, test, err):
"""Add captured log messages to error output.
"""
# logic flow copied from Capture.formatError
test.capturedLogging = records = self.formatLogRecords()
if not records:
return err
ec, ev, tb = err
return (ec, self.addCaptureToErr(ev, records), tb)
def formatLogRecords(self):
return list(map(safe_str, self.handler.buffer))
def addCaptureToErr(self, ev, records):
return '\n'.join([safe_str(ev), ln('>> begin captured logging <<')] + \
records + \
[ln('>> end captured logging <<')])
| Reagankm/KnockKnock | venv/lib/python3.4/site-packages/nose/plugins/logcapture.py | Python | gpl-2.0 | 9,358 |
# coding: utf-8
"""zmq poll function"""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
from ._cffi import C, ffi, zmq_version_info
from .constants import *
from zmq.error import _check_rc
def _make_zmq_pollitem(socket, flags):
zmq_socket = socket._zmq_socket
zmq_pollitem = ffi.new('zmq_pollitem_t*')
zmq_pollitem.socket = zmq_socket
zmq_pollitem.fd = 0
zmq_pollitem.events = flags
zmq_pollitem.revents = 0
return zmq_pollitem[0]
def _make_zmq_pollitem_fromfd(socket_fd, flags):
zmq_pollitem = ffi.new('zmq_pollitem_t*')
zmq_pollitem.socket = ffi.NULL
zmq_pollitem.fd = socket_fd
zmq_pollitem.events = flags
zmq_pollitem.revents = 0
return zmq_pollitem[0]
def zmq_poll(sockets, timeout):
cffi_pollitem_list = []
low_level_to_socket_obj = {}
for item in sockets:
if isinstance(item[0], int):
low_level_to_socket_obj[item[0]] = item
cffi_pollitem_list.append(_make_zmq_pollitem_fromfd(item[0], item[1]))
else:
low_level_to_socket_obj[item[0]._zmq_socket] = item
cffi_pollitem_list.append(_make_zmq_pollitem(item[0], item[1]))
items = ffi.new('zmq_pollitem_t[]', cffi_pollitem_list)
list_length = ffi.cast('int', len(cffi_pollitem_list))
c_timeout = ffi.cast('long', timeout)
rc = C.zmq_poll(items, list_length, c_timeout)
_check_rc(rc)
result = []
for index in range(len(items)):
if not items[index].socket == ffi.NULL:
if items[index].revents > 0:
result.append((low_level_to_socket_obj[items[index].socket][0],
items[index].revents))
else:
result.append((items[index].fd, items[index].revents))
return result
__all__ = ['zmq_poll']
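# --- Editor's illustrative sketch (not part of the original module) ---
# zmq_poll() takes (socket, flags) or (fd, flags) pairs plus a timeout and
# returns (socket-or-fd, revents) pairs. A minimal usage sketch, assuming
# `sock` is a pyzmq socket object and `fd` is a plain integer file descriptor:
#   ready = zmq_poll([(sock, POLLIN), (fd, POLLIN)], 1000)
#   for item, revents in ready:
#       if revents & POLLIN:
#           pass  # item is readable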
| kalikaneko/pyzqm-deb | zmq/backend/cffi/_poll.py | Python | lgpl-3.0 | 1,830 |
#!/usr/bin/env python
from jpb.utils import *
from jpb.source_provider import get_source_provider
import jpb.source_provider.rpmbuild as rpmbuild
import jpb.utils.rpm as rpm
from jpb.utils.log import init_logging
import os
import sys
import glob
import logging
import platform
from jpb.repo_provider.createrepo import createrepo
#from jpb.build_provider.build import build as pbbuild
SRC_DIR="source"
config = {}
def _common_init():
init_logging()
logger = logging.getLogger("%s:init" % __name__)
for i in ('WORKSPACE', 'BUILD_NUMBER', 'JOB_NAME'):
config[i] = get_env(i)
if not config['WORKSPACE']:
logger.error("WORKSPACE not set")
sys.exit(1)
if not config['BUILD_NUMBER']:
logger.error("BUILD_NUMBER not set")
sys.exit(1)
os.chdir(config["WORKSPACE"])
def generate_source_package():
_common_init()
files=[]
cleanup_workspace([".rpm", ".tar.gz"])
rpm.clean_rootdir(rpm.TOPDIR)
logger = logging.getLogger("%s:generate_source_package" % __name__)
no_scm = get_env("JPB_SOURCE_NO_SCM")
sourcedir = get_env("JPB_SOURCE_DIR")
if not sourcedir:
sourcedir = SRC_DIR
source = os.path.join(config['WORKSPACE'], sourcedir)
spec=get_env("JPB_RPM_SPECFILE")
if not spec:
for i in os.listdir(source):
if i.endswith(".spec"):
spec = os.path.join(source, i)
break
if not spec:
logger.error("No spec file found")
sys.exit(1)
else:
if not os.path.isfile(spec):
logger.error("Spec file specified with JPB_RPM_SPECFILE not found")
sys.exit(1)
logger.info("Using specfile %s" % spec)
logger.info("Gathering source informations")
sf = rpm.SpecFile(spec)
if no_scm:
specfile = spec
tarball = sf.get_source_name()
else:
sp = get_source_provider(source)
commit_string = sp.commit_version_string()
if get_env("USE_ORIG_VERSION"):
release = sf.release
else:
release = generate_build_version(sf.release, config['BUILD_NUMBER'], commit_string)
tarball=sp.generate_tarball(sf.name, sf.version)
specfile = sf.name+".spec"
logger.info("Generating updated spec file %s" % specfile)
sf.write(specfile, tarball, release)
logger.info("Generating source package")
files.append(tarball)
files = files + sf.get_additional_sources()
if not rpmbuild.generate_src_package(specfile, files, sourcedir):
logger.error("Problem while generating the source package")
if not no_scm:
os.unlink(specfile)
os.unlink(tarball)
def generate_binary_package():
_common_init()
cleanup_workspace(['.rpm'],['src.rpm'])
logger = logging.getLogger("%s:generate_binary_package" % __name__)
srpm = ""
# find srpm
files = os.listdir(".")
for i in files:
if i.endswith(".src.rpm"):
srpm = i
break
if not srpm:
logger.error("No src.rpm found")
sys.exit(1)
logger.info("Using %s" % srpm)
arch = get_env("architecture")
distri = get_env("distribution")
logger.info("Building for distribution %s and architecture %s" % (distri, arch))
if (platform.dist()[0] == "fedora"):
from jpb.build_provider.mock import mock as cbuilder
elif (platform.dist()[0] == "SuSE"):
from jpb.build_provider.build import build as cbuilder
elif (platform.dist()[0] == "debian"):
from jpb.build_provider.mock import mock as cbuilder
else:
logger.error("Currently unsupported build platform")
sys.exit(1)
builder = cbuilder(config['WORKSPACE'], distribution = distri, architecture = arch)
if not builder.build(srpm):
logger.error("Build failed see log for details")
sys.exit(1)
def provide_package():
_common_init()
logger = logging.getLogger("%s:provide_package" % __name__)
arch = get_env("architecture")
distri = get_env("distribution")
reponame = get_env("REPONAME")
repositorypath = get_env("REPOSITORY")
rp = createrepo(config, distribution=distri, architecture=arch, repopath=repositorypath, reponame=reponame)
if not rp.add_to_repo(glob.glob('*.rpm')):
logger.error("Failed adding files to the repository")
sys.exit(1)
# vim:foldmethod=marker ts=2 ft=python ai sw=2
| bmiklautz/jenkins-package-builder | jpb/cli/__init__.py | Python | mit | 4,012 |
# print string "This program is to convert cm's to inches"
print "This program is to convert cm's to inches"
# declare variable inch which stores value 0.393701
inch = 0.393701
# declare variable cm1 which stores value 25
cm1 = 25
inch1 = 30
s1 = "You know 1 cm is %0.3f inches" % inch
print s1
s2 = "what is %d cms in inches ?"
s3 = "How many cm is %d inches ?"
s4 = "Oh it's %d inches, That's Gr8!"
s5 = "Ah so %d inches is equal to %d cms"
s6 = s2 % cm1
s7 = s3 % inch1
s8 = s4 % (cm1 * inch)
s9 = s5 % (inch1, inch1/inch)
print s6 + s8
print s7 + s9
| mrniranjan/python-scripts | reboot/math10.py | Python | gpl-2.0 | 568 |
# oppia/tests/tracker/test_tracker.py
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.client import Client
| DigitalCampus/django-nurhi-oppia | oppia/tests/activitylog/test_activitylog.py | Python | gpl-3.0 | 153 |
"""A reference implementation of graddfril desktop aggregator.
Handles buffering of data from grabbing modules and sending it to the storage server.
"""
| graddfril/desktop.grabber-aggregator-refimpl | graddfril/desktop/grabber_aggregator_refimpl/__init__.py | Python | agpl-3.0 | 150 |
# -*- coding: utf-8 -*-
# Copyright 2016, 2017 Mircea Ulinic. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# std lib
import os
import time
import random
import hashlib
import logging
# third party libs
from timeout_decorator import timeout
# local modules
import dhcp_relay.exceptions
from dhcp_relay.commons import DHCPCommons
from dhcp_relay.defaults import DHCPDefaults
from dhcp_relay.globals import DHCPGlobals
from dhcp_relay.listener import DHCPListener
from dhcp_relay.pkt_crafter import DHCPPktCrafter
log = logging.getLogger(__name__)
_MAX_WAIT_TIME = DHCPDefaults.MAX_WAIT
class DHCPRelay(DHCPCommons, DHCPGlobals):
def __init__(self,
config_file=None,
server_ip=None,
server_id=None,
server_port=None,
client_ip=None,
client_port=None,
lease_time=None,
listener_threads=None,
max_wait=None,
log_level=None,
log_file=None,
log_full=None,
log_date_format=None,
daemon=None,
multiprocessing=None):
'''DHCP Relay constructor.'''
DHCPGlobals.__init__(self,
config_file=config_file,
server_ip=server_ip,
server_id=server_id,
server_port=server_port,
client_ip=client_ip,
client_port=client_port,
lease_time=lease_time,
listener_threads=listener_threads,
max_wait=max_wait,
log_level=log_level,
log_file=log_file,
log_full=log_full,
log_date_format=log_date_format,
daemon=daemon,
multiprocessing=multiprocessing)
DHCPCommons.__init__(self, self.MAX_WAIT)
_MAX_WAIT_TIME = self.MAX_WAIT
self._pkt_crafter = DHCPPktCrafter(self)
log.addHandler(self.LOGGING_HANDLER)
def connect(self):
'''
Virtually establish the connection to the DHCP server.
This is initiated in the main process only as it binds
the relay agent to the client IP address.
'''
if not self.SERVER_IP:
log.critical('Unable to turn up the DHCP relay.')
log.critical('Please specify at least the server IP address.')
return
self._pkt_crafter.connect()
_listeners = []
for _ in range(self.LISTENER_THREADS): # start as many listeners as needed
_listener = DHCPListener(self, self._pkt_crafter)
_listeners.append(_listener)
for _listener in _listeners:
_listener.start()
@staticmethod
def _get_xid():
'''Return the xID of the DHCP request packet.'''
_xid = [
random.randrange(255),
random.randrange(255),
random.randrange(255),
random.randrange(255),
] # random transaction ID
return _xid
@staticmethod
def _get_rid():
'''Return an unique request ID.'''
return hashlib.md5(str(time.time())).hexdigest()
def send_discover(self, mac, ip=None):
'''Send DHCP discover packet.'''
_xid = self._get_xid()
_rid = self._get_rid() # Unique Request ID
        _xid_str = '.'.join([str(xid_e) for xid_e in _xid])
self.xid_mac(_xid_str, mac)
self.subs_up(mac, False)
if not ip:
ip = self.DUMMY_IP_ADDRESS
# if no specific IP Address is requested,
# will try to request something dummy
return self._pkt_crafter.send_discover(_rid, _xid, mac, ip)
# sends DHCP Discover Packet
@timeout(_MAX_WAIT_TIME)
def bring_subscriber_up(self, mac, ip=None):
'''Bring the subscriber up.'''
if not self.send_discover(mac, ip):
return False
start_time = time.time()
while (not self.SUBS_UP.get(mac, '')):
continue # wait till subs comes up
self.subs_up_pop(mac)
return self.mac_ip_pop(mac) # returns the assigned IP Address
def bring_subscribers_list_up(self, mac_list):
'''
Bring up a list of subs.
        This method is best effort and therefore will not
send back a notification for each subscriber.
'''
for mac in mac_list:
self.send_discover(mac)
return True
def send_release(self, mac):
'''Send DHCP release request packet.'''
_xid = self._get_xid()
_rid = self._get_rid()
self._pkt_crafter.send_release(_rid, _xid, mac)
def bring_subscriber_down(self, mac):
'''Tear down the subscriber.'''
self.send_release(mac)
return True
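# --- Editor's illustrative sketch (not part of the original module) ---
# Minimal usage of the relay. The config path and MAC address are placeholders;
# the configuration (or the keyword arguments) must at least provide the DHCP
# server IP, otherwise connect() refuses to start.
def _example_bring_subscriber_up():
    relay = DHCPRelay(config_file='/etc/dhcp-relay/relay.conf')  # placeholder path
    relay.connect()
    # Returns the IP address the DHCP server assigned to this MAC (placeholder MAC).
    return relay.bring_subscriber_up('8c:08:fa:00:aa:bb')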
| mirceaulinic/py-dhcp-relay | dhcp_relay/relay.py | Python | gpl-3.0 | 5,556 |
import json
from pprint import pprint
import time
import io
# from http://www.codigomanso.com/en/2011/05/trucomanso-transformar-el-tiempo-en-formato-24h-a-formato-12h-python/
def ampmformat (hhmmss):
"""
This method converts time in 24h format to 12h format
Example: "00:32" is "12:32 AM"
"13:33" is "01:33 PM"
"""
ampm = hhmmss.split (":")
if (len(ampm) == 0) or (len(ampm) > 3):
return hhmmss
# is AM? from [00:00, 12:00[
hour = int(ampm[0]) % 24
isam = (hour >= 0) and (hour < 12)
# 00:32 should be 12:32 AM not 00:32
if isam:
ampm[0] = ('12' if (hour == 0) else "%02d" % (hour))
else:
ampm[0] = ('12' if (hour == 12) else "%02d" % (hour-12))
return ':'.join (ampm) + (' AM' if isam else ' PM')
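# Editor's note: the code below assumes allData2003_2004.json maps ids to lists
# of readings shaped like {"690150": [{"date": "Jan 01, 2003 01:00:00 AM", "value": 3.5}, ...]}
# (see the strptime format used below); it writes a per-id total plus 24 hourly
# sums to data.json.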
json_data=open('allData2003_2004.json')
data = json.load(json_data)
json_data.close()
# k ='690150'
# print data['690150']
output = {}
for k in data.keys():
for d in data[k]:
date = time.strptime(d['date'], "%b %d, %Y %I:%M:%S %p")
if k in output:
t = ampmformat('%02d:%02d:%02d' % (date.tm_hour, date.tm_min, date.tm_sec))
h = date.tm_hour
output[k]['sum'] += d['value']
output[k]['hourly'][h] += d['value']
else:
output[k] = { "sum": 0,
"hourly": [0]*24
}
t = ampmformat('%02d:%02d:%02d' % (date.tm_hour, date.tm_min, date.tm_sec))
h = date.tm_hour
output[k]['sum'] += d['value']
output[k]['hourly'][h] += d['value']
f = io.open('data.json', 'w', encoding='utf-8')
f.write(unicode(json.dumps(output, ensure_ascii=False)))
f.close()
json_output=open('data.json')
output_data = json.load(json_output)
pprint(output_data)
json_output.close()
| inachen/cs171-hw4-chen-ina | ProblemGeoUSA/data_wrangle_total.py | Python | mit | 1,779 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import re
import copy
from parser_exception import ParserException
import utils
import xml_utils
VALID_UNDERLYING_TYPES_FOR_ENUMS = [
'int8_t', 'uint8_t', 'int16_t', 'uint16_t', 'int32_t', 'uint32_t', 'int64_t', 'uint64_t' ]
# Not allowed are 'float' and 'char'
class BaseType(object):
""" Abstract base class for all types
Attributes:
name -- Type name. Must be unique.
reference -- True if the type is only used as reference for other
types and should not be included in the output.
description -- Description of the type
string -- Short description for enum members
isEnum -- True if the type is an enum type
isStruct -- analog
isTypedef -- analog
isBuiltIn -- analog
level -- hierarchy level. Built-in types have level -1, the level
of a type is the highest level of a subtype plus one.
size -- Size
"""
def __init__(self, node):
self.node = node
self.name = node.get('name')
self._check_name()
self.description = None
self.string = None
self.isEnum = False
self.isStruct = False
self.isTypedef = False
self.isBuiltIn = False
self.level = None
self.size = None
def _check_name(self):
utils.check_name(self.name)
def evaluate(self, tree):
""" Load the state of the class from the corresponding XML node
Keyword arguments:
tree -- current tree of the communcation structure. Must contain
every available type, but some types may not be fully
evaluated
"""
pass
def create_hierarchy(self):
""" Create the type hierarchy
This method calculates the values for self.size and self.level. Must
not be called before all types are fully created.
"""
pass
def flattened(self):
""" Access the version with the flattened hierarchy """
return self
def __cmp__(self, other):
""" Compare two types
If types are sorted they are sorted first by level and then by name.
"""
if isinstance(other, BaseType):
return cmp(self.level, other.level) or cmp(self.name, other.name)
else:
return 1
class BuiltIn(BaseType):
""" Built-in types
These types correspond directly to types available by default in the
target environment. The hierarchy level will always be -1 (lowest level).
"""
def __init__(self, node):
BaseType.__init__(self, node)
self.isBuiltIn = True
self.level = -1
def evaluate(self, tree):
if self.node is None:
return
self.description = xml_utils.get_description(self.node)
self.string = xml_utils.get_string(self.node)
self.size = int(self.node.get('size'))
self.node = None
def _check_name(self):
""" Built-in types need no check """
pass
def dump(self):
return "%s : built-in|%i [%i]" % (self.name, self.level, self.size)
def __str__(self):
return "%s : built-in" % self.name
class Enum(BaseType):
class Element(object):
""" Sub-element of an enum-type """
def __init__(self, node, stronglyTyped):
""" Constructor
The name of the element has to be all upper case with underscores.
"""
self.name = node.get('name')
if not stronglyTyped:
if not re.match("^[0-9A-Z_]*$", self.name):
raise ParserException("Attribute name of element in enum has to be `UPPER_UNDERSCORE_STYLE` (found: '%s')" % (self.name))
else:
utils.check_name(self.name)
self.description = xml_utils.get_description(node)
self.string = xml_utils.get_string(node)
value = node.get('value')
self.value = None if (value is None) else int(value, 0)
def __str__(self):
return "%s = %s" % (self.name, self.value)
def __init__(self, node):
BaseType.__init__(self, node)
self._last_value = 0
self.elements = []
self.isStronglyTyped = None
self.numberOfElements = None
self.isEnum = True
# an enum does not depend on other types
self.level = 0
self.size = 1
def iter(self):
""" Iterate over all sub-elements of the enum """
for element in self.elements:
yield element
def evaluate(self, tree):
if self.node is None:
return
self.description = xml_utils.get_description(self.node)
self.string = xml_utils.get_string(self.node)
self.isStronglyTyped = False
typed = self.node.get('typed')
if typed is not None:
if typed in ["strong", "weak"]:
self.isStronglyTyped = (typed == "strong")
else:
                raise ParserException("Attribute typed of element in enum has to be either `strong` or `weak` (found: '%s')" % typed)
self.underlyingType = self.node.get('underlyingType')
if self.underlyingType is None:
self.underlyingType = "uint8_t"
if self.underlyingType is not None and self.underlyingType not in VALID_UNDERLYING_TYPES_FOR_ENUMS:
raise ParserException("Attribute underlyingType of element in enum has to be a built in value (found: '%s')" % (self.underlyingType))
# FIXME calculate actual size depending on the value for the enum elements
if '8' in self.underlyingType:
self.size = 1
elif '16' in self.underlyingType:
self.size = 2
elif '32' in self.underlyingType:
self.size = 4
elif '64' in self.underlyingType:
self.size = 8
for node in self.node.findall('element'):
self.__add(self.Element(node, self.isStronglyTyped))
self.numberOfElements = len(self.elements)
self.node = None
def __add(self, element):
""" Add an element to the enum.
This has to be done in the order the elements should appear later.
"""
if element.value == None:
element.value = self._last_value
self._last_value += 1
else:
try:
self._last_value = element.value + 1
except ValueError:
pass
self.elements.append(element)
def create_hierarchy(self):
pass
def dump(self):
str = "%s : enum|%i [%i]\n" % (self.name, self.level, self.size)
for element in self.iter():
str += " + %s\n" % element
return str[:-1]
def __str__(self):
return "%s : enum|%i" % (self.name, self.level)
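# --- Editor's illustrative note (not part of the original parser) ---
# Shape of an <enum> node as read by Enum.evaluate() above (names are made up):
#   <enum name="color" underlyingType="uint8_t" typed="strong">
#       <element name="Red" value="0"/>
#       <element name="Green"/>   <!-- value auto-increments to 1 -->
#   </enum>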
class SubType:
""" Subordinate type for struct and typedefs.
Used to distinguish between normal and array types for the subtype of
struct elements and typedefs.
Attributes:
raw -- raw typename.
name -- type without a possible array definition.
count -- Number of array elements. Defaults to 1 if the type is no array.
isArray -- Subtype is an array
Example:
type = SubType("uint8_t")
=> .raw = "uint8_t"
.name = "uint8_t"
.isArray = False
.count = 1
.type = BuiltIn("uint8_t")
type = SubType("char[8])
=> .raw = "char[8]"
.name = "char"
.isArray = True
.count = 8
.type = BuiltIn("char")
"""
def __init__(self, value, types):
""" Constructor
Keyword Arguments:
value -- type name
types -- list of all available types
"""
self.raw = value
if value.endswith(']'):
self.isArray = True
self.name, number = value.split('[')
self.count = number[:-1]
else:
self.isArray = False
self.count = 1
self.name = value
try:
self.type = types[self.name]
except KeyError:
raise ParserException("Unknown type '%s'" % self.name)
self.size = None
def __str__(self):
return self.raw
class Struct(BaseType):
""" Representation of a Struct
Attributes:
size -- Size of the struct in bytes. Will always return the total size
of the flattened struct if the struct is part of inheritence
structure.
"""
class Element(object):
def __init__(self, node, tree):
self.name = node.get('name')
self.description = xml_utils.get_description(node)
self.string = xml_utils.get_string(node)
self.subtype = SubType(node.get('type'), tree.types)
self.unit = node.get('unit')
self.value = node.get('value')
self.level = None
self.size = None
def create_hierarchy(self):
subtype = self.subtype.type
if subtype.level is None:
subtype.create_hierarchy()
self.level = subtype.level
self.size = subtype.size
def __str__(self):
return "%s : %s" % (self.name, self.subtype)
def __init__(self, node):
BaseType.__init__(self, node)
self.isStruct = True
self.elements = []
self.extends = None
self.extending = []
self.typeIdentifier = None
self.__flattened = None
self.__typeIdentifierName = None
def iter(self):
""" Iterate over all sub-elements of the enum """
for element in self.elements:
yield element
def evaluate(self, tree):
if self.node is None:
return
self.description = xml_utils.get_description(self.node)
self.string = xml_utils.get_string(self.node)
for node in self.node.findall('element'):
try:
self.elements.append(self.Element(node, tree))
except ParserException as e:
raise ParserException("Error in definition of struct '%s': %s!" % (self.name, e))
basetype = self.node.get('extends')
if basetype is not None:
try:
self.extends = tree.types[basetype]
if not self.extends.isStruct:
raise ParserException("Struct '%s' is derived from non struct '%s'!" % (self.name, self.extends.name))
if self.extends.extends:
raise ParserException("Struct '%s' extends struct '%s'. Structs are only allowed to extend from those Structs, which do not extend anything!" % (self.name, self.extends.name))
self.__typeIdentifierName = self.node.get('typeIdentifier')
if self.__typeIdentifierName is None:
raise ParserException("Struct '%s' does extend '%s', but does not provide the attribute 'typeIdentifier'!" % (self.name, self.extends.name))
except KeyError:
raise ParserException("Unknown super type '%s' in struct '%s'!" % (basetype, self.name))
self.node = None
def create_hierarchy(self):
""" Create hierarchy
		For this method self.size = 0 is used as a special value to detect
loops in the definition of types. In normal operation size will never be
zero, only during hierarchy creation.
"""
if self.level is not None:
return
self.size = 0
size = 0
self.level = 0
for element in self.elements:
if element.size == 0:
				raise ParserException("Loop in the definition of '%s' and '%s' detected!" % (self.name, element.name))
element.create_hierarchy()
size += element.size
self.level = max(self.level, element.level)
if self.extends is not None:
if self.extends.size == 0:
raise ParserException("Loop in the definition of '%s' and '%s' detected!" % (self.name, self.extends.name))
self.extends.create_hierarchy()
typeIdentifierStructElement = self.extends.elements[0]
if not typeIdentifierStructElement.subtype.type.isEnum:
				raise ParserException("Struct '%s' is extended by Struct '%s'. " \
					"Structs which are extended by others must have an element named " \
					"'type' of any enum type as their first element! It is used for " \
					"type distinguishing at runtime." \
					% (self.extends.name, self.name))
if not typeIdentifierStructElement.name == 'type':
				raise ParserException("Struct '%s' is extended by Struct '%s'. Structs " \
					"which are extended by others must have an element named 'type' as " \
					"their first element! It is used for type distinguishing at runtime." \
% (self.extends.name, self.name))
for enumElement in typeIdentifierStructElement.subtype.type.elements:
if enumElement.name == self.__typeIdentifierName:
self.typeIdentifier = enumElement
break
if not self.typeIdentifier:
				raise ParserException("Struct '%s' extends Struct '%s', but its " \
					"typeIdentifier '%s' is not a member of enum '%s' which is the " \
"type of '%s.type'."
% (self.name, self.extends.name, self.__typeIdentifierName,
typeIdentifierStructElement.subtype.type.name, self.extends.name))
self.extends.__addExtending(self)
size += self.extends.size
self.level = max(self.level, self.extends.level)
if size > 48:
			raise ParserException("Struct '%s' is too big (%i bytes). The maximum " \
				"packet size is 48 bytes!" % (self.name, size))
self.size = size
self.level += 1
def flattened(self):
if self.__flattened is None:
if self.extends is None:
self.__flattened = self
else:
self.__flattened = copy.copy(self)
self.__flattened.elements = self.elements[:]
# prepend all elements for the super type
self.__flattened.elements[0:0] = self.extends.flattened().elements
return self.__flattened
def dump(self):
str = "%s : struct|%i [%i]\n" % \
(self.name, self.level, self.size)
for element in self.iter():
str += " + %s\n" % element
return str[:-1]
def __str__(self):
return "%s : struct|%i" % (self.name, self.level)
def __addExtending(self, extending):
for struct in self.extending:
if struct.typeIdentifier == extending.typeIdentifier:
raise ParserException("Duplicate TypeIdentifier '%s' in Struct group extending '%s'. ('%s' and '%s')" % (extending.typeIdentifier.name, self.name, struct.name, extending.name))
self.extending.append(extending)
class Typedef(BaseType):
def __init__(self, node):
BaseType.__init__(self, node)
self.subtype = None
self.unit = None
self.isTypedef = True
def evaluate(self, tree):
self.description = xml_utils.get_description(self.node)
self.string = xml_utils.get_string(self.node)
self.unit = self.node.get('unit')
try:
self.subtype = SubType(self.node.get('type'), tree.types)
except ParserException as e:
raise ParserException("Error in definition of typedef '%s': %s" % (self.name, e))
def create_hierarchy(self):
""" Create hierarchy
See Struct.create_hierarchy() for a detailed description of this
method.
"""
if self.level is not None:
return
self.size = 0
subtype = self.subtype.type
if subtype.level is None:
if self.subtype.size == 0:
raise ParserException("Loop in the definition of '%s' and '%s' detected!" % (self.name, self.subtype.name))
subtype.create_hierarchy()
self.level = subtype.level + 1
self.size = subtype.size
def dump(self):
return "%s : typedef|%i [%i]\n -> %s" % (self.name, self.level, self.size, self.subtype)
def __str__(self):
return "%s : typedef|%i" % (self.name, self.level)
| dergraaf/xpcc | tools/system_design/xmlparser/type.py | Python | bsd-3-clause | 14,044 |
"""
(This script is not generally useful for most ilastik users or developers.)
Input: hdf5 volume
Output: directory of .png tiles representing the volume.
"""
if __name__ == "__main__":
import sys
import h5py
import logging
import argparse
from lazyflow.utility import PathComponents, export_to_tiles
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.INFO)
# Usage: python make_tiles.py --tile_size=250 /path/to/my_vol.h5/some/dataset /path/to/output_dir
parser = argparse.ArgumentParser()
parser.add_argument('--tile_size', type=int)
parser.add_argument('hdf5_dataset_path')
parser.add_argument('output_dir')
parsed_args = parser.parse_args( sys.argv[1:] )
path_comp = PathComponents( parsed_args.hdf5_dataset_path )
with h5py.File(path_comp.externalPath) as input_file:
vol_dset = input_file[path_comp.internalPath]
export_to_tiles( vol_dset, parsed_args.tile_size, parsed_args.output_dir )
| stuarteberg/lazyflow | bin/make_tiles.py | Python | lgpl-3.0 | 1,058 |
# -*- coding: utf-8 -*-
#+---------------------------------------------------------------------------+
#| 01001110 01100101 01110100 01111010 01101111 01100010 |
#| |
#| Netzob : Inferring communication protocols |
#+---------------------------------------------------------------------------+
#| Copyright (C) 2011 Georges Bossert and Frédéric Guihéry |
#| This program is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| This program is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
#+---------------------------------------------------------------------------+
#| @url : http://www.netzob.org |
#| @contact : [email protected] |
#| @sponsors : Amossys, http://www.amossys.fr |
#| Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Standard library imports |
#+---------------------------------------------------------------------------+
from gettext import gettext as _
import logging
import os
#+---------------------------------------------------------------------------+
#| Related third party imports |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Local application imports |
#+---------------------------------------------------------------------------+
from netzob.UI.NetzobWidgets import NetzobInfoMessage, NetzobErrorMessage
from netzob.Common.Plugins.Capturers.AbstractCapturerController import AbstractCapturerController
from NetworkCapturerView import NetworkCapturerView
from NetworkCapturer import NetworkCapturer
from netzob.Common.Models.L4NetworkMessage import L4NetworkMessage
from netzob.Common.NetzobException import NetzobImportException
class NetworkCapturerController(AbstractCapturerController):
"""NetworkCapturerController:
    A controller linking the network capturer and its view in the netzob GUI.
"""
COLUMN_ID = 0
COLUMN_SELECTED = 1
def __init__(self, netzob, plugin):
"""Constructor of NetworkCapturerController:
@type netzob: netzob.NetzobGUI.NetzobGUI
@param netzob: the main netzob project.
"""
view = NetworkCapturerView(plugin, self)
super(NetworkCapturerController, self).__init__(netzob, plugin, view)
self.model = NetworkCapturer(netzob)
for device in self.model.getNetworkDevices():
self.view.deviceCombo.append_text(str(device))
def getImportLayer(self):
return self.model.importLayer
def setImportLayer(self, importLayer):
self.model.setImportLayer(importLayer)
importLayer = property(getImportLayer, setImportLayer)
def clearFilterButton_clicked_cb(self, widget):
self.view.filterEntry.set_text("")
def layerRadioButton_toggled_cb(self, widget):
if self.view.layerRadioButton1.get_active():
self.importLayer = 1
self.view.makeL1ImportTreeView()
elif self.view.layerRadioButton2.get_active():
self.importLayer = 2
self.view.makeL2ImportTreeView()
elif self.view.layerRadioButton3.get_active():
self.importLayer = 3
self.view.makeL3ImportTreeView()
else:
self.importLayer = 4
self.view.makeL4ImportTreeView()
def doReadMessages(self):
# Sanity checks
device = self.view.deviceCombo.get_active_text()
if device is None:
NetzobErrorMessage(_("Incorrect device"))
return
count = self.view.countEntry.get_text()
try:
count = int(count)
except ValueError:
NetzobErrorMessage(_("Incorrect count"))
return
if count < 1:
            NetzobErrorMessage(_("Incorrect count"))
            return
time = self.view.timeEntry.get_text()
try:
time = int(time)
except ValueError:
NetzobErrorMessage(_("Incorrect time"))
return
if time < 1:
            NetzobErrorMessage(_("Incorrect time"))
            return
# Launch packets capturing
try:
self.model.setBPFFilter(self.view.filterEntry.get_text().strip())
self.model.readMessages(self.callback_readMessage, device, count, time)
except NetzobImportException, importEx:
if importEx.statusCode == WARNING:
self.view.showWarning(importEx.message)
else:
NetzobErrorMessage(importEx.message)
def callback_readMessage(self, message):
# Display all read messages
if self.importLayer == 1:
self.view.listListStore.append([str(message.getID()), False,
message.getStringData()])
elif self.importLayer == 2:
self.view.listListStore.append([str(message.getID()), False,
str(message.getL2SourceAddress()),
str(message.getL2DestinationAddress()),
message.getStringData()])
elif self.importLayer == 3:
self.view.listListStore.append([str(message.getID()), False,
str(message.getL3SourceAddress()),
str(message.getL3DestinationAddress()),
message.getStringData()])
else:
self.view.listListStore.append([str(message.getID()), False,
str(message.getL3SourceAddress()), str(message.getL3DestinationAddress()),
str(message.getL4Protocol()), str(message.getL4SourcePort()), str(message.getL4DestinationPort()),
message.getStringData()])
def doGetMessageDetails(self, messageID):
return self.model.getMessageDetails(messageID)
def doImportMessages(self, selectedPackets):
self.model.saveMessagesInCurrentProject(selectedPackets)
| nagyistoce/netzob | src/netzob_plugins/Capturers/NetworkCapturer/NetworkCapturer/NetworkCapturerController.py | Python | gpl-3.0 | 7,440 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'AuthoredData.user'
db.alter_column(u'dingos_authoring_authoreddata', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True))
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'AuthoredData.user'
raise RuntimeError("Cannot reverse this migration. 'AuthoredData.user' and its values cannot be restored.")
# The following code is provided here to aid in writing a correct migration
# Changing field 'AuthoredData.user'
db.alter_column(u'dingos_authoring_authoreddata', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User']))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'dingos.datatypenamespace': {
'Meta': {'object_name': 'DataTypeNameSpace'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'uri': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'dingos.fact': {
'Meta': {'object_name': 'Fact'},
'fact_term': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos.FactTerm']"}),
'fact_values': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['dingos.FactValue']", 'null': 'True', 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value_iobject_id': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'value_of_set'", 'null': 'True', 'to': u"orm['dingos.Identifier']"}),
'value_iobject_ts': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
u'dingos.factdatatype': {
'Meta': {'unique_together': "(('name', 'namespace'),)", 'object_name': 'FactDataType'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'namespace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fact_data_type_set'", 'to': u"orm['dingos.DataTypeNameSpace']"})
},
u'dingos.factterm': {
'Meta': {'unique_together': "(('term', 'attribute'),)", 'object_name': 'FactTerm'},
'attribute': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
u'dingos.facttermnamespacemap': {
'Meta': {'object_name': 'FactTermNamespaceMap'},
'fact_term': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos.FactTerm']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'namespaces': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['dingos.DataTypeNameSpace']", 'through': u"orm['dingos.PositionalNamespace']", 'symmetrical': 'False'})
},
u'dingos.factvalue': {
'Meta': {'unique_together': "(('value', 'fact_data_type', 'storage_location'),)", 'object_name': 'FactValue'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'fact_data_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fact_value_set'", 'to': u"orm['dingos.FactDataType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'storage_location': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {})
},
u'dingos.identifier': {
'Meta': {'unique_together': "(('uid', 'namespace'),)", 'object_name': 'Identifier'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'latest_of'", 'unique': 'True', 'null': 'True', 'to': u"orm['dingos.InfoObject']"}),
'namespace': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos.IdentifierNameSpace']"}),
'uid': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
},
u'dingos.identifiernamespace': {
'Meta': {'object_name': 'IdentifierNameSpace'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_substitution': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'uri': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'dingos.infoobject': {
'Meta': {'ordering': "['-timestamp']", 'unique_together': "(('identifier', 'timestamp'),)", 'object_name': 'InfoObject'},
'create_timestamp': ('django.db.models.fields.DateTimeField', [], {}),
'facts': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['dingos.Fact']", 'through': u"orm['dingos.InfoObject2Fact']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_set'", 'to': u"orm['dingos.Identifier']"}),
'iobject_family': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_set'", 'to': u"orm['dingos.InfoObjectFamily']"}),
'iobject_family_revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['dingos.Revision']"}),
'iobject_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_set'", 'to': u"orm['dingos.InfoObjectType']"}),
'iobject_type_revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['dingos.Revision']"}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Unnamed'", 'max_length': '255', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {}),
'uri': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
u'dingos.infoobject2fact': {
'Meta': {'ordering': "['node_id__name']", 'object_name': 'InfoObject2Fact'},
'attributed_fact': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attributes'", 'null': 'True', 'to': u"orm['dingos.InfoObject2Fact']"}),
'fact': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_thru'", 'to': u"orm['dingos.Fact']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iobject': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fact_thru'", 'to': u"orm['dingos.InfoObject']"}),
'namespace_map': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos.FactTermNamespaceMap']", 'null': 'True'}),
'node_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos.NodeID']"})
},
u'dingos.infoobjectfamily': {
'Meta': {'object_name': 'InfoObjectFamily'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'})
},
u'dingos.infoobjecttype': {
'Meta': {'unique_together': "(('name', 'iobject_family', 'namespace'),)", 'object_name': 'InfoObjectType'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iobject_family': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_type_set'", 'to': u"orm['dingos.InfoObjectFamily']"}),
'name': ('django.db.models.fields.SlugField', [], {'max_length': '30'}),
'namespace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_type_set'", 'blank': 'True', 'to': u"orm['dingos.DataTypeNameSpace']"})
},
u'dingos.nodeid': {
'Meta': {'object_name': 'NodeID'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'dingos.positionalnamespace': {
'Meta': {'object_name': 'PositionalNamespace'},
'fact_term_namespace_map': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'namespaces_thru'", 'to': u"orm['dingos.FactTermNamespaceMap']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'namespace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fact_term_namespace_map_thru'", 'to': u"orm['dingos.DataTypeNameSpace']"}),
'position': ('django.db.models.fields.SmallIntegerField', [], {})
},
u'dingos.revision': {
'Meta': {'object_name': 'Revision'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
u'dingos_authoring.authoreddata': {
'Meta': {'unique_together': "(('group', 'user', 'identifier', 'kind', 'timestamp'),)", 'object_name': 'AuthoredData'},
'author_view': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos_authoring.AuthorView']", 'null': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos_authoring.Identifier']"}),
'kind': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'latest': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'processing_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'})
},
u'dingos_authoring.authorview': {
'Meta': {'object_name': 'AuthorView'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'dingos_authoring.groupnamespacemap': {
'Meta': {'object_name': 'GroupNamespaceMap'},
'allowed_namespaces': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'authoring_allowed_for'", 'blank': 'True', 'to': u"orm['dingos.IdentifierNameSpace']"}),
'default_namespace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'authoring_default_for'", 'to': u"orm['dingos.IdentifierNameSpace']"}),
'group': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'dingos_authoring.identifier': {
'Meta': {'object_name': 'Identifier'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'dingos_authoring.infoobject2authoreddata': {
'Meta': {'object_name': 'InfoObject2AuthoredData'},
'authored_data': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['dingos_authoring.AuthoredData']", 'unique': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iobject': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'created_from_thru'", 'unique': 'True', 'to': u"orm['dingos.InfoObject']"})
}
}
complete_apps = ['dingos_authoring'] | siemens/django-dingos-authoring | dingos_authoring/south_migrations/0006_auto__chg_field_authoreddata_user.py | Python | gpl-2.0 | 17,097 |
#!/usr/bin/env python
# coding=utf-8
import random
import time
import urllib
import hmac
import hashlib
import binascii
import base64
import cos_cred
class Auth(object):
def __init__(self, cred):
self.cred = cred
def app_sign(self, bucket, cos_path, expired, upload_sign=True):
appid = self.cred.get_appid()
bucket = bucket.encode('utf8')
secret_id = self.cred.get_secret_id().encode('utf8')
now = int(time.time())
rdm = random.randint(0, 999999999)
cos_path = urllib.quote(cos_path.encode('utf8'), '~/')
if upload_sign:
fileid = '/%s/%s%s' % (appid, bucket, cos_path)
else:
fileid = cos_path
sign_tuple = (appid, secret_id, expired, now, rdm, fileid, bucket)
plain_text = 'a=%s&k=%s&e=%d&t=%d&r=%d&f=%s&b=%s' % sign_tuple
secret_key = self.cred.get_secret_key().encode('utf8')
sha1_hmac = hmac.new(secret_key, plain_text, hashlib.sha1)
hmac_digest = sha1_hmac.hexdigest()
hmac_digest = binascii.unhexlify(hmac_digest)
sign_hex = hmac_digest + plain_text
sign_base64 = base64.b64encode(sign_hex)
return sign_base64
    # One-time signature (for delete and update operations)
    # bucket: bucket name
    # cos_path: the cos path to operate on, starting with '/'
def sign_once(self, bucket, cos_path):
return self.app_sign(bucket, cos_path, 0)
    # Multi-use signature (for uploading files, creating directories,
    # getting file/directory attributes, and listing directories)
    # bucket: bucket name
    # cos_path: the cos path to operate on, starting with '/'
    # expired: signature expiration time as a UNIX timestamp;
    #          e.g. to let the signature expire in 30 seconds, set expired to the current time plus 30 seconds
def sign_more(self, bucket, cos_path, expired):
return self.app_sign(bucket, cos_path, expired)
    # Download signature (appended to a download URL to fetch files from a private bucket)
    # bucket: bucket name
    # cos_path: the cos file path to download, starting with '/'
    # expired: signature expiration time as a UNIX timestamp;
    #          e.g. to let the signature expire in 30 seconds, set expired to the current time plus 30 seconds
def sign_download(self, bucket, cos_path, expired):
return self.app_sign(bucket, cos_path, expired, False)
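# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only). It assumes the companion cos_cred
# module exposes a CredInfo(appid, secret_id, secret_key) class providing the
# getters used by Auth above; that class name and the sample values below are
# assumptions, not verified API.
if __name__ == '__main__':
    demo_cred = cos_cred.CredInfo(100000, u'your-secret-id', u'your-secret-key')
    demo_auth = Auth(demo_cred)
    # reusable signature, valid for 180 seconds (e.g. for uploads)
    print(demo_auth.sign_more(u'mybucket', u'/photos/', int(time.time()) + 180))
    # one-off signature for deleting a single object
    print(demo_auth.sign_once(u'mybucket', u'/photos/a.jpg'))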
| tencentyun/cos-python-sdk | qcloud_cos/cos_auth.py | Python | mit | 2,321 |
"""W3C Document Object Model implementation for Python.
The Python mapping of the Document Object Model is documented in <...>.
This package contains the following modules:
minidom -- A simple implementation of the Level 1 DOM with namespace
support added (based on the Level 2 specification).
"""
| atmark-techno/atmark-dist | user/python/Lib/xml/dom/__init__.py | Python | gpl-2.0 | 313 |
import random
CHARS = 'abcdefghijkmnpqrstuvwxyz3456789'
CURSE_CHARS = 'csfhuit'
NO_CURSE_CHARS = list(set(CHARS).difference(set(CURSE_CHARS)))
__all__ = ['randomish_id']
def randomish_id(length=6):
s = ''
# Algorithm for avoiding English curse words taken from Hashids
# (<http://hashids.org>).
for _ in range(length):
choices = NO_CURSE_CHARS if s[-1:] in CURSE_CHARS else CHARS
s += random.choice(choices)
return s
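if __name__ == '__main__':
    # Quick demo (illustrative, not part of the module API): print a few IDs.
    for _ in range(5):
        print(randomish_id())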
| editorsnotes/editorsnotes | editorsnotes/main/utils/randomish_id.py | Python | agpl-3.0 | 458 |
import sys
def swap_numbers(word):
    # Swap the first and last characters (digits) of the given token.
    return word[-1] + word[1:-1] + word[0]
def main():
test_cases = open(sys.argv[1], 'r')
for test in test_cases:
print(' '.join([swap_numbers(word) for word in test.strip().split(' ')]))
test_cases.close()
if __name__ == '__main__':
main()
| mpillar/codeeval | 0-easy/swap-numbers/main.py | Python | unlicense | 304 |
# -*- coding: utf-8 -*-
import pytest
from datetime import datetime
from wasp_general.task.thread import WThreadTask
from wasp_general.datetime import utc_datetime
from wasp_general.task.scheduler.proto import WScheduleTask, WScheduleRecord, WTaskSourceProto
from wasp_general.task.scheduler.proto import WRunningRecordRegistryProto, WSchedulerServiceProto
def test_abstract():
pytest.raises(TypeError, WTaskSourceProto)
pytest.raises(NotImplementedError, WTaskSourceProto.has_records, None)
pytest.raises(NotImplementedError, WTaskSourceProto.next_start, None)
pytest.raises(NotImplementedError, WTaskSourceProto.tasks_planned, None)
pytest.raises(NotImplementedError, WTaskSourceProto.scheduler_service, None)
pytest.raises(TypeError, WRunningRecordRegistryProto)
schedule = WScheduleRecord(TestWScheduleTask.DummyTask())
pytest.raises(NotImplementedError, WRunningRecordRegistryProto.exec, None, schedule)
pytest.raises(NotImplementedError, WRunningRecordRegistryProto.running_records, None)
pytest.raises(TypeError, WSchedulerServiceProto)
pytest.raises(NotImplementedError, WSchedulerServiceProto.update, None)
class TestWScheduleTask:
class DummyTask(WScheduleTask):
def thread_started(self):
pass
def thread_stopped(self):
pass
def test(self):
task = TestWScheduleTask.DummyTask()
assert(isinstance(task, WScheduleTask) is True)
assert(isinstance(task, WThreadTask) is True)
assert(task.stop_event() is not None)
assert(task.ready_event() is not None)
assert(task.uid() is not None)
assert(TestWScheduleTask.DummyTask().uid() != TestWScheduleTask.DummyTask().uid())
class TestWScheduleRecord:
def test(self):
task = TestWScheduleTask.DummyTask()
pytest.raises(TypeError, WScheduleRecord, task, policy=1)
schedule = WScheduleRecord(task)
assert(schedule.task() == task)
assert(schedule.policy() == WScheduleRecord.PostponePolicy.wait)
assert(schedule.task_group_id() is None)
assert(schedule.task_uid() == task.uid())
drop_callback_result = []
def drop_callback():
drop_callback_result.append(1)
wait_callback_result = []
def wait_callback():
wait_callback_result.append(1)
schedule = WScheduleRecord(task, on_drop=drop_callback)
assert(drop_callback_result == [])
schedule.task_dropped()
assert(drop_callback_result == [1])
schedule = WScheduleRecord(task, on_wait=wait_callback)
assert(wait_callback_result == [])
schedule.task_postponed()
assert(wait_callback_result == [1])
| a1ezzz/wasp-general | tests/wasp_general_task_scheduler_proto_test.py | Python | lgpl-3.0 | 2,491 |
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import boto.cloudformation as cfn
import fixtures
import json
import mock
import os
import tempfile
import testtools
import testtools.matchers as ttm
from heat_cfntools.cfntools import cfn_helper
def popen_root_calls(calls):
kwargs = {'env': None, 'cwd': None, 'stderr': -1, 'stdout': -1}
return [
mock.call(['su', 'root', '-c', call], **kwargs)
for call in calls
]
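# For example, popen_root_calls(['ls /tmp']) evaluates to the single expected
# call mock.call(['su', 'root', '-c', 'ls /tmp'],
#                env=None, cwd=None, stderr=-1, stdout=-1).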
class FakePOpen():
def __init__(self, stdout='', stderr='', returncode=0):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
def communicate(self):
return (self.stdout, self.stderr)
def wait(self):
pass
class TestCommandRunner(testtools.TestCase):
def test_command_runner(self):
def returns(*args, **kwargs):
if args[0][3] == '/bin/command1':
return FakePOpen('All good')
elif args[0][3] == '/bin/command2':
return FakePOpen('Doing something', 'error', -1)
else:
raise Exception('This should never happen')
with mock.patch('subprocess.Popen') as mock_popen:
mock_popen.side_effect = returns
cmd2 = cfn_helper.CommandRunner('/bin/command2')
cmd1 = cfn_helper.CommandRunner('/bin/command1', cmd2)
cmd1.run('root')
self.assertEqual(
'CommandRunner:\n\tcommand: /bin/command1\n\tstdout: All good',
str(cmd1))
self.assertEqual(
'CommandRunner:\n\tcommand: /bin/command2\n\tstatus: -1\n'
'\tstdout: Doing something\n\tstderr: error',
str(cmd2))
calls = popen_root_calls(['/bin/command1', '/bin/command2'])
mock_popen.assert_has_calls(calls)
class TestPackages(testtools.TestCase):
def test_yum_install(self):
def returns(*args, **kwargs):
if args[0][3].startswith('rpm -q '):
return FakePOpen(returncode=1)
else:
return FakePOpen(returncode=0)
calls = ['which yum']
for pack in ('httpd', 'wordpress', 'mysql-server'):
calls.append('rpm -q %s' % pack)
calls.append('yum -y --showduplicates list available %s' % pack)
calls = popen_root_calls(calls)
packages = {
"yum": {
"mysql-server": [],
"httpd": [],
"wordpress": []
}
}
with mock.patch('subprocess.Popen') as mock_popen:
mock_popen.side_effect = returns
cfn_helper.PackagesHandler(packages).apply_packages()
mock_popen.assert_has_calls(calls, any_order=True)
def test_dnf_install_yum_unavailable(self):
def returns(*args, **kwargs):
if args[0][3].startswith('rpm -q ') \
or args[0][3] == 'which yum':
return FakePOpen(returncode=1)
else:
return FakePOpen(returncode=0)
calls = ['which yum']
for pack in ('httpd', 'wordpress', 'mysql-server'):
calls.append('rpm -q %s' % pack)
calls.append('dnf -y --showduplicates list available %s' % pack)
calls = popen_root_calls(calls)
packages = {
"yum": {
"mysql-server": [],
"httpd": [],
"wordpress": []
}
}
with mock.patch('subprocess.Popen') as mock_popen:
mock_popen.side_effect = returns
cfn_helper.PackagesHandler(packages).apply_packages()
mock_popen.assert_has_calls(calls, any_order=True)
def test_dnf_install(self):
def returns(*args, **kwargs):
if args[0][3].startswith('rpm -q '):
return FakePOpen(returncode=1)
else:
return FakePOpen(returncode=0)
calls = []
for pack in ('httpd', 'wordpress', 'mysql-server'):
calls.append('rpm -q %s' % pack)
calls.append('dnf -y --showduplicates list available %s' % pack)
calls = popen_root_calls(calls)
packages = {
"dnf": {
"mysql-server": [],
"httpd": [],
"wordpress": []
}
}
with mock.patch('subprocess.Popen') as mock_popen:
mock_popen.side_effect = returns
cfn_helper.PackagesHandler(packages).apply_packages()
mock_popen.assert_has_calls(calls, any_order=True)
def test_zypper_install(self):
def returns(*args, **kwargs):
if args[0][3].startswith('rpm -q '):
return FakePOpen(returncode=1)
else:
return FakePOpen(returncode=0)
calls = []
for pack in ('httpd', 'wordpress', 'mysql-server'):
calls.append('rpm -q %s' % pack)
calls.append('zypper -n --no-refresh search %s' % pack)
calls = popen_root_calls(calls)
packages = {
"zypper": {
"mysql-server": [],
"httpd": [],
"wordpress": []
}
}
with mock.patch('subprocess.Popen') as mock_popen:
mock_popen.side_effect = returns
cfn_helper.PackagesHandler(packages).apply_packages()
mock_popen.assert_has_calls(calls, any_order=True)
def test_apt_install(self):
packages = {
"apt": {
"mysql-server": [],
"httpd": [],
"wordpress": []
}
}
with mock.patch('subprocess.Popen') as mock_popen:
mock_popen.return_value = FakePOpen(returncode=0)
cfn_helper.PackagesHandler(packages).apply_packages()
self.assertTrue(mock_popen.called)
class TestServicesHandler(testtools.TestCase):
def test_services_handler_systemd(self):
calls = []
returns = []
# apply_services
calls.append('/bin/systemctl enable httpd.service')
returns.append(FakePOpen())
calls.append('/bin/systemctl status httpd.service')
returns.append(FakePOpen(returncode=-1))
calls.append('/bin/systemctl start httpd.service')
returns.append(FakePOpen())
calls.append('/bin/systemctl enable mysqld.service')
returns.append(FakePOpen())
calls.append('/bin/systemctl status mysqld.service')
returns.append(FakePOpen(returncode=-1))
calls.append('/bin/systemctl start mysqld.service')
returns.append(FakePOpen())
# monitor_services not running
calls.append('/bin/systemctl status httpd.service')
returns.append(FakePOpen(returncode=-1))
calls.append('/bin/systemctl start httpd.service')
returns.append(FakePOpen())
calls.append('/bin/services_restarted')
returns.append(FakePOpen())
calls.append('/bin/systemctl status mysqld.service')
returns.append(FakePOpen(returncode=-1))
calls.append('/bin/systemctl start mysqld.service')
returns.append(FakePOpen())
calls.append('/bin/services_restarted')
returns.append(FakePOpen())
# monitor_services running
calls.append('/bin/systemctl status httpd.service')
returns.append(FakePOpen())
calls.append('/bin/systemctl status mysqld.service')
returns.append(FakePOpen())
calls = popen_root_calls(calls)
services = {
"systemd": {
"mysqld": {"enabled": "true", "ensureRunning": "true"},
"httpd": {"enabled": "true", "ensureRunning": "true"}
}
}
hooks = [
cfn_helper.Hook(
'hook1',
'service.restarted',
'Resources.resource1.Metadata',
'root',
'/bin/services_restarted')
]
with mock.patch('os.path.exists') as mock_exists:
mock_exists.return_value = True
with mock.patch('subprocess.Popen') as mock_popen:
mock_popen.side_effect = returns
sh = cfn_helper.ServicesHandler(services, 'resource1', hooks)
sh.apply_services()
# services not running
sh.monitor_services()
# services running
sh.monitor_services()
mock_popen.assert_has_calls(calls, any_order=True)
mock_exists.assert_called_with('/bin/systemctl')
def test_services_handler_systemd_disabled(self):
calls = []
# apply_services
calls.append('/bin/systemctl disable httpd.service')
calls.append('/bin/systemctl status httpd.service')
calls.append('/bin/systemctl stop httpd.service')
calls.append('/bin/systemctl disable mysqld.service')
calls.append('/bin/systemctl status mysqld.service')
calls.append('/bin/systemctl stop mysqld.service')
calls = popen_root_calls(calls)
services = {
"systemd": {
"mysqld": {"enabled": "false", "ensureRunning": "false"},
"httpd": {"enabled": "false", "ensureRunning": "false"}
}
}
hooks = [
cfn_helper.Hook(
'hook1',
'service.restarted',
'Resources.resource1.Metadata',
'root',
'/bin/services_restarted')
]
with mock.patch('os.path.exists') as mock_exists:
mock_exists.return_value = True
with mock.patch('subprocess.Popen') as mock_popen:
mock_popen.return_value = FakePOpen()
sh = cfn_helper.ServicesHandler(services, 'resource1', hooks)
sh.apply_services()
mock_popen.assert_has_calls(calls, any_order=True)
mock_exists.assert_called_with('/bin/systemctl')
def test_services_handler_sysv_service_chkconfig(self):
def exists(*args, **kwargs):
return args[0] != '/bin/systemctl'
calls = []
returns = []
# apply_services
calls.append('/sbin/chkconfig httpd on')
returns.append(FakePOpen())
calls.append('/sbin/service httpd status')
returns.append(FakePOpen(returncode=-1))
calls.append('/sbin/service httpd start')
returns.append(FakePOpen())
# monitor_services not running
calls.append('/sbin/service httpd status')
returns.append(FakePOpen(returncode=-1))
calls.append('/sbin/service httpd start')
returns.append(FakePOpen())
calls.append('/bin/services_restarted')
returns.append(FakePOpen())
# monitor_services running
calls.append('/sbin/service httpd status')
returns.append(FakePOpen())
calls = popen_root_calls(calls)
services = {
"sysvinit": {
"httpd": {"enabled": "true", "ensureRunning": "true"}
}
}
hooks = [
cfn_helper.Hook(
'hook1',
'service.restarted',
'Resources.resource1.Metadata',
'root',
'/bin/services_restarted')
]
with mock.patch('os.path.exists') as mock_exists:
mock_exists.side_effect = exists
with mock.patch('subprocess.Popen') as mock_popen:
mock_popen.side_effect = returns
sh = cfn_helper.ServicesHandler(services, 'resource1', hooks)
sh.apply_services()
# services not running
sh.monitor_services()
# services running
sh.monitor_services()
mock_popen.assert_has_calls(calls)
mock_exists.assert_any_call('/bin/systemctl')
mock_exists.assert_any_call('/sbin/service')
mock_exists.assert_any_call('/sbin/chkconfig')
def test_services_handler_sysv_disabled_service_chkconfig(self):
def exists(*args, **kwargs):
return args[0] != '/bin/systemctl'
calls = []
# apply_services
calls.append('/sbin/chkconfig httpd off')
calls.append('/sbin/service httpd status')
calls.append('/sbin/service httpd stop')
calls = popen_root_calls(calls)
services = {
"sysvinit": {
"httpd": {"enabled": "false", "ensureRunning": "false"}
}
}
hooks = [
cfn_helper.Hook(
'hook1',
'service.restarted',
'Resources.resource1.Metadata',
'root',
'/bin/services_restarted')
]
with mock.patch('os.path.exists') as mock_exists:
mock_exists.side_effect = exists
with mock.patch('subprocess.Popen') as mock_popen:
mock_popen.return_value = FakePOpen()
sh = cfn_helper.ServicesHandler(services, 'resource1', hooks)
sh.apply_services()
mock_popen.assert_has_calls(calls)
mock_exists.assert_any_call('/bin/systemctl')
mock_exists.assert_any_call('/sbin/service')
mock_exists.assert_any_call('/sbin/chkconfig')
def test_services_handler_sysv_systemctl(self):
calls = []
returns = []
# apply_services
calls.append('/bin/systemctl enable httpd.service')
returns.append(FakePOpen())
calls.append('/bin/systemctl status httpd.service')
returns.append(FakePOpen(returncode=-1))
calls.append('/bin/systemctl start httpd.service')
returns.append(FakePOpen())
# monitor_services not running
calls.append('/bin/systemctl status httpd.service')
returns.append(FakePOpen(returncode=-1))
calls.append('/bin/systemctl start httpd.service')
returns.append(FakePOpen())
calls.append('/bin/services_restarted')
returns.append(FakePOpen())
# monitor_services running
calls.append('/bin/systemctl status httpd.service')
returns.append(FakePOpen())
calls = popen_root_calls(calls)
services = {
"sysvinit": {
"httpd": {"enabled": "true", "ensureRunning": "true"}
}
}
hooks = [
cfn_helper.Hook(
'hook1',
'service.restarted',
'Resources.resource1.Metadata',
'root',
'/bin/services_restarted')
]
with mock.patch('os.path.exists') as mock_exists:
mock_exists.return_value = True
with mock.patch('subprocess.Popen') as mock_popen:
mock_popen.side_effect = returns
sh = cfn_helper.ServicesHandler(services, 'resource1', hooks)
sh.apply_services()
# services not running
sh.monitor_services()
# services running
sh.monitor_services()
mock_popen.assert_has_calls(calls)
mock_exists.assert_called_with('/bin/systemctl')
def test_services_handler_sysv_disabled_systemctl(self):
calls = []
# apply_services
calls.append('/bin/systemctl disable httpd.service')
calls.append('/bin/systemctl status httpd.service')
calls.append('/bin/systemctl stop httpd.service')
calls = popen_root_calls(calls)
services = {
"sysvinit": {
"httpd": {"enabled": "false", "ensureRunning": "false"}
}
}
hooks = [
cfn_helper.Hook(
'hook1',
'service.restarted',
'Resources.resource1.Metadata',
'root',
'/bin/services_restarted')
]
with mock.patch('os.path.exists') as mock_exists:
mock_exists.return_value = True
with mock.patch('subprocess.Popen') as mock_popen:
mock_popen.return_value = FakePOpen()
sh = cfn_helper.ServicesHandler(services, 'resource1', hooks)
sh.apply_services()
mock_popen.assert_has_calls(calls)
mock_exists.assert_called_with('/bin/systemctl')
def test_services_handler_sysv_service_updaterc(self):
calls = []
returns = []
# apply_services
calls.append('/usr/sbin/update-rc.d httpd enable')
returns.append(FakePOpen())
calls.append('/usr/sbin/service httpd status')
returns.append(FakePOpen(returncode=-1))
calls.append('/usr/sbin/service httpd start')
returns.append(FakePOpen())
# monitor_services not running
calls.append('/usr/sbin/service httpd status')
returns.append(FakePOpen(returncode=-1))
calls.append('/usr/sbin/service httpd start')
returns.append(FakePOpen())
calls.append('/bin/services_restarted')
returns.append(FakePOpen())
# monitor_services running
calls.append('/usr/sbin/service httpd status')
returns.append(FakePOpen())
calls = popen_root_calls(calls)
services = {
"sysvinit": {
"httpd": {"enabled": "true", "ensureRunning": "true"}
}
}
hooks = [
cfn_helper.Hook(
'hook1',
'service.restarted',
'Resources.resource1.Metadata',
'root',
'/bin/services_restarted')
]
with mock.patch('os.path.exists') as mock_exists:
mock_exists.return_value = False
with mock.patch('subprocess.Popen') as mock_popen:
mock_popen.side_effect = returns
sh = cfn_helper.ServicesHandler(services, 'resource1', hooks)
sh.apply_services()
# services not running
sh.monitor_services()
# services running
sh.monitor_services()
mock_popen.assert_has_calls(calls)
mock_exists.assert_any_call('/bin/systemctl')
mock_exists.assert_any_call('/sbin/service')
mock_exists.assert_any_call('/sbin/chkconfig')
def test_services_handler_sysv_disabled_service_updaterc(self):
calls = []
returns = []
# apply_services
calls.append('/usr/sbin/update-rc.d httpd disable')
returns.append(FakePOpen())
calls.append('/usr/sbin/service httpd status')
returns.append(FakePOpen())
calls.append('/usr/sbin/service httpd stop')
returns.append(FakePOpen())
calls = popen_root_calls(calls)
services = {
"sysvinit": {
"httpd": {"enabled": "false", "ensureRunning": "false"}
}
}
hooks = [
cfn_helper.Hook(
'hook1',
'service.restarted',
'Resources.resource1.Metadata',
'root',
'/bin/services_restarted')
]
with mock.patch('os.path.exists') as mock_exists:
mock_exists.return_value = False
with mock.patch('subprocess.Popen') as mock_popen:
mock_popen.side_effect = returns
sh = cfn_helper.ServicesHandler(services, 'resource1', hooks)
sh.apply_services()
mock_popen.assert_has_calls(calls)
mock_exists.assert_any_call('/bin/systemctl')
mock_exists.assert_any_call('/sbin/service')
mock_exists.assert_any_call('/sbin/chkconfig')
class TestHupConfig(testtools.TestCase):
def test_load_main_section(self):
fcreds = tempfile.NamedTemporaryFile()
fcreds.write('AWSAccessKeyId=foo\nAWSSecretKey=bar\n'.encode('UTF-8'))
fcreds.flush()
main_conf = tempfile.NamedTemporaryFile()
main_conf.write(('''[main]
stack=teststack
credential-file=%s''' % fcreds.name).encode('UTF-8'))
main_conf.flush()
mainconfig = cfn_helper.HupConfig([open(main_conf.name)])
self.assertEqual(
'{stack: teststack, credential_file: %s, '
'region: nova, interval:10}' % fcreds.name,
str(mainconfig))
main_conf.close()
main_conf = tempfile.NamedTemporaryFile()
main_conf.write(('''[main]
stack=teststack
region=region1
credential-file=%s-invalid
interval=120''' % fcreds.name).encode('UTF-8'))
main_conf.flush()
e = self.assertRaises(Exception, cfn_helper.HupConfig,
[open(main_conf.name)])
self.assertIn('invalid credentials file', str(e))
fcreds.close()
def test_hup_config(self):
hooks_conf = tempfile.NamedTemporaryFile()
def write_hook_conf(f, name, triggers, path, action):
f.write((
'[%s]\ntriggers=%s\npath=%s\naction=%s\nrunas=root\n\n' % (
name, triggers, path, action)).encode('UTF-8'))
write_hook_conf(
hooks_conf,
'hook2',
'service2.restarted',
'Resources.resource2.Metadata',
'/bin/hook2')
write_hook_conf(
hooks_conf,
'hook1',
'service1.restarted',
'Resources.resource1.Metadata',
'/bin/hook1')
write_hook_conf(
hooks_conf,
'hook3',
'service3.restarted',
'Resources.resource3.Metadata',
'/bin/hook3')
write_hook_conf(
hooks_conf,
'cfn-http-restarted',
'service.restarted',
'Resources.resource.Metadata',
'/bin/cfn-http-restarted')
hooks_conf.flush()
fcreds = tempfile.NamedTemporaryFile()
fcreds.write('AWSAccessKeyId=foo\nAWSSecretKey=bar\n'.encode('UTF-8'))
fcreds.flush()
main_conf = tempfile.NamedTemporaryFile()
main_conf.write(('''[main]
stack=teststack
credential-file=%s
region=region1
interval=120''' % fcreds.name).encode('UTF-8'))
main_conf.flush()
mainconfig = cfn_helper.HupConfig([
open(main_conf.name),
open(hooks_conf.name)])
unique_resources = mainconfig.unique_resources_get()
self.assertThat([
'resource',
'resource1',
'resource2',
'resource3',
], ttm.Equals(sorted(unique_resources)))
hooks = sorted(mainconfig.hooks,
key=lambda hook: hook.resource_name_get())
self.assertEqual(len(hooks), 4)
self.assertEqual(
'{cfn-http-restarted, service.restarted,'
' Resources.resource.Metadata, root, /bin/cfn-http-restarted}',
str(hooks[0]))
self.assertEqual(
'{hook1, service1.restarted, Resources.resource1.Metadata,'
' root, /bin/hook1}', str(hooks[1]))
self.assertEqual(
'{hook2, service2.restarted, Resources.resource2.Metadata,'
' root, /bin/hook2}', str(hooks[2]))
self.assertEqual(
'{hook3, service3.restarted, Resources.resource3.Metadata,'
' root, /bin/hook3}', str(hooks[3]))
calls = []
calls.append('/bin/cfn-http-restarted')
calls.append('/bin/hook1')
calls.append('/bin/hook2')
calls.append('/bin/hook3')
calls = popen_root_calls(calls)
with mock.patch('subprocess.Popen') as mock_popen:
mock_popen.return_value = FakePOpen('All good')
for hook in hooks:
hook.event(hook.triggers, None, hook.resource_name_get())
hooks_conf.close()
fcreds.close()
main_conf.close()
mock_popen.assert_has_calls(calls)
class TestCfnHelper(testtools.TestCase):
def _check_metadata_content(self, content, value):
with tempfile.NamedTemporaryFile() as metadata_info:
metadata_info.write(content.encode('UTF-8'))
metadata_info.flush()
port = cfn_helper.metadata_server_port(metadata_info.name)
self.assertEqual(value, port)
def test_metadata_server_port(self):
self._check_metadata_content("http://172.20.42.42:8000\n", 8000)
def test_metadata_server_port_https(self):
self._check_metadata_content("https://abc.foo.bar:6969\n", 6969)
def test_metadata_server_port_noport(self):
self._check_metadata_content("http://172.20.42.42\n", None)
def test_metadata_server_port_justip(self):
self._check_metadata_content("172.20.42.42", None)
def test_metadata_server_port_weird(self):
self._check_metadata_content("::::", None)
self._check_metadata_content("beforecolons:aftercolons", None)
def test_metadata_server_port_emptyfile(self):
self._check_metadata_content("\n", None)
self._check_metadata_content("", None)
def test_metadata_server_nofile(self):
random_filename = self.getUniqueString()
self.assertEqual(None,
cfn_helper.metadata_server_port(random_filename))
def test_to_boolean(self):
self.assertTrue(cfn_helper.to_boolean(True))
self.assertTrue(cfn_helper.to_boolean('true'))
self.assertTrue(cfn_helper.to_boolean('yes'))
self.assertTrue(cfn_helper.to_boolean('1'))
self.assertTrue(cfn_helper.to_boolean(1))
self.assertFalse(cfn_helper.to_boolean(False))
self.assertFalse(cfn_helper.to_boolean('false'))
self.assertFalse(cfn_helper.to_boolean('no'))
self.assertFalse(cfn_helper.to_boolean('0'))
self.assertFalse(cfn_helper.to_boolean(0))
self.assertFalse(cfn_helper.to_boolean(None))
self.assertFalse(cfn_helper.to_boolean('fingle'))
def test_parse_creds_file(self):
def parse_creds_test(file_contents, creds_match):
with tempfile.NamedTemporaryFile(mode='w') as fcreds:
fcreds.write(file_contents)
fcreds.flush()
creds = cfn_helper.parse_creds_file(fcreds.name)
self.assertThat(creds_match, ttm.Equals(creds))
parse_creds_test(
'AWSAccessKeyId=foo\nAWSSecretKey=bar\n',
{'AWSAccessKeyId': 'foo', 'AWSSecretKey': 'bar'}
)
parse_creds_test(
'AWSAccessKeyId =foo\nAWSSecretKey= bar\n',
{'AWSAccessKeyId': 'foo', 'AWSSecretKey': 'bar'}
)
parse_creds_test(
'AWSAccessKeyId = foo\nAWSSecretKey = bar\n',
{'AWSAccessKeyId': 'foo', 'AWSSecretKey': 'bar'}
)
class TestMetadataRetrieve(testtools.TestCase):
def setUp(self):
super(TestMetadataRetrieve, self).setUp()
self.tdir = self.useFixture(fixtures.TempDir())
self.last_file = os.path.join(self.tdir.path, 'last_metadata')
def test_metadata_retrieve_files(self):
md_data = {"AWS::CloudFormation::Init": {"config": {"files": {
"/tmp/foo": {"content": "bar"}}}}}
md_str = json.dumps(md_data)
md = cfn_helper.Metadata('teststack', None)
with tempfile.NamedTemporaryFile(mode='w+') as default_file:
default_file.write(md_str)
default_file.flush()
self.assertThat(default_file.name, ttm.FileContains(md_str))
self.assertTrue(
md.retrieve(default_path=default_file.name,
last_path=self.last_file))
self.assertThat(self.last_file, ttm.FileContains(md_str))
self.assertThat(md_data, ttm.Equals(md._metadata))
md = cfn_helper.Metadata('teststack', None)
self.assertTrue(md.retrieve(default_path=default_file.name,
last_path=self.last_file))
self.assertThat(md_data, ttm.Equals(md._metadata))
def test_metadata_retrieve_none(self):
md = cfn_helper.Metadata('teststack', None)
default_file = os.path.join(self.tdir.path, 'default_file')
self.assertFalse(md.retrieve(default_path=default_file,
last_path=self.last_file))
self.assertIsNone(md._metadata)
displayed = self.useFixture(fixtures.StringStream('stdout'))
fake_stdout = displayed.stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', fake_stdout))
md.display()
fake_stdout.flush()
self.assertEqual(displayed.getDetails()['stdout'].as_text(), "")
def test_metadata_retrieve_passed(self):
md_data = {"AWS::CloudFormation::Init": {"config": {"files": {
"/tmp/foo": {"content": "bar"}}}}}
md_str = json.dumps(md_data)
md = cfn_helper.Metadata('teststack', None)
self.assertTrue(md.retrieve(meta_str=md_data,
last_path=self.last_file))
self.assertThat(md_data, ttm.Equals(md._metadata))
self.assertEqual(md_str, str(md))
displayed = self.useFixture(fixtures.StringStream('stdout'))
fake_stdout = displayed.stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', fake_stdout))
md.display()
fake_stdout.flush()
self.assertEqual(displayed.getDetails()['stdout'].as_text(),
"{\"AWS::CloudFormation::Init\": {\"config\": {"
"\"files\": {\"/tmp/foo\": {\"content\": \"bar\"}"
"}}}}\n")
def test_metadata_retrieve_by_key_passed(self):
md_data = {"foo": {"bar": {"fred.1": "abcd"}}}
md_str = json.dumps(md_data)
md = cfn_helper.Metadata('teststack', None)
self.assertTrue(md.retrieve(meta_str=md_data,
last_path=self.last_file))
self.assertThat(md_data, ttm.Equals(md._metadata))
self.assertEqual(md_str, str(md))
displayed = self.useFixture(fixtures.StringStream('stdout'))
fake_stdout = displayed.stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', fake_stdout))
md.display("foo")
fake_stdout.flush()
self.assertEqual(displayed.getDetails()['stdout'].as_text(),
"{\"bar\": {\"fred.1\": \"abcd\"}}\n")
def test_metadata_retrieve_by_nested_key_passed(self):
md_data = {"foo": {"bar": {"fred.1": "abcd"}}}
md_str = json.dumps(md_data)
md = cfn_helper.Metadata('teststack', None)
self.assertTrue(md.retrieve(meta_str=md_data,
last_path=self.last_file))
self.assertThat(md_data, ttm.Equals(md._metadata))
self.assertEqual(md_str, str(md))
displayed = self.useFixture(fixtures.StringStream('stdout'))
fake_stdout = displayed.stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', fake_stdout))
md.display("foo.bar.'fred.1'")
fake_stdout.flush()
self.assertEqual(displayed.getDetails()['stdout'].as_text(),
'"abcd"\n')
def test_metadata_retrieve_key_none(self):
md_data = {"AWS::CloudFormation::Init": {"config": {"files": {
"/tmp/foo": {"content": "bar"}}}}}
md_str = json.dumps(md_data)
md = cfn_helper.Metadata('teststack', None)
self.assertTrue(md.retrieve(meta_str=md_data,
last_path=self.last_file))
self.assertThat(md_data, ttm.Equals(md._metadata))
self.assertEqual(md_str, str(md))
displayed = self.useFixture(fixtures.StringStream('stdout'))
fake_stdout = displayed.stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', fake_stdout))
md.display("no_key")
fake_stdout.flush()
self.assertEqual(displayed.getDetails()['stdout'].as_text(), "")
def test_metadata_retrieve_by_nested_key_none(self):
md_data = {"foo": {"bar": {"fred.1": "abcd"}}}
md_str = json.dumps(md_data)
md = cfn_helper.Metadata('teststack', None)
self.assertTrue(md.retrieve(meta_str=md_data,
last_path=self.last_file))
self.assertThat(md_data, ttm.Equals(md._metadata))
self.assertEqual(md_str, str(md))
displayed = self.useFixture(fixtures.StringStream('stdout'))
fake_stdout = displayed.stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', fake_stdout))
md.display("foo.fred")
fake_stdout.flush()
self.assertEqual(displayed.getDetails()['stdout'].as_text(), "")
def test_metadata_retrieve_by_nested_key_none_with_matching_string(self):
md_data = {"foo": "bar"}
md_str = json.dumps(md_data)
md = cfn_helper.Metadata('teststack', None)
self.assertTrue(md.retrieve(meta_str=md_data,
last_path=self.last_file))
self.assertThat(md_data, ttm.Equals(md._metadata))
self.assertEqual(md_str, str(md))
displayed = self.useFixture(fixtures.StringStream('stdout'))
fake_stdout = displayed.stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', fake_stdout))
md.display("foo.bar")
fake_stdout.flush()
self.assertEqual(displayed.getDetails()['stdout'].as_text(), "")
def test_metadata_creates_cache(self):
temp_home = tempfile.mkdtemp()
def cleanup_temp_home(thome):
os.unlink(os.path.join(thome, 'cache', 'last_metadata'))
os.rmdir(os.path.join(thome, 'cache'))
os.rmdir(os.path.join(thome))
self.addCleanup(cleanup_temp_home, temp_home)
last_path = os.path.join(temp_home, 'cache', 'last_metadata')
md_data = {"AWS::CloudFormation::Init": {"config": {"files": {
"/tmp/foo": {"content": "bar"}}}}}
md_str = json.dumps(md_data)
md = cfn_helper.Metadata('teststack', None)
self.assertFalse(os.path.exists(last_path),
"last_metadata file already exists")
self.assertTrue(md.retrieve(meta_str=md_str, last_path=last_path))
self.assertTrue(os.path.exists(last_path),
"last_metadata file should exist")
# Ensure created dirs and file have right perms
self.assertTrue(os.stat(last_path).st_mode & 0o600 == 0o600)
self.assertTrue(
os.stat(os.path.dirname(last_path)).st_mode & 0o700 == 0o700)
def test_is_valid_metadata(self):
md_data = {"AWS::CloudFormation::Init": {"config": {"files": {
"/tmp/foo": {"content": "bar"}}}}}
md = cfn_helper.Metadata('teststack', None)
self.assertTrue(
md.retrieve(meta_str=md_data, last_path=self.last_file))
self.assertThat(md_data, ttm.Equals(md._metadata))
self.assertTrue(md._is_valid_metadata())
self.assertThat(
md_data['AWS::CloudFormation::Init'], ttm.Equals(md._metadata))
def test_remote_metadata(self):
md_data = {"AWS::CloudFormation::Init": {"config": {"files": {
"/tmp/foo": {"content": "bar"}}}}}
with mock.patch.object(
cfn.CloudFormationConnection, 'describe_stack_resource'
) as mock_dsr:
mock_dsr.return_value = {
'DescribeStackResourceResponse': {
'DescribeStackResourceResult': {
'StackResourceDetail': {'Metadata': md_data}}}}
md = cfn_helper.Metadata(
'teststack',
None,
access_key='foo',
secret_key='bar')
self.assertTrue(md.retrieve(last_path=self.last_file))
self.assertThat(md_data, ttm.Equals(md._metadata))
with tempfile.NamedTemporaryFile(mode='w') as fcreds:
fcreds.write('AWSAccessKeyId=foo\nAWSSecretKey=bar\n')
fcreds.flush()
md = cfn_helper.Metadata(
'teststack', None, credentials_file=fcreds.name)
self.assertTrue(md.retrieve(last_path=self.last_file))
self.assertThat(md_data, ttm.Equals(md._metadata))
def test_nova_meta_with_cache(self):
meta_in = {"uuid": "f9431d18-d971-434d-9044-5b38f5b4646f",
"availability_zone": "nova",
"hostname": "as-wikidatabase-4ykioj3lgi57.novalocal",
"launch_index": 0,
"meta": {},
"public_keys": {"heat_key": "ssh-rsa etc...\n"},
"name": "as-WikiDatabase-4ykioj3lgi57"}
md_str = json.dumps(meta_in)
md = cfn_helper.Metadata('teststack', None)
with tempfile.NamedTemporaryFile(mode='w+') as default_file:
default_file.write(md_str)
default_file.flush()
self.assertThat(default_file.name, ttm.FileContains(md_str))
meta_out = md.get_nova_meta(cache_path=default_file.name)
self.assertEqual(meta_in, meta_out)
def test_nova_meta_curl(self):
url = 'http://169.254.169.254/openstack/2012-08-10/meta_data.json'
temp_home = tempfile.mkdtemp()
cache_path = os.path.join(temp_home, 'meta_data.json')
def cleanup_temp_home(thome):
os.unlink(cache_path)
os.rmdir(thome)
self.addCleanup(cleanup_temp_home, temp_home)
meta_in = {"uuid": "f9431d18-d971-434d-9044-5b38f5b4646f",
"availability_zone": "nova",
"hostname": "as-wikidatabase-4ykioj3lgi57.novalocal",
"launch_index": 0,
"meta": {"freddy": "is hungry"},
"public_keys": {"heat_key": "ssh-rsa etc...\n"},
"name": "as-WikiDatabase-4ykioj3lgi57"}
md_str = json.dumps(meta_in)
def write_cache_file(*params, **kwargs):
with open(cache_path, 'w+') as cache_file:
cache_file.write(md_str)
cache_file.flush()
self.assertThat(cache_file.name, ttm.FileContains(md_str))
return FakePOpen('Downloaded', '', 0)
with mock.patch('subprocess.Popen') as mock_popen:
mock_popen.side_effect = write_cache_file
md = cfn_helper.Metadata('teststack', None)
meta_out = md.get_nova_meta(cache_path=cache_path)
self.assertEqual(meta_in, meta_out)
mock_popen.assert_has_calls(
popen_root_calls(['curl -o %s %s' % (cache_path, url)]))
def test_nova_meta_curl_corrupt(self):
url = 'http://169.254.169.254/openstack/2012-08-10/meta_data.json'
temp_home = tempfile.mkdtemp()
cache_path = os.path.join(temp_home, 'meta_data.json')
def cleanup_temp_home(thome):
os.unlink(cache_path)
os.rmdir(thome)
self.addCleanup(cleanup_temp_home, temp_home)
md_str = "this { is not really json"
def write_cache_file(*params, **kwargs):
with open(cache_path, 'w+') as cache_file:
cache_file.write(md_str)
cache_file.flush()
self.assertThat(cache_file.name, ttm.FileContains(md_str))
return FakePOpen('Downloaded', '', 0)
with mock.patch('subprocess.Popen') as mock_popen:
mock_popen.side_effect = write_cache_file
md = cfn_helper.Metadata('teststack', None)
meta_out = md.get_nova_meta(cache_path=cache_path)
self.assertEqual(None, meta_out)
mock_popen.assert_has_calls(
popen_root_calls(['curl -o %s %s' % (cache_path, url)]))
def test_nova_meta_curl_failed(self):
url = 'http://169.254.169.254/openstack/2012-08-10/meta_data.json'
temp_home = tempfile.mkdtemp()
cache_path = os.path.join(temp_home, 'meta_data.json')
def cleanup_temp_home(thome):
os.rmdir(thome)
self.addCleanup(cleanup_temp_home, temp_home)
with mock.patch('subprocess.Popen') as mock_popen:
mock_popen.return_value = FakePOpen('Failed', '', 1)
md = cfn_helper.Metadata('teststack', None)
meta_out = md.get_nova_meta(cache_path=cache_path)
self.assertEqual(None, meta_out)
mock_popen.assert_has_calls(
popen_root_calls(['curl -o %s %s' % (cache_path, url)]))
def test_get_tags(self):
fake_tags = {'foo': 'fee',
'apple': 'red'}
md_data = {"uuid": "f9431d18-d971-434d-9044-5b38f5b4646f",
"availability_zone": "nova",
"hostname": "as-wikidatabase-4ykioj3lgi57.novalocal",
"launch_index": 0,
"meta": fake_tags,
"public_keys": {"heat_key": "ssh-rsa etc...\n"},
"name": "as-WikiDatabase-4ykioj3lgi57"}
tags_expect = fake_tags
tags_expect['InstanceId'] = md_data['uuid']
md = cfn_helper.Metadata('teststack', None)
with mock.patch.object(md, 'get_nova_meta') as mock_method:
mock_method.return_value = md_data
tags = md.get_tags()
mock_method.assert_called_once_with()
self.assertEqual(tags_expect, tags)
def test_get_instance_id(self):
uuid = "f9431d18-d971-434d-9044-5b38f5b4646f"
md_data = {"uuid": uuid,
"availability_zone": "nova",
"hostname": "as-wikidatabase-4ykioj3lgi57.novalocal",
"launch_index": 0,
"public_keys": {"heat_key": "ssh-rsa etc...\n"},
"name": "as-WikiDatabase-4ykioj3lgi57"}
md = cfn_helper.Metadata('teststack', None)
with mock.patch.object(md, 'get_nova_meta') as mock_method:
mock_method.return_value = md_data
self.assertEqual(md.get_instance_id(), uuid)
mock_method.assert_called_once_with()
class TestCfnInit(testtools.TestCase):
def setUp(self):
super(TestCfnInit, self).setUp()
self.tdir = self.useFixture(fixtures.TempDir())
self.last_file = os.path.join(self.tdir.path, 'last_metadata')
def test_cfn_init(self):
with tempfile.NamedTemporaryFile(mode='w+') as foo_file:
md_data = {"AWS::CloudFormation::Init": {"config": {"files": {
foo_file.name: {"content": "bar"}}}}}
md = cfn_helper.Metadata('teststack', None)
self.assertTrue(
md.retrieve(meta_str=md_data, last_path=self.last_file))
md.cfn_init()
self.assertThat(foo_file.name, ttm.FileContains('bar'))
def test_cfn_init_with_ignore_errors_false(self):
md_data = {"AWS::CloudFormation::Init": {"config": {"commands": {
"00_foo": {"command": "/bin/command1",
"ignoreErrors": "false"}}}}}
with mock.patch('subprocess.Popen') as mock_popen:
mock_popen.return_value = FakePOpen('Doing something', 'error', -1)
md = cfn_helper.Metadata('teststack', None)
self.assertTrue(
md.retrieve(meta_str=md_data, last_path=self.last_file))
self.assertRaises(cfn_helper.CommandsHandlerRunError, md.cfn_init)
mock_popen.assert_has_calls(popen_root_calls(['/bin/command1']))
def test_cfn_init_with_ignore_errors_true(self):
calls = []
returns = []
calls.append('/bin/command1')
returns.append(FakePOpen('Doing something', 'error', -1))
calls.append('/bin/command2')
returns.append(FakePOpen('All good'))
calls = popen_root_calls(calls)
md_data = {"AWS::CloudFormation::Init": {"config": {"commands": {
"00_foo": {"command": "/bin/command1",
"ignoreErrors": "true"},
"01_bar": {"command": "/bin/command2",
"ignoreErrors": "false"}
}}}}
with mock.patch('subprocess.Popen') as mock_popen:
mock_popen.side_effect = returns
md = cfn_helper.Metadata('teststack', None)
self.assertTrue(
md.retrieve(meta_str=md_data, last_path=self.last_file))
md.cfn_init()
mock_popen.assert_has_calls(calls)
class TestSourcesHandler(testtools.TestCase):
def test_apply_sources_empty(self):
sh = cfn_helper.SourcesHandler({})
sh.apply_sources()
def _test_apply_sources(self, url, end_file):
dest = tempfile.mkdtemp()
self.addCleanup(os.rmdir, dest)
sources = {dest: url}
td = os.path.dirname(end_file)
er = "mkdir -p '%s'; cd '%s'; curl -s '%s' | gunzip | tar -xvf -"
calls = popen_root_calls([er % (dest, dest, url)])
with mock.patch.object(tempfile, 'mkdtemp') as mock_mkdtemp:
mock_mkdtemp.return_value = td
with mock.patch('subprocess.Popen') as mock_popen:
mock_popen.return_value = FakePOpen('Curl good')
sh = cfn_helper.SourcesHandler(sources)
sh.apply_sources()
mock_popen.assert_has_calls(calls)
mock_mkdtemp.assert_called_with()
def test_apply_sources_github(self):
url = "https://github.com/NoSuchProject/tarball/NoSuchTarball"
dest = tempfile.mkdtemp()
self.addCleanup(os.rmdir, dest)
sources = {dest: url}
er = "mkdir -p '%s'; cd '%s'; curl -s '%s' | gunzip | tar -xvf -"
calls = popen_root_calls([er % (dest, dest, url)])
with mock.patch('subprocess.Popen') as mock_popen:
mock_popen.return_value = FakePOpen('Curl good')
sh = cfn_helper.SourcesHandler(sources)
sh.apply_sources()
mock_popen.assert_has_calls(calls)
def test_apply_sources_general(self):
url = "https://website.no.existe/a/b/c/file.tar.gz"
dest = tempfile.mkdtemp()
self.addCleanup(os.rmdir, dest)
sources = {dest: url}
er = "mkdir -p '%s'; cd '%s'; curl -s '%s' | gunzip | tar -xvf -"
calls = popen_root_calls([er % (dest, dest, url)])
with mock.patch('subprocess.Popen') as mock_popen:
mock_popen.return_value = FakePOpen('Curl good')
sh = cfn_helper.SourcesHandler(sources)
sh.apply_sources()
mock_popen.assert_has_calls(calls)
def test_apply_source_cmd(self):
sh = cfn_helper.SourcesHandler({})
er = "mkdir -p '%s'; cd '%s'; curl -s '%s' | %s | tar -xvf -"
dest = '/tmp'
# test tgz
url = 'http://www.example.com/a.tgz'
cmd = sh._apply_source_cmd(dest, url)
self.assertEqual(er % (dest, dest, url, "gunzip"), cmd)
# test tar.gz
url = 'http://www.example.com/a.tar.gz'
cmd = sh._apply_source_cmd(dest, url)
self.assertEqual(er % (dest, dest, url, "gunzip"), cmd)
# test github - tarball 1
url = 'https://github.com/openstack/heat-cfntools/tarball/master'
cmd = sh._apply_source_cmd(dest, url)
self.assertEqual(er % (dest, dest, url, "gunzip"), cmd)
# test github - tarball 2
url = 'https://github.com/openstack/heat-cfntools/tarball/master/'
cmd = sh._apply_source_cmd(dest, url)
self.assertEqual(er % (dest, dest, url, "gunzip"), cmd)
# test tbz2
url = 'http://www.example.com/a.tbz2'
cmd = sh._apply_source_cmd(dest, url)
self.assertEqual(er % (dest, dest, url, "bunzip2"), cmd)
# test tar.bz2
url = 'http://www.example.com/a.tar.bz2'
cmd = sh._apply_source_cmd(dest, url)
self.assertEqual(er % (dest, dest, url, "bunzip2"), cmd)
# test zip
er = "mkdir -p '%s'; cd '%s'; curl -s -o '%s' '%s' && unzip -o '%s'"
url = 'http://www.example.com/a.zip'
d = "/tmp/tmp2I0yNK"
tmp = "%s/a.zip" % d
with mock.patch.object(tempfile, 'mkdtemp') as mock_mkdtemp:
mock_mkdtemp.return_value = d
cmd = sh._apply_source_cmd(dest, url)
self.assertEqual(er % (dest, dest, tmp, url, tmp), cmd)
# test gz
er = "mkdir -p '%s'; cd '%s'; curl -s '%s' | %s > '%s'"
url = 'http://www.example.com/a.sh.gz'
cmd = sh._apply_source_cmd(dest, url)
self.assertEqual(er % (dest, dest, url, "gunzip", "a.sh"), cmd)
# test bz2
url = 'http://www.example.com/a.sh.bz2'
cmd = sh._apply_source_cmd(dest, url)
self.assertEqual(er % (dest, dest, url, "bunzip2", "a.sh"), cmd)
# test other
url = 'http://www.example.com/a.sh'
cmd = sh._apply_source_cmd(dest, url)
self.assertEqual("", cmd)
mock_mkdtemp.assert_called_with()
| bbandaru/heat-cfntools | heat_cfntools/tests/test_cfn_helper.py | Python | apache-2.0 | 49,595 |
"""
Views related to course tabs
"""
from Crypto.Cipher import DES
import base64
from access import has_course_access
from util.json_request import expect_json, JsonResponse
from django.conf import settings
from django.http import HttpResponseNotFound
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django_future.csrf import ensure_csrf_cookie
from django.views.decorators.http import require_http_methods
from edxmako.shortcuts import render_to_response
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.django import loc_mapper
from xmodule.modulestore.locator import BlockUsageLocator
from ..utils import get_modulestore
from django.utils.translation import ugettext as _
__all__ = ['tabs_handler']
def initialize_course_tabs(course, user):
"""
set up the default tabs
    I've added this because when we add static tabs, the LMS expects either None for the tabs list or
    at least a list populated with the minimal set of tabs
    @TODO: I don't like the fact that the presentation tier is aware of these data-related constraints; let's find a better
    place for this. Also, rather than using a simple list of dictionaries, a nice class model would be helpful here
"""
# This logic is repeated in xmodule/modulestore/tests/factories.py
# so if you change anything here, you need to also change it there.
course.tabs = [
# Translators: "Courseware" is the title of the page where you access a course's videos and problems.
{"type": "courseware", "name": _("Courseware")},
# Translators: "Course Info" is the name of the course's information and updates page
{"type": "course_info", "name": _("Course Info")},
# Translators: "Discussion" is the title of the course forum page
{"type": "discussion", "name": _("Discussion")},
# Translators: "Wiki" is the title of the course's wiki page
{"type": "wiki", "name": _("Wiki")},
# Translators: "Progress" is the title of the student's grade information page
{"type": "progress", "name": _("Progress")},
]
modulestore('direct').update_item(course, user.id)
@expect_json
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT"))
def tabs_handler(request, tag=None, package_id=None, branch=None, version_guid=None, block=None):
"""
The restful handler for static tabs.
GET
html: return page for editing static tabs
json: not supported
PUT or POST
json: update the tab order. It is expected that the request body contains a JSON-encoded dict with entry "tabs".
The value for "tabs" is an array of tab locators, indicating the desired order of the tabs.
Creating a tab, deleting a tab, or changing its contents is not supported through this method.
Instead use the general xblock URL (see item.xblock_handler).
"""
locator = BlockUsageLocator(package_id=package_id, branch=branch, version_guid=version_guid, block_id=block)
if not has_course_access(request.user, locator):
raise PermissionDenied()
old_location = loc_mapper().translate_locator_to_location(locator)
store = get_modulestore(old_location)
course_item = store.get_item(old_location)
if 'application/json' in request.META.get('HTTP_ACCEPT', 'application/json'):
if request.method == 'GET':
raise NotImplementedError('coming soon')
else:
if 'tabs' in request.json:
def get_location_for_tab(tab):
""" Returns the location (old-style) for a tab. """
return loc_mapper().translate_locator_to_location(BlockUsageLocator(tab))
tabs = request.json['tabs']
# get list of existing static tabs in course
# make sure they are the same lengths (i.e. the number of passed in tabs equals the number
# that we know about) otherwise we will inadvertently drop some!
existing_static_tabs = [t for t in course_item.tabs if t['type'] == 'static_tab']
if len(existing_static_tabs) != len(tabs):
return JsonResponse(
{"error": "number of tabs must be {}".format(len(existing_static_tabs))}, status=400
)
# load all reference tabs, return BadRequest if we can't find any of them
tab_items = []
for tab in tabs:
item = modulestore('direct').get_item(get_location_for_tab(tab))
if item is None:
return JsonResponse(
{"error": "no tab for found location {}".format(tab)}, status=400
)
tab_items.append(item)
# now just go through the existing course_tabs and re-order the static tabs
reordered_tabs = []
static_tab_idx = 0
for tab in course_item.tabs:
if tab['type'] == 'static_tab':
reordered_tabs.append(
{'type': 'static_tab',
'name': tab_items[static_tab_idx].display_name,
'url_slug': tab_items[static_tab_idx].location.name,
}
)
static_tab_idx += 1
else:
reordered_tabs.append(tab)
# OK, re-assemble the static tabs in the new order
course_item.tabs = reordered_tabs
modulestore('direct').update_item(course_item, request.user.id)
return JsonResponse()
else:
raise NotImplementedError('Creating or changing tab content is not supported.')
elif request.method == 'GET': # assume html
        # see if tabs have been uninitialized (e.g. supporting courses created before tab support in studio)
if course_item.tabs is None or len(course_item.tabs) == 0:
initialize_course_tabs(course_item, request.user)
# first get all static tabs from the tabs list
# we do this because this is also the order in which items are displayed in the LMS
static_tabs_refs = [t for t in course_item.tabs if t['type'] == 'static_tab']
static_tabs = []
for static_tab_ref in static_tabs_refs:
static_tab_loc = old_location.replace(category='static_tab', name=static_tab_ref['url_slug'])
static_tabs.append(modulestore('direct').get_item(static_tab_loc))
components = [
loc_mapper().translate_location(
course_item.location.course_id, static_tab.location, False, True
)
for static_tab
in static_tabs
]
return render_to_response('edit-tabs.html', {
'context_course': course_item,
'components': components,
'course_locator': locator
})
else:
return HttpResponseNotFound()
# "primitive" tab edit functions driven by the command line.
# These should be replaced/deleted by a more capable GUI someday.
# Note that the command line UI identifies the tabs with 1-based
# indexing, but this implementation code is standard 0-based.
def validate_args(num, tab_type):
"Throws for the disallowed cases."
if num <= 1:
raise ValueError('Tabs 1 and 2 cannot be edited')
if tab_type == 'static_tab':
raise ValueError('Tabs of type static_tab cannot be edited here (use Studio)')
def primitive_delete(course, num):
"Deletes the given tab number (0 based)."
tabs = course.tabs
validate_args(num, tabs[num].get('type', ''))
del tabs[num]
# Note for future implementations: if you delete a static_tab, then Chris Dodge
# points out that there's other stuff to delete beyond this element.
# This code happens to not delete static_tab so it doesn't come up.
modulestore('direct').update_item(course, '**replace_user**')
def primitive_insert(course, num, tab_type, name):
"Inserts a new tab at the given number (0 based)."
validate_args(num, tab_type)
new_tab = {u'type': unicode(tab_type), u'name': unicode(name)}
tabs = course.tabs
tabs.insert(num, new_tab)
modulestore('direct').update_item(course, '**replace_user**')
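# Illustrative usage of the primitive helpers above (hypothetical shell
# session; `course` is a course descriptor fetched from the modulestore, and
# positions use this module's 0-based indexing, so tabs 0 and 1 are
# protected):
#     primitive_insert(course, 2, 'notes', 'My Notes')
#     primitive_delete(course, 3)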
| XiaodunServerGroup/xiaodun-platform | cms/djangoapps/contentstore/views/tabs.py | Python | agpl-3.0 | 8,469 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Resource.resource_type'
db.delete_column('curated_resources_resource', 'resource_type_id')
# Adding M2M table for field resource_type on 'Resource'
db.create_table('curated_resources_resource_resource_type', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('resource', models.ForeignKey(orm['curated_resources.resource'], null=False)),
('resourcetype', models.ForeignKey(orm['curated_resources.resourcetype'], null=False))
))
db.create_unique('curated_resources_resource_resource_type', ['resource_id', 'resourcetype_id'])
def backwards(self, orm):
# Adding field 'Resource.resource_type'
db.add_column('curated_resources_resource', 'resource_type',
self.gf('django.db.models.fields.related.ForeignKey')(default=1, related_name='resources', to=orm['curated_resources.ResourceType']),
keep_default=False)
# Removing M2M table for field resource_type on 'Resource'
db.delete_table('curated_resources_resource_resource_type')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'curated_resources.audience': {
'Meta': {'object_name': 'Audience'},
'curators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'curated_resource_audiences'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'curated_resources.domain': {
'Meta': {'object_name': 'Domain'},
'curators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'curated_resource_domains'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['curated_resources.Domain']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'curated_resources.resource': {
'Meta': {'object_name': 'Resource'},
'cost': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'curators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'curated_resource_resources'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {}),
'destination_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'links_to_resource'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'destination_object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'domains': ('mptt.fields.TreeManyToManyField', [], {'blank': 'True', 'related_name': "'resources'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['curated_resources.Domain']"}),
'duration': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'related_to': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'related_to_rel_+'", 'null': 'True', 'to': "orm['curated_resources.Resource']"}),
'resource_type': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'resources'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['curated_resources.ResourceType']"}),
'short_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'default': 'datetime.datetime(2013, 6, 3, 0, 0)', 'unique': 'True', 'max_length': '75'}),
'suitable_for': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'resources'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['curated_resources.Audience']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'topics': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'resources'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['curated_resources.Topic']"})
},
'curated_resources.resourcetype': {
'Meta': {'object_name': 'ResourceType'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'resource_type': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'curated_resources.topic': {
'Meta': {'object_name': 'Topic'},
'curators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'curated_resource_topics'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['curated_resources'] | evildmp/django-curated-resources | curated_resources/migrations/0007_auto__del_field_resource_resource_type.py | Python | bsd-2-clause | 9,905 |
import numpy
import matplotlib.pyplot as plot
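# Sample the parabola y = x**2 - 2*x + 1 = (x - 1)**2 on [-3, 2] and plot it.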
X = numpy.linspace(-3, 2, 200)
Y = X ** 2 - 2 * X + 1.
plot.plot(X, Y)
plot.show()
| moonbury/notebooks | github/MatplotlibCookbook/Chapter 1/03.py | Python | gpl-3.0 | 131 |
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Given a GYP/GN filename, sort C-ish source files in that file.
Shows a diff and prompts for confirmation before doing the deed.
Works great with tools/git/for-all-touched-files.py.
Limitations:
1) Comments used as section headers
If a comment (1+ lines starting with #) appears in a source list without a
preceding blank line, the tool assumes that the comment is about the next
line. For example, given the following source list,
sources = [
"b.cc",
# Comment.
"a.cc",
"c.cc",
]
the tool will produce the following output:
sources = [
# Comment.
"a.cc",
"b.cc",
"c.cc",
]
This is not correct if the comment is for starting a new section like:
sources = [
"b.cc",
# These are for Linux.
"a.cc",
"c.cc",
]
The tool cannot disambiguate the two types of comments. The problem can be
worked around by inserting a blank line before the comment because the tool
interprets a blank line as the end of a source list.
2) Sources commented out
Sometimes sources are commented out with their positions kept in the
alphabetical order, but what if the list is not sorted correctly? For
example, given the following source list,
sources = [
"a.cc",
# "b.cc",
"d.cc",
"c.cc",
]
the tool will produce the following output:
sources = [
"a.cc",
"c.cc",
# "b.cc",
"d.cc",
]
This is because the tool assumes that the comment (# "b.cc",) is about the
next line ("d.cc",). This kind of errors should be fixed manually, or the
commented-out code should be deleted.
3) " and ' are used both used in the same source list (GYP only problem)
If both " and ' are used in the same source list, sources quoted with " will
  appear first in the output. The problem is rare enough that the tool does not
  attempt to normalize them. Hence these errors should be fixed
  manually.
4) Spaces and tabs used in the same source list
Similarly, if spaces and tabs are both used in the same source list, sources
  indented with tabs will appear first in the output. These errors
should be fixed manually.
"""
import difflib
import optparse
import re
import sys
from yes_no import YesNo
SUFFIXES = ['c', 'cc', 'cpp', 'h', 'mm', 'rc', 'rc.version', 'ico', 'def',
'release']
SOURCE_PATTERN = re.compile(r'^\s+[\'"].*\.(%s)[\'"],$' %
'|'.join([re.escape(x) for x in SUFFIXES]))
COMMENT_PATTERN = re.compile(r'^\s+#')
def SortSources(original_lines):
"""Sort source file names in |original_lines|.
Args:
original_lines: Lines of the original content as a list of strings.
Returns:
Lines of the sorted content as a list of strings.
The algorithm is fairly naive. The code tries to find a list of C-ish
source file names by a simple regex, then sort them. The code does not try
to understand the syntax of the build files. See the file comment above for
details.
"""
output_lines = []
comments = []
sources = []
for line in original_lines:
if re.search(COMMENT_PATTERN, line):
comments.append(line)
elif re.search(SOURCE_PATTERN, line):
# Associate the line with the preceding comments.
sources.append([line, comments])
comments = []
else:
# |sources| should be flushed first, to handle comments at the end of a
# source list correctly.
if sources:
for source_line, source_comments in sorted(sources):
output_lines.extend(source_comments)
output_lines.append(source_line)
sources = []
if comments:
output_lines.extend(comments)
comments = []
output_lines.append(line)
return output_lines
def ProcessFile(filename, should_confirm):
"""Process the input file and rewrite if needed.
Args:
filename: Path to the input file.
should_confirm: If true, diff and confirmation prompt are shown.
"""
original_lines = []
with open(filename, 'r') as input_file:
for line in input_file:
original_lines.append(line)
new_lines = SortSources(original_lines)
if original_lines == new_lines:
print '%s: no change' % filename
return
if should_confirm:
diff = difflib.unified_diff(original_lines, new_lines)
sys.stdout.writelines(diff)
if not YesNo('Use new file (y/N)'):
return
with open(filename, 'w') as output_file:
output_file.writelines(new_lines)
def main():
parser = optparse.OptionParser(usage='%prog filename1 filename2 ...')
parser.add_option('-f', '--force', action='store_false', default=True,
dest='should_confirm',
help='Turn off confirmation prompt.')
opts, filenames = parser.parse_args()
if len(filenames) < 1:
parser.print_help()
return 1
for filename in filenames:
ProcessFile(filename, opts.should_confirm)
if __name__ == '__main__':
sys.exit(main())
| hujiajie/chromium-crosswalk | tools/sort_sources.py | Python | bsd-3-clause | 5,062 |
# -*- coding: utf-8 -*-
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
The Printer is an object that can directly print
Strs in the Printer context.
"""
#<DefineAugmentation>
import ShareYourSystem as SYS
BaseModuleStr="ShareYourSystem.Standards.Interfacers.Interfacer"
DecorationModuleStr="ShareYourSystem.Standards.Classors.Doer"
SYS.setSubModule(globals())
SYS.addDo('Printer','_Print','Printing','Printed')
#</DefineAugmentation>
#<ImportSpecificModules>
import copy
import numpy
#</ImportSpecificModules>
#<DefineLocals>
PrintDictIndentStr=" "
PrintListIndentStr=" "
PrintIndentStr=" /"
PrintEofStr="\n"
PrintIdBool=True
PrintCircularStr="{...}"
PrintAlineaStr=""
PrintAlreadyIdIntsList=[]
def getNumpyArrayStr(_NumpyArray):
#Definition the ShapeList
ShapeList=list(numpy.shape(_NumpyArray))
#debug
'''
print('Printer l.25 : getNumpyArrayStr')
print('ShapeList is',ShapeList)
print('')
'''
#Return the array directly if it is small or either a short represented version of it
if (len(ShapeList)==1 and ShapeList[0]<3) or (len(ShapeList)>1 and ShapeList[1]<3):
return str(_NumpyArray)
return "<numpy.ndarray shape "+str(ShapeList)+">"
def getPointerStr(_Variable,**_KwargVariablesDict):
#Debug
'''
print('Printer l.39 : getPointerStr')
print('')
'''
#Define
if hasattr(_Variable,'PrintingInfoStr'):
InfoStr=_Variable.PrintingInfoStr
else:
InfoStr=_KwargVariablesDict['InfoStr'] if 'InfoStr' in _KwargVariablesDict else ""
#Debug
'''
print('Printer l.71 : InfoStr')
print('InfoStr is ')
print(InfoStr)
print('')
'''
#set in the _KwargVariablesDict
if 'PrintDeepInt' not in _KwargVariablesDict:
_KwargVariablesDict['PrintDeepInt']=0
#Definition the Local alinea
PrintLocalAlineaStr=PrintAlineaStr if _KwargVariablesDict['PrintDeepInt']==0 else ""
#Define
if type(_Variable).__name__=='Database':
PrintedVariableStr=_Variable._Database__name
elif type(_Variable).__name__=='Collection':
PrintedVariableStr=_Variable._Collection__name
else:
		PrintedVariableStr=_Variable.__name__ if hasattr(_Variable,'__name__') else ""
#Debug
'''
print('l 85 Printer')
print('type(_Variable).__name__ is ')
print(type(_Variable).__name__)
print('PrintedVariableStr is ')
print(PrintedVariableStr)
print('')
'''
#set
PrintIdInt=_Variable.PrintIdInt if hasattr(
_Variable,'PrintIdInt'
) else id(_Variable)
#init
PointerStr=PrintLocalAlineaStr+"<"+PrintedVariableStr+" ("+_Variable.__class__.__name__
#Check
if PrintIdBool:
PointerStr+="), "+str(PrintIdInt)
else:
PointerStr+=")"
#add
PointerStr+=InfoStr+" >"
#return
return PointerStr
def getDictStr(
_DictatedVariable,**_KwargVariablesDict
):
#set in the _KwargVariablesDict
if 'PrintDeepInt' not in _KwargVariablesDict:
_KwargVariablesDict['PrintDeepInt']=0
#debug
'''
print('Printer l.59 : getDictStr')
print('_KwargVariablesDict is ',str(_KwargVariablesDict))
print('')
'''
#Global
global PrintAlineaStr
#Definition the LocalPrintAlineaStr
LocalPrintAlineaStr=PrintAlineaStr+"".join(
[PrintIndentStr]*(_KwargVariablesDict['PrintDeepInt']))
#Init the DictStr
DictStr="\n"+LocalPrintAlineaStr+"{ "
#Scan the Items (integrativ loop)
if type(_DictatedVariable)!=dict and hasattr(
_DictatedVariable,"items"
):
#debug
'''
print('l 135 Printer')
print('_DictatedVariable is ')
print(_DictatedVariable)
print('type(_DictatedVariable) is ')
print(type(_DictatedVariable))
print('')
'''
#items
PrintTuplesList=_DictatedVariable.items()
else:
#sort
PrintTuplesList=sorted(
_DictatedVariable.iteritems(), key=lambda key_value: key_value[0]
)
#Integrativ loop for seriaizing the items
for __PrintKeyStr,__PrintValueVariable in PrintTuplesList:
#debug
'''
print('Printer l.127')
print('__PrintKeyStr is',__PrintKeyStr)
print('')
'''
#set the begin of the line
DictStr+="\n"+LocalPrintAlineaStr+PrintDictIndentStr
#Force the cast into Str
if type(__PrintKeyStr) not in [unicode,str]:
__PrintKeyStr=str(__PrintKeyStr)
#Get the WordStrsList
WordStrsList=SYS.getWordStrsListWithStr(__PrintKeyStr)
#Init the PrintValueVariableStr
PrintValueVariableStr="None"
#Split the case if it is a pointing variable or not
if len(WordStrsList)>0:
#Value is displayed
"""
if SYS.getWordStrsListWithStr(__PrintKeyStr)[-1]=="Pointer":
#Pointer Case
PrintValueVariableStr=getPointerStr(
__PrintValueVariable,
**_KwargVariablesDict
)
"""
"""
elif ''.join(SYS.getWordStrsListWithStr(__PrintKeyStr)[-2:])=="PointersList":
#debug
'''
print('__PrintValueVariable is ',__PrintValueVariable)
print('')
'''
#Pointer Case
PrintValueVariableStr=str(
map(
lambda List:
getPointerStr(
List,
**_KwargVariablesDict),
__PrintValueVariable
)
) if type(__PrintValueVariable)==list else "None"
"""
#Special Suffix Cases
if PrintValueVariableStr=="None":
#debug
'''
print('go to represent')
print('__PrintKeyStr is ',__PrintKeyStr)
print('id(__PrintValueVariable) is ',id(__PrintValueVariable))
print('')
'''
#Other Cases
PrintValueVariableStr=getPrintStr(
__PrintValueVariable,
**_KwargVariablesDict
)
#Key and Value Case
DictStr+="'"+__PrintKeyStr+"' : "+PrintValueVariableStr
#Add a last line
DictStr+="\n"+LocalPrintAlineaStr+"}"
#debug
'''
print('DictStr is ',DictStr)
print('')
'''
#return the DictStr
return DictStr
def getListStr(_List,**_KwargVariablesDict):
#Global
global PrintAlineaStr
#set in the _KwargVariablesDict
if 'PrintDeepInt' not in _KwargVariablesDict:
_KwargVariablesDict['PrintDeepInt']=0
#debug
'''
print('Printer l.166 : getListStr')
print('_KwargVariablesDict is ',str(_KwargVariablesDict))
print('_List is '+str(_List))
print('')
'''
#Init the DictStr
if type(_List)==list:
BeginBracketStr='['
EndBracketStr=']'
else:
BeginBracketStr='('
EndBracketStr=')'
#Definition the LocalPrintAlineaStr
LocalPrintAlineaStr=PrintAlineaStr+"".join(
[PrintIndentStr]*(_KwargVariablesDict['PrintDeepInt']))
#Do the first Jump
ListStr="\n"+LocalPrintAlineaStr+BeginBracketStr
#Scan the Items (integrativ loop)
for ListInt,List in enumerate(_List):
#set the begin of the line
ListStr+="\n"+LocalPrintAlineaStr+PrintListIndentStr
#Get the represented version
PrintValueVariableStr=getPrintStr(
List,**dict(
_KwargVariablesDict,
**{'PrintingAlineaIsBool':False}
)
)
#Key and Value Case
ListStr+=str(ListInt)+" : "+PrintValueVariableStr
#Add a last line
ListStr+="\n"+LocalPrintAlineaStr+EndBracketStr
#return the DictStr
return ListStr
def getPrintStr(_Variable,**_KwargVariablesDict):
#Define global
global PrintAlreadyIdIntsList
#set in the _KwargVariablesDict
if 'PrintDeepInt' not in _KwargVariablesDict:
_KwargVariablesDict['PrintDeepInt']=0
#debug
'''
print('Printer l.213 : getPrintStr')
#print('_KwargVariablesDict is ',str(_KwargVariablesDict))
print('_Variable is '+str(_Variable))
#print('type(_Variable) is '+str(type(_Variable)))
#print("hasattr(_Variable,'__repr__') is "+str(hasattr(_Variable,"__repr__")))
##if hasattr(_Variable,"__repr__"):
# print('hasattr(_Variable.__class__,"InspectedOrderedDict") is '+str(
# hasattr(_Variable.__class__,"InspectedOrderedDict")))
# if hasattr(_Variable.__class__,"InspectedOrderedDict"):
# print("_Variable.__class__.InspectedOrderedDict['__repr__']['KwargVariablesListKeyStr'] is "+str(
# _Variable.__class__.InspectedOrderedDict['__repr__']['KwargVariablesListKeyStr']))
# print(_Variable.__class__.InspectedOrderedDict['__repr__']['KwargVariablesListKeyStr'])
print('')
'''
#None type
if type(_Variable)==None.__class__:
return "None"
#Special mongo database case
elif type(_Variable).__name__ in ["Database","Series","Collection"]:
#get
PrinterStr=getPointerStr(_Variable)
#return
return PrinterStr
#Dict types print
#if type(_Variable) in [dict,collections.OrderedDict]:
elif hasattr(_Variable,'items') and type(_Variable)!=type:
#Increment the deep
_KwargVariablesDict['PrintDeepInt']+=1
#debug
'''
print('This is a dictated type so get a represent like a dict')
print('')
'''
#id
PrintIdInt=id(_Variable)
#debug
'''
print('PrintIdInt is ',PrintIdInt)
print('PrintAlreadyIdIntsList is ',PrintAlreadyIdIntsList)
print('')
'''
#Check if it was already represented
if PrintIdInt not in PrintAlreadyIdIntsList:
#Debug
'''
print('PrintAlreadyIdIntsList is ',PrintAlreadyIdIntsList)
print('')
'''
#append
PrintAlreadyIdIntsList.append(PrintIdInt)
#Return the repr of the _Variable but shifted with the PrintAlineaStr
PrintStr=getDictStr(
_Variable,
**_KwargVariablesDict
)
else:
#Return the circular Str
PrintStr=PrintCircularStr+getPointerStr(_Variable,**_KwargVariablesDict)
#Debug
'''
print('PrintIdInt is ',PrintIdInt)
print('PrintStr is ',PrintStr)
print('')
'''
#return
return PrintStr
#List types print
elif type(_Variable) in [list,tuple]:
#id
PrintIdInt=id(_Variable)
#Check if it was already represented
if PrintIdInt not in PrintAlreadyIdIntsList:
#debug
'''
print('Printer l 389')
print('This is a listed type so get a represent like a list')
print('_Variable is ')
print(_Variable)
print('map(type,_Variable) is ')
print(map(type,_Variable))
print('')
'''
#append
PrintAlreadyIdIntsList.append(PrintIdInt)
#import numpy
import numpy
from pandas.core import series
#Check if it is a List of Objects or Python Types
if all(
map(
lambda __ElementVariable:
type(__ElementVariable) in [
float,int,str,unicode,numpy.float64,
] or type(__ElementVariable)==None.__class__,
_Variable
)
)==False:
#Increment the deep
_KwargVariablesDict['PrintDeepInt']+=1
#debug
'''
print('Print a represented version of the list')
print('')
'''
#Return
PrintStr=getListStr(_Variable,**_KwargVariablesDict)
else:
#debug
'''
print('Here just print the list directly')
print('')
'''
#Definition the Local alinea
PrintLocalAlineaStr=PrintAlineaStr if _KwargVariablesDict['PrintDeepInt']==0 else ""
#Return
PrintStr=PrintLocalAlineaStr+repr(
_Variable).replace("\n","\n"+PrintLocalAlineaStr)
#return
return PrintStr
else:
#Return the circular Str
return PrintCircularStr+getPointerStr(_Variable,**_KwargVariablesDict)
#Instance print
elif type(_Variable).__name__ in ["instancemethod"]:
#Debug
'''
print('Printer l 421')
print('This is a method ')
print('_Variable.__name__ is ',_Variable.__name__)
print('')
'''
#Definition the Local alinea
PrintLocalAlineaStr=PrintAlineaStr if _KwargVariablesDict['PrintDeepInt']==0 else ""
#append
		PrintAlreadyIdIntsList.append(id(_Variable.im_self))
#return PrintAlineaStr+"instancemethod"
PrintStr=PrintLocalAlineaStr
PrintStr+="< bound method "+_Variable.__name__
PrintStr+=" of "+str(_Variable.im_self.__class__)
PrintStr+=" "+str(id(_Variable.im_self))+" >"
#PrintStr='inst'
#return
return PrintStr
#Str types
elif type(_Variable) in SYS.StrTypesList:
#debug
'''
print('This is a Str type so get a represent like a Str')
print('')
'''
#Definition the Local alinea
PrintLocalAlineaStr=PrintAlineaStr if _KwargVariablesDict['PrintDeepInt']==0 else ""
#Return
return PrintLocalAlineaStr+_Variable.replace("\n","\n"+PrintLocalAlineaStr)
#Other
#elif hasattr(_Variable,"__repr__") and hasattr(
# _Variable.__class__,"InspectInspectDict"
# ) and '__repr__' in _Variable.__class__.InspectInspectDict and _Variable.__class__.InspectInspectDict[
# '__repr__']['KwargVariablesListKeyStr']!="":
elif hasattr(_Variable.__class__,'__mro__'
) and SYS.PrinterClass in _Variable.__class__.__mro__:
#debug
'''
print('This is a representer so call the repr of it with the _KwargVariablesDict')
print('type(_Variable) is ',type(_Variable))
print('id(_Variable) is ',id(_Variable))
print('')
'''
#/################/#
		# CAREFUL !!!! THIS is the id from the original object...
		# ...not the copy's, otherwise there is a risk of circular print calls
#get
PrintIdInt=_Variable.PrintIdInt
#Check if it was already represented
if PrintIdInt not in PrintAlreadyIdIntsList:
#append
PrintAlreadyIdIntsList.append(PrintIdInt)
#Return the repr of the _Variable but shifted with the PrintAlineaStr
PrintStr=_Variable.__repr__(**_KwargVariablesDict)
#return
return PrintStr
else:
#Return the circular Str
return PrintCircularStr+getPointerStr(_Variable,**_KwargVariablesDict)
else:
#Debug
'''
print('This is not identified so call the repr of it')
print('')
'''
#Definition the Local alinea
PrintLocalAlineaStr=PrintAlineaStr if _KwargVariablesDict[
'PrintDeepInt']==0 else ""
#Define
PrintIdInt=id(_Variable)
#Debug
'''
print('PrintIdInt is ',PrintIdInt)
print('PrintAlreadyIdIntsList is ',PrintAlreadyIdIntsList)
print('')
'''
#Check if it was already represented
if PrintIdInt not in PrintAlreadyIdIntsList:
#debug
'''
print('Printer l 594')
print('type(_Variable) is ',type(_Variable))
print('')
'''
#Append but only for mutables variable
if type(_Variable) not in [bool,str,int,float]:
PrintAlreadyIdIntsList.append(PrintIdInt)
else:
#debug
'''
print('_Variable is ',_Variable)
print('')
'''
pass
#Return a repr of the _Variable but shifted with the PrintAlineaStr
PrintStr=PrintLocalAlineaStr+repr(_Variable).replace(
"\n",
"\n"+PrintLocalAlineaStr+"".join(
[
PrintIndentStr
]*2
)
)
#return
return PrintStr
else:
#Return the circular Str
return PrintLocalAlineaStr+PrintCircularStr+getPointerStr(
_Variable,**_KwargVariablesDict)
def _print(_Variable,**_KwargVariablesDict):
print(represent(_Variable,**_KwargVariablesDict))
def represent(_Variable,**_KwargVariablesDict):
#Definition the global
global PrintAlineaStr,PrintAlreadyIdIntsList
#Debug
'''
print('Printer l.545')
print('Reinit the PrintAlreadyIdIntsList')
print('')
'''
#Reinit
PrintAlreadyIdIntsList=[]
#Debug
'''
print('Printer l.554')
print('_KwargVariablesDict is ',_KwargVariablesDict)
print('')
'''
#Represent without shifting the Strs or not
if 'PrintingAlineaIsBool' not in _KwargVariablesDict or _KwargVariablesDict['PrintingAlineaIsBool']:
return getPrintStr(_Variable,**_KwargVariablesDict)
else:
PrintedOldAlineaStr=PrintAlineaStr
PrintAlineaStr=""
PrintStr=getPrintStr(_Variable,**_KwargVariablesDict)
PrintAlineaStr=PrintedOldAlineaStr
return PrintStr
def __main__represent(_PrintStr,**_KwargVariablesDict):
return represent(
_PrintStr,
**dict(_KwargVariablesDict,**{'PrintingAlineaIsBool':False})
)
def __main__print(_PrintStr,**_KwargVariablesDict):
return _print(
_PrintStr,
**dict(_KwargVariablesDict,**{'PrintingAlineaIsBool':False})
)
SYS._str = __main__represent
SYS._print = __main__print
#</DefineLocals>
#<DefineClass>
@DecorationClass()
class PrinterClass(BaseClass):
def default_init(self,
_PrintIdInt=0,
_PrintStr="",
_PrintingCopyVariable=None,
_PrintingInstanceSkipKeyStrsList=None,
_PrintingInstanceForceKeyStrsList=None,
_PrintingClassSkipKeyStrsList=[],
_PrintingClassForceKeyStrsList=[],
_PrintingBaseBool=True,
_PrintingNewInstanceBool=True,
_PrintingNewClassBool=True,
_PrintingOutBool=True,
_PrintingSelfBool=False,
_PrintingInfoStr="",
_PrintingInstanceForceBaseKeyStrsList=None,
**_KwargVariablesDict
):
#Call the parent init method
BaseClass.__init__(self,**_KwargVariablesDict)
#id
self.PrintIdInt=id(self)
#init
self.PrintingInstanceSkipKeyStrsList=[]
self.PrintingInstanceForceKeyStrsList=[]
def do__print(self,**_KwargVariablesDict):
#Debug
'''
print('l 680 _print')
#print('_KwargVariablesDict is ')
#print(_KwargVariablesDict)
print('self.PrintingCopyVariable.PrintingInstanceSkipKeyStrsList is ')
print(self.PrintingCopyVariable.PrintingInstanceSkipKeyStrsList)
print('')
'''
#/###################/#
# Check if it is a ReprStr
# or just a PrintStr
#Check
if self.PrintingSelfBool:
#Debug
'''
print('l 693')
print('we repr str here')
print('')
'''
#print
self.PrintStr=self.PrintingCopyVariable.getReprStr(
**_KwargVariablesDict
)
else:
#Debug
'''
print('l 705')
print('we just get print Str here')
print('')
'''
#print
self.PrintStr=getPrintStr(
self.PrintingCopyVariable,
**_KwargVariablesDict
)
#Check
if self.PrintingOutBool:
print(self.PrintStr)
def __repr__(self,**_KwargVariablesDict):
#Debug
'''
print('l 718 __repr__')
print('_KwargVariablesDict is ')
print(_KwargVariablesDict)
print('')
'''
#init a new one
self.PrintingCopyVariable=self.__class__()
#loop
for __ItemTuple in self.__dict__.items():
#Debug
'''
print('Try to copy')
print(__ItemTuple[0])
print('')
'''
#Check
if type(__ItemTuple[1]).__name__ not in ['Figure']:
#Debug
'''
print('Ok this is not an annoying type like pyplot Figure')
print('try a copy')
print('')
'''
#try
try:
#copy
self.PrintingCopyVariable.__dict__[__ItemTuple[0]]=copy.copy(
__ItemTuple[1]
)
except:
#debug
#print('Try to copy but FAILED')
#print(__ItemTuple[0])
#print('')
#pass
pass
else:
#Debug
'''
print('Ok this is like a pyplot figure... just alias')
print('')
'''
#copy
self.PrintingCopyVariable.__dict__[__ItemTuple[0]]=__ItemTuple[1]
#Debug
'''
print('l 764 OK')
print('type(self.PrintingCopyVariable) is ')
print(type(self.PrintingCopyVariable))
print('')
'''
#get
ReprStr=self._print(
self.PrintingCopyVariable,
_OutBool=False,
_SelfBool=True,
**_KwargVariablesDict
).PrintStr
#Debug
'''
print('l 763 Printer')
print('ReprStr is ')
print(ReprStr)
'''
#reset
self.PrintingSelfBool=False
self.PrintingOutBool=True
#return
return ReprStr
def getReprStr(self,**_KwargVariablesDict):
#Debug
'''
print('l 741 getReprStr')
print('_KwargVariablesDict is ')
print(_KwargVariablesDict)
print('')
'''
#debug
'''
_Variable.debug(('_Variable.__class__',self.__class__,[
'PrintingKeyStrsList',
'DefaultBaseKeyStrsList',
'DefaultSpecificKeyStrsList',
'PrintedNotSpecificKeyStrsList'
]))
'''
#/###################/#
# Print the Default Key Strs... form the Instance or the still the Class
#
#Debug
'''
print('Printer l 846')
print('self.PrintingInstanceSkipKeyStrsList is ')
print(self.PrintingInstanceSkipKeyStrsList)
print('')
'''
#Check
if self.PrintingClassSkipKeyStrsList==None:
self.PrintingClassSkipKeyStrsList=[]
if self.PrintingInstanceSkipKeyStrsList==None:
self.PrintingInstanceSkipKeyStrsList=[]
#filter the skip key strs
PrintedDefaultSpecificKeyStrsList=SYS._filter(
lambda __DefaultSpecificKeyStr:
__DefaultSpecificKeyStr not in list(
self.PrintingInstanceSkipKeyStrsList
)+list(
self.PrintingClassSkipKeyStrsList),
self.__class__.DefaultSpecificKeyStrsList
)
#Represent the Specific KeyStrs
PrintTuplesList=map(
lambda __SpecificKeyStr:
(
"<Spe>"+("<Instance>"
if __SpecificKeyStr in self.__dict__
else (
"<Instance>_"
if hasattr(
self.__class__,__SpecificKeyStr
) and type(getattr(
self.__class__,__SpecificKeyStr
))==property and getattr(
self.__class__,'_'+__SpecificKeyStr
)!=getattr(self,'_'+__SpecificKeyStr) and (
'_'+__SpecificKeyStr not in self.PrintingClassSkipKeyStrsList and __SpecificKeyStr not in self.PrintingInstanceSkipKeyStrsList
)
else
"<Class>"
)
)+__SpecificKeyStr,
getattr(self,__SpecificKeyStr)
),
PrintedDefaultSpecificKeyStrsList
)
#/###################/#
# Print the Default Base Key Strs... form the Instance or the still the Class
#
#Represent the BaseKeyStrs
if self.PrintingBaseBool:
#Debug
'''
print('Printer l 723')
print('We print the bases')
print('self.__class__.DefaultBaseKeyStrsList is ')
print(self.__class__.DefaultBaseKeyStrsList)
print('')
'''
#filter remove
PrintedDefaultBaseKeyStrsList=SYS._filter(
lambda __DefaultSpecificKeyStr:
__DefaultSpecificKeyStr not in list(
self.PrintingInstanceSkipKeyStrsList
)+list(self.PrintingClassSkipKeyStrsList),
self.__class__.DefaultBaseKeyStrsList
)
#filter add
if self.PrintingInstanceForceBaseKeyStrsList!=None:
#add
PrintedDefaultBaseKeyStrsList+=self.PrintingInstanceForceBaseKeyStrsList
#map
PrintTuplesList+=map(
lambda __BaseKeyStr:
(
"<Base>"+("<Instance>"
if __BaseKeyStr in self.__dict__
else "<Class>"
)+__BaseKeyStr
,
getattr(self,__BaseKeyStr)
),
PrintedDefaultBaseKeyStrsList
)
#/###################/#
# Print the New key strs in the instance
#
#print the NewInstanceKeyStrs in the __dict__
if self.PrintingNewInstanceBool:
#filter
PrintedNewInstanceTuplesList=SYS._filter(
lambda __NewItemTuple:
__NewItemTuple[0
] not in self.__class__.DefaultSpecificKeyStrsList+self.__class__.DefaultBaseKeyStrsList,
self.__dict__.items()
)
#filter
PrintedNewInstanceTuplesList=SYS._filter(
lambda __PrintedNewInstanceTuple:
__PrintedNewInstanceTuple[0] not in list(
self.PrintingInstanceSkipKeyStrsList)+list(
self.PrintingClassSkipKeyStrsList),
PrintedNewInstanceTuplesList
)
#map
PrintTuplesList+=map(
lambda __NewItemTuple:
(
"<New><Instance>"+__NewItemTuple[0],
__NewItemTuple[1]
),
PrintedNewInstanceTuplesList
)
#/###################/#
# Print the New key strs in the class
#
#Represent the NewClassKeyStrs in the _self.__class____.__dict__
if self.PrintingNewClassBool:
#filter
PrintedNewClassKeyStrsList=SYS._filter(
lambda __KeyStr:
__KeyStr not in self.__class__.KeyStrsList and __KeyStr not in self.__dict__,
SYS.getKeyStrsListWithClass(
self.__class__
)
)
#filter
PrintedNewClassKeyStrsList=SYS._filter(
lambda __NewClassKeyStr:
__NewClassKeyStr not in list(
self.PrintingInstanceSkipKeyStrsList)+list(
self.PrintingClassSkipKeyStrsList),
PrintedNewClassKeyStrsList
)
#filter
PrintTuplesList+=map(
lambda __NewKeyStr:
(
"<New><Class>"+__NewKeyStr,
self.__class__.__dict__[__NewKeyStr]
),
PrintedNewClassKeyStrsList
)
#/###################/#
# Print force key strs
#
#Debug
'''
print('Printer l 811')
print('We add some forced Key Strs')
print('')
'''
#Check
if self.PrintingInstanceForceKeyStrsList==None:
self.PrintingInstanceForceKeyStrsList=[]
#map
PrintTuplesList+=map(
lambda __PrintingKeyStr:
(
"<Spe><Instance>"+__PrintingKeyStr,
self.__dict__[__PrintingKeyStr]
)
if __PrintingKeyStr in self.__dict__ and __PrintingKeyStr in self.__class__.DefaultSpecificKeyStrsList
else(
(
"<Base><Instance>"+__PrintingKeyStr,
self.__dict__[__PrintingKeyStr]
)
if __PrintingKeyStr in self.__dict__ and __PrintingKeyStr in self.__class__.DefaultBaseKeyStrsList
else
(
(
"<Base><Class>"+__PrintingKeyStr,
getattr(self,__PrintingKeyStr)
)
if __PrintingKeyStr not in self.__dict__
else
(
"<New><Instance>"+__PrintingKeyStr,
self.__dict__[__PrintingKeyStr]
)
)
),
list(
self.PrintingInstanceForceKeyStrsList
)+list(self.PrintingClassForceKeyStrsList)
)
#Append
global PrintAlreadyIdIntsList
#debug
'''
print('Printer l.629')
print('id(self) is ',id(self))
print('self not in PrintAlreadyIdIntsList is ',str(
self not in PrintAlreadyIdIntsList))
print('')
'''
#define the PrintStr
self.PrintStr=getPointerStr(
self,
**_KwargVariablesDict
)+getPrintStr(
dict(PrintTuplesList),
**_KwargVariablesDict
)
#return
return self.PrintStr
def forcePrint(self,_KeyStrsList,_ClassStr):
#append
if self.__class__.__name__==_ClassStr:
#Check
if self.PrintingCopyVariable.PrintingInstanceForceKeyStrsList==None:
#alias
self.PrintingCopyVariable.PrintingInstanceForceKeyStrsList=_KeyStrsList
else:
#extend
self.PrintingCopyVariable.PrintingInstanceForceKeyStrsList.extend(_KeyStrsList)
else:
#Check
if self.PrintingCopyVariable.PrintingInstanceForceBaseKeyStrsList==None:
#alias
self.PrintingCopyVariable.PrintingInstanceForceBaseKeyStrsList=_KeyStrsList
else:
#extend
self.PrintingCopyVariable.PrintingInstanceForceBaseKeyStrsList.extend(_KeyStrsList)
#</DefineClass>
#<DefinePrint>
PrinterClass.PrintingClassSkipKeyStrsList.extend(
[
'DefaultInitBool',
'DoUnitsInt',
'PrintStr',
'PrintIdInt',
'PrintingCopyVariable',
'PrintingInstanceSkipKeyStrsList',
'PrintingInstanceForceKeyStrsList',
'PrintingClassSkipKeyStrsList',
'PrintingClassForceKeyStrsList',
'PrintingBaseBool',
'PrintingNewInstanceBool',
'PrintingNewClassBool',
'PrintingOutBool',
'PrintingSelfBool',
'PrintingInfoStr',
'PrintingInstanceForceBaseKeyStrsList'
]
)
#</DefinePrint> | Ledoux/ShareYourSystem | Pythonlogy/build/lib/ShareYourSystem/Standards/Interfacers/Printer/__init__.py | Python | mit | 26,126 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
import os
from pathlib import Path
import sys
import importlib
from sisl.messages import warn
from sisl._environ import get_environ_variable
__all__ = ["import_user_presets", "import_user_plots",
"import_user_sessions", "import_user_plugins"]
USER_CUSTOM_FOLDER = get_environ_variable("SISL_CONFIGDIR") / "viz" / "plotly"
# Here we let python know that there are importable files
# in USER_CUSTOM_FOLDER
sys.path.append(str(USER_CUSTOM_FOLDER.resolve()))
def import_user_extension(extension_file):
"""
Basis for importing users extensions.
Parameters
------------
extension_file: str
the name of the file that you want to import (NOT THE FULL PATH).
"""
try:
return importlib.import_module(str(extension_file).replace(".py", ""))
except ModuleNotFoundError:
return None
#--------------------------------------
# Presets
#--------------------------------------
# File where the user's presets will be searched
PRESETS_FILE_NAME = "presets.py"
PRESETS_FILE = USER_CUSTOM_FOLDER / PRESETS_FILE_NAME
# We will look for presets under this variable
PRESETS_VARIABLE = "presets"
def import_user_presets():
"""
    Imports the user's presets.
    All the presets that the user wants to import into sisl
    should be in the 'presets' variable as a dict in the 'presets.py'
    file. Then, this method will add them to the global dictionary of presets.
"""
from ._presets import add_presets
module = import_user_extension(PRESETS_FILE_NAME)
# Add these presets
if module is not None:
if PRESETS_VARIABLE in vars(module):
add_presets(**vars(module)[PRESETS_VARIABLE])
else:
warn(f"We found the custom presets file ({PRESETS_FILE}) but no '{PRESETS_VARIABLE}' variable was found.\n Please put your presets as a dict under this variable.")
return module
#--------------------------------------
# Plots
#--------------------------------------
# File where the user's plots will be searched
PLOTS_FILE_NAME = "plots.py"
PLOTS_FILE = USER_CUSTOM_FOLDER / PLOTS_FILE_NAME
def import_user_plots():
"""
Imports the user's plots.
We don't need to do anything here because all plots available
are tracked by checking the subclasses of `Plot`.
Therefore, the user only needs to make sure that their plot classes
are defined.
"""
return import_user_extension(PLOTS_FILE_NAME)
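# A user's plots.py therefore only needs to define the plot classes themselves,
# e.g. (illustrative sketch; `Plot` here stands for sisl.viz's plot base class):
#
#     class MyCustomPlot(Plot):
#         ...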
#--------------------------------------
# Sessions
#--------------------------------------
# File where the user's sessions will be searched
SESSION_FILE_NAME = "sessions.py"
SESSION_FILE = USER_CUSTOM_FOLDER / SESSION_FILE_NAME
def import_user_sessions():
"""
Imports the user's sessions.
We don't need to do anything here because all sessions available
are tracked by checking the subclasses of `Session`.
Therefore, the user only needs to make sure that their session classes
are defined.
"""
return import_user_extension(SESSION_FILE_NAME)
#----------------------------------------
# Plugins
#---------------------------------------
# This is a general file that the user can have for convenience, so that every
# time sisl is imported it can automatically import all the utilities that they
# have developed to work with sisl
PLUGINS_FILE_NAME = "plugins.py"
def import_user_plugins():
"""
This imports an extra file where the user can do really anything
that they want to finish customizing the package.
"""
return import_user_extension(PLUGINS_FILE_NAME)
| zerothi/sisl | sisl/viz/backends/plotly/_user_customs.py | Python | mpl-2.0 | 3,819 |
# -*- coding: utf-8 -*-
# © 2016 Alessandro Fernandes Martini, Trustcode
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import res_partner
| Trust-Code/trust-addons | partner_wkf/models/__init__.py | Python | agpl-3.0 | 170 |
from tree.tree import TreeNode
class AvlTree(object):
"""
An avl tree.
"""
def __init__(self):
# Root node of the tree.
self.node = None
self.height = -1
self.balance = 0
def insert(self, key):
"""
Insert new key into node
"""
# Create new node
n = TreeNode(key)
if not self.node:
self.node = n
self.node.left = AvlTree()
self.node.right = AvlTree()
elif key < self.node.val:
self.node.left.insert(key)
elif key > self.node.val:
self.node.right.insert(key)
self.re_balance()
def re_balance(self):
"""
        Re-balance the tree after inserting or deleting a node.
"""
self.update_heights(recursive=False)
self.update_balances(False)
while self.balance < -1 or self.balance > 1:
if self.balance > 1:
if self.node.left.balance < 0:
self.node.left.rotate_left()
self.update_heights()
self.update_balances()
self.rotate_right()
self.update_heights()
self.update_balances()
if self.balance < -1:
if self.node.right.balance > 0:
self.node.right.rotate_right()
self.update_heights()
self.update_balances()
self.rotate_left()
self.update_heights()
self.update_balances()
def update_heights(self, recursive=True):
"""
Update tree height
"""
if self.node:
if recursive:
if self.node.left:
self.node.left.update_heights()
if self.node.right:
self.node.right.update_heights()
self.height = 1 + max(self.node.left.height, self.node.right.height)
else:
self.height = -1
def update_balances(self, recursive=True):
"""
Calculate tree balance factor
"""
if self.node:
if recursive:
if self.node.left:
self.node.left.update_balances()
if self.node.right:
self.node.right.update_balances()
self.balance = self.node.left.height - self.node.right.height
else:
self.balance = 0
def rotate_right(self):
"""
Right rotation
"""
new_root = self.node.left.node
new_left_sub = new_root.right.node
old_root = self.node
self.node = new_root
old_root.left.node = new_left_sub
new_root.right.node = old_root
def rotate_left(self):
"""
Left rotation
"""
new_root = self.node.right.node
new_left_sub = new_root.left.node
old_root = self.node
self.node = new_root
old_root.right.node = new_left_sub
new_root.left.node = old_root
def in_order_traverse(self):
"""
In-order traversal of the tree
"""
result = []
if not self.node:
return result
result.extend(self.node.left.in_order_traverse())
        result.append(self.node.val)  # match the attribute used by insert()
result.extend(self.node.right.in_order_traverse())
return result
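

if __name__ == "__main__":
    # Minimal usage sketch (assumes the `tree` package is importable and that
    # TreeNode exposes the inserted value as `val`, the attribute insert()
    # already compares against).
    t = AvlTree()
    for v in [5, 3, 8, 1, 4]:
        t.insert(v)
    print(t.in_order_traverse())  # expected: [1, 3, 4, 5, 8]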
| amaozhao/algorithms | algorithms/tree/avl/avl.py | Python | mit | 3,411 |
import sys
import collections as c
from scipy import special, stats
import numpy as n, pylab as p, networkx as x
class NetworkDrawer:
drawer_count=0
def __init__(self,metric="strength"):
self.drawer_count+=1
metric_=self.standardizeName(metric)
self.metric_=metric_
self.draw_count=0
def standardizeName(self,name):
if name in (["s","strength","st"]+["f","força","forca","fo"]):
name_="s"
if name in (["d","degree","dg"]+["g","grau","gr"]):
name_="d"
return name_
def makeLayout(self,network_measures,network_partitioning=None):
"""Delivers a sequence of user_ids and (x,y) pos.
"""
self.network_measures=network_measures
if self.metric_=="s":
measures_=network_measures.strengths
elif self.metric_=="d":
measures_=network_measures.degrees
else:
print("not known metric to make layout")
self.ordered_measures=ordered_measures = c.OrderedDict(sorted(measures_.items(), key=lambda x: x[1]))
self.measures=measures=list(ordered_measures.values())
self.authors=authors= list(ordered_measures.keys())
total=network_measures.N
if not network_partitioning:
self.k1=k1=round(total*.80)
self.k2=k2=round(total*.95)
self.periphery=authors[:k1]
self.intermediary=authors[k1:k2]
self.hubs=authors[k2:]
else:
sectors=network_partitioning.sectorialized_agents__
self.k1=k1=len(sectors[0])
self.k2=k2=k1+len(sectors[1])
self.periphery,self.intermediary,self.hubs=sectors
print("fractions ={:0.4f}, {:0.4f}, {:0.4f}".format(k1/total, (k2-k1)/total, 1-k2/total))
self.makeXY()
def drawNetwork(self, network,network_measures,filename="example.png",label="auto",network_partitioning=None):
p.clf()
if self.metric_=="s":
measures_=network_measures.strengths
elif self.metric_=="d":
measures_=network_measures.degree
else:
print("not known metric to make layout")
ordered_measures = c.OrderedDict(sorted(measures_.items(), key=lambda x: x[1]))
measures=list(ordered_measures.values())
authors= list(ordered_measures.keys())
total=network_measures.N
if not network_partitioning:
k1=k1=round(total*.80)
k2=k2=round(total*.95)
periphery=authors[:k1]
intermediary=authors[k1:k2]
hubs=authors[k2:]
else:
sectors=network_partitioning.sectorialized_agents__
k1=k1=len(sectors[0])
k2=k2=k1+len(sectors[1])
periphery,intermediary,hubs=(set(iii) for iii in sectors)
in_measures=network_measures.in_strengths
min_in=max(in_measures.values())/3+0.1
out_measures=network_measures.out_strengths
min_out=max(out_measures.values())/3+.1
self.clustering=clustering=network_measures.weighted_clusterings
A=x.drawing.nx_agraph.to_agraph(network.g)
A.node_attr['style']='filled'
A.graph_attr["bgcolor"]="black"
A.graph_attr["pad"]=.1
#A.graph_attr["size"]="9.5,12"
A.graph_attr["fontsize"]="25"
if label=="auto":
label=self.makeLabel()
A.graph_attr["label"]=label
A.graph_attr["fontcolor"]="white"
cm=p.cm.Reds(range(2**10)) # color table
self.cm=cm
nodes=A.nodes()
self.colors=colors=[]
self.inds=inds=[]
self.poss=poss=[]
for node in nodes:
n_=A.get_node(node)
ind_author=self.authors.index(n_)
inds.append(inds)
colors.append( '#%02x%02x%02x' % tuple([int(255*i) for i in cm[int(clustering[n_]*255)][:-1]]))
#n_.attr['fillcolor']= '#%02x%02x%02x' % tuple([255*i for i in cm[int(clustering[n_]*255)][:-1]])
n_.attr['fillcolor']= colors[-1]
n_.attr['fixedsize']=True
n_.attr['width']= abs(.6*(in_measures[n_]/min_in+ .05))
n_.attr['height']= abs(.6*(out_measures[n_]/min_out+.05))
if n_ in hubs:
n_.attr["shape"] = "hexagon"
elif n_ in intermediary:
pass
else:
n_.attr["shape"] = "diamond"
pos="%f,%f"%tuple(self.posXY[ind_author])
poss.append(pos)
n_.attr["pos"]=pos
n_.attr["pin"]=True
n_.attr["fontsize"]=25
n_.attr["fontcolor"]="white"
n_.attr["label"]=""
weights=[s[2]["weight"] for s in network_measures.edges]
self.weights=weights
max_weight=max(weights)
self.max_weight=max_weight
self.weights_=[]
edges=A.edges()
for e in edges:
factor=float(e.attr['weight'])
self.weights_.append(factor)
e.attr['penwidth']=.34*factor
e.attr["arrowsize"]=1.5
e.attr["arrowhead"]="lteeoldiamond"
            w=factor/max_weight # factor in [0, 1]
cor=p.cm.Spectral(int(w*255))
self.cor=cor
cor256=255*n.array(cor[:-1])
r0=int(cor256[0]/16)
r1=int(cor256[0]-r0*16)
r=hex(r0)[-1]+hex(r1)[-1]
g0=int(cor256[1]/16)
g1=int(cor256[1]-g0*16)
g=hex(g0)[-1]+hex(g1)[-1]
b0=int(cor256[2]/16)
b1=int(cor256[2]-b0*16)
b=hex(b0)[-1]+hex(b1)[-1]
#corRGB="#"+r+g+b+":#"+r+g+b
corRGB="#"+r+g+b
e.attr["color"]=corRGB
A.draw(filename, prog="neato") # twopi ou circo
################
self.A=A
self.draw_count+=1
def makeLabel(self):
label=""
if "window_size" in dir(self):
label+="w: {}, ".format(self.window_size)
#m: %i, N = %i, E = %i"%(self.draw_count*self.step_size,self.network_measures.N,self.network_measures.E)
if "step_size" in dir(self):
label+="m: {} ,".format(self.draw_count*self.step_size+self.offset)
else:
label+="m: %i, ".format(self.draw_count)
#self.network_measures.N,self.network_measures.E)
label+="N = %i, E = %i"%(self.network_measures.N,self.network_measures.E)
return label
def updateNetwork(self,network,networkMeasures=None):
pass
def makeXY(self):
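        """Compute (x, y) layout positions for periphery, intermediary and hub agents.

        Peripheral agents are placed along a straight diagonal segment, while
        intermediary and hub agents follow the lower and upper arches of one
        sine period; the coordinates are scaled by XFACT/YFACT and stored in
        self.posXY in the same order as self.authors.
        """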
size_periphery=self.k1
size_intermediary=self.k2-self.k1
size_hubs=self.network_measures.N-self.k2
if size_hubs%2==1:
size_hubs+=1
size_intermediary-=1
xh=n.linspace(0,0.5,size_hubs,endpoint=False)[::-1]
thetah=2*n.pi*xh
yh=n.sin(thetah)
xi=n.linspace(1,0.5, size_intermediary, endpoint=True)
thetai=2*n.pi*xi
yi=n.sin(thetai)
xp=n.linspace(.95,0.4, size_periphery)[::-1]
yp=n.linspace(.1,1.25, size_periphery)[::-1]
self.pos=((xp,yp),(xi,yi),(xh,yh))
XFACT=7
YFACT=3
self.posX=posX=n.hstack((xp,xi,xh))*XFACT
self.posY=posY=n.hstack((yp,yi,yh))*YFACT
self.posXY=n.vstack((posX.T,posY.T)).T
| ttm/gmaneLegacy | gmaneLegacy/networkDrawer.py | Python | unlicense | 7,293 |
"""
Team Testing Module
"""
import pytest
import api.user
import api.team
import api.common
import bcrypt
from api.common import WebException, InternalException
from common import clear_collections, ensure_empty_collections
from common import base_team, base_user
from conftest import setup_db, teardown_db
dict_filter = lambda dict, items: {k:v for k,v in dict.items() if k in items}
class TestNewStyleTeams(object):
@ensure_empty_collections("users", "teams")
@clear_collections("users", "teams")
def test_user_team_registration(self):
"""
Tests the newer and simplified user creation.
"""
user = dict_filter(base_user.copy(), ["username", "firstname", "lastname", "email"])
user["password"] = "test"
uid = api.user.create_simple_user_request(user)
team_data = {"team_name": "lolhax", "team_password": "s3cret"}
api.team.create_new_team_request(team_data, uid=uid)
team = api.user.get_team(uid=uid)
assert team["team_name"] == team_data["team_name"], "User does not belong to the new team."
assert api.team.get_team(name=user["username"])["size"] == 0 and api.team.get_team(name=team_data["team_name"])["size"] == 1, \
"Size calculations are incorrect for new registered team."
class TestTeams(object):
"""
API Tests for team.py
"""
def setup_class(self):
setup_db()
api.config.get_settings()
api.config.change_settings({"max_team_size": 5})
def teardown_class(self):
teardown_db()
@ensure_empty_collections("teams")
@clear_collections("teams")
def test_create_batch_teams(self, teams=10):
"""
Tests team creation.
Covers:
team.create_team
team.get_team
team.get_all_teams
"""
tids = []
for i in range(teams):
team = base_team.copy()
team["team_name"] += str(i)
tids.append(api.team.create_team(team))
assert len(set(tids)) == len(tids), "tids are not unique."
assert len(api.team.get_all_teams()) == len(tids), "Not all teams were created."
for i, tid in enumerate(tids):
name = base_team['team_name'] + str(i)
team_from_tid = api.team.get_team(tid=tid)
team_from_name = api.team.get_team(name=name)
assert team_from_tid == team_from_name, "Team lookup from tid and name are not the same."
@ensure_empty_collections("teams", "users")
@clear_collections("teams", "users")
def test_get_team_uids(self):
"""
Tests the code that retrieves the list of uids on a team
Covers:
team.create_team
user.create_user_request
team.get_team_uids
"""
tid = api.team.create_team(base_team.copy())
uids = []
for i in range(api.config.get_settings()["max_team_size"]):
test_user = base_user.copy()
test_user['username'] += str(i)
uids.append(api.user.create_user_request(test_user))
team_uids = api.team.get_team_uids(tid)
assert len(team_uids) == api.config.get_settings()["max_team_size"], "Team does not have correct number of members"
assert sorted(uids) == sorted(team_uids), "Team does not have the correct members"
@ensure_empty_collections("teams", "users")
@clear_collections("teams", "users")
def te_st_create_user_request_team_size_validation(self):
"""
Tests the team size restriction
Covers:
team.create_team
user.create_user_request
"""
api.team.create_team(base_team.copy())
uid = None
for i in range(api.config.get_settings()["max_team_size"]):
test_user = base_user.copy()
test_user['username'] += str(i)
uid = api.user.create_user_request(test_user)
with pytest.raises(WebException):
api.user.create_user_request(base_user.copy())
assert False, "Team has too many users"
api.user.disable_account(uid)
#Should be able to add another user after disabling one.
test_user = base_user.copy()
test_user['username'] += "addition"
api.user.create_user_request(test_user)
| alpire/picoCTF-web | unit_tests/team_test.py | Python | mit | 4,330 |
from django.db import models
from edc_appointment.models import AppointmentMixin
from edc_base.audit_trail import AuditTrail
from edc_base.model.models import BaseUuidModel
from edc_base.model.validators import datetime_not_before_study_start, datetime_not_future
from edc_base.model.validators.date import date_not_future
from edc_constants.choices import GENDER_UNDETERMINED
from edc_export.models import ExportTrackingFieldsMixin
from edc_offstudy.models import OffStudyMixin
from edc_registration.models import RegisteredSubject
from edc_sync.models import SyncModelMixin
from microbiome.apps.mb_maternal.models import MaternalLabourDel
from ..managers import InfantBirthModelManager
class InfantBirth(OffStudyMixin, SyncModelMixin, AppointmentMixin, ExportTrackingFieldsMixin, BaseUuidModel):
""" A model completed by the user on the infant's birth. """
off_study_model = ('mb_infant', 'InfantOffStudy')
registered_subject = models.OneToOneField(RegisteredSubject, null=True)
maternal_labour_del = models.ForeignKey(
MaternalLabourDel,
verbose_name="Mother's delivery record")
report_datetime = models.DateTimeField(
verbose_name="Date and Time infant enrolled",
validators=[
datetime_not_before_study_start,
datetime_not_future, ],
help_text='')
first_name = models.CharField(
max_length=25,
verbose_name="Infant's first name",
help_text="If infant name is unknown or not yet determined, "
"use Baby + birth order + mother's last name, e.g. 'Baby1Malane'")
initials = models.CharField(
max_length=3)
dob = models.DateField(
verbose_name='Date of Birth',
help_text="Must match labour and delivery report.",
validators=[date_not_future, ])
gender = models.CharField(
max_length=10,
choices=GENDER_UNDETERMINED)
objects = InfantBirthModelManager()
history = AuditTrail()
def natural_key(self):
return self.maternal_labour_del.natural_key()
natural_key.dependencies = ['mb_maternal.maternallabourdel', 'edc_registration.registered_subject']
def __unicode__(self):
return "{} ({}) {}".format(self.first_name, self.initials, self.gender)
def prepare_appointments(self, using):
"""Creates infant appointments relative to the date-of-delivery"""
relative_identifier = self.registered_subject.relative_identifier
maternal_labour_del = MaternalLabourDel.objects.get(
maternal_visit__appointment__registered_subject__subject_identifier=relative_identifier)
self.create_all(
base_appt_datetime=maternal_labour_del.delivery_datetime, using=using)
def get_subject_identifier(self):
return self.registered_subject.subject_identifier
class Meta:
app_label = 'mb_infant'
verbose_name = "Infant Birth"
| botswana-harvard/microbiome | microbiome/apps/mb_infant/models/infant_birth.py | Python | gpl-2.0 | 2,928 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from pex.interpreter import PythonInterpreter
from pex.pex_builder import PEXBuilder
from pex.pex_info import PexInfo
from pants.backend.python.interpreter_cache import PythonInterpreterCache
from pants.backend.python.subsystems.pex_build_util import (PexBuilderWrapper,
has_python_requirements,
has_python_sources, has_resources,
is_python_target)
from pants.backend.python.subsystems.python_native_code import PythonNativeCode
from pants.backend.python.targets.python_binary import PythonBinary
from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.build_graph.target_scopes import Scopes
from pants.task.task import Task
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_mkdir_for
from pants.util.fileutil import atomic_copy
from pants.util.memo import memoized_property
class PythonBinaryCreate(Task):
"""Create an executable .pex file."""
@classmethod
def subsystem_dependencies(cls):
return super(PythonBinaryCreate, cls).subsystem_dependencies() + (
PexBuilderWrapper.Factory,
PythonNativeCode.scoped(cls),
PythonInterpreterCache,
)
@memoized_property
def _python_native_code_settings(self):
return PythonNativeCode.scoped_instance(self)
@classmethod
def product_types(cls):
return ['pex_archives', 'deployable_archives']
@classmethod
def implementation_version(cls):
return super(PythonBinaryCreate, cls).implementation_version() + [('PythonBinaryCreate', 2)]
@property
def cache_target_dirs(self):
return True
@classmethod
def prepare(cls, options, round_manager):
# See comment below for why we don't use the GatherSources.PYTHON_SOURCES product.
round_manager.require_data(PythonInterpreter)
round_manager.optional_data('python') # For codegen.
round_manager.optional_product(PythonRequirementLibrary) # For local dists.
@staticmethod
def is_binary(target):
return isinstance(target, PythonBinary)
def __init__(self, *args, **kwargs):
super(PythonBinaryCreate, self).__init__(*args, **kwargs)
self._distdir = self.get_options().pants_distdir
def execute(self):
binaries = self.context.targets(self.is_binary)
# Check for duplicate binary names, since we write the pexes to <dist>/<name>.pex.
names = {}
for binary in binaries:
name = binary.name
if name in names:
raise TaskError('Cannot build two binaries with the same name in a single invocation. '
'{} and {} both have the name {}.'.format(binary, names[name], name))
names[name] = binary
with self.invalidated(binaries, invalidate_dependents=True) as invalidation_check:
python_deployable_archive = self.context.products.get('deployable_archives')
python_pex_product = self.context.products.get('pex_archives')
for vt in invalidation_check.all_vts:
pex_path = os.path.join(vt.results_dir, '{}.pex'.format(vt.target.name))
if not vt.valid:
self.context.log.debug('cache for {} is invalid, rebuilding'.format(vt.target))
self._create_binary(vt.target, vt.results_dir)
else:
self.context.log.debug('using cache for {}'.format(vt.target))
basename = os.path.basename(pex_path)
python_pex_product.add(vt.target, os.path.dirname(pex_path)).append(basename)
python_deployable_archive.add(vt.target, os.path.dirname(pex_path)).append(basename)
self.context.log.debug('created {}'.format(os.path.relpath(pex_path, get_buildroot())))
# Create a copy for pex.
pex_copy = os.path.join(self._distdir, os.path.basename(pex_path))
safe_mkdir_for(pex_copy)
atomic_copy(pex_path, pex_copy)
self.context.log.info('created pex {}'.format(os.path.relpath(pex_copy, get_buildroot())))
def _validate_interpreter_constraints(self, constraint_tgts):
"""Validate that the transitive constraints of the given PythonBinary target are compatible.
If no (local) interpreter can satisfy all of the given targets, raises
PythonInterpreterCache.UnsatisfiableInterpreterConstraintsError.
TODO: This currently does so by finding a concrete local interpreter that matches all of the
constraints, but it is possible to do this in memory instead.
see https://github.com/pantsbuild/pants/issues/7775
"""
PythonInterpreterCache.global_instance().select_interpreter_for_targets(constraint_tgts)
def _create_binary(self, binary_tgt, results_dir):
"""Create a .pex file for the specified binary target."""
# Note that we rebuild a chroot from scratch, instead of using the REQUIREMENTS_PEX
# and PYTHON_SOURCES products, because those products are already-built pexes, and there's
# no easy way to merge them into a single pex file (for example, they each have a __main__.py,
# metadata, and so on, which the merging code would have to handle specially).
interpreter = self.context.products.get_data(PythonInterpreter)
with temporary_dir() as tmpdir:
# Create the pex_info for the binary.
run_info_dict = self.context.run_tracker.run_info.get_as_dict()
build_properties = PexInfo.make_build_properties()
build_properties.update(run_info_dict)
pex_info = binary_tgt.pexinfo.copy()
pex_info.build_properties = build_properties
pex_builder = PexBuilderWrapper.Factory.create(
builder=PEXBuilder(path=tmpdir, interpreter=interpreter, pex_info=pex_info, copy=True),
log=self.context.log)
if binary_tgt.shebang:
self.context.log.info('Found Python binary target {} with customized shebang, using it: {}'
.format(binary_tgt.name, binary_tgt.shebang))
pex_builder.set_shebang(binary_tgt.shebang)
else:
self.context.log.debug('No customized shebang found for {}'.format(binary_tgt.name))
# Find which targets provide sources and which specify requirements.
source_tgts = []
req_tgts = []
constraint_tgts = []
for tgt in binary_tgt.closure(exclude_scopes=Scopes.COMPILE):
if has_python_sources(tgt) or has_resources(tgt):
source_tgts.append(tgt)
elif has_python_requirements(tgt):
req_tgts.append(tgt)
if is_python_target(tgt):
constraint_tgts.append(tgt)
# Add interpreter compatibility constraints to pex info. This will first check the targets for any
# constraints, and if they do not have any will resort to the global constraints.
self._validate_interpreter_constraints(constraint_tgts)
pex_builder.add_interpreter_constraints_from([binary_tgt])
# Dump everything into the builder's chroot.
for tgt in source_tgts:
pex_builder.add_sources_from(tgt)
# We need to ensure that we are resolving for only the current platform if we are
# including local python dist targets that have native extensions.
self._python_native_code_settings.check_build_for_current_platform_only(self.context.targets())
pex_builder.add_requirement_libs_from(req_tgts, platforms=binary_tgt.platforms)
# Build the .pex file.
pex_path = os.path.join(results_dir, '{}.pex'.format(binary_tgt.name))
pex_builder.build(pex_path)
return pex_path
| twitter/pants | src/python/pants/backend/python/tasks/python_binary_create.py | Python | apache-2.0 | 7,831 |
def caught(pyn, fpyn):
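    """Return True when pyn is within one unit of fpyn's current position."""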
fx, fy = fpyn.xy()
return pyn.distance(fx, fy) <= 1
| aresnick/pynguin | doc/examples_src/threaded_pynd/00015.py | Python | gpl-3.0 | 83 |
"""
File Author: Will Lowry, Will Hescott
File Creation Date: 1/28/2015
File Purpose: To create our drive functions
Transmission gear ratio: 18.74/1
"""
import wpilib
from wpilib import CANTalon, Encoder, Timer, RobotDrive
from wpilib.interfaces import Gyro
from . import Component
import hal
class driveTrain(Component) :
def __init__(self, robot):
super().__init__()
self.robot = robot
# Constants
WHEEL_DIAMETER = 8
PI = 3.1415
ENCODER_TICK_COUNT_250 = 250
ENCODER_TICK_COUNT_360 = 360
ENCODER_GOAL = 0 # default
        ENCODER_TOLERANCE = 1  # inches
self.RPM = 4320/10.7
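        # wheel circumference in inches, i.e. the distance travelled per wheel revolution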
self.INCHES_PER_REV = WHEEL_DIAMETER * 3.1415
self.CONTROL_TYPE = False # False = disable PID components
self.LEFTFRONTCUMULATIVE = 0
self.LEFTBACKCUMULATIVE = 0
self.RIGHTFRONTCUMULATIVE = 0
self.RIGHTBACKCUMULATIVE = 0
self.rfmotor = CANTalon(0)
self.rbmotor = CANTalon(1)
self.lfmotor = CANTalon(2)
self.lbmotor = CANTalon(3)
self.lfmotor.reverseOutput(True)
self.lbmotor.reverseOutput(True)
#self.rfmotor.reverseOutput(True)
#self.rbmotor.reverseOutput(True)#practice bot only
self.rfmotor.enableBrakeMode(True)
self.rbmotor.enableBrakeMode(True)
self.lfmotor.enableBrakeMode(True)
self.lbmotor.enableBrakeMode(True)
        absolutePosition = self.lbmotor.getPulseWidthPosition() & 0xFFF  # keep only the bottom 12 bits (ignore wrap-arounds), then use the low-level API to set the quad encoder signal
        self.lbmotor.setEncPosition(absolutePosition)
        absolutePosition = self.lfmotor.getPulseWidthPosition() & 0xFFF  # keep only the bottom 12 bits (ignore wrap-arounds), then use the low-level API to set the quad encoder signal
        self.lfmotor.setEncPosition(absolutePosition)
        absolutePosition = self.rbmotor.getPulseWidthPosition() & 0xFFF  # keep only the bottom 12 bits (ignore wrap-arounds), then use the low-level API to set the quad encoder signal
        self.rbmotor.setEncPosition(absolutePosition)
        absolutePosition = self.rfmotor.getPulseWidthPosition() & 0xFFF  # keep only the bottom 12 bits (ignore wrap-arounds), then use the low-level API to set the quad encoder signal
        self.rfmotor.setEncPosition(absolutePosition)
self.rfmotor.setFeedbackDevice(CANTalon.FeedbackDevice.CtreMagEncoder_Relative)
self.rbmotor.setFeedbackDevice(CANTalon.FeedbackDevice.CtreMagEncoder_Relative)
self.lfmotor.setFeedbackDevice(CANTalon.FeedbackDevice.CtreMagEncoder_Relative)
self.lbmotor.setFeedbackDevice(CANTalon.FeedbackDevice.CtreMagEncoder_Relative)
        # setting the encoder ticks (codes) per revolution
self.lfmotor.configEncoderCodesPerRev(4096)
self.rfmotor.configEncoderCodesPerRev(4096)
self.lbmotor.configEncoderCodesPerRev(4096)
self.rbmotor.configEncoderCodesPerRev(4096)
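        # Velocity PID gains (P-only) for all four drive Talons, stored in profile slot 0.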
self.lfmotor.setPID(0.0005, 0, 0.0, profile=0)
self.rfmotor.setPID(0.0005, 0, 0.0, profile=0)
self.lbmotor.setPID(0.0005, 0, 0.0, profile=0)
self.rbmotor.setPID(0.0005, 0, 0.0, profile=0)
self.lbmotor.configNominalOutputVoltage(+0.0, -0.0)
self.lbmotor.configPeakOutputVoltage(+12.0, -12.0)
self.lbmotor.setControlMode(CANTalon.ControlMode.Speed)
self.lfmotor.configNominalOutputVoltage(+0.0, -0.0)
self.lfmotor.configPeakOutputVoltage(+12.0, -12.0)
self.lfmotor.setControlMode(CANTalon.ControlMode.Speed)
self.rbmotor.configNominalOutputVoltage(+0.0, -0.0)
self.rbmotor.configPeakOutputVoltage(+12.0, -12.0)
self.rbmotor.setControlMode(CANTalon.ControlMode.Speed)
self.rfmotor.configNominalOutputVoltage(+0.0, -0.0)
self.rfmotor.configPeakOutputVoltage(+12.0, -12.0)
self.rfmotor.setControlMode(CANTalon.ControlMode.Speed)
self.rfmotor.setPosition(0)
self.rbmotor.setPosition(0)
self.lfmotor.setPosition(0)
self.lbmotor.setPosition(0)
self.lfmotor.reverseSensor(True)
self.lbmotor.reverseSensor(True)
'''
# changing the encoder output from DISTANCE to RATE (we're dumb)
self.lfencoder.setPIDSourceType(wpilib.PIDController.PIDSourceType.kRate)
self.lbencoder.setPIDSourceType(wpilib.PIDController.PIDSourceType.kRate)
self.rfencoder.setPIDSourceType(wpilib.PIDController.PIDSourceType.kRate)
self.rbencoder.setPIDSourceType(wpilib.PIDController.PIDSourceType.kRate)
# LiveWindow settings (Encoder)
wpilib.LiveWindow.addSensor("Drive Train", "Left Front Encoder", self.lfencoder)
wpilib.LiveWindow.addSensor("Drive Train", "Right Front Encoder", self.rfencoder)
wpilib.LiveWindow.addSensor("Drive Train", "Left Back Encoder", self.lbencoder)
wpilib.LiveWindow.addSensor("Drive Train", "Right Back Encoder", self.rbencoder)
'''
'''
# Checking the state of the encoders on the Smart Dashboard
wpilib.SmartDashboard.putBoolean("Right Front Encoder Enabled?", self.rfmotor.isSensorPresent)
wpilib.SmartDashboard.putBoolean("Right Back Encoder Enabled?", self.rbmotor.isSensorPresent)
wpilib.SmartDashboard.putBoolean("Left Front Encoder Enabled?", self.lfmotor.isSensorPresent)
wpilib.SmartDashboard.putBoolean("Left Back Encoder Enabled?", self.lbmotor.isSensorPresent)
'''
if self.CONTROL_TYPE:
# Initializing PID Controls
self.pidRightFront = wpilib.PIDController(0.002, 0.8, 0.005, 0, self.rfmotor.feedbackDevice, self.rfmotor, 0.02)
self.pidLeftFront = wpilib.PIDController(0.002, 0.8, 0.005, 0, self.lfmotor.feedbackDevice, self.lfmotor, 0.02)
self.pidRightBack = wpilib.PIDController(0.002, 0.8, 0.005, 0, self.rbmotor.feedbackDevice, self.rbmotor, 0.02)
self.pidLeftBack = wpilib.PIDController(0.002, 0.8, 0.005, 0, self.lbmotor.feedbackDevice, self.lbmotor, 0.02)
# PID Absolute Tolerance Settings
self.pidRightFront.setAbsoluteTolerance(0.05)
self.pidLeftFront.setAbsoluteTolerance(0.05)
self.pidRightBack.setAbsoluteTolerance(0.05)
self.pidLeftBack.setAbsoluteTolerance(0.05)
# PID Output Range Settings
self.pidRightFront.setOutputRange(-1, 1)
self.pidLeftFront.setOutputRange(-1, 1)
self.pidRightBack.setOutputRange(-1, 1)
self.pidLeftBack.setOutputRange(-1, 1)
# Enable PID
#self.enablePIDs()
'''
# LiveWindow settings (PID)
wpilib.LiveWindow.addActuator("Drive Train Right", "Right Front PID", self.pidRightFront)
wpilib.LiveWindow.addActuator("Drive Train Left", "Left Front PID", self.pidLeftFront)
wpilib.LiveWindow.addActuator("Drive Train Right", "Right Back PID", self.pidRightBack)
wpilib.LiveWindow.addActuator("Drive Train Left", "Left Back PID", self.pidLeftBack)
'''
self.dashTimer = Timer() # Timer for SmartDashboard updating
self.dashTimer.start()
'''
# Adding components to the LiveWindow (testing)
wpilib.LiveWindow.addActuator("Drive Train Left", "Left Front Motor", self.lfmotor)
wpilib.LiveWindow.addActuator("Drive Train Right", "Right Front Motor", self.rfmotor)
wpilib.LiveWindow.addActuator("Drive Train Left", "Left Back Motor", self.lbmotor)
wpilib.LiveWindow.addActuator("Drive Train Right", "Right Back Motor", self.rbmotor)
'''
def log(self):
        # The log method puts interesting information (like encoder data) on the SmartDashboard.
'''
#no longer implemented because of change of hardware
wpilib.SmartDashboard.putNumber("Left Front Speed", self.lfmotor.getEncVelocity())
wpilib.SmartDashboard.putNumber("Right Front Speed", self.rfmotor.getEncVelocity())
wpilib.SmartDashboard.putNumber("Left Back Speed", self.lbmotor.getEncVelocity())
wpilib.SmartDashboard.putNumber("Right Back Speed", self.rbmotor.getEncVelocity())
'''
wpilib.SmartDashboard.putNumber("RF Mag Enc Position", self.rfmotor.getPosition())
wpilib.SmartDashboard.putNumber("RB Mag Enc Position", self.rbmotor.getPosition())
wpilib.SmartDashboard.putNumber("LF Mag Enc Position", self.lfmotor.getPosition())
wpilib.SmartDashboard.putNumber("LB Mag Enc Position", self.lbmotor.getPosition())
'''
wpilib.SmartDashboard.putNumber("Right Front Mag Distance(inches)", self.convertEncoderRaw(self.rfmotor.getPosition()*0.57))
wpilib.SmartDashboard.putNumber("Right Back Mag Distance(inches)", self.convertEncoderRaw(self.rbmotor.getPosition()*0.57))
wpilib.SmartDashboard.putNumber("Left Front Mag Distance(inches)", self.convertEncoderRaw(self.lfmotor.getPosition()*0.57))
wpilib.SmartDashboard.putNumber("Left Back Mag Distance(inches)", self.convertEncoderRaw(self.lbmotor.getPosition()*0.57))
'''
# drive forward function
    def drive_forward(self, speed):
        # self.drive (a RobotDrive) is never constructed in this class, so drive the Talons directly.
        self.autonTankDrive(speed, speed)
# manual drive function for Tank Drive
def xboxTankDrive(self, leftSpeed, rightSpeed, leftB, rightB, leftT, rightT):
#self.lfmotor.setCloseLoopRampRate(1)
#self.lbmotor.setCloseLoopRampRate(1)
#self.rfmotor.setCloseLoopRampRate(1)
#self.rbmotor.setCloseLoopRampRate(1)
if (leftB == True): #Straight Button
rightSpeed = leftSpeed
if (rightB == True): #Slow Button
#leftSpeed = leftSpeed/1.75
#rightSpeed = rightSpeed/1.75
            if(not(leftSpeed < -0.5 and rightSpeed > 0.5 or leftSpeed > -0.5 and rightSpeed < 0.5)): # only slow down if not turning
leftSpeed = leftSpeed/1.75
rightSpeed = rightSpeed/1.75
# Fast button
if(rightT == True):
#self.lfmotor.setCloseLoopRampRate(24)
#self.lbmotor.setCloseLoopRampRate(24)
#self.rfmotor.setCloseLoopRampRate(24)
#self.rbmotor.setCloseLoopRampRate(24)
leftSpeed = leftSpeed*(1.75)
rightSpeed = rightSpeed*(1.75)
if(leftT == True):
leftSpeed = 0.1
rightSpeed = 0.1
# Creating margin for error when using the joysticks, as they're quite sensitive
if abs(rightSpeed) < 0.04 :
rightSpeed = 0
if abs(leftSpeed) < 0.04 :
leftSpeed = 0
if self.CONTROL_TYPE:
self.pidRightFront.setSetpoint(rightSpeed)
self.pidRightBack.setSetpoint(rightSpeed)
self.pidLeftFront.setSetpoint(leftSpeed)
self.pidLeftBack.setSetpoint(leftSpeed)
else:
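            # Direct speed control: scale the normalized (-1..1) stick values up to a velocity setpoint for the Talons in Speed mode.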
self.lfmotor.set(leftSpeed*512)
self.rfmotor.set(rightSpeed*512)
self.lbmotor.set(leftSpeed*512)
self.rbmotor.set(rightSpeed*512)
    # autonomous tank drive (removes the need for a slow, straight, or fast button)
def autonTankDrive(self, leftSpeed, rightSpeed):
self.log()
#self.drive.tankDrive(leftSpeed, rightSpeed, True)
self.rfmotor.set(rightSpeed)
self.rbmotor.set(rightSpeed*(-1))
self.lfmotor.set(leftSpeed)
self.lbmotor.set(leftSpeed*(-1))
# stop function
    def drive_stop(self):
        self.autonTankDrive(0, 0)
    # function to reset the PIDs and encoder values
def reset(self):
self.rfmotor.setPosition(0)
self.rbmotor.setPosition(0)
self.lfmotor.setPosition(0)
self.lbmotor.setPosition(0)
if self.CONTROL_TYPE:
self.LEFTFRONTCUMULATIVE = 0
self.RIGHTFRONTCUMULATIVE = 0
self.LEFTBACKCUMULATIVE= 0
self.RIGHTBACKCUMULATIVE = 0
self.pidLeftBack.setSetpoint(0)
self.pidLeftFront.setSetpoint(0)
self.pidRightBack.setSetpoint(0)
self.pidRightFront.setSetpoint(0)
# def getDistance(self)
# return (abs(self.convertEncoderRaw(LEFTFRONTCUMULATIVE) + abs(self.convertEncoderRaw(LEFTBACKCUMULATIVE)) + abs(self.convertEncoderRaw(RIGHTFRONTCUMULATIVE)) + abs(self.convertEncoderRaw(RIGHTBACKCUMULATIVE)))
    def turn_angle(self, degrees):
        # NOTE: assumes an INCHES_PER_DEGREE attribute (arc length per degree of rotation) is defined elsewhere.
        desired_inches = self.INCHES_PER_DEGREE * abs(degrees)
        if degrees < 0:
            while self.getAutonDistance() <= desired_inches:
                self.autonTankDrive(0.4, -0.4)
        elif degrees > 0:
            while self.getAutonDistance() <= desired_inches:
                self.autonTankDrive(-0.4, 0.4)
# Enable PID Controllers
def enablePIDs(self):
'''
#No longer required because we swapped from analog encoders to magnetic encoders
self.pidLeftFront.enable()
self.pidLeftBack.enable()
self.pidRightFront.enable()
self.pidRightBack.enable()
'''
# Disable PID Controllers
def disablePIDs(self):
'''
        # see explanation above
self.pidLeftFront.disable()
self.pidLeftBack.disable()
self.pidRightFront.disable()
self.pidRightBack.disable()
'''
def getAutonDistance(self):
return (self.convertEncoderRaw(abs(self.rfmotor.getPosition()*0.57))
+ self.convertEncoderRaw(abs(self.rbmotor.getPosition()*0.57))
+ self.convertEncoderRaw(abs(self.lfmotor.getPosition()*0.57))
+ self.convertEncoderRaw(abs(self.lbmotor.getPosition()*0.57)))/4
    # determines how many ticks the encoder has processed
def getMotorDistance(self, motor, cumulativeDistance):
currentRollovers = 0 #number of times the encoder has gone from 1023 to 0
previousValue = cumulativeDistance #variable for comparison
currentValue = motor.getEncPosition() #variable for comparison
if(previousValue > currentValue): #checks to see if the encoder reset itself from 1023 to 0
currentRollovers += 1 #notes the rollover
return currentValue + (currentRollovers * 1024) #adds current value to the number of rollovers, each rollover == 1024 ticks
#converts ticks from getMotorDistance into inches
def convertEncoderRaw(self, selectedEncoderValue):
return selectedEncoderValue * self.INCHES_PER_REV
| Team74/FRC_2016_Python_Stronghold | components/drive.py | Python | gpl-3.0 | 14,598 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bep_bep.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| ChrisHartley/bep_bep | manage.py | Python | gpl-3.0 | 250 |
#!/usr/bin/env python
import tensorflow as tf
import edward as ed
import numpy as np
from numpy import array
from numpy.linalg import norm
from edward.models import Dirichlet, Multinomial, Gamma, Poisson
sess = tf.Session()
def build_toy_dataset(n, p, A, b):
"""
toy HMM with:
n=number of timesteps,
p=m length array where m is the number of hidden states and p_i is the
initial probability of being in state i
A=mxm transition matrix indexed by i, j where the (i,j) element is the
probability of transitioning from element j to element i
    b=m length array where b_i contains the Poisson rate for state i
"""
p = array(p)/float(sum(p))
z = [np.random.multinomial(1, p)]
obs = [np.random.poisson(z[-1].dot(b))]
for step in range(n-1):
z += [np.random.multinomial(1, z[-1].dot(A))]
obs += [float(np.random.poisson(z[-1].dot(b)))]
return obs, z
n = 162
p_true = [.7, .3]
A_true = array([[0.8,0.4],[0.2,0.6]])
b_true = [0.1, 3.]
obs_train, z_train = build_toy_dataset(n, p_true, A_true, b_true)
obs_test, z_test = build_toy_dataset(n, p_true, A_true, b_true)
#obs = tf.placeholder(tf.float32, [n])
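# Unroll the HMM as an Edward/TensorFlow graph: vd supplies the initial-state
# probabilities 'p', the transition matrix 'A', and the per-state Poisson rates 'b'.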
def gen_hmm(vd):
z = tf.expand_dims(
tf.transpose(
tf.expand_dims(Multinomial(total_count=1., probs=vd['p']), 0)), 0)
obs = tf.expand_dims(
Poisson(rate=tf.matmul(tf.expand_dims(vd['b'],0), z[-1])), 0)
for t in range(n-1):
z_new = tf.transpose(Multinomial(total_count=1.,
probs=tf.transpose(tf.matmul(tf.transpose(vd['A']),z[-1]),
name='tx_prob')),name='z_new')
z = tf.concat([z,tf.expand_dims(z_new,0)],0)
obs = tf.concat([obs,
tf.expand_dims(
Poisson(rate=tf.matmul(
tf.expand_dims(vd['b'],0), z_new)),0)], 0)
return obs, z
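# Dirichlet/Gamma hyperparameters for the priors, plus trainable copies for the variational posteriors.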
p_p_alpha = [2.,2.]
p_A_alpha = [[2.,1.],[1.,2.]]
p_b_alpha = [0.5,2.0]
p_b_beta = [1.,1.]
q_p_alpha = tf.Variable(p_p_alpha)
q_A_alpha = tf.Variable(p_A_alpha)
q_b_alpha = tf.Variable(p_b_alpha)
q_b_beta = tf.Variable(p_b_beta)
p = Dirichlet(p_p_alpha, name='p')
A = Dirichlet(p_A_alpha, name='A')
b = Gamma(p_b_alpha, p_b_beta)
qp = Dirichlet(q_p_alpha, name='p')
qA = Dirichlet(q_A_alpha, name='A')
qb = Gamma(q_b_alpha, q_b_beta)
obs, z = gen_hmm({'p':p, 'A':A, 'b':b})
obs_train, z_train = build_toy_dataset(n, p_true, A_true, b_true)
obs_train = tf.expand_dims(tf.expand_dims(obs_train, 0), 0)
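# KLqp variational inference: pair each prior with its variational distribution and bind the observed series.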
latent_vars = {p: qp, A: qA, b: qb}
data = {tf.squeeze(obs): tf.squeeze(obs_train)}
inference = ed.KLqp(latent_vars, data)
inference.run(n_samples=5, n_iter=2500)
print(qp.eval())
print(tf.transpose(qA).eval())
print(qb.eval())
obs_post = ed.copy(obs, {p: qp, A: qA, b: qb})
print("posterior observations")
print(tf.squeeze(obs_post).eval())
print("training observations")
print(tf.squeeze(obs_train).eval())
print("Mean absolute error on training data:")
print(ed.evaluate('mean_absolute_error', data={tf.squeeze(obs_post): tf.squeeze(obs_train)}))
print("test observations")
print(tf.squeeze(obs_test).eval())
print("Mean absolute error on test data:")
print(ed.evaluate('mean_absolute_error', data={tf.squeeze(obs_post): tf.squeeze(obs_test)}))
file_writer = tf.summary.FileWriter('/home/kyjohnso/projects/mlbslice/tb_logs',
tf.get_default_graph())
sess.close()
| kyjohnso/mlbslice | hmm_sandbox.py | Python | bsd-2-clause | 3,478 |
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import pytest
from mock import patch
from .placebo_fixtures import placeboify, maybe_sleep
from ansible.modules.cloud.amazon import cloudformation as cfn_module
basic_yaml_tpl = """
---
AWSTemplateFormatVersion: '2010-09-09'
Description: 'Basic template that creates an S3 bucket'
Resources:
MyBucket:
Type: "AWS::S3::Bucket"
Outputs:
TheName:
Value:
!Ref MyBucket
"""
bad_json_tpl = """{
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Broken template, no comma here ->"
"Resources": {
"MyBucket": {
"Type": "AWS::S3::Bucket"
}
}
}"""
class FakeModule(object):
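    """Minimal stand-in for AnsibleModule that records exit_json/fail_json arguments."""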
def __init__(self, **kwargs):
self.params = kwargs
def fail_json(self, *args, **kwargs):
self.exit_args = args
self.exit_kwargs = kwargs
raise Exception('FAIL')
def exit_json(self, *args, **kwargs):
self.exit_args = args
self.exit_kwargs = kwargs
raise Exception('EXIT')
def test_invalid_template_json(placeboify):
connection = placeboify.client('cloudformation')
params = {
'StackName': 'ansible-test-wrong-json',
'TemplateBody': bad_json_tpl,
}
m = FakeModule(disable_rollback=False)
with pytest.raises(Exception, message='Malformed JSON should cause the test to fail') as exc_info:
cfn_module.create_stack(m, params, connection)
assert exc_info.match('FAIL')
assert "ValidationError" in m.exit_kwargs['msg']
def test_client_request_token_s3_stack(maybe_sleep, placeboify):
connection = placeboify.client('cloudformation')
params = {
'StackName': 'ansible-test-client-request-token-yaml',
'TemplateBody': basic_yaml_tpl,
'ClientRequestToken': '3faf3fb5-b289-41fc-b940-44151828f6cf',
}
m = FakeModule(disable_rollback=False)
result = cfn_module.create_stack(m, params, connection)
assert result['changed']
assert len(result['events']) > 1
# require that the final recorded stack state was CREATE_COMPLETE
# events are retrieved newest-first, so 0 is the latest
assert 'CREATE_COMPLETE' in result['events'][0]
connection.delete_stack(StackName='ansible-test-client-request-token-yaml')
def test_basic_s3_stack(maybe_sleep, placeboify):
connection = placeboify.client('cloudformation')
params = {
'StackName': 'ansible-test-basic-yaml',
'TemplateBody': basic_yaml_tpl
}
m = FakeModule(disable_rollback=False)
result = cfn_module.create_stack(m, params, connection)
assert result['changed']
assert len(result['events']) > 1
# require that the final recorded stack state was CREATE_COMPLETE
# events are retrieved newest-first, so 0 is the latest
assert 'CREATE_COMPLETE' in result['events'][0]
connection.delete_stack(StackName='ansible-test-basic-yaml')
def test_delete_nonexistent_stack(maybe_sleep, placeboify):
connection = placeboify.client('cloudformation')
result = cfn_module.stack_operation(connection, 'ansible-test-nonexist', 'DELETE')
assert result['changed']
assert 'Stack does not exist.' in result['log']
def test_get_nonexistent_stack(placeboify):
connection = placeboify.client('cloudformation')
assert cfn_module.get_stack_facts(connection, 'ansible-test-nonexist') is None
def test_missing_template_body(placeboify):
m = FakeModule()
with pytest.raises(Exception, message='Expected module to fail with no template') as exc_info:
cfn_module.create_stack(
module=m,
stack_params={},
cfn=None
)
assert exc_info.match('FAIL')
assert not m.exit_args
assert "Either 'template' or 'template_url' is required when the stack does not exist." == m.exit_kwargs['msg']
| tsdmgz/ansible | test/units/modules/cloud/amazon/test_cloudformation.py | Python | gpl-3.0 | 4,428 |
#!/usr/bin/env python
# coding=utf-8
def process(a,b,add=0,sub=0,mut=0,div=0):
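    # Return the result of whichever arithmetic operation flag (add/sub/mut/div) is set to 1.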
if add==1:
return a+b
if sub==1:
return a-b
if mut ==1:
return a*b
if div ==1:
return a/b
print process(1,2,add=1)
print process(1,2,sub=1)
print process(1,2,mut=1)
| zhaochl/python-utils | utils/fun_util.py | Python | apache-2.0 | 293 |
"""
Django settings for qlinkplanner project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['SECRET']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Allowed hosts that can reach the planner
ALLOWED_HOSTS = [
'localhost',
os.environ['URL']
]
# Application definition
INSTALLED_APPS = [
'planner',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'qlinkplanner.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'qlinkplanner.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, '../planner/static'),
)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
## Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': ('%(asctime)s [%(process)d] [%(levelname)s] '
'pathname=%(pathname)s lineno=%(lineno)s '
'funcname=%(funcName)s message=%(message)s'),
'datefmt': '%Y-%m-%d %H:%M:%S'
},
'simple': {
'format': '%(levelname)s %(message)s'
}
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'qlinkplanner': {
'handlers': ['console', ],
'level': 'INFO',
}
}
}
| nickubels/qlinkplanner | qlinkplanner/settings.py | Python | mit | 4,429 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from collections import namedtuple
import datetime
from difflib import SequenceMatcher
import logging
import time
from django.conf import settings
from django.utils.timezone import now
import magic
import requests
from blog.models import Tag
from core.storage import Qiniu
from .models import Book
logger = logging.getLogger(__name__)
def similar(a, b):
return SequenceMatcher(None, a, b).ratio()
class UpdateBookInfoFromDouban(object):
SEARCH_URL = 'https://api.douban.com/v2/book/search'
SUBJECT_URL = 'http://book.douban.com/subject/{id}/'
def __init__(self, verify=False):
self.session = requests.Session()
self.session.headers.update({
'DNT': '1',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2450.0 Iron/46.0.2450.0 Safari/537.36', # noqa
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8', # noqa
'Cache-Control': 'max-age=0',
})
self.verify = verify
def search_book(self, book):
url = self.SEARCH_URL
params = {
'q': book.name,
}
if settings.DOUBAN_APIKEY:
params['apikey'] = settings.DOUBAN_APIKEY
books = self.session.get(url, params=params, verify=self.verify
).json()['books']
return books
def best_match(self, name, books):
match = namedtuple('Match', ['book', 'rate'])
similar_rates = [
match(book, similar(name, book['title']))
for book in books
]
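        # Require at least 50% title similarity before trusting a Douban search result.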
min_rate = 0.5
best_similar = similar_rates[0]
for item in similar_rates:
if item.rate > best_similar.rate:
best_similar = item
if best_similar.rate < min_rate:
return
else:
return best_similar
def update(self, book, data):
if not book.author:
book.author = ', '.join(data['author'])
if not book.isbn:
book.isbn = data.get('isbn13', '')
if (not book.description) or book.description == book.name:
book.description = data['summary']
        if not book.douban_url:
            book.douban_url = self.SUBJECT_URL.format(id=data['id'])
try:
img = self.download_img(data['images']['large'])
if img:
book.cover = Qiniu().upload(img)
book.save()
except Exception as e:
logger.exception(e)
self.update_book_tags(book, data['tags'])
def download_img(self, url):
resp = self.session.get(url, verify=self.verify)
if not resp.ok:
logger.info('get image error: %s', resp.status_code)
return
img = resp.content
mime = magic.from_buffer(img, mime=True)
if not mime.startswith('image'):
logger.info('not image: %s, ignore', mime)
return
return img
def __call__(self, book, keyword='cover_template'):
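        # Only update books whose cover is still the placeholder template image.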
if keyword not in book.cover:
return
books = self.search_book(book)
if not books:
return
best_similar = self.best_match(book.name, books)
if best_similar is None:
return
self.update(book, best_similar.book)
return book
def get_book_tags(self, book,
url_base='https://api.douban.com/v2/book/{id}'):
"""从豆瓣获取书籍 tags"""
if not book.douban_url:
return []
douban_id = book.douban_url.split('/')[-2]
url = url_base.format(id=douban_id)
params = {}
if settings.DOUBAN_APIKEY:
params['apikey'] = settings.DOUBAN_APIKEY
logger.debug('url: %s, params: %s', url, params)
response = self.session.get(url, params=params, verify=self.verify)
logger.debug('response: %s', response)
result = response.json()
return result['tags']
def update_book_tags(self, book, tags=None):
if tags is None:
tags = self.get_book_tags(book)
for tag in tags:
name = tag['name']
instance, _ = Tag.objects.get_or_create(name=name)
if not book.tags.filter(pk=instance.pk).exists():
book.tags.add(instance)
return book
def update_books(sleep_days=8, recent_days=10, filter_kwargs=None):
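    # Periodically refresh Douban info for books read within the last `recent_days` days.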
def _update(filter_kwargs):
if filter_kwargs is None:
min_read_at = now() - datetime.timedelta(recent_days)
filter_kwargs = {
'last_read_at__gte': min_read_at
}
updater = UpdateBookInfoFromDouban()
for book in Book.objects.filter(**filter_kwargs):
updater(book)
logger.debug(unicode(book))
time.sleep(60 / 10)
while True:
_update(filter_kwargs)
logger.debug('sleep %s days', sleep_days)
time.sleep(60 * 60 * 24 * sleep_days)
| mozillazg/chendian-plus | chendian/book/utils.py | Python | mit | 5,400 |