code | repo_name | path | language | license | size
---|---|---|---|---|---|
import math
# Check whether a number is prime.
def check(N):
    if N < 2:
        return False
    for i in range(2, round(math.sqrt(N)) + 1):
        if N % i == 0:
            return False
    return True
# Tests.
print(check(int(input())))
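# Illustrative behavior (sample inputs assumed for illustration):
# check(2) -> True, check(13) -> True, check(12) -> False, check(1) -> False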
| alekseik1/python_mipt_study_1-2 | 2sem/credit_tasks/zero-questions/6.py | Python | gpl-3.0 | 209 |
"""
xModule implementation of a learning sequence
"""
# pylint: disable=abstract-method
import json
import logging
from pkg_resources import resource_string
import warnings
from lxml import etree
from xblock.core import XBlock
from xblock.fields import Integer, Scope, Boolean, Dict
from xblock.fragment import Fragment
from .exceptions import NotFoundError
from .fields import Date
from .mako_module import MakoModuleDescriptor
from .progress import Progress
from .x_module import XModule, STUDENT_VIEW
from .xml_module import XmlDescriptor
log = logging.getLogger(__name__)
# HACK: This shouldn't be hard-coded to two types
# OBSOLETE: This obsoletes 'type'
class_priority = ['problem', 'video']
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
class SequenceFields(object):
has_children = True
# NOTE: Position is 1-indexed. This is silly, but there are now student
# positions saved on prod, so it's not easy to fix.
position = Integer(help="Last tab viewed in this sequence", scope=Scope.user_state)
due = Date(
display_name=_("Due Date"),
help=_("Enter the date by which problems are due."),
scope=Scope.settings,
)
# Entrance Exam flag -- see cms/contentstore/views/entrance_exam.py for usage
is_entrance_exam = Boolean(
display_name=_("Is Entrance Exam"),
help=_(
"Tag this course module as an Entrance Exam. "
"Note, you must enable Entrance Exams for this course setting to take effect."
),
default=False,
scope=Scope.settings,
)
individual_start_days = Integer(
help=_("Number of days from the base date to the chapter starts"),
scope=Scope.settings
)
individual_start_hours = Integer(
help=_("Number of hours from the base date to the chapter starts"),
scope=Scope.settings
)
individual_start_minutes = Integer(
help=_("Number of minutes from the base date to the chapter starts"),
scope=Scope.settings
)
individual_due_days = Integer(
help=_("Number of days from the base date to the due"),
scope=Scope.settings
)
individual_due_hours = Integer(
help=_("Number of hours from the base date to the due"),
scope=Scope.settings
)
individual_due_minutes = Integer(
help=_("Number of minutes from the base date to the due"),
scope=Scope.settings
)
progress_restriction = Dict(
help=_("Settings for progress restriction"),
default={
"type": "No Restriction",
},
scope=Scope.settings
)
class ProctoringFields(object):
"""
Fields that are specific to Proctored or Timed Exams
"""
is_time_limited = Boolean(
display_name=_("Is Time Limited"),
help=_(
"This setting indicates whether students have a limited time"
" to view or interact with this courseware component."
),
default=False,
scope=Scope.settings,
)
default_time_limit_minutes = Integer(
display_name=_("Time Limit in Minutes"),
help=_(
"The number of minutes available to students for viewing or interacting with this courseware component."
),
default=None,
scope=Scope.settings,
)
is_proctored_enabled = Boolean(
display_name=_("Is Proctoring Enabled"),
help=_(
"This setting indicates whether this exam is a proctored exam."
),
default=False,
scope=Scope.settings,
)
is_practice_exam = Boolean(
display_name=_("Is Practice Exam"),
help=_(
"This setting indicates whether this exam is for testing purposes only. Practice exams are not verified."
),
default=False,
scope=Scope.settings,
)
@property
def is_proctored_exam(self):
""" Alias the is_proctored_enabled field to the more legible is_proctored_exam """
return self.is_proctored_enabled
@is_proctored_exam.setter
def is_proctored_exam(self, value):
""" Alias the is_proctored_enabled field to the more legible is_proctored_exam """
self.is_proctored_enabled = value
@XBlock.wants('proctoring')
@XBlock.wants('credit')
class SequenceModule(SequenceFields, ProctoringFields, XModule):
''' Layout module which lays out content in a temporal sequence
'''
js = {
'coffee': [resource_string(__name__, 'js/src/sequence/display.coffee')],
'js': [resource_string(__name__, 'js/src/sequence/display/jquery.sequence.js')],
}
css = {
'scss': [resource_string(__name__, 'css/sequence/display.scss')],
}
js_module_name = "Sequence"
def __init__(self, *args, **kwargs):
super(SequenceModule, self).__init__(*args, **kwargs)
# If position is specified in system, then use that instead.
position = getattr(self.system, 'position', None)
if position is not None:
try:
self.position = int(self.system.position)
except (ValueError, TypeError):
# Check for https://openedx.atlassian.net/browse/LMS-6496
warnings.warn(
"Sequential position cannot be converted to an integer: {pos!r}".format(
pos=self.system.position,
),
RuntimeWarning,
)
def get_progress(self):
''' Return the total progress, adding total done and total available.
(assumes that each submodule uses the same "units" for progress.)
'''
# TODO: Cache progress or children array?
children = self.get_children()
progresses = [child.get_progress() for child in children]
progress = reduce(Progress.add_counts, progresses, None)
return progress
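    # Hedged sketch of the reduce above (Progress semantics assumed, not shown
    # here): children reporting Progress(1, 2) and Progress(0, 3) would fold to
    # Progress(1, 5); the None seed lets a childless sequence report no progress.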
def handle_ajax(self, dispatch, data): # TODO: bounds checking
        ''' data = request.POST instance '''
if dispatch == 'goto_position':
# set position to default value if either 'position' argument not
# found in request or it is a non-positive integer
position = data.get('position', u'1')
if position.isdigit() and int(position) > 0:
self.position = int(position)
else:
self.position = 1
return json.dumps({'success': True})
raise NotFoundError('Unexpected dispatch type')
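    # Illustrative dispatch (hypothetical values): handle_ajax('goto_position',
    # {'position': u'3'}) stores position 3 and returns '{"success": true}';
    # a missing or non-positive 'position' falls back to 1.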
def student_view(self, context):
# If we're rendering this sequence, but no position is set yet,
# default the position to the first element
if self.position is None:
self.position = 1
        # Collect rendered info for each child to hand to the template.
contents = []
fragment = Fragment()
# Is this sequential part of a timed or proctored exam?
if self.is_time_limited:
view_html = self._time_limited_student_view(context)
# Do we have an alternate rendering
# from the edx_proctoring subsystem?
if view_html:
fragment.add_content(view_html)
return fragment
for child in self.get_display_items():
progress = child.get_progress()
rendered_child = child.render(STUDENT_VIEW, context)
fragment.add_frag_resources(rendered_child)
# `titles` is a list of titles to inject into the sequential tooltip display.
# We omit any blank titles to avoid blank lines in the tooltip display.
titles = [title.strip() for title in child.get_content_titles() if title.strip()]
childinfo = {
'content': rendered_child.content,
'title': "\n".join(titles),
'page_title': titles[0] if titles else '',
'progress_status': Progress.to_js_status_str(progress),
'progress_detail': Progress.to_js_detail_str(progress),
'type': child.get_icon_class(),
'id': child.scope_ids.usage_id.to_deprecated_string(),
}
if childinfo['title'] == '':
childinfo['title'] = child.display_name_with_default
contents.append(childinfo)
params = {
'items': contents,
'element_id': self.location.html_id(),
'item_id': self.location.to_deprecated_string(),
'position': self.position,
'tag': self.location.category,
'ajax_url': self.system.ajax_url,
}
fragment.add_content(self.system.render_template("seq_module.html", params))
return fragment
def _time_limited_student_view(self, context):
"""
        Delegated rendering of the student view when this sequence is
        time-limited. This ultimately calls down into the edx_proctoring
        pip-installed djangoapp.
"""
# None = no overridden view rendering
view_html = None
proctoring_service = self.runtime.service(self, 'proctoring')
credit_service = self.runtime.service(self, 'credit')
# Is this sequence designated as a Timed Examination, which includes
# Proctored Exams
feature_enabled = (
proctoring_service and
credit_service and
self.is_time_limited
)
if feature_enabled:
user_id = self.runtime.user_id
user_role_in_course = 'staff' if self.runtime.user_is_staff else 'student'
course_id = self.runtime.course_id
content_id = self.location
context = {
'display_name': self.display_name,
'default_time_limit_mins': (
self.default_time_limit_minutes if
self.default_time_limit_minutes else 0
),
'is_practice_exam': self.is_practice_exam,
'due_date': self.due
}
# inject the user's credit requirements and fulfillments
if credit_service:
credit_state = credit_service.get_credit_state(user_id, course_id)
if credit_state:
context.update({
'credit_state': credit_state
})
# See if the edx-proctoring subsystem wants to present
# a special view to the student rather
# than the actual sequence content
#
# This will return None if there is no
# overridden view to display given the
# current state of the user
view_html = proctoring_service.get_student_view(
user_id=user_id,
course_id=course_id,
content_id=content_id,
context=context,
user_role=user_role_in_course
)
return view_html
def get_icon_class(self):
child_classes = set(child.get_icon_class()
for child in self.get_children())
new_class = 'other'
for c in class_priority:
if c in child_classes:
new_class = c
return new_class
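    # Worked example of the loop above: child icon classes {'problem', 'video',
    # 'other'} yield 'video', because the loop keeps the last matching entry in
    # class_priority instead of breaking on the first.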
class SequenceDescriptor(SequenceFields, ProctoringFields, MakoModuleDescriptor, XmlDescriptor):
"""
    A Sequence Descriptor object
"""
mako_template = 'widgets/sequence-edit.html'
module_class = SequenceModule
show_in_read_only_mode = True
js = {
'coffee': [resource_string(__name__, 'js/src/sequence/edit.coffee')],
}
js_module_name = "SequenceDescriptor"
@classmethod
def definition_from_xml(cls, xml_object, system):
children = []
for child in xml_object:
try:
child_block = system.process_xml(etree.tostring(child, encoding='unicode'))
children.append(child_block.scope_ids.usage_id)
except Exception as e:
log.exception("Unable to load child when parsing Sequence. Continuing...")
if system.error_tracker is not None:
system.error_tracker(u"ERROR: {0}".format(e))
continue
return {}, children
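    # Illustrative input (hypothetical XML): <sequential><problem .../><video .../>
    # </sequential> returns ({}, [problem_usage_id, video_usage_id]); children that
    # fail to parse are logged, reported to the error tracker, and skipped.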
def definition_to_xml(self, resource_fs):
xml_object = etree.Element('sequential')
for child in self.get_children():
self.runtime.add_block_as_child_node(child, xml_object)
return xml_object
@property
def non_editable_metadata_fields(self):
"""
`is_entrance_exam` should not be editable in the Studio settings editor.
"""
non_editable_fields = super(SequenceDescriptor, self).non_editable_metadata_fields
non_editable_fields.append(self.fields['is_entrance_exam'])
return non_editable_fields
def index_dictionary(self):
"""
Return dictionary prepared with module content and type for indexing.
"""
# return key/value fields in a Python dict object
# values may be numeric / string or dict
# default implementation is an empty dict
xblock_body = super(SequenceDescriptor, self).index_dictionary()
html_body = {
"display_name": self.display_name,
}
if "content" in xblock_body:
xblock_body["content"].update(html_body)
else:
xblock_body["content"] = html_body
xblock_body["content_type"] = "Sequence"
return xblock_body
| nttks/edx-platform | common/lib/xmodule/xmodule/seq_module.py | Python | agpl-3.0 | 13,629 |
"""
Tests for Studio Course Settings.
"""
import datetime
import json
import copy
import mock
from mock import patch
from django.utils.timezone import UTC
from django.test.utils import override_settings
from django.conf import settings
from models.settings.course_details import (CourseDetails, CourseSettingsEncoder)
from models.settings.course_grading import CourseGradingModel
from contentstore.utils import EXTRA_TAB_PANELS, reverse_course_url, reverse_usage_url
from xmodule.modulestore.tests.factories import CourseFactory
from models.settings.course_metadata import CourseMetadata
from xmodule.fields import Date
from .utils import CourseTestCase
from xmodule.modulestore.django import modulestore
from contentstore.views.component import ADVANCED_COMPONENT_POLICY_KEY
import ddt
from xmodule.modulestore import ModuleStoreEnum
def get_url(course_id, handler_name='settings_handler'):
return reverse_course_url(handler_name, course_id)
class CourseDetailsTestCase(CourseTestCase):
"""
Tests the first course settings page (course dates, overview, etc.).
"""
def test_virgin_fetch(self):
details = CourseDetails.fetch(self.course.id)
self.assertEqual(details.org, self.course.location.org, "Org not copied into")
self.assertEqual(details.course_id, self.course.location.course, "Course_id not copied into")
self.assertEqual(details.run, self.course.location.name, "Course name not copied into")
self.assertEqual(details.course_image_name, self.course.course_image)
self.assertIsNotNone(details.start_date.tzinfo)
self.assertIsNone(details.end_date, "end date somehow initialized " + str(details.end_date))
self.assertIsNone(details.enrollment_start, "enrollment_start date somehow initialized " + str(details.enrollment_start))
self.assertIsNone(details.enrollment_end, "enrollment_end date somehow initialized " + str(details.enrollment_end))
self.assertIsNone(details.syllabus, "syllabus somehow initialized" + str(details.syllabus))
self.assertIsNone(details.intro_video, "intro_video somehow initialized" + str(details.intro_video))
self.assertIsNone(details.effort, "effort somehow initialized" + str(details.effort))
def test_encoder(self):
details = CourseDetails.fetch(self.course.id)
jsondetails = json.dumps(details, cls=CourseSettingsEncoder)
jsondetails = json.loads(jsondetails)
self.assertEqual(jsondetails['course_image_name'], self.course.course_image)
self.assertIsNone(jsondetails['end_date'], "end date somehow initialized ")
self.assertIsNone(jsondetails['enrollment_start'], "enrollment_start date somehow initialized ")
self.assertIsNone(jsondetails['enrollment_end'], "enrollment_end date somehow initialized ")
self.assertIsNone(jsondetails['syllabus'], "syllabus somehow initialized")
self.assertIsNone(jsondetails['intro_video'], "intro_video somehow initialized")
self.assertIsNone(jsondetails['effort'], "effort somehow initialized")
def test_ooc_encoder(self):
"""
Test the encoder out of its original constrained purpose to see if it functions for general use
"""
details = {
'number': 1,
'string': 'string',
'datetime': datetime.datetime.now(UTC())
}
jsondetails = json.dumps(details, cls=CourseSettingsEncoder)
jsondetails = json.loads(jsondetails)
self.assertEquals(1, jsondetails['number'])
self.assertEqual(jsondetails['string'], 'string')
def test_update_and_fetch(self):
jsondetails = CourseDetails.fetch(self.course.id)
jsondetails.syllabus = "<a href='foo'>bar</a>"
# encode - decode to convert date fields and other data which changes form
self.assertEqual(
CourseDetails.update_from_json(self.course.id, jsondetails.__dict__, self.user).syllabus,
jsondetails.syllabus, "After set syllabus"
)
jsondetails.short_description = "Short Description"
self.assertEqual(
CourseDetails.update_from_json(self.course.id, jsondetails.__dict__, self.user).short_description,
jsondetails.short_description, "After set short_description"
)
jsondetails.overview = "Overview"
self.assertEqual(
CourseDetails.update_from_json(self.course.id, jsondetails.__dict__, self.user).overview,
jsondetails.overview, "After set overview"
)
jsondetails.intro_video = "intro_video"
self.assertEqual(
CourseDetails.update_from_json(self.course.id, jsondetails.__dict__, self.user).intro_video,
jsondetails.intro_video, "After set intro_video"
)
jsondetails.effort = "effort"
self.assertEqual(
CourseDetails.update_from_json(self.course.id, jsondetails.__dict__, self.user).effort,
jsondetails.effort, "After set effort"
)
jsondetails.start_date = datetime.datetime(2010, 10, 1, 0, tzinfo=UTC())
self.assertEqual(
CourseDetails.update_from_json(self.course.id, jsondetails.__dict__, self.user).start_date,
jsondetails.start_date
)
jsondetails.course_image_name = "an_image.jpg"
self.assertEqual(
CourseDetails.update_from_json(self.course.id, jsondetails.__dict__, self.user).course_image_name,
jsondetails.course_image_name
)
@override_settings(MKTG_URLS={'ROOT': 'dummy-root'})
def test_marketing_site_fetch(self):
settings_details_url = get_url(self.course.id)
with mock.patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
response = self.client.get_html(settings_details_url)
self.assertNotContains(response, "Course Summary Page")
self.assertNotContains(response, "Send a note to students via email")
self.assertContains(response, "course summary page will not be viewable")
self.assertContains(response, "Course Start Date")
self.assertContains(response, "Course End Date")
self.assertContains(response, "Enrollment Start Date")
self.assertContains(response, "Enrollment End Date")
self.assertContains(response, "not the dates shown on your course summary page")
self.assertContains(response, "Introducing Your Course")
self.assertContains(response, "Course Image")
self.assertContains(response, "Course Short Description")
self.assertNotContains(response, "Course Overview")
self.assertNotContains(response, "Course Introduction Video")
self.assertNotContains(response, "Requirements")
def test_editable_short_description_fetch(self):
settings_details_url = get_url(self.course.id)
with mock.patch.dict('django.conf.settings.FEATURES', {'EDITABLE_SHORT_DESCRIPTION': False}):
response = self.client.get_html(settings_details_url)
self.assertNotContains(response, "Course Short Description")
def test_regular_site_fetch(self):
settings_details_url = get_url(self.course.id)
with mock.patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': False}):
response = self.client.get_html(settings_details_url)
self.assertContains(response, "Course Summary Page")
self.assertContains(response, "Send a note to students via email")
self.assertNotContains(response, "course summary page will not be viewable")
self.assertContains(response, "Course Start Date")
self.assertContains(response, "Course End Date")
self.assertContains(response, "Enrollment Start Date")
self.assertContains(response, "Enrollment End Date")
self.assertNotContains(response, "not the dates shown on your course summary page")
self.assertContains(response, "Introducing Your Course")
self.assertContains(response, "Course Image")
self.assertContains(response, "Course Short Description")
self.assertContains(response, "Course Overview")
self.assertContains(response, "Course Introduction Video")
self.assertContains(response, "Requirements")
class CourseDetailsViewTest(CourseTestCase):
"""
Tests for modifying content on the first course settings page (course dates, overview, etc.).
"""
def alter_field(self, url, details, field, val):
"""
Change the one field to the given value and then invoke the update post to see if it worked.
"""
setattr(details, field, val)
# Need to partially serialize payload b/c the mock doesn't handle it correctly
payload = copy.copy(details.__dict__)
payload['start_date'] = CourseDetailsViewTest.convert_datetime_to_iso(details.start_date)
payload['end_date'] = CourseDetailsViewTest.convert_datetime_to_iso(details.end_date)
payload['enrollment_start'] = CourseDetailsViewTest.convert_datetime_to_iso(details.enrollment_start)
payload['enrollment_end'] = CourseDetailsViewTest.convert_datetime_to_iso(details.enrollment_end)
resp = self.client.ajax_post(url, payload)
self.compare_details_with_encoding(json.loads(resp.content), details.__dict__, field + str(val))
@staticmethod
def convert_datetime_to_iso(datetime_obj):
"""
Use the xblock serializer to convert the datetime
"""
return Date().to_json(datetime_obj)
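    # Hedged example (the exact format is up to xmodule.fields.Date): a value like
    # datetime.datetime(2012, 11, 12, 1, 30, tzinfo=UTC()) serializes to an
    # ISO-8601 string along the lines of "2012-11-12T01:30:00Z".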
def test_update_and_fetch(self):
details = CourseDetails.fetch(self.course.id)
# resp s/b json from here on
url = get_url(self.course.id)
resp = self.client.get_json(url)
self.compare_details_with_encoding(json.loads(resp.content), details.__dict__, "virgin get")
utc = UTC()
self.alter_field(url, details, 'start_date', datetime.datetime(2012, 11, 12, 1, 30, tzinfo=utc))
self.alter_field(url, details, 'start_date', datetime.datetime(2012, 11, 1, 13, 30, tzinfo=utc))
self.alter_field(url, details, 'end_date', datetime.datetime(2013, 2, 12, 1, 30, tzinfo=utc))
self.alter_field(url, details, 'enrollment_start', datetime.datetime(2012, 10, 12, 1, 30, tzinfo=utc))
self.alter_field(url, details, 'enrollment_end', datetime.datetime(2012, 11, 15, 1, 30, tzinfo=utc))
self.alter_field(url, details, 'short_description', "Short Description")
self.alter_field(url, details, 'overview', "Overview")
self.alter_field(url, details, 'intro_video', "intro_video")
self.alter_field(url, details, 'effort', "effort")
self.alter_field(url, details, 'course_image_name', "course_image_name")
def compare_details_with_encoding(self, encoded, details, context):
"""
compare all of the fields of the before and after dicts
"""
self.compare_date_fields(details, encoded, context, 'start_date')
self.compare_date_fields(details, encoded, context, 'end_date')
self.compare_date_fields(details, encoded, context, 'enrollment_start')
self.compare_date_fields(details, encoded, context, 'enrollment_end')
self.assertEqual(details['short_description'], encoded['short_description'], context + " short_description not ==")
self.assertEqual(details['overview'], encoded['overview'], context + " overviews not ==")
self.assertEqual(details['intro_video'], encoded.get('intro_video', None), context + " intro_video not ==")
self.assertEqual(details['effort'], encoded['effort'], context + " efforts not ==")
self.assertEqual(details['course_image_name'], encoded['course_image_name'], context + " images not ==")
def compare_date_fields(self, details, encoded, context, field):
"""
Compare the given date fields between the before and after doing json deserialization
"""
if details[field] is not None:
date = Date()
if field in encoded and encoded[field] is not None:
dt1 = date.from_json(encoded[field])
dt2 = details[field]
self.assertEqual(dt1, dt2, msg="{} != {} at {}".format(dt1, dt2, context))
else:
self.fail(field + " missing from encoded but in details at " + context)
elif field in encoded and encoded[field] is not None:
self.fail(field + " included in encoding but missing from details at " + context)
@ddt.ddt
class CourseGradingTest(CourseTestCase):
"""
Tests for the course settings grading page.
"""
def test_initial_grader(self):
test_grader = CourseGradingModel(self.course)
self.assertIsNotNone(test_grader.graders)
self.assertIsNotNone(test_grader.grade_cutoffs)
def test_fetch_grader(self):
test_grader = CourseGradingModel.fetch(self.course.id)
self.assertIsNotNone(test_grader.graders, "No graders")
self.assertIsNotNone(test_grader.grade_cutoffs, "No cutoffs")
for i, grader in enumerate(test_grader.graders):
subgrader = CourseGradingModel.fetch_grader(self.course.id, i)
self.assertDictEqual(grader, subgrader, str(i) + "th graders not equal")
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_update_from_json(self, store):
self.course = CourseFactory.create(default_store=store)
test_grader = CourseGradingModel.fetch(self.course.id)
altered_grader = CourseGradingModel.update_from_json(self.course.id, test_grader.__dict__, self.user)
self.assertDictEqual(test_grader.__dict__, altered_grader.__dict__, "Noop update")
test_grader.graders[0]['weight'] = test_grader.graders[0].get('weight') * 2
altered_grader = CourseGradingModel.update_from_json(self.course.id, test_grader.__dict__, self.user)
self.assertDictEqual(test_grader.__dict__, altered_grader.__dict__, "Weight[0] * 2")
# test for bug LMS-11485
with modulestore().bulk_operations(self.course.id):
new_grader = test_grader.graders[0].copy()
new_grader['type'] += '_foo'
new_grader['short_label'] += '_foo'
new_grader['id'] = len(test_grader.graders)
test_grader.graders.append(new_grader)
# don't use altered cached def, get a fresh one
CourseGradingModel.update_from_json(self.course.id, test_grader.__dict__, self.user)
altered_grader = CourseGradingModel.fetch(self.course.id)
self.assertDictEqual(test_grader.__dict__, altered_grader.__dict__)
test_grader.grade_cutoffs['D'] = 0.3
altered_grader = CourseGradingModel.update_from_json(self.course.id, test_grader.__dict__, self.user)
self.assertDictEqual(test_grader.__dict__, altered_grader.__dict__, "cutoff add D")
test_grader.grace_period = {'hours': 4, 'minutes': 5, 'seconds': 0}
altered_grader = CourseGradingModel.update_from_json(self.course.id, test_grader.__dict__, self.user)
self.assertDictEqual(test_grader.__dict__, altered_grader.__dict__, "4 hour grace period")
def test_update_grader_from_json(self):
test_grader = CourseGradingModel.fetch(self.course.id)
altered_grader = CourseGradingModel.update_grader_from_json(
self.course.id, test_grader.graders[1], self.user
)
self.assertDictEqual(test_grader.graders[1], altered_grader, "Noop update")
test_grader.graders[1]['min_count'] = test_grader.graders[1].get('min_count') + 2
altered_grader = CourseGradingModel.update_grader_from_json(
self.course.id, test_grader.graders[1], self.user)
self.assertDictEqual(test_grader.graders[1], altered_grader, "min_count[1] + 2")
test_grader.graders[1]['drop_count'] = test_grader.graders[1].get('drop_count') + 1
altered_grader = CourseGradingModel.update_grader_from_json(
self.course.id, test_grader.graders[1], self.user)
        self.assertDictEqual(test_grader.graders[1], altered_grader, "drop_count[1] + 1")
def test_update_cutoffs_from_json(self):
test_grader = CourseGradingModel.fetch(self.course.id)
CourseGradingModel.update_cutoffs_from_json(self.course.id, test_grader.grade_cutoffs, self.user)
# Unlike other tests, need to actually perform a db fetch for this test since update_cutoffs_from_json
# simply returns the cutoffs you send into it, rather than returning the db contents.
altered_grader = CourseGradingModel.fetch(self.course.id)
self.assertDictEqual(test_grader.grade_cutoffs, altered_grader.grade_cutoffs, "Noop update")
test_grader.grade_cutoffs['D'] = 0.3
CourseGradingModel.update_cutoffs_from_json(self.course.id, test_grader.grade_cutoffs, self.user)
altered_grader = CourseGradingModel.fetch(self.course.id)
self.assertDictEqual(test_grader.grade_cutoffs, altered_grader.grade_cutoffs, "cutoff add D")
test_grader.grade_cutoffs['Pass'] = 0.75
CourseGradingModel.update_cutoffs_from_json(self.course.id, test_grader.grade_cutoffs, self.user)
altered_grader = CourseGradingModel.fetch(self.course.id)
self.assertDictEqual(test_grader.grade_cutoffs, altered_grader.grade_cutoffs, "cutoff change 'Pass'")
def test_delete_grace_period(self):
test_grader = CourseGradingModel.fetch(self.course.id)
CourseGradingModel.update_grace_period_from_json(
self.course.id, test_grader.grace_period, self.user
)
# update_grace_period_from_json doesn't return anything, so query the db for its contents.
altered_grader = CourseGradingModel.fetch(self.course.id)
self.assertEqual(test_grader.grace_period, altered_grader.grace_period, "Noop update")
test_grader.grace_period = {'hours': 15, 'minutes': 5, 'seconds': 30}
CourseGradingModel.update_grace_period_from_json(
self.course.id, test_grader.grace_period, self.user)
altered_grader = CourseGradingModel.fetch(self.course.id)
self.assertDictEqual(test_grader.grace_period, altered_grader.grace_period, "Adding in a grace period")
test_grader.grace_period = {'hours': 1, 'minutes': 10, 'seconds': 0}
# Now delete the grace period
CourseGradingModel.delete_grace_period(self.course.id, self.user)
# update_grace_period_from_json doesn't return anything, so query the db for its contents.
altered_grader = CourseGradingModel.fetch(self.course.id)
# Once deleted, the grace period should simply be None
self.assertEqual(None, altered_grader.grace_period, "Delete grace period")
def test_update_section_grader_type(self):
# Get the descriptor and the section_grader_type and assert they are the default values
descriptor = modulestore().get_item(self.course.location)
section_grader_type = CourseGradingModel.get_section_grader_type(self.course.location)
self.assertEqual('notgraded', section_grader_type['graderType'])
self.assertEqual(None, descriptor.format)
self.assertEqual(False, descriptor.graded)
# Change the default grader type to Homework, which should also mark the section as graded
CourseGradingModel.update_section_grader_type(self.course, 'Homework', self.user)
descriptor = modulestore().get_item(self.course.location)
section_grader_type = CourseGradingModel.get_section_grader_type(self.course.location)
self.assertEqual('Homework', section_grader_type['graderType'])
self.assertEqual('Homework', descriptor.format)
self.assertEqual(True, descriptor.graded)
# Change the grader type back to notgraded, which should also unmark the section as graded
CourseGradingModel.update_section_grader_type(self.course, 'notgraded', self.user)
descriptor = modulestore().get_item(self.course.location)
section_grader_type = CourseGradingModel.get_section_grader_type(self.course.location)
self.assertEqual('notgraded', section_grader_type['graderType'])
self.assertEqual(None, descriptor.format)
self.assertEqual(False, descriptor.graded)
def test_get_set_grader_types_ajax(self):
"""
Test configuring the graders via ajax calls
"""
grader_type_url_base = get_url(self.course.id, 'grading_handler')
# test get whole
response = self.client.get_json(grader_type_url_base)
whole_model = json.loads(response.content)
self.assertIn('graders', whole_model)
self.assertIn('grade_cutoffs', whole_model)
self.assertIn('grace_period', whole_model)
# test post/update whole
whole_model['grace_period'] = {'hours': 1, 'minutes': 30, 'seconds': 0}
response = self.client.ajax_post(grader_type_url_base, whole_model)
self.assertEqual(200, response.status_code)
response = self.client.get_json(grader_type_url_base)
whole_model = json.loads(response.content)
self.assertEqual(whole_model['grace_period'], {'hours': 1, 'minutes': 30, 'seconds': 0})
# test get one grader
self.assertGreater(len(whole_model['graders']), 1) # ensure test will make sense
response = self.client.get_json(grader_type_url_base + '/1')
grader_sample = json.loads(response.content)
self.assertEqual(grader_sample, whole_model['graders'][1])
# test add grader
new_grader = {
"type": "Extra Credit",
"min_count": 1,
"drop_count": 2,
"short_label": None,
"weight": 15,
}
response = self.client.ajax_post(
'{}/{}'.format(grader_type_url_base, len(whole_model['graders'])),
new_grader
)
self.assertEqual(200, response.status_code)
grader_sample = json.loads(response.content)
new_grader['id'] = len(whole_model['graders'])
self.assertEqual(new_grader, grader_sample)
# test delete grader
response = self.client.delete(grader_type_url_base + '/1', HTTP_ACCEPT="application/json")
self.assertEqual(204, response.status_code)
response = self.client.get_json(grader_type_url_base)
updated_model = json.loads(response.content)
new_grader['id'] -= 1 # one fewer and the id mutates
self.assertIn(new_grader, updated_model['graders'])
self.assertNotIn(whole_model['graders'][1], updated_model['graders'])
def setup_test_set_get_section_grader_ajax(self):
"""
Populate the course, grab a section, get the url for the assignment type access
"""
self.populate_course()
sections = modulestore().get_items(self.course.id, qualifiers={'category': "sequential"})
# see if test makes sense
self.assertGreater(len(sections), 0, "No sections found")
section = sections[0] # just take the first one
return reverse_usage_url('xblock_handler', section.location)
def test_set_get_section_grader_ajax(self):
"""
Test setting and getting section grades via the grade as url
"""
grade_type_url = self.setup_test_set_get_section_grader_ajax()
response = self.client.ajax_post(grade_type_url, {'graderType': u'Homework'})
self.assertEqual(200, response.status_code)
response = self.client.get_json(grade_type_url + '?fields=graderType')
self.assertEqual(json.loads(response.content).get('graderType'), u'Homework')
# and unset
response = self.client.ajax_post(grade_type_url, {'graderType': u'notgraded'})
self.assertEqual(200, response.status_code)
response = self.client.get_json(grade_type_url + '?fields=graderType')
self.assertEqual(json.loads(response.content).get('graderType'), u'notgraded')
class CourseMetadataEditingTest(CourseTestCase):
"""
Tests for CourseMetadata.
"""
def setUp(self):
CourseTestCase.setUp(self)
self.fullcourse = CourseFactory.create()
self.course_setting_url = get_url(self.course.id, 'advanced_settings_handler')
self.fullcourse_setting_url = get_url(self.fullcourse.id, 'advanced_settings_handler')
def test_fetch_initial_fields(self):
test_model = CourseMetadata.fetch(self.course)
self.assertIn('display_name', test_model, 'Missing editable metadata field')
self.assertEqual(test_model['display_name']['value'], self.course.display_name)
test_model = CourseMetadata.fetch(self.fullcourse)
self.assertNotIn('graceperiod', test_model, 'blacklisted field leaked in')
self.assertIn('display_name', test_model, 'full missing editable metadata field')
self.assertEqual(test_model['display_name']['value'], self.fullcourse.display_name)
self.assertIn('rerandomize', test_model, 'Missing rerandomize metadata field')
self.assertIn('showanswer', test_model, 'showanswer field ')
self.assertIn('xqa_key', test_model, 'xqa_key field ')
@patch.dict(settings.FEATURES, {'ENABLE_EXPORT_GIT': True})
def test_fetch_giturl_present(self):
"""
If feature flag ENABLE_EXPORT_GIT is on, show the setting as a non-deprecated Advanced Setting.
"""
test_model = CourseMetadata.fetch(self.fullcourse)
self.assertIn('giturl', test_model)
@patch.dict(settings.FEATURES, {'ENABLE_EXPORT_GIT': False})
def test_fetch_giturl_not_present(self):
"""
If feature flag ENABLE_EXPORT_GIT is off, don't show the setting at all on the Advanced Settings page.
"""
test_model = CourseMetadata.fetch(self.fullcourse)
self.assertNotIn('giturl', test_model)
@patch.dict(settings.FEATURES, {'ENABLE_EXPORT_GIT': False})
def test_validate_update_filtered_off(self):
"""
If feature flag is off, then giturl must be filtered.
"""
# pylint: disable=unused-variable
is_valid, errors, test_model = CourseMetadata.validate_and_update_from_json(
self.course,
{
"giturl": {"value": "http://example.com"},
},
user=self.user
)
self.assertNotIn('giturl', test_model)
@patch.dict(settings.FEATURES, {'ENABLE_EXPORT_GIT': True})
def test_validate_update_filtered_on(self):
"""
If feature flag is on, then giturl must not be filtered.
"""
# pylint: disable=unused-variable
is_valid, errors, test_model = CourseMetadata.validate_and_update_from_json(
self.course,
{
"giturl": {"value": "http://example.com"},
},
user=self.user
)
self.assertIn('giturl', test_model)
@patch.dict(settings.FEATURES, {'ENABLE_EXPORT_GIT': True})
def test_update_from_json_filtered_on(self):
"""
If feature flag is on, then giturl must be updated.
"""
test_model = CourseMetadata.update_from_json(
self.course,
{
"giturl": {"value": "http://example.com"},
},
user=self.user
)
self.assertIn('giturl', test_model)
@patch.dict(settings.FEATURES, {'ENABLE_EXPORT_GIT': False})
def test_update_from_json_filtered_off(self):
"""
If feature flag is on, then giturl must not be updated.
"""
test_model = CourseMetadata.update_from_json(
self.course,
{
"giturl": {"value": "http://example.com"},
},
user=self.user
)
self.assertNotIn('giturl', test_model)
def test_validate_and_update_from_json_correct_inputs(self):
is_valid, errors, test_model = CourseMetadata.validate_and_update_from_json(
self.course,
{
"advertised_start": {"value": "start A"},
"days_early_for_beta": {"value": 2},
"advanced_modules": {"value": ['combinedopenended']},
},
user=self.user
)
self.assertTrue(is_valid)
self.assertTrue(len(errors) == 0)
self.update_check(test_model)
# fresh fetch to ensure persistence
fresh = modulestore().get_course(self.course.id)
test_model = CourseMetadata.fetch(fresh)
self.update_check(test_model)
# Tab gets tested in test_advanced_settings_munge_tabs
self.assertIn('advanced_modules', test_model, 'Missing advanced_modules')
self.assertEqual(test_model['advanced_modules']['value'], ['combinedopenended'], 'advanced_module is not updated')
def test_validate_and_update_from_json_wrong_inputs(self):
# input incorrectly formatted data
is_valid, errors, test_model = CourseMetadata.validate_and_update_from_json(
self.course,
{
"advertised_start": {"value": 1, "display_name": "Course Advertised Start Date", },
"days_early_for_beta": {"value": "supposed to be an integer",
"display_name": "Days Early for Beta Users", },
"advanced_modules": {"value": 1, "display_name": "Advanced Module List", },
},
user=self.user
)
# Check valid results from validate_and_update_from_json
self.assertFalse(is_valid)
self.assertEqual(len(errors), 3)
self.assertFalse(test_model)
error_keys = set([error_obj['model']['display_name'] for error_obj in errors])
test_keys = set(['Advanced Module List', 'Course Advertised Start Date', 'Days Early for Beta Users'])
self.assertEqual(error_keys, test_keys)
# try fresh fetch to ensure no update happened
fresh = modulestore().get_course(self.course.id)
test_model = CourseMetadata.fetch(fresh)
self.assertNotEqual(test_model['advertised_start']['value'], 1, 'advertised_start should not be updated to a wrong value')
self.assertNotEqual(test_model['days_early_for_beta']['value'], "supposed to be an integer",
'days_early_for beta should not be updated to a wrong value')
def test_correct_http_status(self):
json_data = json.dumps({
"advertised_start": {"value": 1, "display_name": "Course Advertised Start Date", },
"days_early_for_beta": {"value": "supposed to be an integer",
"display_name": "Days Early for Beta Users", },
"advanced_modules": {"value": 1, "display_name": "Advanced Module List", },
})
response = self.client.ajax_post(self.course_setting_url, json_data)
self.assertEqual(400, response.status_code)
def test_update_from_json(self):
test_model = CourseMetadata.update_from_json(
self.course,
{
"advertised_start": {"value": "start A"},
"days_early_for_beta": {"value": 2},
},
user=self.user
)
self.update_check(test_model)
# try fresh fetch to ensure persistence
fresh = modulestore().get_course(self.course.id)
test_model = CourseMetadata.fetch(fresh)
self.update_check(test_model)
# now change some of the existing metadata
test_model = CourseMetadata.update_from_json(
fresh,
{
"advertised_start": {"value": "start B"},
"display_name": {"value": "jolly roger"},
},
user=self.user
)
self.assertIn('display_name', test_model, 'Missing editable metadata field')
self.assertEqual(test_model['display_name']['value'], 'jolly roger', "not expected value")
self.assertIn('advertised_start', test_model, 'Missing revised advertised_start metadata field')
self.assertEqual(test_model['advertised_start']['value'], 'start B', "advertised_start not expected value")
def update_check(self, test_model):
"""
checks that updates were made
"""
self.assertIn('display_name', test_model, 'Missing editable metadata field')
self.assertEqual(test_model['display_name']['value'], self.course.display_name)
self.assertIn('advertised_start', test_model, 'Missing new advertised_start metadata field')
self.assertEqual(test_model['advertised_start']['value'], 'start A', "advertised_start not expected value")
self.assertIn('days_early_for_beta', test_model, 'Missing days_early_for_beta metadata field')
self.assertEqual(test_model['days_early_for_beta']['value'], 2, "days_early_for_beta not expected value")
def test_http_fetch_initial_fields(self):
response = self.client.get_json(self.course_setting_url)
test_model = json.loads(response.content)
self.assertIn('display_name', test_model, 'Missing editable metadata field')
self.assertEqual(test_model['display_name']['value'], self.course.display_name)
response = self.client.get_json(self.fullcourse_setting_url)
test_model = json.loads(response.content)
self.assertNotIn('graceperiod', test_model, 'blacklisted field leaked in')
self.assertIn('display_name', test_model, 'full missing editable metadata field')
self.assertEqual(test_model['display_name']['value'], self.fullcourse.display_name)
self.assertIn('rerandomize', test_model, 'Missing rerandomize metadata field')
self.assertIn('showanswer', test_model, 'showanswer field ')
self.assertIn('xqa_key', test_model, 'xqa_key field ')
def test_http_update_from_json(self):
response = self.client.ajax_post(self.course_setting_url, {
"advertised_start": {"value": "start A"},
"days_early_for_beta": {"value": 2},
})
test_model = json.loads(response.content)
self.update_check(test_model)
response = self.client.get_json(self.course_setting_url)
test_model = json.loads(response.content)
self.update_check(test_model)
# now change some of the existing metadata
response = self.client.ajax_post(self.course_setting_url, {
"advertised_start": {"value": "start B"},
"display_name": {"value": "jolly roger"}
})
test_model = json.loads(response.content)
self.assertIn('display_name', test_model, 'Missing editable metadata field')
self.assertEqual(test_model['display_name']['value'], 'jolly roger', "not expected value")
self.assertIn('advertised_start', test_model, 'Missing revised advertised_start metadata field')
self.assertEqual(test_model['advertised_start']['value'], 'start B', "advertised_start not expected value")
def test_advanced_components_munge_tabs(self):
"""
Test that adding and removing specific advanced components adds and removes tabs.
"""
self.assertNotIn(EXTRA_TAB_PANELS.get("open_ended"), self.course.tabs)
self.assertNotIn(EXTRA_TAB_PANELS.get("notes"), self.course.tabs)
self.client.ajax_post(self.course_setting_url, {
ADVANCED_COMPONENT_POLICY_KEY: {"value": ["combinedopenended"]}
})
course = modulestore().get_course(self.course.id)
self.assertIn(EXTRA_TAB_PANELS.get("open_ended"), course.tabs)
self.assertNotIn(EXTRA_TAB_PANELS.get("notes"), course.tabs)
self.client.ajax_post(self.course_setting_url, {
ADVANCED_COMPONENT_POLICY_KEY: {"value": []}
})
course = modulestore().get_course(self.course.id)
self.assertNotIn(EXTRA_TAB_PANELS.get("open_ended"), course.tabs)
class CourseGraderUpdatesTest(CourseTestCase):
"""
Test getting, deleting, adding, & updating graders
"""
def setUp(self):
"""Compute the url to use in tests"""
super(CourseGraderUpdatesTest, self).setUp()
self.url = get_url(self.course.id, 'grading_handler')
self.starting_graders = CourseGradingModel(self.course).graders
def test_get(self):
"""Test getting a specific grading type record."""
resp = self.client.get_json(self.url + '/0')
self.assertEqual(resp.status_code, 200)
obj = json.loads(resp.content)
self.assertEqual(self.starting_graders[0], obj)
def test_delete(self):
"""Test deleting a specific grading type record."""
resp = self.client.delete(self.url + '/0', HTTP_ACCEPT="application/json")
self.assertEqual(resp.status_code, 204)
current_graders = CourseGradingModel.fetch(self.course.id).graders
self.assertNotIn(self.starting_graders[0], current_graders)
self.assertEqual(len(self.starting_graders) - 1, len(current_graders))
def test_update(self):
"""Test updating a specific grading type record."""
grader = {
"id": 0,
"type": "manual",
"min_count": 5,
"drop_count": 10,
"short_label": "yo momma",
"weight": 17.3,
}
resp = self.client.ajax_post(self.url + '/0', grader)
self.assertEqual(resp.status_code, 200)
obj = json.loads(resp.content)
self.assertEqual(obj, grader)
current_graders = CourseGradingModel.fetch(self.course.id).graders
self.assertEqual(len(self.starting_graders), len(current_graders))
def test_add(self):
"""Test adding a grading type record."""
# the same url works for changing the whole grading model (graceperiod, cutoffs, and grading types) when
# the grading_index is None; thus, using None to imply adding a grading_type doesn't work; so, it uses an
# index out of bounds to imply create item.
grader = {
"type": "manual",
"min_count": 5,
"drop_count": 10,
"short_label": "yo momma",
"weight": 17.3,
}
resp = self.client.ajax_post('{}/{}'.format(self.url, len(self.starting_graders) + 1), grader)
self.assertEqual(resp.status_code, 200)
obj = json.loads(resp.content)
self.assertEqual(obj['id'], len(self.starting_graders))
del obj['id']
self.assertEqual(obj, grader)
current_graders = CourseGradingModel.fetch(self.course.id).graders
self.assertEqual(len(self.starting_graders) + 1, len(current_graders))
| c0710204/edx-platform | cms/djangoapps/contentstore/tests/test_course_settings.py | Python | agpl-3.0 | 38,800 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2008 Johann Prieur <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from papyon.service.AddressBook.scenario.base import BaseScenario
from papyon.service.AddressBook.scenario.base import Scenario
from update_memberships import UpdateMembershipsScenario
from papyon.profile import Membership
from papyon.profile import NetworkID
__all__ = ['BlockContactScenario']
class BlockContactScenario(BaseScenario):
def __init__(self, sharing, callback, errback, account='',
network=NetworkID.MSN, membership=Membership.NONE,
state='Accepted'):
"""Blocks a contact.
@param sharing: the membership service
@param callback: tuple(callable, *args)
@param errback: tuple(callable, *args)
"""
BaseScenario.__init__(self, Scenario.BLOCK_UNBLOCK, callback, errback)
self.__sharing = sharing
self.account = account
self.network = network
self.membership = membership
self.state = state
def execute(self):
new_membership = self.membership & ~Membership.ALLOW | Membership.BLOCK
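        # Precedence note: this evaluates as (membership & ~ALLOW) | BLOCK,
        # i.e. clear the ALLOW bit and set the BLOCK bit.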
um = UpdateMembershipsScenario(self.__sharing,
self._callback,
self._errback,
self._scenario,
self.account,
self.network,
self.state,
self.membership,
new_membership)
um()
| emesene/papyon | papyon/service/AddressBook/scenario/contacts/block_contact.py | Python | gpl-2.0 | 2,343 |
from .linetool import LineTool, ThickLineTool
from .recttool import RectangleTool
from .painttool import PaintTool
| stefanv/register_gui | viewer/canvastools/__init__.py | Python | bsd-3-clause | 115 |
# -*-coding:utf-8 -*-
'''
An @indicator-based version of dac,
providing a simpler interface and implementation.
'''
import operator
from collections import (
deque,
)
from .base import (
BaseObject,
fcustom,
indicator,
icache,
t2order_if,
)
from .dac import (
    XBASE, #100
    CBASE, #XBASE*XBASE, used by XATR
    FBASE, #10
)
XBASE = 100 # scaling factor for integer arithmetic
CBASE = XBASE * XBASE # XATR multiplier
FBASE = 10 # secondary scaling factor for integer arithmetic
##########
# When writing an indicator, make sure the attribute name tested in this guard
# matches the name assigned below; otherwise the state is reassigned and
# recomputed on every call, and memory blows up immediately.
# if not hasattr(_ts,'ss'):
# _ts.ss = []
#
##########
###############
# Basic series operations
#
###############
@indicator
def OPER1(source,oper,_ts=None):
    '''
    Unary series operation.
    '''
if not _ts.initialized:
_ts.initialized = True
_ts.ss = []
for i in range(len(_ts.ss),len(source)):
#print 'new data:',source[i]
_ts.ss.append(oper(source[i]))
return _ts.ss
'''
Different operator.xxx values give OPER1 distinct cache keys,
so the customized variants do not collide.
'''
NEG = fcustom(OPER1,oper=operator.neg)
ABS = fcustom(OPER1,oper=operator.abs)
NOT = fcustom(OPER1,oper=operator.not_)
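# Hedged usage sketch (assumes a plain list is accepted as a source series):
# NEG([1, -2, 3]) -> [-1, 2, -3]; calling again after the list has grown only
# processes the newly appended elements, thanks to the _ts.ss cache.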
@indicator
def OPER2(source1,source2,oper,_ts=None):
    '''
    Binary series operation.
    '''
assert len(source1) == len(source2),'len(source1) != len(source2)'
if not _ts.initialized:
_ts.initialized = True
#print 'new oper2 ss'
_ts.ss = []
for i in range(len(_ts.ss),len(source1)):
#print 'new data:',source1[i],source2[i]
_ts.ss.append(oper(source1[i],source2[i]))
return _ts.ss
ADD = fcustom(OPER2,oper=operator.add)
SUB = fcustom(OPER2,oper=operator.sub)
MUL = fcustom(OPER2,oper=operator.mul)
#AND = fcustom(OPER2,oper=operator.and_) # bitwise operation
#OR = fcustom(OPER2,oper=operator.or_) # bitwise operation
#XOR = fcustom(OPER2,oper=operator.xor) # bitwise operation
LT = fcustom(OPER2,oper=operator.lt)
LE = fcustom(OPER2,oper=operator.le)
EQ = fcustom(OPER2,oper=operator.eq)
GT = fcustom(OPER2,oper=operator.gt)
GE = fcustom(OPER2,oper=operator.ge)
@indicator
def OPER21(source1,vs,oper,_ts=None):
    '''
    Binary operation: the first operand is a series, the second a scalar.
    '''
if not _ts.initialized:
_ts.initialized = True
_ts.ss = []
for i in range(len(_ts.ss),len(source1)):
#print 'new data:',source1[i]
_ts.ss.append(oper(source1[i],vs))
return _ts.ss
ADD1 = fcustom(OPER21,oper=operator.add)
SUB1 = fcustom(OPER21,oper=operator.sub)
MUL1 = fcustom(OPER21,oper=operator.mul)
#AND1 = fcustom(OPER21,oper=operator.and_) # bitwise operation
#OR1 = fcustom(OPER21,oper=operator.or_) # bitwise operation
#XOR1 = fcustom(OPER21,oper=operator.xor) # bitwise operation
LT1 = fcustom(OPER21,oper=operator.lt)
LE1 = fcustom(OPER21,oper=operator.le)
EQ1 = fcustom(OPER21,oper=operator.eq)
GT1 = fcustom(OPER21,oper=operator.gt)
GE1 = fcustom(OPER21,oper=operator.ge)
@indicator
def AND(source1,source2,_ts=None):
    '''
    Logical AND of two series.
    '''
assert len(source1) == len(source2),'len(source1) != len(source2)'
if not _ts.initialized:
_ts.initialized = True
_ts.ss = []
for i in range(len(_ts.ss),len(source1)):
#print 'new data:',source1[i],source2[i]
_ts.ss.append((source1[i] and source2[i])!=0)
return _ts.ss
@indicator
def GAND(_ts=None,*args):
assert len(args)>0,'GAND params number less than 1'
if not _ts.initialized:
_ts.initialized = True
_ts.ga = []
for i in range(len(_ts.ga),len(args[0])):
rv = all([vs[i] for vs in args])
_ts.ga.append(rv!=0)
return _ts.ga
@indicator
def GOR(_ts=None,*args):
assert len(args)>0,'GOR params number less than 1'
#print 'ts=%s,args=%s' % (_ts,args)
if not _ts.initialized:
_ts.initialized = True
_ts.gor = []
for i in range(len(_ts.gor),len(args[0])):
rv = any([vs[i] for vs in args])
_ts.gor.append(rv!=0)
return _ts.gor
#GAND = fcustom(GOPER,oper=all) # with varargs, fcustom cannot supply defaults for any parameter other than _ts
#GOR = fcustom(GOPER,oper=any) # with varargs, fcustom cannot supply defaults for any parameter other than _ts
@indicator
def DIV(source1,source2,_ts=None):
    '''
    Series division (rounded; a zero divisor yields source1[i]*1000)
    '''
assert len(source1) == len(source2),'len(source1) != len(source2)'
if not _ts.initialized:
_ts.initialized = True
_ts.ss = []
for i in range(len(_ts.ss),len(source1)):
#print 'new data:',source1[i],source2[i]
r = (source1[i]+source2[i]/2)/source2[i] if source2[i] != 0 else source1[i]*1000
_ts.ss.append(r)
return _ts.ss
@indicator
def DIV1(source1,vs,_ts=None):
    '''
    Divide a series by a constant (rounded)
    '''
assert vs!=0,'divisor vs == 0'
if not _ts.initialized:
_ts.initialized = True
_ts.ss = []
for i in range(len(_ts.ss),len(source1)):
#print 'new data:',source1[i]
_ts.ss.append((source1[i]+vs/2)/vs)
return _ts.ss
############
# Common indicators
#
############
@indicator
def ACCUMULATE(source,_ts=None):
    '''
    Cumulative sum.
    '''
if not _ts.initialized:
_ts.initialized = True
_ts.sa = []
ss = _ts.sa[-1] if _ts.sa else 0
for i in range(len(_ts.sa),len(source)):
ss += source[i]
_ts.sa.append(ss)
#print id(_ts),id(source),source,_ts.sa
return _ts.sa
NSUM = ACCUMULATE
@indicator
def MSUM(source,mlen,_ts=None):
    '''
    Moving sum over mlen periods.
    '''
if not _ts.initialized:
_ts.initialized = True
_ts.ms = []
ss = ACCUMULATE(source)
for i in range(len(_ts.ms),len(source)):
v = ss[i] - ss[i-mlen] if i>=mlen else ss[i]
_ts.ms.append(v)
return _ts.ms
@indicator
def MA(source,mlen,_ts=None):
    '''
    Moving average, built on MSUM.
    Usage:
        rev = MA(source,13) # 13-period moving average of source
    While the series has fewer than mlen elements, each result is the average
    of all elements seen so far.
    '''
    assert mlen>0,u'mlen should be > 0'
if not _ts.initialized:
_ts.initialized = True
_ts.ma = []
ms = MSUM(source,mlen)
for i in range(len(_ts.ma),len(source)):
        # with fewer than mlen elements accumulated, average what is there instead of dividing the running total by mlen
rlen = mlen if i>=mlen else i+1
_ts.ma.append((ms[i]+rlen/2)/rlen)
return _ts.ma
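# Worked example of the rounded integer average (plain-list source assumed):
# MA([10, 20, 40], 2) -> [10, 15, 30], i.e. 10/1, (10+20+1)/2, (20+40+1)/2;
# the +rlen/2 term rounds the truncating division to nearest.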
@indicator
def MA_2(source,mlen,_ts=None):
    '''
    Moving average, computed directly (without MSUM).
    Usage:
        rev = MA_2(source,13) # 13-period moving average of source
    While the series has fewer than mlen elements, each result is the average
    of all elements seen so far.
    '''
    assert mlen>0,u'mlen should be > 0'
if not _ts.initialized:
_ts.initialized = True
        _ts.sa = [0]*mlen # sentinels
_ts.ma = []
slen = len(_ts.ma)
ss = _ts.sa[-1]
for i in range(slen,len(source)):
ss += source[i]
_ts.sa.append(ss)
#print ss,_ts.sa[i-mlen]
        # with fewer than mlen elements accumulated, average what is there instead of dividing the running total by mlen
rlen = mlen if mlen < i+1 else i+1
_ts.ma.append((ss-_ts.sa[-rlen-1]+rlen/2)/rlen)
#print _ts.sa
return _ts.ma
@indicator
def NMA(source,_ts=None):
    '''
    Running overall average.
    Usage:
        rev = NMA(source) # average of source up to and including each element
    '''
if not _ts.initialized:
_ts.initialized = True
        _ts.sa = [0] # sentinel
_ts.nma = []
#print 'initial NMA'
slen = len(_ts.nma)
ss = _ts.sa[-1]
for i in range(slen,len(source)):
ss += source[i]
_ts.sa.append(ss)
#print ss,_ts.sa[-1]
_ts.nma.append((ss+(i+1)/2)/(i+1))
#print _ts.sa
return _ts.nma
@indicator
def CEXPMA(source,mlen,_ts=None):
    assert mlen>0,u'mlen should be > 0'
    if len(source) == 0: # nothing to compute for an empty series
return []
if not _ts.initialized:
_ts.initialized = True
#print 'new cexpma ema'
        _ts.ema = [source[0]] # seed with source[0] so results stay correct while there are fewer than mlen elements
cur = _ts.ema[-1]
for i in range(len(_ts.ema),len(source)):
cur = (source[i]*2 + cur*(mlen-1) + (mlen+1)/2)/(mlen+1)
_ts.ema.append(cur)
return _ts.ema
EMA = CEXPMA
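# The recurrence above is the usual EMA with alpha = 2/(mlen+1), kept in integer
# arithmetic: e.g. with mlen=3, next = (2*x + 2*prev + 2)/4, which is (x+prev)/2
# rounded to nearest.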
@indicator
def MACD(source,ifast=12,islow=26,idiff=9,_ts=None):
if not _ts.initialized:
_ts.initialized = True
_ts.diff = []
_ts.dea = []
src = MUL1(source,FBASE)
sfast = EMA(src,ifast)
sslow = EMA(src,islow)
_ts.diff = SUB(sfast,sslow)
_ts.dea = EMA(_ts.diff,idiff)
return _ts
@indicator
def TR(sclose,shigh,slow,_ts=None):
if len(sclose) == 0:
return []
if not _ts.initialized:
_ts.initialized = True
_ts.tr = [(shigh[0]-slow[0]) * XBASE]
for i in range(len(_ts.tr),len(sclose)):
#c,h,l = sclose[slen-1],shigh[slen],sclose[slen]
hh = shigh[i] if shigh[i] > sclose[i-1] else sclose[i-1]
ll = slow[i] if slow[i] < sclose[i-1] else sclose[i-1]
_ts.tr.append((hh-ll)*XBASE)
return _ts.tr
@indicator
def ATR(sclose,shigh,slow,length=20,_ts=None):
ltr = TR(sclose,shigh,slow)
return CEXPMA(ltr,length)
@indicator
def XATR(sclose,shigh,slow,length=20,_ts=None):
latr = ATR(sclose,shigh,slow,length)
return DIV(MUL1(latr,CBASE),sclose)
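# Reading of XATR: atr * CBASE / close, i.e. ATR normalized by price (scaled by
# CBASE = XBASE*XBASE), so values are comparable across instruments trading at
# different price levels.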
@indicator
def STREND(source,_ts=None):
    ''' Simple cumulative trend, variant 2.
    Unlike strend, a flat value counts as up while rising and as down while falling.
    If the current trend is up or 0 (trend value n > 0), the next trend value is:
        n+1 if cur >= pre
        -1  if cur < pre
    If the current trend is down (trend value n, negative), the next trend value is:
        n-1 if cur <= pre
        1   if cur > pre
    0 is the initial trend (before any comparison is possible).
    '''
if len(source) == 0:
return []
if not _ts.initialized:
_ts.initialized = True
        _ts.sd = [0] # the first element has no trend
slen = len(_ts.sd)
scur = _ts.sd[-1]
vpre = source[slen-1]
for i in range(slen,len(source)):
vcur = source[i]
if vcur > vpre:
scur = scur + 1 if scur > 0 else 1
elif vcur < vpre:
scur = scur - 1 if scur < 0 else -1
else: #curv == pre_v
            scur = scur + 1 if scur >= 0 else scur-1 # an initial 0 also counts as rising
_ts.sd.append(scur)
vpre = vcur
return _ts.sd
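# Worked example: STREND([3, 3, 4, 4, 2]) -> [0, 1, 2, 3, -1]; the flat value at
# index 1 extends the trend upward because an initial 0 is treated as rising.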
#TMAX,TMIN,UCROSS,DCROSS
@indicator
def TMM(source,covered,vmm,fcmp,fgroup,_ts=None):
'''
    vmm: initial extreme value used in comparisons
    fcmp: comparison function
    fgroup: aggregate function applied to the whole window
    with covered=0 the result is the running extreme up to the current element
'''
assert covered >=0, 'TMM: cover <0'
if len(source) == 0:
return []
if not _ts.initialized:
_ts.initialized = True
#print 'new tmm'
        _ts.tmm = [] # running extreme values
_ts.buffer = None
slen = len(source)
pre_len = slen if slen <= covered else covered
cmm = _ts.tmm[-1] if _ts.tmm else vmm
for i in range(len(_ts.tmm),pre_len):
if fcmp(source[i],cmm):
cmm = source[i]
_ts.tmm.append(cmm)
if slen <= covered:
return _ts.tmm
tlen = len(_ts.tmm)
if _ts.buffer:
buffer = _ts.buffer
else:
buffer = _ts.buffer = deque(source[tlen-covered:tlen])
#print 'in tmm:tlen=%s,len(source)=%s' % (tlen,len(source))
for i in range(tlen,len(source)):
v = source[i]
buffer.append(v)
vquit=buffer.popleft()
if fcmp(v,cmm):
cmm = v
        if cmm == vquit and v != cmm: # the element leaving the window was the extreme; recompute over the current window
#cmm = fgroup(source[i-covered+1:i+1])
cmm = fgroup(buffer)
_ts.tmm.append(cmm)
return _ts.tmm
TMAX = fcustom(TMM,vmm=-99999999,fcmp=operator.gt,fgroup=max)
TMIN = fcustom(TMM,vmm=99999999,fcmp=operator.lt,fgroup=min)
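# Usage sketch: TMAX(source, 5) is the rolling 5-period high of source; within
# the first 5 elements it is the running high so far. TMIN is the mirror image.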
@indicator
def NMM(source,vmm,fcmp,_ts=None):
    '''
    Running extreme starting from index 0.
    Equivalent to TMM with covered at its maximum.
    '''
if len(source) == 0:
return []
if not _ts.initialized:
_ts.initialized = True
#print 'new nmm'
        _ts.nmm = [] # running extreme values
slen = len(source)
cmm = _ts.nmm[-1] if _ts.nmm else vmm
for i in range(len(_ts.nmm),len(source)):
if fcmp(source[i],cmm):
cmm = source[i]
_ts.nmm.append(cmm)
return _ts.nmm
NMAX = fcustom(NMM,vmm=-99999999,fcmp=operator.gt)
NMIN = fcustom(NMM,vmm=99999999,fcmp=operator.lt)
@indicator
def CROSS(source1,source2,rcmp,_ts=None):
    '''
    source2 crossing source1.
    rcmp tests the "already crossed" state.
    In the result, 0 = not crossed, 1 = crossed.
    '''
if len(source1) == 0:
return []
if not _ts.initialized:
_ts.initialized = True
        _ts.crs = [1 if rcmp(source2[0],source1[0]) else 0] # the first value reflects the state: 1 if already crossed
ps = _ts.crs[-1]
for i in range(len(_ts.crs),len(source1)):
cs = rcmp(source2[i],source1[i])
_ts.crs.append(1 if not ps and cs else 0)
ps = cs
return _ts.crs
UPCROSS = fcustom(CROSS,rcmp = operator.gt) # behind-flat-flat-above, and above-flat-above, both count as a cross
DOWNCROSS = fcustom(CROSS,rcmp = operator.lt) # behind-flat-flat-above, and above-flat-above, both count as a cross
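# Worked example: UPCROSS([5, 5, 5], [4, 5, 6]) -> [0, 0, 1]; here
# source2=[4, 5, 6] first rises strictly above source1=[5, 5, 5] at index 2,
# and only that bar is flagged as the cross.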
@indicator
def NCROSS(source,target,rcmp,_ts=None):
    '''
    source crossing target, where target is a number.
    rcmp tests the "already crossed" state.
    In the result, 0 = not crossed, 1 = crossed.
    '''
if len(source) == 0:
return []
if not _ts.initialized:
_ts.initialized = True
        _ts.crs = [1 if rcmp(source[0],target) else 0] # the first element depends on state: 1 if already crossed
ps = _ts.crs[-1]
for i in range(len(_ts.crs),len(source)):
cs = rcmp(source[i],target)
_ts.crs.append(1 if not ps and cs else 0)
ps = cs
return _ts.crs
NUPCROSS = fcustom(NCROSS,rcmp = operator.gt) # chase-flat-flat-overtake and overtake-flat-overtake both count as crosses
NDOWNCROSS = fcustom(NCROSS,rcmp = operator.lt) # chase-flat-flat-overtake and overtake-flat-overtake both count as crosses
@indicator
def REF(source,offset=1,_ts=None):
    '''
    Return the series shifted by offset.
    The first offset elements are padded with the first element.
    If you only need a comparison, index with [-1] directly instead of
    using this function; it is only meaningful for shifted CROSS calls.
    '''
if len(source) == 0:
return []
if not _ts.initialized:
_ts.initialized = True
_ts.ref = [source[0]]
#print 'initialize REF'
for i in range(len(_ts.ref),offset if offset <= len(source) else len(source)):
_ts.ref.append(source[0])
for i in range(len(_ts.ref),len(source)):
_ts.ref.append(source[i-offset])
return _ts.ref
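# Worked example (illustrative): REF shifts right by `offset`, padding the
# head with the first element: REF([1, 2, 3, 4], 1) == [1, 1, 2, 3]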
NullMinute = BaseObject(sopen=[],sclose=[],shigh=[],slow=[],svol=[],iorder=[],sholding=[])
@indicator
def MINUTE(ticks,pre_min1=None,t2order=t2order_if,_ts=None):
    '''
    Split ticks into 1-minute bars.
    The main limitation of this implementation is that the final minute is
    never closed out.  That only matters when persisting data: after calling
    MINUTE, dedicated code can inspect the last tick and finalize the last
    minute.
    With the default pre_min1 only the current day's minutes are used;
    otherwise the previous minute series is continued.
    '''
if len(ticks) == 0:
return NullMinute
if not _ts.initialized:
_ts.initialized = True
        if pre_min1 is None: # no continuation from a previous session
_ts.sopen = []
_ts.sclose = []
_ts.shigh = []
_ts.slow = []
_ts.svol = []
_ts.sholding=[]
_ts.iorder = []
_ts.min1 = []
else:
_ts.sopen = pre_min1.sopen
_ts.sclose = pre_min1.sclose
_ts.shigh = pre_min1.shigh
_ts.slow = pre_min1.slow
_ts.svol = pre_min1.svol
_ts.sholding=pre_min1.sholding
_ts.iorder = pre_min1.iorder
_ts.min1 = pre_min1.min1
_ts.cur = BaseObject(vopen = ticks[0].price,
vclose = ticks[0].price,
vhigh=ticks[0].price,
vlow=ticks[0].price,
                             open_dvol=ticks[0].dvolume, # subject to an initial error
                             close_dvol=ticks[0].dvolume,
                             holding = ticks[0].holding,
                             min1=ticks[0].min1, # first min1 of the day
                             iorder=t2order[ticks[0].min1]
                ) # handling dvol this way means recovery after an interruption must restart from the very beginning of the day; otherwise all preceding volume is lumped into the first tick
_ts.ilast = 0
        _ts.modified = False # flag: previous period has completed
scur = _ts.cur
for i in range(_ts.ilast,len(ticks)):
tcur = ticks[i]
        #if tcur.min1 != scur.min1: # switch
        if tcur.min1 > scur.min1: # switch minutes; '>' (not '!=') keeps bad tick data from scrambling the minute sequence, e.g. 20120905:958:59:500 inserted after 20120905:959:00:00 -- extremely rare, but it would make the series oscillate 958->959->958->959...
_ts.sopen.append(scur.vopen)
_ts.sclose.append(scur.vclose)
_ts.shigh.append(scur.vhigh)
_ts.slow.append(scur.vlow)
_ts.svol.append(scur.close_dvol - scur.open_dvol)
_ts.sholding.append(scur.holding)
_ts.min1.append(scur.min1)
_ts.iorder.append(scur.iorder)
scur.vopen = scur.vclose = scur.vhigh = scur.vlow = tcur.price
scur.open_dvol = scur.close_dvol
scur.close_dvol = tcur.dvolume
scur.dvol = tcur.dvolume
scur.holding = tcur.holding
scur.min1 = tcur.min1
scur.iorder = t2order[tcur.min1]
_ts.modified = True
        else: # same minute, no switch
scur.vclose = tcur.price
scur.close_dvol = tcur.dvolume
scur.holding = tcur.holding
#print scur.min1,'close:',scur.vclose
if tcur.price > scur.vhigh:
scur.vhigh = tcur.price
elif tcur.price < scur.vlow:
scur.vlow = tcur.price
_ts.modified = False
_ts.ilast = len(ticks)
return _ts
## iorder predicates below mark the bar that closes each period
XS3 = lambda x:x%3==0 and x>0 ## minute 914 rolls into the next period
XS5 = lambda x: x%5==0 and x>0
XS10 = lambda x: x%10== 0 and x>0
XS15 = lambda x:x%15 == 0 and x>0
XS30 = lambda x:x%30 == 0 and x>0
XSDAY = lambda x:x == 270
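# Worked example (illustrative): XS5 is true exactly on positive multiples
# of 5, i.e. the minute orders that close a 5-minute bar:
#   [XS5(x) for x in (0, 4, 5, 9, 10)] == [False, False, True, False, True]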
NullXMinute = BaseObject(sopen=[],sclose=[],shigh=[],slow=[],svol=[],iorder=[],sholding=[])
@indicator
def XMINUTE(m1,sfunc,_ts=None):
    '''
    Bars with a period longer than one minute.
    sfunc identifies the bar that closes a period.
    '''
if len(m1.sclose) == 0:
return NullXMinute
if not _ts.initialized:
_ts.initialized = True
_ts.sopen = []
_ts.sclose = []
_ts.shigh = []
_ts.slow = []
_ts.svol = []
_ts.sholding=[]
_ts.iorder = []
        _ts.xmin = [] # opening minute of each bar
_ts.cur = BaseObject(vopen = 0,
vclose = 0,
vhigh=0,
vlow=99999999,
xmin =0,
svol=0,
holding=0,
iorder=0,
)
_ts.ilast = 0
        _ts.modified = False # flag: previous period has completed
scur = _ts.cur
for i in range(_ts.ilast,len(m1.sclose)):
morder = m1.iorder[i]
if scur.vopen == 0:
scur.vopen = m1.sopen[i]
scur.xmin = m1.min1[i]
scur.vclose = m1.sclose[i]
scur.svol += m1.svol[i]
scur.holding = m1.sholding[i]
if m1.shigh[i] > scur.vhigh:
scur.vhigh = m1.shigh[i]
if m1.slow[i] < scur.vlow:
scur.vlow = m1.slow[i]
_ts.modified = False
        if sfunc(morder): # period boundary: switch
_ts.sopen.append(scur.vopen)
_ts.sclose.append(scur.vclose)
_ts.shigh.append(scur.vhigh)
_ts.slow.append(scur.vlow)
_ts.svol.append(scur.svol)
_ts.sholding.append(scur.holding)
_ts.iorder.append(scur.iorder)
_ts.xmin.append(scur.xmin)
scur.vopen = 0
scur.vclose = 0
scur.vhigh = 0
scur.vlow = 99999999
scur.svol = 0
scur.xmin = 0
scur.holding = 0
scur.iorder += 1
_ts.modified = True
_ts.ilast = len(m1.sclose)
return _ts
MINUTE3 = fcustom(XMINUTE,sfunc=XS3)
MINUTE5 = fcustom(XMINUTE,sfunc=XS5)
MINUTE10 = fcustom(XMINUTE,sfunc=XS10)
MINUTE15 = fcustom(XMINUTE,sfunc=XS15)
MINUTE30 = fcustom(XMINUTE,sfunc=XS30)
MINUTED = fcustom(XMINUTE,sfunc=XSDAY)
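# Illustrative pipeline (hedged sketch): ticks -> 1-minute bars -> 5-minute
# bars.  Assumes each tick exposes .price/.dvolume/.holding/.min1 as used by
# MINUTE above, and that the @indicator decorator supplies the _ts state.
def _demo_bars(ticks):
    m1 = MINUTE(ticks)   # 1-minute OHLC/volume series (fields live on _ts)
    m5 = MINUTE5(m1)     # aggregated on XS5 boundaries (every 5th iorder)
    return m1.sclose, m5.sclose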
| blmousee/pyctp | example/pyctp/dac2.py | Python | mit | 22,134 |
class Solution(object):
def searchInsert(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
        O(log n)
"""
low = 0
high = len(nums) - 1
if target <= nums[low]:
return low
if target > nums[high]:
return high+1
while low < high:
mid = (low + high) // 2
# print low, high, mid
if nums[mid] < target <= nums[mid+1]:
return mid+1
if nums[mid] >= target:
high = mid
else:
low = mid
def searchInsert1(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
60ms O(n)
"""
index = 0
for num in nums:
if num < target:
index += 1
return index
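# Illustrative trace: with nums = [1, 3, 5, 6] and target = 7 the early
# guard `target > nums[high]` fires and searchInsert returns high+1 == 4
# before the binary-search loop runs; target = 4 instead binary-searches
# down to nums[1] < 4 <= nums[2] and returns 2.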
if __name__ == '__main__':
target = 7
nums = [1,3,5,6]
# nums = [1]
print Solution().searchInsert(nums,target) | comicxmz001/LeetCode | Python/35. Search Insert Position.py | Python | mit | 973 |
# coding=utf-8
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for StatementVisitor."""
from __future__ import unicode_literals
import re
import subprocess
import textwrap
import unittest
import pythonparser
from pythonparser import ast
from grumpy.compiler import block
from grumpy.compiler import imputil
from grumpy.compiler import imputil_test
from grumpy.compiler import shard_test
from grumpy.compiler import stmt
from grumpy.compiler import util
class StatementVisitorTest(unittest.TestCase):
def testAssertNoMsg(self):
self.assertEqual((0, 'AssertionError()\n'), _GrumpRun(textwrap.dedent("""\
try:
assert False
except AssertionError as e:
print repr(e)""")))
def testAssertMsg(self):
want = (0, "AssertionError('foo',)\n")
self.assertEqual(want, _GrumpRun(textwrap.dedent("""\
try:
assert False, 'foo'
except AssertionError as e:
print repr(e)""")))
def testBareAssert(self):
# Assertion errors at the top level of a block should raise:
# https://github.com/google/grumpy/issues/18
want = (0, 'ok\n')
self.assertEqual(want, _GrumpRun(textwrap.dedent("""\
def foo():
assert False
try:
foo()
except AssertionError:
print 'ok'
else:
print 'bad'""")))
def testAssignAttribute(self):
self.assertEqual((0, '123\n'), _GrumpRun(textwrap.dedent("""\
e = Exception()
e.foo = 123
print e.foo""")))
def testAssignName(self):
self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
foo = 'bar'
print foo""")))
def testAssignMultiple(self):
self.assertEqual((0, 'baz baz\n'), _GrumpRun(textwrap.dedent("""\
foo = bar = 'baz'
print foo, bar""")))
def testAssignSubscript(self):
self.assertEqual((0, "{'bar': None}\n"), _GrumpRun(textwrap.dedent("""\
foo = {}
foo['bar'] = None
print foo""")))
def testAssignTuple(self):
self.assertEqual((0, 'a b\n'), _GrumpRun(textwrap.dedent("""\
baz = ('a', 'b')
foo, bar = baz
print foo, bar""")))
def testAugAssign(self):
self.assertEqual((0, '42\n'), _GrumpRun(textwrap.dedent("""\
foo = 41
foo += 1
print foo""")))
def testAugAssignBitAnd(self):
self.assertEqual((0, '3\n'), _GrumpRun(textwrap.dedent("""\
foo = 7
foo &= 3
print foo""")))
def testAugAssignPow(self):
self.assertEqual((0, '64\n'), _GrumpRun(textwrap.dedent("""\
foo = 8
foo **= 2
print foo""")))
def testClassDef(self):
self.assertEqual((0, "<type 'type'>\n"), _GrumpRun(textwrap.dedent("""\
class Foo(object):
pass
print type(Foo)""")))
def testClassDefWithVar(self):
self.assertEqual((0, 'abc\n'), _GrumpRun(textwrap.dedent("""\
class Foo(object):
bar = 'abc'
print Foo.bar""")))
def testDeleteAttribute(self):
self.assertEqual((0, 'False\n'), _GrumpRun(textwrap.dedent("""\
class Foo(object):
bar = 42
del Foo.bar
print hasattr(Foo, 'bar')""")))
def testDeleteClassLocal(self):
self.assertEqual((0, 'False\n'), _GrumpRun(textwrap.dedent("""\
class Foo(object):
bar = 'baz'
del bar
print hasattr(Foo, 'bar')""")))
def testDeleteGlobal(self):
self.assertEqual((0, 'False\n'), _GrumpRun(textwrap.dedent("""\
foo = 42
del foo
print 'foo' in globals()""")))
def testDeleteLocal(self):
self.assertEqual((0, 'ok\n'), _GrumpRun(textwrap.dedent("""\
def foo():
bar = 123
del bar
try:
print bar
raise AssertionError
except UnboundLocalError:
print 'ok'
foo()""")))
def testDeleteNonexistentLocal(self):
self.assertRaisesRegexp(
util.ParseError, 'cannot delete nonexistent local',
_ParseAndVisit, 'def foo():\n del bar')
def testDeleteSubscript(self):
self.assertEqual((0, '{}\n'), _GrumpRun(textwrap.dedent("""\
foo = {'bar': 'baz'}
del foo['bar']
print foo""")))
def testExprCall(self):
self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
def foo():
print 'bar'
foo()""")))
def testExprNameGlobal(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
foo = 42
foo""")))
def testExprNameLocal(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
foo = 42
def bar():
foo
bar()""")))
def testFor(self):
self.assertEqual((0, '1\n2\n3\n'), _GrumpRun(textwrap.dedent("""\
for i in (1, 2, 3):
print i""")))
def testForBreak(self):
self.assertEqual((0, '1\n'), _GrumpRun(textwrap.dedent("""\
for i in (1, 2, 3):
print i
break""")))
def testForContinue(self):
self.assertEqual((0, '1\n2\n3\n'), _GrumpRun(textwrap.dedent("""\
for i in (1, 2, 3):
print i
continue
raise AssertionError""")))
def testForElse(self):
self.assertEqual((0, 'foo\nbar\n'), _GrumpRun(textwrap.dedent("""\
for i in (1,):
print 'foo'
else:
print 'bar'""")))
def testForElseBreakNotNested(self):
self.assertRaisesRegexp(
        util.ParseError, "'break' not in loop",
        _ParseAndVisit, 'for i in (1,):\n  pass\nelse:\n  break')
def testForElseContinueNotNested(self):
self.assertRaisesRegexp(
util.ParseError, "'continue' not in loop",
_ParseAndVisit, 'for i in (1,):\n pass\nelse:\n continue')
def testFunctionDecorator(self):
self.assertEqual((0, '<b>foo</b>\n'), _GrumpRun(textwrap.dedent("""\
def bold(fn):
return lambda: '<b>' + fn() + '</b>'
@bold
def foo():
return 'foo'
print foo()""")))
def testFunctionDecoratorWithArg(self):
self.assertEqual((0, '<b id=red>foo</b>\n'), _GrumpRun(textwrap.dedent("""\
def tag(name):
def bold(fn):
return lambda: '<b id=' + name + '>' + fn() + '</b>'
return bold
@tag('red')
def foo():
return 'foo'
print foo()""")))
def testFunctionDef(self):
self.assertEqual((0, 'bar baz\n'), _GrumpRun(textwrap.dedent("""\
def foo(a, b):
print a, b
foo('bar', 'baz')""")))
def testFunctionDefGenerator(self):
self.assertEqual((0, "['foo', 'bar']\n"), _GrumpRun(textwrap.dedent("""\
def gen():
yield 'foo'
yield 'bar'
print list(gen())""")))
def testFunctionDefGeneratorReturnValue(self):
self.assertRaisesRegexp(
util.ParseError, 'returning a value in a generator function',
_ParseAndVisit, 'def foo():\n yield 1\n return 2')
def testFunctionDefLocal(self):
self.assertEqual((0, 'baz\n'), _GrumpRun(textwrap.dedent("""\
def foo():
def bar():
print 'baz'
bar()
foo()""")))
def testIf(self):
self.assertEqual((0, 'foo\n'), _GrumpRun(textwrap.dedent("""\
if 123:
print 'foo'
if '':
print 'bar'""")))
def testIfElif(self):
self.assertEqual((0, 'foo\nbar\n'), _GrumpRun(textwrap.dedent("""\
if True:
print 'foo'
elif False:
print 'bar'
if False:
print 'foo'
elif True:
print 'bar'""")))
def testIfElse(self):
self.assertEqual((0, 'foo\nbar\n'), _GrumpRun(textwrap.dedent("""\
if True:
print 'foo'
else:
print 'bar'
if False:
print 'foo'
else:
print 'bar'""")))
def testImport(self):
self.assertEqual((0, "<type 'dict'>\n"), _GrumpRun(textwrap.dedent("""\
import sys
print type(sys.modules)""")))
def testImportMember(self):
self.assertEqual((0, "<type 'dict'>\n"), _GrumpRun(textwrap.dedent("""\
from sys import modules
print type(modules)""")))
def testImportConflictingPackage(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
import time
from __go__.time import Now""")))
def testImportNative(self):
self.assertEqual((0, '1 1000000000\n'), _GrumpRun(textwrap.dedent("""\
from __go__.time import Nanosecond, Second
print Nanosecond, Second""")))
def testImportGrumpy(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
from __go__.grumpy import Assert
Assert(__frame__(), True, 'bad')""")))
def testImportNativeModuleRaises(self):
regexp = r'for native imports use "from __go__\.xyz import \.\.\." syntax'
self.assertRaisesRegexp(util.ImportError, regexp, _ParseAndVisit,
'import __go__.foo')
def testImportNativeType(self):
self.assertEqual((0, "<type 'Duration'>\n"), _GrumpRun(textwrap.dedent("""\
from __go__.time import type_Duration as Duration
print Duration""")))
def testPrintStatement(self):
self.assertEqual((0, 'abc 123\nfoo bar\n'), _GrumpRun(textwrap.dedent("""\
print 'abc',
print '123'
print 'foo', 'bar'""")))
def testImportFromFuture(self):
testcases = [
('from __future__ import print_function',
imputil.FUTURE_PRINT_FUNCTION),
('from __future__ import generators', 0),
('from __future__ import generators, print_function',
imputil.FUTURE_PRINT_FUNCTION),
]
for i, tc in enumerate(testcases):
source, want_flags = tc
mod = pythonparser.parse(textwrap.dedent(source))
node = mod.body[0]
got = imputil.import_from_future(node)
msg = '#{}: want {}, got {}'.format(i, want_flags, got)
self.assertEqual(want_flags, got, msg=msg)
def testImportFromFutureParseError(self):
testcases = [
# NOTE: move this group to testImportFromFuture as they are implemented
# by grumpy
('from __future__ import absolute_import',
r'future feature \w+ not yet implemented'),
('from __future__ import division',
r'future feature \w+ not yet implemented'),
('from __future__ import unicode_literals',
r'future feature \w+ not yet implemented'),
('from __future__ import braces', 'not a chance'),
('from __future__ import nonexistant_feature',
r'future feature \w+ is not defined'),
]
for tc in testcases:
source, want_regexp = tc
mod = pythonparser.parse(source)
node = mod.body[0]
self.assertRaisesRegexp(util.ParseError, want_regexp,
imputil.import_from_future, node)
def testImportWildcardMemberRaises(self):
regexp = r'wildcard member import is not implemented: from foo import *'
self.assertRaisesRegexp(util.ImportError, regexp, _ParseAndVisit,
'from foo import *')
regexp = (r'wildcard member import is not '
r'implemented: from __go__.foo import *')
self.assertRaisesRegexp(util.ImportError, regexp, _ParseAndVisit,
'from __go__.foo import *')
def testFutureFeaturePrintFunction(self):
want = "abc\n123\nabc 123\nabcx123\nabc 123 "
self.assertEqual((0, want), _GrumpRun(textwrap.dedent("""\
"module docstring is ok to proceed __future__"
from __future__ import print_function
print('abc')
print(123)
print('abc', 123)
print('abc', 123, sep='x')
print('abc', 123, end=' ')""")))
def testRaiseExitStatus(self):
self.assertEqual(1, _GrumpRun('raise Exception')[0])
def testRaiseInstance(self):
self.assertEqual((0, 'foo\n'), _GrumpRun(textwrap.dedent("""\
try:
raise RuntimeError('foo')
print 'bad'
except RuntimeError as e:
print e""")))
def testRaiseTypeAndArg(self):
self.assertEqual((0, 'foo\n'), _GrumpRun(textwrap.dedent("""\
try:
raise KeyError('foo')
print 'bad'
except KeyError as e:
print e""")))
def testRaiseAgain(self):
self.assertEqual((0, 'foo\n'), _GrumpRun(textwrap.dedent("""\
try:
try:
raise AssertionError('foo')
except AssertionError:
raise
except Exception as e:
print e""")))
def testRaiseTraceback(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
import sys
try:
try:
raise Exception
except:
e, _, tb = sys.exc_info()
raise e, None, tb
except:
e2, _, tb2 = sys.exc_info()
assert e is e2
assert tb is tb2""")))
def testReturn(self):
self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
def foo():
return 'bar'
print foo()""")))
def testTryBareExcept(self):
self.assertEqual((0, ''), _GrumpRun(textwrap.dedent("""\
try:
raise AssertionError
except:
pass""")))
def testTryElse(self):
self.assertEqual((0, 'foo baz\n'), _GrumpRun(textwrap.dedent("""\
try:
print 'foo',
except:
print 'bar'
else:
print 'baz'""")))
def testTryMultipleExcept(self):
self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
try:
raise AssertionError
except RuntimeError:
print 'foo'
except AssertionError:
print 'bar'
except:
print 'baz'""")))
def testTryFinally(self):
result = _GrumpRun(textwrap.dedent("""\
try:
print 'foo',
finally:
print 'bar'
try:
print 'foo',
raise Exception
finally:
print 'bar'"""))
self.assertEqual(1, result[0])
# Some platforms show "exit status 1" message so don't test strict equality.
self.assertIn('foo bar\nfoo bar\nException\n', result[1])
def testWhile(self):
self.assertEqual((0, '2\n1\n'), _GrumpRun(textwrap.dedent("""\
i = 2
while i:
print i
i -= 1""")))
def testWhileElse(self):
self.assertEqual((0, 'bar\n'), _GrumpRun(textwrap.dedent("""\
while False:
print 'foo'
else:
print 'bar'""")))
def testWith(self):
self.assertEqual((0, 'enter\n1\nexit\nenter\n2\nexit\n3\n'),
_GrumpRun(textwrap.dedent("""\
class ContextManager(object):
def __enter__(self):
print "enter"
def __exit__(self, exc_type, value, traceback):
print "exit"
a = ContextManager()
with a:
print 1
try:
with a:
print 2
raise RuntimeError
except RuntimeError:
print 3
""")))
def testWithAs(self):
self.assertEqual((0, '1 2 3\n'),
_GrumpRun(textwrap.dedent("""\
class ContextManager(object):
def __enter__(self):
return (1, (2, 3))
def __exit__(self, *args):
pass
with ContextManager() as [x, (y, z)]:
print x, y, z
""")))
def testWriteExceptDispatcherBareExcept(self):
visitor = stmt.StatementVisitor(_MakeModuleBlock())
handlers = [ast.ExceptHandler(type=ast.Name(id='foo')),
ast.ExceptHandler(type=None)]
self.assertEqual(visitor._write_except_dispatcher( # pylint: disable=protected-access
'exc', 'tb', handlers), [1, 2])
expected = re.compile(r'ResolveGlobal\(.*foo.*\bIsInstance\(.*'
r'goto Label1.*goto Label2', re.DOTALL)
self.assertRegexpMatches(visitor.writer.getvalue(), expected)
def testWriteExceptDispatcherBareExceptionNotLast(self):
visitor = stmt.StatementVisitor(_MakeModuleBlock())
handlers = [ast.ExceptHandler(type=None),
ast.ExceptHandler(type=ast.Name(id='foo'))]
self.assertRaisesRegexp(util.ParseError, r"default 'except:' must be last",
visitor._write_except_dispatcher, # pylint: disable=protected-access
'exc', 'tb', handlers)
def testWriteExceptDispatcherMultipleExcept(self):
visitor = stmt.StatementVisitor(_MakeModuleBlock())
handlers = [ast.ExceptHandler(type=ast.Name(id='foo')),
ast.ExceptHandler(type=ast.Name(id='bar'))]
self.assertEqual(visitor._write_except_dispatcher( # pylint: disable=protected-access
'exc', 'tb', handlers), [1, 2])
expected = re.compile(
r'ResolveGlobal\(.*foo.*\bif .*\bIsInstance\(.*\{.*goto Label1.*'
r'ResolveGlobal\(.*bar.*\bif .*\bIsInstance\(.*\{.*goto Label2.*'
r'\bRaise\(exc\.ToObject\(\), nil, tb\.ToObject\(\)\)', re.DOTALL)
self.assertRegexpMatches(visitor.writer.getvalue(), expected)
def _MakeModuleBlock():
return block.ModuleBlock(imputil_test.MockPath(), '__main__',
'<test>', '', imputil.FutureFeatures())
def _ParseAndVisit(source):
mod = pythonparser.parse(source)
future_features = imputil.visit_future(mod)
b = block.ModuleBlock(imputil_test.MockPath(), '__main__',
'<test>', source, future_features)
visitor = stmt.StatementVisitor(b)
visitor.visit(mod)
return visitor
def _GrumpRun(cmd):
p = subprocess.Popen(['grumprun'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = p.communicate(cmd)
return p.returncode, out
if __name__ == '__main__':
shard_test.main()
| S-YOU/grumpy | compiler/stmt_test.py | Python | apache-2.0 | 18,304 |
#!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
#
# Copyright: 2017 IBM
# Author: Pooja B Surya <[email protected]>
# Update: Sachin Sant <[email protected]>
import os
import re
import configparser
from avocado import Test
from avocado.utils import process
from avocado.utils import genio
from avocado.utils.software_manager import SoftwareManager
from avocado.utils import distro
class pvr(Test):
'''
    Processor Version Register (PVR) test case
:avocado: tags=cpu,power
'''
def setUp(self):
if "ppc" not in os.uname()[4]:
self.cancel("supported only on Power platform")
smm = SoftwareManager()
detected_distro = distro.detect()
parser = configparser.ConfigParser()
parser.read(self.get_data("pvr.cfg"))
if detected_distro.name == "Ubuntu":
pkg = 'device-tree-compiler'
else:
pkg = 'dtc'
if not smm.check_installed(pkg) and not smm.install(pkg):
self.cancel("%s package is needed for the test to be run" % pkg)
val = genio.read_file("/proc/cpuinfo")
for line in val.splitlines():
if 'revision' in line:
rev = (line.split('revision')[1]).split()
self.log.info("Revision: %s %s" % (rev, rev[1]))
break
if 'pSeries' in val and 'POWER8' in val:
self.pvr_value = parser.get('PVR_Values', 'pvr_value_p8')
elif 'PowerNV' in val and 'POWER8' in val:
self.pvr_value = parser.get('PVR_Values', 'pvr_value_p8')
elif 'pSeries' in val and 'POWER9' in val:
if rev[1] == '1.2':
self.pvr_value = parser.get('PVR_Values',
'pvr_value_p9LPAR_1.2')
elif rev[1] == '2.2':
self.pvr_value = parser.get('PVR_Values',
'pvr_value_p9LPAR_2.2')
elif rev[1] == '2.3':
self.pvr_value = parser.get('PVR_Values',
'pvr_value_p9LPAR_2.3')
elif 'PowerNV' in val and 'POWER9' in val:
if rev[1] == '2.1':
self.pvr_value = parser.get('PVR_Values', 'pvr_value_p9NV_2.1')
elif rev[1] == '2.2':
self.pvr_value = parser.get('PVR_Values', 'pvr_value_p9NV_2.2')
elif rev[1] == '2.3':
self.pvr_value = parser.get('PVR_Values', 'pvr_value_p9NV_2.3')
elif 'pSeries' in val and 'POWER10' in val:
if rev[1] == '1.0':
self.pvr_value = parser.get('PVR_Values', 'pvr_value_p10_1')
elif rev[1] == '2.0':
self.pvr_value = parser.get('PVR_Values', 'pvr_value_p10_2')
else:
self.fail("Unsupported processor family")
def test(self):
self.log.info("====== Verifying CPU PVR entries =====")
self.log.info(self.pvr_value)
pvr_cpu = genio.read_file("/proc/cpuinfo")
res = re.sub(' ', '', pvr_cpu)
match = re.search(self.pvr_value, res)
self.log.info('self.pvr_value = %s, res = %s' % (self.pvr_value, res))
if match:
self.log.info("PVR from /proc/cpuinfo for the system is correct")
else:
self.fail("PVR from /proc/cpuinfo for the system is not correct")
pvr_dtc = process.run("dtc -I fs /proc/device-tree -O dts |grep %s | "
"head -1" % self.pvr_value, shell=True,
ignore_status=True)
if not pvr_dtc.exit_status:
self.log.info("PVR from device tree for the system is correct")
else:
self.fail("PVR from device tree for the system is not correct")
| avocado-framework-tests/avocado-misc-tests | cpu/pvr.py | Python | gpl-2.0 | 4,205 |
import collections
import copy
import logging
import asyncio
from aiozk import exc
from .iterables import drain
log = logging.getLogger(__name__)
class States:
CONNECTED = "connected"
SUSPENDED = "suspended"
READ_ONLY = "read_only"
LOST = "lost"
class SessionStateMachine:
valid_transitions = {
(States.LOST, States.CONNECTED),
(States.LOST, States.READ_ONLY),
(States.CONNECTED, States.SUSPENDED),
(States.CONNECTED, States.LOST),
(States.READ_ONLY, States.CONNECTED),
(States.READ_ONLY, States.SUSPENDED),
(States.READ_ONLY, States.LOST),
(States.SUSPENDED, States.CONNECTED),
(States.SUSPENDED, States.READ_ONLY),
(States.SUSPENDED, States.LOST),
}
def __init__(self, session):
self.session = session
self.current_state = States.LOST
self.futures = collections.defaultdict(set)
def transition_to(self, state):
if (self.current_state, state) not in self.valid_transitions:
raise exc.InvalidStateTransition(
"Invalid session state transition: %s -> %s" % (
self.current_state, state
)
)
log.debug("Session transition: %s -> %s", self.current_state, state)
self.current_state = state
for future in drain(self.futures[state]):
if not future.done():
future.set_result(None)
def wait_for(self, *states):
loop = asyncio.get_running_loop()
f = loop.create_future()
if self.current_state in states:
f.set_result(None)
else:
for state in states:
self.futures[state].add(f)
return f
def remove_waiting(self, future, *states):
for state in states:
self.futures[state].remove(future)
def waitings(self, *states):
futures = {}
for state in states:
futures[state] = copy.copy(self.futures[state])
return futures
def __eq__(self, state):
return self.current_state == state
def __ne__(self, state):
return self.current_state != state
def __str__(self):
return '<SessionStateMachine {}>'.format(self.current_state)
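# Illustrative usage (hedged sketch, not part of the public API): parking a
# coroutine until the session becomes usable again.  Only the wait_for()
# behaviour shown above is assumed.
async def _demo_wait_until_usable(machine):
    # Resolves immediately when already connected or read-only; otherwise
    # the future is completed by the next transition_to() into either state.
    await machine.wait_for(States.CONNECTED, States.READ_ONLY)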
| tipsi/aiozk | aiozk/states.py | Python | mit | 2,280 |
# Copyright 2012 Pinterest.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import socket
import six
from pymemcache import pool
from pymemcache.exceptions import (
MemcacheClientError,
MemcacheUnknownCommandError,
MemcacheIllegalInputError,
MemcacheServerError,
MemcacheUnknownError,
MemcacheUnexpectedCloseError
)
RECV_SIZE = 4096
VALID_STORE_RESULTS = {
b'set': (b'STORED',),
b'add': (b'STORED', b'NOT_STORED'),
b'replace': (b'STORED', b'NOT_STORED'),
b'append': (b'STORED', b'NOT_STORED'),
b'prepend': (b'STORED', b'NOT_STORED'),
b'cas': (b'STORED', b'EXISTS', b'NOT_FOUND'),
}
VALID_STRING_TYPES = (six.text_type, six.string_types)
# Some of the values returned by the "stats" command
# need mapping into native Python types
def _parse_bool_int(value):
return int(value) != 0
def _parse_bool_string_is_yes(value):
return value == b'yes'
def _parse_float(value):
return float(value.replace(b':', b'.'))
def _parse_hex(value):
    # despite its name this parses octal; the umask stat is reported in octal
    return int(value, 8)
STAT_TYPES = {
# General stats
b'version': six.binary_type,
b'rusage_user': _parse_float,
b'rusage_system': _parse_float,
b'hash_is_expanding': _parse_bool_int,
b'slab_reassign_running': _parse_bool_int,
# Settings stats
b'inter': six.binary_type,
b'growth_factor': float,
b'stat_key_prefix': six.binary_type,
b'umask': _parse_hex,
b'detail_enabled': _parse_bool_int,
b'cas_enabled': _parse_bool_int,
b'auth_enabled_sasl': _parse_bool_string_is_yes,
b'maxconns_fast': _parse_bool_int,
b'slab_reassign': _parse_bool_int,
b'slab_automove': _parse_bool_int,
}
# Common helper functions.
def _check_key(key, allow_unicode_keys, key_prefix=b''):
"""Checks key and add key_prefix."""
if allow_unicode_keys:
if isinstance(key, six.text_type):
key = key.encode('utf8')
elif isinstance(key, VALID_STRING_TYPES):
try:
key = key.encode('ascii')
except (UnicodeEncodeError, UnicodeDecodeError):
raise MemcacheIllegalInputError("Non-ASCII key: '%r'" % (key,))
key = key_prefix + key
if b' ' in key or b'\n' in key:
raise MemcacheIllegalInputError(
"Key contains space and/or newline: '%r'" % (key,)
)
if len(key) > 250:
raise MemcacheIllegalInputError("Key is too long: '%r'" % (key,))
return key
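# Illustrative (hedged): _check_key() normalizes and namespaces keys, e.g.
#   _check_key(b'user:1', False, key_prefix=b'app-')  ->  b'app-user:1'
# while keys containing a space or newline, or longer than 250 bytes,
# raise MemcacheIllegalInputError.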
class Client(object):
"""
A client for a single memcached server.
*Keys and Values*
Keys must have a __str__() method which should return a str with no more
than 250 ASCII characters and no whitespace or control characters. Unicode
strings must be encoded (as UTF-8, for example) unless they consist only
of ASCII characters that are neither whitespace nor control characters.
Values must have a __str__() method to convert themselves to a byte
string. Unicode objects can be a problem since str() on a Unicode object
will attempt to encode it as ASCII (which will fail if the value contains
code points larger than U+127). You can fix this with a serializer or by
just calling encode on the string (using UTF-8, for instance).
If you intend to use anything but str as a value, it is a good idea to use
a serializer and deserializer. The pymemcache.serde library has some
already implemented serializers, including one that is compatible with
the python-memcache library.
*Serialization and Deserialization*
The constructor takes two optional functions, one for "serialization" of
values, and one for "deserialization". The serialization function takes
two arguments, a key and a value, and returns a tuple of two elements, the
serialized value, and an integer in the range 0-65535 (the "flags"). The
deserialization function takes three parameters, a key, value and flags
and returns the deserialized value.
Here is an example using JSON for non-str values:
.. code-block:: python
def serialize_json(key, value):
if type(value) == str:
return value, 1
return json.dumps(value), 2
def deserialize_json(key, value, flags):
if flags == 1:
return value
if flags == 2:
return json.loads(value)
raise Exception("Unknown flags for value: {1}".format(flags))
*Error Handling*
All of the methods in this class that talk to memcached can throw one of
the following exceptions:
* MemcacheUnknownCommandError
* MemcacheClientError
* MemcacheServerError
* MemcacheUnknownError
* MemcacheUnexpectedCloseError
* MemcacheIllegalInputError
* socket.timeout
* socket.error
Instances of this class maintain a persistent connection to memcached
which is terminated when any of these exceptions are raised. The next
call to a method on the object will result in a new connection being made
to memcached.
"""
def __init__(self,
server,
serializer=None,
deserializer=None,
connect_timeout=None,
timeout=None,
no_delay=False,
ignore_exc=False,
socket_module=socket,
key_prefix=b'',
default_noreply=True,
allow_unicode_keys=False):
"""
Constructor.
Args:
server: tuple(hostname, port)
serializer: optional function, see notes in the class docs.
deserializer: optional function, see notes in the class docs.
connect_timeout: optional float, seconds to wait for a connection to
the memcached server. Defaults to "forever" (uses the underlying
default socket timeout, which can be very long).
timeout: optional float, seconds to wait for send or recv calls on
the socket connected to memcached. Defaults to "forever" (uses the
underlying default socket timeout, which can be very long).
no_delay: optional bool, set the TCP_NODELAY flag, which may help
with performance in some cases. Defaults to False.
ignore_exc: optional bool, True to cause the "get", "gets",
"get_many" and "gets_many" calls to treat any errors as cache
misses. Defaults to False.
socket_module: socket module to use, e.g. gevent.socket. Defaults to
the standard library's socket module.
key_prefix: Prefix of key. You can use this as namespace. Defaults
to b''.
default_noreply: bool, the default value for 'noreply' as passed to
store commands (except from cas, incr, and decr, which default to
False).
allow_unicode_keys: bool, support unicode (utf8) keys
Notes:
The constructor does not make a connection to memcached. The first
call to a method on the object will do that.
"""
self.server = server
self.serializer = serializer
self.deserializer = deserializer
self.connect_timeout = connect_timeout
self.timeout = timeout
self.no_delay = no_delay
self.ignore_exc = ignore_exc
self.socket_module = socket_module
self.sock = None
if isinstance(key_prefix, six.text_type):
key_prefix = key_prefix.encode('ascii')
if not isinstance(key_prefix, bytes):
raise TypeError("key_prefix should be bytes.")
self.key_prefix = key_prefix
self.default_noreply = default_noreply
self.allow_unicode_keys = allow_unicode_keys
def check_key(self, key):
"""Checks key and add key_prefix."""
return _check_key(key, allow_unicode_keys=self.allow_unicode_keys,
key_prefix=self.key_prefix)
def _connect(self):
sock = self.socket_module.socket(self.socket_module.AF_INET,
self.socket_module.SOCK_STREAM)
sock.settimeout(self.connect_timeout)
sock.connect(self.server)
sock.settimeout(self.timeout)
if self.no_delay:
sock.setsockopt(self.socket_module.IPPROTO_TCP,
self.socket_module.TCP_NODELAY, 1)
self.sock = sock
def close(self):
"""Close the connection to memcached, if it is open. The next call to a
method that requires a connection will re-open it."""
if self.sock is not None:
try:
self.sock.close()
except Exception:
pass
self.sock = None
def set(self, key, value, expire=0, noreply=None):
"""
The memcached "set" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
If no exception is raised, always returns True. If an exception is
raised, the set may or may not have occurred. If noreply is True,
then a successful return does not guarantee a successful set.
"""
if noreply is None:
noreply = self.default_noreply
return self._store_cmd(b'set', key, expire, noreply, value)
def set_many(self, values, expire=0, noreply=None):
"""
A convenience function for setting multiple values.
Args:
values: dict(str, str), a dict of keys and values, see class docs
for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
If no exception is raised, always returns True. Otherwise all, some
or none of the keys have been successfully set. If noreply is True
then a successful return does not guarantee that any keys were
successfully set (just that the keys were successfully sent).
"""
# TODO: make this more performant by sending all the values first, then
# waiting for all the responses.
for key, value in six.iteritems(values):
self.set(key, value, expire, noreply)
return True
set_multi = set_many
def add(self, key, value, expire=0, noreply=None):
"""
The memcached "add" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
If noreply is True, the return value is always True. Otherwise the
            return value is True if the value was stored, and False if it was
not (because the key already existed).
"""
if noreply is None:
noreply = self.default_noreply
return self._store_cmd(b'add', key, expire, noreply, value)
def replace(self, key, value, expire=0, noreply=None):
"""
The memcached "replace" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
If noreply is True, always returns True. Otherwise returns True if
the value was stored and False if it wasn't (because the key didn't
already exist).
"""
if noreply is None:
noreply = self.default_noreply
return self._store_cmd(b'replace', key, expire, noreply, value)
def append(self, key, value, expire=0, noreply=None):
"""
The memcached "append" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
True.
"""
if noreply is None:
noreply = self.default_noreply
return self._store_cmd(b'append', key, expire, noreply, value)
def prepend(self, key, value, expire=0, noreply=None):
"""
The memcached "prepend" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
True.
"""
if noreply is None:
noreply = self.default_noreply
return self._store_cmd(b'prepend', key, expire, noreply, value)
def cas(self, key, value, cas, expire=0, noreply=False):
"""
The memcached "cas" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
cas: int or str that only contains the characters '0'-'9'.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, False to wait for the reply (the default).
Returns:
If noreply is True, always returns True. Otherwise returns None if
the key didn't exist, False if it existed but had a different cas
value and True if it existed and was changed.
"""
return self._store_cmd(b'cas', key, expire, noreply, value, cas)
def get(self, key, default=None):
"""
The memcached "get" command, but only for one key, as a convenience.
Args:
key: str, see class docs for details.
default: value that will be returned if the key was not found.
Returns:
The value for the key, or default if the key wasn't found.
"""
return self._fetch_cmd(b'get', [key], False).get(key, default)
def get_many(self, keys):
"""
The memcached "get" command.
Args:
keys: list(str), see class docs for details.
Returns:
A dict in which the keys are elements of the "keys" argument list
and the values are values from the cache. The dict may contain all,
some or none of the given keys.
"""
if not keys:
return {}
return self._fetch_cmd(b'get', keys, False)
get_multi = get_many
def gets(self, key, default=None, cas_default=None):
"""
The memcached "gets" command for one key, as a convenience.
Args:
key: str, see class docs for details.
default: value that will be returned if the key was not found.
cas_default: same behaviour as default argument.
Returns:
A tuple of (key, cas)
or (default, cas_defaults) if the key was not found.
"""
defaults = (default, cas_default)
return self._fetch_cmd(b'gets', [key], True).get(key, defaults)
def gets_many(self, keys):
"""
The memcached "gets" command.
Args:
keys: list(str), see class docs for details.
Returns:
A dict in which the keys are elements of the "keys" argument list and
the values are tuples of (value, cas) from the cache. The dict may
contain all, some or none of the given keys.
"""
if not keys:
return {}
return self._fetch_cmd(b'gets', keys, True)
def delete(self, key, noreply=None):
"""
The memcached "delete" command.
Args:
key: str, see class docs for details.
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
If noreply is True, always returns True. Otherwise returns True if
the key was deleted, and False if it wasn't found.
"""
if noreply is None:
noreply = self.default_noreply
cmd = b'delete ' + self.check_key(key)
if noreply:
cmd += b' noreply'
cmd += b'\r\n'
result = self._misc_cmd(cmd, b'delete', noreply)
if noreply:
return True
return result == b'DELETED'
def delete_many(self, keys, noreply=None):
"""
A convenience function to delete multiple keys.
Args:
keys: list(str), the list of keys to delete.
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
True. If an exception is raised then all, some or none of the keys
may have been deleted. Otherwise all the keys have been sent to
memcache for deletion and if noreply is False, they have been
acknowledged by memcache.
"""
if not keys:
return True
if noreply is None:
noreply = self.default_noreply
# TODO: make this more performant by sending all keys first, then
# waiting for all values.
for key in keys:
self.delete(key, noreply)
return True
delete_multi = delete_many
def incr(self, key, value, noreply=False):
"""
The memcached "incr" command.
Args:
key: str, see class docs for details.
value: int, the amount by which to increment the value.
noreply: optional bool, False to wait for the reply (the default).
Returns:
If noreply is True, always returns None. Otherwise returns the new
value of the key, or None if the key wasn't found.
"""
key = self.check_key(key)
cmd = b'incr ' + key + b' ' + six.text_type(value).encode('ascii')
if noreply:
cmd += b' noreply'
cmd += b'\r\n'
result = self._misc_cmd(cmd, b'incr', noreply)
if noreply:
return None
if result == b'NOT_FOUND':
return None
return int(result)
def decr(self, key, value, noreply=False):
"""
The memcached "decr" command.
Args:
key: str, see class docs for details.
value: int, the amount by which to increment the value.
noreply: optional bool, False to wait for the reply (the default).
Returns:
If noreply is True, always returns None. Otherwise returns the new
value of the key, or None if the key wasn't found.
"""
key = self.check_key(key)
cmd = b'decr ' + key + b' ' + six.text_type(value).encode('ascii')
if noreply:
cmd += b' noreply'
cmd += b'\r\n'
result = self._misc_cmd(cmd, b'decr', noreply)
if noreply:
return None
if result == b'NOT_FOUND':
return None
return int(result)
def touch(self, key, expire=0, noreply=None):
"""
The memcached "touch" command.
Args:
key: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
True if the expiration time was updated, False if the key wasn't
found.
"""
if noreply is None:
noreply = self.default_noreply
key = self.check_key(key)
cmd = b'touch ' + key + b' ' + six.text_type(expire).encode('ascii')
if noreply:
cmd += b' noreply'
cmd += b'\r\n'
result = self._misc_cmd(cmd, b'touch', noreply)
if noreply:
return True
return result == b'TOUCHED'
def stats(self, *args):
"""
The memcached "stats" command.
The returned keys depend on what the "stats" command returns.
A best effort is made to convert values to appropriate Python
types, defaulting to strings when a conversion cannot be made.
Args:
*arg: extra string arguments to the "stats" command. See the
memcached protocol documentation for more information.
Returns:
A dict of the returned stats.
"""
result = self._fetch_cmd(b'stats', args, False)
for key, value in six.iteritems(result):
converter = STAT_TYPES.get(key, int)
try:
result[key] = converter(value)
except Exception:
pass
return result
def version(self):
"""
The memcached "version" command.
Returns:
A string of the memcached version.
"""
cmd = b"version\r\n"
result = self._misc_cmd(cmd, b'version', False)
if not result.startswith(b'VERSION '):
raise MemcacheUnknownError(
"Received unexpected response: %s" % (result, ))
return result[8:]
def flush_all(self, delay=0, noreply=None):
"""
The memcached "flush_all" command.
Args:
delay: optional int, the number of seconds to wait before flushing,
or zero to flush immediately (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
True.
"""
if noreply is None:
noreply = self.default_noreply
cmd = b'flush_all ' + six.text_type(delay).encode('ascii')
if noreply:
cmd += b' noreply'
cmd += b'\r\n'
result = self._misc_cmd(cmd, b'flush_all', noreply)
if noreply:
return True
return result == b'OK'
def quit(self):
"""
The memcached "quit" command.
This will close the connection with memcached. Calling any other
method on this object will re-open the connection, so this object can
be re-used after quit.
"""
cmd = b"quit\r\n"
self._misc_cmd(cmd, b'quit', True)
self.close()
def _raise_errors(self, line, name):
if line.startswith(b'ERROR'):
raise MemcacheUnknownCommandError(name)
if line.startswith(b'CLIENT_ERROR'):
error = line[line.find(b' ') + 1:]
raise MemcacheClientError(error)
if line.startswith(b'SERVER_ERROR'):
error = line[line.find(b' ') + 1:]
raise MemcacheServerError(error)
def _fetch_cmd(self, name, keys, expect_cas):
checked_keys = dict((self.check_key(k), k) for k in keys)
cmd = name + b' ' + b' '.join(checked_keys) + b'\r\n'
try:
if not self.sock:
self._connect()
self.sock.sendall(cmd)
buf = b''
result = {}
while True:
buf, line = _readline(self.sock, buf)
self._raise_errors(line, name)
if line == b'END':
return result
elif line.startswith(b'VALUE'):
if expect_cas:
_, key, flags, size, cas = line.split()
else:
try:
_, key, flags, size = line.split()
except Exception as e:
raise ValueError("Unable to parse line %s: %s"
% (line, str(e)))
buf, value = _readvalue(self.sock, buf, int(size))
key = checked_keys[key]
if self.deserializer:
value = self.deserializer(key, value, int(flags))
if expect_cas:
result[key] = (value, cas)
else:
result[key] = value
elif name == b'stats' and line.startswith(b'STAT'):
key_value = line.split()
result[key_value[1]] = key_value[2]
else:
raise MemcacheUnknownError(line[:32])
except Exception:
self.close()
if self.ignore_exc:
return {}
raise
def _store_cmd(self, name, key, expire, noreply, data, cas=None):
key = self.check_key(key)
if not self.sock:
self._connect()
if self.serializer:
data, flags = self.serializer(key, data)
else:
flags = 0
if not isinstance(data, six.binary_type):
try:
data = six.text_type(data).encode('ascii')
except UnicodeEncodeError as e:
raise MemcacheIllegalInputError(str(e))
extra = b''
if cas is not None:
extra += b' ' + cas
if noreply:
extra += b' noreply'
cmd = (name + b' ' + key + b' ' +
six.text_type(flags).encode('ascii') +
b' ' + six.text_type(expire).encode('ascii') +
b' ' + six.text_type(len(data)).encode('ascii') + extra +
b'\r\n' + data + b'\r\n')
try:
self.sock.sendall(cmd)
if noreply:
return True
buf = b''
buf, line = _readline(self.sock, buf)
self._raise_errors(line, name)
if line in VALID_STORE_RESULTS[name]:
if line == b'STORED':
return True
if line == b'NOT_STORED':
return False
if line == b'NOT_FOUND':
return None
if line == b'EXISTS':
return False
else:
raise MemcacheUnknownError(line[:32])
except Exception:
self.close()
raise
def _misc_cmd(self, cmd, cmd_name, noreply):
if not self.sock:
self._connect()
try:
self.sock.sendall(cmd)
if noreply:
return
_, line = _readline(self.sock, b'')
self._raise_errors(line, cmd_name)
return line
except Exception:
self.close()
raise
def __setitem__(self, key, value):
self.set(key, value, noreply=True)
def __getitem__(self, key):
value = self.get(key)
if value is None:
raise KeyError
return value
def __delitem__(self, key):
self.delete(key, noreply=True)
class PooledClient(object):
"""A thread-safe pool of clients (with the same client api).
Args:
        max_pool_size: maximum pool size to use (going above this amount
triggers a runtime error), by default this is 2147483648L
when not provided (or none).
lock_generator: a callback/type that takes no arguments that will
be called to create a lock or sempahore that can
protect the pool from concurrent access (for example a
eventlet lock or semaphore could be used instead)
Further arguments are interpreted as for :py:class:`.Client` constructor.
"""
def __init__(self,
server,
serializer=None,
deserializer=None,
connect_timeout=None,
timeout=None,
no_delay=False,
ignore_exc=False,
socket_module=socket,
key_prefix=b'',
max_pool_size=None,
lock_generator=None,
default_noreply=True,
allow_unicode_keys=False):
self.server = server
self.serializer = serializer
self.deserializer = deserializer
self.connect_timeout = connect_timeout
self.timeout = timeout
self.no_delay = no_delay
self.ignore_exc = ignore_exc
self.socket_module = socket_module
self.default_noreply = default_noreply
self.allow_unicode_keys = allow_unicode_keys
if isinstance(key_prefix, six.text_type):
key_prefix = key_prefix.encode('ascii')
if not isinstance(key_prefix, bytes):
raise TypeError("key_prefix should be bytes.")
self.key_prefix = key_prefix
self.client_pool = pool.ObjectPool(
self._create_client,
after_remove=lambda client: client.close(),
max_size=max_pool_size,
lock_generator=lock_generator)
def check_key(self, key):
"""Checks key and add key_prefix."""
return _check_key(key, allow_unicode_keys=self.allow_unicode_keys,
key_prefix=self.key_prefix)
def _create_client(self):
client = Client(self.server,
serializer=self.serializer,
deserializer=self.deserializer,
connect_timeout=self.connect_timeout,
timeout=self.timeout,
no_delay=self.no_delay,
# We need to know when it fails *always* so that we
# can remove/destroy it from the pool...
ignore_exc=False,
socket_module=self.socket_module,
key_prefix=self.key_prefix,
default_noreply=self.default_noreply,
allow_unicode_keys=self.allow_unicode_keys)
return client
def close(self):
self.client_pool.clear()
def set(self, key, value, expire=0, noreply=None):
with self.client_pool.get_and_release(destroy_on_fail=True) as client:
return client.set(key, value, expire=expire, noreply=noreply)
def set_many(self, values, expire=0, noreply=None):
with self.client_pool.get_and_release(destroy_on_fail=True) as client:
return client.set_many(values, expire=expire, noreply=noreply)
set_multi = set_many
def replace(self, key, value, expire=0, noreply=None):
with self.client_pool.get_and_release(destroy_on_fail=True) as client:
return client.replace(key, value, expire=expire, noreply=noreply)
def append(self, key, value, expire=0, noreply=None):
with self.client_pool.get_and_release(destroy_on_fail=True) as client:
return client.append(key, value, expire=expire, noreply=noreply)
def prepend(self, key, value, expire=0, noreply=None):
with self.client_pool.get_and_release(destroy_on_fail=True) as client:
return client.prepend(key, value, expire=expire, noreply=noreply)
def cas(self, key, value, cas, expire=0, noreply=False):
with self.client_pool.get_and_release(destroy_on_fail=True) as client:
return client.cas(key, value, cas,
expire=expire, noreply=noreply)
def get(self, key, default=None):
with self.client_pool.get_and_release(destroy_on_fail=True) as client:
try:
return client.get(key, default)
except Exception:
if self.ignore_exc:
return None
else:
raise
def get_many(self, keys):
with self.client_pool.get_and_release(destroy_on_fail=True) as client:
try:
return client.get_many(keys)
except Exception:
if self.ignore_exc:
return {}
else:
raise
get_multi = get_many
def gets(self, key):
with self.client_pool.get_and_release(destroy_on_fail=True) as client:
try:
return client.gets(key)
except Exception:
if self.ignore_exc:
return (None, None)
else:
raise
def gets_many(self, keys):
with self.client_pool.get_and_release(destroy_on_fail=True) as client:
try:
return client.gets_many(keys)
except Exception:
if self.ignore_exc:
return {}
else:
raise
def delete(self, key, noreply=None):
with self.client_pool.get_and_release(destroy_on_fail=True) as client:
return client.delete(key, noreply=noreply)
def delete_many(self, keys, noreply=None):
with self.client_pool.get_and_release(destroy_on_fail=True) as client:
return client.delete_many(keys, noreply=noreply)
delete_multi = delete_many
def add(self, key, value, expire=0, noreply=None):
with self.client_pool.get_and_release(destroy_on_fail=True) as client:
return client.add(key, value, expire=expire, noreply=noreply)
def incr(self, key, value, noreply=False):
with self.client_pool.get_and_release(destroy_on_fail=True) as client:
return client.incr(key, value, noreply=noreply)
def decr(self, key, value, noreply=False):
with self.client_pool.get_and_release(destroy_on_fail=True) as client:
return client.decr(key, value, noreply=noreply)
def touch(self, key, expire=0, noreply=None):
with self.client_pool.get_and_release(destroy_on_fail=True) as client:
return client.touch(key, expire=expire, noreply=noreply)
def stats(self, *args):
with self.client_pool.get_and_release(destroy_on_fail=True) as client:
try:
return client.stats(*args)
except Exception:
if self.ignore_exc:
return {}
else:
raise
def version(self):
with self.client_pool.get_and_release(destroy_on_fail=True) as client:
return client.version()
def flush_all(self, delay=0, noreply=None):
with self.client_pool.get_and_release(destroy_on_fail=True) as client:
return client.flush_all(delay=delay, noreply=noreply)
def quit(self):
with self.client_pool.get_and_release(destroy_on_fail=True) as client:
try:
client.quit()
finally:
self.client_pool.destroy(client)
def __setitem__(self, key, value):
self.set(key, value, noreply=True)
def __getitem__(self, key):
value = self.get(key)
if value is None:
raise KeyError
return value
def __delitem__(self, key):
self.delete(key, noreply=True)
def _readline(sock, buf):
"""Read line of text from the socket.
Read a line of text (delimited by "\r\n") from the socket, and
return that line along with any trailing characters read from the
socket.
Args:
sock: Socket object, should be connected.
buf: String, zero or more characters, returned from an earlier
call to _readline or _readvalue (pass an empty string on the
first call).
Returns:
A tuple of (buf, line) where line is the full line read from the
socket (minus the "\r\n" characters) and buf is any trailing
characters read after the "\r\n" was found (which may be an empty
string).
"""
chunks = []
last_char = b''
while True:
# We're reading in chunks, so "\r\n" could appear in one chunk,
# or across the boundary of two chunks, so we check for both
# cases.
# This case must appear first, since the buffer could have
# later \r\n characters in it and we want to get the first \r\n.
if last_char == b'\r' and buf[0:1] == b'\n':
# Strip the last character from the last chunk.
chunks[-1] = chunks[-1][:-1]
return buf[1:], b''.join(chunks)
elif buf.find(b'\r\n') != -1:
before, sep, after = buf.partition(b"\r\n")
chunks.append(before)
return after, b''.join(chunks)
if buf:
chunks.append(buf)
last_char = buf[-1:]
buf = _recv(sock, RECV_SIZE)
if not buf:
raise MemcacheUnexpectedCloseError()
def _readvalue(sock, buf, size):
"""Read specified amount of bytes from the socket.
Read size bytes, followed by the "\r\n" characters, from the socket,
and return those bytes and any trailing bytes read after the "\r\n".
Args:
sock: Socket object, should be connected.
buf: String, zero or more characters, returned from an earlier
call to _readline or _readvalue (pass an empty string on the
first call).
size: Integer, number of bytes to read from the socket.
Returns:
A tuple of (buf, value) where value is the bytes read from the
socket (there will be exactly size bytes) and buf is trailing
characters read after the "\r\n" following the bytes (but not
including the \r\n).
"""
chunks = []
rlen = size + 2
while rlen - len(buf) > 0:
if buf:
rlen -= len(buf)
chunks.append(buf)
buf = _recv(sock, RECV_SIZE)
if not buf:
raise MemcacheUnexpectedCloseError()
# Now we need to remove the \r\n from the end. There are two cases we care
# about: the \r\n is all in the last buffer, or only the \n is in the last
# buffer, and we need to remove the \r from the penultimate buffer.
if rlen == 1:
# replace the last chunk with the same string minus the last character,
# which is always '\r' in this case.
chunks[-1] = chunks[-1][:-1]
else:
# Just remove the "\r\n" from the latest chunk
chunks.append(buf[:rlen - 2])
return buf[rlen:], b''.join(chunks)
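# Worked example (illustrative): _readvalue with size=3 and recv() chunks
# b'abc\r' then b'\nrest' leaves rlen == 1 after consuming the first chunk,
# so the trailing b'\r' is stripped from that chunk and the b'\n' is skipped
# via buf[rlen:], returning (b'rest', b'abc').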
def _recv(sock, size):
"""sock.recv() with retry on EINTR"""
while True:
try:
return sock.recv(size)
except IOError as e:
if e.errno != errno.EINTR:
raise
| ewdurbin/pymemcache | pymemcache/client/base.py | Python | apache-2.0 | 39,766 |
import pygraphviz as pgv
import random
import rag_custom
random.seed(1)
# @profile is normally injected as a builtin by kernprof (line_profiler);
# fall back to a no-op so the module stays importable when run directly.
try:
    profile
except NameError:
    def profile(func):
        return func
class Graph(object):
def __init__(self, n):
        # TODO : why should this be a list? It could be a dict instead,
        # since the number of vertices does not stay the same.
self.rows = [{} for i in range(n)]
self.edge_count = 0
self.vertex_count = n
self.prop = {}
def display(self):
for i in range(len(self.rows)):
for key in self.rows[i]:
if key < i:
print "(%d,%d) -> %d" % (i, key, self.rows[i][key])
@profile
def make_edge(self, i, j, wt):
try:
self.rows[i][j]
except KeyError:
self.edge_count += 1
self.rows[i][j] = wt
self.rows[j][i] = wt
def neighbors(self, i):
return self.rows[i].keys()
def get_weight(self, i, j):
return self.rows[i][j]
@profile
def merge(self, i, j):
if not self.has_edge(i, j):
            raise ValueError("Can't merge non-adjacent nodes")
# print "before ",self.order()
for x in self.neighbors(i):
if x == j:
continue
w1 = self.get_weight(x, i)
w2 = -1
if self.has_edge(x, j):
w2 = self.get_weight(x, j)
w = max(w1, w2)
self.make_edge(x, j, w)
self.remove_node(i)
# print "after",self.order()
def draw(self, name):
g = pgv.AGraph()
for i in range(len(self.rows)):
for key in self.rows[i]:
if key < i:
g.add_edge(i, key)
e = g.get_edge(i, key)
e.attr['label'] = str(self.rows[i][key])
g.layout('circo')
g.draw(name)
def has_edge(self, i, j):
try:
self.rows[i][j]
return True
except KeyError:
return False
def remove_node(self, x):
for i in self.neighbors(x):
del self.rows[i][x]
self.rows[x] = {}
def random_merge(self, minimum):
n = self.vertex_count
while n > minimum:
i = random.randint(0, self.vertex_count - 1)
if len(self.rows[i]) > 0:
k = random.choice(self.rows[i].keys())
self.merge(i, k)
n -= 1
def construct_rag(arr):
return rag_custom.construct_rag_3d_custom(arr)
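# --- Illustrative usage (editor's sketch) ---
# Builds a tiny graph, merges two adjacent nodes, and displays the
# surviving edge. construct_rag() is not exercised here because it
# needs the compiled rag_custom helper.
if __name__ == "__main__":
    g = Graph(3)
    g.make_edge(0, 1, 10)
    g.make_edge(1, 2, 20)
    g.merge(0, 1)  # fold node 0 into node 1
    g.display()    # prints: (2,1) -> 20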
| vighneshbirodkar/skimage-graph | graph_custom.py | Python | mit | 2,443 |
"""
missing types & inference
"""
import numpy as np
from pandas._config import get_option
from pandas._libs import lib
import pandas._libs.missing as libmissing
from pandas._libs.tslibs import NaT, iNaT
from .common import (
_NS_DTYPE,
_TD_DTYPE,
ensure_object,
is_bool_dtype,
is_complex_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_datetimelike,
is_datetimelike_v_numeric,
is_dtype_equal,
is_extension_array_dtype,
is_float_dtype,
is_integer_dtype,
is_object_dtype,
is_period_dtype,
is_scalar,
is_string_dtype,
is_string_like_dtype,
is_timedelta64_dtype,
needs_i8_conversion,
pandas_dtype,
)
from .generic import (
ABCDatetimeArray,
ABCExtensionArray,
ABCGeneric,
ABCIndexClass,
ABCMultiIndex,
ABCSeries,
ABCTimedeltaArray,
)
from .inference import is_list_like
isposinf_scalar = libmissing.isposinf_scalar
isneginf_scalar = libmissing.isneginf_scalar
def isna(obj):
"""
Detect missing values for an array-like object.
This function takes a scalar or array-like object and indicates
whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN``
in object arrays, ``NaT`` in datetimelike).
Parameters
----------
obj : scalar or array-like
Object to check for null or missing values.
Returns
-------
bool or array-like of bool
For scalar input, returns a scalar boolean.
For array input, returns an array of boolean indicating whether each
corresponding element is missing.
See Also
--------
notna : Boolean inverse of pandas.isna.
Series.isna : Detect missing values in a Series.
DataFrame.isna : Detect missing values in a DataFrame.
Index.isna : Detect missing values in an Index.
Examples
--------
Scalar arguments (including strings) result in a scalar boolean.
>>> pd.isna('dog')
False
>>> pd.isna(np.nan)
True
ndarrays result in an ndarray of booleans.
>>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])
>>> array
array([[ 1., nan, 3.],
[ 4., 5., nan]])
>>> pd.isna(array)
array([[False, True, False],
[False, False, True]])
For indexes, an ndarray of booleans is returned.
>>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None,
... "2017-07-08"])
>>> index
DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],
dtype='datetime64[ns]', freq=None)
>>> pd.isna(index)
array([False, False, True, False])
For Series and DataFrame, the same type is returned, containing booleans.
>>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df
0 1 2
0 ant bee cat
1 dog None fly
>>> pd.isna(df)
0 1 2
0 False False False
1 False True False
>>> pd.isna(df[1])
0 False
1 True
Name: 1, dtype: bool
"""
return _isna(obj)
isnull = isna
def _isna_new(obj):
if is_scalar(obj):
return libmissing.checknull(obj)
# hack (for now) because MI registers as ndarray
elif isinstance(obj, ABCMultiIndex):
raise NotImplementedError("isna is not defined for MultiIndex")
elif isinstance(
obj,
(
ABCSeries,
np.ndarray,
ABCIndexClass,
ABCExtensionArray,
ABCDatetimeArray,
ABCTimedeltaArray,
),
):
return _isna_ndarraylike(obj)
elif isinstance(obj, ABCGeneric):
return obj._constructor(obj._data.isna(func=isna))
elif isinstance(obj, list):
return _isna_ndarraylike(np.asarray(obj, dtype=object))
elif hasattr(obj, "__array__"):
return _isna_ndarraylike(np.asarray(obj))
else:
return obj is None
def _isna_old(obj):
"""Detect missing values. Treat None, NaN, INF, -INF as null.
Parameters
----------
    obj: ndarray or object value
Returns
-------
boolean ndarray or boolean
"""
if is_scalar(obj):
return libmissing.checknull_old(obj)
# hack (for now) because MI registers as ndarray
elif isinstance(obj, ABCMultiIndex):
raise NotImplementedError("isna is not defined for MultiIndex")
elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass)):
return _isna_ndarraylike_old(obj)
elif isinstance(obj, ABCGeneric):
return obj._constructor(obj._data.isna(func=_isna_old))
elif isinstance(obj, list):
return _isna_ndarraylike_old(np.asarray(obj, dtype=object))
elif hasattr(obj, "__array__"):
return _isna_ndarraylike_old(np.asarray(obj))
else:
return obj is None
_isna = _isna_new
def _use_inf_as_na(key):
"""Option change callback for na/inf behaviour
Choose which replacement for numpy.isnan / -numpy.isfinite is used.
Parameters
----------
flag: bool
True means treat None, NaN, INF, -INF as null (old way),
False means None and NaN are null, but INF, -INF are not null
(new way).
Notes
-----
This approach to setting global module values is discussed and
approved here:
* http://stackoverflow.com/questions/4859217/
programmatically-creating-variables-in-python/4859312#4859312
"""
flag = get_option(key)
if flag:
globals()["_isna"] = _isna_old
else:
globals()["_isna"] = _isna_new
def _isna_ndarraylike(obj):
is_extension = is_extension_array_dtype(obj)
if not is_extension:
# Avoid accessing `.values` on things like
# PeriodIndex, which may be expensive.
values = getattr(obj, "values", obj)
else:
values = obj
dtype = values.dtype
if is_extension:
if isinstance(obj, (ABCIndexClass, ABCSeries)):
values = obj._values
else:
values = obj
result = values.isna()
elif isinstance(obj, ABCDatetimeArray):
return obj.isna()
elif is_string_dtype(dtype):
# Working around NumPy ticket 1542
shape = values.shape
if is_string_like_dtype(dtype):
# object array of strings
result = np.zeros(values.shape, dtype=bool)
else:
# object array of non-strings
result = np.empty(shape, dtype=bool)
vec = libmissing.isnaobj(values.ravel())
result[...] = vec.reshape(shape)
elif needs_i8_conversion(dtype):
# this is the NaT pattern
result = values.view("i8") == iNaT
else:
result = np.isnan(values)
# box
if isinstance(obj, ABCSeries):
result = obj._constructor(result, index=obj.index, name=obj.name, copy=False)
return result
def _isna_ndarraylike_old(obj):
values = getattr(obj, "values", obj)
dtype = values.dtype
if is_string_dtype(dtype):
# Working around NumPy ticket 1542
shape = values.shape
if is_string_like_dtype(dtype):
result = np.zeros(values.shape, dtype=bool)
else:
result = np.empty(shape, dtype=bool)
vec = libmissing.isnaobj_old(values.ravel())
result[:] = vec.reshape(shape)
elif is_datetime64_dtype(dtype):
# this is the NaT pattern
result = values.view("i8") == iNaT
else:
result = ~np.isfinite(values)
# box
if isinstance(obj, ABCSeries):
result = obj._constructor(result, index=obj.index, name=obj.name, copy=False)
return result
def notna(obj):
"""
Detect non-missing values for an array-like object.
This function takes a scalar or array-like object and indicates
whether values are valid (not missing, which is ``NaN`` in numeric
arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike).
Parameters
----------
obj : array-like or object value
Object to check for *not* null or *non*-missing values.
Returns
-------
bool or array-like of bool
For scalar input, returns a scalar boolean.
For array input, returns an array of boolean indicating whether each
corresponding element is valid.
See Also
--------
isna : Boolean inverse of pandas.notna.
Series.notna : Detect valid values in a Series.
DataFrame.notna : Detect valid values in a DataFrame.
Index.notna : Detect valid values in an Index.
Examples
--------
Scalar arguments (including strings) result in a scalar boolean.
>>> pd.notna('dog')
True
>>> pd.notna(np.nan)
False
ndarrays result in an ndarray of booleans.
>>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])
>>> array
array([[ 1., nan, 3.],
[ 4., 5., nan]])
>>> pd.notna(array)
array([[ True, False, True],
[ True, True, False]])
For indexes, an ndarray of booleans is returned.
>>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None,
... "2017-07-08"])
>>> index
DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],
dtype='datetime64[ns]', freq=None)
>>> pd.notna(index)
array([ True, True, False, True])
For Series and DataFrame, the same type is returned, containing booleans.
>>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df
0 1 2
0 ant bee cat
1 dog None fly
>>> pd.notna(df)
0 1 2
0 True True True
1 True False True
>>> pd.notna(df[1])
0 True
1 False
Name: 1, dtype: bool
"""
res = isna(obj)
if is_scalar(res):
return not res
return ~res
notnull = notna
def _isna_compat(arr, fill_value=np.nan):
"""
Parameters
----------
arr: a numpy array
fill_value: fill value, default to np.nan
Returns
-------
True if we can fill using this fill_value
"""
dtype = arr.dtype
if isna(fill_value):
return not (is_bool_dtype(dtype) or is_integer_dtype(dtype))
return True
def array_equivalent(left, right, strict_nan=False):
"""
True if two arrays, left and right, have equal non-NaN elements, and NaNs
in corresponding locations. False otherwise. It is assumed that left and
right are NumPy arrays of the same dtype. The behavior of this function
(particularly with respect to NaNs) is not defined if the dtypes are
different.
Parameters
----------
left, right : ndarrays
strict_nan : bool, default False
If True, consider NaN and None to be different.
Returns
-------
b : bool
Returns True if the arrays are equivalent.
Examples
--------
>>> array_equivalent(
... np.array([1, 2, np.nan]),
... np.array([1, 2, np.nan]))
True
>>> array_equivalent(
... np.array([1, np.nan, 2]),
... np.array([1, 2, np.nan]))
False
"""
left, right = np.asarray(left), np.asarray(right)
# shape compat
if left.shape != right.shape:
return False
# Object arrays can contain None, NaN and NaT.
    # string dtypes must come through this path for NumPy 1.7.1 compat
if is_string_dtype(left) or is_string_dtype(right):
if not strict_nan:
# isna considers NaN and None to be equivalent.
return lib.array_equivalent_object(
ensure_object(left.ravel()), ensure_object(right.ravel())
)
for left_value, right_value in zip(left, right):
if left_value is NaT and right_value is not NaT:
return False
elif isinstance(left_value, float) and np.isnan(left_value):
if not isinstance(right_value, float) or not np.isnan(right_value):
return False
else:
if left_value != right_value:
return False
return True
# NaNs can occur in float and complex arrays.
if is_float_dtype(left) or is_complex_dtype(left):
# empty
if not (np.prod(left.shape) and np.prod(right.shape)):
return True
return ((left == right) | (isna(left) & isna(right))).all()
    # numpy will not allow this type of datetimelike vs integer comparison
elif is_datetimelike_v_numeric(left, right):
return False
# M8/m8
elif needs_i8_conversion(left) and needs_i8_conversion(right):
if not is_dtype_equal(left.dtype, right.dtype):
return False
left = left.view("i8")
right = right.view("i8")
# if we have structured dtypes, compare first
if left.dtype.type is np.void or right.dtype.type is np.void:
if left.dtype != right.dtype:
return False
return np.array_equal(left, right)
def _infer_fill_value(val):
"""
    Infer the fill value for the nan/NaT from the provided
    scalar/ndarray/list-like. If we are a NaT, return the correctly dtyped
    element to provide proper block construction.
"""
if not is_list_like(val):
val = [val]
val = np.array(val, copy=False)
if is_datetimelike(val):
return np.array("NaT", dtype=val.dtype)
elif is_object_dtype(val.dtype):
dtype = lib.infer_dtype(ensure_object(val), skipna=False)
if dtype in ["datetime", "datetime64"]:
return np.array("NaT", dtype=_NS_DTYPE)
elif dtype in ["timedelta", "timedelta64"]:
return np.array("NaT", dtype=_TD_DTYPE)
return np.nan
def _maybe_fill(arr, fill_value=np.nan):
"""
if we have a compatible fill_value and arr dtype, then fill
"""
if _isna_compat(arr, fill_value):
arr.fill(fill_value)
return arr
def na_value_for_dtype(dtype, compat=True):
"""
Return a dtype compat na value
Parameters
----------
dtype : string / dtype
compat : boolean, default True
Returns
-------
np.dtype or a pandas dtype
Examples
--------
>>> na_value_for_dtype(np.dtype('int64'))
0
>>> na_value_for_dtype(np.dtype('int64'), compat=False)
nan
>>> na_value_for_dtype(np.dtype('float64'))
nan
>>> na_value_for_dtype(np.dtype('bool'))
False
>>> na_value_for_dtype(np.dtype('datetime64[ns]'))
NaT
"""
dtype = pandas_dtype(dtype)
if is_extension_array_dtype(dtype):
return dtype.na_value
if (
is_datetime64_dtype(dtype)
or is_datetime64tz_dtype(dtype)
or is_timedelta64_dtype(dtype)
or is_period_dtype(dtype)
):
return NaT
elif is_float_dtype(dtype):
return np.nan
elif is_integer_dtype(dtype):
if compat:
return 0
return np.nan
elif is_bool_dtype(dtype):
return False
return np.nan
def remove_na_arraylike(arr):
"""
Return array-like containing only true/non-NaN values, possibly empty.
"""
if is_extension_array_dtype(arr):
return arr[notna(arr)]
else:
return arr[notna(lib.values_from_object(arr))]
def is_valid_nat_for_dtype(obj, dtype):
"""
isna check that excludes incompatible dtypes
Parameters
----------
obj : object
dtype : np.datetime64, np.timedelta64, DatetimeTZDtype, or PeriodDtype
Returns
-------
bool
"""
if not lib.is_scalar(obj) or not isna(obj):
return False
if dtype.kind == "M":
return not isinstance(obj, np.timedelta64)
if dtype.kind == "m":
return not isinstance(obj, np.datetime64)
    # must be PeriodDtype
return not isinstance(obj, (np.datetime64, np.timedelta64))
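# --- Illustrative usage (editor's sketch, not part of pandas) ---
# Exercises the public isna/notna pair and the dtype-aware NA lookup.
# Because this module uses relative imports, run it in package context,
# e.g. ``python -m pandas.core.dtypes.missing``.
if __name__ == "__main__":
    _arr = np.array([1.0, np.nan, 3.0])
    assert isna(_arr).tolist() == [False, True, False]
    assert notna(_arr).tolist() == [True, False, True]
    assert na_value_for_dtype(np.dtype("int64")) == 0  # compat default
    assert np.isnan(na_value_for_dtype(np.dtype("float64")))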
| toobaz/pandas | pandas/core/dtypes/missing.py | Python | bsd-3-clause | 15,772 |
# $Id$
"""
This is a comment
"""
__RCSID__ = "$Revision: 1.19 $"
# $Source: /tmp/libdirac/tmp.stZoy15380/dirac/DIRAC3/DIRAC/Core/Workflow/Module.py,v $
import copy
import new, sys, os
#try: # this part to import as part of the DIRAC framework
from DIRAC.Core.Workflow.Parameter import *
#RICARDO PLEASE DO NOT CHANGE THIS BACK. IT BREAKS THE EXECUTION OF WORKFLOWS!
#from DIRAC.Core.Workflow.Step import *
#except: # this part is to import code without DIRAC
# from Parameter import *
class ModuleDefinition( AttributeCollection ):
def __init__( self, type = None, obj = None, parent = None ):
# we can create an object from another module
# or from the ParameterCollection
AttributeCollection.__init__( self )
self.main_class_obj = None # used for the interpretation only
#self.module_obj = None # used for the interpretation only
self.parent = parent
if ( obj == None ) or isinstance( obj, ParameterCollection ):
      self.setType( 'notgiven' )
self.setDescrShort( '' )
self.setDescription( '' )
self.setRequired( '' )
self.setBody( '' )
self.setOrigin( '' )
self.setVersion( 0.0 )
self.parameters = ParameterCollection( obj ) # creating copy
elif isinstance( obj, ModuleDefinition ):
self.setType( obj.getType() )
self.setDescrShort( obj.getDescrShort() )
self.setDescription( obj.getDescription() )
self.setBody( obj.getBody() )
self.setRequired( obj.getRequired() )
self.setOrigin( obj.getOrigin() )
self.setVersion( obj.getVersion() )
self.parameters = ParameterCollection( obj.parameters )
else:
raise TypeError( 'Can not create object type ' + str( type( self ) ) + ' from the ' + str( type( obj ) ) )
if type :
self.setType( type )
def createCode( self ):
return self.getBody() + '\n'
def __str__( self ):
return str( type( self ) ) + ':\n' + AttributeCollection.__str__( self ) + self.parameters.__str__()
def toXML( self ):
ret = '<ModuleDefinition>\n'
ret = ret + AttributeCollection.toXML( self )
ret = ret + self.parameters.toXML()
ret = ret + '</ModuleDefinition>\n'
return ret
def toXMLFile( self, outFile ):
if os.path.exists( outFile ):
os.remove( outFile )
xmlfile = open( outFile, 'w' )
xmlfile.write( self.toXML() )
xmlfile.close()
def loadCode( self ):
#print 'Loading code of the Module =', self.getType()
# version 1 - OLD sample
#ret = compile(self.getBody(),'<string>','exec')
#eval(ret)
#return ret #returning ref just in case we might need it
#
if len( self.getBody() ): # checking the size of the string
      # version 2 - we assume that each self.body is a module object
#module = new.module(self.getType()) # create empty module object
#sys.modules[self.getType()] = module # add reference for the import operator
#exec self.getBody() in module.__dict__ # execute code itself
#self.module_obj = module # save pointer to this module
#if module.__dict__.has_key(self.getType()):
# self.main_class_obj = module.__dict__[self.getType()] # save class object
# version 3
# A.T. Use vars() function to inspect local objects instead of playing with
# fake modules. We assume that after the body execution there will be
# a class with name "self.getType()" defined in the local scope.
exec self.getBody()
if vars().has_key( self.getType() ):
self.main_class_obj = vars()[self.getType()] # save class object
else:
        # it is possible to have this class in another module; we have to check for this,
        # but it is advisable to use the 'from module import class' form,
        # otherwise the module cannot be found. But it is possible that
        # in the future this code can be changed to do it more wisely
raise TypeError( 'Can not find class ' + self.getType() + ' in the module created from the body of the module ' + self.getOrigin() )
else:
raise TypeError( 'The body of the Module ' + self.getType() + ' seems empty' )
return self.main_class_obj
class ModuleInstance( AttributeCollection ):
def __init__( self, name, obj = None, parent = None ):
AttributeCollection.__init__( self )
self.instance_obj = None # used for the interpretation only
self.parent = parent
if obj == None:
self.parameters = ParameterCollection()
elif isinstance( obj, ModuleInstance ) or isinstance( obj, ModuleDefinition ):
if name == None:
self.setName( obj.getName() )
else:
self.setName( name )
self.setType( obj.getType() )
self.setDescrShort( obj.getDescrShort() )
self.parameters = ParameterCollection( obj.parameters )
elif isinstance( obj, ParameterCollection ):
# set attributes
self.setName( name )
self.setType( "" )
self.setDescrShort( "" )
self.parameters = ParameterCollection( obj )
elif obj != None:
raise TypeError( 'Can not create object type ' + str( type( self ) ) + ' from the ' + str( type( obj ) ) )
def createCode( self, ind = 2 ):
str = indent( ind ) + self.getName() + ' = ' + self.getType() + '()\n'
str = str + self.parameters.createParametersCode( ind, self.getName() )
str = str + indent( ind ) + self.getName() + '.execute()\n\n'
return str
def __str__( self ):
return str( type( self ) ) + ':\n' + AttributeCollection.__str__( self ) + self.parameters.__str__()
def toXML( self ):
ret = '<ModuleInstance>\n'
ret = ret + AttributeCollection.toXML( self )
ret = ret + self.parameters.toXML()
ret = ret + '</ModuleInstance>\n'
return ret
def execute( self, step_parameters, definitions ):
#print 'Executing ModuleInstance ',self.getName(),'of type',self.getType()
self.instance_obj = definitions[self.getType()].main_class_obj() # creating instance
self.parameters.execute( self.getName() )
self.instance_obj.execute2()
class DefinitionsPool( dict ):
def __init__( self, parent, pool = None ):
dict.__init__( self )
self.parent = parent # this is a cache value, we propagate it into next level
if isinstance( pool, DefinitionsPool ):
for k in pool.keys():
v = pool[k]
if isinstance( v, ModuleDefinition ):
obj = ModuleDefinition( None, v, self.parent )
elif isinstance( v, StepDefinition ):
obj = StepDefinition( None, v, self.parent )
else:
raise TypeError( 'Error: __init__ Wrong type of object stored in the DefinitionPool ' + str( type( pool[v] ) ) )
self.append( obj )
elif pool != None:
raise TypeError( 'Can not create object type ' + str( type( self ) ) + ' from the ' + str( type( pool ) ) )
def __setitem__( self, i, obj ):
if not self.has_key( i ):
dict.__setitem__( self, i, obj )
# print 'We need to write piece of code to replace existent DefinitionsPool.__setitem__()'
# print 'For now we ignore it for the', obj.getType()
def append( self, obj ):
""" We add new Definition (Module, Step)
"""
self[obj.getType()] = obj
obj.setParent( self.parent )
return obj
def remove( self, obj ):
del self[obj.getType()]
obj.setParent( None )
  def compare( self, s ):
    if not isinstance( s, DefinitionsPool ):
      return False # checking types of objects
    if len( s ) != len( self ):
      return False # checking size
# we need to compare the keys of dictionaries
if self.keys() != s.keys():
return False
for k in self.keys():
if ( not s.has_key( k ) ) or ( not self[k].compare( s[k] ) ):
return False
return True
def __str__( self ):
ret = str( type( self ) ) + ': number of Definitions:' + str( len( self ) ) + '\n'
index = 0
for k in self.keys():
ret = ret + 'definition(' + str( index ) + ')=' + str( self[k] ) + '\n'
index = index + 1
return ret
def setParent( self, parent ):
self.parent = parent
# we need to propagate it just in case it was different one
for k in self.keys():
self[k].setParent( parent )
def getParent( self ):
return self.parent
def updateParents( self, parent ):
self.parent = parent
for k in self.keys():
self[k].updateParents( parent )
def toXML( self ):
ret = ''
for k in self.keys():
ret = ret + self[k].toXML()
return ret
def createCode( self ):
str = ''
for k in self.keys():
#str=str+indent(2)+'# flush code for instance\n'
str = str + self[k].createCode()
return str
def loadCode( self ):
for k in self.keys():
# load code of the modules
self[k].loadCode()
class InstancesPool( list ):
def __init__( self, parent, pool = None ):
list.__init__( self )
    self.parent = parent # this is a cache value, we propagate it into the next level
if isinstance( pool, InstancesPool ):
for v in pool:
        # dispatch on the concrete instance type below; if this were a
        # constructor we could pass the parent into it directly
if isinstance( v, ModuleInstance ):
obj = ModuleInstance( None, v, self.parent )
elif isinstance( v, StepInstance ):
obj = StepInstance( None, v, self.parent )
else:
raise TypeError( 'Error: __init__ Wrong type of object stored in the DefinitionPool ' + str( type( pool[v] ) ) )
self.append( obj )
elif pool != None:
raise TypeError( 'Can not create object type ' + str( type( self ) ) + ' from the ' + str( type( pool ) ) )
def __str__( self ):
ret = str( type( self ) ) + ': number of Instances:' + str( len( self ) ) + '\n'
index = 0
for v in self:
ret = ret + 'instance(' + str( index ) + ')=' + str( v ) + '\n'
index = index + 1
return ret
def setParent( self, parent ):
self.parent = parent
for v in self:
v.setParent( parent )
def getParent( self ):
return self.parent
def updateParents( self, parent ):
self.parent = parent
for v in self:
v.updateParents( parent )
def append( self, obj ):
list.append( self, obj )
obj.setParent( self.parent )
def toXML( self ):
ret = ''
for v in self:
ret = ret + v.toXML()
return ret
def findIndex( self, name ):
i = 0
for v in self:
if v.getName() == name:
return i
i = i + 1
return - 1
def find( self, name ):
for v in self:
if v.getName() == name:
return v
return None
def delete( self, name ):
for v in self:
if v.getName() == name:
self.remove( v )
v.setParent( None )
def compare( self, s ):
if ( not isinstance( s, InstancesPool ) or ( len( s ) != len( self ) ) ):
return False
for v in self:
for i in s:
if v.getName() == i.getName():
if not v.compare( i ):
return False
else:
break
else:
        # if we reach this point naturally, no matching name was found
return False
return True
def createCode( self ):
str = ''
for inst in self:
str = str + inst.createCode()
str = str + indent( 2 ) + '# output assignment\n'
for v in inst.parameters:
if v.isOutput():
str = str + v.createParameterCode( 2, 'self' )
str = str + '\n'
return str
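# Illustrative usage (editor's sketch, not part of DIRAC): define a trivial
# module whose body declares a class named after its type, then load and run
# it the way loadCode() resolves classes. Assumes the DIRAC imports above
# resolve (i.e. this file sits inside a DIRAC checkout).
if __name__ == "__main__":
  md = ModuleDefinition( 'Hello' )
  md.setBody( 'class Hello:\n  def execute2( self ):\n    print "hi"\n' )
  cls = md.loadCode()
  cls().execute2() # prints: hi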
| vmendez/DIRAC | Core/Workflow/Module.py | Python | gpl-3.0 | 11,483 |
from pylab import find, hist, close, isnan, zeros
import numpy as np
from lmfit import minimize, Parameters, Parameter, report_errors
def get_histogram(var, nbins):
    # drop NaNs
    no_nans = ~isnan(var)
    var = var[no_nans]
    # histogram
    h = hist(var, bins=nbins)
    close()  # close the figure so nothing is displayed
    h_cnts = h[0]  # counts (n elements)
    h_bound = h[1]  # bin boundaries (n+1 elements)
#
n = len(h_cnts)
h_x = zeros(n)
for i in range(n):
h_x[i] = .5*(h_bound[i] + h_bound[i+1])
#
return [h_cnts, h_x]
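# Editor's sketch: bin counts and bin centers from a 1-D sample, e.g.
#     cnts, x = get_histogram(np.random.randn(1000), 30)
# (hypothetical data; any 1-D array that may contain NaNs works)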
def residuals(params, x, y_data):
mu = params['mu'].value
sig = params['sig'].value
A = params['A'].value
fun_name = params['function'].value
#
"""if fun_name=="log-normal":
diff = (lognormal(x, A, mu, sig) - y_data)**2.
elif fun_name=="gauss":
diff = (gauss(x, A, mu, sig) - y_data)**2."""
diff = np.power(fun_name(x, A, mu, sig) - y_data, 2.0)
#print " diff---> %f" % mean(diff)
return diff
def make_fit(data, sems, func):
x = data[0]
y = data[1]
# create a set of Parameters
params = Parameters()
params.add('A')
params.add('mu')
params.add('sig')
params.add('function')
SEM_A = sems[0]
SEM_mu = sems[1]
SEM_sig = sems[2]
params['A'].value = SEM_A
params['A'].vary = True
"""params['A'].min =
params['A'].max = """
params['mu'].value = SEM_mu
params['mu'].vary = True
"""params['mu'].min =
params['mu'].max = """
params['sig'].value = SEM_sig
params['sig'].vary = True
"""params['sig'].min =
params['sig'].max ="""
    params['function'].value = func
params['function'].vary = False
METHOD = "lbfgsb"#"leastsq"#"lbfgsb"
result = minimize(residuals, params, args=(x, y), method=METHOD)
# write error report
print " --------> METODO_FITEO: %s" % METHOD
#print " --------> funcion: %s" % func_name
#report_errors(params)
par = zeros(3)
par[0] = result.values['A']
par[1] = result.values['mu']
par[2] = result.values['sig']
return par
def func_nCR(data, rms_o, tau, q):
cte = 0.0
t, rms = data[0], data[1]
dt = t[1:-1] - t[0:-2]
integ = np.nan*np.ones(dt.size)
for i in range(dt.size):
integ[i] = np.sum(np.exp(t[1:i+2]/tau) * (rms[1:i+2]-rms_o) * dt[:(i+1)])
ncr = np.nan*np.ones(t.size)
ncr[1:-1] = np.exp(-t[1:-1]/tau) * (q*integ + cte)
return ncr
def nCR2(data, tau, q):
t, fc = data
to = 1.0 # to=1.0 : sheath trailing edge
    cc = t[1:-1] <= to  # recovery occurs after 'to'
cx = find(cc)
dt = t[1:-1] - t[0:-2]
nCR = np.nan*np.ones(t.size)
fcc = fc[1:-1]
    for i in cx:
        nCR[i+1] = q*np.sum(fcc[:(i+1)]*dt[:(i+1)])
cy = find(~cc)
no = cx[-1]
for i in cy:
nCR[i+1] = q*np.sum(fcc[:(i+1)]*dt[:(i+1)])
#print nCR[1:-1][no:i]; raw_input()
nCR[i+1] += (-1.0/tau)*np.sum( nCR[1:-1][no:i]*dt[no:i] )
return nCR
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
class fit_forbush():
def __init__(self, data, sems):
self.sems = sems
self.data = data
self.t = data[0]
self.rms = data[1]
self.crs = data[2]
def residuals(self, params):
tau = params['tau'].value
q = params['q'].value
#diff = np.power(fun_name(x, A, mu, sig) - y_data, 2.0)
t = self.t
crs = self.crs
#sqr = np.power(crs - func_nCR([t,self.rms], rms_o, tau, q), 2.0)
sqr = np.power(crs - nCR2([t,self.rms], tau, q), 2.0)
diff = np.nanmean(sqr)
print " diff---> %f, tau:%g, q:%g" % (diff, tau, q)
return diff
def make_fit(self):
sems = self.sems
#x = self.t #data[0]
#y = self.crs #data[1]
# create a set of Parameters
params = Parameters()
params.add('q')
params.add('tau')
SEM_tau = sems[0]
SEM_q = sems[1]
params['tau'].value = SEM_tau
params['tau'].vary = True
params['tau'].min = 1.0
params['tau'].max = 16.0
params['q'].value = SEM_q
params['q'].vary = True
params['q'].min = -600.0 #-1.0e3
params['q'].max = -200.0 #-1.0e1
METHOD = "lbfgsb"#"leastsq"#"lbfgsb"
result = minimize(self.residuals, params, method=METHOD)
# write error report
print " --------> METODO_FITEO: %s" % METHOD
#print " --------> funcion: %s" % func_name
#report_errors(params)
par = np.zeros(2)
par[0] = result.values['tau']
par[1] = result.values['q']
self.par = par
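#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Illustrative usage (editor's sketch with synthetic data, not from any
# measurement): evaluate the nCR2 response model on a toy forcing profile.
if __name__ == "__main__":
    t = np.linspace(-1.0, 5.0, 200)
    fc = np.exp(-t**2)                        # toy forcing profile
    ncr = nCR2([t, fc], tau=5.0, q=-300.0)    # modeled CR response
    print " nCR2 range: %g .. %g" % (np.nanmin(ncr), np.nanmax(ncr))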
| jimsrc/seatos | etc/n_CR/opt_2/funcs.py | Python | mit | 5,218 |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Capability base class.
All capabilities must inherit from this class.
Serves two purposes:
- Enforce all capabilities to have a device_name attribute.
This is a requirement for informative log and error messages in multi-device
testing.
- Dynamically identify all capabilities supported by GDM.
This is accomplished by scanning the capabilities folder and identifying all
classes
which subclass CapabilityBase.
"""
import abc
import functools
from typing import Any, Collection, Dict, Type
from gazoo_device import decorators
from gazoo_device import errors
from gazoo_device import extensions
from gazoo_device import gdm_logger
from gazoo_device.utility import common_utils
logger = gdm_logger.get_logger()
_CAPABILITY_INTERFACE_SUFFIX = "_base"
class CapabilityBase(abc.ABC):
"""Abstract base class for all capabilities."""
def __init__(self, device_name):
"""Set the device_name attribute of the capability.
Args:
device_name (str): name of the device instance the capability is attached
to. Used for error and log messages in multi-device tests.
"""
self._device_name = device_name
self._healthy = None
@decorators.CapabilityLogDecorator(logger, level=decorators.DEBUG)
def close(self):
"""Performs cleanup and releases capability resources."""
@decorators.DynamicProperty
def healthy(self):
"""Whether or not the capability has passed a health_check."""
if self._healthy is None:
try:
self.health_check()
self._healthy = True
except errors.CapabilityNotReadyError:
self._healthy = False
return self._healthy
@decorators.CapabilityLogDecorator(logger, level=decorators.DEBUG)
def health_check(self):
"""Checks capability readiness. Override in derived classes.
Raises:
CapabilityNotReadyError: if the capability is not ready.
"""
self._healthy = True
@classmethod
def get_capability_interface(cls):
"""Returns the capability interface class.
If called on a flavor class, the nearest parent interface will be
returned: FileTransferScp.get_capability_interface() ->
<class file_transfer_base.FileTransferBase>.
If called on an interface class, the same interface class will be
returned: FileTransferBase.get_capability_interface() ->
<class file_transfer_base.FileTransferBase>.
If called on CapabilityBase class, returns CapabilityBase (special case):
CapabilityBase.get_capability_interface() -> <class CapabilityBase>.
Returns:
type: capability interface class of this capability flavor.
Raises:
TypeError: if the capability flavor does not inherit from any
capability interface.
"""
if cls is CapabilityBase:
return CapabilityBase
for parent_class in cls.__mro__:
if parent_class in extensions.capability_interfaces.values():
return parent_class
raise TypeError(
"Capability {} does not inherit from a capability interface.".format(
cls))
@classmethod
@functools.lru_cache(maxsize=None)
def get_capability_name(cls) -> str:
"""Returns the name under which capability is accessible in device class.
This default implementation can be overridden in special cases.
Examples:
FileTransferBase.get_capability_name() -> "file_transfer"
FileTransferScp.get_capability_name() -> "file_transfer"
ABCDEventsBase.get_capability_name() -> "abcd_events"
"""
return get_default_capability_name(cls.get_capability_interface())
@decorators.CapabilityLogDecorator(logger, level=None)
def validate_required_keys(self,
key_list: Collection[str],
dictionary: Dict[str, Any],
dictionary_name: str):
"""Verify that the required keys are present in the provided dictionary.
self._device_name must be set before calling this method (by
CapabilityBase.__init__). All raised errors are converted to DeviceError by
the log decorator.
Args:
key_list: keys that are required in the dictionary.
dictionary: to check for presence of all required keys.
dictionary_name: name (or purpose) of the dictionary. Only
used in the error message. For example, "Commands".
Raises:
KeyError: if a required key is missing from the dictionary.
"""
missing_keys = [key for key in key_list if key not in dictionary]
if missing_keys:
raise KeyError(
"{} failed to create {!r} capability. Dictionary {!r} is missing the "
"following keys: {}".format(self._device_name,
self.get_capability_name(),
dictionary_name, missing_keys))
def get_default_capability_name(interface: Type[CapabilityBase]) -> str:
"""Generates the name under which a capability is accessible in device class.
This is the default name generation logic.
Examples:
FileTransferBase -> "file_transfer"
ABCDEventsBase -> "abcd_events"
Args:
interface (type): capability interface (a CapabilityBase subclass).
Returns:
str: capability name to use in device class capability definitions.
Raises:
RuntimeError: if unable to generate the name because the capability
interface does not follow the standard naming convention
(<some_capability>Base).
ValueError: if the interface is CapabilityBase.
"""
if interface is CapabilityBase:
raise ValueError("Cannot generate a capability name for the CapabilityBase "
"interface.")
snake_case_name = common_utils.title_to_snake_case(interface.__name__)
if not snake_case_name.endswith(_CAPABILITY_INTERFACE_SUFFIX):
raise RuntimeError(
"Unable to automatically generate the capability name from "
"capability interface {}. Interface snake case name {!r} does not "
"end in {!r}. Either rename the interface or override "
"get_capability_name() in the interface."
.format(interface, snake_case_name, _CAPABILITY_INTERFACE_SUFFIX))
return snake_case_name[:-len(_CAPABILITY_INTERFACE_SUFFIX)]
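# --- Illustrative usage (editor's sketch, not part of gazoo-device) ---
# Shows the default interface-name -> capability-name derivation; the
# FlashingBase class is hypothetical and exists only for this demo, and
# common_utils.title_to_snake_case is assumed to do the CamelCase split.
if __name__ == "__main__":
  class FlashingBase(CapabilityBase):
    """Hypothetical capability interface used only for this sketch."""
  print(get_default_capability_name(FlashingBase))  # -> "flashing"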
| google/gazoo-device | gazoo_device/capabilities/interfaces/capability_base.py | Python | apache-2.0 | 6,756 |
"""
This module implements an inversion of control framework. It allows
dependencies among functions and classes to be declared with decorators and the
resulting dependency graphs to be executed.
A decorator used to declare dependencies is called a :class:`ComponentType`, a
decorated function or class is called a component, and a collection of
interdependent components is called a graph.
In the example below, ``needs`` is a :class:`ComponentType`, ``one``, ``two``,
and ``add`` are components, and the relationship formed by their dependencies
is a graph.
.. code-block:: python
from insights import dr
class needs(dr.ComponentType):
pass
@needs()
def one():
return 1
@needs()
def two():
return 2
@needs(one, two)
def add(a, b):
return a + b
results = dr.run(add)
Once all components have been imported, the graphs they form can be run. To
execute a graph, ``dr`` sorts its components into an order that guarantees
dependencies are tried before dependents. Components that raise exceptions are
considered invalid, and their dependents will not be executed. If a component
is skipped because of a missing dependency, its dependents also will not be
executed.
During evaluation, results are accumulated into an object called a
:class:`Broker`, which is just a fancy dictionary. Brokers can be inspected
after a run for results, exceptions, tracebacks, and execution times. You also
can register callbacks with a broker that get invoked after the attempted
execution of every component, so you can inspect it during an evaluation
instead of at the end.
"""
from __future__ import print_function
import inspect
import logging
import json
import os
import pkgutil
import re
import six
import sys
import time
import traceback
from collections import defaultdict
from functools import reduce as _reduce
from insights.contrib import importlib
from insights.contrib.toposort import toposort_flatten
from insights.util import defaults, enum, KeyPassingDefaultDict
log = logging.getLogger(__name__)
GROUPS = enum("single", "cluster")
MODULE_NAMES = {}
BASE_MODULE_NAMES = {}
TYPE_OBSERVERS = defaultdict(set)
COMPONENTS_BY_TYPE = defaultdict(set)
DEPENDENCIES = defaultdict(set)
DEPENDENTS = defaultdict(set)
COMPONENTS = defaultdict(lambda: defaultdict(set))
DELEGATES = {}
HIDDEN = set()
IGNORE = defaultdict(set)
ENABLED = defaultdict(lambda: True)
def set_enabled(component, enabled=True):
"""
Enable a component for evaluation. If set to False, the component is
skipped, and all components that require it will not execute.
If component is a fully qualified name string of a callable object
instead of the callable object itself, the component's module is loaded
as a side effect of calling this function.
Args:
component (str or callable): fully qualified name of the component or
the component object itself.
enabled (bool): whether the component is enabled for evaluation.
Returns:
None
"""
if isinstance(component, six.string_types):
component = get_component(component)
if component:
ENABLED[component] = enabled
def is_enabled(component):
"""
Check to see if a component is enabled.
Args:
component (callable): The component to check. The component must
already be loaded.
Returns:
True if the component is enabled. False otherwise.
"""
return ENABLED[component]
def get_delegate(component):
return DELEGATES.get(component)
def add_ignore(c, i):
IGNORE[c].add(i)
def hashable(v):
try:
hash(v)
except:
return False
return True
def _get_from_module(name):
mod, _, n = name.rpartition(".")
if mod not in sys.modules:
importlib.import_module(mod)
return getattr(sys.modules[mod], n)
def _get_from_class(name):
mod, _, n = name.rpartition(".")
cls = _get_from_module(mod)
return getattr(cls, n)
def _import_component(name):
"""
Returns a class, function, or class method specified by the fully qualified
name.
"""
for f in (_get_from_module, _get_from_class):
try:
return f(name)
except:
pass
COMPONENT_IMPORT_CACHE = KeyPassingDefaultDict(_import_component)
def get_component(name):
""" Returns the class or function specified, importing it if necessary. """
return COMPONENT_IMPORT_CACHE[name]
def _find_component(name):
for d in DELEGATES:
if get_name(d) == name:
return d
COMPONENTS_BY_NAME = KeyPassingDefaultDict(_find_component)
def get_component_by_name(name):
"""
Look up a component by its fully qualified name. Return None if the
component hasn't been loaded.
"""
return COMPONENTS_BY_NAME[name]
@defaults(None)
def get_component_type(component):
return get_delegate(component).type
def get_components_of_type(_type):
return COMPONENTS_BY_TYPE.get(_type)
@defaults(None)
def get_group(component):
return get_delegate(component).group
def add_dependent(component, dep):
DEPENDENTS[component].add(dep)
def get_dependents(component):
return DEPENDENTS.get(component, set())
@defaults(set())
def get_dependencies(component):
return get_delegate(component).get_dependencies()
def add_dependency(component, dep):
get_delegate(component).add_dependency(dep)
class MissingRequirements(Exception):
"""
Raised during evaluation if a component's dependencies aren't met.
"""
def __init__(self, requirements):
self.requirements = requirements
super(MissingRequirements, self).__init__(requirements)
class SkipComponent(Exception):
"""
This class should be raised by components that want to be taken out of
dependency resolution.
"""
pass
def get_name(component):
"""
Attempt to get the string name of component, including module and class if
applicable.
"""
if six.callable(component):
name = getattr(component, "__qualname__", component.__name__)
return '.'.join([component.__module__, name])
return str(component)
def get_simple_name(component):
if six.callable(component):
return component.__name__
return str(component)
def get_metadata(component):
"""
Return any metadata dictionary associated with the component. Defaults to
an empty dictionary.
"""
return get_delegate(component).metadata if component in DELEGATES else {}
def get_tags(component):
"""
Return the set of tags associated with the component. Defaults to
``set()``.
"""
return get_delegate(component).tags if component in DELEGATES else set()
def get_links(component):
"""
Return the dictionary of links associated with the component. Defaults to
``dict()``.
"""
return get_delegate(component).links if component in DELEGATES else dict()
def get_module_name(obj):
try:
return inspect.getmodule(obj).__name__
except:
return None
def get_base_module_name(obj):
try:
return get_module_name(obj).split(".")[-1]
except:
return None
def mark_hidden(component):
global HIDDEN
if isinstance(component, (list, set)):
HIDDEN |= set(component)
else:
HIDDEN.add(component)
def is_hidden(component):
return component in HIDDEN
def walk_tree(root, method=get_dependencies):
for d in method(root):
yield d
for c in walk_tree(d, method=method):
yield c
def walk_dependencies(root, visitor):
"""
Call visitor on root and all dependencies reachable from it in breadth
first order.
Args:
root (component): component function or class
visitor (function): signature is `func(component, parent)`. The
call on root is `visitor(root, None)`.
"""
def visit(parent, visitor):
for d in get_dependencies(parent):
visitor(d, parent)
visit(d, visitor)
visitor(root, None)
visit(root, visitor)
def get_dependency_graph(component):
"""
Generate a component's graph of dependencies, which can be passed to
:func:`run` or :func:`run_incremental`.
"""
if component not in DEPENDENCIES:
raise Exception("%s is not a registered component." % get_name(component))
if not DEPENDENCIES[component]:
return {component: set()}
graph = defaultdict(set)
def visitor(c, parent):
if parent is not None:
graph[parent].add(c)
walk_dependencies(component, visitor)
graph = dict(graph)
# Find all items that don't depend on anything.
extra_items_in_deps = _reduce(set.union, graph.values(), set()) - set(graph.keys())
# Add empty dependencies where needed.
graph.update(dict((item, set()) for item in extra_items_in_deps))
return graph
def get_subgraphs(graph=None):
"""
Given a graph of possibly disconnected components, generate all graphs of
connected components. graph is a dictionary of dependencies. Keys are
components, and values are sets of components on which they depend.
"""
graph = graph or DEPENDENCIES
keys = set(graph)
frontier = set()
seen = set()
while keys:
frontier.add(keys.pop())
while frontier:
component = frontier.pop()
seen.add(component)
frontier |= set([d for d in get_dependencies(component) if d in graph])
frontier |= set([d for d in get_dependents(component) if d in graph])
frontier -= seen
yield dict((s, get_dependencies(s)) for s in seen)
keys -= seen
seen.clear()
def _import(path, continue_on_error):
log.debug("Importing %s" % path)
try:
return importlib.import_module(path)
except BaseException:
if not continue_on_error:
raise
def _load_components(path, include=".*", exclude="\\.tests", continue_on_error=True):
do_include = re.compile(include).search if include else lambda x: True
do_exclude = re.compile(exclude).search if exclude else lambda x: False
num_loaded = 0
if path.endswith(".py"):
path, _ = os.path.splitext(path)
path = path.rstrip("/").replace("/", ".")
if do_exclude(path):
return 0
package = _import(path, continue_on_error)
if not package:
return 0
num_loaded += 1
if not hasattr(package, "__path__"):
return num_loaded
prefix = package.__name__ + "."
for _, name, is_pkg in pkgutil.iter_modules(path=package.__path__, prefix=prefix):
if not name.startswith(prefix):
name = prefix + name
if is_pkg:
num_loaded += _load_components(name, include, exclude, continue_on_error)
else:
if do_include(name) and not do_exclude(name):
_import(name, continue_on_error)
num_loaded += 1
return num_loaded
def load_components(*paths, **kwargs):
"""
Loads all components on the paths. Each path should be a package or module.
All components beneath a path are loaded.
Args:
paths (str): A package or module to load
Keyword Args:
include (str): A regular expression of packages and modules to include.
Defaults to '.*'
        exclude (str): A regular expression of packages and modules to exclude.
Defaults to 'test'
continue_on_error (bool): If True, continue importing even if something
raises an ImportError. If False, raise the first ImportError.
Returns:
int: The total number of modules loaded.
Raises:
ImportError
"""
num_loaded = 0
for path in paths:
num_loaded += _load_components(path, **kwargs)
return num_loaded
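# Editor's sketch (not part of the original module): a typical loading call,
# assuming the "insights.parsers" package is importable in your environment.
#
#     import insights.core.dr as dr
#     num = dr.load_components("insights.parsers", exclude="\\.tests")
#     broker = dr.run()  # evaluate every component registered so far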
def first_of(dependencies, broker):
for d in dependencies:
if d in broker:
return broker[d]
def split_requirements(requires):
req_all = []
req_any = []
for r in requires:
if isinstance(r, list):
req_any.append(r)
else:
req_all.append(r)
return req_all, req_any
def stringify_requirements(requires):
if isinstance(requires, tuple):
req_all, req_any = requires
else:
req_all, req_any = split_requirements(requires)
pretty_all = [get_name(r) for r in req_all]
pretty_any = [str([get_name(r) for r in any_list]) for any_list in req_any]
result = "All: %s" % pretty_all + " Any: " + " Any: ".join(pretty_any)
return result
def _register_component(delegate):
component = delegate.component
dependencies = delegate.get_dependencies()
DEPENDENCIES[component] = dependencies
COMPONENTS[delegate.group][component] |= dependencies
COMPONENTS_BY_TYPE[delegate.type].add(component)
for k, v in COMPONENTS_BY_TYPE.items():
if issubclass(delegate.type, k) and delegate.type is not k:
v.add(component)
DELEGATES[component] = delegate
MODULE_NAMES[component] = get_module_name(component)
BASE_MODULE_NAMES[component] = get_base_module_name(component)
class ComponentType(object):
"""
ComponentType is the base class for all component type decorators.
For Example:
.. code-block:: python
class my_component_type(ComponentType):
pass
@my_component_type(SshDConfig, InstalledRpms, [ChkConfig, UnitFiles], optional=[IPTables, IpAddr])
def my_func(sshd_config, installed_rpms, chk_config, unit_files, ip_tables, ip_addr):
return installed_rpms.newest("bash")
Notice that the arguments to ``my_func`` correspond to the dependencies in
the ``@my_component_type`` and are in the same order.
When used, a ``my_component_type`` instance is created whose
``__init__`` gets passed dependencies and whose ``__call__`` gets
passed the component to run if dependencies are met.
Parameters to the decorator have these forms:
============ =============================== ==========================
Criteria Example Decorator Arguments Description
============ =============================== ==========================
Required ``SshDConfig, InstalledRpms`` A regular argument
At Least One ``[ChkConfig, UnitFiles]`` An argument as a list
Optional ``optional=[IPTables, IpAddr]`` A list following optional=
============ =============================== ==========================
If a parameter is required, the value provided for it is guaranteed not to
be ``None``. In the example above, ``sshd_config`` and ``installed_rpms``
will not be ``None``.
At least one of the arguments to parameters of an "at least one"
list will not be ``None``. In the example, either or both of ``chk_config``
and unit_files will not be ``None``.
Any or all arguments for optional parameters may be ``None``.
The following keyword arguments may be passed to the decorator:
Attributes:
requires (list): a list of components that all components decorated with
this type will implicitly require. Additional components passed to
the decorator will be appended to this list.
optional (list): a list of components that all components decorated with
this type will implicitly depend on optionally. Additional components
passed as ``optional`` to the decorator will be appended to this list.
metadata (dict): an arbitrary dictionary of information to associate
with the component you're decorating. It can be retrieved with
``get_metadata``.
tags (list): a list of strings that categorize the component. Useful for
formatting output or sifting through results for components you care
about.
group: ``GROUPS.single`` or ``GROUPS.cluster``. Used to organize
components into "groups" that run together with :func:`insights.core.dr.run`.
cluster (bool): if ``True`` will put the component into the
``GROUPS.cluster`` group. Defaults to ``False``. Overrides ``group``
if ``True``.
"""
requires = []
"""
a list of components that all components decorated with this type will
implicitly require. Additional components passed to the decorator will be
appended to this list.
"""
optional = []
"""
a list of components that all components decorated with this type will
implicitly depend on optionally. Additional components passed as
``optional`` to the decorator will be appended to this list.
"""
metadata = {}
"""
an arbitrary dictionary of information to associate with the component
you're decorating. It can be retrieved with ``get_metadata``.
"""
tags = []
"""
a list of strings that categorize the component. Useful for formatting
output or sifting through results for components you care about.
"""
group = GROUPS.single
"""
group: ``GROUPS.single`` or ``GROUPS.cluster``. Used to organize components
into "groups" that run together with :func:`insights.core.dr.run`.
"""
def __init__(self, *deps, **kwargs):
"""
This constructor is the parameterized part of a decorator.
"""
for k, v in kwargs.items():
setattr(self, k, v)
self.component = None
self.requires = []
self.at_least_one = []
self.deps = []
self.type = self.__class__
deps = list(deps) or kwargs.get("requires", [])
requires = list(self.__class__.requires) + deps
for d in requires:
if isinstance(d, list):
self.at_least_one.append(d)
self.deps.extend(d)
else:
self.requires.append(d)
self.deps.append(d)
self.optional = list(self.__class__.optional)
optional = kwargs.get("optional", [])
if optional and not isinstance(optional, list):
optional = [optional]
self.optional.extend(optional)
self.deps.extend(self.optional)
self.dependencies = set(self.deps)
self.metadata = {}
self.metadata.update(self.__class__.metadata)
self.metadata.update(kwargs.get("metadata", {}) or {})
self.group = kwargs.get("group", self.__class__.group)
if kwargs.get("cluster", False):
self.group = GROUPS.cluster
tags = []
tags.extend(self.__class__.tags)
tags.extend(kwargs.get("tags", []) or [])
self.tags = set(tags)
def __call__(self, component):
"""
This function is the part of the decorator that receives the function
or class.
"""
self.component = component
self.__name__ = component.__name__
self.__module__ = component.__module__
self.__doc__ = component.__doc__
self.__qualname__ = getattr(component, "__qualname__", None)
for d in self.dependencies:
add_dependent(d, component)
_register_component(self)
return component
def invoke(self, results):
"""
Handles invocation of the component. The default implementation invokes
it with positional arguments based on order of dependency declaration.
"""
args = [results.get(d) for d in self.deps]
return self.component(*args)
def get_missing_dependencies(self, broker):
"""
Gets required and at-least-one dependencies not provided by the broker.
"""
missing_required = [r for r in self.requires if r not in broker]
missing_at_least_one = [d for d in self.at_least_one if not set(d).intersection(broker)]
if missing_required or missing_at_least_one:
return (missing_required, missing_at_least_one)
def process(self, broker):
"""
Ensures dependencies have been met before delegating to `self.invoke`.
"""
if any(i in broker for i in IGNORE.get(self.component, [])):
raise SkipComponent()
missing = self.get_missing_dependencies(broker)
if missing:
raise MissingRequirements(missing)
return self.invoke(broker)
def get_dependencies(self):
return self.dependencies
def add_dependency(self, dep):
group = self.group
self.at_least_one[0].append(dep)
self.deps.append(dep)
self.dependencies.add(dep)
add_dependent(dep, self.component)
DEPENDENCIES[self.component].add(dep)
COMPONENTS[group][self.component].add(dep)
class Broker(object):
"""
The Broker is a fancy dictionary that keeps up with component instances as
a graph is evaluated. It's the state of the evaluation. Once a graph has
executed, the broker will contain everything about the evaluation:
component instances, timings, exceptions, and tracebacks.
You can either inspect the broker at the end of an evaluation, or you can
register callbacks with it, and they'll get invoked after each component
is called.
Attributes:
instances (dict): the component instances with components as keys.
missing_requirements (dict): components that didn't have their dependencies
met. Values are a two-tuple. The first element is the list of
required dependencies that were missing. The second element is the
list of "at least one" dependencies that were missing. For more
information on dependency types, see the :class:`ComponentType`
docs.
exceptions (defaultdict(list)): Components that raise any type of
exception except :class:`SkipComponent` during evaluation. The key
is the component, and the value is a list of exceptions. It's a
list because some components produce multiple instances.
tracebacks (dict): keys are exceptions and values are their text
tracebacks.
exec_times (dict): component -> float dictionary where values are the
number of seconds the component took to execute. Calculated using
:func:`time.time`. For components that produce multiple instances,
the execution time here is the sum of their individual execution
times.
"""
def __init__(self, seed_broker=None):
self.instances = dict(seed_broker.instances) if seed_broker else {}
self.missing_requirements = {}
self.exceptions = defaultdict(list)
self.tracebacks = {}
self.exec_times = {}
self.observers = defaultdict(set)
if seed_broker is not None:
self.observers.update(seed_broker.observers)
else:
self.observers[ComponentType] = set()
for k, v in TYPE_OBSERVERS.items():
self.observers[k] |= set(v)
def observer(self, component_type=ComponentType):
"""
You can use ``@broker.observer()`` as a decorator to your callback
instead of :func:`Broker.add_observer`.
"""
def inner(func):
self.add_observer(func, component_type)
return func
return inner
def add_observer(self, o, component_type=ComponentType):
"""
Add a callback that will get invoked after each component is called.
Args:
o (func): the callback function
Keyword Args:
component_type (ComponentType): the :class:`ComponentType` to observe.
The callback will fire any time an instance of the class or its
subclasses is invoked.
The callback should look like this:
.. code-block:: python
def callback(comp, broker):
value = broker.get(comp)
# do something with value
pass
"""
self.observers[component_type].add(o)
def fire_observers(self, component):
_type = get_component_type(component)
if not _type:
return
for k, v in self.observers.items():
if issubclass(_type, k):
for o in v:
try:
o(component, self)
except Exception as e:
log.exception(e)
def add_exception(self, component, ex, tb=None):
if isinstance(ex, MissingRequirements):
self.missing_requirements[component] = ex.requirements
else:
self.exceptions[component].append(ex)
self.tracebacks[ex] = tb
def __iter__(self):
return iter(self.instances)
def keys(self):
return self.instances.keys()
def items(self):
return self.instances.items()
def values(self):
return self.instances.values()
def get_by_type(self, _type):
"""
Return all of the instances of :class:`ComponentType` ``_type``.
"""
r = {}
for k, v in self.items():
if get_component_type(k) is _type:
r[k] = v
return r
def __contains__(self, component):
return component in self.instances
def __setitem__(self, component, instance):
msg = "Already exists in broker with key: %s"
if component in self.instances:
raise KeyError(msg % get_name(component))
self.instances[component] = instance
def __delitem__(self, component):
if component in self.instances:
del self.instances[component]
return
def __getitem__(self, component):
if component in self.instances:
return self.instances[component]
raise KeyError("Unknown component: %s" % get_name(component))
def get(self, component, default=None):
try:
return self[component]
except KeyError:
return default
def print_component(self, component_type):
print(json.dumps(
dict((get_name(c), self[c])
for c in sorted(self.get_by_type(component_type), key=get_name))))
def get_missing_requirements(func, requires, d):
"""
.. deprecated:: 1.x
"""
if not requires:
return None
if any(i in d for i in IGNORE.get(func, [])):
raise SkipComponent()
req_all, req_any = split_requirements(requires)
d = set(d.keys())
req_all = [r for r in req_all if r not in d]
req_any = [r for r in req_any if set(r).isdisjoint(d)]
if req_all or req_any:
return req_all, req_any
else:
return None
def add_observer(o, component_type=ComponentType):
"""
Add a callback that will get invoked after each component is called.
Args:
o (func): the callback function
Keyword Args:
component_type (ComponentType): the :class:`ComponentType` to observe.
The callback will fire any time an instance of the class or its
subclasses is invoked.
The callback should look like this:
.. code-block:: python
def callback(comp, broker):
value = broker.get(comp)
# do something with value
pass
"""
TYPE_OBSERVERS[component_type].add(o)
def observer(component_type=ComponentType):
"""
You can use ``@broker.observer()`` as a decorator to your callback
instead of :func:`add_observer`.
"""
def inner(func):
add_observer(func, component_type)
return func
return inner
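# Observers registered at module level are copied into every Broker created
# afterwards (see Broker.__init__ above). Sketch (illustrative):
#
#     @observer()
#     def trace(comp, broker):
#         log.debug("evaluated %s", get_name(comp))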
def run_order(graph):
"""
Returns components in an order that satisfies their dependency
relationships.
"""
return toposort_flatten(graph, sort=False)
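# ``graph`` maps each component to the set of components it depends on, so the
# toposort yields a valid evaluation order. Sketch (a, b, c illustrative):
#
#     run_order({c: set(), b: {c}, a: {b, c}})  # -> e.g. [c, b, a]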
def _determine_components(components):
if isinstance(components, dict):
return components
if hashable(components) and components in COMPONENTS_BY_TYPE:
components = get_components_of_type(components)
if isinstance(components, (list, set)):
graph = {}
for c in components:
graph.update(get_dependency_graph(c))
return graph
if hashable(components) and components in DELEGATES:
return get_dependency_graph(components)
if hashable(components) and components in COMPONENTS:
return COMPONENTS[components]
def run(components=None, broker=None):
"""
Executes components in an order that satisfies their dependency
relationships.
Keyword Args:
components: Can be one of a dependency graph, a single component, a
component group, or a component type. If it's anything other than a
            dependency graph, the appropriate graph is built for you before
evaluation.
broker (Broker): Optionally pass a broker to use for evaluation. One is
created by default, but it's often useful to seed a broker with an
initial dependency.
Returns:
Broker: The broker after evaluation.
"""
components = components or COMPONENTS[GROUPS.single]
components = _determine_components(components)
broker = broker or Broker()
for component in run_order(components):
start = time.time()
try:
if (component not in broker and component in components and
component in DELEGATES and
is_enabled(component)):
log.info("Trying %s" % get_name(component))
result = DELEGATES[component].process(broker)
broker[component] = result
except MissingRequirements as mr:
if log.isEnabledFor(logging.DEBUG):
name = get_name(component)
reqs = stringify_requirements(mr.requirements)
log.debug("%s missing requirements %s" % (name, reqs))
broker.add_exception(component, mr)
except SkipComponent:
pass
except Exception as ex:
tb = traceback.format_exc()
log.warning(tb)
broker.add_exception(component, ex, tb)
finally:
broker.exec_times[component] = time.time() - start
broker.fire_observers(component)
return broker
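# Sketch: seeding a broker with an already-computed dependency before running
# (``seed_component`` and ``my_component`` are illustrative names):
#
#     broker = Broker()
#     broker[seed_component] = seed_value
#     broker = run(my_component, broker=broker)
#     result = broker.get(my_component)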
def generate_incremental(components=None, broker=None):
components = components or COMPONENTS[GROUPS.single]
components = _determine_components(components)
seed_broker = broker or Broker()
for graph in get_subgraphs(components):
broker = Broker(seed_broker)
yield (graph, broker)
def run_incremental(components=None, broker=None):
"""
Executes components in an order that satisfies their dependency
relationships. Disjoint subgraphs are executed one at a time and a broker
containing the results for each is yielded. If a broker is passed here, its
instances are used to seed the broker used to hold state for each sub
graph.
Keyword Args:
components: Can be one of a dependency graph, a single component, a
component group, or a component type. If it's anything other than a
            dependency graph, the appropriate graph is built for you before
evaluation.
broker (Broker): Optionally pass a broker to use for evaluation. One is
created by default, but it's often useful to seed a broker with an
initial dependency.
Yields:
Broker: the broker used to evaluate each subgraph.
"""
for graph, _broker in generate_incremental(components, broker):
yield run(graph, broker=_broker)
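# Sketch: each disjoint subgraph gets its own broker seeded from the one passed
# in (``my_group`` and ``handle`` are illustrative):
#
#     for broker in run_incremental(my_group, broker=seed_broker):
#         handle(broker.instances)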
def run_all(components=None, broker=None, pool=None):
if pool:
futures = []
for graph, _broker in generate_incremental(components, broker):
futures.append(pool.submit(run, graph, _broker))
return [f.result() for f in futures]
else:
return list(run_incremental(components=components, broker=broker))
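# ``pool`` only needs a ``submit(fn, *args)`` method returning future-like
# objects, so a standard concurrent.futures executor fits. Sketch (illustrative):
#
#     from concurrent.futures import ThreadPoolExecutor
#     with ThreadPoolExecutor(max_workers=4) as pool:
#         brokers = run_all(components=my_group, pool=pool)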
| RedHatInsights/insights-core | insights/core/dr.py | Python | apache-2.0 | 32,029 |
from pycp2k.inputsection import InputSection
class _charge3(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Atom = None
self.Charge = None
self._name = "CHARGE"
self._keywords = {'Atom': 'ATOM', 'Charge': 'CHARGE'}
| SINGROUP/pycp2k | pycp2k/classes/_charge3.py | Python | lgpl-3.0 | 282 |
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for
# the Earth and Planetary Sciences
# Copyright (C) 2012 - 2022 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
Utils
-----
The utils submodule of BurnMan contains utility functions
that can be used by the other BurnMan modules.
They do not depend on any other BurnMan objects apart from
those in constants.py.
"""
from __future__ import absolute_import
from . import chemistry
from . import math
from . import misc
from . import reductions
from . import unitcell
from . import geotherm
| geodynamics/burnman | burnman/utils/__init__.py | Python | gpl-2.0 | 590 |
import warnings
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = "Runs this project as a FastCGI application. Requires flup."
args = '[various KEY=val options, use `runfcgi help` for help]'
def handle(self, *args, **options):
warnings.warn(
"FastCGI support has been deprecated and will be removed in Django 1.9.",
PendingDeprecationWarning)
from django.conf import settings
from django.utils import translation
# Activate the current language, because it won't get activated later.
try:
translation.activate(settings.LANGUAGE_CODE)
except AttributeError:
pass
from django.core.servers.fastcgi import runfastcgi
runfastcgi(args)
def usage(self, subcommand):
from django.core.servers.fastcgi import FASTCGI_HELP
return FASTCGI_HELP
| beckastar/django | django/core/management/commands/runfcgi.py | Python | bsd-3-clause | 919 |
"""
Settings package is acting exactly like settings module in standard django projects.
However, settings combines two distinct things:
(1) General project configuration, which is property of the project
(like which application to use, URL configuration, authentication backends...)
(2) Machine-specific environment configuration (database to use, cache URL, ...)
Thus, we're changing module into package:
* base.py contains (1), so no adjustments there should be needed to make project
on your machine
* config.py contains (2) with sensible default values that should make project
runnable on most expected machines
* local.py contains (2) for your specific machine. Fill in your defaults there.
"""
from test_ella_tagging.settings.base import *
from test_ella_tagging.settings.config import *
try:
from test_ella_tagging.settings.local import *
except ImportError:
pass
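# A machine-specific ``local.py`` would typically override only the
# environment-specific values from config.py; a minimal sketch (values
# illustrative):
#
#     DATABASES['default']['NAME'] = 'my_local_db'
#     CACHES['default']['LOCATION'] = 'localhost:11211'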
| ella/ella-tagging | test_ella_tagging/settings/__init__.py | Python | bsd-3-clause | 904 |
from filebeat import BaseTest
import os
import time
"""
Tests for the multiline log messages
"""
class Test(BaseTest):
def test_java_elasticsearch_log(self):
"""
Test that multi lines for java logs works.
        It checks that all lines which do not start with [ are appended to the last line starting with [
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
multiline=True,
pattern="^\[",
negate="true",
match="after"
)
os.mkdir(self.working_dir + "/log/")
self.copy_files(["logs/elasticsearch-multiline-log.log"],
source_dir="../files",
target_dir="log")
proc = self.start_beat()
        # wait until the expected number of output lines has been written
self.wait_until(
lambda: self.output_has(lines=20),
max_timeout=10)
proc.check_kill_and_wait()
output = self.read_output()
# Check that output file has the same number of lines as the log file
assert 20 == len(output)
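    # The pattern/negate/match triple above reads: lines NOT matching '^\['
    # are merged AFTER the previous matching line, e.g. (illustrative):
    #
    #     [2015-01-01] an exception occurred    <- starts an event
    #         at Foo.java:10                    <- appended to the event above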
def test_c_style_log(self):
"""
Test that multi lines for c style log works
It checks that all lines following a line with \\ are appended to the previous line
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
multiline=True,
pattern="\\\\$",
match="before"
)
os.mkdir(self.working_dir + "/log/")
self.copy_files(["logs/multiline-c-log.log"],
source_dir="../files",
target_dir="log")
proc = self.start_beat()
        # wait until the expected number of output lines has been written
self.wait_until(
lambda: self.output_has(lines=4),
max_timeout=10)
proc.check_kill_and_wait()
output = self.read_output()
# Check that output file has the same number of lines as the log file
assert 4 == len(output)
def test_rabbitmq_multiline_log(self):
"""
Test rabbitmq multiline log
        What is special about this log file is that it contains empty lines
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
multiline=True,
pattern="^=[A-Z]+",
match="after",
negate="true",
)
logentry = """=ERROR REPORT==== 3-Feb-2016::03:10:32 ===
connection <0.23893.109>, channel 3 - soft error:
{amqp_error,not_found,
"no queue 'bucket-1' in vhost '/'",
'queue.declare'}
"""
os.mkdir(self.working_dir + "/log/")
proc = self.start_beat()
testfile = self.working_dir + "/log/rabbitmq.log"
file = open(testfile, 'w')
iterations = 3
for n in range(0, iterations):
file.write(logentry)
file.close()
        # wait until the expected number of output lines has been written
self.wait_until(
lambda: self.output_has(lines=3),
max_timeout=10)
proc.check_kill_and_wait()
output = self.read_output()
# Check that output file has the same number of lines as the log file
assert 3 == len(output)
def test_max_lines(self):
"""
Test the maximum number of lines that is sent by multiline
All further lines are discarded
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
multiline=True,
pattern="^\[",
negate="true",
match="after",
max_lines=3
)
os.mkdir(self.working_dir + "/log/")
self.copy_files(["logs/elasticsearch-multiline-log.log"],
source_dir="../files",
target_dir="log")
proc = self.start_beat()
self.wait_until(
lambda: self.output_has(lines=20),
max_timeout=10)
proc.check_kill_and_wait()
output = self.read_output()
# Checks line 3 is sent
assert True == self.log_contains(
"MetaDataMappingService.java:388", "output/filebeat")
# Checks line 4 is not sent anymore
assert False == self.log_contains(
"InternalClusterService.java:388", "output/filebeat")
# Check that output file has the same number of lines as the log file
assert 20 == len(output)
def test_timeout(self):
"""
Test that data is sent after timeout
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
multiline=True,
pattern="^\[",
negate="true",
match="after",
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
file = open(testfile, 'w', 0)
file.write("[2015] hello world")
file.write("\n")
file.write(" First Line\n")
file.write(" Second Line\n")
proc = self.start_beat()
self.wait_until(
lambda: self.output_has(lines=1),
max_timeout=10)
# Because of the timeout the following two lines should be put together
file.write(" This should not be third\n")
file.write(" This should not be fourth\n")
# This starts a new pattern
file.write("[2016] Hello world\n")
# This line should be appended
file.write(" First line again\n")
self.wait_until(
lambda: self.output_has(lines=3),
max_timeout=10)
proc.check_kill_and_wait()
output = self.read_output()
assert 3 == len(output)
def test_max_bytes(self):
"""
Test the maximum number of bytes that is sent
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
multiline=True,
pattern="^\[",
negate="true",
match="after",
max_bytes=60
)
os.mkdir(self.working_dir + "/log/")
self.copy_files(["logs/elasticsearch-multiline-log.log"],
source_dir="../files",
target_dir="log")
proc = self.start_beat()
self.wait_until(
lambda: self.output_has(lines=20),
max_timeout=10)
proc.check_kill_and_wait()
output = self.read_output()
# Check that first 60 chars are sent
assert True == self.log_contains("cluster.metadata", "output/filebeat")
        # Checks that chars afterwards are not sent
assert False == self.log_contains("Zach", "output/filebeat")
# Check that output file has the same number of lines as the log file
assert 20 == len(output)
def test_close_timeout_with_multiline(self):
"""
Test if multiline events are split up with close_timeout
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
multiline=True,
pattern="^\[",
negate="true",
match="after",
close_timeout="2s",
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
with open(testfile, 'w', 0) as file:
file.write("[2015] hello world")
file.write("\n")
file.write(" First Line\n")
file.write(" Second Line\n")
proc = self.start_beat()
# Wait until harvester is closed because of timeout
        # This causes the partial event above to be sent
self.wait_until(
lambda: self.log_contains(
"Closing harvester because close_timeout was reached"),
max_timeout=15)
# Because of the timeout the following two lines should be put together
with open(testfile, 'a', 0) as file:
file.write(" This should not be third\n")
file.write(" This should not be fourth\n")
            # This starts a new pattern
            file.write("[2016] Hello world\n")
            # This line should be appended
            file.write(" First line again\n")
self.wait_until(
lambda: self.output_has(lines=3),
max_timeout=10)
proc.check_kill_and_wait()
        # close_timeout must have closed the reader at least once
self.wait_until(
lambda: self.log_contains_count(
"Closing harvester because close_timeout was reached") >= 1,
max_timeout=15)
output = self.read_output()
assert 3 == len(output)
def test_consecutive_newline(self):
"""
        Test if consecutive multilines have an effect on multiline
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
multiline=True,
pattern="^\[",
negate="true",
match="after",
close_timeout="2s",
)
logentry1 = """[2016-09-02 19:54:23 +0000] Started 2016-09-02 19:54:23 +0000 "GET" for /gaq?path=%2FCA%2FFallbrook%2F1845-Acacia-Ln&referer=http%3A%2F%2Fwww.xxxxx.com%2FAcacia%2BLn%2BFallbrook%2BCA%2Baddresses&search_bucket=none&page_controller=v9%2Faddresses&page_action=show at 23.235.47.31
X-Forwarded-For:72.197.227.93, 23.235.47.31
Processing by GoogleAnalyticsController#index as JSON
Parameters: {"path"=>"/CA/Fallbrook/1845-Acacia-Ln", "referer"=>"http://www.xxxx.com/Acacia+Ln+Fallbrook+CA+addresses", "search_bucket"=>"none", "page_controller"=>"v9/addresses", "page_action"=>"show"}
Completed 200 OK in 5ms (Views: 1.9ms)"""
logentry2 = """[2016-09-02 19:54:23 +0000] Started 2016-09-02 19:54:23 +0000 "GET" for /health_check at xxx.xx.44.181
X-Forwarded-For:
SetAdCodeMiddleware.default_ad_code referer
SetAdCodeMiddleware.default_ad_code path /health_check
SetAdCodeMiddleware.default_ad_code route """
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
with open(testfile, 'w', 0) as file:
file.write(logentry1 + "\n")
file.write(logentry2 + "\n")
proc = self.start_beat()
self.wait_until(
lambda: self.output_has(lines=2),
max_timeout=10)
proc.check_kill_and_wait()
output = self.read_output_json()
        assert output[0]["message"] == logentry1
        assert output[1]["message"] == logentry2
def test_invalid_config(self):
"""
Test that filebeat errors if pattern is missing config
"""
self.render_config_template(
path=os.path.abspath(self.working_dir + "/log/") + "*",
multiline=True,
match="after",
)
proc = self.start_beat()
self.wait_until(lambda: self.log_contains("missing required field accessing") == 1)
proc.check_kill_and_wait(exit_code=1)
| christiangalsterer/httpbeat | vendor/github.com/elastic/beats/filebeat/tests/system/test_multiline.py | Python | apache-2.0 | 11,132 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2022 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
# --------------------------------------------------------------------------
# This file is automatically generated during build (do not edit directly).
# --------------------------------------------------------------------------
import os
import sys
__version__ = '2022.1.1'
__license__ = 'GNU GPLv3 (or more recent equivalent)'
__author__ = 'Open Science Tools Ltd'
__author_email__ = '[email protected]'
__maintainer_email__ = '[email protected]'
__url__ = 'https://www.psychopy.org/'
__download_url__ = 'https://github.com/psychopy/psychopy/releases/'
__git_sha__ = 'n/a'
__build_platform__ = 'n/a'
__all__ = ["gui", "misc", "visual", "core",
"event", "data", "sound", "microphone"]
# for developers the following allows access to the current git sha from
# their repository
if __git_sha__ == 'n/a':
from subprocess import check_output, PIPE
# see if we're in a git repo and fetch from there
try:
thisFileLoc = os.path.split(__file__)[0]
output = check_output(['git', 'rev-parse', '--short', 'HEAD'],
cwd=thisFileLoc, stderr=PIPE)
except Exception:
output = False
if output:
__git_sha__ = output.strip() # remove final linefeed
# update preferences and the user paths
if 'installing' not in locals():
from psychopy.preferences import prefs
for pathName in prefs.general['paths']:
sys.path.append(pathName)
from psychopy.tools.versionchooser import useVersion, ensureMinimal
if sys.version_info.major < 3:
raise ImportError("psychopy does not support Python2 installations. "
"The last version to support Python2.7 was PsychoPy "
"2021.2.x")
# import readline here to get around an issue with sounddevice
# issues GH-2230 GH-2344 GH-2662
try:
import readline
except ImportError:
pass # all that will happen is the stderr/stdout might get redirected
| psychopy/psychopy | psychopy/__init__.py | Python | gpl-3.0 | 2,184 |
#from sklearn.neural_network import MLPClassifier
import ast
import numpy as np
import scipy
import pickle
import os
from collections import Counter
from time import time
from collections import defaultdict
from scipy.stats import randint as sp_randint
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
import matplotlib.pyplot as plt
import plotly.plotly as py
import plotly.graph_objs as go
import plotly
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, VotingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.neural_network import MLPClassifier
from sklearn.decomposition import PCA
from sklearn import metrics
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import f1_score
from sklearn.metrics import log_loss
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve, auc
#from sklearn.pipeline import Pipeline
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.model_selection import cross_val_score
from sklearn.feature_selection import SelectFromModel
from imblearn.under_sampling import (EditedNearestNeighbours, RandomUnderSampler,
RepeatedEditedNearestNeighbours)
from imblearn.ensemble import EasyEnsemble
from imblearn.pipeline import Pipeline as im_Pipeline
import rank_scorers
import sampler
import feature_importance
import useClaimBuster
import dataset_utils
basepath = "/home/bt1/13CS10060/btp"
datapath = basepath+"/ayush_dataset"
workingdir = basepath + "/output_all"
import seaborn as sns
sns.set()
# Define some color for the plotting
almost_black = '#262626'
palette = sns.color_palette()
names = [
"KNN", "Linear SVM", "RBF SVM", "Decision Tree",
"Random Forest", "AdaBoost", "Naive Bayes", "Linear Discriminant Analysis",
"Quadratic Discriminant Analysis",
"MLP"]
classifiers = [
KNeighborsClassifier(weights='distance', n_neighbors=121),
SVC(kernel="linear", C=1, probability=True),
SVC(C=1, probability=True),
DecisionTreeClassifier(max_depth=10),
RandomForestClassifier(max_depth=10, n_estimators=10, max_features=1),
AdaBoostClassifier(),
GaussianNB(),
LinearDiscriminantAnalysis(solver='lsqr', shrinkage="auto"),
QuadraticDiscriminantAnalysis(),
MLPClassifier(solver='lbfgs', alpha=1e-5,hidden_layer_sizes=(100,75,50,25,15), max_iter=10000, random_state=1)
]
param_grid = {
"Linear SVM" : {
'C': [1,5,10,100,500,1000],
},
"RBF SVM" : {
'C': [1,5,10,100,500,1000],
'gamma': [1e-5, 1e-4,1e-3,1e-2,1e-1],
'kernel': ['poly', 'sigmoid'],
'degree': [3,5,8,10]
},
"KNN" : {
'weights': ['distance'],
'n_neighbors': [1,10,50,100]
}
}
'''
names = ["Decision Trees", "Neural Networks"]
classifiers = [
DecisionTreeClassifier(),
MLPClassifier(algorithm='adam', alpha=1e-5, hidden_layer_sizes=(15, 15), random_state=1, verbose=True)
]'''
evaluation_names = ["Accuracy","F1 Score","F1_Micro","F1_Macro","F1_Weighted","Log_Loss","Precision","Recall","ROC_AUC"]
evaluation_methods = []
def evaluate(y_true,y_pred):
return [accuracy_score(y_true, y_pred),
f1_score(y_true, y_pred, average="binary"),
#f1_score(y_true, y_pred, average='micro'),
#f1_score(y_true, y_pred, average='macro'),
#f1_score(y_true, y_pred, average='weighted'),
#log_loss(y_true,y_pred),
precision_score(y_true, y_pred, average="binary"),
recall_score(y_true, y_pred, average="binary"),
roc_auc_score(y_true, y_pred)]
def load_dataset(trainfilelist, indexlist):
x = []
Y = []
allfeatures = []
embed_feats = []
allindex = []
names = []
for i,files in enumerate(trainfilelist):
f1 = open(files[0], "r")
f3 = open(files[1], 'r')
f2 = open(indexlist[i], "r")
names = f1.readline()
names = names.strip().split(" ")[:-1]
# names = names[:60]
for lines in f1:
features = [float(value) for value in lines.split(' ')]
# features = features[:60] + [features[-1]]
# print(features)
allfeatures.append(features)
for lines in f2:
indexes = [int(value) for value in lines.split(' ')]
allindex.append(indexes)
for lines in f3:
embeds = [float(value) for value in lines.split(" ")]
embed_feats.append(embeds)
# from random import shuffle
# shuffle(allfeatures)
n = ["embed"+str(i) for i in range(300)]
n.extend(names)
print(len(allfeatures[0]))
for embeds,feature in zip(embed_feats, allfeatures):
f = []
f.extend(embeds)
f.extend(feature[:-1])
x.append(f)
#print(feature[-1])
Y.append(feature[-1])
# print(len(names),len(feature))
# print(Y.count(1))
# exit(0)
return n,x, Y, allindex
def feature_select(X,y):
clf = ExtraTreesClassifier()
clf = clf.fit(X, y)
print(clf.feature_importances_)
model = SelectFromModel(clf, prefit=True)
X_new = model.transform(X)
print(X_new.shape)
return X_new, y
def plot_data_and_sample(X,y, sampler):
# Instanciate a PCA object for the sake of easy visualisation
# pca = PCA(n_components=2)
# # Fit and transform x to visualise inside a 2D feature space
# X_vis = pca.fit_transform(X)
# X_resampled, y_resampled = sampler.fit_sample(X, y)
# print(len(X_resampled), len(y_resampled))
X_vis = X
X_res_vis = [X]
y_resampled=y
# for X_res in X_resampled:
# X_res_vis.append(pca.transform(X_res))
# Two subplots, unpack the axes array immediately
# f, (ax1, ax2) = plt.subplots(1, 2)
# ax1.scatter(X_vis[y == 0, 0], X_vis[y == 0, 1], label="Class #0", alpha=0.5,
# edgecolor=almost_black, facecolor=palette[0], linewidth=0.15)
# ax1.scatter(X_vis[y == 1, 0], X_vis[y == 1, 1], label="Class #1", alpha=0.5,
# edgecolor=almost_black, facecolor=palette[2], linewidth=0.15)
# ax1.set_title('Original set')
# ax2.scatter(X_vis[y == 0, 0], X_vis[y == 0, 1], label="Class #0", alpha=0.5,
# edgecolor=almost_black, facecolor=palette[0], linewidth=0.15)
# for iy, e in enumerate(X_res_vis):
# ax2.scatter(e[y_resampled[iy] == 1, 0], e[y_resampled[iy] == 1, 1],
# label="Class #1", alpha=0.5, edgecolor=almost_black,
# facecolor=np.random.rand(3,), linewidth=0.15)
# ax2.set_title('Easy ensemble')
# plt.show()
print(X)
X_vis0 = X_vis[y==0, 0]
X_vis1 = X_vis[y==1, 0]
X_vis0 = X_vis0.tolist()
X_vis1 = X_vis1.tolist()
X_vis0_probs_dict = {x:X_vis0.count(x)/len(X_vis0) for x in X_vis0}
X_vis0_probs = X_vis0_probs_dict.values()
X_vis1_probs_dict = {x:X_vis1.count(x)/len(X_vis1) for x in X_vis1}
X_vis1_probs = X_vis1_probs_dict.values()
# print(list(X_vis0_probs))
# print(list(range(100)))
# exit(0)
trace1 = go.Scatter(
x = list(range(62)),
y=list(X_vis0_probs),
name='Non Check-worthy'
# histnorm='probability'
)
trace2 = go.Scatter(
x = list(range(62)),
y=list(X_vis1_probs),
name='Check-worthy'
# histnorm='probability'
)
data = [trace1, trace2]
layout = go.Layout(
showlegend=True,
legend = dict(
x = 0.6,
y = 1
),
width=450,
height=400,
xaxis=dict(title='Length of sentence'),
yaxis=dict(title='Probability')
)
# fig = dict(data=data)
fig = go.Figure(data=data, layout=layout)
plotly.offline.plot(fig,image='png')
exit(0)
return X_resampled, y_resampled
def plot_ROC_curve(roc_curve):
false_positive_rate, true_positive_rate, _ = roc_curve
roc_auc = 0
roc_auc = auc(false_positive_rate, true_positive_rate)
plt.title('Receiver Operating Characteristic')
plt.plot(false_positive_rate, true_positive_rate, 'b',
label='AUC = %0.2f'% roc_auc)
plt.legend(loc='lower right')
plt.plot([0,1],[0,1],'r--')
plt.xlim([-0.1,1.2])
plt.ylim([-0.1,1.2])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
def plot_PR_curve(pr_curve):
precision, recall, _ = pr_curve
plt.plot(recall, precision, lw=2, color='navy',
label='Precision-Recall curve')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall')
plt.legend(loc="lower left")
plt.show()
# Utility function to report best scores
def report(results, n_top=10):
for i in range(1, n_top + 1):
candidates = np.flatnonzero(results['rank_test_score'] == i)
for candidate in candidates:
print("Model with rank: {0}".format(i))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
results['mean_test_score'][candidate],
results['std_test_score'][candidate]))
print("Parameters: {0}".format(results['params'][candidate]))
print("")
def cross_validate(X,y):
for name, clf in zip(names[1:3], classifiers[1:3]):
scores = cross_val_score(clf, X, y, cv=4)
print("Name %s ROC_AUC: %0.2f (+/- %0.2f)" % (name, scores.mean(), scores.std() * 2))
def randomGridSearch(X,y):
for name, clf in zip(names[1:3], classifiers[1:3]):
# run randomized search
n_iter_search = 2
random_search = RandomizedSearchCV(clf, param_distributions=param_grid[name],
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.cv_results_)
def gridSearch(X,y, working_dir):
for name, clf in zip(names[0:1], classifiers[0:1]):
# run grid search
clf = GridSearchCV(clf, param_grid=param_grid[name],cv=4, scoring="roc_auc" ,n_jobs=24)
start = time()
clf.fit(X, y)
with open(working_dir + "/grid_best_2" + name + '.pkl', 'wb') as f1:
pickle.dump(clf, f1)
print("GridSearchCV took %.2f seconds candidates"
" parameter settings." % ((time() - start)))
report(clf.cv_results_)
def normalize_topic_values(X, y):
X[X<1e-4] = 0
return X,y
def split_data(X,y, index, frac=0.2):
from collections import Counter
from sklearn.utils import shuffle
import random
c = Counter()
n = len(X)
X=np.asarray(X)
y = np.asarray(y)
index = np.asarray(index)
for i in range(n):
if(y[i] == 1):
c[index[i][0]] += 1;
l = list(c.items())
l = shuffle(l, random_state=101)
test_debates = []
test_size = int(frac* sum(y))
k = 0
while(test_size > 0):
test_debates.append(l[k][0])
test_size -= l[k][1]
k +=1
print(test_size, test_debates)
X_test = []
y_test = []
X_train = []
y_train = []
index_test = []
index_train = []
for i in np.random.permutation(n):
if(index[i][0] in test_debates):
X_test.append(X[i])
y_test.append(y[i])
index_test.append(index[i])
else:
X_train.append(X[i])
y_train.append(y[i])
index_train.append(index[i])
X_test = np.asarray(X_test)
y_test = np.asarray(y_test)
X_train = np.asarray(X_train)
y_train = np.asarray(y_train)
index_test = np.asarray(index_test)
index_train = np.asarray(index_train)
print(np.shape(X_train))
p = np.random.permutation(len(X_train))
test_p = np.random.permutation(len(X_test))
return X_train[p], X_test[test_p], y_train[p], y_test[test_p], index_train[p], index_test[test_p]
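# NOTE: the ``evaluate`` below shadows the metric-list ``evaluate`` defined
# earlier in this module; only this ranking/report version is actually called.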
def evaluate(X_test, y_test, index_test, clf, name, sent_print=True):
y_hat = clf.predict(X_test)
report = metrics.classification_report(y_test, y_hat)
#print(str(score))
f = open(working_dir + "/" + name + '_report.txt', 'w')
f.write(name+"\n")
f.write(report)
print(report)
# try:
# plot_ROC_curve(metrics.roc_curve(y_test, clf.decision_function(X_test), pos_label=1))
# plot_PR_curve(metrics.precision_recall_curve(y_test,clf.decision_function(X_test), pos_label=1 ))
# except:
# pass
try:
y_prob = clf.predict_proba(X_test)[:,1]
except:
pass
ks = [10,20,30,40,50,60,70,80,90,100,200,300,500,1000]
allscores = rank_scorers.all_score(y_test, y_prob, ks)
for i,k in enumerate(ks):
print(k,round(allscores[i][0],3),round(allscores[i][1],3),round(allscores[i][2],3), sep="\t", file=f)
#print(allscores)
if(not sent_print):
return
sent_list = [dataset_utils.get_sentence(idx) for idx in index_test]
ff = open(working_dir + "/" + name + '_scores.txt', 'w')
for tag, score, sent in zip(y_test, y_prob, sent_list):
print(tag, score, sent, sep="\t", file=ff)
# buster_prob = dataset_utils.get_buster_score(index_test)
# allscores_buster = rank_scorers.all_score(y_test, buster_prob, ks)
# # for tag, score, sent in zip(y_test, buster_prob, sent_list):
# # print(tag, score, sent, sep="\t")
# print("ClaimBuster",file=f)
# for i,k in enumerate(ks):
# print(k,round(allscores_buster[i][0],3),round(allscores_buster[i][1],3),round(allscores_buster[i][2],3), sep="\t", file=f)
def ensemble_train(X,y, working_dir,n, name, svm=True):
ees = EasyEnsemble(random_state=557, n_subsets=n)
X_res, y_res = ees.fit_sample(X,y)
try:
        raise Exception('Retrain')  # force retraining instead of loading the cached model
with open(working_dir + "/" + name + '.pkl', 'rb') as f1:
clf = pickle.load(f1)
except:
# scores = cross_val_score(clf, X, y, cv=4, scoring="roc_auc")
# print("Name %s ROC_AUC: %0.2f (+/- %0.2f)" % (name, scores.mean(), scores.std() * 2))
clf = []
for i in range(len(X_res)):
print(Counter(y_res[i]))
if(svm):
clfi = SVC(kernel="linear", probability=True)
else:
clfi = AdaBoostClassifier(n_estimators=20)
#clfi=AdaBoostClassifier()
clfi.fit(X_res[i], y_res[i])
clf.append(clfi)
scores = cross_val_score(clfi, X_res[i], y_res[i], cv=4, scoring="roc_auc")
print("Name %s ROC_AUC: %0.2f (+/- %0.2f)" % (name, scores.mean(), scores.std() * 2))
with open(working_dir + "/" + name + '.pkl', 'wb') as f1:
pickle.dump(clf, f1)
return clf
def ensemble_predict_proba(clf, X):
y_proba = []
for clfi in clf:
y_probai = clfi.predict_proba(X)[:,-1]
y_proba.append(y_probai)
y_proba = np.asarray(y_proba)
y_proba_mean = np.mean(y_proba, axis=0)
y_hat = np.round(y_proba_mean)
return y_proba_mean, y_hat
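# Sketch of the EasyEnsemble flow above: train one classifier per balanced
# subset, then average their probabilities (arguments illustrative):
#
#     clfs = ensemble_train(X_train, y_train, working_dir, 20, "ens", svm=True)
#     y_prob, y_hat = ensemble_predict_proba(clfs, X_test)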
sel_classifiers = [
SVC(kernel="linear", C=1, probability=True),
SVC(C=1, probability=True),
RandomForestClassifier(n_estimators=20),
AdaBoostClassifier(n_estimators=10)
]
sel_names = ["lsvm", "rsvm", "rfc", "ada"]
def main(working_dir, args):
f_names, X,y, index = load_dataset([(workingdir+"/features.ff", workingdir+"/embeddings.txt")], [workingdir+"/index.txt"])
print(len(X), len(y))
X = np.asarray(X)
y = np.asarray(y)
index = np.asarray(index)
f_names = np.asarray(f_names)
start = 300
X_part, y = normalize_topic_values(X[start:],y)
X[start:] = X_part[:]
print(np.shape(X), np.shape(f_names))
print(X[0])
# sel_feats = np.asarray(list(range(0,300)))# + list(range(413,414)))
#sel_feats = np.asarray(list(range(300,len(X[0]))))
sel_feats = np.asarray(list(range(0,300)))
X_posonly = X[:,sel_feats]
print(np.shape(X_posonly))
f_names = f_names[sel_feats]
print(f_names)
# index_no = index[y==0]
# print(np.shape(index_no))
# index_no_sampled = index_no[np.random.choice(range(len(index_no)), size=50, replace=False)]
# for indexi in index_no_sampled:
# print(dataset_utils.get_sentence(indexi))
# plot_data_and_sample(X_posonly,y,None)
# feature_importance.plot_feature_importance(X_posonly,y,f_names)
# exit(0)
#exit(0)
#feature_importance.plot_feature_importance(X,y,f_names)
#exit(0)
X_train, X_test, y_train, y_test, index_train, index_test = split_data(X_posonly, y, index)
pca = PCA(n_components=100)
X_train = pca.fit_transform(X_train)
print(np.shape(X_train))
X_test = pca.transform(X_test)
X_vis= X_train
# #Two subplots, unpack the axes array immediately
# f, ax1 = plt.subplots(1)
# ax1.scatter(X_vis[y_train == 0, 0], X_vis[y_train == 0, 1], label="Class #0", alpha=0.5,
# edgecolor=almost_black, facecolor=palette[0], linewidth=0.15)
# ax1.scatter(X_vis[y_train == 1, 0], X_vis[y_train == 1, 1], label="Class #1", alpha=0.5,
# edgecolor=almost_black, facecolor=palette[2], linewidth=0.15)
# ax1.set_title('Original set')
# plt.show()
# gridSearch(X,y, working_dir)
rsampler = RandomUnderSampler(random_state=487)
X_test_s, y_test_s = rsampler.fit_sample(X_test, y_test)
#sampler= EasyEnsemble()
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2, random_state=44)
ensemble_clf = [True, False]
for c in ensemble_clf:
n = 20
name = "embed50svm_"+str(n)+str(c)
fr = open(working_dir+"/"+name+"report.txt", "w")
clf = ensemble_train(X_train, y_train, working_dir,n, name, c)
y_prob, y_hat = ensemble_predict_proba(clf, X_test)
y_prob_s, y_hat_s = ensemble_predict_proba(clf, X_test_s)
report = metrics.classification_report(y_test, y_hat)
#print(str(score))
print(report)
print(report, file=fr)
#evaluate(X_test, y_test, index_test, clf, name)
ks = [10,20,30,40,50,60,70,80,90,100,200,300,500,1000]
allscores = rank_scorers.all_score(y_test, y_prob, ks)
for i,k in enumerate(ks):
#print(k,round(allscores[i][0],3),round(allscores[i][1],3),round(allscores[i][2],3), sep="\t")
print(k,round(allscores[i][0],3),round(allscores[i][1],3),round(allscores[i][2],3), sep="\t", file=fr)
report = metrics.classification_report(y_test_s, y_hat_s)
#print(str(score))
print(report)
print(report, file=fr)
#evaluate(X_test, y_test, index_test, clf, name)
ks = [10,20,30,40,50,60,70,80,90,100,200,300,500,1000]
allscores = rank_scorers.all_score(y_test_s, y_prob_s, ks)
for i,k in enumerate(ks):
#print(k,round(allscores[i][0],3),round(allscores[i][1],3),round(allscores[i][2],3), sep="\t")
print(k,round(allscores[i][0],3),round(allscores[i][1],3),round(allscores[i][2],3), sep="\t", file=fr)
#print(allscores)
# sent_list = [dataset_utils.get_sentence(idx) for idx in index_test]
# f = open(working_dir+"/"+name+"scores.txt", "w")
# for tag, score, sent in zip(y_test, y_prob, sent_list):
# print(tag, score, sent, sep="\t", file=f)
#buster_prob = dataset_utils.get_buster_score(index_test)
#allscores_buster = rank_scorers.all_score(y_test, buster_prob, ks)
# for tag, score, sent in zip(y_test, buster_prob, sent_list):
# print(tag, score, sent, sep="\t")
# print("ClaimBuster")
# for i,k in enumerate(ks):
# #print(k,round(allscores_buster[i][0],3),round(allscores_buster[i][1],3),round(allscores_buster[i][2],3), sep="\t")
# print(k,round(allscores_buster[i][0],3),round(allscores_buster[i][1],3),round(allscores_buster[i][2],3), sep="\t", file=fr)
rsampler = RandomUnderSampler(random_state=487)
X_train, y_train = rsampler.fit_sample(X_train, y_train)
#X_train, X_test = feature_importance.recursive_elimination(X_train,y_train, X_test)
X_test_s, y_test_s = rsampler.fit_sample(X_test, y_test)
#for h in [1]:
    for name, clf in zip(sel_names, sel_classifiers):
print(name)
name="embed50"+name
#clf = SVC(probability=True, kernel="linear")
# pipe_components[-1] = ('classification', clf)
# clf = im_Pipeline(pipe_components)
#print(clf)
try:
            raise Exception('Retrain')  # force retraining instead of loading the cached model
with open(working_dir + "/" + name + '.pkl', 'rb') as f1:
clf = pickle.load(f1)
except:
scores = cross_val_score(clf, X_train, y_train, cv=4, scoring="roc_auc")
#rec_scores = cross_val_score(clf, X_train, y_train, cv=4, scoring="roc_auc")
print("Name %s ROC_AUC: %0.2f (+/- %0.2f)" % (name, scores.mean(), scores.std() * 2))
clf.fit(X_train, y_train)
with open(working_dir + "/" + name + '.pkl', 'wb') as f1:
pickle.dump(clf, f1)
evaluate(X_test, y_test, index_test, clf, name)
evaluate(X_test_s, y_test_s, index_test, clf, name+"sampledtest", False )
if __name__ == '__main__':
import os
import sys
working_dir = workingdir+"/models_feat/finals_2" #os.argv[-1]
try:
os.makedirs(working_dir)
except:
pass
arguments = sys.argv[1:]
args = defaultdict(None)
for x in arguments:
x = x.split("=")
args[x[0].strip("-")] = x[1]
main(working_dir, args)
| cs60050/TeamGabru | 2-AlexNet/MLPClassifier.py | Python | mit | 22,568 |
# /usr/bin/python3
import asyncio
import sys
from os import symlink, remove
from os.path import abspath, dirname, join, exists
from aiohttp import web
from webdash import netconsole_controller
from webdash import networktables_controller
@asyncio.coroutine
def forward_request(request):
return web.HTTPFound("/index.html")
INIT_FILE = "Webdash_init.sh"
INSTALL_LOCATIONS = "/etc/init.d/" + INIT_FILE, "/etc/rc5.d/S99" + INIT_FILE
def run_server(port):
print("Starting Webdash Server.")
file_root = join(abspath(dirname(__file__)), "resources")
asyncio.async(netconsole_controller.netconsole_monitor())
networktables_controller.setup_networktables()
app = web.Application()
app.router.add_route("GET", "/networktables", networktables_controller.networktables_websocket)
app.router.add_route("GET", "/netconsole", netconsole_controller.netconsole_websocket)
app.router.add_route("GET", "/netconsole_dump", netconsole_controller.netconsole_log_dump)
app.router.add_route("GET", "/", forward_request)
app.router.add_static("/", file_root)
loop = asyncio.get_event_loop()
f = loop.create_server(app.make_handler(), '0.0.0.0', port)
srv = loop.run_until_complete(f)
print("RoboRIO Webdash listening on", srv.sockets[0].getsockname())
try:
loop.run_forever()
except KeyboardInterrupt:
pass
def main():
if len(sys.argv) <= 1:
run_server(5801)
elif sys.argv[1] == "install-initfile":
for tgt in INSTALL_LOCATIONS:
if not exists(dirname(tgt)):
print("ERROR: Installation path {} does not exist. (Are you sure you are "
"installing this on an os with sysvinit?)".format(dirname(tgt)))
exit(-1)
src = join(abspath(dirname(__file__)), INIT_FILE)
if exists(tgt):
res = input("{} already exists, Remove? (y/N)".format(tgt))
if res.lower() == "y":
remove(tgt)
else:
print("ERROR: Target already exists.")
exit(-1)
print("Symlinking {} to {}.".format(src, tgt))
symlink(src, tgt)
print("Successfully installed {}".format(INIT_FILE))
exit(1)
elif sys.argv[1] == "remove-initfile":
for tgt in INSTALL_LOCATIONS:
if not exists(tgt):
print("ERROR: Init file {} not installed.".format(tgt))
else:
remove(tgt)
print("Successfully removed {}".format(tgt))
exit(1)
else:
if len(sys.argv) > 1:
print("Unknown option: {}".format(sys.argv[1]))
print("Usage: webdash [ , install-initfile, remove-initfile]")
exit(1)
if __name__ == "__main__":
    main()
| computer-whisperer/RoboRIO-webdash | webdash/main.py | Python | bsd-3-clause | 2813 |
# -*- coding: utf-8 -*-
"""The app module, containing the app factory function."""
import gevent.monkey
gevent.monkey.patch_all()
import logging
import sys
import os
import threading
from flask import Flask, render_template, current_app
from flask_restful import Api
from walle import commands
from walle.api import access as AccessAPI
from walle.api import api as BaseAPI
from walle.api import deploy as DeployAPI
from walle.api import environment as EnvironmentAPI
from walle.api import general as GeneralAPI
from walle.api import group as GroupAPI
from walle.api import passport as PassportAPI
from walle.api import project as ProjectAPI
from walle.api import repo as RepoApi
from walle.api import role as RoleAPI
from walle.api import server as ServerAPI
from walle.api import space as SpaceAPI
from walle.api import task as TaskAPI
from walle.api import user as UserAPI
from walle.config.settings_prod import ProdConfig
from walle.model.user import UserModel, AnonymousUser
from walle.service.code import Code
from walle.service.error import WalleError
from walle.service.extensions import bcrypt, csrf_protect, db, migrate
from walle.service.extensions import login_manager, mail, permission, socketio
from walle.service.websocket import WalleSocketIO
def create_app(config_object=ProdConfig):
"""An application factory, as explained here: http://flask.pocoo.org/docs/patterns/appfactories/.
:param config_object: The configuration object to use.
"""
app = Flask(__name__.split('.')[0])
app.config.from_object(config_object)
register_extensions(app)
register_blueprints(app)
register_errorhandlers(app)
register_shellcontext(app)
register_commands(app)
register_logging(app)
@app.before_request
def before_request():
# TODO
pass
@app.teardown_request
def shutdown_session(exception=None):
# TODO
from walle.model.database import db
db.session.remove()
@app.route('/api/websocket')
def index():
return render_template('socketio.html')
    # unit tests do not need to start the websocket server
if app.config.get('ENV') != 'test':
register_socketio(app)
try:
reload(sys)
sys.setdefaultencoding('utf-8')
except NameError:
pass
return app
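# Minimal usage sketch of the factory (illustrative; the project normally boots
# through its own entry script):
#
#     app = create_app(ProdConfig)
#     app.run(host=app.config.get('HOST'), port=app.config.get('PORT'))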
def register_extensions(app):
"""Register Flask extensions."""
bcrypt.init_app(app)
db.init_app(app)
csrf_protect.init_app(app)
login_manager.session_protection = 'strong'
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
current_app.logger.info(user_id)
return UserModel.query.get(user_id)
@login_manager.unauthorized_handler
def unauthorized():
# TODO log
return BaseAPI.ApiResource.json(code=Code.unlogin)
login_manager.init_app(app)
migrate.init_app(app, db)
mail.init_app(app)
permission.init_app(app)
return app
def register_blueprints(app):
"""Register Flask blueprints."""
api = Api(app)
api.add_resource(BaseAPI.Base, '/', endpoint='root')
api.add_resource(GeneralAPI.GeneralAPI, '/api/general/<string:action>', endpoint='general')
api.add_resource(SpaceAPI.SpaceAPI, '/api/space/', '/api/space/<int:space_id>', '/api/space/<int:space_id>/<string:action>', endpoint='space')
api.add_resource(DeployAPI.DeployAPI, '/api/deploy/', '/api/deploy/<int:task_id>', endpoint='deploy')
api.add_resource(AccessAPI.AccessAPI, '/api/access/', '/api/access/<int:access_id>', endpoint='access')
api.add_resource(RoleAPI.RoleAPI, '/api/role/', endpoint='role')
api.add_resource(GroupAPI.GroupAPI, '/api/group/', '/api/group/<int:group_id>', endpoint='group')
api.add_resource(PassportAPI.PassportAPI, '/api/passport/', '/api/passport/<string:action>', endpoint='passport')
api.add_resource(UserAPI.UserAPI, '/api/user/', '/api/user/<int:user_id>/<string:action>', '/api/user/<string:action>', '/api/user/<int:user_id>', endpoint='user')
api.add_resource(ServerAPI.ServerAPI, '/api/server/', '/api/server/<int:id>', endpoint='server')
api.add_resource(ProjectAPI.ProjectAPI, '/api/project/', '/api/project/<int:project_id>', '/api/project/<int:project_id>/<string:action>', endpoint='project')
api.add_resource(RepoApi.RepoAPI, '/api/repo/<string:action>/', endpoint='repo')
api.add_resource(TaskAPI.TaskAPI, '/api/task/', '/api/task/<int:task_id>', '/api/task/<int:task_id>/<string:action>', endpoint='task')
api.add_resource(EnvironmentAPI.EnvironmentAPI, '/api/environment/', '/api/environment/<int:env_id>', endpoint='environment')
return None
def register_errorhandlers(app):
"""Register error handlers."""
@app.errorhandler(WalleError)
def render_error(error):
        # the JSON body of the response carries the custom error code and message
app.logger.error(error, exc_info=1)
return error.render_error()
def register_shellcontext(app):
"""Register shell context objects."""
def shell_context():
"""Shell context objects."""
return {
'db': db,
'User': UserModel,
}
app.shell_context_processor(shell_context)
def register_commands(app):
"""Register Click commands."""
app.cli.add_command(commands.test)
app.cli.add_command(commands.lint)
app.cli.add_command(commands.clean)
app.cli.add_command(commands.urls)
def register_logging(app):
# TODO https://blog.csdn.net/zwxiaoliu/article/details/80890136
# email errors to the administrators
import logging
from logging.handlers import RotatingFileHandler
# Formatter
formatter = logging.Formatter(
'%(asctime)s %(levelname)s %(pathname)s %(lineno)s %(module)s.%(funcName)s %(message)s')
# log dir
if not os.path.exists(app.config['LOG_PATH']):
os.makedirs(app.config['LOG_PATH'])
# FileHandler Info
file_handler_info = RotatingFileHandler(filename=app.config['LOG_PATH_INFO'])
file_handler_info.setFormatter(formatter)
file_handler_info.setLevel(logging.INFO)
info_filter = InfoFilter()
file_handler_info.addFilter(info_filter)
app.logger.addHandler(file_handler_info)
# FileHandler Error
file_handler_error = RotatingFileHandler(filename=app.config['LOG_PATH_ERROR'])
file_handler_error.setFormatter(formatter)
file_handler_error.setLevel(logging.ERROR)
app.logger.addHandler(file_handler_error)
def register_socketio(app):
if len(sys.argv) > 1 and sys.argv[1] == 'db':
return app
socketio.init_app(app, async_mode='gevent')
socketio.on_namespace(WalleSocketIO(namespace='/walle'))
socket_args = {"debug": app.config.get('DEBUG'), "host": app.config.get('HOST'), "port": app.config.get('PORT')}
socket_thread = threading.Thread(target=socketio.run, name="socket_thread", args=(app, ), kwargs=socket_args)
socket_thread.start()
return app
class InfoFilter(logging.Filter):
def filter(self, record):
"""only use INFO
筛选, 只需要 INFO 级别的log
:param record:
:return:
"""
if logging.INFO <= record.levelno < logging.ERROR:
# 已经是INFO级别了
# 然后利用父类, 返回 1
return 1
else:
return 0
| meolu/walle-web | walle/app.py | Python | apache-2.0 | 7,334 |
# GPIO Zero: a library for controlling the Raspberry Pi's GPIO pins
# Copyright (c) 2016-2019 Dave Jones <[email protected]>
# Copyright (c) 2019 Ben Nuttall <[email protected]>
# Copyright (c) 2016 Andrew Scheller <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
str = type('')
import os
import warnings
from mock import patch
import pytest
import errno
from gpiozero import *
from gpiozero.pins.mock import MockFactory
file_not_found = IOError(errno.ENOENT, 'File not found')
def test_default_pin_factory_order():
with patch('sys.path') as path, \
patch('io.open') as io, \
patch('os.environ.get') as get:
# ensure no pin libraries can be imported
path.return_value = []
# ensure /proc/device-tree... is not found when trying native
io.return_value.__enter__.side_effect = file_not_found
# ensure pin factory not set in env var
get.return_value = None
with warnings.catch_warnings(record=True) as ws:
warnings.resetwarnings()
with pytest.raises(BadPinFactory):
device = GPIODevice(2)
assert len(ws) == 4
assert all(w.category == PinFactoryFallback for w in ws)
assert ws[0].message.args[0].startswith('Falling back from rpigpio:')
assert ws[1].message.args[0].startswith('Falling back from rpio:')
assert ws[2].message.args[0].startswith('Falling back from pigpio:')
assert ws[3].message.args[0].startswith('Falling back from native:')
def test_device_bad_pin(mock_factory):
with pytest.raises(GPIOPinMissing):
device = GPIODevice()
with pytest.raises(PinInvalidPin):
device = GPIODevice(60)
with pytest.raises(PinInvalidPin):
device = GPIODevice('BCM60')
with pytest.raises(PinInvalidPin):
device = GPIODevice('WPI32')
with pytest.raises(PinInvalidPin):
device = GPIODevice(b'P2:2')
with pytest.raises(PinInvalidPin):
device = GPIODevice('J8:42')
with pytest.raises(PinInvalidPin):
device = GPIODevice('J8:1')
with pytest.raises(PinInvalidPin):
device = GPIODevice('foo')
def test_device_non_physical(mock_factory):
with warnings.catch_warnings(record=True) as w:
warnings.resetwarnings()
device = GPIODevice('GPIO37')
assert len(w) == 1
assert w[0].category == PinNonPhysical
def test_device_init(mock_factory):
pin = mock_factory.pin(2)
with GPIODevice(2) as device:
assert repr(device).startswith('<gpiozero.GPIODevice object')
assert not device.closed
assert device.pin is pin
with pytest.raises(TypeError):
GPIODevice(2, foo='bar')
def test_device_init_twice_same_pin(mock_factory):
with GPIODevice(2) as device:
with pytest.raises(GPIOPinInUse):
GPIODevice(2)
def test_device_init_twice_same_pin_different_spec(mock_factory):
with GPIODevice(2) as device:
with pytest.raises(GPIOPinInUse):
GPIODevice("BOARD3")
def test_device_init_twice_different_pin(mock_factory):
with GPIODevice(2) as device:
with GPIODevice(3) as device2:
pass
def test_device_close(mock_factory):
device = GPIODevice(2)
# Don't use "with" here; we're testing close explicitly
device.close()
assert device.closed
assert device.pin is None
def test_device_reopen_same_pin(mock_factory):
pin = mock_factory.pin(2)
with GPIODevice(2) as device:
pass
with GPIODevice(2) as device2:
assert not device2.closed
assert device2.pin is pin
assert device.closed
assert device.pin is None
def test_device_pin_parsing(mock_factory):
# MockFactory defaults to a Pi 3B layout
pin = mock_factory.pin(2)
with GPIODevice('GPIO2') as device:
assert device.pin is pin
with GPIODevice('BCM2') as device:
assert device.pin is pin
with GPIODevice('WPI8') as device:
assert device.pin is pin
with GPIODevice('BOARD3') as device:
assert device.pin is pin
with GPIODevice('J8:3') as device:
assert device.pin is pin
def test_device_repr(mock_factory):
with GPIODevice(4) as device:
assert repr(device) == (
'<gpiozero.GPIODevice object on pin %s, '
'is_active=False>' % device.pin)
def test_device_repr_after_close(mock_factory):
with GPIODevice(2) as device:
pass
assert repr(device) == '<gpiozero.GPIODevice object closed>'
def test_device_unknown_attr(mock_factory):
with GPIODevice(2) as device:
with pytest.raises(AttributeError):
device.foo = 1
def test_device_broken_attr(mock_factory):
with GPIODevice(2) as device:
del device._active_state
with pytest.raises(AttributeError):
device.value
def test_device_context_manager(mock_factory):
with GPIODevice(2) as device:
assert not device.closed
assert device.closed
def test_composite_device_sequence(mock_factory):
with CompositeDevice(InputDevice(4), InputDevice(5)) as device:
assert repr(device).startswith('<gpiozero.CompositeDevice object')
assert len(device) == 2
assert device[0].pin.number == 4
assert device[1].pin.number == 5
assert device.namedtuple._fields == ('device_0', 'device_1')
def test_composite_device_values(mock_factory):
with CompositeDevice(InputDevice(4), InputDevice(5)) as device:
assert repr(device) == '<gpiozero.CompositeDevice object containing 2 unnamed devices>'
assert device.value == (0, 0)
assert not device.is_active
device[0].pin.drive_high()
assert device.value == (1, 0)
assert device.is_active
def test_composite_device_named(mock_factory):
with CompositeDevice(
foo=InputDevice(4),
bar=InputDevice(5),
_order=('foo', 'bar')
) as device:
assert repr(device) == '<gpiozero.CompositeDevice object containing 2 devices: foo, bar>'
assert device.namedtuple._fields == ('foo', 'bar')
assert device.value == (0, 0)
assert not device.is_active
def test_composite_device_some_named(mock_factory):
with CompositeDevice(
InputDevice(4),
foobar=InputDevice(5),
) as device:
assert repr(device) == '<gpiozero.CompositeDevice object containing 2 devices: foobar and 1 unnamed>'
assert device.namedtuple._fields == ('device_0', 'foobar')
assert device.value == (0, 0)
assert not device.is_active
def test_composite_device_bad_init(mock_factory):
with pytest.raises(ValueError):
CompositeDevice(foo=1, bar=2, _order=('foo',))
with pytest.raises(ValueError):
CompositeDevice(close=1)
with pytest.raises(ValueError):
CompositeDevice(2)
with pytest.raises(ValueError):
CompositeDevice(mock_factory.pin(2))
def test_composite_device_read_only(mock_factory):
with CompositeDevice(foo=InputDevice(4), bar=InputDevice(5)) as device:
with pytest.raises(AttributeError):
device.foo = 1
def test_shutdown(mock_factory):
from gpiozero.devices import _shutdown
ds = DistanceSensor(17, 19)
f = Device.pin_factory
_shutdown()
assert ds.closed
assert not f.pins
assert Device.pin_factory is None
# Shutdown must be idempotent
_shutdown()
| RPi-Distro/python-gpiozero | tests/test_devices.py | Python | bsd-3-clause | 9,023 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2005,2006,2007,2008,2009 Brett Adams <[email protected]>
# Copyright (c) 2012-2015 Mario Frasca <[email protected]>
#
# This file is part of bauble.classic.
#
# bauble.classic is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# bauble.classic is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with bauble.classic. If not, see <http://www.gnu.org/licenses/>.
#
# Images table definition
#
#from bauble.plugins import BaubleTable, tables
#from bauble.treevieweditor import TreeViewEditorDialog
#
#
#class Image(BaubleTable):
#
# # not unique but if a duplicate uri is entered the user
# # should be asked if this is what they want
# uri = StringCol()
# label = StringCol(length=50, default=None)
#
# # copyright ?
# # owner ?
#
# # should accessions also have a images in case an accession
# # differs from a plant slightly or should it just have a different
# # species
# #plant = MultipleJoin("Plantnames", joinColumn="image_id")
# species = ForeignKey('Species', cascade=True)
#
#
# def __str__(self): return self.label
#
##
## Image editor
##
#class ImageEditor(TreeViewEditorDialog):
#
# visible_columns_pref = "editor.image.columns"
# column_width_pref = "editor.image.column_width"
# default_visible_list = ['label', 'uri', 'species']
#
# label = 'Images'
#
# def __init__(self, parent=None, select=None, defaults={}):
#
# TreeViewEditorDialog.__init__(self, tables["Image"],
# "Image Editor", parent,
# select=select, defaults=defaults)
# titles={"uri": "Location (URL)",
# "label": "Label",
# 'speciesID': 'Plant Name'}
# self.columns.titles = titles
| mfrasca/bauble.classic | bauble/plugins/plants/images.py | Python | gpl-2.0 | 2,233 |
from ped_core import keymap
from ped_core import clipboard
import curses
import pprint
import gc
from ped_core import editor_common
import re
from ped_core import keymap
from ped_core import keytab
import subprocess
from ped_dialog.dialog import Frame,ListBox,Toggle,Button,StaticText,Prompt,PasswordPrompt,Dialog,pad
from ped_dialog.file_browse import FileBrowseComponent
from ped_dialog.stream_select import StreamSelectComponent
from ped_core.editor_common import Editor
def screen_size( rows, columns ):
cmd = "resize -s %d %d >/dev/null 2>/dev/null"%(rows,columns)
subprocess.Popen(cmd,shell=True)
curses.resizeterm( rows, columns )
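# Usage sketch: screen_size(30, 100) -- as the test suite below does -- resizes the
# hosting terminal via the external `resize` utility and informs curses of the new
# geometry, so the tests run against a known screen size.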
def read_str( win, y, x, width ):
out_str = ''
for ix in range(x,x+width):
rc = win.inch(y,ix)
out_str += chr(rc & curses.A_CHARTEXT)
return out_str
def match_chr( win, y, x, width, match_chr ):
for ix in range(x,x+width):
if match_chr != (win.inch(y,ix) & (curses.A_ALTCHARSET | curses.A_CHARTEXT)):
return False
return True
def match_attr( win, y, x, height, width, attr ):
for iy in range(y,y+height):
for ix in range(x,x+width):
rc = win.inch(iy,ix)
cc = chr(rc & curses.A_CHARTEXT)
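            # The 0xFFBFFFFF mask appears to clear the A_ALTCHARSET bit (bit 22 in
            # common ncurses builds) so box-drawing characters compare equal
            # regardless of the charset flag -- an assumption based on the
            # standard ncurses attribute layout.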
r_attr = (rc & (curses.A_ATTRIBUTES|curses.A_COLOR))&0xFFBFFFFF
if not (attr == r_attr) and not cc.isspace():
return(False)
return(True)
def match_attr_str( win, y, x, width, attr ):
return match_attr( win, y, x, 1, width, attr)
def undo_all(ed):
while ed.isChanged():
ed.undo()
wait_for_screen(ed)
def window_pos(ed,line,pos):
sc_line,sc_pos = ed.scrPos(line,pos)
return((sc_line-ed.line)+1,sc_pos-ed.left)
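# window_pos maps a file (line, pos) to window coordinates; the +1 presumably
# accounts for the editor's one-line header at the top of the window.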
def play_macro( ed_or_dialog, macro ):
keymap.start_recording()
for seq in macro:
keymap.record_seq(seq)
keymap.stop_recording()
keymap.start_playback()
while keymap.is_playback():
ed_or_dialog.main(False)
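# Usage sketch (key names come from ped_core.keytab), e.g.:
#   play_macro(ed, [keytab.KEYTAB_ALTG, 'down', '4', '0', '0', '\n'])
# replays a recorded "goto line 400" interaction through the editor's main loop,
# as the test suite below does.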
def validate_mark( ed, lines_to_test, start_line, end_line, start_pos, end_pos, do_validation = True, clip_type = clipboard.SPAN_CLIP ):
wait_for_screen(ed)
match_tuple = ( clip_type, [ "" for f in range(start_line,end_line+1) ] )
lidx = 0
for f_line in range(start_line,end_line+1):
s_pos = 0
e_pos = len(lines_to_test[f_line])
if clip_type == clipboard.RECT_CLIP:
s_pos = start_pos
e_pos = end_pos+1
elif clip_type == clipboard.SPAN_CLIP:
if f_line == start_line:
s_pos = start_pos
if f_line == end_line:
e_pos = end_pos+1
if e_pos > len(lines_to_test[f_line]):
e_pos = len(lines_to_test[f_line])
for f_pos in range(s_pos,e_pos):
sc_line,sc_pos = window_pos(ed,f_line,f_pos)
c_to_test = lines_to_test[f_line][f_pos]
match_tuple[1][lidx] += c_to_test
if do_validation and (sc_line >= 1 and sc_line < ed.max_y and sc_pos >= 0 and sc_pos < ed.max_x):
assert(match_attr(ed.scr,sc_line,sc_pos,1,1,curses.A_REVERSE))
assert(read_str(ed.scr,sc_line,sc_pos,1) == c_to_test)
if clip_type == clipboard.SPAN_CLIP and f_line != end_line and start_line != end_line:
match_tuple[1][lidx] += '\n'
lidx += 1
if do_validation:
marked_tuple = ed.get_marked()
assert(marked_tuple == match_tuple)
return match_tuple
def wait_for_screen(ed):
ed.showcursor(False)
ed.main(False)
while ed.has_changes():
ed.main(False)
ed.showcursor(True)
def pad(value,size):
""" pad a string with spaces to the size provided """
if len(value) < size:
value += ' '*(size-len(value))
return value
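# e.g. pad("abc", 6) == "abc   "; values already at least `size` long are returned unchanged.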
def validate_screen( ed, lines_to_test = None, start_line=-1, end_line=-1, start_pos=-1, end_pos=-1, do_validation=True ):
wait_for_screen(ed)
if start_line < 0:
start_line = ed.line
if end_line < 0:
end_line = ed.line + (ed.max_y-1)
if start_pos < 0:
start_pos = ed.left
if end_pos < 0:
end_pos = ed.left+ed.max_x
matched_screen = []
error_screen = []
lidx = 0
for f_line in range(start_line,end_line+1):
matched_line = ""
error_line = ""
s_pos = ed.left
if lines_to_test:
test_line = lines_to_test[f_line]
else:
test_line = ed.getContent(f_line,0,False,True)
test_line = pad(test_line,ed.left+ed.max_x)
e_pos = len(test_line)
if f_line == start_line:
s_pos = start_pos
elif f_line == end_line:
e_pos = end_pos+1
e_pos = min(e_pos,len(test_line))
for f_pos in range(s_pos,e_pos):
ff_line,ff_pos = ed.filePos(f_line,f_pos)
sc_line,sc_pos = window_pos(ed,ff_line,ff_pos)
if sc_line >= 1 and sc_line < ed.max_y and sc_pos >= 0 and sc_pos < ed.max_x:
c_to_test = test_line[f_pos]
c_from_scr = read_str(ed.scr,sc_line,sc_pos,1)
matched_line += c_from_scr
error_line += ' '
marked_error = False
if ed.isMark() and (ff_line >= min(ed.getLine(),ed.mark_line_start) and ff_line <= max(ed.getLine(),ed.mark_line_start)) and (ff_pos >= min(ed.getPos(),ed.mark_pos_start) and ff_pos <= max(ed.getPos(),ed.mark_pos_start)):
if not match_attr(ed.scr,sc_line,sc_pos,1,1,curses.A_REVERSE):
error_line = error_line[:-1] + '#'
marked_error = True
if not marked_error and (c_from_scr != c_to_test):
error_line = error_line[:-1] + c_from_scr
lidx += 1
matched_screen.append(matched_line)
error_screen.append(error_line)
any_errors = False
for le in error_screen:
if le.rstrip():
any_errors = True
break
if do_validation:
assert not any_errors, "Screen rendering error:\n%s"%pprint.pformat((any_errors,matched_screen,error_screen))
return (any_errors,matched_screen,error_screen)
def editor_test_suite(stdscr,testdir,wrapped,editor = None ):
screen_size( 30, 100 )
lines_to_test = ["This is the first line","This is the second line","This is the third line","This is the last line"]
lines_to_test += [ (("This is line %d "%f)*20).rstrip() for f in range(5,2000) ]
testfile = testdir.makefile(".txt",*lines_to_test)
fn = str(testfile)
if not editor:
max_y,max_x = stdscr.getmaxyx()
ed = editor_common.Editor(stdscr,stdscr.subwin(max_y,max_x,0,0),fn)
else:
ed = editor
ed.workfile.close()
if ed.mode:
ed.mode.finish(ed)
ed.workfile.filename = fn
ed.workfile.load()
ed.undo_mgr.flush_undo()
ed.flushChanges()
ed.invalidate_all()
gc.collect()
if wrapped:
ed.toggle_wrap()
validate_screen(ed)
assert(match_attr(ed.scr,0,0,1,ed.max_x,curses.A_REVERSE|curses.A_BOLD))
ef = ed.getWorkfile()
assert(isinstance(ef,editor_common.EditFile))
assert(ed.getFilename() == fn)
assert(isinstance(ed.getUndoMgr(),editor_common.undo.UndoManager))
assert(not ed.isChanged())
assert(ed.numLines() == 1999)
editor_test_suite.target_line = 1000
editor_test_suite.target_pos = len(lines_to_test[editor_test_suite.target_line])//2
def do_edit_tests( relative = False ):
if relative:
target_line = ed.getLine()
target_pos = ed.getPos()
else:
target_line = editor_test_suite.target_line
target_pos = editor_test_suite.target_pos
ed.goto(target_line,target_pos)
line = ed.getLine(True)
pos = ed.getPos(True)
(f_line,f_pos) = ed.filePos(line,pos)
(s_line,s_pos) = ed.filePos(line,ed.left)
assert(f_line == target_line)
assert(f_pos == target_pos)
validate_screen(ed)
before_string = ed.getContent(f_line)
ed.insert('X')
assert(ed.isChanged())
assert(ed.isLineChanged(target_line,False))
after_string = ed.getContent(f_line)
assert(after_string == before_string[:f_pos]+'X'+before_string[f_pos:])
validate_screen(ed)
undo_all(ed)
assert(ed.getContent(f_line) == before_string)
validate_screen(ed)
(cur_line,cur_pos) = ed.filePos(ed.getLine(True),ed.getPos(True))
before_string = ed.getContent(cur_line)
if cur_line <= ed.numLines()-1 and cur_pos < len(before_string)-1:
ed.delc()
assert(ed.isChanged())
assert(ed.isLineChanged(cur_line,False))
after_string = ed.getContent(cur_line)
if cur_pos+1 < len(before_string):
compare_string = before_string[:cur_pos] + before_string[cur_pos+1:]
elif cur_pos == len(before_string) and cur_line+1 < len(lines_to_test):
compare_string = before_string[:cur_pos] + lines_to_test[cur_line+1]
else:
compare_string = before_string
assert(after_string == compare_string)
validate_screen(ed)
undo_all(ed)
assert(ed.getContent(cur_line) == before_string)
validate_screen(ed)
ed.backspace()
assert(ed.isChanged())
assert(ed.isLineChanged(cur_line,False))
if cur_pos+1 < len(before_string):
compare_string = before_string[:cur_pos-1] + before_string[cur_pos:]
else:
compare_string = before_string[:cur_pos-1]
assert(ed.getContent(cur_line) == compare_string)
validate_screen(ed)
undo_all(ed)
assert(ed.getContent(cur_line) == before_string)
validate_screen(ed)
do_edit_tests()
editor_test_suite.target_pos = 5
do_edit_tests()
ed.endln()
assert(ed.getPos() == len(lines_to_test[editor_test_suite.target_line]))
do_edit_tests(True)
ed.endfile()
assert(ed.getLine() == ed.numLines()-1)
do_edit_tests(True)
ed.goto(0,0)
ed.endpg()
ed.endln()
sc_line,sc_pos = window_pos(ed,ed.getLine(),ed.getPos())
assert(sc_line == ed.max_y-1)
do_edit_tests(True)
ed.endfile()
ed.endln()
assert(ed.getLine(True) == ed.numLines(True)-1)
do_edit_tests(True)
start_line = ed.getLine(True)
ed.pageup()
assert(ed.getLine(True) == start_line - (ed.max_y-2))
do_edit_tests(True)
ed.pagedown()
assert(ed.getLine(True) == start_line)
do_edit_tests(True)
ed.cup()
assert(ed.getLine(True) == start_line -1 )
do_edit_tests(True)
ed.cdown()
assert(ed.getLine(True) == start_line )
do_edit_tests(True)
word_pos = []
in_word = False
for i in range(0,len(lines_to_test[editor_test_suite.target_line])):
if lines_to_test[editor_test_suite.target_line][i] != ' ':
if not in_word:
word_pos.append(i)
in_word = True
else:
in_word = False
word_pos.append(len(lines_to_test[editor_test_suite.target_line]))
for rfunc,lfunc in [(ed.next_word,ed.prev_word),(ed.cright,ed.cleft),(ed.scroll_right,ed.scroll_left)]:
if wrapped and rfunc == ed.scroll_right:
break
ed.goto(editor_test_suite.target_line,0)
prev_pos = ed.getPos()
while ed.getPos() < len(lines_to_test[editor_test_suite.target_line])-2:
rfunc()
if rfunc == ed.next_word:
assert(ed.getPos() in word_pos)
assert(ed.getPos() > prev_pos)
prev_pos = ed.getPos()
s_line,s_pos = ed.filePos(ed.getLine(True),ed.left)
assert(ed.getPos() >= s_pos and ed.getPos() < s_pos+ed.max_x)
validate_screen(ed)
while ed.getPos() > 0:
lfunc()
if ed.getLine() != editor_test_suite.target_line:
break
if lfunc == ed.prev_word:
assert(ed.getPos() in word_pos)
assert(ed.getPos() < prev_pos)
prev_pos = ed.getPos()
s_line,s_pos = ed.filePos(ed.getLine(True),ed.left)
assert(ed.getPos() >= s_pos and ed.getPos() < s_pos+ed.max_x)
validate_screen(ed)
ed.search("This is line 1010",True,False)
assert(ed.getLine() == 1009 and ed.getPos() == 16 and ed.isMark() and ed.mark_line_start == 1009 and ed.mark_pos_start == 0 and ed.getContent(ed.mark_line_start)[ed.mark_pos_start:ed.getPos()+1] == "This is line 1010")
validate_screen(ed)
ed.search("This is line 990",False,False)
assert(ed.getLine() == 989 and ed.getPos() == 338 and ed.isMark() and ed.mark_line_start == 989 and ed.mark_pos_start == 339-len("This is line 990") and ed.getContent(ed.mark_line_start)[ed.mark_pos_start:ed.getPos()+1] == "This is line 990")
validate_screen(ed)
success_count = 0
search_succeeded = ed.search("This is line 100[0,1,2]",down = True, next = False)
while search_succeeded:
success_count += 1
found_str = ""
if ed.isMark():
found_str = ed.getContent(ed.mark_line_start)[ed.mark_pos_start:ed.getPos()+1]
assert(re.match("This is line 100[0,1,2]",found_str))
validate_screen(ed)
search_succeeded = ed.searchagain()
assert(success_count == 60)
ed.goto(307,0)
play_macro(ed, [ 'fk06','down','l','i','n','e',' ','3','0','8','\t','down','l','i','n','e',' ','6','6','6','\t','\n','\t','\t','\n','\n' ] )
ed.goto(307,0)
assert(ed.getContent(ed.getLine()) == lines_to_test[ed.getLine()].replace('line 308','line 666'))
validate_screen(ed)
ed.goto(editor_test_suite.target_line,0)
ed.mark_span()
ed.goto(editor_test_suite.target_line,15)
validate_mark(ed, lines_to_test, editor_test_suite.target_line, editor_test_suite.target_line, 0, 15 )
ed.goto(editor_test_suite.target_line,15)
ed.mark_span()
ed.goto(editor_test_suite.target_line+5,25)
validate_mark(ed, lines_to_test, editor_test_suite.target_line, editor_test_suite.target_line+5, 15, 25 )
ed.goto(editor_test_suite.target_line,15)
ed.mark_span()
ed.goto(editor_test_suite.target_line+5,ed.max_x)
ed.cright()
validate_mark(ed, lines_to_test, editor_test_suite.target_line,editor_test_suite.target_line+5,15,ed.getPos())
ed.goto(editor_test_suite.target_line,15)
ed.mark_span()
ed.goto(editor_test_suite.target_line+5,ed.max_x)
ed.cright()
match_tuple = validate_mark(ed, lines_to_test, editor_test_suite.target_line,editor_test_suite.target_line+5,15,ed.getPos(),False)
ed.copy_marked()
ed.goto(editor_test_suite.target_line+25,0)
ed.paste()
for line in range(0,5):
assert(ed.getContent(editor_test_suite.target_line+25+line) == match_tuple[1][line].rstrip())
assert(ed.getContent(editor_test_suite.target_line+25+5).startswith(match_tuple[1][5]))
undo_all(ed)
for line in range(0,6):
assert(ed.getContent(editor_test_suite.target_line+25+line).startswith(lines_to_test[editor_test_suite.target_line+25+line]))
ed.goto(editor_test_suite.target_line,15)
ed.mark_span()
ed.goto(editor_test_suite.target_line+5,ed.max_x)
ed.cright()
f_line = ed.getLine()
f_pos = ed.getPos()
ed.copy_marked(True,False)
assert(ed.getLine()==editor_test_suite.target_line and ed.getPos()==15)
target_contents = ed.getContent(editor_test_suite.target_line)
match_contents = lines_to_test[editor_test_suite.target_line][0:15]+lines_to_test[f_line][f_pos+1:]
assert(target_contents.startswith(match_contents))
ed.goto(editor_test_suite.target_line+25,0)
ed.paste()
for line in range(0,5):
assert(ed.getContent(editor_test_suite.target_line+25+line) == match_tuple[1][line].rstrip())
assert(ed.getContent(editor_test_suite.target_line+25+5).startswith(match_tuple[1][5]))
undo_all(ed)
for line in range(0,6):
assert(ed.getContent(editor_test_suite.target_line+25+line).startswith(lines_to_test[editor_test_suite.target_line+25+line]))
ed.goto(editor_test_suite.target_line,15)
ed.mark_lines()
ed.goto(editor_test_suite.target_line+5,25)
validate_mark(ed,lines_to_test,editor_test_suite.target_line,editor_test_suite.target_line+5,15,25,True, clipboard.LINE_CLIP)
if not wrapped:
ed.goto(editor_test_suite.target_line,0)
ed.goto(editor_test_suite.target_line,15)
ed.mark_rect()
ed.goto(editor_test_suite.target_line+5,25)
validate_mark(ed,lines_to_test,editor_test_suite.target_line,editor_test_suite.target_line+5,15,25,True, clipboard.RECT_CLIP)
ed.goto(editor_test_suite.target_line,15)
ed.cr()
first_line = ed.getContent(editor_test_suite.target_line)
second_line = ed.getContent(editor_test_suite.target_line+1)
assert(len(first_line)==15 and first_line == lines_to_test[editor_test_suite.target_line][0:15])
assert(second_line == lines_to_test[editor_test_suite.target_line][15:].rstrip())
validate_screen(ed)
undo_all(ed)
validate_screen(ed)
while ed.isMark():
ed.mark_lines()
ed.goto(editor_test_suite.target_line,0)
ed.mark_lines()
ed.goto(editor_test_suite.target_line+5,0)
ed.tab()
for line in range(0,6):
assert(ed.getContent(editor_test_suite.target_line+line).startswith(' '*ed.workfile.get_tab_stop(0)))
validate_screen(ed)
ed.btab()
for line in range(0,6):
assert(ed.getContent(editor_test_suite.target_line+line).startswith(lines_to_test[editor_test_suite.target_line+line].rstrip()))
validate_screen(ed)
undo_all(ed)
validate_screen(ed)
while ed.isMark():
ed.mark_lines()
play_macro(ed, [ keytab.KEYTAB_ALTO ,'\t',keytab.KEYTAB_DOWN,'s','a','v','e','a','s','.','t','x','t','\n','\n',keytab.KEYTAB_REFRESH ] )
new_fn = ed.getWorkfile().getFilename()
assert(new_fn.endswith('saveas.txt'))
lidx = 0
for line in open(new_fn,'r'):
assert(line.startswith(lines_to_test[lidx].rstrip()))
lidx += 1
ed.invalidate_all()
ed.goto(editor_test_suite.target_line,15)
ed.insert('A test change')
ed.save()
lidx = 0
for line in open(new_fn,'r'):
if lidx == editor_test_suite.target_line:
assert(line[15:].startswith('A test change'))
else:
assert(line.startswith(lines_to_test[lidx].rstrip()))
lidx += 1
cur_line = ed.getLine()
cur_pos = ed.getPos()
play_macro(ed, [ keytab.KEYTAB_CR ] )
assert(ed.getLine() == cur_line+1 and ed.getPos() == 0)
start_line = ed.getLine()
play_macro(ed, [ keytab.KEYTAB_ALTM, keytab.KEYTAB_DOWN,keytab.KEYTAB_DOWN, keytab.KEYTAB_DOWN, keytab.KEYTAB_DOWN, keytab.KEYTAB_RIGHT, keytab.KEYTAB_RIGHT, keytab.KEYTAB_RIGHT, keytab.KEYTAB_CTRLC ] )
end_line = ed.getLine()
assert(clipboard.clip_type == clipboard.SPAN_CLIP and len(clipboard.clip) == (end_line-start_line)+1)
play_macro(ed, [ keytab.KEYTAB_ALTG, 'down', '4','0','0','\n'] )
assert(ed.getLine() == 400 )
def validate_rect( win,y,x,height,width,title,attr = curses.A_NORMAL ):
""" validate that a rect is rendered correctly """
assert(read_str(win,y,x+(width//2)-(len(title)//2),len(title)) == title)
assert(match_attr_str(win,y,x+(width//2)-(len(title)//2),len(title),attr))
assert(match_chr(win,y,x,1,curses.ACS_ULCORNER))
assert(match_attr_str(win,y,x,1,attr))
assert(match_chr(win,y,x+1,(width//2)-(len(title)//2)-1,curses.ACS_HLINE))
assert(match_chr(win,y,x+(width//2-len(title)//2)+len(title),width-((width//2-len(title)//2)+len(title))-1,curses.ACS_HLINE))
assert(match_attr_str(win,y,x+1,width-2,attr))
assert(match_chr(win,y+height-1,x+1,width-2,curses.ACS_HLINE))
assert(match_attr_str(win,y+height-1,x+1,width-2,attr))
assert(match_chr(win,y,x+width-1,1,curses.ACS_URCORNER))
assert(match_attr_str(win,y,x+width-1,1,attr))
for oy in range(0,height-2):
assert(match_chr(win,y+oy+1,x,1,curses.ACS_VLINE))
assert(match_attr_str(win,y+oy+1,x,1,attr))
assert(match_chr(win,y+oy+1,x+width-1,1,curses.ACS_VLINE))
assert(match_attr_str(win,y+oy+1,x+width-1,1,attr))
assert(match_chr(win,y+height-1,x,1,curses.ACS_LLCORNER))
assert(match_attr_str(win,y+height-1,x,1,attr))
assert(match_chr(win,y+height-1,x+width-1,1,curses.ACS_LRCORNER))
assert(match_attr_str(win,y+height-1,x+width-1,1,attr))
def validate_dialog( d ):
""" validate that a dialog is rendering it's state correctly """
for c in d.children:
if d.focus_list[d.current][1] == c:
c.focus()
if isinstance(c,Frame):
win = c.getparent()
if c.x >= 0:
x = c.x
y = c.y
height = c.h
width = c.w
else:
x = 0
y = 0
height,width = win.getmaxyx()
validate_rect( win, y,x,height,width,c.title)
elif isinstance(c,ListBox):
win = c.getparent()
x = c.x
y = c.y
height = c.height
width = c.width
validate_rect( win, y,x,height,width,c.label,(curses.A_BOLD if c.isfocus else curses.A_NORMAL))
x+=1
y+=1
width -= 2
height -= 2
top = c.top
off = 0
cy = -1
while top < len(c.list) and off < height:
if top == c.selection:
rattr = curses.A_REVERSE
cy = y+off
else:
if c.isfocus:
rattr = curses.A_BOLD
else:
rattr = curses.A_NORMAL
assert(read_str(win,y+off,x,width) == pad(c.list[top],width)[0:width])
assert(match_attr_str(win,y+off,x,width,rattr))
top += 1
off += 1
elif isinstance(c,Toggle):
win = c.getparent()
if c.isfocus:
lattr = curses.A_REVERSE
else:
lattr = curses.A_NORMAL
x = c.x
y = c.y
width = c.width
assert(read_str(win,y,x,width)==pad(c.list[c.selection],width)[0:width])
assert(match_attr_str(win,y,x,width,lattr))
elif isinstance(c,Button):
win = c.getparent()
if c.isfocus:
battr = curses.A_REVERSE
else:
battr = curses.A_NORMAL
label = "["+c.label+"]"
width = len(label)
assert(read_str(win,c.y,c.x,width) == label)
assert(match_attr_str(win,c.y,c.x,width,battr))
elif isinstance(c,StaticText):
win = c.getparent()
max_y,max_x = win.getmaxyx()
width = (max_x - c.x) - 1
x = c.x
assert(read_str(win,c.y,x,len(c.prompt[-width:])) == c.prompt[-width:])
assert(match_attr_str(win,c.y,x,len(c.prompt[-width:]),curses.A_NORMAL))
x += len(c.prompt[-width:])
width -= len(c.prompt[-width:])
if width > 0:
assert(read_str(win,c.y,x,c.width) == pad(c.value,c.width)[-width:])
assert(match_attr_str(win,c.y,x,c.width,curses.A_NORMAL))
elif isinstance(c,PasswordPrompt):
win = c.getparent()
if c.isfocus:
pattr = curses.A_BOLD
fattr = curses.A_REVERSE
else:
pattr = curses.A_NORMAL
fattr = curses.A_NORMAL
if c.width < 0:
(max_y,max_x) = win.getmaxyx()
c.width = max_x - (c.x+len(c.prompt)+2)
assert(read_str(win,c.y,c.x,len(c.prompt)) == c.prompt)
assert(match_attr_str(win,c.y,c.x,len(c.prompt),pattr))
assert(read_str(win,c.y,c.x+len(c.prompt),c.width) == pad(len(c.value)*"*",c.width))
assert(match_attr_str(win,c.y,c.x+len(c.prompt),c.width,fattr))
elif isinstance(c,Prompt):
win = c.getparent()
if c.isfocus:
pattr = curses.A_BOLD
fattr = curses.A_REVERSE
else:
pattr = curses.A_NORMAL
fattr = curses.A_NORMAL
if c.width < 0:
(max_y,max_x) = win.getmaxyx()
c.width = max_x - (c.x+len(c.prompt)+2)
assert(read_str(win,c.y,c.x,len(c.prompt)) == c.prompt)
assert(match_attr_str(win,c.y,c.x,len(c.prompt),pattr))
assert(read_str(win,c.y,c.x+len(c.prompt),c.width) == pad(c.value,c.width)[0:c.width])
assert(match_attr_str(win,c.y,c.x+len(c.prompt),c.width,fattr))
elif isinstance(c,FileBrowseComponent) or isinstance(c,StreamSelectComponent):
win = c.getparent()
if c.isfocus:
attr = curses.A_BOLD
else:
attr = curses.A_NORMAL
validate_rect(win,c.y,c.x,c.height,c.width,c.label,attr)
c.editor.main(False)
c.editor.main(False)
validate_screen(c.editor)
if d.focus_list[d.current][1] == c:
c.render()
c.getparent().refresh()
| jpfxgood/ped | tests/ped_test_util.py | Python | mit | 25,393 |
#!/usr/bin/env python
#
# Test: Schedule rules around either side of the current time with no
# particular order. This doesn't mean randomly though.
#
# Usage: python TimeSchedule_NoOrder.py
#
# Test success: Scheduled rules appear in the correct order.
# Test failure: Scheduled rules are not in the correct order.
#
# Note:
# - Test output can be found in TimeSchedule_NoOrder.log
#
# - The script assumes that the syntax for the REST commands are
# legal.
#
# Author: Jarrod N. Bakker
#
import acl_scheduling_test as ast
import os
if __name__ == "__main__":
test_name = os.path.basename(__file__)
filename_log_results = test_name[:-3] + "_results.log"
# Begin the test
times = ["+20", "+30", "+40", "+50", "+35", "-40", "+80", "-100",
"-10"]
ast.test_schedule(test_name, filename_log_results, times)
| bakkerjarr/ACLSwitch | Tests/Time_Enforcement/Scheduling/TimeSchedule_NoOrder.py | Python | apache-2.0 | 855 |
'''
Run on GPU: THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python train_dictnet.py
'''
from __future__ import print_function
import numpy as np
from keras.layers.core import Dense
from keras.utils import np_utils
from keras.optimizers import SGD
import h5py
from build_model import build_model_train, build_model
from mjsynth_dictnet import MJSYNTH_DICTNET
if __name__ == '__main__':
batch_size = 1024
nb_epoch = 1
total_classes = 0
previous_samples = []
# Get Data and mapping for this round of training
nb_classes = 5000
nb_examples_per_class = 10
train_data = MJSYNTH_DICTNET("train", nb_classes, nb_examples_per_class, previous_samples)
total_classes += nb_classes
labels = np_utils.to_categorical(train_data.labels, nb_classes)
previous_samples = train_data.class_mapping
nb_samples = (nb_classes * nb_examples_per_class)
nb_train = np.int( 0.8 * nb_samples )
xtrain = train_data.x[:nb_train,:,:,:]
ytrain = labels[:nb_train,:]
xtest = train_data.x[nb_train:,:,:,:]
ytest = labels[nb_train:,:]
# Build model (except for last softmax layer)
model = build_model()
# Classification layer
model.add(Dense(nb_classes,activation='softmax',init='glorot_uniform'))
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
# Train the model
model.fit(xtrain, ytrain, batch_size=batch_size, nb_epoch=nb_epoch,
show_accuracy=True, verbose=1, validation_data=(xtest, ytest))
# Save the model
json_string = model.to_json()
open('initial_charnet_architecture.json', 'w').write(json_string)
model.save_weights('initial_charnet_weights.h5')
np.save('initial_classes',previous_samples)
for i in range(3+6+1): # Run this loop 3 times (to get to 20000 classes) then 6 times to get to 80000
print ('Iteration: ',i,' Total Classes = ',total_classes)
# Get data and mapping for this round of training
if i < 3:
nb_classes = 5000
elif i < 9:
nb_classes = 10000
batch_size = 2048 # Needs to be ~1/5 of total classes per Jaderberg paper
else:
nb_classes = 8172
train_data = MJSYNTH_DICTNET("train", nb_classes, nb_examples_per_class, previous_samples)
labels = np_utils.to_categorical(train_data.labels, total_classes+nb_classes)
these_samples = train_data.class_mapping
nb_samples = (nb_classes * nb_examples_per_class)
nb_train = np.int( 0.8 * nb_samples )
xtrain = train_data.x[:nb_train,:,:,:]
ytrain = labels[:nb_train,:]
xtest = train_data.x[nb_train:,:,:,:]
ytest = labels[nb_train:,:]
# Save the mapping for this iteration of classes
previous_samples = these_samples + previous_samples # Need to prepend new samples
# Build a new model with nb_classes more classes and initialize it with the previous weights
model2 = build_model_train(previous_model=model)
# Classification layer
model2.add(Dense(total_classes+nb_classes,activation='softmax',init='glorot_uniform'))
# Overwrite the respective weights for previously trained softmax
weights = model2.layers[-1].get_weights() # Has shape (4096 x total_classes+nb_classes)
old_weights = model.layers[-1].get_weights() # Has shape (4096 x total_classes)
weights[0][:,-total_classes:] = old_weights[0] # Overwrite such that the first nb_classes cols are random
weights[1][-total_classes:] = old_weights[1]
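        # Worked example for the first expansion (shapes as noted above): with
        # total_classes=5000 and nb_classes=5000, weights[0] is (4096, 10000); the old
        # (4096, 5000) block fills the last 5000 columns, leaving the first 5000
        # columns randomly initialized for the new classes.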
model2.layers[-1].set_weights(weights)
total_classes += nb_classes
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model2.compile(loss='categorical_crossentropy', optimizer=sgd)
model2.fit(xtrain, ytrain, batch_size=batch_size, nb_epoch=nb_epoch,
show_accuracy=True, verbose=1, validation_data=(xtest, ytest))
# Save the model and weights
json_string = model2.to_json()
open('charnet_architecture_'+str(total_classes)+'.json', 'w').write(json_string)
model2.save_weights('charnet_weights_'+str(total_classes)+'.h5')
np.save('classes_'+str(total_classes),these_samples)
# Iterate models
model = model2
# Save the mapping built from this training script to lexicon.txt
np.save('lexicon_mapping',previous_samples)
| mathDR/reading-text-in-the-wild | DICT2/TRAIN/train_dictnet.py | Python | gpl-3.0 | 4,487 |
# -*- coding: utf-8 -*-
# /***************************************************************************
# Irmt
# A QGIS plugin
# OpenQuake Integrated Risk Modelling Toolkit
# -------------------
# begin : 2013-10-24
# copyright : (C) 2013 by GEM Foundation
# email : [email protected]
# ***************************************************************************/
#
# Copyright (c) 2013-2014, GEM Foundation.
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import unittest
from qgis.PyQt.QtGui import QIcon
from qgis.testing import start_app
from qgis.testing.mocked import get_iface
QGIS_APP = start_app()
IFACE = get_iface()
# import irmt
# IRMT = irmt.classFactory(IFACE)
# IRMT.ui = irmt.plugin.dlg.ui # useful for shorthand later
class IrmtTest(unittest.TestCase):
"""Test OpenQuake IRMT works."""
def setUp(self):
"""Runs before each test."""
pass
def tearDown(self):
"""Runs after each test."""
pass
def test_icon_png(self):
"""Test we can click OK."""
path = ':/plugins/IRMT/icon.png'
icon = QIcon(path)
self.assertFalse(icon.isNull())
def test_toggle_active_actions(self):
# print(IRMT.registered_actions())
# self.assertFalse()
pass
if __name__ == "__main__":
suite = unittest.makeSuite(IrmtTest)
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
| gem/oq-svir-qgis | svir/test/unit/test_irmt.py | Python | agpl-3.0 | 2,123 |
from __future__ import absolute_import
import logging
from typing import Any, Dict, List, Optional, Text
from argparse import ArgumentParser
from zerver.models import UserProfile
from django.utils.http import urlsafe_base64_encode
from django.utils.encoding import force_bytes
from django.contrib.auth.tokens import default_token_generator, PasswordResetTokenGenerator
from zerver.lib.send_email import send_email, FromAddress
from zerver.lib.management import ZulipBaseCommand
class Command(ZulipBaseCommand):
help = """Send email to specified email address."""
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
        parser.add_argument('--to', metavar='<to>', type=str,
                            help="email address of the user to send the email to")
        parser.add_argument('--target', metavar='<target>', type=str,
                            help="Pass 'server' to send to everyone on the server. "
                                 "Pass 'realm' to send to everyone in the realm. "
                                 "Don't forget to specify the realm using the -r or --realm flag.")
self.add_realm_args(parser)
def handle(self, *args, **options):
# type: (*Any, **str) -> None
realm = self.get_realm(options)
if options["to"] and options["target"]:
self.print_help("./manage.py", "send_password_reset_email")
exit(1)
if options["to"]:
users = [self.get_user(options["to"], realm)]
elif options["target"] == "realm":
users = UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False,
is_mirror_dummy=False)
elif options["target"] == "server":
users = UserProfile.objects.filter(is_active=True, is_bot=False,
is_mirror_dummy=False)
self.send(users)
def send(self, users, subject_template_name='', email_template_name='',
use_https=True, token_generator=default_token_generator,
from_email=None, html_email_template_name=None):
# type: (List[UserProfile], str, str, bool, PasswordResetTokenGenerator, Optional[Text], Optional[str]) -> None
"""Sends one-use only links for resetting password to target users
"""
for user_profile in users:
context = {
'email': user_profile.email,
'domain': user_profile.realm.host,
'site_name': "zulipo",
'uid': urlsafe_base64_encode(force_bytes(user_profile.pk)),
'user': user_profile,
'token': token_generator.make_token(user_profile),
'protocol': 'https' if use_https else 'http',
}
logging.warning("Sending %s email to %s" % (email_template_name, user_profile.email,))
send_email('zerver/emails/password_reset', user_profile.email, from_name="Zulip Account Security",
from_address=FromAddress.NOREPLY, context=context)
| jrowan/zulip | zerver/management/commands/send_password_reset_email.py | Python | apache-2.0 | 3,076 |
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import patterns, url
from django.views.generic import RedirectView
from .models import Article, DateArticle
date_based_info_dict = {
'queryset': Article.objects.all(),
'date_field': 'date_created',
'month_format': '%m',
}
object_list_dict = {
'queryset': Article.objects.all(),
'paginate_by': 2,
}
object_list_no_paginate_by = {
'queryset': Article.objects.all(),
}
numeric_days_info_dict = dict(date_based_info_dict, day_format='%d')
date_based_datefield_info_dict = dict(date_based_info_dict, queryset=DateArticle.objects.all())
urlpatterns = patterns('',
(r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}),
(r'^accounts/logout/$', 'django.contrib.auth.views.logout'),
# Special URLs for particular regression cases.
url('^中文/$', 'view_tests.views.redirect'),
url('^中文/target/$', 'view_tests.views.index_page'),
)
# redirects, both temporary and permanent, with non-ASCII targets
urlpatterns += patterns('',
('^nonascii_redirect/$', RedirectView.as_view(
url='/中文/target/', permanent=False)),
('^permanent_nonascii_redirect/$', RedirectView.as_view(
url='/中文/target/', permanent=True)),
)
urlpatterns += patterns('view_tests.views',
(r'^shortcuts/render_to_response/$', 'render_to_response_view'),
(r'^shortcuts/render_to_response/request_context/$', 'render_to_response_view_with_request_context'),
(r'^shortcuts/render_to_response/content_type/$', 'render_to_response_view_with_content_type'),
(r'^shortcuts/render_to_response/dirs/$', 'render_to_response_view_with_dirs'),
(r'^shortcuts/render/$', 'render_view'),
(r'^shortcuts/render/base_context/$', 'render_view_with_base_context'),
(r'^shortcuts/render/content_type/$', 'render_view_with_content_type'),
(r'^shortcuts/render/status/$', 'render_view_with_status'),
(r'^shortcuts/render/current_app/$', 'render_view_with_current_app'),
(r'^shortcuts/render/dirs/$', 'render_with_dirs'),
(r'^shortcuts/render/current_app_conflict/$', 'render_view_with_current_app_conflict'),
)
| rogerhu/django | tests/view_tests/generic_urls.py | Python | bsd-3-clause | 2,200 |
"""Support for Hangouts."""
import logging
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.helpers import dispatcher, intent
import homeassistant.helpers.config_validation as cv
# We need an import from .config_flow; without it, .config_flow is never loaded.
from .intents import HelpIntent
from .config_flow import HangoutsFlowHandler # noqa: F401
from .const import (
CONF_BOT,
CONF_DEFAULT_CONVERSATIONS,
CONF_ERROR_SUPPRESSED_CONVERSATIONS,
CONF_INTENTS,
CONF_MATCHERS,
CONF_REFRESH_TOKEN,
CONF_SENTENCES,
DOMAIN,
EVENT_HANGOUTS_CONNECTED,
EVENT_HANGOUTS_CONVERSATIONS_CHANGED,
EVENT_HANGOUTS_CONVERSATIONS_RESOLVED,
INTENT_HELP,
INTENT_SCHEMA,
MESSAGE_SCHEMA,
SERVICE_RECONNECT,
SERVICE_SEND_MESSAGE,
SERVICE_UPDATE,
TARGETS_SCHEMA,
)
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_INTENTS, default={}): vol.Schema(
{cv.string: INTENT_SCHEMA}
),
vol.Optional(CONF_DEFAULT_CONVERSATIONS, default=[]): [TARGETS_SCHEMA],
vol.Optional(CONF_ERROR_SUPPRESSED_CONVERSATIONS, default=[]): [
TARGETS_SCHEMA
],
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Set up the Hangouts bot component."""
from homeassistant.components.conversation import create_matcher
config = config.get(DOMAIN)
if config is None:
hass.data[DOMAIN] = {
CONF_INTENTS: {},
CONF_DEFAULT_CONVERSATIONS: [],
CONF_ERROR_SUPPRESSED_CONVERSATIONS: [],
}
return True
hass.data[DOMAIN] = {
CONF_INTENTS: config[CONF_INTENTS],
CONF_DEFAULT_CONVERSATIONS: config[CONF_DEFAULT_CONVERSATIONS],
CONF_ERROR_SUPPRESSED_CONVERSATIONS: config[
CONF_ERROR_SUPPRESSED_CONVERSATIONS
],
}
if (
hass.data[DOMAIN][CONF_INTENTS]
and INTENT_HELP not in hass.data[DOMAIN][CONF_INTENTS]
):
hass.data[DOMAIN][CONF_INTENTS][INTENT_HELP] = {CONF_SENTENCES: ["HELP"]}
for data in hass.data[DOMAIN][CONF_INTENTS].values():
matchers = []
for sentence in data[CONF_SENTENCES]:
matchers.append(create_matcher(sentence))
data[CONF_MATCHERS] = matchers
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}
)
)
return True
async def async_setup_entry(hass, config):
"""Set up a config entry."""
from hangups.auth import GoogleAuthError
try:
from .hangouts_bot import HangoutsBot
bot = HangoutsBot(
hass,
config.data.get(CONF_REFRESH_TOKEN),
hass.data[DOMAIN][CONF_INTENTS],
hass.data[DOMAIN][CONF_DEFAULT_CONVERSATIONS],
hass.data[DOMAIN][CONF_ERROR_SUPPRESSED_CONVERSATIONS],
)
hass.data[DOMAIN][CONF_BOT] = bot
except GoogleAuthError as exception:
_LOGGER.error("Hangouts failed to log in: %s", str(exception))
return False
dispatcher.async_dispatcher_connect(
hass, EVENT_HANGOUTS_CONNECTED, bot.async_handle_update_users_and_conversations
)
dispatcher.async_dispatcher_connect(
hass, EVENT_HANGOUTS_CONVERSATIONS_CHANGED, bot.async_resolve_conversations
)
dispatcher.async_dispatcher_connect(
hass,
EVENT_HANGOUTS_CONVERSATIONS_RESOLVED,
bot.async_update_conversation_commands,
)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, bot.async_handle_hass_stop)
await bot.async_connect()
hass.services.async_register(
DOMAIN,
SERVICE_SEND_MESSAGE,
bot.async_handle_send_message,
schema=MESSAGE_SCHEMA,
)
hass.services.async_register(
DOMAIN,
SERVICE_UPDATE,
bot.async_handle_update_users_and_conversations,
schema=vol.Schema({}),
)
hass.services.async_register(
DOMAIN, SERVICE_RECONNECT, bot.async_handle_reconnect, schema=vol.Schema({})
)
intent.async_register(hass, HelpIntent(hass))
return True
async def async_unload_entry(hass, _):
"""Unload a config entry."""
bot = hass.data[DOMAIN].pop(CONF_BOT)
await bot.async_disconnect()
return True
| fbradyirl/home-assistant | homeassistant/components/hangouts/__init__.py | Python | apache-2.0 | 4,555 |
#!/usr/bin/env python3
# written by sqall
# twitter: https://twitter.com/sqall01
# blog: https://h4des.org
# github: https://github.com/sqall01
#
# Licensed under the GNU Affero General Public License, version 3.
import sys
import os
import stat
from lib import ServerCommunication, ConnectionWatchdog, Receiver
from lib import SMTPAlert
from lib import KodiAlert, AlertEventHandler
from lib import GlobalData
import logging
import time
import random
import xml.etree.ElementTree
# Function creates a path location for the given user input.
def make_path(input_location: str) -> str:
# Do nothing if the given location is an absolute path.
if input_location[0] == "/":
return input_location
# Replace ~ with the home directory.
elif input_location[0] == "~":
pos = -1
for i in range(1, len(input_location)):
if input_location[i] == "/":
continue
pos = i
break
if pos == -1:
return os.environ["HOME"]
return os.path.join(os.environ["HOME"], input_location[pos:])
# Assume we have a given relative path.
return os.path.join(os.path.dirname(os.path.abspath(__file__)), input_location)
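# Examples (assuming HOME=/home/alice and this script living in /opt/alertR):
#   make_path("/etc/alertr.cfg") -> "/etc/alertr.cfg"
#   make_path("~/config/config.xml") -> "/home/alice/config/config.xml"
#   make_path("config/config.xml") -> "/opt/alertR/config/config.xml"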
if __name__ == '__main__':
# generate object of the global needed data
globalData = GlobalData()
log_tag = os.path.basename(__file__)
# parse config file, get logfile configurations
# and initialize logging
try:
configRoot = xml.etree.ElementTree.parse(globalData.configFile).getroot()
logfile = make_path(str(configRoot.find("general").find("log").attrib["file"]))
# parse chosen log level
tempLoglevel = str(configRoot.find("general").find("log").attrib["level"])
tempLoglevel = tempLoglevel.upper()
if tempLoglevel == "DEBUG":
loglevel = logging.DEBUG
elif tempLoglevel == "INFO":
loglevel = logging.INFO
elif tempLoglevel == "WARNING":
loglevel = logging.WARNING
elif tempLoglevel == "ERROR":
loglevel = logging.ERROR
elif tempLoglevel == "CRITICAL":
loglevel = logging.CRITICAL
else:
raise ValueError("No valid log level in config file.")
# initialize logging
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
filename=logfile,
level=loglevel)
except Exception as e:
print("Config could not be parsed.")
print(e)
sys.exit(1)
# parse the rest of the config with initialized logging
try:
# Check file permission of config file (do not allow it to be accessible by others).
config_stat = os.stat(globalData.configFile)
if (config_stat.st_mode & stat.S_IROTH
or config_stat.st_mode & stat.S_IWOTH
or config_stat.st_mode & stat.S_IXOTH):
raise ValueError("Config file is accessible by others. Please remove file permissions for others.")
# check if config and client version are compatible
version = float(configRoot.attrib["version"])
if version != globalData.version:
raise ValueError("Config version '%.3f' not "
% version
+ "compatible with client version '%.3f'."
% globalData.version)
# parse server configurations
server = str(configRoot.find("general").find("server").attrib["host"])
serverPort = int(configRoot.find("general").find("server").attrib["port"])
# Get TLS/SSL configurations.
ssl_enabled = (str(configRoot.find("general").find("ssl").attrib["enabled"]).upper() == "TRUE")
server_ca_file = None
client_cert_file = None
client_key_file = None
if ssl_enabled:
server_ca_file = os.path.abspath(make_path(str(configRoot.find("general").find("ssl").find("server").attrib[
"caFile"])))
if os.path.exists(server_ca_file) is False:
raise ValueError("Server CA does not exist.")
certificate_required = (str(configRoot.find("general").find("ssl").find("client").attrib[
"certificateRequired"]).upper() == "TRUE")
if certificate_required is True:
client_cert_file = os.path.abspath(
make_path(str(configRoot.find("general").find("ssl").find("client").attrib["certFile"])))
client_key_file = os.path.abspath(
make_path(str(configRoot.find("general").find("ssl").find("client").attrib["keyFile"])))
if (os.path.exists(client_cert_file) is False
or os.path.exists(client_key_file) is False):
raise ValueError("Client certificate or key does not exist.")
key_stat = os.stat(client_key_file)
if (key_stat.st_mode & stat.S_IROTH
or key_stat.st_mode & stat.S_IWOTH
or key_stat.st_mode & stat.S_IXOTH):
raise ValueError("Client key is accessible by others. Please remove file permissions for others.")
else:
logging.warning("[%s] TLS/SSL is disabled. Do NOT use this setting in a production environment."
% log_tag)
# get user credentials
username = str(configRoot.find("general").find("credentials").attrib["username"])
password = str(configRoot.find("general").find("credentials").attrib["password"])
# Get connection settings.
temp = (str(configRoot.find("general").find("connection").attrib["persistent"]).upper() == "TRUE")
if temp:
globalData.persistent = 1
else:
globalData.persistent = 0
# parse smtp options if activated
smtpActivated = (str(configRoot.find("smtp").find("general").attrib["activated"]).upper() == "TRUE")
smtpServer = ""
smtpPort = -1
smtpFromAddr = ""
smtpToAddr = ""
if smtpActivated is True:
smtpServer = str(configRoot.find("smtp").find("server").attrib["host"])
smtpPort = int(configRoot.find("smtp").find("server").attrib["port"])
smtpFromAddr = str(configRoot.find("smtp").find("general").attrib["fromAddr"])
smtpToAddr = str(configRoot.find("smtp").find("general").attrib["toAddr"])
# parse all alerts
for item in configRoot.find("alerts").iterfind("alert"):
alert = KodiAlert()
# get kodi settings
alert.host = str(item.find("kodi").attrib["host"])
alert.port = int(item.find("kodi").attrib["port"])
alert.displayTime = int(item.find("kodi").attrib["displayTime"])
alert.showMessage = (str(item.find("kodi").attrib["showMessage"]).upper() == "TRUE")
alert.pausePlayer = (str(item.find("kodi").attrib["pausePlayer"]).upper() == "TRUE")
alert.triggerDelay = int(item.find("kodi").attrib["triggerDelay"])
alert.displayReceivedMessage = (str(item.find("kodi").attrib["displayReceivedMessage"]).upper() == "TRUE")
alert.icon = make_path("./config/kodi_logo.png")
# these options are needed by the server to
# differentiate between the registered alerts
alert.id = int(item.find("general").attrib["id"])
alert.description = str(item.find("general").attrib["description"])
alert.alertLevels = list()
for alertLevelXml in item.iterfind("alertLevel"):
alert.alertLevels.append(int(alertLevelXml.text))
# check if description is empty
if len(alert.description) == 0:
raise ValueError("Description of alert %d is empty."
% alert.id)
# check if the id of the alert is unique
for registeredAlert in globalData.alerts:
if registeredAlert.id == alert.id:
raise ValueError("Id of alert %d is already taken."
% alert.id)
globalData.alerts.append(alert)
except Exception as e:
logging.exception("[%s] Could not parse config." % log_tag)
sys.exit(1)
random.seed()
# check if smtp is activated => generate object to send eMail alerts
if smtpActivated is True:
globalData.smtpAlert = SMTPAlert(smtpServer, smtpPort, smtpFromAddr, smtpToAddr)
else:
globalData.smtpAlert = None
# Generate object for the communication to the server and connect to it.
globalData.serverComm = ServerCommunication(server,
serverPort,
server_ca_file,
username,
password,
client_cert_file,
client_key_file,
AlertEventHandler(globalData),
globalData)
connectionRetries = 1
logging.info("[%s] Connecting to server." % log_tag)
while True:
# check if 5 unsuccessful attempts are made to connect
# to the server and if smtp alert is activated
# => send eMail alert
if (globalData.smtpAlert is not None
and (connectionRetries % 5) == 0):
globalData.smtpAlert.sendCommunicationAlert(connectionRetries)
if globalData.serverComm.initialize() is True:
# if smtp alert is activated
# => send email that communication problems are solved
if globalData.smtpAlert is not None:
globalData.smtpAlert.sendCommunicationAlertClear()
connectionRetries = 1
break
connectionRetries += 1
logging.critical("[%s] Connecting to server failed. Try again in 5 seconds." % log_tag)
time.sleep(5)
# when connected => generate watchdog object to monitor the
# server connection
logging.info("[%s] Starting watchdog thread." % log_tag)
watchdog = ConnectionWatchdog(globalData.serverComm,
globalData.pingInterval,
globalData.smtpAlert)
# set thread to daemon
# => threads terminates when main thread terminates
watchdog.daemon = True
watchdog.start()
# initialize all alerts
logging.info("[%s] Initializing alerts." % log_tag)
for alert in globalData.alerts:
alert.initialize()
logging.info("[%s] Client started." % log_tag)
# generate receiver to handle incoming data (for example status updates)
# (note: we will not return from the receiver unless the client is terminated)
receiver = Receiver(globalData.serverComm)
receiver.run()
| sqall01/alertR | alertClientKodi/alertRclient.py | Python | agpl-3.0 | 11,209 |
"""
Shogun demo, based on the PyQt demo by Eli Bendersky
Christian Widmer
Soeren Sonnenburg
License: GPLv3
"""
import numpy
import sys, os, csv
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import matplotlib
from matplotlib.colorbar import make_axes, Colorbar
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from shogun import *
import util
class Form(QMainWindow):
def __init__(self, parent=None):
super(Form, self).__init__(parent)
self.setWindowTitle('SHOGUN interactive demo')
self.data = DataHolder()
self.series_list_model = QStandardItemModel()
self.create_menu()
self.create_main_frame()
self.create_status_bar()
self.on_show()
def load_file(self, filename=None):
filename = QFileDialog.getOpenFileName(self,
'Open a data file', '.', 'CSV files (*.csv);;All Files (*.*)')
if filename:
self.data.load_from_file(filename)
self.fill_series_list(self.data.series_names())
self.status_text.setText("Loaded " + filename)
def on_show(self):
self.axes.clear()
self.axes.grid(True)
self.axes.plot(self.data.x1_pos, self.data.x2_pos, 'ro')
self.axes.plot(self.data.x1_neg, self.data.x2_neg, 'bo')
self.axes.set_xlim((-5,5))
self.axes.set_ylim((-5,5))
self.canvas.draw()
self.fill_series_list(self.data.get_stats())
def on_about(self):
msg = __doc__
QMessageBox.about(self, "About the demo", msg.strip())
def fill_series_list(self, names):
self.series_list_model.clear()
for name in names:
item = QStandardItem(name)
item.setCheckState(Qt.Unchecked)
item.setCheckable(False)
self.series_list_model.appendRow(item)
def onclick(self, event):
        # Clicks outside the axes have no data coordinates (None); ignore them.
        if event.xdata is None or event.ydata is None:
            return
        print 'button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(event.button, event.x, event.y, event.xdata, event.ydata)
if event.button==1:
label = 1.0
else:
label = -1.0
self.data.add_example(event.xdata, event.ydata, label)
self.on_show()
def clear(self):
self.data.clear()
self.on_show()
def enable_widgets(self):
kernel_name = self.kernel_combo.currentText()
if kernel_name == "LinearKernel":
self.sigma.setDisabled(True)
self.degree.setDisabled(True)
elif kernel_name == "PolynomialKernel":
self.sigma.setDisabled(True)
self.degree.setEnabled(True)
elif kernel_name == "GaussianKernel":
self.sigma.setEnabled(True)
self.degree.setDisabled(True)
def train_svm(self):
width = float(self.sigma.text())
degree = int(self.degree.text())
self.axes.clear()
self.axes.grid(True)
self.axes.plot(self.data.x1_pos, self.data.x2_pos, 'ro')
self.axes.plot(self.data.x1_neg, self.data.x2_neg, 'bo')
# train svm
labels = self.data.get_labels()
print type(labels)
lab = BinaryLabels(labels)
features = self.data.get_examples()
train = RealFeatures(features)
kernel_name = self.kernel_combo.currentText()
print "current kernel is %s" % (kernel_name)
if kernel_name == "LinearKernel":
gk = LinearKernel(train, train)
gk.set_normalizer(IdentityKernelNormalizer())
elif kernel_name == "PolynomialKernel":
gk = PolyKernel(train, train, degree, True)
gk.set_normalizer(IdentityKernelNormalizer())
elif kernel_name == "GaussianKernel":
gk = GaussianKernel(train, train, width)
cost = float(self.cost.text())
print "cost", cost
svm = LibSVM(cost, gk, lab)
svm.train()
svm.set_epsilon(1e-2)
x, y, z = util.compute_output_plot_isolines(svm, gk, train)
plt=self.axes.pcolor(x, y, z)
CS=self.axes.contour(x, y, z, [-1,0,1], linewidths=1, colors='black', hold=True)
#CS=self.axes.contour(x, y, z, linewidths=1, colors='black', hold=True)
#CS=self.axes.contour(x, y, z, 5, linewidths=1, colors='black', hold=True)
matplotlib.pyplot.clabel(CS, inline=1, fontsize=10)
self.axes.set_xlim((-5,5))
self.axes.set_ylim((-5,5))
cmap = matplotlib.cm.jet
norm = matplotlib.colors.Normalize(numpy.min(z), numpy.max(z))
print CS.get_clim()
if not self.cax:
self.cax, kw = make_axes(self.axes)
# ColorbarBase derives from ScalarMappable and puts a colorbar
# in a specified axes, so it has everything needed for a
# standalone colorbar. There are many more kwargs, but the
# following gives a basic continuous colorbar with ticks
# and labels.
cb1 = matplotlib.colorbar.ColorbarBase(self.cax, cmap=cmap,
norm=norm)
self.canvas.draw()
def create_main_frame(self):
self.main_frame = QWidget()
plot_frame = QWidget()
self.dpi = 100
self.fig = Figure((6.0, 6.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.main_frame)
cid = self.canvas.mpl_connect('button_press_event', self.onclick)
self.axes = self.fig.add_subplot(111)
self.cax = None
#self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
log_label = QLabel("Number of examples:")
self.series_list_view = QListView()
self.series_list_view.setModel(self.series_list_model)
cost_label = QLabel('C')
#self.cost = QSpinBox()#QLineEdit()
self.cost = QLineEdit()
self.cost.setText("1.0")
#self.cost.setMinimum(1)
spin_label2 = QLabel('sigma')
self.sigma = QLineEdit()
self.sigma.setText("1.2")
#self.sigma.setMinimum(1)
self.degree = QLineEdit()
self.degree.setText("2")
#self.sigma.setMinimum(1)
spins_hbox = QHBoxLayout()
spins_hbox.addWidget(cost_label)
spins_hbox.addWidget(self.cost)
spins_hbox.addWidget(spin_label2)
spins_hbox.addWidget(self.sigma)
spins_hbox.addWidget(self.degree)
spins_hbox.addStretch(1)
self.legend_cb = QCheckBox("Show Support Vectors")
self.legend_cb.setChecked(False)
self.show_button = QPushButton("&Train SVM")
self.connect(self.show_button, SIGNAL('clicked()'), self.train_svm)
self.clear_button = QPushButton("&Clear")
self.connect(self.clear_button, SIGNAL('clicked()'), self.clear)
self.kernel_combo = QComboBox()
self.kernel_combo.insertItem(-1, "GaussianKernel")
self.kernel_combo.insertItem(-1, "PolynomialKernel")
self.kernel_combo.insertItem(-1, "LinearKernel")
self.kernel_combo.maximumSize = QSize(300, 50)
self.connect(self.kernel_combo, SIGNAL("currentIndexChanged(QString)"), self.enable_widgets)
left_vbox = QVBoxLayout()
left_vbox.addWidget(self.canvas)
#left_vbox.addWidget(self.mpl_toolbar)
right0_vbox = QVBoxLayout()
right0_vbox.addWidget(log_label)
right0_vbox.addWidget(self.series_list_view)
#right0_vbox.addWidget(self.legend_cb)
right0_vbox.addStretch(1)
right2_vbox = QVBoxLayout()
right2_label = QLabel("Settings")
right2_vbox.addWidget(right2_label)
right2_vbox.addWidget(self.show_button)
right2_vbox.addWidget(self.kernel_combo)
right2_vbox.addLayout(spins_hbox)
right2_clearlabel = QLabel("Remove Data")
right2_vbox.addWidget(right2_clearlabel)
right2_vbox.addWidget(self.clear_button)
right2_vbox.addStretch(1)
right_vbox = QHBoxLayout()
right_vbox.addLayout(right0_vbox)
right_vbox.addLayout(right2_vbox)
hbox = QVBoxLayout()
hbox.addLayout(left_vbox)
hbox.addLayout(right_vbox)
self.main_frame.setLayout(hbox)
self.setCentralWidget(self.main_frame)
self.enable_widgets()
def create_status_bar(self):
self.status_text = QLabel("")
self.statusBar().addWidget(self.status_text, 1)
def create_menu(self):
self.file_menu = self.menuBar().addMenu("&File")
load_action = self.create_action("&Load file",
shortcut="Ctrl+L", slot=self.load_file, tip="Load a file")
quit_action = self.create_action("&Quit", slot=self.close,
shortcut="Ctrl+Q", tip="Close the application")
self.add_actions(self.file_menu,
(load_action, None, quit_action))
self.help_menu = self.menuBar().addMenu("&Help")
about_action = self.create_action("&About",
shortcut='F1', slot=self.on_about,
tip='About the demo')
self.add_actions(self.help_menu, (about_action,))
def add_actions(self, target, actions):
for action in actions:
if action is None:
target.addSeparator()
else:
target.addAction(action)
def create_action( self, text, slot=None, shortcut=None,
icon=None, tip=None, checkable=False,
signal="triggered()"):
action = QAction(text, self)
if icon is not None:
action.setIcon(QIcon(":/%s.png" % icon))
if shortcut is not None:
action.setShortcut(shortcut)
if tip is not None:
action.setToolTip(tip)
action.setStatusTip(tip)
if slot is not None:
self.connect(action, SIGNAL(signal), slot)
if checkable:
action.setCheckable(True)
return action
class DataHolder(object):
""" Just a thin wrapper over a dictionary that holds integer
data series. Each series has a name and a list of numbers
as its data. The length of all series is assumed to be
the same.
The series can be read from a CSV file, where each line
is a separate series. In each series, the first item in
the line is the name, and the rest are data numbers.
"""
def __init__(self, filename=None):
self.clear()
self.load_from_file(filename)
def clear(self):
self.x1_pos = []
self.x2_pos = []
self.x1_neg = []
self.x2_neg = []
def get_stats(self):
num_neg = len(self.x1_neg)
num_pos = len(self.x1_pos)
str_neg = "num negative examples: %i" % num_neg
str_pos = "num positive examples: %i" % num_pos
return (str_neg, str_pos)
def get_labels(self):
return numpy.array([1]*len(self.x1_pos) + [-1]*len(self.x1_neg), dtype=numpy.float64)
def get_examples(self):
num_pos = len(self.x1_pos)
num_neg = len(self.x1_neg)
examples = numpy.zeros((2,num_pos+num_neg))
for i in xrange(num_pos):
examples[0,i] = self.x1_pos[i]
examples[1,i] = self.x2_pos[i]
for i in xrange(num_neg):
examples[0,i+num_pos] = self.x1_neg[i]
examples[1,i+num_pos] = self.x2_neg[i]
return examples
def add_example(self, x1, x2, label):
if label==1:
self.x1_pos.append(x1)
self.x2_pos.append(x2)
else:
self.x1_neg.append(x1)
self.x2_neg.append(x2)
def load_from_file(self, filename=None):
self.data = {}
self.names = []
if filename:
for line in csv.reader(open(filename, 'rb')):
self.names.append(line[0])
self.data[line[0]] = map(int, line[1:])
self.datalen = len(line[1:])
def series_names(self):
""" Names of the data series
"""
return self.names
def series_len(self):
""" Length of a data series
"""
return self.datalen
def series_count(self):
return len(self.data)
def get_series_data(self, name):
return self.data[name]
def main():
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
if __name__ == "__main__":
main()
#~ dh = DataHolder('qt_mpl_data.csv')
#~ print dh.data
#~ print dh.get_series_data('1991 Sales')
#~ print dh.series_names()
#~ print dh.series_count()
| cfjhallgren/shogun | examples/undocumented/python/graphical/interactive_svm_demo.py | Python | gpl-3.0 | 12,586 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import threading, thread
import requests, re
import random
import xml.etree.ElementTree as etree
from Utils import *
import ArtworkUtils as artutils
import SkinShortcutsIntegration as skinshortcuts
class ListItemMonitor(threading.Thread):
event = None
exit = False
delayedTaskInterval = 1795
lastWeatherNotificationCheck = None
lastNextAiredNotificationCheck = None
widgetContainerPrefix = ""
liPath = ""
liFile = ""
liLabel = ""
liTitle = ""
liDbId = ""
liImdb = ""
unwatched = 1
contentType = ""
allStudioLogos = {}
allStudioLogosColor = {}
LastCustomStudioImagesPath = ""
widgetTaskInterval = 590
moviesetCache = {}
extraFanartCache = {}
streamdetailsCache = {}
pvrArtCache = {}
tmdbinfocache = {}
omdbinfocache = {}
imdb_top250 = {}
cachePath = os.path.join(ADDON_DATA_PATH,"librarycache.json")
ActorImagesCachePath = os.path.join(ADDON_DATA_PATH,"actorimages.json")
def __init__(self, *args):
logMsg("ListItemMonitor - started")
self.event = threading.Event()
self.monitor = xbmc.Monitor()
threading.Thread.__init__(self, *args)
def stop(self):
logMsg("ListItemMonitor - stop called",0)
self.saveCacheToFile()
self.exit = True
self.event.set()
def run(self):
setAddonsettings()
self.getCacheFromFile()
playerTitle = ""
playerFile = ""
lastPlayerItem = ""
playerItem = ""
liPathLast = ""
curFolder = ""
curFolderLast = ""
lastListItem = ""
nextairedActive = False
screenSaverSetting = None
while (self.exit != True):
if xbmc.getCondVisibility("Player.HasAudio"):
#set window props for music player
try:
playerTitle = xbmc.getInfoLabel("Player.Title").decode('utf-8')
playerFile = xbmc.getInfoLabel("Player.Filenameandpath").decode('utf-8')
playerItem = playerTitle + playerFile
#only perform actions when the listitem has actually changed
if playerItem and playerItem != lastPlayerItem:
#clear all window props first
self.resetPlayerWindowProps()
self.setMusicPlayerDetails()
lastPlayerItem = playerItem
except Exception as e:
logMsg("ERROR in setMusicPlayerDetails ! --> " + str(e), 0)
elif lastPlayerItem:
#cleanup remaining window props
self.resetPlayerWindowProps()
playerItem = ""
lastPlayerItem = ""
if xbmc.getCondVisibility("Window.IsActive(visualisation) + Skin.HasSetting(SkinHelper.DisableScreenSaverOnFullScreenMusic)"):
#disable the screensaver if fullscreen music playback
if not screenSaverSetting:
screenSaverSetting = getJSON('Settings.GetSettingValue', '{"setting":"screensaver.mode"}')
if screenSaverSetting: setJSON('Settings.SetSettingValue', '{"setting":"screensaver.mode", "value": ""}')
elif screenSaverSetting:
setJSON('Settings.SetSettingValue', '{"setting":"screensaver.mode", "value": "%s"}' %screenSaverSetting)
screenSaverSetting = None
#auto close OSD after X seconds of inactivity
if xbmc.getCondVisibility("Window.IsActive(videoosd) | Window.IsActive(musicosd)"):
if xbmc.getCondVisibility("Window.IsActive(videoosd)"):
secondsToDisplay = xbmc.getInfoLabel("Skin.String(SkinHelper.AutoCloseVideoOSD)")
window = "videoosd"
elif xbmc.getCondVisibility("Window.IsActive(musicosd)"):
secondsToDisplay = xbmc.getInfoLabel("Skin.String(SkinHelper.AutoCloseMusicOSD)")
window = "musicosd"
else:
secondsToDisplay = ""
if secondsToDisplay and secondsToDisplay != "0":
while xbmc.getCondVisibility("Window.IsActive(%s)"%window):
if xbmc.getCondVisibility("System.IdleTime(%s)" %secondsToDisplay):
if xbmc.getCondVisibility("Window.IsActive(%s)"%window):
xbmc.executebuiltin("Dialog.Close(%s)" %window)
else:
xbmc.sleep(500)
#do some background stuff every 30 minutes
if self.delayedTaskInterval >= 1800 and not self.exit:
thread.start_new_thread(self.doBackgroundWork, ())
self.delayedTaskInterval = 0
#reload some widgets every 10 minutes
if self.widgetTaskInterval >= 600 and not self.exit:
self.resetGlobalWidgetWindowProps()
self.widgetTaskInterval = 0
#flush cache if videolibrary has changed
if WINDOW.getProperty("resetVideoDbCache") == "reset":
self.extraFanartCache = {}
self.streamdetailsCache = {}
WINDOW.clearProperty("resetVideoDbCache")
#flush cache if pvr settings have changed
if WINDOW.getProperty("resetPvrArtCache") == "reset":
self.pvrArtCache = {}
WINDOW.clearProperty("SkinHelper.PVR.ArtWork")
WINDOW.clearProperty("resetPvrArtCache")
if xbmc.getCondVisibility("[Window.IsMedia | !IsEmpty(Window(Home).Property(SkinHelper.WidgetContainer))]") and not self.exit:
try:
widgetContainer = WINDOW.getProperty("SkinHelper.WidgetContainer").decode('utf-8')
if widgetContainer:
self.widgetContainerPrefix = "Container(%s)."%widgetContainer
curFolder = xbmc.getInfoLabel("widget-%s-$INFO[Container(%s).NumItems]" %(widgetContainer,widgetContainer)).decode('utf-8')
else:
self.widgetContainerPrefix = ""
curFolder = xbmc.getInfoLabel("$INFO[Container.FolderPath]$INFO[Container.NumItems]").decode('utf-8')
self.liTitle = xbmc.getInfoLabel("%sListItem.Title" %self.widgetContainerPrefix).decode('utf-8')
self.liLabel = xbmc.getInfoLabel("%sListItem.Label" %self.widgetContainerPrefix).decode('utf-8')
except Exception as e:
logMsg(str(e),0)
curFolder = ""
self.liLabel = ""
self.liTitle = ""
#perform actions if the container path has changed
if (curFolder != curFolderLast):
self.resetWindowProps()
self.contentType = ""
curFolderLast = curFolder
if curFolder and self.liLabel:
#always wait for the contentType because plugins can be slow
for i in range(20):
self.contentType = getCurrentContentType(self.widgetContainerPrefix)
if self.contentType: break
else: xbmc.sleep(250)
if not self.widgetContainerPrefix and self.contentType:
self.setForcedView()
self.setContentHeader()
curListItem = curFolder + self.liLabel + self.liTitle
WINDOW.setProperty("curListItem",curListItem)
#only perform actions when the listitem has actually changed
if curListItem and curListItem != lastListItem and self.contentType:
#clear all window props first
self.resetWindowProps()
#generic props
self.liPath = xbmc.getInfoLabel("%sListItem.Path" %self.widgetContainerPrefix).decode('utf-8')
if not self.liPath: self.liPath = xbmc.getInfoLabel("%sListItem.FolderPath" %self.widgetContainerPrefix).decode('utf-8')
self.liFile = xbmc.getInfoLabel("%sListItem.FileNameAndPath" %self.widgetContainerPrefix).decode('utf-8')
self.liDbId = ""
self.liImdb = ""
if not self.liLabel == "..":
# monitor listitem props for music content
if self.contentType in ["albums","artists","songs"]:
try:
thread.start_new_thread(self.setMusicDetails, (True,))
self.setGenre()
except Exception as e:
logMsg("ERROR in setMusicDetails ! --> " + str(e), 0)
# monitor listitem props for video content
elif self.contentType in ["movies","setmovies","tvshows","seasons","episodes","sets"]:
try:
self.liDbId = xbmc.getInfoLabel("%sListItem.DBID"%self.widgetContainerPrefix).decode('utf-8')
if not self.liDbId or self.liDbId == "-1": self.liDbId = xbmc.getInfoLabel("%sListItem.Property(DBID)"%self.widgetContainerPrefix).decode('utf-8')
if self.liDbId == "-1": self.liDbId = ""
self.liImdb = xbmc.getInfoLabel("%sListItem.IMDBNumber"%self.widgetContainerPrefix).decode('utf-8')
if not self.liImdb: self.liImdb = xbmc.getInfoLabel("%sListItem.Property(IMDBNumber)"%self.widgetContainerPrefix).decode('utf-8')
self.setDuration()
self.setStudioLogo()
self.setGenre()
self.setDirector()
if self.liPath.startswith("plugin://") and not ("plugin.video.emby" in self.liPath or "script.skin.helper.service" in self.liPath):
#plugins only...
thread.start_new_thread(self.setAddonDetails, (True,))
self.setAddonName()
else:
#library only...
thread.start_new_thread(self.setTmdbInfo, (True,))
thread.start_new_thread(self.setOmdbInfo, (True,))
thread.start_new_thread(self.setAnimatedPoster, (True,))
self.setStreamDetails()
self.setMovieSetDetails()
self.checkExtraFanArt()
#nextaired workaround for info dialog
if widgetContainer == "999" and xbmc.getCondVisibility("!IsEmpty(%sListItem.TvShowTitle) + System.HasAddon(script.tv.show.next.aired)" %self.widgetContainerPrefix):
xbmc.executebuiltin("RunScript(script.tv.show.next.aired,tvshowtitle=%s)" %xbmc.getInfoLabel("%sListItem.TvShowTitle"%self.widgetContainerPrefix).replace("&",""))
nextairedActive = True
elif nextairedActive:
nextairedActive = False
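                                        #clear the nextaired window props by passing a dummy, non-existing show title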
xbmc.executebuiltin("RunScript(script.tv.show.next.aired,tvshowtitle=165628787629692696)")
except Exception as e:
logMsg("ERROR in LibraryMonitor ! --> " + str(e), 0)
# monitor listitem props when PVR is active
elif self.contentType in ["tvchannels","tvrecordings"]:
try:
self.setDuration()
thread.start_new_thread(self.setPVRThumbs, (True,))
self.setGenre()
except Exception as e:
logMsg("ERROR in LibraryMonitor ! --> " + str(e), 0)
#set some globals
liPathLast = self.liPath
lastListItem = curListItem
xbmc.sleep(100)
self.delayedTaskInterval += 0.1
self.widgetTaskInterval += 0.1
elif lastListItem and not self.exit:
#flush any remaining window properties
self.resetWindowProps()
WINDOW.clearProperty("SkinHelper.ContentHeader")
WINDOW.clearProperty("contenttype")
self.contentType = ""
if nextairedActive:
nextairedActive = False
xbmc.executebuiltin("RunScript(script.tv.show.next.aired,tvshowtitle=165628787629692696)")
lastListItem = ""
curListItem = ""
curFolder = ""
curFolderLast = ""
self.widgetContainerPrefix = ""
elif xbmc.getCondVisibility("Window.IsActive(fullscreenvideo)"):
#fullscreen video active
self.monitor.waitForAbort(2)
self.delayedTaskInterval += 2
self.widgetTaskInterval += 2
else:
#other window visible
self.monitor.waitForAbort(0.5)
self.delayedTaskInterval += 0.5
self.widgetTaskInterval += 0.5
def doBackgroundWork(self):
try:
if self.exit: return
logMsg("Started Background worker...")
self.getStudioLogos()
self.genericWindowProps()
if not self.imdb_top250: self.imdb_top250 = artutils.getImdbTop250()
self.checkNotifications()
self.saveCacheToFile()
logMsg("Ended Background worker...")
except Exception as e:
logMsg("ERROR in ListitemMonitor doBackgroundWork ! --> " + str(e), 0)
def saveCacheToFile(self):
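        #persist the movieset/tmdb caches and actor images so they survive addon restarts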
libraryCache = {}
libraryCache["SetsCache"] = self.moviesetCache
libraryCache["tmdbinfocache"] = self.tmdbinfocache
saveDataToCacheFile(self.cachePath,libraryCache)
actorcache = WINDOW.getProperty("SkinHelper.ActorImages").decode("utf-8")
if actorcache:
saveDataToCacheFile(self.ActorImagesCachePath,eval(actorcache))
def getCacheFromFile(self):
#library items cache
data = getDataFromCacheFile(self.cachePath)
if data.has_key("SetsCache"):
self.moviesetCache = data["SetsCache"]
if data.has_key("tmdbinfocache"):
self.tmdbinfocache = data["tmdbinfocache"]
#actorimagescache
data = getDataFromCacheFile(self.ActorImagesCachePath)
if data: WINDOW.setProperty("SkinHelper.ActorImages", repr(data))
def checkNotifications(self):
try:
currentHour = time.strftime("%H")
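            #notifications are checked at most once per clock hour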
#weather notifications
winw = xbmcgui.Window(12600)
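            #window id 12600 is Kodi's weather window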
if xbmc.getCondVisibility("Skin.HasSetting(EnableWeatherNotifications) + !IsEmpty(Window(Weather).Property(Alerts.RSS)) + !IsEmpty(Window(Weather).Property(Current.Condition))") and currentHour != self.lastWeatherNotificationCheck:
dialog = xbmcgui.Dialog()
dialog.notification(xbmc.getLocalizedString(31294), winw.getProperty("Alerts"), xbmcgui.NOTIFICATION_WARNING, 8000)
self.lastWeatherNotificationCheck = currentHour
#nextaired notifications
if (xbmc.getCondVisibility("Skin.HasSetting(EnableNextAiredNotifications) + System.HasAddon(script.tv.show.next.aired)") and currentHour != self.lastNextAiredNotificationCheck):
if (WINDOW.getProperty("NextAired.TodayShow")):
dialog = xbmcgui.Dialog()
dialog.notification(xbmc.getLocalizedString(31295), WINDOW.getProperty("NextAired.TodayShow"), xbmcgui.NOTIFICATION_WARNING, 8000)
self.lastNextAiredNotificationCheck = currentHour
except Exception as e:
logMsg("ERROR in checkNotifications ! --> " + str(e), 0)
def genericWindowProps(self):
        #GET TOTAL ADDONS COUNT
        media_array = getJSON('Addons.GetAddons','{ }')
        WINDOW.setProperty("SkinHelper.TotalAddons",str(len(media_array)))
        addontypes = []
        addontypes.append( ["executable", "SkinHelper.TotalProgramAddons"] )
        addontypes.append( ["video", "SkinHelper.TotalVideoAddons"] )
        addontypes.append( ["audio", "SkinHelper.TotalAudioAddons"] )
        addontypes.append( ["image", "SkinHelper.TotalPicturesAddons"] )
        for addontype in addontypes:
            media_array = getJSON('Addons.GetAddons','{ "content": "%s" }' %addontype[0])
            WINDOW.setProperty(addontype[1],str(len(media_array)))
        #GET FAVOURITES COUNT
        media_array = getJSON('Favourites.GetFavourites','{ }')
        WINDOW.setProperty("SkinHelper.TotalFavourites",str(len(media_array)))
        #GET TV CHANNELS COUNT
        allTvChannelsCount = 0
        if xbmc.getCondVisibility("Pvr.HasTVChannels"):
            media_array = getJSON('PVR.GetChannels','{"channelgroupid": "alltv" }' )
            allTvChannelsCount = len(media_array)
        WINDOW.setProperty("SkinHelper.TotalTVChannels",str(allTvChannelsCount))
        #GET MOVIE SETS COUNT
        allMovieSetsCount = 0
        allMoviesInSetCount = 0
        media_array = getJSON('VideoLibrary.GetMovieSets','{}' )
        for item in media_array:
            allMovieSetsCount += 1
            media_array2 = getJSON('VideoLibrary.GetMovieSetDetails','{"setid": %s}' %item["setid"])
            allMoviesInSetCount += len(media_array2)
        WINDOW.setProperty("SkinHelper.TotalMovieSets",str(allMovieSetsCount))
        WINDOW.setProperty("SkinHelper.TotalMoviesInSets",str(allMoviesInSetCount))
        #GET RADIO CHANNELS COUNT
        allRadioChannelsCount = 0
        if xbmc.getCondVisibility("Pvr.HasRadioChannels"):
            media_array = getJSON('PVR.GetChannels','{"channelgroupid": "allradio" }' )
            allRadioChannelsCount = len(media_array)
        WINDOW.setProperty("SkinHelper.TotalRadioChannels",str(allRadioChannelsCount))
def resetWindowProps(self):
#reset all window props provided by the script...
WINDOW.clearProperty("SkinHelper.ListItemStudioLogo")
WINDOW.clearProperty("SkinHelper.ListItemStudioLogoColor")
WINDOW.clearProperty("SkinHelper.ListItemStudio")
WINDOW.clearProperty("SkinHelper.ListItemStudios")
WINDOW.clearProperty('SkinHelper.ListItemDuration')
WINDOW.clearProperty('SkinHelper.ListItemDuration.Hours')
WINDOW.clearProperty('SkinHelper.ListItemDuration.Minutes')
WINDOW.clearProperty('SkinHelper.ListItemSubtitles')
WINDOW.clearProperty('SkinHelper.ListItemSubtitles.Count')
WINDOW.clearProperty('SkinHelper.ListItemAllAudioStreams')
WINDOW.clearProperty('SkinHelper.ListItemAllAudioStreams.Count')
WINDOW.clearProperty('SkinHelper.ListItemLanguages')
WINDOW.clearProperty('SkinHelper.ListItemLanguages.Count')
WINDOW.clearProperty('SkinHelper.ListItemAudioStreams.Count')
WINDOW.clearProperty('SkinHelper.ListItemGenres')
WINDOW.clearProperty('SkinHelper.ListItemDirectors')
WINDOW.clearProperty('SkinHelper.ListItemVideoHeight')
WINDOW.clearProperty('SkinHelper.ListItemVideoWidth')
WINDOW.clearProperty('SkinHelper.ListItemTags')
WINDOW.clearProperty("SkinHelper.ExtraFanArtPath")
WINDOW.clearProperty("SkinHelper.Music.Banner")
WINDOW.clearProperty("SkinHelper.Music.ClearLogo")
WINDOW.clearProperty("SkinHelper.Music.DiscArt")
WINDOW.clearProperty("SkinHelper.Music.FanArt")
WINDOW.clearProperty("SkinHelper.Music.Thumb")
WINDOW.clearProperty("SkinHelper.Music.ArtistThumb")
WINDOW.clearProperty("SkinHelper.Music.AlbumThumb")
WINDOW.clearProperty("SkinHelper.Music.Info")
WINDOW.clearProperty("SkinHelper.Music.TrackList")
WINDOW.clearProperty("SkinHelper.Music.SongCount")
WINDOW.clearProperty("SkinHelper.Music.albumCount")
WINDOW.clearProperty("SkinHelper.Music.AlbumList")
WINDOW.clearProperty("SkinHelper.Music.ExtraFanArt")
WINDOW.clearProperty("SkinHelper.PVR.Thumb")
WINDOW.clearProperty("SkinHelper.PVR.FanArt")
WINDOW.clearProperty("SkinHelper.PVR.ChannelLogo")
WINDOW.clearProperty("SkinHelper.PVR.Poster")
WINDOW.clearProperty("SkinHelper.PVR.Landscape")
WINDOW.clearProperty("SkinHelper.PVR.ClearArt")
WINDOW.clearProperty("SkinHelper.PVR.CharacterArt")
WINDOW.clearProperty("SkinHelper.PVR.ClearLogo")
WINDOW.clearProperty("SkinHelper.PVR.Banner")
WINDOW.clearProperty("SkinHelper.PVR.DiscArt")
WINDOW.clearProperty("SkinHelper.PVR.Plot")
WINDOW.clearProperty("SkinHelper.PVR.Channel")
WINDOW.clearProperty("SkinHelper.PVR.Genre")
WINDOW.clearProperty("SkinHelper.PVR.ExtraFanArt")
WINDOW.clearProperty("SkinHelper.Player.AddonName")
WINDOW.clearProperty("SkinHelper.ForcedView")
WINDOW.clearProperty('SkinHelper.MovieSet.Title')
WINDOW.clearProperty('SkinHelper.MovieSet.Runtime')
WINDOW.clearProperty('SkinHelper.MovieSet.Duration')
WINDOW.clearProperty('SkinHelper.MovieSet.Duration.Hours')
WINDOW.clearProperty('SkinHelper.MovieSet.Duration.Minutes')
WINDOW.clearProperty('SkinHelper.MovieSet.Writer')
WINDOW.clearProperty('SkinHelper.MovieSet.Director')
WINDOW.clearProperty('SkinHelper.MovieSet.Genre')
WINDOW.clearProperty('SkinHelper.MovieSet.Country')
WINDOW.clearProperty('SkinHelper.MovieSet.Studio')
WINDOW.clearProperty('SkinHelper.MovieSet.Years')
WINDOW.clearProperty('SkinHelper.MovieSet.Year')
WINDOW.clearProperty('SkinHelper.MovieSet.Count')
WINDOW.clearProperty('SkinHelper.MovieSet.Plot')
WINDOW.clearProperty('SkinHelper.MovieSet.ExtendedPlot')
WINDOW.clearProperty('SkinHelper.RottenTomatoesMeter')
WINDOW.clearProperty('SkinHelper.RottenTomatoesRating')
WINDOW.clearProperty('SkinHelper.RottenTomatoesAudienceRating')
WINDOW.clearProperty('SkinHelper.RottenTomatoesAudienceReviews')
WINDOW.clearProperty('SkinHelper.RottenTomatoesAudienceMeter')
WINDOW.clearProperty('SkinHelper.RottenTomatoesConsensus')
WINDOW.clearProperty('SkinHelper.RottenTomatoesAwards')
WINDOW.clearProperty('SkinHelper.RottenTomatoesBoxOffice')
WINDOW.clearProperty('SkinHelper.RottenTomatoesFresh')
WINDOW.clearProperty('SkinHelper.RottenTomatoesRotten')
WINDOW.clearProperty('SkinHelper.RottenTomatoesImage')
WINDOW.clearProperty('SkinHelper.RottenTomatoesReviews')
WINDOW.clearProperty('SkinHelper.RottenTomatoesDVDRelease')
WINDOW.clearProperty('SkinHelper.MetaCritic.Rating')
WINDOW.clearProperty('SkinHelper.IMDB.Rating')
WINDOW.clearProperty('SkinHelper.IMDB.Votes')
WINDOW.clearProperty('SkinHelper.IMDB.MPAA')
WINDOW.clearProperty('SkinHelper.IMDB.Runtime')
WINDOW.clearProperty('SkinHelper.IMDB.Top250')
WINDOW.clearProperty('SkinHelper.TMDB.Budget')
WINDOW.clearProperty('SkinHelper.Budget.formatted')
WINDOW.clearProperty('SkinHelper.TMDB.Budget.mln')
WINDOW.clearProperty('SkinHelper.TMDB.Revenue')
WINDOW.clearProperty('SkinHelper.TMDB.Revenue.mln')
WINDOW.clearProperty('SkinHelper.TMDB.Revenue.formatted')
WINDOW.clearProperty('SkinHelper.TMDB.Tagline')
WINDOW.clearProperty('SkinHelper.TMDB.Homepage')
WINDOW.clearProperty('SkinHelper.TMDB.Status')
WINDOW.clearProperty('SkinHelper.TMDB.Popularity')
WINDOW.clearProperty('SkinHelper.AnimatedPoster')
WINDOW.clearProperty('SkinHelper.AnimatedFanart')
totalNodes = 50
for i in range(totalNodes):
if not WINDOW.getProperty('SkinHelper.MovieSet.' + str(i) + '.Title'): break
WINDOW.clearProperty('SkinHelper.MovieSet.' + str(i) + '.Title')
WINDOW.clearProperty('SkinHelper.MovieSet.' + str(i) + '.Plot')
WINDOW.clearProperty('SkinHelper.MovieSet.' + str(i) + '.FanArt')
WINDOW.clearProperty('SkinHelper.MovieSet.' + str(i) + '.Poster')
WINDOW.clearProperty('SkinHelper.MovieSet.' + str(i) + '.Landscape')
WINDOW.clearProperty('SkinHelper.MovieSet.' + str(i) + '.DiscArt')
WINDOW.clearProperty('SkinHelper.MovieSet.' + str(i) + '.ClearLogo')
WINDOW.clearProperty('SkinHelper.MovieSet.' + str(i) + '.ClearArt')
WINDOW.clearProperty('SkinHelper.MovieSet.' + str(i) + '.Banner')
WINDOW.clearProperty('SkinHelper.MovieSet.' + str(i) + '.Rating')
WINDOW.clearProperty('SkinHelper.MovieSet.' + str(i) + '.Year')
WINDOW.clearProperty('SkinHelper.MovieSet.' + str(i) + '.DBID')
WINDOW.clearProperty('SkinHelper.MovieSet.' + str(i) + '.Duration')
WINDOW.clearProperty('SkinHelper.MovieSet.' + str(i) + '.Resolution')
WINDOW.clearProperty('SkinHelper.MovieSet.' + str(i) + '.AspectRatio')
WINDOW.clearProperty('SkinHelper.MovieSet.' + str(i) + '.Codec')
WINDOW.clearProperty('SkinHelper.MovieSet.' + str(i) + '.AudioCodec')
WINDOW.clearProperty('SkinHelper.MovieSet.' + str(i) + '.AudioChannels')
WINDOW.clearProperty('SkinHelper.MovieSet.' + str(i) + '.AudioLanguage')
WINDOW.clearProperty('SkinHelper.MovieSet.' + str(i) + '.Subtitle')
for i in range(totalNodes):
if not WINDOW.getProperty('SkinHelper.ListItemGenre.' + str(i)): break
WINDOW.clearProperty('SkinHelper.ListItemGenre.' + str(i))
for i in range(totalNodes):
if not WINDOW.getProperty('SkinHelper.ListItemAudioStreams.%d.AudioCodec' % i): break
WINDOW.clearProperty('SkinHelper.ListItemAudioStreams.%d.Language' % i)
WINDOW.clearProperty('SkinHelper.ListItemAudioStreams.%d.AudioCodec' % i)
WINDOW.clearProperty('SkinHelper.ListItemAudioStreams.%d.AudioChannels' % i)
WINDOW.clearProperty('SkinHelper.ListItemAudioStreams.%d'%i)
for i in range(totalNodes):
if not WINDOW.getProperty('SkinHelper.ExtraFanArt.' + str(i)):
break
WINDOW.clearProperty('SkinHelper.ExtraFanArt.' + str(i))
def resetGlobalWidgetWindowProps(self):
WINDOW.setProperty("widgetreload2", time.strftime("%Y%m%d%H%M%S", time.gmtime()))
def resetPlayerWindowProps(self):
#reset all window props provided by the script...
WINDOW.setProperty("SkinHelper.Player.Music.Banner","")
WINDOW.setProperty("SkinHelper.Player.Music.ClearLogo","")
WINDOW.setProperty("SkinHelper.Player.Music.DiscArt","")
WINDOW.setProperty("SkinHelper.Player.Music.FanArt","")
WINDOW.setProperty("SkinHelper.Player.Music.Thumb","")
WINDOW.setProperty("SkinHelper.Player.Music.ArtistThumb","")
WINDOW.setProperty("SkinHelper.Player.Music.AlbumThumb","")
WINDOW.setProperty("SkinHelper.Player.Music.Info","")
WINDOW.setProperty("SkinHelper.Player.Music.TrackList","")
WINDOW.setProperty("SkinHelper.Player.Music.SongCount","")
WINDOW.setProperty("SkinHelper.Player.Music.albumCount","")
WINDOW.setProperty("SkinHelper.Player.Music.AlbumList","")
WINDOW.setProperty("SkinHelper.Player.Music.ExtraFanArt","")
def setMovieSetDetails(self):
#get movie set details -- thanks to phil65 - used this idea from his skin info script
allProperties = []
if not self.liDbId or not self.liPath: return
if self.exit: return
if self.liPath.startswith("videodb://movies/sets/"):
#try to get from cache first - use checksum compare because moviesets do not get refreshed automatically
checksum = repr(getJSON('VideoLibrary.GetMovieSetDetails', '{"setid": %s, "properties": [ "thumbnail" ], "movies": { "properties": [ "playcount"] }}' % self.liDbId))
cacheStr = self.liLabel+self.liDbId
if self.moviesetCache.get(cacheStr) and self.moviesetCache.get("checksum-" + cacheStr,"") == checksum:
allProperties = self.moviesetCache[cacheStr]
if self.liDbId and not allProperties:
#get values from json api
checksum = getJSON('VideoLibrary.GetMovieSetDetails', '{"setid": %s, "properties": [ "thumbnail" ], "movies": { "properties": [ "playcount"] }}' % self.liDbId)
json_response = getJSON('VideoLibrary.GetMovieSetDetails', '{"setid": %s, "properties": [ "thumbnail" ], "movies": { "properties": [ "rating", "art", "file", "year", "director", "writer", "playcount", "genre" , "thumbnail", "runtime", "studio", "plotoutline", "plot", "country", "streamdetails"], "sort": { "order": "ascending", "method": "year" }} }' % self.liDbId)
if json_response:
count = 0
runtime = 0
unwatchedcount = 0
watchedcount = 0
runtime = 0
runtime_mins = 0
writer = []
director = []
genre = []
country = []
studio = []
years = []
plot = ""
title_list = ""
title_header = "[B]" + str(json_response['limits']['total']) + " " + xbmc.getLocalizedString(20342) + "[/B][CR]"
set_fanart = []
for item in json_response['movies']:
if item["playcount"] == 0:
unwatchedcount += 1
else:
watchedcount += 1
art = item['art']
fanart = art.get('fanart', '')
set_fanart.append(fanart)
allProperties.append( ('SkinHelper.MovieSet.' + str(count) + '.Title',item['label']) )
allProperties.append( ('SkinHelper.MovieSet.' + str(count) + '.Poster',art.get('poster', '')) )
allProperties.append( ('SkinHelper.MovieSet.' + str(count) + '.FanArt',fanart) )
allProperties.append( ('SkinHelper.MovieSet.' + str(count) + '.Landscape',art.get('landscape', '')) )
allProperties.append( ('SkinHelper.MovieSet.' + str(count) + '.DiscArt',art.get('discart', '')) )
allProperties.append( ('SkinHelper.MovieSet.' + str(count) + '.ClearLogo',art.get('clearlogo', '')) )
allProperties.append( ('SkinHelper.MovieSet.' + str(count) + '.ClearArt',art.get('clearart', '')) )
allProperties.append( ('SkinHelper.MovieSet.' + str(count) + '.Banner',art.get('banner', '')) )
allProperties.append( ('SkinHelper.MovieSet.' + str(count) + '.Rating',str(item.get('rating', ''))) )
allProperties.append( ('SkinHelper.MovieSet.' + str(count) + '.Plot',item['plot']) )
allProperties.append( ('SkinHelper.MovieSet.' + str(count) + '.Year',str(item.get('year'))) )
allProperties.append( ('SkinHelper.MovieSet.' + str(count) + '.DBID',str(item.get('movieid'))) )
allProperties.append( ('SkinHelper.MovieSet.' + str(count) + '.Duration',str(item['runtime'] / 60)) )
if item.get('streamdetails',''):
streamdetails = item["streamdetails"]
audiostreams = streamdetails.get('audio',[])
videostreams = streamdetails.get('video',[])
subtitles = streamdetails.get('subtitle',[])
if len(videostreams) > 0:
stream = videostreams[0]
height = stream.get("height","")
width = stream.get("width","")
if height and width:
resolution = ""
if width <= 720 and height <= 480: resolution = "480"
elif width <= 768 and height <= 576: resolution = "576"
elif width <= 960 and height <= 544: resolution = "540"
elif width <= 1280 and height <= 720: resolution = "720"
elif width <= 1920 and height <= 1080: resolution = "1080"
elif width * height >= 6000000: resolution = "4K"
allProperties.append( ('SkinHelper.MovieSet.' + str(count) + '.Resolution',resolution) )
if stream.get("codec",""):
allProperties.append( ('SkinHelper.MovieSet.' + str(count) + '.Codec',str(stream["codec"])) )
if stream.get("aspect",""):
allProperties.append( ('SkinHelper.MovieSet.' + str(count) + '.AspectRatio',str(round(stream["aspect"], 2))) )
if len(audiostreams) > 0:
#grab details of first audio stream
stream = audiostreams[0]
allProperties.append( ('SkinHelper.MovieSet.' + str(count) + '.AudioCodec',stream.get('codec','')) )
allProperties.append( ('SkinHelper.MovieSet.' + str(count) + '.AudioChannels',str(stream.get('channels',''))) )
allProperties.append( ('SkinHelper.MovieSet.' + str(count) + '.AudioLanguage',stream.get('language','')) )
if len(subtitles) > 0:
#grab details of first subtitle
allProperties.append( ('SkinHelper.MovieSet.' + str(count) + '.SubTitle',subtitles[0].get('language','')) )
title_list += item['label'] + " (" + str(item['year']) + ")[CR]"
if item['plotoutline']:
plot += "[B]" + item['label'] + " (" + str(item['year']) + ")[/B][CR]" + item['plotoutline'] + "[CR][CR]"
else:
plot += "[B]" + item['label'] + " (" + str(item['year']) + ")[/B][CR]" + item['plot'] + "[CR][CR]"
runtime += item['runtime']
count += 1
if item.get("writer"):
writer += [w for w in item["writer"] if w and w not in writer]
if item.get("director"):
director += [d for d in item["director"] if d and d not in director]
if item.get("genre"):
genre += [g for g in item["genre"] if g and g not in genre]
if item.get("country"):
country += [c for c in item["country"] if c and c not in country]
if item.get("studio"):
studio += [s for s in item["studio"] if s and s not in studio]
years.append(str(item['year']))
allProperties.append( ('SkinHelper.MovieSet.Plot', plot) )
if json_response['limits']['total'] > 1:
allProperties.append( ('SkinHelper.MovieSet.ExtendedPlot', title_header + title_list + "[CR]" + plot) )
else:
allProperties.append( ('SkinHelper.MovieSet.ExtendedPlot', plot) )
allProperties.append( ('SkinHelper.MovieSet.Title', title_list) )
allProperties.append( ('SkinHelper.MovieSet.Runtime', str(runtime / 60)) )
self.setDuration(str(runtime / 60))
durationString = self.getDurationString(runtime / 60)
if durationString:
allProperties.append( ('SkinHelper.MovieSet.Duration', durationString[2]) )
allProperties.append( ('SkinHelper.MovieSet.Duration.Hours', durationString[0]) )
allProperties.append( ('SkinHelper.MovieSet.Duration.Minutes', durationString[1]) )
allProperties.append( ('SkinHelper.MovieSet.Writer', " / ".join(writer)) )
allProperties.append( ('SkinHelper.MovieSet.Director', " / ".join(director)) )
self.setDirector(" / ".join(director))
allProperties.append( ('SkinHelper.MovieSet.Genre', " / ".join(genre)) )
self.setGenre(" / ".join(genre))
allProperties.append( ('SkinHelper.MovieSet.Country', " / ".join(country)) )
studioString = " / ".join(studio)
allProperties.append( ('SkinHelper.MovieSet.Studio', studioString) )
self.setStudioLogo(studioString)
allProperties.append( ('SkinHelper.MovieSet.Years', " / ".join(years)) )
allProperties.append( ('SkinHelper.MovieSet.Year', years[0] + " - " + years[-1]) )
allProperties.append( ('SkinHelper.MovieSet.Count', str(json_response['limits']['total'])) )
allProperties.append( ('SkinHelper.MovieSet.WatchedCount', str(watchedcount)) )
allProperties.append( ('SkinHelper.MovieSet.UnWatchedCount', str(unwatchedcount)) )
allProperties.append( ('SkinHelper.MovieSet.Extrafanarts', repr(set_fanart)) )
#save to cache
self.moviesetCache[cacheStr] = allProperties
self.moviesetCache["checksum-" + cacheStr] = repr(checksum)
#Process properties
for item in allProperties:
if item[0] == "SkinHelper.MovieSet.Extrafanarts":
if xbmc.getCondVisibility("Skin.HasSetting(SkinHelper.EnableExtraFanart)"):
efaProp = 'EFA_FROMWINDOWPROP_' + cacheStr
WINDOW.setProperty(efaProp, item[1])
WINDOW.setProperty('SkinHelper.ExtraFanArtPath', "plugin://script.skin.helper.service/?action=EXTRAFANART&path=%s" %single_urlencode(try_encode(efaProp)))
else: WINDOW.setProperty(item[0],item[1])
def setContentHeader(self):
WINDOW.clearProperty("SkinHelper.ContentHeader")
itemscount = xbmc.getInfoLabel("Container.NumItems")
if itemscount:
if xbmc.getInfoLabel("Container.ListItemNoWrap(0).Label").startswith("*") or xbmc.getInfoLabel("Container.ListItemNoWrap(1).Label").startswith("*"):
itemscount = int(itemscount) - 1
headerprefix = ""
if self.contentType == "movies":
headerprefix = xbmc.getLocalizedString(36901)
elif self.contentType == "tvshows":
headerprefix = xbmc.getLocalizedString(36903)
elif self.contentType == "seasons":
headerprefix = xbmc.getLocalizedString(36905)
elif self.contentType == "episodes":
headerprefix = xbmc.getLocalizedString(36907)
elif self.contentType == "sets":
headerprefix = xbmc.getLocalizedString(36911)
elif self.contentType == "albums":
headerprefix = xbmc.getLocalizedString(36919)
elif self.contentType == "songs":
headerprefix = xbmc.getLocalizedString(36921)
elif self.contentType == "artists":
headerprefix = xbmc.getLocalizedString(36917)
if headerprefix:
WINDOW.setProperty("SkinHelper.ContentHeader","%s %s" %(itemscount,headerprefix) )
def setAddonName(self):
# set addon name as property
if not xbmc.Player().isPlayingAudio():
if (xbmc.getCondVisibility("Container.Content(plugins) | !IsEmpty(Container.PluginName)")):
AddonName = xbmc.getInfoLabel('Container.PluginName').decode('utf-8')
AddonName = xbmcaddon.Addon(AddonName).getAddonInfo('name')
WINDOW.setProperty("SkinHelper.Player.AddonName", AddonName)
def setGenre(self,genre=""):
if not genre: genre = xbmc.getInfoLabel('%sListItem.Genre' %self.widgetContainerPrefix).decode('utf-8')
genres = []
if "/" in genre:
genres = genre.split(" / ")
else:
genres.append(genre)
WINDOW.setProperty('SkinHelper.ListItemGenres', "[CR]".join(genres))
count = 0
for genre in genres:
WINDOW.setProperty("SkinHelper.ListItemGenre." + str(count),genre)
count +=1
def setDirector(self, director=""):
if not director: director = xbmc.getInfoLabel('%sListItem.Director'%self.widgetContainerPrefix).decode('utf-8')
directors = []
if "/" in director:
directors = director.split(" / ")
else:
directors.append(director)
WINDOW.setProperty('SkinHelper.ListItemDirectors', "[CR]".join(directors))
def setPVRThumbs(self, multiThreaded=False):
if WINDOW.getProperty("artworkcontextmenu"): return
title = self.liTitle
channel = xbmc.getInfoLabel("%sListItem.ChannelName"%self.widgetContainerPrefix).decode('utf-8')
#path = self.liFile
path = self.liPath
genre = xbmc.getInfoLabel("%sListItem.Genre"%self.widgetContainerPrefix).decode('utf-8')
if xbmc.getCondVisibility("%sListItem.IsFolder"%self.widgetContainerPrefix) and not channel and not title:
#assume grouped recordings curFolder
title = self.liLabel
if not xbmc.getCondVisibility("Skin.HasSetting(SkinHelper.EnablePVRThumbs)") or not title:
return
if self.exit: return
cacheStr = title + channel + "SkinHelper.PVR.Artwork"
if self.pvrArtCache.has_key(cacheStr):
artwork = self.pvrArtCache[cacheStr]
else:
if self.contentType == "tvrecordings": type = "recordings"
else: type = "channels"
artwork = artutils.getPVRThumbs(title, channel, type, path, genre)
self.pvrArtCache[cacheStr] = artwork
#return if another listitem was focused in the meanwhile
if multiThreaded and not (title == xbmc.getInfoLabel("ListItem.Title").decode('utf-8') or title == xbmc.getInfoLabel("%sListItem.Title"%self.widgetContainerPrefix).decode('utf-8') or title == xbmc.getInfoLabel("%sListItem.Label"%self.widgetContainerPrefix).decode('utf-8')):
return
#set window props
for key, value in artwork.iteritems():
WINDOW.setProperty("SkinHelper.PVR." + key,value)
def setStudioLogo(self,studio=""):
if not studio: studio = xbmc.getInfoLabel('%sListItem.Studio'%self.widgetContainerPrefix).decode('utf-8')
studios = []
if "/" in studio:
studios = studio.split(" / ")
WINDOW.setProperty("SkinHelper.ListItemStudio", studios[0])
WINDOW.setProperty('SkinHelper.ListItemStudios', "[CR]".join(studios))
else:
studios.append(studio)
WINDOW.setProperty("SkinHelper.ListItemStudio", studio)
WINDOW.setProperty("SkinHelper.ListItemStudios", studio)
studiologo = matchStudioLogo(studio, self.allStudioLogos)
studiologoColor = matchStudioLogo(studio, self.allStudioLogosColor)
WINDOW.setProperty("SkinHelper.ListItemStudioLogo", studiologo)
WINDOW.setProperty("SkinHelper.ListItemStudioLogoColor", studiologoColor)
return studiologo
def getStudioLogos(self):
#fill list with all studio logos
allLogos = {}
allLogosColor = {}
CustomStudioImagesPath = xbmc.getInfoLabel("Skin.String(SkinHelper.CustomStudioImagesPath)").decode('utf-8')
if CustomStudioImagesPath + xbmc.getSkinDir() != self.LastCustomStudioImagesPath:
#only proceed if the custom path or skin has changed...
self.LastCustomStudioImagesPath = CustomStudioImagesPath + xbmc.getSkinDir()
#add the custom path to the list
            if CustomStudioImagesPath:
                #os.sep is a string constant, not a callable - append it directly
                if not (CustomStudioImagesPath.endswith("/") or CustomStudioImagesPath.endswith("\\")):
                    CustomStudioImagesPath = CustomStudioImagesPath + os.sep
                allLogos = listFilesInPath(CustomStudioImagesPath, allLogos)
#add skin provided paths
if xbmcvfs.exists("special://skin/extras/flags/studios/"):
allLogos = listFilesInPath("special://skin/extras/flags/studios/", allLogos)
if xbmcvfs.exists("special://skin/extras/flags/studioscolor/"):
allLogosColor = listFilesInPath("special://skin/extras/flags/studioscolor/",allLogosColor)
#add images provided by the image resource addons
if xbmc.getCondVisibility("System.HasAddon(resource.images.studios.white)"):
allLogos = getResourceAddonFiles("resource.images.studios.white", allLogos)
if xbmc.getCondVisibility("System.HasAddon(resource.images.studios.coloured)"):
allLogosColor = getResourceAddonFiles("resource.images.studios.coloured",allLogosColor)
#assign all found logos in the list
self.allStudioLogos = allLogos
self.allStudioLogosColor = allLogosColor
#also store the logos in window property for access by webservice
WINDOW.setProperty("SkinHelper.allStudioLogos",repr(self.allStudioLogos))
WINDOW.setProperty("SkinHelper.allStudioLogosColor",repr(self.allStudioLogosColor))
def setDuration(self,currentDuration=""):
if xbmc.getCondVisibility("Skin.HasSetting(SkinHelper.DisableHoursDuration)"):
return
if not currentDuration:
currentDuration = xbmc.getInfoLabel("%sListItem.Duration"%self.widgetContainerPrefix)
if ":" in currentDuration:
durLst = currentDuration.split(":")
if len(durLst) == 1:
currentDuration = "0"
elif len(durLst) == 2:
currentDuration = durLst[0]
elif len(durLst) == 3:
currentDuration = str((int(durLst[0])*60) + int(durLst[1]))
# monitor listitem to set duration
if currentDuration:
durationString = self.getDurationString(currentDuration)
if durationString:
WINDOW.setProperty('SkinHelper.ListItemDuration', durationString[2])
WINDOW.setProperty('SkinHelper.ListItemDuration.Hours', durationString[0])
WINDOW.setProperty('SkinHelper.ListItemDuration.Minutes', durationString[1])
def getDurationString(self, duration):
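        #split a duration in minutes into (hours, minutes, "h:mm") strings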
        #treat empty values and "0" (string or int) as no duration
        if not duration or str(duration) == "0":
            return None
try:
full_minutes = int(duration)
minutes = str(full_minutes % 60)
minutes = str(minutes).zfill(2)
hours = str(full_minutes // 60)
durationString = hours + ':' + minutes
except Exception as e:
logMsg("ERROR in getDurationString ! --> " + str(e), 0)
return None
return ( hours, minutes, durationString )
def setMusicPlayerDetails(self):
artwork = {}
artist = ""
title = ""
album = ""
#get the playing item from the player...
json_result = getJSON('Player.GetActivePlayers', '{}')
for item in json_result:
if item.get("type","") == "audio":
json_result = getJSON('Player.GetItem', '{ "playerid": %d, "properties": [ "title","albumid","artist","album","displayartist" ] }' %item.get("playerid"))
if json_result.get("title"):
if json_result.get("artist"):
artist = json_result.get("artist")
if isinstance(artist,list): artist = artist[0]
title = json_result.get("title")
album = json_result.get("album").split(" (")[0]
else:
if not artist:
#fix for internet streams
splitchar = None
if " - " in json_result.get("title"): splitchar = " - "
elif "- " in json_result.get("title"): splitchar = "- "
elif " -" in json_result.get("title"): splitchar = " -"
elif "-" in json_result.get("title"): splitchar = "-"
if splitchar:
artist = json_result.get("title").split(splitchar)[0]
title = json_result.get("title").split(splitchar)[1]
logMsg("setMusicPlayerDetails: " + repr(json_result))
artwork = artutils.getMusicArtwork(artist,album,title)
#merge comment from id3 tag with album info
if artwork.get("info") and xbmc.getInfoLabel("MusicPlayer.Comment"):
artwork["info"] = normalize_string(xbmc.getInfoLabel("MusicPlayer.Comment")).replace('\n', ' ').replace('\r', '').split(" a href")[0] + " - " + artwork["info"]
#set properties
for key, value in artwork.iteritems():
WINDOW.setProperty("SkinHelper.Player.Music." + key,value.encode("utf-8"))
def setMusicDetails(self,multiThreaded=False):
artwork = {}
if WINDOW.getProperty("artworkcontextmenu"): return
artist = xbmc.getInfoLabel("%sListItem.Artist"%self.widgetContainerPrefix).decode('utf-8')
album = xbmc.getInfoLabel("%sListItem.Album"%self.widgetContainerPrefix).decode('utf-8')
title = self.liTitle
label = self.liLabel
artwork = artutils.getMusicArtwork(artist,album,title)
if self.exit: return
#return if another listitem was focused in the meanwhile
if multiThreaded and label != xbmc.getInfoLabel("%sListItem.Label"%self.widgetContainerPrefix).decode('utf-8'):
return
#set properties
for key, value in artwork.iteritems():
WINDOW.setProperty("SkinHelper.Music." + key,value)
def setStreamDetails(self):
streamdetails = {}
if not self.liDbId: return
cacheStr = self.liDbId + self.contentType
if self.streamdetailsCache.get(cacheStr):
#get data from cache
streamdetails = self.streamdetailsCache[cacheStr]
else:
json_result = {}
# get data from json
if "movies" in self.contentType and self.liDbId:
json_result = getJSON('VideoLibrary.GetMovieDetails', '{ "movieid": %d, "properties": [ "title", "streamdetails", "tag" ] }' %int(self.liDbId))
elif self.contentType == "episodes" and self.liDbId:
json_result = getJSON('VideoLibrary.GetEpisodeDetails', '{ "episodeid": %d, "properties": [ "title", "streamdetails" ] }' %int(self.liDbId))
elif self.contentType == "musicvideos" and self.liDbId:
json_result = getJSON('VideoLibrary.GetMusicVideoDetails', '{ "musicvideoid": %d, "properties": [ "title", "streamdetails" ] }' %int(self.liDbId))
if json_result.has_key("streamdetails"):
audio = json_result["streamdetails"]['audio']
subtitles = json_result["streamdetails"]['subtitle']
video = json_result["streamdetails"]['video']
allAudio = []
allAudioStr = []
allSubs = []
allLang = []
count = 0
for item in audio:
codec = item['codec']
channels = item['channels']
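                    #prettify codec names and channel counts for display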
if "ac3" in codec: codec = "Dolby D"
elif "dca" in codec: codec = "DTS"
elif "dts-hd" in codec or "dtshd" in codec: codec = "DTS HD"
if channels == 1: channels = "1.0"
elif channels == 2: channels = "2.0"
elif channels == 3: channels = "2.1"
elif channels == 4: channels = "4.0"
elif channels == 5: channels = "5.0"
elif channels == 6: channels = "5.1"
elif channels == 7: channels = "6.1"
elif channels == 8: channels = "7.1"
elif channels == 9: channels = "8.1"
elif channels == 10: channels = "9.1"
else: channels = str(channels)
language = item.get('language','')
if language and language not in allLang:
allLang.append(language)
streamdetails['SkinHelper.ListItemAudioStreams.%d.Language'% count] = item['language']
streamdetails['SkinHelper.ListItemAudioStreams.%d.AudioCodec'%count] = item['codec']
streamdetails['SkinHelper.ListItemAudioStreams.%d.AudioChannels'%count] = str(item['channels'])
sep = "•".decode('utf-8')
audioStr = '%s %s %s %s %s' %(language,sep,codec,sep,channels)
streamdetails['SkinHelper.ListItemAudioStreams.%d'%count] = audioStr
allAudioStr.append(audioStr)
count += 1
subscount = 0
subscountUnique = 0
for item in subtitles:
subscount += 1
if item['language'] not in allSubs:
allSubs.append(item['language'])
streamdetails['SkinHelper.ListItemSubtitles.%d'%subscountUnique] = item['language']
subscountUnique += 1
streamdetails['SkinHelper.ListItemSubtitles'] = " / ".join(allSubs)
streamdetails['SkinHelper.ListItemSubtitles.Count'] = str(subscount)
streamdetails['SkinHelper.ListItemAllAudioStreams'] = " / ".join(allAudioStr)
streamdetails['SkinHelper.ListItemAudioStreams.Count'] = str(len(allAudioStr))
streamdetails['SkinHelper.ListItemLanguages'] = " / ".join(allLang)
streamdetails['SkinHelper.ListItemLanguages.Count'] = str(len(allLang))
if len(video) > 0:
stream = video[0]
streamdetails['SkinHelper.ListItemVideoHeight'] = str(stream.get("height",""))
streamdetails['SkinHelper.ListItemVideoWidth'] = str(stream.get("width",""))
self.streamdetailsCache[cacheStr] = streamdetails
if json_result.get("tag"):
streamdetails["SkinHelper.ListItemTags"] = " / ".join(json_result["tag"])
if streamdetails:
#set the window properties
for key, value in streamdetails.iteritems():
WINDOW.setProperty(key,value)
def setForcedView(self):
currentForcedView = xbmc.getInfoLabel("Skin.String(SkinHelper.ForcedViews.%s)" %self.contentType)
if xbmc.getCondVisibility("Control.IsVisible(%s) | IsEmpty(Container.Viewmode)" %currentForcedView):
#skip if the view is already visible or if we're not in an actual media window
return
if self.contentType and currentForcedView and currentForcedView != "None" and xbmc.getCondVisibility("Skin.HasSetting(SkinHelper.ForcedViews.Enabled)") and not "pvr://guide" in self.liPath:
WINDOW.setProperty("SkinHelper.ForcedView",currentForcedView)
xbmc.executebuiltin("Container.SetViewMode(%s)" %currentForcedView)
if not xbmc.getCondVisibility("Control.HasFocus(%s)" %currentForcedView):
xbmc.sleep(100)
xbmc.executebuiltin("Container.SetViewMode(%s)" %currentForcedView)
xbmc.executebuiltin("SetFocus(%s)" %currentForcedView)
else:
WINDOW.clearProperty("SkinHelper.ForcedView")
def checkExtraFanArt(self):
efaPath = None
efaFound = False
extraFanArtfiles = []
filename = self.liFile
if xbmc.getCondVisibility("Window.IsActive(movieinformation) | !Skin.HasSetting(SkinHelper.EnableExtraFanart)"):
return
cachePath = self.liPath
if "plugin.video.emby.movies" in self.liPath or "plugin.video.emby.musicvideos" in self.liPath:
cachePath = filename
#get the item from cache first
if self.extraFanartCache.has_key(cachePath):
if self.extraFanartCache[cachePath][0] == "None":
return
else:
WINDOW.setProperty("SkinHelper.ExtraFanArtPath",self.extraFanartCache[cachePath][0])
count = 0
for file in self.extraFanartCache[cachePath][1]:
WINDOW.setProperty("SkinHelper.ExtraFanArt." + str(count),file)
count +=1
return
#support for emby addon
if "plugin.video.emby" in self.liPath:
efaPath = "plugin://plugin.video.emby/extrafanart?path=" + cachePath
efaFound = True
#lookup the extrafanart in the media location
elif (self.liPath != None and (self.contentType in ["movies","seasons","episodes","tvshows","setmovies"] ) and not "videodb:" in self.liPath):
# do not set extra fanart for virtuals
if "plugin://" in self.liPath or "addon://" in self.liPath or "sources" in self.liPath:
                #store in the same [path, files] shape that the cache reader expects
                self.extraFanartCache[cachePath] = ["None", []]
else:
if "/" in self.liPath: splitchar = "/"
else: splitchar = "\\"
if xbmcvfs.exists(self.liPath + "extrafanart"+splitchar):
efaPath = self.liPath + "extrafanart"+splitchar
else:
pPath = self.liPath.rpartition(splitchar)[0]
pPath = pPath.rpartition(splitchar)[0]
if xbmcvfs.exists(pPath + splitchar + "extrafanart"+splitchar):
efaPath = pPath + splitchar + "extrafanart" + splitchar
                #efaPath can still be None at this point, so guard before the vfs lookup
                if efaPath and xbmcvfs.exists(efaPath):
dirs, files = xbmcvfs.listdir(efaPath)
count = 0
for file in files:
if file.lower().endswith(".jpg"):
efaFound = True
WINDOW.setProperty("SkinHelper.ExtraFanArt." + str(count),efaPath+file)
extraFanArtfiles.append(efaPath+file)
count +=1
if (efaPath != None and efaFound == True):
WINDOW.setProperty("SkinHelper.ExtraFanArtPath",efaPath)
self.extraFanartCache[cachePath] = [efaPath, extraFanArtfiles]
else:
self.extraFanartCache[cachePath] = ["None",[]]
def setAnimatedPoster(self,multiThreaded=False,liImdb=""):
#check animated posters
if not liImdb: liImdb = self.liImdb
if not xbmc.getCondVisibility("Skin.HasSetting(SkinHelper.EnableAnimatedPosters)") or not liImdb:
return
if WINDOW.getProperty("artworkcontextmenu"): return
if (self.contentType == "movies" or self.contentType=="setmovies"):
for type in ["poster","fanart"]:
image = artutils.getAnimatedArtwork(liImdb,type,self.liDbId)
#return if another listitem was focused in the meanwhile
if multiThreaded and not liImdb == self.liImdb:
return
if image != "None":
WINDOW.setProperty("SkinHelper.Animated%s"%type,image)
def setOmdbInfo(self,multiThreaded=False,liImdb=""):
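        #fetch Rotten Tomatoes/MetaCritic/IMDB details from the OMDB API, cached per title/imdb id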
result = {}
if not liImdb:
liImdb = self.liImdb
if not liImdb:
liImdb = self.liTitle
if not self.contentType in ["movies","setmovies","tvshows"]:
return
if self.omdbinfocache.get(liImdb):
#get data from cache
result = self.omdbinfocache[liImdb]
elif not WINDOW.getProperty("SkinHelper.DisableInternetLookups"):
#get info from OMDB
if not liImdb.startswith("tt"):
#get info by title and year
year = xbmc.getInfoLabel("%sListItem.Year"%self.widgetContainerPrefix).decode('utf-8')
title = self.liTitle
if self.contentType == "tvshows":
type = "series"
else: type = "movie"
url = 'http://www.omdbapi.com/?t=%s&y=%s&type=%s&plot=short&tomatoes=true&r=json' %(title,year,type)
else:
url = 'http://www.omdbapi.com/?i=%s&plot=short&tomatoes=true&r=json' %liImdb
res = requests.get(url)
omdbresult = json.loads(res.content.decode('utf-8','replace'))
if omdbresult.get("Response","") == "True":
#convert values from omdb to our window props
for key, value in omdbresult.iteritems():
if value and value != "N/A":
if key == "tomatoRating": result["SkinHelper.RottenTomatoesRating"] = value
elif key == "tomatoMeter": result["SkinHelper.RottenTomatoesMeter"] = value
elif key == "tomatoFresh": result["SkinHelper.RottenTomatoesFresh"] = value
elif key == "tomatoReviews": result["SkinHelper.RottenTomatoesReviews"] = value
elif key == "tomatoRotten": result["SkinHelper.RottenTomatoesRotten"] = value
elif key == "tomatoImage": result["SkinHelper.RottenTomatoesImage"] = value
elif key == "tomatoConsensus": result["SkinHelper.RottenTomatoesConsensus"] = value
elif key == "Awards": result["SkinHelper.RottenTomatoesAwards"] = value
elif key == "BoxOffice": result["SkinHelper.RottenTomatoesBoxOffice"] = value
elif key == "DVD": result["SkinHelper.RottenTomatoesDVDRelease"] = value
elif key == "tomatoUserMeter": result["SkinHelper.RottenTomatoesAudienceMeter"] = value
elif key == "tomatoUserRating": result["SkinHelper.RottenTomatoesAudienceRating"] = value
elif key == "tomatoUserReviews": result["SkinHelper.RottenTomatoesAudienceReviews"] = value
elif key == "Metascore": result["SkinHelper.MetaCritic.Rating"] = value
elif key == "imdbRating": result["SkinHelper.IMDB.Rating"] = value
elif key == "imdbVotes": result["SkinHelper.IMDB.Votes"] = value
elif key == "Rated": result["SkinHelper.IMDB.MPAA"] = value
elif key == "Runtime": result["SkinHelper.IMDB.Runtime"] = value
#imdb top250
result["SkinHelper.IMDB.Top250"] = self.imdb_top250.get(omdbresult["imdbID"],"")
#store to cache
self.omdbinfocache[liImdb] = result
#return if another listitem was focused in the meanwhile
if multiThreaded and not (liImdb == xbmc.getInfoLabel("%sListItem.IMDBNumber"%self.widgetContainerPrefix).decode('utf-8') or liImdb == xbmc.getInfoLabel("%sListItem.Property(IMDBNumber)"%self.widgetContainerPrefix).decode('utf-8')):
return
#set properties
for key, value in result.iteritems():
WINDOW.setProperty(key,value)
def setTmdbInfo(self,multiThreaded=False,liImdb=""):
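        #fetch budget, revenue, tagline etc. from The Movie Database, cached per imdb id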
result = {}
if not liImdb: liImdb = self.liImdb
if (self.contentType == "movies" or self.contentType=="setmovies") and liImdb:
if self.tmdbinfocache.get(liImdb):
#get data from cache
result = self.tmdbinfocache[liImdb]
elif not WINDOW.getProperty("SkinHelper.DisableInternetLookups"):
logMsg("Retrieving TMDB info for ImdbId--> %s - contentType: %s" %(liImdb,self.contentType))
#get info from TMDB
url = 'http://api.themoviedb.org/3/find/%s?external_source=imdb_id&api_key=%s' %(liImdb,artutils.tmdb_apiKey)
response = requests.get(url)
data = json.loads(response.content.decode('utf-8','replace'))
if data and data.get("movie_results"):
data = data.get("movie_results")
if len(data) == 1:
url = 'http://api.themoviedb.org/3/movie/%s?api_key=%s' %(data[0].get("id"),artutils.tmdb_apiKey)
response = requests.get(url)
data = json.loads(response.content.decode('utf-8','replace'))
if data.get("budget") and data.get("budget") > 0:
result["budget"] = str(data.get("budget",""))
mln = float(data.get("budget")) / 1000000
mln = "%.1f" % mln
result["budget.formatted"] = "$ %s mln." %mln.replace(".0","").replace(".",",")
result["budget.mln"] = mln
if data.get("revenue","") and data.get("revenue") > 0:
result["revenue"] = str(data.get("revenue",""))
mln = float(data.get("revenue")) / 1000000
mln = "%.1f" % mln
result["revenue.formatted"] = "$ %s mln." %mln.replace(".0","").replace(".",",")
result["revenue.mln"] = mln
result["tagline"] = data.get("tagline","")
result["homepage"] = data.get("homepage","")
result["status"] = data.get("status","")
result["popularity"] = str(data.get("popularity",""))
#save to cache
            #cache under the same key used for lookups (liImdb, not self.liImdb)
            if result: self.tmdbinfocache[liImdb] = result
#return if another listitem was focused in the meanwhile
if multiThreaded and not (liImdb == xbmc.getInfoLabel("%sListItem.IMDBNumber"%self.widgetContainerPrefix).decode('utf-8') or liImdb == xbmc.getInfoLabel("%sListItem.Property(IMDBNumber)"%self.widgetContainerPrefix).decode('utf-8')):
return
#set properties
for key, value in result.iteritems():
WINDOW.setProperty("SkinHelper.TMDB." + key,value)
def setAddonDetails(self, multiThreaded=False):
#try to lookup additional artwork and properties for plugin content
preftype = self.contentType
title = self.liTitle
year = xbmc.getInfoLabel("%sListItem.Year"%self.widgetContainerPrefix).decode("utf8")
if not self.contentType in ["movies", "tvshows", "seasons", "episodes", "setmovies"] or not title or not year or not xbmc.getCondVisibility("Skin.HasSetting(SkinHelper.EnableAddonsLookups)"):
return
if self.exit: return
if xbmc.getCondVisibility("!IsEmpty(%sListItem.TvShowTitle)" %self.widgetContainerPrefix):
preftype = "tvshows"
title = xbmc.getInfoLabel("%sListItem.TvShowTitle"%self.widgetContainerPrefix).decode("utf8")
cacheStr = title + preftype + "SkinHelper.PVR.Artwork"
if self.pvrArtCache.has_key(cacheStr):
artwork = self.pvrArtCache[cacheStr]
else:
artwork = artutils.getAddonArtwork(title,year,preftype)
self.pvrArtCache[cacheStr] = artwork
#return if another listitem was focused in the meanwhile
if multiThreaded and not (title == xbmc.getInfoLabel("%sListItem.Title"%self.widgetContainerPrefix).decode('utf-8') or title == xbmc.getInfoLabel("%sListItem.TvShowTitle"%self.widgetContainerPrefix).decode("utf8")):
return
#set window props
for key, value in artwork.iteritems():
WINDOW.setProperty("SkinHelper.PVR." + key,value)
#set extended movie details
if (self.contentType == "movies" or self.contentType == "setmovies") and artwork.get("imdb_id"):
self.setTmdbInfo(False,artwork.get("imdb_id"))
self.setAnimatedPoster(False,artwork.get("imdb_id"))
            self.setOmdbInfo(False, artwork.get("imdb_id"))
| Lunatixz/script.skin.helper.service | resources/lib/ListItemMonitor.py | Python | gpl-2.0 | 70,988 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'SubtitleLanguage.subtitles_fetched_count'
db.delete_column('videos_subtitlelanguage', 'subtitles_fetched_count')
# Deleting field 'Video.widget_views_count'
db.delete_column('videos_video', 'widget_views_count')
# Deleting field 'Video.subtitles_fetched_count'
db.delete_column('videos_video', 'subtitles_fetched_count')
def backwards(self, orm):
# Adding field 'SubtitleLanguage.subtitles_fetched_count'
db.add_column('videos_subtitlelanguage', 'subtitles_fetched_count', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
# Adding field 'Video.widget_views_count'
db.add_column('videos_video', 'widget_views_count', self.gf('django.db.models.fields.IntegerField')(default=0, db_index=True), keep_default=False)
# Adding field 'Video.subtitles_fetched_count'
db.add_column('videos_video', 'subtitles_fetched_count', self.gf('django.db.models.fields.IntegerField')(default=0, db_index=True), keep_default=False)
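    # frozen ORM definitions generated by South, describing the schema as of this migration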
models = {
'accountlinker.thirdpartyaccount': {
'Meta': {'unique_together': "(('type', 'username'),)", 'object_name': 'ThirdPartyAccount'},
'full_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'oauth_access_token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'oauth_refresh_token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'auth.customuser': {
'Meta': {'object_name': 'CustomUser', '_ormbases': ['auth.User']},
'autoplay_preferences': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'award_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'can_send_messages': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '63', 'blank': 'True'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_partner': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'notify_by_email': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'notify_by_message': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Partner']", 'null': 'True', 'blank': 'True'}),
'picture': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'max_length': '100', 'blank': 'True'}),
'preferred_language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'third_party_accounts': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'users'", 'symmetrical': 'False', 'to': "orm['accountlinker.ThirdPartyAccount']"}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'valid_email': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 8, 23, 9, 33, 41, 505603)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 8, 23, 9, 33, 41, 505501)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'comments.comment': {
'Meta': {'object_name': 'Comment'},
'content': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'reply_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['comments.Comment']", 'null': 'True', 'blank': 'True'}),
'submit_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'subtitles.subtitlelanguage': {
'Meta': {'unique_together': "[('video', 'language_code')]", 'object_name': 'SubtitleLanguage'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'new_followed_languages'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'official_signoff_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'pending_signoff_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'pending_signoff_expired_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'pending_signoff_unexpired_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'subtitles_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'unofficial_signoff_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'newsubtitlelanguage_set'", 'to': "orm['videos.Video']"}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'writelocked_newlanguages'", 'null': 'True', 'to': "orm['auth.CustomUser']"}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'subtitles.subtitleversion': {
'Meta': {'unique_together': "[('video', 'subtitle_language', 'version_number'), ('video', 'language_code', 'version_number')]", 'object_name': 'SubtitleVersion'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'newsubtitleversion_set'", 'to': "orm['auth.CustomUser']"}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'meta_1_content': ('apps.videos.metadata.MetadataContentField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'meta_2_content': ('apps.videos.metadata.MetadataContentField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'meta_3_content': ('apps.videos.metadata.MetadataContentField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'origin': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'parents': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['subtitles.SubtitleVersion']", 'symmetrical': 'False', 'blank': 'True'}),
'rollback_of_version_number': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'serialized_lineage': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'serialized_subtitles': ('django.db.models.fields.TextField', [], {}),
'subtitle_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'subtitle_language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['subtitles.SubtitleLanguage']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'version_number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'newsubtitleversion_set'", 'to': "orm['videos.Video']"}),
'visibility': ('django.db.models.fields.CharField', [], {'default': "'public'", 'max_length': '10'}),
'visibility_override': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10', 'blank': 'True'})
},
'teams.application': {
'Meta': {'unique_together': "(('team', 'user', 'status'),)", 'object_name': 'Application'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'applications'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_applications'", 'to': "orm['auth.CustomUser']"})
},
'teams.partner': {
'Meta': {'object_name': 'Partner'},
'admins': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'managed_partners'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.CustomUser']"}),
'can_request_paid_captions': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'teams.project': {
'Meta': {'unique_together': "(('team', 'name'), ('team', 'slug'))", 'object_name': 'Project'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'guidelines': ('django.db.models.fields.TextField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'workflow_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'teams.team': {
'Meta': {'object_name': 'Team'},
'applicants': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'applicated_teams'", 'symmetrical': 'False', 'through': "orm['teams.Application']", 'to': "orm['auth.CustomUser']"}),
'application_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'auth_provider_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '24', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'header_html_text': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'highlight': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'last_notification_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'logo': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'autocrop': True}", 'max_length': '100', 'blank': 'True'}),
'max_tasks_per_member': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'membership_policy': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'page_content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'teams'", 'null': 'True', 'to': "orm['teams.Partner']"}),
'points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'projects_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'subtitle_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'task_assign_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'task_expiration': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'third_party_accounts': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'to': "orm['accountlinker.ThirdPartyAccount']"}),
'translate_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'through': "orm['teams.TeamMember']", 'to': "orm['auth.CustomUser']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'intro_for_teams'", 'null': 'True', 'to': "orm['videos.Video']"}),
'video_policy': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'through': "orm['teams.TeamVideo']", 'symmetrical': 'False'}),
'workflow_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'teams.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'default': "'contributor'", 'max_length': '16', 'db_index': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_members'", 'to': "orm['auth.CustomUser']"})
},
'teams.teamvideo': {
'Meta': {'unique_together': "(('team', 'video'),)", 'object_name': 'TeamVideo'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'all_languages': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'partner_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Project']"}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'thumbnail': ('utils.amazon.fields.S3EnabledImageField', [], {'max_length': '100', 'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'null': 'True', 'thumb_sizes': '((290, 165), (120, 90))', 'blank': 'True'}),
'video': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['videos.Video']", 'unique': 'True'})
},
'videos.action': {
'Meta': {'object_name': 'Action'},
'action_type': ('django.db.models.fields.IntegerField', [], {}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['comments.Comment']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']", 'null': 'True', 'blank': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.TeamMember']", 'null': 'True', 'blank': 'True'}),
'new_language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['subtitles.SubtitleLanguage']", 'null': 'True', 'blank': 'True'}),
'new_video_title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']", 'null': 'True', 'blank': 'True'})
},
'videos.subtitle': {
'Meta': {'unique_together': "(('version', 'subtitle_id'),)", 'object_name': 'Subtitle'},
'end_time': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'db_column': "'end_time_ms'"}),
'end_time_seconds': ('django.db.models.fields.FloatField', [], {'null': 'True', 'db_column': "'end_time'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_of_paragraph': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'start_time': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'db_column': "'start_time_ms'"}),
'start_time_seconds': ('django.db.models.fields.FloatField', [], {'null': 'True', 'db_column': "'start_time'"}),
'subtitle_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'subtitle_order': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'subtitle_text': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleVersion']", 'null': 'True'})
},
'videos.subtitlelanguage': {
'Meta': {'unique_together': "(('video', 'language', 'standard_language'),)", 'object_name': 'SubtitleLanguage'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_languages'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'had_version': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'has_version': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_original': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'needs_sync': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'new_subtitle_language': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'old_subtitle_version'", 'null': 'True', 'to': "orm['subtitles.SubtitleLanguage']"}),
'percent_done': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'standard_language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']", 'null': 'True', 'blank': 'True'}),
'subtitle_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'videos.subtitlemetadata': {
'Meta': {'object_name': 'SubtitleMetadata'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.PositiveIntegerField', [], {}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'subtitle': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Subtitle']"})
},
'videos.subtitleversion': {
'Meta': {'unique_together': "(('language', 'version_no'),)", 'object_name': 'SubtitleVersion'},
'datetime_started': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'forked_from': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleVersion']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']"}),
'moderation_status': ('django.db.models.fields.CharField', [], {'default': "'not__under_moderation'", 'max_length': '32', 'db_index': 'True'}),
'needs_sync': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'new_subtitle_version': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'old_subtitle_version'", 'unique': 'True', 'null': 'True', 'to': "orm['subtitles.SubtitleVersion']"}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'notification_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'result_of_rollback': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'text_change': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'time_change': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'version_no': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'videos.subtitleversionmetadata': {
'Meta': {'unique_together': "(('key', 'subtitle_version'),)", 'object_name': 'SubtitleVersionMetadata'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.PositiveIntegerField', [], {}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'subtitle_version': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'metadata'", 'to': "orm['videos.SubtitleVersion']"})
},
'videos.usertestresult': {
'Meta': {'object_name': 'UserTestResult'},
'browser': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'get_updates': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'task1': ('django.db.models.fields.TextField', [], {}),
'task2': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'task3': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'videos.video': {
'Meta': {'object_name': 'Video'},
'allow_community_edits': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'allow_video_urls_edit': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'complete_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'edited': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'featured': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_videos'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'languages_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'meta_1_content': ('videos.metadata.MetadataContentField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'meta_1_type': ('videos.metadata.MetadataTypeField', [], {'null': 'True', 'blank': 'True'}),
'meta_2_content': ('videos.metadata.MetadataContentField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'meta_2_type': ('videos.metadata.MetadataTypeField', [], {'null': 'True', 'blank': 'True'}),
'meta_3_content': ('videos.metadata.MetadataContentField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'meta_3_type': ('videos.metadata.MetadataTypeField', [], {'null': 'True', 'blank': 'True'}),
'moderated_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'moderating'", 'null': 'True', 'to': "orm['teams.Team']"}),
'primary_audio_language_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '16', 'blank': 'True'}),
's3_thumbnail': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'max_length': '100', 'thumb_sizes': '((290, 165), (120, 90))', 'blank': 'True'}),
'small_thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'video_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'was_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'writelock_owners'", 'null': 'True', 'to': "orm['auth.CustomUser']"}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'videos.videofeed': {
'Meta': {'object_name': 'VideoFeed'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'})
},
'videos.videometadata': {
'Meta': {'object_name': 'VideoMetadata'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.PositiveIntegerField', [], {}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
},
'videos.videourl': {
'Meta': {'object_name': 'VideoUrl'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'original': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'owner_username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'primary': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"}),
'videoid': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
}
}
complete_apps = ['videos']
| ujdhesa/unisubs | apps/videos/migrations/0159_drop_old_count_columns.py | Python | agpl-3.0 | 37,934 |
__author__ = 'Keith Kikta'
__copyright__ = "Copyright 2015, EPM Junkie"
__license__ = "BSD"
__version__ = "1.0"
__maintainer__ = "Keith Kikta"
__email__ = "[email protected]"
__status__ = "Alpha"
import sys
import re
from getopt import getopt, GetoptError
def main(argv):
source = ''
maps = ''
delimiter = "\t"
stats = False
opts = None
    try:
        opts, args = getopt(argv, "hf:m:d:", ["file=", "map=", "delim="])
    except GetoptError as e:
        # Silently passing here would leave opts as None and crash the loop below.
        display_help(True, e)
for opt, arg in opts:
if opt == '-h': # display help
display_help()
sys.exit()
elif opt in ("-f", "--file"):
source = arg
elif opt in ("-m", "--map"):
maps = arg
elif opt in ("-d", "--delim"):
delimiter = arg
if source and maps:
performreplace(source, maps, delimiter)
else:
display_help()
def performreplace(source, maps, delimiter="\t"):
    mapping = buildmap(maps, delimiter)
    rep = dict((re.escape(k), v) for k, v in mapping.iteritems())
    pat = re.compile("|".join(rep.keys()))
    with open(source, 'r') as f:
        text = f.read()
    text = pat.sub(lambda m: rep[re.escape(m.group(0))], text)
    print text
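# The compiled alternation above applies every mapping in a single pass, so a
# replacement value can never itself be re-matched by a later key. A minimal
# sketch of the same idea (hypothetical mapping, independent of any input file):
#   rep = {re.escape('foo'): 'bar', re.escape('bar'): 'baz'}
#   pat = re.compile("|".join(rep.keys()))
#   pat.sub(lambda m: rep[re.escape(m.group(0))], 'foo bar')  # -> 'bar baz'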
def buildmap(mapfile, delimiter):
    maps = {}
    with open(mapfile, 'r') as f:
for item in f:
items = item.split(delimiter)
if not items[0] in maps:
maps[items[0]] = items[1].strip()
else:
raise Exception(items[1], "Mapping already exists for " + items[0] + ":" + maps[items[0]])
return maps
def display_help(error=False, exception=''):
print '''Bulk Replace
Usage: python bulkreplace.py -f <source file> -m <mapping file>
-f, --file Source file (required)
-m, --map Mapping file for replacements (required)
-d, --delim Delimiter used to separate source and target in map
Flags:
-h This Help
'''
if exception:
print exception
if error:
sys.exit(2)
else:
sys.exit()
if __name__ == '__main__':
#performreplace(r'test-data\replace-source.txt', r'test-data\replace-maps.txt')
main(sys.argv[1:]) | newbish/pyEpmTools | bulkreplace.py | Python | bsd-2-clause | 2,260 |
import matplotlib.pyplot as plt
class PlotMP(object):
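    # NOTE: the attributes below are class-level, so they are shared by every
    # instance until reassigned; the mutable ones (allTextObj, dataForPlot)
    # would be shared state if more than one PlotMP object were created.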
scripName="Default Scrip"
maxX=0
minX=0
maxY=0
minY=0
currentDay=0
ax=None
allTextObj=[]
dataForPlot={}
def __init__(self,scripName,currentDay,maxX,minX,maxY,minY):
self.maxX=maxX
self.minX=minX
self.maxY=maxY
self.minY=minY
self.scripName=scripName
self.currentDay=currentDay
self.plotInit(scripName,minX, maxX, minY, maxY)
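    # Usage sketch (hypothetical values; assumes an interactive matplotlib
    # backend is available):
    #   mp = PlotMP("NIFTY", currentDay=5, maxX=30, minX=0, maxY=120, minY=80)
    #   mp.udpateDataForPlot({101: ['A', 'B'], 102: ['C']})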
def flushAllTextObjects(self):
for each in self.allTextObj:
each.remove()
self.allTextObj=[]
def udpateDataForPlot(self,data):
self.dataForPlot=data
self.plot(data)
def plot(self,timeFrameToAlphabetListSorted):
for price in timeFrameToAlphabetListSorted:
alphabetList=timeFrameToAlphabetListSorted[price]
alphabet = r' '.join(alphabetList)
plt.yticks(range(self.minY, self.maxY))
plt.xticks(range(self.minX, self.maxX))
            txt = self.ax.text(self.currentDay, price, alphabet, fontsize=10)
            self.allTextObj.append(txt)  # track it so flushAllTextObjects() can remove it
mng = plt.get_current_fig_manager()
mng.resize(*mng.window.maxsize())
totalSizeOfDictionary=len(timeFrameToAlphabetListSorted)
if totalSizeOfDictionary >1:
minVal=timeFrameToAlphabetListSorted.keys()[0]
maxVal=timeFrameToAlphabetListSorted.keys()[totalSizeOfDictionary-1]
plt.axhspan(ymin=minVal-1,ymax=maxVal,xmin=0, xmax=1,facecolor='0.5', alpha=0.5)
plt.draw()
plt.pause(0.001)
def plotInit(self, scripName, minX, maxX, minY, maxY):
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_autoscale_on(True) # enable autoscale
ax.autoscale_view(True, True, True)
ax.set_title(scripName)
ax.set_xlabel('Days')
ax.set_ylabel('Price')
ax.axis([minX, maxX, minY, maxY])
self.ax = ax | gullyy/auctionTheory | auctionTheory/src/graph/PlotMP.py | Python | mit | 1,983 |
from app.notify_client import _attach_current_user, NotifyAdminAPIClient
from app.notify_client.models import InvitedUser
class InviteApiClient(NotifyAdminAPIClient):
def __init__(self):
super().__init__("a" * 73, "b")
def init_app(self, app):
self.base_url = app.config['API_HOST_NAME']
self.admin_url = app.config['ADMIN_BASE_URL']
self.service_id = app.config['ADMIN_CLIENT_USER_NAME']
self.api_key = app.config['ADMIN_CLIENT_SECRET']
def create_invite(self, invite_from_id, service_id, email_address, permissions, auth_type):
data = {
'service': str(service_id),
'email_address': email_address,
'from_user': invite_from_id,
'permissions': permissions,
'auth_type': auth_type,
'invite_link_host': self.admin_url,
}
data = _attach_current_user(data)
resp = self.post(url='/service/{}/invite'.format(service_id), data=data)
return InvitedUser(**resp['data'])
def get_invites_for_service(self, service_id):
endpoint = '/service/{}/invite'.format(service_id)
resp = self.get(endpoint)
invites = resp['data']
invited_users = self._get_invited_users(invites)
return invited_users
def check_token(self, token):
resp = self.get(url='/invite/{}'.format(token))
return InvitedUser(**resp['data'])
def cancel_invited_user(self, service_id, invited_user_id):
data = {'status': 'cancelled'}
data = _attach_current_user(data)
self.post(url='/service/{0}/invite/{1}'.format(service_id, invited_user_id),
data=data)
def accept_invite(self, service_id, invited_user_id):
data = {'status': 'accepted'}
self.post(url='/service/{0}/invite/{1}'.format(service_id, invited_user_id),
data=data)
def _get_invited_users(self, invites):
invited_users = []
for invite in invites:
invited_user = InvitedUser(**invite)
invited_users.append(invited_user)
return invited_users
| gov-cjwaszczuk/notifications-admin | app/notify_client/invite_api_client.py | Python | mit | 2,123 |
ICON_PATH="/usr/share/pixmaps/xbmc/"
| samnazarko/osmc | package/mediacenter-eventclients-common-osmc/files/usr/share/pyshared/xbmc/defs.py | Python | gpl-2.0 | 37 |
SETTINGS = {
'ROOT_DIR': '',
} | weidwonder/OwnPyProfiler | own_py_profiler/settings.py | Python | mit | 34 |
#
# froide documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 15 21:11:35 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.viewcode",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "froide"
copyright = "2018, Stefan Wehrmeyer, Open Knowledge Foundation Deutschland"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "5.0"
# The full version, including alpha/beta/rc tags.
release = "5.0.0-alpha"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
sys.path.append(os.path.abspath("_themes"))
html_theme_path = ["_themes"]
_html_theme = "default"
if os.path.exists(os.path.join(os.path.abspath("_themes"), _html_theme)):
html_theme = _html_theme
html_theme_options = {
"logo_icon": "froide.png",
"show_okfn_logo": False,
"show_version": True,
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "favicon.png"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "froidedoc"
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index", "froide.tex", "froide Documentation", "Stefan Wehrmeyer", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "froide", "froide Documentation", ["Stefan Wehrmeyer"], 1)]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"http://docs.python.org/": None}
| fin/froide | docs/conf.py | Python | mit | 7,616 |
# -*- coding=utf-8 -*-
import sys
import time
import logging
import os
sys.path.append(os.getcwd())
logging.basicConfig()
from hammer.sqlhelper import SqlHelper
db_config = {
'host': 'localhost',
'port': 3306,
'user': 'root',
'password': '123456',
'db': 'test',
}
def test_create_table():
command = '''
CREATE TABLE `test_test` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`name` varchar(10) DEFAULT NULL,
`age` int(11) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=34 DEFAULT CHARSET=utf8;
'''
sql.execute(command, True)
def test_select():
    # TODO: select test not implemented yet
    command = ''''''
def test_insert():
datas = [
{
'name': "a'b'c",
'age': 1,
'date': None,
},
{
'name': 'a"b"c',
'age': 1,
'date': None,
},
{
'name': 'a"b";\'c',
'age': 1,
'date': None,
},
{
'name': "a\"blll\";\'c",
'age': 1,
'date': '2018',
},
]
sql.insert_datas(datas, table_name = 'test')
def test_update():
datas = [
{
'id': 1,
'name': "a'b'c",
'age': 2,
'date': None,
},
{
'id': 2,
'name': 'a"b"c',
'age': 2,
'date': None,
},
{
'id': 3,
'name': 'a"b";\'c',
'age': 2,
'date': None,
},
{
'id': 4,
'name': "a\"blll\";\'c",
'age': 2,
'date': '2018-01-02',
},
]
sql.update_datas(datas, table_name = 'test')
def test_is_exists():
print(sql.is_exists('testdfads'))
def test_check_table_exists():
print(sql.check_table_exists('test', db_name = 'tesdt'))
if __name__ == '__main__':
sql = SqlHelper(**db_config)
# test_insert()
# test_update()
# test_is_exists()
# test_check_table_exists()
datas = []
for i in range(1, 3):
data = {
'id': i,
'name': "vvv",
'age': None,
'date': None,
}
datas.append(data)
print(datas)
print(len(datas))
start = time.time()
# sql.insert_datas(datas, table_name = 'test')
sql.update_datas(datas, table_name = 'test', update_keys = ['name', 'age'])
print(time.time() - start)
| awolfly9/hammer | test/test.py | Python | mit | 2,505 |
import os
import sys
import time
from magnolia.utility import *
from magnolia.utility import LOG as L
from magnolia.script.kancolle import testcase_normal
class TestCase(testcase_normal.TestCase_Normal):
def __init__(self, *args, **kwargs):
super(TestCase, self).__init__(*args, **kwargs)
@classmethod
def setUpClass(cls):
L.info("*** Start TestCase : %s *** " % __file__)
L.info("*** Debug Flag : %s ***" % str(cls.get("args.debug")))
def test_1(self):
L.info("*** Leveling : Attack 3-2 ***")
try:
self.minicap_start(); time.sleep(2)
self.assertTrue(self.initialize(self.get("leveling.composition")), "Can't Login & Check Start.")
while self.expedition_result(): time.sleep(1)
self.assertTrue(self.attack(self.get("leveling.fleet"), "14"), "Can't Start Attack 3-2.")
self.assertTrue(self.battle_all_stage("1", withdrawal=True), "Can't Finish Attack 3-2.")
while self.expedition_result(): time.sleep(1)
self.assertTrue(self.supply_and_docking(self.get("leveling.fleet")), "Can't Supply or Docking.")
self.assertTrue(self.home(), "Can't Return Home.")
while self.expedition_result(): time.sleep(1)
            self.minicap_finish(); time.sleep(2)
except Exception as e:
self.minicap_finish(); time.sleep(2)
self.fail(str(e))
@classmethod
def tearDownClass(cls):
L.info("*** End TestCase : %s *** " % __file__)
| setsulla/stir | project/magnolia/script/kancolle/leveling.py | Python | mit | 1,534 |
#!/usr/bin/python3
import sys
import os
import smtplib
import email
import markdown
import yaml
import jinja2
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from serverboards_aio import print
import serverboards_aio as serverboards
email_utils = email.utils
settings = {}
@serverboards.cache_ttl(600)
def get_template(filename):
with open(filename, 'rt') as fd:
return jinja2.Template(fd.read())
async def render_template(filename, context):
template = await get_template(filename)
return template.render(context)
@serverboards.rpc_method
async def base_url():
base_url_ = "http://localhost:8080"
try:
settings = await serverboards.rpc.call(
"settings.get", "serverboards.core.settings/base")
if settings:
base_url_ = settings["base_url"]
while base_url_.endswith("/"):
base_url_ = base_url_[0:-1]
    except Exception as e:
        serverboards.log_traceback(e)
return base_url_
@serverboards.rpc_method
async def send_email(user=None, config=None, message=None, **extra):
if not settings:
await update_settings()
if not settings:
await serverboards.warning(
"Email not properly configured. Not sending emails: ", message["subject"])
return False
_to = config and config.get("email") or user["email"]
extra = {k: v
for k, v in message["extra"].items()
if k not in ['email', 'subject', 'body']}
extra["user"] = user
# serverboards.debug("email extra data: %s"%(repr(extra)))
return await send_email_action(
_to,
message["subject"],
message["body"],
**extra)
@serverboards.rpc_method
async def send_email_action(email=None, subject=None, body=None, **extra):
if not settings:
await update_settings()
if not settings.get("servername"):
await serverboards.warning(
"No email server configured. Not sending emails.")
return {
"sent": False
}
msg = MIMEMultipart('alternative')
# await serverboards.debug("email extra data: %s"%(repr(extra)))
base_url = settings["base_url"]
context = {
"email": email,
"user": extra.get("user"),
"subject": subject,
"body": markdown.markdown(body, safe_mode='escape'),
"settings": settings,
"APP_URL": base_url,
"type": extra.get("type", "MESSAGE"),
"url": extra.get("url", None)
}
body_html = await render_template(
os.path.join(os.path.dirname(__file__),
"email-template.html"),
context)
msg.attach(MIMEText(body, "plain", 'UTF-8'))
msg.attach(MIMEText(body_html, "html", 'UTF-8'))
msg["From"] = "Serverboards <%s>" % settings["from"]
msg["To"] = email
msg["Subject"] = subject
msg["Date"] = email_utils.formatdate()
if "message_id" in extra:
msg["Message-Id"] = "<%s>" % extra["message_id"]
if "reply_to" in extra:
msg["In-Reply-To"] = "<%s>" % extra["reply_to"]
if "thread_id" in extra:
msg["References"] = "<%s>" % extra["thread_id"]
if extra.get("test"):
with open("/tmp/lastmail.html", "w") as fd:
fd.write(markdown.markdown(body_html, safe_mode='escape'))
with open("/tmp/lastmail.md", "w") as fd:
fd.write(body)
def send_sync():
port = settings.get("port")
ssl = settings.get("ssl")
if port or ssl:
if port == '465' or ssl:
port = port or '465'
smtp = smtplib.SMTP_SSL(settings["servername"], int(port))
else:
smtp = smtplib.SMTP(settings["servername"], int(port))
else:
smtp = smtplib.SMTP(settings["servername"])
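        # Port 465 means implicit-TLS SMTPS (SMTP_SSL above); any other port
        # gets a plain connection, and no STARTTLS upgrade is attempted here.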
if settings.get("username"):
print("Login as ", repr(settings))
smtp.login(settings.get("username"), settings.get("password_pw"))
smtp.sendmail(settings["from"], email, msg.as_string())
smtp.close()
await serverboards.sync(send_sync)
await serverboards.info(
"Sent email to %s, with subject '%s'" % (email, subject)
)
return {
"sent": True
}
def test():
async def test_async():
print("Start debug")
global settings
settings = yaml.load(open("config.yaml"))
# {
# "servername" : "mail.serverboards.io",
# "port" : "",
# "from" : "[email protected]",
# "username" : "",
# "password_pw" : ""
# }
sea = await send_email_action(
"[email protected]",
"This is a test from s10s test",
"The body of the test",
message_id="[email protected]",
)
print("email action", sea)
assert sea == {"sent": True}
se = await send_email(
user={"email": "[email protected]"},
config={},
message={
"subject": "This is a test message",
"body": "Body of the test message\n\nAnother line",
"extra": {}
},
test=True,
message_id="[email protected]",
)
print("email to user", se)
assert se
print("Done")
await serverboards.curio.sleep(2)
sys.exit(0)
serverboards.test_mode(test_async)
async def update_settings():
await serverboards.debug("Get email settings.")
global settings
try:
settings_ = await serverboards.rpc.call(
"settings.get",
"serverboards.core.notifications/settings.email")
settings.update(settings_)
settings["base_url"] = await base_url()
except Exception as e:
serverboards.log_traceback(e)
settings = {}
def main():
serverboards.run_async(update_settings, result=False)
serverboards.loop()
if len(sys.argv) == 2 and sys.argv[1] == "--test":
test()
else:
main()
| serverboards/serverboards | plugins/core-notifications/serverboards_email.py | Python | apache-2.0 | 6,092 |
import tempfile
from email.parser import Parser
from GnuPGInterface import GnuPG
class PGPMimeParser(Parser):
def parse_pgpmime(self, message):
sig_count, sig_parts, sig_alg = 0, [], 'SHA1'
enc_count, enc_parts, enc_ver = 0, [], None
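        # Simple counter-driven walk: multipart/signed arms sig_count, the next
        # part(s) collected in sig_parts are the signed payload, and the
        # application/pgp-signature part triggers `gpg --verify` over them.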
for part in message.walk():
mimetype = part.get_content_type()
if (sig_count > 1) and (mimetype == 'application/pgp-signature'):
sig = tempfile.NamedTemporaryFile()
sig.write(part.get_payload())
sig.flush()
msg = '\r\n'.join(sig_parts[0].as_string().splitlines(False))+'\r\n'
result = None
try:
gpg = GnuPG().run(['--utf8-strings', '--verify', sig.name, '-'],
create_fhs=['stdin', 'stderr'])
gpg.handles['stdin'].write(msg)
gpg.handles['stdin'].close()
result = gpg.handles['stderr'].read().decode('utf-8')
gpg.wait()
summary = ('verified', result)
except IOError:
summary = ('signed', result or 'Error running GnuPG')
for sig_part in sig_parts:
sig_part.openpgp = summary
# Reset!
sig_count, sig_parts = 0, []
elif sig_count > 0:
sig_parts.append(part)
sig_count += 1
elif enc_count > 0:
# FIXME: Decrypt and parse!
pass
elif mimetype == 'multipart/signed':
sig_alg = part.get_param('micalg', 'pgp-sha1').split('-')[1].upper()
sig_count = 1
elif mimetype == 'multipart/encrypted':
enc_count = 1
def parse(self, fp, headersonly=False):
message = Parser.parse(self, fp, headersonly=headersonly)
self.parse_pgpmime(message)
return message
| micahflee/Mailpile | mailpile/pgpmime.py | Python | agpl-3.0 | 1,698 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-08-23 11:08
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('clinic', '0004_auto_20170823_0925'),
]
operations = [
migrations.CreateModel(
name='Appointment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=128)),
('last_name', models.CharField(max_length=128)),
('appointment_day', models.DateField()),
('start_time', models.TimeField()),
('end_time', models.TimeField()),
('doctor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='clinic.Doctor')),
],
),
migrations.AlterUniqueTogether(
name='appointment',
unique_together=set([('doctor', 'appointment_day', 'start_time', 'end_time')]),
),
migrations.AlterIndexTogether(
name='appointment',
index_together=set([('first_name', 'last_name')]),
),
]
| memclutter/clinic-crm | src/appointment/migrations/0001_initial.py | Python | bsd-2-clause | 1,296 |
#!/usr/bin/python
usage = """usage:
plab_assistant [--path_to_files=<filename>] [--username=<username>]
[--port=<number>] --path_to_nodes=<filename> [--ssh_key=<filename>] action
action = check, install, uninstall, gather_stats, get_logs (check attempts to add the
boot strap software to nodes that do not have it yet... a common problem on
planetlab)
path_to_nodes = a file containing a new line delimited file containing hosts
to install basic node to... optional, use plab list if unspecified.
username = the user name for the hosts
path_to_files = the path to a downloadable file that contains the installation
files A sample is available at http://www.acis.ufl.edu/~ipop/planetlab/ipop/
port = port the stats app is running on
ssh_key = path to the ssh key to be used
"""
import os, sys, time, signal, subprocess, re, getopt, xmlrpclib
def main():
optlist, args = getopt.getopt(sys.argv[1:], "", ["path_to_files=", \
"username=", "port=", "path_to_nodes=", "ssh_key="])
o_d = {}
for k,v in optlist:
o_d[k] = v
try:
nodes = None
if "--path_to_nodes" in o_d:
nodes = []
nodes_file = o_d["--path_to_nodes"]
      f = open(nodes_file)
      for line in f:
        nodes.append(line.rstrip('\n\r '))
      f.close()
action = args[0]
if action == "gather_stats":
plab = plab_assistant(action, nodes, port=(o_d["--port"]))
else:
username = o_d["--username"]
ssh_key = None
if "--ssh_key" in o_d:
ssh_key = o_d["--ssh_key"]
path_to_files = None
if "--path_to_files" in o_d:
path_to_files = o_d["--path_to_files"]
plab = plab_assistant(action, nodes, username=username, \
path_to_files=path_to_files, ssh_key=ssh_key)
except:
print_usage()
plab.run()
def print_usage():
print usage
sys.exit()
class plab_assistant:
def __init__(self, action, nodes = None, username = "", path_to_files = "", \
port = str(0), update_callback = False, ssh_key=None):
if action == "install":
self.task = self.install_node
elif action == "check":
self.task = self.check_node
elif action == "uninstall":
self.task = self.uninstall_node
elif action == "gather_stats":
self.task = self.get_stats
elif action == "get_logs":
self.task = self.get_logs
os.system("rm -rf logs")
os.system("mkdir logs")
else:
"Invalid action: " + action
print_usage()
self.port = str(port)
if nodes == None:
nodes = []
plab_rpc = xmlrpclib.ServerProxy('https://www.planet-lab.org/PLCAPI/', allow_none=True)
for node in plab_rpc.GetNodes({'AuthMethod': "anonymous"}, {}, ['hostname']):
nodes.append(node['hostname'])
self.nodes = nodes
self.username = username
self.path_to_files = path_to_files
self.update_callback = update_callback
if ssh_key != None:
self.ssh_key = "-o IdentityFile=" + ssh_key + " "
else:
self.ssh_key = ""
  # Runs up to 64 forked workers at a time; this works well because half of the
  # nodes contacted are typically unresponsive and take a TCP timeout to fail,
  # or are bandwidth limited while downloading the data for install.
def run(self):
# process each node
pids = []
for node in self.nodes:
pid = os.fork()
if pid == 0:
self.task(node)
pids.append(pid)
while len(pids) >= 64:
time.sleep(5)
to_remove = []
for pid in pids:
try:
          if os.waitpid(pid, os.WNOHANG) == (pid, 0):  # WNOHANG: non-blocking wait
to_remove.append(pid)
except:
to_remove.append(pid)
for pid in to_remove:
pids.remove(pid)
# make sure we cleanly exit
count = 0
while True:
if len(pids) == 0:
break
for pid in pids:
to_remove = []
try:
          if os.waitpid(pid, os.WNOHANG) == (pid, 0):
to_remove.append(pid)
except:
to_remove.append(pid)
for pid in to_remove:
pids.remove(pid)
try:
os.kill(pid, signal.SIGKILL)
except:
pass
if count == 6:
for pid in pids:
try:
os.kill(pid, signal.SIGKILL)
except:
pass
break
count += 1
time.sleep(10)
def check_node(self, node):
self.node_install(node, True)
def install_node(self, node):
self.node_install(node, False)
# node is the hostname that we'll be installing the software stack unto
# check determines whether or not to check to see if software is already
# running and not install if it is.
def node_install(self, node, check):
e = ""
base_ssh = "/usr/bin/ssh -o StrictHostKeyChecking=no " + self.ssh_key + \
"-o HostbasedAuthentication=no -o CheckHostIP=no " + self.username + \
"@" + node + " "
if check:
try:
# This prints something if all is good ending this install attempt
ssh_cmd(base_ssh + "ps uax | grep basicnode | grep -v grep")
except:
#print node + " already installed or fail..."
sys.exit()
try:
# this helps us leave early in case the node is unaccessible
ssh_cmd(base_ssh + "pkill -KILL basicnode &> /dev/null")
ssh_cmd(base_ssh + "/home/" + self.username + "/node/clean.sh &> /dev/null")
ssh_cmd(base_ssh + "rm -rf /home/" + self.username + "/* &> /dev/null")
ssh_cmd(base_ssh + "wget --quiet " + self.path_to_files + " -O ~/node.tgz")
ssh_cmd(base_ssh + "tar -zxf node.tgz")
ssh_cmd(base_ssh + "/home/" + self.username + "/node/clean.sh &> /dev/null")
# this won't end unless we force it to! It should never take more than 20
# seconds for this to run... or something bad happened.
cmd = base_ssh + " /home/" + self.username + "/node/start_node.sh &> /dev/null"
pid = os.spawnvp(os.P_NOWAIT, 'ssh', cmd.split(' '))
time.sleep(20)
try:
        if os.waitpid(pid, os.WNOHANG) != (pid, 0):
os.kill(pid, signal.SIGKILL)
except:
pass
print node + " done!"
if self.update_callback:
self.update_callback(node, 1)
except:
print node + " failed!"
if self.update_callback:
self.update_callback(node, 0)
sys.exit()
def uninstall_node(self, node):
base_ssh = "/usr/bin/ssh -o StrictHostKeyChecking=no " + self.ssh_key + \
"-o HostbasedAuthentication=no -o CheckHostIP=no " + self.username + \
"@" + node + " "
try:
      # this helps us leave early in case the node is inaccessible
ssh_cmd(base_ssh + "pkill -KILL basicnode &> /dev/null")
ssh_cmd(base_ssh + "/home/" + self.username + "/node/clean.sh &> /dev/null")
ssh_cmd(base_ssh + "rm -rf /home/" + self.username + "/* &> /dev/null")
if self.update_callback:
self.update_callback(node, 0)
else:
print node + " done!"
except:
if self.update_callback:
self.update_callback(node, 1)
else:
print node + " failed!"
sys.exit()
def get_stats(self, node):
try:
server = xmlrpclib.Server('http://' + node + ':' + self.port)
stats = server.get_stats()
if 'dead' in stats:
mem = 0
cpu = 0.0
else:
mem = stats['mem']
cpu = stats['cpu']
except:
mem = -1
cpu = -1.1
data_points = {'host' : node, 'mem' : mem, 'cpu': cpu}
if self.update_callback:
self.update_callback(data_points)
else:
print data_points
sys.exit()
def get_logs(self, node):
os.system("mkdir logs/" + node)
cmd = "/usr/bin/scp -o StrictHostKeyChecking=no " + self.ssh_key + \
"-o HostbasedAuthentication=no -o CheckHostIP=no " + self.username + \
"@" + node + ":/home/" + self.username + "/node/node.log.* logs/" + node + \
"/."
try:
ssh_cmd(cmd)
except :
pass
sys.exit()
# This runs the ssh command, monitoring it for any possible failures, and
# raises a KeyboardInterrupt if one is detected.
def ssh_cmd(cmd):
  p = subprocess.Popen(cmd.split(' '), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  # communicate() waits for the process while draining both pipes, avoiding
  # the deadlock that waitpid-then-read can hit when a pipe buffer fills up
  out, err = p.communicate()
good_err = re.compile("Warning: Permanently added")
if (good_err.search(err) == None and err != '') or out != '':
#print cmd
#print "Err: " + err
#print "Out: " + out
raise KeyboardInterrupt
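# Example usage (a sketch; assumes the enclosing class is named plab_assistant
# after this file, and the slice login and tarball URL below are hypothetical):
#
#   assistant = plab_assistant("check", username="myslice",
#     path_to_files="http://example.org/node.tgz")
#   assistant.run()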
if __name__ == "__main__":
main()
| twchoi/tmp-brunet-deetoo | scripts/planetlab/plab_assistant.py | Python | gpl-2.0 | 8,540 |
import logging
from flask import Flask
from flask.ext.restful import Api, Resource, reqparse
from flask.ext.restful.representations.json import output_json
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
output_json.func_globals['settings'] = {'ensure_ascii': False,
'encoding': 'utf8'}
app = Flask(__name__)
api = Api(app)
logging.basicConfig(format='%(levelname)s %(asctime)s %(filename)s %(lineno)d: %(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# NOTE: If you need to access data files, the path should be '/src/file.dat'
class StubAPI(Resource):
def __init__(self):
self.reqparse = reqparse.RequestParser()
# Takes a single argument, content, and coerces its type to a list
self.reqparse.add_argument('content', type=list, location='json')
super(StubAPI, self).__init__()
def post(self):
args = self.reqparse.parse_args()
content = args['content']
logger.info('Started processing content.')
try:
# Modify this portion to work with your unique code
temp_output = []
for x in content:
temp_output.append(x['HOLD_KEY'])
logger.info('Finished processing content.')
except Exception as e:
            # If something goes wrong, log the exception and fall back to an
            # empty result
            logger.exception(e)
# Make sure to update this line if you change the variable names
temp_output = {}
return temp_output
api.add_resource(StubAPI, '/')
if __name__ == '__main__':
# Fires up a server on port 5000 at '/'
# i.e., http://localhost:5000/
http_server = HTTPServer(WSGIContainer(app))
http_server.listen(5000)
IOLoop.instance().start()
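# Example client call (a sketch; assumes the stub is running locally and that
# each item in 'content' carries the placeholder HOLD_KEY field used above):
#
#   curl -X POST -H 'Content-Type: application/json' \
#        -d '{"content": [{"HOLD_KEY": "value"}]}' http://localhost:5000/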
| ericwhyne/api_stub | app.py | Python | mit | 1,857 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Shared code between AMQP based openstack.common.rpc implementations.
The code in this module is shared between the rpc implemenations based on AMQP.
Specifically, this includes impl_kombu and impl_qpid. impl_carrot also uses
AMQP, but is deprecated and predates this code.
"""
import collections
import inspect
import sys
import uuid
from eventlet import greenpool
from eventlet import pools
from eventlet import queue
from eventlet import semaphore
from oslo.config import cfg
from neutron.openstack.common import excutils
from neutron.openstack.common.gettextutils import _
from neutron.openstack.common import local
from neutron.openstack.common import log as logging
from neutron.openstack.common.rpc import common as rpc_common
amqp_opts = [
cfg.BoolOpt('amqp_durable_queues',
default=False,
deprecated_name='rabbit_durable_queues',
deprecated_group='DEFAULT',
help='Use durable queues in amqp.'),
cfg.BoolOpt('amqp_auto_delete',
default=False,
help='Auto-delete queues in amqp.'),
]
cfg.CONF.register_opts(amqp_opts)
UNIQUE_ID = '_unique_id'
LOG = logging.getLogger(__name__)
class Pool(pools.Pool):
"""Class that implements a Pool of Connections."""
def __init__(self, conf, connection_cls, *args, **kwargs):
self.connection_cls = connection_cls
self.conf = conf
kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size)
kwargs.setdefault("order_as_stack", True)
super(Pool, self).__init__(*args, **kwargs)
self.reply_proxy = None
# TODO(comstud): Timeout connections not used in a while
def create(self):
LOG.debug(_('Pool creating new connection'))
return self.connection_cls(self.conf)
def empty(self):
while self.free_items:
self.get().close()
# Force a new connection pool to be created.
# Note that this was added due to failing unit test cases. The issue
# is the above "while loop" gets all the cached connections from the
# pool and closes them, but never returns them to the pool, a pool
# leak. The unit tests hang waiting for an item to be returned to the
# pool. The unit tests get here via the tearDown() method. In the run
# time code, it gets here via cleanup() and only appears in service.py
# just before doing a sys.exit(), so cleanup() only happens once and
# the leakage is not a problem.
self.connection_cls.pool = None
_pool_create_sem = semaphore.Semaphore()
def get_connection_pool(conf, connection_cls):
with _pool_create_sem:
# Make sure only one thread tries to create the connection pool.
if not connection_cls.pool:
connection_cls.pool = Pool(conf, connection_cls)
return connection_cls.pool
class ConnectionContext(rpc_common.Connection):
"""The class that is actually returned to the create_connection() caller.
This is essentially a wrapper around Connection that supports 'with'.
It can also return a new Connection, or one from a pool.
The function will also catch when an instance of this class is to be
deleted. With that we can return Connections to the pool on exceptions
and so forth without making the caller be responsible for catching them.
If possible the function makes sure to return a connection to the pool.
"""
def __init__(self, conf, connection_pool, pooled=True, server_params=None):
"""Create a new connection, or get one from the pool."""
self.connection = None
self.conf = conf
self.connection_pool = connection_pool
if pooled:
self.connection = connection_pool.get()
else:
self.connection = connection_pool.connection_cls(
conf,
server_params=server_params)
self.pooled = pooled
def __enter__(self):
"""When with ConnectionContext() is used, return self."""
return self
def _done(self):
"""If the connection came from a pool, clean it up and put it back.
If it did not come from a pool, close it.
"""
if self.connection:
if self.pooled:
# Reset the connection so it's ready for the next caller
# to grab from the pool
self.connection.reset()
self.connection_pool.put(self.connection)
else:
try:
self.connection.close()
except Exception:
pass
self.connection = None
def __exit__(self, exc_type, exc_value, tb):
"""End of 'with' statement. We're done here."""
self._done()
def __del__(self):
"""Caller is done with this connection. Make sure we cleaned up."""
self._done()
def close(self):
"""Caller is done with this connection."""
self._done()
def create_consumer(self, topic, proxy, fanout=False):
self.connection.create_consumer(topic, proxy, fanout)
def create_worker(self, topic, proxy, pool_name):
self.connection.create_worker(topic, proxy, pool_name)
def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
self.connection.join_consumer_pool(callback,
pool_name,
topic,
exchange_name)
def consume_in_thread(self):
self.connection.consume_in_thread()
def __getattr__(self, key):
"""Proxy all other calls to the Connection instance."""
if self.connection:
return getattr(self.connection, key)
else:
raise rpc_common.InvalidRPCConnectionReuse()
class ReplyProxy(ConnectionContext):
"""Connection class for RPC replies / callbacks."""
def __init__(self, conf, connection_pool):
self._call_waiters = {}
self._num_call_waiters = 0
self._num_call_waiters_wrn_threshhold = 10
self._reply_q = 'reply_' + uuid.uuid4().hex
super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False)
self.declare_direct_consumer(self._reply_q, self._process_data)
self.consume_in_thread()
def _process_data(self, message_data):
msg_id = message_data.pop('_msg_id', None)
waiter = self._call_waiters.get(msg_id)
if not waiter:
LOG.warn(_('No calling threads waiting for msg_id : %(msg_id)s'
', message : %(data)s'), {'msg_id': msg_id,
'data': message_data})
LOG.warn(_('_call_waiters: %s') % str(self._call_waiters))
else:
waiter.put(message_data)
def add_call_waiter(self, waiter, msg_id):
self._num_call_waiters += 1
if self._num_call_waiters > self._num_call_waiters_wrn_threshhold:
            LOG.warn(_('Number of call waiters is greater than warning '
                       'threshold: %d. There could be a MulticallProxyWaiter '
                       'leak.') % self._num_call_waiters_wrn_threshhold)
self._num_call_waiters_wrn_threshhold *= 2
self._call_waiters[msg_id] = waiter
def del_call_waiter(self, msg_id):
self._num_call_waiters -= 1
del self._call_waiters[msg_id]
def get_reply_q(self):
return self._reply_q
def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
failure=None, ending=False, log_failure=True):
"""Sends a reply or an error on the channel signified by msg_id.
Failure should be a sys.exc_info() tuple.
"""
with ConnectionContext(conf, connection_pool) as conn:
if failure:
failure = rpc_common.serialize_remote_exception(failure,
log_failure)
try:
msg = {'result': reply, 'failure': failure}
except TypeError:
msg = {'result': dict((k, repr(v))
for k, v in reply.__dict__.iteritems()),
'failure': failure}
if ending:
msg['ending'] = True
_add_unique_id(msg)
# If a reply_q exists, add the msg_id to the reply and pass the
# reply_q to direct_send() to use it as the response queue.
        # Otherwise use the msg_id for backward compatibility.
if reply_q:
msg['_msg_id'] = msg_id
conn.direct_send(reply_q, rpc_common.serialize_msg(msg))
else:
conn.direct_send(msg_id, rpc_common.serialize_msg(msg))
class RpcContext(rpc_common.CommonRpcContext):
"""Context that supports replying to a rpc.call."""
def __init__(self, **kwargs):
self.msg_id = kwargs.pop('msg_id', None)
self.reply_q = kwargs.pop('reply_q', None)
self.conf = kwargs.pop('conf')
super(RpcContext, self).__init__(**kwargs)
def deepcopy(self):
values = self.to_dict()
values['conf'] = self.conf
values['msg_id'] = self.msg_id
values['reply_q'] = self.reply_q
return self.__class__(**values)
def reply(self, reply=None, failure=None, ending=False,
connection_pool=None, log_failure=True):
if self.msg_id:
msg_reply(self.conf, self.msg_id, self.reply_q, connection_pool,
reply, failure, ending, log_failure)
if ending:
self.msg_id = None
def unpack_context(conf, msg):
"""Unpack context from msg."""
context_dict = {}
for key in list(msg.keys()):
# NOTE(vish): Some versions of python don't like unicode keys
# in kwargs.
key = str(key)
if key.startswith('_context_'):
value = msg.pop(key)
context_dict[key[9:]] = value
context_dict['msg_id'] = msg.pop('_msg_id', None)
context_dict['reply_q'] = msg.pop('_reply_q', None)
context_dict['conf'] = conf
ctx = RpcContext.from_dict(context_dict)
rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
return ctx
def pack_context(msg, context):
"""Pack context into msg.
Values for message keys need to be less than 255 chars, so we pull
context out into a bunch of separate keys. If we want to support
more arguments in rabbit messages, we may want to do the same
for args at some point.
"""
context_d = dict([('_context_%s' % key, value)
for (key, value) in context.to_dict().iteritems()])
msg.update(context_d)
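# Example (a sketch): packing a context whose to_dict() is {'user': 'alice'}
# adds a '_context_user' key to the message, which unpack_context() later
# strips back off:
#
#   msg = {'method': 'echo', 'args': {'value': 42}}
#   pack_context(msg, context)
#   # msg now also contains {'_context_user': 'alice'}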
class _MsgIdCache(object):
"""This class checks any duplicate messages."""
    # NOTE: This value could be made a configuration item, but it rarely
    #       needs to change, so it is left static for now.
DUP_MSG_CHECK_SIZE = 16
def __init__(self, **kwargs):
self.prev_msgids = collections.deque([],
maxlen=self.DUP_MSG_CHECK_SIZE)
def check_duplicate_message(self, message_data):
"""AMQP consumers may read same message twice when exceptions occur
before ack is returned. This method prevents doing it.
"""
if UNIQUE_ID in message_data:
msg_id = message_data[UNIQUE_ID]
if msg_id not in self.prev_msgids:
self.prev_msgids.append(msg_id)
else:
raise rpc_common.DuplicateMessageError(msg_id=msg_id)
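# Example (a sketch): the cache accepts a message id the first time and raises
# on a repeat seen within the last DUP_MSG_CHECK_SIZE ids:
#
#   cache = _MsgIdCache()
#   cache.check_duplicate_message({UNIQUE_ID: 'abc'})  # fine
#   cache.check_duplicate_message({UNIQUE_ID: 'abc'})  # DuplicateMessageError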
def _add_unique_id(msg):
"""Add unique_id for checking duplicate messages."""
unique_id = uuid.uuid4().hex
msg.update({UNIQUE_ID: unique_id})
LOG.debug(_('UNIQUE_ID is %s.') % (unique_id))
class _ThreadPoolWithWait(object):
"""Base class for a delayed invocation manager.
Used by the Connection class to start up green threads
to handle incoming messages.
"""
def __init__(self, conf, connection_pool):
self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size)
self.connection_pool = connection_pool
self.conf = conf
def wait(self):
"""Wait for all callback threads to exit."""
self.pool.waitall()
class CallbackWrapper(_ThreadPoolWithWait):
"""Wraps a straight callback.
Allows it to be invoked in a green thread.
"""
def __init__(self, conf, callback, connection_pool):
"""Initiates CallbackWrapper object.
:param conf: cfg.CONF instance
:param callback: a callable (probably a function)
:param connection_pool: connection pool as returned by
get_connection_pool()
"""
super(CallbackWrapper, self).__init__(
conf=conf,
connection_pool=connection_pool,
)
self.callback = callback
def __call__(self, message_data):
self.pool.spawn_n(self.callback, message_data)
class ProxyCallback(_ThreadPoolWithWait):
"""Calls methods on a proxy object based on method and args."""
def __init__(self, conf, proxy, connection_pool):
super(ProxyCallback, self).__init__(
conf=conf,
connection_pool=connection_pool,
)
self.proxy = proxy
self.msg_id_cache = _MsgIdCache()
def __call__(self, message_data):
"""Consumer callback to call a method on a proxy object.
Parses the message for validity and fires off a thread to call the
proxy object method.
Message data should be a dictionary with two keys:
method: string representing the method to call
args: dictionary of arg: value
Example: {'method': 'echo', 'args': {'value': 42}}
"""
# It is important to clear the context here, because at this point
# the previous context is stored in local.store.context
if hasattr(local.store, 'context'):
del local.store.context
rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
self.msg_id_cache.check_duplicate_message(message_data)
ctxt = unpack_context(self.conf, message_data)
method = message_data.get('method')
args = message_data.get('args', {})
version = message_data.get('version')
namespace = message_data.get('namespace')
if not method:
LOG.warn(_('no method for message: %s') % message_data)
ctxt.reply(_('No method for message: %s') % message_data,
connection_pool=self.connection_pool)
return
self.pool.spawn_n(self._process_data, ctxt, version, method,
namespace, args)
def _process_data(self, ctxt, version, method, namespace, args):
"""Process a message in a new thread.
If the proxy object we have has a dispatch method
(see rpc.dispatcher.RpcDispatcher), pass it the version,
method, and args and let it dispatch as appropriate. If not, use
the old behavior of magically calling the specified method on the
proxy we have here.
"""
ctxt.update_store()
try:
rval = self.proxy.dispatch(ctxt, version, method, namespace,
**args)
# Check if the result was a generator
if inspect.isgenerator(rval):
for x in rval:
ctxt.reply(x, None, connection_pool=self.connection_pool)
else:
ctxt.reply(rval, None, connection_pool=self.connection_pool)
# This final None tells multicall that it is done.
ctxt.reply(ending=True, connection_pool=self.connection_pool)
except rpc_common.ClientException as e:
LOG.debug(_('Expected exception during message handling (%s)') %
e._exc_info[1])
ctxt.reply(None, e._exc_info,
connection_pool=self.connection_pool,
log_failure=False)
except Exception:
# sys.exc_info() is deleted by LOG.exception().
exc_info = sys.exc_info()
LOG.error(_('Exception during message handling'),
exc_info=exc_info)
ctxt.reply(None, exc_info, connection_pool=self.connection_pool)
class MulticallProxyWaiter(object):
def __init__(self, conf, msg_id, timeout, connection_pool):
self._msg_id = msg_id
self._timeout = timeout or conf.rpc_response_timeout
self._reply_proxy = connection_pool.reply_proxy
self._done = False
self._got_ending = False
self._conf = conf
self._dataqueue = queue.LightQueue()
# Add this caller to the reply proxy's call_waiters
self._reply_proxy.add_call_waiter(self, self._msg_id)
self.msg_id_cache = _MsgIdCache()
def put(self, data):
self._dataqueue.put(data)
def done(self):
if self._done:
return
self._done = True
# Remove this caller from reply proxy's call_waiters
self._reply_proxy.del_call_waiter(self._msg_id)
def _process_data(self, data):
result = None
self.msg_id_cache.check_duplicate_message(data)
if data['failure']:
failure = data['failure']
result = rpc_common.deserialize_remote_exception(self._conf,
failure)
elif data.get('ending', False):
self._got_ending = True
else:
result = data['result']
return result
def __iter__(self):
"""Return a result until we get a reply with an 'ending' flag."""
if self._done:
raise StopIteration
while True:
try:
data = self._dataqueue.get(timeout=self._timeout)
result = self._process_data(data)
except queue.Empty:
self.done()
raise rpc_common.Timeout()
except Exception:
with excutils.save_and_reraise_exception():
self.done()
if self._got_ending:
self.done()
raise StopIteration
if isinstance(result, Exception):
self.done()
raise result
yield result
def create_connection(conf, new, connection_pool):
"""Create a connection."""
return ConnectionContext(conf, connection_pool, pooled=not new)
_reply_proxy_create_sem = semaphore.Semaphore()
def multicall(conf, context, topic, msg, timeout, connection_pool):
"""Make a call that returns multiple times."""
LOG.debug(_('Making synchronous call on %s ...'), topic)
msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id})
LOG.debug(_('MSG_ID is %s') % (msg_id))
_add_unique_id(msg)
pack_context(msg, context)
with _reply_proxy_create_sem:
if not connection_pool.reply_proxy:
connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
with ConnectionContext(conf, connection_pool) as conn:
conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
return wait_msg
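# Example (a sketch): callers iterate the waiter returned by multicall(); each
# yielded item is one reply, and iteration stops once the 'ending' sentinel
# arrives. call() below does exactly this and keeps only the last result:
#
#   for reply in multicall(conf, ctxt, 'topic', msg, 30, pool):
#       handle(reply)  # 'handle' is a hypothetical consumer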
def call(conf, context, topic, msg, timeout, connection_pool):
"""Sends a message on a topic and wait for a response."""
rv = multicall(conf, context, topic, msg, timeout, connection_pool)
# NOTE(vish): return the last result from the multicall
rv = list(rv)
if not rv:
return
return rv[-1]
def cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a topic without waiting for a response."""
LOG.debug(_('Making asynchronous cast on %s...'), topic)
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.topic_send(topic, rpc_common.serialize_msg(msg))
def fanout_cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a fanout exchange without waiting for a response."""
LOG.debug(_('Making asynchronous fanout cast...'))
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.fanout_send(topic, rpc_common.serialize_msg(msg))
def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
"""Sends a message on a topic to a specific server."""
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
conn.topic_send(topic, rpc_common.serialize_msg(msg))
def fanout_cast_to_server(conf, context, server_params, topic, msg,
connection_pool):
"""Sends a message on a fanout exchange to a specific server."""
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
conn.fanout_send(topic, rpc_common.serialize_msg(msg))
def notify(conf, context, topic, msg, connection_pool, envelope):
"""Sends a notification event on a topic."""
LOG.debug(_('Sending %(event_type)s on %(topic)s'),
dict(event_type=msg.get('event_type'),
topic=topic))
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
if envelope:
msg = rpc_common.serialize_msg(msg)
conn.notify_send(topic, msg)
def cleanup(connection_pool):
if connection_pool:
connection_pool.empty()
def get_control_exchange(conf):
return conf.control_exchange
| ntt-sic/neutron | neutron/openstack/common/rpc/amqp.py | Python | apache-2.0 | 22,783 |
# pylint: skip-file
# flake8: noqa
def main():
'''
ansible oc module for registry
'''
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', type='str',
choices=['present', 'absent']),
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default=None, required=True, type='str'),
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
images=dict(default=None, type='str'),
latest_images=dict(default=False, type='bool'),
labels=dict(default=None, type='list'),
ports=dict(default=['5000'], type='list'),
replicas=dict(default=1, type='int'),
selector=dict(default=None, type='str'),
service_account=dict(default='registry', type='str'),
mount_host=dict(default=None, type='str'),
volume_mounts=dict(default=None, type='list'),
env_vars=dict(default=None, type='dict'),
edits=dict(default=None, type='list'),
enforce_quota=dict(default=False, type='bool'),
force=dict(default=False, type='bool'),
daemonset=dict(default=False, type='bool'),
tls_key=dict(default=None, type='str'),
tls_certificate=dict(default=None, type='str'),
),
supports_check_mode=True,
)
results = Registry.run_ansible(module.params, module.check_mode)
if 'failed' in results:
module.fail_json(**results)
module.exit_json(**results)
if __name__ == '__main__':
main()
| andrewklau/openshift-tools | openshift/installer/vendored/openshift-ansible-3.5.13/roles/lib_openshift/src/ansible/oc_adm_registry.py | Python | apache-2.0 | 1,674 |
# #######
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''NFV-O packager utility config'''
from setuptools import setup
setup(
name='nfvo-packager',
version='0.1',
license='LICENSE',
packages=[
'nfvo_packager'
],
description='NFV-O packager utility',
install_requires=[
'pyyaml',
]
)
| 01000101/aria-csar-extension | setup.py | Python | apache-2.0 | 923 |
import os
import sys
import string
import xml.etree.ElementTree as etree
from xml.etree.ElementTree import SubElement
from utils import _make_path_relative
from utils import xml_indent
fs_encoding = sys.getfilesystemencoding()
def _get_filetype(fn):
if fn.rfind('.c') != -1 or fn.rfind('.C') != -1 or fn.rfind('.cpp') != -1:
return 1
# assemble file type
if fn.rfind('.s') != -1 or fn.rfind('.S') != -1:
return 2
# header type
if fn.rfind('.h') != -1:
return 5
if fn.rfind('.lib') != -1:
return 4
# other filetype
return 5
def MDK4AddGroupForFN(ProjectFiles, parent, name, filename, project_path):
group = SubElement(parent, 'Group')
group_name = SubElement(group, 'GroupName')
group_name.text = name
name = os.path.basename(filename)
path = os.path.dirname (filename)
basename = os.path.basename(path)
path = _make_path_relative(project_path, path)
path = os.path.join(path, name)
files = SubElement(group, 'Files')
file = SubElement(files, 'File')
file_name = SubElement(file, 'FileName')
name = os.path.basename(path)
if ProjectFiles.count(name):
name = basename + '_' + name
ProjectFiles.append(name)
file_name.text = name.decode(fs_encoding)
file_type = SubElement(file, 'FileType')
file_type.text = '%d' % _get_filetype(name)
file_path = SubElement(file, 'FilePath')
file_path.text = path.decode(fs_encoding)
def MDK4AddGroup(ProjectFiles, parent, name, files, project_path):
# don't add an empty group
if len(files) == 0:
return
group = SubElement(parent, 'Group')
group_name = SubElement(group, 'GroupName')
group_name.text = name
for f in files:
fn = f.rfile()
name = fn.name
path = os.path.dirname(fn.abspath)
basename = os.path.basename(path)
path = _make_path_relative(project_path, path)
path = os.path.join(path, name)
files = SubElement(group, 'Files')
file = SubElement(files, 'File')
file_name = SubElement(file, 'FileName')
name = os.path.basename(path)
if ProjectFiles.count(name):
name = basename + '_' + name
ProjectFiles.append(name)
file_name.text = name.decode(fs_encoding)
file_type = SubElement(file, 'FileType')
file_type.text = '%d' % _get_filetype(name)
file_path = SubElement(file, 'FilePath')
file_path.text = path.decode(fs_encoding)
def MDK4Project(target, script):
project_path = os.path.dirname(os.path.abspath(target))
project_uvopt = os.path.abspath(target).replace('uvproj', 'uvopt')
if os.path.isfile(project_uvopt):
os.unlink(project_uvopt)
tree = etree.parse('template.uvproj')
root = tree.getroot()
out = file(target, 'wb')
out.write('<?xml version="1.0" encoding="UTF-8" standalone="no" ?>\n')
CPPPATH = []
CPPDEFINES = []
LINKFLAGS = ''
CCFLAGS = ''
ProjectFiles = []
# add group
groups = tree.find('Targets/Target/Groups')
if groups is None:
groups = SubElement(tree.find('Targets/Target'), 'Groups')
groups.clear() # clean old groups
for group in script:
group_xml = MDK4AddGroup(ProjectFiles, groups, group['name'], group['src'], project_path)
# get each include path
        if group.has_key('CPPPATH') and group['CPPPATH']:
            CPPPATH += group['CPPPATH']
        # get each group's definitions
        if group.has_key('CPPDEFINES') and group['CPPDEFINES']:
            CPPDEFINES += group['CPPDEFINES']
# get each group's link flags
if group.has_key('LINKFLAGS') and group['LINKFLAGS']:
if LINKFLAGS:
LINKFLAGS += ' ' + group['LINKFLAGS']
else:
LINKFLAGS += group['LINKFLAGS']
if group.has_key('LIBS') and group['LIBS']:
for item in group['LIBS']:
lib_path = ''
for path_item in group['LIBPATH']:
full_path = os.path.join(path_item, item + '.lib')
if os.path.isfile(full_path): # has this library
lib_path = full_path
if lib_path != '':
MDK4AddGroupForFN(ProjectFiles, groups, group['name'], lib_path, project_path)
# write include path, definitions and link flags
IncludePath = tree.find('Targets/Target/TargetOption/TargetArmAds/Cads/VariousControls/IncludePath')
IncludePath.text = ';'.join([_make_path_relative(project_path, os.path.normpath(i)) for i in CPPPATH])
Define = tree.find('Targets/Target/TargetOption/TargetArmAds/Cads/VariousControls/Define')
Define.text = ', '.join(set(CPPDEFINES))
Misc = tree.find('Targets/Target/TargetOption/TargetArmAds/LDads/Misc')
Misc.text = LINKFLAGS
xml_indent(root)
out.write(etree.tostring(root, encoding='utf-8'))
out.close()
# copy uvopt file
if os.path.exists('template.uvopt'):
import shutil
shutil.copy2('template.uvopt', 'project.uvopt')
def MDKProject(target, script):
template = file('template.Uv2', "rb")
lines = template.readlines()
project = file(target, "wb")
project_path = os.path.dirname(os.path.abspath(target))
line_index = 5
# write group
for group in script:
lines.insert(line_index, 'Group (%s)\r\n' % group['name'])
line_index += 1
lines.insert(line_index, '\r\n')
line_index += 1
# write file
ProjectFiles = []
CPPPATH = []
CPPDEFINES = []
LINKFLAGS = ''
CCFLAGS = ''
# number of groups
group_index = 1
for group in script:
# print group['name']
# get each include path
        if group.has_key('CPPPATH') and group['CPPPATH']:
            CPPPATH += group['CPPPATH']
# get each group's definitions
        if group.has_key('CPPDEFINES') and group['CPPDEFINES']:
            # append the list of defines; duplicates are removed via set()
            # below, and joining ';' onto a list would raise a TypeError
            CPPDEFINES += group['CPPDEFINES']
# get each group's link flags
if group.has_key('LINKFLAGS') and group['LINKFLAGS']:
if LINKFLAGS:
LINKFLAGS += ' ' + group['LINKFLAGS']
else:
LINKFLAGS += group['LINKFLAGS']
# generate file items
for node in group['src']:
fn = node.rfile()
name = fn.name
path = os.path.dirname(fn.abspath)
basename = os.path.basename(path)
path = _make_path_relative(project_path, path)
path = os.path.join(path, name)
if ProjectFiles.count(name):
name = basename + '_' + name
ProjectFiles.append(name)
lines.insert(line_index, 'File %d,%d,<%s><%s>\r\n'
% (group_index, _get_filetype(name), path, name))
line_index += 1
group_index = group_index + 1
lines.insert(line_index, '\r\n')
line_index += 1
# remove repeat path
paths = set()
for path in CPPPATH:
inc = _make_path_relative(project_path, os.path.normpath(path))
paths.add(inc) #.replace('\\', '/')
paths = [i for i in paths]
CPPPATH = string.join(paths, ';')
definitions = [i for i in set(CPPDEFINES)]
CPPDEFINES = string.join(definitions, ', ')
while line_index < len(lines):
if lines[line_index].startswith(' ADSCINCD '):
lines[line_index] = ' ADSCINCD (' + CPPPATH + ')\r\n'
if lines[line_index].startswith(' ADSLDMC ('):
lines[line_index] = ' ADSLDMC (' + LINKFLAGS + ')\r\n'
if lines[line_index].startswith(' ADSCDEFN ('):
lines[line_index] = ' ADSCDEFN (' + CPPDEFINES + ')\r\n'
line_index += 1
# write project
for line in lines:
project.write(line)
project.close()
| credosemi/rt-thread-openrisc | tools/keil.py | Python | gpl-2.0 | 8,278 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""A single value (with multiple layers possibly) in the config."""
import collections
class SettingValue:
"""Base class for setting values.
Intended to be sub-classed by config value "types".
Attributes:
typ: A BaseType subclass instance.
value: (readonly property) The currently valid, most important value.
values: An OrderedDict with the values on different layers, with the
most significant layer first.
"""
def __init__(self, typ, default=None, *, backends=None):
"""Constructor.
Args:
typ: The BaseType to use.
default: Raw value to set.
            backends: A list of usertypes.Backend enum members to mark this
                setting as unsupported with other backends.
"""
self.typ = typ
self.values = collections.OrderedDict.fromkeys(
['temp', 'conf', 'default'])
self.values['default'] = default
self.backends = backends
def __str__(self):
"""Get raw string value."""
return self.value()
def default(self):
"""Get the default value."""
return self.values['default']
def getlayers(self, startlayer):
"""Get a dict of values starting with startlayer.
Args:
startlayer: The first layer to include.
"""
idx = list(self.values.keys()).index(startlayer)
d = collections.OrderedDict(list(self.values.items())[idx:])
return d
def value(self, startlayer=None):
"""Get the first valid value starting from startlayer.
Args:
startlayer: The first layer to include.
"""
if startlayer is None:
d = self.values
else:
d = self.getlayers(startlayer)
for val in d.values():
if val is not None:
return val
raise ValueError("No valid config value found!")
def transformed(self):
"""Get the transformed value."""
return self.typ.transform(self.value())
def setv(self, layer, value, interpolated):
"""Set the value on a layer.
Args:
layer: The layer to set the value on, an element name of the
ValueLayers dict.
value: The value to set.
interpolated: The interpolated value, for typechecking (or None).
"""
if interpolated is not None:
self.typ.validate(interpolated)
self.values[layer] = value
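# Example (a sketch): a value set on the 'temp' layer shadows 'conf' and
# 'default'; passing interpolated=None skips the type check:
#
#   val = SettingValue(typ, default='blue')   # typ is a BaseType instance
#   val.setv('conf', 'green', None)
#   val.setv('temp', 'red', None)
#   val.value()        # -> 'red'
#   val.value('conf')  # -> 'green'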
| antoyo/qutebrowser | qutebrowser/config/value.py | Python | gpl-3.0 | 3,318 |
from hashlib import md5, sha1
import cherrypy
from cherrypy._cpcompat import ntob
from cherrypy.lib import httpauth
from cherrypy.test import helper
class HTTPAuthTest(helper.CPWebCase):
@staticmethod
def setup_server():
class Root:
@cherrypy.expose
def index(self):
return "This is public."
class DigestProtected:
@cherrypy.expose
def index(self):
return "Hello %s, you've been authorized." % (
cherrypy.request.login)
class BasicProtected:
@cherrypy.expose
def index(self):
return "Hello %s, you've been authorized." % (
cherrypy.request.login)
class BasicProtected2:
@cherrypy.expose
def index(self):
return "Hello %s, you've been authorized." % (
cherrypy.request.login)
def fetch_users():
return {'test': 'test'}
def sha_password_encrypter(password):
return sha1(ntob(password)).hexdigest()
def fetch_password(username):
return sha1(ntob('test')).hexdigest()
conf = {
'/digest': {
'tools.digest_auth.on': True,
'tools.digest_auth.realm': 'localhost',
'tools.digest_auth.users': fetch_users
},
'/basic': {
'tools.basic_auth.on': True,
'tools.basic_auth.realm': 'localhost',
'tools.basic_auth.users': {
'test': md5(ntob('test')).hexdigest()
}
},
'/basic2': {
'tools.basic_auth.on': True,
'tools.basic_auth.realm': 'localhost',
'tools.basic_auth.users': fetch_password,
'tools.basic_auth.encrypt': sha_password_encrypter
}
}
root = Root()
root.digest = DigestProtected()
root.basic = BasicProtected()
root.basic2 = BasicProtected2()
cherrypy.tree.mount(root, config=conf)
def testPublic(self):
self.getPage("/")
self.assertStatus('200 OK')
self.assertHeader('Content-Type', 'text/html;charset=utf-8')
self.assertBody('This is public.')
def testBasic(self):
self.getPage("/basic/")
self.assertStatus(401)
self.assertHeader('WWW-Authenticate', 'Basic realm="localhost"')
self.getPage('/basic/', [('Authorization', 'Basic dGVzdDp0ZX60')])
self.assertStatus(401)
self.getPage('/basic/', [('Authorization', 'Basic dGVzdDp0ZXN0')])
self.assertStatus('200 OK')
self.assertBody("Hello test, you've been authorized.")
def testBasic2(self):
self.getPage("/basic2/")
self.assertStatus(401)
self.assertHeader('WWW-Authenticate', 'Basic realm="localhost"')
self.getPage('/basic2/', [('Authorization', 'Basic dGVzdDp0ZX60')])
self.assertStatus(401)
self.getPage('/basic2/', [('Authorization', 'Basic dGVzdDp0ZXN0')])
self.assertStatus('200 OK')
self.assertBody("Hello test, you've been authorized.")
def testDigest(self):
self.getPage("/digest/")
self.assertStatus(401)
value = None
for k, v in self.headers:
if k.lower() == "www-authenticate":
if v.startswith("Digest"):
value = v
break
if value is None:
self._handlewebError(
"Digest authentification scheme was not found")
value = value[7:]
items = value.split(', ')
tokens = {}
for item in items:
key, value = item.split('=')
tokens[key.lower()] = value
missing_msg = "%s is missing"
bad_value_msg = "'%s' was expecting '%s' but found '%s'"
nonce = None
if 'realm' not in tokens:
self._handlewebError(missing_msg % 'realm')
elif tokens['realm'] != '"localhost"':
self._handlewebError(bad_value_msg %
('realm', '"localhost"', tokens['realm']))
if 'nonce' not in tokens:
self._handlewebError(missing_msg % 'nonce')
else:
nonce = tokens['nonce'].strip('"')
if 'algorithm' not in tokens:
self._handlewebError(missing_msg % 'algorithm')
elif tokens['algorithm'] != '"MD5"':
self._handlewebError(bad_value_msg %
('algorithm', '"MD5"', tokens['algorithm']))
if 'qop' not in tokens:
self._handlewebError(missing_msg % 'qop')
elif tokens['qop'] != '"auth"':
self._handlewebError(bad_value_msg %
('qop', '"auth"', tokens['qop']))
# Test a wrong 'realm' value
base_auth = (
'Digest '
'username="test", '
'realm="wrong realm", '
'nonce="%s", '
'uri="/digest/", '
'algorithm=MD5, '
'response="%s", '
'qop=auth, '
'nc=%s, '
'cnonce="1522e61005789929"'
)
auth = base_auth % (nonce, '', '00000001')
params = httpauth.parseAuthorization(auth)
response = httpauth._computeDigestResponse(params, 'test')
auth = base_auth % (nonce, response, '00000001')
self.getPage('/digest/', [('Authorization', auth)])
self.assertStatus(401)
# Test that must pass
base_auth = (
'Digest '
'username="test", '
'realm="localhost", '
'nonce="%s", '
'uri="/digest/", '
'algorithm=MD5, '
'response="%s", '
'qop=auth, '
'nc=%s, '
'cnonce="1522e61005789929"'
)
auth = base_auth % (nonce, '', '00000001')
params = httpauth.parseAuthorization(auth)
response = httpauth._computeDigestResponse(params, 'test')
auth = base_auth % (nonce, response, '00000001')
self.getPage('/digest/', [('Authorization', auth)])
self.assertStatus('200 OK')
self.assertBody("Hello test, you've been authorized.")
| heytcass/homeassistant-config | deps/cherrypy/test/test_httpauth.py | Python | mit | 6,313 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
""" Celestial body calculations and corrections """
__author__ = "Andy Casey <[email protected]>"
# This code has been modified from legacy codes. Credits are chronologically
# attributable to: Sergey Koposov, Kochukho, Kudryavtsev, W. Landsman,
# Chris McCarthy, and Jeff Valenti.
__all__ = ["baryvel", "corrections", "sol_corrections", "moon_distance"]
# Third-party
import numpy as np
import astropy.constants as constants
import astropy.coordinates as coord
import astropy.units as u
import ephem
from astropy.time import Time
def moon_distance(headers):
"""
Calculate the distance to the moon (in degrees) at the time of observations.
"""
moon = ephem.Moon()
observer = ephem.Observer()
# Position (irrelevant, but whatever).
observer.elevation = headers["ALT_OBS"]
observer.lon, observer.lat = headers["LONG_OBS"], headers["LAT_OBS"]
# Calculate it at the start of the exposure.
start_time = "{0} {1}".format(headers["UTDATE"].replace(":", "/"),
headers["UTSTART"])
end_time = "{0} {1}".format(headers["UTDATE"].replace(":", "/"),
headers["UTEND"])
distances = []
for observer_time in (start_time, end_time):
observer.date = observer_time
observer.epoch = observer_time
# Compute the position
moon.compute(observer)
moon_ra, moon_dec = moon.ra * 180./np.pi, moon.dec * 180./np.pi
# Distance to the MEANRA, MEANDEC position?
distances.append(np.sqrt((moon_ra - headers["MEANRA"])**2 \
+ (moon_dec - headers["MEANDEC"])**2))
return np.round(max(distances), 1)
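# Example (a sketch; the header keys match those read above, the values are
# hypothetical; note pyephem parses "deg:min:sec" strings for lon/lat):
#
#   headers = {"ALT_OBS": 1164.0, "LONG_OBS": "149:03:57",
#              "LAT_OBS": "-31:16:37", "UTDATE": "2013:11:23",
#              "UTSTART": "10:00:00", "UTEND": "10:30:00",
#              "MEANRA": 168.7, "MEANDEC": -60.8}
#   print(moon_distance(headers))  # max separation from the moon, in degrees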
def baryvel(dje):
"""
Calculates the heliocentric and barycentric velocity components of Earth.
Parameters
----------
dje : `~astropy.time.Time` or float
The Julian ephemeris date.
Returns
-------
dvelh : `~astropy.units.Quantity`
The heliocentric velocity components in (X, Y, Z) coordinates.
dvelb : `~astropy.units.Quantity`
The barycentric velocity components in (X, Y, Z) coordinates.
"""
if isinstance(dje, Time):
dje = dje.jd
# Prepare for the pain.
dcto = 2415020.
dcjul = 36525. # Days in Julian year
dc1 = 1.
# Constants dcfel(i,k) of fast changing elements.
dcfel = np.array([
1.7400353e00, 6.2833195099091e02, 5.2796e-6,
6.2565836e00, 6.2830194572674e02, -2.6180e-6,
4.7199666e00, 8.3997091449254e03, -1.9780e-5,
1.9636505e-1, 8.4334662911720e03, -5.6044e-5,
4.1547339e00, 5.2993466764997e01, 5.8845e-6,
4.6524223e00, 2.1354275911213e01, 5.6797e-6,
4.2620486e00, 7.5025342197656e00, 5.5317e-6,
1.4740694e00, 3.8377331909193e00, 5.6093e-6]).reshape(8, 3)
# Constants dceps and ccsel(i,k) of slowly changing elements.
dceps = np.array([4.093198e-1, -2.271110e-4, -2.860401e-8])
ccsel = np.array([
1.675104e-2, -4.179579e-5, -1.260516e-7,
2.220221e-1, 2.809917e-2, 1.852532e-5,
1.589963e00, 3.418075e-2, 1.430200e-5,
2.994089e00, 2.590824e-2, 4.155840e-6,
8.155457e-1, 2.486352e-2, 6.836840e-6,
1.735614e00, 1.763719e-2, 6.370440e-6,
1.968564e00, 1.524020e-2, -2.517152e-6,
1.282417e00, 8.703393e-3, 2.289292e-5,
2.280820e00, 1.918010e-2, 4.484520e-6,
4.833473e-2, 1.641773e-4, -4.654200e-7,
5.589232e-2, -3.455092e-4, -7.388560e-7,
4.634443e-2, -2.658234e-5, 7.757000e-8,
8.997041e-3, 6.329728e-6, -1.939256e-9,
2.284178e-2, -9.941590e-5, 6.787400e-8,
4.350267e-2, -6.839749e-5, -2.714956e-7,
1.348204e-2, 1.091504e-5, 6.903760e-7,
3.106570e-2, -1.665665e-4, -1.590188e-7]).reshape(17, 3)
# Constants of the arguments of the short-period perturbations.
dcargs = np.array([
5.0974222e0, -7.8604195454652e2,
3.9584962e0, -5.7533848094674e2,
1.6338070e0, -1.1506769618935e3,
2.5487111e0, -3.9302097727326e2,
4.9255514e0, -5.8849265665348e2,
1.3363463e0, -5.5076098609303e2,
1.6072053e0, -5.2237501616674e2,
1.3629480e0, -1.1790629318198e3,
5.5657014e0, -1.0977134971135e3,
5.0708205e0, -1.5774000881978e2,
3.9318944e0, 5.2963464780000e1,
4.8989497e0, 3.9809289073258e1,
1.3097446e0, 7.7540959633708e1,
3.5147141e0, 7.9618578146517e1,
3.5413158e0, -5.4868336758022e2]).reshape(15, 2)
# Amplitudes ccamps(n,k) of the short-period perturbations.
ccamps = np.array([
-2.279594e-5, 1.407414e-5, 8.273188e-6, 1.340565e-5, -2.490817e-7,
-3.494537e-5, 2.860401e-7, 1.289448e-7, 1.627237e-5, -1.823138e-7,
6.593466e-7, 1.322572e-5, 9.258695e-6, -4.674248e-7, -3.646275e-7,
1.140767e-5, -2.049792e-5, -4.747930e-6, -2.638763e-6, -1.245408e-7,
9.516893e-6, -2.748894e-6, -1.319381e-6, -4.549908e-6, -1.864821e-7,
7.310990e-6, -1.924710e-6, -8.772849e-7, -3.334143e-6, -1.745256e-7,
-2.603449e-6, 7.359472e-6, 3.168357e-6, 1.119056e-6, -1.655307e-7,
3.228859e-6, 1.308997e-7, 1.013137e-7, 2.403899e-6, -3.736225e-7,
3.442177e-7, 2.671323e-6, 1.832858e-6, -2.394688e-7, -3.478444e-7,
8.702406e-6, -8.421214e-6, -1.372341e-6, -1.455234e-6, -4.998479e-8,
-1.488378e-6, -1.251789e-5, 5.226868e-7, -2.049301e-7, 0,
-8.043059e-6, -2.991300e-6, 1.473654e-7, -3.154542e-7, 0,
3.699128e-6, -3.316126e-6, 2.901257e-7, 3.407826e-7, 0,
2.550120e-6, -1.241123e-6, 9.901116e-8, 2.210482e-7, 0,
-6.351059e-7, 2.341650e-6, 1.061492e-6, 2.878231e-7, 0]).reshape(15, 5)
# Constants csec3 and ccsec(n,k) of the secular perturbations in longitude.
ccsec3 = -7.757020e-8
ccsec = np.array([
1.289600e-6, 5.550147e-1, 2.076942e00,
3.102810e-5, 4.035027e00, 3.525565e-1,
9.124190e-6, 9.990265e-1, 2.622706e00,
9.793240e-7, 5.508259e00, 1.559103e01]).reshape(4, 3)
# Sidereal rates.
dcsld = 1.990987e-7 #sidereal rate in longitude
ccsgd = 1.990969e-7 #sidereal rate in mean anomaly
# Constants used in the calculation of the lunar contribution.
cckm = 3.122140e-5
ccmld = 2.661699e-6
ccfdi = 2.399485e-7
# Constants dcargm(i,k) of the arguments of the perturbations of the motion
# of the moon.
dcargm = np.array([5.1679830e0, 8.3286911095275e3, 5.4913150e0,
-7.2140632838100e3, 5.9598530e0, 1.5542754389685e4]).reshape(3, 2)
# Amplitudes ccampm(n,k) of the perturbations of the moon.
ccampm = np.array([
1.097594e-1, 2.896773e-7, 5.450474e-2, 1.438491e-7,
-2.223581e-2, 5.083103e-8, 1.002548e-2, -2.291823e-8,
1.148966e-2, 5.658888e-8, 8.249439e-3, 4.063015e-8]).reshape(3, 4)
# ccpamv(k) = a*m*dl,dt (planets), dc1mme = 1-mass(earth+moon)
ccpamv = np.array([8.326827e-11, 1.843484e-11, 1.988712e-12, 1.881276e-12])
dc1mme = 0.99999696e0
# Time arguments.
dt = (dje - dcto) / dcjul
tvec = np.array([1e0, dt, dt * dt])
    # Values of all elements for the instant dje.
temp = np.dot(tvec.T, dcfel.T).T % (2 * np.pi)
dml = temp[0]
forbel = temp[1:8]
g = forbel[0]
deps = (tvec * dceps).sum() % (2 * np.pi)
sorbel = np.dot(tvec.T, ccsel.T).T % (2 * np.pi)
e = sorbel[0]
# Secular perturbations in longitude.
sn = np.sin(np.dot(tvec[0:2].T, ccsec[:, 1:3].T).T % (2 * np.pi))
# Periodic perturbations of the Earth-Moon barycenter.
pertl = (ccsec[:,0] * sn).sum() + dt * ccsec3 * sn[2]
pertld, pertr, pertrd = 0, 0, 0
for k in range(0, 15):
        a = (dcargs[k,0] + dt * dcargs[k,1]) % (2 * np.pi)
cosa, sina = np.cos(a), np.sin(a)
pertl += ccamps[k,0] * cosa + ccamps[k,1] * sina
pertr += ccamps[k,2] * cosa + ccamps[k,3] * sina
if k < 11:
pertld += (ccamps[k,1] * cosa - ccamps[k,0] * sina) * ccamps[k,4]
pertrd += (ccamps[k,3] * cosa - ccamps[k,2] * sina) * ccamps[k,4]
# Elliptic part of the motion of the Earth-Moon barycenter.
phi = (e * e / 4e0) * (((8e0 / e) - e) * np.sin(g) + 5 * np.sin(2 * g) \
+ (13 / 3.) * e * np.sin(3 * g))
f = g + phi
sinf, cosf = np.sin(f), np.cos(f)
dpsi = (dc1 - e * e) / (dc1 + e * cosf)
phid = 2 * e * ccsgd * ((1 + 1.5 * e**2) * cosf + e * (1.25 - 0.5 * sinf**2))
psid = ccsgd * e * sinf / np.sqrt(dc1 - e * e)
# Perturbed heliocentric motion of the Earth-Moon barycenter.
d1pdro = dc1 + pertr
drd = d1pdro * (psid + dpsi * pertrd)
drld = d1pdro * dpsi * (dcsld + phid + pertld)
dtl = (dml + phi + pertl) % (2 * np.pi)
dsinls = np.sin(dtl)
dcosls = np.cos(dtl)
dxhd = drd * dcosls - drld * dsinls
dyhd = drd * dsinls + drld * dcosls
# Influence of eccentricity, evection and variation on the geocentric
# motion of the moon.
pertl, pertld, pertp, pertpd = 0, 0, 0, 0
for k in range(0, 3):
a = (dcargm[k,0] + dt * dcargm[k,1]) % (2 * np.pi)
sina = np.sin(a)
cosa = np.cos(a)
pertl += ccampm[k,0] * sina
pertld += ccampm[k,1] * cosa
pertp += ccampm[k,2] * cosa
pertpd -= ccampm[k,3] * sina
# Heliocentric motion of the Earth.
tl = forbel[1] + pertl
sinlm = np.sin(tl)
coslm = np.cos(tl)
sigma = cckm / (1.0 + pertp)
a = sigma * (ccmld + pertld)
b = sigma * pertpd
dxhd = dxhd + a * sinlm + b * coslm
dyhd = dyhd - a * coslm + b * sinlm
dzhd = -sigma * ccfdi * np.cos(forbel[2])
# Barycentric motion of the Earth.
dxbd = dxhd * dc1mme
dybd = dyhd * dc1mme
dzbd = dzhd * dc1mme
for k in range(0, 4):
plon = forbel[k + 3]
pomg = sorbel[k + 1]
pecc = sorbel[k + 9]
tl = (plon + 2.0 * pecc * np.sin(plon - pomg)) % (2 * np.pi)
dxbd += ccpamv[k] * (np.sin(tl) + pecc * np.sin(pomg))
dybd -= ccpamv[k] * (np.cos(tl) + pecc * np.cos(pomg))
dzbd -= ccpamv[k] * sorbel[k + 13] * np.cos(plon - sorbel[k + 5])
# Transition to mean equator of date.
dcosep = np.cos(deps)
dsinep = np.sin(deps)
dyahd = dcosep * dyhd - dsinep * dzhd
dzahd = dsinep * dyhd + dcosep * dzhd
dyabd = dcosep * dybd - dsinep * dzbd
dzabd = dsinep * dybd + dcosep * dzbd
dvelh = constants.au * (np.array([dxhd, dyahd, dzahd])) / u.second
dvelb = constants.au * (np.array([dxbd, dyabd, dzabd])) / u.second
return (dvelh, dvelb)
# NOTE:
# We may want to change the syntax input for corrections so that it accepts a single
# sky coordinate instead of ra/dec.
# Similarly lon/lat/alt/jd could be replaced with a single astropy.units.Time
# class.
def corrections(lon, lat, alt, ra, dec, mjd):
"""
Calculate the heliocentric radial velocity corrections for an astronomical
source.
Parameters
----------
lon : `~astropy.coordinates.Longitude` or float
Earth longitude of the observatory (western direction is positive). Can
be anything that initialises an `~astropy.coordinates.Angle` object
(if float, in degrees).
lat : `~astropy.coordinates.Latitude` or float
Earth latitude of observatory. Can be anything that initialises an
`~astropy.coordinates.Latitude` object (if float, in degrees).
alt : `~astropy.units.Quantity` or float
Altitude of the observatory (if float, in meters).
ra : `~astropy.coordinates.Angle` or float
Right ascension of the object for epoch J2000 (if float, in degrees).
dec : `~astropy.coordinates.Angle` or float
Declination of the object for epoch J2000 (if float, in degrees).
mjd : float
The modified Julian date for the middle of exposure.
Returns
-------
barycorr : `~astropy.units.Quantity`
The barycentric velocity correction.
helcorr : `~astropy.units.Quantity`
The heliocentric velocity correction.
"""
if not isinstance(lon, coord.Longitude):
lon = coord.Longitude(lon * u.deg)
if not isinstance(lat, coord.Latitude):
lat = coord.Latitude(lat * u.deg)
if not isinstance(alt, u.Quantity):
alt *= u.m
if not isinstance(ra, u.Quantity):
ra *= u.deg
if not isinstance(dec, u.Quantity):
dec *= u.deg
# Here we specify the location so that we can easily calculate the mean
# local siderial time later on
time = Time(2.4e6 + mjd, format="jd", location=(lon, lat, alt))
epoch = time.datetime.year + time.datetime.month/12. \
+ time.datetime.day/365.
# Precess the coordinates to the current epoch
coordinate = coord.SkyCoord(ra, dec, frame="fk5").transform_to(
coord.FK5(equinox="J{}".format(epoch)))
# Convert geodetic latitude into geocentric latitude to correct for rotation
# of the Earth
dlat = ((-11. * 60. + 32.743) * np.sin(2 * lat) + 1.1633 * np.sin(4 * lat) \
- 0.0026 * np.sin(6 * lat)) * u.degree
geocentric_lat = lat + dlat / 3600.
# Calculate distance of observer from Earth center
r = alt + 6378160.0 * u.m * (0.998327073 \
+ 0.001676438 * np.cos(2 * geocentric_lat) \
- 0.000003510 * np.cos(4 * geocentric_lat) \
+ 0.000000008 * np.cos(6 * geocentric_lat))
# Calculate rotational velocity perpendicular to the radius vector
# Note: 23.934469591229 is the siderial day in hours for 1986
v = 2 * np.pi * r / (23.934469591229 * 3600 * u.second)
# Calculate vdiurnal velocity
vdiurnal = v * np.cos(lat) * np.cos(coordinate.dec) \
* np.sin(coordinate.ra - time.sidereal_time("mean"))
# Calculate baricentric and heliocentric velocities
vh, vb = baryvel(time)
# Project along the line of sight
projection = np.array([
np.cos(coordinate.dec) * np.cos(coordinate.ra),
np.cos(coordinate.dec) * np.sin(coordinate.ra),
np.sin(coordinate.dec)])
vbar = (vb * projection).sum()
vhel = (vh * projection).sum()
# Using baricentric velocity for correction
vbar_correction = vdiurnal + vbar
vhel_correction = vdiurnal + vhel
# [TODO] it may be useful to return other components of velocity or extra
# information about the transforms (e.g., gmst, ut, lmst, dlat, lat, vbar,
# vhel, etc)
return (vbar_correction, vhel_correction)
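# Example (a sketch; an arbitrary site and target, all values hypothetical):
#
#   vbar, vhel = corrections(lon=149.07, lat=-31.28, alt=1164.0,
#                            ra=168.745, dec=-60.839, mjd=56666.0)
#   print(vbar.to("km/s"), vhel.to("km/s"))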
def sol_corrections(header):
alt_obs = header.get("ALT_OBS", None)
lat_obs = header.get("LAT_OBS", None)
long_obs = header.get("LONG_OBS", None)
if None in (alt_obs, lat_obs, long_obs):
# Try and determine it from the observatory name, if it exists.
origin = header.get("ORIGIN", None)
if origin is None:
raise KeyError("no observatory information available (ALT_OBS, "
"LAT_OBS, LONG_OBS) or ORIGIN")
raise NotImplementedError()
with resource_stream(__name__, "observatories.yaml") as fp:
observatories_dictionary = yaml.load(fp)
origin = origin.strip().lower()
if origin not in observatories_dictionary:
raise KeyError("could not find {} in the observatory dictionary"\
.format(origin))
observatory = observatories_dictionary[origin]
alt_obs = observatory["altitude"]
lat_obs = observatory["latitude"]
# Get the RA/DEC.
ra = header.get("RA", None) # assuming degrees
dec = header.get("DEC", None)
if None in (ra, dec):
raise KeyError("no position information (looked for RA/DEC)")
# Time of observation.
for k in ("UTDATE", "UTSTART", "UTEND"):
if k not in header:
raise KeyError("cannot find key {} in header".format(k))
ut_start = Time("{0}T{1}".format(header["UTDATE"].replace(":", "-"),
header["UTSTART"]), format="isot", scale="utc")
ut_end = Time("{0}T{1}".format(header["UTDATE"].replace(":", "-"),
header["UTEND"]), format="isot", scale="utc")
# Get the MJD of the mid-point of the observation.
mjd = (ut_end - ut_start).jd/2 + ut_start.mjd
# Calculate the correction.
return corrections(long_obs, lat_obs, alt_obs, ra, dec, mjd)
| GALAHProject/fits-spectrum-format | convert/motions.py | Python | mit | 16,386 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from collections import defaultdict
import json
import os
import urlparse
from mach.config import ConfigSettings
from mach.logging import LoggingManager
from mozbuild.backend.common import CommonBackend
from mozbuild.base import MozbuildObject
from mozbuild.frontend.data import (
FinalTargetFiles,
FinalTargetPreprocessedFiles,
)
from mozbuild.frontend.data import JARManifest, ChromeManifestEntry
from mozpack.chrome.manifest import (
Manifest,
ManifestChrome,
ManifestOverride,
ManifestResource,
parse_manifest,
)
import mozpack.path as mozpath
class ChromeManifestHandler(object):
def __init__(self):
self.overrides = {}
self.chrome_mapping = defaultdict(set)
def handle_manifest_entry(self, entry):
format_strings = {
"content": "chrome://%s/content/",
"resource": "resource://%s/",
"locale": "chrome://%s/locale/",
"skin": "chrome://%s/skin/",
}
if isinstance(entry, (ManifestChrome, ManifestResource)):
if isinstance(entry, ManifestResource):
dest = entry.target
url = urlparse.urlparse(dest)
if not url.scheme:
dest = mozpath.normpath(mozpath.join(entry.base, dest))
if url.scheme == 'file':
dest = mozpath.normpath(url.path)
else:
dest = mozpath.normpath(entry.path)
base_uri = format_strings[entry.type] % entry.name
self.chrome_mapping[base_uri].add(dest)
if isinstance(entry, ManifestOverride):
self.overrides[entry.overloaded] = entry.overload
if isinstance(entry, Manifest):
for e in parse_manifest(None, entry.path):
self.handle_manifest_entry(e)
class ChromeMapBackend(CommonBackend):
def _init(self):
CommonBackend._init(self)
log_manager = LoggingManager()
self._cmd = MozbuildObject(self.environment.topsrcdir, ConfigSettings(),
log_manager, self.environment.topobjdir)
self._install_mapping = {}
self.manifest_handler = ChromeManifestHandler()
def consume_object(self, obj):
if isinstance(obj, JARManifest):
self._consume_jar_manifest(obj)
if isinstance(obj, ChromeManifestEntry):
self.manifest_handler.handle_manifest_entry(obj.entry)
if isinstance(obj, (FinalTargetFiles,
FinalTargetPreprocessedFiles)):
self._handle_final_target_files(obj)
return True
def _handle_final_target_files(self, obj):
for path, files in obj.files.walk():
for f in files:
dest = mozpath.join(obj.install_target, path, f.target_basename)
is_pp = isinstance(obj,
FinalTargetPreprocessedFiles)
self._install_mapping[dest] = f.full_path, is_pp
def consume_finished(self):
# Our result has three parts:
# A map from url prefixes to objdir directories:
# { "chrome://mozapps/content/": [ "dist/bin/chrome/toolkit/content/mozapps" ], ... }
# A map of overrides.
# A map from objdir paths to sourcedir paths, and a flag for whether the source was preprocessed:
# { "dist/bin/browser/chrome/browser/content/browser/aboutSessionRestore.js":
# [ "$topsrcdir/browser/components/sessionstore/content/aboutSessionRestore.js", false ], ... }
outputfile = os.path.join(self.environment.topobjdir, 'chrome-map.json')
with self._write_file(outputfile) as fh:
chrome_mapping = self.manifest_handler.chrome_mapping
overrides = self.manifest_handler.overrides
json.dump([
{k: list(v) for k, v in chrome_mapping.iteritems()},
overrides,
self._install_mapping,
], fh, sort_keys=True, indent=2)
| cstipkovic/spidermonkey-research | python/mozbuild/mozbuild/codecoverage/chrome_map.py | Python | mpl-2.0 | 4,195 |
import os
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy
def scandir(dir, files=[]):
for file in os.listdir(dir):
path = os.path.join(dir, file)
if os.path.isfile(path) and path.endswith(".pyx"):
files.append(path.replace(os.path.sep, ".")[:-4])
elif os.path.isdir(path):
scandir(path, files)
return files
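# Example (illustrative): lib/foo/bar.pyx is recorded as the dotted module
# name "lib.foo.bar"; makeExtension below maps that name back to a .pyx path.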
def makeExtension(extName):
extPath = extName.replace(".", os.path.sep) + ".pyx"
return Extension(
extName,
[extPath],
        # your include_dirs must contain '.' for setup to search all the
        # subfolders of the code root folder
        include_dirs=['.', numpy.get_include()],
extra_compile_args=["-O3", "-fopenmp", "-march=native", "-finline-functions", "-ffast-math", "-msse4"],
extra_link_args=['-fopenmp', "-finline-functions", "-ffast-math", "-msse4"]
)
extNames = scandir('lib')
extensions = [makeExtension(name) for name in extNames]
"""
extensions.append(Extension('lib._tifffile',
[os.path.join("lib", 'tifffile.c')],
                            include_dirs=['.', numpy.get_include()],
extra_compile_args=["-Ofast", "-fopenmp", "-march=native", "-finline-functions", "-Wno-cpp", "-Wunused-but-set-variable"],
extra_link_args=['-fopenmp', "-finline-functions"]
),
)
extensions.append(Extension('lib.deconv',
[os.path.join("lib", 'deconv.c')],
include_dirs=['.', 'numpy.get_include()'],
extra_compile_args=["-Ofast", "-fopenmp", "-march=native", "-finline-functions", "-Wno-cpp", "-Wunused-but-set-variable"],
extra_link_args=['-fopenmp', "-finline-functions"]
),
)
"""
setup(
name="ICS",
ext_modules=extensions,
cmdclass={'build_ext': build_ext},
script_args=['build_ext'],
options={'build_ext': {'inplace': True, 'force': True}},
include_dirs=[numpy.get_include()]
)
| aurelienpierre/Image-Cases-Studies | setup.py | Python | gpl-3.0 | 2,163 |
'''apport package hook for udisks
(c) 2009 Canonical Ltd.
Author: Martin Pitt <[email protected]>
'''
import os
import os.path
import apport.hookutils
import dbus
UDISKS = 'org.freedesktop.UDisks'
def add_info(report):
apport.hookutils.attach_hardware(report)
user_rules = []
for f in os.listdir('/etc/udev/rules.d'):
if not f.startswith('70-persistent-') and f != 'README':
user_rules.append(f)
if user_rules:
report['CustomUdevRuleFiles'] = ' '.join(user_rules)
report['UDisksDump'] = apport.hookutils.command_output(['udisks', '--dump'])
report['Mounts'] = apport.hookutils.command_output(['mount'])
# grab SMART blobs
dkd = dbus.Interface(dbus.SystemBus().get_object(UDISKS,
'/org/freedesktop/UDisks'), UDISKS)
for d in dkd.EnumerateDevices():
dev_props = dbus.Interface(dbus.SystemBus().get_object(UDISKS, d),
dbus.PROPERTIES_IFACE)
blob = dev_props.Get(UDISKS, 'DriveAtaSmartBlob')
if len(blob) > 0:
report['AtaSmartBlob_' + os.path.basename(d)] = ''.join(map(chr, blob))
if __name__ == '__main__':
r = {}
add_info(r)
for k, v in r.iteritems():
print '%s: "%s"' % (k, v)
| Alberto-Beralix/Beralix | i386-squashfs-root/usr/share/apport/package-hooks/udisks.py | Python | gpl-3.0 | 1,236 |
# Copyright (C) 2013-2014 Computer Sciences Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def constant(f):
def fset(self, value):
raise SyntaxError('Unable to change constants')
def fget(self):
return f()
return property(fget, fset)
class __EzBakePropertyConstants(object):
# [Accumulo Constants]
@constant # (type: string) Property which represents the name of the accumulo instance
def ACCUMULO_INSTANCE_NAME():
return "accumulo.instance.name"
@constant # (type: string) Property which represents the namespace in accumulo we are working in
def ACCUMULO_NAMESPACE():
return "accumulo.namespace"
@constant # (type: string) Encrypted Property which represents the password for the user to connect to the database with
def ACCUMULO_PASSWORD():
return "accumulo.password"
@constant # (type: boolean) Property used to indicate whether our connector is in mock mode
def ACCUMULO_USE_MOCK():
return "accumulo.use.mock"
@constant # (type: string) Property which is the username we connect to the database with
def ACCUMULO_USERNAME():
return "accumulo.username"
    @constant # (type: string) Property which is a CSV of zookeeper connection strings (host:port) for the zookeeper servers that accumulo uses
def ACCUMULO_ZOOKEEPERS():
return "accumulo.zookeepers"
@constant # (type: int) Property which specifies the port of the accumulo proxy
def ACCUMULO_PROXY_PORT():
return "accumulo.proxy.port"
@constant # (type: string) Property which specifies the hostname of the accumulo proxy
def ACCUMULO_PROXY_HOST():
return "accumulo.proxy.host"
@constant # (type: boolean) Property which specifies if accumulo clients should use SSL
def ACCUMULO_USE_SSL():
return "accumulo.use.ssl"
@constant # (type: string) Property which specifies the path to the accumulo truststore
def ACCUMULO_SSL_TRUSTSTORE_PATH():
return "accumulo.ssl.truststore.path"
@constant # (type: string) Property which specifies the type of the accumulo truststore
def ACCUMULO_SSL_TRUSTSTORE_TYPE():
return "accumulo.ssl.truststore.type"
@constant # (type: string) Property which specifies the password for the accumulo truststore
def ACCUMULO_SSL_TRUSTSTORE_PASSWORD():
return "accumulo.ssl.truststore.password"
# [Application Constants]
@constant # (type: string) Property which represents the name of the application
def EZBAKE_APPLICATION_NAME():
return "application.name"
@constant # (type: string) Property which represents the version of the application
def EZBAKE_APPLICATION_VERSION():
return "ezbake.application.version"
# [Azkaban Constants]
@constant # (type: string) Property which represents url for azkaban
def AZKABAN_URL():
return "azkaban.url"
@constant # (type: string) Property which represents azkaban password
def AZKABAN_PASSWORD():
return "azkaban.password"
@constant # (type: string) Property which represents azkaban username
def AZKABAN_USERNAME():
return "azkaban.username"
# [Common Services Constants]
@constant # (type: string) Property which represents capco service
def CAPCO():
return "CapcoService"
@constant # (type: string) Property which represents datawarehouse service
def DATAWAREHOUSE():
return "warehaus"
@constant # (type: string) Property which represents document extraction service
def DOCUMENT_EXTRACTION():
return "docextract"
@constant # (type: string) Property which represents entity extraction service
def ENTITY_EXTRACTION():
return "entityextract"
@constant # (type: string) Property which represents ezdeployer service
def EZDEPLOYER():
return "ezdeployer"
@constant # (type: string) Property which represents ezsecurity service
def EZSECURITY():
return "EzbakeSecurityService"
@constant # (type: string) Property which represents ezsecurity registration service
def EZSECURITY_REGISTRATION():
return "EzSecurityRegistration"
@constant # (type: string) Property which represents geospatial extraction service
def GEOSPATIAL_EXTRACTION():
return "geosvc"
@constant # (type: string) Property which represents image indexer service
def IMAGE_INDEXER():
return "imageindexingservice"
@constant # (type: string) Property which represents image metadata extraction service
def IMAGE_METADATA_EXTRACTION():
return "imagemetadataextractionservice"
@constant # (type: string) Property which represents internal name service
def INTERNAL_NAME_SERVICE():
return "ins"
@constant # (type: string) Property which represents selector extraction service
def SELECTOR_EXTRACTION():
return "selext"
@constant # (type: string) Property which represents ssr service
def SSR():
return "ssrService"
@constant # (type: string) Property which represents temporal normalizer service
def TEMPORAL_NORMALIZER():
return "temporalsvc"
# [Elastic Search Constants]
@constant # (type: string) Property which represents elastic search cluster name key in ezconfig
def ELASTICSEARCH_CLUSTER_NAME():
return "elastic.cluster.name"
@constant # (type: string) Property which represents elastic search force refresh key in ezconfig
def ELASTICSEARCH_FORCE_REFRESH_ON_PUT():
return "elastic.force.refresh"
@constant # (type: string) Property which represents elastic search host name key in ezconfig
def ELASTICSEARCH_HOST():
return "elastic.host.name"
@constant # (type: string) Property which represents elastic search port key in ezconfig
def ELASTICSEARCH_PORT():
return "elastic.port"
# [Hadoop Constants]
@constant # (type: string) Property which represents ezconfig string to get default filesystem name
def HADOOP_FILESYSTEM_NAME():
return "fs.default.name"
@constant # (type: string) Property which represents ezconfig string to get hdfs implementation
def HADOOP_FILESYSTEM_IMPL():
return "fs.hdfs.impl"
@constant # (type: string) Property which represents ezconfig string to get filesystem use local value
def HADOOP_FILESYSTEM_USE_LOCAL():
return "fs.use.local"
# [Flume Constants]
@constant # (type: string) Property which represents flume key for agent type
def FLUME_AGENT_TYPE():
return "flume.agent.type"
@constant # (type: string) Property which represents flume key for backoff
def FLUME_BACK_OFF():
return "flume.backoff"
@constant # (type: string) Property which represents flume key for batch size
def FLUME_BATCH_SIZE():
return "flume.batch.size"
@constant # (type: string) Property which represents flume key for connect attempts
def FLUME_CONNECT_ATTEMPTS():
return "flume.connect.attempts"
@constant # (type: string) Property which represents flume key for connect timeout
def FLUME_CONNECT_TIMEOUT():
return "flume.connect.timeout"
@constant # (type: string) Property which represents flume key for headers
def FLUME_HEADERS():
return "flume.headers"
@constant # (type: string) Property which represents flume key for host selector
def FLUME_HOST_SELECTOR():
return "flume.host.selector"
@constant # (type: string) Property which represents flume key for hosts
def FLUME_HOSTS():
return "flume.hosts"
@constant # (type: string) Property which represents flume key for max attempts
def FLUME_MAX_ATTEMPTS():
return "flume.max.attempts"
@constant # (type: string) Property which represents flume key for max backoff
def FLUME_MAX_BACKOFF():
return "flume.max.backoff"
@constant # (type: string) Property which represents flume key for max events
def FLUME_MAX_EVENTS():
return "flume.max.events"
@constant # (type: string) Property which represents flume key for request timeout
def FLUME_REQUEST_TIMEOUT():
return "flume.request.timeout"
@constant # (type: string) Property which represents flume key for run interval
def FLUME_RUN_INTERVAL():
return "flume.run.interval"
@constant # (type: string) Property which represents flume key for sleep interval
def FLUME_SLEEP_INTERVAL():
return "flume.sleep.interval"
# [Kafka Constants]
@constant # (type: string) Property which represents kafka zookeeper connection string
def KAFKA_ZOOKEEPER():
return "kafka.zookeeper.connect"
@constant # (type: string) Property which represents kafka broker list ezconfig property
def KAFKA_BROKER_LIST():
return "kafka.metadata.broker.list"
    @constant # (type: int) Property which represents the time messages stay in memory before being flushed to Kafka if using an async producer (in milliseconds)
def KAFKA_QUEUE_TIME():
return "kafka.queue.time"
    @constant # (type: string) Property which represents the number of messages queued in memory before flushing to Kafka if using an async producer
def KAFKA_QUEUE_SIZE():
return "kafka.queue.size"
@constant # (type: string) Property which represents the type of producer (sync or async) used by Kafka
def KAFKA_PRODUCER_TYPE():
return "kafka.producer.type"
@constant # (type: int) Property which represents the zookeeper timeout for Kafka consumers
def KAFKA_ZOOKEEPER_SESSION_TIMEOUT():
return "kafka.zk.sessiontimeout.ms"
# [Mongo Configuration Constants]
@constant # (type: string) Property which represents mongo db host name ezconfig key
def MONGODB_HOST_NAME():
return "mongodb.host.name"
@constant # (type: int) Property which represents mongo db port number key
def MONGODB_PORT():
return "mongodb.port"
@constant # (type: string) Property which represents mongo db database name ezconfig key
def MONGODB_DB_NAME():
return "mongodb.database.name"
@constant # (type: string) Property which represents mongo db user name ezconfig key
def MONGODB_USER_NAME():
return "mongodb.user.name"
@constant # (type: string) Property which represents mongo db password ezconfig key
def MONGODB_PASSWORD():
return "mongodb.password"
@constant # (type: string) Property which represents mongo db use ssl ezconfig key
def MONGODB_USE_SSL():
return "mongodb.use.ssl"
@constant # (type: string) Property which represents the connection string that can be used to access mongo
def MONGODB_CONNECTION_STRING():
return "mongodb.connection.string"
# [Postgres Constants]
@constant # (type: string) Property which represents postgres db ezconfig key
def POSTGRES_DB():
return "postgres.db"
@constant # (type: string) Property which represents postgres host ezconfig key
def POSTGRES_HOST():
return "postgres.host"
@constant # (type: string) Property which represents postgres password ezconfig key
def POSTGRES_PASSWORD():
return "postgres.password"
@constant # (type: string) Property which represents postgres port ezconfig key
def POSTGRES_PORT():
return "postgres.port"
@constant # (type: string) Property which represents postgres username ezconfig key
def POSTGRES_USERNAME():
return "postgres.username"
@constant # (type: string) Property which represents whether postgres connection uses ssl ezconfig key
def POSTGRES_USE_SSL():
return "postgres.use.ssl"
# [Redis Constants]
    @constant # (type: string) Property which represents redis host ezconfig key
def REDIS_HOST():
return "redis.host"
    @constant # (type: int) Property which represents redis port ezconfig key
def REDIS_PORT():
return "redis.port"
@constant # (type: int) Property which represents redis db index ezconfig key
def REDIS_DB_INDEX():
return "redis.db.index"
# [Security Constants]
@constant # (type: string) Property which represents the security id
def EZBAKE_SECURITY_ID():
return "ezbake.security.app.id"
@constant # (type: string) Property which represents cache type ezconfig key
def EZBAKE_USER_CACHE_TYPE():
return "ezbake.security.cache.type"
@constant # (type: string) Property which represents cache ttl ezconfig key
def EZBAKE_USER_CACHE_TTL():
return "ezbake.security.cache.ttl"
@constant # (type: string) Property which represents cache size ezconfig key
def EZBAKE_USER_CACHE_SIZE():
return "ezbake.security.cache.size"
@constant # (type: string) Property which represents request expiration ezconfig key
def EZBAKE_REQUEST_EXPIRATION():
return "ezbake.security.request.expiration"
@constant # (type: string) Property which represents token expiration ezconfig key
def EZBAKE_TOKEN_EXPIRATION():
return "ezbake.security.token.ttl"
    @constant # (type: int) How long a proxy token remains valid after being issued
def EZBAKE_SECURITY_PROXYTOKEN_TTL():
return "ezbake.security.proxytoken.ttl"
@constant # (type: int) How long after expiration a token can be re-issued
def EZBAKE_SECURITY_TOKEN_REFRESH_LIMIT():
return "ezbake.security.token.refresh.limit"
@constant # (type: string) Property which represents app registration implementation ezconfig key
def EZBAKE_APP_REGISTRATION_IMPL():
return "ezbake.security.app.service.impl"
@constant # (type: string) Property which represents admins file ezconfig key
def EZBAKE_ADMINS_FILE():
return "ezbake.security.admins.file"
@constant # (type: string) Property which represents service implementation ezconfig key
def EZBAKE_USER_SERVICE_IMPL():
return "ezbake.security.user.service.impl"
@constant # (type: string) Property which represents mock server ezconfig key
def EZBAKE_SECURITY_SERVICE_MOCK_SERVER():
return "ezbake.security.server.mock"
@constant # (type: string) Property which represents use forward proxy ezconfig key
def EZBAKE_USE_FORWARD_PROXY():
return "ezbake.frontend.use.forward.proxy"
@constant # (type: string) Property which represents ssl protocol ezconfig key
def EZBAKE_SSL_PROTOCOL_KEY():
return "ezbake.ssl.protocol"
@constant # (type: string) Property which represents ssl ciphers ezconfig key
def EZBAKE_SSL_CIPHERS_KEY():
return "ezbake.ssl.ciphers"
@constant # (type: string) Property which represents peer validation ezconfig key
def EZBAKE_SSL_PEER_AUTH_REQUIRED():
return "ezbake.ssl.peer.validation"
@constant # (type: boolean) Property which tells us if we are using the default ssl key
def EZBAKE_SSL_USE_DEFAULT_SSL_KEY():
return "ezbake.security.default.ssl"
@constant # (type: string) Property which represents the trusted certificates file
def EZBAKE_APPLICATION_TRUSTED_CERT():
return "ezbake.ssl.trustedcert.file"
@constant # (type: string) Property which represents the private key file
def EZBAKE_APPLICATION_PRIVATE_KEY_FILE():
return "ezbake.ssl.privatekey.file"
@constant # (type: string) Property which represents the certificates file
def EZBAKE_APPLICATION_CERT_FILE():
return "ezbake.ssl.certificate.file"
@constant # (type: string) Property which represents the public key file for a service
def EZBAKE_APPLICATION_PUBLIC_KEY_FILE():
return "ezbake.ssl.servicekey.file"
# [SSL Constants]
@constant # (type: string) Property which represents the path to the system keystore
def SYSTEM_KEYSTORE_PATH():
return "system.keystore.path"
@constant # (type: string) Property which represents the type of the system keystore
def SYSTEM_KEYSTORE_TYPE():
return "system.keystore.type"
@constant # (type: string) Property which represents the password for the system keystore
def SYSTEM_KEYSTORE_PASSWORD():
return "system.keystore.password"
@constant # (type: string) Property which represents the path to the system truststore
def SYSTEM_TRUSTSTORE_PATH():
return "system.truststore.path"
@constant # (type: string) Property which represents the type of the system truststore
def SYSTEM_TRUSTSTORE_TYPE():
return "system.truststore.type"
@constant # (type: string) Property which represents the password for the system truststore
def SYSTEM_TRUSTSTORE_PASSWORD():
return "system.truststore.password"
@constant # (type: string) Property which represents keystore file ezconfig key
def EZBAKE_APPLICATION_KEYSTORE_FILE():
return "ezbake.ssl.keystore.file"
@constant # (type: string) Property which represents keystore type ezconfig key
def EZBAKE_APPLICATION_KEYSTORE_TYPE():
return "ezbake.ssl.keystore.type"
@constant # (type: string) Property which represents keystore password ezconfig key
def EZBAKE_APPLICATION_KEYSTORE_PASS():
return "ezbake.ssl.keystore.pass"
@constant # (type: string) Property which represents truststore file ezconfig key
def EZBAKE_APPLICATION_TRUSTSTORE_FILE():
return "ezbake.ssl.truststore.file"
@constant # (type: string) Property which represents truststore type ezconfig key
def EZBAKE_APPLICATION_TRUSTSTORE_TYPE():
return "ezbake.ssl.truststore.type"
@constant # (type: string) Property which represents truststore password ezconfig key
def EZBAKE_APPLICATION_TRUSTSTORE_PASS():
return "ezbake.ssl.truststore.pass"
# [Service Constants]
@constant # (type: string) Property representing the location to the certificates directory
def EZBAKE_CERTIFICATES_DIRECTORY():
return "ezbake.security.ssl.dir"
@constant # (type: string) Property which represents the name of the service
def EZBAKE_SERVICE_NAME():
return "service.name"
# [Storm Constants]
@constant # (type: string) Property representing the nimbus host
def STORM_NIMBUS_HOST():
return "storm.nimbus.host"
@constant # (type: int) Property representing the nimbus port
def STORM_NIMBUS_THRIFT_PORT():
return "storm.nimbus.thrift.port"
# [System Constants]
@constant # (type: string) Property which represents ezbake admin application deployment ezconfig key
def EZBAKE_ADMIN_APPLICATION_DEPLOYMENT():
return "ezbake.system.admin.application.deployment"
@constant # (type: string) Property which represents ezbake log directory ezconfig key
def EZBAKE_LOG_DIRECTORY():
return "ezbake.log.directory"
@constant # (type: string) Property which represents ezbake log standard out ezconfig key
def EZBAKE_LOG_TO_STDOUT():
return "ezbake.log.stdout"
@constant # (type: string) Property which represents the environment variable for the shared secret
def EZBAKE_SHARED_SECRET_ENVIRONMENT_VARIABLE():
return "ezbake.shared.secret.environment.variable"
# [System Services Constants]
@constant # (type: string) Property which represents frontend service
def FRONTEND():
return "ezfrontend"
# [Thrift Constants]
@constant # (type: string) Property which represents thrifts max idle clients ezconfig key
def THRIFT_MAX_IDLE_CLIENTS():
return "thrift.max.idle.clients"
@constant # (type: string) Property which represents thrifts max pool clients ezconfig key
def THRIFT_MAX_POOL_CLIENTS():
return "thrift.max.pool.clients"
@constant # (type: string) Property which represents thrifts milliseconds between client eviction checks ezconfig key
def THRIFT_MILLIS_BETWEEN_CLIENT_EVICTION_CHECKS():
return "thrift.millis.between.client.eviction.checks"
@constant # (type: string) Property which represents thrifts milliseconds before client eviction ezconfig key
def THRIFT_MILLIS_IDLE_BEFORE_EVICTION():
return "thrift.millis.idle.before.eviction"
@constant # (type: string) Property which represents thrifts server mode ezconfig key
def THRIFT_SERVER_MODE():
return "thrift.server.mode"
@constant # (type: string) Property which represents thrifts test pool on borrow ezconfig key
def THRIFT_TEST_ON_BORROW():
return "thrift.test.pool.on.borrow"
@constant # (type: string) Property which represents thrifts test while idle ezconfig key
def THRIFT_TEST_WHILE_IDLE():
return "thrift.test.pool.while.idle"
@constant # (type: string) Property which represents thrifts use ssl ezconfig key
def THRIFT_USE_SSL():
return "thrift.use.ssl"
@constant # (type: boolean) Property which represents if the client pool should block on exhaustion or throw an exception
def THRIFT_BLOCK_WHEN_EXHAUSTED():
return "thrift.block.when.exhausted"
@constant # (type: boolean) Property which tells us to actually pool clients or not
def THRIFT_ACTUALLY_POOL_CLIENTS():
return "thrift.actually.pool.clients"
    @constant # (type: boolean) Log a stack trace whenever an object is abandoned from the pool
def THRIFT_LOG_ABANDONDED():
return "thrift.pool.log.abandoned"
@constant # (type: boolean) Whether to abandon objects if they exceed the abandon timeout when borrow is called
def THRIFT_ABANDON_ON_BORROW():
return "thrift.pool.abandon.on.borrow"
@constant # (type: boolean) Whether to abandon objects if they exceed the abandon timeout when the evictor runs
def THRIFT_ABANDON_ON_MAINTENANCE():
return "thrift.pool.abandon.on.maintenance"
@constant # (type: string) Timeout in seconds before an abandoned object is removed
def THRIFT_ABANDON_TIMEOUT():
return "thrift.pool.abandon.timeout"
# [Web Application Constants]
@constant # (type: string) Property which represents web application external domain ezconfig key
def EZBAKE_WEB_APPLICATION_EXTERNAL_DOMAIN():
return "web.application.external.domain"
@constant # (type: string) Property which represents web application metrics endpoint ezconfig key
def EZBAKE_WEB_APPLICATION_METRICS_ENDPOINT():
return "web.application.metrics.endpoint"
@constant # (type: string) Property which represents web application metrics siteid ezconfig key
def EZBAKE_WEB_APPLICATION_METRICS_SITEID():
return "web.application.metrics.siteid"
@constant # (type: string) Property which represents security description banner: text
def EZBAKE_WEB_APPLICATION_BANNER_TEXT():
return "web.application.security.banner.text"
@constant # (type: string) Property which represents security description banner: background color
def EZBAKE_WEB_APPLICATION_BANNER_BGCOLOR():
return "web.application.security.banner.background.color"
@constant # (type: string) Property which represents security description banner: text color
def EZBAKE_WEB_APPLICATION_BANNER_TEXTCOLOR():
return "web.application.security.banner.text.color"
# [Zookeeper Constants]
@constant # (type: string) Property which is a CSV of zookeeper servers (host:port)
def ZOOKEEPER_CONNECTION_STRING():
return "zookeeper.connection.string"
# [MonetDB Constants]
@constant # (type: string) The MonetDB Username
def MONETDB_USERNAME():
return "monetdb.username"
@constant # (type: string) The MonetDB Password
def MONETDB_PASSWORD():
return "monetdb.password"
@constant # (type: string) The hostname of the MonetDB server
def MONETDB_HOSTNAME():
return "monetdb.hostname"
@constant # (type: int) The port number that MonetDB is running on
def MONETDB_PORT():
return "monetdb.port"
EzBakePropertyConstants = __EzBakePropertyConstants()
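# Hedged usage sketch (added for illustration, not part of the original API):
# the singleton exposes each key as a read-only attribute; assignment raises
# SyntaxError via the `constant` decorator defined above.
if __name__ == '__main__':
    print(EzBakePropertyConstants.ACCUMULO_INSTANCE_NAME)  # "accumulo.instance.name"
    try:
        EzBakePropertyConstants.ACCUMULO_INSTANCE_NAME = 'other'
    except SyntaxError as err:
        print(err)  # Unable to change constants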
| ezbake/ezbake-configuration | api/python/lib/ezconfiguration/constants/EzBakePropertyConstants.py | Python | apache-2.0 | 24,922 |
# Modified by CNSL
# 1) including TDNN based char embedding
# 06/02/17
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import torch
import torch.nn as nn
from . import layers
from .tdnn import TDNN
from .highway import Highway
import torch.nn.functional as F
import pdb
class RnnDocReader(nn.Module):
"""Network for the Document Reader module of DrQA."""
RNN_TYPES = {'lstm': nn.LSTM, 'gru': nn.GRU, 'rnn': nn.RNN}
def __init__(self, opt, padding_idx=0, padding_idx_char=0):
super(RnnDocReader, self).__init__()
# Store config
self.opt = opt
        # Word embeddings (+1 for padding), usually initialized from GloVe
self.embedding = nn.Embedding(opt['vocab_size'],
opt['embedding_dim'],
padding_idx=padding_idx)
# Char embeddings (+1 for padding)
#pdb.set_trace()
if opt['add_char2word']:
self.char_embedding = nn.Embedding(opt['vocab_size_char'],
opt['embedding_dim_char'],
padding_idx=padding_idx_char)
self.char_embedding.weight = nn.Parameter(torch.Tensor(opt['vocab_size_char'],opt['embedding_dim_char']).uniform_(-1,1))
self.TDNN = TDNN(opt)
if opt['nLayer_Highway'] > 0 :
self.Highway = Highway(opt['embedding_dim'] + opt['embedding_dim_TDNN'], opt['nLayer_Highway'], F.relu)
# ...(maybe) keep them fixed (word only)
if opt['fix_embeddings']:
for p in self.embedding.parameters():
p.requires_grad = False
# Register a buffer to (maybe) fill later for keeping *some* fixed
if opt['tune_partial'] > 0:
buffer_size = torch.Size((
opt['vocab_size'] - opt['tune_partial'] - 2,
opt['embedding_dim']
))
self.register_buffer('fixed_embedding', torch.Tensor(buffer_size))
# Projection for attention weighted question
if opt['use_qemb']:
if opt['add_char2word']:
self.qemb_match = layers.SeqAttnMatch(opt['embedding_dim'] + opt['embedding_dim_TDNN'])
else:
self.qemb_match = layers.SeqAttnMatch(opt['embedding_dim'])
        # Input size to RNN: word emb + manual features (+ char-TDNN emb),
        # plus the attention-weighted question emb when use_qemb is set
        # (forward() only concatenates x2_weighted_emb in that case).
        doc_input_size = opt['embedding_dim'] + opt['num_features']
        if opt['add_char2word']:
            doc_input_size += opt['embedding_dim_TDNN']
        if opt['use_qemb']:
            # the question emb matches the (possibly char-augmented) word emb size
            doc_input_size += opt['embedding_dim']
            if opt['add_char2word']:
                doc_input_size += opt['embedding_dim_TDNN']
        #pdb.set_trace()
# RNN document encoder
self.doc_rnn = layers.StackedBRNN(
input_size=doc_input_size,
hidden_size=opt['hidden_size'],
num_layers=opt['doc_layers'],
dropout_rate=opt['dropout_rnn'],
dropout_output=opt['dropout_rnn_output'],
concat_layers=opt['concat_rnn_layers'],
rnn_type=self.RNN_TYPES[opt['rnn_type']],
padding=opt['rnn_padding'],
)
# RNN question encoder
q_input_size = opt['embedding_dim']
if opt['add_char2word']:
q_input_size += opt['embedding_dim_TDNN']
self.question_rnn = layers.StackedBRNN(
input_size=q_input_size,
hidden_size=opt['hidden_size'],
num_layers=opt['question_layers'],
dropout_rate=opt['dropout_rnn'],
dropout_output=opt['dropout_rnn_output'],
concat_layers=opt['concat_rnn_layers'],
rnn_type=self.RNN_TYPES[opt['rnn_type']],
padding=opt['rnn_padding'],
)
# Output sizes of rnn encoders
doc_hidden_size = 2 * opt['hidden_size']
question_hidden_size = 2 * opt['hidden_size']
if opt['concat_rnn_layers']:
doc_hidden_size *= opt['doc_layers']
question_hidden_size *= opt['question_layers']
# Question merging
if opt['question_merge'] not in ['avg', 'self_attn']:
            raise NotImplementedError('question_merge = %s' % opt['question_merge'])
if opt['question_merge'] == 'self_attn':
self.self_attn = layers.LinearSeqAttn(question_hidden_size)
# Q-P matching
opt['qp_rnn_size'] = doc_hidden_size + question_hidden_size
if opt['qp_bottleneck']:
opt['qp_rnn_size'] = opt['hidden_size']
self.qp_match = layers.GatedAttentionBilinearRNN(
x_size = doc_hidden_size,
y_size = question_hidden_size,
hidden_size= opt['qp_rnn_size'],
padding=opt['rnn_padding'],
rnn_type=self.RNN_TYPES[opt['rnn_type']],
birnn=opt['qp_birnn'],
concat = opt['qp_concat'],
gate=True
)
qp_matched_size = opt['qp_rnn_size']
if opt['qp_birnn']:
qp_matched_size = qp_matched_size * 2
if opt['qp_concat']:
qp_matched_size = qp_matched_size + doc_hidden_size
# Bilinear attention for span start/end
if opt['task_QA']:
self.start_attn = layers.BilinearSeqAttn(
qp_matched_size,
question_hidden_size
)
self.end_attn = layers.BilinearSeqAttn(
qp_matched_size,
question_hidden_size
)
# Paragraph Hierarchical Encoder
if opt['ans_sent_predict'] :
self.meanpoolLayer = layers.Selective_Meanpool(doc_hidden_size)
self.sentBRNN = layers.StackedBRNN(
input_size=qp_matched_size,
hidden_size=opt['hidden_size_sent'],
num_layers=opt['nLayer_Sent'],
concat_layers=False,
rnn_type=self.RNN_TYPES[opt['rnn_type']],
padding=opt['rnn_padding_sent'],
)
self.sentseqAttn = layers.BilinearSeqAttn(
opt['hidden_size_sent']*2,
question_hidden_size
)
#print('DEBUG (no hRNN)')
#def forward(self, x1, x1_f, x1_mask, x2, x2_mask, x1_c, x1_c_mask, x2_c, x2_c_mask):
#def forward(self, x1, x1_f, x1_mask, x2, x2_mask, x1_c=None, x2_c=None): # for this version, we do not utilize mask for char
def forward(self, x1, x1_f, x1_mask, x2, x2_mask, x1_c=None, x2_c=None, x1_sent_mask=None, word_boundary=None): # for this version, we do not utilize mask for char
#pdb.set_trace()
"""Inputs:
x1 = document word indices [batch * len_d]
x1_f = document word features indices [batch * len_d * nfeat]
        x1_mask = document padding mask [batch * len_d]
x2 = question word indices [batch * len_q]
        x2_mask = question padding mask [batch * len_q]
x1_c = document char indices [batch * len_d * max_char_per_word]
x1_c_mask = document char padding mask [batch * len_d * max_char_per_word] --> not implemented in this version
x2_c = question char indices [batch * len_q * max_char_per_word]
x2_c_mask = question char padding mask [batch * len_q * max_char_per_word] --> not implemented in this version
"""
# Embed both document and question
batch_size = x1.size()[0]
doc_len = x1.size()[1]
ques_len = x2.size()[1]
x1_emb = self.embedding(x1) # N x Td x D
x2_emb = self.embedding(x2) # N x Tq x D
if self.opt['add_char2word']:
max_wordL_d = x1_c.size()[2]
max_wordL_q = x2_c.size()[2]
x1_c = x1_c.view(-1, max_wordL_d)
x2_c = x2_c.view(-1, max_wordL_q)
x1_c_emb = self.char_embedding(x1_c)
x2_c_emb = self.char_embedding(x2_c)
x1_c_emb = x1_c_emb.view(batch_size,
doc_len,
max_wordL_d,
-1)
x2_c_emb = x2_c_emb.view(batch_size,
ques_len,
max_wordL_q,
-1)
# Produce char-aware word embed
x1_cw_emb = self.TDNN(x1_c_emb) # N x Td x sum(H)
x2_cw_emb = self.TDNN(x2_c_emb) # N x Tq x sum(H)
# Merge word + char
x1_emb = torch.cat((x1_emb, x1_cw_emb), 2)
x2_emb = torch.cat((x2_emb, x2_cw_emb), 2)
###x1_mask = torch.cat([x1_mask, x1_c_mask], 2) # For this version, we do not utilize char mask
###x2_mask = torch.cat([x2_mask, x2_c_mask], 2) # For this version, we do not utilize char mask
# Highway network
if self.opt['nLayer_Highway'] > 0:
[batch_size, seq_len, embed_size] = x1_emb.size()
x1_emb = self.Highway(x1_emb.view(-1, embed_size))
x1_emb = x1_emb.view(batch_size, -1, embed_size)
[batch_size, seq_len, embed_size] = x2_emb.size()
x2_emb = self.Highway(x2_emb.view(-1, embed_size))
x2_emb = x2_emb.view(batch_size, -1, embed_size)
else:
if (('x1_c' in locals()) and ('x2_c' in locals())):
#pdb.set_trace()
x1_sent_mask = x1_c
word_boundary = x2_c
# Dropout on embeddings
if self.opt['dropout_emb'] > 0:
x1_emb = nn.functional.dropout(x1_emb, p=self.opt['dropout_emb'], training=self.training)
x2_emb = nn.functional.dropout(x2_emb, p=self.opt['dropout_emb'], training=self.training)
# Add attention-weighted question representation
if self.opt['use_qemb']:
x2_weighted_emb = self.qemb_match(x1_emb, x2_emb, x2_mask)
drnn_input = torch.cat([x1_emb, x2_weighted_emb, x1_f], 2)
else:
drnn_input = torch.cat([x1_emb, x1_f], 2)
# Encode document with RNN
doc_hiddens = self.doc_rnn(drnn_input, x1_mask)
# Encode question with RNN
question_hiddens = self.question_rnn(x2_emb, x2_mask)
# QP matching
qp_matched_doc = self.qp_match(doc_hiddens, x1_mask, question_hiddens, x2_mask)
# Merge question hiddens
if self.opt['question_merge'] == 'avg':
q_merge_weights = layers.uniform_weights(question_hiddens, x2_mask)
elif self.opt['question_merge'] == 'self_attn':
q_merge_weights = self.self_attn(question_hiddens, x2_mask)
question_hidden = layers.weighted_avg(question_hiddens, q_merge_weights)
return_list = []
# Predict start and end positions
if self.opt['task_QA']:
start_scores = self.start_attn(qp_matched_doc, question_hidden, x1_mask)
end_scores = self.end_attn(qp_matched_doc, question_hidden, x1_mask)
return_list = return_list + [start_scores, end_scores]
        # Pooling; currently no multi-task learning
if self.opt['ans_sent_predict']:
sent_hiddens = self.meanpoolLayer(qp_matched_doc, word_boundary)
if self.opt['nLayer_Sent'] > 0:
sent_hiddens = self.sentBRNN(sent_hiddens, x1_sent_mask)
sent_scores = self.sentseqAttn(sent_hiddens, question_hidden, x1_sent_mask)
return_list = return_list + [sent_scores]
return return_list
| calee88/ParlAI | parlai/agents/drqa_msmarco/rnet_qp.py | Python | bsd-3-clause | 11,967 |
# -*- coding: utf-8 -*-
import scrapy
from aliexpresscoupons.items import AliexpressItem
import datetime
class CouponsSpider(scrapy.Spider):
name = "coupons"
allowed_domains = ["coupon.aliexpress.com"]
start_urls = (
'http://coupon.aliexpress.com/proengine/sellerCouponList.htm',
)
def parse(self, response):
for li in response.xpath('//ul[@class="coupon-list clearfix"]/li'):
item = AliexpressItem()
item['url'] = li.xpath('a/@href').extract()[0]
item['storeName'] = ''.join(li.xpath('a/div/span[@class="store-name"]/descendant-or-self::*/text()').extract())
item['couponPrice'] = float(li.xpath('a/div/span[@class="coupon-price"]/em/text()').extract()[0].strip().replace('$', ''))
item['couponPriceText'] = ''.join(li.xpath('a/div/span[@class="coupon-price"]/descendant-or-self::*/text()').extract())
item['couponOrderPrice'] = float(li.xpath('a/div/span[@class="coupon-order-price"]/em/text()').extract()[0].strip().replace('$', ''))
item['couponOrderPriceText'] = ''.join(li.xpath('a/div/span[@class="coupon-order-price"]/descendant-or-self::*/text()').extract())
dateText = ''.join(li.xpath('a/div/span[@class="coupon-time"]/descendant-or-self::*/text()').extract())
_date = dateText.strip().replace('Valid before ', '').strip()
_date = datetime.datetime.strptime(_date, '%d %b,%Y')
item['couponTimeText'] = dateText
item['couponTime'] = _date
yield item
page = response.xpath('//a[@class="page-next"]/@page').extract()
if len(page) == 1:
page = page[0]
data = {
'page': page,
'_csrf_token_': response.xpath('//input[@name="_csrf_token_"]/@value').extract()[0],
}
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip,deflate',
'Accept-Language': 'en-US,en;q=0.8',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
}
yield scrapy.FormRequest('http://coupon.aliexpress.com/proengine/sellerCouponList.htm',
formdata=data,
headers=headers,
callback=self.parse) | ils78/aliexpresscoupons | aliexpresscoupons/spiders/coupons.py | Python | mit | 2,516 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from collections import defaultdict
from twitter.common.collections import OrderedSet
from pants.util.dirutil import fast_relpath
class ProductError(Exception): pass
class UnionProducts(object):
"""Here, products for a target are the ordered union of the products for its transitive deps."""
def __init__(self, products_by_target=None):
# A map of target to OrderedSet of product members.
self._products_by_target = products_by_target or defaultdict(OrderedSet)
def copy(self):
"""Returns a copy of this UnionProducts.
Edits to the copy's mappings will not affect the product mappings in the original.
    The copy is shallow though, so edits to the copy's product values will mutate the original's
product values.
:rtype: :class:`UnionProducts`
"""
products_by_target = defaultdict(OrderedSet)
for key, value in self._products_by_target.items():
products_by_target[key] = OrderedSet(value)
return UnionProducts(products_by_target=products_by_target)
def add_for_target(self, target, products):
"""Updates the products for a particular target, adding to existing entries."""
self._products_by_target[target].update(products)
def add_for_targets(self, targets, products):
"""Updates the products for the given targets, adding to existing entries."""
# FIXME: This is a temporary helper for use until the classpath has been split.
for target in targets:
self.add_for_target(target, products)
def remove_for_target(self, target, products):
"""Updates the products for a particular target, removing the given existing entries."""
for product in products:
self._products_by_target[target].discard(product)
def get_for_target(self, target, transitive=True):
"""Gets the transitive product deps for the given target."""
return self.get_for_targets([target], transitive=transitive)
def get_for_targets(self, targets, transitive=True):
"""Gets the transitive product deps for the given targets, in order."""
products = OrderedSet()
visited = set()
    # Walk the targets transitively to aggregate their products, visiting
    # each dependency once in breadth-first order.
for target in targets:
if transitive:
deps = target.closure(bfs=True)
else:
deps = [target]
for dep in deps:
if dep not in visited:
products.update(self._products_by_target[dep])
visited.add(dep)
return products
def target_for_product(self, product):
"""Looks up the target key for a product.
:param product: The product to search for
:return: None if there is no target for the product
"""
for target, products in self._products_by_target.items():
if product in products:
return target
return None
def __str__(self):
return "UnionProducts({})".format(self._products_by_target)
class RootedProducts(object):
"""File products of a build that have a concept of a 'root' directory.
E.g., classfiles, under a root package directory."""
def __init__(self, root):
self._root = root
self._rel_paths = OrderedSet()
def add_abs_paths(self, abs_paths):
for abs_path in abs_paths:
self._rel_paths.add(fast_relpath(abs_path, self._root))
def add_rel_paths(self, rel_paths):
self._rel_paths.update(rel_paths)
def root(self):
return self._root
def rel_paths(self):
return self._rel_paths
def abs_paths(self):
for relpath in self._rel_paths:
yield os.path.join(self._root, relpath)
  def __bool__(self):
    # truth-testing must return a bool, not the OrderedSet itself
    return bool(self._rel_paths)
__nonzero__ = __bool__
class MultipleRootedProducts(object):
"""A product consisting of multiple roots, with associated file products."""
def __init__(self):
self._rooted_products_by_root = {}
def add_rel_paths(self, root, rel_paths):
self._get_products_for_root(root).add_rel_paths(rel_paths)
def add_abs_paths(self, root, abs_paths):
self._get_products_for_root(root).add_abs_paths(abs_paths)
def rel_paths(self):
for root, products in self._rooted_products_by_root.items():
yield root, products.rel_paths()
def abs_paths(self):
for root, products in self._rooted_products_by_root.items():
yield root, products.abs_paths()
def _get_products_for_root(self, root):
return self._rooted_products_by_root.setdefault(root, RootedProducts(root))
def __bool__(self):
"""Return True if any of the roots contains products"""
for root, products in self.rel_paths():
if products:
return True
return False
__nonzero__ = __bool__
def __str__(self):
return "MultipleRootedProducts({})".format(self._rooted_products_by_root)
class Products(object):
"""An out-of-band 'dropbox' where tasks can place build product information for later tasks to use.
Historically, the only type of product was a ProductMapping. However this had some issues, as not
all products fit into the (basedir, [files-under-basedir]) paradigm. Also, ProductMapping docs
and varnames refer to targets, and implicitly expect the mappings to be keyed by a target, however
we sometimes also need to map sources to products.
So in practice we ended up abusing this in several ways:
1) Using fake basedirs when we didn't have a basedir concept.
2) Using objects other than strings as 'product paths' when we had a need to.
3) Using things other than targets as keys.
Right now this class is in an intermediate stage, as we transition to a more robust Products concept.
The abuses have been switched to use 'data_products' (see below) which is just a dictionary
of product type (e.g., 'classes_by_target') to arbitrary payload. That payload can be anything,
but the MultipleRootedProducts class is useful for products that do happen to fit into the
(basedir, [files-under-basedir]) paradigm.
The long-term future of Products is TBD. But we do want to make it easier to reason about
which tasks produce which products and which tasks consume them. Currently it's quite difficult
to match up 'requires' calls to the producers of those requirements, especially when the 'typename'
is in a variable, not a literal.
"""
class ProductMapping(object):
"""Maps products of a given type by target. Each product is a map from basedir to a list of
files in that dir.
"""
def __init__(self, typename):
self.typename = typename
self.by_target = defaultdict(lambda: defaultdict(list))
def empty(self):
return len(self.by_target) == 0
def add(self, target, basedir, product_paths=None):
"""
Adds a mapping of products for the given target, basedir pair.
If product_paths are specified, these will over-write any existing mapping for this target.
If product_paths is omitted, the current mutable list of mapped products for this target
and basedir is returned for appending.
"""
if product_paths is not None:
self.by_target[target][basedir].extend(product_paths)
else:
return self.by_target[target][basedir]
def has(self, target):
"""Returns whether we have a mapping for the specified target."""
return target in self.by_target
def get(self, target):
"""
Returns the product mapping for the given target as a tuple of (basedir, products list).
Can return None if there is no mapping for the given target.
"""
return self.by_target.get(target)
def __getitem__(self, target):
"""
Support for subscripting into this mapping. Returns the product mapping for the given target
as a map of <basedir> -> <products list>.
If no mapping exists, returns an empty map whose values default to empty lists. So you
can use the result without checking for None.
"""
return self.by_target[target]
def itermappings(self):
"""
Returns an iterable over all pairs (target, product) in this mapping.
Each product is itself a map of <basedir> -> <products list>.
"""
return self.by_target.iteritems()
def keys_for(self, basedir, product):
"""Returns the set of keys the given mapped product is registered under."""
keys = set()
for key, mappings in self.by_target.items():
for mapped in mappings.get(basedir, []):
if product == mapped:
keys.add(key)
break
return keys
def __repr__(self):
return 'ProductMapping({}) {{\n {}\n}}'.format(self.typename, '\n '.join(
'{} => {}\n {}'.format(str(target), basedir, outputs)
for target, outputs_by_basedir in self.by_target.items()
for basedir, outputs in outputs_by_basedir.items()))
def __bool__(self):
return not self.empty()
__nonzero__ = __bool__
def __init__(self):
# TODO(John Sirois): Kill products and simply have users register ProductMapping subtypes
# as data products. Will require a class factory, like `ProductMapping.named(typename)`.
self.products = {} # type -> ProductMapping instance.
self.required_products = set()
self.data_products = {} # type -> arbitrary object.
self.required_data_products = set()
def require(self, typename):
"""Registers a requirement that file products of the given type by mapped.
:param typename: the type or other key of a product mapping that should be generated.
"""
self.required_products.add(typename)
def isrequired(self, typename):
"""Checks if a particular product is required by any tasks."""
return typename in self.required_products
def get(self, typename):
"""Returns a ProductMapping for the given type name."""
return self.products.setdefault(typename, Products.ProductMapping(typename))
def require_data(self, typename):
"""Registers a requirement that data produced by tasks is required.
:param typename: the type or other key of a data product that should be generated.
"""
self.required_data_products.add(typename)
def is_required_data(self, typename):
"""Checks if a particular data product is required by any tasks."""
return typename in self.required_data_products
def safe_create_data(self, typename, init_func):
"""Ensures that a data item is created if it doesn't already exist."""
# Basically just an alias for readability.
self.get_data(typename, init_func)
def get_data(self, typename, init_func=None):
""" Returns a data product.
If the product isn't found, returns None, unless init_func is set, in which case the product's
value is set to the return value of init_func(), and returned."""
if typename not in self.data_products:
if not init_func:
return None
self.data_products[typename] = init_func()
return self.data_products.get(typename)
def get_only(self, product_type, target):
"""If there is exactly one product for the given product type and target, returns the
full filepath of said product.
Otherwise, raises a ProductError.
Useful for retrieving the filepath for the executable of a binary target.
"""
product_mapping = self.get(product_type).get(target)
if len(product_mapping) != 1:
raise ProductError('{} directories in product mapping: requires exactly 1.'
.format(len(product_mapping)))
for _, files in product_mapping.items():
if len(files) != 1:
raise ProductError('{} files in target directory: requires exactly 1.'
.format(len(files)))
return files[0]
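# Illustrative sketch (not part of pants): MultipleRootedProducts pairs each
# root directory with the product files registered beneath it.
if __name__ == '__main__':
  mrp = MultipleRootedProducts()
  mrp.add_rel_paths('/build/root', ['a.class', 'b.class'])
  for root, rel_paths in mrp.rel_paths():
    print(root, list(rel_paths))  # /build/root ['a.class', 'b.class']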
| kslundberg/pants | src/python/pants/goal/products.py | Python | apache-2.0 | 11,915 |
import RPi.GPIO as GPIO
import time
buzzer_pin = 27
notes = {
'B0' : 31,
'C1' : 33, 'CS1' : 35,
'D1' : 37, 'DS1' : 39,
'EB1' : 39,
'E1' : 41,
'F1' : 44, 'FS1' : 46,
'G1' : 49, 'GS1' : 52,
'A1' : 55, 'AS1' : 58,
'BB1' : 58,
'B1' : 62,
'C2' : 65, 'CS2' : 69,
'D2' : 73, 'DS2' : 78,
'EB2' : 78,
'E2' : 82,
'F2' : 87, 'FS2' : 93,
'G2' : 98, 'GS2' : 104,
'A2' : 110, 'AS2' : 117,
'BB2' : 123,
'B2' : 123,
'C3' : 131, 'CS3' : 139,
'D3' : 147, 'DS3' : 156,
'EB3' : 156,
'E3' : 165,
'F3' : 175, 'FS3' : 185,
'G3' : 196, 'GS3' : 208,
'A3' : 220, 'AS3' : 233,
'BB3' : 233,
'B3' : 247,
'C4' : 262, 'CS4' : 277,
'D4' : 294, 'DS4' : 311,
'EB4' : 311,
'E4' : 330,
'F4' : 349, 'FS4' : 370,
'G4' : 392, 'GS4' : 415,
'A4' : 440, 'AS4' : 466,
'BB4' : 466,
'B4' : 494,
'C5' : 523, 'CS5' : 554,
'D5' : 587, 'DS5' : 622,
'EB5' : 622,
'E5' : 659,
'F5' : 698, 'FS5' : 740,
'G5' : 784, 'GS5' : 831,
'A5' : 880, 'AS5' : 932,
'BB5' : 932,
'B5' : 988,
'C6' : 1047, 'CS6' : 1109,
'D6' : 1175, 'DS6' : 1245,
'EB6' : 1245,
'E6' : 1319,
'F6' : 1397, 'FS6' : 1480,
'G6' : 1568, 'GS6' : 1661,
'A6' : 1760, 'AS6' : 1865,
'BB6' : 1865,
'B6' : 1976,
'C7' : 2093, 'CS7' : 2217,
'D7' : 2349, 'DS7' : 2489,
'EB7' : 2489,
'E7' : 2637,
'F7' : 2794, 'FS7' : 2960,
'G7' : 3136, 'GS7' : 3322,
'A7' : 3520, 'AS7' : 3729,
'BB7' : 3729,
'B7' : 3951,
'C8' : 4186, 'CS8' : 4435,
'D8' : 4699, 'DS8' : 4978
}
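# e.g. notes['A4'] == 440, the frequency in Hz of concert-pitch A.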
anmeldung = [
notes['F4'],notes['A4'],notes['G4'],notes['C4'],
]
anmeldung_tempo = [
0.25,0.25,0.25,0.35,
]
def buzz(frequency, length): # create the function "buzz" and feed it the pitch and duration
if(frequency==0):
time.sleep(length)
return
period = 1.0 / frequency #in physics, the period (sec/cyc) is the inverse of the frequency (cyc/sec)
delayValue = period / 2 #calcuate the time for half of the wave
numCycles = int(length * frequency) #the number of waves to produce is the duration times the frequency
for i in range(numCycles): #start a loop from 0 to the variable "cycles" calculated above
GPIO.output(buzzer_pin, True) #set pin 27 to high
time.sleep(delayValue) #wait with pin 27 high
GPIO.output(buzzer_pin, False) #set pin 27 to low
time.sleep(delayValue) #wait with pin 27 low
def setup():
GPIO.setmode(GPIO.BCM)
GPIO.setup(buzzer_pin, GPIO.IN)
GPIO.setup(buzzer_pin, GPIO.OUT)
def destroy():
GPIO.cleanup() # Release resource
def play(melody,tempo,pause,pace=0.800):
for i in range(0, len(melody)): # Play song
noteDuration = tempo[i]*pace
buzz(melody[i],noteDuration) # Change the frequency along the song note
pauseBetweenNotes = noteDuration * pause
time.sleep(pauseBetweenNotes)
if __name__ == '__main__': # Program start from here
try:
setup()
print "Anmeldung"
play(anmeldung, anmeldung_tempo, 0.50, 1.5000)
time.sleep(2)
destroy()
except KeyboardInterrupt: # When 'Ctrl+C' is pressed, the child program destroy() will be executed.
destroy()
| lesscomplex/HomeSec | lock/buzz_anm.py | Python | agpl-3.0 | 2,986 |
"""
Counting sort is applicable when each input is known to belong to a particular
set, S, of possibilities. The algorithm runs in O(|S| + n) time and O(|S|)
memory where n is the length of the input. It works by creating an integer array
of size |S| and using the ith bin to count the occurrences of the ith member of
S in the input. Each input is then counted by incrementing the value of its
corresponding bin. Afterward, the counting array is looped through to arrange all
of the inputs in order. This sorting algorithm often cannot be used because S
needs to be reasonably small for the algorithm to be efficient, but it is extremely
fast and demonstrates great asymptotic behavior as n increases. It also can be
modified to provide stable behavior.
Best Case: O(n + k)
Average Case: O(n + k)
Worst Case: O(n + k)
Space Complexity: O(n + k)
"""
class CountingSort(object):
"""
Implementation notes:
1] Since the values range from 0 to k, create k+1 buckets.
2] To fill the buckets, iterate through the input list and
each time a value appears, increment the counter in its
bucket.
3] Now fill the input list with the compressed data in the
buckets. Each bucket's key represents a value in the
array. So for each bucket, from smallest key to largest,
add the index of the bucket to the input array and
decrease the counter in said bucket by one; until the
counter is zero.
"""
    @staticmethod  # allows CountingSort.sort(...) to be called without an instance
    def sort(array):
maximum = max(array)
minimum = min(array)
count_array = [0] * (maximum - minimum + 1)
for val in array:
count_array[val - minimum] += 1
sorted_array = []
for i in range(minimum, maximum + 1):
if count_array[i - minimum] > 0:
for j in range(0, count_array[i - minimum]):
sorted_array.append(i)
return sorted_array
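# Minimal usage sketch (illustrative): with `sort` marked as a @staticmethod
# above, it can be called directly on the class.
if __name__ == '__main__':
    print(CountingSort.sort([4, 2, 2, 8, 3, 3, 1]))  # [1, 2, 2, 3, 3, 4, 8]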
| rahulnadella/Sort_Algorithms | countingsort/countingsort.py | Python | bsd-2-clause | 1,805 |
__author__ = 'Jan Brennenstuhl'
__version__ = '0.1'
| jbspeakr/letterman.py | letterman/__init__.py | Python | mit | 52 |
import os
SECUREDROP_TARGET_PLATFORM = os.environ.get("SECUREDROP_TARGET_PLATFORM")
testinfra_hosts = [
"docker://{}-sd-sec-update".format(SECUREDROP_TARGET_PLATFORM)
]
def test_ensure_no_updates_avail(host):
"""
    Test to make sure that there are no security updates pending in the
base builder container.
"""
    # Filter the security repos out into their own sources file;
    # without this, every package update appears to come from the normal
    # Ubuntu update channel (updates get posted to both channels).
    host.run('egrep "^deb.*security" /etc/apt/sources.list > /tmp/sec.list')
dist_upgrade_simulate = host.run('apt-get -s dist-upgrade '
'-oDir::Etc::Sourcelist=/tmp/sec.list '
'|grep "^Inst" |grep -i security')
# If the grep was successful that means security package updates found
# otherwise we get a non-zero exit code so no updates needed.
assert dist_upgrade_simulate.rc != 0
| ehartsuyker/securedrop | molecule/builder-xenial/tests/test_security_updates.py | Python | agpl-3.0 | 1,022 |
#!/usr/bin/python
#===============================================================================
#
# conversion script to create a mbstestlib readable file containing test specifications
# out of an testset file in XML format
#
#===============================================================================
# Input can be given via optional command line parameters.
#
#
# TODO: add check for joint count
# TODO: add model description to output (as comment)
import sys # for io
import xml.dom.minidom # for xml parsing
from glob import glob # for expanding wildcards in cmd line arguements
class _config:
default_input_file = 'testset-example.xml'
output_file_ext = '.txt'
empty_vecn = ""
zero_vec = "0 0 0"
unity_mat = "1 0 0 0 1 0 0 0 1"
case_defaults = { 'delta': "0.001",
'base_r': zero_vec,
'base_R': unity_mat,
'base_v': zero_vec,
'base_omega': zero_vec,
'base_vdot': zero_vec,
'base_omegadot': zero_vec,
'gravitiy': zero_vec,
'joints_q': empty_vecn,
'joints_qdot': empty_vecn,
'joints_qdotdot': empty_vecn,
'joints_tau': empty_vecn,
'tcp_r': zero_vec,
'tcp_R': unity_mat,
'tcp_v': zero_vec,
'tcp_omega': zero_vec,
'tcp_vdot': zero_vec,
'tcp_omegadot': zero_vec,
'f_ext': zero_vec,
'n_ext': zero_vec
}
case_output_order = [
'delta',
'base_r',
'base_R',
'base_v',
'base_omega',
'base_vdot',
'base_omegadot',
'gravitiy',
'joints_q',
'joints_qdot',
'joints_qdotdot',
'joints_tau',
'tcp_r',
'tcp_R',
'tcp_v',
'tcp_omega',
'tcp_vdot',
'tcp_omegadot',
'f_ext',
'n_ext'
]
class _state:
error_occured_while_processing_xml = False
input_file = ''
def getText(nodelist):
# str(method.childNodes[0].nodeValue) # TODO: remove
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
# inspired by http://code.activestate.com/recipes/52306-to-sort-a-dictionary/
def sortedDict(adict):
return [ adict[k] for k in sorted(adict.keys()) ]
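# e.g. sortedDict({'2': 'b', '1': 'a'}) -> ['a', 'b'] (values ordered by key)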
# parses a specific node and either stores its value in a dict or the default value
# may set the error bit
def parse_opt(nodename, valuetype, current_case, current_case_value_dict):
# if the node does not exist use the default value
nodelist = current_case.getElementsByTagName(nodename)
if nodelist.length == 0:
current_case_value_dict.update({nodename : _config.case_defaults.get(nodename)})
elif nodelist.length > 1:
_state.error_occured_while_processing_xml = True
print("'" + nodename + "' defined more than once.")
return
else:
# we have one single node to parse
node = nodelist[0]
        value = node.getAttribute(valuetype)
        if not value:
            # minidom returns '' (not None) for a missing attribute
            # TODO: more advanced checks with regexp
            _state.error_occured_while_processing_xml = True
            print("'" + nodename + "' has an empty value or wrong type ('"+ valuetype +"').")
            return
else :
current_case_value_dict.update({nodename : value})
return
def convert_xml_testset_2_raw_testset(mbs_test_set):
raw_testsets = dict([]) # filename:content dict
for mbs in mbs_test_set.getElementsByTagName('mbs'): # for every file
file = mbs.getAttribute('file')
raw_testset = []
if mbs.getElementsByTagName('model').length != 1:
_state.error_occured_while_processing_xml = True
print("Only one model allowed per file!")
return dict([])
# extract model
raw_testset.append("% " + mbs.getElementsByTagName('model')[0].getAttribute('desc'))
raw_testset.append(getText(mbs.getElementsByTagName('model')[0].childNodes))
# insert separation marker
raw_testset.append("\nendmodel")
# now process the cases
if mbs.getElementsByTagName('case').length == 0:
_state.error_occured_while_processing_xml = True
print("No cases defined!")
return dict([])
cases = dict([])
for case in mbs.getElementsByTagName('case'):
# TODO: sanity check -> number collisions
# parse case
case_nr = case.getAttribute('nr')
case_desc = case.getAttribute('desc')
case_value_dict = dict([])
# everything but joints does not have to be defined explicitly
# TODO: unify these calls in a generic way (e.g. add type to case_output_order and iterate over it)
parse_opt('delta', 'scalar', case, case_value_dict)
parse_opt('base_r', 'vector3', case, case_value_dict)
parse_opt('base_R', 'matrix3x3', case, case_value_dict)
parse_opt('base_v', 'vector3', case, case_value_dict)
parse_opt('base_omega', 'vector3', case, case_value_dict)
parse_opt('base_vdot', 'vector3', case, case_value_dict)
parse_opt('base_omegadot', 'vector3', case, case_value_dict)
parse_opt('gravitiy', 'vector3', case, case_value_dict)
# TODO: checks with n (the number of joints)
parse_opt('joints_q', 'vector_n', case, case_value_dict)
parse_opt('joints_qdot', 'vector_n', case, case_value_dict)
parse_opt('joints_qdotdot', 'vector_n', case, case_value_dict)
parse_opt('joints_tau', 'vector_n', case, case_value_dict)
parse_opt('tcp_r', 'vector3', case, case_value_dict)
parse_opt('tcp_R', 'matrix3x3', case, case_value_dict)
parse_opt('tcp_v', 'vector3', case, case_value_dict)
parse_opt('tcp_omega', 'vector3', case, case_value_dict)
parse_opt('tcp_vdot', 'vector3', case, case_value_dict)
parse_opt('tcp_omegadot', 'vector3', case, case_value_dict)
parse_opt('f_ext', 'vector3', case, case_value_dict)
parse_opt('n_ext', 'vector3', case, case_value_dict)
if _state.error_occured_while_processing_xml: return dict([])
# compile raw case output
case_content = ["\n" + case_desc]
for value_name in _config.case_output_order:
if case_value_dict.get(value_name) is None :
_state.error_occured_while_processing_xml = True
print("Not all values defined in one testcase!")
return dict([])
case_content.append(case_value_dict.get(value_name))
cases.update({case_nr : "\n".join(case_content)})
# flatten cases (and sort)
raw_testset.append("\n".join(sortedDict(cases)))
# update file:testset dict
raw_testsets.update({file : "\n".join(raw_testset)})
# return the dict of files:testsets
return raw_testsets
#===============================================================================
# process command line arguments (i.e. file i/o)
#===============================================================================
script_name = os.path.basename(sys.argv[0]) # portable, unlike splitting on backslashes only
if len(sys.argv) == 1:
    _state.input_file = [_config.default_input_file] # keep it a list so the loop below iterates over filenames, not characters
print("No command line arguments were given. Defaulting to:")
print("Input '" + _state.input_file + "'")
print("Usage hint: " + script_name + " [INPUTFILE(s)]\n")
elif len(sys.argv) == 2:
if sys.argv[1] == "--help":
print("Usage: " + script_name + " [INPUTFILE(s)]")
sys.exit()
else:
_state.input_file = glob(sys.argv[1])
#===============================================================================
# run the conversion
#===============================================================================
for inputfile in _state.input_file :
xmldom = xml.dom.minidom.parse(inputfile)
raw_testsets = convert_xml_testset_2_raw_testset(xmldom.firstChild)
if not _state.error_occured_while_processing_xml :
for k in raw_testsets.keys():
with open(k, 'w') as raw_testset_file:
raw_testset_file.write(raw_testsets.get(k))
print("File '" + k + "' written.")
#===============================================================================
# concluding housekeeping
#===============================================================================
if not _state.error_occured_while_processing_xml:
print("Conversion successful.")
else:
print("The xml file could not be processed properly. It most likely contains errors.")
sys.exit(_state.error_occured_while_processing_xml)
| SIM-TU-Darmstadt/mbslib | dependencies/mbstestlib/src/testsetXML2intermediateConverter.py | Python | lgpl-3.0 | 9,479 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2010-2012 Infracom & Eurotechnia ([email protected])
# This file is part of the Webcampak project.
# Webcampak is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
# Webcampak is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with Webcampak.
# If not, see http://www.gnu.org/licenses/
"""Webcampak Core Tool
Command Line tool to manage and synchronise all Webcampak core functions
Usage: python webcampak.py [options] [source]
Options:
-t ..., --type= ... Main action: capture, video, videocustom, graph
-s, --source= Source to be used
-g /home/user/webcampak/etc/config-general.cfg, --global=... Global configuration file (optional), will create it if it does not exist.
-c /home/user/webcampak/etc/config-source....cfg, --config=... Source configuration file (optional), will create it if it does not exist.
  -v /home/user/webcampak/etc/config-source....cfg, --video=...	Source video (daily or custom) configuration file (optional), will create it if it does not exist.
-m, --motion Used with motion detection, only capture when there is something moving
-h, --help Show this help
Examples:
webcampak.py -t capture -s 10 Capture using source 10 and default configuration files
webcampak.py --type=capture --source=10 Capture using source 10 and default configuration files
webcampak.py --type=video --source=10 Create daily video of source 10
webcampak.py --type=videocustom --source=10 Create custom video for source 10
This program is part of Webcampak tools. For more details please visit
http://www.webcampak.com
"""
__author__ = "Eurotechnia & Infracom"
__version__ = "$Revision: 0.9 $"
__copyright__ = "Copyright (c) 2011 Eurotechnia & Infracom"
__license__ = "GPLv3"
import os, sys, smtplib, datetime, tempfile, subprocess, shutil, time, ftplib
import getopt
import zipfile
import socket
import urllib
import pwd
import locale
import gettext
from wpakDebug import Debug
from wpakFileManager import FileManager
from wpakConfig import Config
from wpakCapture import Capture
from wpakRRDGraph import RRDGraph
from wpakErrorManagement import ErrorManagement
from wpakEmailClass import EmailClass
from wpakVideo import Video
from dateutil import tz
from time import sleep
########################################################################
########################################################################
########################################################################
def usage():
print(__doc__)
def main(argv):
try:
		opts, args = getopt.getopt(argv, "ht:s:g:c:mv:", ["help", "type=", "source=", "global=", "config=", "motion", "video="]) # -m is a flag, -v takes the video config file
except getopt.GetoptError:
usage()
sys.exit(2)
global g
global c
global cfgcurrentsource
global cfgnowsource
global CmdMotion
global CmdSource
global CmdGlobalConfig
global CmdType
global CmdVideoConfig
global Debug
global FileManager
global Capture
global RRDGraph
global ErrorManagement
global EmailClass
global Video
CmdSource = ""
CmdGlobalConfig = ""
CmdSourceConfig = ""
CmdVideoConfig = ""
CmdType = ""
CmdMotion = ""
# Get command line parameters
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-t", "--type"):
if arg == "video":
CmdType = arg
elif arg == "videocustom":
CmdType = arg
elif arg == "videopost":
CmdType = arg
elif arg == "capture":
CmdType = arg
elif arg == "capturesample":
CmdType = arg
elif arg == "rrdgraph":
CmdType = arg
else:
print("Error: python webcampak.py -t %(arg)s" % {'arg': arg})
print("Error: Unknown parameter")
usage()
sys.exit()
elif opt in ("-s", "--source"):
CmdSource = arg
elif opt in ("-g", "--global"):
CmdGlobalConfig = arg
elif opt in ("-c", "--config"):
CmdSourceConfig = arg
elif opt in ("-m", "--motion"):
CmdMotion = "motion"
elif opt in ("-v", "--video"):
CmdVideoConfig = arg
if CmdSource != "":
cfgcurrentsource = str(CmdSource)
# Global config or "g" is a set of configuration parameters shared between all sources
if CmdGlobalConfig != "" and os.path.isfile(CmdGlobalConfig):
g = Config(CmdGlobalConfig)
elif os.path.isfile("/home/" + pwd.getpwuid(os.getuid())[0] + "/webcampak/etc/config-general.cfg"):
g = Config("/home/" + pwd.getpwuid(os.getuid())[0] + "/webcampak/etc/config-general.cfg")
else:
if os.path.isfile("/home/" + pwd.getpwuid(os.getuid())[0] + "/webcampak/init/etc/config-general.cfg"):
FileManager.CheckDir("/home/" + pwd.getpwuid(os.getuid())[0] + "/webcampak/etc/config-general.cfg")
shutil.copy("/home/" + pwd.getpwuid(os.getuid())[0] + "/webcampak/init/etc/config-general.cfg", "/home/" + pwd.getpwuid(os.getuid())[0] + "/webcampak/etc/config-general.cfg")
g = Config("/home/" + pwd.getpwuid(os.getuid())[0] + "/webcampak/etc/config-general.cfg")
else:
print("Error: Unable to identify source configuration file")
usage()
sys.exit()
# Loading gettext translation functions
try:
languages = []
languages.append(g.getConfig('cfgsystemlang'))
t = gettext.translation(g.getConfig('cfggettextdomain'), g.getConfig('cfgbasedir') + g.getConfig('cfglocaledir'), languages, fallback=True)
_ = t.ugettext
t.install()
except:
print("No translation file available for your language")
#languages = []
#languages.append("en_US")
#t = gettext.translation(g.getConfig('cfggettextdomain'), g.getConfig('cfgbasedir') + g.getConfig('cfglocaledir'), languages)
#_ = t.ugettext
#t.install()
# Source config or "c" is a set of configuration parameters specific to one single source
if CmdSourceConfig != "" and os.path.isfile(CmdSourceConfig):
c = Config(CmdSourceConfig)
elif os.path.isfile("/home/" + pwd.getpwuid(os.getuid())[0] + "/webcampak/etc/config-source" + cfgcurrentsource + ".cfg"):
c = Config("/home/" + pwd.getpwuid(os.getuid())[0] + "/webcampak/etc/config-source" + cfgcurrentsource + ".cfg")
else:
if os.path.isfile("/home/" + pwd.getpwuid(os.getuid())[0] + "/webcampak/init/etc/config-source.cfg"):
FileManager.CheckDir("/home/" + pwd.getpwuid(os.getuid())[0] + "/webcampak/etc/config-source.cfg")
shutil.copy("/home/" + pwd.getpwuid(os.getuid())[0] + "/webcampak/init/etc/config-source.cfg", "/home/" + pwd.getpwuid(os.getuid())[0] + "/webcampak/etc/config-source" + cfgcurrentsource + ".cfg")
c = Config("/home/" + pwd.getpwuid(os.getuid())[0] + "/webcampak/etc/config-source" + cfgcurrentsource + ".cfg")
else:
print("Error: Unable to identify source configuration file")
usage()
sys.exit()
# We load standard classes, necessary within the whole software.
Debug = Debug(c, cfgcurrentsource, g, CmdType)
EmailClass = EmailClass(c, cfgcurrentsource, g, Debug)
ErrorManagement = ErrorManagement(c, cfgcurrentsource, g, Debug, CmdType, EmailClass)
# We capture the current date and time, this value is used through the whole software
# If capture is configured to be delayed there are two option, use script start date or capture date
cfgnowsource = datetime.datetime.utcnow()
if c.getConfig('cfgcapturetimezone') != "": # Update the timezone from UTC to the source's timezone
sourceTimezone = tz.gettz(c.getConfig('cfgcapturetimezone'))
cfgnowsource = cfgnowsource.replace(tzinfo=tz.gettz('UTC'))
cfgnowsource = cfgnowsource.astimezone(sourceTimezone)
if c.getConfig('cfgcapturedelay') != "0" and CmdType == "capture":
Debug.Display(_("Delaying the capture by %(CaptureDelay)s seconds.") % {'CaptureDelay': str(c.getConfig('cfgcapturedelay'))} )
time.sleep(int(c.getConfig('cfgcapturedelay')))
if c.getConfig('cfgcapturedelaydate') != "script":
cfgnowsource = datetime.datetime.utcnow()
if c.getConfig('cfgcapturetimezone') != "": # Update the timezone from UTC to the source's timezone
sourceTimezone = tz.gettz(c.getConfig('cfgcapturetimezone'))
cfgnowsource = cfgnowsource.replace(tzinfo=tz.gettz('UTC'))
cfgnowsource = cfgnowsource.astimezone(sourceTimezone)
sourceLiveDirectory = g.getConfig('cfgbasedir') + g.getConfig('cfgsourcesdir') + "source" + c.getConfig('cfgsourcewpakgetsourceid') + "/live/"
if c.getConfig('cfgsourcetype') == "wpak" and c.getConfig('cfgsourcewpaktype') == "get" and os.path.isfile(sourceLiveDirectory + "last-capture.txt"):
Debug.Display(_("Using last-capture.txt from source %(cfgsourcewpakgetsourceid)s as a date") % {'cfgsourcewpakgetsourceid': str(c.getConfig('cfgsourcewpakgetsourceid'))})
captureLastFile = Config(sourceLiveDirectory + "last-capture.txt")
f = captureLastFile.getStat('LastCapture')
cfgnowsource = datetime.datetime(*time.strptime(f[0] + f[1] + f[2] + f[3] + "/" + f[4] + f[5] + "/" + f[6] + f[7] + "/" + f[8] + f[9] + "/" + f[10] + f[11] + "/" + f[12] + f[13], "%Y/%m/%d/%H/%M/%S")[0:6])
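		# Illustrative sketch (assumed file format): LastCapture = "20120131093000" is sliced
		# character-by-character into "2012/01/31/09/30/00" before strptime parsing.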
sourceTimezone = tz.gettz(c.getConfig('cfgcapturetimezone'))
Debug.Display(_("Using new date set to: %(cfgnowsource)s") % {'cfgnowsource': str(cfgnowsource)})
FileManager = FileManager(c, cfgcurrentsource, g, Debug, cfgnowsource, CmdType, ErrorManagement)
# Two main classes are available:
# - "Video" to generate daily or customs videos
# - "Capture" to capture pictures from a source
if CmdType == "video":
# We start the process three times in a row to catch up missed creations
Video = Video(c, cfgcurrentsource, g, Debug, cfgnowsource, CmdType, FileManager, ErrorManagement, EmailClass, CmdVideoConfig)
Video.Main()
Video.Main()
Video.Main()
elif CmdType == "videocustom":
Video = Video(c, cfgcurrentsource, g, Debug, cfgnowsource, CmdType, FileManager, ErrorManagement, EmailClass, CmdVideoConfig)
Video.Main()
elif CmdType == "videopost":
Video = Video(c, cfgcurrentsource, g, Debug, cfgnowsource, CmdType, FileManager, ErrorManagement, EmailClass, CmdVideoConfig)
Video.Main()
elif CmdType == "capture" or CmdType == "capturesample":
Capture = Capture(c, cfgcurrentsource, g, Debug, cfgnowsource, CmdType, FileManager, ErrorManagement, EmailClass, CmdMotion)
Capture.Main() # Start capture process
elif CmdType == "rrdgraph":
RRDGraph = RRDGraph(c, cfgcurrentsource, g, Debug, cfgnowsource, CmdType, FileManager, ErrorManagement, EmailClass)
RRDGraph.Main() # Start RRD Graph process
else:
print("Error: Missing source number")
usage()
sys.exit()
if __name__ == "__main__":
main(sys.argv[1:])
| Webcampak/v2.0 | src/bin/webcampak.py | Python | gpl-3.0 | 11,254 |
from django.contrib import admin
from .models import BlogPost
# Register your models here.
admin.site.register(BlogPost)
| KevinBacas/FuzzeyPot | FuzzeyPot/Blog/admin.py | Python | mit | 122 |
import sys
import os.path
import urllib2
import re
from pyquery import PyQuery as pq
import common
def getId(url):
arr = url.split("/")
id = arr[len(arr) - 2]
return id
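# Illustrative example (hypothetical URL):
# getId("http://hyipstop.com/details/123/") splits on "/" and
# returns the second-to-last component, i.e. "123".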
def getSiteUrl(urlRequest, monitor, rcbUrl):
result = ""
print("REQUEST: {0}".format(urlRequest))
try:
req = urllib2.urlopen(urlRequest, timeout=30)
url = req.geturl()
arr = url.split("/?")
arr1 = arr[0].split("//")
result = arr1[1].replace("www.", "")
result = result.split("/")[0]
	except Exception:
		print("========== ERROR ===========")
		#common.insertUnknowSite(rcbUrl, monitor)
return result
def getRcb(monitor):
print("hyip_stop.getRcb()")
rcb_url = "http://{0}/new".format(monitor)
d = pq(url=rcb_url)
list = d("a.joinnw")
siteList = []
for item in list:
obj = {}
obj['id'] = getId(item.get("href"))
		if common.getSiteMonitorByRefSiteId(monitor, obj['id']) is None:
obj['siteRCBUrl'] = "http://{0}/details/aj/rcb/lid/{1}/".format(monitor, obj['id'])
obj['url'] = getSiteUrl(item.get("href"), monitor, obj['siteRCBUrl'])
obj['siteId'] = ""
if obj['url'] != '':
siteId = common.insertSite(obj)
obj['siteId'] = siteId
siteList.append(obj)
print("{0} - {1} - {2}".format(obj['id'], obj['url'], obj['siteId']))
for item in siteList:
common.insertSiteMonitor(item, monitor)
def checkPaid(siteUrl):
d = pq(url=siteUrl)
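	# the 6th table column on the monitor's listing page is assumed to hold the payout status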
tables = d("#content2 table.listbody tr td:nth-child(6) center")
result = False
#print(tables)
for item in tables:
if re.search('paid', item.text_content(), re.IGNORECASE):
result = True
return result
def checkRcb(monitor):
siteMonitors = common.getSiteMonitor(monitor)
for item in siteMonitors:
print(item)
if item[2] == 0:
if checkPaid(item[1]):
common.setPaid(item[0])
def run():
MONITOR = "hyipstop.com"
getRcb(MONITOR)
#checkRcb(MONITOR)
| vietdh85/vh-utility | script/hyip_stop.py | Python | gpl-3.0 | 1,859 |
# coding=utf-8
"""html to jinja2 URL rewriter
Rewrites the URLs of the *static files* referenced by the html files in this
directory and its subdirectories so they are rendered via jinja2 + flask url_for:
href="css/base.css" -> href="{{ url_for('static', filename='css/base.css') }}"
Before rewriting, a backup file with a .bak suffix is created next to each file.
Usage:
    $ cd my_project
    $ python2 translate.py
"""
from __future__ import print_function
import re
import os
import shutil
types = ['css', 'html']
# href="css/base.css"
# src="img/a39-1.png"
def bak(filename):
"""备份
数据无价 谨慎操作
:type filename: str 文件名
"""
if os.path.exists(filename + ".bak"):
        return  # if a backup file already exists, do not create another one
if os.path.isfile(filename):
shutil.copy(filename, filename + ".bak")
def rollback():
"""回滚
暂时用不到 先不写了
"""
pass
def translate(filename):
    with open(filename, 'r+') as f:
        # use single quotes inside url_for so the double-quoted HTML attribute stays well-formed
        replaced = re.sub(r'(href|src)="(css|img|font|js)/(.*?)"',
                          r'''\g<1>="{{ url_for('static', filename='\g<2>/\g<3>') }}"''', f.read())
        f.seek(0)
        f.write(replaced)
        f.truncate()  # discard leftover bytes in case the rewritten content is shorter
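# Example of the substitution on a single line (hypothetical input):
#   <link href="css/base.css"> becomes
#   <link href="{{ url_for('static', filename='css/base.css') }}">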
if __name__ == '__main__':
for paths, subs, files in os.walk(os.getcwd()):
        # walk the files under the current path
for filename in files:
if filename.split('.')[-1] not in types:
                # skip files whose extension is not in the list of handled types
continue
fullname = os.path.join(paths, filename)
print("translating " + fullname)
bak(fullname)
translate(fullname)
| bllli/tsxyAssistant | app/templates/translate.py | Python | gpl-3.0 | 1,644 |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.comptool import TestManager, TestInstance, RejectResult
from test_framework.blocktools import *
import copy
'''
In this test we connect to one node over p2p, and test block requests:
1) Valid blocks should be requested and become chain tip.
2) Invalid block with duplicated transaction should be re-requested.
3) Invalid block with bad coinbase value should be rejected and not
re-requested.
'''
# Use the ComparisonTestFramework with 1 node: only use --testbinary.
class InvalidBlockRequestTest(ComparisonTestFramework):
''' Can either run this test as 1 node with expected answers, or two and compare them.
Change the "outcome" variable from each TestInstance object to only do the comparison. '''
def __init__(self):
super().__init__()
self.num_nodes = 1
def run_test(self):
test = TestManager(self, self.options.tmpdir)
test.add_all_connections(self.nodes)
self.tip = None
self.block_time = None
NetworkThread().start() # Start up network handling in another thread
sync_masternodes(self.nodes)
test.run()
def get_tests(self):
if self.tip is None:
self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
self.block_time = get_mocktime() + 1
'''
Create a new block with an anyone-can-spend coinbase
'''
height = 1
block = create_block(self.tip, create_coinbase(height), self.block_time)
self.block_time += 1
block.solve()
# Save the coinbase for later
self.block1 = block
self.tip = block.sha256
height += 1
yield TestInstance([[block, True]])
'''
Now we need that block to mature so we can spend the coinbase.
'''
test = TestInstance(sync_every_block=False)
for i in range(100):
block = create_block(self.tip, create_coinbase(height), self.block_time)
block.solve()
self.tip = block.sha256
self.block_time += 1
test.blocks_and_transactions.append([block, True])
height += 1
yield test
'''
Now we use merkle-root malleability to generate an invalid block with
same blockheader.
Manufacture a block with 3 transactions (coinbase, spend of prior
coinbase, spend of that spend). Duplicate the 3rd transaction to
leave merkle root and blockheader unchanged but invalidate the block.
'''
block2 = create_block(self.tip, create_coinbase(height), self.block_time)
self.block_time += 1
        # b'\x51' is OP_TRUE
tx1 = create_transaction(self.block1.vtx[0], 0, b'\x51', 50 * COIN)
tx2 = create_transaction(tx1, 0, b'\x51', 50 * COIN)
block2.vtx.extend([tx1, tx2])
block2.hashMerkleRoot = block2.calc_merkle_root()
block2.rehash()
block2.solve()
orig_hash = block2.sha256
block2_orig = copy.deepcopy(block2)
# Mutate block 2
block2.vtx.append(tx2)
assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root())
assert_equal(orig_hash, block2.rehash())
assert(block2_orig.vtx != block2.vtx)
self.tip = block2.sha256
yield TestInstance([[block2, RejectResult(16, b'bad-txns-duplicate')], [block2_orig, True]])
height += 1
'''
Make sure that a totally screwed up block is not valid.
'''
block3 = create_block(self.tip, create_coinbase(height), self.block_time)
self.block_time += 1
block3.vtx[0].vout[0].nValue = 1000 * COIN # Too high!
block3.vtx[0].sha256=None
block3.vtx[0].calc_sha256()
block3.hashMerkleRoot = block3.calc_merkle_root()
block3.rehash()
block3.solve()
yield TestInstance([[block3, RejectResult(16, b'bad-cb-amount')]])
if __name__ == '__main__':
InvalidBlockRequestTest().main()
| crowning-/dash | qa/rpc-tests/invalidblockrequest.py | Python | mit | 4,290 |
def ci():
return "ci"
| hallemaen/ci | ci/app.py | Python | mit | 28 |
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'bannir' et ses sous-commandes.
Dans ce fichier se trouve la commande même.
"""
from primaires.interpreteur.commande.commande import Commande
from .bloquer import PrmBloquer
from .joueur import PrmJoueur
from .liste import PrmListe
class CmdBannir(Commande):
"""Commande 'bannir'.
"""
def __init__(self):
"""Constructeur de la commande"""
Commande.__init__(self, "bannir", "ban")
self.groupe = "administrateur"
self.aide_courte = "gère le bannissement"
self.aide_longue = \
"Cette commande permet de gérer les bannissements, " \
"temporaires ou prolongés, de joueurs, comptes ou " \
"adresses."
def ajouter_parametres(self):
"""Ajout des paramètres"""
self.ajouter_parametre(PrmBloquer())
self.ajouter_parametre(PrmJoueur())
self.ajouter_parametre(PrmListe())
| vlegoff/tsunami | src/primaires/joueur/commandes/bannir/__init__.py | Python | bsd-3-clause | 2,516 |
PROJECT_NAME = "sen"
LOG_FILE_NAME = "sen.debug.log"
FALLBACK_LOG_PATH = "/tmp/sen.debug.log"
ISO_DATETIME_PARSE_STRING = "%Y-%m-%dT%H:%M:%S.%f"
| TomasTomecek/sen | sen/constants.py | Python | mit | 146 |
# coding: utf-8
from __future__ import (absolute_import, division, print_function, unicode_literals)
import json
import logging
import boto3
import os
import sys
import time
# Path to modules needed to package local lambda function for upload
currentdir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(currentdir, "./vendored"))
# Modules downloaded into the vendored directory
# Logging for Serverless
log = logging.getLogger()
log.setLevel(logging.DEBUG)
# Initializing AWS services
dynamodb = boto3.resource('dynamodb')
sts = boto3.client('sts')
def handler(event, context):
log.debug("Received event {}".format(json.dumps(event)))
taskStatus = dynamodb.Table(os.environ['TAILOR_TABLENAME_TASKSTATUS'])
accountInfo = dynamodb.Table(os.environ['TAILOR_TABLENAME_ACCOUNTINFO'])
cbInfo = dynamodb.Table(os.environ['TAILOR_TABLENAME_CBINFO'])
incomingMessage = json.loads(event['Records'][0]['Sns']['Message'])
accountEmailAddress = incomingMessage['lambda']['accountEmailAddress']
getAccountInfo = accountInfo.get_item(
Key={
'accountEmailAddress': accountEmailAddress
}
)
laAccountId = getAccountInfo['Item']['accountId']
requestId = getAccountInfo['Item']['requestId']
accountCbAlias = getAccountInfo['Item']['accountCbAlias']
accountTagEnvironment = getAccountInfo['Item']['accountTagEnvironment']
# Update task start status
updateStatus = taskStatus.put_item(
Item={
"requestId": requestId,
"eventTimestamp": str(time.time()),
"period": "start",
"taskName": "ENTSUPPORT",
"function": "talr-entsupport",
"message": incomingMessage
}
)
getCbInfo = cbInfo.get_item(
Key={
'accountCbAlias': accountCbAlias
}
)
accountCompanyName = getCbInfo['Item']['accountCompanyName']
accountCbId = getCbInfo['Item']['accountCbId']
accountSupportTeamEmail = getCbInfo['Item']['accountSupportTeamEmail']
if accountTagEnvironment != 'tst':
# Payer account credentials
payerAssumeRole = sts.assume_role(
RoleArn="arn:aws:iam::" + accountCbId + ":role/tailor",
RoleSessionName="talrEntsupportPayerAssumeRole"
)
payerCredentials = payerAssumeRole['Credentials']
payer_aws_access_key_id = payerCredentials['AccessKeyId']
payer_aws_secret_access_key = payerCredentials['SecretAccessKey']
payer_aws_session_token = payerCredentials['SessionToken']
# Linked account credentials
paSupport = boto3.client(
'support',
aws_access_key_id=payer_aws_access_key_id,
aws_secret_access_key=payer_aws_secret_access_key,
aws_session_token=payer_aws_session_token,
)
# Create case in Payer Account requested Enterprise Support on Linked Account
createCase = paSupport.create_case(
subject='Enable Enterprise Support',
serviceCode='account-management',
severityCode='normal',
categoryCode='billing',
communicationBody='Please enable Enterprise Support on Linked Account: ' + laAccountId + '.',
ccEmailAddresses=[
accountSupportTeamEmail,
],
language='en',
issueType='customer-service'
)
print(createCase)
# Update task end status
updateStatus = taskStatus.put_item(
Item={
"requestId": requestId,
"eventTimestamp": str(time.time()),
"period": "end",
"taskName": "ENTSUPPORT",
"function": "talr-entsupport",
"message": incomingMessage
}
)
else:
print("No Enterprise Support enablement requested for", laAccountId)
# Update task end status
updateStatus = taskStatus.put_item(
Item={
"requestId": requestId,
"eventTimestamp": str(time.time()),
"period": "end",
"taskName": "ENTSUPPORT",
"function": "talr-entsupport",
"message": incomingMessage
}
)
return
| alanwill/aws-tailor | sam/functions/talr-entsupport/handler.py | Python | gpl-3.0 | 4,302 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RGit2r(RPackage):
"""Interface to the 'libgit2' library, which is a pure C implementation of
the 'Git' core methods. Provides access to 'Git' repositories to extract
data and running some basic 'Git' commands."""
homepage = "https://github.com/ropensci/git2r"
url = "https://cloud.r-project.org/src/contrib/git2r_0.18.0.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/git2r"
version('0.27.1', sha256='099207f180aa45ddcc443cbb22487eafd14e1cd8e5979b3476214253fd773bc0')
version('0.26.1', sha256='13d609286a0af4ef75ba76f2c2f856593603b8014e311b88896243a50b417435')
version('0.26.0', sha256='56671389c3a50591e1dae3be8c3b0112d06d291f897d7fe14db17aea175616cf')
version('0.18.0', sha256='91b32e49afb859c0c4f6f77988343645e9499e5046ef08d945d4d8149b6eff2d')
version('0.15.0', sha256='682ab9e7f71b2ed13a9ef95840df3c6b429eeea070edeb4d21d725cf0b72ede6')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('zlib')
depends_on('openssl')
depends_on('libgit2')
| iulian787/spack | var/spack/repos/builtin/packages/r-git2r/package.py | Python | lgpl-2.1 | 1,261 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_object_meta import V1ObjectMeta
class TestV1ObjectMeta(unittest.TestCase):
""" V1ObjectMeta unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1ObjectMeta(self):
"""
Test V1ObjectMeta
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1_object_meta.V1ObjectMeta()
pass
if __name__ == '__main__':
unittest.main()
| mbohlool/client-python | kubernetes/test/test_v1_object_meta.py | Python | apache-2.0 | 929 |
#!/usr/bin/env python
import os.path
import sys
import gspread
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
def read(filename):
return open(os.path.join(os.path.dirname(__file__), filename)).read()
description = 'Google Spreadsheets Python API'
long_description = """
{index}
License
-------
MIT
Download
========
"""
long_description = long_description.lstrip("\n").format(index=read('docs/index.txt'))
setup(
name='gspread_old_fork',
packages=['gspread_old_fork'],
description=description,
long_description=long_description,
version=gspread.__version__,
author='Anton Burnashev',
author_email='[email protected]',
url='https://github.com/burnash/gspread',
keywords=['spreadsheets', 'google-spreadsheets'],
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Science/Research",
"Topic :: Office/Business :: Financial :: Spreadsheet",
"Topic :: Software Development :: Libraries :: Python Modules"
],
license='MIT'
)
| finoptimal/gspread | setup.py | Python | mit | 1,460 |
"""
Google Maps Directions API methods to do things
related to network analysis
"""
from gasp.web import http_to_json
from . import select_api_key
from . import record_api_utilization
# ------------------------------ #
"""
Global Variables
"""
GOOGLE_GEOCODING_URL = 'https://maps.googleapis.com/maps/api/directions/json?'
# ------------------------------ #
def point_to_point(latA, lngA, latB, lngB, mode="driving"):
"""
Go from A to B with Google Maps Directions API
DRIVING OPTIONS: driving; walking; bicycling
"""
import polyline
# Get Key to be used
KEY_FID, GOOGLE_API_KEY, NR_REQUESTS = select_api_key()
path = http_to_json((
'{url}origin={lat},{lng}&'
'destination={__lat},{__lng}&'
        'mode={m}&key={k}' # forward the travel mode, which was previously accepted but never sent
    ).format(
        url=GOOGLE_GEOCODING_URL, lat=str(latA), lng=str(lngA),
        __lat=str(latB), __lng=str(lngB), m=mode, k=GOOGLE_API_KEY
))
# Record api utilization
record_api_utilization(KEY_FID, NR_REQUESTS + 1)
results = path['routes'][0]
results['polyline'] = polyline.decode(
results['overview_polyline']['points']
)
results['general_distance'] = results['legs'][0]['distance']
results['general_duration'] = results['legs'][0]['duration']
del results['overview_polyline']['points']
del results['legs'][0]['distance']
del results['legs'][0]['duration']
return results
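# Illustrative usage (hypothetical coordinates):
# route = point_to_point(40.2056, -8.4196, 40.6405, -8.6538, mode="driving")
# route['general_duration']['value'] holds the travel time in seconds and
# route['polyline'] the decoded list of (lat, lng) points.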
def pnt_to_pnt_duration(latA, lngA, latB, lngB, mode="driving"):
"""
Return duration from going from A to B
DRIVING OPTIONS: driving; walking; bicycling
"""
# Get Key to be used
KEY_FID, GOOGLE_API_KEY, NR_REQUESTS = select_api_key()
path = http_to_json((
'{url}origin={lat},{lng}&'
'destination={__lat},{__lng}&'
        'mode={m}&key={k}' # forward the travel mode, which was previously accepted but never sent
    ).format(
        url=GOOGLE_GEOCODING_URL, lat=str(latA), lng=str(lngA),
        __lat=str(latB), __lng=str(lngB), m=mode, k=GOOGLE_API_KEY
))
# Record api utilization
record_api_utilization(KEY_FID, NR_REQUESTS + 1)
# Return result
return path["routes"][0]["legs"][0]["duration"]["value"]
def get_time_pnt_destinations(origin, destinations):
"""
Return the time needed to go from the origin to the nearest destination
origin = Point Geometry
destinations = list of dicts with the following structure
{id: value, x: value, y: value}
"""
for i in range(len(destinations)):
dist_path = point_to_point(
origin.GetY(), origin.GetX(),
destinations[i]['y'], destinations[i]['x']
)
if not i:
__id = destinations[i]['id']
duration = dist_path['general_duration']
distance = dist_path['general_distance']
else:
if dist_path['general_duration']['value'] < duration['value']:
__id = destinations[i]['id']
duration = dist_path['general_duration']
distance = dist_path['general_distance']
return __id, duration, distance
| JoaquimPatriarca/senpy-for-gis | gasp/fromapi/glg/directions.py | Python | gpl-3.0 | 3,090 |
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
from .widget import *
default_app_config = 'leonardo.module.media.MediaConfig'
class Default(object):
optgroup = 'Media'
@property
def apps(self):
return [
'leonardo.module',
'leonardo.module.media',
]
@property
def widgets(self):
return [
DownloadListWidget,
DownloadItemWidget,
InternetVideoWidget,
MediaGalleryWidget,
SimpleImageWidget,
VectorGraphicsWidget,
PdfDocumentWidget,
FlashObjectWidget,
]
plugins = [
('leonardo.module.media.apps.category_nested', 'List of directories'),
('leonardo.module.media.apps.category_simple', 'Simple list of directories'),
]
config = {
'MEDIA_PAGINATE_BY': (25, _('Pagination count for media files')),
'MEDIA_PUBLIC_UPLOAD_TO': ('public', _('Prefix for public files from MEDIA_ROOT')),
'MEDIA_PRIVATE_UPLOAD_TO': ('private', _('Prefix for private files from MEDIA_ROOT')),
'MEDIA_IS_PUBLIC_DEFAULT': (True, _('Set uploaded files to public automatically')),
'MEDIA_ENABLE_PERMISSIONS': (True, _(
'Permissions for downloadable items. Experimental feature.')),
'MEDIA_ALLOW_REGULAR_USERS_TO_ADD_ROOT_FOLDERS': (False, _('ALLOW_REGULAR_USERS_TO_ADD_ROOT_FOLDERS')),
'MEDIA_THUMB_SMALL_GEOM': ('64x64', _('MEDIA_THUMB_SMALL_GEOM')),
'MEDIA_THUMB_SMALL_OPT': ('', _('Another options for small thumnails')),
'MEDIA_THUMB_MEDIUM_GEOM': ('256x256', _('MEDIA_THUMB_MEDIUM_GEOM')),
'MEDIA_THUMB_MEDIUM_OPT': ('', _('Another options for medium thumnails')),
'MEDIA_THUMB_LARGE_GEOM': ('768x768', _('MEDIA_THUMB_LARGE_GEOM')),
'MEDIA_THUMB_LARGE_OPT': ('', _('Another options for large thumnails')),
'MEDIA_LOGICAL_STRUCTURE': (False, _('If is True all folders and files will has same path in the OS')),
}
page_actions = ['media/_actions.html']
class MediaConfig(AppConfig, Default):
name = 'leonardo.module.media'
verbose_name = "Media"
default = Default()
| amboycharlie/Child-Friendly-LCMS | leonardo/module/media/__init__.py | Python | apache-2.0 | 2,222 |
#This is practice code which seeks to recognize digits from the MNIST dataset
#primarily through convolutional neural networks.
#The data set consists of tens of thousands of greyscale images of handwritten digits.
#This was used to create a submission to Kaggle.com, a platform
#for data science competitions. This particular submission (having trained over
#additional epochs) placed in the 8th percentile.
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy.ndimage.filters import gaussian_filter
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score
from sklearn.linear_model import SGDClassifier
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.callbacks import ModelCheckpoint, EarlyStopping
#Data is of 28x28 greyscale image. train has 1 more feature 'label' to indicate #. Other features are labeled pixel0, pixel1, pixelX
train = pd.read_csv('C:/Users/Michael Kang/Desktop/Data_Files/Digit Recognizer/Digit_train.csv')
test= pd.read_csv('C:/Users/Michael Kang/Desktop/Data_Files/Digit Recognizer/Digit_test.csv')
X_train_df = train.drop('label', axis=1)
Y_train_df = train['label']
X_test_df = test
X_train_df.shape, Y_train_df.shape, X_test_df.shape
#******************************************************************************************************
#Getting a look at the data
i=50
img=X_train_df.iloc[i].as_matrix()
img=img.reshape((28,28))
img=gaussian_filter(img, sigma=1) #I'm not sure if this helps that much...
plt.imshow(img,cmap='gray')
plt.title(X_train_df.iloc[i,0])
#*****************************************************************************************************
#This is SGD with just unprocessed data. Note that SVM takes way too long to run
sgd = SGDClassifier() #SGD Results: 86.41% +/-1.08%
cv = KFold(n_splits=5,shuffle=True,random_state=42)
results = cross_val_score(sgd, X_train_df, Y_train_df, cv=cv)
print("SGD Results: %.2f%% +/-%.2f%%" % (results.mean()*100, results.std()*100))
#Manually splitting training set into cross-validation sets using train_test_split
X_train_df_val, X_test_df_val, Y_train_df_val, Y_test_df_val = train_test_split(X_train_df, Y_train_df, train_size = 0.75, random_state = 46)
X_train_df_val.shape, X_test_df_val.shape, Y_train_df_val.shape, Y_test_df_val.shape
#checking to see if using accuracy_score and train_test_split will have a significant effect.
sgd = SGDClassifier() #SGD Results: 89.20%
sgd.fit(X_train_df_val, Y_train_df_val) #No really significant difference.
Y_pred = sgd.predict(X_test_df_val)
acc_1 = round(accuracy_score(Y_test_df_val, Y_pred)*100, 2)
#********************************************************************************************************
#SVC and Kneighbors both take too long to fit. Will try simplifying the data by binarizing.
X_train_df_binary = X_train_df
X_test_df_binary = X_test_df
X_train_df_binary[X_train_df_binary>0]=1
X_test_df_binary[X_test_df_binary>0]=1
#Looking at the data again
img=X_train_df_binary.iloc[i].as_matrix()
img=img.reshape((28,28))
img=gaussian_filter(img, sigma=1)
plt.imshow(img,cmap='gray')
plt.title(X_train_df_binary.iloc[i,0])
#histogram of values for index50 show that all pixels are either black or white
plt.hist(X_train_df_binary.iloc[i])
#Now fitting the binarized data into SGD, SVM, and Kneighbors
sgd = SGDClassifier() #SGD Results: 88.17% +/-0.42%
cv = KFold(n_splits=5,shuffle=True,random_state=42)
results = cross_val_score(sgd, X_train_df_binary, Y_train_df, cv=cv)
print('SGD Results: %.2f%% +/-%.2f%%' % (results.mean()*100, results.std()*100))
#Still, fitting SVM and KNeighbors using cross_val_score takes too long. Trying using .score/train_test_split
#Again manually splitting training set into cross-validation sets using train_test_split
X_train_df_val_bin, X_test_df_val_bin, Y_train_df_val_bin, Y_test_df_val_bin = train_test_split(X_train_df_binary, Y_train_df, train_size = 0.75, random_state = 46)
X_train_df_val_bin.shape, X_test_df_val_bin.shape, Y_train_df_val_bin.shape, Y_test_df_val_bin.shape
#*************************************************************************
#Now working with neural networks. To have reproducibility, we set randomizer seed
seed = 7
np.random.seed(seed)
#Keras requires inputs to be numpy arrays, not pandas dataframes.
temp_data = train.values
X_train = temp_data[:,1:].astype(float)
Y_train = temp_data[:,0]
X_test = test.values
#Another look at the data
X_image = X_train.reshape(42000, 28,28)
for i in range(0, 9):
plt.subplot(330 + 1 + i)
plt.imshow(X_image[i], cmap=plt.get_cmap('gray'))
# show the plot
plt.show()
#Now applying one-hot encoding to numpy array Y_train
#using keras packaging since it seems to work better without indexing errors...
encoder = LabelEncoder()
encoder.fit(Y_train)
dummy_y = np_utils.to_categorical(Y_train)
#dummy_y.shape is now (42000, 10)
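#Illustrative sketch of the encoding (hypothetical labels):
#np_utils.to_categorical([0, 1, 2]) -> [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]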
model = Sequential() #Neural Network Results: 96.66% +/-0.26%
model.add(Dense(200, input_dim=784, kernel_initializer='normal', activation='relu'))
model.add(Dense(30, kernel_initializer='normal', activation='relu'))
model.add(Dense(10, kernel_initializer='normal', activation='softmax'))
# Compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
#Checkpointing
filepath='C:/Users/Michael Kang/Desktop/Data_Files/Housing/weights.best.hdf5'
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
#Plot learning curves
model_history = model.fit(X_train, dummy_y, validation_split=0.33, epochs=15, batch_size=10, callbacks=callbacks_list)
plt.plot(model_history.history['acc'])
plt.plot(model_history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
''' #This is another way to evaluate the model
create_baseline() #This yielded 97.64%
model.fit(X_train, dummy_y, epochs = 10, batch_size=10)
scores = model.evaluate(X, Y)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
'''
#If we were to use this neural network, would submit using the following code
'''create_baseline()
model.fit(X, Y, nb_epoch=10, batch_size=5, verbose=2)
Y_pred = model.predict(X_test)
Y_index = np.argmax(Y_pred,axis=1)
submission = pd.DataFrame({
'ImageId': (np.arange(Y_index.shape[0])+1),
'Label': Y_index
})
submission.to_csv('../output/submission.csv', index=False)
'''
#***************************************************************************************
#Now using a Convoluted Neural Netwok with feeds being the images themselves
X_train_2D = X_train.reshape(X_train.shape[0], 28, 28, 1) #Thus each image has dimensions 28x28x1 (a single channel, since the images are greyscale)
#plt.imshow(X_train_2D[3],cmap='gray'). #The image was reshaped correctly
X_test_2D = X_test.reshape(X_test.shape[0], 28, 28, 1)
#Now normalize each value between 0 and 1 by dividing by 255.
#Will have to learn more about StandardScaler and Pipeline, since I didn't get high accuracy when using them on the above neural network
X_train_2D = X_train_2D / 255
X_test_2D = X_test_2D / 255
X_trained_2D, X_val_2D, Y_trained_2D, Y_val_2D = train_test_split(X_train_2D, dummy_y, train_size=0.7, random_state=seed)
#Creating & Compiling Model
#*************************LOOK AT HOW I IMPLEMENTED ON KAGGLE MNIST NOTEBOOK
model = Sequential() #These dimensions are obtained using print(model.output_shape)
model.add(Convolution2D(32, (3, 3), activation='relu', input_shape=(28,28,1))) #(None, 26, 26, 32)
model.add(Convolution2D(32, (3, 3), activation='relu')) #(None, 24, 24, 32)
model.add(MaxPooling2D(pool_size=(2,2))) #(None, 12, 12, 32)
model.add(Dropout(0.25))
model.add(Flatten()) #(None, 4608)
model.add(Dense(128, activation='relu')) #(None, 288)
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax')) #(None, 10)
#Adding augmentation
data_generated = ImageDataGenerator(zoom_range = 0.1, height_shift_range = 0.1, width_shift_range = 0.1, rotation_range = 15)
#Compiling now
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
#Checkpointing
filepath='C:/Users/Michael Kang/Desktop/Data_Files/Housing/convoluted_weights.best.hdf5'
Early = EarlyStopping(monitor='val_acc', min_delta=0, patience=2, verbose=2, mode='auto')
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [Early, checkpoint]
#callbacks_list = [checkpoint]
#Plot learning curves
conv_model_history = model.fit_generator(data_generated.flow(X_trained_2D, Y_trained_2D, batch_size=32), steps_per_epoch=X_trained_2D.shape[0] // 32, epochs=50, verbose=1, validation_data=(X_val_2D, Y_val_2D), callbacks=callbacks_list) #steps_per_epoch counts batches, so divide the sample count by the batch size
plt.plot(conv_model_history.history['acc'])
plt.plot(conv_model_history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
'''
Y_pred = model.predict(X_test_2D, batch_size=32)
y_index = np.argmax(Y_pred,axis=1)
submission = pd.DataFrame({
'ImageId': (np.arange(y_index.shape[0])+1),
'Label': y_index
})
submission.to_csv('C:/Users/Michael Kang/Desktop/Data_Files/Digit Recognizer/3rd try.csv', index=False)
'''
| MichaelMKKang/Projects | Kaggle_Projects/DigitRecognizer.py | Python | mit | 10,079 |
import numpy as np
import pandas as pd
import pytest
from landlab import FieldError, RasterModelGrid
from landlab.components import COMPONENTS
_VALID_LOCS = {"grid", "node", "link", "patch", "corner", "face", "cell"}
_REQUIRED_ATTRS = {"doc", "mapping", "dtype", "intent", "optional", "units"}
_EXCLUDE_COMPONENTS = {
"ChannelProfiler",
"DrainageDensity",
"gFlex",
"HackCalculator",
"Lithology",
"LithoLayers",
"NetworkSedimentTransporter",
"Profiler",
"SoilMoisture",
"Vegetation",
}
@pytest.mark.parametrize("Comp", COMPONENTS)
def test_component_info_unit_agnostic(Comp):
"""Check for a valid _units_agnostic attribute"""
assert Comp._unit_agnostic in (True, False)
def _add_input_fields_to_grid(cls, grid):
for name, meta in cls._info.items():
if meta["intent"].startswith("in"):
at = cls.var_loc(name)
dtype = cls.var_type(name)
if at == "grid":
grid.at_grid[name] = np.array(0, dtype=dtype)
else:
grid.add_zeros(name, at=at, dtype=dtype)
return grid
@pytest.mark.parametrize("Comp", COMPONENTS)
def test_component_output_fields(Comp):
"""Check that required output fields exist with correct dtypes and locations"""
if Comp.name in _EXCLUDE_COMPONENTS:
pytest.skip("component explicitly excluded")
component_name = Comp._name
grid = RasterModelGrid((10, 10))
_add_input_fields_to_grid(Comp, grid)
Comp(grid)
for name, meta in Comp._info.items():
if meta["intent"].endswith("out") and not meta["optional"]:
at = meta["mapping"]
if name not in grid[at]:
raise ValueError(
f"{component_name} is missing output variable: {name} at {at}"
)
expected_dtype = meta["dtype"]
actual_dtype = grid[at][name].dtype
if actual_dtype != expected_dtype:
raise FieldError(
f"{component_name} output required variable: {name} at {at} has "
f"incorrect dtype. dtype must be {expected_dtype} and is "
f"{actual_dtype}"
)
@pytest.mark.parametrize("Comp", COMPONENTS)
def test_component_info_missing_attrs(Comp):
"""Check that in/out fields are not missing attributes"""
component_name = Comp._name
for name, meta in Comp._info.items():
at = meta["mapping"]
missing = ", ".join(sorted(_REQUIRED_ATTRS - set(meta)))
if missing:
raise ValueError(
f"{component_name} is missing attributes ({missing}) about variable: "
f"{name} at {at}"
)
@pytest.mark.parametrize("Comp", COMPONENTS)
def test_component_info_unknown_attrs(Comp):
"""Check that in/out fields have valid attributes"""
component_name = Comp._name
for name, meta in Comp._info.items():
at = meta["mapping"]
unknown = ", ".join(sorted(set(meta) - _REQUIRED_ATTRS))
if unknown:
raise ValueError(
f"{component_name} has extra attributes ({unknown}) about variable: "
f"{name} at {at}"
)
@pytest.mark.parametrize("Comp", COMPONENTS)
def test_component_info_valid_dtype(Comp):
"""Check that fields have a valid numpy dtype"""
component_name = Comp._name
for name, meta in Comp._info.items():
dtype = meta["dtype"]
try:
np.dtype(dtype)
except TypeError:
raise ValueError(
f"{component_name} has a bad dtype ({dtype}) for variable: {name}"
)
@pytest.mark.parametrize("Comp", COMPONENTS)
def test_component_info_valid_locations(Comp):
"""Check that fields are defined at valid locations"""
component_name = Comp._name
# verify all info exist:
for name, meta in Comp._info.items():
at = meta["mapping"]
# TODO: Verify that all units are UDUNITS compatible.
if at not in _VALID_LOCS:
raise ValueError(
f"{component_name} mapping for variable: {name} is invalid: {at}"
)
def test_consistent_doc_names():
out = []
for comp in COMPONENTS:
for name in comp._info:
temp = {"component": comp.__name__, "field": name}
for key in comp._info[name].keys():
temp[key] = comp._info[name][key]
out.append(temp)
df = pd.DataFrame(out)
unique_fields = df.field.unique().astype(str)
bad_fields = {}
for field in unique_fields:
where = df.field == field
if where.sum() > 1:
sel = df[where]
doc_vals = df.doc[where].values.astype(str)
inconsistent = []
for i in range(len(doc_vals) - 1):
if doc_vals[i] != doc_vals[-1]:
inconsistent.append(sel.component.values[i])
if len(inconsistent) > 0:
bad_fields[field] = inconsistent
if len(bad_fields) > 0:
msg = "The following fields have inconsistent documentation:\n"
for field in bad_fields.keys():
inconsistent = bad_fields[field]
msg += "\n" + field + ":\n " + "\n ".join(inconsistent)
raise ValueError(msg)
| landlab/landlab | tests/components/test_components.py | Python | mit | 5,319 |
import py, pytest
from _pytest.config import Conftest
def pytest_generate_tests(metafunc):
if "basedir" in metafunc.fixturenames:
metafunc.addcall(param="global")
metafunc.addcall(param="inpackage")
def pytest_funcarg__basedir(request):
def basedirmaker(request):
basedir = d = request.getfuncargvalue("tmpdir")
d.ensure("adir/conftest.py").write("a=1 ; Directory = 3")
d.ensure("adir/b/conftest.py").write("b=2 ; a = 1.5")
if request.param == "inpackage":
d.ensure("adir/__init__.py")
d.ensure("adir/b/__init__.py")
return d
return request.cached_setup(
lambda: basedirmaker(request), extrakey=request.param)
def ConftestWithSetinitial(path):
conftest = Conftest()
conftest.setinitial([path])
return conftest
class TestConftestValueAccessGlobal:
def test_basic_init(self, basedir):
conftest = Conftest()
conftest.setinitial([basedir.join("adir")])
assert conftest.rget("a") == 1
def test_onimport(self, basedir):
l = []
conftest = Conftest(onimport=l.append)
conftest.setinitial([basedir.join("adir"),
'--confcutdir=%s' % basedir])
assert len(l) == 1
assert conftest.rget("a") == 1
assert conftest.rget("b", basedir.join("adir", "b")) == 2
assert len(l) == 2
    def test_immediate_initialization_and_incremental_are_the_same(self, basedir):
conftest = Conftest()
snap0 = len(conftest._path2confmods)
conftest.getconftestmodules(basedir)
snap1 = len(conftest._path2confmods)
#assert len(conftest._path2confmods) == snap1 + 1
conftest.getconftestmodules(basedir.join('adir'))
assert len(conftest._path2confmods) == snap1 + 1
conftest.getconftestmodules(basedir.join('b'))
assert len(conftest._path2confmods) == snap1 + 2
def test_default_has_lower_prio(self, basedir):
conftest = ConftestWithSetinitial(basedir.join("adir"))
assert conftest.rget('Directory') == 3
#assert conftest.lget('Directory') == pytest.Directory
def test_value_access_not_existing(self, basedir):
conftest = ConftestWithSetinitial(basedir)
pytest.raises(KeyError, "conftest.rget('a')")
#pytest.raises(KeyError, "conftest.lget('a')")
def test_value_access_by_path(self, basedir):
conftest = ConftestWithSetinitial(basedir)
assert conftest.rget("a", basedir.join('adir')) == 1
#assert conftest.lget("a", basedir.join('adir')) == 1
assert conftest.rget("a", basedir.join('adir', 'b')) == 1.5
#assert conftest.lget("a", basedir.join('adir', 'b')) == 1
#assert conftest.lget("b", basedir.join('adir', 'b')) == 2
#assert pytest.raises(KeyError,
# 'conftest.lget("b", basedir.join("a"))'
#)
def test_value_access_with_init_one_conftest(self, basedir):
conftest = ConftestWithSetinitial(basedir.join('adir'))
assert conftest.rget("a") == 1
#assert conftest.lget("a") == 1
def test_value_access_with_init_two_conftests(self, basedir):
conftest = ConftestWithSetinitial(basedir.join("adir", "b"))
conftest.rget("a") == 1.5
#conftest.lget("a") == 1
#conftest.lget("b") == 1
def test_value_access_with_confmod(self, basedir):
startdir = basedir.join("adir", "b")
startdir.ensure("xx", dir=True)
conftest = ConftestWithSetinitial(startdir)
mod, value = conftest.rget_with_confmod("a", startdir)
assert value == 1.5
path = py.path.local(mod.__file__)
assert path.dirpath() == basedir.join("adir", "b")
assert path.purebasename.startswith("conftest")
def test_conftest_in_nonpkg_with_init(tmpdir):
tmpdir.ensure("adir-1.0/conftest.py").write("a=1 ; Directory = 3")
tmpdir.ensure("adir-1.0/b/conftest.py").write("b=2 ; a = 1.5")
tmpdir.ensure("adir-1.0/b/__init__.py")
tmpdir.ensure("adir-1.0/__init__.py")
conftest = ConftestWithSetinitial(tmpdir.join("adir-1.0", "b"))
def test_doubledash_not_considered(testdir):
conf = testdir.mkdir("--option")
conf.join("conftest.py").ensure()
conftest = Conftest()
conftest.setinitial([conf.basename, conf.basename])
l = conftest.getconftestmodules(None)
assert len(l) == 0
def test_issue151_load_all_conftests(testdir):
names = "code proj src".split()
for name in names:
p = testdir.mkdir(name)
p.ensure("conftest.py")
conftest = Conftest()
conftest.setinitial(names)
d = list(conftest._conftestpath2mod.values())
assert len(d) == len(names)
def test_conftest_global_import(testdir):
testdir.makeconftest("x=3")
p = testdir.makepyfile("""
import py, pytest
from _pytest.config import Conftest
conf = Conftest()
mod = conf.importconftest(py.path.local("conftest.py"))
assert mod.x == 3
import conftest
assert conftest is mod, (conftest, mod)
subconf = py.path.local().ensure("sub", "conftest.py")
subconf.write("y=4")
mod2 = conf.importconftest(subconf)
assert mod != mod2
assert mod2.y == 4
import conftest
assert conftest is mod2, (conftest, mod)
""")
res = testdir.runpython(p)
assert res.ret == 0
def test_conftestcutdir(testdir):
conf = testdir.makeconftest("")
p = testdir.mkdir("x")
conftest = Conftest(confcutdir=p)
conftest.setinitial([testdir.tmpdir])
l = conftest.getconftestmodules(p)
assert len(l) == 0
l = conftest.getconftestmodules(conf.dirpath())
assert len(l) == 0
assert conf not in conftest._conftestpath2mod
# but we can still import a conftest directly
conftest.importconftest(conf)
l = conftest.getconftestmodules(conf.dirpath())
assert l[0].__file__.startswith(str(conf))
# and all sub paths get updated properly
l = conftest.getconftestmodules(p)
assert len(l) == 1
assert l[0].__file__.startswith(str(conf))
def test_conftestcutdir_inplace_considered(testdir):
conf = testdir.makeconftest("")
conftest = Conftest(confcutdir=conf.dirpath())
conftest.setinitial([conf.dirpath()])
l = conftest.getconftestmodules(conf.dirpath())
assert len(l) == 1
assert l[0].__file__.startswith(str(conf))
def test_setinitial_confcut(testdir):
conf = testdir.makeconftest("")
sub = testdir.mkdir("sub")
sub.chdir()
for opts in (["--confcutdir=%s" % sub, sub],
[sub, "--confcutdir=%s" % sub],
["--confcutdir=.", sub],
[sub, "--confcutdir", sub],
[str(sub), "--confcutdir", "."],
):
conftest = Conftest()
conftest.setinitial(opts)
assert conftest._confcutdir == sub
assert conftest.getconftestmodules(sub) == []
assert conftest.getconftestmodules(conf.dirpath()) == []
@pytest.mark.multi(name='test tests whatever .dotdir'.split())
def test_setinitial_conftest_subdirs(testdir, name):
sub = testdir.mkdir(name)
subconftest = sub.ensure("conftest.py")
conftest = Conftest()
conftest.setinitial([sub.dirpath(), '--confcutdir=%s' % testdir.tmpdir])
if name not in ('whatever', '.dotdir'):
assert subconftest in conftest._conftestpath2mod
assert len(conftest._conftestpath2mod) == 1
else:
assert subconftest not in conftest._conftestpath2mod
assert len(conftest._conftestpath2mod) == 0
def test_conftest_confcutdir(testdir):
testdir.makeconftest("assert 0")
x = testdir.mkdir("x")
x.join("conftest.py").write(py.code.Source("""
def pytest_addoption(parser):
parser.addoption("--xyz", action="store_true")
"""))
result = testdir.runpytest("-h", "--confcutdir=%s" % x, x)
result.stdout.fnmatch_lines(["*--xyz*"])
def test_conftest_import_order(testdir, monkeypatch):
ct1 = testdir.makeconftest("")
sub = testdir.mkdir("sub")
ct2 = sub.join("conftest.py")
ct2.write("")
def impct(p):
return p
conftest = Conftest()
monkeypatch.setattr(conftest, 'importconftest', impct)
assert conftest.getconftestmodules(sub) == [ct1, ct2]
| geraldoandradee/pytest | testing/test_conftest.py | Python | mit | 8,268 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteContent
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dataplex
# [START dataplex_v1_generated_ContentService_DeleteContent_sync]
from google.cloud import dataplex_v1
def sample_delete_content():
# Create a client
client = dataplex_v1.ContentServiceClient()
# Initialize request argument(s)
request = dataplex_v1.DeleteContentRequest(
name="name_value",
)
# Make the request
client.delete_content(request=request)
# [END dataplex_v1_generated_ContentService_DeleteContent_sync]
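# Hypothetical local runner (not part of the generated sample): assumes
# Application Default Credentials are configured and that "name_value" in the
# request above is replaced with a real content resource name.
if __name__ == "__main__":
    sample_delete_content()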
| googleapis/python-dataplex | samples/generated_samples/dataplex_v1_generated_content_service_delete_content_sync.py | Python | apache-2.0 | 1,392 |
# coding: utf-8
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the editor view."""
import datetime
import imghdr
import json
import logging
import jinja2
from core.controllers import base
from core.domain import config_domain
from core.domain import dependency_registry
from core.domain import email_manager
from core.domain import event_services
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import fs_domain
from core.domain import gadget_registry
from core.domain import interaction_registry
from core.domain import obj_services
from core.domain import rights_manager
from core.domain import rte_component_registry
from core.domain import stats_services
from core.domain import user_services
from core.domain import value_generators_domain
from core.platform import models
import feconf
import utils
current_user_services = models.Registry.import_current_user_services()
(user_models,) = models.Registry.import_models([models.NAMES.user])
# The frontend template for a new state. It is sent to the frontend when the
# exploration editor page is first loaded, so that new states can be
# added in a way that is completely client-side.
# IMPORTANT: Before adding this state to an existing exploration, the
# state name and the destination of the default rule should first be
# changed to the desired new state name.
NEW_STATE_TEMPLATE = {
'content': [{
'type': 'text',
'value': ''
}],
'interaction': exp_domain.State.NULL_INTERACTION_DICT,
'param_changes': [],
'unresolved_answers': {},
}
DEFAULT_TWITTER_SHARE_MESSAGE_EDITOR = config_domain.ConfigProperty(
'default_twitter_share_message_editor', {
'type': 'unicode',
},
'Default text for the Twitter share message for the editor',
default_value=(
'Check out this interactive lesson I created on Oppia - a free '
'platform for teaching and learning!'))
def get_value_generators_js():
"""Return a string that concatenates the JS for all value generators."""
all_value_generators = (
value_generators_domain.Registry.get_all_generator_classes())
value_generators_js = ''
for _, generator_cls in all_value_generators.iteritems():
value_generators_js += generator_cls.get_js_template()
return value_generators_js
def _require_valid_version(version_from_payload, exploration_version):
"""Check that the payload version matches the given exploration version."""
if version_from_payload is None:
raise base.BaseHandler.InvalidInputException(
'Invalid POST request: a version must be specified.')
if version_from_payload != exploration_version:
raise base.BaseHandler.InvalidInputException(
'Trying to update version %s of exploration from version %s, '
'which is too old. Please reload the page and try again.'
% (exploration_version, version_from_payload))
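# Usage sketch for _require_valid_version (hypothetical values): with an
# exploration at version 4,
#   _require_valid_version(None, 4)  # raises InvalidInputException
#   _require_valid_version(3, 4)     # raises InvalidInputException (stale)
#   _require_valid_version(4, 4)     # passes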
def require_editor(handler):
"""Decorator that checks if the user can edit the given exploration."""
def test_editor(self, exploration_id, escaped_state_name=None, **kwargs):
"""Gets the user and exploration id if the user can edit it.
Args:
self: the handler instance
exploration_id: the exploration id
escaped_state_name: the URL-escaped state name, if it exists
**kwargs: any other arguments passed to the handler
Returns:
The relevant handler, if the user is authorized to edit this
exploration.
Raises:
self.PageNotFoundException: if no such exploration or state exists.
self.UnauthorizedUserException: if the user exists but does not
have the right credentials.
"""
if not self.user_id:
self.redirect(current_user_services.create_login_url(
self.request.uri))
return
if self.username in config_domain.BANNED_USERNAMES.value:
raise self.UnauthorizedUserException(
                u'You do not have permission to access this page.')
try:
exploration = exp_services.get_exploration_by_id(exploration_id)
except:
raise self.PageNotFoundException
if not rights_manager.Actor(self.user_id).can_edit(
feconf.ACTIVITY_TYPE_EXPLORATION, exploration_id):
raise self.UnauthorizedUserException(
                u'You do not have permission to edit this exploration.',
self.user_id)
if not escaped_state_name:
return handler(self, exploration_id, **kwargs)
state_name = utils.unescape_encoded_uri_component(escaped_state_name)
if state_name not in exploration.states:
logging.error('Could not find state: %s' % state_name)
logging.error('Available states: %s' % exploration.states.keys())
raise self.PageNotFoundException
return handler(self, exploration_id, state_name, **kwargs)
return test_editor
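# Usage sketch (hypothetical handler): decorated methods receive the
# already-unescaped state name whenever the URL includes one, e.g.
#
#     class StateHandler(EditorHandler):
#         @require_editor
#         def put(self, exploration_id, state_name):
#             ...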
class EditorHandler(base.BaseHandler):
"""Base class for all handlers for the editor page."""
# The page name to use as a key for generating CSRF tokens.
PAGE_NAME_FOR_CSRF = 'editor'
class ExplorationPage(EditorHandler):
"""The editor page for a single exploration."""
EDITOR_PAGE_DEPENDENCY_IDS = ['codemirror']
def get(self, exploration_id):
"""Handles GET requests."""
if exploration_id in feconf.DISABLED_EXPLORATION_IDS:
self.render_template(
'error/disabled_exploration.html', iframe_restriction=None)
return
exploration = exp_services.get_exploration_by_id(
exploration_id, strict=False)
if (exploration is None or
not rights_manager.Actor(self.user_id).can_view(
feconf.ACTIVITY_TYPE_EXPLORATION, exploration_id)):
self.redirect('/')
return
can_edit = (
bool(self.user_id) and
self.username not in config_domain.BANNED_USERNAMES.value and
rights_manager.Actor(self.user_id).can_edit(
feconf.ACTIVITY_TYPE_EXPLORATION, exploration_id))
interaction_ids = (
interaction_registry.Registry.get_all_interaction_ids())
interaction_dependency_ids = (
interaction_registry.Registry.get_deduplicated_dependency_ids(
interaction_ids))
dependencies_html, additional_angular_modules = (
dependency_registry.Registry.get_deps_html_and_angular_modules(
interaction_dependency_ids + self.EDITOR_PAGE_DEPENDENCY_IDS))
interaction_templates = (
rte_component_registry.Registry.get_html_for_all_components() +
interaction_registry.Registry.get_interaction_html(
interaction_ids))
interaction_validators_html = (
interaction_registry.Registry.get_validators_html(
interaction_ids))
gadget_types = gadget_registry.Registry.get_all_gadget_types()
gadget_templates = (
gadget_registry.Registry.get_gadget_html(gadget_types))
self.values.update({
'GADGET_SPECS': gadget_registry.Registry.get_all_specs(),
'INTERACTION_SPECS': interaction_registry.Registry.get_all_specs(),
'PANEL_SPECS': feconf.PANELS_PROPERTIES,
'DEFAULT_OBJECT_VALUES': obj_services.get_default_object_values(),
'DEFAULT_TWITTER_SHARE_MESSAGE_EDITOR': (
DEFAULT_TWITTER_SHARE_MESSAGE_EDITOR.value),
'additional_angular_modules': additional_angular_modules,
'can_delete': rights_manager.Actor(
self.user_id).can_delete(
feconf.ACTIVITY_TYPE_EXPLORATION, exploration_id),
'can_edit': can_edit,
'can_modify_roles': rights_manager.Actor(
self.user_id).can_modify_roles(
feconf.ACTIVITY_TYPE_EXPLORATION, exploration_id),
'can_publicize': rights_manager.Actor(
self.user_id).can_publicize(
feconf.ACTIVITY_TYPE_EXPLORATION, exploration_id),
'can_publish': rights_manager.Actor(
self.user_id).can_publish(
feconf.ACTIVITY_TYPE_EXPLORATION, exploration_id),
'can_release_ownership': rights_manager.Actor(
self.user_id).can_release_ownership(
feconf.ACTIVITY_TYPE_EXPLORATION, exploration_id),
'can_unpublicize': rights_manager.Actor(
self.user_id).can_unpublicize(
feconf.ACTIVITY_TYPE_EXPLORATION, exploration_id),
'can_unpublish': rights_manager.Actor(
self.user_id).can_unpublish(
feconf.ACTIVITY_TYPE_EXPLORATION, exploration_id),
'dependencies_html': jinja2.utils.Markup(dependencies_html),
'gadget_templates': jinja2.utils.Markup(gadget_templates),
'interaction_templates': jinja2.utils.Markup(
interaction_templates),
'interaction_validators_html': jinja2.utils.Markup(
interaction_validators_html),
'meta_description': feconf.CREATE_PAGE_DESCRIPTION,
'nav_mode': feconf.NAV_MODE_CREATE,
'value_generators_js': jinja2.utils.Markup(
get_value_generators_js()),
'title': exploration.title,
'ALL_LANGUAGE_CODES': feconf.ALL_LANGUAGE_CODES,
'ALLOWED_GADGETS': feconf.ALLOWED_GADGETS,
'ALLOWED_INTERACTION_CATEGORIES': (
feconf.ALLOWED_INTERACTION_CATEGORIES),
'INVALID_PARAMETER_NAMES': feconf.INVALID_PARAMETER_NAMES,
'NEW_STATE_TEMPLATE': NEW_STATE_TEMPLATE,
'SHOW_TRAINABLE_UNRESOLVED_ANSWERS': (
feconf.SHOW_TRAINABLE_UNRESOLVED_ANSWERS),
'TAG_REGEX': feconf.TAG_REGEX,
})
self.render_template('exploration_editor/exploration_editor.html')
class ExplorationHandler(EditorHandler):
"""Page with editor data for a single exploration."""
PAGE_NAME_FOR_CSRF = 'editor'
def _get_exploration_data(
self, exploration_id, apply_draft=False, version=None):
"""Returns a description of the given exploration."""
try:
if apply_draft:
exploration = exp_services.get_exp_with_draft_applied(
exploration_id, self.user_id)
else:
exploration = exp_services.get_exploration_by_id(
exploration_id, version=version)
except:
raise self.PageNotFoundException
states = {}
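        # Augment each state dict with its top unresolved answers so the
        # editor frontend can display them alongside the state.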
for state_name in exploration.states:
state_dict = exploration.states[state_name].to_dict()
state_dict['unresolved_answers'] = (
stats_services.get_top_unresolved_answers_for_default_rule(
exploration_id, state_name))
states[state_name] = state_dict
exp_user_data = user_models.ExplorationUserDataModel.get(
self.user_id, exploration_id)
draft_changes = (exp_user_data.draft_change_list if exp_user_data
and exp_user_data.draft_change_list else None)
is_version_of_draft_valid = (
exp_services.is_version_of_draft_valid(
exploration_id, exp_user_data.draft_change_list_exp_version)
if exp_user_data and exp_user_data.draft_change_list_exp_version
else None)
editor_dict = {
'category': exploration.category,
'exploration_id': exploration_id,
'init_state_name': exploration.init_state_name,
'language_code': exploration.language_code,
'objective': exploration.objective,
'param_changes': exploration.param_change_dicts,
'param_specs': exploration.param_specs_dict,
'rights': rights_manager.get_exploration_rights(
exploration_id).to_dict(),
'show_state_editor_tutorial_on_load': (
self.user_id and not self.has_seen_editor_tutorial),
'skin_customizations': exploration.skin_instance.to_dict()[
'skin_customizations'],
'states': states,
'tags': exploration.tags,
'title': exploration.title,
'version': exploration.version,
'is_version_of_draft_valid': is_version_of_draft_valid,
'draft_changes': draft_changes
}
return editor_dict
def get(self, exploration_id):
"""Gets the data for the exploration overview page."""
if not rights_manager.Actor(self.user_id).can_view(
feconf.ACTIVITY_TYPE_EXPLORATION, exploration_id):
raise self.PageNotFoundException
# 'apply_draft' and 'v'(version) are optional parameters because the
# exploration history tab also uses this handler, and these parameters
# are not used by that tab.
version = self.request.get('v', default_value=None)
apply_draft = self.request.get('apply_draft', default_value=False)
self.values.update(
self._get_exploration_data(
exploration_id, apply_draft=apply_draft, version=version))
self.render_json(self.values)
@require_editor
def post(self, exploration_id):
"""Updates properties of the given exploration."""
exploration = exp_services.get_exploration_by_id(exploration_id)
version = self.payload.get('version')
_require_valid_version(version, exploration.version)
commit_message = self.payload.get('commit_message')
change_list = self.payload.get('change_list')
try:
exp_services.update_exploration(
self.user_id, exploration_id, change_list, commit_message)
except utils.ValidationError as e:
raise self.InvalidInputException(e)
self.values.update(self._get_exploration_data(exploration_id))
self.render_json(self.values)
@require_editor
def delete(self, exploration_id):
"""Deletes the given exploration."""
        role = self.request.get('role') or None
if role == rights_manager.ROLE_ADMIN:
if not self.is_admin:
logging.error(
'%s tried to delete an exploration, but is not an admin.'
% self.user_id)
raise self.UnauthorizedUserException(
'User %s does not have permissions to delete exploration '
'%s' % (self.user_id, exploration_id))
elif role == rights_manager.ROLE_MODERATOR:
if not self.is_moderator:
logging.error(
'%s tried to delete an exploration, but is not a '
'moderator.' % self.user_id)
raise self.UnauthorizedUserException(
'User %s does not have permissions to delete exploration '
'%s' % (self.user_id, exploration_id))
elif role is not None:
raise self.InvalidInputException('Invalid role: %s' % role)
logging.info(
'%s %s tried to delete exploration %s' %
(role, self.user_id, exploration_id))
exploration = exp_services.get_exploration_by_id(exploration_id)
can_delete = rights_manager.Actor(self.user_id).can_delete(
feconf.ACTIVITY_TYPE_EXPLORATION, exploration.id)
if not can_delete:
raise self.UnauthorizedUserException(
'User %s does not have permissions to delete exploration %s' %
(self.user_id, exploration_id))
is_exploration_cloned = rights_manager.is_exploration_cloned(
exploration_id)
exp_services.delete_exploration(
self.user_id, exploration_id, force_deletion=is_exploration_cloned)
logging.info(
'%s %s deleted exploration %s' %
(role, self.user_id, exploration_id))
class ExplorationRightsHandler(EditorHandler):
"""Handles management of exploration editing rights."""
PAGE_NAME_FOR_CSRF = 'editor'
@require_editor
def post(self, exploration_id):
"""Updates the editing rights for the given exploration."""
exploration = exp_services.get_exploration_by_id(exploration_id)
version = self.payload.get('version')
_require_valid_version(version, exploration.version)
is_public = self.payload.get('is_public')
is_publicized = self.payload.get('is_publicized')
is_community_owned = self.payload.get('is_community_owned')
new_member_username = self.payload.get('new_member_username')
new_member_role = self.payload.get('new_member_role')
viewable_if_private = self.payload.get('viewable_if_private')
if new_member_username:
if not rights_manager.Actor(
self.user_id).can_modify_roles(
feconf.ACTIVITY_TYPE_EXPLORATION,
exploration_id):
raise self.UnauthorizedUserException(
'Only an owner of this exploration can add or change '
'roles.')
new_member_id = user_services.get_user_id_from_username(
new_member_username)
if new_member_id is None:
raise Exception(
'Sorry, we could not find the specified user.')
rights_manager.assign_role_for_exploration(
self.user_id, exploration_id, new_member_id, new_member_role)
email_manager.send_role_notification_email(
self.user_id, new_member_id, new_member_role, exploration_id,
exploration.title)
elif is_public is not None:
exploration = exp_services.get_exploration_by_id(exploration_id)
if is_public:
try:
exploration.validate(strict=True)
except utils.ValidationError as e:
raise self.InvalidInputException(e)
exp_services.publish_exploration_and_update_user_profiles(
self.user_id, exploration_id)
exp_services.index_explorations_given_ids([exploration_id])
else:
rights_manager.unpublish_exploration(
self.user_id, exploration_id)
exp_services.delete_documents_from_search_index([
exploration_id])
elif is_publicized is not None:
exploration = exp_services.get_exploration_by_id(exploration_id)
if is_publicized:
try:
exploration.validate(strict=True)
except utils.ValidationError as e:
raise self.InvalidInputException(e)
rights_manager.publicize_exploration(
self.user_id, exploration_id)
else:
rights_manager.unpublicize_exploration(
self.user_id, exploration_id)
elif is_community_owned:
exploration = exp_services.get_exploration_by_id(exploration_id)
try:
exploration.validate(strict=True)
except utils.ValidationError as e:
raise self.InvalidInputException(e)
rights_manager.release_ownership_of_exploration(
self.user_id, exploration_id)
elif viewable_if_private is not None:
rights_manager.set_private_viewability_of_exploration(
self.user_id, exploration_id, viewable_if_private)
else:
raise self.InvalidInputException(
'No change was made to this exploration.')
self.render_json({
'rights': rights_manager.get_exploration_rights(
exploration_id).to_dict()
})
class ExplorationModeratorRightsHandler(EditorHandler):
"""Handles management of exploration rights by moderators."""
PAGE_NAME_FOR_CSRF = 'editor'
@base.require_moderator
def post(self, exploration_id):
"""Updates the publication status of the given exploration, and sends
an email to all its owners.
"""
exploration = exp_services.get_exploration_by_id(exploration_id)
action = self.payload.get('action')
email_body = self.payload.get('email_body')
version = self.payload.get('version')
_require_valid_version(version, exploration.version)
if action not in feconf.VALID_MODERATOR_ACTIONS:
raise self.InvalidInputException('Invalid moderator action.')
# If moderator emails can be sent, check that all the prerequisites are
# satisfied, otherwise do nothing.
if feconf.REQUIRE_EMAIL_ON_MODERATOR_ACTION:
if not email_body:
raise self.InvalidInputException(
'Moderator actions should include an email to the '
'recipient.')
email_manager.require_moderator_email_prereqs_are_satisfied()
# Perform the moderator action.
if action == 'unpublish_exploration':
rights_manager.unpublish_exploration(
self.user_id, exploration_id)
exp_services.delete_documents_from_search_index([
exploration_id])
elif action == 'publicize_exploration':
try:
exploration.validate(strict=True)
except utils.ValidationError as e:
raise self.InvalidInputException(e)
rights_manager.publicize_exploration(
self.user_id, exploration_id)
else:
raise self.InvalidInputException(
'No change was made to this exploration.')
exp_rights = rights_manager.get_exploration_rights(exploration_id)
# If moderator emails can be sent, send an email to the all owners of
# the exploration notifying them of the change.
if feconf.REQUIRE_EMAIL_ON_MODERATOR_ACTION:
for owner_id in exp_rights.owner_ids:
email_manager.send_moderator_action_email(
self.user_id, owner_id,
feconf.VALID_MODERATOR_ACTIONS[action]['email_intent'],
exploration.title, email_body)
self.render_json({
'rights': exp_rights.to_dict(),
})
class ResolvedAnswersHandler(EditorHandler):
"""Allows learners' answers for a state to be marked as resolved."""
PAGE_NAME_FOR_CSRF = 'editor'
@require_editor
def post(self, exploration_id, state_name):
"""Marks learners' answers as resolved."""
resolved_answers = self.payload.get('resolved_answers')
if not isinstance(resolved_answers, list):
raise self.InvalidInputException(
'Expected a list of resolved answers; received %s.' %
resolved_answers)
if 'resolved_answers' in self.payload:
event_services.DefaultRuleAnswerResolutionEventHandler.record(
exploration_id, state_name, resolved_answers)
self.render_json({})
class UntrainedAnswersHandler(EditorHandler):
"""Returns answers that learners have submitted, but that Oppia hasn't been
    explicitly trained by an exploration author to respond to.
"""
NUMBER_OF_TOP_ANSWERS_PER_RULE = 50
def get(self, exploration_id, escaped_state_name):
"""Handles GET requests."""
try:
exploration = exp_services.get_exploration_by_id(exploration_id)
except:
raise self.PageNotFoundException
state_name = utils.unescape_encoded_uri_component(escaped_state_name)
if state_name not in exploration.states:
# If trying to access a non-existing state, there is no training
# data associated with it.
self.render_json({'unhandled_answers': []})
return
state = exploration.states[state_name]
# TODO(bhenning): Answers should be bound to a particular exploration
# version or interaction ID.
# TODO(bhenning): If the top 100 answers have already been classified,
# then this handler will always return an empty list.
# TODO(bhenning): This entire function will not work as expected until
# the answers storage backend stores answers in a non-lossy way.
# Currently, answers are stored as HTML strings and they are not able
# to be converted back to the original objects they started as, so the
# normalization calls in this function will not work correctly on those
# strings. Once this happens, this handler should also be tested.
# The total number of possible answers is 100 because it requests the
# top 50 answers matched to the default rule and the top 50 answers
# matched to the classifier individually.
answers = stats_services.get_top_state_rule_answers(
exploration_id, state_name, [
exp_domain.DEFAULT_RULESPEC_STR,
exp_domain.CLASSIFIER_RULESPEC_STR])[
:self.NUMBER_OF_TOP_ANSWERS_PER_RULE]
interaction = state.interaction
unhandled_answers = []
if feconf.SHOW_TRAINABLE_UNRESOLVED_ANSWERS and interaction.id:
interaction_instance = (
interaction_registry.Registry.get_interaction_by_id(
interaction.id))
try:
# Normalize the answers.
for answer in answers:
answer['value'] = interaction_instance.normalize_answer(
answer['value'])
trained_answers = set()
for answer_group in interaction.answer_groups:
for rule_spec in answer_group.rule_specs:
if (rule_spec.rule_type ==
exp_domain.CLASSIFIER_RULESPEC_STR):
trained_answers.update(
interaction_instance.normalize_answer(trained)
for trained
in rule_spec.inputs['training_data'])
# Include all the answers which have been confirmed to be
# associated with the default outcome.
trained_answers.update(set(
interaction_instance.normalize_answer(confirmed)
for confirmed
in interaction.confirmed_unclassified_answers))
unhandled_answers = [
answer for answer in answers
if answer['value'] not in trained_answers
]
except Exception as e:
logging.warning(
'Error loading untrained answers for interaction %s: %s.' %
(interaction.id, e))
self.render_json({
'unhandled_answers': unhandled_answers
})
class ExplorationDownloadHandler(EditorHandler):
"""Downloads an exploration as a zip file, or dict of YAML strings
representing states.
"""
def get(self, exploration_id):
"""Handles GET requests."""
try:
exploration = exp_services.get_exploration_by_id(exploration_id)
except:
raise self.PageNotFoundException
if not rights_manager.Actor(self.user_id).can_view(
feconf.ACTIVITY_TYPE_EXPLORATION, exploration_id):
raise self.PageNotFoundException
version = self.request.get('v', default_value=exploration.version)
output_format = self.request.get('output_format', default_value='zip')
width = int(self.request.get('width', default_value=80))
        # The filename always reflects the exploration's latest title, even
        # when an older version is being downloaded.
filename = 'oppia-%s-v%s' % (
utils.to_ascii(exploration.title.replace(' ', '')), version)
if output_format == feconf.OUTPUT_FORMAT_ZIP:
self.response.headers['Content-Type'] = 'text/plain'
self.response.headers['Content-Disposition'] = (
'attachment; filename=%s.zip' % str(filename))
self.response.write(
exp_services.export_to_zip_file(exploration_id, version))
elif output_format == feconf.OUTPUT_FORMAT_JSON:
self.render_json(exp_services.export_states_to_yaml(
exploration_id, version=version, width=width))
else:
raise self.InvalidInputException(
'Unrecognized output format %s' % output_format)
class StateYamlHandler(EditorHandler):
"""Given a representation of a state, converts it to a YAML string.
Note that this handler is stateless; it does not make use of the storage
layer.
"""
def get(self):
"""Handles GET requests."""
try:
state_dict = json.loads(self.request.get('stringified_state'))
width = json.loads(self.request.get('stringified_width'))
except Exception:
raise self.PageNotFoundException
self.render_json({
'yaml': exp_services.convert_state_dict_to_yaml(state_dict, width),
})
class ExplorationResourcesHandler(EditorHandler):
"""Manages assets associated with an exploration."""
# @require_editor
def get(self, exploration_id):
"""Handles GET requests."""
fs = fs_domain.AbstractFileSystem(
fs_domain.ExplorationFileSystem(exploration_id))
dir_list = fs.listdir('')
self.render_json({'filepaths': dir_list})
class ExplorationSnapshotsHandler(EditorHandler):
"""Returns the exploration snapshot history."""
def get(self, exploration_id):
"""Handles GET requests."""
try:
snapshots = exp_services.get_exploration_snapshots_metadata(
exploration_id)
except:
raise self.PageNotFoundException
# Patch `snapshots` to use the editor's display name.
for snapshot in snapshots:
if snapshot['committer_id'] != feconf.SYSTEM_COMMITTER_ID:
snapshot['committer_id'] = user_services.get_username(
snapshot['committer_id'])
self.render_json({
'snapshots': snapshots,
})
class ExplorationRevertHandler(EditorHandler):
"""Reverts an exploration to an older version."""
@require_editor
def post(self, exploration_id):
"""Handles POST requests."""
current_version = self.payload.get('current_version')
revert_to_version = self.payload.get('revert_to_version')
if not isinstance(revert_to_version, int):
raise self.InvalidInputException(
'Expected an integer version to revert to; received %s.' %
revert_to_version)
if not isinstance(current_version, int):
raise self.InvalidInputException(
'Expected an integer current version; received %s.' %
current_version)
if revert_to_version < 1 or revert_to_version >= current_version:
raise self.InvalidInputException(
'Cannot revert to version %s from version %s.' %
(revert_to_version, current_version))
exp_services.discard_draft(exploration_id, self.user_id)
exp_services.revert_exploration(
self.user_id, exploration_id, current_version, revert_to_version)
self.render_json({})
class ExplorationStatisticsHandler(EditorHandler):
"""Returns statistics for an exploration."""
def get(self, exploration_id, exploration_version):
"""Handles GET requests."""
try:
exp_services.get_exploration_by_id(exploration_id)
except:
raise self.PageNotFoundException
self.render_json(stats_services.get_exploration_stats(
exploration_id, exploration_version))
class ExplorationStatsVersionsHandler(EditorHandler):
"""Returns statistics versions for an exploration."""
def get(self, exploration_id):
"""Handles GET requests."""
try:
exp_services.get_exploration_by_id(exploration_id)
except:
raise self.PageNotFoundException
self.render_json({
'versions': stats_services.get_versions_for_exploration_stats(
exploration_id)})
class StateRulesStatsHandler(EditorHandler):
"""Returns detailed learner answer statistics for a state."""
def get(self, exploration_id, escaped_state_name):
"""Handles GET requests."""
try:
exploration = exp_services.get_exploration_by_id(exploration_id)
except:
raise self.PageNotFoundException
state_name = utils.unescape_encoded_uri_component(escaped_state_name)
if state_name not in exploration.states:
logging.error('Could not find state: %s' % state_name)
logging.error('Available states: %s' % exploration.states.keys())
raise self.PageNotFoundException
self.render_json({
'rules_stats': stats_services.get_state_rules_stats(
exploration_id, state_name)
})
class ImageUploadHandler(EditorHandler):
"""Handles image uploads."""
# @require_editor
def post(self, exploration_id):
"""Saves an image uploaded by a content creator."""
raw = self.request.get('image')
filename = self.payload.get('filename')
if not raw:
raise self.InvalidInputException('No image supplied')
file_format = imghdr.what(None, h=raw)
if file_format not in feconf.ACCEPTED_IMAGE_FORMATS_AND_EXTENSIONS:
allowed_formats = ', '.join(
feconf.ACCEPTED_IMAGE_FORMATS_AND_EXTENSIONS.keys())
raise Exception('Image file not recognized: it should be in '
'one of the following formats: %s.' %
allowed_formats)
if not filename:
raise self.InvalidInputException('No filename supplied')
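        # Reject path-like names: e.g. 'a/b.png' and 'a..png' fail the check
        # below, while 'photo.png' passes.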
if '/' in filename or '..' in filename:
raise self.InvalidInputException(
'Filenames should not include slashes (/) or consecutive dot '
'characters.')
if '.' in filename:
dot_index = filename.rfind('.')
primary_name = filename[:dot_index]
extension = filename[dot_index + 1:].lower()
if (extension not in
feconf.ACCEPTED_IMAGE_FORMATS_AND_EXTENSIONS[file_format]):
raise self.InvalidInputException(
'Expected a filename ending in .%s; received %s' %
(file_format, filename))
else:
primary_name = filename
filepath = '%s.%s' % (primary_name, file_format)
fs = fs_domain.AbstractFileSystem(
fs_domain.ExplorationFileSystem(exploration_id))
if fs.isfile(filepath):
raise self.InvalidInputException(
                u'A file named %s already exists. Please choose a '
                u'different name.' % filepath)
fs.commit(self.user_id, filepath, raw)
self.render_json({'filepath': filepath})
class StartedTutorialEventHandler(EditorHandler):
"""Records that this user has started the state editor tutorial."""
def post(self):
"""Handles GET requests."""
user_services.record_user_started_state_editor_tutorial(self.user_id)
class EditorAutosaveHandler(ExplorationHandler):
"""Handles requests from the editor for draft autosave."""
@require_editor
def post(self, exploration_id):
"""Handles PUT requests for draft updation."""
# Raise an Exception if the draft change list fails non-strict
# validation.
try:
change_list = self.payload.get('change_list')
if change_list is None:
# """Handles POST request for discarding draft changes."""
exp_services.discard_draft(exploration_id, self.user_id)
self.render_json({})
return
version = self.payload.get('version')
exp_services.create_or_update_draft(
exploration_id, self.user_id, change_list, version,
datetime.datetime.utcnow())
except utils.ValidationError as e:
# We leave any pre-existing draft changes in the datastore.
raise self.InvalidInputException(e)
# If the value passed here is False, have the user discard the draft
# changes. We save the draft to the datastore even if the version is
# invalid, so that it is available for recovery later.
self.render_json({
'is_version_of_draft_valid': exp_services.is_version_of_draft_valid(
exploration_id, version)})
| zgchizi/oppia-uc | core/controllers/editor.py | Python | apache-2.0 | 37,959 |
# XXX Final exam problem 4. Work here.
# Increment the like counter of the embedded comment at the (string) ordinal
# position comment_ordinal within the post's 'comments' array.
posts.update({'permalink': permalink},
             {'$inc': {'comments.' + comment_ordinal + '.num_likes': 1}})
| hemmerling/nosql-mongodb2013 | src/m101j/final/final-4/hemmerling_final4.py | Python | apache-2.0 | 139 |
"""
A test spanning all the capabilities of all the serializers.
This class sets up a model for each model field type
(except for image types, because of the PIL dependency).
"""
from django.db import models
from django.contrib.contenttypes.models import ContentType
# The following classes are for testing basic data
# marshalling, including NULL values.
class BooleanData(models.Model):
data = models.BooleanField(null=True)
class CharData(models.Model):
data = models.CharField(maxlength=30, null=True)
class DateData(models.Model):
data = models.DateField(null=True)
class DateTimeData(models.Model):
data = models.DateTimeField(null=True)
class EmailData(models.Model):
data = models.EmailField(null=True)
class FileData(models.Model):
data = models.FileField(null=True, upload_to='/foo/bar')
class FilePathData(models.Model):
data = models.FilePathField(null=True)
class FloatData(models.Model):
data = models.FloatField(null=True, decimal_places=3, max_digits=5)
class IntegerData(models.Model):
data = models.IntegerField(null=True)
# class ImageData(models.Model):
# data = models.ImageField(null=True)
class IPAddressData(models.Model):
data = models.IPAddressField(null=True)
class NullBooleanData(models.Model):
data = models.NullBooleanField(null=True)
class PhoneData(models.Model):
data = models.PhoneNumberField(null=True)
class PositiveIntegerData(models.Model):
data = models.PositiveIntegerField(null=True)
class PositiveSmallIntegerData(models.Model):
data = models.PositiveSmallIntegerField(null=True)
class SlugData(models.Model):
data = models.SlugField(null=True)
class SmallData(models.Model):
data = models.SmallIntegerField(null=True)
class TextData(models.Model):
data = models.TextField(null=True)
class TimeData(models.Model):
data = models.TimeField(null=True)
class USStateData(models.Model):
data = models.USStateField(null=True)
class XMLData(models.Model):
data = models.XMLField(null=True)
class Tag(models.Model):
"""A tag on an item."""
data = models.SlugField()
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = models.GenericForeignKey()
class Meta:
ordering = ["data"]
class GenericData(models.Model):
data = models.CharField(maxlength=30)
tags = models.GenericRelation(Tag)
# The following test classes are all for validation
# of related objects; in particular, forward, backward,
# and self references.
class Anchor(models.Model):
"""This is a model that can be used as
something for other models to point at"""
data = models.CharField(maxlength=30)
class FKData(models.Model):
data = models.ForeignKey(Anchor, null=True)
class M2MData(models.Model):
data = models.ManyToManyField(Anchor, null=True)
class O2OData(models.Model):
data = models.OneToOneField(Anchor, null=True)
class FKSelfData(models.Model):
data = models.ForeignKey('self', null=True)
class M2MSelfData(models.Model):
data = models.ManyToManyField('self', null=True, symmetrical=False)
# The following test classes are for validating the
# deserialization of objects that use a user-defined
# field as the primary key.
# Some of these data types have been commented out
# because they can't be used as a primary key on one
# or all database backends.
class BooleanPKData(models.Model):
data = models.BooleanField(primary_key=True)
class CharPKData(models.Model):
data = models.CharField(maxlength=30, primary_key=True)
# class DatePKData(models.Model):
# data = models.DateField(primary_key=True)
# class DateTimePKData(models.Model):
# data = models.DateTimeField(primary_key=True)
class EmailPKData(models.Model):
data = models.EmailField(primary_key=True)
class FilePKData(models.Model):
data = models.FileField(primary_key=True, upload_to='/foo/bar')
class FilePathPKData(models.Model):
data = models.FilePathField(primary_key=True)
class FloatPKData(models.Model):
data = models.FloatField(primary_key=True, decimal_places=3, max_digits=5)
class IntegerPKData(models.Model):
data = models.IntegerField(primary_key=True)
# class ImagePKData(models.Model):
# data = models.ImageField(primary_key=True)
class IPAddressPKData(models.Model):
data = models.IPAddressField(primary_key=True)
class NullBooleanPKData(models.Model):
data = models.NullBooleanField(primary_key=True)
class PhonePKData(models.Model):
data = models.PhoneNumberField(primary_key=True)
class PositiveIntegerPKData(models.Model):
data = models.PositiveIntegerField(primary_key=True)
class PositiveSmallIntegerPKData(models.Model):
data = models.PositiveSmallIntegerField(primary_key=True)
class SlugPKData(models.Model):
data = models.SlugField(primary_key=True)
class SmallPKData(models.Model):
data = models.SmallIntegerField(primary_key=True)
# class TextPKData(models.Model):
# data = models.TextField(primary_key=True)
# class TimePKData(models.Model):
# data = models.TimeField(primary_key=True)
class USStatePKData(models.Model):
data = models.USStateField(primary_key=True)
# class XMLPKData(models.Model):
# data = models.XMLField(primary_key=True)
| jamslevy/gsoc | thirdparty/google_appengine/lib/django/tests/regressiontests/serializers_regress/models.py | Python | apache-2.0 | 5,334 |
#!/usr/bin/env python
#
# Copyright 2007 Doug Hellmann.
#
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Doug
# Hellmann not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# DOUG HELLMANN DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL DOUG HELLMANN BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
"""Pretty print with pprint
"""
__version__ = "$Id$"
#end_pymotw_header
from pprint import pprint
from pprint_data import data
pprint(data, depth=1)
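# With depth=1, pprint elides every nested container as '...'; the exact
# output depends on the structure of pprint_data.data, e.g. a list of tuples
# prints as [(...), (...), ...].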
| qilicun/python | python2/PyMOTW-1.132/PyMOTW/pprint/pprint_depth.py | Python | gpl-3.0 | 1,202 |
import numpy as np
import amnet
import z3
from numpy.linalg import norm
import sys
import unittest
import itertools
VISUALIZE = True # output graphviz drawings
if VISUALIZE:
import amnet.vis
class TestSmt(unittest.TestCase):
@classmethod
def setUpClass(cls):
print 'Setting up test floats.'
cls.floatvals = np.concatenate(
(np.linspace(-5., 5., 11), np.linspace(-5., 5., 10)),
axis=0
)
cls.floatvals2 = np.concatenate(
(np.linspace(-5., 5., 3), np.linspace(-.5, .5, 2)),
axis=0
)
cls.floatvals3 = np.linspace(-5., 5., 3)
cls.FPTOL = 1e-8
# set up global z3 parameters
# parameters from https://stackoverflow.com/a/12516269
#z3.set_param('auto_config', False)
#z3.set_param('smt.case_split', 5)
#z3.set_param('smt.relevancy', 2)
def validate_outputs(self, phi, onvals, true_f=None, verbose=False):
# encode phi using default context and solver
enc = amnet.smt.SmtEncoder(phi=phi, solver=None)
# tap the input and output vars
invar = enc.var_of_input()
outvar = enc.var_of(phi)
# check dimensions
self.assertEqual(phi.indim, len(invar))
self.assertEqual(phi.outdim, len(outvar))
# go through inputs
for val in onvals:
# get a new value
fpval = np.array(val)
self.assertEqual(len(fpval), phi.indim)
# evaluate using the Amn tree
fpeval = phi.eval(fpval)
self.assertEqual(len(fpeval), phi.outdim)
if verbose:
print 'inp:', fpval
print 'fpeval: ', fpeval
# compare to true floating point function, if it's provided
if true_f is not None:
true_eval = true_f(fpval)
if verbose: print 'true_eval: ', true_eval
self.assertAlmostEqual(norm(true_eval - fpeval), 0)
# set the z3 input
enc.solver.push()
for i in range(len(invar)):
enc.solver.add(invar[i] == fpval[i])
# run z3 to check for satisfiability
result = enc.solver.check()
#if verbose: print enc.solver
self.assertTrue(result == z3.sat)
# extract the output
model = enc.solver.model()
smteval = np.zeros(len(outvar))
for i in range(len(outvar)):
smteval[i] = amnet.util.mfp(model, outvar[i])
# check that the outputs match
if verbose: print 'smteval: ', smteval
self.assertAlmostEqual(norm(smteval - fpeval), 0)
enc.solver.pop()
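    # The push/add/check/model/pop sequence above is the standard incremental
    # z3 pattern: pin the inputs, solve, read values off the model, then pop
    # so the next input assignment starts from a clean solver state.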
def donot_test_SmtEncoder_mu_big(self):
xyz = amnet.Variable(3, name='xyz')
x = amnet.atoms.select(xyz, 0)
y = amnet.atoms.select(xyz, 1)
z = amnet.atoms.select(xyz, 2)
w = amnet.Mu(x, y, z)
def true_mu(fpin):
x, y, z = fpin
return x if z <= 0 else y
self.validate_outputs(
phi=w,
onvals=itertools.product(self.floatvals, repeat=w.indim),
true_f=true_mu
)
def test_SmtEncoder_mu_small(self):
xyz = amnet.Variable(3, name='xyz')
x = amnet.atoms.select(xyz, 0)
y = amnet.atoms.select(xyz, 1)
z = amnet.atoms.select(xyz, 2)
w = amnet.Mu(x, y, z)
def true_mu(fpin):
x, y, z = fpin
return x if z <= 0 else y
self.validate_outputs(
phi=w,
onvals=itertools.product(self.floatvals2, repeat=w.indim),
true_f=true_mu
)
if VISUALIZE: amnet.vis.quick_vis(phi=w, title='mu')
def test_SmtEncoder_max_all_2(self):
xy = amnet.Variable(2, name='xy')
phi_max2 = amnet.atoms.max_all(xy)
self.assertEqual(phi_max2.indim, 2)
def true_max2(fpin):
x, y = fpin
return max(x, y)
self.validate_outputs(
phi=phi_max2,
onvals=itertools.product(self.floatvals, repeat=phi_max2.indim),
true_f=true_max2
)
def test_SmtEncoder_min_all_2(self):
xy = amnet.Variable(2, name='xy')
phi_min2 = amnet.atoms.min_all(xy)
self.assertEqual(phi_min2.indim, 2)
def true_min2(fpin):
x, y = fpin
return min(x, y)
self.validate_outputs(
phi=phi_min2,
onvals=itertools.product(self.floatvals, repeat=phi_min2.indim),
true_f=true_min2
)
def test_SmtEncoder_max_all_3_small(self):
        xyz = amnet.Variable(3, name='xyz')
phi_max3 = amnet.atoms.max_all(xyz)
self.assertEqual(phi_max3.indim, 3)
def true_max3(fpin):
x, y, z = fpin
return max(x, y, z)
self.validate_outputs(
phi=phi_max3,
onvals=itertools.product(self.floatvals2, repeat=phi_max3.indim),
true_f=true_max3
)
def test_SmtEncoder_min_all_3_small(self):
        xyz = amnet.Variable(3, name='xyz')
phi_min3 = amnet.atoms.min_all(xyz)
self.assertEqual(phi_min3.indim, 3)
def true_min3(fpin):
x, y, z = fpin
return min(x, y, z)
self.validate_outputs(
phi=phi_min3,
onvals=itertools.product(self.floatvals2, repeat=phi_min3.indim),
true_f=true_min3
)
def test_SmtEncoder_add_all(self):
xyz = amnet.Variable(3, name='xyz')
phi_add = amnet.atoms.add_all(xyz)
self.assertEqual(phi_add.outdim, 1)
self.assertEqual(phi_add.indim, 3)
def true_add(fpin):
return sum(fpin)
self.validate_outputs(
phi=phi_add,
onvals=itertools.product(self.floatvals2, repeat=phi_add.indim),
true_f=true_add
)
def test_SmtEncoder_add_list(self):
xyz = amnet.Variable(2+2+2, name='xyz')
x = amnet.Linear(np.eye(2, 6, 0), xyz)
y = amnet.Linear(np.eye(2, 6, 2), xyz)
z = amnet.Linear(np.eye(2, 6, 4), xyz)
phi_add_list = amnet.atoms.add_list([x, y, z])
self.assertEqual(x.outdim, 2)
self.assertEqual(y.outdim, 2)
self.assertEqual(z.outdim, 2)
self.assertEqual(phi_add_list.outdim, 2)
self.assertEqual(phi_add_list.indim, 6)
def true_add(fpin):
x, y, z = fpin[0:2], fpin[2:4], fpin[4:6]
return x + y + z
self.validate_outputs(
phi=phi_add_list,
onvals=itertools.product(self.floatvals3, repeat=phi_add_list.indim),
true_f=true_add
)
def test_SmtEncoder_triplexer(self):
np.random.seed(1)
TOTAL_RUNS=5
#print ""
for iter in range(TOTAL_RUNS):
#print "Testing random triplexer [%d/%d]..." % (iter+1, TOTAL_RUNS),
# create a random triplexer
x = amnet.Variable(1, name='x')
a = 3 * (2 * np.random.rand(4) - 1)
b = 3 * (2 * np.random.rand(4) - 1)
c = 3 * (2 * np.random.rand(4) - 1)
d = 3 * (2 * np.random.rand(4) - 1)
e = 3 * (2 * np.random.rand(4) - 1)
f = 3 * (2 * np.random.rand(4) - 1)
phi_tri = amnet.atoms.triplexer(x, a, b, c, d, e, f)
def true_tri(fpin):
return amnet.atoms.fp_triplexer(fpin, a, b, c, d, e, f)
xvals = 50 * (2 * np.random.rand(100) - 1)
onvals = itertools.product(xvals, repeat=1)
self.validate_outputs(
phi=phi_tri,
onvals=onvals,
true_f=true_tri
)
#print "done!"
def test_SmtEncoder_max_aff(self):
np.random.seed(1)
m = 10
n = 4
A = np.random.randint(-5, 6, m*n).reshape((m, n))
b = np.random.randint(-5, 6, m).reshape((m,))
b[np.random.randint(0, n)] = 0 # make sure there is a Linear term
x = amnet.Variable(n, name='x')
y = amnet.atoms.max_aff(A, x, b)
self.assertEqual(y.indim, n)
self.assertEqual(y.outdim, 1)
def true_max_aff(fpin):
vals = np.dot(A, fpin) + b
assert len(vals) == m
return np.max(vals)
self.validate_outputs(
phi=y,
onvals=itertools.product(self.floatvals3, repeat=y.indim),
true_f=true_max_aff
)
# visualize max_aff
if VISUALIZE: amnet.vis.quick_vis(y, title='max_aff')
def test_SmtEncoder_min_aff(self):
np.random.seed(1)
m = 10
n = 4
A = np.random.randint(-5, 6, m*n).reshape((m, n))
b = np.random.randint(-5, 6, m).reshape((m,))
b[np.random.randint(0, n)] = 0 # make sure there is a Linear term
x = amnet.Variable(n, name='x')
y = amnet.atoms.min_aff(A, x, b)
self.assertEqual(y.indim, n)
self.assertEqual(y.outdim, 1)
def true_min_aff(fpin):
vals = np.dot(A, fpin) + b
assert len(vals) == m
return np.min(vals)
self.validate_outputs(
phi=y,
onvals=itertools.product(self.floatvals3, repeat=y.indim),
true_f=true_min_aff
)
# visualize min_aff
if VISUALIZE: amnet.vis.quick_vis(y, title='min_aff')
def test_SmtEncoder_dag(self):
xyz = amnet.Variable(3, name='xyz')
x = amnet.atoms.select(xyz, 0)
yz = amnet.Linear(
np.array([[0, 1, 0], [0, 0, 1]]),
xyz
)
maxyz = amnet.atoms.max_all(yz)
twoxp1 = amnet.Affine(
np.array([[2]]),
x,
np.array([1])
)
twox = amnet.atoms.add2(x, x)
threex = amnet.atoms.add2(x, twox)
fivexp1 = amnet.atoms.add2(twoxp1, threex)
phi = amnet.atoms.add2(fivexp1, maxyz)
def true_dag(fpin):
x, y, z = fpin
return 5*x + 1 + max(y, z)
self.validate_outputs(
phi=phi,
onvals=itertools.product(self.floatvals2, repeat=3),
true_f=true_dag
)
# visualize dag
if VISUALIZE: amnet.vis.quick_vis(phi, title='dag')
def test_SmtEncoder_relu_1(self):
x = amnet.Variable(1, name='x')
y = amnet.atoms.relu(x)
def true_relu(fpin):
return max(fpin[0], 0)
self.validate_outputs(
phi=y,
onvals=itertools.product(self.floatvals, repeat=y.indim),
true_f=true_relu
)
def test_SmtEncoder_relu_2(self):
x = amnet.Variable(3, name='x')
y = amnet.atoms.relu(x)
def true_relu(fpin):
return np.maximum(fpin, 0)
self.validate_outputs(
phi=y,
onvals=itertools.product(self.floatvals2, repeat=y.indim),
true_f=true_relu
)
# visualize relu
if VISUALIZE: amnet.vis.quick_vis(y, title='relu_2')
def test_SmtEncoder_relu_old(self):
x = amnet.Variable(3, name='x')
y = amnet.atoms.relu_old(x)
def true_relu(fpin):
return np.maximum(fpin, 0)
self.validate_outputs(
phi=y,
onvals=itertools.product(self.floatvals2, repeat=y.indim),
true_f=true_relu
)
# visualize relu_old
if VISUALIZE: amnet.vis.quick_vis(y, title='relu_old')
def test_SmtEncoder_gates(self):
xy_z1z2 = amnet.Variable(2+2+1+1, name='xyz1z2')
x = amnet.Linear(
np.eye(2, 6, 0),
xy_z1z2
)
y = amnet.Linear(
np.eye(2, 6, 2),
xy_z1z2
)
z1 = amnet.atoms.select(xy_z1z2, 4)
z2 = amnet.atoms.select(xy_z1z2, 5)
phi_and = amnet.atoms.gate_and(x, y, z1, z2)
phi_or = amnet.atoms.gate_or(x, y, z1, z2)
phi_xor = amnet.atoms.gate_xor(x, y, z1, z2)
phi_not = amnet.atoms.gate_not(x, y, z1)
# check dimensions
self.assertEqual(xy_z1z2.outdim, 6)
self.assertEqual(x.outdim, 2)
self.assertEqual(y.outdim, 2)
self.assertEqual(z1.outdim, 1)
self.assertEqual(z2.outdim, 1)
self.assertEqual(phi_and.outdim, 2)
self.assertEqual(phi_or.outdim, 2)
self.assertEqual(phi_xor.outdim, 2)
self.assertEqual(phi_not.outdim, 2)
# true gate functions
def true_and(fpin):
return fpin[0:2] if (fpin[4] <= 0 and fpin[5] <= 0) else fpin[2:4]
def true_or(fpin):
return fpin[0:2] if (fpin[4] <= 0 or fpin[5] <= 0) else fpin[2:4]
def true_xor(fpin):
return fpin[0:2] if ((fpin[4] <= 0) != (fpin[5] <= 0)) else fpin[2:4]
def true_not(fpin): # ignores last input
return fpin[2:4] if (fpin[4] <= 0) else fpin[0:2]
# evaluate
vals = np.array([1, -2, -3, 4])
sels = itertools.product([-1, 0, 1], repeat=2)
onvals = [np.concatenate((vals, sel), axis=0) for sel in sels]
self.validate_outputs(phi=phi_and, onvals=onvals, true_f=true_and)
self.validate_outputs(phi=phi_or, onvals=onvals, true_f=true_or)
self.validate_outputs(phi=phi_xor, onvals=onvals, true_f=true_xor)
self.validate_outputs(phi=phi_not, onvals=onvals, true_f=true_not)
def test_SmtEncoder_cmp(self):
xyz = amnet.Variable(2+2+1, name='xyz')
x = amnet.Linear(
np.eye(2, 5, 0),
xyz
)
y = amnet.Linear(
np.eye(2, 5, 2),
xyz
)
z = amnet.atoms.select(xyz, 4)
phi_eq = amnet.atoms.cmp_eq(x, y, z)
phi_neq = amnet.atoms.cmp_neq(x, y, z)
phi_ge = amnet.atoms.cmp_ge(x, y, z)
phi_gt = amnet.atoms.cmp_gt(x, y, z)
phi_le = amnet.atoms.cmp_le(x, y, z)
phi_lt = amnet.atoms.cmp_lt(x, y, z)
# check dimensions
self.assertEqual(xyz.outdim, 5)
self.assertEqual(x.outdim, 2)
self.assertEqual(y.outdim, 2)
self.assertEqual(z.outdim, 1)
self.assertEqual(phi_eq.outdim, 2)
self.assertEqual(phi_neq.outdim, 2)
self.assertEqual(phi_ge.outdim, 2)
self.assertEqual(phi_gt.outdim, 2)
self.assertEqual(phi_le.outdim, 2)
self.assertEqual(phi_lt.outdim, 2)
# true cmp functions
def true_eq(fpin):
x, y, z = fpin[0:2], fpin[2:4], fpin[4]
return x if z == 0 else y
def true_neq(fpin):
x, y, z = fpin[0:2], fpin[2:4], fpin[4]
return x if z != 0 else y
def true_ge(fpin):
x, y, z = fpin[0:2], fpin[2:4], fpin[4]
return x if z >= 0 else y
def true_gt(fpin):
x, y, z = fpin[0:2], fpin[2:4], fpin[4]
return x if z > 0 else y
def true_le(fpin):
x, y, z = fpin[0:2], fpin[2:4], fpin[4]
return x if z <= 0 else y
def true_lt(fpin):
x, y, z = fpin[0:2], fpin[2:4], fpin[4]
return x if z < 0 else y
# evaluate
vals = np.array([1, -2, -3, 4])
sels = itertools.product([-1.1, -0.5, 0, 0.0, 0.01, 1, 12.0], repeat=1)
onvals = [np.concatenate((vals, sel), axis=0) for sel in sels]
self.validate_outputs(phi=phi_eq, onvals=onvals, true_f=true_eq)
self.validate_outputs(phi=phi_neq, onvals=onvals, true_f=true_neq)
self.validate_outputs(phi=phi_ge, onvals=onvals, true_f=true_ge)
self.validate_outputs(phi=phi_gt, onvals=onvals, true_f=true_gt)
self.validate_outputs(phi=phi_le, onvals=onvals, true_f=true_le)
self.validate_outputs(phi=phi_lt, onvals=onvals, true_f=true_lt)
def test_SmtEncoder_identity(self):
x = amnet.Variable(2, name='x')
w = np.array([[1, 2], [3, 4]])
b = np.array([-1, -1])
y = amnet.Affine(w, x, b)
z = amnet.atoms.identity(y)
self.assertEqual(y.outdim, 2)
self.assertEqual(z.outdim, 2)
self.assertEqual(z.indim, 2)
def true_z(fpin):
return np.dot(w, fpin) + b
self.validate_outputs(
phi=z,
onvals=itertools.product(self.floatvals, repeat=z.indim),
true_f=true_z
)
def test_SmtEncoder_absval1(self):
x = amnet.Variable(1, name='x')
y = amnet.atoms.absval(x)
self.assertEqual(y.outdim, 1)
self.assertEqual(y.indim, 1)
def true_absval(fpin):
return abs(fpin)
self.validate_outputs(
phi=y,
onvals=itertools.product(self.floatvals, repeat=y.indim),
true_f = true_absval
)
# visualize absval1
if VISUALIZE: amnet.vis.quick_vis(y, title='absval1')
def test_SmtEncoder_absval3(self):
x = amnet.Variable(3, name='x')
y = amnet.atoms.absval(x)
self.assertEqual(y.outdim, 3)
self.assertEqual(y.indim, 3)
def true_absval(fpin):
x1, x2, x3 = fpin
return np.array([abs(x1), abs(x2), abs(x3)])
self.validate_outputs(
phi=y,
onvals=itertools.product(self.floatvals2, repeat=y.indim),
true_f=true_absval
)
# visualize absval3
if VISUALIZE: amnet.vis.quick_vis(y, title='absval3')
def test_SmtEncoder_sat1(self):
x = amnet.Variable(1, name='x')
y1 = amnet.atoms.sat(x)
y2 = amnet.atoms.sat(x, lo=-3, hi=3)
y3 = amnet.atoms.sat(x, lo=-2, hi=1.5)
self.assertEqual(y1.outdim, 1)
self.assertEqual(y1.indim, 1)
self.assertEqual(y2.outdim, 1)
self.assertEqual(y2.indim, 1)
self.assertEqual(y3.outdim, 1)
self.assertEqual(y3.indim, 1)
# manual tests
self.assertAlmostEqual(norm(y1.eval(np.array([-2])) - np.array([-1])), 0)
self.assertAlmostEqual(norm(y1.eval(np.array([-0.5])) - np.array([-0.5])), 0)
self.assertAlmostEqual(norm(y1.eval(np.array([0])) - np.array([0.0])), 0)
self.assertAlmostEqual(norm(y1.eval(np.array([0.6])) - np.array([0.6])), 0)
self.assertAlmostEqual(norm(y1.eval(np.array([1.6])) - np.array([1.0])), 0)
# automatic tests
def true_sat1(fpval, lo, hi):
x = fpval
if lo <= x <= hi:
return x
elif x < lo:
return lo
else:
return hi
self.validate_outputs(
phi=y1,
onvals=itertools.product(self.floatvals, repeat=y1.indim),
true_f=lambda z: true_sat1(z, -1, 1)
)
self.validate_outputs(
phi=y2,
onvals=itertools.product(self.floatvals, repeat=y2.indim),
true_f=lambda z: true_sat1(z, -3, 3)
)
self.validate_outputs(
phi=y3,
onvals=itertools.product(self.floatvals, repeat=y3.indim),
true_f=lambda z: true_sat1(z, -2, 1.5)
)
# visualize sat1
if VISUALIZE: amnet.vis.quick_vis(y1, title='sat1')
def test_SmtEncoder_sat3(self):
x = amnet.Variable(3, name='x')
y1 = amnet.atoms.sat(x)
y2 = amnet.atoms.sat(x, lo=-3, hi=3)
y3 = amnet.atoms.sat(x, lo=-2, hi=1.5)
self.assertEqual(y1.outdim, 3)
self.assertEqual(y1.indim, 3)
self.assertEqual(y2.outdim, 3)
self.assertEqual(y2.indim, 3)
self.assertEqual(y3.outdim, 3)
self.assertEqual(y3.indim, 3)
# manual tests
self.assertAlmostEqual(norm(y1.eval(np.array([-2, 1.6, 0.5])) - np.array([-1, 1, 0.5])), 0)
self.assertAlmostEqual(norm(y2.eval(np.array([-2, 1.6, 0.5])) - np.array([-2, 1.6, 0.5])), 0)
self.assertAlmostEqual(norm(y3.eval(np.array([-2, 1.6, 0.5])) - np.array([-2, 1.5, 0.5])), 0)
# visualize sat3
if VISUALIZE: amnet.vis.quick_vis(y1, title='sat3')
# automatic tests
def true_sat3(fpin, lo, hi):
return np.clip(fpin, lo, hi)
self.validate_outputs(
phi=y1,
onvals=itertools.product(self.floatvals2, repeat=y1.indim),
true_f=lambda z: true_sat3(z, -1, 1)
)
self.validate_outputs(
phi=y2,
onvals=itertools.product(self.floatvals2, repeat=y2.indim),
true_f=lambda z: true_sat3(z, -3, 3)
)
self.validate_outputs(
phi=y3,
onvals=itertools.product(self.floatvals2, repeat=y3.indim),
true_f=lambda z: true_sat3(z, -2, 1.5)
)
def test_SmtEncoder_dz1(self):
x = amnet.Variable(1, name='x')
y1 = amnet.atoms.dz(x)
y2 = amnet.atoms.dz(x, lo=-3, hi=3)
y3 = amnet.atoms.dz(x, lo=-2, hi=1.5)
self.assertEqual(y1.outdim, 1)
self.assertEqual(y1.indim, 1)
self.assertEqual(y2.outdim, 1)
self.assertEqual(y2.indim, 1)
self.assertEqual(y3.outdim, 1)
self.assertEqual(y3.indim, 1)
# manual tests
self.assertAlmostEqual(norm(y1.eval(np.array([-2])) - np.array([-1])), 0)
self.assertAlmostEqual(norm(y1.eval(np.array([-0.5])) - np.array([0])), 0)
self.assertAlmostEqual(norm(y1.eval(np.array([0])) - np.array([0])), 0)
self.assertAlmostEqual(norm(y1.eval(np.array([0.6])) - np.array([0])), 0)
self.assertAlmostEqual(norm(y1.eval(np.array([1.6])) - np.array([0.6])), 0)
# automatic tests
def true_dz1(fpval, lo, hi):
x = fpval
if lo <= x <= hi:
return 0
elif x < lo:
return x-lo
else:
return x-hi
self.validate_outputs(
phi=y1,
onvals=itertools.product(self.floatvals, repeat=y1.indim),
true_f=lambda z: true_dz1(z, -1, 1)
)
self.validate_outputs(
phi=y2,
onvals=itertools.product(self.floatvals, repeat=y2.indim),
true_f=lambda z: true_dz1(z, -3, 3)
)
self.validate_outputs(
phi=y3,
onvals=itertools.product(self.floatvals, repeat=y3.indim),
true_f=lambda z: true_dz1(z, -2, 1.5)
)
# visualize dz1
if VISUALIZE: amnet.vis.quick_vis(y1, title='dz1')
def test_SmtEncoder_dz3(self):
x = amnet.Variable(3, name='x')
y1 = amnet.atoms.dz(x)
y2 = amnet.atoms.dz(x, lo=-3, hi=3)
y3 = amnet.atoms.dz(x, lo=-2, hi=1.5)
self.assertEqual(y1.outdim, 3)
self.assertEqual(y1.indim, 3)
self.assertEqual(y2.outdim, 3)
self.assertEqual(y2.indim, 3)
self.assertEqual(y3.outdim, 3)
self.assertEqual(y3.indim, 3)
# manual tests
self.assertAlmostEqual(norm(y1.eval(np.array([-2, 1.6, 0.5])) - np.array([-1, 0.6, 0])), 0)
self.assertAlmostEqual(norm(y2.eval(np.array([-2, 1.6, 0.5])) - np.array([0, 0, 0])), 0)
self.assertAlmostEqual(norm(y3.eval(np.array([-2, 1.6, 0.5])) - np.array([0, 0.1, 0])), 0)
# visualize dz3
if VISUALIZE: amnet.vis.quick_vis(y1, title='dz3')
# automatic tests
def true_dz3(fpin, lo, hi):
retv = np.array(fpin)
retv[(retv >= lo) & (retv <= hi)] = 0
retv[retv > hi] -= hi
retv[retv < lo] -= lo
return retv
self.validate_outputs(
phi=y1,
onvals=itertools.product(self.floatvals2, repeat=y1.indim),
true_f=lambda z: true_dz3(z, -1, 1)
)
self.validate_outputs(
phi=y2,
onvals=itertools.product(self.floatvals2, repeat=y2.indim),
true_f=lambda z: true_dz3(z, -3, 3)
)
self.validate_outputs(
phi=y3,
onvals=itertools.product(self.floatvals2, repeat=y3.indim),
true_f=lambda z: true_dz3(z, -2, 1.5)
)
def test_SmtEncoder_norminf1(self):
x = amnet.Variable(1, name='x')
y = amnet.atoms.norminf(x)
self.assertEqual(y.indim, 1)
self.assertEqual(y.outdim, 1)
# visualize norminf1
if VISUALIZE: amnet.vis.quick_vis(y, title='norminf1')
# automatic tests
def true_norminf(fpin):
self.assertEqual(len(fpin), 1)
return norm(fpin, ord=np.inf)
self.validate_outputs(
phi=y,
onvals=itertools.product(self.floatvals, repeat=y.indim),
true_f=true_norminf
)
def test_SmtEncoder_norminf3(self):
x = amnet.Variable(3, name='x')
y = amnet.atoms.norminf(x)
self.assertEqual(y.indim, 3)
self.assertEqual(y.outdim, 1)
# visualize norminf3
if VISUALIZE: amnet.vis.quick_vis(y, title='norminf3')
# automatic tests
def true_norminf(fpin):
self.assertEqual(len(fpin), 3)
return norm(fpin, ord=np.inf)
self.validate_outputs(
phi=y,
onvals=itertools.product(self.floatvals2, repeat=y.indim),
true_f=true_norminf
)
def test_SmtEncoder_norm11(self):
x = amnet.Variable(1, name='x')
y = amnet.atoms.norm1(x)
self.assertEqual(y.indim, 1)
self.assertEqual(y.outdim, 1)
# visualize norm11
if VISUALIZE: amnet.vis.quick_vis(y, title='norm11')
# automatic tests
def true_norm1(fpin):
self.assertEqual(len(fpin), 1)
return norm(fpin, ord=1)
self.validate_outputs(
phi=y,
onvals=itertools.product(self.floatvals, repeat=y.indim),
true_f=true_norm1
)
def test_SmtEncoder_norm13(self):
x = amnet.Variable(3, name='x')
y = amnet.atoms.norm1(x)
self.assertEqual(y.indim, 3)
self.assertEqual(y.outdim, 1)
# visualize norm13
if VISUALIZE: amnet.vis.quick_vis(y, title='norm13')
# automatic tests
def true_norm1(fpin):
self.assertEqual(len(fpin), 3)
return norm(fpin, ord=1)
self.validate_outputs(
phi=y,
onvals=itertools.product(self.floatvals2, repeat=y.indim),
true_f=true_norm1
)
def test_SmtEncoder_phase_vgc(self):
alpha1 = 1.5
alpha2 = -0.7
x = amnet.Variable(2, name='x')
e = amnet.atoms.select(x, 0)
edot = amnet.atoms.select(x, 1)
phi_vgc1 = amnet.atoms.phase_vgc(e, edot, alpha=alpha1)
phi_vgc2 = amnet.atoms.phase_vgc(e, edot, alpha=alpha2)
self.assertEqual(phi_vgc1.indim, 2)
self.assertEqual(phi_vgc1.outdim, 1)
self.assertEqual(phi_vgc2.indim, 2)
self.assertEqual(phi_vgc2.outdim, 1)
# visualize vgc
if VISUALIZE:
ctx = amnet.smt.NamingContext(phi_vgc1)
ctx.rename(e, 'e')
ctx.rename(edot, 'edot')
ctx.rename(phi_vgc1, 'phi_vgc1')
amnet.vis.quick_vis(phi_vgc1, title='phase_vgc', ctx=ctx)
# manual tests
self.assertAlmostEqual(norm(phi_vgc1.eval(np.array([1.1, 1.2])) - np.array([alpha1 * 1.1])), 0)
self.assertAlmostEqual(norm(phi_vgc1.eval(np.array([1.1, -1.2])) - np.array([0])), 0)
self.assertAlmostEqual(norm(phi_vgc1.eval(np.array([-1.1, -1.2])) - np.array([alpha1 * (-1.1)])), 0)
self.assertAlmostEqual(norm(phi_vgc1.eval(np.array([-1.1, 1.2])) - np.array([0])), 0)
self.assertAlmostEqual(norm(phi_vgc1.eval(np.array([1.1, 0])) - np.array([0])), 0)
self.assertAlmostEqual(norm(phi_vgc1.eval(np.array([0, 1.2])) - np.array([0])), 0)
self.assertAlmostEqual(norm(phi_vgc1.eval(np.array([-1.1, 0])) - np.array([0])), 0)
self.assertAlmostEqual(norm(phi_vgc1.eval(np.array([0, -1.2])) - np.array([0])), 0)
self.assertAlmostEqual(norm(phi_vgc1.eval(np.array([0, 0])) - np.array([0])), 0)
# automatic tests
def true_phase_vgc(fpin, alpha):
x1, x2 = fpin
return alpha*x1 if x1*x2 > 0 else 0
self.validate_outputs(
phi=phi_vgc1,
onvals=itertools.product(self.floatvals2, repeat=phi_vgc1.indim),
true_f=lambda xi: true_phase_vgc(xi, alpha=alpha1)
)
self.validate_outputs(
phi=phi_vgc2,
onvals=itertools.product(self.floatvals2, repeat=phi_vgc2.indim),
true_f=lambda xi: true_phase_vgc(xi, alpha=alpha2)
)
def test_NamingContext_multiple_contexts_for(self):
x = amnet.Variable(2, name='x')
y = amnet.Variable(3, name='y')
phi_x = amnet.atoms.max_all(x)
phi_y = amnet.atoms.max_all(y)
# multiple context names
ctx_list = amnet.smt.NamingContext.multiple_contexts_for([phi_x, phi_y])
self.assertEqual(len(ctx_list), 2)
# make sure all names are unique
names = []
for ctx in ctx_list:
names.extend(ctx.symbols.keys())
self.assertEqual(len(names), len(set(names)))
if VISUALIZE:
amnet.vis.quick_vis(phi_x, title='multiple_contexts_phi_x', ctx=ctx_list[0])
amnet.vis.quick_vis(phi_y, title='multiple_contexts_phi_y', ctx=ctx_list[1])
def test_SmtEncoder_multiple_encode(self):
x = amnet.Variable(2, name='x')
y = amnet.Variable(3, name='y')
z = amnet.Variable(2, name='z')
phi_x = amnet.atoms.max_all(x)
phi_y = amnet.atoms.max_all(y)
phi_z = amnet.atoms.max_all(z)
# encode the AMNs
enc_x, enc_y, enc_z = amnet.smt.SmtEncoder.multiple_encode(phi_x, phi_y, phi_z)
solver = enc_x.solver
if VISUALIZE:
amnet.vis.quick_vis(phi_x, title='multiple_encode_phi_x', ctx=enc_x.ctx)
amnet.vis.quick_vis(phi_y, title='multiple_encode_phi_y', ctx=enc_y.ctx)
amnet.vis.quick_vis(phi_z, title='multiple_encode_phi_z', ctx=enc_z.ctx)
# make sure solver object is the same
self.assertTrue(enc_x.solver is solver)
self.assertTrue(enc_y.solver is solver)
self.assertTrue(enc_z.solver is solver)
# link the outputs of x and y to the inputs of z
phi_x_out = enc_x.var_of(phi_x)
phi_y_out = enc_y.var_of(phi_y)
z_in = enc_z.var_of_input()
self.assertEqual(len(phi_x_out), 1)
self.assertEqual(len(phi_y_out), 1)
self.assertEqual(len(z_in), 2)
# solver.add(z_in[0] == phi_x_out[0])
# solver.add(z_in[1] == phi_y_out[0])
amnet.util.eqv_z3(solver, z_in, [phi_x_out[0], phi_y_out[0]])
#print "Linked solver:", solver
# input variables to the linked network
x_in = enc_x.var_of_input()
y_in = enc_y.var_of_input()
phi_z_out = enc_z.var_of(phi_z)
self.assertEqual(len(x_in), 2)
self.assertEqual(len(y_in), 3)
self.assertEqual(len(phi_z_out), 1)
# do some test cases
def do_testcase(xf, yf, fpeval):
solver.push()
#print "Pre-input solver:", solver
amnet.util.eqv_z3(solver, x_in, xf)
amnet.util.eqv_z3(solver, y_in, yf)
#print "Post-input solver:", solver
# check for sat
result = solver.check()
self.assertTrue(result == z3.sat)
self.assertFalse(result == z3.unsat)
# extract the output
model = solver.model()
smteval = amnet.util.mfpv(model, phi_z_out)
#print smteval
# check that the outputs match
self.assertAlmostEqual(norm(smteval - fpeval), 0)
solver.pop()
do_testcase(
xf=np.array([1, 0]),
yf=np.array([-1, -4, 0]),
fpeval=np.array([1])
)
do_testcase(
xf=np.array([1, 4.1]),
yf=np.array([-1, 4.1, 0]),
fpeval=np.array([4.1])
)
        do_testcase(
            xf=np.array([-1, 0]),
            yf=np.array([3, -4, 5]),
            fpeval=np.array([5])
        )
do_testcase(
xf=np.array([-1, 0]),
yf=np.array([3, 20, 5]),
fpeval=np.array([20])
)
do_testcase(
xf=np.array([-1, -17.1]),
yf=np.array([0, -4, -5]),
fpeval=np.array([0])
)
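# Illustrative sketch (not part of the original suite): the push/check/model/
# pop pattern used by do_testcase above, distilled to a bare z3 solver.
def _example_push_pop_pattern():
    s = z3.Solver()
    a = z3.Real('a')
    s.add(a >= 1)                  # persistent constraint
    s.push()                       # scope for one test case
    s.add(a == 2)
    assert s.check() == z3.sat
    val = s.model().eval(a)        # satisfying value for a
    assert val.as_fraction() == 2  # RatNumRef -> fractions.Fraction
    s.pop()                        # restore the re-usable base state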
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestSmt)
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not result.wasSuccessful())
| ipapusha/amnet | tests/test_smt.py | Python | bsd-3-clause | 32,730 |
import base64
from django.test import TestCase
from django.test.utils import override_settings
import json
import re
from student.tests.factories import UserFactory
from unittest import SkipTest
from user_api.models import UserPreference
from user_api.tests.factories import UserPreferenceFactory
from django_comment_common import models
from opaque_keys.edx.locations import SlashSeparatedCourseKey
TEST_API_KEY = "test_api_key"
USER_LIST_URI = "/user_api/v1/users/"
USER_PREFERENCE_LIST_URI = "/user_api/v1/user_prefs/"
ROLE_LIST_URI = "/user_api/v1/forum_roles/Moderator/users/"
@override_settings(EDX_API_KEY=TEST_API_KEY)
class ApiTestCase(TestCase):
LIST_URI = USER_LIST_URI
def basic_auth(self, username, password):
return {'HTTP_AUTHORIZATION': 'Basic ' + base64.b64encode('%s:%s' % (username, password))}
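    # For example (illustrative): basic_auth('user', 'pass') returns
    # {'HTTP_AUTHORIZATION': 'Basic dXNlcjpwYXNz'}.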
def request_with_auth(self, method, *args, **kwargs):
"""Issue a get request to the given URI with the API key header"""
return getattr(self.client, method)(*args, HTTP_X_EDX_API_KEY=TEST_API_KEY, **kwargs)
def get_json(self, *args, **kwargs):
"""Make a request with the given args and return the parsed JSON repsonse"""
resp = self.request_with_auth("get", *args, **kwargs)
self.assertHttpOK(resp)
self.assertTrue(resp["Content-Type"].startswith("application/json"))
return json.loads(resp.content)
def get_uri_for_user(self, target_user):
"""Given a user object, get the URI for the corresponding resource"""
users = self.get_json(USER_LIST_URI)["results"]
for user in users:
if user["id"] == target_user.id:
return user["url"]
self.fail()
def get_uri_for_pref(self, target_pref):
"""Given a user preference object, get the URI for the corresponding resource"""
prefs = self.get_json(USER_PREFERENCE_LIST_URI)["results"]
for pref in prefs:
if (pref["user"]["id"] == target_pref.user.id and pref["key"] == target_pref.key):
return pref["url"]
self.fail()
def assertAllowedMethods(self, uri, expected_methods):
"""Assert that the allowed methods for the given URI match the expected list"""
resp = self.request_with_auth("options", uri)
self.assertHttpOK(resp)
allow_header = resp.get("Allow")
self.assertIsNotNone(allow_header)
allowed_methods = re.split('[^A-Z]+', allow_header)
self.assertItemsEqual(allowed_methods, expected_methods)
def assertSelfReferential(self, obj):
"""Assert that accessing the "url" entry in the given object returns the same object"""
copy = self.get_json(obj["url"])
self.assertEqual(obj, copy)
def assertUserIsValid(self, user):
"""Assert that the given user result is valid"""
self.assertItemsEqual(user.keys(), ["email", "id", "name", "username", "preferences", "url"])
self.assertItemsEqual(
user["preferences"].items(),
[(pref.key, pref.value) for pref in self.prefs if pref.user.id == user["id"]]
)
self.assertSelfReferential(user)
def assertPrefIsValid(self, pref):
self.assertItemsEqual(pref.keys(), ["user", "key", "value", "url"])
self.assertSelfReferential(pref)
self.assertUserIsValid(pref["user"])
def assertHttpOK(self, response):
"""Assert that the given response has the status code 200"""
self.assertEqual(response.status_code, 200)
def assertHttpForbidden(self, response):
"""Assert that the given response has the status code 403"""
self.assertEqual(response.status_code, 403)
def assertHttpBadRequest(self, response):
"""Assert that the given response has the status code 400"""
self.assertEqual(response.status_code, 400)
def assertHttpMethodNotAllowed(self, response):
"""Assert that the given response has the status code 405"""
self.assertEqual(response.status_code, 405)
class EmptyUserTestCase(ApiTestCase):
def test_get_list_empty(self):
result = self.get_json(self.LIST_URI)
self.assertEqual(result["count"], 0)
self.assertIsNone(result["next"])
self.assertIsNone(result["previous"])
self.assertEqual(result["results"], [])
class EmptyRoleTestCase(ApiTestCase):
"""Test that the endpoint supports empty result sets"""
course_id = SlashSeparatedCourseKey.from_deprecated_string("org/course/run")
LIST_URI = ROLE_LIST_URI + "?course_id=" + course_id.to_deprecated_string()
def test_get_list_empty(self):
"""Test that the endpoint properly returns empty result sets"""
result = self.get_json(self.LIST_URI)
self.assertEqual(result["count"], 0)
self.assertIsNone(result["next"])
self.assertIsNone(result["previous"])
self.assertEqual(result["results"], [])
class UserApiTestCase(ApiTestCase):
def setUp(self):
super(UserApiTestCase, self).setUp()
self.users = [
UserFactory.create(
email="test{0}@test.org".format(i),
profile__name="Test {0}".format(i)
)
for i in range(5)
]
self.prefs = [
UserPreferenceFactory.create(user=self.users[0], key="key0"),
UserPreferenceFactory.create(user=self.users[0], key="key1"),
UserPreferenceFactory.create(user=self.users[1], key="key0")
]
class RoleTestCase(UserApiTestCase):
course_id = SlashSeparatedCourseKey.from_deprecated_string("org/course/run")
LIST_URI = ROLE_LIST_URI + "?course_id=" + course_id.to_deprecated_string()
def setUp(self):
super(RoleTestCase, self).setUp()
(role, _) = models.Role.objects.get_or_create(
name=models.FORUM_ROLE_MODERATOR,
course_id=self.course_id
)
for user in self.users:
user.roles.add(role)
def test_options_list(self):
self.assertAllowedMethods(self.LIST_URI, ["OPTIONS", "GET", "HEAD"])
def test_post_list_not_allowed(self):
self.assertHttpMethodNotAllowed(self.request_with_auth("post", self.LIST_URI))
def test_put_list_not_allowed(self):
self.assertHttpMethodNotAllowed(self.request_with_auth("put", self.LIST_URI))
def test_patch_list_not_allowed(self):
raise SkipTest("Django 1.4's test client does not support patch")
def test_delete_list_not_allowed(self):
self.assertHttpMethodNotAllowed(self.request_with_auth("delete", self.LIST_URI))
def test_list_unauthorized(self):
self.assertHttpForbidden(self.client.get(self.LIST_URI))
@override_settings(DEBUG=True)
@override_settings(EDX_API_KEY=None)
def test_debug_auth(self):
self.assertHttpOK(self.client.get(self.LIST_URI))
@override_settings(DEBUG=False)
@override_settings(EDX_API_KEY=TEST_API_KEY)
def test_basic_auth(self):
# ensure that having basic auth headers in the mix does not break anything
self.assertHttpOK(
self.request_with_auth("get", self.LIST_URI,
**self.basic_auth("someuser", "somepass")))
self.assertHttpForbidden(
self.client.get(self.LIST_URI, **self.basic_auth("someuser", "somepass")))
def test_get_list_nonempty(self):
result = self.get_json(self.LIST_URI)
users = result["results"]
self.assertEqual(result["count"], len(self.users))
self.assertEqual(len(users), len(self.users))
self.assertIsNone(result["next"])
self.assertIsNone(result["previous"])
for user in users:
self.assertUserIsValid(user)
def test_required_parameter(self):
response = self.request_with_auth("get", ROLE_LIST_URI)
self.assertHttpBadRequest(response)
def test_get_list_pagination(self):
first_page = self.get_json(self.LIST_URI, data={
"page_size": 3,
"course_id": self.course_id.to_deprecated_string(),
})
self.assertEqual(first_page["count"], 5)
first_page_next_uri = first_page["next"]
self.assertIsNone(first_page["previous"])
first_page_users = first_page["results"]
self.assertEqual(len(first_page_users), 3)
second_page = self.get_json(first_page_next_uri)
self.assertEqual(second_page["count"], 5)
self.assertIsNone(second_page["next"])
second_page_prev_uri = second_page["previous"]
second_page_users = second_page["results"]
self.assertEqual(len(second_page_users), 2)
self.assertEqual(self.get_json(second_page_prev_uri), first_page)
for user in first_page_users + second_page_users:
self.assertUserIsValid(user)
all_user_uris = [user["url"] for user in first_page_users + second_page_users]
self.assertEqual(len(set(all_user_uris)), 5)
class UserViewSetTest(UserApiTestCase):
LIST_URI = USER_LIST_URI
def setUp(self):
super(UserViewSetTest, self).setUp()
self.detail_uri = self.get_uri_for_user(self.users[0])
# List view tests
def test_options_list(self):
self.assertAllowedMethods(self.LIST_URI, ["OPTIONS", "GET", "HEAD"])
def test_post_list_not_allowed(self):
self.assertHttpMethodNotAllowed(self.request_with_auth("post", self.LIST_URI))
def test_put_list_not_allowed(self):
self.assertHttpMethodNotAllowed(self.request_with_auth("put", self.LIST_URI))
def test_patch_list_not_allowed(self):
raise SkipTest("Django 1.4's test client does not support patch")
def test_delete_list_not_allowed(self):
self.assertHttpMethodNotAllowed(self.request_with_auth("delete", self.LIST_URI))
def test_list_unauthorized(self):
self.assertHttpForbidden(self.client.get(self.LIST_URI))
@override_settings(DEBUG=True)
@override_settings(EDX_API_KEY=None)
def test_debug_auth(self):
self.assertHttpOK(self.client.get(self.LIST_URI))
@override_settings(DEBUG=False)
@override_settings(EDX_API_KEY=TEST_API_KEY)
def test_basic_auth(self):
# ensure that having basic auth headers in the mix does not break anything
self.assertHttpOK(
self.request_with_auth("get", self.LIST_URI,
**self.basic_auth('someuser', 'somepass')))
self.assertHttpForbidden(
self.client.get(self.LIST_URI, **self.basic_auth('someuser', 'somepass')))
def test_get_list_nonempty(self):
result = self.get_json(self.LIST_URI)
self.assertEqual(result["count"], 5)
self.assertIsNone(result["next"])
self.assertIsNone(result["previous"])
users = result["results"]
self.assertEqual(len(users), 5)
for user in users:
self.assertUserIsValid(user)
def test_get_list_pagination(self):
first_page = self.get_json(self.LIST_URI, data={"page_size": 3})
self.assertEqual(first_page["count"], 5)
first_page_next_uri = first_page["next"]
self.assertIsNone(first_page["previous"])
first_page_users = first_page["results"]
self.assertEqual(len(first_page_users), 3)
second_page = self.get_json(first_page_next_uri)
self.assertEqual(second_page["count"], 5)
self.assertIsNone(second_page["next"])
second_page_prev_uri = second_page["previous"]
second_page_users = second_page["results"]
self.assertEqual(len(second_page_users), 2)
self.assertEqual(self.get_json(second_page_prev_uri), first_page)
for user in first_page_users + second_page_users:
self.assertUserIsValid(user)
all_user_uris = [user["url"] for user in first_page_users + second_page_users]
self.assertEqual(len(set(all_user_uris)), 5)
# Detail view tests
def test_options_detail(self):
self.assertAllowedMethods(self.detail_uri, ["OPTIONS", "GET", "HEAD"])
def test_post_detail_not_allowed(self):
self.assertHttpMethodNotAllowed(self.request_with_auth("post", self.detail_uri))
def test_put_detail_not_allowed(self):
self.assertHttpMethodNotAllowed(self.request_with_auth("put", self.detail_uri))
def test_patch_detail_not_allowed(self):
raise SkipTest("Django 1.4's test client does not support patch")
def test_delete_detail_not_allowed(self):
self.assertHttpMethodNotAllowed(self.request_with_auth("delete", self.detail_uri))
def test_get_detail_unauthorized(self):
self.assertHttpForbidden(self.client.get(self.detail_uri))
def test_get_detail(self):
user = self.users[1]
uri = self.get_uri_for_user(user)
self.assertEqual(
self.get_json(uri),
{
"email": user.email,
"id": user.id,
"name": user.profile.name,
"username": user.username,
"preferences": dict([
(user_pref.key, user_pref.value)
for user_pref in self.prefs
if user_pref.user == user
]),
"url": uri
}
)
class UserPreferenceViewSetTest(UserApiTestCase):
LIST_URI = USER_PREFERENCE_LIST_URI
def setUp(self):
super(UserPreferenceViewSetTest, self).setUp()
self.detail_uri = self.get_uri_for_pref(self.prefs[0])
# List view tests
def test_options_list(self):
self.assertAllowedMethods(self.LIST_URI, ["OPTIONS", "GET", "HEAD"])
def test_put_list_not_allowed(self):
self.assertHttpMethodNotAllowed(self.request_with_auth("put", self.LIST_URI))
def test_patch_list_not_allowed(self):
raise SkipTest("Django 1.4's test client does not support patch")
def test_delete_list_not_allowed(self):
self.assertHttpMethodNotAllowed(self.request_with_auth("delete", self.LIST_URI))
def test_list_unauthorized(self):
self.assertHttpForbidden(self.client.get(self.LIST_URI))
@override_settings(DEBUG=True)
@override_settings(EDX_API_KEY=None)
def test_debug_auth(self):
self.assertHttpOK(self.client.get(self.LIST_URI))
def test_get_list_nonempty(self):
result = self.get_json(self.LIST_URI)
self.assertEqual(result["count"], 3)
self.assertIsNone(result["next"])
self.assertIsNone(result["previous"])
prefs = result["results"]
self.assertEqual(len(prefs), 3)
for pref in prefs:
self.assertPrefIsValid(pref)
def test_get_list_filter_key_empty(self):
result = self.get_json(self.LIST_URI, data={"key": "non-existent"})
self.assertEqual(result["count"], 0)
self.assertEqual(result["results"], [])
def test_get_list_filter_key_nonempty(self):
result = self.get_json(self.LIST_URI, data={"key": "key0"})
self.assertEqual(result["count"], 2)
prefs = result["results"]
self.assertEqual(len(prefs), 2)
for pref in prefs:
self.assertPrefIsValid(pref)
self.assertEqual(pref["key"], "key0")
def test_get_list_filter_user_empty(self):
def test_id(user_id):
result = self.get_json(self.LIST_URI, data={"user": user_id})
self.assertEqual(result["count"], 0)
self.assertEqual(result["results"], [])
test_id(self.users[2].id)
# TODO: If the given id does not match a user, then the filter is a no-op
# test_id(42)
# test_id("asdf")
def test_get_list_filter_user_nonempty(self):
user_id = self.users[0].id
result = self.get_json(self.LIST_URI, data={"user": user_id})
self.assertEqual(result["count"], 2)
prefs = result["results"]
self.assertEqual(len(prefs), 2)
for pref in prefs:
self.assertPrefIsValid(pref)
self.assertEqual(pref["user"]["id"], user_id)
def test_get_list_pagination(self):
first_page = self.get_json(self.LIST_URI, data={"page_size": 2})
self.assertEqual(first_page["count"], 3)
first_page_next_uri = first_page["next"]
self.assertIsNone(first_page["previous"])
first_page_prefs = first_page["results"]
self.assertEqual(len(first_page_prefs), 2)
second_page = self.get_json(first_page_next_uri)
self.assertEqual(second_page["count"], 3)
self.assertIsNone(second_page["next"])
second_page_prev_uri = second_page["previous"]
second_page_prefs = second_page["results"]
self.assertEqual(len(second_page_prefs), 1)
self.assertEqual(self.get_json(second_page_prev_uri), first_page)
for pref in first_page_prefs + second_page_prefs:
self.assertPrefIsValid(pref)
all_pref_uris = [pref["url"] for pref in first_page_prefs + second_page_prefs]
self.assertEqual(len(set(all_pref_uris)), 3)
# Detail view tests
def test_options_detail(self):
self.assertAllowedMethods(self.detail_uri, ["OPTIONS", "GET", "HEAD"])
def test_post_detail_not_allowed(self):
self.assertHttpMethodNotAllowed(self.request_with_auth("post", self.detail_uri))
def test_put_detail_not_allowed(self):
self.assertHttpMethodNotAllowed(self.request_with_auth("put", self.detail_uri))
def test_patch_detail_not_allowed(self):
raise SkipTest("Django 1.4's test client does not support patch")
def test_delete_detail_not_allowed(self):
self.assertHttpMethodNotAllowed(self.request_with_auth("delete", self.detail_uri))
def test_detail_unauthorized(self):
self.assertHttpForbidden(self.client.get(self.detail_uri))
def test_get_detail(self):
pref = self.prefs[1]
uri = self.get_uri_for_pref(pref)
self.assertEqual(
self.get_json(uri),
{
"user": {
"email": pref.user.email,
"id": pref.user.id,
"name": pref.user.profile.name,
"username": pref.user.username,
"preferences": dict([
(user_pref.key, user_pref.value)
for user_pref in self.prefs
if user_pref.user == pref.user
]),
"url": self.get_uri_for_user(pref.user),
},
"key": pref.key,
"value": pref.value,
"url": uri,
}
)
class PreferenceUsersListViewTest(UserApiTestCase):
LIST_URI = "/user_api/v1/preferences/key0/users/"
def test_options(self):
self.assertAllowedMethods(self.LIST_URI, ["OPTIONS", "GET", "HEAD"])
def test_put_not_allowed(self):
self.assertHttpMethodNotAllowed(self.request_with_auth("put", self.LIST_URI))
def test_patch_not_allowed(self):
raise SkipTest("Django 1.4's test client does not support patch")
def test_delete_not_allowed(self):
self.assertHttpMethodNotAllowed(self.request_with_auth("delete", self.LIST_URI))
def test_unauthorized(self):
self.assertHttpForbidden(self.client.get(self.LIST_URI))
@override_settings(DEBUG=True)
@override_settings(EDX_API_KEY=None)
def test_debug_auth(self):
self.assertHttpOK(self.client.get(self.LIST_URI))
def test_get_basic(self):
result = self.get_json(self.LIST_URI)
self.assertEqual(result["count"], 2)
self.assertIsNone(result["next"])
self.assertIsNone(result["previous"])
users = result["results"]
self.assertEqual(len(users), 2)
for user in users:
self.assertUserIsValid(user)
def test_get_pagination(self):
first_page = self.get_json(self.LIST_URI, data={"page_size": 1})
self.assertEqual(first_page["count"], 2)
first_page_next_uri = first_page["next"]
self.assertIsNone(first_page["previous"])
first_page_users = first_page["results"]
self.assertEqual(len(first_page_users), 1)
second_page = self.get_json(first_page_next_uri)
self.assertEqual(second_page["count"], 2)
self.assertIsNone(second_page["next"])
second_page_prev_uri = second_page["previous"]
second_page_users = second_page["results"]
self.assertEqual(len(second_page_users), 1)
self.assertEqual(self.get_json(second_page_prev_uri), first_page)
for user in first_page_users + second_page_users:
self.assertUserIsValid(user)
all_user_uris = [user["url"] for user in first_page_users + second_page_users]
self.assertEqual(len(set(all_user_uris)), 2)
| geekaia/edx-platform | common/djangoapps/user_api/tests/test_views.py | Python | agpl-3.0 | 20,805 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2020 Ryan Roden-Corrent (rcorre) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Test the keyhint widget."""
import pytest
from qutebrowser.misc import objects
from qutebrowser.misc.keyhintwidget import KeyHintView
def expected_text(*args):
"""Helper to format text we expect the KeyHintView to generate.
Args:
args: One tuple for each row in the expected output.
Tuples are of the form: (prefix, color, suffix, command).
"""
text = '<table>'
for group in args:
text += ("<tr>"
"<td>{}</td>"
"<td style='color: {}'>{}</td>"
"<td style='padding-left: 2ex'>{}</td>"
"</tr>").format(*group)
return text + '</table>'
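# Illustrative sketch (not part of the original tests): a single-row call to
# the helper above, using the documented (prefix, color, suffix, command)
# convention; the binding shown here is an arbitrary assumption.
def _example_expected_text():
    return expected_text(('g', 'yellow', 'g', 'message-info cmd-gg'))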
@pytest.fixture
def keyhint(qtbot, config_stub, key_config_stub):
"""Fixture to initialize a KeyHintView."""
config_stub.val.colors.keyhint.suffix.fg = 'yellow'
keyhint = KeyHintView(0, None)
qtbot.add_widget(keyhint)
assert keyhint.text() == ''
return keyhint
def test_show_and_hide(qtbot, keyhint):
with qtbot.waitSignal(keyhint.update_geometry):
with qtbot.waitExposed(keyhint):
keyhint.show()
keyhint.update_keyhint('normal', '')
assert not keyhint.isVisible()
def test_position_change(keyhint, config_stub):
config_stub.val.statusbar.position = 'top'
stylesheet = keyhint.styleSheet()
assert 'border-bottom-right-radius' in stylesheet
assert 'border-top-right-radius' not in stylesheet
def test_suggestions(keyhint, config_stub):
"""Test that keyhints are shown based on a prefix."""
bindings = {'normal': {
'aa': 'message-info cmd-aa',
'ab': 'message-info cmd-ab',
'aba': 'message-info cmd-aba',
'abb': 'message-info cmd-abb',
'xd': 'message-info cmd-xd',
'xe': 'message-info cmd-xe',
}}
default_bindings = {'normal': {
'ac': 'message-info cmd-ac',
}}
config_stub.val.bindings.default = default_bindings
config_stub.val.bindings.commands = bindings
keyhint.update_keyhint('normal', 'a')
assert keyhint.text() == expected_text(
('a', 'yellow', 'a', 'message-info cmd-aa'),
('a', 'yellow', 'b', 'message-info cmd-ab'),
('a', 'yellow', 'ba', 'message-info cmd-aba'),
('a', 'yellow', 'bb', 'message-info cmd-abb'),
('a', 'yellow', 'c', 'message-info cmd-ac'))
def test_suggestions_special(keyhint, config_stub):
"""Test that special characters work properly as prefix."""
bindings = {'normal': {
'<Ctrl-C>a': 'message-info cmd-Cca',
'<Ctrl-C><Ctrl-C>': 'message-info cmd-CcCc',
'<Ctrl-C><Ctrl-X>': 'message-info cmd-CcCx',
'cbb': 'message-info cmd-cbb',
'xd': 'message-info cmd-xd',
'xe': 'message-info cmd-xe',
}}
default_bindings = {'normal': {
'<Ctrl-C>c': 'message-info cmd-Ccc',
}}
config_stub.val.bindings.default = default_bindings
config_stub.val.bindings.commands = bindings
keyhint.update_keyhint('normal', '<Ctrl+c>')
assert keyhint.text() == expected_text(
('<Ctrl+c>', 'yellow', 'a', 'message-info cmd-Cca'),
('<Ctrl+c>', 'yellow', 'c', 'message-info cmd-Ccc'),
('<Ctrl+c>', 'yellow', '<Ctrl+c>',
'message-info cmd-CcCc'),
('<Ctrl+c>', 'yellow', '<Ctrl+x>',
'message-info cmd-CcCx'))
def test_suggestions_with_count(keyhint, config_stub, monkeypatch, stubs):
"""Test that a count prefix filters out commands that take no count."""
monkeypatch.setattr(objects, 'commands', {
'foo': stubs.FakeCommand(name='foo', takes_count=lambda: False),
'bar': stubs.FakeCommand(name='bar', takes_count=lambda: True),
})
bindings = {'normal': {'aa': 'foo', 'ab': 'bar'}}
config_stub.val.bindings.default = bindings
config_stub.val.bindings.commands = bindings
keyhint.update_keyhint('normal', '2a')
assert keyhint.text() == expected_text(
('a', 'yellow', 'b', 'bar'),
)
def test_special_bindings(keyhint, config_stub):
"""Ensure a prefix of '<' doesn't suggest special keys."""
bindings = {'normal': {
'<a': 'message-info cmd-<a',
'<b': 'message-info cmd-<b',
'<ctrl-a>': 'message-info cmd-ctrla',
}}
config_stub.val.bindings.default = {}
config_stub.val.bindings.commands = bindings
keyhint.update_keyhint('normal', '<')
assert keyhint.text() == expected_text(
('<', 'yellow', 'a', 'message-info cmd-<a'),
('<', 'yellow', 'b', 'message-info cmd-<b'))
def test_color_switch(keyhint, config_stub):
"""Ensure the keyhint suffix color can be updated at runtime."""
bindings = {'normal': {'aa': 'message-info cmd-aa'}}
config_stub.val.colors.keyhint.suffix.fg = '#ABCDEF'
config_stub.val.bindings.default = {}
config_stub.val.bindings.commands = bindings
keyhint.update_keyhint('normal', 'a')
assert keyhint.text() == expected_text(('a', '#ABCDEF', 'a',
'message-info cmd-aa'))
def test_no_matches(keyhint, config_stub):
"""Ensure the widget isn't visible if there are no keystrings to show."""
bindings = {'normal': {
'aa': 'message-info cmd-aa',
'ab': 'message-info cmd-ab',
}}
config_stub.val.bindings.default = {}
config_stub.val.bindings.commands = bindings
keyhint.update_keyhint('normal', 'z')
assert not keyhint.text()
assert not keyhint.isVisible()
@pytest.mark.parametrize('blacklist, expected', [
(['ab*'], expected_text(('a', 'yellow', 'a', 'message-info cmd-aa'))),
(['*'], ''),
])
def test_blacklist(keyhint, config_stub, blacklist, expected):
"""Test that blacklisted keychains aren't hinted."""
config_stub.val.keyhint.blacklist = blacklist
bindings = {'normal': {
'aa': 'message-info cmd-aa',
'ab': 'message-info cmd-ab',
'aba': 'message-info cmd-aba',
'abb': 'message-info cmd-abb',
'xd': 'message-info cmd-xd',
'xe': 'message-info cmd-xe',
}}
config_stub.val.bindings.default = {}
config_stub.val.bindings.commands = bindings
keyhint.update_keyhint('normal', 'a')
assert keyhint.text() == expected
def test_delay(qtbot, stubs, monkeypatch, config_stub, key_config_stub):
timer = stubs.FakeTimer()
monkeypatch.setattr(
'qutebrowser.misc.keyhintwidget.usertypes.Timer',
lambda *_: timer)
interval = 200
bindings = {'normal': {'aa': 'message-info cmd-aa'}}
config_stub.val.keyhint.delay = interval
config_stub.val.bindings.default = {}
config_stub.val.bindings.commands = bindings
keyhint = KeyHintView(0, None)
keyhint.update_keyhint('normal', 'a')
assert timer.isSingleShot()
assert timer.interval() == interval
| t-wissmann/qutebrowser | tests/unit/misc/test_keyhints.py | Python | gpl-3.0 | 7,603 |
# -*- coding: utf-8 -*-
#
# Copyright © 2016 Mathieu Duponchelle <[email protected]>
# Copyright © 2016 Collabora Ltd
#
# This library is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
"""
A set of extensions with no external dependencies
"""
import sys
from hotdoc.extensions.syntax_highlighting.syntax_extension import (
SyntaxHighlightingExtension)
from hotdoc.extensions.search.search_extension import SearchExtension
from hotdoc.extensions.tags.tag_extension import TagExtension
from hotdoc.extensions.devhelp.devhelp_extension import DevhelpExtension
from hotdoc.extensions.license.license_extension import LicenseExtension
from hotdoc.extensions.git_upload.git_upload_extension import (
GitUploadExtension)
from hotdoc.extensions.edit_on_github.edit_on_github_extension import (
EditOnGitHubExtension)
if sys.version_info[1] >= 5:
from hotdoc.extensions.dbus.dbus_extension import DBusExtension
def get_extension_classes():
"""
Hotdoc's setuptools entry point
"""
res = [SyntaxHighlightingExtension, SearchExtension, TagExtension,
DevhelpExtension, LicenseExtension, GitUploadExtension,
EditOnGitHubExtension]
if sys.version_info[1] >= 5:
res += [DBusExtension]
try:
from hotdoc.extensions.c.c_extension import CExtension
res += [CExtension]
except ImportError:
pass
try:
from hotdoc.extensions.gi.gi_extension import GIExtension
res += [GIExtension]
except ImportError:
pass
try:
from hotdoc.extensions.gst.gst_extension import GstExtension
res += [GstExtension]
except ImportError:
pass
return res
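# Illustrative sketch (not part of the original module): list which extension
# classes survived the optional imports in the current environment.
def _print_available_extensions():
    for klass in get_extension_classes():
        print(klass.__name__)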
| thiblahute/hotdoc | hotdoc/extensions/__init__.py | Python | lgpl-2.1 | 2,294 |
# -*- coding: utf-8 -*-
#
# File: src/webframe/management/commands/pref.py
# Date: 2020-04-22 21:35
# Author: Kenson Man <[email protected]>
# Desc: Import / Create / Update / Delete preference
#
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from django.db.models import Q
from pathlib import Path
from webframe.functions import TRUE_VALUES, LogMessage as lm, getTime
from webframe.models import Preference, AbstractPreference
from uuid import UUID
import logging, os, glob, sys, re
logger=logging.getLogger('webframe.commands.prefs')
class Command(BaseCommand):
   help = '''Manipulate the preferences in the database, including insert/update/delete/view/import/export/gensecret/gendoc; importing supports csv|xlsx files.'''
def __getIndent__(self, indent=0, ch=' '):
return ch*indent
def create_parser(self, cmdName, subcommand, **kwargs):
parser=super().create_parser(cmdName, subcommand, **kwargs)
parser.epilog='''Example:\r\n
\tpref import path_to_prefs #Import a folder or a csv/xlsx file\r\n
\tpref set ABC --value="def" #Set the preference "ABC" to value "def"\r\n
\tpref gensecret #Generate the encryption secret; PLEASE backup in secure way.\r\n
\tpref gendoc prefsDoc.html #Generate the documentation and save as as output.html
'''
return parser
def add_arguments(self, parser):
#Default Value
pattern='Pref({pref.id}:{pref.name}): {pref.value}'
action='show'
max=256
wildcard='*'
tmpl='webframe/prefsDoc.html'
#Adding arguments
parser.add_argument('action', type=str, help='The action to be taken. One of import/export/show/set/delete/gensecret/gendoc; Default is {0}'.format(action), default=action)
parser.add_argument('name', type=str, nargs='?', help='[import/export/show/set/delete/gendoc]; The name of the preference or path of importing/exporting file (csv|xlsx);')
parser.add_argument('--file', dest='file', type=str, help='[import/export/gendoc]; The file path for import/export/output.')
parser.add_argument('--value', dest='value', type=str, help='[set/delete]; The value of the preference;', default=None)
parser.add_argument('--owner', dest='owner', type=str, help='[set/delete]; The owner of the preference; Optional;', default=None)
parser.add_argument('--noowner', dest='noowner', action='store_true', help='[show/set/delete]; The target preference has no owner; Optional; Default False')
parser.add_argument('--parent', dest='parent', type=str, help='[show/set/delete]; The parent\'s name of the preference. Optional;', default=None)
parser.add_argument('--noparent', dest='noparent', action='store_true', help='[show/set/delete]; The target preference has no parent; Optional; Default False')
parser.add_argument('--pattern', dest='pattern', type=str, help='[show]; The output pattern. {0}'.format(pattern), default=pattern)
parser.add_argument('--max', dest='max', type=int, help='[show]; The maximum number of preference to show. Default is {0}'.format(max), default=max)
parser.add_argument('--wildcard', dest='wildcard', type=str, help='[show]; Specify the wildcard; Default is {0}'.format(wildcard), default=wildcard)
#Importing
parser.add_argument('--sep', dest='separator', type=str, default=',', help='[import]; The separator when CSV importing; Default \",\"')
parser.add_argument('--encoding', dest='encoding', type=str, default='utf-8', help='[import]; The encoding when CSV importing; Default \"utf-8\"')
parser.add_argument('--quotechar', dest='quotechar', type=str, default='\"', help='[import]; The quote-char when CSV importing; Default double quote: \"')
parser.add_argument('--filepath', dest='filepath', action='store_true', help='[import]; Import the file-path in preferences; Default False')
      parser.add_argument('--force', '-f', dest='force', action='store_true', help='[import]; Force the import', default=False)
#Generate Doc
parser.add_argument('--tmpl', dest='tmpl', type=str, help="[gendoc]; The template name when generating document; Default: {0}".format(tmpl), default=tmpl)
   def __get_owner__(self, owner=None):
      owner=owner if owner else self.kwargs['owner']
      if not owner: return None
      logger.debug('Getting owner by: "%s"', owner)
      return get_user_model().objects.get(username=owner)
def __get_parent__(self, parent=None):
parent=parent if parent else self.kwargs['parent']
if parent:
try:
#Get parent by uuid
return Preference.objects.get(id=parent)
except:
try:
#Get parent by name
return Preference.objects.get(name=parent)
except:
pass
return None
def __get_pref__(self, **kwargs):
owner=kwargs['owner'] if 'owner' in kwargs else self.__get_owner__()
parent=kwargs['parent'] if 'parent' in kwargs else self.__get_parent__()
name=kwargs['name'] if 'name' in kwargs else self.kwargs['name']
lang=kwargs['lang'] if 'lang' in kwargs else None
if self.kwargs['filepath']: name=os.path.basename(name)
if self.kwargs['parent'] and parent==None:
raise Preference.DoesNotExist('Parent Preference not found: {0}'.format(self.kwargs['parent']))
rst=Preference.objects.all()
if name and name!='*':
rst=rst.filter(name=name)
if owner:
rst=rst.filter(owner=owner)
elif self.kwargs['noowner']:
rst=rst.filter(owner__isnull=True)
if parent:
rst=rst.filter(parent=parent)
elif self.kwargs['noparent']:
rst=rst.filter(parent__isnull=True)
if self.kwargs['filepath']:
rst=rst.filter(tipe=AbstractPreference.TYPE_FILEPATH)
rst=rst.order_by('owner', 'parent', 'sequence', 'name')
return rst
def __get_name__( self, name ):
'''
Get the name and sequence according to the name.
@param name The string including the sequence and name. For example, '01.Target' will return a tuple (1, 'Target')
@return A tuple including the sequence and the name
'''
p=re.search(r'^\d+\.', name)
if p:
s=p.group(0)
return name[len(s):].strip(), int(name[0:len(s)-1])
return (name, sys.maxsize if hasattr(sys, 'maxsize') else sys.maxint) #Default append
def output( self, pref, pattern=None ):
pattern=pattern if pattern else self.kwargs['pattern']
print(pattern.format(pref=pref))
pattern=' {0}'.format(pattern)
for ch in pref.childs:
self.output(ch, pattern)
def handle(self, *args, **kwargs):
verbosity=int(kwargs['verbosity'])
if verbosity==3:
logger.setLevel(logging.DEBUG)
elif verbosity==2:
logger.setLevel(logging.INFO)
elif verbosity==1:
logger.setLevel(logging.WARNING)
else:
logger.setLevel(logging.ERROR)
self.kwargs=kwargs
action=kwargs['action']
if action=='import':
self.imp()
elif action=='create': #for backward compatibility
self.set()
elif action=='update': #for backward compatibility
self.set()
elif action=='set':
self.set()
elif action=='delete':
self.delete()
elif action=='show':
self.show()
elif action=='gensecret':
self.gensecret()
elif action=='gendoc':
self.gendoc()
elif action=='export':
self.expCsv()
else:
logger.warning('Unknown action: {0}'.format(action))
      logger.warning('DONE!')
def show(self):
logger.info('Showing the preference ...')
q=Preference.objects.all()
if self.kwargs['name']:
logger.info(' with the name filter: {0}'.format(self.kwargs['name']))
if self.kwargs['wildcard'] in self.kwargs['name']:
q=q.filter(name__icontains=self.kwargs['name'].replace(self.kwargs['wildcard'], ''))
else:
q=q.filter(name=self.kwargs['name'])
if self.kwargs['value']:
logger.info(' with the value filter: {0}'.format(self.kwargs['value']))
q=q.filter(value__icontains=self.kwargs['value'])
if self.kwargs['owner']:
logger.info(' which belongs to user: {0}'.format(self.kwargs['owner']))
q=q.filter(owner__username=self.kwargs['owner'])
if self.kwargs['parent']:
logger.info(' which belongs to preference: {0}'.format(self.kwargs['parent']))
q=q.filter(parent__name__iexact=self.kwargs['parent'])
else:
q=q.filter(parent__isnull=True)
for p in q:
self.output(p)
      logger.warning('{0} preference(s) have been shown'.format(len(q)))
def set(self):
with transaction.atomic():
try:
pref=self.__get_pref__()
if pref.count()<1: raise Preference.DoesNotExist
cnt=pref.update(value=self.kwargs['value'])
            logger.info('{0} preference(s) have been updated'.format(cnt))
except Preference.DoesNotExist:
            p=Preference(name=self.kwargs['name'], value=self.kwargs['value'], owner=self.__get_owner__(), parent=self.__get_parent__())
p.save()
logger.info('The preference<{0}> has been created with value: {1}'.format(p.name, p.value))
def delete(self):
pref=self.__get_pref__()
cnt=pref.count()
pref.delete()
      logger.warning('{0} preference(s) have been deleted'.format(cnt))
def expRow( self, wr, pref, indent=0 ):
'''
      Export the specified preference (and its children) to csv.
'''
cnt=0
tab=self.__getIndent__(indent)
logger.debug(lm('{0}Exporting preference: {1}::{2}...', tab, pref.id, pref.name))
wr.writerow([
pref.name # [0]
, pref.realValue # [1]
, pref.parent.id if pref.parent else '' # [2]
, pref.owner.username if pref.owner else '' # [3]
, pref.helptext # [4]
, Preference.TYPES[pref.tipe][1] # [5]
, pref.encrypted # [6]
, pref.regex # [7]
])
cnt+=1
for p in pref.childs:
cnt+=self.expRow(wr, p, indent+3)
return cnt
def expCsv( self ):
'''
      Export the specified list of preferences to csv.
'''
import csv
f=self.kwargs['file']
with open(f, 'w', encoding=self.kwargs['encoding']) as fp:
wr=csv.writer(fp, delimiter=self.kwargs['separator'], quotechar=self.kwargs['quotechar'], quoting=csv.QUOTE_MINIMAL, skipinitialspace=True)
cnt=0
for p in self.__get_pref__():
cnt+=self.expRow(wr, p, 0)
logger.info(lm('Exported {0} records', cnt))
def improw( self, cols, idx=0 ):
try:
name=cols[0]
val=cols[1]
parent=self.__get_parent__(cols[2])
owner=self.__get_owner__(cols[3])
helptext=cols[4]
tipe=cols[5]
encrypted=cols[6] in TRUE_VALUES
regex=cols[7]
lang=cols[8] if len(cols)>8 else None
logger.debug(' Importing row: {0}: {1} [{2}]'.format(idx, name, 'encrypted' if encrypted else 'clear-text'))
self.kwargs['name']=name
pref=self.__get_pref__(name=name, owner=owner, parent=parent, lang=lang)
if pref.count()<1: raise Preference.DoesNotExist
for p in pref:
p.encrypted=encrypted
p.helptext=helptext
p.tipe=tipe
p.regex=regex
#The value must be the last steps to set due to validation. Otherwise, once importing/assign a new value into this field, the last validation rule may be applied incorrectly
p.value=val
p.save()
except Preference.DoesNotExist:
Preference(name=name, _value=val, owner=owner, parent=parent, encrypted=encrypted, helptext=helptext, regex=regex, lang=lang).save()
except:
logger.debug(cols)
logger.exception('Error when handling the column')
raise
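   # Illustrative column layout consumed by improw above (the sample values
   # are assumptions, not shipped fixtures):
   #   name,      value,     parent, owner, helptext,        type,   encrypted, regex, lang
   #   SMTP_HOST, localhost, ,       admin, Mail relay host, String, False,     ,      en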
def impXlsx( self, f ):
'''
Import xlsx file.
'''
from openpyxl import load_workbook
wb=load_workbook(filename=f)
ws=wb.active
logger.info(' Importing worksheet: {0}!{1}'.format(f, ws.title))
cnt=0
with transaction.atomic():
for r in range(1, ws.max_row+1):
cols=list()
name=ws.cell(row=r, column=1).value
if isinstance(name, str): name=name.strip()
if not name: continue #Skip the row when it has no pref.name
            if r==1 and (name.upper()=='ID' or name.upper()=='NAME' or name.upper()=='ID/NAME'): continue #Skip the first row if header row
cols.append(name) #Name/ID
cols.append(ws.cell(row=r, column=2).value) #Value
cols.append(ws.cell(row=r, column=3).value) #Parent
cols.append(ws.cell(row=r, column=4).value) #Owner
            cols.append(ws.cell(row=r, column=5).value) #Helptext
cols.append(ws.cell(row=r, column=6).value) #Tipe
cols.append(ws.cell(row=r, column=7).value) #encrypted
self.improw( cols, r )
cnt+=1
logger.info(' Imported {0} row(s)'.format(cnt))
def impCsv( self, f ):
'''
Import the csv file.
'''
import csv
with transaction.atomic():
logger.info(' Importing csv: {0}'.format(f))
cnt=0
with open(f, 'r', encoding=self.kwargs['encoding']) as fp:
if self.kwargs['quotechar']:
rows=csv.reader(fp, delimiter=self.kwargs['separator'], quotechar=self.kwargs['quotechar'], quoting=csv.QUOTE_MINIMAL, skipinitialspace=True)
else:
rows=csv.reader(fp, delimiter=self.kwargs['separator'], quoting=csv.QUOTE_NONE, skipinitialspace=True)
for row in rows:
if len(row)<1: continue #Skip the empty row
name=row[0].strip()
if not name: continue #Skip the row when it has no name
if cnt==0 and (name.upper()=='ID' or name.upper()=='NAME' or name.upper()=='ID/NAME'): continue #Skip the first row if header row
self.improw( row, cnt )
cnt+=1
logger.info(' Imported {0} row(s)'.format(cnt))
def impdir( self, d ):
if os.path.isdir(d):
logger.info('Importing directory: {0}'.format(d))
else:
logger.warning('This is not the directory: {0}'.format(d))
return
cnt=0
with transaction.atomic():
p=Preference.objects.pref('IMPORTED_PREFERENCES', returnValue=False)
p.helptext='<p>Sysetm use only! <strong>DO NOT MODIFY</strong> youself unless you understand the risk.</p>'
p.save()
for f in os.listdir(d):
if not (f.upper().endswith('.XLSX') or f.upper().endswith('.CSV')): continue #only support *.xlsx and *.csv
f=os.path.join(d, f)
try:
Preference.objects.get(name=f, parent=p)
if self.kwargs['force']: raise Preference.DoesNotExist
except Preference.DoesNotExist:
self.impfile( f )
cnt+=1
Preference(name=f, parent=p).save()
logger.debug('Imported {0} file(s)'.format(cnt))
def impfile( self, f ):
if not (os.path.isfile(f) and os.access(f, os.R_OK)):
logger.warning('The file is not readable: {0}'.format(f))
return
fn=f.lower()
if fn.endswith('.xlsx'):
self.impXlsx(f)
elif fn.endswith('.csv'):
self.impCsv(f)
else:
logger.info('Unsupported file: {0}'.format(f))
def imppath( self, p, parent=None):
name, seq=self.__get_name__(os.path.basename(p))
if os.path.isdir(p):
try:
pref=self.__get_pref__(name=name)
if pref.count()<1: raise Preference.DoesNotExist
pref=pref[0]
except Preference.DoesNotExist:
pref=Preference(name=name, parent=parent)
pref.tipe=AbstractPreference.TYPE_FILEPATH
pref.sequence=seq
pref.save()
for f in os.listdir(p):
path=os.path.join(p, f)
self.imppath(path, pref)
#Handling the ordering after import all the childs
ord=1
for c in pref.childs:
c.sequence=ord
c.save()
ord+=1
else:
try:
pref=self.__get_pref__(name=name)
if pref.count()<1: raise Preference.DoesNotExist
pref=pref[0]
except Preference.DoesNotExist:
pref=Preference(name=name, parent=parent)
pref.pathValue=p if os.path.isabs(p) else os.path.abspath(p)
pref.tipe=AbstractPreference.TYPE_FILEPATH
pref.sequence=seq
pref.save()
def imp(self):
disableOrder=getattr(settings, 'DISABLE_REORDER', False)
setattr(settings, 'DISABLE_REORDER', True) #Disable the re-ordering features during importing
try:
f=self.kwargs['file']
if self.kwargs['filepath']:
self.imppath(f)
elif os.path.isdir(f):
self.impdir(f)
elif os.path.isfile(f):
self.impfile(f)
finally:
setattr(settings, 'DISABLE_REORDER', disableOrder) #Resume the re-ordering features after importing
def gensecret(self):
from webframe.models import AbstractPreference
key=AbstractPreference.__getSecret__()
logger.warning(lm('Your secret is: {0}', key))
def gendoc(self):
from django.shortcuts import render
from django.template import loader, Template, Context
from webframe.providers import template_injection, fmt_injection
      tmpl=self.kwargs.get('tmpl', 'webframe/prefsDoc.html')
logger.warning(lm('Generating the documents according template: {0}', tmpl))
tmpl=loader.get_template(tmpl)
params=dict()
params.update(template_injection(None))
params.update(fmt_injection(None))
#params['target']=Preference.objects.filter(parent__isnull=True)
params['target']=self.__get_pref__()
params['TYPES']=Preference.TYPES
params['now']=getTime('now')
txt=tmpl.render(params)
output=self.kwargs.get('file')
if not output: output='prefsDoc.html'
logger.warning(lm('Generated! Outputing into: {0}', output))
with open(output, 'w') as f:
f.write(txt)
| kensonman/webframe | management/commands/pref.py | Python | apache-2.0 | 18,742 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) Camptocamp SA
# Author: Arnaud Wüst
#
#
# This file is part of the c2c_report_tools module.
#
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from openerp.osv import osv
import time
import datetime
from datetime import timedelta
import os.path
import openerp.tools
import re
class c2c_helper(osv.osv):
_name = "c2c_helper"
""" a class that provide useful methods for template development """
# format of the dates
timeformat = " %d.%m.%y"
@staticmethod
def unique(seq, keepstr=True):
"""return a list whithout duplicated elements
found here: http://groups.google.com/group/comp.lang.python/msg/7b0d896a96d907f3
"""
t = type(seq)
if t==str:
t = (list, ''.join)[bool(keepstr)]
seen = []
return t(c for c in seq if not (c in seen or seen.append(c)))
@staticmethod
def comma_me(amount, decimals=2, separator="'"):
""" transform a number into a number with thousand separators """
if type(amount) is int:
amount = str(('%.'+str(decimals)+'f')%float(amount))
elif type(amount) is float :
amount = str(('%.'+str(decimals)+'f')%amount)
else :
amount = str(amount)
orig = amount
        new = re.sub(r"^(-?\d+)(\d{3})", r"\g<1>" + separator + r"\g<2>", amount)
if orig == new:
return new
else:
return c2c_helper.comma_me(new)
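    # For example (illustrative): comma_me(1234567.8) returns "1'234'567.80"
    # and comma_me(-9876543, decimals=0) returns "-9'876'543".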
@staticmethod
def format_date(date, timeformat=None):
"""transform an english formated date into a swiss formated date (the format define is define as a class constant c2c_helper.timeformat """
if timeformat == None:
timeformat = c2c_helper.timeformat
if date:
return time.strftime(timeformat, time.strptime(date, "%Y-%m-%d"))
return None
@staticmethod
def parse_datetime(datetime):
"""parse an openerp datetime value and return a python time struct """
return time.strptime(datetime, "%Y-%m-%d %H:%M:%S")
@staticmethod
def parse_date(date):
"""parse an openerp date value and return a python time struct """
return time.strptime(date, "%Y-%m-%d")
@staticmethod
def encode_entities(s):
"""replace template knotty symbols by their html code"""
s = s.replace('&', '&')
#an opening symbol without a closing symbol would crash the server...
if s.count('>') != s.count('<'):
s= s.replace('<', '<')
s= s.replace('>', '>')
return s
@staticmethod
def ellipsis(string, maxlen=100, ellipsis = '...'):
"""cut a string if its length is greater than maxlen and add ellipsis (...) after """
ellipsis = ellipsis or ''
if len(string) > maxlen:
            #the string must be cut and the ellipsis appended
            result = string[:maxlen - len(ellipsis)] + ellipsis
else:
result = string
return result
@staticmethod
def exchange_currency(cr, amount, from_currency_id, to_currency_id, date=None):
""" exchange an amount from a currency to another.
date format: python struct_time returned by gmtime()
"""
if amount == 0:
return 0
#no need to compute anything if we do not need to exchange the amount
if from_currency_id == to_currency_id:
return amount
if from_currency_id == None or to_currency_id == None:
raise osv.except_osv('Param Errors', 'Currencies can not be None')
#default date
if not date:
date = time.gmtime(time.time())
#format the date for the sql
date_sql = time.strftime("%Y-%m-%d", date)
currencies_sql = ",".join(map(str,[from_currency_id, to_currency_id]))
#get all the rates defines for the two currencies
query = '''SELECT currency_id, rate, name
FROM res_currency_rate
WHERE currency_id IN (%s)
AND name <= '%s'
ORDER BY name ASC ''' % (currencies_sql, date_sql)
cr.execute(query)
rates_db = cr.fetchall()
rates = {}
#update the currencies rate until the rate's date is greater than the given one
#in order to get the last rate defined before the date
for rate in rates_db:
if time.strptime(rate[2], "%Y-%m-%d") <= date:
rates[rate[0]] = rate[1]
#exchange
result = False
if from_currency_id in rates and to_currency_id in rates:
result = amount * rates[to_currency_id] / rates[from_currency_id]
return result
@staticmethod
def week_first_day(date):
""" return the date of the first day of the week concerned by the given date
'date' is a datetime
"""
if date.weekday() == 0: #monday
return date
return date - timedelta(days=date.weekday())
@staticmethod
def week_last_day(date):
""" return the date of the last day of the week concerned by the given date
last_day_name can be "saturday" or "sunday"
'date' is a datetime
"""
if date.weekday() == 6: #sunday
return date
return date + timedelta(days=6-date.weekday())
@staticmethod
def month_first_day(dt, d_years=0, d_months=0):
"""
return the first day of the month concerned by the given date (param dt)
        ignore the optional params; they are just here to be used by c2c_helper.month_last_day
found here: http://code.activestate.com/recipes/476197/
"""
#convert the param to a datetime for processing
my_date = dt
if isinstance(dt,time.struct_time):
my_date = datetime.datetime(*dt[:6])
# d_years, d_months are "deltas" to apply to dt
y, m = my_date.year + d_years, my_date.month + d_months
a, m = divmod(m-1, 12)
res = datetime.datetime(y+a, m+1, 1,0,0,0)
#return a struct_time if the param was a struct_time
if isinstance(dt, time.struct_time):
res = res.timetuple()
return res
@staticmethod
def month_last_day(dt):
"""
return the last day of the month
found here: http://code.activestate.com/recipes/476197/
"""
my_date = dt
if isinstance(dt,time.struct_time):
my_date = datetime.datetime(*dt[:6])
res = c2c_helper.month_first_day(my_date, 0, 1) + datetime.timedelta(-1)
if isinstance(dt,time.struct_time):
res = res.timetuple()
return res
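# Illustrative sketch (not part of the original module): the week/month
# helpers above, exercised on a fixed date (2020-04-15 is a Wednesday).
def _example_date_helpers():
    d = datetime.datetime(2020, 4, 15)
    assert c2c_helper.week_first_day(d) == datetime.datetime(2020, 4, 13)
    assert c2c_helper.week_last_day(d) == datetime.datetime(2020, 4, 19)
    assert c2c_helper.month_first_day(d) == datetime.datetime(2020, 4, 1)
    assert c2c_helper.month_last_day(d) == datetime.datetime(2020, 4, 30)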
| VitalPet/c2c-rd-addons | c2c_reporting_tools_chricar/c2c_helper.py | Python | agpl-3.0 | 8,306 |
"""Helper methods for classification."""
import numpy
from gewittergefahr.gg_utils import error_checking
def classification_cutoffs_to_ranges(class_cutoffs, non_negative_only=True):
"""Converts classification cutoffs to min/max for each class.
C = number of classes
c = C - 1 = number of cutoffs
:param class_cutoffs: length-c numpy array of class cutoffs.
:param non_negative_only: Boolean flag. If True, class cutoffs/minima/
maxima must be non-negative.
:return: class_cutoffs: Same as input, but containing only unique values and
sorted in ascending order.
:return: class_minima: length-C numpy array of class minima, sorted in
ascending order.
:return: class_maxima: length-C numpy array of class maxima, sorted in
ascending order.
"""
error_checking.assert_is_boolean(non_negative_only)
error_checking.assert_is_numpy_array(class_cutoffs, num_dimensions=1)
if non_negative_only:
error_checking.assert_is_greater_numpy_array(class_cutoffs, 0.)
else:
error_checking.assert_is_numpy_array_without_nan(class_cutoffs)
class_cutoffs = numpy.sort(numpy.unique(class_cutoffs))
num_classes = len(class_cutoffs) + 1
class_minima = numpy.full(num_classes, numpy.nan)
class_maxima = numpy.full(num_classes, numpy.nan)
for k in range(num_classes):
if k == 0:
class_maxima[k] = class_cutoffs[k]
if non_negative_only:
class_minima[k] = 0.
else:
class_minima[k] = -numpy.inf
elif k == num_classes - 1:
class_minima[k] = class_cutoffs[k - 1]
class_maxima[k] = numpy.inf
else:
class_minima[k] = class_cutoffs[k - 1]
class_maxima[k] = class_cutoffs[k]
return class_cutoffs, class_minima, class_maxima
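# Illustrative sketch (not part of the original module): two cutoffs yield
# three classes, non-negative, with +inf as the last maximum.
def _example_cutoffs_to_ranges():
    cutoffs, class_minima, class_maxima = classification_cutoffs_to_ranges(
        numpy.array([25., 50.]), non_negative_only=True)
    # class_minima -> [0., 25., 50.]; class_maxima -> [25., 50., inf]
    return class_minima, class_maxima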
def classify_values(input_values, class_cutoffs, non_negative_only=True):
"""Assigns each element of input array to one class.
N = number of values to classify
C = number of classes
c = C - 1 = number of cutoffs
:param input_values: length-N numpy array of values to classify.
:param class_cutoffs: length-c numpy array of class cutoffs.
:param non_negative_only: Boolean flag. If True, all values (class ranges
and values to classify) must be non-negative.
:return: class_labels: length-N numpy array of integer class labels.
"""
_, class_minima, class_maxima = classification_cutoffs_to_ranges(
class_cutoffs, non_negative_only=non_negative_only)
error_checking.assert_is_numpy_array_without_nan(input_values)
error_checking.assert_is_numpy_array(input_values, num_dimensions=1)
if non_negative_only:
error_checking.assert_is_geq_numpy_array(input_values, 0.)
num_inputs = len(input_values)
class_labels = numpy.full(num_inputs, -1, dtype=int)
num_classes = len(class_minima)
for k in range(num_classes):
these_flags = numpy.logical_and(
input_values >= class_minima[k], input_values < class_maxima[k])
these_indices = numpy.where(these_flags)[0]
class_labels[these_indices] = k
return class_labels
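# Illustrative sketch (not part of the original module): class bounds are
# inclusive below and exclusive above, so a value equal to a cutoff lands in
# the upper class.
def _example_classify_values():
    labels = classify_values(
        numpy.array([3., 25., 49.9, 77.]), numpy.array([25., 50.]),
        non_negative_only=True)
    # labels -> [0, 1, 1, 2]
    return labels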
| thunderhoser/GewitterGefahr | gewittergefahr/gg_utils/classification_utils.py | Python | mit | 3,206 |